Mirror of https://github.com/intel/llvm.git (synced 2026-01-17 06:40:01 +08:00)
Revert "[InstCombine] Simplify and/or of icmp eq with op replacement (#70335)"
This reverts commit 1770a2e325.
Stage 2 llvm-tblgen crashes when generating X86GenAsmWriter.inc and
other files.
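For context, the reverted patch taught InstSimplify that in "and (icmp eq a, b), x" the equality a == b may be assumed while simplifying x, and likewise a != b in "or (icmp ne a, b), x". A minimal LLVM IR sketch of the fold, modeled on the pr69091 test updated further down (the function name is illustrative):

define i1 @fold_sketch(i32 %arg, i32 %arg1) {
  ; Whenever %icmp is false, %arg == -1, so %add == 0, %mul == 0 and %icmp2 is
  ; false as well; with the reverted patch in place, %or therefore simplified
  ; to just %icmp.
  %icmp = icmp ne i32 %arg, -1
  %add = add i32 %arg, 1
  %mul = mul i32 %add, %arg1
  %icmp2 = icmp ne i32 %mul, 0
  %or = or i1 %icmp, %icmp2
  ret i1 %or
}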
@@ -2025,52 +2025,6 @@ static Value *simplifyAndOrOfCmps(const SimplifyQuery &Q, Value *Op0,
  return nullptr;
}

static Value *simplifyWithOpReplaced(Value *V, Value *Op, Value *RepOp,
                                     const SimplifyQuery &Q,
                                     bool AllowRefinement,
                                     SmallVectorImpl<Instruction *> *DropFlags,
                                     unsigned MaxRecurse);

static Value *simplifyAndOrWithICmpEq(unsigned Opcode, Value *Op0, Value *Op1,
                                      const SimplifyQuery &Q,
                                      unsigned MaxRecurse) {
  assert((Opcode == Instruction::And || Opcode == Instruction::Or) &&
         "Must be and/or");
  ICmpInst::Predicate Pred;
  Value *A, *B;
  if (!match(Op0, m_ICmp(Pred, m_Value(A), m_Value(B))) ||
      !ICmpInst::isEquality(Pred) || !MaxRecurse--)
    return nullptr;

  auto Simplify = [&](Value *Res) -> Value * {
    // and (icmp eq a, b), x implies (a==b) inside x.
    // or (icmp ne a, b), x implies (a==b) inside x.
    // If x simplifies to true/false, we can simplify the and/or.
    if (Pred ==
        (Opcode == Instruction::And ? ICmpInst::ICMP_EQ : ICmpInst::ICMP_NE))
      return simplifyBinOp(Opcode, Op0, Res, Q, MaxRecurse);
    // If we have and (icmp ne a, b), x and for a==b we can simplify x to false,
    // then we can drop the icmp, as x will already be false in the case where
    // the icmp is false. Similar for or and true.
    if (Res == ConstantExpr::getBinOpAbsorber(Opcode, Res->getType()))
      return Op1;
    return nullptr;
  };

  // Increment MaxRecurse again, because simplifyWithOpReplaced() does its own
  // decrement.
  if (Value *Res =
          simplifyWithOpReplaced(Op1, A, B, Q, /* AllowRefinement */ true,
                                 /* DropFlags */ nullptr, MaxRecurse + 1))
    return Simplify(Res);
  if (Value *Res =
          simplifyWithOpReplaced(Op1, B, A, Q, /* AllowRefinement */ true,
                                 /* DropFlags */ nullptr, MaxRecurse + 1))
    return Simplify(Res);

  return nullptr;
}

/// Given a bitwise logic op, check if the operands are add/sub with a common
/// source value and inverted constant (identity: C - X -> ~(X + ~C)).
static Value *simplifyLogicOfAddSub(Value *Op0, Value *Op1,

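A companion sketch of the second case handled by the Simplify lambda above: in "and (icmp ne a, b), x", if x folds to false whenever a == b, the icmp is redundant and the whole and simplifies to x (function and value names are illustrative):

define i1 @absorber_sketch(i32 %a, i32 %b) {
  ; Under a == b, %sub is 0 and %x is false, i.e. %x already covers the case
  ; in which the icmp is false, so the reverted patch returned %x for %r.
  %ne = icmp ne i32 %a, %b
  %sub = sub i32 %a, %b
  %x = icmp ne i32 %sub, 0
  %r = and i1 %ne, %x
  ret i1 %r
}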
@@ -2205,13 +2159,6 @@ static Value *simplifyAndInst(Value *Op0, Value *Op1, const SimplifyQuery &Q,
      isKnownToBeAPowerOfTwo(Op0, Q.DL, /*OrZero*/ true, 0, Q.AC, Q.CxtI, Q.DT))
    return Constant::getNullValue(Op0->getType());

  if (Value *V =
          simplifyAndOrWithICmpEq(Instruction::And, Op0, Op1, Q, MaxRecurse))
    return V;
  if (Value *V =
          simplifyAndOrWithICmpEq(Instruction::And, Op1, Op0, Q, MaxRecurse))
    return V;

  if (Value *V = simplifyAndOrOfCmps(Q, Op0, Op1, true))
    return V;

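Both in simplifyAndInst above and in simplifyOrInst below, the helper was called with the operands in both orders, so the icmp could sit on either side of the and/or. A commuted variant of the previous sketch (names illustrative):

define i1 @commuted_sketch(i32 %a, i32 %b) {
  ; Same fold as absorber_sketch, but the icmp is the second operand of the
  ; and; the (Op1, Op0) call is what catches this form.
  %sub = sub i32 %a, %b
  %x = icmp ne i32 %sub, 0
  %ne = icmp ne i32 %a, %b
  %r = and i1 %x, %ne
  ret i1 %r
}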
@@ -2488,13 +2435,6 @@ static Value *simplifyOrInst(Value *Op0, Value *Op1, const SimplifyQuery &Q,
      match(Op0, m_LShr(m_Specific(X), m_Specific(Y))))
    return Op1;

  if (Value *V =
          simplifyAndOrWithICmpEq(Instruction::Or, Op0, Op1, Q, MaxRecurse))
    return V;
  if (Value *V =
          simplifyAndOrWithICmpEq(Instruction::Or, Op1, Op0, Q, MaxRecurse))
    return V;

  if (Value *V = simplifyAndOrOfCmps(Q, Op0, Op1, false))
    return V;

@@ -20,16 +20,20 @@ define hidden void @julia_tryparse_internal_45896() #0 {
; CHECK-NEXT: .LBB0_6: # %fail194
; CHECK-NEXT: .LBB0_7: # %L670
; CHECK-NEXT: li r5, -3
; CHECK-NEXT: cmpdi r3, 0
; CHECK-NEXT: sradi r4, r3, 63
; CHECK-NEXT: rldic r5, r5, 4, 32
; CHECK-NEXT: crnot 4*cr5+lt, eq
; CHECK-NEXT: mulhdu r3, r3, r5
; CHECK-NEXT: maddld r6, r4, r5, r3
; CHECK-NEXT: cmpld cr1, r6, r3
; CHECK-NEXT: mulhdu. r3, r4, r5
; CHECK-NEXT: bc 4, 4*cr5+lt, .LBB0_10
; CHECK-NEXT: # %bb.8: # %L670
; CHECK-NEXT: crorc 4*cr5+lt, 4*cr1+lt, eq
; CHECK-NEXT: bc 4, 4*cr5+lt, .LBB0_9
; CHECK-NEXT: # %bb.8: # %L917
; CHECK-NEXT: .LBB0_9: # %L994
; CHECK-NEXT: bc 4, 4*cr5+lt, .LBB0_10
; CHECK-NEXT: # %bb.9: # %L917
; CHECK-NEXT: .LBB0_10: # %L994
top:
  %0 = load i64, ptr undef, align 8
  %1 = icmp ne i64 %0, 0

@@ -47,7 +47,11 @@ define i1 @n2_wrong_size(i4 %size0, i4 %size1, i4 %nmemb) {

define i1 @n3_wrong_pred(i4 %size, i4 %nmemb) {
; CHECK-LABEL: @n3_wrong_pred(
; CHECK-NEXT: ret i1 false
; CHECK-NEXT: [[CMP:%.*]] = icmp eq i4 [[SIZE:%.*]], 0
; CHECK-NEXT: [[SMUL:%.*]] = tail call { i4, i1 } @llvm.smul.with.overflow.i4(i4 [[SIZE]], i4 [[NMEMB:%.*]])
; CHECK-NEXT: [[SMUL_OV:%.*]] = extractvalue { i4, i1 } [[SMUL]], 1
; CHECK-NEXT: [[AND:%.*]] = and i1 [[SMUL_OV]], [[CMP]]
; CHECK-NEXT: ret i1 [[AND]]
;
  %cmp = icmp eq i4 %size, 0 ; not 'ne'
  %smul = tail call { i4, i1 } @llvm.smul.with.overflow.i4(i4 %size, i4 %nmemb)
@@ -59,7 +63,10 @@ define i1 @n3_wrong_pred(i4 %size, i4 %nmemb) {
define i1 @n4_not_and(i4 %size, i4 %nmemb) {
; CHECK-LABEL: @n4_not_and(
; CHECK-NEXT: [[CMP:%.*]] = icmp ne i4 [[SIZE:%.*]], 0
; CHECK-NEXT: ret i1 [[CMP]]
; CHECK-NEXT: [[SMUL:%.*]] = tail call { i4, i1 } @llvm.smul.with.overflow.i4(i4 [[SIZE]], i4 [[NMEMB:%.*]])
; CHECK-NEXT: [[SMUL_OV:%.*]] = extractvalue { i4, i1 } [[SMUL]], 1
; CHECK-NEXT: [[AND:%.*]] = or i1 [[SMUL_OV]], [[CMP]]
; CHECK-NEXT: ret i1 [[AND]]
;
  %cmp = icmp ne i4 %size, 0
  %smul = tail call { i4, i1 } @llvm.smul.with.overflow.i4(i4 %size, i4 %nmemb)

@@ -47,7 +47,11 @@ define i1 @n2_wrong_size(i4 %size0, i4 %size1, i4 %nmemb) {

define i1 @n3_wrong_pred(i4 %size, i4 %nmemb) {
; CHECK-LABEL: @n3_wrong_pred(
; CHECK-NEXT: ret i1 false
; CHECK-NEXT: [[CMP:%.*]] = icmp eq i4 [[SIZE:%.*]], 0
; CHECK-NEXT: [[UMUL:%.*]] = tail call { i4, i1 } @llvm.umul.with.overflow.i4(i4 [[SIZE]], i4 [[NMEMB:%.*]])
; CHECK-NEXT: [[UMUL_OV:%.*]] = extractvalue { i4, i1 } [[UMUL]], 1
; CHECK-NEXT: [[AND:%.*]] = and i1 [[UMUL_OV]], [[CMP]]
; CHECK-NEXT: ret i1 [[AND]]
;
  %cmp = icmp eq i4 %size, 0 ; not 'ne'
  %umul = tail call { i4, i1 } @llvm.umul.with.overflow.i4(i4 %size, i4 %nmemb)
@@ -59,7 +63,10 @@ define i1 @n3_wrong_pred(i4 %size, i4 %nmemb) {
define i1 @n4_not_and(i4 %size, i4 %nmemb) {
; CHECK-LABEL: @n4_not_and(
; CHECK-NEXT: [[CMP:%.*]] = icmp ne i4 [[SIZE:%.*]], 0
; CHECK-NEXT: ret i1 [[CMP]]
; CHECK-NEXT: [[UMUL:%.*]] = tail call { i4, i1 } @llvm.umul.with.overflow.i4(i4 [[SIZE]], i4 [[NMEMB:%.*]])
; CHECK-NEXT: [[UMUL_OV:%.*]] = extractvalue { i4, i1 } [[UMUL]], 1
; CHECK-NEXT: [[AND:%.*]] = or i1 [[UMUL_OV]], [[CMP]]
; CHECK-NEXT: ret i1 [[AND]]
;
  %cmp = icmp ne i4 %size, 0
  %umul = tail call { i4, i1 } @llvm.umul.with.overflow.i4(i4 %size, i4 %nmemb)

@@ -392,7 +392,9 @@ define i1 @is_pow2_ctpop_wrong_pred1(i32 %x) {
; CHECK-LABEL: @is_pow2_ctpop_wrong_pred1(
; CHECK-NEXT: [[T0:%.*]] = tail call i32 @llvm.ctpop.i32(i32 [[X:%.*]]), !range [[RNG0]]
; CHECK-NEXT: [[CMP:%.*]] = icmp ugt i32 [[T0]], 2
; CHECK-NEXT: ret i1 [[CMP]]
; CHECK-NEXT: [[NOTZERO:%.*]] = icmp ne i32 [[X]], 0
; CHECK-NEXT: [[R:%.*]] = and i1 [[NOTZERO]], [[CMP]]
; CHECK-NEXT: ret i1 [[R]]
;
  %t0 = tail call i32 @llvm.ctpop.i32(i32 %x)
  %cmp = icmp ugt i32 %t0, 2
@@ -944,7 +946,9 @@ define i1 @is_pow2or0_ctpop_wrong_pred1(i32 %x) {
; CHECK-LABEL: @is_pow2or0_ctpop_wrong_pred1(
; CHECK-NEXT: [[T0:%.*]] = tail call i32 @llvm.ctpop.i32(i32 [[X:%.*]]), !range [[RNG0]]
; CHECK-NEXT: [[CMP:%.*]] = icmp ne i32 [[T0]], 1
; CHECK-NEXT: ret i1 [[CMP]]
; CHECK-NEXT: [[ISZERO:%.*]] = icmp eq i32 [[X]], 0
; CHECK-NEXT: [[R:%.*]] = or i1 [[ISZERO]], [[CMP]]
; CHECK-NEXT: ret i1 [[R]]
;
  %t0 = tail call i32 @llvm.ctpop.i32(i32 %x)
  %cmp = icmp ne i32 %t0, 1
@@ -955,7 +959,11 @@ define i1 @is_pow2or0_ctpop_wrong_pred1(i32 %x) {

define i1 @is_pow2or0_ctpop_wrong_pred2(i32 %x) {
; CHECK-LABEL: @is_pow2or0_ctpop_wrong_pred2(
; CHECK-NEXT: ret i1 true
; CHECK-NEXT: [[T0:%.*]] = tail call i32 @llvm.ctpop.i32(i32 [[X:%.*]]), !range [[RNG0]]
; CHECK-NEXT: [[CMP:%.*]] = icmp ne i32 [[T0]], 1
; CHECK-NEXT: [[ISZERO:%.*]] = icmp ne i32 [[X]], 0
; CHECK-NEXT: [[R:%.*]] = or i1 [[ISZERO]], [[CMP]]
; CHECK-NEXT: ret i1 [[R]]
;
  %t0 = tail call i32 @llvm.ctpop.i32(i32 %x)
  %cmp = icmp ne i32 %t0, 1
@@ -1141,7 +1149,9 @@ define i1 @isnot_pow2nor0_ctpop_wrong_pred1(i32 %x) {
; CHECK-LABEL: @isnot_pow2nor0_ctpop_wrong_pred1(
; CHECK-NEXT: [[T0:%.*]] = tail call i32 @llvm.ctpop.i32(i32 [[X:%.*]]), !range [[RNG0]]
; CHECK-NEXT: [[CMP:%.*]] = icmp eq i32 [[T0]], 1
; CHECK-NEXT: ret i1 [[CMP]]
; CHECK-NEXT: [[NOTZERO:%.*]] = icmp ne i32 [[X]], 0
; CHECK-NEXT: [[R:%.*]] = and i1 [[NOTZERO]], [[CMP]]
; CHECK-NEXT: ret i1 [[R]]
;
  %t0 = tail call i32 @llvm.ctpop.i32(i32 %x)
  %cmp = icmp eq i32 %t0, 1
@@ -1152,7 +1162,11 @@ define i1 @isnot_pow2nor0_ctpop_wrong_pred1(i32 %x) {

define i1 @isnot_pow2nor0_ctpop_wrong_pred2(i32 %x) {
; CHECK-LABEL: @isnot_pow2nor0_ctpop_wrong_pred2(
; CHECK-NEXT: ret i1 false
; CHECK-NEXT: [[T0:%.*]] = tail call i32 @llvm.ctpop.i32(i32 [[X:%.*]]), !range [[RNG0]]
; CHECK-NEXT: [[CMP:%.*]] = icmp eq i32 [[T0]], 1
; CHECK-NEXT: [[NOTZERO:%.*]] = icmp eq i32 [[X]], 0
; CHECK-NEXT: [[R:%.*]] = and i1 [[NOTZERO]], [[CMP]]
; CHECK-NEXT: ret i1 [[R]]
;
  %t0 = tail call i32 @llvm.ctpop.i32(i32 %x)
  %cmp = icmp eq i32 %t0, 1

@@ -40,7 +40,11 @@ define <2 x i1> @eq_or_non_0_commute(<2 x i32> %x) {

define i1 @eq_or_non_0_wrong_pred1(i32 %x) {
; CHECK-LABEL: @eq_or_non_0_wrong_pred1(
; CHECK-NEXT: ret i1 true
; CHECK-NEXT: [[T0:%.*]] = tail call i32 @llvm.ctpop.i32(i32 [[X:%.*]])
; CHECK-NEXT: [[CMP:%.*]] = icmp ne i32 [[T0]], 10
; CHECK-NEXT: [[NOTZERO:%.*]] = icmp ne i32 [[X]], 0
; CHECK-NEXT: [[R:%.*]] = or i1 [[NOTZERO]], [[CMP]]
; CHECK-NEXT: ret i1 [[R]]
;
  %t0 = tail call i32 @llvm.ctpop.i32(i32 %x)
  %cmp = icmp ne i32 %t0, 10
@@ -86,7 +90,9 @@ define i1 @ne_and_is_0_wrong_pred1(i32 %x) {
; CHECK-LABEL: @ne_and_is_0_wrong_pred1(
; CHECK-NEXT: [[T0:%.*]] = tail call i32 @llvm.ctpop.i32(i32 [[X:%.*]])
; CHECK-NEXT: [[CMP:%.*]] = icmp ne i32 [[T0]], 10
; CHECK-NEXT: ret i1 [[CMP]]
; CHECK-NEXT: [[ISZERO:%.*]] = icmp eq i32 [[X]], 0
; CHECK-NEXT: [[R:%.*]] = or i1 [[ISZERO]], [[CMP]]
; CHECK-NEXT: ret i1 [[R]]
;
  %t0 = tail call i32 @llvm.ctpop.i32(i32 %x)
  %cmp = icmp ne i32 %t0, 10

File diff suppressed because it is too large
@@ -236,7 +236,9 @@ define i1 @pr69050(i32 %arg, i32 %arg1) {
; CHECK-NEXT: [[XOR:%.*]] = xor i32 [[ARG:%.*]], -1
; CHECK-NEXT: [[AND:%.*]] = and i32 [[XOR]], [[ARG1:%.*]]
; CHECK-NEXT: [[ICMP:%.*]] = icmp ne i32 [[AND]], 0
; CHECK-NEXT: ret i1 [[ICMP]]
; CHECK-NEXT: [[ICMP2:%.*]] = icmp ne i32 [[ARG]], -1
; CHECK-NEXT: [[AND3:%.*]] = and i1 [[ICMP2]], [[ICMP]]
; CHECK-NEXT: ret i1 [[AND3]]
;
  %xor = xor i32 %arg, -1
  %and = and i32 %xor, %arg1
@@ -249,7 +251,11 @@ define i1 @pr69050(i32 %arg, i32 %arg1) {
define i1 @pr69091(i32 %arg, i32 %arg1) {
; CHECK-LABEL: @pr69091(
; CHECK-NEXT: [[ICMP:%.*]] = icmp ne i32 [[ARG:%.*]], -1
; CHECK-NEXT: ret i1 [[ICMP]]
; CHECK-NEXT: [[ADD:%.*]] = add i32 [[ARG]], 1
; CHECK-NEXT: [[MUL:%.*]] = mul i32 [[ADD]], [[ARG1:%.*]]
; CHECK-NEXT: [[ICMP2:%.*]] = icmp ne i32 [[MUL]], 0
; CHECK-NEXT: [[OR:%.*]] = or i1 [[ICMP]], [[ICMP2]]
; CHECK-NEXT: ret i1 [[OR]]
;
  %icmp = icmp ne i32 %arg, -1
  %add = add i32 %arg, 1

@@ -52,7 +52,12 @@ define i1 @n2_wrong_size(i4 %size0, i4 %size1, i4 %nmemb) {

define i1 @n3_wrong_pred(i4 %size, i4 %nmemb) {
; CHECK-LABEL: @n3_wrong_pred(
; CHECK-NEXT: ret i1 true
; CHECK-NEXT: [[CMP:%.*]] = icmp ne i4 [[SIZE:%.*]], 0
; CHECK-NEXT: [[SMUL:%.*]] = tail call { i4, i1 } @llvm.smul.with.overflow.i4(i4 [[SIZE]], i4 [[NMEMB:%.*]])
; CHECK-NEXT: [[SMUL_OV:%.*]] = extractvalue { i4, i1 } [[SMUL]], 1
; CHECK-NEXT: [[PHITMP:%.*]] = xor i1 [[SMUL_OV]], true
; CHECK-NEXT: [[OR:%.*]] = or i1 [[CMP]], [[PHITMP]]
; CHECK-NEXT: ret i1 [[OR]]
;
  %cmp = icmp ne i4 %size, 0 ; not 'eq'
  %smul = tail call { i4, i1 } @llvm.smul.with.overflow.i4(i4 %size, i4 %nmemb)
@@ -65,7 +70,11 @@ define i1 @n3_wrong_pred(i4 %size, i4 %nmemb) {
define i1 @n4_not_and(i4 %size, i4 %nmemb) {
; CHECK-LABEL: @n4_not_and(
; CHECK-NEXT: [[CMP:%.*]] = icmp eq i4 [[SIZE:%.*]], 0
; CHECK-NEXT: ret i1 [[CMP]]
; CHECK-NEXT: [[SMUL:%.*]] = tail call { i4, i1 } @llvm.smul.with.overflow.i4(i4 [[SIZE]], i4 [[NMEMB:%.*]])
; CHECK-NEXT: [[SMUL_OV:%.*]] = extractvalue { i4, i1 } [[SMUL]], 1
; CHECK-NEXT: [[PHITMP:%.*]] = xor i1 [[SMUL_OV]], true
; CHECK-NEXT: [[OR:%.*]] = and i1 [[CMP]], [[PHITMP]]
; CHECK-NEXT: ret i1 [[OR]]
;
  %cmp = icmp eq i4 %size, 0
  %smul = tail call { i4, i1 } @llvm.smul.with.overflow.i4(i4 %size, i4 %nmemb)

@@ -46,7 +46,11 @@ define i1 @n2_wrong_size(i4 %size0, i4 %size1, i4 %nmemb) {

define i1 @n3_wrong_pred(i4 %size, i4 %nmemb) {
; CHECK-LABEL: @n3_wrong_pred(
; CHECK-NEXT: ret i1 false
; CHECK-NEXT: [[CMP:%.*]] = icmp eq i4 [[SIZE:%.*]], 0
; CHECK-NEXT: [[SMUL:%.*]] = tail call { i4, i1 } @llvm.smul.with.overflow.i4(i4 [[SIZE]], i4 [[NMEMB:%.*]])
; CHECK-NEXT: [[SMUL_OV:%.*]] = extractvalue { i4, i1 } [[SMUL]], 1
; CHECK-NEXT: [[AND:%.*]] = and i1 [[SMUL_OV]], [[CMP]]
; CHECK-NEXT: ret i1 [[AND]]
;
  %cmp = icmp eq i4 %size, 0 ; not 'ne'
  %smul = tail call { i4, i1 } @llvm.smul.with.overflow.i4(i4 %size, i4 %nmemb)
@@ -58,7 +62,10 @@ define i1 @n3_wrong_pred(i4 %size, i4 %nmemb) {
define i1 @n4_not_and(i4 %size, i4 %nmemb) {
; CHECK-LABEL: @n4_not_and(
; CHECK-NEXT: [[CMP:%.*]] = icmp ne i4 [[SIZE:%.*]], 0
; CHECK-NEXT: ret i1 [[CMP]]
; CHECK-NEXT: [[SMUL:%.*]] = tail call { i4, i1 } @llvm.smul.with.overflow.i4(i4 [[SIZE]], i4 [[NMEMB:%.*]])
; CHECK-NEXT: [[SMUL_OV:%.*]] = extractvalue { i4, i1 } [[SMUL]], 1
; CHECK-NEXT: [[AND:%.*]] = or i1 [[SMUL_OV]], [[CMP]]
; CHECK-NEXT: ret i1 [[AND]]
;
  %cmp = icmp ne i4 %size, 0
  %smul = tail call { i4, i1 } @llvm.smul.with.overflow.i4(i4 %size, i4 %nmemb)

@@ -52,7 +52,12 @@ define i1 @n2_wrong_size(i4 %size0, i4 %size1, i4 %nmemb) {

define i1 @n3_wrong_pred(i4 %size, i4 %nmemb) {
; CHECK-LABEL: @n3_wrong_pred(
; CHECK-NEXT: ret i1 true
; CHECK-NEXT: [[CMP:%.*]] = icmp ne i4 [[SIZE:%.*]], 0
; CHECK-NEXT: [[UMUL:%.*]] = tail call { i4, i1 } @llvm.umul.with.overflow.i4(i4 [[SIZE]], i4 [[NMEMB:%.*]])
; CHECK-NEXT: [[UMUL_OV:%.*]] = extractvalue { i4, i1 } [[UMUL]], 1
; CHECK-NEXT: [[PHITMP:%.*]] = xor i1 [[UMUL_OV]], true
; CHECK-NEXT: [[OR:%.*]] = or i1 [[CMP]], [[PHITMP]]
; CHECK-NEXT: ret i1 [[OR]]
;
  %cmp = icmp ne i4 %size, 0 ; not 'eq'
  %umul = tail call { i4, i1 } @llvm.umul.with.overflow.i4(i4 %size, i4 %nmemb)
@@ -65,7 +70,11 @@ define i1 @n3_wrong_pred(i4 %size, i4 %nmemb) {
define i1 @n4_not_and(i4 %size, i4 %nmemb) {
; CHECK-LABEL: @n4_not_and(
; CHECK-NEXT: [[CMP:%.*]] = icmp eq i4 [[SIZE:%.*]], 0
; CHECK-NEXT: ret i1 [[CMP]]
; CHECK-NEXT: [[UMUL:%.*]] = tail call { i4, i1 } @llvm.umul.with.overflow.i4(i4 [[SIZE]], i4 [[NMEMB:%.*]])
; CHECK-NEXT: [[UMUL_OV:%.*]] = extractvalue { i4, i1 } [[UMUL]], 1
; CHECK-NEXT: [[PHITMP:%.*]] = xor i1 [[UMUL_OV]], true
; CHECK-NEXT: [[OR:%.*]] = and i1 [[CMP]], [[PHITMP]]
; CHECK-NEXT: ret i1 [[OR]]
;
  %cmp = icmp eq i4 %size, 0
  %umul = tail call { i4, i1 } @llvm.umul.with.overflow.i4(i4 %size, i4 %nmemb)

@@ -46,7 +46,11 @@ define i1 @n2_wrong_size(i4 %size0, i4 %size1, i4 %nmemb) {

define i1 @n3_wrong_pred(i4 %size, i4 %nmemb) {
; CHECK-LABEL: @n3_wrong_pred(
; CHECK-NEXT: ret i1 false
; CHECK-NEXT: [[CMP:%.*]] = icmp eq i4 [[SIZE:%.*]], 0
; CHECK-NEXT: [[UMUL:%.*]] = tail call { i4, i1 } @llvm.umul.with.overflow.i4(i4 [[SIZE]], i4 [[NMEMB:%.*]])
; CHECK-NEXT: [[UMUL_OV:%.*]] = extractvalue { i4, i1 } [[UMUL]], 1
; CHECK-NEXT: [[AND:%.*]] = and i1 [[UMUL_OV]], [[CMP]]
; CHECK-NEXT: ret i1 [[AND]]
;
  %cmp = icmp eq i4 %size, 0 ; not 'ne'
  %umul = tail call { i4, i1 } @llvm.umul.with.overflow.i4(i4 %size, i4 %nmemb)
@@ -58,7 +62,10 @@ define i1 @n3_wrong_pred(i4 %size, i4 %nmemb) {
define i1 @n4_not_and(i4 %size, i4 %nmemb) {
; CHECK-LABEL: @n4_not_and(
; CHECK-NEXT: [[CMP:%.*]] = icmp ne i4 [[SIZE:%.*]], 0
; CHECK-NEXT: ret i1 [[CMP]]
; CHECK-NEXT: [[UMUL:%.*]] = tail call { i4, i1 } @llvm.umul.with.overflow.i4(i4 [[SIZE]], i4 [[NMEMB:%.*]])
; CHECK-NEXT: [[UMUL_OV:%.*]] = extractvalue { i4, i1 } [[UMUL]], 1
; CHECK-NEXT: [[AND:%.*]] = or i1 [[UMUL_OV]], [[CMP]]
; CHECK-NEXT: ret i1 [[AND]]
;
  %cmp = icmp ne i4 %size, 0
  %umul = tail call { i4, i1 } @llvm.umul.with.overflow.i4(i4 %size, i4 %nmemb)

@@ -18,7 +18,10 @@ define i1 @t1(i8 %base, i8 %offset) {
; CHECK-NEXT: call void @llvm.assume(i1 [[CMP]])
; CHECK-NEXT: [[ADJUSTED:%.*]] = add i8 [[BASE]], [[OFFSET:%.*]]
; CHECK-NEXT: call void @use8(i8 [[ADJUSTED]])
; CHECK-NEXT: ret i1 true
; CHECK-NEXT: [[NOT_NULL:%.*]] = icmp ne i8 [[ADJUSTED]], 0
; CHECK-NEXT: [[NO_UNDERFLOW:%.*]] = icmp ult i8 [[ADJUSTED]], [[BASE]]
; CHECK-NEXT: [[R:%.*]] = or i1 [[NOT_NULL]], [[NO_UNDERFLOW]]
; CHECK-NEXT: ret i1 [[R]]
;
  %cmp = icmp slt i8 %base, 0
  call void @llvm.assume(i1 %cmp)
@@ -36,7 +39,10 @@ define i1 @t2_commutative(i8 %base, i8 %offset) {
; CHECK-NEXT: call void @llvm.assume(i1 [[CMP]])
; CHECK-NEXT: [[ADJUSTED:%.*]] = add i8 [[BASE]], [[OFFSET:%.*]]
; CHECK-NEXT: call void @use8(i8 [[ADJUSTED]])
; CHECK-NEXT: ret i1 true
; CHECK-NEXT: [[NOT_NULL:%.*]] = icmp ne i8 [[ADJUSTED]], 0
; CHECK-NEXT: [[NO_UNDERFLOW:%.*]] = icmp ugt i8 [[BASE]], [[ADJUSTED]]
; CHECK-NEXT: [[R:%.*]] = or i1 [[NOT_NULL]], [[NO_UNDERFLOW]]
; CHECK-NEXT: ret i1 [[R]]
;
  %cmp = icmp slt i8 %base, 0
  call void @llvm.assume(i1 %cmp)
@@ -57,7 +63,10 @@ define i1 @t3(i8 %base, i8 %offset) {
; CHECK-NEXT: call void @llvm.assume(i1 [[CMP]])
; CHECK-NEXT: [[ADJUSTED:%.*]] = add i8 [[BASE]], [[OFFSET:%.*]]
; CHECK-NEXT: call void @use8(i8 [[ADJUSTED]])
; CHECK-NEXT: ret i1 false
; CHECK-NEXT: [[NOT_NULL:%.*]] = icmp eq i8 [[ADJUSTED]], 0
; CHECK-NEXT: [[NO_UNDERFLOW:%.*]] = icmp uge i8 [[ADJUSTED]], [[BASE]]
; CHECK-NEXT: [[R:%.*]] = and i1 [[NOT_NULL]], [[NO_UNDERFLOW]]
; CHECK-NEXT: ret i1 [[R]]
;
  %cmp = icmp slt i8 %base, 0
  call void @llvm.assume(i1 %cmp)
@@ -75,7 +84,10 @@ define i1 @t4_commutative(i8 %base, i8 %offset) {
; CHECK-NEXT: call void @llvm.assume(i1 [[CMP]])
; CHECK-NEXT: [[ADJUSTED:%.*]] = add i8 [[BASE]], [[OFFSET:%.*]]
; CHECK-NEXT: call void @use8(i8 [[ADJUSTED]])
; CHECK-NEXT: ret i1 false
; CHECK-NEXT: [[NOT_NULL:%.*]] = icmp eq i8 [[ADJUSTED]], 0
; CHECK-NEXT: [[NO_UNDERFLOW:%.*]] = icmp ule i8 [[BASE]], [[ADJUSTED]]
; CHECK-NEXT: [[R:%.*]] = and i1 [[NOT_NULL]], [[NO_UNDERFLOW]]
; CHECK-NEXT: ret i1 [[R]]
;
  %cmp = icmp slt i8 %base, 0
  call void @llvm.assume(i1 %cmp)

@@ -1298,11 +1298,12 @@ define i32 @test_chr_14(ptr %i, ptr %j, i32 %sum0, i1 %pred, i32 %z) !prof !14 {
; CHECK-NEXT: entry:
; CHECK-NEXT: [[Z_FR:%.*]] = freeze i32 [[Z:%.*]]
; CHECK-NEXT: [[I0:%.*]] = load i32, ptr [[I:%.*]], align 4
; CHECK-NEXT: [[V1:%.*]] = icmp eq i32 [[Z_FR]], 1
; CHECK-NEXT: br i1 [[V1]], label [[BB1:%.*]], label [[ENTRY_SPLIT_NONCHR:%.*]], !prof [[PROF15]]
; CHECK-NEXT: [[V1_NOT:%.*]] = icmp eq i32 [[Z_FR]], 1
; CHECK-NEXT: br i1 [[V1_NOT]], label [[BB1:%.*]], label [[ENTRY_SPLIT_NONCHR:%.*]], !prof [[PROF15]]
; CHECK: entry.split.nonchr:
; CHECK-NEXT: [[PRED_FR:%.*]] = freeze i1 [[PRED:%.*]]
; CHECK-NEXT: [[V0:%.*]] = icmp eq i32 [[Z_FR]], 0
; CHECK-NEXT: [[V3_NONCHR:%.*]] = and i1 [[V0]], [[PRED:%.*]]
; CHECK-NEXT: [[V3_NONCHR:%.*]] = and i1 [[V0]], [[PRED_FR]]
; CHECK-NEXT: br i1 [[V3_NONCHR]], label [[BB0_NONCHR:%.*]], label [[BB1]], !prof [[PROF16]]
; CHECK: bb0.nonchr:
; CHECK-NEXT: call void @foo()
