[InstCombine] fold icmp ult of offset value with constant

This is one sibling of the fold added with c7b658aeb5.

(X + C2) <u C --> X >s ~C2 (if C == C2 + SMIN)
I'm still not sure how to describe it best, but we're
translating 2 constants from an unsigned range comparison
to signed because that eliminates the offset (add) op.

This could be extended to handle the more general (non-constant)
pattern too:
https://alive2.llvm.org/ce/z/K-fMBf

  define i1 @src(i8 %a, i8 %c2) {
    %t = add i8 %a, %c2
    %c = add i8 %c2, 128 ; SMIN
    %ov = icmp ult i8 %t, %c
    ret i1 %ov
  }

  define i1 @tgt(i8 %a, i8 %c2) {
    %not_c2 = xor i8 %c2, -1
    %ov = icmp sgt i8 %a, %not_c2
    ret i1 %ov
  }
This commit is contained in:
Sanjay Patel
2021-06-30 17:27:10 -04:00
parent b2fe025707
commit 0c400e8953
2 changed files with 11 additions and 6 deletions

View File

@@ -2640,11 +2640,16 @@ Instruction *InstCombinerImpl::foldICmpAddConstant(ICmpInst &Cmp,
// Fold an unsigned compare with offset to signed compare:
// (X + C2) >u C --> X <s -C2 (if C == C2 + SMAX)
-// TODO: Find the ULT and signed predicate siblings.
+// TODO: Find the signed predicate siblings.
if (Pred == CmpInst::ICMP_UGT &&
C == *C2 + APInt::getSignedMaxValue(Ty->getScalarSizeInBits()))
return new ICmpInst(ICmpInst::ICMP_SLT, X, ConstantInt::get(Ty, -(*C2)));
// (X + C2) <u C --> X >s ~C2 (if C == C2 + SMIN)
if (Pred == CmpInst::ICMP_ULT &&
C == *C2 + APInt::getSignedMinValue(Ty->getScalarSizeInBits()))
return new ICmpInst(ICmpInst::ICMP_SGT, X, ConstantInt::get(Ty, ~(*C2)));
// If the add does not wrap, we can always adjust the compare by subtracting
// the constants. Equality comparisons are handled elsewhere. SGE/SLE/UGE/ULE
// are canonicalized to SGT/SLT/UGT/ULT.

View File

@@ -796,8 +796,7 @@ define i1 @ugt_wrong_offset(i8 %a) {
define i1 @ult_offset(i8 %a) {
; CHECK-LABEL: @ult_offset(
-; CHECK-NEXT: [[T:%.*]] = add i8 [[A:%.*]], -6
-; CHECK-NEXT: [[OV:%.*]] = icmp ult i8 [[T]], 122
+; CHECK-NEXT: [[OV:%.*]] = icmp sgt i8 [[A:%.*]], 5
; CHECK-NEXT: ret i1 [[OV]]
;
%t = add i8 %a, 250
@@ -809,7 +808,7 @@ define i1 @ult_offset_use(i32 %a) {
; CHECK-LABEL: @ult_offset_use(
; CHECK-NEXT: [[T:%.*]] = add i32 [[A:%.*]], 42
; CHECK-NEXT: call void @use(i32 [[T]])
-; CHECK-NEXT: [[OV:%.*]] = icmp ult i32 [[T]], -2147483606
+; CHECK-NEXT: [[OV:%.*]] = icmp sgt i32 [[A]], -43
; CHECK-NEXT: ret i1 [[OV]]
;
%t = add i32 %a, 42
@@ -820,8 +819,7 @@ define i1 @ult_offset_use(i32 %a) {
define <2 x i1> @ult_offset_splat(<2 x i5> %a) {
; CHECK-LABEL: @ult_offset_splat(
-; CHECK-NEXT: [[T:%.*]] = add <2 x i5> [[A:%.*]], <i5 9, i5 9>
-; CHECK-NEXT: [[OV:%.*]] = icmp ult <2 x i5> [[T]], <i5 -7, i5 -7>
+; CHECK-NEXT: [[OV:%.*]] = icmp sgt <2 x i5> [[A:%.*]], <i5 -10, i5 -10>
; CHECK-NEXT: ret <2 x i1> [[OV]]
;
%t = add <2 x i5> %a, <i5 9, i5 9>
@@ -829,6 +827,8 @@ define <2 x i1> @ult_offset_splat(<2 x i5> %a) {
ret <2 x i1> %ov
}
+; negative test - constants must differ by SMIN
define i1 @ult_wrong_offset(i8 %a) {
; CHECK-LABEL: @ult_wrong_offset(
; CHECK-NEXT: [[T:%.*]] = add i8 [[A:%.*]], -6