diff --git a/llvm/lib/Transforms/Utils/ScalarEvolutionExpander.cpp b/llvm/lib/Transforms/Utils/ScalarEvolutionExpander.cpp
index 902eee26a456..b83700cc258d 100644
--- a/llvm/lib/Transforms/Utils/ScalarEvolutionExpander.cpp
+++ b/llvm/lib/Transforms/Utils/ScalarEvolutionExpander.cpp
@@ -174,7 +174,7 @@ Value *SCEVExpander::InsertNoopCastOfTo(Value *V, Type *Ty) {
     assert(DL.getTypeAllocSize(Builder.getInt8Ty()) == 1 &&
            "alloc size of i8 must by 1 byte for the GEP to be correct");
     auto *GEP = Builder.CreateGEP(
-        Builder.getInt8Ty(), Constant::getNullValue(Int8PtrTy), V, "uglygep");
+        Builder.getInt8Ty(), Constant::getNullValue(Int8PtrTy), V, "scevgep");
     return Builder.CreateBitCast(GEP, Ty);
   }
 }
@@ -613,7 +613,7 @@ Value *SCEVExpander::expandAddToGEP(const SCEV *const *op_begin,
   }

   // Emit a GEP.
-  return Builder.CreateGEP(Builder.getInt8Ty(), V, Idx, "uglygep");
+  return Builder.CreateGEP(Builder.getInt8Ty(), V, Idx, "scevgep");
 }

 {
diff --git a/llvm/test/Analysis/ScalarEvolution/scev-expander-reuse-gep.ll b/llvm/test/Analysis/ScalarEvolution/scev-expander-reuse-gep.ll
index bea8559dd4b7..32d6a958198e 100644
--- a/llvm/test/Analysis/ScalarEvolution/scev-expander-reuse-gep.ll
+++ b/llvm/test/Analysis/ScalarEvolution/scev-expander-reuse-gep.ll
@@ -3,7 +3,7 @@
 ; RUN: opt -mtriple=i386-apple-macosx10.12.0 < %s -loop-reduce -S | FileCheck %s

 ; CHECK: %ptr4.ptr1 = select i1 %cmp.i, ptr %ptr4, ptr %ptr1
-; CHECK-NEXT: %uglygep = getelementptr i8, ptr %ptr4.ptr1, i32 1
+; CHECK-NEXT: %scevgep = getelementptr i8, ptr %ptr4.ptr1, i32 1
 ; CHECK-NEXT: br label %while.cond.i

 target datalayout = "e-m:o-p:32:32-f64:32:64-f80:128-n8:16:32-S128"
diff --git a/llvm/test/CodeGen/PowerPC/lsr-postinc-pos.ll b/llvm/test/CodeGen/PowerPC/lsr-postinc-pos.ll
index f9ef6f6998fd..83763f5ef76a 100644
--- a/llvm/test/CodeGen/PowerPC/lsr-postinc-pos.ll
+++ b/llvm/test/CodeGen/PowerPC/lsr-postinc-pos.ll
@@ -4,8 +4,8 @@
 ; scevgep needs to be inserted in %bb so that it is dominated by %t.

 ; CHECK: %t = load ptr, ptr %inp
-; CHECK: %uglygep = getelementptr i8, ptr %t, i32 %lsr.iv.next
-; CHECK: %c1 = icmp ult ptr %uglygep, %inp2
+; CHECK: %scevgep = getelementptr i8, ptr %t, i32 %lsr.iv.next
+; CHECK: %c1 = icmp ult ptr %scevgep, %inp2

 target datalayout = "E-p:32:32:32-i1:8:8-i8:8:8-i16:16:16-i32:32:32-i64:32:64-f32:32:32-f64:32:64-v64:64:64-v128:128:128-a0:0:64-f128:64:128-n32"
 target triple = "powerpc-unknown-linux-gnu"
diff --git a/llvm/test/CodeGen/SystemZ/prefetch-04.ll b/llvm/test/CodeGen/SystemZ/prefetch-04.ll
index 6ab85321d909..61a2a1460c58 100644
--- a/llvm/test/CodeGen/SystemZ/prefetch-04.ll
+++ b/llvm/test/CodeGen/SystemZ/prefetch-04.ll
@@ -5,7 +5,7 @@
 ; write prefetch.
 ;
 ; CHECK-LABEL: for.body
-; CHECK: call void @llvm.prefetch.p0(ptr %uglygep, i32 1, i32 3, i32 1
+; CHECK: call void @llvm.prefetch.p0(ptr %scevgep, i32 1, i32 3, i32 1
 ; CHECK-not: call void @llvm.prefetch

 define void @fun(ptr nocapture %Src, ptr nocapture readonly %Dst) {
diff --git a/llvm/test/Transforms/LoopStrengthReduce/AArch64/vscale-factor-out-constant.ll b/llvm/test/Transforms/LoopStrengthReduce/AArch64/vscale-factor-out-constant.ll
index 97c3147c54c2..c70537f6e561 100644
--- a/llvm/test/Transforms/LoopStrengthReduce/AArch64/vscale-factor-out-constant.ll
+++ b/llvm/test/Transforms/LoopStrengthReduce/AArch64/vscale-factor-out-constant.ll
@@ -8,7 +8,7 @@ target triple = "aarch64-unknown-linux-gnu"
 ; CHECK-LABEL: test
 ; FIXME: Handle VectorType in SCEVExpander::expandAddToGEP.
 ; The generated IR is not ideal with base 'scalar_vector' cast to i8*, and do ugly getelementptr over casted base.
-; CHECK: uglygep
+; CHECK: scevgep
 define void @test(ptr %a, i32 %v, i64 %n) {
 entry:
   %scalar_vector = alloca <vscale x 4 x i32>, align 16
diff --git a/llvm/test/Transforms/LoopStrengthReduce/AMDGPU/atomics.ll b/llvm/test/Transforms/LoopStrengthReduce/AMDGPU/atomics.ll
index b67235c0cc80..b23ef8f4c218 100644
--- a/llvm/test/Transforms/LoopStrengthReduce/AMDGPU/atomics.ll
+++ b/llvm/test/Transforms/LoopStrengthReduce/AMDGPU/atomics.ll
@@ -7,16 +7,16 @@ target datalayout = "e-p:64:64-p1:64:64-p2:32:32-p3:32:32-p4:64:64-p5:32:32-p6:3
 ; OPT-LABEL: @test_local_atomicrmw_addressing_loop_uniform_index_max_offset_i32(
 ; OPT: .lr.ph.preheader:
-; OPT: %uglygep2 = getelementptr i8, ptr addrspace(3) %arg1, i32 65532
+; OPT: %scevgep2 = getelementptr i8, ptr addrspace(3) %arg1, i32 65532
 ; OPT: br label %.lr.ph
 ; OPT: .lr.ph:
-; OPT: %lsr.iv3 = phi ptr addrspace(3) [ %uglygep4, %.lr.ph ], [ %uglygep2, %.lr.ph.preheader ]
-; OPT: %lsr.iv1 = phi ptr addrspace(3) [ %uglygep, %.lr.ph ], [ %arg0, %.lr.ph.preheader ]
+; OPT: %lsr.iv3 = phi ptr addrspace(3) [ %scevgep4, %.lr.ph ], [ %scevgep2, %.lr.ph.preheader ]
+; OPT: %lsr.iv1 = phi ptr addrspace(3) [ %scevgep, %.lr.ph ], [ %arg0, %.lr.ph.preheader ]
 ; OPT: %lsr.iv = phi i32 [ %lsr.iv.next, %.lr.ph ], [ %n, %.lr.ph.preheader ]
 ; OPT: %tmp4 = atomicrmw add ptr addrspace(3) %lsr.iv3, i32 undef seq_cst, align 4
 ; OPT: %tmp7 = atomicrmw add ptr addrspace(3) %lsr.iv1, i32 undef seq_cst, align 4
 ; OPT: %0 = atomicrmw add ptr addrspace(3) %lsr.iv1, i32 %tmp8 seq_cst, align 4
-; OPT: %uglygep4 = getelementptr i8, ptr addrspace(3) %lsr.iv3, i32 4
+; OPT: %scevgep4 = getelementptr i8, ptr addrspace(3) %lsr.iv3, i32 4
 ; OPT: br i1 %exitcond

 define amdgpu_kernel void @test_local_atomicrmw_addressing_loop_uniform_index_max_offset_i32(ptr addrspace(3) noalias nocapture %arg0, ptr addrspace(3) noalias nocapture readonly %arg1, i32 %n) #0 {
 bb:
@@ -49,14 +49,14 @@ bb:
 ; OPT-LABEL: test_local_cmpxchg_addressing_loop_uniform_index_max_offset_i32(
 ; OPT: .lr.ph.preheader:
-; OPT: %uglygep2 = getelementptr i8, ptr addrspace(3) %arg1, i32 65532
+; OPT: %scevgep2 = getelementptr i8, ptr addrspace(3) %arg1, i32 65532
 ; OPT: br label %.lr.ph
 ; OPT: .lr.ph:
-; OPT: %lsr.iv3 = phi ptr addrspace(3) [ %uglygep4, %.lr.ph ], [ %uglygep2, %.lr.ph.preheader ]
-; OPT: %lsr.iv1 = phi ptr addrspace(3) [ %uglygep, %.lr.ph ], [ %arg0, %.lr.ph.preheader ]
+; OPT: %lsr.iv3 = phi ptr addrspace(3) [ %scevgep4, %.lr.ph ], [ %scevgep2, %.lr.ph.preheader ]
+; OPT: %lsr.iv1 = phi ptr addrspace(3) [ %scevgep, %.lr.ph ], [ %arg0, %.lr.ph.preheader ]
 ; OPT: %lsr.iv = phi i32 [ %lsr.iv.next, %.lr.ph ], [ %n, %.lr.ph.preheader ]
 ; OPT: %tmp4 = cmpxchg ptr addrspace(3) %lsr.iv3, i32 undef, i32 undef seq_cst monotonic, align 4
-; OPT: %uglygep4 = getelementptr i8, ptr addrspace(3) %lsr.iv3, i32 4
+; OPT: %scevgep4 = getelementptr i8, ptr addrspace(3) %lsr.iv3, i32 4

 define amdgpu_kernel void @test_local_cmpxchg_addressing_loop_uniform_index_max_offset_i32(ptr addrspace(3) noalias nocapture %arg0, ptr addrspace(3) noalias nocapture readonly %arg1, i32 %n) #0 {
 bb:
   %tmp = icmp sgt i32 %n, 0
@@ -90,15 +90,15 @@ bb:
 ; OPT-LABEL: @test_local_atomicinc_addressing_loop_uniform_index_max_offset_i32(
 ; OPT: .lr.ph.preheader:
-; OPT: %uglygep2 = getelementptr i8, ptr addrspace(3) %arg1, i32 65532
+; OPT: %scevgep2 = getelementptr i8, ptr addrspace(3) %arg1, i32 65532
 ; OPT: br label %.lr.ph
 ; OPT: .lr.ph:
-; OPT: %lsr.iv3 = phi ptr addrspace(3) [ %uglygep4, %.lr.ph ], [ %uglygep2, %.lr.ph.preheader ]
-; OPT: %lsr.iv1 = phi ptr addrspace(3) [ %uglygep, %.lr.ph ], [ %arg0, %.lr.ph.preheader ]
+; OPT: %lsr.iv3 = phi ptr addrspace(3) [ %scevgep4, %.lr.ph ], [ %scevgep2, %.lr.ph.preheader ]
+; OPT: %lsr.iv1 = phi ptr addrspace(3) [ %scevgep, %.lr.ph ], [ %arg0, %.lr.ph.preheader ]
 ; OPT: %lsr.iv = phi i32 [ %lsr.iv.next, %.lr.ph ], [ %n, %.lr.ph.preheader ]
 ; OPT: %tmp4 = call i32 @llvm.amdgcn.atomic.inc.i32.p3(ptr addrspace(3) %lsr.iv3, i32 undef, i32 0, i32 0, i1 false)
 ; OPT: %tmp7 = call i32 @llvm.amdgcn.atomic.inc.i32.p3(ptr addrspace(3) %lsr.iv1, i32 undef, i32 0, i32 0, i1 false)
-; OPT: %uglygep4 = getelementptr i8, ptr addrspace(3) %lsr.iv3, i32 4
+; OPT: %scevgep4 = getelementptr i8, ptr addrspace(3) %lsr.iv3, i32 4

 define amdgpu_kernel void @test_local_atomicinc_addressing_loop_uniform_index_max_offset_i32(ptr addrspace(3) noalias nocapture %arg0, ptr addrspace(3) noalias nocapture readonly %arg1, i32 %n) #0 {
 bb:
   %tmp = icmp sgt i32 %n, 0
@@ -129,15 +129,15 @@ bb:
 ; OPT-LABEL: @test_local_atomicdec_addressing_loop_uniform_index_max_offset_i32(
 ; OPT: .lr.ph.preheader:
-; OPT: %uglygep2 = getelementptr i8, ptr addrspace(3) %arg1, i32 65532
+; OPT: %scevgep2 = getelementptr i8, ptr addrspace(3) %arg1, i32 65532
 ; OPT: br label %.lr.ph
 ; OPT: .lr.ph:
-; OPT: %lsr.iv3 = phi ptr addrspace(3) [ %uglygep4, %.lr.ph ], [ %uglygep2, %.lr.ph.preheader ]
-; OPT: %lsr.iv1 = phi ptr addrspace(3) [ %uglygep, %.lr.ph ], [ %arg0, %.lr.ph.preheader ]
+; OPT: %lsr.iv3 = phi ptr addrspace(3) [ %scevgep4, %.lr.ph ], [ %scevgep2, %.lr.ph.preheader ]
+; OPT: %lsr.iv1 = phi ptr addrspace(3) [ %scevgep, %.lr.ph ], [ %arg0, %.lr.ph.preheader ]
 ; OPT: %lsr.iv = phi i32 [ %lsr.iv.next, %.lr.ph ], [ %n, %.lr.ph.preheader ]
 ; OPT: %tmp4 = call i32 @llvm.amdgcn.atomic.dec.i32.p3(ptr addrspace(3) %lsr.iv3, i32 undef, i32 0, i32 0, i1 false)
 ; OPT: %tmp7 = call i32 @llvm.amdgcn.atomic.dec.i32.p3(ptr addrspace(3) %lsr.iv1, i32 undef, i32 0, i32 0, i1 false)
-; OPT: %uglygep4 = getelementptr i8, ptr addrspace(3) %lsr.iv3, i32 4
+; OPT: %scevgep4 = getelementptr i8, ptr addrspace(3) %lsr.iv3, i32 4

 define amdgpu_kernel void @test_local_atomicdec_addressing_loop_uniform_index_max_offset_i32(ptr addrspace(3) noalias nocapture %arg0, ptr addrspace(3) noalias nocapture readonly %arg1, i32 %n) #0 {
 bb:
   %tmp = icmp sgt i32 %n, 0
diff --git a/llvm/test/Transforms/LoopStrengthReduce/AMDGPU/different-addrspace-addressing-mode-loops.ll b/llvm/test/Transforms/LoopStrengthReduce/AMDGPU/different-addrspace-addressing-mode-loops.ll
index 0a757419b59b..40306e799892 100644
--- a/llvm/test/Transforms/LoopStrengthReduce/AMDGPU/different-addrspace-addressing-mode-loops.ll
+++ b/llvm/test/Transforms/LoopStrengthReduce/AMDGPU/different-addrspace-addressing-mode-loops.ll
@@ -7,12 +7,12 @@ target datalayout = "e-p:64:64-p1:64:64-p2:32:32-p3:32:32-p4:64:64-p5:32:32-p6:3
 ; OPT-LABEL: @test_global_addressing_loop_uniform_index_max_offset_i32(
 ; OPT: .lr.ph.preheader:
-; OPT: %uglygep2 = getelementptr i8, ptr addrspace(1) %arg1, i64 4095
+; OPT: %scevgep2 = getelementptr i8, ptr addrspace(1) %arg1, i64 4095
 ; OPT: br label %.lr.ph
 ; OPT: {{^}}.lr.ph:
-; OPT: %lsr.iv3 = phi ptr addrspace(1) [ %uglygep4, %.lr.ph ], [ %uglygep2, %.lr.ph.preheader ]
+; OPT: %lsr.iv3 = phi ptr addrspace(1) [ %scevgep4, %.lr.ph ], [ %scevgep2, %.lr.ph.preheader ]
 ; OPT: load i8, ptr addrspace(1) %lsr.iv3, align 1
-; OPT: %uglygep4 = getelementptr i8, ptr addrspace(1) %lsr.iv3, i64 1
+; OPT: %scevgep4 = getelementptr i8, ptr addrspace(1) %lsr.iv3, i64 1

 define amdgpu_kernel void @test_global_addressing_loop_uniform_index_max_offset_i32(ptr addrspace(1) noalias nocapture %arg0, ptr addrspace(1) noalias nocapture readonly %arg1, i32 %n) #0 {
 bb:
   %tmp = icmp sgt i32 %n, 0
@@ -45,12 +45,12 @@ bb:

 ; OPT-LABEL: @test_global_addressing_loop_uniform_index_max_offset_p1_i32(
 ; OPT: {{^}}.lr.ph.preheader:
-; OPT: %uglygep2 = getelementptr i8, ptr addrspace(1) %arg1, i64 4096
+; OPT: %scevgep2 = getelementptr i8, ptr addrspace(1) %arg1, i64 4096
 ; OPT: br label %.lr.ph
 ; OPT: {{^}}.lr.ph:
-; OPT: %lsr.iv3 = phi ptr addrspace(1) [ %uglygep4, %.lr.ph ], [ %uglygep2, %.lr.ph.preheader ]
-; OPT: %uglygep4 = getelementptr i8, ptr addrspace(1) %lsr.iv3, i64 1
+; OPT: %lsr.iv3 = phi ptr addrspace(1) [ %scevgep4, %.lr.ph ], [ %scevgep2, %.lr.ph.preheader ]
+; OPT: %scevgep4 = getelementptr i8, ptr addrspace(1) %lsr.iv3, i64 1

 define amdgpu_kernel void @test_global_addressing_loop_uniform_index_max_offset_p1_i32(ptr addrspace(1) noalias nocapture %arg0, ptr addrspace(1) noalias nocapture readonly %arg1, i32 %n) #0 {
 bb:
   %tmp = icmp sgt i32 %n, 0
@@ -83,12 +83,12 @@ bb:
 ; OPT-LABEL: @test_local_addressing_loop_uniform_index_max_offset_i32(
 ; OPT: .lr.ph.preheader:
-; OPT: %uglygep2 = getelementptr i8, ptr addrspace(3) %arg1, i32 65535
+; OPT: %scevgep2 = getelementptr i8, ptr addrspace(3) %arg1, i32 65535
 ; OPT: br label %.lr.ph
 ; OPT: {{^}}.lr.ph
-; OPT: %lsr.iv3 = phi ptr addrspace(3) [ %uglygep4, %.lr.ph ], [ %uglygep2, %.lr.ph.preheader ]
+; OPT: %lsr.iv3 = phi ptr addrspace(3) [ %scevgep4, %.lr.ph ], [ %scevgep2, %.lr.ph.preheader ]
 ; OPT: %tmp4 = load i8, ptr addrspace(3) %lsr.iv3, align 1
-; OPT: %uglygep4 = getelementptr i8, ptr addrspace(3) %lsr.iv3, i32 1
+; OPT: %scevgep4 = getelementptr i8, ptr addrspace(3) %lsr.iv3, i32 1

 define amdgpu_kernel void @test_local_addressing_loop_uniform_index_max_offset_i32(ptr addrspace(1) noalias nocapture %arg0, ptr addrspace(3) noalias nocapture readonly %arg1, i32 %n) #0 {
 bb:
   %tmp = icmp sgt i32 %n, 0
@@ -122,12 +122,12 @@ bb:

 ; OPT-LABEL: @test_local_addressing_loop_uniform_index_max_offset_p1_i32(
 ; OPT: {{^}}.lr.ph.preheader:
-; OPT: %uglygep2 = getelementptr i8, ptr addrspace(3) %arg1, i32 65536
+; OPT: %scevgep2 = getelementptr i8, ptr addrspace(3) %arg1, i32 65536
 ; OPT: br label %.lr.ph
 ; OPT: {{^}}.lr.ph:
-; OPT: %lsr.iv3 = phi ptr addrspace(3) [ %uglygep4, %.lr.ph ], [ %uglygep2, %.lr.ph.preheader ]
-; OPT: %uglygep4 = getelementptr i8, ptr addrspace(3) %lsr.iv3, i32 1
+; OPT: %lsr.iv3 = phi ptr addrspace(3) [ %scevgep4, %.lr.ph ], [ %scevgep2, %.lr.ph.preheader ]
+; OPT: %scevgep4 = getelementptr i8, ptr addrspace(3) %lsr.iv3, i32 1

 define amdgpu_kernel void @test_local_addressing_loop_uniform_index_max_offset_p1_i32(ptr addrspace(1) noalias nocapture %arg0, ptr addrspace(3) noalias nocapture readonly %arg1, i32 %n) #0 {
 bb:
   %tmp = icmp sgt i32 %n, 0
diff --git a/llvm/test/Transforms/LoopStrengthReduce/AMDGPU/preserve-addrspace-assert.ll b/llvm/test/Transforms/LoopStrengthReduce/AMDGPU/preserve-addrspace-assert.ll
index 2006ac0e6de6..99432cc86691 100644
--- a/llvm/test/Transforms/LoopStrengthReduce/AMDGPU/preserve-addrspace-assert.ll
+++ b/llvm/test/Transforms/LoopStrengthReduce/AMDGPU/preserve-addrspace-assert.ll
@@ -8,11 +8,11 @@ target datalayout = "e-p:64:64-p1:64:64-p2:32:32-p3:32:32-p4:64:64-p5:32:32-p6:3
 %0 = type { i32, double, i32, float }

 ; CHECK-LABEL: @lsr_crash_preserve_addrspace_unknown_type(
-; CHECK: %uglygep1 = getelementptr i8, ptr addrspace(3) %tmp, i32 8
-; CHECK: load double, ptr addrspace(3) %uglygep1
+; CHECK: %scevgep1 = getelementptr i8, ptr addrspace(3) %tmp, i32 8
+; CHECK: load double, ptr addrspace(3) %scevgep1

-; CHECK: %uglygep = getelementptr i8, ptr addrspace(3) %tmp, i32 16
-; CHECK: %tmp14 = load i32, ptr addrspace(3) %uglygep
+; CHECK: %scevgep = getelementptr i8, ptr addrspace(3) %tmp, i32 16
+; CHECK: %tmp14 = load i32, ptr addrspace(3) %scevgep
 define amdgpu_kernel void @lsr_crash_preserve_addrspace_unknown_type() #0 {
 bb:
   br label %bb1
diff --git a/polly/test/ScopInfo/int2ptr_ptr2int.ll b/polly/test/ScopInfo/int2ptr_ptr2int.ll
index 7cd6ec4c239f..2e7bfbe5eaee 100644
--- a/polly/test/ScopInfo/int2ptr_ptr2int.ll
+++ b/polly/test/ScopInfo/int2ptr_ptr2int.ll
@@ -25,9 +25,9 @@
 ; IR-NEXT: %p_tmp2 = ptrtoint ptr %p_add.ptr2 to i64
 ; IR-NEXT: %p_arrayidx = getelementptr inbounds i64, ptr %A, i64 %p_tmp2
 ; IR-NEXT: %tmp3_p_scalar_ = load i64, ptr %p_arrayidx, align 8, !alias.scope !0, !noalias !3
-; IR-NEXT: %tmp4_p_scalar_ = load i64, ptr %uglygep, align 8, !alias.scope !0, !noalias !3
+; IR-NEXT: %tmp4_p_scalar_ = load i64, ptr %scevgep, align 8, !alias.scope !0, !noalias !3
 ; IR-NEXT: %p_add4 = add nsw i64 %tmp4_p_scalar_, %tmp3_p_scalar_
-; IR-NEXT: store i64 %p_add4, ptr %uglygep, align 8, !alias.scope !0, !noalias !3
+; IR-NEXT: store i64 %p_add4, ptr %scevgep, align 8, !alias.scope !0, !noalias !3
 ; IR-NEXT: %polly.indvar_next = add nsw i64 %polly.indvar, 1
 ; IR-NEXT: %polly.loop_cond = icmp sle i64 %polly.indvar_next, 99
 ; IR-NEXT: br i1 %polly.loop_cond, label %polly.loop_header, label %polly.loop_exit
@@ -36,7 +36,7 @@
 ; IR-NEXT: %14 = add i64 %val, 1
 ; IR-NEXT: %15 = shl i32 %ptr1, 3
 ; IR-NEXT: %16 = add i32 %15, 72
-; IR-NEXT: %uglygep = getelementptr i8, ptr %A, i32 %16
+; IR-NEXT: %scevgep = getelementptr i8, ptr %A, i32 %16
 ; IR-NEXT: br label %polly.loop_header
 ;

diff --git a/polly/test/ScopInfo/int2ptr_ptr2int_2.ll b/polly/test/ScopInfo/int2ptr_ptr2int_2.ll
index a9c3431270fb..97878f7091b1 100644
--- a/polly/test/ScopInfo/int2ptr_ptr2int_2.ll
+++ b/polly/test/ScopInfo/int2ptr_ptr2int_2.ll
@@ -24,9 +24,9 @@
 ; IR-NEXT: %ptr13 = ptrtoint ptr %ptr to i16
 ;
 ; IR: polly.stmt.for.body:
-; IR-NEXT: %tmp4_p_scalar_ = load i64, ptr %uglygep, align 8, !alias.scope !3, !noalias !0
+; IR-NEXT: %tmp4_p_scalar_ = load i64, ptr %scevgep, align 8, !alias.scope !3, !noalias !0
 ; IR-NEXT: %p_add4 = add nsw i64 %tmp4_p_scalar_, %polly.preload.tmp3.merge
-; IR-NEXT: store i64 %p_add4, ptr %uglygep, align 8, !alias.scope !3, !noalias !0
+; IR-NEXT: store i64 %p_add4, ptr %scevgep, align 8, !alias.scope !3, !noalias !0
 ; IR-NEXT: %polly.indvar_next = add nsw i64 %polly.indvar, 1
 ; IR-NEXT: %polly.loop_cond = icmp sle i64 %polly.indvar_next, 99
 ; IR-NEXT: br i1 %polly.loop_cond, label %polly.loop_header, label %polly.loop_exit
@@ -35,7 +35,7 @@
 ; IR-NEXT: %41 = add i16 %val, 1
 ; IR-NEXT: %42 = shl i16 %ptr13, 3
 ; IR-NEXT: %43 = add i16 %42, 72
-; IR-NEXT: %uglygep = getelementptr i8, ptr %A, i16 %43
+; IR-NEXT: %scevgep = getelementptr i8, ptr %A, i16 %43
 ; IR-NEXT: br label %polly.loop_header
 ;
 target datalayout = "e-p:16:16:16-m:e-i64:64-f80:128-n8:16:16:64-S128"