[AArch64][GlobalISel] Update and regenerate some vecreduce and other tests. NFC

David Green
2025-01-17 13:19:11 +00:00
parent 5153a90453
commit eff6b64258
4 changed files with 1129 additions and 1142 deletions
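The CHECK lines in the diffs below are produced by the test-update scripts rather than written by hand. A minimal sketch of how such a regeneration is typically run, assuming a built llc on PATH (the test file paths are illustrative, not taken from this commit):

  # Regenerate the CHECK lines of a MIR test from its RUN lines (path illustrative).
  llvm/utils/update_mir_test_checks.py llvm/test/CodeGen/AArch64/GlobalISel/legalize-reduce-add.mir
  # Regenerate the CHECK lines of an llc-based .ll test the same way (path illustrative).
  llvm/utils/update_llc_test_checks.py llvm/test/CodeGen/AArch64/aarch64-addv.ll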


@@ -6,15 +6,15 @@ tracksRegLiveness: true
body: |
bb.1:
liveins: $x0
; CHECK-LABEL: name: add_v16s8
; CHECK: liveins: $x0
; CHECK: [[COPY:%[0-9]+]]:_(p0) = COPY $x0
; CHECK: [[LOAD:%[0-9]+]]:_(<16 x s8>) = G_LOAD [[COPY]](p0) :: (load (<16 x s8>))
; CHECK: [[VECREDUCE_ADD:%[0-9]+]]:_(s8) = G_VECREDUCE_ADD [[LOAD]](<16 x s8>)
; CHECK: [[ANYEXT:%[0-9]+]]:_(s32) = G_ANYEXT [[VECREDUCE_ADD]](s8)
; CHECK: $w0 = COPY [[ANYEXT]](s32)
; CHECK: RET_ReallyLR implicit $w0
; CHECK-NEXT: {{ $}}
; CHECK-NEXT: [[COPY:%[0-9]+]]:_(p0) = COPY $x0
; CHECK-NEXT: [[LOAD:%[0-9]+]]:_(<16 x s8>) = G_LOAD [[COPY]](p0) :: (load (<16 x s8>))
; CHECK-NEXT: [[VECREDUCE_ADD:%[0-9]+]]:_(s8) = G_VECREDUCE_ADD [[LOAD]](<16 x s8>)
; CHECK-NEXT: [[ANYEXT:%[0-9]+]]:_(s32) = G_ANYEXT [[VECREDUCE_ADD]](s8)
; CHECK-NEXT: $w0 = COPY [[ANYEXT]](s32)
; CHECK-NEXT: RET_ReallyLR implicit $w0
%0:_(p0) = COPY $x0
%1:_(<16 x s8>) = G_LOAD %0(p0) :: (load (<16 x s8>))
%2:_(s8) = G_VECREDUCE_ADD %1(<16 x s8>)
@@ -29,15 +29,15 @@ tracksRegLiveness: true
body: |
bb.1:
liveins: $x0
; CHECK-LABEL: name: add_v8s16
; CHECK: liveins: $x0
; CHECK: [[COPY:%[0-9]+]]:_(p0) = COPY $x0
; CHECK: [[LOAD:%[0-9]+]]:_(<8 x s16>) = G_LOAD [[COPY]](p0) :: (load (<8 x s16>))
; CHECK: [[VECREDUCE_ADD:%[0-9]+]]:_(s16) = G_VECREDUCE_ADD [[LOAD]](<8 x s16>)
; CHECK: [[ANYEXT:%[0-9]+]]:_(s32) = G_ANYEXT [[VECREDUCE_ADD]](s16)
; CHECK: $w0 = COPY [[ANYEXT]](s32)
; CHECK: RET_ReallyLR implicit $w0
; CHECK-NEXT: {{ $}}
; CHECK-NEXT: [[COPY:%[0-9]+]]:_(p0) = COPY $x0
; CHECK-NEXT: [[LOAD:%[0-9]+]]:_(<8 x s16>) = G_LOAD [[COPY]](p0) :: (load (<8 x s16>))
; CHECK-NEXT: [[VECREDUCE_ADD:%[0-9]+]]:_(s16) = G_VECREDUCE_ADD [[LOAD]](<8 x s16>)
; CHECK-NEXT: [[ANYEXT:%[0-9]+]]:_(s32) = G_ANYEXT [[VECREDUCE_ADD]](s16)
; CHECK-NEXT: $w0 = COPY [[ANYEXT]](s32)
; CHECK-NEXT: RET_ReallyLR implicit $w0
%0:_(p0) = COPY $x0
%1:_(<8 x s16>) = G_LOAD %0(p0) :: (load (<8 x s16>))
%2:_(s16) = G_VECREDUCE_ADD %1(<8 x s16>)
@@ -52,14 +52,14 @@ tracksRegLiveness: true
body: |
bb.1:
liveins: $x0
; CHECK-LABEL: name: add_v4s32
; CHECK: liveins: $x0
; CHECK: [[COPY:%[0-9]+]]:_(p0) = COPY $x0
; CHECK: [[LOAD:%[0-9]+]]:_(<4 x s32>) = G_LOAD [[COPY]](p0) :: (load (<4 x s32>))
; CHECK: [[VECREDUCE_ADD:%[0-9]+]]:_(s32) = G_VECREDUCE_ADD [[LOAD]](<4 x s32>)
; CHECK: $w0 = COPY [[VECREDUCE_ADD]](s32)
; CHECK: RET_ReallyLR implicit $w0
; CHECK-NEXT: {{ $}}
; CHECK-NEXT: [[COPY:%[0-9]+]]:_(p0) = COPY $x0
; CHECK-NEXT: [[LOAD:%[0-9]+]]:_(<4 x s32>) = G_LOAD [[COPY]](p0) :: (load (<4 x s32>))
; CHECK-NEXT: [[VECREDUCE_ADD:%[0-9]+]]:_(s32) = G_VECREDUCE_ADD [[LOAD]](<4 x s32>)
; CHECK-NEXT: $w0 = COPY [[VECREDUCE_ADD]](s32)
; CHECK-NEXT: RET_ReallyLR implicit $w0
%0:_(p0) = COPY $x0
%1:_(<4 x s32>) = G_LOAD %0(p0) :: (load (<4 x s32>))
%2:_(s32) = G_VECREDUCE_ADD %1(<4 x s32>)
@@ -73,14 +73,14 @@ tracksRegLiveness: true
body: |
bb.1:
liveins: $x0
; CHECK-LABEL: name: add_v2s64
; CHECK: liveins: $x0
; CHECK: [[COPY:%[0-9]+]]:_(p0) = COPY $x0
; CHECK: [[LOAD:%[0-9]+]]:_(<2 x s64>) = G_LOAD [[COPY]](p0) :: (load (<2 x s64>))
; CHECK: [[VECREDUCE_ADD:%[0-9]+]]:_(s64) = G_VECREDUCE_ADD [[LOAD]](<2 x s64>)
; CHECK: $x0 = COPY [[VECREDUCE_ADD]](s64)
; CHECK: RET_ReallyLR implicit $x0
; CHECK-NEXT: {{ $}}
; CHECK-NEXT: [[COPY:%[0-9]+]]:_(p0) = COPY $x0
; CHECK-NEXT: [[LOAD:%[0-9]+]]:_(<2 x s64>) = G_LOAD [[COPY]](p0) :: (load (<2 x s64>))
; CHECK-NEXT: [[VECREDUCE_ADD:%[0-9]+]]:_(s64) = G_VECREDUCE_ADD [[LOAD]](<2 x s64>)
; CHECK-NEXT: $x0 = COPY [[VECREDUCE_ADD]](s64)
; CHECK-NEXT: RET_ReallyLR implicit $x0
%0:_(p0) = COPY $x0
%1:_(<2 x s64>) = G_LOAD %0(p0) :: (load (<2 x s64>))
%2:_(s64) = G_VECREDUCE_ADD %1(<2 x s64>)
@@ -94,14 +94,14 @@ tracksRegLiveness: true
body: |
bb.1:
liveins: $x0
; CHECK-LABEL: name: add_v2s32
; CHECK: liveins: $x0
; CHECK: [[COPY:%[0-9]+]]:_(p0) = COPY $x0
; CHECK: [[LOAD:%[0-9]+]]:_(<2 x s32>) = G_LOAD [[COPY]](p0) :: (load (<2 x s32>))
; CHECK: [[VECREDUCE_ADD:%[0-9]+]]:_(s32) = G_VECREDUCE_ADD [[LOAD]](<2 x s32>)
; CHECK: $w0 = COPY [[VECREDUCE_ADD]](s32)
; CHECK: RET_ReallyLR implicit $w0
; CHECK-NEXT: {{ $}}
; CHECK-NEXT: [[COPY:%[0-9]+]]:_(p0) = COPY $x0
; CHECK-NEXT: [[LOAD:%[0-9]+]]:_(<2 x s32>) = G_LOAD [[COPY]](p0) :: (load (<2 x s32>))
; CHECK-NEXT: [[VECREDUCE_ADD:%[0-9]+]]:_(s32) = G_VECREDUCE_ADD [[LOAD]](<2 x s32>)
; CHECK-NEXT: $w0 = COPY [[VECREDUCE_ADD]](s32)
; CHECK-NEXT: RET_ReallyLR implicit $w0
%0:_(p0) = COPY $x0
%1:_(<2 x s32>) = G_LOAD %0(p0) :: (load (<2 x s32>))
%2:_(s32) = G_VECREDUCE_ADD %1(<2 x s32>)
@@ -111,24 +111,25 @@ body: |
...
---
name: test_v8i64
# This is a power-of-2 legalization, so use a tree reduction.
alignment: 4
tracksRegLiveness: true
body: |
bb.1:
liveins: $q0, $q1, $q2, $q3
; This is a power-of-2 legalization, so use a tree reduction.
; CHECK-LABEL: name: test_v8i64
; CHECK: liveins: $q0, $q1, $q2, $q3
; CHECK: [[COPY:%[0-9]+]]:_(<2 x s64>) = COPY $q0
; CHECK: [[COPY1:%[0-9]+]]:_(<2 x s64>) = COPY $q1
; CHECK: [[COPY2:%[0-9]+]]:_(<2 x s64>) = COPY $q2
; CHECK: [[COPY3:%[0-9]+]]:_(<2 x s64>) = COPY $q3
; CHECK: [[ADD:%[0-9]+]]:_(<2 x s64>) = G_ADD [[COPY]], [[COPY1]]
; CHECK: [[ADD1:%[0-9]+]]:_(<2 x s64>) = G_ADD [[COPY2]], [[COPY3]]
; CHECK: [[ADD2:%[0-9]+]]:_(<2 x s64>) = G_ADD [[ADD]], [[ADD1]]
; CHECK: [[VECREDUCE_ADD:%[0-9]+]]:_(s64) = G_VECREDUCE_ADD [[ADD2]](<2 x s64>)
; CHECK: $x0 = COPY [[VECREDUCE_ADD]](s64)
; CHECK: RET_ReallyLR implicit $x0
; CHECK-NEXT: {{ $}}
; CHECK-NEXT: [[COPY:%[0-9]+]]:_(<2 x s64>) = COPY $q0
; CHECK-NEXT: [[COPY1:%[0-9]+]]:_(<2 x s64>) = COPY $q1
; CHECK-NEXT: [[COPY2:%[0-9]+]]:_(<2 x s64>) = COPY $q2
; CHECK-NEXT: [[COPY3:%[0-9]+]]:_(<2 x s64>) = COPY $q3
; CHECK-NEXT: [[ADD:%[0-9]+]]:_(<2 x s64>) = G_ADD [[COPY]], [[COPY1]]
; CHECK-NEXT: [[ADD1:%[0-9]+]]:_(<2 x s64>) = G_ADD [[COPY2]], [[COPY3]]
; CHECK-NEXT: [[ADD2:%[0-9]+]]:_(<2 x s64>) = G_ADD [[ADD]], [[ADD1]]
; CHECK-NEXT: [[VECREDUCE_ADD:%[0-9]+]]:_(s64) = G_VECREDUCE_ADD [[ADD2]](<2 x s64>)
; CHECK-NEXT: $x0 = COPY [[VECREDUCE_ADD]](s64)
; CHECK-NEXT: RET_ReallyLR implicit $x0
%0:_(<2 x s64>) = COPY $q0
%1:_(<2 x s64>) = COPY $q1
%2:_(<2 x s64>) = COPY $q2
@@ -143,25 +144,26 @@ body: |
...
---
name: test_v6i64
# This is a non-power-of-2 legalization, generate multiple vector reductions
# and combine them with scalar ops.
alignment: 4
tracksRegLiveness: true
body: |
bb.1:
liveins: $q0, $q1, $q2, $q3
; This is a non-power-of-2 legalization, generate multiple vector reductions
; and combine them with scalar ops.
; CHECK-LABEL: name: test_v6i64
; CHECK: liveins: $q0, $q1, $q2, $q3
; CHECK: [[COPY:%[0-9]+]]:_(<2 x s64>) = COPY $q0
; CHECK: [[COPY1:%[0-9]+]]:_(<2 x s64>) = COPY $q1
; CHECK: [[COPY2:%[0-9]+]]:_(<2 x s64>) = COPY $q2
; CHECK: [[VECREDUCE_ADD:%[0-9]+]]:_(s64) = G_VECREDUCE_ADD [[COPY]](<2 x s64>)
; CHECK: [[VECREDUCE_ADD1:%[0-9]+]]:_(s64) = G_VECREDUCE_ADD [[COPY1]](<2 x s64>)
; CHECK: [[VECREDUCE_ADD2:%[0-9]+]]:_(s64) = G_VECREDUCE_ADD [[COPY2]](<2 x s64>)
; CHECK: [[ADD:%[0-9]+]]:_(s64) = G_ADD [[VECREDUCE_ADD]], [[VECREDUCE_ADD1]]
; CHECK: [[ADD1:%[0-9]+]]:_(s64) = G_ADD [[ADD]], [[VECREDUCE_ADD2]]
; CHECK: $x0 = COPY [[ADD1]](s64)
; CHECK: RET_ReallyLR implicit $x0
; CHECK-NEXT: {{ $}}
; CHECK-NEXT: [[COPY:%[0-9]+]]:_(<2 x s64>) = COPY $q0
; CHECK-NEXT: [[COPY1:%[0-9]+]]:_(<2 x s64>) = COPY $q1
; CHECK-NEXT: [[COPY2:%[0-9]+]]:_(<2 x s64>) = COPY $q2
; CHECK-NEXT: [[VECREDUCE_ADD:%[0-9]+]]:_(s64) = G_VECREDUCE_ADD [[COPY]](<2 x s64>)
; CHECK-NEXT: [[VECREDUCE_ADD1:%[0-9]+]]:_(s64) = G_VECREDUCE_ADD [[COPY1]](<2 x s64>)
; CHECK-NEXT: [[VECREDUCE_ADD2:%[0-9]+]]:_(s64) = G_VECREDUCE_ADD [[COPY2]](<2 x s64>)
; CHECK-NEXT: [[ADD:%[0-9]+]]:_(s64) = G_ADD [[VECREDUCE_ADD]], [[VECREDUCE_ADD1]]
; CHECK-NEXT: [[ADD1:%[0-9]+]]:_(s64) = G_ADD [[ADD]], [[VECREDUCE_ADD2]]
; CHECK-NEXT: $x0 = COPY [[ADD1]](s64)
; CHECK-NEXT: RET_ReallyLR implicit $x0
%0:_(<2 x s64>) = COPY $q0
%1:_(<2 x s64>) = COPY $q1
%2:_(<2 x s64>) = COPY $q2

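For reference, the two MIR cases above (test_v8i64 and test_v6i64) correspond to IR reductions over 8- and 6-element vectors; a minimal sketch of such inputs, illustrative only and not part of this commit:

  declare i64 @llvm.vector.reduce.add.v8i64(<8 x i64>)
  declare i64 @llvm.vector.reduce.add.v6i64(<6 x i64>)

  define i64 @reduce_v8i64(<8 x i64> %v) {
    ; Power-of-2 width: legalized as a tree of <2 x s64> G_ADDs feeding a single G_VECREDUCE_ADD.
    %r = call i64 @llvm.vector.reduce.add.v8i64(<8 x i64> %v)
    ret i64 %r
  }

  define i64 @reduce_v6i64(<6 x i64> %v) {
    ; Non-power-of-2 width: legalized as three <2 x s64> G_VECREDUCE_ADDs combined with scalar G_ADDs.
    %r = call i64 @llvm.vector.reduce.add.v6i64(<6 x i64> %v)
    ret i64 %r
  }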

@@ -1,8 +1,7 @@
; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
; RUN: llc < %s -mtriple=aarch64 -aarch64-neon-syntax=generic | FileCheck %s -check-prefixes=CHECK,SDAG
; RUN: llc < %s -global-isel=1 -global-isel-abort=2 -mtriple=aarch64 -aarch64-neon-syntax=generic 2>&1 | FileCheck %s --check-prefixes=CHECK,GISEL
; RUN: llc < %s -mtriple=aarch64 -aarch64-neon-syntax=generic | FileCheck %s -check-prefixes=CHECK,CHECK-SD
; RUN: llc < %s -mtriple=aarch64 -global-isel=1 -global-isel-abort=2 -aarch64-neon-syntax=generic 2>&1 | FileCheck %s --check-prefixes=CHECK,CHECK-GI
; Function Attrs: nounwind readnone
declare i8 @llvm.vector.reduce.add.v2i8(<2 x i8>)
declare i8 @llvm.vector.reduce.add.v3i8(<3 x i8>)
declare i8 @llvm.vector.reduce.add.v4i8(<4 x i8>)
@@ -23,14 +22,14 @@ declare i64 @llvm.vector.reduce.add.v3i64(<3 x i64>)
declare i64 @llvm.vector.reduce.add.v4i64(<4 x i64>)
declare i128 @llvm.vector.reduce.add.v2i128(<2 x i128>)
; GISEL: warning: Instruction selection used fallback path for addv_v2i8
; GISEL-NEXT: warning: Instruction selection used fallback path for addv_v3i8
; GISEL-NEXT: warning: Instruction selection used fallback path for addv_v4i8
; GISEL-NEXT: warning: Instruction selection used fallback path for addv_v2i16
; GISEL-NEXT: warning: Instruction selection used fallback path for addv_v3i16
; GISEL-NEXT: warning: Instruction selection used fallback path for addv_v3i32
; GISEL-NEXT: warning: Instruction selection used fallback path for addv_v3i64
; GISEL-NEXT: warning: Instruction selection used fallback path for addv_v2i128
; CHECK-GI: warning: Instruction selection used fallback path for addv_v2i8
; CHECK-GI-NEXT: warning: Instruction selection used fallback path for addv_v3i8
; CHECK-GI-NEXT: warning: Instruction selection used fallback path for addv_v4i8
; CHECK-GI-NEXT: warning: Instruction selection used fallback path for addv_v2i16
; CHECK-GI-NEXT: warning: Instruction selection used fallback path for addv_v3i16
; CHECK-GI-NEXT: warning: Instruction selection used fallback path for addv_v3i32
; CHECK-GI-NEXT: warning: Instruction selection used fallback path for addv_v3i64
; CHECK-GI-NEXT: warning: Instruction selection used fallback path for addv_v2i128
define i8 @add_B(ptr %arr) {
@@ -83,34 +82,34 @@ define i64 @add_D(ptr %arr) {
define i32 @oversized_ADDV_256(ptr noalias nocapture readonly %arg1, ptr noalias nocapture readonly %arg2) {
; SDAG-LABEL: oversized_ADDV_256:
; SDAG: // %bb.0: // %entry
; SDAG-NEXT: ldr d0, [x0]
; SDAG-NEXT: ldr d1, [x1]
; SDAG-NEXT: uabdl v0.8h, v0.8b, v1.8b
; SDAG-NEXT: uaddlv s0, v0.8h
; SDAG-NEXT: fmov w0, s0
; SDAG-NEXT: ret
; CHECK-SD-LABEL: oversized_ADDV_256:
; CHECK-SD: // %bb.0: // %entry
; CHECK-SD-NEXT: ldr d0, [x0]
; CHECK-SD-NEXT: ldr d1, [x1]
; CHECK-SD-NEXT: uabdl v0.8h, v0.8b, v1.8b
; CHECK-SD-NEXT: uaddlv s0, v0.8h
; CHECK-SD-NEXT: fmov w0, s0
; CHECK-SD-NEXT: ret
;
; GISEL-LABEL: oversized_ADDV_256:
; GISEL: // %bb.0: // %entry
; GISEL-NEXT: ldr d1, [x0]
; GISEL-NEXT: ldr d2, [x1]
; GISEL-NEXT: movi v0.2d, #0000000000000000
; GISEL-NEXT: usubl v1.8h, v1.8b, v2.8b
; GISEL-NEXT: sshll v2.4s, v1.4h, #0
; GISEL-NEXT: sshll2 v3.4s, v1.8h, #0
; GISEL-NEXT: ssubw2 v0.4s, v0.4s, v1.8h
; GISEL-NEXT: cmlt v4.4s, v2.4s, #0
; GISEL-NEXT: cmlt v5.4s, v3.4s, #0
; GISEL-NEXT: neg v6.4s, v2.4s
; GISEL-NEXT: mov v1.16b, v4.16b
; GISEL-NEXT: bif v0.16b, v3.16b, v5.16b
; GISEL-NEXT: bsl v1.16b, v6.16b, v2.16b
; GISEL-NEXT: add v0.4s, v1.4s, v0.4s
; GISEL-NEXT: addv s0, v0.4s
; GISEL-NEXT: fmov w0, s0
; GISEL-NEXT: ret
; CHECK-GI-LABEL: oversized_ADDV_256:
; CHECK-GI: // %bb.0: // %entry
; CHECK-GI-NEXT: ldr d1, [x0]
; CHECK-GI-NEXT: ldr d2, [x1]
; CHECK-GI-NEXT: movi v0.2d, #0000000000000000
; CHECK-GI-NEXT: usubl v1.8h, v1.8b, v2.8b
; CHECK-GI-NEXT: sshll v2.4s, v1.4h, #0
; CHECK-GI-NEXT: sshll2 v3.4s, v1.8h, #0
; CHECK-GI-NEXT: ssubw2 v0.4s, v0.4s, v1.8h
; CHECK-GI-NEXT: cmlt v4.4s, v2.4s, #0
; CHECK-GI-NEXT: cmlt v5.4s, v3.4s, #0
; CHECK-GI-NEXT: neg v6.4s, v2.4s
; CHECK-GI-NEXT: mov v1.16b, v4.16b
; CHECK-GI-NEXT: bif v0.16b, v3.16b, v5.16b
; CHECK-GI-NEXT: bsl v1.16b, v6.16b, v2.16b
; CHECK-GI-NEXT: add v0.4s, v1.4s, v0.4s
; CHECK-GI-NEXT: addv s0, v0.4s
; CHECK-GI-NEXT: fmov w0, s0
; CHECK-GI-NEXT: ret
entry:
%0 = load <8 x i8>, ptr %arg1, align 1
%1 = zext <8 x i8> %0 to <8 x i32>
@@ -127,48 +126,48 @@ entry:
declare i32 @llvm.vector.reduce.add.v16i32(<16 x i32>)
define i32 @oversized_ADDV_512(ptr %arr) {
; SDAG-LABEL: oversized_ADDV_512:
; SDAG: // %bb.0:
; SDAG-NEXT: ldp q0, q1, [x0, #32]
; SDAG-NEXT: ldp q2, q3, [x0]
; SDAG-NEXT: add v1.4s, v3.4s, v1.4s
; SDAG-NEXT: add v0.4s, v2.4s, v0.4s
; SDAG-NEXT: add v0.4s, v0.4s, v1.4s
; SDAG-NEXT: addv s0, v0.4s
; SDAG-NEXT: fmov w0, s0
; SDAG-NEXT: ret
; CHECK-SD-LABEL: oversized_ADDV_512:
; CHECK-SD: // %bb.0:
; CHECK-SD-NEXT: ldp q0, q1, [x0, #32]
; CHECK-SD-NEXT: ldp q2, q3, [x0]
; CHECK-SD-NEXT: add v1.4s, v3.4s, v1.4s
; CHECK-SD-NEXT: add v0.4s, v2.4s, v0.4s
; CHECK-SD-NEXT: add v0.4s, v0.4s, v1.4s
; CHECK-SD-NEXT: addv s0, v0.4s
; CHECK-SD-NEXT: fmov w0, s0
; CHECK-SD-NEXT: ret
;
; GISEL-LABEL: oversized_ADDV_512:
; GISEL: // %bb.0:
; GISEL-NEXT: ldp q0, q1, [x0]
; GISEL-NEXT: ldp q2, q3, [x0, #32]
; GISEL-NEXT: add v0.4s, v0.4s, v1.4s
; GISEL-NEXT: add v1.4s, v2.4s, v3.4s
; GISEL-NEXT: add v0.4s, v0.4s, v1.4s
; GISEL-NEXT: addv s0, v0.4s
; GISEL-NEXT: fmov w0, s0
; GISEL-NEXT: ret
; CHECK-GI-LABEL: oversized_ADDV_512:
; CHECK-GI: // %bb.0:
; CHECK-GI-NEXT: ldp q0, q1, [x0]
; CHECK-GI-NEXT: ldp q2, q3, [x0, #32]
; CHECK-GI-NEXT: add v0.4s, v0.4s, v1.4s
; CHECK-GI-NEXT: add v1.4s, v2.4s, v3.4s
; CHECK-GI-NEXT: add v0.4s, v0.4s, v1.4s
; CHECK-GI-NEXT: addv s0, v0.4s
; CHECK-GI-NEXT: fmov w0, s0
; CHECK-GI-NEXT: ret
%bin.rdx = load <16 x i32>, ptr %arr
%r = call i32 @llvm.vector.reduce.add.v16i32(<16 x i32> %bin.rdx)
ret i32 %r
}
define i8 @addv_combine_i8(<8 x i8> %a1, <8 x i8> %a2) {
; SDAG-LABEL: addv_combine_i8:
; SDAG: // %bb.0: // %entry
; SDAG-NEXT: add v0.8b, v0.8b, v1.8b
; SDAG-NEXT: addv b0, v0.8b
; SDAG-NEXT: fmov w0, s0
; SDAG-NEXT: ret
; CHECK-SD-LABEL: addv_combine_i8:
; CHECK-SD: // %bb.0: // %entry
; CHECK-SD-NEXT: add v0.8b, v0.8b, v1.8b
; CHECK-SD-NEXT: addv b0, v0.8b
; CHECK-SD-NEXT: fmov w0, s0
; CHECK-SD-NEXT: ret
;
; GISEL-LABEL: addv_combine_i8:
; GISEL: // %bb.0: // %entry
; GISEL-NEXT: addv b0, v0.8b
; GISEL-NEXT: addv b1, v1.8b
; GISEL-NEXT: fmov w8, s0
; GISEL-NEXT: fmov w9, s1
; GISEL-NEXT: add w0, w9, w8, uxtb
; GISEL-NEXT: ret
; CHECK-GI-LABEL: addv_combine_i8:
; CHECK-GI: // %bb.0: // %entry
; CHECK-GI-NEXT: addv b0, v0.8b
; CHECK-GI-NEXT: addv b1, v1.8b
; CHECK-GI-NEXT: fmov w8, s0
; CHECK-GI-NEXT: fmov w9, s1
; CHECK-GI-NEXT: add w0, w9, w8, uxtb
; CHECK-GI-NEXT: ret
entry:
%rdx.1 = call i8 @llvm.vector.reduce.add.v8i8(<8 x i8> %a1)
%rdx.2 = call i8 @llvm.vector.reduce.add.v8i8(<8 x i8> %a2)
@@ -177,21 +176,21 @@ entry:
}
define i16 @addv_combine_i16(<4 x i16> %a1, <4 x i16> %a2) {
; SDAG-LABEL: addv_combine_i16:
; SDAG: // %bb.0: // %entry
; SDAG-NEXT: add v0.4h, v0.4h, v1.4h
; SDAG-NEXT: addv h0, v0.4h
; SDAG-NEXT: fmov w0, s0
; SDAG-NEXT: ret
; CHECK-SD-LABEL: addv_combine_i16:
; CHECK-SD: // %bb.0: // %entry
; CHECK-SD-NEXT: add v0.4h, v0.4h, v1.4h
; CHECK-SD-NEXT: addv h0, v0.4h
; CHECK-SD-NEXT: fmov w0, s0
; CHECK-SD-NEXT: ret
;
; GISEL-LABEL: addv_combine_i16:
; GISEL: // %bb.0: // %entry
; GISEL-NEXT: addv h0, v0.4h
; GISEL-NEXT: addv h1, v1.4h
; GISEL-NEXT: fmov w8, s0
; GISEL-NEXT: fmov w9, s1
; GISEL-NEXT: add w0, w9, w8, uxth
; GISEL-NEXT: ret
; CHECK-GI-LABEL: addv_combine_i16:
; CHECK-GI: // %bb.0: // %entry
; CHECK-GI-NEXT: addv h0, v0.4h
; CHECK-GI-NEXT: addv h1, v1.4h
; CHECK-GI-NEXT: fmov w8, s0
; CHECK-GI-NEXT: fmov w9, s1
; CHECK-GI-NEXT: add w0, w9, w8, uxth
; CHECK-GI-NEXT: ret
entry:
%rdx.1 = call i16 @llvm.vector.reduce.add.v4i16(<4 x i16> %a1)
%rdx.2 = call i16 @llvm.vector.reduce.add.v4i16(<4 x i16> %a2)
@@ -200,21 +199,21 @@ entry:
}
define i32 @addv_combine_i32(<4 x i32> %a1, <4 x i32> %a2) {
; SDAG-LABEL: addv_combine_i32:
; SDAG: // %bb.0: // %entry
; SDAG-NEXT: add v0.4s, v0.4s, v1.4s
; SDAG-NEXT: addv s0, v0.4s
; SDAG-NEXT: fmov w0, s0
; SDAG-NEXT: ret
; CHECK-SD-LABEL: addv_combine_i32:
; CHECK-SD: // %bb.0: // %entry
; CHECK-SD-NEXT: add v0.4s, v0.4s, v1.4s
; CHECK-SD-NEXT: addv s0, v0.4s
; CHECK-SD-NEXT: fmov w0, s0
; CHECK-SD-NEXT: ret
;
; GISEL-LABEL: addv_combine_i32:
; GISEL: // %bb.0: // %entry
; GISEL-NEXT: addv s0, v0.4s
; GISEL-NEXT: addv s1, v1.4s
; GISEL-NEXT: fmov w8, s0
; GISEL-NEXT: fmov w9, s1
; GISEL-NEXT: add w0, w8, w9
; GISEL-NEXT: ret
; CHECK-GI-LABEL: addv_combine_i32:
; CHECK-GI: // %bb.0: // %entry
; CHECK-GI-NEXT: addv s0, v0.4s
; CHECK-GI-NEXT: addv s1, v1.4s
; CHECK-GI-NEXT: fmov w8, s0
; CHECK-GI-NEXT: fmov w9, s1
; CHECK-GI-NEXT: add w0, w8, w9
; CHECK-GI-NEXT: ret
entry:
%rdx.1 = call i32 @llvm.vector.reduce.add.v4i32(<4 x i32> %a1)
%rdx.2 = call i32 @llvm.vector.reduce.add.v4i32(<4 x i32> %a2)
@@ -223,21 +222,21 @@ entry:
}
define i64 @addv_combine_i64(<2 x i64> %a1, <2 x i64> %a2) {
; SDAG-LABEL: addv_combine_i64:
; SDAG: // %bb.0: // %entry
; SDAG-NEXT: add v0.2d, v0.2d, v1.2d
; SDAG-NEXT: addp d0, v0.2d
; SDAG-NEXT: fmov x0, d0
; SDAG-NEXT: ret
; CHECK-SD-LABEL: addv_combine_i64:
; CHECK-SD: // %bb.0: // %entry
; CHECK-SD-NEXT: add v0.2d, v0.2d, v1.2d
; CHECK-SD-NEXT: addp d0, v0.2d
; CHECK-SD-NEXT: fmov x0, d0
; CHECK-SD-NEXT: ret
;
; GISEL-LABEL: addv_combine_i64:
; GISEL: // %bb.0: // %entry
; GISEL-NEXT: addp d0, v0.2d
; GISEL-NEXT: addp d1, v1.2d
; GISEL-NEXT: fmov x8, d0
; GISEL-NEXT: fmov x9, d1
; GISEL-NEXT: add x0, x8, x9
; GISEL-NEXT: ret
; CHECK-GI-LABEL: addv_combine_i64:
; CHECK-GI: // %bb.0: // %entry
; CHECK-GI-NEXT: addp d0, v0.2d
; CHECK-GI-NEXT: addp d1, v1.2d
; CHECK-GI-NEXT: fmov x8, d0
; CHECK-GI-NEXT: fmov x9, d1
; CHECK-GI-NEXT: add x0, x8, x9
; CHECK-GI-NEXT: ret
entry:
%rdx.1 = call i64 @llvm.vector.reduce.add.v2i64(<2 x i64> %a1)
%rdx.2 = call i64 @llvm.vector.reduce.add.v2i64(<2 x i64> %a2)
@@ -471,3 +470,6 @@ entry:
ret i128 %arg1
}
;; NOTE: These prefixes are unused and the list is autogenerated. Do not add tests below this line:
; GISEL: {{.*}}
; SDAG: {{.*}}


@@ -1,6 +1,6 @@
; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py UTC_ARGS: --version 5
; RUN: llc < %s -mtriple=arm64-linux-gnu | FileCheck %s --check-prefixes=CHECK,SDAG
; RUN: llc < %s -global-isel -global-isel-abort=1 -pass-remarks-missed=gisel* -mtriple=arm64-linux-gnu 2>&1 | FileCheck %s --check-prefixes=CHECK,GISEL,FALLBACK
; RUN: llc < %s -mtriple=arm64-linux-gnu | FileCheck %s --check-prefixes=CHECK,CHECK-SD
; RUN: llc < %s -mtriple=arm64-linux-gnu -global-isel | FileCheck %s --check-prefixes=CHECK,CHECK-GI
%0 = type { i64, i64 }
@@ -39,22 +39,21 @@ declare i32 @llvm.aarch64.stxp(i64, i64, ptr) nounwind
@var = dso_local global i64 0, align 8
; FALLBACK-NOT: remark:{{.*}}test_load_i8
define dso_local void @test_load_i8(ptr %addr) {
; SDAG-LABEL: test_load_i8:
; SDAG: // %bb.0:
; SDAG-NEXT: ldxrb w8, [x0]
; SDAG-NEXT: adrp x9, var
; SDAG-NEXT: str x8, [x9, :lo12:var]
; SDAG-NEXT: ret
; CHECK-SD-LABEL: test_load_i8:
; CHECK-SD: // %bb.0:
; CHECK-SD-NEXT: ldxrb w8, [x0]
; CHECK-SD-NEXT: adrp x9, var
; CHECK-SD-NEXT: str x8, [x9, :lo12:var]
; CHECK-SD-NEXT: ret
;
; GISEL-LABEL: test_load_i8:
; GISEL: // %bb.0:
; GISEL-NEXT: ldxrb w9, [x0]
; GISEL-NEXT: adrp x8, var
; GISEL-NEXT: and x9, x9, #0xff
; GISEL-NEXT: str x9, [x8, :lo12:var]
; GISEL-NEXT: ret
; CHECK-GI-LABEL: test_load_i8:
; CHECK-GI: // %bb.0:
; CHECK-GI-NEXT: ldxrb w9, [x0]
; CHECK-GI-NEXT: adrp x8, var
; CHECK-GI-NEXT: and x9, x9, #0xff
; CHECK-GI-NEXT: str x9, [x8, :lo12:var]
; CHECK-GI-NEXT: ret
%val = call i64 @llvm.aarch64.ldxr.p0(ptr elementtype(i8) %addr)
%shortval = trunc i64 %val to i8
@@ -63,22 +62,21 @@ define dso_local void @test_load_i8(ptr %addr) {
ret void
}
; FALLBACK-NOT: remark:{{.*}}test_load_i16
define dso_local void @test_load_i16(ptr %addr) {
; SDAG-LABEL: test_load_i16:
; SDAG: // %bb.0:
; SDAG-NEXT: ldxrh w8, [x0]
; SDAG-NEXT: adrp x9, var
; SDAG-NEXT: str x8, [x9, :lo12:var]
; SDAG-NEXT: ret
; CHECK-SD-LABEL: test_load_i16:
; CHECK-SD: // %bb.0:
; CHECK-SD-NEXT: ldxrh w8, [x0]
; CHECK-SD-NEXT: adrp x9, var
; CHECK-SD-NEXT: str x8, [x9, :lo12:var]
; CHECK-SD-NEXT: ret
;
; GISEL-LABEL: test_load_i16:
; GISEL: // %bb.0:
; GISEL-NEXT: ldxrh w9, [x0]
; GISEL-NEXT: adrp x8, var
; GISEL-NEXT: and x9, x9, #0xffff
; GISEL-NEXT: str x9, [x8, :lo12:var]
; GISEL-NEXT: ret
; CHECK-GI-LABEL: test_load_i16:
; CHECK-GI: // %bb.0:
; CHECK-GI-NEXT: ldxrh w9, [x0]
; CHECK-GI-NEXT: adrp x8, var
; CHECK-GI-NEXT: and x9, x9, #0xffff
; CHECK-GI-NEXT: str x9, [x8, :lo12:var]
; CHECK-GI-NEXT: ret
%val = call i64 @llvm.aarch64.ldxr.p0(ptr elementtype(i16) %addr)
%shortval = trunc i64 %val to i16
@@ -87,22 +85,21 @@ define dso_local void @test_load_i16(ptr %addr) {
ret void
}
; FALLBACK-NOT: remark:{{.*}}test_load_i32
define dso_local void @test_load_i32(ptr %addr) {
; SDAG-LABEL: test_load_i32:
; SDAG: // %bb.0:
; SDAG-NEXT: ldxr w8, [x0]
; SDAG-NEXT: adrp x9, var
; SDAG-NEXT: str x8, [x9, :lo12:var]
; SDAG-NEXT: ret
; CHECK-SD-LABEL: test_load_i32:
; CHECK-SD: // %bb.0:
; CHECK-SD-NEXT: ldxr w8, [x0]
; CHECK-SD-NEXT: adrp x9, var
; CHECK-SD-NEXT: str x8, [x9, :lo12:var]
; CHECK-SD-NEXT: ret
;
; GISEL-LABEL: test_load_i32:
; GISEL: // %bb.0:
; GISEL-NEXT: ldxr w9, [x0]
; GISEL-NEXT: adrp x8, var
; GISEL-NEXT: mov w9, w9
; GISEL-NEXT: str x9, [x8, :lo12:var]
; GISEL-NEXT: ret
; CHECK-GI-LABEL: test_load_i32:
; CHECK-GI: // %bb.0:
; CHECK-GI-NEXT: ldxr w9, [x0]
; CHECK-GI-NEXT: adrp x8, var
; CHECK-GI-NEXT: mov w9, w9
; CHECK-GI-NEXT: str x9, [x8, :lo12:var]
; CHECK-GI-NEXT: ret
%val = call i64 @llvm.aarch64.ldxr.p0(ptr elementtype(i32) %addr)
%shortval = trunc i64 %val to i32
@@ -111,7 +108,6 @@ define dso_local void @test_load_i32(ptr %addr) {
ret void
}
; FALLBACK-NOT: remark:{{.*}}test_load_i64
define dso_local void @test_load_i64(ptr %addr) {
; CHECK-LABEL: test_load_i64:
; CHECK: // %bb.0:
@@ -128,7 +124,6 @@ define dso_local void @test_load_i64(ptr %addr) {
declare i64 @llvm.aarch64.ldxr.p0(ptr) nounwind
; FALLBACK-NOT: remark:{{.*}}test_store_i8
define dso_local i32 @test_store_i8(i32, i8 %val, ptr %addr) {
; CHECK-LABEL: test_store_i8:
; CHECK: // %bb.0:
@@ -140,7 +135,6 @@ define dso_local i32 @test_store_i8(i32, i8 %val, ptr %addr) {
ret i32 %res
}
; FALLBACK-NOT: remark:{{.*}}test_store_i16
define dso_local i32 @test_store_i16(i32, i16 %val, ptr %addr) {
; CHECK-LABEL: test_store_i16:
; CHECK: // %bb.0:
@@ -152,7 +146,6 @@ define dso_local i32 @test_store_i16(i32, i16 %val, ptr %addr) {
ret i32 %res
}
; FALLBACK-NOT: remark:{{.*}}test_store_i32
define dso_local i32 @test_store_i32(i32, i32 %val, ptr %addr) {
; CHECK-LABEL: test_store_i32:
; CHECK: // %bb.0:
@@ -163,7 +156,6 @@ define dso_local i32 @test_store_i32(i32, i32 %val, ptr %addr) {
ret i32 %res
}
; FALLBACK-NOT: remark:{{.*}}test_store_i64
define dso_local i32 @test_store_i64(i32, i64 %val, ptr %addr) {
; CHECK-LABEL: test_store_i64:
; CHECK: // %bb.0:
@@ -219,22 +211,21 @@ entry:
declare %0 @llvm.aarch64.ldaxp(ptr) nounwind
declare i32 @llvm.aarch64.stlxp(i64, i64, ptr) nounwind
; FALLBACK-NOT: remark:{{.*}}test_load_acquire_i8
define dso_local void @test_load_acquire_i8(ptr %addr) {
; SDAG-LABEL: test_load_acquire_i8:
; SDAG: // %bb.0:
; SDAG-NEXT: ldaxrb w8, [x0]
; SDAG-NEXT: adrp x9, var
; SDAG-NEXT: str x8, [x9, :lo12:var]
; SDAG-NEXT: ret
; CHECK-SD-LABEL: test_load_acquire_i8:
; CHECK-SD: // %bb.0:
; CHECK-SD-NEXT: ldaxrb w8, [x0]
; CHECK-SD-NEXT: adrp x9, var
; CHECK-SD-NEXT: str x8, [x9, :lo12:var]
; CHECK-SD-NEXT: ret
;
; GISEL-LABEL: test_load_acquire_i8:
; GISEL: // %bb.0:
; GISEL-NEXT: ldaxrb w9, [x0]
; GISEL-NEXT: adrp x8, var
; GISEL-NEXT: and x9, x9, #0xff
; GISEL-NEXT: str x9, [x8, :lo12:var]
; GISEL-NEXT: ret
; CHECK-GI-LABEL: test_load_acquire_i8:
; CHECK-GI: // %bb.0:
; CHECK-GI-NEXT: ldaxrb w9, [x0]
; CHECK-GI-NEXT: adrp x8, var
; CHECK-GI-NEXT: and x9, x9, #0xff
; CHECK-GI-NEXT: str x9, [x8, :lo12:var]
; CHECK-GI-NEXT: ret
%val = call i64 @llvm.aarch64.ldaxr.p0(ptr elementtype(i8) %addr)
%shortval = trunc i64 %val to i8
@@ -243,22 +234,21 @@ define dso_local void @test_load_acquire_i8(ptr %addr) {
ret void
}
; FALLBACK-NOT: remark:{{.*}}test_load_acquire_i16
define dso_local void @test_load_acquire_i16(ptr %addr) {
; SDAG-LABEL: test_load_acquire_i16:
; SDAG: // %bb.0:
; SDAG-NEXT: ldaxrh w8, [x0]
; SDAG-NEXT: adrp x9, var
; SDAG-NEXT: str x8, [x9, :lo12:var]
; SDAG-NEXT: ret
; CHECK-SD-LABEL: test_load_acquire_i16:
; CHECK-SD: // %bb.0:
; CHECK-SD-NEXT: ldaxrh w8, [x0]
; CHECK-SD-NEXT: adrp x9, var
; CHECK-SD-NEXT: str x8, [x9, :lo12:var]
; CHECK-SD-NEXT: ret
;
; GISEL-LABEL: test_load_acquire_i16:
; GISEL: // %bb.0:
; GISEL-NEXT: ldaxrh w9, [x0]
; GISEL-NEXT: adrp x8, var
; GISEL-NEXT: and x9, x9, #0xffff
; GISEL-NEXT: str x9, [x8, :lo12:var]
; GISEL-NEXT: ret
; CHECK-GI-LABEL: test_load_acquire_i16:
; CHECK-GI: // %bb.0:
; CHECK-GI-NEXT: ldaxrh w9, [x0]
; CHECK-GI-NEXT: adrp x8, var
; CHECK-GI-NEXT: and x9, x9, #0xffff
; CHECK-GI-NEXT: str x9, [x8, :lo12:var]
; CHECK-GI-NEXT: ret
%val = call i64 @llvm.aarch64.ldaxr.p0(ptr elementtype(i16) %addr)
%shortval = trunc i64 %val to i16
@@ -267,22 +257,21 @@ define dso_local void @test_load_acquire_i16(ptr %addr) {
ret void
}
; FALLBACK-NOT: remark:{{.*}}test_load_acquire_i32
define dso_local void @test_load_acquire_i32(ptr %addr) {
; SDAG-LABEL: test_load_acquire_i32:
; SDAG: // %bb.0:
; SDAG-NEXT: ldaxr w8, [x0]
; SDAG-NEXT: adrp x9, var
; SDAG-NEXT: str x8, [x9, :lo12:var]
; SDAG-NEXT: ret
; CHECK-SD-LABEL: test_load_acquire_i32:
; CHECK-SD: // %bb.0:
; CHECK-SD-NEXT: ldaxr w8, [x0]
; CHECK-SD-NEXT: adrp x9, var
; CHECK-SD-NEXT: str x8, [x9, :lo12:var]
; CHECK-SD-NEXT: ret
;
; GISEL-LABEL: test_load_acquire_i32:
; GISEL: // %bb.0:
; GISEL-NEXT: ldaxr w9, [x0]
; GISEL-NEXT: adrp x8, var
; GISEL-NEXT: mov w9, w9
; GISEL-NEXT: str x9, [x8, :lo12:var]
; GISEL-NEXT: ret
; CHECK-GI-LABEL: test_load_acquire_i32:
; CHECK-GI: // %bb.0:
; CHECK-GI-NEXT: ldaxr w9, [x0]
; CHECK-GI-NEXT: adrp x8, var
; CHECK-GI-NEXT: mov w9, w9
; CHECK-GI-NEXT: str x9, [x8, :lo12:var]
; CHECK-GI-NEXT: ret
%val = call i64 @llvm.aarch64.ldaxr.p0(ptr elementtype(i32) %addr)
%shortval = trunc i64 %val to i32
@@ -291,7 +280,6 @@ define dso_local void @test_load_acquire_i32(ptr %addr) {
ret void
}
; FALLBACK-NOT: remark:{{.*}}test_load_acquire_i64
define dso_local void @test_load_acquire_i64(ptr %addr) {
; CHECK-LABEL: test_load_acquire_i64:
; CHECK: // %bb.0:
@@ -308,7 +296,6 @@ define dso_local void @test_load_acquire_i64(ptr %addr) {
declare i64 @llvm.aarch64.ldaxr.p0(ptr) nounwind
; FALLBACK-NOT: remark:{{.*}}test_store_release_i8
define dso_local i32 @test_store_release_i8(i32, i8 %val, ptr %addr) {
; CHECK-LABEL: test_store_release_i8:
; CHECK: // %bb.0:
@@ -320,7 +307,6 @@ define dso_local i32 @test_store_release_i8(i32, i8 %val, ptr %addr) {
ret i32 %res
}
; FALLBACK-NOT: remark:{{.*}}test_store_release_i16
define dso_local i32 @test_store_release_i16(i32, i16 %val, ptr %addr) {
; CHECK-LABEL: test_store_release_i16:
; CHECK: // %bb.0:
@@ -332,7 +318,6 @@ define dso_local i32 @test_store_release_i16(i32, i16 %val, ptr %addr) {
ret i32 %res
}
; FALLBACK-NOT: remark:{{.*}}test_store_release_i32
define dso_local i32 @test_store_release_i32(i32, i32 %val, ptr %addr) {
; CHECK-LABEL: test_store_release_i32:
; CHECK: // %bb.0:
@@ -343,7 +328,6 @@ define dso_local i32 @test_store_release_i32(i32, i32 %val, ptr %addr) {
ret i32 %res
}
; FALLBACK-NOT: remark:{{.*}}test_store_release_i64
define dso_local i32 @test_store_release_i64(i32, i64 %val, ptr %addr) {
; CHECK-LABEL: test_store_release_i64:
; CHECK: // %bb.0:
@@ -378,5 +362,3 @@ define dso_local i32 @test_stxp_undef_inline_asm(ptr %p, i64 %x) nounwind {
}
declare i32 @llvm.aarch64.stlxr.p0(i64, ptr) nounwind
;; NOTE: These prefixes are unused and the list is autogenerated. Do not add tests below this line:
; FALLBACK: {{.*}}

File diff suppressed because it is too large.