[NFC][InstCombine][AArch64] Remove SVE mul idempotency tests.
They have been subsumed into the SVE binop simplification tests.
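In outline, each of the four removed files exercised the same fold on a different intrinsic (fmul, fmul.u, mul, mul.u). A minimal sketch, distilled from the first deleted test below; the function name @fold_sketch is illustrative, not from the tests:

define <vscale x 8 x half> @fold_sketch(<vscale x 8 x i1> %pg, <vscale x 8 x half> %a) {
  ; Multiplying %a by a dup (splat) of 1.0 is a no-op, so instcombine is
  ; expected to reduce the whole body to: ret <vscale x 8 x half> %a
  %1 = call <vscale x 8 x half> @llvm.aarch64.sve.dup.x.nxv8f16(half 1.0)
  %2 = call <vscale x 8 x half> @llvm.aarch64.sve.fmul.nxv8f16(<vscale x 8 x i1> %pg, <vscale x 8 x half> %a, <vscale x 8 x half> %1)
  ret <vscale x 8 x half> %2
}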
@@ -1,130 +0,0 @@
; NOTE: Assertions have been autogenerated by utils/update_test_checks.py UTC_ARGS: --version 5
; RUN: opt -S -passes=instcombine < %s | FileCheck %s

target triple = "aarch64-unknown-linux-gnu"

; Idempotent fmuls -- should compile to just a ret.
define <vscale x 8 x half> @idempotent_fmul_f16(<vscale x 8 x i1> %pg, <vscale x 8 x half> %a) #0 {
; CHECK-LABEL: define <vscale x 8 x half> @idempotent_fmul_f16(
; CHECK-SAME: <vscale x 8 x i1> [[PG:%.*]], <vscale x 8 x half> [[A:%.*]]) #[[ATTR0:[0-9]+]] {
; CHECK-NEXT:    ret <vscale x 8 x half> [[A]]
;
  %1 = call <vscale x 8 x half> @llvm.aarch64.sve.dup.x.nxv8f16(half 1.0)
  %2 = call <vscale x 8 x half> @llvm.aarch64.sve.fmul.nxv8f16(<vscale x 8 x i1> %pg, <vscale x 8 x half> %a, <vscale x 8 x half> %1)
  ret <vscale x 8 x half> %2
}

define <vscale x 4 x float> @idempotent_fmul_f32(<vscale x 4 x i1> %pg, <vscale x 4 x float> %a) #0 {
; CHECK-LABEL: define <vscale x 4 x float> @idempotent_fmul_f32(
; CHECK-SAME: <vscale x 4 x i1> [[PG:%.*]], <vscale x 4 x float> [[A:%.*]]) #[[ATTR0]] {
; CHECK-NEXT:    ret <vscale x 4 x float> [[A]]
;
  %1 = call <vscale x 4 x float> @llvm.aarch64.sve.dup.x.nxv4f32(float 1.0)
  %2 = call <vscale x 4 x float> @llvm.aarch64.sve.fmul.nxv4f32(<vscale x 4 x i1> %pg, <vscale x 4 x float> %a, <vscale x 4 x float> %1)
  ret <vscale x 4 x float> %2
}

define <vscale x 2 x double> @idempotent_fmul_f64(<vscale x 2 x i1> %pg, <vscale x 2 x double> %a) #0 {
; CHECK-LABEL: define <vscale x 2 x double> @idempotent_fmul_f64(
; CHECK-SAME: <vscale x 2 x i1> [[PG:%.*]], <vscale x 2 x double> [[A:%.*]]) #[[ATTR0]] {
; CHECK-NEXT:    ret <vscale x 2 x double> [[A]]
;
  %1 = call <vscale x 2 x double> @llvm.aarch64.sve.dup.x.nxv2f64(double 1.0)
  %2 = call <vscale x 2 x double> @llvm.aarch64.sve.fmul.nxv2f64(<vscale x 2 x i1> %pg, <vscale x 2 x double> %a, <vscale x 2 x double> %1)
  ret <vscale x 2 x double> %2
}

define <vscale x 2 x double> @idempotent_fmul_different_argument_order(<vscale x 2 x i1> %pg, <vscale x 2 x double> %a) #0 {
; CHECK-LABEL: define <vscale x 2 x double> @idempotent_fmul_different_argument_order(
; CHECK-SAME: <vscale x 2 x i1> [[PG:%.*]], <vscale x 2 x double> [[A:%.*]]) #[[ATTR0]] {
; CHECK-NEXT:    [[TMP1:%.*]] = select <vscale x 2 x i1> [[PG]], <vscale x 2 x double> [[A]], <vscale x 2 x double> splat (double 1.000000e+00)
; CHECK-NEXT:    ret <vscale x 2 x double> [[TMP1]]
;
  %1 = call <vscale x 2 x double> @llvm.aarch64.sve.dup.x.nxv2f64(double 1.0)
  ; Different argument order to the above tests.
  %2 = call <vscale x 2 x double> @llvm.aarch64.sve.fmul.nxv2f64(<vscale x 2 x i1> %pg, <vscale x 2 x double> %1, <vscale x 2 x double> %a)
  ret <vscale x 2 x double> %2
}

define <vscale x 8 x half> @idempotent_fmul_with_predicated_dup(<vscale x 8 x i1> %pg, <vscale x 8 x half> %a) #0 {
; CHECK-LABEL: define <vscale x 8 x half> @idempotent_fmul_with_predicated_dup(
; CHECK-SAME: <vscale x 8 x i1> [[PG:%.*]], <vscale x 8 x half> [[A:%.*]]) #[[ATTR0]] {
; CHECK-NEXT:    ret <vscale x 8 x half> [[A]]
;
  %1 = call <vscale x 8 x half> @llvm.aarch64.sve.dup.nxv8f16(<vscale x 8 x half> poison, <vscale x 8 x i1> %pg, half 1.0)
  %2 = call <vscale x 8 x half> @llvm.aarch64.sve.fmul.nxv8f16(<vscale x 8 x i1> %pg, <vscale x 8 x half> %a, <vscale x 8 x half> %1)
  ret <vscale x 8 x half> %2
}

define <vscale x 8 x half> @idempotent_fmul_two_dups(<vscale x 8 x i1> %pg, <vscale x 8 x half> %a) #0 {
; Edge case -- make sure that the case where we're fmultiplying two dups
; together is sane.
; CHECK-LABEL: define <vscale x 8 x half> @idempotent_fmul_two_dups(
; CHECK-SAME: <vscale x 8 x i1> [[PG:%.*]], <vscale x 8 x half> [[A:%.*]]) #[[ATTR0]] {
; CHECK-NEXT:    ret <vscale x 8 x half> splat (half 0xH3C00)
;
  %1 = call <vscale x 8 x half> @llvm.aarch64.sve.dup.x.nxv8f16(half 1.0)
  %2 = call <vscale x 8 x half> @llvm.aarch64.sve.dup.x.nxv8f16(half 1.0)
  %3 = call <vscale x 8 x half> @llvm.aarch64.sve.fmul.nxv8f16(<vscale x 8 x i1> %pg, <vscale x 8 x half> %1, <vscale x 8 x half> %2)
  ret <vscale x 8 x half> %3
}

; Non-idempotent fmuls -- we don't expect these to be optimised out.
define <vscale x 8 x half> @non_idempotent_fmul_f16(<vscale x 8 x i1> %pg, <vscale x 8 x half> %a) #0 {
; CHECK-LABEL: define <vscale x 8 x half> @non_idempotent_fmul_f16(
; CHECK-SAME: <vscale x 8 x i1> [[PG:%.*]], <vscale x 8 x half> [[A:%.*]]) #[[ATTR0]] {
; CHECK-NEXT:    [[TMP1:%.*]] = call <vscale x 8 x half> @llvm.aarch64.sve.fmul.nxv8f16(<vscale x 8 x i1> [[PG]], <vscale x 8 x half> [[A]], <vscale x 8 x half> splat (half 0xH4000))
; CHECK-NEXT:    ret <vscale x 8 x half> [[TMP1]]
;
  %1 = call <vscale x 8 x half> @llvm.aarch64.sve.dup.x.nxv8f16(half 2.0)
  %2 = call <vscale x 8 x half> @llvm.aarch64.sve.fmul.nxv8f16(<vscale x 8 x i1> %pg, <vscale x 8 x half> %a, <vscale x 8 x half> %1)
  ret <vscale x 8 x half> %2
}

define <vscale x 4 x float> @non_idempotent_fmul_f32(<vscale x 4 x i1> %pg, <vscale x 4 x float> %a) #0 {
; CHECK-LABEL: define <vscale x 4 x float> @non_idempotent_fmul_f32(
; CHECK-SAME: <vscale x 4 x i1> [[PG:%.*]], <vscale x 4 x float> [[A:%.*]]) #[[ATTR0]] {
; CHECK-NEXT:    [[TMP1:%.*]] = call <vscale x 4 x float> @llvm.aarch64.sve.fmul.nxv4f32(<vscale x 4 x i1> [[PG]], <vscale x 4 x float> [[A]], <vscale x 4 x float> splat (float 2.000000e+00))
; CHECK-NEXT:    ret <vscale x 4 x float> [[TMP1]]
;
  %1 = call <vscale x 4 x float> @llvm.aarch64.sve.dup.x.nxv4f32(float 2.0)
  %2 = call <vscale x 4 x float> @llvm.aarch64.sve.fmul.nxv4f32(<vscale x 4 x i1> %pg, <vscale x 4 x float> %a, <vscale x 4 x float> %1)
  ret <vscale x 4 x float> %2
}

define <vscale x 2 x double> @non_idempotent_fmul_f64(<vscale x 2 x i1> %pg, <vscale x 2 x double> %a) #0 {
; CHECK-LABEL: define <vscale x 2 x double> @non_idempotent_fmul_f64(
; CHECK-SAME: <vscale x 2 x i1> [[PG:%.*]], <vscale x 2 x double> [[A:%.*]]) #[[ATTR0]] {
; CHECK-NEXT:    [[TMP1:%.*]] = call <vscale x 2 x double> @llvm.aarch64.sve.fmul.nxv2f64(<vscale x 2 x i1> [[PG]], <vscale x 2 x double> [[A]], <vscale x 2 x double> splat (double 2.000000e+00))
; CHECK-NEXT:    ret <vscale x 2 x double> [[TMP1]]
;
  %1 = call <vscale x 2 x double> @llvm.aarch64.sve.dup.x.nxv2f64(double 2.0)
  %2 = call <vscale x 2 x double> @llvm.aarch64.sve.fmul.nxv2f64(<vscale x 2 x i1> %pg, <vscale x 2 x double> %a, <vscale x 2 x double> %1)
  ret <vscale x 2 x double> %2
}

define <vscale x 2 x double> @non_idempotent_fmul_with_predicated_dup(<vscale x 2 x i1> %pg1, <vscale x 2 x i1> %pg2, <vscale x 2 x double> %a) #0 {
; Different predicates
; CHECK-LABEL: define <vscale x 2 x double> @non_idempotent_fmul_with_predicated_dup(
; CHECK-SAME: <vscale x 2 x i1> [[PG1:%.*]], <vscale x 2 x i1> [[PG2:%.*]], <vscale x 2 x double> [[A:%.*]]) #[[ATTR0]] {
; CHECK-NEXT:    [[TMP1:%.*]] = call <vscale x 2 x double> @llvm.aarch64.sve.dup.nxv2f64(<vscale x 2 x double> poison, <vscale x 2 x i1> [[PG1]], double 1.000000e+00)
; CHECK-NEXT:    [[TMP2:%.*]] = call <vscale x 2 x double> @llvm.aarch64.sve.fmul.nxv2f64(<vscale x 2 x i1> [[PG2]], <vscale x 2 x double> [[A]], <vscale x 2 x double> [[TMP1]])
; CHECK-NEXT:    ret <vscale x 2 x double> [[TMP2]]
;
  %1 = call <vscale x 2 x double> @llvm.aarch64.sve.dup.nxv2f64(<vscale x 2 x double> poison, <vscale x 2 x i1> %pg1, double 1.0)
  %2 = call <vscale x 2 x double> @llvm.aarch64.sve.fmul.nxv2f64(<vscale x 2 x i1> %pg2, <vscale x 2 x double> %a, <vscale x 2 x double> %1)
  ret <vscale x 2 x double> %2
}

declare <vscale x 8 x half> @llvm.aarch64.sve.dup.x.nxv8f16(half)
declare <vscale x 4 x float> @llvm.aarch64.sve.dup.x.nxv4f32(float)
declare <vscale x 2 x double> @llvm.aarch64.sve.dup.x.nxv2f64(double)

declare <vscale x 2 x double> @llvm.aarch64.sve.dup.nxv2f64(<vscale x 2 x double>, <vscale x 2 x i1>, double)
declare <vscale x 8 x half> @llvm.aarch64.sve.dup.nxv8f16(<vscale x 8 x half>, <vscale x 8 x i1>, half)

declare <vscale x 8 x half> @llvm.aarch64.sve.fmul.nxv8f16(<vscale x 8 x i1>, <vscale x 8 x half>, <vscale x 8 x half>)
declare <vscale x 4 x float> @llvm.aarch64.sve.fmul.nxv4f32(<vscale x 4 x i1>, <vscale x 4 x float>, <vscale x 4 x float>)
declare <vscale x 2 x double> @llvm.aarch64.sve.fmul.nxv2f64(<vscale x 2 x i1>, <vscale x 2 x double>, <vscale x 2 x double>)

attributes #0 = { "target-features"="+sve" }
@@ -1,129 +0,0 @@
; NOTE: Assertions have been autogenerated by utils/update_test_checks.py UTC_ARGS: --version 5
; RUN: opt -S -passes=instcombine < %s | FileCheck %s

target triple = "aarch64-unknown-linux-gnu"

; Idempotent fmuls_u -- should compile to just a ret.
define <vscale x 8 x half> @idempotent_fmul_u_f16(<vscale x 8 x i1> %pg, <vscale x 8 x half> %a) #0 {
; CHECK-LABEL: define <vscale x 8 x half> @idempotent_fmul_u_f16(
; CHECK-SAME: <vscale x 8 x i1> [[PG:%.*]], <vscale x 8 x half> [[TMP0:%.*]]) #[[ATTR0:[0-9]+]] {
; CHECK-NEXT:    ret <vscale x 8 x half> [[TMP0]]
;
  %1 = call <vscale x 8 x half> @llvm.aarch64.sve.dup.x.nxv8f16(half 1.0)
  %2 = call <vscale x 8 x half> @llvm.aarch64.sve.fmul.u.nxv8f16(<vscale x 8 x i1> %pg, <vscale x 8 x half> %a, <vscale x 8 x half> %1)
  ret <vscale x 8 x half> %2
}

define <vscale x 4 x float> @idempotent_fmul_u_f32(<vscale x 4 x i1> %pg, <vscale x 4 x float> %a) #0 {
; CHECK-LABEL: define <vscale x 4 x float> @idempotent_fmul_u_f32(
; CHECK-SAME: <vscale x 4 x i1> [[PG:%.*]], <vscale x 4 x float> [[TMP0:%.*]]) #[[ATTR0]] {
; CHECK-NEXT:    ret <vscale x 4 x float> [[TMP0]]
;
  %1 = call <vscale x 4 x float> @llvm.aarch64.sve.dup.x.nxv4f32(float 1.0)
  %2 = call <vscale x 4 x float> @llvm.aarch64.sve.fmul.u.nxv4f32(<vscale x 4 x i1> %pg, <vscale x 4 x float> %a, <vscale x 4 x float> %1)
  ret <vscale x 4 x float> %2
}

define <vscale x 2 x double> @idempotent_fmul_u_f64(<vscale x 2 x i1> %pg, <vscale x 2 x double> %a) #0 {
; CHECK-LABEL: define <vscale x 2 x double> @idempotent_fmul_u_f64(
; CHECK-SAME: <vscale x 2 x i1> [[PG:%.*]], <vscale x 2 x double> [[TMP0:%.*]]) #[[ATTR0]] {
; CHECK-NEXT:    ret <vscale x 2 x double> [[TMP0]]
;
  %1 = call <vscale x 2 x double> @llvm.aarch64.sve.dup.x.nxv2f64(double 1.0)
  %2 = call <vscale x 2 x double> @llvm.aarch64.sve.fmul.u.nxv2f64(<vscale x 2 x i1> %pg, <vscale x 2 x double> %a, <vscale x 2 x double> %1)
  ret <vscale x 2 x double> %2
}

define <vscale x 2 x double> @idempotent_fmul_u_different_argument_order(<vscale x 2 x i1> %pg, <vscale x 2 x double> %a) #0 {
; CHECK-LABEL: define <vscale x 2 x double> @idempotent_fmul_u_different_argument_order(
; CHECK-SAME: <vscale x 2 x i1> [[PG:%.*]], <vscale x 2 x double> [[TMP0:%.*]]) #[[ATTR0]] {
; CHECK-NEXT:    ret <vscale x 2 x double> [[TMP0]]
;
  %1 = call <vscale x 2 x double> @llvm.aarch64.sve.dup.x.nxv2f64(double 1.0)
  ; Different argument order to the above tests.
  %2 = call <vscale x 2 x double> @llvm.aarch64.sve.fmul.u.nxv2f64(<vscale x 2 x i1> %pg, <vscale x 2 x double> %1, <vscale x 2 x double> %a)
  ret <vscale x 2 x double> %2
}

define <vscale x 8 x half> @idempotent_fmul_u_with_predicated_dup(<vscale x 8 x i1> %pg, <vscale x 8 x half> %a) #0 {
; CHECK-LABEL: define <vscale x 8 x half> @idempotent_fmul_u_with_predicated_dup(
; CHECK-SAME: <vscale x 8 x i1> [[PG:%.*]], <vscale x 8 x half> [[TMP0:%.*]]) #[[ATTR0]] {
; CHECK-NEXT:    ret <vscale x 8 x half> [[TMP0]]
;
  %1 = call <vscale x 8 x half> @llvm.aarch64.sve.dup.nxv8f16(<vscale x 8 x half> poison, <vscale x 8 x i1> %pg, half 1.0)
  %2 = call <vscale x 8 x half> @llvm.aarch64.sve.fmul.u.nxv8f16(<vscale x 8 x i1> %pg, <vscale x 8 x half> %a, <vscale x 8 x half> %1)
  ret <vscale x 8 x half> %2
}

define <vscale x 8 x half> @idempotent_fmul_u_two_dups(<vscale x 8 x i1> %pg, <vscale x 8 x half> %a) #0 {
; Edge case -- make sure that the case where we're fmultiplying two dups
; together is sane.
; CHECK-LABEL: define <vscale x 8 x half> @idempotent_fmul_u_two_dups(
; CHECK-SAME: <vscale x 8 x i1> [[PG:%.*]], <vscale x 8 x half> [[A:%.*]]) #[[ATTR0]] {
; CHECK-NEXT:    ret <vscale x 8 x half> splat (half 0xH3C00)
;
  %1 = call <vscale x 8 x half> @llvm.aarch64.sve.dup.x.nxv8f16(half 1.0)
  %2 = call <vscale x 8 x half> @llvm.aarch64.sve.dup.x.nxv8f16(half 1.0)
  %3 = call <vscale x 8 x half> @llvm.aarch64.sve.fmul.u.nxv8f16(<vscale x 8 x i1> %pg, <vscale x 8 x half> %1, <vscale x 8 x half> %2)
  ret <vscale x 8 x half> %3
}

; Non-idempotent fmuls_u -- we don't expect these to be optimised out.
define <vscale x 8 x half> @non_idempotent_fmul_u_f16(<vscale x 8 x i1> %pg, <vscale x 8 x half> %a) #0 {
; CHECK-LABEL: define <vscale x 8 x half> @non_idempotent_fmul_u_f16(
; CHECK-SAME: <vscale x 8 x i1> [[PG:%.*]], <vscale x 8 x half> [[A:%.*]]) #[[ATTR0]] {
; CHECK-NEXT:    [[TMP1:%.*]] = call <vscale x 8 x half> @llvm.aarch64.sve.fmul.u.nxv8f16(<vscale x 8 x i1> [[PG]], <vscale x 8 x half> [[A]], <vscale x 8 x half> splat (half 0xH4000))
; CHECK-NEXT:    ret <vscale x 8 x half> [[TMP1]]
;
  %1 = call <vscale x 8 x half> @llvm.aarch64.sve.dup.x.nxv8f16(half 2.0)
  %2 = call <vscale x 8 x half> @llvm.aarch64.sve.fmul.u.nxv8f16(<vscale x 8 x i1> %pg, <vscale x 8 x half> %a, <vscale x 8 x half> %1)
  ret <vscale x 8 x half> %2
}

define <vscale x 4 x float> @non_idempotent_fmul_u_f32(<vscale x 4 x i1> %pg, <vscale x 4 x float> %a) #0 {
; CHECK-LABEL: define <vscale x 4 x float> @non_idempotent_fmul_u_f32(
; CHECK-SAME: <vscale x 4 x i1> [[PG:%.*]], <vscale x 4 x float> [[A:%.*]]) #[[ATTR0]] {
; CHECK-NEXT:    [[TMP1:%.*]] = call <vscale x 4 x float> @llvm.aarch64.sve.fmul.u.nxv4f32(<vscale x 4 x i1> [[PG]], <vscale x 4 x float> [[A]], <vscale x 4 x float> splat (float 2.000000e+00))
; CHECK-NEXT:    ret <vscale x 4 x float> [[TMP1]]
;
  %1 = call <vscale x 4 x float> @llvm.aarch64.sve.dup.x.nxv4f32(float 2.0)
  %2 = call <vscale x 4 x float> @llvm.aarch64.sve.fmul.u.nxv4f32(<vscale x 4 x i1> %pg, <vscale x 4 x float> %a, <vscale x 4 x float> %1)
  ret <vscale x 4 x float> %2
}

define <vscale x 2 x double> @non_idempotent_fmul_u_f64(<vscale x 2 x i1> %pg, <vscale x 2 x double> %a) #0 {
; CHECK-LABEL: define <vscale x 2 x double> @non_idempotent_fmul_u_f64(
; CHECK-SAME: <vscale x 2 x i1> [[PG:%.*]], <vscale x 2 x double> [[A:%.*]]) #[[ATTR0]] {
; CHECK-NEXT:    [[TMP1:%.*]] = call <vscale x 2 x double> @llvm.aarch64.sve.fmul.u.nxv2f64(<vscale x 2 x i1> [[PG]], <vscale x 2 x double> [[A]], <vscale x 2 x double> splat (double 2.000000e+00))
; CHECK-NEXT:    ret <vscale x 2 x double> [[TMP1]]
;
  %1 = call <vscale x 2 x double> @llvm.aarch64.sve.dup.x.nxv2f64(double 2.0)
  %2 = call <vscale x 2 x double> @llvm.aarch64.sve.fmul.u.nxv2f64(<vscale x 2 x i1> %pg, <vscale x 2 x double> %a, <vscale x 2 x double> %1)
  ret <vscale x 2 x double> %2
}

define <vscale x 2 x double> @non_idempotent_fmul_u_with_predicated_dup(<vscale x 2 x i1> %pg1, <vscale x 2 x i1> %pg2, <vscale x 2 x double> %a) #0 {
; Different predicates
; CHECK-LABEL: define <vscale x 2 x double> @non_idempotent_fmul_u_with_predicated_dup(
; CHECK-SAME: <vscale x 2 x i1> [[PG1:%.*]], <vscale x 2 x i1> [[PG2:%.*]], <vscale x 2 x double> [[A:%.*]]) #[[ATTR0]] {
; CHECK-NEXT:    [[TMP1:%.*]] = call <vscale x 2 x double> @llvm.aarch64.sve.dup.nxv2f64(<vscale x 2 x double> poison, <vscale x 2 x i1> [[PG1]], double 1.000000e+00)
; CHECK-NEXT:    [[TMP2:%.*]] = call <vscale x 2 x double> @llvm.aarch64.sve.fmul.u.nxv2f64(<vscale x 2 x i1> [[PG2]], <vscale x 2 x double> [[A]], <vscale x 2 x double> [[TMP1]])
; CHECK-NEXT:    ret <vscale x 2 x double> [[TMP2]]
;
  %1 = call <vscale x 2 x double> @llvm.aarch64.sve.dup.nxv2f64(<vscale x 2 x double> poison, <vscale x 2 x i1> %pg1, double 1.0)
  %2 = call <vscale x 2 x double> @llvm.aarch64.sve.fmul.u.nxv2f64(<vscale x 2 x i1> %pg2, <vscale x 2 x double> %a, <vscale x 2 x double> %1)
  ret <vscale x 2 x double> %2
}

declare <vscale x 8 x half> @llvm.aarch64.sve.dup.x.nxv8f16(half)
declare <vscale x 4 x float> @llvm.aarch64.sve.dup.x.nxv4f32(float)
declare <vscale x 2 x double> @llvm.aarch64.sve.dup.x.nxv2f64(double)

declare <vscale x 2 x double> @llvm.aarch64.sve.dup.nxv2f64(<vscale x 2 x double>, <vscale x 2 x i1>, double)
declare <vscale x 8 x half> @llvm.aarch64.sve.dup.nxv8f16(<vscale x 8 x half>, <vscale x 8 x i1>, half)

declare <vscale x 8 x half> @llvm.aarch64.sve.fmul.u.nxv8f16(<vscale x 8 x i1>, <vscale x 8 x half>, <vscale x 8 x half>)
declare <vscale x 4 x float> @llvm.aarch64.sve.fmul.u.nxv4f32(<vscale x 4 x i1>, <vscale x 4 x float>, <vscale x 4 x float>)
declare <vscale x 2 x double> @llvm.aarch64.sve.fmul.u.nxv2f64(<vscale x 2 x i1>, <vscale x 2 x double>, <vscale x 2 x double>)

attributes #0 = { "target-features"="+sve" }
@@ -1,130 +0,0 @@
; NOTE: Assertions have been autogenerated by utils/update_test_checks.py UTC_ARGS: --version 5
; RUN: opt -S -passes=instcombine < %s | FileCheck %s

target triple = "aarch64-unknown-linux-gnu"

; Idempotent muls -- should compile to just a ret.
define <vscale x 8 x i16> @idempotent_mul_i16(<vscale x 8 x i1> %pg, <vscale x 8 x i16> %a) #0 {
; CHECK-LABEL: define <vscale x 8 x i16> @idempotent_mul_i16(
; CHECK-SAME: <vscale x 8 x i1> [[PG:%.*]], <vscale x 8 x i16> [[A:%.*]]) #[[ATTR0:[0-9]+]] {
; CHECK-NEXT:    ret <vscale x 8 x i16> [[A]]
;
  %1 = call <vscale x 8 x i16> @llvm.aarch64.sve.dup.x.nxv8i16(i16 1)
  %2 = call <vscale x 8 x i16> @llvm.aarch64.sve.mul.nxv8i16(<vscale x 8 x i1> %pg, <vscale x 8 x i16> %a, <vscale x 8 x i16> %1)
  ret <vscale x 8 x i16> %2
}

define <vscale x 4 x i32> @idempotent_mul_i32(<vscale x 4 x i1> %pg, <vscale x 4 x i32> %a) #0 {
; CHECK-LABEL: define <vscale x 4 x i32> @idempotent_mul_i32(
; CHECK-SAME: <vscale x 4 x i1> [[PG:%.*]], <vscale x 4 x i32> [[A:%.*]]) #[[ATTR0]] {
; CHECK-NEXT:    ret <vscale x 4 x i32> [[A]]
;
  %1 = call <vscale x 4 x i32> @llvm.aarch64.sve.dup.x.nxv4i32(i32 1)
  %2 = call <vscale x 4 x i32> @llvm.aarch64.sve.mul.nxv4i32(<vscale x 4 x i1> %pg, <vscale x 4 x i32> %a, <vscale x 4 x i32> %1)
  ret <vscale x 4 x i32> %2
}

define <vscale x 2 x i64> @idempotent_mul_i64(<vscale x 2 x i1> %pg, <vscale x 2 x i64> %a) #0 {
; CHECK-LABEL: define <vscale x 2 x i64> @idempotent_mul_i64(
; CHECK-SAME: <vscale x 2 x i1> [[PG:%.*]], <vscale x 2 x i64> [[A:%.*]]) #[[ATTR0]] {
; CHECK-NEXT:    ret <vscale x 2 x i64> [[A]]
;
  %1 = call <vscale x 2 x i64> @llvm.aarch64.sve.dup.x.nxv2i64(i64 1)
  %2 = call <vscale x 2 x i64> @llvm.aarch64.sve.mul.nxv2i64(<vscale x 2 x i1> %pg, <vscale x 2 x i64> %a, <vscale x 2 x i64> %1)
  ret <vscale x 2 x i64> %2
}

define <vscale x 2 x i64> @idempotent_mul_different_argument_order(<vscale x 2 x i1> %pg, <vscale x 2 x i64> %a) #0 {
; CHECK-LABEL: define <vscale x 2 x i64> @idempotent_mul_different_argument_order(
; CHECK-SAME: <vscale x 2 x i1> [[PG:%.*]], <vscale x 2 x i64> [[A:%.*]]) #[[ATTR0]] {
; CHECK-NEXT:    [[TMP1:%.*]] = select <vscale x 2 x i1> [[PG]], <vscale x 2 x i64> [[A]], <vscale x 2 x i64> splat (i64 1)
; CHECK-NEXT:    ret <vscale x 2 x i64> [[TMP1]]
;
  %1 = call <vscale x 2 x i64> @llvm.aarch64.sve.dup.x.nxv2i64(i64 1)
  ; Different argument order to the above tests.
  %2 = call <vscale x 2 x i64> @llvm.aarch64.sve.mul.nxv2i64(<vscale x 2 x i1> %pg, <vscale x 2 x i64> %1, <vscale x 2 x i64> %a)
  ret <vscale x 2 x i64> %2
}

define <vscale x 8 x i16> @idempotent_mul_with_predicated_dup(<vscale x 8 x i1> %pg, <vscale x 8 x i16> %a) #0 {
; CHECK-LABEL: define <vscale x 8 x i16> @idempotent_mul_with_predicated_dup(
; CHECK-SAME: <vscale x 8 x i1> [[PG:%.*]], <vscale x 8 x i16> [[A:%.*]]) #[[ATTR0]] {
; CHECK-NEXT:    ret <vscale x 8 x i16> [[A]]
;
  %1 = call <vscale x 8 x i16> @llvm.aarch64.sve.dup.nxv8i16(<vscale x 8 x i16> poison, <vscale x 8 x i1> %pg, i16 1)
  %2 = call <vscale x 8 x i16> @llvm.aarch64.sve.mul.nxv8i16(<vscale x 8 x i1> %pg, <vscale x 8 x i16> %a, <vscale x 8 x i16> %1)
  ret <vscale x 8 x i16> %2
}

define <vscale x 8 x i16> @idempotent_mul_two_dups(<vscale x 8 x i1> %pg, <vscale x 8 x i16> %a) #0 {
; Edge case -- make sure that the case where we're multiplying two dups
; together is sane.
; CHECK-LABEL: define <vscale x 8 x i16> @idempotent_mul_two_dups(
; CHECK-SAME: <vscale x 8 x i1> [[PG:%.*]], <vscale x 8 x i16> [[A:%.*]]) #[[ATTR0]] {
; CHECK-NEXT:    ret <vscale x 8 x i16> splat (i16 1)
;
  %1 = call <vscale x 8 x i16> @llvm.aarch64.sve.dup.x.nxv8i16(i16 1)
  %2 = call <vscale x 8 x i16> @llvm.aarch64.sve.dup.x.nxv8i16(i16 1)
  %3 = call <vscale x 8 x i16> @llvm.aarch64.sve.mul.nxv8i16(<vscale x 8 x i1> %pg, <vscale x 8 x i16> %1, <vscale x 8 x i16> %2)
  ret <vscale x 8 x i16> %3
}

; Non-idempotent muls -- we don't expect these to be optimised out.
define <vscale x 8 x i16> @non_idempotent_mul_i16(<vscale x 8 x i1> %pg, <vscale x 8 x i16> %a) #0 {
; CHECK-LABEL: define <vscale x 8 x i16> @non_idempotent_mul_i16(
; CHECK-SAME: <vscale x 8 x i1> [[PG:%.*]], <vscale x 8 x i16> [[A:%.*]]) #[[ATTR0]] {
; CHECK-NEXT:    [[TMP1:%.*]] = call <vscale x 8 x i16> @llvm.aarch64.sve.mul.nxv8i16(<vscale x 8 x i1> [[PG]], <vscale x 8 x i16> [[A]], <vscale x 8 x i16> splat (i16 2))
; CHECK-NEXT:    ret <vscale x 8 x i16> [[TMP1]]
;
  %1 = call <vscale x 8 x i16> @llvm.aarch64.sve.dup.x.nxv8i16(i16 2)
  %2 = call <vscale x 8 x i16> @llvm.aarch64.sve.mul.nxv8i16(<vscale x 8 x i1> %pg, <vscale x 8 x i16> %a, <vscale x 8 x i16> %1)
  ret <vscale x 8 x i16> %2
}

define <vscale x 4 x i32> @non_idempotent_mul_i32(<vscale x 4 x i1> %pg, <vscale x 4 x i32> %a) #0 {
; CHECK-LABEL: define <vscale x 4 x i32> @non_idempotent_mul_i32(
; CHECK-SAME: <vscale x 4 x i1> [[PG:%.*]], <vscale x 4 x i32> [[A:%.*]]) #[[ATTR0]] {
; CHECK-NEXT:    [[TMP1:%.*]] = call <vscale x 4 x i32> @llvm.aarch64.sve.mul.nxv4i32(<vscale x 4 x i1> [[PG]], <vscale x 4 x i32> [[A]], <vscale x 4 x i32> splat (i32 2))
; CHECK-NEXT:    ret <vscale x 4 x i32> [[TMP1]]
;
  %1 = call <vscale x 4 x i32> @llvm.aarch64.sve.dup.x.nxv4i32(i32 2)
  %2 = call <vscale x 4 x i32> @llvm.aarch64.sve.mul.nxv4i32(<vscale x 4 x i1> %pg, <vscale x 4 x i32> %a, <vscale x 4 x i32> %1)
  ret <vscale x 4 x i32> %2
}

define <vscale x 2 x i64> @non_idempotent_mul_i64(<vscale x 2 x i1> %pg, <vscale x 2 x i64> %a) #0 {
; CHECK-LABEL: define <vscale x 2 x i64> @non_idempotent_mul_i64(
; CHECK-SAME: <vscale x 2 x i1> [[PG:%.*]], <vscale x 2 x i64> [[A:%.*]]) #[[ATTR0]] {
; CHECK-NEXT:    [[TMP1:%.*]] = call <vscale x 2 x i64> @llvm.aarch64.sve.mul.nxv2i64(<vscale x 2 x i1> [[PG]], <vscale x 2 x i64> [[A]], <vscale x 2 x i64> splat (i64 2))
; CHECK-NEXT:    ret <vscale x 2 x i64> [[TMP1]]
;
  %1 = call <vscale x 2 x i64> @llvm.aarch64.sve.dup.x.nxv2i64(i64 2)
  %2 = call <vscale x 2 x i64> @llvm.aarch64.sve.mul.nxv2i64(<vscale x 2 x i1> %pg, <vscale x 2 x i64> %a, <vscale x 2 x i64> %1)
  ret <vscale x 2 x i64> %2
}

define <vscale x 2 x i64> @non_idempotent_mul_with_predicated_dup(<vscale x 2 x i1> %pg1, <vscale x 2 x i1> %pg2, <vscale x 2 x i64> %a) #0 {
; Different predicates
; CHECK-LABEL: define <vscale x 2 x i64> @non_idempotent_mul_with_predicated_dup(
; CHECK-SAME: <vscale x 2 x i1> [[PG1:%.*]], <vscale x 2 x i1> [[PG2:%.*]], <vscale x 2 x i64> [[A:%.*]]) #[[ATTR0]] {
; CHECK-NEXT:    [[TMP1:%.*]] = call <vscale x 2 x i64> @llvm.aarch64.sve.dup.nxv2i64(<vscale x 2 x i64> poison, <vscale x 2 x i1> [[PG1]], i64 1)
; CHECK-NEXT:    [[TMP2:%.*]] = call <vscale x 2 x i64> @llvm.aarch64.sve.mul.nxv2i64(<vscale x 2 x i1> [[PG2]], <vscale x 2 x i64> [[A]], <vscale x 2 x i64> [[TMP1]])
; CHECK-NEXT:    ret <vscale x 2 x i64> [[TMP2]]
;
  %1 = call <vscale x 2 x i64> @llvm.aarch64.sve.dup.nxv2i64(<vscale x 2 x i64> poison, <vscale x 2 x i1> %pg1, i64 1)
  %2 = call <vscale x 2 x i64> @llvm.aarch64.sve.mul.nxv2i64(<vscale x 2 x i1> %pg2, <vscale x 2 x i64> %a, <vscale x 2 x i64> %1)
  ret <vscale x 2 x i64> %2
}

declare <vscale x 8 x i16> @llvm.aarch64.sve.dup.x.nxv8i16(i16)
declare <vscale x 4 x i32> @llvm.aarch64.sve.dup.x.nxv4i32(i32)
declare <vscale x 2 x i64> @llvm.aarch64.sve.dup.x.nxv2i64(i64)

declare <vscale x 2 x i64> @llvm.aarch64.sve.dup.nxv2i64(<vscale x 2 x i64>, <vscale x 2 x i1>, i64)
declare <vscale x 8 x i16> @llvm.aarch64.sve.dup.nxv8i16(<vscale x 8 x i16>, <vscale x 8 x i1>, i16)

declare <vscale x 8 x i16> @llvm.aarch64.sve.mul.nxv8i16(<vscale x 8 x i1>, <vscale x 8 x i16>, <vscale x 8 x i16>)
declare <vscale x 4 x i32> @llvm.aarch64.sve.mul.nxv4i32(<vscale x 4 x i1>, <vscale x 4 x i32>, <vscale x 4 x i32>)
declare <vscale x 2 x i64> @llvm.aarch64.sve.mul.nxv2i64(<vscale x 2 x i1>, <vscale x 2 x i64>, <vscale x 2 x i64>)

attributes #0 = { "target-features"="+sve" }
@@ -1,129 +0,0 @@
|
||||
; NOTE: Assertions have been autogenerated by utils/update_test_checks.py UTC_ARGS: --version 5
|
||||
; RUN: opt -S -passes=instcombine < %s | FileCheck %s
|
||||
|
||||
target triple = "aarch64-unknown-linux-gnu"
|
||||
|
||||
; Idempotent muls -- should compile to just a ret.
|
||||
define <vscale x 8 x i16> @idempotent_mul_u_i16(<vscale x 8 x i1> %pg, <vscale x 8 x i16> %a) #0 {
|
||||
; CHECK-LABEL: define <vscale x 8 x i16> @idempotent_mul_u_i16(
|
||||
; CHECK-SAME: <vscale x 8 x i1> [[PG:%.*]], <vscale x 8 x i16> [[TMP0:%.*]]) #[[ATTR0:[0-9]+]] {
|
||||
; CHECK-NEXT: ret <vscale x 8 x i16> [[TMP0]]
|
||||
;
|
||||
%1 = call <vscale x 8 x i16> @llvm.aarch64.sve.dup.x.nxv8i16(i16 1)
|
||||
%2 = call <vscale x 8 x i16> @llvm.aarch64.sve.mul.u.nxv8i16(<vscale x 8 x i1> %pg, <vscale x 8 x i16> %a, <vscale x 8 x i16> %1)
|
||||
ret <vscale x 8 x i16> %2
|
||||
}
|
||||
|
||||
define <vscale x 4 x i32> @idempotent_mul_u_i32(<vscale x 4 x i1> %pg, <vscale x 4 x i32> %a) #0 {
|
||||
; CHECK-LABEL: define <vscale x 4 x i32> @idempotent_mul_u_i32(
|
||||
; CHECK-SAME: <vscale x 4 x i1> [[PG:%.*]], <vscale x 4 x i32> [[TMP0:%.*]]) #[[ATTR0]] {
|
||||
; CHECK-NEXT: ret <vscale x 4 x i32> [[TMP0]]
|
||||
;
|
||||
%1 = call <vscale x 4 x i32> @llvm.aarch64.sve.dup.x.nxv4i32(i32 1)
|
||||
%2 = call <vscale x 4 x i32> @llvm.aarch64.sve.mul.u.nxv4i32(<vscale x 4 x i1> %pg, <vscale x 4 x i32> %a, <vscale x 4 x i32> %1)
|
||||
ret <vscale x 4 x i32> %2
|
||||
}
|
||||
|
||||
define <vscale x 2 x i64> @idempotent_mul_u_i64(<vscale x 2 x i1> %pg, <vscale x 2 x i64> %a) #0 {
|
||||
; CHECK-LABEL: define <vscale x 2 x i64> @idempotent_mul_u_i64(
|
||||
; CHECK-SAME: <vscale x 2 x i1> [[PG:%.*]], <vscale x 2 x i64> [[TMP0:%.*]]) #[[ATTR0]] {
|
||||
; CHECK-NEXT: ret <vscale x 2 x i64> [[TMP0]]
|
||||
;
|
||||
%1 = call <vscale x 2 x i64> @llvm.aarch64.sve.dup.x.nxv2i64(i64 1)
|
||||
%2 = call <vscale x 2 x i64> @llvm.aarch64.sve.mul.u.nxv2i64(<vscale x 2 x i1> %pg, <vscale x 2 x i64> %a, <vscale x 2 x i64> %1)
|
||||
ret <vscale x 2 x i64> %2
|
||||
}
|
||||
|
||||
define <vscale x 2 x i64> @idempotent_mul_u_different_argument_order(<vscale x 2 x i1> %pg, <vscale x 2 x i64> %a) #0 {
|
||||
; CHECK-LABEL: define <vscale x 2 x i64> @idempotent_mul_u_different_argument_order(
|
||||
; CHECK-SAME: <vscale x 2 x i1> [[PG:%.*]], <vscale x 2 x i64> [[TMP0:%.*]]) #[[ATTR0]] {
|
||||
; CHECK-NEXT: ret <vscale x 2 x i64> [[TMP0]]
|
||||
;
|
||||
%1 = call <vscale x 2 x i64> @llvm.aarch64.sve.dup.x.nxv2i64(i64 1)
|
||||
; Different argument order to the above tests.
|
||||
%2 = call <vscale x 2 x i64> @llvm.aarch64.sve.mul.u.nxv2i64(<vscale x 2 x i1> %pg, <vscale x 2 x i64> %1, <vscale x 2 x i64> %a)
|
||||
ret <vscale x 2 x i64> %2
|
||||
}
|
||||
|
||||
define <vscale x 8 x i16> @idempotent_mul_u_with_predicated_dup(<vscale x 8 x i1> %pg, <vscale x 8 x i16> %a) #0 {
|
||||
; CHECK-LABEL: define <vscale x 8 x i16> @idempotent_mul_u_with_predicated_dup(
|
||||
; CHECK-SAME: <vscale x 8 x i1> [[PG:%.*]], <vscale x 8 x i16> [[TMP0:%.*]]) #[[ATTR0]] {
|
||||
; CHECK-NEXT: ret <vscale x 8 x i16> [[TMP0]]
|
||||
;
|
||||
%1 = call <vscale x 8 x i16> @llvm.aarch64.sve.dup.nxv8i16(<vscale x 8 x i16> poison, <vscale x 8 x i1> %pg, i16 1)
|
||||
%2 = call <vscale x 8 x i16> @llvm.aarch64.sve.mul.u.nxv8i16(<vscale x 8 x i1> %pg, <vscale x 8 x i16> %a, <vscale x 8 x i16> %1)
|
||||
ret <vscale x 8 x i16> %2
|
||||
}
|
||||
|
||||
define <vscale x 8 x i16> @idempotent_mul_u_two_dups(<vscale x 8 x i1> %pg, <vscale x 8 x i16> %a) #0 {
|
||||
; Edge case -- make sure that the case where we're multiplying two dups
|
||||
; together is sane.
|
||||
; CHECK-LABEL: define <vscale x 8 x i16> @idempotent_mul_u_two_dups(
|
||||
; CHECK-SAME: <vscale x 8 x i1> [[PG:%.*]], <vscale x 8 x i16> [[A:%.*]]) #[[ATTR0]] {
|
||||
; CHECK-NEXT: ret <vscale x 8 x i16> splat (i16 1)
|
||||
;
|
||||
%1 = call <vscale x 8 x i16> @llvm.aarch64.sve.dup.x.nxv8i16(i16 1)
|
||||
%2 = call <vscale x 8 x i16> @llvm.aarch64.sve.dup.x.nxv8i16(i16 1)
|
||||
%3 = call <vscale x 8 x i16> @llvm.aarch64.sve.mul.u.nxv8i16(<vscale x 8 x i1> %pg, <vscale x 8 x i16> %1, <vscale x 8 x i16> %2)
|
||||
ret <vscale x 8 x i16> %3
|
||||
}
|
||||
|
||||
; Non-idempotent muls -- we don't expect these to be optimised out.
|
||||
define <vscale x 8 x i16> @non_idempotent_mul_u_i16(<vscale x 8 x i1> %pg, <vscale x 8 x i16> %a) #0 {
|
||||
; CHECK-LABEL: define <vscale x 8 x i16> @non_idempotent_mul_u_i16(
|
||||
; CHECK-SAME: <vscale x 8 x i1> [[PG:%.*]], <vscale x 8 x i16> [[A:%.*]]) #[[ATTR0]] {
|
||||
; CHECK-NEXT: [[TMP1:%.*]] = call <vscale x 8 x i16> @llvm.aarch64.sve.mul.u.nxv8i16(<vscale x 8 x i1> [[PG]], <vscale x 8 x i16> [[A]], <vscale x 8 x i16> splat (i16 2))
|
||||
; CHECK-NEXT: ret <vscale x 8 x i16> [[TMP1]]
|
||||
;
|
||||
%1 = call <vscale x 8 x i16> @llvm.aarch64.sve.dup.x.nxv8i16(i16 2)
|
||||
%2 = call <vscale x 8 x i16> @llvm.aarch64.sve.mul.u.nxv8i16(<vscale x 8 x i1> %pg, <vscale x 8 x i16> %a, <vscale x 8 x i16> %1)
|
||||
ret <vscale x 8 x i16> %2
|
||||
}
|
||||
|
||||
define <vscale x 4 x i32> @non_idempotent_mul_u_i32(<vscale x 4 x i1> %pg, <vscale x 4 x i32> %a) #0 {
|
||||
; CHECK-LABEL: define <vscale x 4 x i32> @non_idempotent_mul_u_i32(
|
||||
; CHECK-SAME: <vscale x 4 x i1> [[PG:%.*]], <vscale x 4 x i32> [[A:%.*]]) #[[ATTR0]] {
|
||||
; CHECK-NEXT: [[TMP1:%.*]] = call <vscale x 4 x i32> @llvm.aarch64.sve.mul.u.nxv4i32(<vscale x 4 x i1> [[PG]], <vscale x 4 x i32> [[A]], <vscale x 4 x i32> splat (i32 2))
|
||||
; CHECK-NEXT: ret <vscale x 4 x i32> [[TMP1]]
|
||||
;
|
||||
%1 = call <vscale x 4 x i32> @llvm.aarch64.sve.dup.x.nxv4i32(i32 2)
|
||||
%2 = call <vscale x 4 x i32> @llvm.aarch64.sve.mul.u.nxv4i32(<vscale x 4 x i1> %pg, <vscale x 4 x i32> %a, <vscale x 4 x i32> %1)
|
||||
ret <vscale x 4 x i32> %2
|
||||
}
|
||||
|
||||
define <vscale x 2 x i64> @non_idempotent_mul_u_i64(<vscale x 2 x i1> %pg, <vscale x 2 x i64> %a) #0 {
|
||||
; CHECK-LABEL: define <vscale x 2 x i64> @non_idempotent_mul_u_i64(
|
||||
; CHECK-SAME: <vscale x 2 x i1> [[PG:%.*]], <vscale x 2 x i64> [[A:%.*]]) #[[ATTR0]] {
|
||||
; CHECK-NEXT: [[TMP1:%.*]] = call <vscale x 2 x i64> @llvm.aarch64.sve.mul.u.nxv2i64(<vscale x 2 x i1> [[PG]], <vscale x 2 x i64> [[A]], <vscale x 2 x i64> splat (i64 2))
|
||||
; CHECK-NEXT: ret <vscale x 2 x i64> [[TMP1]]
|
||||
;
|
||||
%1 = call <vscale x 2 x i64> @llvm.aarch64.sve.dup.x.nxv2i64(i64 2)
|
||||
%2 = call <vscale x 2 x i64> @llvm.aarch64.sve.mul.u.nxv2i64(<vscale x 2 x i1> %pg, <vscale x 2 x i64> %a, <vscale x 2 x i64> %1)
|
||||
ret <vscale x 2 x i64> %2
|
||||
}
|
||||
|
||||
define <vscale x 2 x i64> @non_idempotent_mul_u_with_predicated_dup(<vscale x 2 x i1> %pg1, <vscale x 2 x i1> %pg2, <vscale x 2 x i64> %a) #0 {
|
||||
; Different predicates
|
||||
; CHECK-LABEL: define <vscale x 2 x i64> @non_idempotent_mul_u_with_predicated_dup(
|
||||
; CHECK-SAME: <vscale x 2 x i1> [[PG1:%.*]], <vscale x 2 x i1> [[PG2:%.*]], <vscale x 2 x i64> [[A:%.*]]) #[[ATTR0]] {
|
||||
; CHECK-NEXT: [[TMP1:%.*]] = call <vscale x 2 x i64> @llvm.aarch64.sve.dup.nxv2i64(<vscale x 2 x i64> poison, <vscale x 2 x i1> [[PG1]], i64 1)
|
||||
; CHECK-NEXT: [[TMP2:%.*]] = call <vscale x 2 x i64> @llvm.aarch64.sve.mul.u.nxv2i64(<vscale x 2 x i1> [[PG2]], <vscale x 2 x i64> [[A]], <vscale x 2 x i64> [[TMP1]])
|
||||
; CHECK-NEXT: ret <vscale x 2 x i64> [[TMP2]]
|
||||
;
|
||||
%1 = call <vscale x 2 x i64> @llvm.aarch64.sve.dup.nxv2i64(<vscale x 2 x i64> poison, <vscale x 2 x i1> %pg1, i64 1)
|
||||
%2 = call <vscale x 2 x i64> @llvm.aarch64.sve.mul.u.nxv2i64(<vscale x 2 x i1> %pg2, <vscale x 2 x i64> %a, <vscale x 2 x i64> %1)
|
||||
ret <vscale x 2 x i64> %2
|
||||
}
|
||||
|
||||
declare <vscale x 8 x i16> @llvm.aarch64.sve.dup.x.nxv8i16(i16)
|
||||
declare <vscale x 4 x i32> @llvm.aarch64.sve.dup.x.nxv4i32(i32)
|
||||
declare <vscale x 2 x i64> @llvm.aarch64.sve.dup.x.nxv2i64(i64)
|
||||
|
||||
declare <vscale x 2 x i64> @llvm.aarch64.sve.dup.nxv2i64(<vscale x 2 x i64>, <vscale x 2 x i1>, i64)
|
||||
declare <vscale x 8 x i16> @llvm.aarch64.sve.dup.nxv8i16(<vscale x 8 x i16>, <vscale x 8 x i1>, i16)
|
||||
|
||||
declare <vscale x 8 x i16> @llvm.aarch64.sve.mul.u.nxv8i16(<vscale x 8 x i1>, <vscale x 8 x i16>, <vscale x 8 x i16>)
|
||||
declare <vscale x 4 x i32> @llvm.aarch64.sve.mul.u.nxv4i32(<vscale x 4 x i1>, <vscale x 4 x i32>, <vscale x 4 x i32>)
|
||||
declare <vscale x 2 x i64> @llvm.aarch64.sve.mul.u.nxv2i64(<vscale x 2 x i1>, <vscale x 2 x i64>, <vscale x 2 x i64>)
|
||||
|
||||
attributes #0 = { "target-features"="+sve" }
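A note on the behavioural difference the checks above capture, with a small sketch (the function names @swapped_merging_mul and @swapped_undef_mul are ours, for illustration only): for the merging mul/fmul intrinsics, inactive lanes take the value of the first multiplicand, so putting the dup(1) first forces instcombine to emit a select that preserves the splat on inactive lanes, whereas the _u ("undef") variants leave inactive lanes undefined, so the same operand order still folds to a plain ret.

define <vscale x 2 x i64> @swapped_merging_mul(<vscale x 2 x i1> %pg, <vscale x 2 x i64> %a) {
  ; dup(1) is the first multiplicand, so inactive lanes must keep the value 1;
  ; the expected fold is: select %pg, %a, splat (i64 1)
  %1 = call <vscale x 2 x i64> @llvm.aarch64.sve.dup.x.nxv2i64(i64 1)
  %2 = call <vscale x 2 x i64> @llvm.aarch64.sve.mul.nxv2i64(<vscale x 2 x i1> %pg, <vscale x 2 x i64> %1, <vscale x 2 x i64> %a)
  ret <vscale x 2 x i64> %2
}

define <vscale x 2 x i64> @swapped_undef_mul(<vscale x 2 x i1> %pg, <vscale x 2 x i64> %a) {
  ; mul.u leaves inactive lanes undefined, so this folds to: ret <vscale x 2 x i64> %a
  %1 = call <vscale x 2 x i64> @llvm.aarch64.sve.dup.x.nxv2i64(i64 1)
  %2 = call <vscale x 2 x i64> @llvm.aarch64.sve.mul.u.nxv2i64(<vscale x 2 x i1> %pg, <vscale x 2 x i64> %1, <vscale x 2 x i64> %a)
  ret <vscale x 2 x i64> %2
}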