[X86] vector-shuffle-combining-avx512f.ll - add tests showing failure to simplify expand/compress nodes (#171113)

Simon Pilgrim
2025-12-08 12:02:43 +00:00
committed by GitHub
parent 32ff7100d7
commit 3a6781ea4d


@@ -22,6 +22,9 @@ declare <16 x float> @llvm.x86.avx512.mask.vpermi2var.ps.512(<16 x float>, <16 x
declare <8 x i64> @llvm.x86.avx512.mask.vpermi2var.q.512(<8 x i64>, <8 x i64>, <8 x i64>, i8)
declare <16 x i32> @llvm.x86.avx512.mask.vpermi2var.d.512(<16 x i32>, <16 x i32>, <16 x i32>, i16)
declare <16 x i32> @llvm.x86.avx512.mask.expand.v16i32(<16 x i32>, <16 x i32>, <16 x i1>)
declare <16 x i32> @llvm.x86.avx512.mask.compress.v16i32(<16 x i32>, <16 x i32>, <16 x i1>)

define <8 x double> @combine_permvar_8f64_identity(<8 x double> %x0, <8 x double> %x1) {
; CHECK-LABEL: combine_permvar_8f64_identity:
; CHECK: # %bb.0:
@@ -1031,3 +1034,69 @@ define <8 x double> @concat_vpermilvar_v8f64_v4f64(<4 x double> %a0, <4 x double
%res = shufflevector <4 x double> %v0, <4 x double> %v1, <8 x i32> <i32 0, i32 1, i32 2, i32 3, i32 4, i32 5, i32 6, i32 7>
ret <8 x double> %res
}

; TODO - shift elements up by one
define <16 x i32> @combine_vexpandd_as_valignd(<16 x i32> %x) {
; X86-AVX512F-LABEL: combine_vexpandd_as_valignd:
; X86-AVX512F: # %bb.0:
; X86-AVX512F-NEXT: movw $-2, %ax
; X86-AVX512F-NEXT: kmovw %eax, %k1
; X86-AVX512F-NEXT: vpexpandd %zmm0, %zmm0 {%k1} {z}
; X86-AVX512F-NEXT: retl
;
; X86-AVX512BW-LABEL: combine_vexpandd_as_valignd:
; X86-AVX512BW: # %bb.0:
; X86-AVX512BW-NEXT: movw $-2, %ax
; X86-AVX512BW-NEXT: kmovd %eax, %k1
; X86-AVX512BW-NEXT: vpexpandd %zmm0, %zmm0 {%k1} {z}
; X86-AVX512BW-NEXT: retl
;
; X64-AVX512F-LABEL: combine_vexpandd_as_valignd:
; X64-AVX512F: # %bb.0:
; X64-AVX512F-NEXT: movw $-2, %ax
; X64-AVX512F-NEXT: kmovw %eax, %k1
; X64-AVX512F-NEXT: vpexpandd %zmm0, %zmm0 {%k1} {z}
; X64-AVX512F-NEXT: retq
;
; X64-AVX512BW-LABEL: combine_vexpandd_as_valignd:
; X64-AVX512BW: # %bb.0:
; X64-AVX512BW-NEXT: movw $-2, %ax
; X64-AVX512BW-NEXT: kmovd %eax, %k1
; X64-AVX512BW-NEXT: vpexpandd %zmm0, %zmm0 {%k1} {z}
; X64-AVX512BW-NEXT: retq
%res = call <16 x i32> @llvm.x86.avx512.mask.expand.v16i32(<16 x i32> %x, <16 x i32> zeroinitializer, <16 x i1> <i1 false, i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 true>)
ret <16 x i32> %res
}
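
; A plausible simplified lowering for the 0xFFFE-masked expand above would be
; to realign against a zeroed register instead of materialising a mask register,
; e.g. (illustrative sketch only, not the current or confirmed codegen):
;   vpxor   %xmm1, %xmm1, %xmm1
;   valignd $15, %zmm1, %zmm0, %zmm0
; i.e. shift every element up one lane and fill lane 0 with zero.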

; TODO - zero upper half of vector
define <16 x i32> @combine_vcompressd_as_vmov(<16 x i32> %x) {
; X86-AVX512F-LABEL: combine_vcompressd_as_vmov:
; X86-AVX512F: # %bb.0:
; X86-AVX512F-NEXT: movw $255, %ax
; X86-AVX512F-NEXT: kmovw %eax, %k1
; X86-AVX512F-NEXT: vpcompressd %zmm0, %zmm0 {%k1} {z}
; X86-AVX512F-NEXT: retl
;
; X86-AVX512BW-LABEL: combine_vcompressd_as_vmov:
; X86-AVX512BW: # %bb.0:
; X86-AVX512BW-NEXT: movw $255, %ax
; X86-AVX512BW-NEXT: kmovd %eax, %k1
; X86-AVX512BW-NEXT: vpcompressd %zmm0, %zmm0 {%k1} {z}
; X86-AVX512BW-NEXT: retl
;
; X64-AVX512F-LABEL: combine_vcompressd_as_vmov:
; X64-AVX512F: # %bb.0:
; X64-AVX512F-NEXT: movw $255, %ax
; X64-AVX512F-NEXT: kmovw %eax, %k1
; X64-AVX512F-NEXT: vpcompressd %zmm0, %zmm0 {%k1} {z}
; X64-AVX512F-NEXT: retq
;
; X64-AVX512BW-LABEL: combine_vcompressd_as_vmov:
; X64-AVX512BW: # %bb.0:
; X64-AVX512BW-NEXT: movw $255, %ax
; X64-AVX512BW-NEXT: kmovd %eax, %k1
; X64-AVX512BW-NEXT: vpcompressd %zmm0, %zmm0 {%k1} {z}
; X64-AVX512BW-NEXT: retq
%res = call <16 x i32> @llvm.x86.avx512.mask.compress.v16i32(<16 x i32> %x, <16 x i32> zeroinitializer, <16 x i1> <i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 false, i1 false, i1 false, i1 false, i1 false, i1 false, i1 false, i1 false>)
ret <16 x i32> %res
}
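
; A plausible simplified lowering for the 0x00FF-masked compress above would be
; to keep the low 256 bits and rely on the implicit upper-half zeroing of a
; VEX-encoded ymm move, e.g. (illustrative sketch only, not the current or
; confirmed codegen):
;   vmovaps %ymm0, %ymm0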