[X86][AVX512] Replace lossless i32/u32 to f64 conversion intrinsics with generic IR

Both the (V)CVTDQ2PD (i32 to f64) and (V)CVTUDQ2PD (u32 to f64) conversion instructions are lossless. They can therefore be safely represented as generic __builtin_convertvector calls instead of x86-specific intrinsics without affecting the final codegen.

This patch removes the clang builtins and their uses in the headers; a future patch will remove the corresponding llvm intrinsics.

This is an extension patch to D20528 which dealt with the equivalent sse/avx cases.

Differential Revision: https://reviews.llvm.org/D26686

llvm-svn: 287088
This commit is contained in:
Simon Pilgrim
2016-11-16 09:27:40 +00:00
parent 4d60243bfd
commit 698528d83b
5 changed files with 106 additions and 94 deletions

View File

@@ -961,8 +961,6 @@ TARGET_BUILTIN(__builtin_ia32_maxps512_mask, "V16fV16fV16fV16fUsIi", "", "avx512
TARGET_BUILTIN(__builtin_ia32_maxpd512_mask, "V8dV8dV8dV8dUcIi", "", "avx512f")
TARGET_BUILTIN(__builtin_ia32_cvtdq2ps512_mask, "V16fV16iV16fUsIi", "", "avx512f")
TARGET_BUILTIN(__builtin_ia32_cvtudq2ps512_mask, "V16fV16iV16fUsIi", "", "avx512f")
TARGET_BUILTIN(__builtin_ia32_cvtdq2pd512_mask, "V8dV8iV8dUc", "", "avx512f")
TARGET_BUILTIN(__builtin_ia32_cvtudq2pd512_mask, "V8dV8iV8dUc", "", "avx512f")
TARGET_BUILTIN(__builtin_ia32_cvtpd2ps512_mask, "V8fV8dV8fUcIi", "", "avx512f")
TARGET_BUILTIN(__builtin_ia32_vcvtps2ph512_mask, "V16sV16fIiV16sUs", "", "avx512f")
TARGET_BUILTIN(__builtin_ia32_vcvtph2ps512_mask, "V16fV16sV16fUsIi", "", "avx512f")
@@ -1165,8 +1163,6 @@ TARGET_BUILTIN(__builtin_ia32_compressstoresf128_mask, "vV4f*V4fUc", "", "avx512
TARGET_BUILTIN(__builtin_ia32_compressstoresf256_mask, "vV8f*V8fUc", "", "avx512vl")
TARGET_BUILTIN(__builtin_ia32_compressstoresi128_mask, "vV4i*V4iUc", "", "avx512vl")
TARGET_BUILTIN(__builtin_ia32_compressstoresi256_mask, "vV8i*V8iUc", "", "avx512vl")
TARGET_BUILTIN(__builtin_ia32_cvtdq2pd128_mask, "V2dV4iV2dUc", "", "avx512vl")
TARGET_BUILTIN(__builtin_ia32_cvtdq2pd256_mask, "V4dV4iV4dUc", "", "avx512vl")
TARGET_BUILTIN(__builtin_ia32_cvtdq2ps128_mask, "V4fV4iV4fUc", "", "avx512vl")
TARGET_BUILTIN(__builtin_ia32_cvtdq2ps256_mask, "V8fV8iV8fUc", "", "avx512vl")
TARGET_BUILTIN(__builtin_ia32_cvtpd2dq128_mask, "V4iV2dV4iUc", "", "avx512vl")
@@ -1189,8 +1185,6 @@ TARGET_BUILTIN(__builtin_ia32_cvttps2dq128_mask, "V4iV4fV4iUc", "", "avx512vl")
TARGET_BUILTIN(__builtin_ia32_cvttps2dq256_mask, "V8iV8fV8iUc", "", "avx512vl")
TARGET_BUILTIN(__builtin_ia32_cvttps2udq128_mask, "V4iV4fV4iUc", "", "avx512vl")
TARGET_BUILTIN(__builtin_ia32_cvttps2udq256_mask, "V8iV8fV8iUc", "", "avx512vl")
TARGET_BUILTIN(__builtin_ia32_cvtudq2pd128_mask, "V2dV4iV2dUc", "", "avx512vl")
TARGET_BUILTIN(__builtin_ia32_cvtudq2pd256_mask, "V4dV4iV4dUc", "", "avx512vl")
TARGET_BUILTIN(__builtin_ia32_cvtudq2ps128_mask, "V4fV4iV4fUc", "", "avx512vl")
TARGET_BUILTIN(__builtin_ia32_cvtudq2ps256_mask, "V8fV8iV8fUc", "", "avx512vl")
TARGET_BUILTIN(__builtin_ia32_expanddf128_mask, "V2dV2dV2dUc", "", "avx512vl")

View File

@@ -3740,26 +3740,23 @@ _mm512_maskz_cvtepu32_ps (__mmask16 __U, __m512i __A)
static __inline __m512d __DEFAULT_FN_ATTRS
_mm512_cvtepi32_pd(__m256i __A)
{
return (__m512d) __builtin_ia32_cvtdq2pd512_mask ((__v8si) __A,
(__v8df)
_mm512_setzero_pd (),
(__mmask8) -1);
return (__m512d)__builtin_convertvector((__v8si)__A, __v8df);
}
static __inline__ __m512d __DEFAULT_FN_ATTRS
_mm512_mask_cvtepi32_pd (__m512d __W, __mmask8 __U, __m256i __A)
{
return (__m512d) __builtin_ia32_cvtdq2pd512_mask ((__v8si) __A,
(__v8df) __W,
(__mmask8) __U);
return (__m512d)__builtin_ia32_selectpd_512((__mmask8) __U,
(__v8df)_mm512_cvtepi32_pd(__A),
(__v8df)__W);
}
static __inline__ __m512d __DEFAULT_FN_ATTRS
_mm512_maskz_cvtepi32_pd (__mmask8 __U, __m256i __A)
{
return (__m512d) __builtin_ia32_cvtdq2pd512_mask ((__v8si) __A,
(__v8df) _mm512_setzero_pd (),
(__mmask8) __U);
return (__m512d)__builtin_ia32_selectpd_512((__mmask8) __U,
(__v8df)_mm512_cvtepi32_pd(__A),
(__v8df)_mm512_setzero_pd());
}
static __inline__ __m512d __DEFAULT_FN_ATTRS
@@ -3804,26 +3801,23 @@ _mm512_maskz_cvtepi32_ps (__mmask16 __U, __m512i __A)
static __inline __m512d __DEFAULT_FN_ATTRS
_mm512_cvtepu32_pd(__m256i __A)
{
return (__m512d) __builtin_ia32_cvtudq2pd512_mask ((__v8si) __A,
(__v8df)
_mm512_setzero_pd (),
(__mmask8) -1);
return (__m512d)__builtin_convertvector((__v8su)__A, __v8df);
}
static __inline__ __m512d __DEFAULT_FN_ATTRS
_mm512_mask_cvtepu32_pd (__m512d __W, __mmask8 __U, __m256i __A)
{
return (__m512d) __builtin_ia32_cvtudq2pd512_mask ((__v8si) __A,
(__v8df) __W,
(__mmask8) __U);
return (__m512d)__builtin_ia32_selectpd_512((__mmask8) __U,
(__v8df)_mm512_cvtepu32_pd(__A),
(__v8df)__W);
}
static __inline__ __m512d __DEFAULT_FN_ATTRS
_mm512_maskz_cvtepu32_pd (__mmask8 __U, __m256i __A)
{
return (__m512d) __builtin_ia32_cvtudq2pd512_mask ((__v8si) __A,
(__v8df) _mm512_setzero_pd (),
(__mmask8) __U);
return (__m512d)__builtin_ia32_selectpd_512((__mmask8) __U,
(__v8df)_mm512_cvtepu32_pd(__A),
(__v8df)_mm512_setzero_pd());
}
static __inline__ __m512d __DEFAULT_FN_ATTRS

View File

@@ -2134,32 +2134,30 @@ _mm256_mask_compressstoreu_epi32 (void *__P, __mmask8 __U, __m256i __A) {
static __inline__ __m128d __DEFAULT_FN_ATTRS
_mm_mask_cvtepi32_pd (__m128d __W, __mmask8 __U, __m128i __A) {
return (__m128d) __builtin_ia32_cvtdq2pd128_mask ((__v4si) __A,
(__v2df) __W,
(__mmask8) __U);
return (__m128d)__builtin_ia32_selectpd_128((__mmask8) __U,
(__v2df)_mm_cvtepi32_pd(__A),
(__v2df)__W);
}
static __inline__ __m128d __DEFAULT_FN_ATTRS
_mm_maskz_cvtepi32_pd (__mmask8 __U, __m128i __A) {
return (__m128d) __builtin_ia32_cvtdq2pd128_mask ((__v4si) __A,
(__v2df)
_mm_setzero_pd (),
(__mmask8) __U);
return (__m128d)__builtin_ia32_selectpd_128((__mmask8) __U,
(__v2df)_mm_cvtepi32_pd(__A),
(__v2df)_mm_setzero_pd());
}
static __inline__ __m256d __DEFAULT_FN_ATTRS
_mm256_mask_cvtepi32_pd (__m256d __W, __mmask8 __U, __m128i __A) {
return (__m256d) __builtin_ia32_cvtdq2pd256_mask ((__v4si) __A,
(__v4df) __W,
(__mmask8) __U);
return (__m256d)__builtin_ia32_selectpd_256((__mmask8) __U,
(__v4df)_mm256_cvtepi32_pd(__A),
(__v4df)__W);
}
static __inline__ __m256d __DEFAULT_FN_ATTRS
_mm256_maskz_cvtepi32_pd (__mmask8 __U, __m128i __A) {
return (__m256d) __builtin_ia32_cvtdq2pd256_mask ((__v4si) __A,
(__v4df)
_mm256_setzero_pd (),
(__mmask8) __U);
return (__m256d)__builtin_ia32_selectpd_256((__mmask8) __U,
(__v4df)_mm256_cvtepi32_pd(__A),
(__v4df)_mm256_setzero_pd());
}
static __inline__ __m128 __DEFAULT_FN_ATTRS
@@ -2558,48 +2556,41 @@ _mm256_maskz_cvttps_epu32 (__mmask8 __U, __m256 __A) {
static __inline__ __m128d __DEFAULT_FN_ATTRS
_mm_cvtepu32_pd (__m128i __A) {
return (__m128d) __builtin_ia32_cvtudq2pd128_mask ((__v4si) __A,
(__v2df)
_mm_setzero_pd (),
(__mmask8) -1);
return (__m128d) __builtin_convertvector(
__builtin_shufflevector((__v4su)__A, (__v4su)__A, 0, 1), __v2df);
}
static __inline__ __m128d __DEFAULT_FN_ATTRS
_mm_mask_cvtepu32_pd (__m128d __W, __mmask8 __U, __m128i __A) {
return (__m128d) __builtin_ia32_cvtudq2pd128_mask ((__v4si) __A,
(__v2df) __W,
(__mmask8) __U);
return (__m128d)__builtin_ia32_selectpd_128((__mmask8) __U,
(__v2df)_mm_cvtepu32_pd(__A),
(__v2df)__W);
}
static __inline__ __m128d __DEFAULT_FN_ATTRS
_mm_maskz_cvtepu32_pd (__mmask8 __U, __m128i __A) {
return (__m128d) __builtin_ia32_cvtudq2pd128_mask ((__v4si) __A,
(__v2df)
_mm_setzero_pd (),
(__mmask8) __U);
return (__m128d)__builtin_ia32_selectpd_128((__mmask8) __U,
(__v2df)_mm_cvtepu32_pd(__A),
(__v2df)_mm_setzero_pd());
}
static __inline__ __m256d __DEFAULT_FN_ATTRS
_mm256_cvtepu32_pd (__m128i __A) {
return (__m256d) __builtin_ia32_cvtudq2pd256_mask ((__v4si) __A,
(__v4df)
_mm256_setzero_pd (),
(__mmask8) -1);
return (__m256d)__builtin_convertvector((__v4su)__A, __v4df);
}
static __inline__ __m256d __DEFAULT_FN_ATTRS
_mm256_mask_cvtepu32_pd (__m256d __W, __mmask8 __U, __m128i __A) {
return (__m256d) __builtin_ia32_cvtudq2pd256_mask ((__v4si) __A,
(__v4df) __W,
(__mmask8) __U);
return (__m256d)__builtin_ia32_selectpd_256((__mmask8) __U,
(__v4df)_mm256_cvtepu32_pd(__A),
(__v4df)__W);
}
static __inline__ __m256d __DEFAULT_FN_ATTRS
_mm256_maskz_cvtepu32_pd (__mmask8 __U, __m128i __A) {
return (__m256d) __builtin_ia32_cvtudq2pd256_mask ((__v4si) __A,
(__v4df)
_mm256_setzero_pd (),
(__mmask8) __U);
return (__m256d)__builtin_ia32_selectpd_256((__mmask8) __U,
(__v4df)_mm256_cvtepu32_pd(__A),
(__v4df)_mm256_setzero_pd());
}
static __inline__ __m128 __DEFAULT_FN_ATTRS

View File

@@ -6949,17 +6949,26 @@ __m512 test_mm512_maskz_cvtepu32_ps (__mmask16 __U, __m512i __A)
return _mm512_maskz_cvtepu32_ps (__U,__A);
}
__m512d test_mm512_cvtepi32_pd (__m256i __A)
{
// CHECK-LABEL: @test_mm512_cvtepi32_pd
// CHECK: sitofp <8 x i32> %{{.*}} to <8 x double>
return _mm512_cvtepi32_pd (__A);
}
__m512d test_mm512_mask_cvtepi32_pd (__m512d __W, __mmask8 __U, __m256i __A)
{
// CHECK-LABEL: @test_mm512_mask_cvtepi32_pd
// CHECK: @llvm.x86.avx512.mask.cvtdq2pd.512
// CHECK-LABEL: @test_mm512_mask_cvtepi32_pd
// CHECK: sitofp <8 x i32> %{{.*}} to <8 x double>
// CHECK: select <8 x i1> {{.*}}, <8 x double> {{.*}}, <8 x double> {{.*}}
return _mm512_mask_cvtepi32_pd (__W,__U,__A);
}
__m512d test_mm512_maskz_cvtepi32_pd (__mmask8 __U, __m256i __A)
{
// CHECK-LABEL: @test_mm512_maskz_cvtepi32_pd
// CHECK: @llvm.x86.avx512.mask.cvtdq2pd.512
// CHECK-LABEL: @test_mm512_maskz_cvtepi32_pd
// CHECK: sitofp <8 x i32> %{{.*}} to <8 x double>
// CHECK: select <8 x i1> {{.*}}, <8 x double> {{.*}}, <8 x double> {{.*}}
return _mm512_maskz_cvtepi32_pd (__U,__A);
}
@@ -6967,7 +6976,7 @@ __m512d test_mm512_cvtepi32lo_pd (__m512i __A)
{
// CHECK-LABEL: @test_mm512_cvtepi32lo_pd
// CHECK: shufflevector <8 x i64> %{{.*}}, <8 x i64> %{{.*}}, <4 x i32> <i32 0, i32 1, i32 2, i32 3>
// CHECK: @llvm.x86.avx512.mask.cvtdq2pd.512
// CHECK: sitofp <8 x i32> %{{.*}} to <8 x double>
return _mm512_cvtepi32lo_pd (__A);
}
@@ -6975,7 +6984,8 @@ __m512d test_mm512_mask_cvtepi32lo_pd (__m512d __W, __mmask8 __U, __m512i __A)
{
// CHECK-LABEL: @test_mm512_mask_cvtepi32lo_pd
// CHECK: shufflevector <8 x i64> %{{.*}}, <8 x i64> %{{.*}}, <4 x i32> <i32 0, i32 1, i32 2, i32 3>
// CHECK: @llvm.x86.avx512.mask.cvtdq2pd.512
// CHECK: sitofp <8 x i32> %{{.*}} to <8 x double>
// CHECK: select <8 x i1> {{.*}}, <8 x double> {{.*}}, <8 x double> {{.*}}
return _mm512_mask_cvtepi32lo_pd (__W, __U, __A);
}
@@ -7000,17 +7010,26 @@ __m512 test_mm512_maskz_cvtepi32_ps (__mmask16 __U, __m512i __A)
return _mm512_maskz_cvtepi32_ps (__U,__A);
}
__m512d test_mm512_cvtepu32_pd(__m256i __A)
{
// CHECK-LABEL: @test_mm512_cvtepu32_pd
// CHECK: uitofp <8 x i32> %{{.*}} to <8 x double>
return _mm512_cvtepu32_pd(__A);
}
__m512d test_mm512_mask_cvtepu32_pd (__m512d __W, __mmask8 __U, __m256i __A)
{
// CHECK-LABEL: @test_mm512_mask_cvtepu32_pd
// CHECK: @llvm.x86.avx512.mask.cvtudq2pd.512
// CHECK-LABEL: @test_mm512_mask_cvtepu32_pd
// CHECK: uitofp <8 x i32> %{{.*}} to <8 x double>
// CHECK: select <8 x i1> {{.*}}, <8 x double> {{.*}}, <8 x double> {{.*}}
return _mm512_mask_cvtepu32_pd (__W,__U,__A);
}
__m512d test_mm512_maskz_cvtepu32_pd (__mmask8 __U, __m256i __A)
{
// CHECK-LABEL: @test_mm512_maskz_cvtepu32_pd
// CHECK: @llvm.x86.avx512.mask.cvtudq2pd.512
// CHECK-LABEL: @test_mm512_maskz_cvtepu32_pd
// CHECK: uitofp <8 x i32> %{{.*}} to <8 x double>
// CHECK: select <8 x i1> {{.*}}, <8 x double> {{.*}}, <8 x double> {{.*}}
return _mm512_maskz_cvtepu32_pd (__U,__A);
}
@@ -7018,7 +7037,7 @@ __m512d test_mm512_cvtepu32lo_pd (__m512i __A)
{
// CHECK-LABEL: @test_mm512_cvtepu32lo_pd
// CHECK: shufflevector <8 x i64> %{{.*}}, <8 x i64> %{{.*}}, <4 x i32> <i32 0, i32 1, i32 2, i32 3>
// CHECK: @llvm.x86.avx512.mask.cvtudq2pd.512
// CHECK: uitofp <8 x i32> %{{.*}} to <8 x double>
return _mm512_cvtepu32lo_pd (__A);
}
@@ -7026,7 +7045,8 @@ __m512d test_mm512_mask_cvtepu32lo_pd (__m512d __W, __mmask8 __U, __m512i __A)
{
// CHECK-LABEL: @test_mm512_mask_cvtepu32lo_pd
// CHECK: shufflevector <8 x i64> %{{.*}}, <8 x i64> %{{.*}}, <4 x i32> <i32 0, i32 1, i32 2, i32 3>
// CHECK: @llvm.x86.avx512.mask.cvtudq2pd.512
// CHECK: uitofp <8 x i32> %{{.*}} to <8 x double>
// CHECK: select <8 x i1> {{.*}}, <8 x double> {{.*}}, <8 x double> {{.*}}
return _mm512_mask_cvtepu32lo_pd (__W, __U, __A);
}

View File

@@ -1737,23 +1737,29 @@ void test_mm256_mask_compressstoreu_epi32(void *__P, __mmask8 __U, __m256i __A)
}
__m128d test_mm_mask_cvtepi32_pd(__m128d __W, __mmask8 __U, __m128i __A) {
// CHECK-LABEL: @test_mm_mask_cvtepi32_pd
// CHECK: @llvm.x86.avx512.mask.cvtdq2pd.128
return _mm_mask_cvtepi32_pd(__W,__U,__A);
// CHECK: shufflevector <4 x i32> %{{.*}}, <4 x i32> %{{.*}}, <2 x i32> <i32 0, i32 1>
// CHECK: sitofp <2 x i32> %{{.*}} to <2 x double>
// CHECK: select <2 x i1> {{.*}}, <2 x double> {{.*}}, <2 x double> {{.*}}
return _mm_mask_cvtepi32_pd(__W,__U,__A);
}
__m128d test_mm_maskz_cvtepi32_pd(__mmask8 __U, __m128i __A) {
// CHECK-LABEL: @test_mm_maskz_cvtepi32_pd
// CHECK: @llvm.x86.avx512.mask.cvtdq2pd.128
return _mm_maskz_cvtepi32_pd(__U,__A);
// CHECK: shufflevector <4 x i32> %{{.*}}, <4 x i32> %{{.*}}, <2 x i32> <i32 0, i32 1>
// CHECK: sitofp <2 x i32> %{{.*}} to <2 x double>
// CHECK: select <2 x i1> {{.*}}, <2 x double> {{.*}}, <2 x double> {{.*}}
return _mm_maskz_cvtepi32_pd(__U,__A);
}
__m256d test_mm256_mask_cvtepi32_pd(__m256d __W, __mmask8 __U, __m128i __A) {
// CHECK-LABEL: @test_mm256_mask_cvtepi32_pd
// CHECK: @llvm.x86.avx512.mask.cvtdq2pd.256
return _mm256_mask_cvtepi32_pd(__W,__U,__A);
// CHECK: sitofp <4 x i32> %{{.*}} to <4 x double>
// CHECK: select <4 x i1> {{.*}}, <4 x double> {{.*}}, <4 x double> {{.*}}
return _mm256_mask_cvtepi32_pd(__W,__U,__A);
}
__m256d test_mm256_maskz_cvtepi32_pd(__mmask8 __U, __m128i __A) {
// CHECK-LABEL: @test_mm256_maskz_cvtepi32_pd
// CHECK: @llvm.x86.avx512.mask.cvtdq2pd.256
return _mm256_maskz_cvtepi32_pd(__U,__A);
// CHECK: sitofp <4 x i32> %{{.*}} to <4 x double>
// CHECK: select <4 x i1> {{.*}}, <4 x double> {{.*}}, <4 x double> {{.*}}
return _mm256_maskz_cvtepi32_pd(__U,__A);
}
__m128 test_mm_mask_cvtepi32_ps(__m128 __W, __mmask8 __U, __m128i __A) {
// CHECK-LABEL: @test_mm_mask_cvtepi32_ps
@@ -2017,33 +2023,40 @@ __m256i test_mm256_maskz_cvttps_epu32(__mmask8 __U, __m256 __A) {
}
__m128d test_mm_cvtepu32_pd(__m128i __A) {
// CHECK-LABEL: @test_mm_cvtepu32_pd
// CHECK: @llvm.x86.avx512.mask.cvtudq2pd.128
return _mm_cvtepu32_pd(__A);
// CHECK: shufflevector <4 x i32> %{{.*}}, <4 x i32> %{{.*}}, <2 x i32> <i32 0, i32 1>
// CHECK: uitofp <2 x i32> %{{.*}} to <2 x double>
return _mm_cvtepu32_pd(__A);
}
__m128d test_mm_mask_cvtepu32_pd(__m128d __W, __mmask8 __U, __m128i __A) {
// CHECK-LABEL: @test_mm_mask_cvtepu32_pd
// CHECK: @llvm.x86.avx512.mask.cvtudq2pd.128
return _mm_mask_cvtepu32_pd(__W,__U,__A);
// CHECK: shufflevector <4 x i32> %{{.*}}, <4 x i32> %{{.*}}, <2 x i32> <i32 0, i32 1>
// CHECK: uitofp <2 x i32> %{{.*}} to <2 x double>
// CHECK: select <2 x i1> {{.*}}, <2 x double> {{.*}}, <2 x double> {{.*}}
return _mm_mask_cvtepu32_pd(__W,__U,__A);
}
__m128d test_mm_maskz_cvtepu32_pd(__mmask8 __U, __m128i __A) {
// CHECK-LABEL: @test_mm_maskz_cvtepu32_pd
// CHECK: @llvm.x86.avx512.mask.cvtudq2pd.128
return _mm_maskz_cvtepu32_pd(__U,__A);
// CHECK: shufflevector <4 x i32> %{{.*}}, <4 x i32> %{{.*}}, <2 x i32> <i32 0, i32 1>
// CHECK: uitofp <2 x i32> %{{.*}} to <2 x double>
// CHECK: select <2 x i1> {{.*}}, <2 x double> {{.*}}, <2 x double> {{.*}}
return _mm_maskz_cvtepu32_pd(__U,__A);
}
__m256d test_mm256_cvtepu32_pd(__m128i __A) {
// CHECK-LABEL: @test_mm256_cvtepu32_pd
// CHECK: @llvm.x86.avx512.mask.cvtudq2pd.256
return _mm256_cvtepu32_pd(__A);
// CHECK: uitofp <4 x i32> %{{.*}} to <4 x double>
return _mm256_cvtepu32_pd(__A);
}
__m256d test_mm256_mask_cvtepu32_pd(__m256d __W, __mmask8 __U, __m128i __A) {
// CHECK-LABEL: @test_mm256_mask_cvtepu32_pd
// CHECK: @llvm.x86.avx512.mask.cvtudq2pd.256
return _mm256_mask_cvtepu32_pd(__W,__U,__A);
// CHECK: uitofp <4 x i32> %{{.*}} to <4 x double>
// CHECK: select <4 x i1> {{.*}}, <4 x double> {{.*}}, <4 x double> {{.*}}
return _mm256_mask_cvtepu32_pd(__W,__U,__A);
}
__m256d test_mm256_maskz_cvtepu32_pd(__mmask8 __U, __m128i __A) {
// CHECK-LABEL: @test_mm256_maskz_cvtepu32_pd
// CHECK: @llvm.x86.avx512.mask.cvtudq2pd.256
return _mm256_maskz_cvtepu32_pd(__U,__A);
// CHECK: uitofp <4 x i32> %{{.*}} to <4 x double>
// CHECK: select <4 x i1> {{.*}}, <4 x double> {{.*}}, <4 x double> {{.*}}
return _mm256_maskz_cvtepu32_pd(__U,__A);
}
__m128 test_mm_cvtepu32_ps(__m128i __A) {
// CHECK-LABEL: @test_mm_cvtepu32_ps