[AArch64] Refactor the NEON scalar reduce pairwise intrinsics so that they use float/double rather than the vector equivalents when appropriate.

llvm-svn: 196836
Author: Chad Rosier
Date:   2013-12-09 22:47:59 +00:00
parent 01703584eb
commit ce511f2fcb

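For context, the builtins touched below are the arm_neon.h scalar pairwise reductions: each collapses a two-lane float/double vector into a single scalar, so a plain float/double return type matches their C signatures directly. A minimal usage sketch (assuming an AArch64 target with NEON enabled; not part of the commit):

#include <arm_neon.h>

// Pairwise add: sums the two lanes of a float32x2_t into a scalar float.
float32_t sum_lanes(float32x2_t v) {
  return vpadds_f32(v);
}

// Pairwise max: returns the larger of the two lanes as a scalar double.
float64_t max_lanes(float64x2_t v) {
  return vpmaxqd_f64(v);
}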

@@ -1763,6 +1763,7 @@ static Value *EmitAArch64ScalarBuiltinExpr(CodeGenFunction &CGF,
   bool OverloadCvtInt = false;
   bool OverloadWideInt = false;
   bool OverloadNarrowInt = false;
+  bool OverloadScalarRetInt = false;
   const char *s = NULL;
   SmallVector<Value *, 4> Ops;
@@ -1995,35 +1996,35 @@ static Value *EmitAArch64ScalarBuiltinExpr(CodeGenFunction &CGF,
   case AArch64::BI__builtin_neon_vpadds_f32:
   case AArch64::BI__builtin_neon_vpaddd_f64:
     Int = Intrinsic::aarch64_neon_vpfadd;
-    s = "vpfadd"; AcrossVec = true; break;
+    s = "vpfadd"; OverloadScalarRetInt = true; break;
   // Scalar Reduce Pairwise Floating Point Max
   case AArch64::BI__builtin_neon_vmaxv_f32:
   case AArch64::BI__builtin_neon_vpmaxs_f32:
   case AArch64::BI__builtin_neon_vmaxvq_f64:
   case AArch64::BI__builtin_neon_vpmaxqd_f64:
     Int = Intrinsic::aarch64_neon_vpmax;
-    s = "vpmax"; AcrossVec = true; break;
+    s = "vpmax"; OverloadScalarRetInt = true; break;
   // Scalar Reduce Pairwise Floating Point Min
   case AArch64::BI__builtin_neon_vminv_f32:
   case AArch64::BI__builtin_neon_vpmins_f32:
   case AArch64::BI__builtin_neon_vminvq_f64:
   case AArch64::BI__builtin_neon_vpminqd_f64:
     Int = Intrinsic::aarch64_neon_vpmin;
-    s = "vpmin"; AcrossVec = true; break;
+    s = "vpmin"; OverloadScalarRetInt = true; break;
   // Scalar Reduce Pairwise Floating Point Maxnm
   case AArch64::BI__builtin_neon_vmaxnmv_f32:
   case AArch64::BI__builtin_neon_vpmaxnms_f32:
   case AArch64::BI__builtin_neon_vmaxnmvq_f64:
   case AArch64::BI__builtin_neon_vpmaxnmqd_f64:
     Int = Intrinsic::aarch64_neon_vpfmaxnm;
-    s = "vpfmaxnm"; AcrossVec = true; break;
+    s = "vpfmaxnm"; OverloadScalarRetInt = true; break;
   // Scalar Reduce Pairwise Floating Point Minnm
   case AArch64::BI__builtin_neon_vminnmv_f32:
   case AArch64::BI__builtin_neon_vpminnms_f32:
   case AArch64::BI__builtin_neon_vminnmvq_f64:
   case AArch64::BI__builtin_neon_vpminnmqd_f64:
     Int = Intrinsic::aarch64_neon_vpfminnm;
-    s = "vpfminnm"; AcrossVec = true; break;
+    s = "vpfminnm"; OverloadScalarRetInt = true; break;
   // The following are intrinsics with scalar results generated across vectors (AcrossVec).
   case AArch64::BI__builtin_neon_vaddlv_s8:
   case AArch64::BI__builtin_neon_vaddlv_s16:
@@ -2580,12 +2581,22 @@ static Value *EmitAArch64ScalarBuiltinExpr(CodeGenFunction &CGF,
     F = CGF.CGM.getIntrinsic(Int, Tys);
     assert(E->getNumArgs() == 1);
   } else if (OverloadInt) {
-    // Determine the type of this overloaded AArch64 intrinsic
+    // Determine the type of this overloaded AArch64 intrinsic and convert the
+    // scalar types to one-vector element types.
     llvm::Type *Ty = CGF.ConvertType(E->getCallReturnType());
     llvm::VectorType *VTy = llvm::VectorType::get(Ty, 1);
     assert(VTy);
 
     F = CGF.CGM.getIntrinsic(Int, VTy);
+  } else if (OverloadScalarRetInt) {
+    // Determine the type of this overloaded AArch64 intrinsic
+    const Expr *Arg = E->getArg(E->getNumArgs()-1);
+    llvm::Type *Ty = CGF.ConvertType(Arg->getType());
+    llvm::VectorType *VTy = cast<llvm::VectorType>(Ty);
+    llvm::Type *RTy = VTy->getElementType();
+
+    llvm::Type *Tys[2] = {RTy, VTy};
+    F = CGF.CGM.getIntrinsic(Int, Tys);
   } else if (OverloadWideInt || OverloadNarrowInt) {
     // Determine the type of this overloaded AArch64 intrinsic
     const Expr *Arg = E->getArg(E->getNumArgs()-1);
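For reference, the new OverloadScalarRetInt branch above overloads each intrinsic on the pair {scalar return type, vector operand type}, both derived from the builtin's last argument, instead of on a one-element vector type as before. A standalone sketch of that derivation against the LLVM 3.4-era C++ API (getScalarRetPairwise is a hypothetical helper, not from the commit; the intrinsic ID is the one named in the patch):

#include "llvm/IR/DerivedTypes.h"
#include "llvm/IR/Intrinsics.h"
#include "llvm/IR/LLVMContext.h"
#include "llvm/IR/Module.h"

using namespace llvm;

// Hypothetical helper: derive the {scalar return, vector operand} overload
// types from the operand's vector type, as the patched code does, and ask
// for the matching intrinsic declaration.
static Function *getScalarRetPairwise(Module &M, Intrinsic::ID Int,
                                      VectorType *VTy) {
  Type *Tys[2] = {VTy->getElementType(), VTy};
  return Intrinsic::getDeclaration(&M, Int, Tys);
}

int main() {
  LLVMContext Ctx;
  Module M("sketch", Ctx);
  // For vpadds_f32 the operand is <2 x float>, so this requests an overload
  // returning a scalar float rather than the old one-element-vector form.
  VectorType *V2F32 = VectorType::get(Type::getFloatTy(Ctx), 2);
  getScalarRetPairwise(M, Intrinsic::aarch64_neon_vpfadd, V2F32);
  return 0;
}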