Mirror of https://github.com/intel/llvm.git (synced 2026-02-02 02:00:03 +08:00)

Commit: Fix Whitespace.

llvm-svn: 116798
@@ -74,7 +74,7 @@ namespace clang {
    bool BoolData1;

    ABIArgInfo(Kind K, const llvm::Type *TD=0,
               unsigned UI=0, bool B0 = false, bool B1 = false)
      : TheKind(K), TypeData(TD), UIntData(UI), BoolData0(B0), BoolData1(B1) {}

  public:

@@ -107,7 +107,7 @@ namespace clang {
    bool canHaveCoerceToType() const {
      return TheKind == Direct || TheKind == Extend;
    }

    // Direct/Extend accessors
    unsigned getDirectOffset() const {
      assert((isDirect() || isExtend()) && "Not a direct or extend kind");

@@ -117,12 +117,12 @@ namespace clang {
      assert(canHaveCoerceToType() && "Invalid kind!");
      return TypeData;
    }

    void setCoerceToType(const llvm::Type *T) {
      assert(canHaveCoerceToType() && "Invalid kind!");
      TypeData = T;
    }

    // Indirect accessors
    unsigned getIndirectAlign() const {
      assert(TheKind == Indirect && "Invalid kind!");

@@ -138,7 +138,7 @@ namespace clang {
      assert(TheKind == Indirect && "Invalid kind!");
      return BoolData1;
    }

    void dump() const;
  };

@@ -147,10 +147,10 @@ namespace clang {
  class ABIInfo {
  public:
    CodeGen::CodeGenTypes &CGT;

    ABIInfo(CodeGen::CodeGenTypes &cgt) : CGT(cgt) {}
    virtual ~ABIInfo();

    ASTContext &getContext() const;
    llvm::LLVMContext &getVMContext() const;
    const llvm::TargetData &getTargetData() const;
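
For orientation: the hunks above are inside clang's ABIArgInfo, which is
essentially a tagged record, one Kind discriminator plus a few payload fields
that every kind reuses, with asserts gating each accessor. A minimal
self-contained sketch of that pattern (the names are illustrative stand-ins,
not the clang API):

#include <cassert>

struct Type;  // opaque stand-in for llvm::Type

class ArgClassification {
public:
  enum Kind { Direct, Extend, Indirect, Ignore };

private:
  Kind TheKind;
  const Type *TypeData;  // coerce-to type, meaningful for Direct/Extend
  unsigned UIntData;     // offset or alignment, depending on the kind

public:
  explicit ArgClassification(Kind K, const Type *TD = 0, unsigned UI = 0)
      : TheKind(K), TypeData(TD), UIntData(UI) {}

  // Only Direct/Extend classifications carry a coerce-to type.
  bool canHaveCoerceToType() const {
    return TheKind == Direct || TheKind == Extend;
  }
  const Type *getCoerceToType() const {
    assert(canHaveCoerceToType() && "Invalid kind!");
    return TypeData;
  }
  // Indirect reuses the same integer slot for its required alignment.
  unsigned getIndirectAlign() const {
    assert(TheKind == Indirect && "Invalid kind!");
    return UIntData;
  }
};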
@@ -131,7 +131,7 @@ const CGFunctionInfo &CodeGenTypes::getFunctionInfo(const CXXMethodDecl *MD) {
  return ::getFunctionInfo(*this, ArgTys, GetFormalType(MD));
}

const CGFunctionInfo &CodeGenTypes::getFunctionInfo(const CXXConstructorDecl *D,
                                                    CXXCtorType Type) {
  llvm::SmallVector<CanQualType, 16> ArgTys;
  ArgTys.push_back(GetThisType(Context, D->getParent()));

@@ -170,7 +170,7 @@ const CGFunctionInfo &CodeGenTypes::getFunctionInfo(const FunctionDecl *FD) {
  CanQualType FTy = FD->getType()->getCanonicalTypeUnqualified();
  assert(isa<FunctionType>(FTy));
  if (isa<FunctionNoProtoType>(FTy))
    return getFunctionInfo(FTy.getAs<FunctionNoProtoType>());
  assert(isa<FunctionProtoType>(FTy));
  return getFunctionInfo(FTy.getAs<FunctionProtoType>());
}

@@ -195,13 +195,13 @@ const CGFunctionInfo &CodeGenTypes::getFunctionInfo(const ObjCMethodDecl *MD) {
const CGFunctionInfo &CodeGenTypes::getFunctionInfo(GlobalDecl GD) {
  // FIXME: Do we need to handle ObjCMethodDecl?
  const FunctionDecl *FD = cast<FunctionDecl>(GD.getDecl());

  if (const CXXConstructorDecl *CD = dyn_cast<CXXConstructorDecl>(FD))
    return getFunctionInfo(CD, GD.getCtorType());

  if (const CXXDestructorDecl *DD = dyn_cast<CXXDestructorDecl>(FD))
    return getFunctionInfo(DD, GD.getDtorType());

  return getFunctionInfo(FD);
}

@@ -256,14 +256,14 @@ const CGFunctionInfo &CodeGenTypes::getFunctionInfo(CanQualType ResTy,

  // Compute ABI information.
  getABIInfo().computeInfo(*FI);

  // Loop over all of the computed argument and return value info. If any of
  // them are direct or extend without a specified coerce type, specify the
  // default now.
  ABIArgInfo &RetInfo = FI->getReturnInfo();
  if (RetInfo.canHaveCoerceToType() && RetInfo.getCoerceToType() == 0)
    RetInfo.setCoerceToType(ConvertTypeRecursive(FI->getReturnType()));

  for (CGFunctionInfo::arg_iterator I = FI->arg_begin(), E = FI->arg_end();
       I != E; ++I)
    if (I->info.canHaveCoerceToType() && I->info.getCoerceToType() == 0)

@@ -274,7 +274,7 @@ const CGFunctionInfo &CodeGenTypes::getFunctionInfo(CanQualType ResTy,
  // we *just* filled in the FunctionInfo for.
  if (!IsRecursive && !PointersToResolve.empty())
    HandleLateResolvedPointers();

  return *FI;
}

@@ -288,7 +288,7 @@ CGFunctionInfo::CGFunctionInfo(unsigned _CallingConvention,
    NoReturn(_NoReturn), RegParm(_RegParm)
{
  NumArgs = NumArgTys;

  // FIXME: Coallocate with the CGFunctionInfo object.
  Args = new ArgInfo[1 + NumArgTys];
  Args[0].type = ResTy;

@@ -386,20 +386,20 @@ EnterStructPointerForCoercedAccess(llvm::Value *SrcPtr,
                                   uint64_t DstSize, CodeGenFunction &CGF) {
  // We can't dive into a zero-element struct.
  if (SrcSTy->getNumElements() == 0) return SrcPtr;

  const llvm::Type *FirstElt = SrcSTy->getElementType(0);

  // If the first elt is at least as large as what we're looking for, or if the
  // first element is the same size as the whole struct, we can enter it.
  uint64_t FirstEltSize =
    CGF.CGM.getTargetData().getTypeAllocSize(FirstElt);
  if (FirstEltSize < DstSize &&
      FirstEltSize < CGF.CGM.getTargetData().getTypeAllocSize(SrcSTy))
    return SrcPtr;

  // GEP into the first element.
  SrcPtr = CGF.Builder.CreateConstGEP2_32(SrcPtr, 0, 0, "coerce.dive");

  // If the first element is a struct, recurse.
  const llvm::Type *SrcTy =
    cast<llvm::PointerType>(SrcPtr->getType())->getElementType();
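
The guard in EnterStructPointerForCoercedAccess is easy to misread: the
function refuses to dive into the first element only when that element is
both smaller than the access being performed and smaller than the whole
struct. A self-contained sketch of just the size logic, with plain integers
standing in for getTypeAllocSize() results (the helper names are made up):

#include <cstdint>
#include <vector>

struct StructSizes {
  uint64_t totalSize;                  // getTypeAllocSize(SrcSTy)
  std::vector<uint64_t> elementSizes;  // getTypeAllocSize() per element
};

// True if a coerced access of dstSize bytes may be performed through the
// struct's first element.
bool mayDiveIntoFirstElement(const StructSizes &s, uint64_t dstSize) {
  if (s.elementSizes.empty())
    return false;  // can't dive into a zero-element struct
  uint64_t firstEltSize = s.elementSizes[0];
  // Too small for the access, and not the whole struct either: stay put.
  if (firstEltSize < dstSize && firstEltSize < s.totalSize)
    return false;
  return true;
}

For example, for a struct modeling { i64, i32 } (alloc size 16 with padding,
first element 8 bytes), an 8-byte access may dive into the first element,
while a 16-byte access may not.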
@@ -417,23 +417,23 @@ static llvm::Value *CoerceIntOrPtrToIntOrPtr(llvm::Value *Val,
                                             CodeGenFunction &CGF) {
  if (Val->getType() == Ty)
    return Val;

  if (isa<llvm::PointerType>(Val->getType())) {
    // If this is Pointer->Pointer avoid conversion to and from int.
    if (isa<llvm::PointerType>(Ty))
      return CGF.Builder.CreateBitCast(Val, Ty, "coerce.val");

    // Convert the pointer to an integer so we can play with its width.
    Val = CGF.Builder.CreatePtrToInt(Val, CGF.IntPtrTy, "coerce.val.pi");
  }

  const llvm::Type *DestIntTy = Ty;
  if (isa<llvm::PointerType>(DestIntTy))
    DestIntTy = CGF.IntPtrTy;

  if (Val->getType() != DestIntTy)
    Val = CGF.Builder.CreateIntCast(Val, DestIntTy, false, "coerce.val.ii");

  if (isa<llvm::PointerType>(Ty))
    Val = CGF.Builder.CreateIntToPtr(Val, Ty, "coerce.val.ip");
  return Val;
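
CoerceIntOrPtrToIntOrPtr only ever needs four cast flavors: a single bitcast
for pointer-to-pointer, and otherwise ptrtoint, an integer width cast, and
inttoptr, each applied only where the types actually differ. A self-contained
sketch that plans the same cast chain over a toy type model (not LLVM's type
system; names are illustrative):

#include <string>
#include <vector>

struct IRType {
  bool isPointer;
  unsigned bits;  // integer width; ignored when isPointer is true
};

// Returns the casts the function would emit, in order; an empty result
// means the value already has the right type.
std::vector<std::string> planCoercion(IRType src, IRType dst,
                                      unsigned intPtrBits) {
  std::vector<std::string> casts;
  if (src.isPointer && dst.isPointer)
    return {"bitcast"};  // pointer->pointer avoids the integer round trip
  unsigned cur = src.bits;
  if (src.isPointer) {
    casts.push_back("ptrtoint");  // go via an intptr-sized integer
    cur = intPtrBits;
  }
  unsigned destIntBits = dst.isPointer ? intPtrBits : dst.bits;
  if (cur != destIntBits)
    casts.push_back("intcast");   // adjust the integer width
  if (dst.isPointer)
    casts.push_back("inttoptr");
  return casts;
}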
@@ -452,18 +452,18 @@ static llvm::Value *CreateCoercedLoad(llvm::Value *SrcPtr,
                                      CodeGenFunction &CGF) {
  const llvm::Type *SrcTy =
    cast<llvm::PointerType>(SrcPtr->getType())->getElementType();

  // If SrcTy and Ty are the same, just do a load.
  if (SrcTy == Ty)
    return CGF.Builder.CreateLoad(SrcPtr);

  uint64_t DstSize = CGF.CGM.getTargetData().getTypeAllocSize(Ty);

  if (const llvm::StructType *SrcSTy = dyn_cast<llvm::StructType>(SrcTy)) {
    SrcPtr = EnterStructPointerForCoercedAccess(SrcPtr, SrcSTy, DstSize, CGF);
    SrcTy = cast<llvm::PointerType>(SrcPtr->getType())->getElementType();
  }

  uint64_t SrcSize = CGF.CGM.getTargetData().getTypeAllocSize(SrcTy);

  // If the source and destination are integer or pointer types, just do an

@@ -473,7 +473,7 @@ static llvm::Value *CreateCoercedLoad(llvm::Value *SrcPtr,
    llvm::LoadInst *Load = CGF.Builder.CreateLoad(SrcPtr);
    return CoerceIntOrPtrToIntOrPtr(Load, Ty, CGF);
  }

  // If load is legal, just bitcast the src pointer.
  if (SrcSize >= DstSize) {
    // Generally SrcSize is never greater than DstSize, since this means we are

@@ -489,7 +489,7 @@ static llvm::Value *CreateCoercedLoad(llvm::Value *SrcPtr,
    Load->setAlignment(1);
    return Load;
  }

  // Otherwise do coercion through memory. This is stupid, but
  // simple.
  llvm::Value *Tmp = CGF.CreateTempAlloca(Ty);
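
The "coercion through memory" fallback this hunk ends with is the IR analogue
of C's memcpy-based type punning: spill the value into a scratch slot, copy
the raw bytes, and load them back at the other type. A self-contained
illustration with ordinary objects in place of allocas (the types are
examples only):

#include <cstdint>
#include <cstring>

// Reinterpret the bytes of src as a Dst by copying through memory, the way
// CreateCoercedLoad falls back to a temporary alloca plus a byte copy when
// no direct cast applies. Copies only the bytes both types cover.
template <typename Dst, typename Src>
Dst coerceThroughMemory(const Src &src) {
  Dst dst{};  // zero-fill any bytes the source does not cover
  std::memcpy(&dst, &src,
              sizeof(Src) < sizeof(Dst) ? sizeof(Src) : sizeof(Dst));
  return dst;
}

int main() {
  struct TwoInts { uint32_t a, b; } v = {1, 2};
  uint64_t bits = coerceThroughMemory<uint64_t>(v);  // same 8 bytes, new type
  return bits == 0 ? 1 : 0;
}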
@@ -518,14 +518,14 @@ static void CreateCoercedStore(llvm::Value *Src,
    CGF.Builder.CreateStore(Src, DstPtr, DstIsVolatile);
    return;
  }

  uint64_t SrcSize = CGF.CGM.getTargetData().getTypeAllocSize(SrcTy);

  if (const llvm::StructType *DstSTy = dyn_cast<llvm::StructType>(DstTy)) {
    DstPtr = EnterStructPointerForCoercedAccess(DstPtr, DstSTy, SrcSize, CGF);
    DstTy = cast<llvm::PointerType>(DstPtr->getType())->getElementType();
  }

  // If the source and destination are integer or pointer types, just do an
  // extension or truncation to the desired type.
  if ((isa<llvm::IntegerType>(SrcTy) || isa<llvm::PointerType>(SrcTy)) &&

@@ -534,7 +534,7 @@ static void CreateCoercedStore(llvm::Value *Src,
    CGF.Builder.CreateStore(Src, DstPtr, DstIsVolatile);
    return;
  }

  uint64_t DstSize = CGF.CGM.getTargetData().getTypeAllocSize(DstTy);

  // If store is legal, just bitcast the src pointer.

@@ -590,7 +590,7 @@ bool CodeGenModule::ReturnTypeUsesFPRet(QualType ResultType) {

const llvm::FunctionType *CodeGenTypes::GetFunctionType(GlobalDecl GD) {
  const CGFunctionInfo &FI = getFunctionInfo(GD);

  // For definition purposes, don't consider a K&R function variadic.
  bool Variadic = false;
  if (const FunctionProtoType *FPT =

@@ -673,7 +673,7 @@ CodeGenTypes::GetFunctionType(const CGFunctionInfo &FI, bool IsVariadic,
const llvm::Type *CodeGenTypes::GetFunctionTypeForVTable(GlobalDecl GD) {
  const CXXMethodDecl *MD = cast<CXXMethodDecl>(GD.getDecl());
  const FunctionProtoType *FPT = MD->getType()->getAs<FunctionProtoType>();

  if (!VerifyFuncTypeComplete(FPT)) {
    const CGFunctionInfo *Info;
    if (isa<CXXDestructorDecl>(MD))

@@ -688,7 +688,7 @@ const llvm::Type *CodeGenTypes::GetFunctionTypeForVTable(GlobalDecl GD) {

void CodeGenModule::ConstructAttributeList(const CGFunctionInfo &FI,
                                           const Decl *TargetDecl,
                                           AttributeListType &PAL,
                                           unsigned &CallingConv) {
  unsigned FuncAttrs = 0;
  unsigned RetAttrs = 0;

@@ -786,7 +786,7 @@ void CodeGenModule::ConstructAttributeList(const CGFunctionInfo &FI,
      Attributes |= llvm::Attribute::InReg;
    }
    // FIXME: handle sseregparm someday...

    if (const llvm::StructType *STy =
          dyn_cast<llvm::StructType>(AI.getCoerceToType()))
      Index += STy->getNumElements()-1;  // 1 will be added below.
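
A detail worth spelling out in the hunk above: attributes are applied per IR
argument, so when one source-level argument is coerced to a struct and
flattened into several IR arguments, the running attribute index has to skip
ahead by the number of extra elements, which is what
"Index += STy->getNumElements()-1" does before the usual increment for the
argument itself. A sketch of that bookkeeping (a hypothetical helper; it
assumes index 0 names the return-value slot, as in LLVM attribute lists of
this era):

#include <cstddef>
#include <vector>

// flattenedCounts[i] is how many IR arguments source argument i expands to
// (1 for ordinary arguments, N for a flattened N-element struct).
std::vector<std::size_t>
firstIRIndexPerArg(const std::vector<std::size_t> &flattenedCounts) {
  std::vector<std::size_t> firstIndex;
  std::size_t index = 1;  // 0 is the return value; parameters start at 1
  for (std::size_t n : flattenedCounts) {
    firstIndex.push_back(index);
    if (n > 1)
      index += n - 1;  // extra slots taken by the flattened elements
    ++index;           // "1 will be added below"
  }
  return firstIndex;
}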
@@ -911,7 +911,7 @@ void CodeGenFunction::EmitFunctionProlog(const CGFunctionInfo &FI,
        ArgI.getDirectOffset() == 0) {
      assert(AI != Fn->arg_end() && "Argument mismatch!");
      llvm::Value *V = AI;

      if (Arg->getType().isRestrictQualified())
        AI->addAttr(llvm::Attribute::NoAlias);

@@ -925,33 +925,33 @@ void CodeGenFunction::EmitFunctionProlog(const CGFunctionInfo &FI,
      }

      llvm::AllocaInst *Alloca = CreateMemTemp(Ty, "coerce");

      // The alignment we need to use is the max of the requested alignment for
      // the argument plus the alignment required by our access code below.
      unsigned AlignmentToUse =
        CGF.CGM.getTargetData().getABITypeAlignment(ArgI.getCoerceToType());
      AlignmentToUse = std::max(AlignmentToUse,
                     (unsigned)getContext().getDeclAlign(Arg).getQuantity());

      Alloca->setAlignment(AlignmentToUse);
      llvm::Value *V = Alloca;
      llvm::Value *Ptr = V;    // Pointer to store into.

      // If the value is offset in memory, apply the offset now.
      if (unsigned Offs = ArgI.getDirectOffset()) {
        Ptr = Builder.CreateBitCast(Ptr, Builder.getInt8PtrTy());
        Ptr = Builder.CreateConstGEP1_32(Ptr, Offs);
        Ptr = Builder.CreateBitCast(Ptr,
                       llvm::PointerType::getUnqual(ArgI.getCoerceToType()));
      }

      // If the coerce-to type is a first class aggregate, we flatten it and
      // pass the elements. Either way is semantically identical, but fast-isel
      // and the optimizer generally likes scalar values better than FCAs.
      if (const llvm::StructType *STy =
            dyn_cast<llvm::StructType>(ArgI.getCoerceToType())) {
        Ptr = Builder.CreateBitCast(Ptr, llvm::PointerType::getUnqual(STy));

        for (unsigned i = 0, e = STy->getNumElements(); i != e; ++i) {
          assert(AI != Fn->arg_end() && "Argument mismatch!");
          AI->setName(Arg->getName() + ".coerce" + llvm::Twine(i));
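
The comment about first-class aggregates above is the key idea of this prolog
path: rather than passing one struct-typed IR value, the struct's elements
travel as separate scalar arguments, and the prolog stores them back into a
temporary one by one. In source terms, a hypothetical illustration of the two
equivalent shapes:

#include <cstdint>

struct Pair { uint32_t a, b; };  // imagine a coerce-to type of { i32, i32 }

// Unflattened: the aggregate is a single first-class value.
uint64_t sumFCA(Pair p) { return static_cast<uint64_t>(p.a) + p.b; }

// Flattened: one scalar argument per element, which is what the loop above
// reassembles with its per-element stores into the "coerce" alloca.
uint64_t sumFlattened(uint32_t a, uint32_t b) {
  return static_cast<uint64_t>(a) + b;
}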
@@ -964,8 +964,8 @@ void CodeGenFunction::EmitFunctionProlog(const CGFunctionInfo &FI,
        AI->setName(Arg->getName() + ".coerce");
        CreateCoercedStore(AI++, Ptr, /*DestIsVolatile=*/false, *this);
      }


      // Match to what EmitParmDecl is expecting for this type.
      if (!CodeGenFunction::hasAggregateLLVMType(Ty)) {
        V = EmitLoadOfScalar(V, false, AlignmentToUse, Ty);

@@ -1044,13 +1044,13 @@ void CodeGenFunction::EmitFunctionEpilog(const CGFunctionInfo &FI) {
        RetAI.getDirectOffset() == 0) {
      // The internal return value temp always will have pointer-to-return-type
      // type, just do a load.

      // If the instruction right before the insertion point is a store to the
      // return value, we can elide the load, zap the store, and usually zap the
      // alloca.
      llvm::BasicBlock *InsertBB = Builder.GetInsertBlock();
      llvm::StoreInst *SI = 0;
      if (InsertBB->empty() ||
          !(SI = dyn_cast<llvm::StoreInst>(&InsertBB->back())) ||
          SI->getPointerOperand() != ReturnValue || SI->isVolatile()) {
        RV = Builder.CreateLoad(ReturnValue);

@@ -1059,7 +1059,7 @@ void CodeGenFunction::EmitFunctionEpilog(const CGFunctionInfo &FI) {
        RetDbgLoc = SI->getDebugLoc();
        RV = SI->getValueOperand();
        SI->eraseFromParent();

        // If that was the only use of the return value, nuke it as well now.
        if (ReturnValue->use_empty() && isa<llvm::AllocaInst>(ReturnValue)) {
          cast<llvm::AllocaInst>(ReturnValue)->eraseFromParent();
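
The epilog optimization above is a tiny peephole: look at the instruction
immediately before the insertion point, and if it is a non-volatile store to
the return slot, reuse the stored value, delete the store, and (if nothing
else uses it) delete the alloca too. A toy model of the decision, with a
vector of stores standing in for a basic block (names are made up):

#include <string>
#include <vector>

struct Store {
  std::string slot;  // which alloca this store writes
  int value;         // the value being stored
  bool isVolatile;
};

// If the last instruction is a non-volatile store to retSlot, "zap" it and
// hand back its value; otherwise report that a load is still required.
bool elideReturnLoad(std::vector<Store> &block, const std::string &retSlot,
                     int &valueOut) {
  if (block.empty())
    return false;
  const Store &last = block.back();
  if (last.slot != retSlot || last.isVolatile)
    return false;
  valueOut = last.value;  // use the stored value directly
  block.pop_back();       // zap the store
  return true;
}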
@@ -1072,10 +1072,10 @@ void CodeGenFunction::EmitFunctionEpilog(const CGFunctionInfo &FI) {
      if (unsigned Offs = RetAI.getDirectOffset()) {
        V = Builder.CreateBitCast(V, Builder.getInt8PtrTy());
        V = Builder.CreateConstGEP1_32(V, Offs);
        V = Builder.CreateBitCast(V,
                     llvm::PointerType::getUnqual(RetAI.getCoerceToType()));
      }

      RV = CreateCoercedLoad(V, RetAI.getCoerceToType(), *this);
    }
    break;

@@ -1099,7 +1099,7 @@ RValue CodeGenFunction::EmitDelegateCallArg(const VarDecl *Param) {
  llvm::Value *Local = GetAddrOfLocalVar(Param);

  QualType ArgType = Param->getType();

  // For the most part, we just need to load the alloca, except:
  // 1) aggregate r-values are actually pointers to temporaries, and
  // 2) references to aggregates are pointers directly to the aggregate.

@@ -1200,7 +1200,7 @@ RValue CodeGenFunction::EmitCall(const CGFunctionInfo &CallInfo,

    case ABIArgInfo::Ignore:
      break;

    case ABIArgInfo::Extend:
    case ABIArgInfo::Direct: {
      if (!isa<llvm::StructType>(ArgInfo.getCoerceToType()) &&

@@ -1224,16 +1224,16 @@ RValue CodeGenFunction::EmitCall(const CGFunctionInfo &CallInfo,
        StoreComplexToAddr(RV.getComplexVal(), SrcPtr, false);
      } else
        SrcPtr = RV.getAggregateAddr();

      // If the value is offset in memory, apply the offset now.
      if (unsigned Offs = ArgInfo.getDirectOffset()) {
        SrcPtr = Builder.CreateBitCast(SrcPtr, Builder.getInt8PtrTy());
        SrcPtr = Builder.CreateConstGEP1_32(SrcPtr, Offs);
        SrcPtr = Builder.CreateBitCast(SrcPtr,
                     llvm::PointerType::getUnqual(ArgInfo.getCoerceToType()));

      }

      // If the coerce-to type is a first class aggregate, we flatten it and
      // pass the elements. Either way is semantically identical, but fast-isel
      // and the optimizer generally likes scalar values better than FCAs.

@@ -1253,7 +1253,7 @@ RValue CodeGenFunction::EmitCall(const CGFunctionInfo &CallInfo,
        Args.push_back(CreateCoercedLoad(SrcPtr, ArgInfo.getCoerceToType(),
                                         *this));
      }

      break;
    }

@@ -1352,7 +1352,7 @@ RValue CodeGenFunction::EmitCall(const CGFunctionInfo &CallInfo,
    // If we are ignoring an argument that had a result, make sure to
    // construct the appropriate return value for our caller.
    return GetUndefRValue(RetTy);

  case ABIArgInfo::Extend:
  case ABIArgInfo::Direct: {
    if (RetAI.getCoerceToType() == ConvertType(RetTy) &&

@@ -1375,25 +1375,25 @@ RValue CodeGenFunction::EmitCall(const CGFunctionInfo &CallInfo,
      }
      return RValue::get(CI);
    }

    llvm::Value *DestPtr = ReturnValue.getValue();
    bool DestIsVolatile = ReturnValue.isVolatile();

    if (!DestPtr) {
      DestPtr = CreateMemTemp(RetTy, "coerce");
      DestIsVolatile = false;
    }

    // If the value is offset in memory, apply the offset now.
    llvm::Value *StorePtr = DestPtr;
    if (unsigned Offs = RetAI.getDirectOffset()) {
      StorePtr = Builder.CreateBitCast(StorePtr, Builder.getInt8PtrTy());
      StorePtr = Builder.CreateConstGEP1_32(StorePtr, Offs);
      StorePtr = Builder.CreateBitCast(StorePtr,
                     llvm::PointerType::getUnqual(RetAI.getCoerceToType()));
    }
    CreateCoercedStore(CI, StorePtr, DestIsVolatile, *this);

    unsigned Alignment = getContext().getTypeAlignInChars(RetTy).getQuantity();
    if (RetTy->isAnyComplexType())
      return RValue::getComplex(LoadComplexFromAddr(DestPtr, false));
@@ -442,7 +442,7 @@ public:
  llvm::BasicBlock *getBlock() const { return Block; }
  EHScopeStack::stable_iterator getScopeDepth() const { return ScopeDepth; }
  unsigned getDestIndex() const { return Index; }

private:
  llvm::BasicBlock *Block;
  EHScopeStack::stable_iterator ScopeDepth;

@@ -507,11 +507,11 @@ public:

  bool Exceptions;
  bool CatchUndefined;

  /// \brief A mapping from NRVO variables to the flags used to indicate
  /// when the NRVO has been applied to this variable.
  llvm::DenseMap<const VarDecl *, llvm::Value *> NRVOFlags;

  /// \brief A mapping from 'Save' expression in a conditional expression
  /// to the IR for this expression. Used to implement IR gen. for Gnu
  /// extension's missing LHS expression in a conditional operator expression.

@@ -590,8 +590,8 @@ public:

public:
  /// \brief Enter a new cleanup scope.
  explicit RunCleanupsScope(CodeGenFunction &CGF)
    : CGF(CGF), PerformCleanup(true)
  {
    CleanupStackDepth = CGF.EHStack.stable_begin();
    OldDidCallStackSave = CGF.DidCallStackSave;

@@ -676,7 +676,7 @@ public:
  void EndConditionalBranch() {
    assert(ConditionalBranchLevel != 0 &&
           "Conditional branch mismatch!");

    --ConditionalBranchLevel;
  }

@@ -745,7 +745,7 @@ private:
  /// VTT parameter.
  ImplicitParamDecl *CXXVTTDecl;
  llvm::Value *CXXVTTValue;

  /// ConditionalBranchLevel - Contains the nesting level of the current
  /// conditional branch. This is used so that we know if a temporary should be
  /// destroyed conditionally.

@@ -754,9 +754,9 @@ private:

  /// ByrefValueInfoMap - For each __block variable, contains a pair of the LLVM
  /// type as well as the field number that contains the actual data.
  llvm::DenseMap<const ValueDecl *, std::pair<const llvm::Type *,
                                              unsigned> > ByRefValueInfo;

  /// getByrefValueFieldNumber - Given a declaration, returns the LLVM field
  /// number that holds the value.
  unsigned getByRefValueLLVMField(const ValueDecl *VD) const;

@@ -864,21 +864,21 @@ public:

  /// GenerateThunk - Generate a thunk for the given method.
  void GenerateThunk(llvm::Function *Fn, GlobalDecl GD, const ThunkInfo &Thunk);

  void EmitCtorPrologue(const CXXConstructorDecl *CD, CXXCtorType Type,
                        FunctionArgList &Args);

  /// InitializeVTablePointer - Initialize the vtable pointer of the given
  /// subobject.
  ///
  void InitializeVTablePointer(BaseSubobject Base,
                               const CXXRecordDecl *NearestVBase,
                               uint64_t OffsetFromNearestVBase,
                               llvm::Constant *VTable,
                               const CXXRecordDecl *VTableClass);

  typedef llvm::SmallPtrSet<const CXXRecordDecl *, 4> VisitedVirtualBasesSetTy;
  void InitializeVTablePointers(BaseSubobject Base,
                                const CXXRecordDecl *NearestVBase,
                                uint64_t OffsetFromNearestVBase,
                                bool BaseIsNonVirtualPrimaryBase,
@@ -1145,7 +1145,7 @@ public:

  /// GetAddressOfBaseClass - This function will add the necessary delta to the
  /// load of 'this' and returns address of the base class.
  llvm::Value *GetAddressOfBaseClass(llvm::Value *Value,
                                     const CXXRecordDecl *Derived,
                                     CastExpr::path_const_iterator PathBegin,
                                     CastExpr::path_const_iterator PathEnd,

@@ -1160,7 +1160,7 @@ public:
  llvm::Value *GetVirtualBaseClassOffset(llvm::Value *This,
                                         const CXXRecordDecl *ClassDecl,
                                         const CXXRecordDecl *BaseClassDecl);

  void EmitDelegateCXXConstructorCall(const CXXConstructorDecl *Ctor,
                                      CXXCtorType CtorType,
                                      const FunctionArgList &Args);

@@ -1175,7 +1175,7 @@ public:
                              CallExpr::const_arg_iterator ArgBeg,
                              CallExpr::const_arg_iterator ArgEnd,
                              bool ZeroInitialization = false);

  void EmitCXXAggrConstructorCall(const CXXConstructorDecl *D,
                                  llvm::Value *NumElements,
                                  llvm::Value *ArrayPtr,

@@ -1197,7 +1197,7 @@ public:

  void EmitCXXDestructorCall(const CXXDestructorDecl *D, CXXDtorType Type,
                             bool ForVirtualBase, llvm::Value *This);

  void EmitNewArrayInitializer(const CXXNewExpr *E, llvm::Value *NewPtr,
                               llvm::Value *NumElements);

@@ -1303,7 +1303,7 @@ public:
  void ExitCXXTryStmt(const CXXTryStmt &S, bool IsFnTryBlock = false);

  void EmitCXXTryStmt(const CXXTryStmt &S);

  //===--------------------------------------------------------------------===//
  //                         LValue Expression Emission
  //===--------------------------------------------------------------------===//

@@ -1408,7 +1408,7 @@ public:
  LValue EmitConditionalOperatorLValue(const ConditionalOperator *E);
  LValue EmitCastLValue(const CastExpr *E);
  LValue EmitNullInitializationLValue(const CXXScalarValueInitExpr *E);

  llvm::Value *EmitIvarOffset(const ObjCInterfaceDecl *Interface,
                              const ObjCIvarDecl *Ivar);
  LValue EmitLValueForAnonRecordField(llvm::Value* Base,

@@ -1416,14 +1416,14 @@ public:
                                      unsigned CVRQualifiers);
  LValue EmitLValueForField(llvm::Value* Base, const FieldDecl* Field,
                            unsigned CVRQualifiers);

  /// EmitLValueForFieldInitialization - Like EmitLValueForField, except that
  /// if the Field is a reference, this will return the address of the reference
  /// and not the address of the value stored in the reference.
  LValue EmitLValueForFieldInitialization(llvm::Value* Base,
                                          const FieldDecl* Field,
                                          unsigned CVRQualifiers);

  LValue EmitLValueForIvar(QualType ObjectTy,
                           llvm::Value* Base, const ObjCIvarDecl *Ivar,
                           unsigned CVRQualifiers);

@@ -1437,7 +1437,7 @@ public:
  LValue EmitCXXBindTemporaryLValue(const CXXBindTemporaryExpr *E);
  LValue EmitCXXExprWithTemporariesLValue(const CXXExprWithTemporaries *E);
  LValue EmitCXXTypeidLValue(const CXXTypeidExpr *E);

  LValue EmitObjCMessageExprLValue(const ObjCMessageExpr *E);
  LValue EmitObjCIvarRefLValue(const ObjCIvarRefExpr *E);
  LValue EmitObjCPropertyRefLValue(const ObjCPropertyRefExpr *E);

@@ -1468,7 +1468,7 @@ public:
                  CallExpr::const_arg_iterator ArgBeg,
                  CallExpr::const_arg_iterator ArgEnd,
                  const Decl *TargetDecl = 0);
  RValue EmitCallExpr(const CallExpr *E,
                      ReturnValueSlot ReturnValue = ReturnValueSlot());

  llvm::CallSite EmitCallOrInvoke(llvm::Value *Callee,

@@ -1478,7 +1478,7 @@ public:

  llvm::Value *BuildVirtualCall(const CXXMethodDecl *MD, llvm::Value *This,
                                const llvm::Type *Ty);
  llvm::Value *BuildVirtualCall(const CXXDestructorDecl *DD, CXXDtorType Type,
                                llvm::Value *&This, const llvm::Type *Ty);

  RValue EmitCXXMemberCall(const CXXMethodDecl *MD,

@@ -1497,7 +1497,7 @@ public:
                           const CXXMethodDecl *MD,
                           ReturnValueSlot ReturnValue);


  RValue EmitBuiltinExpr(const FunctionDecl *FD,
                         unsigned BuiltinID, const CallExpr *E);

@@ -1508,7 +1508,7 @@ public:
  llvm::Value *EmitTargetBuiltinExpr(unsigned BuiltinID, const CallExpr *E);

  llvm::Value *EmitARMBuiltinExpr(unsigned BuiltinID, const CallExpr *E);
  llvm::Value *EmitNeonCall(llvm::Function *F,
                            llvm::SmallVectorImpl<llvm::Value*> &O,
                            const char *name, bool splat = false,
                            unsigned shift = 0, bool rightshift = false);

@@ -1516,7 +1516,7 @@ public:
                                 bool widen = false);
  llvm::Value *EmitNeonShiftVector(llvm::Value *V, const llvm::Type *Ty,
                                   bool negateForRightShift);

  llvm::Value *BuildVector(const llvm::SmallVectorImpl<llvm::Value*> &Ops);
  llvm::Value *EmitX86BuiltinExpr(unsigned BuiltinID, const CallExpr *E);
  llvm::Value *EmitPPCBuiltinExpr(unsigned BuiltinID, const CallExpr *E);
@@ -1536,7 +1536,7 @@ public:

  /// EmitReferenceBindingToExpr - Emits a reference binding to the passed in
  /// expression. Will emit a temporary variable if E is not an LValue.
  RValue EmitReferenceBindingToExpr(const Expr* E,
                                    const NamedDecl *InitializedDecl);

  //===--------------------------------------------------------------------===//

@@ -1598,7 +1598,7 @@ public:
  llvm::GlobalVariable *CreateStaticVarDecl(const VarDecl &D,
                                            const char *Separator,
                                            llvm::GlobalValue::LinkageTypes Linkage);

  /// AddInitializerToStaticVarDecl - Add the initializer for 'D' to the
  /// global variable that has already been created for it. If the initializer
  /// has a different type than GV does, this may free GV and return a different

@@ -1606,7 +1606,7 @@ public:
  llvm::GlobalVariable *
  AddInitializerToStaticVarDecl(const VarDecl &D,
                                llvm::GlobalVariable *GV);


  /// EmitCXXGlobalVarDeclInit - Create the initializer for a C++
  /// variable with global storage.

@@ -1665,7 +1665,7 @@ public:
  /// getTrapBB - Create a basic block that will call the trap intrinsic. We'll
  /// generate a branch around the created basic block as necessary.
  llvm::BasicBlock *getTrapBB();

  /// EmitCallArg - Emit a single call argument.
  RValue EmitCallArg(const Expr *E, QualType ArgType);

@@ -1720,9 +1720,9 @@ private:
#ifndef NDEBUG
    QualType ActualArgType = Arg->getType();
    if (ArgType->isPointerType() && ActualArgType->isPointerType()) {
      QualType ActualBaseType =
        ActualArgType->getAs<PointerType>()->getPointeeType();
      QualType ArgBaseType =
        ArgType->getAs<PointerType>()->getPointeeType();
      if (ArgBaseType->isVariableArrayType()) {
        if (const VariableArrayType *VAT =

@@ -1768,31 +1768,31 @@ class CGBlockInfo {
public:
  /// Name - The name of the block, kindof.
  const char *Name;

  /// DeclRefs - Variables from parent scopes that have been
  /// imported into this block.
  llvm::SmallVector<const BlockDeclRefExpr *, 8> DeclRefs;

  /// InnerBlocks - This block and the blocks it encloses.
  llvm::SmallPtrSet<const DeclContext *, 4> InnerBlocks;

  /// CXXThisRef - Non-null if 'this' was required somewhere, in
  /// which case this is that expression.
  const CXXThisExpr *CXXThisRef;

  /// NeedsObjCSelf - True if something in this block has an implicit
  /// reference to 'self'.
  bool NeedsObjCSelf;

  /// These are initialized by GenerateBlockFunction.
  bool BlockHasCopyDispose;
  CharUnits BlockSize;
  CharUnits BlockAlign;
  llvm::SmallVector<const Expr*, 8> BlockLayout;

  CGBlockInfo(const char *Name);
};

} // end namespace CodeGen
} // end namespace clang
@@ -95,7 +95,7 @@ private:
  /// is available only for ConvertType(). CovertType() is preferred
  /// interface to convert type T into a llvm::Type.
  const llvm::Type *ConvertNewType(QualType T);

  /// HandleLateResolvedPointers - For top-level ConvertType calls, this handles
  /// pointers that are referenced but have not been converted yet. This is
  /// used to handle cyclic structures properly.

@@ -139,10 +139,10 @@ public:
  static const TagType *VerifyFuncTypeComplete(const Type* T);

  /// GetFunctionTypeForVTable - Get the LLVM function type for use in a vtable,
  /// given a CXXMethodDecl. If the method to has an incomplete return type,
  /// and/or incomplete argument types, this will return the opaque type.
  const llvm::Type *GetFunctionTypeForVTable(GlobalDecl GD);

  const CGRecordLayout &getCGRecordLayout(const RecordDecl*) const;

  /// UpdateCompletedType - When we find the full definition for a TagDecl,

@@ -151,7 +151,7 @@ public:

  /// getFunctionInfo - Get the function info for the specified function decl.
  const CGFunctionInfo &getFunctionInfo(GlobalDecl GD);

  const CGFunctionInfo &getFunctionInfo(const FunctionDecl *FD);
  const CGFunctionInfo &getFunctionInfo(const CXXMethodDecl *MD);
  const CGFunctionInfo &getFunctionInfo(const ObjCMethodDecl *MD);

@@ -176,7 +176,7 @@ public:
  /// pointers.
  const CGFunctionInfo &getFunctionInfo(const CXXRecordDecl *RD,
                                        const FunctionProtoType *FTP);

  /// getFunctionInfo - Get the function info for a function described by a
  /// return type and argument types. If the calling convention is not
  /// specified, the "C" calling convention will be used.

@@ -188,7 +188,7 @@ public:
                                        const FunctionType::ExtInfo &Info);

  /// Retrieves the ABI information for the given function signature.
  ///
  /// \param ArgTys - must all actually be canonical as params
  const CGFunctionInfo &getFunctionInfo(CanQualType RetTy,
                    const llvm::SmallVectorImpl<CanQualType> &ArgTys,

@@ -208,11 +208,11 @@ public: // These are internal details of CGT that shouldn't be used externally.
  /// ArgTys. See ABIArgInfo::Expand.
  void GetExpandedTypes(QualType Ty, std::vector<const llvm::Type*> &ArgTys,
                        bool IsRecursive);

  /// IsZeroInitializable - Return whether a type can be
  /// zero-initialized (in the C++ sense) with an LLVM zeroinitializer.
  bool isZeroInitializable(QualType T);

  /// IsZeroInitializable - Return whether a record type can be
  /// zero-initialized (in the C++ sense) with an LLVM zeroinitializer.
  bool isZeroInitializable(const CXXRecordDecl *RD);
@@ -675,11 +675,11 @@ ABIArgInfo X86_32ABIInfo::classifyArgumentType(QualType Ty) const {
      AAI.setCoerceToType(llvm::Type::getX86_MMXTy(getVMContext()));
      return AAI;
    }

    return ABIArgInfo::getDirect();
  }


  if (const EnumType *EnumTy = Ty->getAs<EnumType>())
    Ty = EnumTy->getDecl()->getIntegerType();

@@ -894,7 +894,7 @@ public:

  const llvm::IntegerType *i8 = llvm::Type::getInt8Ty(Context);
  llvm::Value *Eight8 = llvm::ConstantInt::get(i8, 8);

  // 0-15 are the 16 integer registers.
  // 16 is %rip.
  AssignToArrayRange(Builder, Address, Eight8, 0, 16);

@@ -1520,7 +1520,7 @@ GetX86_64ByValArgumentPair(const llvm::Type *Lo, const llvm::Type *Hi,
  unsigned HiAlign = TD.getABITypeAlignment(Hi);
  unsigned HiStart = llvm::TargetData::RoundUpAlignment(LoSize, HiAlign);
  assert(HiStart != 0 && HiStart <= 8 && "Invalid x86-64 argument pair!");

  // To handle this, we have to increase the size of the low part so that the
  // second element will start at an 8 byte offset. We can't increase the size
  // of the second element because it might make us access off the end of the
@@ -1536,11 +1536,11 @@ GetX86_64ByValArgumentPair(const llvm::Type *Lo, const llvm::Type *Hi,
      Lo = llvm::Type::getInt64Ty(Lo->getContext());
    }
  }

  const llvm::StructType *Result =
    llvm::StructType::get(Lo->getContext(), Lo, Hi, NULL);


  // Verify that the second element is at an 8-byte offset.
  assert(TD.getStructLayout(Result)->getElementOffset(1) == 8 &&
         "Invalid x86-64 argument pair!");
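
The arithmetic behind HiStart and the trailing assert is ordinary
round-up-to-alignment. A self-contained sketch of the same computation (my
own helper, not the llvm::TargetData API, though it should match
RoundUpAlignment's behavior for power-of-two alignments):

#include <cassert>
#include <cstdint>

// Round value up to the next multiple of align (align a power of two).
uint64_t roundUpAlignment(uint64_t value, uint64_t align) {
  return (value + align - 1) & ~(align - 1);
}

int main() {
  // Low part occupies 1 byte, high part wants 8-byte alignment: the high
  // part already starts at offset 8, satisfying the assert above.
  assert(roundUpAlignment(1, 8) == 8);
  // If HiStart came out as 4 rather than 8, the surrounding code widens the
  // low part to i64 so the second element lands at offset 8 instead.
  assert(roundUpAlignment(4, 4) == 4);
  return 0;
}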
@@ -1669,7 +1669,7 @@ classifyReturnType(QualType RetTy) const {
    }
    break;
  }

  // If a high part was specified, merge it together with the low part. It is
  // known to pass in the high eightbyte of the result. We do this by forming a
  // first class struct aggregate with the high and low part: {low, high}

@@ -1803,7 +1803,7 @@ ABIArgInfo X86_64ABIInfo::classifyArgumentType(QualType Ty, unsigned &neededInt,
  // first class struct aggregate with the high and low part: {low, high}
  if (HighPart)
    ResType = GetX86_64ByValArgumentPair(ResType, HighPart, getTargetData());

  return ABIArgInfo::getDirect(ResType);
}