[llvm] Move data layout string computation to TargetParser (#157612)

Clang and other frontends generally need the LLVM data layout string in
order to generate LLVM IR modules. MLIR clients often need it as well,
since they commonly lower to LLVM IR.

Before this change, the LLVM data layout string was computed in the
LLVM${TGT}CodeGen library by the relevant TargetMachine subclass.
However, none of the logic for computing the data layout string requires
any details of code generation. Clients who wanted to avoid duplicating
this information were forced to link in LLVMCodeGen and all registered
targets, leading to bloated binaries. This happened in PR #145899,
which measurably increased binary size for some of our users.

By moving this information to the TargetParser library, we can delete
the duplicate data layout strings in Clang and retain the ability to
generate IR for unregistered targets.

This is intended to be a very mechanical, LLVM-only change, but there is
an obvious follow-up in Clang, which will be prepared separately.

The vast majority of data layouts are computable from two inputs: the
triple and the "ABI name". The only exception is NVPTX, which has a
cl::opt to enable short device pointers, so I invented a "shortptr" ABI
name to pass this option through the target-independent interface.
Everything else fits. Mips is a bit awkward because it uses a special
MipsABIInfo abstraction whose members involve codegen concepts, such as
ABI physical registers, that can't live in TargetParser. I think the
string logic of looking for "n32", "n64", etc. is reasonable to
duplicate; we have plenty of other minor duplication to preserve
layering.
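
As a sketch of the new interface (hypothetical client code, not part of
this change), a frontend can now build a DataLayout from the triple
alone, without linking LLVMCodeGen or any registered backends:

    #include "llvm/IR/DataLayout.h"
    #include "llvm/TargetParser/Triple.h"

    // ABIName defaults to "" and only affects targets whose layout
    // varies by ABI (ARM, Mips, RISC-V, and the NVPTX special case).
    llvm::Triple TT("riscv64-unknown-linux-gnu");
    llvm::DataLayout DL(TT.computeDataLayout(/*ABIName=*/"lp64e"));

    // NVPTX reuses the ABI name slot for its short-pointer mode:
    llvm::Triple NV("nvptx64-nvidia-cuda");
    llvm::DataLayout NVDL(NV.computeDataLayout("shortptr"));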

---------

Co-authored-by: Matt Arsenault <arsenm2@gmail.com>
Co-authored-by: Sergei Barannikov <barannikov88@gmail.com>
Author: Reid Kleckner
Date: 2025-09-11 11:05:29 -07:00
Committed by: GitHub
Commit: f3efbce4a7 (parent: b812e3d61a)
38 changed files with 712 additions and 613 deletions


@@ -54,10 +54,7 @@ static std::vector<std::string> readSymbolsFromFile(StringRef InputFile) {
// Hackily figure out if there's a prefix on the symbol names - llvm-nm
// appears to not have a flag to skip this.
llvm::Triple HostTriple(LLVM_HOST_TRIPLE);
std::string DummyDatalayout = "e";
DummyDatalayout += DataLayout::getManglingComponent(HostTriple);
DataLayout DL(DummyDatalayout);
DataLayout DL(HostTriple.computeDataLayout());
char GlobalPrefix = DL.getGlobalPrefix();
std::vector<std::string> Lines;


@@ -303,8 +303,6 @@ public:
llvm_unreachable("invalid mangling mode");
}
LLVM_ABI static const char *getManglingComponent(const Triple &T);
/// Returns true if the specified type fits in a native integer type
/// supported by the CPU.
///


@@ -1328,6 +1328,10 @@ public:
const VersionTuple &Version);
LLVM_ABI ExceptionHandling getDefaultExceptionHandling() const;
/// Compute the LLVM IR data layout string based on the triple. Some targets
/// customize the layout based on the ABIName string.
LLVM_ABI std::string computeDataLayout(StringRef ABIName = "") const;
};
} // End llvm namespace


@@ -172,18 +172,6 @@ struct LessPointerAddrSpace {
};
} // namespace
const char *DataLayout::getManglingComponent(const Triple &T) {
if (T.isOSBinFormatGOFF())
return "-m:l";
if (T.isOSBinFormatMachO())
return "-m:o";
if ((T.isOSWindows() || T.isUEFI()) && T.isOSBinFormatCOFF())
return T.getArch() == Triple::x86 ? "-m:x" : "-m:w";
if (T.isOSBinFormatXCOFF())
return "-m:a";
return "-m:e";
}
// Default primitive type specifications.
// NOTE: These arrays must be sorted by type bit width.
constexpr DataLayout::PrimitiveSpec DefaultIntSpecs[] = {


@@ -295,27 +295,6 @@ static std::unique_ptr<TargetLoweringObjectFile> createTLOF(const Triple &TT) {
return std::make_unique<AArch64_ELFTargetObjectFile>();
}
// Helper function to build a DataLayout string
static std::string computeDataLayout(const Triple &TT,
const MCTargetOptions &Options,
bool LittleEndian) {
if (TT.isOSBinFormatMachO()) {
if (TT.getArch() == Triple::aarch64_32)
return "e-m:o-p:32:32-p270:32:32-p271:32:32-p272:64:64-i64:64-i128:128-"
"n32:64-S128-Fn32";
return "e-m:o-p270:32:32-p271:32:32-p272:64:64-i64:64-i128:128-n32:64-S128-"
"Fn32";
}
if (TT.isOSBinFormatCOFF())
return "e-m:w-p270:32:32-p271:32:32-p272:64:64-p:64:64-i32:32-i64:64-i128:"
"128-n32:64-S128-Fn32";
std::string Endian = LittleEndian ? "e" : "E";
std::string Ptr32 = TT.getEnvironment() == Triple::GNUILP32 ? "-p:32:32" : "";
return Endian + "-m:e" + Ptr32 +
"-p270:32:32-p271:32:32-p272:64:64-i8:8:32-i16:16:32-i64:64-i128:128-"
"n32:64-S128-Fn32";
}
static StringRef computeDefaultCPU(const Triple &TT, StringRef CPU) {
if (CPU.empty() && TT.isArm64e())
return "apple-a12";
@@ -368,11 +347,10 @@ AArch64TargetMachine::AArch64TargetMachine(const Target &T, const Triple &TT,
std::optional<CodeModel::Model> CM,
CodeGenOptLevel OL, bool JIT,
bool LittleEndian)
: CodeGenTargetMachineImpl(
T, computeDataLayout(TT, Options.MCOptions, LittleEndian), TT,
computeDefaultCPU(TT, CPU), FS, Options,
getEffectiveRelocModel(TT, RM),
getEffectiveAArch64CodeModel(TT, CM, JIT), OL),
: CodeGenTargetMachineImpl(T, TT.computeDataLayout(), TT,
computeDefaultCPU(TT, CPU), FS, Options,
getEffectiveRelocModel(TT, RM),
getEffectiveAArch64CodeModel(TT, CM, JIT), OL),
TLOF(createTLOF(getTargetTriple())), isLittle(LittleEndian),
UseNewSMEABILowering(EnableNewSMEABILowering) {
initAsmInfo();


@@ -720,25 +720,6 @@ static MachineSchedRegistry GCNILPSchedRegistry(
"Run GCN iterative scheduler for ILP scheduling (experimental)",
createIterativeILPMachineScheduler);
static StringRef computeDataLayout(const Triple &TT) {
if (TT.getArch() == Triple::r600) {
// 32-bit pointers.
return "e-p:32:32-i64:64-v16:16-v24:32-v32:32-v48:64-v96:128"
"-v192:256-v256:256-v512:512-v1024:1024-v2048:2048-n32:64-S32-A5-G1";
}
// 32-bit private, local, and region pointers. 64-bit global, constant and
// flat. 160-bit non-integral fat buffer pointers that include a 128-bit
// buffer descriptor and a 32-bit offset, which are indexed by 32-bit values
// (address space 7), and 128-bit non-integral buffer resources (address
// space 8) which cannot be non-trivially accessed by LLVM memory operations
// like getelementptr.
return "e-p:64:64-p1:64:64-p2:32:32-p3:32:32-p4:64:64-p5:32:32-p6:32:32"
"-p7:160:256:256:32-p8:128:128:128:48-p9:192:256:256:32-i64:64-"
"v16:16-v24:32-v32:32-v48:64-v96:128-v192:256-v256:256-v512:512-"
"v1024:1024-v2048:2048-n32:64-S32-A5-G1-ni:7:8:9";
}
LLVM_READNONE
static StringRef getGPUOrDefault(const Triple &TT, StringRef GPU) {
if (!GPU.empty())
@@ -764,7 +745,7 @@ AMDGPUTargetMachine::AMDGPUTargetMachine(const Target &T, const Triple &TT,
std::optional<CodeModel::Model> CM,
CodeGenOptLevel OptLevel)
: CodeGenTargetMachineImpl(
T, computeDataLayout(TT), TT, getGPUOrDefault(TT, CPU), FS, Options,
T, TT.computeDataLayout(), TT, getGPUOrDefault(TT, CPU), FS, Options,
getEffectiveRelocModel(RM),
getEffectiveCodeModel(CM, CodeModel::Small), OptLevel),
TLOF(createTLOF(getTargetTriple())) {


@@ -33,12 +33,9 @@ ARCTargetMachine::ARCTargetMachine(const Target &T, const Triple &TT,
std::optional<Reloc::Model> RM,
std::optional<CodeModel::Model> CM,
CodeGenOptLevel OL, bool JIT)
: CodeGenTargetMachineImpl(
T,
"e-m:e-p:32:32-i1:8:32-i8:8:32-i16:16:32-i32:32:32-"
"f32:32:32-i64:32-f64:32-a:0:32-n32",
TT, CPU, FS, Options, getRelocModel(RM),
getEffectiveCodeModel(CM, CodeModel::Small), OL),
: CodeGenTargetMachineImpl(T, TT.computeDataLayout(), TT, CPU, FS, Options,
getRelocModel(RM),
getEffectiveCodeModel(CM, CodeModel::Small), OL),
TLOF(std::make_unique<TargetLoweringObjectFileELF>()),
Subtarget(TT, std::string(CPU), std::string(FS), *this) {
initAsmInfo();


@@ -121,62 +121,6 @@ static std::unique_ptr<TargetLoweringObjectFile> createTLOF(const Triple &TT) {
return std::make_unique<ARMElfTargetObjectFile>();
}
static std::string computeDataLayout(const Triple &TT,
const TargetOptions &Options,
bool isLittle) {
auto ABI = ARM::computeTargetABI(TT, Options.MCOptions.ABIName);
std::string Ret;
if (isLittle)
// Little endian.
Ret += "e";
else
// Big endian.
Ret += "E";
Ret += DataLayout::getManglingComponent(TT);
// Pointers are 32 bits and aligned to 32 bits.
Ret += "-p:32:32";
// Function pointers are aligned to 8 bits (because the LSB stores the
// ARM/Thumb state).
Ret += "-Fi8";
// ABIs other than APCS have 64 bit integers with natural alignment.
if (ABI != ARM::ARM_ABI_APCS)
Ret += "-i64:64";
// We have 64 bits floats. The APCS ABI requires them to be aligned to 32
// bits, others to 64 bits. We always try to align to 64 bits.
if (ABI == ARM::ARM_ABI_APCS)
Ret += "-f64:32:64";
// We have 128 and 64 bit vectors. The APCS ABI aligns them to 32 bits, others
// to 64. We always try to give them natural alignment.
if (ABI == ARM::ARM_ABI_APCS)
Ret += "-v64:32:64-v128:32:128";
else if (ABI != ARM::ARM_ABI_AAPCS16)
Ret += "-v128:64:128";
// Try to align aggregates to 32 bits (the default is 64 bits, which has no
// particular hardware support on 32-bit ARM).
Ret += "-a:0:32";
// Integer registers are 32 bits.
Ret += "-n32";
// The stack is 64 bit aligned on AAPCS and 32 bit aligned everywhere else.
if (ABI == ARM::ARM_ABI_AAPCS16)
Ret += "-S128";
else if (ABI == ARM::ARM_ABI_AAPCS)
Ret += "-S64";
else
Ret += "-S32";
return Ret;
}
static Reloc::Model getEffectiveRelocModel(const Triple &TT,
std::optional<Reloc::Model> RM) {
if (!RM)
@@ -201,12 +145,13 @@ ARMBaseTargetMachine::ARMBaseTargetMachine(const Target &T, const Triple &TT,
const TargetOptions &Options,
std::optional<Reloc::Model> RM,
std::optional<CodeModel::Model> CM,
CodeGenOptLevel OL, bool isLittle)
: CodeGenTargetMachineImpl(T, computeDataLayout(TT, Options, isLittle), TT,
CPU, FS, Options, getEffectiveRelocModel(TT, RM),
getEffectiveCodeModel(CM, CodeModel::Small), OL),
CodeGenOptLevel OL)
: CodeGenTargetMachineImpl(
T, TT.computeDataLayout(Options.MCOptions.ABIName), TT, CPU, FS,
Options, getEffectiveRelocModel(TT, RM),
getEffectiveCodeModel(CM, CodeModel::Small), OL),
TargetABI(ARM::computeTargetABI(TT, Options.MCOptions.ABIName)),
TLOF(createTLOF(getTargetTriple())), isLittle(isLittle) {
TLOF(createTLOF(getTargetTriple())), isLittle(TT.isLittleEndian()) {
// Default to triple-appropriate float ABI
if (Options.FloatABIType == FloatABI::Default) {
@@ -334,7 +279,7 @@ ARMLETargetMachine::ARMLETargetMachine(const Target &T, const Triple &TT,
std::optional<Reloc::Model> RM,
std::optional<CodeModel::Model> CM,
CodeGenOptLevel OL, bool JIT)
: ARMBaseTargetMachine(T, TT, CPU, FS, Options, RM, CM, OL, true) {}
: ARMBaseTargetMachine(T, TT, CPU, FS, Options, RM, CM, OL) {}
ARMBETargetMachine::ARMBETargetMachine(const Target &T, const Triple &TT,
StringRef CPU, StringRef FS,
@@ -342,7 +287,7 @@ ARMBETargetMachine::ARMBETargetMachine(const Target &T, const Triple &TT,
std::optional<Reloc::Model> RM,
std::optional<CodeModel::Model> CM,
CodeGenOptLevel OL, bool JIT)
: ARMBaseTargetMachine(T, TT, CPU, FS, Options, RM, CM, OL, false) {}
: ARMBaseTargetMachine(T, TT, CPU, FS, Options, RM, CM, OL) {}
namespace {


@@ -42,8 +42,7 @@ public:
ARMBaseTargetMachine(const Target &T, const Triple &TT, StringRef CPU,
StringRef FS, const TargetOptions &Options,
std::optional<Reloc::Model> RM,
std::optional<CodeModel::Model> CM, CodeGenOptLevel OL,
bool isLittle);
std::optional<CodeModel::Model> CM, CodeGenOptLevel OL);
~ARMBaseTargetMachine() override;
const ARMSubtarget *getSubtargetImpl(const Function &F) const override;


@@ -28,9 +28,6 @@
namespace llvm {
static const char *AVRDataLayout =
"e-P1-p:16:8-i8:8-i16:8-i32:8-i64:8-f32:8-f64:8-n8:16-a:8";
/// Processes a CPU name.
static StringRef getCPU(StringRef CPU) {
if (CPU.empty() || CPU == "generic") {
@@ -50,8 +47,8 @@ AVRTargetMachine::AVRTargetMachine(const Target &T, const Triple &TT,
std::optional<Reloc::Model> RM,
std::optional<CodeModel::Model> CM,
CodeGenOptLevel OL, bool JIT)
: CodeGenTargetMachineImpl(T, AVRDataLayout, TT, getCPU(CPU), FS, Options,
getEffectiveRelocModel(RM),
: CodeGenTargetMachineImpl(T, TT.computeDataLayout(), TT, getCPU(CPU), FS,
Options, getEffectiveRelocModel(RM),
getEffectiveCodeModel(CM, CodeModel::Small), OL),
SubTarget(TT, std::string(getCPU(CPU)), std::string(FS), *this) {
this->TLOF = std::make_unique<AVRTargetObjectFile>();


@@ -59,14 +59,6 @@ extern "C" LLVM_ABI LLVM_EXTERNAL_VISIBILITY void LLVMInitializeBPFTarget() {
initializeBPFMIPreEmitCheckingPass(PR);
}
// DataLayout: little or big endian
static std::string computeDataLayout(const Triple &TT) {
if (TT.getArch() == Triple::bpfeb)
return "E-m:e-p:64:64-i64:64-i128:128-n32:64-S128";
else
return "e-m:e-p:64:64-i64:64-i128:128-n32:64-S128";
}
static Reloc::Model getEffectiveRelocModel(std::optional<Reloc::Model> RM) {
return RM.value_or(Reloc::PIC_);
}
@@ -77,7 +69,7 @@ BPFTargetMachine::BPFTargetMachine(const Target &T, const Triple &TT,
std::optional<Reloc::Model> RM,
std::optional<CodeModel::Model> CM,
CodeGenOptLevel OL, bool JIT)
: CodeGenTargetMachineImpl(T, computeDataLayout(TT), TT, CPU, FS, Options,
: CodeGenTargetMachineImpl(T, TT.computeDataLayout(), TT, CPU, FS, Options,
getEffectiveRelocModel(RM),
getEffectiveCodeModel(CM, CodeModel::Small), OL),
TLOF(std::make_unique<TargetLoweringObjectFileELF>()),


@@ -33,28 +33,13 @@ extern "C" LLVM_EXTERNAL_VISIBILITY void LLVMInitializeCSKYTarget() {
initializeCSKYDAGToDAGISelLegacyPass(*Registry);
}
static std::string computeDataLayout(const Triple &TT) {
std::string Ret;
// Only support little endian for now.
// TODO: Add support for big endian.
Ret += "e";
// CSKY is always a 32-bit target, with the CSKYv2 ABI preferred for now.
// It's a 4-byte aligned stack with ELF mangling only.
Ret += "-m:e-S32-p:32:32-i32:32:32-i64:32:32-f32:32:32-f64:32:32-v64:32:32"
"-v128:32:32-a:0:32-Fi32-n32";
return Ret;
}
CSKYTargetMachine::CSKYTargetMachine(const Target &T, const Triple &TT,
StringRef CPU, StringRef FS,
const TargetOptions &Options,
std::optional<Reloc::Model> RM,
std::optional<CodeModel::Model> CM,
CodeGenOptLevel OL, bool JIT)
: CodeGenTargetMachineImpl(T, computeDataLayout(TT), TT, CPU, FS, Options,
: CodeGenTargetMachineImpl(T, TT.computeDataLayout(), TT, CPU, FS, Options,
RM.value_or(Reloc::Static),
getEffectiveCodeModel(CM, CodeModel::Small), OL),
TLOF(std::make_unique<CSKYELFTargetObjectFile>()) {


@@ -134,11 +134,8 @@ DirectXTargetMachine::DirectXTargetMachine(const Target &T, const Triple &TT,
std::optional<Reloc::Model> RM,
std::optional<CodeModel::Model> CM,
CodeGenOptLevel OL, bool JIT)
: CodeGenTargetMachineImpl(
T,
"e-m:e-p:32:32-i1:32-i8:8-i16:16-i32:32-i64:64-f16:16-"
"f32:32-f64:64-n8:16:32:64",
TT, CPU, FS, Options, Reloc::Static, CodeModel::Small, OL),
: CodeGenTargetMachineImpl(T, TT.computeDataLayout(), TT, CPU, FS, Options,
Reloc::Static, CodeModel::Small, OL),
TLOF(std::make_unique<DXILTargetObjectFile>()),
Subtarget(std::make_unique<DirectXSubtarget>(TT, CPU, FS, *this)) {
initAsmInfo();


@@ -231,14 +231,10 @@ HexagonTargetMachine::HexagonTargetMachine(const Target &T, const Triple &TT,
// Specify the vector alignment explicitly. For v512x1, the calculated
// alignment would be 512*alignment(i1), which is 512 bytes, instead of
// the required minimum of 64 bytes.
: CodeGenTargetMachineImpl(
T,
"e-m:e-p:32:32:32-a:0-n16:32-"
"i64:64:64-i32:32:32-i16:16:16-i1:8:8-f32:32:32-f64:64:64-"
"v32:32:32-v64:64:64-v512:512:512-v1024:1024:1024-v2048:2048:2048",
TT, CPU, FS, Options, getEffectiveRelocModel(RM),
getEffectiveCodeModel(CM, CodeModel::Small),
(HexagonNoOpt ? CodeGenOptLevel::None : OL)),
: CodeGenTargetMachineImpl(T, TT.computeDataLayout(), TT, CPU, FS, Options,
getEffectiveRelocModel(RM),
getEffectiveCodeModel(CM, CodeModel::Small),
(HexagonNoOpt ? CodeGenOptLevel::None : OL)),
TLOF(std::make_unique<HexagonTargetObjectFile>()),
Subtarget(Triple(TT), CPU, FS, *this) {
initAsmInfo();


@@ -37,17 +37,6 @@ extern "C" LLVM_ABI LLVM_EXTERNAL_VISIBILITY void LLVMInitializeLanaiTarget() {
initializeLanaiMemAluCombinerPass(PR);
}
static std::string computeDataLayout() {
// Data layout (keep in sync with clang/lib/Basic/Targets.cpp)
return "E" // Big endian
"-m:e" // ELF name manging
"-p:32:32" // 32-bit pointers, 32 bit aligned
"-i64:64" // 64 bit integers, 64 bit aligned
"-a:0:32" // 32 bit alignment of objects of aggregate type
"-n32" // 32 bit native integer width
"-S64"; // 64 bit natural stack alignment
}
static Reloc::Model getEffectiveRelocModel(std::optional<Reloc::Model> RM) {
return RM.value_or(Reloc::PIC_);
}
@@ -58,7 +47,7 @@ LanaiTargetMachine::LanaiTargetMachine(
std::optional<CodeModel::Model> CodeModel, CodeGenOptLevel OptLevel,
bool JIT)
: CodeGenTargetMachineImpl(
T, computeDataLayout(), TT, Cpu, FeatureString, Options,
T, TT.computeDataLayout(), TT, Cpu, FeatureString, Options,
getEffectiveRelocModel(RM),
getEffectiveCodeModel(CodeModel, CodeModel::Medium), OptLevel),
Subtarget(TT, Cpu, FeatureString, *this, Options, getCodeModel(),


@@ -57,13 +57,6 @@ static cl::opt<bool>
cl::desc("Enable the loop data prefetch pass"),
cl::init(false));
static std::string computeDataLayout(const Triple &TT) {
if (TT.isArch64Bit())
return "e-m:e-p:64:64-i64:64-i128:128-n32:64-S128";
assert(TT.isArch32Bit() && "only LA32 and LA64 are currently supported");
return "e-m:e-p:32:32-i64:64-n32-S128";
}
static Reloc::Model getEffectiveRelocModel(const Triple &TT,
std::optional<Reloc::Model> RM) {
return RM.value_or(Reloc::Static);
@@ -93,7 +86,7 @@ LoongArchTargetMachine::LoongArchTargetMachine(
const Target &T, const Triple &TT, StringRef CPU, StringRef FS,
const TargetOptions &Options, std::optional<Reloc::Model> RM,
std::optional<CodeModel::Model> CM, CodeGenOptLevel OL, bool JIT)
: CodeGenTargetMachineImpl(T, computeDataLayout(TT), TT, CPU, FS, Options,
: CodeGenTargetMachineImpl(T, TT.computeDataLayout(), TT, CPU, FS, Options,
getEffectiveRelocModel(TT, RM),
getEffectiveLoongArchCodeModel(TT, CM), OL),
TLOF(std::make_unique<TargetLoweringObjectFileELF>()) {


@@ -46,35 +46,6 @@ extern "C" LLVM_EXTERNAL_VISIBILITY void LLVMInitializeM68kTarget() {
namespace {
std::string computeDataLayout(const Triple &TT, StringRef CPU,
const TargetOptions &Options) {
std::string Ret = "";
// M68k is Big Endian
Ret += "E";
// FIXME how to wire it with the used object format?
Ret += "-m:e";
// M68k pointers are always 32 bit wide even for 16-bit CPUs.
// The ABI only specifies 16-bit alignment.
// On at least the 68020+ with a 32-bit bus, there is a performance benefit
// to having 32-bit alignment.
Ret += "-p:32:16:32";
// Bytes do not require special alignment, words are word aligned and
// long words are word aligned at minimum.
Ret += "-i8:8:8-i16:16:16-i32:16:32";
// FIXME no floats at the moment
// The registers can hold 8, 16, 32 bits
Ret += "-n8:16:32";
Ret += "-a:0:16-S16";
return Ret;
}
Reloc::Model getEffectiveRelocModel(const Triple &TT,
std::optional<Reloc::Model> RM) {
// If not defined we default to static
@@ -101,8 +72,8 @@ M68kTargetMachine::M68kTargetMachine(const Target &T, const Triple &TT,
std::optional<Reloc::Model> RM,
std::optional<CodeModel::Model> CM,
CodeGenOptLevel OL, bool JIT)
: CodeGenTargetMachineImpl(T, computeDataLayout(TT, CPU, Options), TT, CPU,
FS, Options, getEffectiveRelocModel(TT, RM),
: CodeGenTargetMachineImpl(T, TT.computeDataLayout(), TT, CPU, FS, Options,
getEffectiveRelocModel(TT, RM),
::getEffectiveCodeModel(CM, JIT), OL),
TLOF(std::make_unique<M68kELFTargetObjectFile>()),
Subtarget(TT, CPU, FS, *this) {


@@ -34,19 +34,14 @@ static Reloc::Model getEffectiveRelocModel(std::optional<Reloc::Model> RM) {
return RM.value_or(Reloc::Static);
}
static std::string computeDataLayout(const Triple &TT, StringRef CPU,
const TargetOptions &Options) {
return "e-m:e-p:16:16-i32:16-i64:16-f32:16-f64:16-a:8-n8:16-S16";
}
MSP430TargetMachine::MSP430TargetMachine(const Target &T, const Triple &TT,
StringRef CPU, StringRef FS,
const TargetOptions &Options,
std::optional<Reloc::Model> RM,
std::optional<CodeModel::Model> CM,
CodeGenOptLevel OL, bool JIT)
: CodeGenTargetMachineImpl(T, computeDataLayout(TT, CPU, Options), TT, CPU,
FS, Options, getEffectiveRelocModel(RM),
: CodeGenTargetMachineImpl(T, TT.computeDataLayout(), TT, CPU, FS, Options,
getEffectiveRelocModel(RM),
getEffectiveCodeModel(CM, CodeModel::Small), OL),
TLOF(std::make_unique<TargetLoweringObjectFileELF>()),
Subtarget(TT, std::string(CPU), std::string(FS), *this) {


@@ -524,8 +524,8 @@ public:
MipsAsmParser(const MCSubtargetInfo &sti, MCAsmParser &parser,
const MCInstrInfo &MII, const MCTargetOptions &Options)
: MCTargetAsmParser(Options, sti, MII),
ABI(MipsABIInfo::computeTargetABI(sti.getTargetTriple(), sti.getCPU(),
Options)) {
ABI(MipsABIInfo::computeTargetABI(sti.getTargetTriple(),
Options.getABIName())) {
MCAsmParserExtension::Initialize(parser);
parser.addAliasForDirective(".asciiz", ".asciz");


@@ -57,17 +57,16 @@ unsigned MipsABIInfo::GetCalleeAllocdArgSizeInBytes(CallingConv::ID CC) const {
llvm_unreachable("Unhandled ABI");
}
MipsABIInfo MipsABIInfo::computeTargetABI(const Triple &TT, StringRef CPU,
const MCTargetOptions &Options) {
if (Options.getABIName().starts_with("o32"))
MipsABIInfo MipsABIInfo::computeTargetABI(const Triple &TT, StringRef ABIName) {
if (ABIName.starts_with("o32"))
return MipsABIInfo::O32();
if (Options.getABIName().starts_with("n32"))
if (ABIName.starts_with("n32"))
return MipsABIInfo::N32();
if (Options.getABIName().starts_with("n64"))
if (ABIName.starts_with("n64"))
return MipsABIInfo::N64();
if (TT.isABIN32())
return MipsABIInfo::N32();
assert(Options.getABIName().empty() && "Unknown ABI option for MIPS");
assert(ABIName.empty() && "Unknown ABI option for MIPS");
if (TT.isMIPS64())
return MipsABIInfo::N64();


@@ -33,8 +33,7 @@ public:
static MipsABIInfo O32() { return MipsABIInfo(ABI::O32); }
static MipsABIInfo N32() { return MipsABIInfo(ABI::N32); }
static MipsABIInfo N64() { return MipsABIInfo(ABI::N64); }
static MipsABIInfo computeTargetABI(const Triple &TT, StringRef CPU,
const MCTargetOptions &Options);
static MipsABIInfo computeTargetABI(const Triple &TT, StringRef ABIName);
bool IsKnown() const { return ThisABI != ABI::Unknown; }
bool IsO32() const { return ThisABI == ABI::O32; }


@@ -619,7 +619,7 @@ MCAsmBackend *llvm::createMipsAsmBackend(const Target &T,
return new WindowsMipsAsmBackend(T, MRI, STI);
MipsABIInfo ABI = MipsABIInfo::computeTargetABI(STI.getTargetTriple(),
STI.getCPU(), Options);
Options.getABIName());
return new MipsAsmBackend(T, MRI, STI.getTargetTriple(), STI.getCPU(),
ABI.IsN32());
}


@@ -24,7 +24,8 @@ MipsELFMCAsmInfo::MipsELFMCAsmInfo(const Triple &TheTriple,
const MCTargetOptions &Options) {
IsLittleEndian = TheTriple.isLittleEndian();
MipsABIInfo ABI = MipsABIInfo::computeTargetABI(TheTriple, "", Options);
MipsABIInfo ABI =
MipsABIInfo::computeTargetABI(TheTriple, Options.getABIName());
if (TheTriple.isMIPS64() && !ABI.IsN32())
CodePointerSize = CalleeSaveStackSlotSize = 8;


@@ -77,42 +77,6 @@ static std::unique_ptr<TargetLoweringObjectFile> createTLOF(const Triple &TT) {
return std::make_unique<MipsTargetObjectFile>();
}
static std::string computeDataLayout(const Triple &TT, StringRef CPU,
const TargetOptions &Options,
bool isLittle) {
std::string Ret;
MipsABIInfo ABI = MipsABIInfo::computeTargetABI(TT, CPU, Options.MCOptions);
// There are both little and big endian mips.
if (isLittle)
Ret += "e";
else
Ret += "E";
if (ABI.IsO32())
Ret += "-m:m";
else
Ret += "-m:e";
// Pointers are 32 bit on some ABIs.
if (!ABI.IsN64())
Ret += "-p:32:32";
// 8 and 16 bit integers only need to have natural alignment, but try to
// align them to 32 bits. 64 bit integers have natural alignment.
Ret += "-i8:8:32-i16:16:32-i64:64";
// 32 bit registers are always available and the stack is at least 64 bit
// aligned. On N64 64 bit registers are also available and the stack is
// 128 bit aligned.
if (ABI.IsN64() || ABI.IsN32())
Ret += "-i128:128-n32:64-S128";
else
Ret += "-n32-S64";
return Ret;
}
static Reloc::Model getEffectiveRelocModel(bool JIT,
std::optional<Reloc::Model> RM) {
if (!RM || JIT)
@@ -132,12 +96,12 @@ MipsTargetMachine::MipsTargetMachine(const Target &T, const Triple &TT,
std::optional<CodeModel::Model> CM,
CodeGenOptLevel OL, bool JIT,
bool isLittle)
: CodeGenTargetMachineImpl(T, computeDataLayout(TT, CPU, Options, isLittle),
TT, CPU, FS, Options,
getEffectiveRelocModel(JIT, RM),
getEffectiveCodeModel(CM, CodeModel::Small), OL),
: CodeGenTargetMachineImpl(
T, TT.computeDataLayout(Options.MCOptions.getABIName()), TT, CPU, FS,
Options, getEffectiveRelocModel(JIT, RM),
getEffectiveCodeModel(CM, CodeModel::Small), OL),
isLittle(isLittle), TLOF(createTLOF(getTargetTriple())),
ABI(MipsABIInfo::computeTargetABI(TT, CPU, Options.MCOptions)),
ABI(MipsABIInfo::computeTargetABI(TT, Options.MCOptions.getABIName())),
Subtarget(nullptr),
DefaultSubtarget(TT, CPU, FS, isLittle, *this, std::nullopt),
NoMips16Subtarget(TT, CPU, FS.empty() ? "-mips16" : FS.str() + ",-mips16",


@@ -118,24 +118,6 @@ extern "C" LLVM_ABI LLVM_EXTERNAL_VISIBILITY void LLVMInitializeNVPTXTarget() {
initializeNVPTXPrologEpilogPassPass(PR);
}
static std::string computeDataLayout(bool is64Bit, bool UseShortPointers) {
std::string Ret = "e";
// Tensor Memory (addrspace:6) is always 32-bits.
// Distributed Shared Memory (addrspace:7) follows shared memory
// (addrspace:3).
if (!is64Bit)
Ret += "-p:32:32-p6:32:32-p7:32:32";
else if (UseShortPointers)
Ret += "-p3:32:32-p4:32:32-p5:32:32-p6:32:32-p7:32:32";
else
Ret += "-p6:32:32";
Ret += "-i64:64-i128:128-i256:256-v16:16-v32:32-n16:32:64";
return Ret;
}
NVPTXTargetMachine::NVPTXTargetMachine(const Target &T, const Triple &TT,
StringRef CPU, StringRef FS,
const TargetOptions &Options,
@@ -144,10 +126,10 @@ NVPTXTargetMachine::NVPTXTargetMachine(const Target &T, const Triple &TT,
CodeGenOptLevel OL, bool is64bit)
// The pic relocation model is used regardless of what the client has
// specified, as it is the only relocation model currently supported.
: CodeGenTargetMachineImpl(T,
computeDataLayout(is64bit, UseShortPointersOpt),
TT, CPU, FS, Options, Reloc::PIC_,
getEffectiveCodeModel(CM, CodeModel::Small), OL),
: CodeGenTargetMachineImpl(
T, TT.computeDataLayout(UseShortPointersOpt ? "shortptr" : ""), TT,
CPU, FS, Options, Reloc::PIC_,
getEffectiveCodeModel(CM, CodeModel::Small), OL),
is64bit(is64bit), TLOF(std::make_unique<NVPTXTargetObjectFile>()),
Subtarget(TT, std::string(CPU), std::string(FS), *this),
StrPool(StrAlloc) {


@@ -149,58 +149,6 @@ LLVMInitializePowerPCTarget() {
initializePPCAIXAsmPrinterPass(PR);
}
static bool isLittleEndianTriple(const Triple &T) {
return T.getArch() == Triple::ppc64le || T.getArch() == Triple::ppcle;
}
/// Return the datalayout string of a subtarget.
static std::string getDataLayoutString(const Triple &T) {
bool is64Bit = T.getArch() == Triple::ppc64 || T.getArch() == Triple::ppc64le;
std::string Ret;
// Most PPC* platforms are big endian, PPC(64)LE is little endian.
if (isLittleEndianTriple(T))
Ret = "e";
else
Ret = "E";
Ret += DataLayout::getManglingComponent(T);
// PPC32 has 32 bit pointers. The PS3 (OS Lv2) is a PPC64 machine with 32 bit
// pointers.
if (!is64Bit || T.getOS() == Triple::Lv2)
Ret += "-p:32:32";
// If the target ABI uses function descriptors, then the alignment of function
// pointers depends on the alignment used to emit the descriptor. Otherwise,
// function pointers are aligned to 32 bits because the instructions must be.
if ((T.getArch() == Triple::ppc64 && !T.isPPC64ELFv2ABI())) {
Ret += "-Fi64";
} else if (T.isOSAIX()) {
Ret += is64Bit ? "-Fi64" : "-Fi32";
} else {
Ret += "-Fn32";
}
// Note, the alignment values for f64 and i64 on ppc64 in Darwin
// documentation are wrong; these are correct (i.e. "what gcc does").
Ret += "-i64:64";
// PPC64 has 32 and 64 bit registers, PPC32 has only 32 bit ones.
if (is64Bit)
Ret += "-i128:128-n32:64";
else
Ret += "-n32";
// Specify the vector alignment explicitly. For v256i1 and v512i1, the
// calculated alignment would be 256*alignment(i1) and 512*alignment(i1),
// which is 256 and 512 bytes - way over aligned.
if (is64Bit && (T.isOSAIX() || T.isOSLinux()))
Ret += "-S128-v256:256:256-v512:512:512";
return Ret;
}
static std::string computeFSAdditions(StringRef FS, CodeGenOptLevel OL,
const Triple &TT) {
std::string FullFS = std::string(FS);
@@ -348,13 +296,13 @@ PPCTargetMachine::PPCTargetMachine(const Target &T, const Triple &TT,
std::optional<Reloc::Model> RM,
std::optional<CodeModel::Model> CM,
CodeGenOptLevel OL, bool JIT)
: CodeGenTargetMachineImpl(T, getDataLayoutString(TT), TT, CPU,
: CodeGenTargetMachineImpl(T, TT.computeDataLayout(), TT, CPU,
computeFSAdditions(FS, OL, TT), Options,
getEffectiveRelocModel(TT, RM),
getEffectivePPCCodeModel(TT, CM, JIT), OL),
TLOF(createTLOF(getTargetTriple())),
TargetABI(computeTargetABI(TT, Options)),
Endianness(isLittleEndianTriple(TT) ? Endian::LITTLE : Endian::BIG) {
Endianness(TT.isLittleEndian() ? Endian::LITTLE : Endian::BIG) {
initAsmInfo();
}


@@ -141,39 +141,6 @@ extern "C" LLVM_ABI LLVM_EXTERNAL_VISIBILITY void LLVMInitializeRISCVTarget() {
initializeRISCVAsmPrinterPass(*PR);
}
static std::string computeDataLayout(const Triple &TT,
const TargetOptions &Opts) {
std::string Ret;
if (TT.isLittleEndian())
Ret += "e";
else
Ret += "E";
Ret += "-m:e";
// Pointer and integer sizes.
if (TT.isArch64Bit()) {
Ret += "-p:64:64-i64:64-i128:128";
Ret += "-n32:64";
} else {
assert(TT.isArch32Bit() && "only RV32 and RV64 are currently supported");
Ret += "-p:32:32-i64:64";
Ret += "-n32";
}
// Stack alignment based on ABI.
StringRef ABI = Opts.MCOptions.getABIName();
if (ABI == "ilp32e")
Ret += "-S32";
else if (ABI == "lp64e")
Ret += "-S64";
else
Ret += "-S128";
return Ret;
}
static Reloc::Model getEffectiveRelocModel(const Triple &TT,
std::optional<Reloc::Model> RM) {
return RM.value_or(Reloc::Static);
@@ -185,9 +152,10 @@ RISCVTargetMachine::RISCVTargetMachine(const Target &T, const Triple &TT,
std::optional<Reloc::Model> RM,
std::optional<CodeModel::Model> CM,
CodeGenOptLevel OL, bool JIT)
: CodeGenTargetMachineImpl(T, computeDataLayout(TT, Options), TT, CPU, FS,
Options, getEffectiveRelocModel(TT, RM),
getEffectiveCodeModel(CM, CodeModel::Small), OL),
: CodeGenTargetMachineImpl(
T, TT.computeDataLayout(Options.MCOptions.getABIName()), TT, CPU, FS,
Options, getEffectiveRelocModel(TT, RM),
getEffectiveCodeModel(CM, CodeModel::Small), OL),
TLOF(std::make_unique<RISCVELFTargetObjectFile>()) {
initAsmInfo();


@@ -60,30 +60,6 @@ extern "C" LLVM_ABI LLVM_EXTERNAL_VISIBILITY void LLVMInitializeSPIRVTarget() {
initializeSPIRVStripConvergentIntrinsicsPass(PR);
}
static std::string computeDataLayout(const Triple &TT) {
const auto Arch = TT.getArch();
// TODO: this probably needs to be revisited:
// Logical SPIR-V has no pointer size, so any fixed pointer size would be
// wrong. The choice to default to 32 or 64 is just motivated by another
// memory model used for graphics: PhysicalStorageBuffer64. But it shouldn't
// mean anything.
if (Arch == Triple::spirv32)
return "e-p:32:32-i64:64-v16:16-v24:32-v32:32-v48:64-v96:128-v192:256-"
"v256:256-v512:512-v1024:1024-n8:16:32:64-G1";
if (Arch == Triple::spirv)
return "e-i64:64-v16:16-v24:32-v32:32-v48:64-v96:128-v192:256-v256:256-"
"v512:512-v1024:1024-n8:16:32:64-G10";
if (TT.getVendor() == Triple::VendorType::AMD &&
TT.getOS() == Triple::OSType::AMDHSA)
return "e-i64:64-v16:16-v24:32-v32:32-v48:64-v96:128-v192:256-v256:256-"
"v512:512-v1024:1024-n32:64-S32-G1-P4-A0";
if (TT.getVendor() == Triple::VendorType::Intel)
return "e-i64:64-v16:16-v24:32-v32:32-v48:64-v96:128-v192:256-v256:256-"
"v512:512-v1024:1024-n8:16:32:64-G1-P9-A0";
return "e-i64:64-v16:16-v24:32-v32:32-v48:64-v96:128-v192:256-v256:256-"
"v512:512-v1024:1024-n8:16:32:64-G1";
}
static Reloc::Model getEffectiveRelocModel(std::optional<Reloc::Model> RM) {
if (!RM)
return Reloc::PIC_;
@@ -99,7 +75,7 @@ SPIRVTargetMachine::SPIRVTargetMachine(const Target &T, const Triple &TT,
std::optional<Reloc::Model> RM,
std::optional<CodeModel::Model> CM,
CodeGenOptLevel OL, bool JIT)
: CodeGenTargetMachineImpl(T, computeDataLayout(TT), TT, CPU, FS, Options,
: CodeGenTargetMachineImpl(T, TT.computeDataLayout(), TT, CPU, FS, Options,
getEffectiveRelocModel(RM),
getEffectiveCodeModel(CM, CodeModel::Small), OL),
TLOF(std::make_unique<SPIRVTargetObjectFile>()),


@@ -38,39 +38,6 @@ static cl::opt<bool>
BranchRelaxation("sparc-enable-branch-relax", cl::Hidden, cl::init(true),
cl::desc("Relax out of range conditional branches"));
static std::string computeDataLayout(const Triple &T) {
const bool is64Bit = T.isSPARC64();
// Sparc is typically big endian, but some are little.
std::string Ret = T.getArch() == Triple::sparcel ? "e" : "E";
Ret += "-m:e";
// Some ABIs have 32bit pointers.
if (!is64Bit)
Ret += "-p:32:32";
// Alignments for 64 bit integers.
Ret += "-i64:64";
// Alignments for 128 bit integers.
// This is not specified in the ABI document but is the de facto standard.
Ret += "-i128:128";
// On SparcV9 128 floats are aligned to 128 bits, on others only to 64.
// On SparcV9 registers can hold 64 or 32 bits, on others only 32.
if (is64Bit)
Ret += "-n32:64";
else
Ret += "-f128:64-n32";
if (is64Bit)
Ret += "-S128";
else
Ret += "-S64";
return Ret;
}
static Reloc::Model getEffectiveRelocModel(std::optional<Reloc::Model> RM) {
return RM.value_or(Reloc::Static);
}
@@ -111,7 +78,7 @@ SparcTargetMachine::SparcTargetMachine(const Target &T, const Triple &TT,
std::optional<CodeModel::Model> CM,
CodeGenOptLevel OL, bool JIT)
: CodeGenTargetMachineImpl(
T, computeDataLayout(TT), TT, CPU, FS, Options,
T, TT.computeDataLayout(), TT, CPU, FS, Options,
getEffectiveRelocModel(RM),
getEffectiveSparcCodeModel(CM, getEffectiveRelocModel(RM),
TT.isSPARC64(), JIT),


@@ -54,47 +54,6 @@ LLVMInitializeSystemZTarget() {
initializeSystemZCopyPhysRegsPass(PR);
}
static std::string computeDataLayout(const Triple &TT) {
std::string Ret;
// Big endian.
Ret += "E";
// Data mangling.
Ret += DataLayout::getManglingComponent(TT);
// Special features for z/OS.
if (TT.isOSzOS()) {
if (TT.isArch64Bit()) {
// Custom address space for ptr32.
Ret += "-p1:32:32";
}
}
// Make sure that global data has at least 16 bits of alignment by
// default, so that we can refer to it using LARL. We don't have any
// special requirements for stack variables though.
Ret += "-i1:8:16-i8:8:16";
// 64-bit integers are naturally aligned.
Ret += "-i64:64";
// 128-bit floats are aligned only to 64 bits.
Ret += "-f128:64";
// The DataLayout string always holds a vector alignment of 64 bits, see
// comment in clang/lib/Basic/Targets/SystemZ.h.
Ret += "-v128:64";
// We prefer 16 bits of alignment for all globals; see above.
Ret += "-a:8:16";
// Integer registers are 32 or 64 bits.
Ret += "-n32:64";
return Ret;
}
static std::unique_ptr<TargetLoweringObjectFile> createTLOF(const Triple &TT) {
if (TT.isOSzOS())
return std::make_unique<TargetLoweringObjectFileGOFF>();
@@ -163,7 +122,7 @@ SystemZTargetMachine::SystemZTargetMachine(const Target &T, const Triple &TT,
std::optional<CodeModel::Model> CM,
CodeGenOptLevel OL, bool JIT)
: CodeGenTargetMachineImpl(
T, computeDataLayout(TT), TT, CPU, FS, Options,
T, TT.computeDataLayout(), TT, CPU, FS, Options,
getEffectiveRelocModel(RM),
getEffectiveSystemZCodeModel(CM, getEffectiveRelocModel(RM), JIT),
OL),


@@ -35,38 +35,6 @@ extern "C" LLVM_ABI LLVM_EXTERNAL_VISIBILITY void LLVMInitializeVETarget() {
initializeVEDAGToDAGISelLegacyPass(PR);
}
static std::string computeDataLayout(const Triple &T) {
// Aurora VE is little endian
std::string Ret = "e";
// Use ELF mangling
Ret += "-m:e";
// Alignments for 64 bit integers.
Ret += "-i64:64";
// VE supports 32 bit and 64 bit integers in registers
Ret += "-n32:64";
// Stack alignment is 128 bits
Ret += "-S128";
// Vector alignments are 64 bits
// Need to define all of them. Otherwise, each alignment becomes
// the size of the data by default.
Ret += "-v64:64:64"; // for v2f32
Ret += "-v128:64:64";
Ret += "-v256:64:64";
Ret += "-v512:64:64";
Ret += "-v1024:64:64";
Ret += "-v2048:64:64";
Ret += "-v4096:64:64";
Ret += "-v8192:64:64";
Ret += "-v16384:64:64"; // for v256f64
return Ret;
}
static Reloc::Model getEffectiveRelocModel(std::optional<Reloc::Model> RM) {
return RM.value_or(Reloc::Static);
}
@@ -91,7 +59,7 @@ VETargetMachine::VETargetMachine(const Target &T, const Triple &TT,
std::optional<Reloc::Model> RM,
std::optional<CodeModel::Model> CM,
CodeGenOptLevel OL, bool JIT)
: CodeGenTargetMachineImpl(T, computeDataLayout(TT), TT, CPU, FS, Options,
: CodeGenTargetMachineImpl(T, TT.computeDataLayout(), TT, CPU, FS, Options,
getEffectiveRelocModel(RM),
getEffectiveCodeModel(CM, CodeModel::Small), OL),
TLOF(createTLOF()),


@@ -196,19 +196,9 @@ WebAssemblyTargetMachine::WebAssemblyTargetMachine(
const Target &T, const Triple &TT, StringRef CPU, StringRef FS,
const TargetOptions &Options, std::optional<Reloc::Model> RM,
std::optional<CodeModel::Model> CM, CodeGenOptLevel OL, bool JIT)
: CodeGenTargetMachineImpl(
T,
TT.isArch64Bit()
? (TT.isOSEmscripten() ? "e-m:e-p:64:64-p10:8:8-p20:8:8-i64:64-"
"i128:128-f128:64-n32:64-S128-ni:1:10:20"
: "e-m:e-p:64:64-p10:8:8-p20:8:8-i64:64-"
"i128:128-n32:64-S128-ni:1:10:20")
: (TT.isOSEmscripten() ? "e-m:e-p:32:32-p10:8:8-p20:8:8-i64:64-"
"i128:128-f128:64-n32:64-S128-ni:1:10:20"
: "e-m:e-p:32:32-p10:8:8-p20:8:8-i64:64-"
"i128:128-n32:64-S128-ni:1:10:20"),
TT, CPU, FS, Options, getEffectiveRelocModel(RM, TT),
getEffectiveCodeModel(CM, CodeModel::Large), OL),
: CodeGenTargetMachineImpl(T, TT.computeDataLayout(), TT, CPU, FS, Options,
getEffectiveRelocModel(RM, TT),
getEffectiveCodeModel(CM, CodeModel::Large), OL),
TLOF(new WebAssemblyTargetObjectFile()),
UsesMultivalueABI(Options.MCOptions.getABIName() == "experimental-mv") {
// WebAssembly type-checks instructions, but a noreturn function with a return


@@ -125,54 +125,6 @@ static std::unique_ptr<TargetLoweringObjectFile> createTLOF(const Triple &TT) {
return std::make_unique<X86ELFTargetObjectFile>();
}
static std::string computeDataLayout(const Triple &TT) {
// X86 is little endian
std::string Ret = "e";
Ret += DataLayout::getManglingComponent(TT);
// X86 and x32 have 32 bit pointers.
if (!TT.isArch64Bit() || TT.isX32())
Ret += "-p:32:32";
// Address spaces for 32 bit signed, 32 bit unsigned, and 64 bit pointers.
Ret += "-p270:32:32-p271:32:32-p272:64:64";
// Some ABIs align 64 bit integers and doubles to 64 bits, others to 32.
// 128 bit integers are not specified in the 32-bit ABIs but are used
// internally for lowering f128, so we match the alignment to that.
if (TT.isArch64Bit() || TT.isOSWindows())
Ret += "-i64:64-i128:128";
else if (TT.isOSIAMCU())
Ret += "-i64:32-f64:32";
else
Ret += "-i128:128-f64:32:64";
// Some ABIs align long double to 128 bits, others to 32.
if (TT.isOSIAMCU())
; // No f80
else if (TT.isArch64Bit() || TT.isOSDarwin() || TT.isWindowsMSVCEnvironment())
Ret += "-f80:128";
else
Ret += "-f80:32";
if (TT.isOSIAMCU())
Ret += "-f128:32";
// The registers can hold 8, 16, 32 or, in x86-64, 64 bits.
if (TT.isArch64Bit())
Ret += "-n8:16:32:64";
else
Ret += "-n8:16:32";
// The stack is aligned to 32 bits on some ABIs and 128 bits on others.
if ((!TT.isArch64Bit() && TT.isOSWindows()) || TT.isOSIAMCU())
Ret += "-a:0:32-S32";
else
Ret += "-S128";
return Ret;
}
static Reloc::Model getEffectiveRelocModel(const Triple &TT, bool JIT,
std::optional<Reloc::Model> RM) {
bool is64Bit = TT.getArch() == Triple::x86_64;
@@ -236,7 +188,7 @@ X86TargetMachine::X86TargetMachine(const Target &T, const Triple &TT,
std::optional<Reloc::Model> RM,
std::optional<CodeModel::Model> CM,
CodeGenOptLevel OL, bool JIT)
: CodeGenTargetMachineImpl(T, computeDataLayout(TT), TT, CPU, FS, Options,
: CodeGenTargetMachineImpl(T, TT.computeDataLayout(), TT, CPU, FS, Options,
getEffectiveRelocModel(TT, JIT, RM),
getEffectiveX86CodeModel(TT, CM, JIT), OL),
TLOF(createTLOF(getTargetTriple())), IsJIT(JIT) {


@@ -32,13 +32,6 @@ extern "C" LLVM_EXTERNAL_VISIBILITY void LLVMInitializeXtensaTarget() {
initializeXtensaAsmPrinterPass(PR);
}
static std::string computeDataLayout(const Triple &TT, StringRef CPU,
const TargetOptions &Options,
bool IsLittle) {
std::string Ret = "e-m:e-p:32:32-i8:8:32-i16:16:32-i64:64-n32";
return Ret;
}
static Reloc::Model getEffectiveRelocModel(bool JIT,
std::optional<Reloc::Model> RM) {
if (!RM || JIT)
@@ -53,8 +46,7 @@ XtensaTargetMachine::XtensaTargetMachine(const Target &T, const Triple &TT,
std::optional<CodeModel::Model> CM,
CodeGenOptLevel OL, bool JIT,
bool IsLittle)
: CodeGenTargetMachineImpl(T, computeDataLayout(TT, CPU, Options, IsLittle),
TT, CPU, FS, Options,
: CodeGenTargetMachineImpl(T, TT.computeDataLayout(), TT, CPU, FS, Options,
getEffectiveRelocModel(JIT, RM),
getEffectiveCodeModel(CM, CodeModel::Small), OL),
TLOF(std::make_unique<TargetLoweringObjectFileELF>()) {


@@ -26,6 +26,7 @@ add_llvm_component_library(LLVMTargetParser
SubtargetFeature.cpp
TargetParser.cpp
Triple.cpp
TargetDataLayout.cpp
X86TargetParser.cpp
XtensaTargetParser.cpp


@@ -0,0 +1,629 @@
//===--- TargetDataLayout.cpp - Map Triple to LLVM data layout string -----===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
#include "llvm/ADT/StringRef.h"
#include "llvm/Support/CommandLine.h"
#include "llvm/TargetParser/ARMTargetParser.h"
#include "llvm/TargetParser/Triple.h"
#include <cstring>
using namespace llvm;
static StringRef getManglingComponent(const Triple &T) {
if (T.isOSBinFormatGOFF())
return "-m:l";
if (T.isOSBinFormatMachO())
return "-m:o";
if ((T.isOSWindows() || T.isUEFI()) && T.isOSBinFormatCOFF())
return T.getArch() == Triple::x86 ? "-m:x" : "-m:w";
if (T.isOSBinFormatXCOFF())
return "-m:a";
return "-m:e";
}
static std::string computeARMDataLayout(const Triple &TT, StringRef ABIName) {
auto ABI = ARM::computeTargetABI(TT, ABIName);
std::string Ret;
if (TT.isLittleEndian())
// Little endian.
Ret += "e";
else
// Big endian.
Ret += "E";
Ret += getManglingComponent(TT);
// Pointers are 32 bits and aligned to 32 bits.
Ret += "-p:32:32";
// Function pointers are aligned to 8 bits (because the LSB stores the
// ARM/Thumb state).
Ret += "-Fi8";
// ABIs other than APCS have 64 bit integers with natural alignment.
if (ABI != ARM::ARM_ABI_APCS)
Ret += "-i64:64";
// We have 64 bits floats. The APCS ABI requires them to be aligned to 32
// bits, others to 64 bits. We always try to align to 64 bits.
if (ABI == ARM::ARM_ABI_APCS)
Ret += "-f64:32:64";
// We have 128 and 64 bit vectors. The APCS ABI aligns them to 32 bits, others
// to 64. We always try to give them natural alignment.
if (ABI == ARM::ARM_ABI_APCS)
Ret += "-v64:32:64-v128:32:128";
else if (ABI != ARM::ARM_ABI_AAPCS16)
Ret += "-v128:64:128";
// Try to align aggregates to 32 bits (the default is 64 bits, which has no
// particular hardware support on 32-bit ARM).
Ret += "-a:0:32";
// Integer registers are 32 bits.
Ret += "-n32";
// The stack is 64 bit aligned on AAPCS and 32 bit aligned everywhere else.
if (ABI == ARM::ARM_ABI_AAPCS16)
Ret += "-S128";
else if (ABI == ARM::ARM_ABI_AAPCS)
Ret += "-S64";
else
Ret += "-S32";
return Ret;
}
// Helper function to build a DataLayout string
static std::string computeAArch64DataLayout(const Triple &TT) {
if (TT.isOSBinFormatMachO()) {
if (TT.getArch() == Triple::aarch64_32)
return "e-m:o-p:32:32-p270:32:32-p271:32:32-p272:64:64-i64:64-i128:128-"
"n32:64-S128-Fn32";
return "e-m:o-p270:32:32-p271:32:32-p272:64:64-i64:64-i128:128-n32:64-S128-"
"Fn32";
}
if (TT.isOSBinFormatCOFF())
return "e-m:w-p270:32:32-p271:32:32-p272:64:64-p:64:64-i32:32-i64:64-i128:"
"128-n32:64-S128-Fn32";
std::string Endian = TT.isLittleEndian() ? "e" : "E";
std::string Ptr32 = TT.getEnvironment() == Triple::GNUILP32 ? "-p:32:32" : "";
return Endian + "-m:e" + Ptr32 +
"-p270:32:32-p271:32:32-p272:64:64-i8:8:32-i16:16:32-i64:64-i128:128-"
"n32:64-S128-Fn32";
}
// DataLayout: little or big endian
static std::string computeBPFDataLayout(const Triple &TT) {
if (TT.getArch() == Triple::bpfeb)
return "E-m:e-p:64:64-i64:64-i128:128-n32:64-S128";
else
return "e-m:e-p:64:64-i64:64-i128:128-n32:64-S128";
}
static std::string computeCSKYDataLayout(const Triple &TT) {
// CSKY is always a 32-bit target, with the CSKYv2 ABI preferred for now.
// It's a 4-byte aligned stack with ELF mangling only.
// Only support little endian for now.
// TODO: Add support for big endian.
return "e-m:e-S32-p:32:32-i32:32:32-i64:32:32-f32:32:32-f64:32:32-v64:32:32"
"-v128:32:32-a:0:32-Fi32-n32";
}
static std::string computeLoongArchDataLayout(const Triple &TT) {
if (TT.isLoongArch64())
return "e-m:e-p:64:64-i64:64-i128:128-n32:64-S128";
assert(TT.isLoongArch32() && "only LA32 and LA64 are currently supported");
return "e-m:e-p:32:32-i64:64-n32-S128";
}
static std::string computeM68kDataLayout(const Triple &TT) {
std::string Ret = "";
// M68k is Big Endian
Ret += "E";
// FIXME how to wire it with the used object format?
Ret += "-m:e";
// M68k pointers are always 32 bit wide even for 16-bit CPUs.
// The ABI only specifies 16-bit alignment.
// On at least the 68020+ with a 32-bit bus, there is a performance benefit
// to having 32-bit alignment.
Ret += "-p:32:16:32";
// Bytes do not require special alignment, words are word aligned and
// long words are word aligned at minimum.
Ret += "-i8:8:8-i16:16:16-i32:16:32";
// FIXME no floats at the moment
// The registers can hold 8, 16, 32 bits
Ret += "-n8:16:32";
Ret += "-a:0:16-S16";
return Ret;
}
namespace {
enum class MipsABI { Unknown, O32, N32, N64 };
}
// FIXME: This duplicates MipsABIInfo::computeTargetABI, but duplicating this is
// preferable to violating layering rules. Ideally that information should live
// in LLVM TargetParser, but for now we just duplicate some ABI name string
// logic for simplicity.
static MipsABI getMipsABI(const Triple &TT, StringRef ABIName) {
if (ABIName.starts_with("o32"))
return MipsABI::O32;
if (ABIName.starts_with("n32"))
return MipsABI::N32;
if (ABIName.starts_with("n64"))
return MipsABI::N64;
if (TT.isABIN32())
return MipsABI::N32;
assert(ABIName.empty() && "Unknown ABI option for MIPS");
if (TT.isMIPS64())
return MipsABI::N64;
return MipsABI::O32;
}
static std::string computeMipsDataLayout(const Triple &TT, StringRef ABIName) {
std::string Ret;
MipsABI ABI = getMipsABI(TT, ABIName);
// There are both little and big endian mips.
if (TT.isLittleEndian())
Ret += "e";
else
Ret += "E";
if (ABI == MipsABI::O32)
Ret += "-m:m";
else
Ret += "-m:e";
// Pointers are 32 bit on some ABIs.
if (ABI != MipsABI::N64)
Ret += "-p:32:32";
// 8 and 16 bit integers only need to have natural alignment, but try to
// align them to 32 bits. 64 bit integers have natural alignment.
Ret += "-i8:8:32-i16:16:32-i64:64";
// 32 bit registers are always available and the stack is at least 64 bit
// aligned. On N64 64 bit registers are also available and the stack is
// 128 bit aligned.
if (ABI == MipsABI::N64 || ABI == MipsABI::N32)
Ret += "-i128:128-n32:64-S128";
else
Ret += "-n32-S64";
return Ret;
}
static std::string computePowerDataLayout(const Triple &T) {
bool is64Bit = T.isPPC64();
std::string Ret;
// Most PPC* platforms are big endian, PPC(64)LE is little endian.
if (T.isLittleEndian())
Ret = "e";
else
Ret = "E";
Ret += getManglingComponent(T);
// PPC32 has 32 bit pointers. The PS3 (OS Lv2) is a PPC64 machine with 32 bit
// pointers.
if (!is64Bit || T.getOS() == Triple::Lv2)
Ret += "-p:32:32";
// If the target ABI uses function descriptors, then the alignment of function
// pointers depends on the alignment used to emit the descriptor. Otherwise,
// function pointers are aligned to 32 bits because the instructions must be.
if ((T.getArch() == Triple::ppc64 && !T.isPPC64ELFv2ABI())) {
Ret += "-Fi64";
} else if (T.isOSAIX()) {
Ret += is64Bit ? "-Fi64" : "-Fi32";
} else {
Ret += "-Fn32";
}
// Note, the alignment values for f64 and i64 on ppc64 in Darwin
// documentation are wrong; these are correct (i.e. "what gcc does").
Ret += "-i64:64";
// PPC64 has 32 and 64 bit registers, PPC32 has only 32 bit ones.
if (is64Bit)
Ret += "-i128:128-n32:64";
else
Ret += "-n32";
// Specify the vector alignment explicitly. For v256i1 and v512i1, the
// calculated alignment would be 256*alignment(i1) and 512*alignment(i1),
// which is 256 and 512 bytes - way over aligned.
if (is64Bit && (T.isOSAIX() || T.isOSLinux()))
Ret += "-S128-v256:256:256-v512:512:512";
return Ret;
}
static std::string computeAMDDataLayout(const Triple &TT) {
if (TT.getArch() == Triple::r600) {
// 32-bit pointers.
return "e-p:32:32-i64:64-v16:16-v24:32-v32:32-v48:64-v96:128"
"-v192:256-v256:256-v512:512-v1024:1024-v2048:2048-n32:64-S32-A5-G1";
}
// 32-bit private, local, and region pointers. 64-bit global, constant and
// flat. 160-bit non-integral fat buffer pointers that include a 128-bit
// buffer descriptor and a 32-bit offset, which are indexed by 32-bit values
// (address space 7), and 128-bit non-integral buffer resources (address
// space 8) which cannot be non-trivially accessed by LLVM memory operations
// like getelementptr.
return "e-p:64:64-p1:64:64-p2:32:32-p3:32:32-p4:64:64-p5:32:32-p6:32:32"
"-p7:160:256:256:32-p8:128:128:128:48-p9:192:256:256:32-i64:64-"
"v16:16-v24:32-v32:32-v48:64-v96:128-v192:256-v256:256-v512:512-"
"v1024:1024-v2048:2048-n32:64-S32-A5-G1-ni:7:8:9";
}
static std::string computeRISCVDataLayout(const Triple &TT, StringRef ABIName) {
std::string Ret;
if (TT.isLittleEndian())
Ret += "e";
else
Ret += "E";
Ret += "-m:e";
// Pointer and integer sizes.
if (TT.isRISCV64()) {
Ret += "-p:64:64-i64:64-i128:128";
Ret += "-n32:64";
} else {
assert(TT.isRISCV32() && "only RV32 and RV64 are currently supported");
Ret += "-p:32:32-i64:64";
Ret += "-n32";
}
// Stack alignment based on ABI.
StringRef ABI = ABIName;
if (ABI == "ilp32e")
Ret += "-S32";
else if (ABI == "lp64e")
Ret += "-S64";
else
Ret += "-S128";
return Ret;
}
static std::string computeSparcDataLayout(const Triple &T) {
const bool Is64Bit = T.isSPARC64();
// Sparc is typically big endian, but some are little.
std::string Ret = T.getArch() == Triple::sparcel ? "e" : "E";
Ret += "-m:e";
// Some ABIs have 32bit pointers.
if (!Is64Bit)
Ret += "-p:32:32";
// Alignments for 64 bit integers.
Ret += "-i64:64";
// Alignments for 128 bit integers.
// This is not specified in the ABI document but is the de facto standard.
Ret += "-i128:128";
// On SparcV9 128 floats are aligned to 128 bits, on others only to 64.
// On SparcV9 registers can hold 64 or 32 bits, on others only 32.
if (Is64Bit)
Ret += "-n32:64";
else
Ret += "-f128:64-n32";
if (Is64Bit)
Ret += "-S128";
else
Ret += "-S64";
return Ret;
}
static std::string computeSystemZDataLayout(const Triple &TT) {
std::string Ret;
// Big endian.
Ret += "E";
// Data mangling.
Ret += getManglingComponent(TT);
// Special features for z/OS.
if (TT.isOSzOS()) {
// Custom address space for ptr32.
Ret += "-p1:32:32";
}
// Make sure that global data has at least 16 bits of alignment by
// default, so that we can refer to it using LARL. We don't have any
// special requirements for stack variables though.
Ret += "-i1:8:16-i8:8:16";
// 64-bit integers are naturally aligned.
Ret += "-i64:64";
// 128-bit floats are aligned only to 64 bits.
Ret += "-f128:64";
// The DataLayout string always holds a vector alignment of 64 bits, see
// comment in clang/lib/Basic/Targets/SystemZ.h.
Ret += "-v128:64";
// We prefer 16 bits of alignment for all globals; see above.
Ret += "-a:8:16";
// Integer registers are 32 or 64 bits.
Ret += "-n32:64";
return Ret;
}
static std::string computeX86DataLayout(const Triple &TT) {
bool Is64Bit = TT.getArch() == Triple::x86_64;
// X86 is little endian
std::string Ret = "e";
Ret += getManglingComponent(TT);
// X86 and x32 have 32 bit pointers.
if (!Is64Bit || TT.isX32())
Ret += "-p:32:32";
// Address spaces for 32 bit signed, 32 bit unsigned, and 64 bit pointers.
Ret += "-p270:32:32-p271:32:32-p272:64:64";
// Some ABIs align 64 bit integers and doubles to 64 bits, others to 32.
// 128 bit integers are not specified in the 32-bit ABIs but are used
// internally for lowering f128, so we match the alignment to that.
if (Is64Bit || TT.isOSWindows())
Ret += "-i64:64-i128:128";
else if (TT.isOSIAMCU())
Ret += "-i64:32-f64:32";
else
Ret += "-i128:128-f64:32:64";
// Some ABIs align long double to 128 bits, others to 32.
if (TT.isOSIAMCU())
; // No f80
else if (Is64Bit || TT.isOSDarwin() || TT.isWindowsMSVCEnvironment())
Ret += "-f80:128";
else
Ret += "-f80:32";
if (TT.isOSIAMCU())
Ret += "-f128:32";
// The registers can hold 8, 16, 32 or, in x86-64, 64 bits.
if (Is64Bit)
Ret += "-n8:16:32:64";
else
Ret += "-n8:16:32";
// The stack is aligned to 32 bits on some ABIs and 128 bits on others.
if ((!Is64Bit && TT.isOSWindows()) || TT.isOSIAMCU())
Ret += "-a:0:32-S32";
else
Ret += "-S128";
return Ret;
}
static std::string computeNVPTXDataLayout(const Triple &T, StringRef ABIName) {
bool Is64Bit = T.getArch() == Triple::nvptx64;
std::string Ret = "e";
// Tensor Memory (addrspace:6) is always 32-bits.
// Distributed Shared Memory (addrspace:7) follows shared memory
// (addrspace:3).
if (!Is64Bit)
Ret += "-p:32:32-p6:32:32-p7:32:32";
else if (ABIName == "shortptr")
Ret += "-p3:32:32-p4:32:32-p5:32:32-p6:32:32-p7:32:32";
else
Ret += "-p6:32:32";
Ret += "-i64:64-i128:128-i256:256-v16:16-v32:32-n16:32:64";
return Ret;
}
static std::string computeSPIRVDataLayout(const Triple &TT) {
const auto Arch = TT.getArch();
// TODO: this probably needs to be revisited:
// Logical SPIR-V has no pointer size, so any fixed pointer size would be
// wrong. The choice to default to 32 or 64 is just motivated by another
// memory model used for graphics: PhysicalStorageBuffer64. But it shouldn't
// mean anything.
if (Arch == Triple::spirv32)
return "e-p:32:32-i64:64-v16:16-v24:32-v32:32-v48:64-v96:128-v192:256-"
"v256:256-v512:512-v1024:1024-n8:16:32:64-G1";
if (Arch == Triple::spirv)
return "e-i64:64-v16:16-v24:32-v32:32-v48:64-v96:128-v192:256-v256:256-"
"v512:512-v1024:1024-n8:16:32:64-G10";
if (TT.getVendor() == Triple::VendorType::AMD &&
TT.getOS() == Triple::OSType::AMDHSA)
return "e-i64:64-v16:16-v24:32-v32:32-v48:64-v96:128-v192:256-v256:256-"
"v512:512-v1024:1024-n32:64-S32-G1-P4-A0";
if (TT.getVendor() == Triple::VendorType::Intel)
return "e-i64:64-v16:16-v24:32-v32:32-v48:64-v96:128-v192:256-v256:256-"
"v512:512-v1024:1024-n8:16:32:64-G1-P9-A0";
return "e-i64:64-v16:16-v24:32-v32:32-v48:64-v96:128-v192:256-v256:256-"
"v512:512-v1024:1024-n8:16:32:64-G1";
}
static std::string computeLanaiDataLayout() {
// Data layout (keep in sync with clang/lib/Basic/Targets.cpp)
return "E" // Big endian
"-m:e" // ELF name manging
"-p:32:32" // 32-bit pointers, 32 bit aligned
"-i64:64" // 64 bit integers, 64 bit aligned
"-a:0:32" // 32 bit alignment of objects of aggregate type
"-n32" // 32 bit native integer width
"-S64"; // 64 bit natural stack alignment
}
static std::string computeWebAssemblyDataLayout(const Triple &TT) {
return TT.getArch() == Triple::wasm64
? (TT.isOSEmscripten() ? "e-m:e-p:64:64-p10:8:8-p20:8:8-i64:64-"
"i128:128-f128:64-n32:64-S128-ni:1:10:20"
: "e-m:e-p:64:64-p10:8:8-p20:8:8-i64:64-"
"i128:128-n32:64-S128-ni:1:10:20")
: (TT.isOSEmscripten() ? "e-m:e-p:32:32-p10:8:8-p20:8:8-i64:64-"
"i128:128-f128:64-n32:64-S128-ni:1:10:20"
: "e-m:e-p:32:32-p10:8:8-p20:8:8-i64:64-"
"i128:128-n32:64-S128-ni:1:10:20");
}
static std::string computeVEDataLayout(const Triple &T) {
// Aurora VE is little endian
std::string Ret = "e";
// Use ELF mangling
Ret += "-m:e";
// Alignments for 64 bit integers.
Ret += "-i64:64";
// VE supports 32 bit and 64 bit integers in registers
Ret += "-n32:64";
// Stack alignment is 128 bits
Ret += "-S128";
// Vector alignments are 64 bits
// Need to define all of them. Otherwise, each alignment becomes
// the size of the data by default.
Ret += "-v64:64:64"; // for v2f32
Ret += "-v128:64:64";
Ret += "-v256:64:64";
Ret += "-v512:64:64";
Ret += "-v1024:64:64";
Ret += "-v2048:64:64";
Ret += "-v4096:64:64";
Ret += "-v8192:64:64";
Ret += "-v16384:64:64"; // for v256f64
return Ret;
}
std::string Triple::computeDataLayout(StringRef ABIName) const {
switch (getArch()) {
case Triple::arm:
case Triple::armeb:
case Triple::thumb:
case Triple::thumbeb:
return computeARMDataLayout(*this, ABIName);
case Triple::aarch64:
case Triple::aarch64_be:
case Triple::aarch64_32:
return computeAArch64DataLayout(*this);
case Triple::arc:
return "e-m:e-p:32:32-i1:8:32-i8:8:32-i16:16:32-i32:32:32-"
"f32:32:32-i64:32-f64:32-a:0:32-n32";
case Triple::avr:
return "e-P1-p:16:8-i8:8-i16:8-i32:8-i64:8-f32:8-f64:8-n8:16-a:8";
case Triple::bpfel:
case Triple::bpfeb:
return computeBPFDataLayout(*this);
case Triple::csky:
return computeCSKYDataLayout(*this);
case Triple::dxil:
return "e-m:e-p:32:32-i1:32-i8:8-i16:16-i32:32-i64:64-f16:16-"
"f32:32-f64:64-n8:16:32:64";
case Triple::hexagon:
return "e-m:e-p:32:32:32-a:0-n16:32-"
"i64:64:64-i32:32:32-i16:16:16-i1:8:8-f32:32:32-f64:64:64-"
"v32:32:32-v64:64:64-v512:512:512-v1024:1024:1024-v2048:2048:2048";
case Triple::loongarch32:
case Triple::loongarch64:
return computeLoongArchDataLayout(*this);
case Triple::m68k:
return computeM68kDataLayout(*this);
case Triple::mips:
case Triple::mipsel:
case Triple::mips64:
case Triple::mips64el:
return computeMipsDataLayout(*this, ABIName);
case Triple::msp430:
return "e-m:e-p:16:16-i32:16-i64:16-f32:16-f64:16-a:8-n8:16-S16";
case Triple::ppc:
case Triple::ppcle:
case Triple::ppc64:
case Triple::ppc64le:
return computePowerDataLayout(*this);
case Triple::r600:
case Triple::amdgcn:
return computeAMDDataLayout(*this);
case Triple::riscv32:
case Triple::riscv64:
case Triple::riscv32be:
case Triple::riscv64be:
return computeRISCVDataLayout(*this, ABIName);
case Triple::sparc:
case Triple::sparcv9:
case Triple::sparcel:
return computeSparcDataLayout(*this);
case Triple::systemz:
return computeSystemZDataLayout(*this);
case Triple::tce:
case Triple::tcele:
case Triple::x86:
case Triple::x86_64:
return computeX86DataLayout(*this);
case Triple::xcore:
case Triple::xtensa:
return "e-m:e-p:32:32-i8:8:32-i16:16:32-i64:64-n32";
case Triple::nvptx:
case Triple::nvptx64:
return computeNVPTXDataLayout(*this, ABIName);
case Triple::spir:
case Triple::spir64:
case Triple::spirv:
case Triple::spirv32:
case Triple::spirv64:
return computeSPIRVDataLayout(*this);
case Triple::lanai:
return computeLanaiDataLayout();
case Triple::wasm32:
case Triple::wasm64:
return computeWebAssemblyDataLayout(*this);
case Triple::ve:
return computeVEDataLayout(*this);
case Triple::amdil:
case Triple::amdil64:
case Triple::hsail:
case Triple::hsail64:
case Triple::kalimba:
case Triple::shave:
case Triple::renderscript32:
case Triple::renderscript64:
// These are all virtual ISAs with no LLVM backend, and therefore no fixed
// LLVM data layout.
return "";
case Triple::UnknownArch:
return "";
}
llvm_unreachable("Invalid arch");
}


@@ -677,11 +677,4 @@ TEST(DataLayoutTest, VectorAlign) {
EXPECT_EQ(Align(4 * 8), DL->getPrefTypeAlign(V8F32Ty));
}
TEST(DataLayoutTest, UEFI) {
Triple TT = Triple("x86_64-unknown-uefi");
// Test UEFI X86_64 Mangling Component.
EXPECT_STREQ(DataLayout::getManglingComponent(TT), "-m:w");
}
} // anonymous namespace


@@ -10,6 +10,7 @@
#include "llvm/Support/CodeGen.h"
#include "llvm/Support/FormatVariadic.h"
#include "llvm/Support/VersionTuple.h"
#include "gmock/gmock.h"
#include "gtest/gtest.h"
using namespace llvm;
@@ -3300,4 +3301,12 @@ TEST(TripleTest, isCompatibleWith) {
EXPECT_TRUE(DoTest(C.B, C.A, C.Result));
}
}
TEST(DataLayoutTest, UEFI) {
Triple TT = Triple("x86_64-unknown-uefi");
// Test UEFI X86_64 Mangling Component.
EXPECT_THAT(TT.computeDataLayout(), testing::HasSubstr("-m:w-"));
}
} // end anonymous namespace