//===- GPUOpsLowering.cpp - GPU FuncOp / ReturnOp lowering ----------------===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//

#include "GPUOpsLowering.h"

#include "mlir/Dialect/LLVMIR/LLVMDialect.h"
#include "mlir/IR/Attributes.h"
#include "mlir/IR/Builders.h"
#include "mlir/IR/BuiltinTypes.h"
#include "llvm/ADT/SmallVectorExtras.h"
#include "llvm/Support/FormatVariadic.h"

using namespace mlir;
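
// Lowers gpu.func to llvm.func, rewriting memory attributions along the way:
// workgroup attributions become module-level globals in the workgroup address
// space, and private attributions become allocas.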
LogicalResult
GPUFuncOpLowering::matchAndRewrite(gpu::GPUFuncOp gpuFuncOp, OpAdaptor adaptor,
                                   ConversionPatternRewriter &rewriter) const {
  Location loc = gpuFuncOp.getLoc();
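
  // Create one array-typed global per workgroup memory attribution; the body
  // is later rewritten to address these globals instead of the block
  // arguments.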
  SmallVector<LLVM::GlobalOp, 3> workgroupBuffers;
  workgroupBuffers.reserve(gpuFuncOp.getNumWorkgroupAttributions());
  for (const auto &en : llvm::enumerate(gpuFuncOp.getWorkgroupAttributions())) {
    BlockArgument attribution = en.value();

    auto type = dyn_cast<MemRefType>(attribution.getType());
    assert(type && type.hasStaticShape() && "unexpected type in attribution");

    uint64_t numElements = type.getNumElements();

    auto elementType =
        cast<Type>(typeConverter->convertType(type.getElementType()));
    auto arrayType = LLVM::LLVMArrayType::get(elementType, numElements);
    std::string name = std::string(
        llvm::formatv("__wg_{0}_{1}", gpuFuncOp.getName(), en.index()));
    uint64_t alignment = 0;
    if (auto alignAttr =
            dyn_cast_or_null<IntegerAttr>(gpuFuncOp.getWorkgroupAttributionAttr(
                en.index(), LLVM::LLVMDialect::getAlignAttrName())))
      alignment = alignAttr.getInt();
    auto globalOp = rewriter.create<LLVM::GlobalOp>(
        gpuFuncOp.getLoc(), arrayType, /*isConstant=*/false,
        LLVM::Linkage::Internal, name, /*value=*/Attribute(), alignment,
        workgroupAddrSpace);
    workgroupBuffers.push_back(globalOp);
  }

  // Remap proper input types.
  TypeConverter::SignatureConversion signatureConversion(
      gpuFuncOp.front().getNumArguments());
  Type funcType = getTypeConverter()->convertFunctionSignature(
      gpuFuncOp.getFunctionType(), /*isVariadic=*/false,
      getTypeConverter()->getOptions().useBarePtrCallConv, signatureConversion);

  // Create the new function operation. Only copy those attributes that are
  // not specific to function modeling.
  SmallVector<NamedAttribute, 4> attributes;
  for (const auto &attr : gpuFuncOp->getAttrs()) {
    if (attr.getName() == SymbolTable::getSymbolAttrName() ||
        attr.getName() == gpuFuncOp.getFunctionTypeAttrName() ||
        attr.getName() ==
            gpu::GPUFuncOp::getNumWorkgroupAttributionsAttrName() ||
        attr.getName() == gpuFuncOp.getWorkgroupAttribAttrsAttrName() ||
        attr.getName() == gpuFuncOp.getPrivateAttribAttrsAttrName())
      continue;
    attributes.push_back(attr);
  }
  // Add a dialect-specific kernel attribute in addition to the GPU kernel
  // attribute. The former is necessary for further translation, while the
  // latter is expected by gpu.launch_func.
  if (gpuFuncOp.isKernel())
    attributes.emplace_back(kernelAttributeName, rewriter.getUnitAttr());
  auto llvmFuncOp = rewriter.create<LLVM::LLVMFuncOp>(
      gpuFuncOp.getLoc(), gpuFuncOp.getName(), funcType,
      LLVM::Linkage::External, /*dsoLocal=*/false, /*cconv=*/LLVM::CConv::C,
      /*comdat=*/nullptr, attributes);

  {
    // Insert operations that correspond to converted workgroup and private
    // memory attributions to the body of the function. This must operate on
    // the original function, before the body region is inlined into the new
    // function, to maintain the relation between block arguments and the
    // parent operation that assigns their semantics.
    OpBuilder::InsertionGuard guard(rewriter);

    // Rewrite workgroup memory attributions to addresses of global buffers.
    rewriter.setInsertionPointToStart(&gpuFuncOp.front());
    unsigned numProperArguments = gpuFuncOp.getNumArguments();

    for (const auto &en : llvm::enumerate(workgroupBuffers)) {
      LLVM::GlobalOp global = en.value();
      Value address = rewriter.create<LLVM::AddressOfOp>(
          loc,
          getTypeConverter()->getPointerType(global.getType(),
                                             global.getAddrSpace()),
          global.getSymNameAttr());
      auto elementType =
          cast<LLVM::LLVMArrayType>(global.getType()).getElementType();
      Value memory = rewriter.create<LLVM::GEPOp>(
          loc,
          getTypeConverter()->getPointerType(elementType,
                                             global.getAddrSpace()),
          global.getType(), address, ArrayRef<LLVM::GEPArg>{0, 0});

      // Build a memref descriptor pointing to the buffer to plug into the
      // existing memref infrastructure. This may use more registers than
      // otherwise necessary given that memref sizes are fixed, but we can try
      // to canonicalize that away later.
      Value attribution = gpuFuncOp.getWorkgroupAttributions()[en.index()];
      auto type = cast<MemRefType>(attribution.getType());
      auto descr = MemRefDescriptor::fromStaticShape(
          rewriter, loc, *getTypeConverter(), type, memory);
      signatureConversion.remapInput(numProperArguments + en.index(), descr);
    }

    // Rewrite private memory attributions to alloca'ed buffers.
    unsigned numWorkgroupAttributions = gpuFuncOp.getNumWorkgroupAttributions();
    auto int64Ty = IntegerType::get(rewriter.getContext(), 64);
    for (const auto &en : llvm::enumerate(gpuFuncOp.getPrivateAttributions())) {
      Value attribution = en.value();
      auto type = cast<MemRefType>(attribution.getType());
      assert(type && type.hasStaticShape() && "unexpected type in attribution");

      // Explicitly drop the memory space when lowering private memory
      // attributions since NVVM models them as `alloca`s in the default
      // memory space and does not support `alloca`s with addrspace(5).
      Type elementType = typeConverter->convertType(type.getElementType());
      auto ptrType =
          getTypeConverter()->getPointerType(elementType, allocaAddrSpace);
      Value numElements = rewriter.create<LLVM::ConstantOp>(
          gpuFuncOp.getLoc(), int64Ty, type.getNumElements());
      uint64_t alignment = 0;
      if (auto alignAttr =
              dyn_cast_or_null<IntegerAttr>(gpuFuncOp.getPrivateAttributionAttr(
                  en.index(), LLVM::LLVMDialect::getAlignAttrName())))
        alignment = alignAttr.getInt();
      Value allocated = rewriter.create<LLVM::AllocaOp>(
          gpuFuncOp.getLoc(), ptrType, elementType, numElements, alignment);
      auto descr = MemRefDescriptor::fromStaticShape(
          rewriter, loc, *getTypeConverter(), type, allocated);
      signatureConversion.remapInput(
          numProperArguments + numWorkgroupAttributions + en.index(), descr);
    }
  }

  // Move the region to the new function, update the entry block signature.
  rewriter.inlineRegionBefore(gpuFuncOp.getBody(), llvmFuncOp.getBody(),
                              llvmFuncOp.end());
  if (failed(rewriter.convertRegionTypes(&llvmFuncOp.getBody(), *typeConverter,
                                         &signatureConversion)))
    return failure();

  // If bare memref pointers are being used, remap them back to memref
  // descriptors. This must be done after signature conversion to get rid of
  // the unrealized casts.
  if (getTypeConverter()->getOptions().useBarePtrCallConv) {
    OpBuilder::InsertionGuard guard(rewriter);
    rewriter.setInsertionPointToStart(&llvmFuncOp.getBody().front());
    for (const auto &en : llvm::enumerate(gpuFuncOp.getArgumentTypes())) {
      auto memrefTy = dyn_cast<MemRefType>(en.value());
      if (!memrefTy)
        continue;
      assert(memrefTy.hasStaticShape() &&
             "Bare pointer conversion used with dynamically-shaped memrefs");
      // Use a placeholder when replacing uses of the memref argument to
      // prevent circular replacements.
      auto remapping = signatureConversion.getInputMapping(en.index());
      assert(remapping && remapping->size == 1 &&
             "Type converter should produce 1-to-1 mapping for bare memrefs");
      BlockArgument newArg =
          llvmFuncOp.getBody().getArgument(remapping->inputNo);
      auto placeholder = rewriter.create<LLVM::UndefOp>(
          loc, getTypeConverter()->convertType(memrefTy));
      rewriter.replaceUsesOfBlockArgument(newArg, placeholder);
      Value desc = MemRefDescriptor::fromStaticShape(
          rewriter, loc, *getTypeConverter(), memrefTy, newArg);
      rewriter.replaceOp(placeholder, {desc});
    }
  }

  rewriter.eraseOp(gpuFuncOp);
  return success();
}
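
/// Returns a global symbol name of the form "printfFormat_N" that is not yet
/// used in moduleOp, so that every lowered format string gets its own global.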
static SmallString<16> getUniqueFormatGlobalName(gpu::GPUModuleOp moduleOp) {
  const char formatStringPrefix[] = "printfFormat_";
  // Get a unique global name.
  unsigned stringNumber = 0;
  SmallString<16> stringConstName;
  do {
    stringConstName.clear();
    (formatStringPrefix + Twine(stringNumber++)).toStringRef(stringConstName);
  } while (moduleOp.lookupSymbol(stringConstName));
  return stringConstName;
}
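
/// Returns the LLVM function declaration named `name` in moduleOp, creating
/// an external declaration of the given type at the start of the module if
/// it does not exist yet.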
template <typename T>
static LLVM::LLVMFuncOp getOrDefineFunction(T &moduleOp, const Location loc,
                                            ConversionPatternRewriter &rewriter,
                                            StringRef name,
                                            LLVM::LLVMFunctionType type) {
  LLVM::LLVMFuncOp ret;
  if (!(ret = moduleOp.template lookupSymbol<LLVM::LLVMFuncOp>(name))) {
    ConversionPatternRewriter::InsertionGuard guard(rewriter);
    rewriter.setInsertionPointToStart(moduleOp.getBody());
    ret = rewriter.create<LLVM::LLVMFuncOp>(loc, name, type,
                                            LLVM::Linkage::External);
  }
  return ret;
}
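
// Lowers gpu.printf for ROCm/HIP targets by emitting a sequence of hostcalls
// into the device library: __ockl_printf_begin opens a transaction, the
// format string and arguments are appended in groups, and the i64 descriptor
// returned by each call is threaded into the next one.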
LogicalResult GPUPrintfOpToHIPLowering::matchAndRewrite(
    gpu::PrintfOp gpuPrintfOp, gpu::PrintfOpAdaptor adaptor,
    ConversionPatternRewriter &rewriter) const {
  Location loc = gpuPrintfOp->getLoc();

  mlir::Type llvmI8 = typeConverter->convertType(rewriter.getI8Type());
  mlir::Type i8Ptr = getTypeConverter()->getPointerType(llvmI8);
  mlir::Type llvmI32 = typeConverter->convertType(rewriter.getI32Type());
  mlir::Type llvmI64 = typeConverter->convertType(rewriter.getI64Type());
  // Note: this is the GPUModule op, not the ModuleOp that surrounds it.
  // This ensures that global constants and declarations are placed within
  // the device code, not the host code.
  auto moduleOp = gpuPrintfOp->getParentOfType<gpu::GPUModuleOp>();

  auto ocklBegin =
      getOrDefineFunction(moduleOp, loc, rewriter, "__ockl_printf_begin",
                          LLVM::LLVMFunctionType::get(llvmI64, {llvmI64}));
  LLVM::LLVMFuncOp ocklAppendArgs;
  if (!adaptor.getArgs().empty()) {
    ocklAppendArgs = getOrDefineFunction(
        moduleOp, loc, rewriter, "__ockl_printf_append_args",
        LLVM::LLVMFunctionType::get(
            llvmI64, {llvmI64, /*numArgs*/ llvmI32, llvmI64, llvmI64, llvmI64,
                      llvmI64, llvmI64, llvmI64, llvmI64, /*isLast*/ llvmI32}));
  }
  auto ocklAppendStringN = getOrDefineFunction(
      moduleOp, loc, rewriter, "__ockl_printf_append_string_n",
      LLVM::LLVMFunctionType::get(
          llvmI64,
          {llvmI64, i8Ptr, /*length (bytes)*/ llvmI64, /*isLast*/ llvmI32}));

  // Start the printf hostcall.
  Value zeroI64 = rewriter.create<LLVM::ConstantOp>(loc, llvmI64, 0);
  auto printfBeginCall = rewriter.create<LLVM::CallOp>(loc, ocklBegin, zeroI64);
  Value printfDesc = printfBeginCall.getResult();

  // Get a unique global name for the format.
  SmallString<16> stringConstName = getUniqueFormatGlobalName(moduleOp);

  llvm::SmallString<20> formatString(adaptor.getFormat());
  formatString.push_back('\0'); // Null terminate for C
  size_t formatStringSize = formatString.size_in_bytes();

  auto globalType = LLVM::LLVMArrayType::get(llvmI8, formatStringSize);
  LLVM::GlobalOp global;
  {
    ConversionPatternRewriter::InsertionGuard guard(rewriter);
    rewriter.setInsertionPointToStart(moduleOp.getBody());
    global = rewriter.create<LLVM::GlobalOp>(
        loc, globalType,
        /*isConstant=*/true, LLVM::Linkage::Internal, stringConstName,
        rewriter.getStringAttr(formatString));
  }

  // Get a pointer to the format string's first element and pass it to the
  // hostcall.
  Value globalPtr = rewriter.create<LLVM::AddressOfOp>(
      loc,
      getTypeConverter()->getPointerType(globalType, global.getAddrSpace()),
      global.getSymNameAttr());
  Value stringStart = rewriter.create<LLVM::GEPOp>(
      loc, i8Ptr, globalType, globalPtr, ArrayRef<LLVM::GEPArg>{0, 0});
  Value stringLen =
      rewriter.create<LLVM::ConstantOp>(loc, llvmI64, formatStringSize);

  Value oneI32 = rewriter.create<LLVM::ConstantOp>(loc, llvmI32, 1);
  Value zeroI32 = rewriter.create<LLVM::ConstantOp>(loc, llvmI32, 0);

  auto appendFormatCall = rewriter.create<LLVM::CallOp>(
      loc, ocklAppendStringN,
      ValueRange{printfDesc, stringStart, stringLen,
                 adaptor.getArgs().empty() ? oneI32 : zeroI32});
  printfDesc = appendFormatCall.getResult();

  // __ockl_printf_append_args takes 7 values per append call.
  constexpr size_t argsPerAppend = 7;
  size_t nArgs = adaptor.getArgs().size();
  for (size_t group = 0; group < nArgs; group += argsPerAppend) {
    size_t bound = std::min(group + argsPerAppend, nArgs);
    size_t numArgsThisCall = bound - group;

    SmallVector<mlir::Value, 2 + argsPerAppend + 1> arguments;
    arguments.push_back(printfDesc);
    arguments.push_back(
        rewriter.create<LLVM::ConstantOp>(loc, llvmI32, numArgsThisCall));
    for (size_t i = group; i < bound; ++i) {
      Value arg = adaptor.getArgs()[i];
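      // The hostcall transports every argument in an i64 slot: non-f64 floats
      // are extended to f64 and bitcast, narrower integers are zero-extended.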
      if (auto floatType = dyn_cast<FloatType>(arg.getType())) {
        if (!floatType.isF64())
          arg = rewriter.create<LLVM::FPExtOp>(
              loc, typeConverter->convertType(rewriter.getF64Type()), arg);
        arg = rewriter.create<LLVM::BitcastOp>(loc, llvmI64, arg);
      }
      if (arg.getType().getIntOrFloatBitWidth() != 64)
        arg = rewriter.create<LLVM::ZExtOp>(loc, llvmI64, arg);

      arguments.push_back(arg);
    }
    // Pad out to 7 arguments since the hostcall always needs 7.
    for (size_t extra = numArgsThisCall; extra < argsPerAppend; ++extra) {
      arguments.push_back(zeroI64);
    }

    auto isLast = (bound == nArgs) ? oneI32 : zeroI32;
    arguments.push_back(isLast);
    auto call = rewriter.create<LLVM::CallOp>(loc, ocklAppendArgs, arguments);
    printfDesc = call.getResult();
  }

  rewriter.eraseOp(gpuPrintfOp);
  return success();
}
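
// Lowers gpu.printf to a call to a variadic `printf` declaration, passing
// the format string pointer followed by the converted arguments directly.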
LogicalResult GPUPrintfOpToLLVMCallLowering::matchAndRewrite(
    gpu::PrintfOp gpuPrintfOp, gpu::PrintfOpAdaptor adaptor,
    ConversionPatternRewriter &rewriter) const {
  Location loc = gpuPrintfOp->getLoc();

  mlir::Type llvmI8 = typeConverter->convertType(rewriter.getIntegerType(8));
  mlir::Type i8Ptr = getTypeConverter()->getPointerType(llvmI8, addressSpace);

  // Note: this is the GPUModule op, not the ModuleOp that surrounds it.
  // This ensures that global constants and declarations are placed within
  // the device code, not the host code.
  auto moduleOp = gpuPrintfOp->getParentOfType<gpu::GPUModuleOp>();

  auto printfType = LLVM::LLVMFunctionType::get(rewriter.getI32Type(), {i8Ptr},
                                                /*isVarArg=*/true);
  LLVM::LLVMFuncOp printfDecl =
      getOrDefineFunction(moduleOp, loc, rewriter, "printf", printfType);

  // Get a unique global name for the format.
  SmallString<16> stringConstName = getUniqueFormatGlobalName(moduleOp);

  llvm::SmallString<20> formatString(adaptor.getFormat());
  formatString.push_back('\0'); // Null terminate for C
  auto globalType =
      LLVM::LLVMArrayType::get(llvmI8, formatString.size_in_bytes());
  LLVM::GlobalOp global;
  {
    ConversionPatternRewriter::InsertionGuard guard(rewriter);
    rewriter.setInsertionPointToStart(moduleOp.getBody());
    global = rewriter.create<LLVM::GlobalOp>(
        loc, globalType,
        /*isConstant=*/true, LLVM::Linkage::Internal, stringConstName,
        rewriter.getStringAttr(formatString), /*alignment=*/0, addressSpace);
  }

  // Get a pointer to the format string's first element
  Value globalPtr = rewriter.create<LLVM::AddressOfOp>(
      loc,
      getTypeConverter()->getPointerType(globalType, global.getAddrSpace()),
      global.getSymNameAttr());
  Value stringStart = rewriter.create<LLVM::GEPOp>(
      loc, i8Ptr, globalType, globalPtr, ArrayRef<LLVM::GEPArg>{0, 0});

  // Construct arguments and function call
  auto argsRange = adaptor.getArgs();
  SmallVector<Value, 4> printfArgs;
  printfArgs.reserve(argsRange.size() + 1);
  printfArgs.push_back(stringStart);
  printfArgs.append(argsRange.begin(), argsRange.end());

  rewriter.create<LLVM::CallOp>(loc, printfDecl, printfArgs);
  rewriter.eraseOp(gpuPrintfOp);
  return success();
}
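
// Lowers gpu.printf to the `vprintf` device function (the CUDA-style
// interface), which takes the format string and a pointer to a stack buffer
// holding the packed, promoted arguments.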
LogicalResult GPUPrintfOpToVPrintfLowering::matchAndRewrite(
    gpu::PrintfOp gpuPrintfOp, gpu::PrintfOpAdaptor adaptor,
    ConversionPatternRewriter &rewriter) const {
  Location loc = gpuPrintfOp->getLoc();

  mlir::Type llvmI8 = typeConverter->convertType(rewriter.getIntegerType(8));
  mlir::Type i8Ptr = LLVM::LLVMPointerType::get(llvmI8);

  // Note: this is the GPUModule op, not the ModuleOp that surrounds it.
  // This ensures that global constants and declarations are placed within
  // the device code, not the host code.
  auto moduleOp = gpuPrintfOp->getParentOfType<gpu::GPUModuleOp>();

  auto vprintfType =
      LLVM::LLVMFunctionType::get(rewriter.getI32Type(), {i8Ptr, i8Ptr});
  LLVM::LLVMFuncOp vprintfDecl =
      getOrDefineFunction(moduleOp, loc, rewriter, "vprintf", vprintfType);

  // Get a unique global name for the format.
  SmallString<16> stringConstName = getUniqueFormatGlobalName(moduleOp);

  llvm::SmallString<20> formatString(adaptor.getFormat());
  formatString.push_back('\0'); // Null terminate for C
  auto globalType =
      LLVM::LLVMArrayType::get(llvmI8, formatString.size_in_bytes());
  LLVM::GlobalOp global;
  {
    ConversionPatternRewriter::InsertionGuard guard(rewriter);
    rewriter.setInsertionPointToStart(moduleOp.getBody());
    global = rewriter.create<LLVM::GlobalOp>(
        loc, globalType,
        /*isConstant=*/true, LLVM::Linkage::Internal, stringConstName,
        rewriter.getStringAttr(formatString), /*alignment=*/0);
  }

  // Get a pointer to the format string's first element
  Value globalPtr = rewriter.create<LLVM::AddressOfOp>(loc, global);
  Value stringStart = rewriter.create<LLVM::GEPOp>(
      loc, i8Ptr, globalPtr, ArrayRef<LLVM::GEPArg>{0, 0});
  SmallVector<Type> types;
  SmallVector<Value> args;
  // Promote and pack the arguments into a stack allocation.
  for (Value arg : adaptor.getArgs()) {
    Type type = arg.getType();
    Value promotedArg = arg;
    assert(type.isIntOrFloat());
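    // Mirror C vararg promotion: float arguments are widened to double
    // before being packed.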
    if (isa<FloatType>(type)) {
      type = rewriter.getF64Type();
      promotedArg = rewriter.create<LLVM::FPExtOp>(loc, type, arg);
    }
    types.push_back(type);
    args.push_back(promotedArg);
  }
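  // vprintf expects the arguments contiguously in memory: build a literal
  // struct type from the promoted argument types, alloca one instance, and
  // store each argument into its field.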
  Type structType =
      LLVM::LLVMStructType::getLiteral(gpuPrintfOp.getContext(), types);
  Type structPtrType = LLVM::LLVMPointerType::get(structType);
  Value one = rewriter.create<LLVM::ConstantOp>(loc, rewriter.getI64Type(),
                                                rewriter.getIndexAttr(1));
  Value tempAlloc = rewriter.create<LLVM::AllocaOp>(loc, structPtrType, one,
                                                    /*alignment=*/0);
  for (auto [index, arg] : llvm::enumerate(args)) {
    Value ptr = rewriter.create<LLVM::GEPOp>(
        loc, LLVM::LLVMPointerType::get(arg.getType()), tempAlloc,
        ArrayRef<LLVM::GEPArg>{0, index});
    rewriter.create<LLVM::StoreOp>(loc, arg, ptr);
  }
  tempAlloc = rewriter.create<LLVM::BitcastOp>(loc, i8Ptr, tempAlloc);
  std::array<Value, 2> printfArgs = {stringStart, tempAlloc};

  rewriter.create<LLVM::CallOp>(loc, vprintfDecl, printfArgs);
  rewriter.eraseOp(gpuPrintfOp);
  return success();
}

/// Unrolls op if it's operating on vectors.
LogicalResult impl::scalarizeVectorOp(Operation *op, ValueRange operands,
                                      ConversionPatternRewriter &rewriter,
                                      LLVMTypeConverter &converter) {
  TypeRange operandTypes(operands);
  if (llvm::none_of(operandTypes,
                    [](Type type) { return isa<VectorType>(type); })) {
    return rewriter.notifyMatchFailure(op, "expected vector operand");
  }
  if (op->getNumRegions() != 0 || op->getNumSuccessors() != 0)
    return rewriter.notifyMatchFailure(op, "expected no region/successor");
  if (op->getNumResults() != 1)
    return rewriter.notifyMatchFailure(op, "expected single result");
  VectorType vectorType = dyn_cast<VectorType>(op->getResult(0).getType());
  if (!vectorType)
    return rewriter.notifyMatchFailure(op, "expected vector result");
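
  // Apply the operation element-wise: extract each scalar (passing scalar
  // operands through unchanged), recreate the op on the scalars, and insert
  // each scalar result back into the result vector.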
  Location loc = op->getLoc();
  Value result = rewriter.create<LLVM::UndefOp>(loc, vectorType);
  Type indexType = converter.convertType(rewriter.getIndexType());
  StringAttr name = op->getName().getIdentifier();
  Type elementType = vectorType.getElementType();

  for (int64_t i = 0; i < vectorType.getNumElements(); ++i) {
    Value index = rewriter.create<LLVM::ConstantOp>(loc, indexType, i);
    auto extractElement = [&](Value operand) -> Value {
      if (!isa<VectorType>(operand.getType()))
        return operand;
      return rewriter.create<LLVM::ExtractElementOp>(loc, operand, index);
    };
    auto scalarOperands = llvm::map_to_vector(operands, extractElement);
    Operation *scalarOp =
        rewriter.create(loc, name, scalarOperands, elementType, op->getAttrs());
    result = rewriter.create<LLVM::InsertElementOp>(
        loc, result, scalarOp->getResult(0), index);
  }

  rewriter.replaceOp(op, result);
  return success();
}
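
/// Wraps the given numeric address space as an i64 integer attribute, which
/// is the form in which the LLVM lowering expects memref memory spaces.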
static IntegerAttr wrapNumericMemorySpace(MLIRContext *ctx, unsigned space) {
  return IntegerAttr::get(IntegerType::get(ctx, 64), space);
}
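
// Registers a type attribute conversion on `typeConverter` that rewrites
// gpu.address_space memory space attributes on memref types to the numeric
// address spaces produced by `mapping`.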
void mlir::populateGpuMemorySpaceAttributeConversions(
    TypeConverter &typeConverter, const MemorySpaceMapping &mapping) {
  typeConverter.addTypeAttributeConversion(
      [mapping](BaseMemRefType type, gpu::AddressSpaceAttr memorySpaceAttr) {
        gpu::AddressSpace memorySpace = memorySpaceAttr.getValue();
        unsigned addressSpace = mapping(memorySpace);
        return wrapNumericMemorySpace(memorySpaceAttr.getContext(),
                                      addressSpace);
      });
}
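
// A minimal usage sketch from a hypothetical conversion pass; the numeric
// values below are illustrative (they happen to match NVVM's global/shared/
// local address spaces), not something this file prescribes:
//
//   populateGpuMemorySpaceAttributeConversions(
//       typeConverter, [](gpu::AddressSpace space) -> unsigned {
//         switch (space) {
//         case gpu::AddressSpace::Global:
//           return 1;
//         case gpu::AddressSpace::Workgroup:
//           return 3;
//         case gpu::AddressSpace::Private:
//           return 5;
//         }
//         llvm_unreachable("unknown address space");
//       });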