[mlir][llvm] Ensure immediate usage in intrinsics

This commit changes intrinsics that have immarg parameter attributes to
model these parameters as attributes instead of operands. Using
operands only works if the operand is defined by an `llvm.mlir.constant`;
otherwise, the exported LLVM IR is invalid.
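
For example, a count-leading-zeros call that previously required
materializing its flag as a constant operand now carries the immediate as
an inherent attribute (mirroring the updated tests below):

  // Before: the immediate had to be produced by an llvm.mlir.constant.
  %false = llvm.mlir.constant(false) : i1
  %0 = "llvm.intr.ctlz"(%arg0, %false) : (i32, i1) -> i32

  // After: the immediate is an attribute on the intrinsic op.
  %0 = "llvm.intr.ctlz"(%arg0) <{is_zero_poison = false}> : (i32) -> i32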

Reviewed By: gysit

Differential Revision: https://reviews.llvm.org/D151692
Author: Christian Ulmann
Date:   2023-06-12 06:23:42 +00:00
Parent: d1ef99fe1c
Commit: 48b126e30b

27 changed files with 279 additions and 226 deletions


@@ -3458,11 +3458,9 @@ mlir::Value IntrinsicLibrary::genIsFPClass(mlir::Type resultType,
assert(args.size() == 1);
mlir::MLIRContext *context = builder.getContext();
mlir::IntegerType i1ty = mlir::IntegerType::get(context, 1);
mlir::IntegerType i32ty = mlir::IntegerType::get(context, 32);
mlir::Value test = builder.createIntegerConstant(loc, i32ty, fpclass);
mlir::Value isfpclass =
builder.create<mlir::LLVM::IsFPClass>(loc, i1ty, args[0], test);
builder.create<mlir::LLVM::IsFPClass>(loc, i1ty, args[0], fpclass);
return builder.createConvert(loc, resultType, isfpclass);
}


@@ -6,8 +6,7 @@ subroutine ieee_is_normal_f16(r)
use ieee_arithmetic
real(KIND=2) :: r
i = ieee_is_normal(r)
! CHECK: %[[test:.*]] = arith.constant 360 : i32
! CHECK: %[[l:.*]] = "llvm.intr.is.fpclass"(%{{.*}}, %[[test]]) : (f16, i32) -> i1
! CHECK: %[[l:.*]] = "llvm.intr.is.fpclass"(%{{.*}}) <{bit = 360 : i32}> : (f16) -> i1
! CHECK: fir.convert %[[l]] : (i1) -> !fir.logical<4>
end subroutine ieee_is_normal_f16
@@ -16,8 +15,7 @@ subroutine ieee_is_normal_bf16(r)
use ieee_arithmetic
real(KIND=3) :: r
i = ieee_is_normal(r)
! CHECK: %[[test:.*]] = arith.constant 360 : i32
! CHECK: %[[l:.*]] = "llvm.intr.is.fpclass"(%{{.*}}, %[[test]]) : (bf16, i32) -> i1
! CHECK: %[[l:.*]] = "llvm.intr.is.fpclass"(%{{.*}}) <{bit = 360 : i32}> : (bf16) -> i1
! CHECK: fir.convert %[[l]] : (i1) -> !fir.logical<4>
end subroutine ieee_is_normal_bf16
@@ -28,8 +26,7 @@ subroutine ieee_is_normal_f32(r)
use ieee_arithmetic
real :: r
i = ieee_is_normal(r)
! CHECK: %[[test:.*]] = arith.constant 360 : i32
! CHECK: %[[l:.*]] = "llvm.intr.is.fpclass"(%{{.*}}, %[[test]]) : (f32, i32) -> i1
! CHECK: %[[l:.*]] = "llvm.intr.is.fpclass"(%{{.*}}) <{bit = 360 : i32}> : (f32) -> i1
! CHECK: fir.convert %[[l]] : (i1) -> !fir.logical<4>
end subroutine ieee_is_normal_f32
@@ -38,8 +35,7 @@ subroutine ieee_is_normal_f64(r)
use ieee_arithmetic
real(KIND=8) :: r
i = ieee_is_normal(r)
! CHECK: %[[test:.*]] = arith.constant 360 : i32
! CHECK: %[[l:.*]] = "llvm.intr.is.fpclass"(%{{.*}}, %[[test]]) : (f64, i32) -> i1
! CHECK: %[[l:.*]] = "llvm.intr.is.fpclass"(%{{.*}}) <{bit = 360 : i32}> : (f64) -> i1
! CHECK: fir.convert %[[l]] : (i1) -> !fir.logical<4>
end subroutine ieee_is_normal_f64
@@ -48,8 +44,7 @@ subroutine ieee_is_normal_f80(r)
use ieee_arithmetic
real(KIND=10) :: r
i = ieee_is_normal(r)
! CHECK: %[[test:.*]] = arith.constant 360 : i32
! CHECK: %[[l:.*]] = "llvm.intr.is.fpclass"(%{{.*}}, %[[test]]) : (f80, i32) -> i1
! CHECK: %[[l:.*]] = "llvm.intr.is.fpclass"(%{{.*}}) <{bit = 360 : i32}> : (f80) -> i1
! CHECK: fir.convert %[[l]] : (i1) -> !fir.logical<4>
end subroutine ieee_is_normal_f80
@@ -58,7 +53,6 @@ subroutine ieee_is_normal_f128(r)
use ieee_arithmetic
real(KIND=16) :: r
i = ieee_is_normal(r)
! CHECK: %[[test:.*]] = arith.constant 360 : i32
! CHECK: %[[l:.*]] = "llvm.intr.is.fpclass"(%{{.*}}, %[[test]]) : (f128, i32) -> i1
! CHECK: %[[l:.*]] = "llvm.intr.is.fpclass"(%{{.*}}) <{bit = 360 : i32}> : (f128) -> i1
! CHECK: fir.convert %[[l]] : (i1) -> !fir.logical<4>
end subroutine ieee_is_normal_f128


@@ -5,8 +5,7 @@
subroutine isnan_f32(r)
real :: r
i = isnan(r)
! CHECK: %[[test:.*]] = arith.constant 3 : i32
! CHECK: %[[l:.*]] = "llvm.intr.is.fpclass"(%{{.*}}, %[[test]]) : (f32, i32) -> i1
! CHECK: %[[l:.*]] = "llvm.intr.is.fpclass"(%{{.*}}) <{bit = 3 : i32}> : (f32) -> i1
! CHECK: fir.convert %[[l]] : (i1) -> !fir.logical<4>
end subroutine isnan_f32
@@ -15,8 +14,7 @@ subroutine ieee_is_nan_f32(r)
use ieee_arithmetic
real :: r
i = ieee_is_nan(r)
! CHECK: %[[test:.*]] = arith.constant 3 : i32
! CHECK: %[[l:.*]] = "llvm.intr.is.fpclass"(%{{.*}}, %[[test]]) : (f32, i32) -> i1
! CHECK: %[[l:.*]] = "llvm.intr.is.fpclass"(%{{.*}}) <{bit = 3 : i32}> : (f32) -> i1
! CHECK: fir.convert %[[l]] : (i1) -> !fir.logical<4>
end subroutine ieee_is_nan_f32
@@ -24,8 +22,7 @@ end subroutine ieee_is_nan_f32
subroutine isnan_f64(r)
real(KIND=8) :: r
i = isnan(r)
! CHECK: %[[test:.*]] = arith.constant 3 : i32
! CHECK: %[[l:.*]] = "llvm.intr.is.fpclass"(%{{.*}}, %[[test]]) : (f64, i32) -> i1
! CHECK: %[[l:.*]] = "llvm.intr.is.fpclass"(%{{.*}}) <{bit = 3 : i32}> : (f64) -> i1
! CHECK: fir.convert %[[l]] : (i1) -> !fir.logical<4>
end subroutine isnan_f64
@@ -34,8 +31,7 @@ subroutine ieee_is_nan_f64(r)
use ieee_arithmetic
real(KIND=8) :: r
i = ieee_is_nan(r)
! CHECK: %[[test:.*]] = arith.constant 3 : i32
! CHECK: %[[l:.*]] = "llvm.intr.is.fpclass"(%{{.*}}, %[[test]]) : (f64, i32) -> i1
! CHECK: %[[l:.*]] = "llvm.intr.is.fpclass"(%{{.*}}) <{bit = 3 : i32}> : (f64) -> i1
! CHECK: fir.convert %[[l]] : (i1) -> !fir.logical<4>
end subroutine ieee_is_nan_f64
@@ -43,8 +39,7 @@ end subroutine ieee_is_nan_f64
subroutine isnan_f80(r)
real(KIND=10) :: r
i = isnan(r)
! CHECK: %[[test:.*]] = arith.constant 3 : i32
! CHECK: %[[l:.*]] = "llvm.intr.is.fpclass"(%{{.*}}, %[[test]]) : (f80, i32) -> i1
! CHECK: %[[l:.*]] = "llvm.intr.is.fpclass"(%{{.*}}) <{bit = 3 : i32}> : (f80) -> i1
! CHECK: fir.convert %[[l]] : (i1) -> !fir.logical<4>
end subroutine isnan_f80
@@ -53,8 +48,7 @@ subroutine ieee_is_nan_f80(r)
use ieee_arithmetic
real(KIND=10) :: r
i = ieee_is_nan(r)
! CHECK: %[[test:.*]] = arith.constant 3 : i32
! CHECK: %[[l:.*]] = "llvm.intr.is.fpclass"(%{{.*}}, %[[test]]) : (f80, i32) -> i1
! CHECK: %[[l:.*]] = "llvm.intr.is.fpclass"(%{{.*}}) <{bit = 3 : i32}> : (f80) -> i1
! CHECK: fir.convert %[[l]] : (i1) -> !fir.logical<4>
end subroutine ieee_is_nan_f80
@@ -62,8 +56,7 @@ end subroutine ieee_is_nan_f80
subroutine isnan_f128(r)
real(KIND=16) :: r
i = isnan(r)
! CHECK: %[[test:.*]] = arith.constant 3 : i32
! CHECK: %[[l:.*]] = "llvm.intr.is.fpclass"(%{{.*}}, %[[test]]) : (f128, i32) -> i1
! CHECK: %[[l:.*]] = "llvm.intr.is.fpclass"(%{{.*}}) <{bit = 3 : i32}> : (f128) -> i1
! CHECK: fir.convert %[[l]] : (i1) -> !fir.logical<4>
end subroutine isnan_f128
@@ -72,7 +65,6 @@ subroutine ieee_is_nan_f128(r)
use ieee_arithmetic
real(KIND=16) :: r
i = ieee_is_nan(r)
! CHECK: %[[test:.*]] = arith.constant 3 : i32
! CHECK: %[[l:.*]] = "llvm.intr.is.fpclass"(%{{.*}}, %[[test]]) : (f128, i32) -> i1
! CHECK: %[[l:.*]] = "llvm.intr.is.fpclass"(%{{.*}}) <{bit = 3 : i32}> : (f128) -> i1
! CHECK: fir.convert %[[l]] : (i1) -> !fir.logical<4>
end subroutine ieee_is_nan_f128


@@ -88,16 +88,52 @@ class LLVM_CountZerosIntrOp<string func, list<Trait> traits = []> :
LLVM_OneResultIntrOp<func, [], [0],
!listconcat([Pure], traits)> {
let arguments = (ins LLVM_ScalarOrVectorOf<AnySignlessInteger>:$in,
I1:$zero_undefined);
I1Attr:$is_zero_poison);
string mlirBuilder = [{
auto op = $_builder.create<$_qualCppClassName>($_location,
$_resultType, $in, $_int_attr($is_zero_poison));
$res = op;
}];
string llvmBuilder = [{
auto *inst = createIntrinsicCall(
builder, llvm::Intrinsic::}] # llvmEnumName # [{,
{$in, builder.getInt1(op.getIsZeroPoison())}, }]
# declTypes # [{);
$res = inst;
}];
}
def LLVM_AbsOp : LLVM_OneResultIntrOp<"abs", [], [0], [Pure]> {
let arguments = (ins LLVM_ScalarOrVectorOf<AnySignlessInteger>:$in,
I1:$is_int_min_poison);
I1Attr:$is_int_min_poison);
string mlirBuilder = [{
auto op = $_builder.create<$_qualCppClassName>($_location,
$_resultType, $in, $_int_attr($is_int_min_poison));
$res = op;
}];
string llvmBuilder = [{
auto *inst = createIntrinsicCall(
builder, llvm::Intrinsic::}] # llvmEnumName # [{,
{$in, builder.getInt1(op.getIsIntMinPoison())}, }]
# declTypes # [{);
$res = inst;
}];
}
def LLVM_IsFPClass : LLVM_OneResultIntrOp<"is.fpclass", [], [0], [Pure]> {
let arguments = (ins LLVM_ScalarOrVectorOf<LLVM_AnyFloat>:$in, I32:$bit);
let arguments = (ins LLVM_ScalarOrVectorOf<LLVM_AnyFloat>:$in, I32Attr:$bit);
string mlirBuilder = [{
auto op = $_builder.create<$_qualCppClassName>($_location,
$_resultType, $in, $_int_attr($bit));
$res = op;
}];
string llvmBuilder = [{
auto *inst = createIntrinsicCall(
builder, llvm::Intrinsic::}] # llvmEnumName # [{,
{$in, builder.getInt32(op.getBit())},
}] # declTypes # [{);
$res = inst;
}];
}
def LLVM_CopySignOp : LLVM_BinarySameArgsIntrOpF<"copysign">;
@@ -113,7 +149,19 @@ def LLVM_Log10Op : LLVM_UnaryIntrOpF<"log10">;
def LLVM_Log2Op : LLVM_UnaryIntrOpF<"log2">;
def LLVM_LogOp : LLVM_UnaryIntrOpF<"log">;
def LLVM_Prefetch : LLVM_ZeroResultIntrOp<"prefetch", [0]> {
let arguments = (ins LLVM_AnyPointer:$addr, I32:$rw, I32:$hint, I32:$cache);
let arguments = (ins LLVM_AnyPointer:$addr, I32Attr:$rw, I32Attr:$hint, I32Attr:$cache);
string mlirBuilder = [{
$_op = $_builder.create<$_qualCppClassName>($_location,
$addr, $_int_attr($rw), $_int_attr($hint), $_int_attr($cache));
}];
string llvmBuilder = [{
createIntrinsicCall(
builder, llvm::Intrinsic::}] # llvmEnumName # [{,
{$addr, builder.getInt32(op.getRw()),
builder.getInt32(op.getHint()),
builder.getInt32(op.getCache())},
}] # declTypes # [{);
}];
}
def LLVM_SinOp : LLVM_UnaryIntrOpF<"sin">;
def LLVM_RoundEvenOp : LLVM_UnaryIntrOpF<"roundeven">;
@@ -163,37 +211,108 @@ class LLVM_MemcpyIntrOpBase<string name> :
/*requiresAliasAnalysis=*/1> {
dag args = (ins Arg<LLVM_AnyPointer,"",[MemWrite]>:$dst,
Arg<LLVM_AnyPointer,"",[MemRead]>:$src,
AnySignlessInteger:$len, I1:$isVolatile);
AnySignlessInteger:$len, I1Attr:$isVolatile);
// Append the alias attributes defined by LLVM_IntrOpBase.
let arguments = !con(args, aliasAttrs);
let builders = [
OpBuilder<(ins "Value":$dst, "Value":$src, "Value":$len,
"Value":$isVolatile), [{
"bool":$isVolatile), [{
build($_builder, $_state, dst, src, len,
$_builder.getBoolAttr(isVolatile));
}]>,
OpBuilder<(ins "Value":$dst, "Value":$src, "Value":$len,
"IntegerAttr":$isVolatile), [{
build($_builder, $_state, dst, src, len, isVolatile,
/*access_groups=*/nullptr, /*alias_scopes=*/nullptr,
/*noalias_scopes=*/nullptr, /*tbaa=*/nullptr);
}]
>];
}]>
];
string mlirBuilder = [{
$_op = $_builder.create<$_qualCppClassName>($_location,
$dst, $src, $len, $_int_attr($isVolatile));
}];
string llvmBuilder = [{
auto *inst = createIntrinsicCall(
builder, llvm::Intrinsic::}] # llvmEnumName # [{,
{$dst, $src, $len,
builder.getInt1(op.getIsVolatile())},
}] # declTypes # [{ ); }]
# setAccessGroupsMetadataCode
# setAliasAnalysisMetadataCode;
}
def LLVM_MemcpyOp : LLVM_MemcpyIntrOpBase<"memcpy">;
def LLVM_MemcpyInlineOp : LLVM_MemcpyIntrOpBase<"memcpy.inline">;
def LLVM_MemmoveOp : LLVM_MemcpyIntrOpBase<"memmove">;
def LLVM_MemcpyInlineOp :
LLVM_ZeroResultIntrOp<"memcpy.inline", [0, 1], [],
/*requiresAccessGroup=*/1,
/*requiresAliasAnalysis=*/1> {
dag args = (ins Arg<LLVM_AnyPointer,"",[MemWrite]>:$dst,
Arg<LLVM_AnyPointer,"",[MemRead]>:$src,
APIntAttr:$len, I1Attr:$isVolatile);
// Append the alias attributes defined by LLVM_IntrOpBase.
let arguments = !con(args, aliasAttrs);
let builders = [
OpBuilder<(ins "Value":$dst, "Value":$src, "IntegerAttr":$len,
"bool":$isVolatile), [{
build($_builder, $_state, dst, src, len,
$_builder.getBoolAttr(isVolatile));
}]>,
OpBuilder<(ins "Value":$dst, "Value":$src, "IntegerAttr":$len,
"IntegerAttr":$isVolatile), [{
build($_builder, $_state, dst, src, len, isVolatile,
/*access_groups=*/nullptr, /*alias_scopes=*/nullptr,
/*noalias_scopes=*/nullptr, /*tbaa=*/nullptr);
}]>
];
string mlirBuilder = [{
$_op = $_builder.create<$_qualCppClassName>($_location,
$dst, $src, $_int_attr($len), $_int_attr($isVolatile));
}];
string llvmBuilder = [{
auto *inst = createIntrinsicCall(
builder, llvm::Intrinsic::}] # llvmEnumName # [{,
{$dst, $src, builder.getInt(op.getLen()),
builder.getInt1(op.getIsVolatile())}, { }]
# !interleave(!listconcat(declTypeList, [
[{ moduleTranslation.convertType(op.getLenAttr().getType()) }]
]), ", ") # [{ }); }]
# setAccessGroupsMetadataCode
# setAliasAnalysisMetadataCode;
}
def LLVM_MemsetOp : LLVM_ZeroResultIntrOp<"memset", [0, 2], [],
/*requiresAccessGroup=*/1, /*requiresAliasAnalysis=*/1> {
dag args = (ins Arg<LLVM_AnyPointer,"",[MemWrite]>:$dst,
I8:$val, AnySignlessInteger:$len, I1:$isVolatile);
I8:$val, AnySignlessInteger:$len, I1Attr:$isVolatile);
// Append the alias attributes defined by LLVM_IntrOpBase.
let arguments = !con(args, aliasAttrs);
let builders = [
OpBuilder<(ins "Value":$dst, "Value":$val, "Value":$len,
"Value":$isVolatile), [{
"bool":$isVolatile), [{
build($_builder, $_state, dst, val, len,
$_builder.getBoolAttr(isVolatile));
}]>,
OpBuilder<(ins "Value":$dst, "Value":$val, "Value":$len,
"IntegerAttr":$isVolatile), [{
build($_builder, $_state, dst, val, len, isVolatile,
/*access_groups=*/nullptr, /*alias_scopes=*/nullptr,
/*noalias_scopes=*/nullptr, /*tbaa=*/nullptr);
}]
>];
}]>
];
string mlirBuilder = [{
$_op = $_builder.create<$_qualCppClassName>($_location,
$dst, $val, $len, $_int_attr($isVolatile));
}];
string llvmBuilder = [{
auto *inst = createIntrinsicCall(
builder, llvm::Intrinsic::}] # llvmEnumName # [{,
{$dst, $val, $len,
builder.getInt1(op.getIsVolatile())},
}] # declTypes # [{ ); }]
# setAccessGroupsMetadataCode
# setAliasAnalysisMetadataCode;
}
def LLVM_NoAliasScopeDeclOp


@@ -331,16 +331,16 @@ class LLVM_IntrOpBase<Dialect dialect, string opName, string enumName,
LLVM_IntrPatterns.structResult,
LLVM_IntrPatterns.result);
string llvmEnumName = enumName;
list<string> declTypeList = !listconcat(
ListIntSubst<resultPattern, overloadedResults>.lst,
ListIntSubst<LLVM_IntrPatterns.operand,
overloadedOperands>.lst);
string declTypes = [{ { }] # !interleave(declTypeList, ", ") # [{ } }];
let llvmBuilder = [{
llvm::Module *module = builder.GetInsertBlock()->getModule();
llvm::Function *fn = llvm::Intrinsic::getDeclaration(
module,
llvm::Intrinsic::}] # enumName # [{,
{ }] # !interleave(!listconcat(
ListIntSubst<resultPattern, overloadedResults>.lst,
ListIntSubst<LLVM_IntrPatterns.operand,
overloadedOperands>.lst), ", ") # [{
});
llvm::Intrinsic::}] # enumName # [{,}] # declTypes # [{);
auto operands = moduleTranslation.lookupValues(opInst.getOperands());
}] # [{
auto *inst = builder.CreateCall(fn, operands);


@@ -366,10 +366,10 @@ llvm::Constant *getLLVMConstant(llvm::Type *llvmType, Attribute attr,
const ModuleTranslation &moduleTranslation);
/// Creates a call to an LLVM IR intrinsic function with the given arguments.
llvm::Value *createIntrinsicCall(llvm::IRBuilderBase &builder,
llvm::Intrinsic::ID intrinsic,
ArrayRef<llvm::Value *> args = {},
ArrayRef<llvm::Type *> tys = {});
llvm::CallInst *createIntrinsicCall(llvm::IRBuilderBase &builder,
llvm::Intrinsic::ID intrinsic,
ArrayRef<llvm::Value *> args = {},
ArrayRef<llvm::Type *> tys = {});
} // namespace detail
} // namespace LLVM


@@ -275,8 +275,6 @@ LogicalResult ConvertToLLVMPattern::copyUnrankedDescriptors(
sizes);
// Get frequently used types.
MLIRContext *context = builder.getContext();
auto i1Type = IntegerType::get(context, 1);
Type indexType = getTypeConverter()->getIndexType();
// Find the malloc and free, or declare them if necessary.
@@ -289,10 +287,6 @@ LogicalResult ConvertToLLVMPattern::copyUnrankedDescriptors(
freeFunc = LLVM::lookupOrCreateFreeFn(
module, getTypeConverter()->useOpaquePointers());
// Initialize shared constants.
Value zero =
builder.create<LLVM::ConstantOp>(loc, i1Type, builder.getBoolAttr(false));
unsigned unrankedMemrefPos = 0;
for (unsigned i = 0, e = operands.size(); i < e; ++i) {
Type type = origTypes[i];
@@ -311,7 +305,7 @@ LogicalResult ConvertToLLVMPattern::copyUnrankedDescriptors(
allocationSize,
/*alignment=*/0);
Value source = desc.memRefDescPtr(builder, loc);
builder.create<LLVM::MemcpyOp>(loc, memory, source, allocationSize, zero);
builder.create<LLVM::MemcpyOp>(loc, memory, source, allocationSize, false);
if (!toDynamic)
builder.create<LLVM::CallOp>(loc, freeFunc, source);


@@ -77,12 +77,10 @@ struct IntOpWithFlagLowering : public ConvertOpToLLVMPattern<MathOp> {
auto loc = op.getLoc();
auto resultType = op.getResult().getType();
auto boolZero = rewriter.getBoolAttr(false);
if (!isa<LLVM::LLVMArrayType>(operandType)) {
LLVM::ConstantOp zero = rewriter.create<LLVM::ConstantOp>(loc, boolZero);
rewriter.replaceOpWithNewOp<LLVMOp>(op, resultType, adaptor.getOperand(),
zero);
false);
return success();
}
@@ -93,10 +91,8 @@ struct IntOpWithFlagLowering : public ConvertOpToLLVMPattern<MathOp> {
return LLVM::detail::handleMultidimensionalVectors(
op.getOperation(), adaptor.getOperands(), *this->getTypeConverter(),
[&](Type llvm1DVectorTy, ValueRange operands) {
LLVM::ConstantOp zero =
rewriter.create<LLVM::ConstantOp>(loc, boolZero);
return rewriter.create<LLVMOp>(loc, llvm1DVectorTy, operands[0],
zero);
false);
},
rewriter);
}
@@ -105,7 +101,8 @@ struct IntOpWithFlagLowering : public ConvertOpToLLVMPattern<MathOp> {
using CountLeadingZerosOpLowering =
IntOpWithFlagLowering<math::CountLeadingZerosOp, LLVM::CountLeadingZerosOp>;
using CountTrailingZerosOpLowering =
IntOpWithFlagLowering<math::CountTrailingZerosOp, LLVM::CountTrailingZerosOp>;
IntOpWithFlagLowering<math::CountTrailingZerosOp,
LLVM::CountTrailingZerosOp>;
using AbsIOpLowering = IntOpWithFlagLowering<math::AbsIOp, LLVM::AbsOp>;
// A `expm1` is converted into `exp - 1`.


@@ -217,8 +217,6 @@ struct ReallocOpLoweringBase : public AllocationOpLLVMLowering {
allocateBuffer(rewriter, loc, dstByteSize, op);
// Copy the data from the old buffer to the new buffer.
Value srcAlignedPtr = desc.alignedPtr(rewriter, loc);
Value isVolatile =
rewriter.create<LLVM::ConstantOp>(loc, rewriter.getBoolAttr(false));
auto toVoidPtr = [&](Value ptr) -> Value {
if (getTypeConverter()->useOpaquePointers())
return ptr;
@@ -226,7 +224,7 @@ struct ReallocOpLoweringBase : public AllocationOpLLVMLowering {
};
rewriter.create<LLVM::MemcpyOp>(loc, toVoidPtr(dstAlignedPtr),
toVoidPtr(srcAlignedPtr), srcByteSize,
isVolatile);
/*isVolatile=*/false);
// Deallocate the old buffer.
LLVM::LLVMFuncOp freeFunc =
getFreeFn(getTypeConverter(), op->getParentOfType<ModuleOp>());
@@ -804,14 +802,10 @@ struct PrefetchOpLowering : public LoadStoreOpLowering<memref::PrefetchOp> {
adaptor.getIndices(), rewriter);
// Replace with llvm.prefetch.
auto llvmI32Type = typeConverter->convertType(rewriter.getIntegerType(32));
auto isWrite = rewriter.create<LLVM::ConstantOp>(loc, llvmI32Type,
prefetchOp.getIsWrite());
auto localityHint = rewriter.create<LLVM::ConstantOp>(
loc, llvmI32Type, prefetchOp.getLocalityHint());
auto isData = rewriter.create<LLVM::ConstantOp>(
loc, llvmI32Type, prefetchOp.getIsDataCache());
IntegerAttr isWrite = rewriter.getI32IntegerAttr(prefetchOp.getIsWrite());
IntegerAttr localityHint = prefetchOp.getLocalityHintAttr();
IntegerAttr isData =
rewriter.getI32IntegerAttr(prefetchOp.getIsDataCache());
rewriter.replaceOpWithNewOp<LLVM::Prefetch>(prefetchOp, dataPtr, isWrite,
localityHint, isData);
return success();
@@ -974,10 +968,8 @@ struct MemRefCopyOpLowering : public ConvertOpToLLVMPattern<memref::CopyOp> {
Value targetOffset = targetDesc.offset(rewriter, loc);
Value targetPtr = rewriter.create<LLVM::GEPOp>(
loc, targetBasePtr.getType(), elementType, targetBasePtr, targetOffset);
Value isVolatile =
rewriter.create<LLVM::ConstantOp>(loc, rewriter.getBoolAttr(false));
rewriter.create<LLVM::MemcpyOp>(loc, targetPtr, srcPtr, totalSize,
isVolatile);
/*isVolatile=*/false);
rewriter.eraseOp(op);
return success();
@@ -1178,11 +1170,8 @@ struct MemorySpaceCastOpLowering
loc, getIndexType(), rewriter.getIndexAttr(bytesToSkip));
Value copySize = rewriter.create<LLVM::SubOp>(
loc, getIndexType(), resultUnderlyingSize, bytesToSkipConst);
Type llvmBool = typeConverter->convertType(rewriter.getI1Type());
Value nonVolatile = rewriter.create<LLVM::ConstantOp>(
loc, llvmBool, rewriter.getBoolAttr(false));
rewriter.create<LLVM::MemcpyOp>(loc, resultIndexVals, sourceIndexVals,
copySize, nonVolatile);
copySize, /*isVolatile=*/false);
rewriter.replaceOp(op, ValueRange{result});
return success();


@@ -70,11 +70,7 @@ static unsigned calculateGlobalIndex(spirv::GlobalVariableOp op) {
/// Copies the given number of bytes from src to dst pointers.
static void copy(Location loc, Value dst, Value src, Value size,
OpBuilder &builder) {
MLIRContext *context = builder.getContext();
auto llvmI1Type = IntegerType::get(context, 1);
Value isVolatile = builder.create<LLVM::ConstantOp>(
loc, llvmI1Type, builder.getBoolAttr(false));
builder.create<LLVM::MemcpyOp>(loc, dst, src, size, isVolatile);
builder.create<LLVM::MemcpyOp>(loc, dst, src, size, /*isVolatile=*/false);
}
/// Encodes the binding and descriptor set numbers into a new symbolic name.
@@ -284,7 +280,6 @@ class GPULaunchLowering : public ConvertOpToLLVMPattern<gpu::LaunchFuncOp> {
class LowerHostCodeToLLVM
: public impl::LowerHostCodeToLLVMPassBase<LowerHostCodeToLLVM> {
public:
using Base::Base;
void runOnOperation() override {


@@ -207,9 +207,8 @@ static Value handleByValArgumentInit(OpBuilder &builder, Location loc,
// Copy the pointee to the newly allocated value.
Value copySize = builder.create<LLVM::ConstantOp>(
loc, builder.getI64Type(), builder.getI64IntegerAttr(elementTypeSize));
Value isVolatile = builder.create<LLVM::ConstantOp>(
loc, builder.getI1Type(), builder.getBoolAttr(false));
builder.create<LLVM::MemcpyOp>(loc, allocaOp, argument, copySize, isVolatile);
builder.create<LLVM::MemcpyOp>(loc, allocaOp, argument, copySize,
/*isVolatile=*/false);
return allocaOp;
}


@@ -588,7 +588,7 @@ mlir::LLVM::detail::getTopologicallySortedBlocks(Region &region) {
return blocks;
}
llvm::Value *mlir::LLVM::detail::createIntrinsicCall(
llvm::CallInst *mlir::LLVM::detail::createIntrinsicCall(
llvm::IRBuilderBase &builder, llvm::Intrinsic::ID intrinsic,
ArrayRef<llvm::Value *> args, ArrayRef<llvm::Type *> tys) {
llvm::Module *module = builder.GetInsertBlock()->getModule();


@@ -131,10 +131,9 @@ func.func @return_var_memref_caller(%arg0: memref<4x3xf32>) {
// CHECK: %[[DOUBLE_RANK_INC:.*]] = llvm.add %[[DOUBLE_RANK]], %[[ONE]]
// CHECK: %[[TABLES_SIZE:.*]] = llvm.mul %[[DOUBLE_RANK_INC]], %[[IDX_SIZE]]
// CHECK: %[[ALLOC_SIZE:.*]] = llvm.add %[[DOUBLE_PTR_SIZE]], %[[TABLES_SIZE]]
// CHECK: %[[FALSE:.*]] = llvm.mlir.constant(false)
// CHECK: %[[ALLOCA:.*]] = llvm.alloca %[[ALLOC_SIZE]] x i8
// CHECK: %[[SOURCE:.*]] = llvm.extractvalue %[[CALL_RES]][1]
// CHECK: "llvm.intr.memcpy"(%[[ALLOCA]], %[[SOURCE]], %[[ALLOC_SIZE]], %[[FALSE]])
// CHECK: "llvm.intr.memcpy"(%[[ALLOCA]], %[[SOURCE]], %[[ALLOC_SIZE]]) <{isVolatile = false}>
// CHECK: llvm.call @free(%[[SOURCE]])
// CHECK: %[[DESC:.*]] = llvm.mlir.undef : !llvm.struct<(i64, ptr)>
// CHECK: %[[RANK:.*]] = llvm.extractvalue %[[CALL_RES]][0] : !llvm.struct<(i64, ptr)>
@@ -164,9 +163,8 @@ func.func @return_var_memref(%arg0: memref<4x3xf32>) -> memref<*xf32> attributes
// CHECK: %[[DOUBLE_RANK_INC:.*]] = llvm.add %[[DOUBLE_RANK]], %[[ONE]]
// CHECK: %[[TABLES_SIZE:.*]] = llvm.mul %[[DOUBLE_RANK_INC]], %[[IDX_SIZE]]
// CHECK: %[[ALLOC_SIZE:.*]] = llvm.add %[[DOUBLE_PTR_SIZE]], %[[TABLES_SIZE]]
// CHECK: %[[FALSE:.*]] = llvm.mlir.constant(false)
// CHECK: %[[ALLOCATED:.*]] = llvm.call @malloc(%[[ALLOC_SIZE]])
// CHECK: "llvm.intr.memcpy"(%[[ALLOCATED]], %[[ALLOCA]], %[[ALLOC_SIZE]], %[[FALSE]])
// CHECK: "llvm.intr.memcpy"(%[[ALLOCATED]], %[[ALLOCA]], %[[ALLOC_SIZE]]) <{isVolatile = false}>
// CHECK: %[[NEW_DESC:.*]] = llvm.mlir.undef : !llvm.struct<(i64, ptr)>
// CHECK: %[[NEW_DESC_1:.*]] = llvm.insertvalue %[[RANK]], %[[NEW_DESC]][0]
// CHECK: %[[NEW_DESC_2:.*]] = llvm.insertvalue %[[ALLOCATED]], %[[NEW_DESC_1]][1]
@@ -190,7 +188,7 @@ func.func @return_two_var_memref_caller(%arg0: memref<4x3xf32>) {
// CHECK: %[[ALLOCA_1:.*]] = llvm.alloca %{{.*}} x i8
// CHECK: %[[SOURCE_1:.*]] = llvm.extractvalue %[[RES_1:.*]][1] : ![[DESC_TYPE:.*>]]
// CHECK: "llvm.intr.memcpy"(%[[ALLOCA_1]], %[[SOURCE_1]], %{{.*}}, %[[FALSE:.*]])
// CHECK: "llvm.intr.memcpy"(%[[ALLOCA_1]], %[[SOURCE_1]], %{{.*}}) <{isVolatile = false}>
// CHECK: llvm.call @free(%[[SOURCE_1]])
// CHECK: %[[DESC_1:.*]] = llvm.mlir.undef : ![[DESC_TYPE]]
// CHECK: %[[DESC_11:.*]] = llvm.insertvalue %{{.*}}, %[[DESC_1]][0]
@@ -198,7 +196,7 @@ func.func @return_two_var_memref_caller(%arg0: memref<4x3xf32>) {
// CHECK: %[[ALLOCA_2:.*]] = llvm.alloca %{{.*}} x i8
// CHECK: %[[SOURCE_2:.*]] = llvm.extractvalue %[[RES_2:.*]][1]
// CHECK: "llvm.intr.memcpy"(%[[ALLOCA_2]], %[[SOURCE_2]], %{{.*}}, %[[FALSE]])
// CHECK: "llvm.intr.memcpy"(%[[ALLOCA_2]], %[[SOURCE_2]], %{{.*}}) <{isVolatile = false}>
// CHECK: llvm.call @free(%[[SOURCE_2]])
// CHECK: %[[DESC_2:.*]] = llvm.mlir.undef : ![[DESC_TYPE]]
// CHECK: %[[DESC_21:.*]] = llvm.insertvalue %{{.*}}, %[[DESC_2]][0]
@@ -220,13 +218,13 @@ func.func @return_two_var_memref(%arg0: memref<4x3xf32>) -> (memref<*xf32>, memr
// convention requires the caller to free them and the caller cannot know
// whether they are the same value or not.
// CHECK: %[[ALLOCATED_1:.*]] = llvm.call @malloc(%{{.*}})
// CHECK: "llvm.intr.memcpy"(%[[ALLOCATED_1]], %[[ALLOCA]], %{{.*}}, %[[FALSE:.*]])
// CHECK: "llvm.intr.memcpy"(%[[ALLOCATED_1]], %[[ALLOCA]], %{{.*}}) <{isVolatile = false}>
// CHECK: %[[RES_1:.*]] = llvm.mlir.undef
// CHECK: %[[RES_11:.*]] = llvm.insertvalue %{{.*}}, %[[RES_1]][0]
// CHECK: %[[RES_12:.*]] = llvm.insertvalue %[[ALLOCATED_1]], %[[RES_11]][1]
// CHECK: %[[ALLOCATED_2:.*]] = llvm.call @malloc(%{{.*}})
// CHECK: "llvm.intr.memcpy"(%[[ALLOCATED_2]], %[[ALLOCA]], %{{.*}}, %[[FALSE]])
// CHECK: "llvm.intr.memcpy"(%[[ALLOCATED_2]], %[[ALLOCA]], %{{.*}}) <{isVolatile = false}>
// CHECK: %[[RES_2:.*]] = llvm.mlir.undef
// CHECK: %[[RES_21:.*]] = llvm.insertvalue %{{.*}}, %[[RES_2]][0]
// CHECK: %[[RES_22:.*]] = llvm.insertvalue %[[ALLOCATED_2]], %[[RES_21]][1]


@@ -16,8 +16,7 @@ func.func @ops(%arg0: f32, %arg1: f32, %arg2: i32, %arg3: i32, %arg4: f64) {
// -----
func.func @absi(%arg0: i32) -> i32 {
// CHECK: %[[FALSE:.*]] = llvm.mlir.constant(false
// CHECK: = "llvm.intr.abs"(%{{.*}}, %[[FALSE]]) : (i32, i1) -> i32
// CHECK: = "llvm.intr.abs"(%{{.*}}) <{is_int_min_poison = false}> : (i32) -> i32
%0 = math.absi %arg0 : i32
return %0 : i32
}
@@ -147,8 +146,7 @@ func.func @sine(%arg0 : f32) {
// CHECK-LABEL: func @ctlz(
// CHECK-SAME: i32
func.func @ctlz(%arg0 : i32) {
// CHECK: %[[ZERO:.+]] = llvm.mlir.constant(false) : i1
// CHECK: "llvm.intr.ctlz"(%arg0, %[[ZERO]]) : (i32, i1) -> i32
// CHECK: "llvm.intr.ctlz"(%arg0) <{is_zero_poison = false}> : (i32) -> i32
%0 = math.ctlz %arg0 : i32
func.return
}
@@ -158,8 +156,7 @@ func.func @ctlz(%arg0 : i32) {
// CHECK-LABEL: func @cttz(
// CHECK-SAME: i32
func.func @cttz(%arg0 : i32) {
// CHECK: %[[ZERO:.+]] = llvm.mlir.constant(false) : i1
// CHECK: "llvm.intr.cttz"(%arg0, %[[ZERO]]) : (i32, i1) -> i32
// CHECK: "llvm.intr.cttz"(%arg0) <{is_zero_poison = false}> : (i32) -> i32
%0 = math.cttz %arg0 : i32
func.return
}
@@ -169,8 +166,7 @@ func.func @cttz(%arg0 : i32) {
// CHECK-LABEL: func @cttz_vec(
// CHECK-SAME: i32
func.func @cttz_vec(%arg0 : vector<4xi32>) {
// CHECK: %[[ZERO:.+]] = llvm.mlir.constant(false) : i1
// CHECK: "llvm.intr.cttz"(%arg0, %[[ZERO]]) : (vector<4xi32>, i1) -> vector<4xi32>
// CHECK: "llvm.intr.cttz"(%arg0) <{is_zero_poison = false}> : (vector<4xi32>) -> vector<4xi32>
%0 = math.cttz %arg0 : vector<4xi32>
func.return
}


@@ -212,20 +212,11 @@ func.func @prefetch(%A : memref<?x?xf32>, %i : index, %j : index) {
// CHECK-NEXT: %[[offI:.*]] = llvm.mul %[[I]], %[[st0]] : i64
// CHECK-NEXT: %[[off1:.*]] = llvm.add %[[offI]], %[[J]] : i64
// CHECK-NEXT: %[[addr:.*]] = llvm.getelementptr %[[ptr]][%[[off1]]] : (!llvm.ptr, i64) -> !llvm.ptr, f32
// CHECK-NEXT: [[C1:%.*]] = llvm.mlir.constant(1 : i32) : i32
// CHECK-NEXT: [[C3:%.*]] = llvm.mlir.constant(3 : i32) : i32
// CHECK-NEXT: [[C1_1:%.*]] = llvm.mlir.constant(1 : i32) : i32
// CHECK-NEXT: "llvm.intr.prefetch"(%[[addr]], [[C1]], [[C3]], [[C1_1]]) : (!llvm.ptr, i32, i32, i32) -> ()
// CHECK-NEXT: "llvm.intr.prefetch"(%[[addr]]) <{cache = 1 : i32, hint = 3 : i32, rw = 1 : i32}> : (!llvm.ptr) -> ()
memref.prefetch %A[%i, %j], write, locality<3>, data : memref<?x?xf32>
// CHECK: [[C0:%.*]] = llvm.mlir.constant(0 : i32) : i32
// CHECK: [[C0_1:%.*]] = llvm.mlir.constant(0 : i32) : i32
// CHECK: [[C1_2:%.*]] = llvm.mlir.constant(1 : i32) : i32
// CHECK: "llvm.intr.prefetch"(%{{.*}}, [[C0]], [[C0_1]], [[C1_2]]) : (!llvm.ptr, i32, i32, i32) -> ()
// CHECK: "llvm.intr.prefetch"(%{{.*}}) <{cache = 1 : i32, hint = 0 : i32, rw = 0 : i32}> : (!llvm.ptr) -> ()
memref.prefetch %A[%i, %j], read, locality<0>, data : memref<?x?xf32>
// CHECK: [[C0_2:%.*]] = llvm.mlir.constant(0 : i32) : i32
// CHECK: [[C2:%.*]] = llvm.mlir.constant(2 : i32) : i32
// CHECK: [[C0_3:%.*]] = llvm.mlir.constant(0 : i32) : i32
// CHECK: "llvm.intr.prefetch"(%{{.*}}, [[C0_2]], [[C2]], [[C0_3]]) : (!llvm.ptr, i32, i32, i32) -> ()
// CHECK: "llvm.intr.prefetch"(%{{.*}}) <{cache = 0 : i32, hint = 2 : i32, rw = 0 : i32}> : (!llvm.ptr) -> ()
memref.prefetch %A[%i, %j], read, locality<2>, instr : memref<?x?xf32>
return
}
@@ -307,14 +298,13 @@ module attributes { dlti.dl_spec = #dlti.dl_spec<
// CHECK: [[RESULT_ALIGN_GEP:%.*]] = llvm.getelementptr [[RESULT_DESC]][1]
// CHECK: llvm.store [[RESULT_ALIGN]], [[RESULT_ALIGN_GEP]] : !llvm.ptr
// Memcpy remaniing values
// Memcpy remaining values
// CHECK: [[SOURCE_OFFSET_GEP:%.*]] = llvm.getelementptr [[SOURCE_DESC]][2]
// CHECK: [[RESULT_OFFSET_GEP:%.*]] = llvm.getelementptr [[RESULT_DESC]][2]
// CHECK: [[SIZEOF_TWO_RESULT_PTRS:%.*]] = llvm.mlir.constant(16 : index) : i64
// CHECK: [[COPY_SIZE:%.*]] = llvm.sub [[DESC_ALLOC_SIZE]], [[SIZEOF_TWO_RESULT_PTRS]]
// CHECK: [[FALSE:%.*]] = llvm.mlir.constant(false) : i1
// CHECK: "llvm.intr.memcpy"([[RESULT_OFFSET_GEP]], [[SOURCE_OFFSET_GEP]], [[COPY_SIZE]], [[FALSE]])
// CHECK: "llvm.intr.memcpy"([[RESULT_OFFSET_GEP]], [[SOURCE_OFFSET_GEP]], [[COPY_SIZE]]) <{isVolatile = false}>
// -----
@@ -674,8 +664,7 @@ func.func @realloc_dynamic(%in: memref<?xf32>, %d: index) -> memref<?xf32>{
// CHECK: %[[src_size:.*]] = llvm.mul %[[src_dim]], %[[dst_es]]
// CHECK: %[[new_buffer_raw:.*]] = llvm.call @malloc(%[[dst_size]])
// CHECK: %[[old_buffer_aligned:.*]] = llvm.extractvalue %[[descriptor]][1]
// CHECK: %[[volatile:.*]] = llvm.mlir.constant(false) : i1
// CHECK: "llvm.intr.memcpy"(%[[new_buffer_raw]], %[[old_buffer_aligned]], %[[src_size]], %[[volatile]])
// CHECK: "llvm.intr.memcpy"(%[[new_buffer_raw]], %[[old_buffer_aligned]], %[[src_size]]) <{isVolatile = false}>
// CHECK: %[[old_buffer_unaligned:.*]] = llvm.extractvalue %[[descriptor]][0]
// CHECK: llvm.call @free(%[[old_buffer_unaligned]])
// CHECK: %[[descriptor_update1:.*]] = llvm.insertvalue %[[new_buffer_raw]], %[[descriptor]][0]
@@ -721,8 +710,7 @@ func.func @realloc_dynamic_alignment(%in: memref<?xf32>, %d: index) -> memref<?x
// CHECK: %[[new_buffer_aligned_int:.*]] = llvm.sub %[[ptr_alignment_m1]], %[[padding]]
// CHECK: %[[new_buffer_aligned:.*]] = llvm.inttoptr %[[new_buffer_aligned_int]] : i64 to !llvm.ptr
// CHECK: %[[old_buffer_aligned:.*]] = llvm.extractvalue %[[descriptor]][1]
// CHECK: %[[volatile:.*]] = llvm.mlir.constant(false) : i1
// CHECK: "llvm.intr.memcpy"(%[[new_buffer_aligned]], %[[old_buffer_aligned]], %[[src_size]], %[[volatile]])
// CHECK: "llvm.intr.memcpy"(%[[new_buffer_aligned]], %[[old_buffer_aligned]], %[[src_size]]) <{isVolatile = false}>
// CHECK: %[[old_buffer_unaligned:.*]] = llvm.extractvalue %[[descriptor]][0]
// CHECK: llvm.call @free(%[[old_buffer_unaligned]])
// CHECK: %[[descriptor_update1:.*]] = llvm.insertvalue %[[new_buffer_raw]], %[[descriptor]][0]
@@ -752,8 +740,7 @@ func.func @realloc_dynamic_alignment(%in: memref<?xf32>, %d: index) -> memref<?x
// ALIGNED-ALLOC: %[[adjust_dst_size:.*]] = llvm.sub %[[size_alignment_m1]], %[[padding]]
// ALIGNED-ALLOC: %[[new_buffer_raw:.*]] = llvm.call @aligned_alloc(%[[alignment]], %[[adjust_dst_size]])
// ALIGNED-ALLOC: %[[old_buffer_aligned:.*]] = llvm.extractvalue %[[descriptor]][1]
// ALIGNED-ALLOC: %[[volatile:.*]] = llvm.mlir.constant(false) : i1
// ALIGNED-ALLOC: "llvm.intr.memcpy"(%[[new_buffer_raw]], %[[old_buffer_aligned]], %[[src_size]], %[[volatile]])
// ALIGNED-ALLOC: "llvm.intr.memcpy"(%[[new_buffer_raw]], %[[old_buffer_aligned]], %[[src_size]]) <{isVolatile = false}>
// ALIGNED-ALLOC: %[[old_buffer_unaligned:.*]] = llvm.extractvalue %[[descriptor]][0]
// ALIGNED-ALLOC: llvm.call @free(%[[old_buffer_unaligned]])
// ALIGNED-ALLOC: %[[descriptor_update1:.*]] = llvm.insertvalue %[[new_buffer_raw]], %[[descriptor]][0]


@@ -363,8 +363,7 @@ func.func @realloc_static(%in: memref<2xi32>) -> memref<4xi32>{
// CHECK: %[[src_size:.*]] = llvm.mul %[[src_dim]], %[[dst_es]]
// CHECK: %[[new_buffer_raw:.*]] = llvm.call @malloc(%[[dst_size]])
// CHECK: %[[old_buffer_aligned:.*]] = llvm.extractvalue %[[descriptor]][1]
// CHECK: %[[volatile:.*]] = llvm.mlir.constant(false) : i1
// CHECK: "llvm.intr.memcpy"(%[[new_buffer_raw]], %[[old_buffer_aligned]], %[[src_size]], %[[volatile]])
// CHECK: "llvm.intr.memcpy"(%[[new_buffer_raw]], %[[old_buffer_aligned]], %[[src_size]]) <{isVolatile = false}>
// CHECK: %[[old_buffer_unaligned:.*]] = llvm.extractvalue %[[descriptor]][0]
// CHECK: llvm.call @free(%[[old_buffer_unaligned]])
// CHECK: %[[descriptor_update1:.*]] = llvm.insertvalue %[[new_buffer_raw]], %[[descriptor]][0]
@@ -406,8 +405,7 @@ func.func @realloc_static_alignment(%in: memref<2xf32>) -> memref<4xf32>{
// CHECK: %[[new_buffer_aligned_int:.*]] = llvm.sub %[[ptr_alignment_m1]], %[[padding]]
// CHECK: %[[new_buffer_aligned:.*]] = llvm.inttoptr %[[new_buffer_aligned_int]] : i64 to !llvm.ptr
// CHECK: %[[old_buffer_aligned:.*]] = llvm.extractvalue %[[descriptor]][1]
// CHECK: %[[volatile:.*]] = llvm.mlir.constant(false) : i1
// CHECK: "llvm.intr.memcpy"(%[[new_buffer_aligned]], %[[old_buffer_aligned]], %[[src_size]], %[[volatile]])
// CHECK: "llvm.intr.memcpy"(%[[new_buffer_aligned]], %[[old_buffer_aligned]], %[[src_size]]) <{isVolatile = false}>
// CHECK: %[[old_buffer_unaligned:.*]] = llvm.extractvalue %[[descriptor]][0]
// CHECK: llvm.call @free(%[[old_buffer_unaligned]])
// CHECK: %[[descriptor_update1:.*]] = llvm.insertvalue %[[new_buffer_raw]], %[[descriptor]][0]


@@ -446,8 +446,7 @@ func.func @memref_copy_ranked() {
// CHECK: [[EXTRACT2P:%.*]] = llvm.extractvalue {{%.*}}[1] : !llvm.struct<(ptr, ptr, i64, array<1 x i64>, array<1 x i64>)>
// CHECK: [[EXTRACT2O:%.*]] = llvm.extractvalue {{%.*}}[2] : !llvm.struct<(ptr, ptr, i64, array<1 x i64>, array<1 x i64>)>
// CHECK: [[GEP2:%.*]] = llvm.getelementptr [[EXTRACT2P]][[[EXTRACT2O]]] : (!llvm.ptr, i64) -> !llvm.ptr, f32
// CHECK: [[VOLATILE:%.*]] = llvm.mlir.constant(false) : i1
// CHECK: "llvm.intr.memcpy"([[GEP2]], [[GEP1]], [[SIZE]], [[VOLATILE]]) : (!llvm.ptr, !llvm.ptr, i64, i1) -> ()
// CHECK: "llvm.intr.memcpy"([[GEP2]], [[GEP1]], [[SIZE]]) <{isVolatile = false}>
return
}
@@ -478,8 +477,7 @@ func.func @memref_copy_contiguous(%in: memref<16x4xi32>, %offset: index) {
// CHECK: [[EXTRACT2P:%.*]] = llvm.extractvalue {{%.*}}[1] : !llvm.struct<(ptr, ptr, i64, array<2 x i64>, array<2 x i64>)>
// CHECK: [[EXTRACT2O:%.*]] = llvm.extractvalue {{%.*}}[2] : !llvm.struct<(ptr, ptr, i64, array<2 x i64>, array<2 x i64>)>
// CHECK: [[GEP2:%.*]] = llvm.getelementptr [[EXTRACT2P]][[[EXTRACT2O]]] : (!llvm.ptr, i64) -> !llvm.ptr, i32
// CHECK: [[VOLATILE:%.*]] = llvm.mlir.constant(false) : i1
// CHECK: "llvm.intr.memcpy"([[GEP2]], [[GEP1]], [[SIZE]], [[VOLATILE]]) : (!llvm.ptr, !llvm.ptr, i64, i1) -> ()
// CHECK: "llvm.intr.memcpy"([[GEP2]], [[GEP1]], [[SIZE]]) <{isVolatile = false}>
return
}


@@ -394,10 +394,9 @@ func.func @realloc_dynamic(%in: memref<?xf32>, %d: index) -> memref<?xf32>{
// CHECK: %[[new_buffer_raw:.*]] = llvm.call @malloc(%[[dst_size]])
// CHECK: %[[new_buffer:.*]] = llvm.bitcast %[[new_buffer_raw]] : !llvm.ptr<i8> to !llvm.ptr<f32>
// CHECK: %[[old_buffer_aligned:.*]] = llvm.extractvalue %[[descriptor]][1]
// CHECK: %[[volatile:.*]] = llvm.mlir.constant(false) : i1
// CHECK-DAG: %[[new_buffer_void:.*]] = llvm.bitcast %[[new_buffer]] : !llvm.ptr<f32> to !llvm.ptr<i8>
// CHECK-DAG: %[[old_buffer_void:.*]] = llvm.bitcast %[[old_buffer_aligned]] : !llvm.ptr<f32> to !llvm.ptr<i8>
// CHECK: "llvm.intr.memcpy"(%[[new_buffer_void]], %[[old_buffer_void]], %[[src_size]], %[[volatile]])
// CHECK: "llvm.intr.memcpy"(%[[new_buffer_void]], %[[old_buffer_void]], %[[src_size]]) <{isVolatile = false}>
// CHECK: %[[old_buffer_unaligned:.*]] = llvm.extractvalue %[[descriptor]][0]
// CHECK: %[[old_buffer_unaligned_void:.*]] = llvm.bitcast %[[old_buffer_unaligned]] : !llvm.ptr<f32> to !llvm.ptr<i8>
// CHECK: llvm.call @free(%[[old_buffer_unaligned_void]])


@@ -15,11 +15,9 @@ module attributes {gpu.container_module, spirv.target_env = #spirv.target_env<#s
// CHECK-LABEL: @main
// CHECK: %[[SRC:.*]] = llvm.extractvalue %{{.*}}[0] : !llvm.struct<(ptr, ptr, i64, array<1 x i64>, array<1 x i64>)>
// CHECK-NEXT: %[[DEST:.*]] = llvm.mlir.addressof @__spv__foo_bar_arg_0_descriptor_set0_binding0 : !llvm.ptr
// CHECK-NEXT: llvm.mlir.constant(false) : i1
// CHECK-NEXT: "llvm.intr.memcpy"(%[[DEST]], %[[SRC]], %[[SIZE:.*]], %{{.*}}) : (!llvm.ptr, !llvm.ptr, i64, i1) -> ()
// CHECK-NEXT: "llvm.intr.memcpy"(%[[DEST]], %[[SRC]], %[[SIZE:.*]]) <{isVolatile = false}> : (!llvm.ptr, !llvm.ptr, i64) -> ()
// CHECK-NEXT: llvm.call @__spv__foo_bar() : () -> ()
// CHECK-NEXT: llvm.mlir.constant(false) : i1
// CHECK-NEXT: "llvm.intr.memcpy"(%[[SRC]], %[[DEST]], %[[SIZE]], %{{.*}}) : (!llvm.ptr, !llvm.ptr, i64, i1) -> ()
// CHECK-NEXT: "llvm.intr.memcpy"(%[[SRC]], %[[DEST]], %[[SIZE]]) <{isVolatile = false}> : (!llvm.ptr, !llvm.ptr, i64) -> ()
spirv.module @__spv__foo Logical GLSL450 requires #spirv.vce<v1.0, [Shader], [SPV_KHR_variable_pointers]> {
spirv.GlobalVariable @bar_arg_0 bind(0, 0) : !spirv.ptr<!spirv.struct<(!spirv.array<6 x i32, stride=4> [0])>, StorageBuffer>


@@ -13,9 +13,9 @@ func.func @inner_func_inlinable(%ptr : !llvm.ptr) -> i32 {
llvm.intr.dbg.declare #variableAddr = %ptr : !llvm.ptr
%byte = llvm.mlir.constant(43 : i8) : i8
%true = llvm.mlir.constant(1 : i1) : i1
"llvm.intr.memset"(%ptr, %byte, %0, %true) : (!llvm.ptr, i8, i32, i1) -> ()
"llvm.intr.memmove"(%ptr, %ptr, %0, %true) : (!llvm.ptr, !llvm.ptr, i32, i1) -> ()
"llvm.intr.memcpy"(%ptr, %ptr, %0, %true) : (!llvm.ptr, !llvm.ptr, i32, i1) -> ()
"llvm.intr.memset"(%ptr, %byte, %0) <{isVolatile = true}> : (!llvm.ptr, i8, i32) -> ()
"llvm.intr.memmove"(%ptr, %ptr, %0) <{isVolatile = true}> : (!llvm.ptr, !llvm.ptr, i32) -> ()
"llvm.intr.memcpy"(%ptr, %ptr, %0) <{isVolatile = true}> : (!llvm.ptr, !llvm.ptr, i32) -> ()
"llvm.intr.assume"(%true) : (i1) -> ()
llvm.fence release
%2 = llvm.atomicrmw add %ptr, %0 monotonic : !llvm.ptr, i32


@@ -161,16 +161,14 @@ func.func @ops(%arg0: i32, %arg1: f32,
// CHECK: llvm.intr.round(%[[FLOAT]]) : (f32) -> f32
%34 = llvm.intr.round(%arg1) : (f32) -> f32
// CHECK: "llvm.intr.memcpy"(%{{.*}}, %{{.*}}, %{{.*}}, %{{.*}}) : (!llvm.ptr, !llvm.ptr, i32, i1) -> ()
"llvm.intr.memcpy"(%arg2, %arg3, %arg0, %arg4) : (!llvm.ptr, !llvm.ptr, i32, i1) -> ()
// CHECK: "llvm.intr.memcpy"(%{{.*}}, %{{.*}}, %{{.*}}) <{isVolatile = false}> : (!llvm.ptr, !llvm.ptr, i32) -> ()
"llvm.intr.memcpy"(%arg2, %arg3, %arg0) <{isVolatile = false}> : (!llvm.ptr, !llvm.ptr, i32) -> ()
// CHECK: "llvm.intr.memcpy"(%{{.*}}, %{{.*}}, %{{.*}}, %{{.*}}) : (!llvm.ptr, !llvm.ptr, i32, i1) -> ()
"llvm.intr.memcpy"(%arg2, %arg3, %arg0, %arg4) : (!llvm.ptr, !llvm.ptr, i32, i1) -> ()
// CHECK: "llvm.intr.memcpy"(%{{.*}}, %{{.*}}, %{{.*}}) <{isVolatile = false}> : (!llvm.ptr, !llvm.ptr, i32) -> ()
"llvm.intr.memcpy"(%arg2, %arg3, %arg0) <{isVolatile = false}> : (!llvm.ptr, !llvm.ptr, i32) -> ()
// CHECK: %[[SZ:.*]] = llvm.mlir.constant
%sz = llvm.mlir.constant(10: i64) : i64
// CHECK: "llvm.intr.memcpy.inline"(%{{.*}}, %{{.*}}, %{{.*}}, %{{.*}}) : (!llvm.ptr, !llvm.ptr, i64, i1) -> ()
"llvm.intr.memcpy.inline"(%arg2, %arg3, %sz, %arg4) : (!llvm.ptr, !llvm.ptr, i64, i1) -> ()
// CHECK: "llvm.intr.memcpy.inline"(%{{.*}}, %{{.*}}) <{isVolatile = false, len = 10 : i64}> : (!llvm.ptr, !llvm.ptr) -> ()
"llvm.intr.memcpy.inline"(%arg2, %arg3) <{isVolatile = false, len = 10 : i64}> : (!llvm.ptr, !llvm.ptr) -> ()
// CHECK: llvm.return
llvm.return


@@ -10,11 +10,20 @@ define void @fmuladd_test(float %0, float %1, <8 x float> %2, ptr %3) {
%7 = call float @llvm.fma.f32(float %0, float %1, float %0)
; CHECK: llvm.intr.fma(%{{.*}}, %{{.*}}, %{{.*}}) : (vector<8xf32>, vector<8xf32>, vector<8xf32>) -> vector<8xf32>
%8 = call <8 x float> @llvm.fma.v8f32(<8 x float> %2, <8 x float> %2, <8 x float> %2)
; CHECK: "llvm.intr.prefetch"(%{{.*}}, %{{.*}}, %{{.*}}, %{{.*}}) : (!llvm.ptr, i32, i32, i32) -> ()
; CHECK: "llvm.intr.prefetch"(%{{.*}}) <{cache = 1 : i32, hint = 3 : i32, rw = 0 : i32}> : (!llvm.ptr) -> ()
call void @llvm.prefetch.p0(ptr %3, i32 0, i32 3, i32 1)
ret void
}
; CHECK-LABEL: llvm.func @fpclass_test
define void @fpclass_test(float %0, <8 x float> %1) {
; CHECK: "llvm.intr.is.fpclass"(%{{.*}}) <{bit = 0 : i32}> : (f32) -> i1
%3 = call i1 @llvm.is.fpclass.f32(float %0, i32 0)
; CHECK: "llvm.intr.is.fpclass"(%{{.*}}) <{bit = 1 : i32}> : (vector<8xf32>) -> vector<8xi1>
%4 = call <8 x i1> @llvm.is.fpclass.v8f32(<8 x float> %1, i32 1)
ret void
}
; CHECK-LABEL: llvm.func @exp_test
define void @exp_test(float %0, <8 x float> %1) {
; CHECK: llvm.intr.exp(%{{.*}}) : (f32) -> f32
@@ -201,22 +210,28 @@ define void @byteswap_test(i32 %0, <8 x i32> %1) {
}
; CHECK-LABEL: llvm.func @ctlz_test
define void @ctlz_test(i32 %0, <8 x i32> %1) {
; CHECK: %[[FALSE:.+]] = llvm.mlir.constant(false) : i1
; CHECK: "llvm.intr.ctlz"(%{{.*}}, %[[FALSE]]) : (i32, i1) -> i32
; CHECK: "llvm.intr.ctlz"(%{{.*}}) <{is_zero_poison = false}> : (i32) -> i32
%3 = call i32 @llvm.ctlz.i32(i32 %0, i1 false)
; CHECK: "llvm.intr.ctlz"(%{{.*}}, %[[FALSE]]) : (vector<8xi32>, i1) -> vector<8xi32>
; CHECK: "llvm.intr.ctlz"(%{{.*}}) <{is_zero_poison = false}> : (vector<8xi32>) -> vector<8xi32>
%4 = call <8 x i32> @llvm.ctlz.v8i32(<8 x i32> %1, i1 false)
ret void
}
; CHECK-LABEL: llvm.func @cttz_test
define void @cttz_test(i32 %0, <8 x i32> %1) {
; CHECK: %[[FALSE:.+]] = llvm.mlir.constant(false) : i1
; CHECK: "llvm.intr.cttz"(%{{.*}}, %[[FALSE]]) : (i32, i1) -> i32
; CHECK: "llvm.intr.cttz"(%{{.*}}) <{is_zero_poison = false}> : (i32) -> i32
%3 = call i32 @llvm.cttz.i32(i32 %0, i1 false)
; CHECK: "llvm.intr.cttz"(%{{.*}}, %[[FALSE]]) : (vector<8xi32>, i1) -> vector<8xi32>
; CHECK: "llvm.intr.cttz"(%{{.*}}) <{is_zero_poison = false}> : (vector<8xi32>) -> vector<8xi32>
%4 = call <8 x i32> @llvm.cttz.v8i32(<8 x i32> %1, i1 false)
ret void
}
; CHECK-LABEL: llvm.func @abs_test
define void @abs_test(i32 %0, <8 x i32> %1) {
; CHECK: "llvm.intr.abs"(%{{.*}}) <{is_int_min_poison = false}> : (i32) -> i32
%3 = call i32 @llvm.abs.i32(i32 %0, i1 false)
; CHECK: "llvm.intr.abs"(%{{.*}}) <{is_int_min_poison = true}> : (vector<8xi32>) -> vector<8xi32>
%4 = call <8 x i32> @llvm.abs.v8i32(<8 x i32> %1, i1 true)
ret void
}
; CHECK-LABEL: llvm.func @ctpop_test
define void @ctpop_test(i32 %0, <8 x i32> %1) {
@@ -435,27 +450,26 @@ define void @trap_intrinsics() {
; CHECK-LABEL: llvm.func @memcpy_test
define void @memcpy_test(i32 %0, ptr %1, ptr %2) {
; CHECK: %[[FALSE:.+]] = llvm.mlir.constant(false) : i1
; CHECK: %[[CST:.+]] = llvm.mlir.constant(10 : i64) : i64
; CHECK: "llvm.intr.memcpy"(%{{.*}}, %{{.*}}, %{{.*}}, %[[FALSE]]) : (!llvm.ptr, !llvm.ptr, i32, i1) -> ()
; CHECK: "llvm.intr.memcpy"(%{{.*}}, %{{.*}}, %{{.*}}) <{isVolatile = false}> : (!llvm.ptr, !llvm.ptr, i32) -> ()
call void @llvm.memcpy.p0.p0.i32(ptr %1, ptr %2, i32 %0, i1 false)
; CHECK: "llvm.intr.memcpy.inline"(%{{.*}}, %{{.*}}, %[[CST]], %[[FALSE]]) : (!llvm.ptr, !llvm.ptr, i64, i1) -> ()
; CHECK: "llvm.intr.memcpy.inline"(%{{.*}}, %{{.*}}) <{isVolatile = false, len = 10 : i64}> : (!llvm.ptr, !llvm.ptr) -> ()
call void @llvm.memcpy.inline.p0.p0.i64(ptr %1, ptr %2, i64 10, i1 false)
; CHECK: "llvm.intr.memcpy.inline"(%{{.*}}, %{{.*}}) <{isVolatile = false, len = 10 : i32}> : (!llvm.ptr, !llvm.ptr) -> ()
call void @llvm.memcpy.inline.p0.p0.i32(ptr %1, ptr %2, i32 10, i1 false)
ret void
}
; CHECK-LABEL: llvm.func @memmove_test
define void @memmove_test(i32 %0, ptr %1, ptr %2) {
; CHECK: %[[falseval:.+]] = llvm.mlir.constant(false) : i1
; CHECK: "llvm.intr.memmove"(%{{.*}}, %{{.*}}, %{{.*}}, %[[falseval]]) : (!llvm.ptr, !llvm.ptr, i32, i1) -> ()
; CHECK: "llvm.intr.memmove"(%{{.*}}, %{{.*}}, %{{.*}}) <{isVolatile = false}> : (!llvm.ptr, !llvm.ptr, i32) -> ()
call void @llvm.memmove.p0.p0.i32(ptr %1, ptr %2, i32 %0, i1 false)
ret void
}
; CHECK-LABEL: llvm.func @memset_test
define void @memset_test(i32 %0, ptr %1, i8 %2) {
; CHECK: %[[falseval:.+]] = llvm.mlir.constant(false) : i1
; CHECK: "llvm.intr.memset"(%{{.*}}, %{{.*}}, %{{.*}}, %[[falseval]]) : (!llvm.ptr, i8, i32, i1) -> ()
; CHECK: "llvm.intr.memset"(%{{.*}}, %{{.*}}, %{{.*}}) <{isVolatile = false}> : (!llvm.ptr, i8, i32) -> ()
call void @llvm.memset.p0.i32(ptr %1, i8 %2, i32 %0, i1 false)
ret void
}
@@ -830,6 +844,8 @@ declare <8 x float> @llvm.fmuladd.v8f32(<8 x float>, <8 x float>, <8 x float>)
declare float @llvm.fma.f32(float, float, float)
declare <8 x float> @llvm.fma.v8f32(<8 x float>, <8 x float>, <8 x float>)
declare void @llvm.prefetch.p0(ptr nocapture readonly, i32 immarg, i32 immarg, i32)
declare i1 @llvm.is.fpclass.f32(float, i32 immarg)
declare <8 x i1> @llvm.is.fpclass.v8f32(<8 x float>, i32 immarg)
declare float @llvm.exp.f32(float)
declare <8 x float> @llvm.exp.v8f32(<8 x float>)
declare float @llvm.exp2.f32(float)
@@ -882,6 +898,8 @@ declare i32 @llvm.ctlz.i32(i32, i1 immarg)
declare <8 x i32> @llvm.ctlz.v8i32(<8 x i32>, i1 immarg)
declare i32 @llvm.cttz.i32(i32, i1 immarg)
declare <8 x i32> @llvm.cttz.v8i32(<8 x i32>, i1 immarg)
declare i32 @llvm.abs.i32(i32, i1 immarg)
declare <8 x i32> @llvm.abs.v8i32(<8 x i32>, i1 immarg)
declare i32 @llvm.ctpop.i32(i32)
declare <8 x i32> @llvm.ctpop.v8i32(<8 x i32>)
declare i32 @llvm.fshl.i32(i32, i32, i32)
@@ -933,6 +951,7 @@ declare void @llvm.debugtrap()
declare void @llvm.ubsantrap(i8 immarg)
declare void @llvm.memcpy.p0.p0.i32(ptr noalias nocapture writeonly, ptr noalias nocapture readonly, i32, i1 immarg)
declare void @llvm.memcpy.inline.p0.p0.i64(ptr noalias nocapture writeonly, ptr noalias nocapture readonly, i64 immarg, i1 immarg)
declare void @llvm.memcpy.inline.p0.p0.i32(ptr noalias nocapture writeonly, ptr noalias nocapture readonly, i32 immarg, i1 immarg)
declare void @llvm.memmove.p0.p0.i32(ptr nocapture writeonly, ptr nocapture readonly, i32, i1 immarg)
declare void @llvm.memset.p0.i32(ptr nocapture writeonly, i8, i32, i1 immarg)
declare { i32, i1 } @llvm.sadd.with.overflow.i32(i32, i32)


@@ -2,9 +2,6 @@
// CHECK-LABEL: @intrinsics
llvm.func @intrinsics(%arg0: f32, %arg1: f32, %arg2: vector<8xf32>, %arg3: !llvm.ptr<i8>) {
%c3 = llvm.mlir.constant(3 : i32) : i32
%c1 = llvm.mlir.constant(1 : i32) : i32
%c0 = llvm.mlir.constant(0 : i32) : i32
// CHECK: call float @llvm.fmuladd.f32
"llvm.intr.fmuladd"(%arg0, %arg1, %arg0) : (f32, f32, f32) -> f32
// CHECK: call <8 x float> @llvm.fmuladd.v8f32
@@ -14,15 +11,14 @@ llvm.func @intrinsics(%arg0: f32, %arg1: f32, %arg2: vector<8xf32>, %arg3: !llvm
// CHECK: call <8 x float> @llvm.fma.v8f32
"llvm.intr.fma"(%arg2, %arg2, %arg2) : (vector<8xf32>, vector<8xf32>, vector<8xf32>) -> vector<8xf32>
// CHECK: call void @llvm.prefetch.p0(ptr %3, i32 0, i32 3, i32 1)
"llvm.intr.prefetch"(%arg3, %c0, %c3, %c1) : (!llvm.ptr<i8>, i32, i32, i32) -> ()
"llvm.intr.prefetch"(%arg3) <{cache = 1 : i32, hint = 3 : i32, rw = 0 : i32}> : (!llvm.ptr<i8>) -> ()
llvm.return
}
// CHECK-LABEL: @fpclass_test
llvm.func @fpclass_test(%arg0: f32) -> i1 {
%checkNan = llvm.mlir.constant(0 : i32) : i32
// CHECK: call i1 @llvm.is.fpclass
%0 = "llvm.intr.is.fpclass"(%arg0, %checkNan) : (f32, i32) -> i1
%0 = "llvm.intr.is.fpclass"(%arg0) <{bit = 0 : i32 }>: (f32) -> i1
llvm.return %0 : i1
}
@@ -224,21 +220,28 @@ llvm.func @byteswap_test(%arg0: i32, %arg1: vector<8xi32>) {
// CHECK-LABEL: @ctlz_test
llvm.func @ctlz_test(%arg0: i32, %arg1: vector<8xi32>) {
%i1 = llvm.mlir.constant(false) : i1
// CHECK: call i32 @llvm.ctlz.i32
"llvm.intr.ctlz"(%arg0, %i1) : (i32, i1) -> i32
"llvm.intr.ctlz"(%arg0) <{is_zero_poison = 0 : i1}> : (i32) -> i32
// CHECK: call <8 x i32> @llvm.ctlz.v8i32
"llvm.intr.ctlz"(%arg1, %i1) : (vector<8xi32>, i1) -> vector<8xi32>
"llvm.intr.ctlz"(%arg1) <{is_zero_poison = 1 : i1}> : (vector<8xi32>) -> vector<8xi32>
llvm.return
}
// CHECK-LABEL: @cttz_test
llvm.func @cttz_test(%arg0: i32, %arg1: vector<8xi32>) {
%i1 = llvm.mlir.constant(false) : i1
// CHECK: call i32 @llvm.cttz.i32
"llvm.intr.cttz"(%arg0, %i1) : (i32, i1) -> i32
"llvm.intr.cttz"(%arg0) <{is_zero_poison = 0 : i1}> : (i32) -> i32
// CHECK: call <8 x i32> @llvm.cttz.v8i32
"llvm.intr.cttz"(%arg1, %i1) : (vector<8xi32>, i1) -> vector<8xi32>
"llvm.intr.cttz"(%arg1) <{is_zero_poison = 1 : i1}> : (vector<8xi32>) -> vector<8xi32>
llvm.return
}
// CHECK-LABEL: @abs_test
llvm.func @abs_test(%arg0: i32, %arg1: vector<8xi32>) {
// CHECK: call i32 @llvm.abs.i32
"llvm.intr.abs"(%arg0) <{is_int_min_poison = 0 : i1}> : (i32) -> i32
// CHECK: call <8 x i32> @llvm.abs.v8i32
"llvm.intr.abs"(%arg1) <{is_int_min_poison = 1 : i1}> : (vector<8xi32>) -> vector<8xi32>
llvm.return
}
@@ -457,28 +460,27 @@ llvm.func @trap_intrinsics() {
// CHECK-LABEL: @memcpy_test
llvm.func @memcpy_test(%arg0: i32, %arg2: !llvm.ptr<i8>, %arg3: !llvm.ptr<i8>) {
%i1 = llvm.mlir.constant(false) : i1
// CHECK: call void @llvm.memcpy.p0.p0.i32(ptr %{{.*}}, ptr %{{.*}}, i32 %{{.*}}, i1 {{.*}})
"llvm.intr.memcpy"(%arg2, %arg3, %arg0, %i1) : (!llvm.ptr<i8>, !llvm.ptr<i8>, i32, i1) -> ()
%sz = llvm.mlir.constant(10: i64) : i64
// CHECK: call void @llvm.memcpy.inline.p0.p0.i64(ptr %{{.*}}, ptr %{{.*}}, i64 10, i1 {{.*}})
"llvm.intr.memcpy.inline"(%arg2, %arg3, %sz, %i1) : (!llvm.ptr<i8>, !llvm.ptr<i8>, i64, i1) -> ()
// CHECK: call void @llvm.memcpy.p0.p0.i32(ptr %{{.*}}, ptr %{{.*}}, i32 %{{.*}}, i1 false
"llvm.intr.memcpy"(%arg2, %arg3, %arg0) <{isVolatile = false}> : (!llvm.ptr<i8>, !llvm.ptr<i8>, i32) -> ()
// CHECK: call void @llvm.memcpy.inline.p0.p0.i32(ptr %{{.*}}, ptr %{{.*}}, i32 10, i1 true
"llvm.intr.memcpy.inline"(%arg2, %arg3) <{isVolatile = true, len = 10 : i32}> : (!llvm.ptr<i8>, !llvm.ptr<i8>) -> ()
// CHECK: call void @llvm.memcpy.inline.p0.p0.i64(ptr %{{.*}}, ptr %{{.*}}, i64 10, i1 true
"llvm.intr.memcpy.inline"(%arg2, %arg3) <{isVolatile = true, len = 10 : i64}> : (!llvm.ptr<i8>, !llvm.ptr<i8>) -> ()
llvm.return
}
// CHECK-LABEL: @memmove_test
llvm.func @memmove_test(%arg0: i32, %arg2: !llvm.ptr<i8>, %arg3: !llvm.ptr<i8>) {
%i1 = llvm.mlir.constant(false) : i1
// CHECK: call void @llvm.memmove.p0.p0.i32(ptr %{{.*}}, ptr %{{.*}}, i32 %{{.*}}, i1 {{.*}})
"llvm.intr.memmove"(%arg2, %arg3, %arg0, %i1) : (!llvm.ptr<i8>, !llvm.ptr<i8>, i32, i1) -> ()
// CHECK: call void @llvm.memmove.p0.p0.i32(ptr %{{.*}}, ptr %{{.*}}, i32 %{{.*}}, i1 false
"llvm.intr.memmove"(%arg2, %arg3, %arg0) <{isVolatile = false}> : (!llvm.ptr<i8>, !llvm.ptr<i8>, i32) -> ()
llvm.return
}
// CHECK-LABEL: @memset_test
llvm.func @memset_test(%arg0: i32, %arg2: !llvm.ptr<i8>, %arg3: i8) {
%i1 = llvm.mlir.constant(false) : i1
// CHECK: call void @llvm.memset.p0.i32(ptr %{{.*}}, i8 %{{.*}}, i32 %{{.*}}, i1 {{.*}})
"llvm.intr.memset"(%arg2, %arg3, %arg0, %i1) : (!llvm.ptr<i8>, i8, i32, i1) -> ()
// CHECK: call void @llvm.memset.p0.i32(ptr %{{.*}}, i8 %{{.*}}, i32 %{{.*}}, i1 false
"llvm.intr.memset"(%arg2, %arg3, %arg0) <{isVolatile = false}> : (!llvm.ptr<i8>, i8, i32) -> ()
llvm.return
}
@@ -925,6 +927,7 @@ llvm.func @lifetime(%p: !llvm.ptr) {
// CHECK-DAG: declare float @llvm.fmuladd.f32(float, float, float)
// CHECK-DAG: declare <8 x float> @llvm.fmuladd.v8f32(<8 x float>, <8 x float>, <8 x float>) #0
// CHECK-DAG: declare void @llvm.prefetch.p0(ptr nocapture readonly, i32 immarg, i32 immarg, i32 immarg)
// CHECK-DAG: declare i1 @llvm.is.fpclass.f32(float, i32 immarg)
// CHECK-DAG: declare float @llvm.exp.f32(float)
// CHECK-DAG: declare <8 x float> @llvm.exp.v8f32(<8 x float>) #0
// CHECK-DAG: declare float @llvm.log.f32(float)
@@ -977,6 +980,7 @@ llvm.func @lifetime(%p: !llvm.ptr) {
// CHECK-DAG: declare void @llvm.debugtrap()
// CHECK-DAG: declare void @llvm.ubsantrap(i8 immarg)
// CHECK-DAG: declare void @llvm.memcpy.p0.p0.i32(ptr noalias nocapture writeonly, ptr noalias nocapture readonly, i32, i1 immarg)
// CHECK-DAG: declare void @llvm.memcpy.inline.p0.p0.i32(ptr noalias nocapture writeonly, ptr noalias nocapture readonly, i32 immarg, i1 immarg)
// CHECK-DAG: declare void @llvm.memcpy.inline.p0.p0.i64(ptr noalias nocapture writeonly, ptr noalias nocapture readonly, i64 immarg, i1 immarg)
// CHECK-DAG: declare { i32, i1 } @llvm.sadd.with.overflow.i32(i32, i32)
// CHECK-DAG: declare { <8 x i32>, <8 x i1> } @llvm.sadd.with.overflow.v8i32(<8 x i32>, <8 x i32>)


@@ -106,41 +106,25 @@ llvm.func @powi_intr_wrong_type(%arg0 : f32, %arg1 : f32) -> f32 {
// -----
llvm.func @ctlz_intr_wrong_type(%arg0 : i32, %arg1 : i32) -> i32 {
// expected-error @below{{op operand #1 must be 1-bit signless integer, but got 'i32'}}
%0 = "llvm.intr.ctlz"(%arg0, %arg1) : (i32, i32) -> i32
llvm.return %0 : i32
}
// -----
llvm.func @memcpy_intr_wrong_type(%src : i64, %dst : i64, %len : i64, %volatile : i1) {
llvm.func @memcpy_intr_wrong_type(%src : i64, %dst : i64, %len : i64) {
// expected-error @below{{op operand #0 must be LLVM pointer type, but got 'i64'}}
"llvm.intr.memcpy"(%src, %dst, %len, %volatile) : (i64, i64, i64, i1) -> ()
"llvm.intr.memcpy"(%src, %dst, %len) <{isVolatile = false}> : (i64, i64, i64) -> ()
llvm.return
}
// -----
llvm.func @memcpy_inline_intr_wrong_type(%src : !llvm.ptr, %dst : !llvm.ptr, %len : i64, %volatile : i32) {
// expected-error @below{{op operand #3 must be 1-bit signless integer, but got 'i32'}}
"llvm.intr.memcpy.inline"(%src, %dst, %len, %volatile) : (!llvm.ptr, !llvm.ptr, i64, i32) -> ()
llvm.return
}
// -----
llvm.func @memmove_intr_wrong_type(%src : !llvm.ptr, %dst : i64, %len : i64, %volatile : i1) {
llvm.func @memmove_intr_wrong_type(%src : !llvm.ptr, %dst : i64, %len : i64) {
// expected-error @below{{op operand #1 must be LLVM pointer type, but got 'i64'}}
"llvm.intr.memmove"(%src, %dst, %len, %volatile) : (!llvm.ptr, i64, i64, i1) -> ()
"llvm.intr.memmove"(%src, %dst, %len) <{isVolatile = false}> : (!llvm.ptr, i64, i64) -> ()
llvm.return
}
// -----
llvm.func @memset_intr_wrong_type(%dst : !llvm.ptr, %val : i32, %len : i64, %volatile : i1) {
llvm.func @memset_intr_wrong_type(%dst : !llvm.ptr, %val : i32, %len : i64) {
// expected-error @below{{op operand #1 must be 8-bit signless integer, but got 'i32'}}
"llvm.intr.memset"(%dst, %val, %len, %volatile) : (!llvm.ptr, i32, i64, i1) -> ()
"llvm.intr.memset"(%dst, %val, %len) <{isVolatile = false}> : (!llvm.ptr, i32, i64) -> ()
llvm.return
}


@@ -2034,12 +2034,11 @@ llvm.func @aliasScope(%arg1 : !llvm.ptr) {
%2 = llvm.atomicrmw add %arg1, %0 monotonic {alias_scopes = [@metadata::@scope3], noalias_scopes = [@metadata::@scope1, @metadata::@scope2]} : !llvm.ptr, i32
// CHECK: cmpxchg {{.*}}, !alias.scope ![[SCOPES3]]
%3 = llvm.cmpxchg %arg1, %1, %2 acq_rel monotonic {alias_scopes = [@metadata::@scope3]} : !llvm.ptr, i32
%4 = llvm.mlir.constant(0 : i1) : i1
%5 = llvm.mlir.constant(42 : i8) : i8
// CHECK: llvm.memcpy{{.*}}, !alias.scope ![[SCOPES3]]
"llvm.intr.memcpy"(%arg1, %arg1, %0, %4) {alias_scopes = [@metadata::@scope3]} : (!llvm.ptr, !llvm.ptr, i32, i1) -> ()
"llvm.intr.memcpy"(%arg1, %arg1, %0) <{isVolatile = false}> {alias_scopes = [@metadata::@scope3]} : (!llvm.ptr, !llvm.ptr, i32) -> ()
// CHECK: llvm.memset{{.*}}, !noalias ![[SCOPES3]]
"llvm.intr.memset"(%arg1, %5, %0, %4) {noalias_scopes = [@metadata::@scope3]} : (!llvm.ptr, i8, i32, i1) -> ()
"llvm.intr.memset"(%arg1, %5, %0) <{isVolatile = false}> {noalias_scopes = [@metadata::@scope3]} : (!llvm.ptr, i8, i32) -> ()
// CHECK: call void @foo({{.*}} !alias.scope ![[SCOPES3]]
llvm.call @foo(%arg1) {alias_scopes = [@metadata::@scope3]} : (!llvm.ptr) -> ()
// CHECK: call void @foo({{.*}} !noalias ![[SCOPES3]]


@@ -258,12 +258,11 @@ llvm.func @loopOptions(%arg1 : i32, %arg2 : i32) {
%6 = llvm.atomicrmw add %4, %5 monotonic {access_groups = [@metadata::@group1, @metadata::@group2]} : !llvm.ptr, i32
// CHECK: = cmpxchg ptr %{{.*}}, i32 %{{.*}}, i32 %{{.*}} !llvm.access.group ![[ACCESS_GROUPS_NODE]]
%7 = llvm.cmpxchg %4, %5, %6 acq_rel monotonic {access_groups = [@metadata::@group1, @metadata::@group2]} : !llvm.ptr, i32
%8 = llvm.mlir.constant(0 : i1) : i1
%9 = llvm.mlir.constant(42 : i8) : i8
// CHECK: llvm.memcpy{{.*}} !llvm.access.group ![[ACCESS_GROUPS_NODE]]
"llvm.intr.memcpy"(%4, %4, %0, %8) {access_groups = [@metadata::@group1, @metadata::@group2]} : (!llvm.ptr, !llvm.ptr, i32, i1) -> ()
"llvm.intr.memcpy"(%4, %4, %0) <{isVolatile = false}> {access_groups = [@metadata::@group1, @metadata::@group2]} : (!llvm.ptr, !llvm.ptr, i32) -> ()
// CHECK: llvm.memset{{.*}} !llvm.access.group ![[ACCESS_GROUPS_NODE]]
"llvm.intr.memset"(%4, %9, %0, %8) {access_groups = [@metadata::@group1, @metadata::@group2]} : (!llvm.ptr, i8, i32, i1) -> ()
"llvm.intr.memset"(%4, %9, %0) <{isVolatile = false}> {access_groups = [@metadata::@group1, @metadata::@group2]} : (!llvm.ptr, i8, i32) -> ()
// CHECK: call void @foo({{.*}} !llvm.access.group ![[ACCESS_GROUPS_NODE]]
llvm.call @foo(%arg1) {access_groups = [@metadata::@group1, @metadata::@group2]} : (i32) -> ()
// CHECK: br label {{.*}} !llvm.loop ![[LOOP_NODE]]


@@ -70,12 +70,11 @@ module {
%6 = llvm.atomicrmw add %5, %4 monotonic {tbaa = [@__tbaa::@tbaa_tag_7]} : !llvm.ptr, i32
// CHECK: cmpxchg ptr %{{.*}}, i32 %{{.*}}, i32 %{{.*}} !tbaa ![[STAG]]
%7 = llvm.cmpxchg %5, %6, %4 acq_rel monotonic {tbaa = [@__tbaa::@tbaa_tag_7]} : !llvm.ptr, i32
%8 = llvm.mlir.constant(0 : i1) : i1
%9 = llvm.mlir.constant(42 : i8) : i8
// CHECK: llvm.memcpy{{.*}} !tbaa ![[STAG]]
"llvm.intr.memcpy"(%arg1, %arg1, %0, %8) {tbaa = [@__tbaa::@tbaa_tag_7]} : (!llvm.ptr, !llvm.ptr, i32, i1) -> ()
"llvm.intr.memcpy"(%arg1, %arg1, %0) <{isVolatile = false}> {tbaa = [@__tbaa::@tbaa_tag_7]} : (!llvm.ptr, !llvm.ptr, i32) -> ()
// CHECK: llvm.memset{{.*}} !tbaa ![[STAG]]
"llvm.intr.memset"(%arg1, %9, %0, %8) {tbaa = [@__tbaa::@tbaa_tag_7]} : (!llvm.ptr, i8, i32, i1) -> ()
"llvm.intr.memset"(%arg1, %9, %0) <{isVolatile = false}> {tbaa = [@__tbaa::@tbaa_tag_7]} : (!llvm.ptr, i8, i32) -> ()
// CHECK: call void @foo({{.*}} !tbaa ![[STAG]]
llvm.call @foo(%arg1) {tbaa = [@__tbaa::@tbaa_tag_7]} : (!llvm.ptr) -> ()
llvm.return