[CIR] Add handling for volatile loads and stores (#156124)

This fills in the missing pieces to handle volatile loads and stores in
CIR.

This addresses https://github.com/llvm/llvm-project/issues/153280
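
For reference, a minimal sketch of the intended end-to-end behavior (SSA value names and exact CIR types are illustrative; the new test file at the end of this commit is the authoritative check):

```c++
// Sketch only: volatile accesses now carry a `volatile` flag through CIR.
int test_load(volatile int *ptr) {
  // CIR:  %{{.*}} = cir.load volatile %{{.*}} : !cir.ptr<i32>, i32
  // LLVM: %{{.*}} = load volatile i32, ptr %{{.*}}
  return *ptr;
}

void test_store(volatile int *ptr) {
  // CIR:  cir.store volatile %{{.*}}, %{{.*}} : i32, !cir.ptr<i32>
  // LLVM: store volatile i32 42, ptr %{{.*}}
  *ptr = 42;
}
```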
Author: Andy Kaylor
Date: 2025-09-02 11:36:47 -07:00
Committed by: GitHub
Parent: 08001cf340
Commit: 95d3ecee82

9 changed files with 264 additions and 24 deletions

View File

@@ -160,16 +160,15 @@ public:
   }

   cir::LoadOp createLoad(mlir::Location loc, mlir::Value ptr,
-                         uint64_t alignment = 0) {
+                         bool isVolatile = false, uint64_t alignment = 0) {
     mlir::IntegerAttr alignmentAttr = getAlignmentAttr(alignment);
-    assert(!cir::MissingFeatures::opLoadStoreVolatile());
-    return cir::LoadOp::create(*this, loc, ptr, /*isDeref=*/false,
+    return cir::LoadOp::create(*this, loc, ptr, /*isDeref=*/false, isVolatile,
                                alignmentAttr, cir::MemOrderAttr{});
   }

   mlir::Value createAlignedLoad(mlir::Location loc, mlir::Value ptr,
                                 uint64_t alignment) {
-    return createLoad(loc, ptr, alignment);
+    return createLoad(loc, ptr, /*isVolatile=*/false, alignment);
   }

   mlir::Value createNot(mlir::Value value) {
@@ -250,7 +249,7 @@ public:
                            bool isVolatile = false,
                            mlir::IntegerAttr align = {},
                            cir::MemOrderAttr order = {}) {
-    return cir::StoreOp::create(*this, loc, val, dst, align, order);
+    return cir::StoreOp::create(*this, loc, val, dst, isVolatile, align, order);
   }

   [[nodiscard]] cir::GlobalOp createGlobal(mlir::ModuleOp mlirModule,
[[nodiscard]] cir::GlobalOp createGlobal(mlir::ModuleOp mlirModule,
@@ -274,7 +273,8 @@ public:
     mlir::IntegerAttr alignmentAttr = getAlignmentAttr(alignment);
     auto addr = createAlloca(loc, getPointerTo(type), type, {}, alignmentAttr);
     return cir::LoadOp::create(*this, loc, addr, /*isDeref=*/false,
-                               alignmentAttr, /*mem_order=*/{});
+                               /*isVolatile=*/false, alignmentAttr,
+                               /*mem_order=*/{});
   }

   cir::PtrStrideOp createPtrStride(mlir::Location loc, mlir::Value base,
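
A hedged sketch of how a caller can now request a volatile load through this builder (the wrapper function and its name are hypothetical; the implicit op-to-Value conversion is the same one `createAlignedLoad` relies on above):

```c++
// Hypothetical helper, assuming a cir::CIRBaseBuilderTy-derived builder.
mlir::Value emitVolatileRead(cir::CIRBaseBuilderTy &builder,
                             mlir::Location loc, mlir::Value ptr) {
  // isVolatile=true sets the new `volatile` unit attribute on cir.load;
  // alignment keeps its default of 0 (derived from the type's ABI layout).
  return builder.createLoad(loc, ptr, /*isVolatile=*/true);
}
```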

View File

@@ -404,7 +404,11 @@ def CIR_LoadOp : CIR_Op<"load", [
    `cir.load` reads a value (lvalue to rvalue conversion) given an address
    backed up by a `cir.ptr` type. A unit attribute `deref` can be used to
    mark the resulting value as used by another operation to dereference
-   a pointer.
+   a pointer. A unit attribute `volatile` can be used to indicate a volatile
+   load. A load can be marked atomic by using `atomic(<mem_order>)`.
+   `alignment` can be used to specify an alignment that's different from the
+   default, which is computed from `result`'s type ABI data layout.

    Example:
@@ -416,18 +420,26 @@ def CIR_LoadOp : CIR_Op<"load", [
    // Load address from memory at address %0. %3 is used by at least one
    // operation that dereferences a pointer.
    %3 = cir.load deref %0 : !cir.ptr<!cir.ptr<i32>>
+
+   // Perform a volatile load from address in %0.
+   %4 = cir.load volatile %0 : !cir.ptr<i32>, i32
+
+   // Others
+   %x = cir.load align(16) atomic(seq_cst) %0 : !cir.ptr<i32>, i32
    ```
  }];

  let arguments = (ins Arg<CIR_PointerType, "the address to load from",
                            [MemRead]>:$addr,
                    UnitAttr:$isDeref,
+                   UnitAttr:$is_volatile,
                    OptionalAttr<I64Attr>:$alignment,
                    OptionalAttr<CIR_MemOrder>:$mem_order);

  let results = (outs CIR_AnyType:$result);

  let assemblyFormat = [{
    (`deref` $isDeref^)?
+   (`volatile` $is_volatile^)?
    (`align` `(` $alignment^ `)`)?
    (`atomic` `(` $mem_order^ `)`)?
    $addr `:` qualified(type($addr)) `,` type($result) attr-dict
@@ -452,7 +464,7 @@ def CIR_StoreOp : CIR_Op<"store", [
    a volatile store. Store's can be marked atomic by using
    `atomic(<mem_order>)`.

-   `align` can be used to specify an alignment that's different from the
+   `alignment` can be used to specify an alignment that's different from the
    default, which is computed from `result`'s type ABI data layout.

    Example:
@@ -460,16 +472,24 @@ def CIR_StoreOp : CIR_Op<"store", [
    ```mlir
    // Store a function argument to local storage, address in %0.
    cir.store %arg0, %0 : i32, !cir.ptr<i32>
+
+   // Perform a volatile store into memory location at the address in %0.
+   cir.store volatile %arg0, %0 : i32, !cir.ptr<i32>
+
+   // Others
+   cir.store align(16) atomic(seq_cst) %x, %addr : i32, !cir.ptr<i32>
    ```
  }];

  let arguments = (ins CIR_AnyType:$value,
                   Arg<CIR_PointerType, "the address to store the value",
                       [MemWrite]>:$addr,
+                  UnitAttr:$is_volatile,
                   OptionalAttr<I64Attr>:$alignment,
                   OptionalAttr<CIR_MemOrder>:$mem_order);

  let assemblyFormat = [{
+   (`volatile` $is_volatile^)?
    (`align` `(` $alignment^ `)`)?
    (`atomic` `(` $mem_order^ `)`)?
    $value `,` $addr attr-dict `:` type($value) `,` qualified(type($addr))

View File

@@ -47,9 +47,8 @@ struct MissingFeatures {
   // Load/store attributes
   static bool opLoadStoreThreadLocal() { return false; }
   static bool opLoadEmitScalarRangeCheck() { return false; }
   static bool opLoadBooleanRepresentation() { return false; }
   static bool opLoadStoreNontemporal() { return false; }
   static bool opLoadStoreTbaa() { return false; }
-  static bool opLoadStoreVolatile() { return false; }
   static bool opLoadStoreAtomic() { return false; }
   static bool opLoadStoreObjC() { return false; }
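
For readers new to this file: each MissingFeatures helper is a constant-false predicate that call sites assert on, so deleting one (as done here for `opLoadStoreVolatile`) makes every guarded site fail to compile until it is updated to handle the feature. The usage pattern, as seen throughout the CIRGen and lowering files in this commit:

```c++
// Guard a not-yet-implemented path; trivially true today, and a compile
// error at every guarded site once the predicate is deleted from
// MissingFeatures, which forces those sites to be revisited.
assert(!cir::MissingFeatures::opLoadStoreAtomic());
```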

View File

@@ -375,7 +375,7 @@ public:
                          bool isVolatile = false) {
     mlir::IntegerAttr align = getAlignmentAttr(addr.getAlignment());
     return cir::LoadOp::create(*this, loc, addr.getPointer(), /*isDeref=*/false,
-                               /*alignment=*/align,
+                               isVolatile, /*alignment=*/align,
                                /*mem_order=*/cir::MemOrderAttr{});
   }

View File

@@ -325,6 +325,7 @@ void CIRGenFunction::emitStoreOfScalar(mlir::Value value, Address addr,
     const auto vecTy = cast<cir::VectorType>(elementType);

     // TODO(CIR): Use `ABIInfo::getOptimalVectorMemoryType` once it upstreamed
+    assert(!cir::MissingFeatures::cirgenABIInfo());
     if (vecTy.getSize() == 3 && !getLangOpts().PreserveVec3Type)
       cgm.errorNYI(addr.getPointer().getLoc(),
                    "emitStoreOfScalar Vec3 & PreserveVec3Type disabled");
@@ -345,7 +346,7 @@ void CIRGenFunction::emitStoreOfScalar(mlir::Value value, Address addr,
   }

   assert(currSrcLoc && "must pass in source location");
-  builder.createStore(*currSrcLoc, value, addr /*, isVolatile*/);
+  builder.createStore(*currSrcLoc, value, addr, isVolatile);

   if (isNontemporal) {
     cgm.errorNYI(addr.getPointer().getLoc(), "emitStoreOfScalar nontemporal");
@@ -543,23 +544,52 @@ void CIRGenFunction::emitStoreOfScalar(mlir::Value value, LValue lvalue,
                     lvalue.getType(), isInit, /*isNontemporal=*/false);
 }

-mlir::Value CIRGenFunction::emitLoadOfScalar(LValue lvalue,
-                                             SourceLocation loc) {
+mlir::Value CIRGenFunction::emitLoadOfScalar(Address addr, bool isVolatile,
+                                             QualType ty, SourceLocation loc,
+                                             LValueBaseInfo baseInfo) {
   assert(!cir::MissingFeatures::opLoadStoreThreadLocal());
-  assert(!cir::MissingFeatures::opLoadEmitScalarRangeCheck());
-  assert(!cir::MissingFeatures::opLoadBooleanRepresentation());

-  Address addr = lvalue.getAddress();
   mlir::Type eltTy = addr.getElementType();

+  if (const auto *clangVecTy = ty->getAs<clang::VectorType>()) {
+    if (clangVecTy->isExtVectorBoolType()) {
+      cgm.errorNYI(loc, "emitLoadOfScalar: ExtVectorBoolType");
+      return nullptr;
+    }
+
+    const auto vecTy = cast<cir::VectorType>(eltTy);
+
+    // Handle vectors of size 3 like size 4 for better performance.
+    assert(!cir::MissingFeatures::cirgenABIInfo());
+    if (vecTy.getSize() == 3 && !getLangOpts().PreserveVec3Type)
+      cgm.errorNYI(addr.getPointer().getLoc(),
+                   "emitLoadOfScalar Vec3 & PreserveVec3Type disabled");
+  }
+
+  assert(!cir::MissingFeatures::opLoadStoreTbaa());
+  LValue atomicLValue = LValue::makeAddr(addr, ty, baseInfo);
+  if (ty->isAtomicType() || isLValueSuitableForInlineAtomic(atomicLValue))
+    cgm.errorNYI("emitLoadOfScalar: load atomic");
+
   if (mlir::isa<cir::VoidType>(eltTy))
     cgm.errorNYI(loc, "emitLoadOfScalar: void type");

-  mlir::Value loadOp = builder.createLoad(getLoc(loc), addr);
+  assert(!cir::MissingFeatures::opLoadEmitScalarRangeCheck());
+  mlir::Value loadOp = builder.createLoad(getLoc(loc), addr, isVolatile);
+
+  if (!ty->isBooleanType() && ty->hasBooleanRepresentation())
+    cgm.errorNYI("emitLoadOfScalar: boolean type with boolean representation");

   return loadOp;
 }

+mlir::Value CIRGenFunction::emitLoadOfScalar(LValue lvalue,
+                                             SourceLocation loc) {
+  assert(!cir::MissingFeatures::opLoadStoreNontemporal());
+  assert(!cir::MissingFeatures::opLoadStoreTbaa());
+  return emitLoadOfScalar(lvalue.getAddress(), lvalue.isVolatile(),
+                          lvalue.getType(), loc, lvalue.getBaseInfo());
+}
+
 /// Given an expression that represents a value lvalue, this
 /// method emits the address of the lvalue, then loads the result as an rvalue,
 /// returning the rvalue.

View File

@@ -101,7 +101,11 @@ public:
   void VisitCastExpr(CastExpr *e) {
     switch (e->getCastKind()) {
     case CK_LValueToRValue:
-      assert(!cir::MissingFeatures::aggValueSlotVolatile());
+      // If we're loading from a volatile type, force the destination
+      // into existence.
+      if (e->getSubExpr()->getType().isVolatileQualified())
+        cgf.cgm.errorNYI(e->getSourceRange(),
+                         "AggExprEmitter: volatile lvalue-to-rvalue cast");
       [[fallthrough]];
     case CK_NoOp:
     case CK_UserDefinedConversion:

View File

@@ -1292,6 +1292,8 @@ public:
   /// the LLVM value representation. The l-value must be a simple
   /// l-value.
   mlir::Value emitLoadOfScalar(LValue lvalue, SourceLocation loc);
+  mlir::Value emitLoadOfScalar(Address addr, bool isVolatile, QualType ty,
+                               SourceLocation loc, LValueBaseInfo baseInfo);

   /// Emit code to compute a designator that specifies the location
   /// of the expression.
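
The new Address-based overload lets paths that never materialize an LValue emit scalar loads with explicit volatility, with the LValue overload reduced to a thin wrapper. A hedged sketch of a call site (the helper and its name are hypothetical, and the `LValueBaseInfo`/`AlignmentSource` usage is an assumption about this header's types):

```c++
// Hypothetical caller that only has a raw Address plus the Clang type.
mlir::Value reloadScalar(CIRGenFunction &cgf, Address addr, QualType ty,
                         SourceLocation loc) {
  // Volatility is now passed explicitly rather than read off an LValue;
  // baseInfo feeds the inline-atomic suitability check.
  return cgf.emitLoadOfScalar(addr, ty.isVolatileQualified(), ty, loc,
                              LValueBaseInfo(AlignmentSource::Type));
}
```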

View File

@@ -1214,10 +1214,10 @@ mlir::LogicalResult CIRToLLVMLoadOpLowering::matchAndRewrite(
   assert(!cir::MissingFeatures::lowerModeOptLevel());

   // TODO: nontemporal, syncscope.
-  assert(!cir::MissingFeatures::opLoadStoreVolatile());
+  assert(!cir::MissingFeatures::opLoadStoreNontemporal());
   mlir::LLVM::LoadOp newLoad = mlir::LLVM::LoadOp::create(
       rewriter, op->getLoc(), llvmTy, adaptor.getAddr(), alignment,
-      /*isVolatile=*/false, /*isNonTemporal=*/false,
+      op.getIsVolatile(), /*isNonTemporal=*/false,
       /*isInvariant=*/false, /*isInvariantGroup=*/false, ordering);

   // Convert adapted result to its original type if needed.
@@ -1244,10 +1244,11 @@ mlir::LogicalResult CIRToLLVMStoreOpLowering::matchAndRewrite(
   mlir::Value value = emitToMemory(rewriter, dataLayout,
                                    op.getValue().getType(), adaptor.getValue());

   // TODO: nontemporal, syncscope.
-  assert(!cir::MissingFeatures::opLoadStoreVolatile());
+  assert(!cir::MissingFeatures::opLoadStoreNontemporal());
+  assert(!cir::MissingFeatures::opLoadStoreTbaa());
   mlir::LLVM::StoreOp storeOp = mlir::LLVM::StoreOp::create(
       rewriter, op->getLoc(), value, adaptor.getAddr(), alignment,
-      /*isVolatile=*/false,
+      op.getIsVolatile(),
       /*isNonTemporal=*/false, /*isInvariantGroup=*/false, memorder);
   rewriter.replaceOp(op, storeOp);
-  assert(!cir::MissingFeatures::opLoadStoreTbaa());
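
With both lowerings forwarding the op's volatile bit, the usual volatile guarantees now hold end to end. A small illustrative example (not from this commit) of what preserving the flag buys: LLVM may not merge or elide volatile accesses, so both reads below must survive as separate loads:

```c++
int read_twice(volatile int *p) {
  int a = *p; // load volatile i32 (must be emitted)
  int b = *p; // a second load volatile i32 (may not be CSE'd with the first)
  return a + b;
}
```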

View File

@@ -0,0 +1,184 @@
// RUN: %clang_cc1 -std=c++20 -triple x86_64-unknown-linux-gnu -fclangir -emit-cir %s -o %t.cir
// RUN: FileCheck --check-prefix=CIR %s < %t.cir
// RUN: %clang_cc1 -std=c++20 -triple x86_64-unknown-linux-gnu -fclangir -emit-llvm %s -o %t-cir.ll
// RUN: FileCheck --check-prefix=LLVM %s < %t-cir.ll
// RUN: %clang_cc1 -std=c++20 -triple x86_64-unknown-linux-gnu -emit-llvm %s -o %t.ll
// RUN: FileCheck --check-prefix=OGCG %s < %t.ll

int test_load(volatile int *ptr) {
  return *ptr;
}

// CIR: cir.func dso_local @_Z9test_loadPVi
// CIR: cir.load volatile

// LLVM: define {{.*}} i32 @_Z9test_loadPVi
// LLVM: load volatile i32, ptr %{{.*}}

// OGCG: define {{.*}} i32 @_Z9test_loadPVi
// OGCG: load volatile i32, ptr %{{.*}}

void test_store(volatile int *ptr) {
  *ptr = 42;
}

// CIR: cir.func dso_local @_Z10test_storePVi
// CIR: cir.store volatile

// LLVM: define {{.*}} void @_Z10test_storePVi
// LLVM: store volatile i32 42, ptr %{{.*}}

// OGCG: define {{.*}} void @_Z10test_storePVi
// OGCG: store volatile i32 42, ptr %{{.*}}

struct Foo {
  int x;
  volatile int y;
  volatile int z: 4;
};

int test_load_field1(volatile Foo *ptr) {
  return ptr->x;
}

// CIR: cir.func dso_local @_Z16test_load_field1PV3Foo
// CIR: %[[MEMBER_ADDR:.*]] = cir.get_member
// CIR: %{{.+}} = cir.load volatile{{.*}} %[[MEMBER_ADDR]]

// LLVM: define {{.*}} i32 @_Z16test_load_field1PV3Foo
// LLVM: %[[MEMBER_ADDR:.*]] = getelementptr %struct.Foo, ptr %{{.*}}, i32 0, i32 0
// LLVM: %{{.*}} = load volatile i32, ptr %[[MEMBER_ADDR]]

// OGCG: define {{.*}} i32 @_Z16test_load_field1PV3Foo
// OGCG: %[[MEMBER_ADDR:.*]] = getelementptr inbounds nuw %struct.Foo, ptr %{{.*}}, i32 0, i32 0
// OGCG: %{{.*}} = load volatile i32, ptr %[[MEMBER_ADDR]]

int test_load_field2(Foo *ptr) {
  return ptr->y;
}

// CIR: cir.func dso_local @_Z16test_load_field2P3Foo
// CIR: %[[MEMBER_ADDR:.*]] = cir.get_member
// CIR: %{{.+}} = cir.load volatile{{.*}} %[[MEMBER_ADDR]]

// LLVM: define {{.*}} i32 @_Z16test_load_field2P3Foo
// LLVM: %[[MEMBER_ADDR:.*]] = getelementptr %struct.Foo, ptr %{{.*}}, i32 0, i32 1
// LLVM: %{{.*}} = load volatile i32, ptr %[[MEMBER_ADDR]]

// OGCG: define {{.*}} i32 @_Z16test_load_field2P3Foo
// OGCG: %[[MEMBER_ADDR:.*]] = getelementptr inbounds nuw %struct.Foo, ptr %{{.*}}, i32 0, i32 1
// OGCG: %{{.*}} = load volatile i32, ptr %[[MEMBER_ADDR]]

int test_load_field3(Foo *ptr) {
  return ptr->z;
}

// CIR: cir.func dso_local @_Z16test_load_field3P3Foo
// CIR: %[[MEMBER_ADDR:.*]] = cir.get_member
// CIR: %{{.*}} = cir.get_bitfield align(4) (#bfi_z, %[[MEMBER_ADDR:.+]] {is_volatile} : !cir.ptr<!u8i>) -> !s32i

// LLVM: define {{.*}} i32 @_Z16test_load_field3P3Foo
// LLVM: %[[MEMBER_ADDR:.*]] = getelementptr %struct.Foo, ptr %{{.*}}, i32 0, i32 2
// LLVM: %[[TMP1:.*]] = load volatile i8, ptr %[[MEMBER_ADDR]]
// LLVM: %[[TMP2:.*]] = shl i8 %[[TMP1]], 4
// LLVM: %[[TMP3:.*]] = ashr i8 %[[TMP2]], 4
// LLVM: %{{.*}} = sext i8 %[[TMP3]] to i32

// OGCG: define {{.*}} i32 @_Z16test_load_field3P3Foo
// OGCG: %[[MEMBER_ADDR:.*]] = getelementptr inbounds nuw %struct.Foo, ptr %{{.*}}, i32 0, i32 2
// OGCG: %[[TMP1:.*]] = load volatile i8, ptr %[[MEMBER_ADDR]]
// OGCG: %[[TMP2:.*]] = shl i8 %[[TMP1]], 4
// OGCG: %[[TMP3:.*]] = ashr i8 %[[TMP2]], 4
// OGCG: %{{.*}} = sext i8 %[[TMP3]] to i32

void test_store_field1(volatile Foo *ptr) {
  ptr->x = 42;
}

// CIR: cir.func dso_local @_Z17test_store_field1PV3Foo
// CIR: %[[MEMBER_ADDR:.*]] = cir.get_member
// CIR: cir.store volatile{{.*}} %{{.+}}, %[[MEMBER_ADDR]]

// LLVM: define {{.*}} void @_Z17test_store_field1PV3Foo
// LLVM: %[[MEMBER_ADDR:.*]] = getelementptr %struct.Foo, ptr %{{.*}}, i32 0, i32 0
// LLVM: store volatile i32 42, ptr %[[MEMBER_ADDR]]

// OGCG: define {{.*}} void @_Z17test_store_field1PV3Foo
// OGCG: %[[MEMBER_ADDR:.*]] = getelementptr inbounds nuw %struct.Foo, ptr %{{.*}}, i32 0, i32 0
// OGCG: store volatile i32 42, ptr %[[MEMBER_ADDR]]

void test_store_field2(Foo *ptr) {
  ptr->y = 42;
}

// CIR: cir.func dso_local @_Z17test_store_field2P3Foo
// CIR: %[[MEMBER_ADDR:.*]] = cir.get_member
// CIR: cir.store volatile{{.*}} %{{.+}}, %[[MEMBER_ADDR]]

// LLVM: define {{.*}} void @_Z17test_store_field2P3Foo
// LLVM: %[[MEMBER_ADDR:.*]] = getelementptr %struct.Foo, ptr %{{.*}}, i32 0, i32 1
// LLVM: store volatile i32 42, ptr %[[MEMBER_ADDR]]

// OGCG: define {{.*}} void @_Z17test_store_field2P3Foo
// OGCG: %[[MEMBER_ADDR:.*]] = getelementptr inbounds nuw %struct.Foo, ptr %{{.*}}, i32 0, i32 1
// OGCG: store volatile i32 42, ptr %[[MEMBER_ADDR]]

void test_store_field3(Foo *ptr) {
  ptr->z = 4;
}

// CIR: cir.func dso_local @_Z17test_store_field3P3Foo
// CIR: %[[MEMBER_ADDR:.*]] = cir.get_member
// CIR: cir.set_bitfield align(4) (#bfi_z, %[[MEMBER_ADDR:.+]] : !cir.ptr<!u8i>, %1 : !s32i) {is_volatile}

// LLVM: define {{.*}} void @_Z17test_store_field3P3Foo
// LLVM: %[[MEMBER_ADDR:.*]] = getelementptr %struct.Foo, ptr %{{.*}}, i32 0, i32 2
// LLVM: %[[TMP1:.*]] = load volatile i8, ptr %[[MEMBER_ADDR]]
// LLVM: %[[TMP2:.*]] = and i8 %[[TMP1]], -16
// LLVM: %[[TMP3:.*]] = or i8 %[[TMP2]], 4
// LLVM: store volatile i8 %[[TMP3]], ptr %[[MEMBER_ADDR]]

// OGCG: define {{.*}} void @_Z17test_store_field3P3Foo
// OGCG: %[[MEMBER_ADDR:.*]] = getelementptr inbounds nuw %struct.Foo, ptr %{{.*}}, i32 0, i32 2
// OGCG: %[[TMP1:.*]] = load volatile i8, ptr %[[MEMBER_ADDR]]
// OGCG: %[[TMP2:.*]] = and i8 %[[TMP1]], -16
// OGCG: %[[TMP3:.*]] = or i8 %[[TMP2]], 4
// OGCG: store volatile i8 %[[TMP3]], ptr %[[MEMBER_ADDR]]

struct A {
  int x;
  void set_x(int val) volatile;
  int get_x() volatile;
};

void A::set_x(int val) volatile {
  x = val;
}

// CIR: cir.func dso_local @_ZNV1A5set_xEi
// CIR: %[[MEMBER_ADDR:.*]] = cir.get_member %{{.*}}[0] {name = "x"}
// CIR: cir.store volatile {{.*}} %{{.*}}, %[[MEMBER_ADDR]]

// LLVM: define {{.*}} void @_ZNV1A5set_xEi
// LLVM: %[[MEMBER_ADDR:.*]] = getelementptr %struct.A, ptr %{{.*}}, i32 0, i32 0
// LLVM: store volatile i32 %{{.*}}, ptr %[[MEMBER_ADDR]]

// OGCG: define {{.*}} void @_ZNV1A5set_xEi
// OGCG: %[[MEMBER_ADDR:.*]] = getelementptr inbounds nuw %struct.A, ptr %{{.*}}, i32 0, i32 0
// OGCG: store volatile i32 %{{.*}}, ptr %[[MEMBER_ADDR]]

int A::get_x() volatile {
  return x;
}

// CIR: cir.func dso_local @_ZNV1A5get_xEv
// CIR: %[[MEMBER_ADDR:.*]] = cir.get_member %{{.*}}[0] {name = "x"}
// CIR: cir.load volatile {{.*}} %[[MEMBER_ADDR]]

// LLVM: define {{.*}} i32 @_ZNV1A5get_xEv
// LLVM: %[[MEMBER_ADDR:.*]] = getelementptr %struct.A, ptr %{{.*}}, i32 0, i32 0
// LLVM: %{{.*}} = load volatile i32, ptr %[[MEMBER_ADDR]]

// OGCG: define {{.*}} i32 @_ZNV1A5get_xEv
// OGCG: %[[MEMBER_ADDR:.*]] = getelementptr inbounds nuw %struct.A, ptr %{{.*}}, i32 0, i32 0
// OGCG: %{{.*}} = load volatile i32, ptr %[[MEMBER_ADDR]]