[CIR][CIRGen] Add atomic load support
bcardosolopes authored and lanza committed Apr 29, 2024
1 parent 38015b1 commit fe396fd
Showing 6 changed files with 121 additions and 36 deletions.
42 changes: 24 additions & 18 deletions clang/include/clang/CIR/Dialect/IR/CIROps.td
@@ -271,6 +271,25 @@ def ConstantOp : CIR_Op<"const",
let hasFolder = 1;
}

//===----------------------------------------------------------------------===//
// C/C++ memory order definitions
//===----------------------------------------------------------------------===//

def MemOrderRelaxed : I32EnumAttrCase<"Relaxed", 0, "relaxed">;
def MemOrderConsume : I32EnumAttrCase<"Consume", 1, "consume">;
def MemOrderAcquire : I32EnumAttrCase<"Acquire", 2, "acquire">;
def MemOrderRelease : I32EnumAttrCase<"Release", 3, "release">;
def MemOrderAcqRel : I32EnumAttrCase<"AcquireRelease", 4, "acq_rel">;
def MemOrderSeqCst : I32EnumAttrCase<"SequentiallyConsistent", 5, "seq_cst">;

def MemOrder : I32EnumAttr<
"MemOrder",
"Memory order according to C++11 memory model",
[MemOrderRelaxed, MemOrderConsume, MemOrderAcquire,
MemOrderRelease, MemOrderAcqRel, MemOrderSeqCst]> {
let cppNamespace = "::mlir::cir";
}

//===----------------------------------------------------------------------===//
// AllocaOp
//===----------------------------------------------------------------------===//
@@ -403,13 +422,16 @@ def LoadOp : CIR_Op<"load", [

let arguments = (ins Arg<CIR_PointerType, "the address to load from",
[MemRead]>:$addr, UnitAttr:$isDeref,
UnitAttr:$is_volatile);
UnitAttr:$is_volatile,
OptionalAttr<MemOrder>:$mem_order);
let results = (outs CIR_AnyType:$result);

// FIXME: we should not be printing `cir.ptr` below, that should come
// from the pointer type directly.
let assemblyFormat = [{
(`deref` $isDeref^)? (`volatile` $is_volatile^)?
(`deref` $isDeref^)?
(`volatile` $is_volatile^)?
(`atomic` `(` $mem_order^ `)`)?
$addr `:` `cir.ptr` type($addr) `,` type($result) attr-dict
}];

@@ -3456,22 +3478,6 @@ def IsConstantOp : CIR_Op<"is_constant", [Pure]> {
// Atomic operations
//===----------------------------------------------------------------------===//

// Memory order related definitions.
def MemOrderRelaxed : I32EnumAttrCase<"Relaxed", 0, "relaxed">;
def MemOrderConsume : I32EnumAttrCase<"Consume", 1, "consume">;
def MemOrderAcquire : I32EnumAttrCase<"Acquire", 2, "acquire">;
def MemOrderRelease : I32EnumAttrCase<"Release", 3, "release">;
def MemOrderAcqRel : I32EnumAttrCase<"AcquireRelease", 4, "acq_rel">;
def MemOrderSeqCst : I32EnumAttrCase<"SequentiallyConsistent", 5, "seq_cst">;

def MemOrder : I32EnumAttr<
"MemOrder",
"Memory order according to C++11 memory model",
[MemOrderRelaxed, MemOrderConsume, MemOrderAcquire,
MemOrderRelease, MemOrderAcqRel, MemOrderSeqCst]> {
let cppNamespace = "::mlir::cir";
}

// Binary opcodes for atomic fetch.
def Atomic_Add : I32EnumAttrCase<"Add", 0, "add">;
def Atomic_Sub : I32EnumAttrCase<"Sub", 1, "sub">;
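For reference, a minimal sketch (not part of the patch) of how the new optional `mem_order` attribute can be attached when a `cir.load` is built; it mirrors the builder calls used later in this commit in CIRGenAtomic.cpp and CIRGenExpr.cpp. The `builder`, `loc`, `elemTy`, and `addr` values are assumed to already be in scope.

// Sketch only: build a sequentially-consistent atomic cir.load.
// `builder`, `loc`, `elemTy` (the loaded type) and `addr` (a value of
// !cir.ptr type) are assumed; they are not defined by this patch.
auto order = mlir::cir::MemOrderAttr::get(
    builder.getContext(), mlir::cir::MemOrder::SequentiallyConsistent);
auto load = builder.create<mlir::cir::LoadOp>(
    loc, elemTy, addr, /*isDeref=*/false,
    /*is_volatile=*/false, /*mem_order=*/order);
// With the assembly format above this prints with an `atomic(seq_cst)`
// marker, e.g. the `cir.load atomic(seq_cst)` checked in the test below.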
20 changes: 13 additions & 7 deletions clang/lib/CIR/CodeGen/CIRGenAtomic.cpp
@@ -325,6 +325,7 @@ static void buildAtomicOp(CIRGenFunction &CGF, AtomicExpr *E, Address Dest,

auto &builder = CGF.getBuilder();
auto loc = CGF.getLoc(E->getSourceRange());
auto orderAttr = mlir::cir::MemOrderAttr::get(builder.getContext(), Order);
mlir::cir::AtomicFetchKindAttr fetchAttr;
bool fetchFirst = true;

@@ -357,7 +358,13 @@ static void buildAtomicOp(CIRGenFunction &CGF, AtomicExpr *E, Address Dest,
case AtomicExpr::AO__atomic_load:
case AtomicExpr::AO__scoped_atomic_load_n:
case AtomicExpr::AO__scoped_atomic_load: {
llvm_unreachable("NYI");
auto *load = builder.createLoad(loc, Ptr).getDefiningOp();
// FIXME(cir): add scope information.
assert(!UnimplementedFeature::syncScopeID());
load->setAttr("mem_order", orderAttr);
if (E->isVolatile())
load->setAttr("is_volatile", mlir::UnitAttr::get(builder.getContext()));
builder.createStore(loc, load->getResult(0), Dest);
return;
}

@@ -499,7 +506,6 @@ static void buildAtomicOp(CIRGenFunction &CGF, AtomicExpr *E, Address Dest,
SmallVector<mlir::Value> atomicOperands = {Ptr.getPointer(), LoadVal1};
SmallVector<mlir::Type> atomicResTys = {
Ptr.getPointer().getType().cast<mlir::cir::PointerType>().getPointee()};
auto orderAttr = mlir::cir::MemOrderAttr::get(builder.getContext(), Order);
auto RMWI = builder.create(loc, builder.getStringAttr(Op), atomicOperands,
atomicResTys, {});

@@ -601,7 +607,7 @@ RValue CIRGenFunction::buildAtomicExpr(AtomicExpr *E) {

case AtomicExpr::AO__atomic_load:
case AtomicExpr::AO__scoped_atomic_load:
llvm_unreachable("NYI");
Dest = buildPointerWithAlignment(E->getVal1());
break;

case AtomicExpr::AO__atomic_store:
@@ -716,7 +722,8 @@ RValue CIRGenFunction::buildAtomicExpr(AtomicExpr *E) {
Val2 = Atomics.convertToAtomicIntPointer(Val2);
}
if (Dest.isValid()) {
llvm_unreachable("NYI");
if (ShouldCastToIntPtrTy)
Dest = Atomics.castToAtomicIntPointer(Dest);
} else if (E->isCmpXChg())
llvm_unreachable("NYI");
else if (!RValTy->isVoidType()) {
@@ -1087,9 +1094,8 @@ RValue CIRGenFunction::buildAtomicExpr(AtomicExpr *E) {
break;
}
}
if (RValTy->isVoidType()) {
llvm_unreachable("NYI");
}
if (RValTy->isVoidType())
return RValue::get(nullptr);

return convertTempToRValue(Dest.withElementType(convertTypeForMem(RValTy)),
RValTy, E->getExprLoc());
2 changes: 1 addition & 1 deletion clang/lib/CIR/CodeGen/CIRGenExpr.cpp
@@ -2529,7 +2529,7 @@ mlir::Value CIRGenFunction::buildLoadOfScalar(Address Addr, bool Volatile,

mlir::cir::LoadOp Load = builder.create<mlir::cir::LoadOp>(
Loc, Addr.getElementType(), Addr.getPointer(), /* deref */ false,
Volatile);
Volatile, ::mlir::cir::MemOrderAttr{});

if (isNontemporal) {
llvm_unreachable("NYI");
6 changes: 4 additions & 2 deletions clang/lib/CIR/Dialect/Transforms/LoweringPrepare.cpp
@@ -393,15 +393,17 @@ static void lowerArrayDtorCtorIntoLoop(CIRBaseBuilderTy &builder,
loc,
/*condBuilder=*/
[&](mlir::OpBuilder &b, mlir::Location loc) {
auto currentElement = b.create<mlir::cir::LoadOp>(loc, eltTy, tmpAddr);
auto currentElement =
b.create<mlir::cir::LoadOp>(loc, eltTy, tmpAddr.getResult());
mlir::Type boolTy = mlir::cir::BoolType::get(b.getContext());
auto cmp = builder.create<mlir::cir::CmpOp>(
loc, boolTy, mlir::cir::CmpOpKind::eq, currentElement, end);
builder.createCondition(cmp);
},
/*bodyBuilder=*/
[&](mlir::OpBuilder &b, mlir::Location loc) {
auto currentElement = b.create<mlir::cir::LoadOp>(loc, eltTy, tmpAddr);
auto currentElement =
b.create<mlir::cir::LoadOp>(loc, eltTy, tmpAddr.getResult());

CallOp ctorCall;
op->walk([&](CallOp c) { ctorCall = c; });
35 changes: 33 additions & 2 deletions clang/lib/CIR/Lowering/DirectToLLVM/LowerToLLVM.cpp
@@ -1009,9 +1009,40 @@ class CIRLoadLowering : public mlir::OpConversionPattern<mlir::cir::LoadOp> {
mlir::ConversionPatternRewriter &rewriter) const override {
const auto llvmTy =
getTypeConverter()->convertType(op.getResult().getType());
// FIXME: right now we only pass in the alignment when the memory access is
// atomic, we should always pass it instead.
unsigned alignment = 0;
auto ordering = mlir::LLVM::AtomicOrdering::not_atomic;
if (op.getMemOrder()) {
switch (*op.getMemOrder()) {
case mlir::cir::MemOrder::Relaxed:
ordering = mlir::LLVM::AtomicOrdering::monotonic;
break;
case mlir::cir::MemOrder::Consume:
case mlir::cir::MemOrder::Acquire:
ordering = mlir::LLVM::AtomicOrdering::acquire;
break;
case mlir::cir::MemOrder::Release:
ordering = mlir::LLVM::AtomicOrdering::release;
break;
case mlir::cir::MemOrder::AcquireRelease:
ordering = mlir::LLVM::AtomicOrdering::acq_rel;
break;
case mlir::cir::MemOrder::SequentiallyConsistent:
ordering = mlir::LLVM::AtomicOrdering::seq_cst;
break;
}

mlir::DataLayout layout(op->getParentOfType<mlir::ModuleOp>());
alignment = (unsigned)layout.getTypeABIAlignment(llvmTy);
}

// TODO: nontemporal, invariant, syncscope.
rewriter.replaceOpWithNewOp<mlir::LLVM::LoadOp>(
op, llvmTy, adaptor.getAddr(), /* alignment */ 0,
/* volatile */ op.getIsVolatile());
op, llvmTy, adaptor.getAddr(), /* alignment */ alignment,
/* volatile */ op.getIsVolatile(),
/* nontemporal */ false,
/* invariant */ false, ordering);
return mlir::LogicalResult::success();
}
};
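Read on its own, the ordering conversion added above amounts to a small total mapping from the CIR enum to LLVM's atomic orderings. The helper below is a hypothetical restatement of that switch (the patch keeps it inline in the lowering pattern) and assumes the `mlir::cir::MemOrder` enum generated from CIROps.td plus the headers already included by LowerToLLVM.cpp. Mapping `consume` to `acquire` follows classic clang CodeGen, since LLVM IR has no consume ordering.

// Hypothetical helper, equivalent to the inline switch in CIRLoadLowering;
// not part of the patch.
static mlir::LLVM::AtomicOrdering
getLLVMMemOrder(mlir::cir::MemOrder order) {
  switch (order) {
  case mlir::cir::MemOrder::Relaxed:
    return mlir::LLVM::AtomicOrdering::monotonic;
  case mlir::cir::MemOrder::Consume: // lowered as acquire
  case mlir::cir::MemOrder::Acquire:
    return mlir::LLVM::AtomicOrdering::acquire;
  case mlir::cir::MemOrder::Release:
    return mlir::LLVM::AtomicOrdering::release;
  case mlir::cir::MemOrder::AcquireRelease:
    return mlir::LLVM::AtomicOrdering::acq_rel;
  case mlir::cir::MemOrder::SequentiallyConsistent:
    return mlir::LLVM::AtomicOrdering::seq_cst;
  }
  llvm_unreachable("unknown mem_order");
}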
52 changes: 46 additions & 6 deletions clang/test/CIR/CodeGen/atomic.cpp
@@ -3,6 +3,9 @@
// RUN: %clang_cc1 -std=c++17 -triple x86_64-unknown-linux-gnu -fclangir -emit-llvm %s -o %t.ll
// RUN: FileCheck --check-prefix=LLVM --input-file=%t.ll %s

// Available on resource dir.
#include <stdatomic.h>

typedef struct _a {
_Atomic(int) d;
} at;
@@ -11,11 +14,6 @@ void m() { at y; }

// CHECK: ![[A:.*]] = !cir.struct<struct "_a" {!cir.int<s, 32>}>

enum memory_order {
memory_order_relaxed, memory_order_consume, memory_order_acquire,
memory_order_release, memory_order_acq_rel, memory_order_seq_cst
};

int basic_binop_fetch(int *i) {
return __atomic_add_fetch(i, 1, memory_order_seq_cst);
}
@@ -138,4 +136,46 @@ void min_max_fetch(int *i) {
// LLVM: select i1 %[[ICMP_MAX]], i32 %[[MAX]]
// LLVM: %[[MIN:.*]] = atomicrmw min ptr
// LLVM: %[[ICMP_MIN:.*]] = icmp slt i32 %[[MIN]]
// LLVM: select i1 %[[ICMP_MIN]], i32 %[[MIN]]
// LLVM: select i1 %[[ICMP_MIN]], i32 %[[MIN]]

int fi1(_Atomic(int) *i) {
return __c11_atomic_load(i, memory_order_seq_cst);
}

// CHECK: cir.func @_Z3fi1PU7_Atomici
// CHECK: cir.load atomic(seq_cst)

// LLVM-LABEL: @_Z3fi1PU7_Atomici
// LLVM: load atomic i32, ptr {{.*}} seq_cst, align 4

int fi1a(int *i) {
int v;
__atomic_load(i, &v, memory_order_seq_cst);
return v;
}

// CHECK-LABEL: @_Z4fi1aPi
// CHECK: cir.load atomic(seq_cst)

// LLVM-LABEL: @_Z4fi1aPi
// LLVM: load atomic i32, ptr {{.*}} seq_cst, align 4

int fi1b(int *i) {
return __atomic_load_n(i, memory_order_seq_cst);
}

// CHECK-LABEL: @_Z4fi1bPi
// CHECK: cir.load atomic(seq_cst)

// LLVM-LABEL: @_Z4fi1bPi
// LLVM: load atomic i32, ptr {{.*}} seq_cst, align 4

int fi1c(atomic_int *i) {
return atomic_load(i);
}

// CHECK-LABEL: @_Z4fi1cPU7_Atomici
// CHECK: cir.load atomic(seq_cst)

// LLVM-LABEL: @_Z4fi1cPU7_Atomici
// LLVM: load atomic i32, ptr {{.*}} seq_cst, align 4
