diff --git a/clang/include/clang/CIR/Dialect/IR/CIRDialect.h b/clang/include/clang/CIR/Dialect/IR/CIRDialect.h
index 9d92cc20c0e2..159a1434cdb7 100644
--- a/clang/include/clang/CIR/Dialect/IR/CIRDialect.h
+++ b/clang/include/clang/CIR/Dialect/IR/CIRDialect.h
@@ -43,6 +43,7 @@ namespace impl {
 // corresponding trait classes. This avoids them being template
 // instantiated/duplicated.
 LogicalResult verifySameFirstOperandAndResultType(Operation *op);
+LogicalResult verifySameSecondOperandAndResultType(Operation *op);
 LogicalResult verifySameFirstSecondOperandAndResultType(Operation *op);
 } // namespace impl
 
@@ -59,7 +60,19 @@ class SameFirstOperandAndResultType
 };
 
 /// This class provides verification for ops that are known to have the same
-/// first operand and result type.
+/// second operand and result type.
+///
+template <typename ConcreteType>
+class SameSecondOperandAndResultType
+    : public TraitBase<ConcreteType, SameSecondOperandAndResultType> {
+public:
+  static LogicalResult verifyTrait(Operation *op) {
+    return impl::verifySameSecondOperandAndResultType(op);
+  }
+};
+
+/// This class provides verification for ops that are known to have the same
+/// first and second operand and result type.
 ///
 template <typename ConcreteType>
 class SameFirstSecondOperandAndResultType
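Reviewer note: for readers less familiar with native op traits, the sketch below spells out, in plain MLIR C++, the shape of the check the new trait performs. It is illustrative only and not part of the patch; the actual implementation lands in CIRDialect.cpp further down.

#include "mlir/IR/Operation.h"

// Illustrative only: what SameSecondOperandAndResultType enforces for an op
// such as cir.atomic.add_fetch, where operand #1 ($val) must match the result.
static mlir::LogicalResult
checkSecondOperandMatchesResult(mlir::Operation *op) {
  if (op->getNumOperands() < 2 || op->getNumResults() != 1)
    return op->emitOpError() << "expected at least 2 operands and 1 result";
  if (op->getOperand(1).getType() != op->getResult(0).getType())
    return op->emitOpError()
           << "requires the same type for second operand and result";
  return mlir::success();
}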
diff --git a/clang/include/clang/CIR/Dialect/IR/CIROps.td b/clang/include/clang/CIR/Dialect/IR/CIROps.td
index d1c7cbe1e774..ceff861c38a3 100644
--- a/clang/include/clang/CIR/Dialect/IR/CIROps.td
+++ b/clang/include/clang/CIR/Dialect/IR/CIROps.td
@@ -39,6 +39,17 @@ include "mlir/IR/SymbolInterfaces.td"
 class CIR_Op<string mnemonic, list<Trait> traits = []> :
     Op<CIR_Dialect, mnemonic, traits>;
 
+//===----------------------------------------------------------------------===//
+// CIR Op Traits
+//===----------------------------------------------------------------------===//
+
+def SameFirstOperandAndResultType :
+    NativeOpTrait<"SameFirstOperandAndResultType">;
+def SameSecondOperandAndResultType :
+    NativeOpTrait<"SameSecondOperandAndResultType">;
+def SameFirstSecondOperandAndResultType :
+    NativeOpTrait<"SameFirstSecondOperandAndResultType">;
+
 //===----------------------------------------------------------------------===//
 // CastOp
 //===----------------------------------------------------------------------===//
@@ -109,6 +120,7 @@ def CastOp : CIR_Op<"cast", [Pure]> {
 
   // The input and output types should match the cast kind.
   let hasVerifier = 1;
+  let hasFolder = 1;
 }
 
 //===----------------------------------------------------------------------===//
@@ -183,9 +195,6 @@ def PtrDiffOp : CIR_Op<"ptr_diff", [Pure, SameTypeOperands]> {
 // PtrStrideOp
 //===----------------------------------------------------------------------===//
 
-def SameFirstOperandAndResultType :
-    NativeOpTrait<"SameFirstOperandAndResultType">;
-
 def PtrStrideOp : CIR_Op<"ptr_stride",
                          [Pure, SameFirstOperandAndResultType]> {
   let summary = "Pointer access with stride";
@@ -2933,9 +2942,6 @@ def MemChrOp : CIR_Op<"libc.memchr"> {
 // StdFindOp
 //===----------------------------------------------------------------------===//
 
-def SameFirstSecondOperandAndResultType :
-    NativeOpTrait<"SameFirstSecondOperandAndResultType">;
-
 def StdFindOp : CIR_Op<"std.find", [SameFirstSecondOperandAndResultType]> {
   let arguments = (ins FlatSymbolRefAttr:$original_fn,
                        CIR_AnyType:$first,
@@ -3412,6 +3418,46 @@ def IsConstantOp : CIR_Op<"is_constant", [Pure]> {
   }];
 }
 
+//===----------------------------------------------------------------------===//
+// Atomic operations
+//===----------------------------------------------------------------------===//
+
+def MemOrderRelaxed : I32EnumAttrCase<"Relaxed", 0, "relaxed">;
+def MemOrderConsume : I32EnumAttrCase<"Consume", 1, "consume">;
+def MemOrderAcquire : I32EnumAttrCase<"Acquire", 2, "acquire">;
+def MemOrderRelease : I32EnumAttrCase<"Release", 3, "release">;
+def MemOrderAcqRel : I32EnumAttrCase<"AcquireRelease", 4, "acq_rel">;
+def MemOrderSeqCst : I32EnumAttrCase<"SequentiallyConsistent", 5, "seq_cst">;
+
+def MemOrder : I32EnumAttr<
+    "MemOrder",
+    "Memory order according to C++11 memory model",
+    [MemOrderRelaxed, MemOrderConsume, MemOrderAcquire,
+     MemOrderRelease, MemOrderAcqRel, MemOrderSeqCst]> {
+  let cppNamespace = "::mlir::cir";
+}
+
+def AtomicAddFetch : CIR_Op<"atomic.add_fetch",
+                            [Pure, SameSecondOperandAndResultType]> {
+  let summary = "Represents the __atomic_add_fetch builtin";
+  let description = [{}];
+  let results = (outs CIR_AnyIntOrFloat:$result);
+  let arguments = (ins IntOrFPPtr:$ptr, CIR_AnyIntOrFloat:$val,
+                       Arg<MemOrder, "memory order">:$mem_order,
+                       UnitAttr:$is_volatile);
+
+  let assemblyFormat = [{
+    `(`
+      $ptr `:` type($ptr) `,`
+      $val `:` type($val) `,`
+      $mem_order `)`
+    (`volatile` $is_volatile^)?
+    `:` type($result) attr-dict
+  }];
+
+  let hasVerifier = 0;
+}
+
 //===----------------------------------------------------------------------===//
 // Operations Lowered Directly to LLVM IR
 //
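Reviewer note: a sketch of what creating the new op with the ODS-generated typed builder could look like from codegen. This is illustrative only; the exact generated builder signature may differ, `loc`, `valTy`, `ptr`, and `val` are assumed names, and the patch itself deliberately uses the generic StringAttr-based create in CIRGenAtomic.cpp so a single code path can serve future atomic.*_fetch ops.

// Hypothetical typed-builder usage; not part of the patch.
auto memOrder = mlir::cir::MemOrderAttr::get(
    builder.getContext(), mlir::cir::MemOrder::SequentiallyConsistent);
auto res = builder.create<mlir::cir::AtomicAddFetch>(
    loc, /*result=*/valTy, ptr, val, memOrder,
    /*is_volatile=*/mlir::UnitAttr());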
diff --git a/clang/include/clang/CIR/Dialect/IR/CIROpsEnums.h b/clang/include/clang/CIR/Dialect/IR/CIROpsEnums.h
index 889cde696e91..06851947f24c 100644
--- a/clang/include/clang/CIR/Dialect/IR/CIROpsEnums.h
+++ b/clang/include/clang/CIR/Dialect/IR/CIROpsEnums.h
@@ -115,6 +115,18 @@ LLVM_ATTRIBUTE_UNUSED static bool isValidLinkage(GlobalLinkageKind L) {
          isLinkOnceLinkage(L);
 }
 
+bool operator<(mlir::cir::MemOrder, mlir::cir::MemOrder) = delete;
+bool operator>(mlir::cir::MemOrder, mlir::cir::MemOrder) = delete;
+bool operator<=(mlir::cir::MemOrder, mlir::cir::MemOrder) = delete;
+bool operator>=(mlir::cir::MemOrder, mlir::cir::MemOrder) = delete;
+
+// Check that an integral value, not known to fit within the enum's range,
+// is a valid MemOrder (CIR's analogue of llvm::isValidAtomicOrderingCABI).
+template <typename Int> inline bool isValidCIRAtomicOrderingCABI(Int I) {
+  return (Int)mlir::cir::MemOrder::Relaxed <= I &&
+         I <= (Int)mlir::cir::MemOrder::SequentiallyConsistent;
+}
+
 } // namespace cir
 } // namespace mlir
diff --git a/clang/include/clang/CIR/Dialect/IR/CIRTypes.td b/clang/include/clang/CIR/Dialect/IR/CIRTypes.td
index 489afcff7d96..c67d3013ed7f 100644
--- a/clang/include/clang/CIR/Dialect/IR/CIRTypes.td
+++ b/clang/include/clang/CIR/Dialect/IR/CIRTypes.td
@@ -169,6 +169,7 @@ def CIR_Double : CIR_FloatType<"Double", "double"> {
 // Constraints
 
 def CIR_AnyFloat: AnyTypeOf<[CIR_Single, CIR_Double]>;
+def CIR_AnyIntOrFloat: AnyTypeOf<[CIR_AnyFloat, CIR_IntType]>;
 
 //===----------------------------------------------------------------------===//
 // PointerType
@@ -373,6 +374,16 @@ def VoidPtr : Type<
     "mlir::cir::VoidType::get($_builder.getContext()))"> {
}
 
+// Pointer to int, float or double
+def IntOrFPPtr : Type<
+    And<[
+      CPred<"$_self.isa<::mlir::cir::PointerType>()">,
+      CPred<"$_self.cast<::mlir::cir::PointerType>()"
+            ".getPointee().isa<::mlir::cir::IntType,"
+            "::mlir::cir::SingleType, ::mlir::cir::DoubleType>()">,
+    ]>, "{int,float,double}*"> {
+}
+
 // Pointer to struct
 def StructPtr : Type<
     And<[
diff --git a/clang/lib/CIR/CodeGen/Address.h b/clang/lib/CIR/CodeGen/Address.h
index e67f640a911f..99544623ad2b 100644
--- a/clang/lib/CIR/CodeGen/Address.h
+++ b/clang/lib/CIR/CodeGen/Address.h
@@ -79,6 +79,7 @@ class Address {
   /// Return address with different element type, but same pointer and
   /// alignment.
   Address withElementType(mlir::Type ElemTy) const {
+    // TODO(cir): hasOffset() check
     return Address(getPointer(), ElemTy, getAlignment(), isKnownNonNull());
   }
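Reviewer note on the CIROpsEnums.h helper above: deleting the relational operators forces call sites to range-check through the named helper instead of comparing enum values directly. A minimal usage sketch (illustrative, not part of the patch; it mirrors the guard buildAtomicExpr uses further down):

// Illustrative only: guarding a constant order operand before casting it.
uint64_t ord = ordAttr.getUInt(); // e.g. 5 for seq_cst
if (mlir::cir::isValidCIRAtomicOrderingCABI(ord)) {
  auto memOrder = static_cast<mlir::cir::MemOrder>(ord);
  // ... dispatch on memOrder; out-of-range constants are skipped rather
  // than crashing, matching classic LLVM codegen behavior.
}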
diff --git a/clang/lib/CIR/CodeGen/CIRGenAtomic.cpp b/clang/lib/CIR/CodeGen/CIRGenAtomic.cpp
index 8f8ccd0e87a7..23ca168d9765 100644
--- a/clang/lib/CIR/CodeGen/CIRGenAtomic.cpp
+++ b/clang/lib/CIR/CodeGen/CIRGenAtomic.cpp
@@ -269,6 +269,10 @@ static Address buildValToTemp(CIRGenFunction &CGF, Expr *E) {
 }
 
 Address AtomicInfo::castToAtomicIntPointer(Address addr) const {
+  auto intTy = addr.getElementType().dyn_cast<mlir::cir::IntType>();
+  // Don't bother with int casts if the integer size is the same.
+  if (intTy && intTy.getWidth() == AtomicSizeInBits)
+    return addr;
   auto ty = CGF.getBuilder().getUIntNTy(AtomicSizeInBits);
   return addr.withElementType(ty);
 }
@@ -314,10 +318,12 @@ static mlir::cir::IntAttr getConstOpIntAttr(mlir::Value v) {
 
 static void buildAtomicOp(CIRGenFunction &CGF, AtomicExpr *E, Address Dest,
                           Address Ptr, Address Val1, Address Val2,
                           mlir::Value IsWeak, mlir::Value FailureOrder,
-                          uint64_t Size, llvm::AtomicOrdering Order,
+                          uint64_t Size, mlir::cir::MemOrder Order,
                           uint8_t Scope) {
   assert(!UnimplementedFeature::syncScopeID());
+  StringRef Op;
   [[maybe_unused]] bool PostOpMinMax = false;
+  auto loc = CGF.getLoc(E->getSourceRange());
 
   switch (E->getOp()) {
   case AtomicExpr::AO__c11_atomic_init:
@@ -375,18 +381,19 @@ static void buildAtomicOp(CIRGenFunction &CGF, AtomicExpr *E, Address Dest,
 
   case AtomicExpr::AO__atomic_add_fetch:
   case AtomicExpr::AO__scoped_atomic_add_fetch:
-    llvm_unreachable("NYI");
+    // In LLVM codegen, the post operation is tracked here.
    [[fallthrough]];
   case AtomicExpr::AO__c11_atomic_fetch_add:
   case AtomicExpr::AO__hip_atomic_fetch_add:
   case AtomicExpr::AO__opencl_atomic_fetch_add:
   case AtomicExpr::AO__atomic_fetch_add:
   case AtomicExpr::AO__scoped_atomic_fetch_add:
-    llvm_unreachable("NYI");
+    Op = mlir::cir::AtomicAddFetch::getOperationName();
     break;
 
   case AtomicExpr::AO__atomic_sub_fetch:
   case AtomicExpr::AO__scoped_atomic_sub_fetch:
+    // In LLVM codegen, the post operation is tracked here.
     llvm_unreachable("NYI");
     [[fallthrough]];
   case AtomicExpr::AO__c11_atomic_fetch_sub:
@@ -423,6 +430,7 @@ static void buildAtomicOp(CIRGenFunction &CGF, AtomicExpr *E, Address Dest,
 
   case AtomicExpr::AO__atomic_and_fetch:
   case AtomicExpr::AO__scoped_atomic_and_fetch:
+    // In LLVM codegen, the post operation is tracked here.
     llvm_unreachable("NYI");
     [[fallthrough]];
   case AtomicExpr::AO__c11_atomic_fetch_and:
@@ -435,6 +443,7 @@ static void buildAtomicOp(CIRGenFunction &CGF, AtomicExpr *E, Address Dest,
 
   case AtomicExpr::AO__atomic_or_fetch:
   case AtomicExpr::AO__scoped_atomic_or_fetch:
+    // In LLVM codegen, the post operation is tracked here.
     llvm_unreachable("NYI");
     [[fallthrough]];
   case AtomicExpr::AO__c11_atomic_fetch_or:
@@ -447,6 +456,7 @@ static void buildAtomicOp(CIRGenFunction &CGF, AtomicExpr *E, Address Dest,
 
   case AtomicExpr::AO__atomic_xor_fetch:
   case AtomicExpr::AO__scoped_atomic_xor_fetch:
+    // In LLVM codegen, the post operation is tracked here.
     llvm_unreachable("NYI");
     [[fallthrough]];
   case AtomicExpr::AO__c11_atomic_fetch_xor:
@@ -459,6 +469,7 @@ static void buildAtomicOp(CIRGenFunction &CGF, AtomicExpr *E, Address Dest,
 
   case AtomicExpr::AO__atomic_nand_fetch:
   case AtomicExpr::AO__scoped_atomic_nand_fetch:
+    // In LLVM codegen, the post operation is tracked here.
     llvm_unreachable("NYI");
     [[fallthrough]];
   case AtomicExpr::AO__c11_atomic_fetch_nand:
@@ -467,13 +478,38 @@ static void buildAtomicOp(CIRGenFunction &CGF, AtomicExpr *E, Address Dest,
     llvm_unreachable("NYI");
     break;
   }
-  llvm_unreachable("NYI");
+
+  assert(Op.size() && "expected operation name to build");
+  auto &builder = CGF.getBuilder();
+
+  auto LoadVal1 = builder.createLoad(loc, Val1);
+
+  SmallVector<mlir::Value> atomicOperands = {Ptr.getPointer(), LoadVal1};
+  SmallVector<mlir::Type> atomicResTys = {
+      Ptr.getPointer().getType().cast<mlir::cir::PointerType>().getPointee()};
+  auto orderAttr = mlir::cir::MemOrderAttr::get(builder.getContext(), Order);
+  auto RMWI = builder.create(loc, builder.getStringAttr(Op), atomicOperands,
+                             atomicResTys, {});
+  RMWI->setAttr("mem_order", orderAttr);
+  if (E->isVolatile())
+    RMWI->setAttr("is_volatile", mlir::UnitAttr::get(builder.getContext()));
+  auto Result = RMWI->getResult(0);
+
+  if (PostOpMinMax)
+    llvm_unreachable("NYI");
+
+  // This should be handled in LowerToLLVM.cpp, still tracking here for now.
+  if (E->getOp() == AtomicExpr::AO__atomic_nand_fetch ||
+      E->getOp() == AtomicExpr::AO__scoped_atomic_nand_fetch)
+    llvm_unreachable("NYI");
+
+  builder.createStore(loc, Result, Dest);
 }
 
 static void buildAtomicOp(CIRGenFunction &CGF, AtomicExpr *Expr, Address Dest,
                           Address Ptr, Address Val1, Address Val2,
                           mlir::Value IsWeak, mlir::Value FailureOrder,
-                          uint64_t Size, llvm::AtomicOrdering Order,
+                          uint64_t Size, mlir::cir::MemOrder Order,
                           mlir::Value Scope) {
   auto ScopeModel = Expr->getScopeModel();
@@ -1011,34 +1047,34 @@ RValue CIRGenFunction::buildAtomicExpr(AtomicExpr *E) {
     // We should not ever get to a case where the ordering isn't a valid CABI
     // value, but it's hard to enforce that in general.
     auto ord = ordAttr.getUInt();
-    if (llvm::isValidAtomicOrderingCABI(ord)) {
-      switch ((llvm::AtomicOrderingCABI)ord) {
-      case llvm::AtomicOrderingCABI::relaxed:
+    if (mlir::cir::isValidCIRAtomicOrderingCABI(ord)) {
+      switch ((mlir::cir::MemOrder)ord) {
+      case mlir::cir::MemOrder::Relaxed:
         buildAtomicOp(*this, E, Dest, Ptr, Val1, Val2, IsWeak, OrderFail, Size,
-                      llvm::AtomicOrdering::Monotonic, Scope);
+                      mlir::cir::MemOrder::Relaxed, Scope);
         break;
-      case llvm::AtomicOrderingCABI::consume:
-      case llvm::AtomicOrderingCABI::acquire:
+      case mlir::cir::MemOrder::Consume:
+      case mlir::cir::MemOrder::Acquire:
         if (IsStore)
           break; // Avoid crashing on code with undefined behavior
         buildAtomicOp(*this, E, Dest, Ptr, Val1, Val2, IsWeak, OrderFail, Size,
-                      llvm::AtomicOrdering::Acquire, Scope);
+                      mlir::cir::MemOrder::Acquire, Scope);
         break;
-      case llvm::AtomicOrderingCABI::release:
+      case mlir::cir::MemOrder::Release:
        if (IsLoad)
          break; // Avoid crashing on code with undefined behavior
        buildAtomicOp(*this, E, Dest, Ptr, Val1, Val2, IsWeak, OrderFail, Size,
-                      llvm::AtomicOrdering::Release, Scope);
+                      mlir::cir::MemOrder::Release, Scope);
        break;
-      case llvm::AtomicOrderingCABI::acq_rel:
+      case mlir::cir::MemOrder::AcquireRelease:
        if (IsLoad || IsStore)
          break; // Avoid crashing on code with undefined behavior
        buildAtomicOp(*this, E, Dest, Ptr, Val1, Val2, IsWeak, OrderFail, Size,
-                      llvm::AtomicOrdering::AcquireRelease, Scope);
+                      mlir::cir::MemOrder::AcquireRelease, Scope);
        break;
-      case llvm::AtomicOrderingCABI::seq_cst:
+      case mlir::cir::MemOrder::SequentiallyConsistent:
        buildAtomicOp(*this, E, Dest, Ptr, Val1, Val2, IsWeak, OrderFail, Size,
-                      llvm::AtomicOrdering::SequentiallyConsistent, Scope);
+                      mlir::cir::MemOrder::SequentiallyConsistent, Scope);
        break;
      }
    }
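Reviewer note: the StringAttr-based builder.create call in buildAtomicOp above is MLIR's generic operation builder, which is convenient here because only the op's mnemonic is at hand in `Op`. The sketch below shows the equivalent explicit form via OperationState; it is illustrative only and not part of the patch.

// Illustrative only: the generic create spelled out by hand.
mlir::OperationState state(loc, Op); // Op == "cir.atomic.add_fetch"
state.addOperands({Ptr.getPointer(), LoadVal1});
state.addTypes(atomicResTys);
state.addAttribute("mem_order", orderAttr);
mlir::Operation *rmw = builder.create(state);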
diff --git a/clang/lib/CIR/CodeGen/CIRGenBuilder.h b/clang/lib/CIR/CodeGen/CIRGenBuilder.h
index ae1c42d1cc18..76da99ace30d 100644
--- a/clang/lib/CIR/CodeGen/CIRGenBuilder.h
+++ b/clang/lib/CIR/CodeGen/CIRGenBuilder.h
@@ -742,8 +742,10 @@ class CIRGenBuilderTy : public CIRBaseBuilderTy {
   }
 
   mlir::Value createLoad(mlir::Location loc, Address addr) {
-    return create<mlir::cir::LoadOp>(loc, addr.getElementType(),
-                                     addr.getPointer());
+    auto ptrTy = addr.getPointer().getType().dyn_cast<mlir::cir::PointerType>();
+    return create<mlir::cir::LoadOp>(
+        loc, addr.getElementType(),
+        createElementBitCast(loc, addr, ptrTy.getPointee()).getPointer());
   }
 
   mlir::Value createAlignedLoad(mlir::Location loc, mlir::Type ty,
diff --git a/clang/lib/CIR/Dialect/IR/CIRDialect.cpp b/clang/lib/CIR/Dialect/IR/CIRDialect.cpp
index aa4364ffa169..9893d9b3c27a 100644
--- a/clang/lib/CIR/Dialect/IR/CIRDialect.cpp
+++ b/clang/lib/CIR/Dialect/IR/CIRDialect.cpp
@@ -511,6 +511,20 @@ LogicalResult CastOp::verify() {
   llvm_unreachable("Unknown CastOp kind?");
 }
 
+OpFoldResult CastOp::fold(FoldAdaptor adaptor) {
+  if (getKind() != mlir::cir::CastKind::integral)
+    return {};
+  if (getSrc().getType() != getResult().getType())
+    return {};
+  // TODO: for sign differences, it's possible in certain conditions to
+  // create a new attribute that's capable of representing the source.
+  SmallVector<OpFoldResult, 1> foldResults;
+  auto foldOrder = getSrc().getDefiningOp()->fold(foldResults);
+  if (foldOrder.succeeded() && foldResults[0].is<mlir::Attribute>())
+    return foldResults[0].get<mlir::Attribute>();
+  return {};
+}
+
 //===----------------------------------------------------------------------===//
 // VecCreateOp
 //===----------------------------------------------------------------------===//
@@ -2373,6 +2387,21 @@ mlir::OpTrait::impl::verifySameFirstOperandAndResultType(Operation *op) {
   return success();
 }
 
+LogicalResult
+mlir::OpTrait::impl::verifySameSecondOperandAndResultType(Operation *op) {
+  if (failed(verifyAtLeastNOperands(op, 2)) || failed(verifyOneResult(op)))
+    return failure();
+
+  auto type = op->getResult(0).getType();
+  auto opType = op->getOperand(1).getType();
+
+  if (type != opType)
+    return op->emitOpError()
+           << "requires the same type for second operand and result";
+
+  return success();
+}
+
 LogicalResult
 mlir::OpTrait::impl::verifySameFirstSecondOperandAndResultType(Operation *op) {
   if (failed(verifyAtLeastNOperands(op, 3)) || failed(verifyOneResult(op)))
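Reviewer note: the new folder only handles integral casts whose source and result types already match, which is presumably enough for the case this patch cares about: the memory-order argument reaches CIRGen as a no-op integral cast of a cir.const, and folding it away lets a helper like getConstOpIntAttr in CIRGenAtomic.cpp see the underlying attribute. A sketch of that consumer pattern (illustrative; the helper name below is hypothetical):

// Illustrative only: after folding, v is produced directly by cir.const.
mlir::cir::IntAttr getConstOrder(mlir::Value v) {
  if (auto c = v.getDefiningOp<mlir::cir::ConstantOp>())
    return c.getValue().dyn_cast<mlir::cir::IntAttr>();
  return {};
}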
diff --git a/clang/lib/CIR/Lowering/DirectToLLVM/LowerToLLVM.cpp b/clang/lib/CIR/Lowering/DirectToLLVM/LowerToLLVM.cpp
index 9e8505747c10..611b8c4dcd5e 100644
--- a/clang/lib/CIR/Lowering/DirectToLLVM/LowerToLLVM.cpp
+++ b/clang/lib/CIR/Lowering/DirectToLLVM/LowerToLLVM.cpp
@@ -2406,6 +2406,65 @@ class CIRBitPopcountOpLowering
   }
 };
 
+class CIRAtomicFetchLowering
+    : public mlir::OpConversionPattern<mlir::cir::AtomicAddFetch> {
+public:
+  using OpConversionPattern::OpConversionPattern;
+
+  mlir::LLVM::AtomicOrdering
+  getLLVMAtomicOrder(mlir::cir::MemOrder memo) const {
+    switch (memo) {
+    case mlir::cir::MemOrder::Relaxed:
+      return mlir::LLVM::AtomicOrdering::monotonic;
+    case mlir::cir::MemOrder::Consume:
+    case mlir::cir::MemOrder::Acquire:
+      return mlir::LLVM::AtomicOrdering::acquire;
+    case mlir::cir::MemOrder::Release:
+      return mlir::LLVM::AtomicOrdering::release;
+    case mlir::cir::MemOrder::AcquireRelease:
+      return mlir::LLVM::AtomicOrdering::acq_rel;
+    case mlir::cir::MemOrder::SequentiallyConsistent:
+      return mlir::LLVM::AtomicOrdering::seq_cst;
+    }
+    llvm_unreachable("shouldn't get here");
+  }
+
+  mlir::LogicalResult buildPostOp(mlir::cir::AtomicAddFetch op,
+                                  OpAdaptor adaptor,
+                                  mlir::ConversionPatternRewriter &rewriter,
+                                  mlir::Value rmwVal) const {
+    if (op.getVal().getType().isa<mlir::cir::IntType>())
+      rewriter.replaceOpWithNewOp<mlir::LLVM::AddOp>(op, rmwVal,
+                                                     adaptor.getVal());
+    else if (op.getVal()
+                 .getType()
+                 .isa<mlir::cir::SingleType, mlir::cir::DoubleType>())
+      rewriter.replaceOpWithNewOp<mlir::LLVM::FAddOp>(op, rmwVal,
+                                                      adaptor.getVal());
+    else
+      return op.emitError() << "Unsupported type";
+    return mlir::success();
+  }
+
+  mlir::LogicalResult
+  matchAndRewrite(mlir::cir::AtomicAddFetch op, OpAdaptor adaptor,
+                  mlir::ConversionPatternRewriter &rewriter) const override {
+    auto llvmOrder = getLLVMAtomicOrder(adaptor.getMemOrder());
+
+    // FIXME: add syncscope.
+    auto rmwVal = rewriter.create<mlir::LLVM::AtomicRMWOp>(
+        op.getLoc(), mlir::LLVM::AtomicBinOp::add, adaptor.getPtr(),
+        adaptor.getVal(), llvmOrder);
+
+    // FIXME: Make the rewrite generic and expand this to more opcodes.
+    bool hasPostOp = isa<mlir::cir::AtomicAddFetch>(op);
+
+    if (hasPostOp)
+      return buildPostOp(op, adaptor, rewriter, rmwVal.getRes());
+    return mlir::success();
+  }
+};
+
 class CIRBrOpLowering : public mlir::OpConversionPattern<mlir::cir::BrOp> {
 public:
   using OpConversionPattern::OpConversionPattern;
@@ -2844,13 +2903,13 @@ void populateCIRToLLVMConversionPatterns(mlir::RewritePatternSet &patterns,
   patterns.add<
       CIRCmpOpLowering, CIRBitClrsbOpLowering, CIRBitClzOpLowering,
      CIRBitCtzOpLowering, CIRBitFfsOpLowering, CIRBitParityOpLowering,
-      CIRBitPopcountOpLowering, CIRLoopOpInterfaceLowering, CIRBrCondOpLowering,
-      CIRPtrStrideOpLowering, CIRCallLowering, CIRUnaryOpLowering,
-      CIRBinOpLowering, CIRShiftOpLowering, CIRLoadLowering,
-      CIRConstantLowering, CIRStoreLowering, CIRAllocaLowering, CIRFuncLowering,
-      CIRScopeOpLowering, CIRCastOpLowering, CIRIfLowering, CIRGlobalOpLowering,
-      CIRGetGlobalOpLowering, CIRVAStartLowering, CIRVAEndLowering,
-      CIRVACopyLowering, CIRVAArgLowering, CIRBrOpLowering,
+      CIRBitPopcountOpLowering, CIRAtomicFetchLowering,
+      CIRLoopOpInterfaceLowering, CIRBrCondOpLowering, CIRPtrStrideOpLowering,
+      CIRCallLowering, CIRUnaryOpLowering, CIRBinOpLowering, CIRShiftOpLowering,
+      CIRLoadLowering, CIRConstantLowering, CIRStoreLowering, CIRAllocaLowering,
+      CIRFuncLowering, CIRScopeOpLowering, CIRCastOpLowering, CIRIfLowering,
+      CIRGlobalOpLowering, CIRGetGlobalOpLowering, CIRVAStartLowering,
+      CIRVAEndLowering, CIRVACopyLowering, CIRVAArgLowering, CIRBrOpLowering,
       CIRTernaryOpLowering, CIRGetMemberOpLowering, CIRSwitchOpLowering,
       CIRPtrDiffOpLowering, CIRCopyOpLowering, CIRMemCpyOpLowering,
       CIRFAbsOpLowering, CIRExpectOpLowering, CIRVTableAddrPointOpLowering,
diff --git a/clang/test/CIR/CodeGen/atomic.cpp b/clang/test/CIR/CodeGen/atomic.cpp
index d806855ba8ed..34cb3158e0af 100644
--- a/clang/test/CIR/CodeGen/atomic.cpp
+++ b/clang/test/CIR/CodeGen/atomic.cpp
@@ -1,5 +1,7 @@
 // RUN: %clang_cc1 -std=c++17 -triple x86_64-unknown-linux-gnu -fclangir -emit-cir %s -o %t.cir
 // RUN: FileCheck --input-file=%t.cir %s
+// RUN: %clang_cc1 -std=c++17 -triple x86_64-unknown-linux-gnu -fclangir -emit-llvm %s -o %t.ll
+// RUN: FileCheck --check-prefix=LLVM --input-file=%t.ll %s
 
 typedef struct _a {
   _Atomic(int) d;
@@ -7,4 +9,27 @@ typedef struct _a {
 
 void m() { at y; }
 
-// CHECK: !ty_22_a22 = !cir.struct<struct "_a" {!cir.int<s, 32>}>
\ No newline at end of file
+// CHECK: ![[A:.*]] = !cir.struct<struct "_a" {!cir.int<s, 32>}>
+
+enum memory_order {
+  memory_order_relaxed, memory_order_consume, memory_order_acquire,
+  memory_order_release, memory_order_acq_rel, memory_order_seq_cst
+};
+
+int fi3b(int *i) {
+  return __atomic_add_fetch(i, 1, memory_order_seq_cst);
+}
+
+// CHECK: cir.func @_Z4fi3bPi
+// CHECK: %[[ARGI:.*]] = cir.alloca !cir.ptr<!s32i>, cir.ptr <!cir.ptr<!s32i>>, ["i", init] {alignment = 8 : i64}
+// CHECK: %[[ONE_ADDR:.*]] = cir.alloca !s32i, cir.ptr <!s32i>, [".atomictmp"] {alignment = 4 : i64}
+// CHECK: cir.store %arg0, %[[ARGI]] : !cir.ptr<!s32i>, cir.ptr <!cir.ptr<!s32i>>
+// CHECK: %[[I:.*]] = cir.load %[[ARGI]] : cir.ptr <!cir.ptr<!s32i>>, !cir.ptr<!s32i>
+// CHECK: %[[ONE:.*]] = cir.const(#cir.int<1> : !s32i) : !s32i
+// CHECK: cir.store %[[ONE]], %[[ONE_ADDR]] : !s32i, cir.ptr <!s32i>
+// CHECK: %[[VAL:.*]] = cir.load %[[ONE_ADDR]] : cir.ptr <!s32i>, !s32i
+// CHECK: cir.atomic.add_fetch(%[[I]] : !cir.ptr<!s32i>, %[[VAL]] : !s32i, seq_cst) : !s32i
+
+// LLVM: define i32 @_Z4fi3bPi
+// LLVM: %[[RMW:.*]] = atomicrmw add ptr {{.*}}, i32 %[[VAL:.*]] seq_cst, align 4
+// LLVM: add i32 %[[RMW]], %[[VAL]]
\ No newline at end of file
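Reviewer note: a possible follow-up test, sketched below and not part of the patch, would cover an out-of-range constant order, which exercises the defensive isValidCIRAtomicOrderingCABI path in buildAtomicExpr rather than the valid seq_cst path that fi3b covers above.

// Hypothetical extra test case, for illustration only.
int fi3b_bad(int *i) {
  // 42 is not a valid memory_order value; CIRGen skips emitting the atomic
  // op instead of crashing, mirroring classic codegen's handling of this UB.
  return __atomic_add_fetch(i, 1, 42);
}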