Auto fixups
smeenai committed Nov 7, 2024
1 parent 65fab2c commit 4c6d517
Showing 35 changed files with 544 additions and 545 deletions.
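The fixups follow one mechanical pattern: support helpers such as MissingFeatures, ABIArgInfo, TypeEvaluationKind, fp::ExceptionBehavior, and CIRDataLayout are now spelled with an explicit cir:: qualifier from inside namespace clang::CIRGen, while Address (which resolves unqualified there) drops its stale cir:: prefix. Below is a minimal, self-contained sketch of the lookup situation being fixed; the useSite function and the single asmVectorType flag are illustrative stand-ins, not the real headers.

#include <cassert>

// Support types live in the top-level cir namespace...
namespace cir {
struct MissingFeatures {
  // Each flag returns false until the feature lands, so
  // assert(!cir::MissingFeatures::foo()) documents a gap
  // without changing behavior.
  static bool asmVectorType() { return false; }
};
} // namespace cir

// ...while CIRGen's own classes live in clang::CIRGen, where an
// unqualified MissingFeatures no longer resolves. Hence the cir::
// qualifiers added throughout this commit.
namespace clang::CIRGen {
inline void useSite() {
  assert(!cir::MissingFeatures::asmVectorType());
}
} // namespace clang::CIRGen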
1 change: 0 additions & 1 deletion clang/lib/CIR/CodeGen/ABIInfo.h
@@ -13,7 +13,6 @@
 
 namespace clang::CIRGen {
 
-class ABIArgInfo;
 class CIRGenCXXABI;
 class CIRGenFunctionInfo;
 class CIRGenTypes;
18 changes: 9 additions & 9 deletions clang/lib/CIR/CodeGen/CIRAsm.cpp
@@ -285,7 +285,7 @@ static void buildAsmStores(CIRGenFunction &CGF, const AsmStmt &S,
     mlir::Type TruncTy = ResultTruncRegTypes[i];
 
     if ((i < ResultRegIsFlagReg.size()) && ResultRegIsFlagReg[i]) {
-      assert(!MissingFeatures::asmLLVMAssume());
+      assert(!cir::MissingFeatures::asmLLVMAssume());
     }
 
     // If the result type of the LLVM IR asm doesn't match the result type of
@@ -311,7 +311,7 @@ static void buildAsmStores(CIRGenFunction &CGF, const AsmStmt &S,
       } else if (isa<mlir::cir::IntType>(TruncTy)) {
         Tmp = Builder.createIntCast(Tmp, TruncTy);
       } else if (false /*TruncTy->isVectorTy()*/) {
-        assert(!MissingFeatures::asmVectorType());
+        assert(!cir::MissingFeatures::asmVectorType());
       }
     }
 
@@ -468,7 +468,7 @@ mlir::LogicalResult CIRGenFunction::buildAsmStmt(const AsmStmt &S) {
       }
 
       // Update largest vector width for any vector types.
-      assert(!MissingFeatures::asmVectorType());
+      assert(!cir::MissingFeatures::asmVectorType());
     } else {
       Address DestAddr = Dest.getAddress();
 
@@ -504,7 +504,7 @@ mlir::LogicalResult CIRGenFunction::buildAsmStmt(const AsmStmt &S) {
        Arg = builder.createBitcast(Arg, AdjTy);
 
      // Update largest vector width for any vector types.
-     assert(!MissingFeatures::asmVectorType());
+     assert(!cir::MissingFeatures::asmVectorType());
 
      // Only tie earlyclobber physregs.
      if (Info.allowsRegister() && (GCCReg.empty() || Info.earlyClobber()))
@@ -521,7 +521,7 @@ mlir::LogicalResult CIRGenFunction::buildAsmStmt(const AsmStmt &S) {
   // If this is a Microsoft-style asm blob, store the return registers (EAX:EDX)
   // to the return value slot. Only do this when returning in registers.
   if (isa<MSAsmStmt>(&S)) {
-    const ABIArgInfo &RetAI = CurFnInfo->getReturnInfo();
+    const cir::ABIArgInfo &RetAI = CurFnInfo->getReturnInfo();
     if (RetAI.isDirect() || RetAI.isExtend()) {
       // Make a fake lvalue for the return value slot.
       LValue ReturnSlot = makeAddrLValue(ReturnValue, FnRetTy);
@@ -593,7 +593,7 @@ mlir::LogicalResult CIRGenFunction::buildAsmStmt(const AsmStmt &S) {
          << InputExpr->getType() << InputConstraint;
 
     // Update largest vector width for any vector types.
-    assert(!MissingFeatures::asmVectorType());
+    assert(!cir::MissingFeatures::asmVectorType());
 
     ArgTypes.push_back(Arg.getType());
     ArgElemTypes.push_back(ArgElemType);
@@ -636,11 +636,11 @@ mlir::LogicalResult CIRGenFunction::buildAsmStmt(const AsmStmt &S) {
       HasSideEffect, inferFlavor(CGM, S), mlir::ArrayAttr());
 
   if (false /*IsGCCAsmGoto*/) {
-    assert(!MissingFeatures::asmGoto());
+    assert(!cir::MissingFeatures::asmGoto());
   } else if (HasUnwindClobber) {
-    assert(!MissingFeatures::asmUnwindClobber());
+    assert(!cir::MissingFeatures::asmUnwindClobber());
   } else {
-    assert(!MissingFeatures::asmMemoryEffects());
+    assert(!cir::MissingFeatures::asmMemoryEffects());
 
     mlir::Value result;
     if (IA.getNumResults())
30 changes: 15 additions & 15 deletions clang/lib/CIR/CodeGen/CIRGenAtomic.cpp
@@ -42,7 +42,7 @@ class AtomicInfo {
   uint64_t ValueSizeInBits;
   CharUnits AtomicAlign;
   CharUnits ValueAlign;
-  TypeEvaluationKind EvaluationKind;
+  cir::TypeEvaluationKind EvaluationKind;
   bool UseLibcall;
   LValue LVal;
   CIRGenBitFieldInfo BFI;
@@ -51,7 +51,7 @@ class AtomicInfo {
 public:
   AtomicInfo(CIRGenFunction &CGF, LValue &lvalue, mlir::Location l)
       : CGF(CGF), AtomicSizeInBits(0), ValueSizeInBits(0),
-        EvaluationKind(TEK_Scalar), UseLibcall(true), loc(l) {
+        EvaluationKind(cir::TEK_Scalar), UseLibcall(true), loc(l) {
     assert(!lvalue.isGlobalReg());
     ASTContext &C = CGF.getContext();
     if (lvalue.isSimple()) {
@@ -102,7 +102,7 @@ class AtomicInfo {
   CharUnits getAtomicAlignment() const { return AtomicAlign; }
   uint64_t getAtomicSizeInBits() const { return AtomicSizeInBits; }
   uint64_t getValueSizeInBits() const { return ValueSizeInBits; }
-  TypeEvaluationKind getEvaluationKind() const { return EvaluationKind; }
+  cir::TypeEvaluationKind getEvaluationKind() const { return EvaluationKind; }
   bool shouldUseLibcall() const { return UseLibcall; }
   const LValue &getAtomicLValue() const { return LVal; }
   mlir::Value getAtomicPointer() const {
@@ -287,13 +287,13 @@ bool AtomicInfo::requiresMemSetZero(mlir::Type ty) const {
   switch (getEvaluationKind()) {
   // For scalars and complexes, check whether the store size of the
   // type uses the full size.
-  case TEK_Scalar:
+  case cir::TEK_Scalar:
     return !isFullSizeType(CGF.CGM, ty, AtomicSizeInBits);
-  case TEK_Complex:
+  case cir::TEK_Complex:
     llvm_unreachable("NYI");
 
   // Padding in structs has an undefined bit pattern. User beware.
-  case TEK_Aggregate:
+  case cir::TEK_Aggregate:
     return false;
   }
   llvm_unreachable("bad evaluation kind");
@@ -545,7 +545,7 @@ static void buildAtomicOp(CIRGenFunction &CGF, AtomicExpr *E, Address Dest,
                           mlir::Value IsWeak, mlir::Value FailureOrder,
                           uint64_t Size, mlir::cir::MemOrder Order,
                           uint8_t Scope) {
-  assert(!MissingFeatures::syncScopeID());
+  assert(!cir::MissingFeatures::syncScopeID());
   StringRef Op;
 
   auto &builder = CGF.getBuilder();
@@ -592,7 +592,7 @@ static void buildAtomicOp(CIRGenFunction &CGF, AtomicExpr *E, Address Dest,
   case AtomicExpr::AO__scoped_atomic_load: {
     auto *load = builder.createLoad(loc, Ptr).getDefiningOp();
     // FIXME(cir): add scope information.
-    assert(!MissingFeatures::syncScopeID());
+    assert(!cir::MissingFeatures::syncScopeID());
     load->setAttr("mem_order", orderAttr);
     if (E->isVolatile())
       load->setAttr("is_volatile", mlir::UnitAttr::get(builder.getContext()));
@@ -618,7 +618,7 @@ static void buildAtomicOp(CIRGenFunction &CGF, AtomicExpr *E, Address Dest,
   case AtomicExpr::AO__scoped_atomic_store_n: {
     auto loadVal1 = builder.createLoad(loc, Val1);
     // FIXME(cir): add scope information.
-    assert(!MissingFeatures::syncScopeID());
+    assert(!cir::MissingFeatures::syncScopeID());
     builder.createStore(loc, loadVal1, Ptr, E->isVolatile(),
                         /*alignment=*/mlir::IntegerAttr{}, orderAttr);
     return;
@@ -791,15 +791,15 @@ static void buildAtomicOp(CIRGenFunction &CGF, AtomicExpr *Expr, Address Dest,
   // LLVM atomic instructions always have synch scope. If clang atomic
   // expression has no scope operand, use default LLVM synch scope.
   if (!ScopeModel) {
-    assert(!MissingFeatures::syncScopeID());
+    assert(!cir::MissingFeatures::syncScopeID());
     buildAtomicOp(CGF, Expr, Dest, Ptr, Val1, Val2, IsWeak, FailureOrder, Size,
                   Order, /*FIXME(cir): LLVM default scope*/ 1);
     return;
   }
 
   // Handle constant scope.
   if (getConstOpIntAttr(Scope)) {
-    assert(!MissingFeatures::syncScopeID());
+    assert(!cir::MissingFeatures::syncScopeID());
     llvm_unreachable("NYI");
     return;
   }
@@ -1469,7 +1469,7 @@ void CIRGenFunction::buildAtomicStore(RValue rvalue, LValue dest,
       store.setIsVolatile(true);
 
     // DecorateInstructionWithTBAA
-    assert(!MissingFeatures::tbaa());
+    assert(!cir::MissingFeatures::tbaa());
     return;
   }
 
@@ -1480,18 +1480,18 @@ void CIRGenFunction::buildAtomicInit(Expr *init, LValue dest) {
   AtomicInfo atomics(*this, dest, getLoc(init->getSourceRange()));
 
   switch (atomics.getEvaluationKind()) {
-  case TEK_Scalar: {
+  case cir::TEK_Scalar: {
     mlir::Value value = buildScalarExpr(init);
     atomics.emitCopyIntoMemory(RValue::get(value));
     return;
   }
 
-  case TEK_Complex: {
+  case cir::TEK_Complex: {
     llvm_unreachable("NYI");
     return;
   }
 
-  case TEK_Aggregate: {
+  case cir::TEK_Aggregate: {
     // Fix up the destination if the initializer isn't an expression
     // of atomic type.
     llvm_unreachable("NYI");
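The TEK_* renames above all feed the same dispatch idiom: CIRGen classifies every type as scalar, complex, or aggregate, then switches on that classification to pick an emission strategy. A compressed sketch of the idiom, assuming only the three-valued enum from the diff; classify is a hypothetical stand-in for CIRGenFunction's real classification logic.

#include <cstdio>

namespace cir {
enum TypeEvaluationKind { TEK_Scalar, TEK_Complex, TEK_Aggregate };
} // namespace cir

// Hypothetical stand-in for the real type classification.
static cir::TypeEvaluationKind classify(bool isComplex, bool isRecord) {
  if (isComplex)
    return cir::TEK_Complex;
  return isRecord ? cir::TEK_Aggregate : cir::TEK_Scalar;
}

int main() {
  switch (classify(/*isComplex=*/false, /*isRecord=*/true)) {
  case cir::TEK_Scalar:
    std::puts("emit a single SSA value");
    break;
  case cir::TEK_Complex:
    std::puts("emit a (real, imag) pair");
    break;
  case cir::TEK_Aggregate:
    std::puts("emit into a memory slot");
    break;
  }
  return 0;
}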
40 changes: 20 additions & 20 deletions clang/lib/CIR/CodeGen/CIRGenBuilder.h
@@ -47,10 +47,10 @@ namespace clang::CIRGen {
 
 class CIRGenFunction;
 
-class CIRGenBuilderTy : public CIRBaseBuilderTy {
+class CIRGenBuilderTy : public cir::CIRBaseBuilderTy {
   const CIRGenTypeCache &typeCache;
   bool IsFPConstrained = false;
-  fp::ExceptionBehavior DefaultConstrainedExcept = fp::ebStrict;
+  cir::fp::ExceptionBehavior DefaultConstrainedExcept = cir::fp::ebStrict;
   llvm::RoundingMode DefaultConstrainedRounding = llvm::RoundingMode::Dynamic;
 
   llvm::StringMap<unsigned> GlobalsVersioning;
@@ -96,10 +96,10 @@ class CIRGenBuilderTy : public CIRBaseBuilderTy {
   }
 
   /// Set the exception handling to be used with constrained floating point
-  void setDefaultConstrainedExcept(fp::ExceptionBehavior NewExcept) {
+  void setDefaultConstrainedExcept(cir::fp::ExceptionBehavior NewExcept) {
 #ifndef NDEBUG
     std::optional<llvm::StringRef> ExceptStr =
-        convertExceptionBehaviorToStr(NewExcept);
+        cir::convertExceptionBehaviorToStr(NewExcept);
     assert(ExceptStr && "Garbage strict exception behavior!");
 #endif
     DefaultConstrainedExcept = NewExcept;
@@ -109,14 +109,14 @@ class CIRGenBuilderTy : public CIRBaseBuilderTy {
   void setDefaultConstrainedRounding(llvm::RoundingMode NewRounding) {
 #ifndef NDEBUG
     std::optional<llvm::StringRef> RoundingStr =
-        convertRoundingModeToStr(NewRounding);
+        cir::convertRoundingModeToStr(NewRounding);
     assert(RoundingStr && "Garbage strict rounding mode!");
 #endif
     DefaultConstrainedRounding = NewRounding;
   }
 
   /// Get the exception handling used with constrained floating point
-  fp::ExceptionBehavior getDefaultConstrainedExcept() {
+  cir::fp::ExceptionBehavior getDefaultConstrainedExcept() {
     return DefaultConstrainedExcept;
   }
 
@@ -422,7 +422,7 @@ class CIRGenBuilderTy : public CIRBaseBuilderTy {
     // FIXME: replay LLVM codegen for now, perhaps add a vtable ptr special
     // type so it's a bit more clear and C++ idiomatic.
     auto fnTy = mlir::cir::FuncType::get({}, getUInt32Ty(), isVarArg);
-    assert(!MissingFeatures::isVarArg());
+    assert(!cir::MissingFeatures::isVarArg());
     return getPointerTo(getPointerTo(fnTy));
   }
 
@@ -657,30 +657,30 @@ class CIRGenBuilderTy : public CIRBaseBuilderTy {
   }
 
   mlir::Value createFSub(mlir::Value lhs, mlir::Value rhs) {
-    assert(!MissingFeatures::metaDataNode());
+    assert(!cir::MissingFeatures::metaDataNode());
     if (IsFPConstrained)
       llvm_unreachable("Constrained FP NYI");
 
-    assert(!MissingFeatures::foldBinOpFMF());
+    assert(!cir::MissingFeatures::foldBinOpFMF());
     return create<mlir::cir::BinOp>(lhs.getLoc(), mlir::cir::BinOpKind::Sub,
                                     lhs, rhs);
   }
 
   mlir::Value createFAdd(mlir::Value lhs, mlir::Value rhs) {
-    assert(!MissingFeatures::metaDataNode());
+    assert(!cir::MissingFeatures::metaDataNode());
     if (IsFPConstrained)
      llvm_unreachable("Constrained FP NYI");
 
-    assert(!MissingFeatures::foldBinOpFMF());
+    assert(!cir::MissingFeatures::foldBinOpFMF());
     return create<mlir::cir::BinOp>(lhs.getLoc(), mlir::cir::BinOpKind::Add,
                                     lhs, rhs);
   }
   mlir::Value createFMul(mlir::Value lhs, mlir::Value rhs) {
-    assert(!MissingFeatures::metaDataNode());
+    assert(!cir::MissingFeatures::metaDataNode());
     if (IsFPConstrained)
       llvm_unreachable("Constrained FP NYI");
 
-    assert(!MissingFeatures::foldBinOpFMF());
+    assert(!cir::MissingFeatures::foldBinOpFMF());
     return create<mlir::cir::BinOp>(lhs.getLoc(), mlir::cir::BinOpKind::Mul,
                                     lhs, rhs);
   }
@@ -697,14 +697,14 @@ class CIRGenBuilderTy : public CIRBaseBuilderTy {
   mlir::Value createDynCastToVoid(mlir::Location loc, mlir::Value src,
                                   bool vtableUseRelativeLayout) {
     // TODO(cir): consider address space here.
-    assert(!MissingFeatures::addressSpace());
+    assert(!cir::MissingFeatures::addressSpace());
     auto destTy = getVoidPtrTy();
     return create<mlir::cir::DynamicCastOp>(
         loc, destTy, mlir::cir::DynamicCastKind::ptr, src,
         mlir::cir::DynamicCastInfoAttr{}, vtableUseRelativeLayout);
   }
 
-  cir::Address createBaseClassAddr(mlir::Location loc, cir::Address addr,
+  Address createBaseClassAddr(mlir::Location loc, Address addr,
                               mlir::Type destType, unsigned offset,
                               bool assumeNotNull) {
     if (destType == addr.getElementType())
@@ -716,7 +716,7 @@ class CIRGenBuilderTy : public CIRBaseBuilderTy {
     return Address(baseAddr, ptrTy, addr.getAlignment());
   }
 
-  cir::Address createDerivedClassAddr(mlir::Location loc, cir::Address addr,
+  Address createDerivedClassAddr(mlir::Location loc, Address addr,
                                  mlir::Type destType, unsigned offset,
                                  bool assumeNotNull) {
     if (destType == addr.getElementType())
@@ -833,7 +833,7 @@ class CIRGenBuilderTy : public CIRBaseBuilderTy {
 
   /// Cast the element type of the given address to a different type,
   /// preserving information like the alignment.
-  cir::Address createElementBitCast(mlir::Location loc, cir::Address addr,
+  Address createElementBitCast(mlir::Location loc, Address addr,
                                mlir::Type destType) {
     if (destType == addr.getElementType())
       return addr;
@@ -869,7 +869,7 @@ class CIRGenBuilderTy : public CIRBaseBuilderTy {
   mlir::Value createAlignedLoad(mlir::Location loc, mlir::Type ty,
                                 mlir::Value ptr, llvm::MaybeAlign align) {
     // TODO: make sure callsites shouldn't be really passing volatile.
-    assert(!MissingFeatures::volatileLoadOrStore());
+    assert(!cir::MissingFeatures::volatileLoadOrStore());
     return createAlignedLoad(loc, ty, ptr, align, /*isVolatile=*/false);
   }
 
@@ -942,7 +942,7 @@ class CIRGenBuilderTy : public CIRBaseBuilderTy {
   // but currently some parts of Clang AST, which we don't want to touch just
   // yet, return them.
   void computeGlobalViewIndicesFromFlatOffset(
-      int64_t Offset, mlir::Type Ty, CIRDataLayout Layout,
+      int64_t Offset, mlir::Type Ty, cir::CIRDataLayout Layout,
      llvm::SmallVectorImpl<int64_t> &Indices) {
     if (!Offset)
       return;
@@ -1046,7 +1046,7 @@ class CIRGenBuilderTy : public CIRBaseBuilderTy {
        mlir::cast<mlir::cir::DataMemberType>(memberPtr.getType());
 
     // TODO(cir): consider address space.
-    assert(!MissingFeatures::addressSpace());
+    assert(!cir::MissingFeatures::addressSpace());
     auto resultTy = getPointerTo(memberPtrTy.getMemberTy());
 
     return create<mlir::cir::GetRuntimeMemberOp>(loc, resultTy, objectPtr,