Skip to content

Commit

Permalink
[CIR][CIRGen] Cleanup: enable conditional cleanup with exceptions
Browse files Browse the repository at this point in the history
Entails several minor changes:
- Duplicate resume blocks around.
- Disable LP caching, we repeat them as often as necessary.
- Update maps accordingly for tracking places to patch up.
- Make changes to clean up block handling.
- Fix an issue in flatten cfg.
  • Loading branch information
bcardosolopes committed Oct 4, 2024
1 parent 9975749 commit 3086002
Show file tree
Hide file tree
Showing 6 changed files with 275 additions and 49 deletions.
20 changes: 18 additions & 2 deletions clang/lib/CIR/CodeGen/CIRGenCleanup.cpp
Original file line number Diff line number Diff line change
Expand Up @@ -181,7 +181,11 @@ static void setupCleanupBlockActivation(CIRGenFunction &CGF,
llvm_unreachable("NYI");
}

llvm_unreachable("NYI");
auto builder = CGF.getBuilder();
mlir::Location loc = var.getPointer().getLoc();
mlir::Value trueOrFalse =
kind == ForActivation ? builder.getTrue(loc) : builder.getFalse(loc);
CGF.getBuilder().createStore(loc, trueOrFalse, var);
}

/// Deactivate a cleanup that was created in an active state.
Expand Down Expand Up @@ -421,7 +425,6 @@ void CIRGenFunction::PopCleanupBlock(bool FallthroughIsBranchThrough) {
if (RequiresEHCleanup) {
mlir::cir::TryOp tryOp =
ehEntry->getParentOp()->getParentOfType<mlir::cir::TryOp>();
assert(tryOp && "expected available cir.try");
auto *nextAction = getEHDispatchBlock(EHParent, tryOp);
(void)nextAction;

Expand Down Expand Up @@ -469,6 +472,19 @@ void CIRGenFunction::PopCleanupBlock(bool FallthroughIsBranchThrough) {
mlir::Block *blockToPatch = cleanupsToPatch[currBlock];
auto currYield = cast<YieldOp>(blockToPatch->getTerminator());
builder.setInsertionPoint(currYield);

// If nextAction is an EH resume block, also update all try locations
// for these "to-patch" blocks with the appropriate resume content.
if (nextAction == ehResumeBlock) {
if (auto tryToPatch = currYield->getParentOp()
->getParentOfType<mlir::cir::TryOp>()) {
mlir::Block *resumeBlockToPatch =
tryToPatch.getCatchUnwindEntryBlock();
buildEHResumeBlock(/*isCleanup=*/true, resumeBlockToPatch,
tryToPatch.getLoc());
}
}

buildCleanup(*this, Fn, cleanupFlags, EHActiveFlag);
currBlock = blockToPatch;
}
Expand Down
113 changes: 70 additions & 43 deletions clang/lib/CIR/CodeGen/CIRGenException.cpp
Original file line number Diff line number Diff line change
Expand Up @@ -252,19 +252,9 @@ void CIRGenFunction::buildAnyExprToExn(const Expr *e, Address addr) {
DeactivateCleanupBlock(cleanup, op);
}

mlir::Block *CIRGenFunction::getEHResumeBlock(bool isCleanup,
mlir::cir::TryOp tryOp) {

if (ehResumeBlock)
return ehResumeBlock;
// Just like some other try/catch related logic: return the basic block
// pointer but only use it to denote we're tracking things, but there
// shouldn't be any changes to that block after work done in this function.
assert(tryOp && "expected available cir.try");
ehResumeBlock = tryOp.getCatchUnwindEntryBlock();
if (!ehResumeBlock->empty())
return ehResumeBlock;

void CIRGenFunction::buildEHResumeBlock(bool isCleanup,
mlir::Block *ehResumeBlock,
mlir::Location loc) {
auto ip = getBuilder().saveInsertionPoint();
getBuilder().setInsertionPointToStart(ehResumeBlock);

Expand All @@ -283,9 +273,22 @@ mlir::Block *CIRGenFunction::getEHResumeBlock(bool isCleanup,
llvm_unreachable("NYI");
}

getBuilder().create<mlir::cir::ResumeOp>(tryOp.getLoc(), mlir::Value{},
mlir::Value{});
getBuilder().create<mlir::cir::ResumeOp>(loc, mlir::Value{}, mlir::Value{});
getBuilder().restoreInsertionPoint(ip);
}

mlir::Block *CIRGenFunction::getEHResumeBlock(bool isCleanup,
                                              mlir::cir::TryOp tryOp) {
  // Reuse the resume block if one was already materialized for this
  // function.
  if (ehResumeBlock)
    return ehResumeBlock;

  // Set up unwinding: the resume block is the try's unwind entry block.
  assert(tryOp && "expected available cir.try");
  ehResumeBlock = tryOp.getCatchUnwindEntryBlock();

  // Only populate the block on first use; a non-empty block has already
  // been filled in elsewhere.
  if (ehResumeBlock->empty())
    buildEHResumeBlock(isCleanup, ehResumeBlock, tryOp.getLoc());
  return ehResumeBlock;
}

Expand Down Expand Up @@ -599,7 +602,7 @@ void CIRGenFunction::exitCXXTryStmt(const CXXTryStmt &S, bool IsFnTryBlock) {
/// Check whether this is a non-EH scope, i.e. a scope which doesn't
/// affect exception handling. Currently, the only non-EH scopes are
/// normal-only cleanup scopes.
static bool isNonEHScope(const EHScope &S) {
[[maybe_unused]] static bool isNonEHScope(const EHScope &S) {
switch (S.getKind()) {
case EHScope::Cleanup:
return !cast<EHCleanupScope>(S).isEHCleanup();
Expand All @@ -625,15 +628,16 @@ mlir::Operation *CIRGenFunction::buildLandingPad(mlir::cir::TryOp tryOp) {
case EHScope::Catch:
case EHScope::Cleanup:
case EHScope::Filter:
if (auto *lpad = innermostEHScope.getCachedLandingPad())
return lpad;
// CIR does not cache landing pads.

This comment has been minimized.

Copy link
@smeenai

smeenai Oct 4, 2024

Collaborator

Are there potential downsides to this?

This comment has been minimized.

Copy link
@bcardosolopes

bcardosolopes Oct 5, 2024

Author Member

Not really because we have one per call site, but we never thread to the same basic block (like LLVM does). We'll have to be smarter in flattencfg to do good lowering.

break;
}

// If there's an existing TryOp, it means we got a `cir.try` scope
// that leads to this "landing pad" creation site. Otherwise, exceptions
// are enabled but a throwing function is called anyways (common pattern
// with function local static initializers).
{
mlir::ArrayAttr catches = tryOp.getCatchTypesAttr();
if (!catches || catches.empty()) {
// Save the current CIR generation state.
mlir::OpBuilder::InsertionGuard guard(builder);
assert(!MissingFeatures::generateDebugInfo() && "NYI");
Expand Down Expand Up @@ -727,14 +731,16 @@ mlir::Operation *CIRGenFunction::buildLandingPad(mlir::cir::TryOp tryOp) {
// Add final array of clauses into TryOp.
tryOp.setCatchTypesAttr(
mlir::ArrayAttr::get(builder.getContext(), clauses));

// In traditional LLVM codegen, this tells the backend how to generate the
// landing pad by generating a branch to the dispatch block.
mlir::Block *dispatch =
getEHDispatchBlock(EHStack.getInnermostEHScope(), tryOp);
(void)dispatch;
}

// In traditional LLVM codegen, this tells the backend how to generate the
// landing pad by generating a branch to the dispatch block. In CIR,
// getEHDispatchBlock is used to populate blocks for later filling during
// cleanup handling.
mlir::Block *dispatch =
getEHDispatchBlock(EHStack.getInnermostEHScope(), tryOp);
(void)dispatch;

return tryOp;
}

Expand All @@ -755,8 +761,21 @@ CIRGenFunction::getEHDispatchBlock(EHScopeStack::stable_iterator si,

// Otherwise, we should look at the actual scope.
EHScope &scope = *EHStack.find(si);

auto *dispatchBlock = scope.getCachedEHDispatchBlock();

mlir::Block *originalBlock = nullptr;
if (dispatchBlock && tryOp) {
// If the dispatch is cached but comes from a different tryOp, make sure:
// - Populate current `tryOp` with a new dispatch block regardless.
// - Update the map to enqueue new dispatchBlock to also get a cleanup. See
// code at the end of the function.
mlir::Operation *parentOp = dispatchBlock->getParentOp();
if (tryOp != parentOp->getParentOfType<mlir::cir::TryOp>()) {
originalBlock = dispatchBlock;
dispatchBlock = nullptr;
}
}

if (!dispatchBlock) {
switch (scope.getKind()) {
case EHScope::Catch: {
Expand All @@ -774,13 +793,25 @@ CIRGenFunction::getEHDispatchBlock(EHScopeStack::stable_iterator si,
}

case EHScope::Cleanup: {
assert(callWithExceptionCtx && "expected call information");
{
if (callWithExceptionCtx && "expected call information") {
mlir::OpBuilder::InsertionGuard guard(getBuilder());
assert(callWithExceptionCtx.getCleanup().empty() &&
"one per call: expected empty region at this point");
dispatchBlock = builder.createBlock(&callWithExceptionCtx.getCleanup());
builder.createYield(callWithExceptionCtx.getLoc());
} else {
// Usually coming from general cir.scope cleanups that aren't
// tied to a specific throwing call.
assert(currLexScope && currLexScope->isRegular() &&
"expected regular cleanup");
dispatchBlock = currLexScope->getOrCreateCleanupBlock(builder);
if (dispatchBlock->empty()) {
mlir::OpBuilder::InsertionGuard guard(builder);
builder.setInsertionPointToEnd(dispatchBlock);
mlir::Location loc =
currSrcLoc ? *currSrcLoc : builder.getUnknownLoc();
builder.createYield(loc);
}
}
break;
}
Expand All @@ -793,6 +824,14 @@ CIRGenFunction::getEHDispatchBlock(EHScopeStack::stable_iterator si,
llvm_unreachable("NYI");
break;
}
}

if (originalBlock) {
// As mentioned above: update the map to enqueue new dispatchBlock to also
// get a cleanup.
cleanupsToPatch[originalBlock] = dispatchBlock;
dispatchBlock = originalBlock;
} else {
scope.setCachedEHDispatchBlock(dispatchBlock);
}
return dispatchBlock;
Expand Down Expand Up @@ -826,18 +865,13 @@ mlir::Operation *CIRGenFunction::getInvokeDestImpl(mlir::cir::TryOp tryOp) {
assert(!EHStack.empty());
assert(isInvokeDest());

// Check the innermost scope for a cached landing pad. If this is
// a non-EH cleanup, we'll check enclosing scopes in EmitLandingPad.
auto *LP = EHStack.begin()->getCachedLandingPad();
if (LP)
return LP;

// CIR does not cache landing pads.
const EHPersonality &Personality = EHPersonality::get(*this);

// FIXME(cir): add personality function
// if (!CurFn->hasPersonalityFn())
// CurFn->setPersonalityFn(getOpaquePersonalityFn(CGM, Personality));

mlir::Operation *LP = nullptr;
if (Personality.usesFuncletPads()) {
// We don't need separate landing pads in the funclet model.
llvm::errs() << "PersonalityFn: " << Personality.PersonalityFn << "\n";
Expand All @@ -848,14 +882,7 @@ mlir::Operation *CIRGenFunction::getInvokeDestImpl(mlir::cir::TryOp tryOp) {

assert(LP);

// Cache the landing pad on the innermost scope. If this is a
// non-EH scope, cache the landing pad on the enclosing scope, too.
for (EHScopeStack::iterator ir = EHStack.begin(); true; ++ir) {
ir->setCachedLandingPad(LP);
if (!isNonEHScope(*ir))
break;
}

// CIR does not cache landing pads.
return LP;
}

Expand Down
12 changes: 11 additions & 1 deletion clang/lib/CIR/CodeGen/CIRGenFunction.cpp
Original file line number Diff line number Diff line change
Expand Up @@ -363,9 +363,19 @@ void CIRGenFunction::LexicalScope::cleanup() {
mlir::OpBuilder::InsertionGuard guard(builder);
builder.setInsertionPointToEnd(InsPt);

// If we still don't have a cleanup block, it means that `applyCleanup`
// below might be able to get us one.
mlir::Block *cleanupBlock = localScope->getCleanupBlock(builder);

// Leverage and defer to RunCleanupsScope's dtor and scope handling.
applyCleanup();

// If we now have one after `applyCleanup`, hook it up properly.
if (!cleanupBlock && localScope->getCleanupBlock(builder)) {
cleanupBlock = localScope->getCleanupBlock(builder);
builder.create<BrOp>(InsPt->back().getLoc(), cleanupBlock);
}

if (localScope->Depth == 0) {
buildImplicitReturn();
return;
Expand All @@ -374,7 +384,7 @@ void CIRGenFunction::LexicalScope::cleanup() {
// End of any local scope != function
// Ternary ops have to deal with matching arms for yielding types
// and do return a value, it must do its own cir.yield insertion.
if (!localScope->isTernary()) {
if (!localScope->isTernary() && !InsPt->mightHaveTerminator()) {
!retVal ? builder.create<YieldOp>(localScope->EndLoc)
: builder.create<YieldOp>(localScope->EndLoc, retVal);
}
Expand Down
14 changes: 13 additions & 1 deletion clang/lib/CIR/CodeGen/CIRGenFunction.h
Original file line number Diff line number Diff line change
Expand Up @@ -1787,6 +1787,8 @@ class CIRGenFunction : public CIRGenTypeCache {
/// Emits try/catch information for the current EH stack.
mlir::cir::CallOp callWithExceptionCtx = nullptr;
mlir::Operation *buildLandingPad(mlir::cir::TryOp tryOp);
void buildEHResumeBlock(bool isCleanup, mlir::Block *ehResumeBlock,
mlir::Location loc);
mlir::Block *getEHResumeBlock(bool isCleanup, mlir::cir::TryOp tryOp);
mlir::Block *getEHDispatchBlock(EHScopeStack::stable_iterator scope,
mlir::cir::TryOp tryOp);
Expand Down Expand Up @@ -2370,7 +2372,17 @@ DominatingCIRValue::save(CIRGenFunction &CGF, mlir::Value value) {

inline mlir::Value DominatingCIRValue::restore(CIRGenFunction &CGF,
                                               saved_type value) {
  // A value flagged as not-saved still dominates its uses; hand it back
  // untouched.
  if (!value.getInt())
    return value.getPointer();

  // Saved values live in an alloca slot created by save(): reload from it
  // and propagate the slot's alignment onto the freshly created load.
  auto slot = cast<mlir::cir::AllocaOp>(value.getPointer().getDefiningOp());
  mlir::Value reloaded = CGF.getBuilder().createAlignedLoad(
      slot.getLoc(), slot.getType(), slot);
  cast<mlir::cir::LoadOp>(reloaded.getDefiningOp())
      .setAlignment(slot.getAlignment());
  return reloaded;
}

/// A specialization of DominatingValue for RValue.
Expand Down
2 changes: 1 addition & 1 deletion clang/lib/CIR/Dialect/Transforms/FlattenCFG.cpp
Original file line number Diff line number Diff line change
Expand Up @@ -544,7 +544,7 @@ class CIRTryOpFlattening : public mlir::OpRewritePattern<mlir::cir::TryOp> {

// Quick block cleanup: no indirection to the post try block.
auto brOp = dyn_cast<mlir::cir::BrOp>(afterTry->getTerminator());
if (brOp) {
if (brOp && brOp.getDest()->hasNoPredecessors()) {
mlir::Block *srcBlock = brOp.getDest();
rewriter.eraseOp(brOp);
rewriter.mergeBlocks(srcBlock, afterTry);
Expand Down
Loading

0 comments on commit 3086002

Please sign in to comment.