Commit 94763c8

[sil-verifier] Convert else-if chain to if + continue to clarify control flow.
An else-if chain leaves open the possibility that more code follows the chain. No such code exists here, so early continues make it explicit that the iteration ends at the close of each if block, and a reader does not have to scan to the end of the chain to confirm that. This also lets me drop one level of indentation from the successor-handling code.
1 parent 5ef8f3b commit 94763c8
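
For context, here is a minimal sketch of the shape of the change, using toy types and hypothetical names (Inst, Kind, processChained, processWithContinue are illustrations only, not the verifier's real API):

#include <vector>

// Toy stand-ins for illustration; the real verifier dispatches on
// SILInstruction kinds, not on this enum.
enum class Kind { StackAlloc, StackDealloc, Terminator, Other };
struct Inst { Kind kind; };

// Before: an else-if chain. The whole chain must be read to confirm that
// nothing follows it, and the later cases sit one level deeper.
void processChained(const std::vector<Inst> &block) {
  for (const Inst &i : block) {
    if (i.kind == Kind::StackAlloc) {
      // handle the allocation
    } else if (i.kind == Kind::StackDealloc) {
      // handle the deallocation
    } else if (i.kind == Kind::Terminator) {
      // long terminator handling, nested inside the chain
    }
  }
}

// After: each handled case ends with an early continue, so the iteration
// visibly stops there and the terminator handling loses one indent level.
void processWithContinue(const std::vector<Inst> &block) {
  for (const Inst &i : block) {
    if (i.kind == Kind::StackAlloc) {
      // handle the allocation
      continue;
    }
    if (i.kind == Kind::StackDealloc) {
      // handle the deallocation
      continue;
    }
    if (i.kind != Kind::Terminator)
      continue;
    // terminator handling, now at the top level of the loop body
  }
}

The second form makes each case's exit explicit and keeps the later, heavier handling at the top indentation level of the loop body.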

File tree

1 file changed (+177, -158 lines)
lib/SIL/Verifier/FlowSensitiveVerifier.cpp

Lines changed: 177 additions & 158 deletions
@@ -334,7 +334,10 @@ void swift::silverifier::verifyFlowSensitiveRules(SILFunction *F) {
         } else {
           state.handleScopeInst(i.getStackAllocation());
         }
-      } else if (i.isDeallocatingStack()) {
+        continue;
+      }
+
+      if (i.isDeallocatingStack()) {
         SILValue op = i.getOperand(0);
         while (auto *mvi = dyn_cast<MoveValueInst>(op)) {
           op = mvi->getOperand();
@@ -358,188 +361,204 @@ void swift::silverifier::verifyFlowSensitiveRules(SILFunction *F) {
           // failure.
         });
       }
-      } else if (auto scopeEndingValue = isScopeInst(&i)) {
+        continue;
+      }
+
+      if (auto scopeEndingValue = isScopeInst(&i)) {
         state.handleScopeInst(scopeEndingValue);
-      } else if (isScopeEndingInst(&i)) {
+        continue;
+      }
+
+      if (isScopeEndingInst(&i)) {
         state.handleScopeEndingInst(i);
-      } else if (auto *endBorrow = dyn_cast<EndBorrowInst>(&i)) {
+        continue;
+      }
+
+      if (auto *endBorrow = dyn_cast<EndBorrowInst>(&i)) {
         if (isa<StoreBorrowInst>(endBorrow->getOperand())) {
           state.handleScopeEndingInst(i);
         }
-      } else if (auto gaci = dyn_cast<GetAsyncContinuationInstBase>(&i)) {
+        continue;
+      }
+
+      if (auto gaci = dyn_cast<GetAsyncContinuationInstBase>(&i)) {
         require(!state.GotAsyncContinuation,
                 "get_async_continuation while unawaited continuation is "
                 "already active");
         state.GotAsyncContinuation = gaci;
-      } else if (auto term = dyn_cast<TermInst>(&i)) {
-        if (term->isFunctionExiting()) {
-          require(state.Stack.empty(),
-                  "return with stack allocs that haven't been deallocated");
-          require(state.ActiveOps.empty(),
-                  "return with operations still active");
-          require(!state.GotAsyncContinuation,
-                  "return with unawaited async continuation");
+        continue;
+      }
 
-          if (isa<UnwindInst>(term)) {
-            require(state.CFG == CFGState::YieldUnwind,
-                    "encountered 'unwind' when not on unwind path");
-          } else {
-            require(state.CFG != CFGState::YieldUnwind,
-                    "encountered 'return' or 'throw' when on unwind path");
-            if (isa<ReturnInst>(term) &&
-                F->getLoweredFunctionType()->getCoroutineKind() ==
-                    SILCoroutineKind::YieldOnce &&
-                F->getModule().getStage() != SILStage::Raw) {
-              require(state.CFG == CFGState::YieldOnceResume,
-                      "encountered 'return' before yielding a value in "
-                      "yield_once coroutine");
-            }
+      auto *term = dyn_cast<TermInst>(&i);
+      if (!term)
+        continue;
+
+      if (term->isFunctionExiting()) {
+        require(state.Stack.empty(),
+                "return with stack allocs that haven't been deallocated");
+        require(state.ActiveOps.empty(), "return with operations still active");
+        require(!state.GotAsyncContinuation,
+                "return with unawaited async continuation");
+
+        if (isa<UnwindInst>(term)) {
+          require(state.CFG == CFGState::YieldUnwind,
+                  "encountered 'unwind' when not on unwind path");
+        } else {
+          require(state.CFG != CFGState::YieldUnwind,
+                  "encountered 'return' or 'throw' when on unwind path");
+          if (isa<ReturnInst>(term) &&
+              F->getLoweredFunctionType()->getCoroutineKind() ==
+                  SILCoroutineKind::YieldOnce &&
+              F->getModule().getStage() != SILStage::Raw) {
+            require(state.CFG == CFGState::YieldOnceResume,
+                    "encountered 'return' before yielding a value in "
+                    "yield_once coroutine");
           }
         }
+      }
 
-        if (isa<YieldInst>(term)) {
-          require(state.CFG != CFGState::YieldOnceResume,
-                  "encountered multiple 'yield's along single path");
-          require(state.CFG == CFGState::Normal,
-                  "encountered 'yield' on abnormal CFG path");
-          require(!state.GotAsyncContinuation,
-                  "encountered 'yield' while an unawaited continuation is "
-                  "active");
-        }
+      if (isa<YieldInst>(term)) {
+        require(state.CFG != CFGState::YieldOnceResume,
+                "encountered multiple 'yield's along single path");
+        require(state.CFG == CFGState::Normal,
+                "encountered 'yield' on abnormal CFG path");
+        require(!state.GotAsyncContinuation,
+                "encountered 'yield' while an unawaited continuation is "
+                "active");
+      }
 
-        auto successors = term->getSuccessors();
-        for (auto i : indices(successors)) {
-          SILBasicBlock *succBB = successors[i].getBB();
-          auto succStateRef = move_if(state, i + 1 == successors.size());
-
-          // Some successors (currently just `yield`) have state
-          // transitions on the edges themselves. Fortunately,
-          // these successors all require their destination blocks
-          // to be uniquely referenced, so we never have to combine
-          // the state change with merging or consistency checking.
-
-          // Check whether this edge enters a dead-end region.
-          if (auto deadEndRegion = deadEnds.entersDeadEndRegion(BB, succBB)) {
-            // If so, record it in the visited set, which will tell us
-            // whether it's the last remaining edge to the region.
-            bool isLastDeadEndEdge = visitedDeadEndEdges.visitEdgeTo(succBB);
-
-            // Check for an existing shared state for the region.
-            auto &regionInfo = deadEndRegionStates[*deadEndRegion];
-
-            // If we don't have an existing shared state, and this is the
-            // last edge to the region, just fall through and process it
-            // like a normal edge.
-            if (!regionInfo && isLastDeadEndEdge) {
-              // This can only happen if there's exactly one edge to the
-              // block, so we will end up in the insertion success case below.
-              // Note that the state-changing terminators like `yield`
-              // always take this path: since this must be the unique edge
-              // to the successor, it must be in its own dead-end region.
-
-              // fall through to the main path
-
-            // Otherwise, we need to merge this into the shared state.
-            } else {
-              require(!isa<YieldInst>(term),
-                      "successor of 'yield' should not be encountered twice");
-
-              // Copy/move our current state into the shared state if it
-              // doesn't already exist.
-              if (!regionInfo) {
-                regionInfo.emplace(std::move(succStateRef));
-
-              // Otherwise, merge our current state into the shared state.
-              } else {
-                regionInfo->sharedState.handleJoinPoint(
-                    succStateRef.get(), /*dead end*/ true, term, succBB);
-              }
+      auto successors = term->getSuccessors();
+      for (auto i : indices(successors)) {
+        SILBasicBlock *succBB = successors[i].getBB();
+        auto succStateRef = move_if(state, i + 1 == successors.size());
+
+        // Some successors (currently just `yield`) have state
+        // transitions on the edges themselves. Fortunately,
+        // these successors all require their destination blocks
+        // to be uniquely referenced, so we never have to combine
+        // the state change with merging or consistency checking.
+
+        // Check whether this edge enters a dead-end region.
+        if (auto deadEndRegion = deadEnds.entersDeadEndRegion(BB, succBB)) {
+          // If so, record it in the visited set, which will tell us
+          // whether it's the last remaining edge to the region.
+          bool isLastDeadEndEdge = visitedDeadEndEdges.visitEdgeTo(succBB);
+
+          // Check for an existing shared state for the region.
+          auto &regionInfo = deadEndRegionStates[*deadEndRegion];
+
+          // If we don't have an existing shared state, and this is the
+          // last edge to the region, just fall through and process it
+          // like a normal edge.
+          if (!regionInfo && isLastDeadEndEdge) {
+            // This can only happen if there's exactly one edge to the
+            // block, so we will end up in the insertion success case below.
+            // Note that the state-changing terminators like `yield`
+            // always take this path: since this must be the unique edge
+            // to the successor, it must be in its own dead-end region.
+
+            // fall through to the main path
+
+          // Otherwise, we need to merge this into the shared state.
+          } else {
+            require(!isa<YieldInst>(term),
+                    "successor of 'yield' should not be encountered twice");
 
-              // Add the successor block to the state's set of entry blocks.
-              regionInfo->entryBlocks.insert(succBB);
-
-              // If this was the last branch to the region, act like we
-              // just saw the edges to each of its entry blocks.
-              if (isLastDeadEndEdge) {
-                for (auto ebi = regionInfo->entryBlocks.begin(),
-                          ebe = regionInfo->entryBlocks.end();
-                     ebi != ebe;) {
-                  auto *regionEntryBB = *ebi++;
-
-                  // Copy/move the shared state to be the state for the
-                  // region entry block.
-                  auto insertResult = visitedBBs.try_emplace(
-                      regionEntryBB,
-                      move_if(regionInfo->sharedState, ebi == ebe));
-                  assert(insertResult.second &&
-                         "already visited edge to dead-end region!");
-                  (void)insertResult;
-
-                  // Add the region entry block to the worklist.
-                  Worklist.push_back(regionEntryBB);
-                }
-              }
+            // Copy/move our current state into the shared state if it
+            // doesn't already exist.
+            if (!regionInfo) {
+              regionInfo.emplace(std::move(succStateRef));
 
-              // Regardless, don't fall through to the main path.
-              continue;
+            // Otherwise, merge our current state into the shared state.
+            } else {
+              regionInfo->sharedState.handleJoinPoint(
+                  succStateRef.get(), /*dead end*/ true, term, succBB);
             }
-          }
 
-          // Okay, either this isn't an edge to a dead-end region or it
-          // was a unique edge to it.
-
-          // Optimistically try to set our current state as the state
-          // of the successor. We can use a move on the final successor;
-          // note that if the insertion fails, the move won't actually
-          // happen, which is important because we'll still need it
-          // to compare against the already-recorded state for the block.
-          auto insertResult =
-              visitedBBs.try_emplace(succBB, std::move(succStateRef));
-
-          // If the insertion was successful, we need to add the successor
-          // block to the worklist.
-          if (insertResult.second) {
-            auto &insertedState = insertResult.first->second;
-
-            // 'yield' has successor-specific state updates, so we do that
-            // now. 'yield' does not permit critical edges, so we don't
-            // have to worry about doing this in the case below where
-            // insertion failed.
-            if (isa<YieldInst>(term)) {
-              // Enforce that the unwind logic is segregated in all stages.
-              if (i == 1) {
-                insertedState.CFG = CFGState::YieldUnwind;
-
-              // We check the yield_once rule in the mandatory analyses,
-              // so we can't assert it yet in the raw stage.
-              } else if (F->getLoweredFunctionType()->getCoroutineKind() ==
-                             SILCoroutineKind::YieldOnce &&
-                         F->getModule().getStage() != SILStage::Raw) {
-                insertedState.CFG = CFGState::YieldOnceResume;
+            // Add the successor block to the state's set of entry blocks.
+            regionInfo->entryBlocks.insert(succBB);
+
+            // If this was the last branch to the region, act like we
+            // just saw the edges to each of its entry blocks.
+            if (isLastDeadEndEdge) {
+              for (auto ebi = regionInfo->entryBlocks.begin(),
+                        ebe = regionInfo->entryBlocks.end();
+                   ebi != ebe;) {
+                auto *regionEntryBB = *ebi++;
+
+                // Copy/move the shared state to be the state for the
+                // region entry block.
+                auto insertResult = visitedBBs.try_emplace(
+                    regionEntryBB,
+                    move_if(regionInfo->sharedState, ebi == ebe));
+                assert(insertResult.second &&
+                       "already visited edge to dead-end region!");
+                (void)insertResult;
+
+                // Add the region entry block to the worklist.
+                Worklist.push_back(regionEntryBB);
               }
             }
 
-            // Go ahead and add the block.
-            Worklist.push_back(succBB);
+            // Regardless, don't fall through to the main path.
             continue;
           }
+        }
+
+        // Okay, either this isn't an edge to a dead-end region or it
+        // was a unique edge to it.
+
+        // Optimistically try to set our current state as the state
+        // of the successor. We can use a move on the final successor;
+        // note that if the insertion fails, the move won't actually
+        // happen, which is important because we'll still need it
+        // to compare against the already-recorded state for the block.
+        auto insertResult =
+            visitedBBs.try_emplace(succBB, std::move(succStateRef));
+
+        // If the insertion was successful, we need to add the successor
+        // block to the worklist.
+        if (insertResult.second) {
+          auto &insertedState = insertResult.first->second;
+
+          // 'yield' has successor-specific state updates, so we do that
+          // now. 'yield' does not permit critical edges, so we don't
+          // have to worry about doing this in the case below where
+          // insertion failed.
+          if (isa<YieldInst>(term)) {
+            // Enforce that the unwind logic is segregated in all stages.
+            if (i == 1) {
+              insertedState.CFG = CFGState::YieldUnwind;
+
+            // We check the yield_once rule in the mandatory analyses,
+            // so we can't assert it yet in the raw stage.
+            } else if (F->getLoweredFunctionType()->getCoroutineKind() ==
+                           SILCoroutineKind::YieldOnce &&
+                       F->getModule().getStage() != SILStage::Raw) {
+              insertedState.CFG = CFGState::YieldOnceResume;
+            }
+          }
 
-          // This rule is checked elsewhere, but we'd want to assert it
-          // here anyway.
-          require(!isa<YieldInst>(term),
-                  "successor of 'yield' should not be encountered twice");
-
-          // Okay, we failed to insert. That means there's an existing
-          // state for the successor block. That existing state generally
-          // needs to match the current state, but certain rules are
-          // relaxed for branches that enter dead-end regions.
-          auto &foundState = insertResult.first->second;
-
-          // Join the states into `foundState`. We can still validly use
-          // succStateRef here because the insertion didn't work.
-          foundState.handleJoinPoint(succStateRef.get(), /*dead-end*/ false,
-                                     term, succBB);
+          // Go ahead and add the block.
+          Worklist.push_back(succBB);
+          continue;
         }
+
+        // This rule is checked elsewhere, but we'd want to assert it
+        // here anyway.
+        require(!isa<YieldInst>(term),
+                "successor of 'yield' should not be encountered twice");
+
+        // Okay, we failed to insert. That means there's an existing
+        // state for the successor block. That existing state generally
+        // needs to match the current state, but certain rules are
+        // relaxed for branches that enter dead-end regions.
+        auto &foundState = insertResult.first->second;
+
+        // Join the states into `foundState`. We can still validly use
+        // succStateRef here because the insertion didn't work.
+        foundState.handleJoinPoint(succStateRef.get(), /*dead-end*/ false, term,
+                                   succBB);
       }
     }
   }
