Skip to content
Merged
Show file tree
Hide file tree
Changes from all commits
Commits
Show all changes
32 commits
Select commit Hold shift + click to select a range
e0fdc3f
Support spatial-temporal loop control, and parsing perfect nested loo…
Oct 23, 2025
5637cad
Merge origin/main and remove allow-steering-spatial-temporal option
Oct 27, 2025
fc3792a
Fix test: check if there exists neura.load_indexed/store_indexed, and…
Oct 27, 2025
85a8a28
Fix compilation errors in AffineToNeuraPass
Oct 27, 2025
e09519c
Completely rewrite AffineToNeura pass with dataflow-style loop lowering
Oct 27, 2025
e57c3e0
Add comprehensive test suite and fix code style
Oct 28, 2025
bb4816a
feat(AffineToNeura): Add loop nest analysis and valid signal reuse op…
Oct 28, 2025
cb6f657
refactor: Reorganize AffineToNeura tests - split into focused test files
Oct 28, 2025
56a16ba
Fixed known error
Oct 28, 2025
5a2e111
fix: Pass empty ValueRange to inlineBlockBefore
Oct 29, 2025
bb86bdd
fix: Correctly pass loop_index to inlineBlockBefore
Oct 29, 2025
53cd897
style: rename LoopInfo fields to snake_case
Oct 30, 2025
ce811c0
refactor: remove unused fused operations (CarryInvariantOp, Condition…
Oct 30, 2025
c9950fc
refactor: improve test readability
Oct 30, 2025
07f83da
test: add example of unsupported case (affine.if)
Oct 30, 2025
0357b5c
refactor: remove hard 3D dimension constraint
Oct 30, 2025
f83f8ad
docs: add comment explaining AffineApplyOp single-result check
Oct 30, 2025
1571c5a
docs: add comprehensive examples for all conversions
Oct 30, 2025
f063aec
fix: remove ConstantOp from steering unwrapped operations.
Oct 31, 2025
ed2795d
Fix grant_once semantic conflict in loop control
Oct 31, 2025
154d3b2
1. Remove indentation in imperfect-ops-after.mlir CHECK lines
Oct 31, 2025
7331bf4
fix: update test files to expect constant instead of grant_once
Oct 31, 2025
698121c
fix: address reviewer comments on test files
Nov 2, 2025
e5d2243
remove: delete unsupported-dynamic-bounds.mlir test file
Nov 2, 2025
49cc61a
Remove confusing comments in mapping_util.cpp
Nov 2, 2025
bc0695c
Align is_steering_unwrapped_op with InsertDataMovPass behavior
Nov 2, 2025
9a59352
fix: correct FileCheck pattern in unsupported-affine-if test
Nov 2, 2025
00d6d55
fix: remove is_steering_unwrapped_op per reviewer feedback and fix test
Nov 3, 2025
0e22a58
feat: add complete multi-stage lowering demonstration for affine.if
Nov 3, 2025
7544c23
test: use deterministic patterns and move CHECK after code
Nov 3, 2025
17f512f
test: add visual separators in CHECK patterns
Nov 3, 2025
fadb2f0
fix: correct operation names in complex-affine-expressions test
Nov 4, 2025
File filter

Filter by extension

Filter by extension

Conversations
Failed to load comments.
Loading
Jump to
Jump to file
Failed to load files.
Loading
Diff view
Diff view
70 changes: 70 additions & 0 deletions include/Conversion/AffineToNeura/LoopNestAnalysis.h
Original file line number Diff line number Diff line change
@@ -0,0 +1,70 @@
#ifndef CONVERSION_AFFINE_TO_NEURA_LOOP_NEST_ANALYSIS_H
#define CONVERSION_AFFINE_TO_NEURA_LOOP_NEST_ANALYSIS_H

#include "mlir/Dialect/Affine/IR/AffineOps.h"
#include "mlir/Dialect/Func/IR/FuncOps.h"
#include "mlir/IR/Operation.h"
#include "llvm/ADT/DenseMap.h"
#include "llvm/ADT/SmallVector.h"
#include <memory>

namespace mlir {
namespace neura {

/// Loop information structure - Stores all analysis information for a single
/// affine.for loop, forming one node of the loop-nest tree built by
/// LoopNestAnalysis.
struct LoopInfo {
  affine::AffineForOp loop;                  // The loop operation itself.
  LoopInfo *parent = nullptr;                // Parent loop (nullptr if top-level).
  llvm::SmallVector<LoopInfo *, 4> children; // Child loops list.
  unsigned depth = 0;                        // Nesting depth (0 = top-level).
  bool is_perfect_nest = true;               // Whether it is a perfect nest.

  // Operations interleaved with child loops; non-empty lists mark the nest
  // as imperfect.
  llvm::SmallVector<Operation *, 4> operations_before_child; // Operations before child loops.
  llvm::SmallVector<Operation *, 4> operations_after_child;  // Operations after child loops.

  // explicit: prevents accidental implicit conversion from AffineForOp to
  // LoopInfo (single-argument constructors should not act as converters).
  explicit LoopInfo(affine::AffineForOp loop) : loop(loop) {}
};

/// Loop nest analysis class.
///
/// Purpose: Provides loop hierarchy information (parent/child relations,
/// nesting depth, perfect-nest classification) for the AffineToNeura pass to
/// support optimization decisions such as valid-signal reuse across nested
/// loops.
///
/// Ownership: LoopInfo nodes are owned by this analysis (see allLoops); all
/// pointers returned by the query interfaces remain valid for the lifetime
/// of the analysis object and must not be freed by callers.
///
/// Usage example:
/// LoopNestAnalysis analysis(func_op);
/// analysis.dump(); // Prints analysis results.
/// LoopInfo *info = analysis.getLoopInfo(loop);
/// if (info && info->parent) {
/// // This is a nested loop, can reuse parent's valid signal.
/// }
class LoopNestAnalysis {
public:
/// Constructor - Performs loop nest analysis on the given function.
/// Builds the loop tree and classifies perfect nests eagerly; queries
/// afterwards are lookups only.
explicit LoopNestAnalysis(func::FuncOp func);

/// Query interfaces. Lookup methods return nullptr (or an empty range) when
/// the given loop is unknown to the analysis — presumably only loops inside
/// the analyzed function are registered; verify against the .cpp.
LoopInfo *getLoopInfo(affine::AffineForOp loop) const; // Gets loop information.
llvm::ArrayRef<LoopInfo *> getTopLevelLoops() const { return topLevelLoops; } // Gets top-level loops.
llvm::ArrayRef<std::unique_ptr<LoopInfo>> getAllLoops() const { return allLoops; } // Gets all loops.
bool isPerfectNest(affine::AffineForOp loop) const; // Checks if perfect nest.
LoopInfo *getParentLoop(affine::AffineForOp loop) const; // Gets parent loop.
llvm::ArrayRef<LoopInfo *> getChildLoops(affine::AffineForOp loop) const; // Gets child loops.

/// Debug interface - Prints analysis results.
void dump() const;

private:
/// Internal analysis methods.
void buildLoopNestTree(func::FuncOp func); // Builds loop hierarchy tree.
void analyzePerfectNests(); // Analyzes perfect nest characteristics.

/// Data members.
llvm::DenseMap<Operation *, LoopInfo *> loopMap; // Loop fast lookup table (non-owning).
llvm::SmallVector<std::unique_ptr<LoopInfo>, 8> allLoops; // All loops (owns ownership).
llvm::SmallVector<LoopInfo *, 4> topLevelLoops; // Top-level loop pointers list (non-owning).
};

} // namespace neura
} // namespace mlir

#endif
1 change: 1 addition & 0 deletions include/Conversion/ConversionPasses.h
Original file line number Diff line number Diff line change
Expand Up @@ -18,6 +18,7 @@ std::unique_ptr<mlir::Pass> createLowerArithToNeuraPass();
std::unique_ptr<mlir::Pass> createLowerLlvmToNeuraPass();
std::unique_ptr<mlir::Pass> createLowerMemRefToNeuraPass();
std::unique_ptr<mlir::Pass> createLowerBuiltinToNeuraPass();
std::unique_ptr<mlir::Pass> createLowerAffineToNeuraPass();

#define GEN_PASS_REGISTRATION
#include "Conversion/ConversionPasses.h.inc"
Expand Down
12 changes: 12 additions & 0 deletions include/Conversion/ConversionPasses.td
Original file line number Diff line number Diff line change
Expand Up @@ -32,4 +32,16 @@ def LowerBuiltinToNeura : Pass<"lower-builtin-to-neura", "ModuleOp">{
let constructor = "mlir::createLowerBuiltinToNeuraPass()";
}

def LowerAffineToNeura : Pass<"lower-affine-to-neura", "func::FuncOp">{
let summary = "Lower Affine perfect nested loops to Neura loop_control operations";
let description = [{
Converts perfectly nested affine.for loops directly to Neura dialect using
loop_control operations, avoiding the need to flatten to LLVM IR first.
This preserves loop structure information for better optimization on
dataflow architectures.
}];
let constructor = "mlir::createLowerAffineToNeuraPass()";
let dependentDialects = ["mlir::neura::NeuraDialect", "mlir::affine::AffineDialect"];
}

#endif // CONVERSION_PASSES_TD
4 changes: 3 additions & 1 deletion include/NeuraDialect/Architecture/Architecture.h
Original file line number Diff line number Diff line change
Expand Up @@ -57,7 +57,9 @@ enum OperationKind {
// Loop control operations.
ILoopControl = 34,
// Constant operations.
IConstant = 35
IConstant = 35,
// Steering control fused operations.
ICarryInvariant = 36, IConditionalSelect = 37, IInvariantGroup = 38
};

//===----------------------------------------------------------------------===//
Expand Down
4 changes: 4 additions & 0 deletions include/NeuraDialect/Mapping/mapping_util.h
Original file line number Diff line number Diff line change
Expand Up @@ -12,6 +12,10 @@ OperationKind getOperationKindFromMlirOp(Operation *op);
// Returns true if the operation does not need CGRA tile placement.
bool is_non_materialized(Operation *op);

// Returns true if the operation is a steering-mode operation that doesn't
// require DataMovOp wrapping (e.g., constants, carry, invariant, etc.).
bool is_steering_unwrapped_op(Operation *op);

// Returns true if the operation is a materialized reserve user, i.e.,
// phi, invariant, carry.
bool isMaterializedReserveUser(Operation *op);
Expand Down
1 change: 1 addition & 0 deletions include/NeuraDialect/NeuraPasses.td
Original file line number Diff line number Diff line change
Expand Up @@ -134,4 +134,5 @@ def RemovePredicatedType : Pass<"remove-predicated-type", "ModuleOp"> {
}];
let constructor = "neura::createRemovePredicatedTypePass()";
}

#endif // NEURA_PASSES_TD
Loading