Skip to content
Merged
Show file tree
Hide file tree
Changes from all commits
Commits
File filter

Filter by extension

Filter by extension


Conversations
Failed to load comments.
Loading
Jump to
Jump to file
Failed to load files.
Loading
Diff view
Diff view
4 changes: 2 additions & 2 deletions .github/workflows/main.yml
Original file line number Diff line number Diff line change
Expand Up @@ -43,7 +43,7 @@ jobs:
git checkout 6146a88
mkdir build && cd build
cmake -G Ninja ../llvm \
-DLLVM_ENABLE_PROJECTS="mlir" \
-DLLVM_ENABLE_PROJECTS="mlir;clang" \
-DLLVM_BUILD_EXAMPLES=OFF \
-DLLVM_TARGETS_TO_BUILD="Native" \
-DCMAKE_BUILD_TYPE=Release \
Expand All @@ -58,8 +58,8 @@ jobs:
-DCMAKE_C_COMPILER_LAUNCHER=ccache \
-DCMAKE_CXX_COMPILER_LAUNCHER=ccache

cmake --build .
cmake --build . --target check-mlir

# setup mlir-cgra
- name: setup dataflow tool-chain
working-directory: ${{github.workspace}}
Expand Down
1 change: 1 addition & 0 deletions CMakeLists.txt
Original file line number Diff line number Diff line change
Expand Up @@ -48,6 +48,7 @@ set(FILECHECK ${LLVM_TOOLS_BINARY_DIR}/FileCheck)
set(MLIR_OPT ${LLVM_TOOLS_BINARY_DIR}/mlir-opt)
set(MLIR_TRANSLATE ${LLVM_TOOLS_BINARY_DIR}/mlir-translate)
set(LLC ${LLVM_TOOLS_BINARY_DIR}/llc)
set(CLANG ${LLVM_TOOLS_BINARY_DIR}/clang)

# Configures lit.cfg from lit.cfg.in
configure_file(
Expand Down
3 changes: 2 additions & 1 deletion README.md
Original file line number Diff line number Diff line change
Expand Up @@ -13,7 +13,7 @@ Build LLVM & Neura
$ mkdir build && cd build
# May need install ccache and lld.
$ cmake -G Ninja ../llvm \
-DLLVM_ENABLE_PROJECTS="mlir" \
-DLLVM_ENABLE_PROJECTS="mlir;clang" \
-DLLVM_BUILD_EXAMPLES=OFF \
-DLLVM_TARGETS_TO_BUILD="Native" \
-DCMAKE_BUILD_TYPE=Release \
Expand All @@ -28,6 +28,7 @@ Build LLVM & Neura
-DCMAKE_C_COMPILER_LAUNCHER=ccache \
-DCMAKE_CXX_COMPILER_LAUNCHER=ccache
$ cmake --build . --target check-mlir
$ cmake --build . --target check-clang
```

- Build Neura:
Expand Down
14 changes: 7 additions & 7 deletions include/NeuraDialect/NeuraOps.td
Original file line number Diff line number Diff line change
Expand Up @@ -110,11 +110,11 @@ def Neura_ICmpOp : Op<NeuraDialect, "icmp"> {
def Neura_FCmpOp : Op<NeuraDialect, "fcmp"> {
let summary = "Floating-point compare operation";
let opName = "fcmp";
let arguments = (ins AnyFloat:$lhs,
AnyFloat:$rhs,
let arguments = (ins AnyType:$lhs,
AnyType:$rhs,
Optional<AnyType>:$predicate,
StrAttr:$cmpType);
let results = (outs I1:$result);
let results = (outs AnyType:$result);
// let assemblyFormat = "$lhs `,` $rhs `,` $cmpType attr-dict `:` type($result)";
// let traits = [SameOperandsAndResultElementType];
}
Expand Down Expand Up @@ -239,16 +239,16 @@ def Neura_VFMulOp : Op<NeuraDialect, "vfmul"> {

def Neura_FAddFAddOp : Op<NeuraDialect, "fadd_fadd"> {
let summary = "Fused fadd(fadd(a, b), c)";
let arguments = (ins AnyFloat:$a, AnyFloat:$b, AnyFloat:$c, Optional<AnyType>:$predicate);
let results = (outs AnyFloat:$result);
let arguments = (ins AnyType:$a, AnyType:$b, AnyType:$c, Optional<AnyType>:$predicate);
let results = (outs AnyType:$result);
// let assemblyFormat = "$a `,` $b `,` $c `,` $predicate attr-dict `:` type($result)";
let traits = [SameOperandsAndResultElementType];
}

def Neura_FMulFAddOp : Op<NeuraDialect, "fmul_fadd"> {
let summary = "Fused fadd(fmul(a, b), c)";
let arguments = (ins AnyFloat:$a, AnyFloat:$b, AnyFloat:$c, Optional<AnyType>:$predicate);
let results = (outs AnyFloat:$result);
let arguments = (ins AnyType:$a, AnyType:$b, AnyType:$c, Optional<AnyType>:$predicate);
let results = (outs AnyType:$result);
// let assemblyFormat = "$a `,` $b `,` $c `,` $predicate attr-dict `:` type($result)";
let traits = [SameOperandsAndResultElementType];
}
Expand Down
1 change: 1 addition & 0 deletions include/NeuraDialect/NeuraPasses.h
Original file line number Diff line number Diff line change
Expand Up @@ -27,6 +27,7 @@ std::unique_ptr<mlir::Pass> createLeveragePredicatedValuePass();
std::unique_ptr<mlir::Pass> createMapToAcceleratorPass();
std::unique_ptr<mlir::Pass> createGenerateCodePass();
std::unique_ptr<mlir::Pass> createFuseControlFlowPass();
std::unique_ptr<mlir::Pass> createCanonicalizeLiveInPass();

#define GEN_PASS_REGISTRATION
#include "NeuraDialect/NeuraPasses.h.inc"
Expand Down
11 changes: 11 additions & 0 deletions include/NeuraDialect/NeuraPasses.td
Original file line number Diff line number Diff line change
Expand Up @@ -75,4 +75,15 @@ def FuseControlFlow: Pass<"fuse-control-flow", "ModuleOp">{
let constructor = "neura::createFuseControlFlowPass()";
}

def CanonicalizeLiveIn : Pass<"canonicalize-live-in", "ModuleOp"> {
let summary = "Canonicalizes live-in values/operations in each basic block.";
let description = [{
This pass applies canonicalization transformations to Neura dialect operations.
The canonicalization includes:
1. Promoting function arguments to neura constant operations.
2. Converting all live-in values of each basic block to block arguments.
}];
let constructor = "neura::createCanonicalizeLiveInPass()";
}

#endif // NEURA_PASSES_TD
2 changes: 1 addition & 1 deletion lib/Conversion/ArithToNeura/ArithToNeuraPass.cpp
Original file line number Diff line number Diff line change
Expand Up @@ -39,7 +39,7 @@ struct ArithConstantToNeuraConstant
Attribute value = op.getValue();
// Optional predicate parameter can be null.
rewriter.replaceOpWithNewOp<neura::ConstantOp>(op, result_type, value,
nullptr);
rewriter.getBoolAttr(true));
return success();
}
};
Expand Down
1 change: 1 addition & 0 deletions lib/NeuraDialect/Transforms/CMakeLists.txt
Original file line number Diff line number Diff line change
Expand Up @@ -11,6 +11,7 @@ add_mlir_library(
MapToAcceleratorPass.cpp
GenerateCodePass.cpp
FuseControlFlowPass.cpp
CanonicalizeLiveInPass.cpp

DEPENDS
MLIRNeuraTransformsIncGen
Expand Down
236 changes: 236 additions & 0 deletions lib/NeuraDialect/Transforms/CanonicalizeLiveInPass.cpp
Original file line number Diff line number Diff line change
@@ -0,0 +1,236 @@
#include "NeuraDialect/NeuraDialect.h"
#include "NeuraDialect/NeuraOps.h"
#include "mlir/Dialect/Func/IR/FuncOps.h"
#include "mlir/Dialect/LLVMIR/LLVMDialect.h"
#include "mlir/IR/Block.h"
#include "mlir/IR/Operation.h"
#include "mlir/IR/PatternMatch.h"
#include "mlir/IR/Region.h"
#include "mlir/IR/Value.h"
#include "mlir/Pass/Pass.h"
#include "mlir/Transforms/GreedyPatternRewriteDriver.h"
#include "llvm/ADT/MapVector.h"
#include "llvm/Support/Casting.h"
#include "llvm/Support/raw_ostream.h"
#include <string>

using namespace mlir;

#define GEN_PASS_DEF_NEURACANONICALIZE
#include "NeuraDialect/NeuraPasses.h.inc"

namespace {
// Replaces every use of the entry-block (function) arguments of `region`
// with a `neura.constant` op carrying a placeholder string value
// ("%arg<N>") and a true predicate. The arguments themselves are left in
// place (only their uses are redirected). Always returns success.
LogicalResult promoteFunctionArgsToConstants(Region &region) {
  if (region.empty()) {
    return success();
  }

  Block &entry_block = region.front();
  OpBuilder builder(&entry_block, entry_block.begin());

  // Snapshots the arguments; replaceAllUsesWith below does not modify the
  // argument list, but a stable copy keeps the loop independent of it.
  SmallVector<BlockArgument, 4> args(entry_block.getArguments().begin(),
                                     entry_block.getArguments().end());

  // Creates one constant operation per function argument.
  for (auto [idx, arg] : llvm::enumerate(args)) {
    // Note: the original literal was "\%arg", which is an invalid C++
    // escape sequence; the intended string is plain "%arg<N>".
    auto const_op = builder.create<neura::ConstantOp>(
        arg.getLoc(), arg.getType(),
        builder.getStringAttr("%arg" + std::to_string(idx)),
        // For constant operations, the default predicate is true.
        builder.getBoolAttr(true));
    arg.replaceAllUsesWith(const_op.getResult());
  }

  return success();
}

// Rewrites every non-entry block of `region` so that all live-in values
// (values defined outside the block but used inside it) are received as
// explicit block arguments, and updates each predecessor's terminator to
// forward those values. Always returns success.
LogicalResult promoteLiveInValuesToBlockArgs(Region &region) {
  if (region.empty()) {
    return success();
  }

  for (Block &block : region.getBlocks()) {
    // Skips the entry block: its incoming values are the function
    // arguments, handled separately by promoteFunctionArgsToConstants().
    if (&block == &region.front())
      continue;

    // Identifies all the live-in values in the block. SetVector gives a
    // deterministic insertion order for the new arguments.
    llvm::SetVector<Value> live_ins;

    // NOTE(review): only top-level operations are scanned; operands inside
    // nested regions (if any op in this dialect carries one) would not be
    // rewritten — confirm no such ops reach this pass.
    for (Operation &op : block.getOperations()) {
      for (Value operand : op.getOperands()) {
        if (auto block_arg = dyn_cast<BlockArgument>(operand)) {
          // A block argument owned by another block is live-in here.
          if (block_arg.getOwner() != &block) {
            live_ins.insert(operand);
          }
        } else {
          // An op result defined outside the current block is live-in.
          Operation *def_op = operand.getDefiningOp();
          if (def_op && def_op->getBlock() != &block) {
            live_ins.insert(operand);
          }
        }
      }
    }

    if (live_ins.empty())
      continue;

    // Appends one new block argument per live-in value.
    unsigned original_num_args = block.getNumArguments();
    for (Value value : live_ins) {
      block.addArgument(value.getType(), value.getLoc());
    }

    // Maps each live-in value to its newly created block argument.
    DenseMap<Value, Value> value_to_arg;
    for (unsigned i = 0; i < live_ins.size(); ++i) {
      value_to_arg[live_ins[i]] = block.getArgument(original_num_args + i);
    }

    // Redirects all in-block uses of the live-in values to the new block
    // arguments (including uses in this block's own terminator).
    for (Operation &op : block.getOperations()) {
      for (unsigned i = 0; i < op.getNumOperands(); ++i) {
        auto it = value_to_arg.find(op.getOperand(i));
        if (it != value_to_arg.end()) {
          op.setOperand(i, it->second);
        }
      }
    }

    // Snapshots the predecessors before rewriting their terminators:
    // getPredecessors() iterates this block's use list, which is mutated
    // when a branch op is erased/recreated below. SetVector also
    // deduplicates a predecessor whose cond_br targets this block on both
    // edges, which would otherwise be visited twice and append the
    // live-ins twice.
    llvm::SetVector<Block *> pred_blocks;
    for (Block *pred : block.getPredecessors()) {
      pred_blocks.insert(pred);
    }

    for (Block *pred_block : pred_blocks) {
      Operation *pred_op = pred_block->getTerminator();
      // Handles unconditional branch operations.
      if (auto br_op = dyn_cast<neura::Br>(pred_op)) {
        if (br_op.getDest() == &block) {
          // Keeps the original operands and appends the live-in values.
          SmallVector<Value, 4> new_operands(br_op.getOperands().begin(),
                                             br_op.getOperands().end());
          new_operands.append(live_ins.begin(), live_ins.end());

          // Creates a new branch operation with the updated operands and
          // erases the old one.
          OpBuilder builder(br_op);
          builder.create<neura::Br>(br_op.getLoc(), new_operands, &block);
          br_op.erase();
        }
      }
      // Handles conditional branch operations: appends the live-ins to
      // whichever successor edge(s) enter this block.
      else if (auto cond_br_op = dyn_cast<neura::CondBr>(pred_op)) {
        OpBuilder builder(cond_br_op);
        bool needs_update = false;

        Block *true_dest = cond_br_op.getTrueDest();
        Block *false_dest = cond_br_op.getFalseDest();

        SmallVector<Value, 4> true_operands(cond_br_op.getTrueArgs().begin(),
                                            cond_br_op.getTrueArgs().end());
        SmallVector<Value, 4> false_operands(
            cond_br_op.getFalseArgs().begin(), cond_br_op.getFalseArgs().end());

        // Checks if the true branch destination is the current block.
        if (true_dest == &block) {
          needs_update = true;
          true_operands.append(live_ins.begin(), live_ins.end());
        }

        // Checks if the false branch destination is the current block.
        if (false_dest == &block) {
          needs_update = true;
          false_operands.append(live_ins.begin(), live_ins.end());
        }

        if (needs_update) {
          // Predicated bit defaults to null.
          // NOTE(review): the rebuilt cond_br always passes a null
          // predicate — confirm the original op never carries one at
          // this stage, otherwise it would be dropped here.
          builder.create<neura::CondBr>(
              cond_br_op.getLoc(), cond_br_op.getCondition(), nullptr,
              true_operands, false_operands, true_dest, false_dest);

          cond_br_op.erase();
        }
      }
      // NOTE(review): any other terminator kind is left untouched; if one
      // branches into `block` it would not pass the new arguments and the
      // IR would be left invalid — confirm only neura.br/neura.cond_br
      // reach this pass.
    }
  }

  return success();
}

// Pass driver: walks the module and canonicalizes each function-like op
// tagged with accelerator="neura" (either func.func or llvm.func).
struct CanonicalizeLiveInPass
    : public PassWrapper<CanonicalizeLiveInPass, OperationPass<ModuleOp>> {
  MLIR_DEFINE_EXPLICIT_INTERNAL_INLINE_TYPE_ID(CanonicalizeLiveInPass)

  StringRef getArgument() const override { return "canonicalize-live-in"; }
  StringRef getDescription() const override {
    return "Canonicalizes live-in values/operations in each basic block.";
  }
  void getDependentDialects(DialectRegistry &registry) const override {
    registry.insert<mlir::neura::NeuraDialect, mlir::LLVM::LLVMDialect,
                    mlir::func::FuncDialect>();
  }

  void runOnOperation() override {
    // Returns the body region of `op` if it is a func.func or llvm.func
    // carrying the accelerator="neura" attribute; nullptr otherwise.
    auto get_target_region = [](Operation *op) -> Region * {
      auto accel_attr = op->getAttrOfType<StringAttr>("accelerator");
      if (!accel_attr || accel_attr.getValue() != "neura")
        return nullptr;
      if (auto func_op = dyn_cast<func::FuncOp>(op))
        return &func_op.getBody();
      if (auto llvm_func = dyn_cast<LLVM::LLVMFuncOp>(op))
        return &llvm_func.getBody();
      return nullptr;
    };

    getOperation().walk([&](Operation *op) {
      Region *region = get_target_region(op);
      if (!region || region->empty())
        return;

      // Runs arg promotion first, then live-in promotion; stops at the
      // first failure.
      if (failed(promoteFunctionArgsToConstants(*region)) ||
          failed(promoteLiveInValuesToBlockArgs(*region))) {
        signalPassFailure();
      }
    });
  }
};
} // namespace

namespace mlir::neura {
// Factory used by pass registration (declared in NeuraDialect/NeuraPasses.h).
std::unique_ptr<Pass> createCanonicalizeLiveInPass() {
  return std::make_unique<CanonicalizeLiveInPass>();
}
} // namespace mlir::neura
14 changes: 8 additions & 6 deletions lib/NeuraDialect/Transforms/FusePatternsPass.cpp
Original file line number Diff line number Diff line change
@@ -1,8 +1,8 @@
#include "NeuraDialect/NeuraOps.h"
#include "mlir/Dialect/Func/IR/FuncOps.h"
#include "mlir/IR/PatternMatch.h"
#include "mlir/Pass/Pass.h"
#include "mlir/Transforms/GreedyPatternRewriteDriver.h"
#include "mlir/IR/PatternMatch.h"
#include "mlir/Dialect/Func/IR/FuncOps.h"
#include "NeuraDialect/NeuraOps.h"

using namespace mlir;

Expand Down Expand Up @@ -96,11 +96,14 @@ struct FuseFMulFAddPattern : public OpRewritePattern<neura::FAddOp> {
}
};

struct FusePatternsPass : public PassWrapper<FusePatternsPass, OperationPass<ModuleOp>> {
struct FusePatternsPass
: public PassWrapper<FusePatternsPass, OperationPass<ModuleOp>> {
MLIR_DEFINE_EXPLICIT_INTERNAL_INLINE_TYPE_ID(FusePatternsPass)

StringRef getArgument() const override { return "fuse-patterns"; }
StringRef getDescription() const override { return "Apply Neura fusion patterns."; }
StringRef getDescription() const override {
return "Apply Neura fusion patterns.";
}

void runOnOperation() override {
RewritePatternSet patterns(&getContext());
Expand Down Expand Up @@ -131,4 +134,3 @@ std::unique_ptr<Pass> createFusePatternsPass() {
return std::make_unique<FusePatternsPass>();
}
} // namespace mlir::neura

Loading