.github/workflows/main.yml (6 changes: 5 additions & 1 deletion)
@@ -33,7 +33,11 @@ jobs:
        run: |
          sudo apt-get install ccache
          sudo apt-get install lld
+
+      # install yaml library
+      - name: install yaml library
+        run: sudo apt-get install libyaml-cpp-dev
+
      # setup LLVM
      - name: install a specific version of LLVM
        working-directory: ${{github.workspace}}
CMakeLists.txt (4 changes: 3 additions & 1 deletion)
@@ -8,7 +8,7 @@ set(CMAKE_EXPORT_COMPILE_COMMANDS ON)
set(CMAKE_CXX_STANDARD 17)
set(CMAKE_CXX_STANDARD_REQUIRED YES)

-add_compile_options(-g)
+add_compile_options(-g -fexceptions)

# set(MLIR_DIR /home/lucas/llvm-project/build/lib/cmake/mlir)
# set(LLVM_DIR /home/lucas/llvm-project/build/lib/cmake/llvm)
@@ -20,6 +20,8 @@ message(STATUS "Using LLVMConfig.cmake in: ${LLVM_DIR}")
find_package(MLIR REQUIRED CONFIG)
find_package(LLVM REQUIRED CONFIG)

+find_package(yaml-cpp REQUIRED)

list(APPEND CMAKE_MODULE_PATH "${MLIR_CMAKE_DIR}")

include_directories(${LLVM_INCLUDE_DIRS} ${MLIR_INCLUDE_DIRS})
include/NeuraDialect/Architecture/Architecture.h (43 changes: 37 additions & 6 deletions)
@@ -9,6 +9,7 @@
#include <set>
#include <unordered_map>
#include <vector>
+#include <yaml-cpp/yaml.h>

namespace mlir {
namespace neura {
@@ -32,10 +33,39 @@ enum class FunctionUnitKind {

// Enum for supported operation types.
enum OperationKind {
-  IAdd = 0,
-  IMul = 1,
-  FAdd = 2,
-  FMul = 3
+  OpIAdd = 0,
+  OpIMul = 1,
+  OpFAdd = 2,
+  OpFMul = 3,
+  OpISub = 4,
+  OpFSub = 5,
+  OpIDiv = 6,
+  OpFDiv = 7,
+  OpFAddFAdd = 8,
+  OpFMulFAdd = 9,
+  OpVFMul = 10,
+  OpICmp = 11,
+  OpFCmp = 12,
+  OpNot = 13,
+  OpOr = 14,
+  OpSel = 15,
+  OpCast = 16,
+  OpPhi = 17,
+  OpLoad = 18,
+  OpLoadIndexed = 19,
+  OpStore = 20,
+  OpStoreIndexed = 21,
+  OpBr = 22,
+  OpCondBr = 23,
+  OpReturn = 24,
+  OpLoopController = 25,
+  OpGrantAlways = 26,
+  OpGrantOnce = 27,
+  OpGrantPredicate = 28,
+  OpGEP_ = 29,
+  OpConstant = 30,
+  OpDataMov = 31,
+  OpCtrlMov = 32
};

//===----------------------------------------------------------------------===//
@@ -103,7 +133,7 @@ class FunctionUnit : public BasicResource {
class FixedPointAdder : public FunctionUnit {
public:
  FixedPointAdder(int id) : FunctionUnit(id) {
-    supported_operations.insert(OperationKind::IAdd);
+    supported_operations.insert(OperationKind::OpIAdd);
  }
  std::string getType() const override { return "fixed_point_adder"; }
  ResourceKind getKind() const override { return ResourceKind::FunctionUnit; }
@@ -112,7 +142,7 @@ class FixedPointAdder : public FunctionUnit {
class FixedPointMultiplier : public FunctionUnit {
public:
  FixedPointMultiplier(int id) : FunctionUnit(id) {
-    supported_operations.insert(OperationKind::IMul);
+    supported_operations.insert(OperationKind::OpIMul);
  }
  std::string getType() const override { return "fixed_point_multiplier"; }
  ResourceKind getKind() const override { return ResourceKind::FunctionUnit; }
@@ -324,6 +354,7 @@ struct PairHash {
class Architecture {
public:
  Architecture(int width, int height);
+  Architecture(const YAML::Node& config);

  Tile* getTile(int id);
  Tile* getTile(int x, int y);
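The new `Architecture(const YAML::Node&)` constructor lets callers hand a parsed YAML document straight to the architecture model. A minimal usage sketch, assuming the include path matches the header location above (`YAML::LoadFile` is standard yaml-cpp API; error handling omitted):

```cpp
#include <yaml-cpp/yaml.h>

#include "NeuraDialect/Architecture/Architecture.h"

int main() {
  // Parse the spec file; LoadFile throws YAML::BadFile or a parser
  // exception on failure.
  YAML::Node config = YAML::LoadFile("arch.yaml");

  // Builds the CGRA model; the constructor falls back to a 4x4 grid
  // if the architecture/width/height keys are absent.
  mlir::neura::Architecture arch(config);
  return 0;
}
```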
lib/NeuraDialect/Architecture/Architecture.cpp (40 changes: 39 additions & 1 deletion)
@@ -17,7 +17,7 @@ Tile::Tile(int id, int x, int y) {

  // TODO: Add function units based on architecture specs.
  // @Jackcuii, https://github.com/coredac/dataflow/issues/82.
-  addFunctionUnit(std::make_unique<FixedPointAdder>(0));
+  // addFunctionUnit(std::make_unique<FixedPointAdder>(0));
}

int Tile::getId() const { return id; }
@@ -275,6 +275,44 @@ Architecture::Architecture(int width, int height) {
  }
}

+Architecture::Architecture(const YAML::Node& config) {
+  // Extract width and height from config.
+  int width = 4;  // default
+  int height = 4; // default
+
+  if (config["architecture"] && config["architecture"]["width"] && config["architecture"]["height"]) {
+    width = config["architecture"]["width"].as<int>();
+    height = config["architecture"]["height"].as<int>();
+  }
+
+  // Call the constructor with width and height.
+  *this = Architecture(width, height);
+
+  // Add function units based on the architecture specs.
+  int num_tiles = width * height;
[Copilot AI commented on lines +278 to +292, Jul 31, 2025]
Using the assignment operator on `*this` in a constructor is problematic and can lead to
undefined behavior. Consider using constructor delegation or member initialization instead.

Suggested change:

    Architecture::Architecture(const YAML::Node& config)
        : Architecture(
              config["architecture"] && config["architecture"]["width"] && config["architecture"]["height"]
                  ? config["architecture"]["width"].as<int>()
                  : 4,
              config["architecture"] && config["architecture"]["height"]
                  ? config["architecture"]["height"].as<int>()
                  : 4) {
      // Add function units based on the architecture specs.
      int num_tiles = getNumTiles();
+  for (int i = 0; i < num_tiles; ++i) {
+    Tile *tile = getTile(i);
+    int fu_id = 0;
+    if (config["tile_overrides"][i]) {
+      // Override the default function units.
+      for (const auto& operation : config["tile_overrides"][i]["operations"]) {
+        if (operation.as<std::string>() == "add") {
+          tile->addFunctionUnit(std::make_unique<FixedPointAdder>(fu_id++));
+          // Add more function units here if more operations are supported.
+        }
+      }
+    } else if (config["tile_defaults"]) {
+      // Add default function units.
+      for (const auto& operation : config["tile_defaults"]["operations"]) {
+        if (operation.as<std::string>() == "add") {
+          tile->addFunctionUnit(std::make_unique<FixedPointAdder>(fu_id++));
+        }
+      }
+    }
+  }
+}


Tile *Architecture::getTile(int id) {
  auto it = id_to_tile.find(id);
  assert(it != id_to_tile.end() && "Tile with given ID not found");
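For reference, the constructor above reads three top-level keys: `architecture` (grid size), `tile_defaults`, and `tile_overrides` (indexed by tile id). A hypothetical `arch.yaml` exercising all three paths; the key names come from the parsing code, while the concrete layout is an assumption:

```yaml
# Hypothetical spec consumed by Architecture(const YAML::Node&).
architecture:
  width: 4
  height: 4

# Function units instantiated on every tile without an override.
# "add" is the only operation string the constructor currently recognizes.
tile_defaults:
  operations: [add]

# Per-tile overrides; entry i applies to tile id i.
tile_overrides:
  - operations: [add]
```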
lib/NeuraDialect/CMakeLists.txt (4 changes: 4 additions & 0 deletions)
@@ -20,6 +20,10 @@ add_mlir_dialect_library(MLIRNeura
  MLIRIR
  MLIRSupport
  MLIRInferTypeOpInterface
+  yaml-cpp
)

+# Enable exception handling for yaml-cpp
+target_compile_options(MLIRNeura PRIVATE -fexceptions)

add_subdirectory(Transforms)
lib/NeuraDialect/Mapping/mapping_util.cpp (45 changes: 34 additions & 11 deletions)
@@ -16,17 +16,40 @@ using namespace mlir::neura;
namespace {

inline OperationKind getOperationKindFromMlirOp(Operation *op) {
-  if (isa<neura::AddOp>(op))
-    return IAdd;
-  if (isa<neura::MulOp>(op))
-    return IMul;
-  if (isa<neura::FAddOp>(op))
-    return FAdd;
-  if (isa<neura::FMulOp>(op))
-    return FMul;
-  // TODO: Complete the list here.
-  // @Jackcuii, https://github.com/coredac/dataflow/issues/82.
-  return IAdd;
+  if (isa<neura::AddOp>(op)) return OpIAdd;
+  if (isa<neura::MulOp>(op)) return OpIMul;
+  if (isa<neura::FAddOp>(op)) return OpFAdd;
+  if (isa<neura::FMulOp>(op)) return OpFMul;
+  if (isa<neura::SubOp>(op)) return OpISub;
+  if (isa<neura::FSubOp>(op)) return OpFSub;
+  if (isa<neura::DivOp>(op)) return OpIDiv;
+  if (isa<neura::FDivOp>(op)) return OpFDiv;
+  if (isa<neura::FAddFAddOp>(op)) return OpFAddFAdd;
+  if (isa<neura::FMulFAddOp>(op)) return OpFMulFAdd;
+  if (isa<neura::VFMulOp>(op)) return OpVFMul;
+  if (isa<neura::ICmpOp>(op)) return OpICmp;
+  if (isa<neura::NotOp>(op)) return OpNot;
+  if (isa<neura::OrOp>(op)) return OpOr;
+  if (isa<neura::SelOp>(op)) return OpSel;
+  if (isa<neura::CastOp>(op)) return OpCast;
+  if (isa<neura::LoadOp>(op)) return OpLoad;
+  if (isa<neura::LoadIndexedOp>(op)) return OpLoadIndexed;
+  if (isa<neura::StoreOp>(op)) return OpStore;
+  if (isa<neura::StoreIndexedOp>(op)) return OpStoreIndexed;
+  if (isa<neura::Br>(op)) return OpBr;
+  if (isa<neura::CondBr>(op)) return OpCondBr;
+  if (isa<neura::ReturnOp>(op)) return OpReturn;
+  if (isa<neura::LoopControllerOp>(op)) return OpLoopController;
+  if (isa<neura::GrantAlwaysOp>(op)) return OpGrantAlways;
+  if (isa<neura::GrantOnceOp>(op)) return OpGrantOnce;
+  if (isa<neura::GrantPredicateOp>(op)) return OpGrantPredicate;
+  if (isa<neura::GEP>(op)) return OpGEP_;
[Collaborator commented]
Why does this line use `OpGEP_` instead of `OpGEP`?

[Author replied]
Oh, my mistake. It should use `OpGEP`.
+  if (isa<neura::ConstantOp>(op)) return OpConstant;
+  if (isa<neura::PhiOp>(op)) return OpPhi;
+  if (isa<neura::DataMovOp>(op)) return OpDataMov;
+  if (isa<neura::CtrlMovOp>(op)) return OpCtrlMov;
+  // Default fallback.
+  return OpIAdd;
}

// Returns true if the operation does not need CGRA tile placement.
lib/NeuraDialect/Transforms/MapToAcceleratorPass.cpp (55 changes: 54 additions & 1 deletion)
@@ -13,6 +13,9 @@
#include "mlir/Pass/Pass.h"
#include "mlir/Transforms/GreedyPatternRewriteDriver.h"
#include "llvm/Support/raw_ostream.h"
+#include <cstdlib>
+#include <fstream>
+#include <yaml-cpp/yaml.h>

using namespace mlir;
using namespace mlir::neura;
@@ -46,6 +49,50 @@ struct MapToAcceleratorPass
"max_loc=5, max_depth=3)"),
llvm::cl::init("heuristic")};

Option<std::string> archSpecPath{
*this, "arch-spec",
llvm::cl::desc("Path to the architecture specification YAML file. "
"If not specified, will use default 4x4 architecture."),
llvm::cl::init("")};

private:
// Loads architecture configuration from YAML file or returns default configuration
std::pair<YAML::Node, bool> loadArchitectureConfig() const {
YAML::Node config;
bool use_default_arch = false;

if (!archSpecPath.getValue().empty()) {
try {
std::ifstream file(archSpecPath.getValue());
if (file.is_open()) {
config = YAML::Load(file);
if (config["architecture"]) {
llvm::outs() << "\033[31m[MapToAcceleratorPass] Loaded architecture from "
<< archSpecPath.getValue() << "\033[0m\n";
} else {
llvm::errs() << "[MapToAcceleratorPass] Invalid YAML format in "
<< archSpecPath.getValue() << ", using default 4x4\n";
use_default_arch = true;
}
} else {
llvm::errs() << "[MapToAcceleratorPass] Could not open architecture file "
<< archSpecPath.getValue() << ", using default 4x4\n";
use_default_arch = true;
}
} catch (const std::exception& e) {
llvm::errs() << "[MapToAcceleratorPass] Error parsing YAML file "
<< archSpecPath.getValue() << ": " << e.what() << ", using default 4x4\n";
use_default_arch = true;
}
} else {
use_default_arch = true;
llvm::errs() << "[MapToAcceleratorPass] No architecture specification provided, using default 4x4\n";
}

return {config, use_default_arch};
}

public:
void runOnOperation() override {
ModuleOp module = getOperation();

@@ -140,7 +187,13 @@
func->setAttr("RecMII", rec_mii_attr);

// AcceleratorConfig config{/*numTiles=*/8}; // Example
Architecture architecture(4, 4);
// Read architecture specification from command line option
auto [config, use_default_arch] = loadArchitectureConfig();

constexpr int kWidth = 4;
constexpr int kHeight = 4;
Architecture architecture = use_default_arch ? Architecture(kWidth, kHeight) : Architecture(config);

int res_mii = calculateResMii(func, architecture);
IntegerAttr res_mii_attr =
IntegerAttr::get(IntegerType::get(func.getContext(), 32), res_mii);
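With the new option in place, a spec file can be passed to the pass from the command line. A hypothetical invocation; the `neura-opt` driver name and `map-to-accelerator` pass flag are assumptions based on common MLIR conventions, and only the `arch-spec` option name appears in this diff:

```bash
# Map using a custom architecture spec.
neura-opt input.mlir --map-to-accelerator="arch-spec=arch.yaml"

# Omitting arch-spec logs a note and falls back to the default 4x4 grid.
neura-opt input.mlir --map-to-accelerator
```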