diff --git a/.github/workflows/main.yml b/.github/workflows/main.yml
index b4214ef0..8e801fd1 100644
--- a/.github/workflows/main.yml
+++ b/.github/workflows/main.yml
@@ -33,7 +33,11 @@ jobs:
         run: |
           sudo apt-get install ccache
           sudo apt-get install lld
-
+
+      # install yaml library
+      - name: install yaml library
+        run: sudo apt-get install libyaml-cpp-dev
+
       # setup LLVM
       - name: install a specific version of LLVM
         working-directory: ${{github.workspace}}
diff --git a/CMakeLists.txt b/CMakeLists.txt
index 008e3eaa..9f5a76bc 100644
--- a/CMakeLists.txt
+++ b/CMakeLists.txt
@@ -8,7 +8,7 @@
 set(CMAKE_EXPORT_COMPILE_COMMANDS ON)
 set(CMAKE_CXX_STANDARD 17)
 set(CMAKE_CXX_STANDARD_REQUIRED YES)
-add_compile_options(-g)
+add_compile_options(-g -fexceptions)
 
 # set(MLIR_DIR /home/lucas/llvm-project/build/lib/cmake/mlir)
 # set(LLVM_DIR /home/lucas/llvm-project/build/lib/cmake/llvm)
@@ -20,6 +20,8 @@ message(STATUS "Using LLVMConfig.cmake in: ${LLVM_DIR}")
 find_package(MLIR REQUIRED CONFIG)
 find_package(LLVM REQUIRED CONFIG)
 
+find_package(yaml-cpp REQUIRED)
+
 list(APPEND CMAKE_MODULE_PATH "${MLIR_CMAKE_DIR}")
 
 include_directories(${LLVM_INCLUDE_DIRS} ${MLIR_INCLUDE_DIRS})
diff --git a/include/NeuraDialect/Architecture/Architecture.h b/include/NeuraDialect/Architecture/Architecture.h
index dfd81237..87090d01 100644
--- a/include/NeuraDialect/Architecture/Architecture.h
+++ b/include/NeuraDialect/Architecture/Architecture.h
@@ -9,6 +9,7 @@
 #include <memory>
 #include <set>
 #include <string>
+#include <yaml-cpp/yaml.h>
 
 namespace mlir {
 namespace neura {
@@ -32,10 +33,39 @@ enum class FunctionUnitKind {
 
 // Enum for supported operation types.
 enum OperationKind {
-  IAdd = 0,
-  IMul = 1,
-  FAdd = 2,
-  FMul = 3
+  OpIAdd = 0,
+  OpIMul = 1,
+  OpFAdd = 2,
+  OpFMul = 3,
+  OpISub = 4,
+  OpFSub = 5,
+  OpIDiv = 6,
+  OpFDiv = 7,
+  OpFAddFAdd = 8,
+  OpFMulFAdd = 9,
+  OpVFMul = 10,
+  OpICmp = 11,
+  OpFCmp = 12,
+  OpNot = 13,
+  OpOr = 14,
+  OpSel = 15,
+  OpCast = 16,
+  OpPhi = 17,
+  OpLoad = 18,
+  OpLoadIndexed = 19,
+  OpStore = 20,
+  OpStoreIndexed = 21,
+  OpBr = 22,
+  OpCondBr = 23,
+  OpReturn = 24,
+  OpLoopController = 25,
+  OpGrantAlways = 26,
+  OpGrantOnce = 27,
+  OpGrantPredicate = 28,
+  OpGEP_ = 29,
+  OpConstant = 30,
+  OpDataMov = 31,
+  OpCtrlMov = 32
 };
 
 //===----------------------------------------------------------------------===//
@@ -103,7 +133,7 @@ class FunctionUnit : public BasicResource {
 class FixedPointAdder : public FunctionUnit {
 public:
   FixedPointAdder(int id) : FunctionUnit(id) {
-    supported_operations.insert(OperationKind::IAdd);
+    supported_operations.insert(OperationKind::OpIAdd);
   }
   std::string getType() const override { return "fixed_point_adder"; }
   ResourceKind getKind() const override { return ResourceKind::FunctionUnit; }
@@ -112,7 +142,7 @@ class FixedPointAdder : public FunctionUnit {
 class FixedPointMultiplier : public FunctionUnit {
 public:
   FixedPointMultiplier(int id) : FunctionUnit(id) {
-    supported_operations.insert(OperationKind::IMul);
+    supported_operations.insert(OperationKind::OpIMul);
   }
   std::string getType() const override { return "fixed_point_multiplier"; }
   ResourceKind getKind() const override { return ResourceKind::FunctionUnit; }
@@ -324,6 +354,7 @@ struct PairHash {
 class Architecture {
 public:
   Architecture(int width, int height);
+  Architecture(const YAML::Node& config);
 
   Tile* getTile(int id);
   Tile* getTile(int x, int y);
diff --git a/lib/NeuraDialect/Architecture/Architecture.cpp b/lib/NeuraDialect/Architecture/Architecture.cpp
index 0c08d9a8..749f76ad 100644
--- a/lib/NeuraDialect/Architecture/Architecture.cpp
+++ b/lib/NeuraDialect/Architecture/Architecture.cpp
@@ -17,7 +17,7 @@ Tile::Tile(int id, int x, int y) {
 
   // TODO: Add function units based on architecture specs.
   // @Jackcuii, https://github.com/coredac/dataflow/issues/82.
-  addFunctionUnit(std::make_unique<FixedPointAdder>(0));
+  // addFunctionUnit(std::make_unique<FixedPointAdder>(0));
 }
 
 int Tile::getId() const { return id; }
@@ -275,6 +275,44 @@ Architecture::Architecture(int width, int height) {
   }
 }
 
+Architecture::Architecture(const YAML::Node& config) {
+  // Extract width and height from the config.
+  int width = 4;  // default
+  int height = 4; // default
+
+  if (config["architecture"] && config["architecture"]["width"] && config["architecture"]["height"]) {
+    width = config["architecture"]["width"].as<int>();
+    height = config["architecture"]["height"].as<int>();
+  }
+
+  // Delegate the grid construction to the (width, height) constructor.
+  *this = Architecture(width, height);
+
+  // Add function units based on the architecture specs.
+  int num_tiles = width * height;
+  for (int i = 0; i < num_tiles; ++i) {
+    Tile *tile = getTile(i);
+    int fu_id = 0;
+    if (config["tile_overrides"][i]) {
+      // Override the default function units.
+      for (const auto& operation : config["tile_overrides"][i]["operations"]) {
+        if (operation.as<std::string>() == "add") {
+          tile->addFunctionUnit(std::make_unique<FixedPointAdder>(fu_id++));
+          // Add more function units here once more operations are supported.
+        }
+      }
+    } else if (config["tile_defaults"]) {
+      // Add the default function units.
+      for (const auto& operation : config["tile_defaults"]["operations"]) {
+        if (operation.as<std::string>() == "add") {
+          tile->addFunctionUnit(std::make_unique<FixedPointAdder>(fu_id++));
+        }
+      }
+    }
+  }
+}
+
+
 Tile *Architecture::getTile(int id) {
   auto it = id_to_tile.find(id);
   assert(it != id_to_tile.end() && "Tile with given ID not found");
diff --git a/lib/NeuraDialect/CMakeLists.txt b/lib/NeuraDialect/CMakeLists.txt
index c1faae04..c0011c77 100644
--- a/lib/NeuraDialect/CMakeLists.txt
+++ b/lib/NeuraDialect/CMakeLists.txt
@@ -20,6 +20,10 @@ add_mlir_dialect_library(MLIRNeura
   MLIRIR
   MLIRSupport
   MLIRInferTypeOpInterface
+  yaml-cpp
 )
 
+# Enable exception handling for yaml-cpp
+target_compile_options(MLIRNeura PRIVATE -fexceptions)
+
 add_subdirectory(Transforms)
\ No newline at end of file
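For reference, a minimal sketch of a spec the new Architecture(const YAML::Node&) constructor would accept is shown below. The keys (architecture.width/height, tile_defaults.operations, tile_overrides indexed by tile id) are exactly the ones the constructor reads; the file name is hypothetical, and "add" is the only operation string the loader currently recognizes:

    # cgra_spec.yaml (illustrative name)
    architecture:
      width: 4
      height: 4
    tile_defaults:
      operations: [add]     # every tile gets a FixedPointAdder by default
    tile_overrides:
      - operations: [add]   # entry i replaces the defaults for tile id i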
diff --git a/lib/NeuraDialect/Mapping/mapping_util.cpp b/lib/NeuraDialect/Mapping/mapping_util.cpp
index 1640e5ec..372dbc12 100644
--- a/lib/NeuraDialect/Mapping/mapping_util.cpp
+++ b/lib/NeuraDialect/Mapping/mapping_util.cpp
@@ -16,17 +16,40 @@ using namespace mlir::neura;
 
 namespace {
 inline OperationKind getOperationKindFromMlirOp(Operation *op) {
-  if (isa<neura::AddOp>(op))
-    return IAdd;
-  if (isa<neura::MulOp>(op))
-    return IMul;
-  if (isa<neura::FAddOp>(op))
-    return FAdd;
-  if (isa<neura::FMulOp>(op))
-    return FMul;
-  // TODO: Complete the list here.
-  // @Jackcuii, https://github.com/coredac/dataflow/issues/82.
-  return IAdd;
+  if (isa<neura::AddOp>(op)) return OpIAdd;
+  if (isa<neura::MulOp>(op)) return OpIMul;
+  if (isa<neura::FAddOp>(op)) return OpFAdd;
+  if (isa<neura::FMulOp>(op)) return OpFMul;
+  if (isa<neura::SubOp>(op)) return OpISub;
+  if (isa<neura::FSubOp>(op)) return OpFSub;
+  if (isa<neura::DivOp>(op)) return OpIDiv;
+  if (isa<neura::FDivOp>(op)) return OpFDiv;
+  if (isa<neura::FAddFAddOp>(op)) return OpFAddFAdd;
+  if (isa<neura::FMulFAddOp>(op)) return OpFMulFAdd;
+  if (isa<neura::VFMulOp>(op)) return OpVFMul;
+  if (isa<neura::ICmpOp>(op)) return OpICmp;
+  if (isa<neura::NotOp>(op)) return OpNot;
+  if (isa<neura::OrOp>(op)) return OpOr;
+  if (isa<neura::SelOp>(op)) return OpSel;
+  if (isa<neura::CastOp>(op)) return OpCast;
+  if (isa<neura::LoadOp>(op)) return OpLoad;
+  if (isa<neura::LoadIndexedOp>(op)) return OpLoadIndexed;
+  if (isa<neura::StoreOp>(op)) return OpStore;
+  if (isa<neura::StoreIndexedOp>(op)) return OpStoreIndexed;
+  if (isa<neura::Br>(op)) return OpBr;
+  if (isa<neura::CondBr>(op)) return OpCondBr;
+  if (isa<neura::ReturnOp>(op)) return OpReturn;
+  if (isa<neura::LoopControlOp>(op)) return OpLoopController;
+  if (isa<neura::GrantAlwaysOp>(op)) return OpGrantAlways;
+  if (isa<neura::GrantOnceOp>(op)) return OpGrantOnce;
+  if (isa<neura::GrantPredicateOp>(op)) return OpGrantPredicate;
+  if (isa<neura::GEP>(op)) return OpGEP_;
+  if (isa<neura::ConstantOp>(op)) return OpConstant;
+  if (isa<neura::PhiOp>(op)) return OpPhi;
+  if (isa<neura::DataMovOp>(op)) return OpDataMov;
+  if (isa<neura::CtrlMovOp>(op)) return OpCtrlMov;
+  // Default fallback.
+  return OpIAdd;
 }
 
 // Returns true if the operation does not need CGRA tile placement.
" + "If not specified, will use default 4x4 architecture."), + llvm::cl::init("")}; + +private: + // Loads architecture configuration from YAML file or returns default configuration + std::pair loadArchitectureConfig() const { + YAML::Node config; + bool use_default_arch = false; + + if (!archSpecPath.getValue().empty()) { + try { + std::ifstream file(archSpecPath.getValue()); + if (file.is_open()) { + config = YAML::Load(file); + if (config["architecture"]) { + llvm::outs() << "\033[31m[MapToAcceleratorPass] Loaded architecture from " + << archSpecPath.getValue() << "\033[0m\n"; + } else { + llvm::errs() << "[MapToAcceleratorPass] Invalid YAML format in " + << archSpecPath.getValue() << ", using default 4x4\n"; + use_default_arch = true; + } + } else { + llvm::errs() << "[MapToAcceleratorPass] Could not open architecture file " + << archSpecPath.getValue() << ", using default 4x4\n"; + use_default_arch = true; + } + } catch (const std::exception& e) { + llvm::errs() << "[MapToAcceleratorPass] Error parsing YAML file " + << archSpecPath.getValue() << ": " << e.what() << ", using default 4x4\n"; + use_default_arch = true; + } + } else { + use_default_arch = true; + llvm::errs() << "[MapToAcceleratorPass] No architecture specification provided, using default 4x4\n"; + } + + return {config, use_default_arch}; + } + +public: void runOnOperation() override { ModuleOp module = getOperation(); @@ -140,7 +187,13 @@ struct MapToAcceleratorPass func->setAttr("RecMII", rec_mii_attr); // AcceleratorConfig config{/*numTiles=*/8}; // Example - Architecture architecture(4, 4); + // Read architecture specification from command line option + auto [config, use_default_arch] = loadArchitectureConfig(); + + constexpr int kWidth = 4; + constexpr int kHeight = 4; + Architecture architecture = use_default_arch ? Architecture(kWidth, kHeight) : Architecture(config); + int res_mii = calculateResMii(func, architecture); IntegerAttr res_mii_attr = IntegerAttr::get(IntegerType::get(func.getContext(), 32), res_mii);