diff --git a/Cargo.lock b/Cargo.lock
index 6936397..b2ad5be 100644
--- a/Cargo.lock
+++ b/Cargo.lock
@@ -683,6 +683,56 @@ dependencies = [
  "libc",
 ]

+[[package]]
+name = "anstream"
+version = "0.6.19"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "301af1932e46185686725e0fad2f8f2aa7da69dd70bf6ecc44d6b703844a3933"
+dependencies = [
+ "anstyle",
+ "anstyle-parse",
+ "anstyle-query",
+ "anstyle-wincon",
+ "colorchoice",
+ "is_terminal_polyfill",
+ "utf8parse",
+]
+
+[[package]]
+name = "anstyle"
+version = "1.0.11"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "862ed96ca487e809f1c8e5a8447f6ee2cf102f846893800b20cebdf541fc6bbd"
+
+[[package]]
+name = "anstyle-parse"
+version = "0.2.7"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "4e7644824f0aa2c7b9384579234ef10eb7efb6a0deb83f9630a49594dd9c15c2"
+dependencies = [
+ "utf8parse",
+]
+
+[[package]]
+name = "anstyle-query"
+version = "1.1.3"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "6c8bdeb6047d8983be085bab0ba1472e6dc604e7041dbf6fcd5e71523014fae9"
+dependencies = [
+ "windows-sys 0.59.0",
+]
+
+[[package]]
+name = "anstyle-wincon"
+version = "3.0.9"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "403f75924867bb1033c59fbf0797484329750cfbe3c4325cd33127941fabc882"
+dependencies = [
+ "anstyle",
+ "once_cell_polyfill",
+ "windows-sys 0.59.0",
+]
+
 [[package]]
 name = "ark-bls12-381"
 version = "0.5.0"
@@ -1318,6 +1368,46 @@ dependencies = [
  "windows-link",
 ]

+[[package]]
+name = "clap"
+version = "4.5.41"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "be92d32e80243a54711e5d7ce823c35c41c9d929dc4ab58e1276f625841aadf9"
+dependencies = [
+ "clap_builder",
+ "clap_derive",
+]
+
+[[package]]
+name = "clap_builder"
+version = "4.5.41"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "707eab41e9622f9139419d573eca0900137718000c517d47da73045f54331c3d"
+dependencies = [
+ "anstream",
+ "anstyle",
+ "clap_lex",
+ "strsim",
+]
+
+[[package]]
+name = "clap_derive"
+version = "4.5.41"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "ef4f52386a59ca4c860f7393bcf8abd8dfd91ecccc0f774635ff68e92eeef491"
+dependencies = [
+ "heck",
+ "proc-macro2",
+ "quote",
+ "syn 2.0.104",
+]
+
+[[package]]
+name = "clap_lex"
+version = "0.7.5"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "b94f61472cee1439c0b966b47e3aca9ae07e45d070759512cd390ea2bebc6675"
+
 [[package]]
 name = "clocksource"
 version = "0.8.1"
@@ -1380,6 +1470,12 @@ dependencies = [
  "thiserror 1.0.69",
 ]

+[[package]]
+name = "colorchoice"
+version = "1.0.4"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "b05b61dc5112cbb17e4b6cd61790d9845d13888356391624cbe7e41efeac1e75"
+
 [[package]]
 name = "console"
 version = "0.15.11"
@@ -1500,6 +1596,7 @@ dependencies = [
  "alloy-consensus",
  "alloy-evm",
  "alloy-signer-local",
+ "clap",
  "core_affinity",
  "crossbeam-utils",
  "hex",
@@ -1519,6 +1616,7 @@ dependencies = [
  "thousands",
  "tokio",
  "tokio-metrics",
+ "toml",
 ]

 [[package]]
@@ -2494,6 +2592,12 @@ dependencies = [
  "serde",
 ]

+[[package]]
+name = "is_terminal_polyfill"
+version = "1.70.1"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "7943c866cc5cd64cbc25b2e01621d07fa8eb2a1a23160ee81ce38704e97b8ecf"
+
 [[package]]
 name = "itertools"
 version = "0.10.5"
@@ -2904,6 +3008,12 @@ version = "1.21.3"
 source = "registry+https://github.com/rust-lang/crates.io-index"
 checksum = "42f5e15c9953c5e4ccceeb2e7382a716482c34515315f7b03532b8b4e8393d2d"

+[[package]]
+name = "once_cell_polyfill"
+version = "1.70.1"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "a4895175b425cb1f87721b59f0f286c2092bd4af812243672510e1ac53e2e0ad"
+
 [[package]]
 name = "op-alloy-consensus"
 version = "0.18.11"
@@ -4015,6 +4125,15 @@ dependencies = [
  "serde",
 ]

+[[package]]
+name = "serde_spanned"
+version = "0.6.9"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "bf41e0cfaf7226dca15e8197172c295a782857fcb97fad1808a166870dee75a3"
+dependencies = [
+ "serde",
+]
+
 [[package]]
 name = "serde_urlencoded"
 version = "0.7.1"
@@ -4541,11 +4660,26 @@ dependencies = [
  "tokio",
 ]

+[[package]]
+name = "toml"
+version = "0.8.23"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "dc1beb996b9d83529a9e75c17a1686767d148d70663143c7854d8b4a09ced362"
+dependencies = [
+ "serde",
+ "serde_spanned",
+ "toml_datetime",
+ "toml_edit",
+]
+
 [[package]]
 name = "toml_datetime"
 version = "0.6.11"
 source = "registry+https://github.com/rust-lang/crates.io-index"
 checksum = "22cddaf88f4fbc13c51aebbf5f8eceb5c7c5a9da2ac40a13519eb5b0a0e8f11c"
+dependencies = [
+ "serde",
+]

 [[package]]
 name = "toml_edit"
@@ -4554,10 +4688,19 @@ source = "registry+https://github.com/rust-lang/crates.io-index"
 checksum = "41fe8c660ae4257887cf66394862d21dbca4a6ddd26f04a3560410406a2f819a"
 dependencies = [
  "indexmap 2.10.0",
+ "serde",
+ "serde_spanned",
  "toml_datetime",
+ "toml_write",
  "winnow",
 ]

+[[package]]
+name = "toml_write"
+version = "0.1.2"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "5d99f8c9a7727884afe522e9bd5edbfc91a3312b36a77b5fb8926e4c31a41801"
+
 [[package]]
 name = "tower"
 version = "0.5.2"
@@ -4723,6 +4866,12 @@ version = "1.0.4"
 source = "registry+https://github.com/rust-lang/crates.io-index"
 checksum = "b6c140620e7ffbb22c2dee59cafe6084a59b5ffc27a8859a5f0d494b5d52b6be"

+[[package]]
+name = "utf8parse"
+version = "0.2.2"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "06abde3611657adf66d383f00b093d7faecc7fa57071cce2578660c9f1010821"
+
 [[package]]
 name = "valuable"
 version = "0.1.1"
diff --git a/configs/default.toml b/configs/default.toml
new file mode 100644
index 0000000..46b037a
--- /dev/null
+++ b/configs/default.toml
@@ -0,0 +1,49 @@
+[network_worker]
+target_url = "http://127.0.0.1:8545"
+total_connections = 10_000 # Limited by # of ephemeral ports.
+
+batch_factor = 1
+
+error_sleep_ms = 100
+tx_queue_empty_sleep_ms = 25
+
+[tx_gen_worker]
+chain_id = 1337
+num_accounts = 25_000 # Limited by the number in the genesis (see generate_genesis_alloc.rs)
+
+token_contract_address = "0x2000000000000000000000000000000000000001"
+
+gas_price = 100000000000 # 100 gwei
+gas_limit = 100_000
+
+mnemonic = "test test test test test test test test test test test junk"
+
+recipient_distribution_factor = 20 # 1/20 of accounts receive transfers.
+max_transfer_amount = 10
+
+[rate_limiting]
+initial_ratelimit = 100 # txs/s
+
+# Rate limit thresholds: [(threshold, rate_limit)]
+# threshold = number of transactions sent
+# rate_limit = new transactions per second limit
+ratelimit_thresholds = [
+    [1_562, 250],      # NUM_ACCOUNTS / 16
+    [3_125, 500],      # NUM_ACCOUNTS / 8
+    [25_000, 1_000],   # NUM_ACCOUNTS
+    [50_000, 2_500],   # NUM_ACCOUNTS * 2
+    [100_000, 5_000],  # NUM_ACCOUNTS * 4
+    [200_000, 7_500],  # NUM_ACCOUNTS * 8
+    [250_000, 12_500], # NUM_ACCOUNTS * 10
+    [300_000, 15_000], # NUM_ACCOUNTS * 12
+]
+
+[workers]
+thread_pinning = true
+
+tx_gen_worker_percentage = 0.1
+network_worker_percentage = 0.9
+
+[reporters]
+tx_queue_report_interval_secs = 3
+network_stats_report_interval_secs = 3
\ No newline at end of file
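The config file path is the binary's single positional argument (see CliArgs in crescendo/src/main.rs further down). Assuming the package is named after its crescendo/ directory, a typical invocation would look roughly like:

    cargo run --release -p crescendo -- configs/default.toml

The package/binary name here is an assumption from the directory layout, not something this diff states.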
diff --git a/crescendo/Cargo.toml b/crescendo/Cargo.toml
index 446c622..d2631dd 100644
--- a/crescendo/Cargo.toml
+++ b/crescendo/Cargo.toml
@@ -26,4 +26,6 @@ serde_json = "1.0"
 hex = "0.4"
 simple-tqdm = { version = "0.2.0", features = ["rayon"] }
 rand = "0.9.1"
-ratelimit = "0.10"
\ No newline at end of file
+ratelimit = "0.10"
+clap = { version = "4.5", features = ["derive", "env"] }
+toml = "0.8"
\ No newline at end of file
diff --git a/crescendo/src/config.rs b/crescendo/src/config.rs
new file mode 100644
index 0000000..dd4f9f2
--- /dev/null
+++ b/crescendo/src/config.rs
@@ -0,0 +1,81 @@
+use std::path::PathBuf;
+use std::sync::OnceLock;
+
+use serde::{Deserialize, Serialize};
+
+/// Global configuration instance for the application.
+static CONFIG_INSTANCE: OnceLock<Config> = OnceLock::new();
+
+/// Initialize the global configuration instance.
+pub fn init(config: Config) {
+    CONFIG_INSTANCE.set(config).unwrap();
+}
+
+/// Gets the global configuration instance's value,
+/// blocking until initialized if necessary.
+pub fn get() -> &'static Config {
+    CONFIG_INSTANCE.wait()
+}
+
+#[derive(Debug, Clone, Serialize, Deserialize)]
+pub struct Config {
+    pub tx_gen_worker: TxGenWorkerConfig,
+    pub network_worker: NetworkWorkerConfig,
+    pub rate_limiting: RateLimitingConfig,
+
+    pub workers: WorkersConfig,
+    pub reporters: ReportersConfig,
+}
+
+impl Config {
+    pub fn from_file(path: &PathBuf) -> Result<Self, Box<dyn std::error::Error>> {
+        let config_str = std::fs::read_to_string(path)?;
+        let config: Config = toml::from_str(&config_str)?;
+        Ok(config)
+    }
+}
+
+#[derive(Debug, Clone, Serialize, Deserialize)]
+pub struct NetworkWorkerConfig {
+    pub target_url: String,
+    pub total_connections: u64,
+
+    pub batch_factor: usize,
+
+    pub error_sleep_ms: u64,
+    pub tx_queue_empty_sleep_ms: u64,
+}
+
+#[derive(Debug, Clone, Serialize, Deserialize)]
+pub struct TxGenWorkerConfig {
+    pub chain_id: u64,
+
+    pub mnemonic: String,
+    pub num_accounts: u32,
+
+    pub gas_price: u64,
+    pub gas_limit: u64,
+
+    pub token_contract_address: String,
+    pub recipient_distribution_factor: u32,
+    pub max_transfer_amount: u64,
+}
+
+#[derive(Debug, Clone, Serialize, Deserialize)]
+pub struct RateLimitingConfig {
+    pub initial_ratelimit: u64,
+    pub ratelimit_thresholds: Vec<(u32, u64)>,
+}
+
+#[derive(Debug, Clone, Serialize, Deserialize)]
+pub struct WorkersConfig {
+    pub thread_pinning: bool,
+    pub tx_gen_worker_percentage: f64,
+    pub network_worker_percentage: f64,
+}
+
+#[derive(Debug, Clone, Serialize, Deserialize)]
+pub struct ReportersConfig {
+    pub tx_queue_report_interval_secs: u64,
+    pub network_stats_report_interval_secs: u64,
+}
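A minimal usage sketch of the module above, assuming it is called from inside this crate; the function name and error handling are illustrative only:

    use std::path::PathBuf;

    use crate::config::{self, Config};

    fn load_config(path: &PathBuf) {
        // TOML arrays of two-element arrays, e.g. ratelimit_thresholds = [[1_562, 250], ...],
        // deserialize into the Vec<(u32, u64)> field via serde.
        let cfg = Config::from_file(path).expect("invalid config file");
        // Publish once; every later config::get() call returns this same instance.
        config::init(cfg);
        println!("initial rate limit: {} txs/s", config::get().rate_limiting.initial_ratelimit);
    }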
diff --git a/crescendo/src/main.rs b/crescendo/src/main.rs
index db8dc2e..d25cf44 100644
--- a/crescendo/src/main.rs
+++ b/crescendo/src/main.rs
@@ -1,15 +1,19 @@
 use std::future::pending;
+use std::path::PathBuf;
 use std::thread;
 use std::time::Duration;

+use clap::Parser;
 use core_affinity;
 use mimalloc::MiMalloc;

+mod config;
 mod network_stats;
 mod tx_queue;
 mod utils;
 mod workers;

+use crate::config::Config;
 use crate::network_stats::NETWORK_STATS;
 use crate::tx_queue::TX_QUEUE;
 use crate::workers::{DesireType, WorkerType};
@@ -19,14 +23,24 @@ use crate::workers::{DesireType, WorkerType};
 // writing. ~3.3% faster than jemalloc.
 static GLOBAL: MiMalloc = MiMalloc;

-// TODO: Configurable CLI args.
-const TOTAL_CONNECTIONS: u64 = 10_000; // This is limited by the amount of ephemeral ports available on the system.
-const THREAD_PINNING: bool = true;
-const TARGET_URL: &str = "http://127.0.0.1:8545";
+#[derive(Parser, Debug)]
+#[command(version, about, long_about = None)]
+struct CliArgs {
+    config: PathBuf,
+}

 #[tokio::main(flavor = "current_thread")]
 async fn main() {
-    if let Err(err) = utils::increase_nofile_limit(TOTAL_CONNECTIONS * 10) {
+    let args = CliArgs::parse();
+
+    println!("[~] Loading config from {}...", args.config.display());
+    config::init(if args.config.exists() {
+        Config::from_file(&args.config).unwrap_or_else(|e| panic!("[!] Failed to load config file: {e:?}"))
+    } else {
+        panic!("[!] Config file not found: {}", args.config.display());
+    });
+
+    if let Err(err) = utils::increase_nofile_limit(config::get().network_worker.total_connections * 10) {
         println!("[!] Failed to increase file descriptor limit: {err}.");
     }

@@ -37,16 +51,20 @@ async fn main() {
     rayon::ThreadPoolBuilder::new().num_threads(core_ids.len()).build_global().unwrap();

     // Pin the tokio runtime to a core (if enabled).
-    utils::maybe_pin_thread(core_ids.pop().unwrap(), THREAD_PINNING);
+    utils::maybe_pin_thread(core_ids.pop().unwrap());

     // Given our desired breakdown of workers, translate this into actual numbers of workers to spawn.
     let (workers, worker_counts) = workers::assign_workers(
         core_ids, // Doesn't include the main runtime core.
-        vec![(WorkerType::TxGen, DesireType::Percentage(0.1)), (WorkerType::Network, DesireType::Percentage(0.9))],
-        THREAD_PINNING, // Only log core ranges if thread pinning is actually enabled.
+        vec![
+            (WorkerType::TxGen, DesireType::Percentage(config::get().workers.tx_gen_worker_percentage)),
+            (WorkerType::Network, DesireType::Percentage(config::get().workers.network_worker_percentage)),
+        ],
+        config::get().workers.thread_pinning, // Only log core ranges if thread pinning is actually enabled.
     );

-    let connections_per_network_worker = TOTAL_CONNECTIONS / worker_counts[&WorkerType::Network];
+    let connections_per_network_worker =
+        config::get().network_worker.total_connections / worker_counts[&WorkerType::Network];
     println!("[*] Connections per network worker: {}", connections_per_network_worker);

     // TODO: Having the assign_workers function do this would be cleaner.
@@ -60,20 +78,19 @@
         match worker_type {
             WorkerType::TxGen => {
                 thread::spawn(move || {
-                    utils::maybe_pin_thread(core_id, THREAD_PINNING);
+                    utils::maybe_pin_thread(core_id);
                     workers::tx_gen_worker(tx_gen_worker_id);
                 });
                 tx_gen_worker_id += 1;
             }
             WorkerType::Network => {
                 thread::spawn(move || {
-                    utils::maybe_pin_thread(core_id, THREAD_PINNING);
+                    utils::maybe_pin_thread(core_id);
                     let rt = tokio::runtime::Builder::new_current_thread().enable_all().build().unwrap();
                     rt.block_on(async {
                         for i in 0..connections_per_network_worker {
                             tokio::spawn(workers::network_worker(
-                                TARGET_URL,
                                 (network_worker_id * connections_per_network_worker + i) as usize,
                             ));
                         }

@@ -88,8 +105,10 @@

     println!("[*] Starting reporters...");
     // Start reporters.
-    tokio::spawn(TX_QUEUE.start_reporter(Duration::from_secs(3)));
-    tokio::spawn(NETWORK_STATS.start_reporter(Duration::from_secs(3)))
-        .await // Keep the main thread alive forever.
-        .unwrap();
+    tokio::spawn(TX_QUEUE.start_reporter(Duration::from_secs(config::get().reporters.tx_queue_report_interval_secs)));
+    tokio::spawn(
+        NETWORK_STATS.start_reporter(Duration::from_secs(config::get().reporters.network_stats_report_interval_secs)),
+    )
+    .await // Keep the main thread alive forever.
+    .unwrap();
 }
diff --git a/crescendo/src/tx_queue.rs b/crescendo/src/tx_queue.rs
index 77bfc78..500cdb8 100644
--- a/crescendo/src/tx_queue.rs
+++ b/crescendo/src/tx_queue.rs
@@ -6,24 +6,7 @@ use std::time::Duration;
 use ratelimit::Ratelimiter;
 use thousands::Separable;

-use crate::workers::NUM_ACCOUNTS;
-
-const INITIAL_RATELIMIT: u64 = 100;
-#[rustfmt::skip]
-const RATELIMIT_THRESHOLDS: [(u32, u64); 8] = [
-    (NUM_ACCOUNTS / 16, 250),
-    (NUM_ACCOUNTS / 8,  500),
-    (NUM_ACCOUNTS,      1_000),
-    (NUM_ACCOUNTS * 2,  2_500),
-    (NUM_ACCOUNTS * 4,  5_000),
-    // Avoid ramping up to max TPS before NUM_ACCOUNTS * ~5,
-    // as we want to make sure most storage slots have been
-    // touched + cached before we hit the max TPS. Why 5? See:
-    // https://grok.com/share/bGVnYWN5_e508360c-2313-4d31-8098-6d892f5bf1aa
-    (NUM_ACCOUNTS * 8,  7_500),
-    (NUM_ACCOUNTS * 10, 12_500),
-    (NUM_ACCOUNTS * 12, 15_000),
-]; // Note: This must be sorted in ascending order of threshold!
+use crate::config;

 pub struct TxQueue {
     // TODO: RwLock? Natively concurrent deque?
@@ -35,14 +18,12 @@

 impl TxQueue {
     fn new() -> Self {
-        // Modulates rate at which txs can be popped from the queue.
-        let rate_limiter = Ratelimiter::builder(
-            INITIAL_RATELIMIT,      // Refill amount.
-            Duration::from_secs(1), // Refill rate.
-        )
-        .max_tokens(INITIAL_RATELIMIT) // Burst limit.
-        .build()
-        .unwrap();
+        let initial_ratelimit = config::get().rate_limiting.initial_ratelimit;
+
+        let rate_limiter = Ratelimiter::builder(initial_ratelimit, Duration::from_secs(1))
+            .max_tokens(initial_ratelimit)
+            .build()
+            .unwrap();

         Self {
             queue: Mutex::new(VecDeque::new()),
@@ -94,12 +75,15 @@
             ((current_queue_len.saturating_sub(last_queue_len)) as u64) / measurement_interval.as_secs();

         // Adjust rate limit based on total popped transactions and thresholds.
-        let new_rate_limit = RATELIMIT_THRESHOLDS
+        let rate_config = &config::get().rate_limiting;
+        let new_rate_limit = rate_config
+            .ratelimit_thresholds
             .iter()
             .rev()
             .find(|(threshold, _)| total_popped >= (*threshold as u64))
             .map(|(_, rate_limit)| *rate_limit)
-            .unwrap_or(INITIAL_RATELIMIT);
+            .unwrap_or(rate_config.initial_ratelimit);
+
         if self.rate_limiter.refill_amount() != new_rate_limit {
             println!("[+] Adjusting rate limit to {} txs/s", new_rate_limit.separate_with_commas());
             self.rate_limiter.set_max_tokens(new_rate_limit).unwrap(); // Burst limit must be set first.
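A worked example of the threshold lookup above, using the values from configs/default.toml: the list is scanned in reverse for the first entry whose threshold is <= total_popped, which is why the thresholds must stay sorted in ascending order (the constraint the removed constant's comment spelled out). The snippet below is illustrative, not part of the crate:

    fn main() {
        // First four default thresholds; total_popped = 30_000 falls between 25_000 and 50_000.
        let thresholds: Vec<(u32, u64)> = vec![(1_562, 250), (3_125, 500), (25_000, 1_000), (50_000, 2_500)];
        let total_popped: u64 = 30_000;
        let rate = thresholds
            .iter()
            .rev()
            .find(|(threshold, _)| total_popped >= (*threshold as u64))
            .map(|(_, rate_limit)| *rate_limit)
            .unwrap_or(100); // initial_ratelimit
        assert_eq!(rate, 1_000);
    }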
diff --git a/crescendo/src/utils.rs b/crescendo/src/utils.rs
index da2b677..2093573 100644
--- a/crescendo/src/utils.rs
+++ b/crescendo/src/utils.rs
@@ -3,6 +3,8 @@ use std::io;
 use core_affinity::CoreId;
 use rlimit::Resource;

+use crate::config;
+
 /// Increase the file descriptor limit to the given minimum.
 ///
 /// Panics if the hard limit is too low, otherwise tries to increase.
@@ -26,8 +28,8 @@
 /// Pin the current thread to the given core ID if enabled.
 ///
 /// Panics if the thread fails to pin.
-pub fn maybe_pin_thread(core_id: CoreId, enable_thread_pinning: bool) {
-    if !enable_thread_pinning {
+pub fn maybe_pin_thread(core_id: CoreId) {
+    if !config::get().workers.thread_pinning {
         return;
     }

diff --git a/crescendo/src/workers.rs b/crescendo/src/workers.rs
index 530e747..93337be 100644
--- a/crescendo/src/workers.rs
+++ b/crescendo/src/workers.rs
@@ -6,7 +6,7 @@
 mod network;
 mod tx_gen;

 pub use network::network_worker;
-pub use tx_gen::{tx_gen_worker, NUM_ACCOUNTS};
+pub use tx_gen::tx_gen_worker;

 use crate::utils::format_ranges;
diff --git a/crescendo/src/workers/network.rs b/crescendo/src/workers/network.rs
index a0d00d5..c90a961 100644
--- a/crescendo/src/workers/network.rs
+++ b/crescendo/src/workers/network.rs
@@ -9,25 +9,26 @@ use hyper_util::client::legacy::Client;
 use hyper_util::rt::TokioExecutor;
 use thousands::Separable;

+use crate::config;
 use crate::network_stats::NETWORK_STATS;
 use crate::tx_queue::TX_QUEUE;
-use crate::TOTAL_CONNECTIONS;

-const BATCH_FACTOR: usize = 1; // How many txs to send in a single request.
-
-pub async fn network_worker(url: &str, worker_id: usize) {
-    let mut connector = HttpConnector::new();
-    connector.set_nodelay(true);
-    connector.set_keepalive(Some(Duration::from_secs(60)));
+pub async fn network_worker(worker_id: usize) {
+    let config = &config::get().network_worker;

     let client: Client<_, Full<Bytes>> = Client::builder(TokioExecutor::new())
         .pool_idle_timeout(Duration::from_secs(90))
         .pool_max_idle_per_host(100)
         .retry_canceled_requests(true)
-        .build(connector);
+        .build({
+            let mut connector = HttpConnector::new();
+            connector.set_nodelay(true);
+            connector.set_keepalive(Some(Duration::from_secs(60)));
+            connector
+        });

     loop {
-        if let Some(txs) = TX_QUEUE.pop_at_most(BATCH_FACTOR).await {
+        if let Some(txs) = TX_QUEUE.pop_at_most(config.batch_factor).await {
             let json_body = format!(
                 "[{}]",
                 txs.iter()
@@ -45,7 +46,7 @@

             let req = Request::builder()
                 .method("POST")
-                .uri(url)
+                .uri(&config.target_url)
                 .header("Content-Type", "application/json")
                 .body(Full::new(Bytes::from(json_body.into_bytes())))
                 .unwrap();
@@ -56,7 +57,7 @@
                 if worker_id == 0 {
                     let duration = start_time.elapsed();
                     let implied_total_rps =
-                        (txs.len() as f64 / duration.as_secs_f64()) * (TOTAL_CONNECTIONS as f64);
+                        (txs.len() as f64 / duration.as_secs_f64()) * (config.total_connections as f64);
                     println!(
                         "[~] Worker {} request duration: {:.1?} ({} implied total RPS)",
                         worker_id,
@@ -82,7 +83,7 @@
                     Err(e) => {
                         eprintln!("[!] Failed to read response body: {:?}", e);
                         NETWORK_STATS.inc_errors_by(txs.len());
-                        tokio::time::sleep(Duration::from_millis(100)).await;
+                        tokio::time::sleep(Duration::from_millis(config.error_sleep_ms)).await;
                     }
                 }
             } else {
@@ -99,7 +100,7 @@
             }
         } else {
             // Sleep for a bit while the tx queue repopulates.
-            tokio::time::sleep(Duration::from_millis(25)).await;
+            tokio::time::sleep(Duration::from_millis(config.tx_queue_empty_sleep_ms)).await;
         }
     }
 }
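One knob from the hunk above worth a note: batch_factor is how many queued transactions get packed into a single HTTP request body (the format! above joins them into one JSON array). With the default of 1, every transaction costs one request; a hypothetical override such as

    [network_worker]
    batch_factor = 4  # illustrative value, not a tested default

would send the same transactions in roughly a quarter of the requests, at the cost of larger request bodies.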
diff --git a/crescendo/src/workers/tx_gen.rs b/crescendo/src/workers/tx_gen.rs
index 0371127..16fb55b 100644
--- a/crescendo/src/workers/tx_gen.rs
+++ b/crescendo/src/workers/tx_gen.rs
@@ -3,7 +3,7 @@ use std::sync::{LazyLock, Mutex};
 use std::time::Instant;

 use alloy::network::TxSignerSync;
-use alloy::primitives::{address, TxKind, U256};
+use alloy::primitives::{Address, TxKind, U256};
 use alloy::sol;
 use alloy::sol_types::SolCall;
 use alloy_consensus::{SignableTransaction, TxLegacy};
@@ -13,14 +12,12 @@ use rand::Rng;
 use rayon::prelude::*;
 use thousands::Separable;

+use crate::config;
 use crate::tx_queue::TX_QUEUE;

-pub const CHAIN_ID: u64 = 1337;
-pub const NUM_ACCOUNTS: u32 = 25_000; // Limited by the number in the genesis (see bin/generate_genesis_alloc.rs)
-
 static NONCE_MAP: LazyLock<Mutex<HashMap<u32, u64>>> = LazyLock::new(|| {
-    let mut map = HashMap::with_capacity(NUM_ACCOUNTS as usize);
-    for i in 0..NUM_ACCOUNTS {
+    let mut map = HashMap::with_capacity(config::get().tx_gen_worker.num_accounts as usize);
+    for i in 0..config::get().tx_gen_worker.num_accounts {
         map.insert(i, 0);
     }
     Mutex::new(map)
@@ -28,19 +26,13 @@ static NONCE_MAP: LazyLock<Mutex<HashMap<u32, u64>>> = LazyLock::new(|| {

 static SIGNER_LIST: LazyLock<Vec<PrivateKeySigner>> = LazyLock::new(|| {
     let start = Instant::now();
-    let list: Vec<PrivateKeySigner> = (0..NUM_ACCOUNTS)
+    let config = &config::get().tx_gen_worker;
+    let list: Vec<PrivateKeySigner> = (0..config.num_accounts)
         .into_par_iter()
-        .map(|i| {
-            MnemonicBuilder::<English>::default()
-                .phrase("test test test test test test test test test test test junk")
-                .index(i)
-                .unwrap()
-                .build()
-                .unwrap()
-        })
+        .map(|i| MnemonicBuilder::<English>::default().phrase(&config.mnemonic).index(i).unwrap().build().unwrap())
         .collect();
     let duration = start.elapsed();
-    println!("[+] Initalized signer list of length {} in {:.1?}", NUM_ACCOUNTS.separate_with_commas(), duration);
+    println!("[+] Initialized signer list of length {} in {:.1?}", config.num_accounts.separate_with_commas(), duration);
     list
 });

@@ -54,7 +46,9 @@ pub fn tx_gen_worker(_worker_id: u32) {
     let mut rng = rand::rng();

     loop {
-        let account_index = rng.random_range(0..NUM_ACCOUNTS);
+        let config = &config::get().tx_gen_worker;
+
+        let account_index = rng.random_range(0..config.num_accounts); // Account we'll be sending from.

         // Get and increment nonce atomically.
         let nonce = {
@@ -66,22 +60,25 @@ pub fn tx_gen_worker(_worker_id: u32) {

         let (signer, recipient) = (
             &SIGNER_LIST[account_index as usize],
-            // Send to 1/20th of the accounts, so recipients are pareto-principle distributed.
-            SIGNER_LIST[rng.random_range(0..(NUM_ACCOUNTS / 20)) as usize].address(),
+            SIGNER_LIST[rng.random_range(0..(config.num_accounts / config.recipient_distribution_factor)) as usize] // Send to 1/Nth of the accounts.
+                .address(),
         );

         let tx = sign_and_encode_tx(
             signer,
             TxLegacy {
-                chain_id: Some(CHAIN_ID),
+                chain_id: Some(config.chain_id),
                 nonce,
-                gas_price: 100_000_000_000, // 100 gwei
-                gas_limit: 100_000,         // 100k gas limit
-                to: TxKind::Call(address!("0x2000000000000000000000000000000000000001")),
+                gas_price: config.gas_price as u128,
+                gas_limit: config.gas_limit,
+                to: TxKind::Call(config.token_contract_address.parse::<Address>().unwrap()),
                 value: U256::ZERO,
-                input: ERC20::transferCall { to: recipient, amount: U256::from(rng.random_range(1..=10)) }
-                    .abi_encode()
-                    .into(),
+                input: ERC20::transferCall {
+                    to: recipient,
+                    amount: U256::from(rng.random_range(1..=config.max_transfer_amount)),
+                }
+                .abi_encode()
+                .into(),
             },
         );
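To make the recipient skew concrete with the defaults: recipients are drawn from indices 0..(num_accounts / recipient_distribution_factor) = 0..(25_000 / 20) = 0..1_250, so every transfer lands on the first 1,250 accounts (5% of the sender set), which is the Pareto-style distribution the removed comment described. Setting recipient_distribution_factor = 1 would instead spread transfers uniformly over all accounts.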