From f34447fc17145f96bef13156404005043a478280 Mon Sep 17 00:00:00 2001 From: Aliaksandr Tsurko Date: Tue, 10 Mar 2026 13:42:34 +0100 Subject: [PATCH 01/13] Add build-test-clone subcommand --- Cargo.lock | 26 ++ node/Cargo.toml | 4 +- node/src/cli.rs | 141 +++++++ node/src/clone_spec.rs | 473 ++++++++++++++++++++++ node/src/command.rs | 8 +- node/src/conditional_evm_block_import.rs | 12 +- node/src/consensus/hybrid_import_queue.rs | 9 +- node/src/lib.rs | 2 + node/src/main.rs | 2 + node/src/sync_options.rs | 13 + 10 files changed, 684 insertions(+), 6 deletions(-) create mode 100644 node/src/clone_spec.rs create mode 100644 node/src/sync_options.rs diff --git a/Cargo.lock b/Cargo.lock index e1458ac23c..62ec1464d7 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -6662,6 +6662,7 @@ checksum = "37b26c20e2178756451cfeb0661fb74c47dd5988cb7e3939de7e9241fd604d42" dependencies = [ "jsonrpsee-client-transport", "jsonrpsee-core", + "jsonrpsee-http-client", "jsonrpsee-proc-macros", "jsonrpsee-server", "jsonrpsee-types", @@ -6719,6 +6720,31 @@ dependencies = [ "tracing", ] +[[package]] +name = "jsonrpsee-http-client" +version = "0.24.9" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "c872b6c9961a4ccc543e321bb5b89f6b2d2c7fe8b61906918273a3333c95400c" +dependencies = [ + "async-trait", + "base64 0.22.1", + "http-body 1.0.1", + "hyper 1.7.0", + "hyper-rustls", + "hyper-util", + "jsonrpsee-core", + "jsonrpsee-types", + "rustls", + "rustls-platform-verifier", + "serde", + "serde_json", + "thiserror 1.0.69", + "tokio", + "tower", + "tracing", + "url", +] + [[package]] name = "jsonrpsee-proc-macros" version = "0.24.9" diff --git a/node/Cargo.toml b/node/Cargo.toml index 2766893452..32de891091 100644 --- a/node/Cargo.toml +++ b/node/Cargo.toml @@ -26,7 +26,7 @@ clap = { workspace = true, features = ["derive"] } futures = { workspace = true, features = ["thread-pool"] } serde = { workspace = true, features = ["derive"] } hex.workspace = true -tokio = { workspace = true, features = ["time"] } +tokio = { workspace = true, features = ["time", "rt", "net"] } # Storage import memmap2.workspace = true @@ -80,7 +80,7 @@ polkadot-sdk = { workspace = true, features = [ ] } # These dependencies are used for the subtensor's RPCs -jsonrpsee = { workspace = true, features = ["server"] } +jsonrpsee = { workspace = true, features = ["server", "http-client"] } sc-rpc.workspace = true sp-api.workspace = true sc-rpc-api.workspace = true diff --git a/node/src/cli.rs b/node/src/cli.rs index e46c71857b..175a9b41a6 100644 --- a/node/src/cli.rs +++ b/node/src/cli.rs @@ -8,6 +8,8 @@ use node_subtensor_runtime::opaque::Block; use sc_cli::RunCmd; use sc_consensus::BasicQueue; use sc_service::{Configuration, TaskManager}; +use std::fmt; +use std::path::PathBuf; use std::sync::Arc; #[derive(Debug, clap::Parser)] @@ -33,6 +35,12 @@ pub struct Cli { #[command(flatten)] pub eth: EthConfiguration, + + /// Skip creating historical gap-backfill during initial/catch-up sync. + /// + /// This reduces sync time/disk usage but historical block data may be incomplete. + #[arg(long, default_value_t = false)] + pub skip_history_backfill: bool, } #[allow(clippy::large_enum_variant)] @@ -70,6 +78,139 @@ pub enum Subcommand { // Db meta columns information. ChainInfo(sc_cli::ChainInfoCmd), + + // Build a patched test clone chainspec from synced network state. 
+ #[command(name = "build-test-clone")] + CloneState(CloneStateCmd), +} + +/// Build a patched clone chainspec by syncing state, exporting raw state, and applying test patch. +#[derive(Debug, Clone, clap::Args)] +pub struct CloneStateCmd { + /// Chain spec identifier or path (same semantics as `--chain`). + #[arg(long, value_name = "CHAIN")] + pub chain: String, + + /// Base path used for syncing and state export. + #[arg(long, value_name = "PATH")] + pub base_path: PathBuf, + + /// Output file path for the final patched chainspec JSON. + #[arg(long, value_name = "FILE")] + pub output: PathBuf, + + /// Sync mode for the temporary sync node. + #[arg(long, value_enum, default_value_t = CloneSyncMode::Warp)] + pub sync: CloneSyncMode, + + /// Database backend for the temporary sync/export node. + #[arg(long, value_enum, default_value_t = CloneDatabase::ParityDb)] + pub database: CloneDatabase, + + /// Whether to keep or skip history backfill after state sync. + #[arg(long, value_enum, default_value_t = CloneHistoryBackfill::Skip)] + pub history_backfill: CloneHistoryBackfill, + + /// RPC port used by the temporary sync node. + #[arg(long, default_value_t = 9966)] + pub rpc_port: u16, + + /// P2P port used by the temporary sync node. + #[arg(long, default_value_t = 30466)] + pub port: u16, + + /// Maximum time to wait for sync completion. + #[arg(long, default_value_t = 7200)] + pub sync_timeout_sec: u64, + + /// Accept sync completion when current is within this many blocks of highest. + #[arg(long, default_value_t = 8)] + pub sync_lag_blocks: u64, + + /// Optional bootnodes for the sync step. Repeatable. + #[arg(long, value_name = "BOOTNODE")] + pub bootnodes: Vec, + + /// Include Alice in patched validator authorities. + #[arg(long, default_value_t = false)] + pub alice: bool, + + /// Include Bob in patched validator authorities. + #[arg(long, default_value_t = false)] + pub bob: bool, + + /// Include Charlie in patched validator authorities. + #[arg(long, default_value_t = false)] + pub charlie: bool, +} + +#[derive(Debug, Clone, Copy, clap::ValueEnum)] +pub enum CloneSyncMode { + Warp, + Full, +} + +impl AsRef for CloneSyncMode { + fn as_ref(&self) -> &str { + match self { + CloneSyncMode::Warp => "warp", + CloneSyncMode::Full => "full", + } + } +} + +impl fmt::Display for CloneSyncMode { + fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { + f.write_str(self.as_ref()) + } +} + +#[derive(Debug, Clone, Copy, clap::ValueEnum)] +pub enum CloneDatabase { + #[value(name = "auto")] + Auto, + #[value(name = "rocksdb")] + RocksDb, + #[value(name = "paritydb")] + ParityDb, +} + +impl AsRef for CloneDatabase { + fn as_ref(&self) -> &str { + match self { + CloneDatabase::Auto => "auto", + CloneDatabase::RocksDb => "rocksdb", + CloneDatabase::ParityDb => "paritydb", + } + } +} + +impl fmt::Display for CloneDatabase { + fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { + f.write_str(self.as_ref()) + } +} + +#[derive(Debug, Clone, Copy, clap::ValueEnum, Default)] +pub enum CloneHistoryBackfill { + Keep, + #[default] + Skip, +} + +impl AsRef for CloneHistoryBackfill { + fn as_ref(&self) -> &str { + match self { + CloneHistoryBackfill::Keep => "keep", + CloneHistoryBackfill::Skip => "skip", + } + } +} + +impl fmt::Display for CloneHistoryBackfill { + fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { + f.write_str(self.as_ref()) + } } /// Available Sealing methods. 
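For orientation, an invocation of the new subcommand using only the flags defined above might look roughly like the following (the binary name and chain id are illustrative, not taken from this patch):

    node-subtensor build-test-clone \
        --chain finney \
        --base-path /tmp/clone-sync \
        --output ./finney-test-clone.json \
        --sync warp \
        --database paritydb \
        --alice --bob

The emitted chainspec is marked chainType Local with the selected dev authorities and the Sudo key patched in, so it can then be started locally with the usual run flags, e.g. node-subtensor --chain ./finney-test-clone.json --alice --tmp.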
diff --git a/node/src/clone_spec.rs b/node/src/clone_spec.rs new file mode 100644 index 0000000000..5e51f26e66 --- /dev/null +++ b/node/src/clone_spec.rs @@ -0,0 +1,473 @@ +use std::collections::VecDeque; +use std::fs::{self, File}; +use std::io::{BufReader, BufWriter}; +use std::path::{Path, PathBuf}; +use std::process::{Child, Command, Stdio}; +use std::time::{Duration, Instant, SystemTime, UNIX_EPOCH}; + +use jsonrpsee::{ + core::client::ClientT, + http_client::{HttpClient, HttpClientBuilder}, + rpc_params, +}; +use serde_json::{Value, json}; + +use crate::cli::{CloneHistoryBackfill, CloneStateCmd}; + +type CloneResult = Result>; + +const RPC_POLL_INTERVAL: Duration = Duration::from_secs(2); + +#[derive(Clone, Copy)] +struct Validator { + name: &'static str, + sr25519_hex: &'static str, + ed25519_hex: &'static str, +} + +static VALIDATORS: &[Validator] = &[ + Validator { + name: "alice", + sr25519_hex: "d43593c715fdd31c61141abd04a99fd6822c8558854ccde39a5684e7a56da27d", + ed25519_hex: "88dc3417d5058ec4b4503e0c12ea1a0a89be200fe98922423d4334014fa6b0ee", + }, + Validator { + name: "bob", + sr25519_hex: "8eaf04151687736326c9fea17e25fc5287613693c912909cb226aa4794f26a48", + ed25519_hex: "d17c2d7823ebf260fd138f2d7e27d114c0145d968b5ff5006125f2414fadae69", + }, + Validator { + name: "charlie", + sr25519_hex: "90b5ab205c6974c9ea841be688864633dc9ca8a357843eeacf2314649965fe22", + ed25519_hex: "439660b36c6c03afafca027b910b4fecf99801834c62a5e6006f27d978de234f", + }, +]; + +/// Execute `build-test-clone`: sync network state, export raw chainspec, apply clone patch. +pub fn run(cmd: &CloneStateCmd) -> sc_cli::Result<()> { + let runtime = tokio::runtime::Builder::new_current_thread() + .enable_io() + .enable_time() + .build() + .map_err(|err| sc_cli::Error::Application(Box::new(err)))?; + + runtime + .block_on(async_run(cmd)) + .map_err(sc_cli::Error::Application) +} + +async fn async_run(cmd: &CloneStateCmd) -> CloneResult<()> { + let validators = selected_validators(cmd); + let selected_names = validators + .iter() + .map(|v| v.name) + .collect::>() + .join(","); + + fs::create_dir_all(&cmd.base_path)?; + + if let Some(parent) = cmd.output.parent() { + fs::create_dir_all(parent)?; + } + + let current_exe = std::env::current_exe()?; + let database_arg = cmd.database.as_ref(); + let sync_arg = cmd.sync.as_ref(); + let skip_backfill = matches!(cmd.history_backfill, CloneHistoryBackfill::Skip); + + log::info!("build-test-clone: validators={selected_names}"); + + let mut sync_args = vec![ + "--base-path".to_string(), + cmd.base_path.display().to_string(), + "--chain".to_string(), + cmd.chain.clone(), + "--sync".to_string(), + sync_arg.to_string(), + "--database".to_string(), + database_arg.to_string(), + "--rpc-port".to_string(), + cmd.rpc_port.to_string(), + "--port".to_string(), + cmd.port.to_string(), + "--rpc-methods".to_string(), + "unsafe".to_string(), + "--no-telemetry".to_string(), + "--no-prometheus".to_string(), + "--no-mdns".to_string(), + "--name".to_string(), + "build-test-clone-sync".to_string(), + ]; + + for bootnode in &cmd.bootnodes { + sync_args.push("--bootnodes".to_string()); + sync_args.push(bootnode.clone()); + } + + if skip_backfill { + sync_args.push("--skip-history-backfill".to_string()); + } + + log::info!("build-test-clone: starting sync node"); + + let mut sync_child = Command::new(¤t_exe) + .args(&sync_args) + .stdin(Stdio::null()) + .stdout(Stdio::inherit()) + .stderr(Stdio::inherit()) + .spawn()?; + + let sync_wait_result = wait_for_sync_completion(&mut sync_child, cmd).await; 
+ let stop_result = stop_child_gracefully(&mut sync_child).await; + + sync_wait_result?; + stop_result?; + + let raw_tmp = temp_raw_path()?; + + log::info!("build-test-clone: exporting raw state"); + + export_raw_state(¤t_exe, cmd, database_arg, &raw_tmp)?; + + log::info!("build-test-clone: applying clone patch"); + + patch_raw_chainspec_file(&raw_tmp, &cmd.output, &validators)?; + + if let Err(err) = fs::remove_file(&raw_tmp) { + log::warn!( + "build-test-clone: warning: failed to remove temp file {}: {err}", + raw_tmp.display() + ); + } + + log::info!("build-test-clone: wrote {}", cmd.output.display()); + + Ok(()) +} + +async fn wait_for_sync_completion(sync_child: &mut Child, cmd: &CloneStateCmd) -> CloneResult<()> { + let timeout = Duration::from_secs(cmd.sync_timeout_sec); + let start = Instant::now(); + let mut stable_ready_checks = 0u8; + let rpc_url = format!("http://127.0.0.1:{}", cmd.rpc_port); + let rpc_client = HttpClientBuilder::default() + .request_timeout(Duration::from_secs(10)) + .build(rpc_url)?; + + log::info!( + "build-test-clone: waiting for sync completion (timeout={}s)", + cmd.sync_timeout_sec + ); + + while let None = sync_child + .try_wait() + .map_err(|err| std::io::Error::other(format!("Failed to poll sync node process: {err}")))? + { + if start.elapsed() > timeout { + return Err(format!( + "Timed out waiting for sync completion after {} seconds", + cmd.sync_timeout_sec + ) + .into()); + } + + match query_sync_status(&rpc_client).await { + Ok(status) => { + let is_ready = !status.is_syncing + && status.peers > 0 + && status.current > 0 + && status.highest > 0 + && status.current.saturating_add(cmd.sync_lag_blocks) >= status.highest; + + if is_ready { + stable_ready_checks = stable_ready_checks.saturating_add(1); + if stable_ready_checks >= 3 { + log::info!("build-test-clone: sync target reached"); + return Ok(()); + } + } else { + stable_ready_checks = 0; + } + } + Err(_) => { + // RPC may not be ready yet. + stable_ready_checks = 0; + } + } + + tokio::time::sleep(RPC_POLL_INTERVAL).await; + } + + let status = sync_child + .try_wait() + .map_err(|err| std::io::Error::other(format!("Failed to poll sync node process: {err}")))? 
+ .ok_or_else(|| std::io::Error::other("Sync node status became unavailable"))?; + + Err(format!("Sync node exited unexpectedly: {status}").into()) +} + +async fn stop_child_gracefully(child: &mut Child) -> CloneResult<()> { + if child.try_wait()?.is_some() { + return Ok(()); + } + + Command::new("kill") + .arg("-INT") + .arg(child.id().to_string()) + .status()?; + + for _ in 0..30 { + if child.try_wait()?.is_some() { + return Ok(()); + } + tokio::time::sleep(Duration::from_secs(1)).await; + } + + child.kill()?; + + child.wait()?; + + Ok(()) +} + +fn export_raw_state( + current_exe: &Path, + cmd: &CloneStateCmd, + database_arg: &str, + raw_tmp: &Path, +) -> CloneResult<()> { + let stdout = File::create(raw_tmp)?; + let status = Command::new(current_exe) + .args([ + "export-state", + "--chain", + &cmd.chain, + "--base-path", + &cmd.base_path.display().to_string(), + "--database", + database_arg, + ]) + .stdin(Stdio::null()) + .stdout(Stdio::from(stdout)) + .stderr(Stdio::inherit()) + .status()?; + + if !status.success() { + return Err(format!("export-state failed with status {status}").into()); + } + + Ok(()) +} + +struct SyncStatus { + current: u64, + highest: u64, + peers: u64, + is_syncing: bool, +} + +async fn query_sync_status(rpc_client: &HttpClient) -> CloneResult { + let sync = rpc_call(rpc_client, "system_syncState").await?; + let health = rpc_call(rpc_client, "system_health").await?; + + let current = parse_u64_field(&sync, "currentBlock") + .ok_or_else(|| "system_syncState.currentBlock missing".to_string())?; + let highest = parse_u64_field(&sync, "highestBlock") + .ok_or_else(|| "system_syncState.highestBlock missing".to_string())?; + let peers = parse_u64_field(&health, "peers") + .ok_or_else(|| "system_health.peers missing".to_string())?; + let is_syncing = health + .get("isSyncing") + .and_then(Value::as_bool) + .ok_or_else(|| "system_health.isSyncing missing".to_string())?; + + Ok(SyncStatus { + current, + highest, + peers, + is_syncing, + }) +} + +async fn rpc_call(rpc_client: &HttpClient, method: &str) -> CloneResult { + rpc_client + .request(method, rpc_params![]) + .await + .map_err(Into::into) +} + +fn parse_u64_field(value: &Value, field: &str) -> Option { + let field_value = value.get(field)?; + + if let Some(n) = field_value.as_u64() { + return Some(n); + } + + let s = field_value.as_str()?; + + s.parse::() + .ok() + .or_else(|| u64::from_str_radix(s.trim_start_matches("0x"), 16).ok()) +} + +fn temp_raw_path() -> CloneResult { + let epoch = SystemTime::now().duration_since(UNIX_EPOCH)?.as_secs(); + Ok(std::env::temp_dir().join(format!("subtensor-clone-export-{epoch}.json"))) +} + +fn selected_validators(cmd: &CloneStateCmd) -> Vec { + let explicit = cmd.alice || cmd.bob || cmd.charlie; + let mut selected = Vec::new(); + + if explicit { + if cmd.alice { + selected.push(VALIDATORS[0]); + } + if cmd.bob { + selected.push(VALIDATORS[1]); + } + if cmd.charlie { + selected.push(VALIDATORS[2]); + } + } else { + selected.push(VALIDATORS[0]); // only alice be default + } + + selected +} + +fn patch_raw_chainspec_file( + input: &Path, + output: &Path, + validators: &[Validator], +) -> CloneResult<()> { + let file = File::open(input)?; + let reader = BufReader::with_capacity(64 * 1024 * 1024, file); + let mut spec: Value = serde_json::from_reader(reader)?; + patch_raw_spec(&mut spec, validators)?; + + let out = File::create(output)?; + let writer = BufWriter::with_capacity(64 * 1024 * 1024, out); + serde_json::to_writer(writer, &spec)?; + Ok(()) +} + +fn patch_raw_spec(spec: 
&mut Value, validators: &[Validator]) -> CloneResult<()> { + let top = spec + .pointer_mut("/genesis/raw/top") + .and_then(Value::as_object_mut) + .ok_or_else(|| "missing or invalid genesis.raw.top".to_string())?; + + let aura_keys: Vec> = validators + .iter() + .map(|v| hex::decode(v.sr25519_hex)) + .collect::>()?; + let aura_refs: Vec<&[u8]> = aura_keys.iter().map(Vec::as_slice).collect(); + top.insert( + storage_key("Aura", "Authorities"), + Value::String(to_hex(&encode_vec(&aura_refs))), + ); + + let grandpa_entries: Vec> = validators + .iter() + .map(|v| { + let mut entry = hex::decode(v.ed25519_hex)?; + entry.extend_from_slice(&1u64.to_le_bytes()); + Ok::<_, hex::FromHexError>(entry) + }) + .collect::>()?; + let grandpa_refs: Vec<&[u8]> = grandpa_entries.iter().map(Vec::as_slice).collect(); + let grandpa_encoded = encode_vec(&grandpa_refs); + + top.insert( + storage_key("Grandpa", "Authorities"), + Value::String(to_hex(&grandpa_encoded)), + ); + + let mut well_known = vec![0x01u8]; + well_known.extend_from_slice(&grandpa_encoded); + top.insert( + "0x3a6772616e6470615f617574686f726974696573".into(), + Value::String(to_hex(&well_known)), + ); + + top.insert( + storage_key("Grandpa", "CurrentSetId"), + Value::String(to_hex(&0u64.to_le_bytes())), + ); + top.insert( + storage_key("Grandpa", "State"), + Value::String("0x00".into()), + ); + top.remove(&storage_key("Grandpa", "PendingChange")); + top.remove(&storage_key("Grandpa", "NextForced")); + top.remove(&storage_key("Grandpa", "Stalled")); + remove_by_prefix(top, &storage_key("Grandpa", "SetIdSession")); + + top.insert( + storage_key("Sudo", "Key"), + Value::String(to_hex(&hex::decode(validators[0].sr25519_hex)?)), + ); + + remove_by_prefix(top, &storage_prefix("Session")); + clear_top_level(spec); + Ok(()) +} + +fn remove_by_prefix(map: &mut serde_json::Map, prefix: &str) { + let mut keys_to_remove = VecDeque::new(); + for key in map.keys() { + if key.starts_with(prefix) { + keys_to_remove.push_back(key.clone()); + } + } + while let Some(key) = keys_to_remove.pop_front() { + map.remove(&key); + } +} + +fn clear_top_level(spec: &mut Value) { + if let Some(object) = spec.as_object_mut() { + object.insert("bootNodes".into(), json!([])); + object.insert("codeSubstitutes".into(), json!({})); + object.insert("chainType".into(), json!("Local")); + } +} + +fn storage_key(pallet: &str, item: &str) -> String { + let mut key = Vec::with_capacity(32); + key.extend_from_slice(&sp_io::hashing::twox_128(pallet.as_bytes())); + key.extend_from_slice(&sp_io::hashing::twox_128(item.as_bytes())); + format!("0x{}", hex::encode(key)) +} + +fn storage_prefix(pallet: &str) -> String { + format!( + "0x{}", + hex::encode(sp_io::hashing::twox_128(pallet.as_bytes())) + ) +} + +fn compact_encode(n: u32) -> Vec { + if n <= 63 { + vec![(n as u8) << 2] + } else if n <= 16_383 { + let v = (n << 2) | 1; + vec![v as u8, (v >> 8) as u8] + } else { + let v = (n << 2) | 2; + vec![v as u8, (v >> 8) as u8, (v >> 16) as u8, (v >> 24) as u8] + } +} + +fn encode_vec(items: &[&[u8]]) -> Vec { + let mut out = compact_encode(items.len() as u32); + for item in items { + out.extend_from_slice(item); + } + out +} + +fn to_hex(data: &[u8]) -> String { + format!("0x{}", hex::encode(data)) +} diff --git a/node/src/command.rs b/node/src/command.rs index 3350c1443e..6ba4cf67b8 100644 --- a/node/src/command.rs +++ b/node/src/command.rs @@ -5,7 +5,7 @@ use crate::{ cli::{Cli, Subcommand, SupportedConsensusMechanism}, consensus::BabeConsensus, ethereum::db_config_dir, - service, + service, 
sync_options, }; use fc_db::{DatabaseSource, kv::frontier_database_dir}; @@ -62,6 +62,7 @@ pub fn run() -> sc_cli::Result<()> { let cmd = Cli::command(); let arg_matches = cmd.get_matches(); let cli = Cli::from_arg_matches(&arg_matches)?; + sync_options::set_skip_history_backfill(cli.skip_history_backfill); match &cli.subcommand { Some(Subcommand::Key(cmd)) => cmd.run(&cli), @@ -233,6 +234,11 @@ pub fn run() -> sc_cli::Result<()> { let runner = cli.create_runner(cmd)?; runner.sync_run(|config| cmd.run::(&config)) } + Some(Subcommand::CloneState(cmd)) => { + let runner = cli.create_runner(&cli.run)?; + let cmd = cmd.clone(); + runner.sync_run(move |_| crate::clone_spec::run(&cmd)) + } // Start with the initial consensus type asked. None => { let arg_matches = Cli::command().get_matches(); diff --git a/node/src/conditional_evm_block_import.rs b/node/src/conditional_evm_block_import.rs index b6ba445c1f..def7606297 100644 --- a/node/src/conditional_evm_block_import.rs +++ b/node/src/conditional_evm_block_import.rs @@ -1,5 +1,5 @@ use sc_consensus::{BlockCheckParams, BlockImport, BlockImportParams, ImportResult}; -use sp_consensus::Error as ConsensusError; +use sp_consensus::{BlockOrigin, Error as ConsensusError}; use sp_runtime::traits::{Block as BlockT, Header}; use std::marker::PhantomData; @@ -56,7 +56,15 @@ where self.inner.check_block(block).await.map_err(Into::into) } - async fn import_block(&self, block: BlockImportParams) -> Result { + async fn import_block( + &self, + mut block: BlockImportParams, + ) -> Result { + if crate::sync_options::skip_history_backfill() + && matches!(block.origin, BlockOrigin::NetworkInitialSync) + { + block.create_gap = false; + } // 4345556 - mainnet runtime upgrade block with Frontier if *block.header.number() < 4345557u32.into() { self.inner.import_block(block).await.map_err(Into::into) diff --git a/node/src/consensus/hybrid_import_queue.rs b/node/src/consensus/hybrid_import_queue.rs index 30d8ff4065..04de9a59c9 100644 --- a/node/src/consensus/hybrid_import_queue.rs +++ b/node/src/consensus/hybrid_import_queue.rs @@ -29,6 +29,7 @@ use sp_api::ProvideRuntimeApi; use sp_block_builder::BlockBuilder as BlockBuilderApi; use sp_blockchain::HeaderBackend; use sp_blockchain::HeaderMetadata; +use sp_consensus::BlockOrigin; use sp_consensus::SelectChain; use sp_consensus::error::Error as ConsensusError; use sp_consensus_aura::AuraApi; @@ -119,8 +120,14 @@ impl BlockImport for HybridBlockImport { async fn import_block( &self, - block: BlockImportParams, + mut block: BlockImportParams, ) -> Result { + // Clone mode can opt into skipping history-gap creation during catch-up. 
+ if crate::sync_options::skip_history_backfill() + && matches!(block.origin, BlockOrigin::NetworkInitialSync) + { + block.create_gap = false; + } if is_babe_digest(block.header.digest()) { self.inner_babe .import_block(block) diff --git a/node/src/lib.rs b/node/src/lib.rs index c447a07309..f1c73cc339 100644 --- a/node/src/lib.rs +++ b/node/src/lib.rs @@ -1,8 +1,10 @@ pub mod chain_spec; pub mod cli; pub mod client; +pub mod clone_spec; pub mod conditional_evm_block_import; pub mod consensus; pub mod ethereum; pub mod rpc; pub mod service; +pub mod sync_options; diff --git a/node/src/main.rs b/node/src/main.rs index 64f25acc67..2db3f22f42 100644 --- a/node/src/main.rs +++ b/node/src/main.rs @@ -6,12 +6,14 @@ mod benchmarking; mod chain_spec; mod cli; mod client; +mod clone_spec; mod command; mod conditional_evm_block_import; mod consensus; mod ethereum; mod rpc; mod service; +mod sync_options; fn main() -> sc_cli::Result<()> { command::run() diff --git a/node/src/sync_options.rs b/node/src/sync_options.rs new file mode 100644 index 0000000000..ec5ab40c58 --- /dev/null +++ b/node/src/sync_options.rs @@ -0,0 +1,13 @@ +use std::sync::atomic::{AtomicBool, Ordering}; + +static SKIP_HISTORY_BACKFILL: AtomicBool = AtomicBool::new(false); + +/// Enable or disable history backfill skipping for initial sync imports. +pub fn set_skip_history_backfill(enabled: bool) { + SKIP_HISTORY_BACKFILL.store(enabled, Ordering::Relaxed); +} + +/// Returns whether initial-sync imports should avoid creating history gaps. +pub fn skip_history_backfill() -> bool { + SKIP_HISTORY_BACKFILL.load(Ordering::Relaxed) +} From 0652bcc265be9775e94eee1f35afed7df34a89a3 Mon Sep 17 00:00:00 2001 From: Aliaksandr Tsurko Date: Tue, 10 Mar 2026 18:33:30 +0100 Subject: [PATCH 02/13] Add tests --- Cargo.lock | 1 + node/Cargo.toml | 1 + node/src/clone_spec.rs | 201 +++++++++++++++++++++++++++++++++-------- 3 files changed, 166 insertions(+), 37 deletions(-) diff --git a/Cargo.lock b/Cargo.lock index 62ec1464d7..c0079d8a78 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -8291,6 +8291,7 @@ dependencies = [ "frame-benchmarking", "frame-benchmarking-cli", "frame-metadata-hash-extension", + "frame-support", "frame-system", "frame-system-rpc-runtime-api", "futures", diff --git a/node/Cargo.toml b/node/Cargo.toml index 32de891091..3e76a53d51 100644 --- a/node/Cargo.toml +++ b/node/Cargo.toml @@ -68,6 +68,7 @@ sp-offchain.workspace = true sp-session.workspace = true frame-metadata-hash-extension.workspace = true frame-system.workspace = true +frame-support.workspace = true pallet-transaction-payment.workspace = true pallet-commitments.workspace = true pallet-drand.workspace = true diff --git a/node/src/clone_spec.rs b/node/src/clone_spec.rs index 5e51f26e66..f2da421318 100644 --- a/node/src/clone_spec.rs +++ b/node/src/clone_spec.rs @@ -11,12 +11,14 @@ use jsonrpsee::{ rpc_params, }; use serde_json::{Value, json}; +use sp_runtime::codec::Encode; use crate::cli::{CloneHistoryBackfill, CloneStateCmd}; type CloneResult = Result>; const RPC_POLL_INTERVAL: Duration = Duration::from_secs(2); +const GRANDPA_AUTHORITIES_WELL_KNOWN_KEY: &[u8] = b":grandpa_authorities"; #[derive(Clone, Copy)] struct Validator { @@ -358,26 +360,20 @@ fn patch_raw_spec(spec: &mut Value, validators: &[Validator]) -> CloneResult<()> .and_then(Value::as_object_mut) .ok_or_else(|| "missing or invalid genesis.raw.top".to_string())?; - let aura_keys: Vec> = validators + let aura_keys: Vec<[u8; 32]> = validators .iter() - .map(|v| hex::decode(v.sr25519_hex)) - .collect::>()?; - let 
aura_refs: Vec<&[u8]> = aura_keys.iter().map(Vec::as_slice).collect(); + .map(|v| decode_hex_32(v.sr25519_hex)) + .collect::>()?; top.insert( storage_key("Aura", "Authorities"), - Value::String(to_hex(&encode_vec(&aura_refs))), + Value::String(to_hex(&aura_keys.encode())), ); - let grandpa_entries: Vec> = validators + let grandpa_entries: Vec<([u8; 32], u64)> = validators .iter() - .map(|v| { - let mut entry = hex::decode(v.ed25519_hex)?; - entry.extend_from_slice(&1u64.to_le_bytes()); - Ok::<_, hex::FromHexError>(entry) - }) - .collect::>()?; - let grandpa_refs: Vec<&[u8]> = grandpa_entries.iter().map(Vec::as_slice).collect(); - let grandpa_encoded = encode_vec(&grandpa_refs); + .map(|v| Ok((decode_hex_32(v.ed25519_hex)?, 1u64))) + .collect::>()?; + let grandpa_encoded = grandpa_entries.encode(); top.insert( storage_key("Grandpa", "Authorities"), @@ -387,7 +383,7 @@ fn patch_raw_spec(spec: &mut Value, validators: &[Validator]) -> CloneResult<()> let mut well_known = vec![0x01u8]; well_known.extend_from_slice(&grandpa_encoded); top.insert( - "0x3a6772616e6470615f617574686f726974696573".into(), + to_hex(GRANDPA_AUTHORITIES_WELL_KNOWN_KEY), Value::String(to_hex(&well_known)), ); @@ -435,10 +431,8 @@ fn clear_top_level(spec: &mut Value) { } fn storage_key(pallet: &str, item: &str) -> String { - let mut key = Vec::with_capacity(32); - key.extend_from_slice(&sp_io::hashing::twox_128(pallet.as_bytes())); - key.extend_from_slice(&sp_io::hashing::twox_128(item.as_bytes())); - format!("0x{}", hex::encode(key)) + let key = frame_support::storage::storage_prefix(pallet.as_bytes(), item.as_bytes()); + to_hex(&key) } fn storage_prefix(pallet: &str) -> String { @@ -448,26 +442,159 @@ fn storage_prefix(pallet: &str) -> String { ) } -fn compact_encode(n: u32) -> Vec { - if n <= 63 { - vec![(n as u8) << 2] - } else if n <= 16_383 { - let v = (n << 2) | 1; - vec![v as u8, (v >> 8) as u8] - } else { - let v = (n << 2) | 2; - vec![v as u8, (v >> 8) as u8, (v >> 16) as u8, (v >> 24) as u8] - } +fn to_hex(data: &[u8]) -> String { + format!("0x{}", hex::encode(data)) } -fn encode_vec(items: &[&[u8]]) -> Vec { - let mut out = compact_encode(items.len() as u32); - for item in items { - out.extend_from_slice(item); - } - out +fn decode_hex_32(value: &str) -> CloneResult<[u8; 32]> { + let bytes = hex::decode(value)?; + let len = bytes.len(); + let bytes: [u8; 32] = bytes.try_into().map_err(|_| { + std::io::Error::new( + std::io::ErrorKind::InvalidData, + format!("expected 32-byte hex value, got {len} bytes"), + ) + })?; + Ok(bytes) } -fn to_hex(data: &[u8]) -> String { - format!("0x{}", hex::encode(data)) +#[cfg(test)] +mod tests { + use super::*; + use crate::cli::{CloneDatabase, CloneHistoryBackfill, CloneSyncMode}; + + fn target_artifact_path(name: &str) -> PathBuf { + let target_dir = std::env::var_os("CARGO_TARGET_DIR") + .map(PathBuf::from) + .unwrap_or_else(|| PathBuf::from("target")); + target_dir.join("clone-spec-tests").join(name) + } + + fn default_cmd() -> CloneStateCmd { + CloneStateCmd { + chain: "finney".to_string(), + base_path: target_artifact_path("base"), + output: target_artifact_path("out.json"), + sync: CloneSyncMode::Warp, + database: CloneDatabase::ParityDb, + history_backfill: CloneHistoryBackfill::Skip, + rpc_port: 9966, + port: 30466, + sync_timeout_sec: 10, + sync_lag_blocks: 8, + bootnodes: Vec::new(), + alice: false, + bob: false, + charlie: false, + } + } + + fn make_minimal_spec() -> Value { + let mut top = serde_json::Map::new(); + top.insert(storage_key("Grandpa", "PendingChange"), 
json!("0x01")); + top.insert(storage_key("Grandpa", "NextForced"), json!("0x02")); + top.insert(storage_key("Grandpa", "Stalled"), json!("0x03")); + top.insert( + format!("{}{}", storage_key("Grandpa", "SetIdSession"), "deadbeef"), + json!("0x04"), + ); + top.insert(format!("{}abcd", storage_prefix("Session")), json!("0x05")); + top.insert(storage_key("Balances", "TotalIssuance"), json!("0x06")); + + json!({ + "genesis": { "raw": { "top": top } }, + "bootNodes": ["/dns4/example.com/tcp/30333/p2p/12D3KooW..."], + "codeSubstitutes": { "0x01": "0x02" }, + "chainType": "Live" + }) + } + + #[test] + fn selected_validators_defaults_to_alice() { + let cmd = default_cmd(); + let selected = selected_validators(&cmd); + assert_eq!(selected.len(), 1); + assert_eq!(selected[0].name, "alice"); + } + + #[test] + fn selected_validators_respects_explicit_flags() { + let mut cmd = default_cmd(); + cmd.bob = true; + cmd.charlie = true; + + let selected = selected_validators(&cmd); + let names = selected.into_iter().map(|v| v.name).collect::>(); + assert_eq!(names, vec!["bob", "charlie"]); + } + + #[test] + fn parse_u64_field_supports_u64_decimal_and_hex_string() { + let value = json!({ + "a": 42, + "b": "123", + "c": "0x2a" + }); + + assert_eq!(parse_u64_field(&value, "a"), Some(42)); + assert_eq!(parse_u64_field(&value, "b"), Some(123)); + assert_eq!(parse_u64_field(&value, "c"), Some(42)); + assert_eq!(parse_u64_field(&value, "missing"), None); + } + + #[test] + fn patch_raw_spec_updates_authorities_sudo_and_top_level() { + let mut spec = make_minimal_spec(); + let validators = vec![VALIDATORS[0], VALIDATORS[1]]; + patch_raw_spec(&mut spec, &validators).expect("patch should succeed"); + + let top = spec + .pointer("/genesis/raw/top") + .and_then(Value::as_object) + .expect("top should be object"); + + let aura_hex = top + .get(&storage_key("Aura", "Authorities")) + .and_then(Value::as_str) + .expect("aura authorities key should exist"); + let aura_raw = hex::decode(aura_hex.trim_start_matches("0x")).expect("hex decode aura"); + let expected_aura = vec![ + decode_hex_32(VALIDATORS[0].sr25519_hex).expect("decode"), + decode_hex_32(VALIDATORS[1].sr25519_hex).expect("decode"), + ] + .encode(); + assert_eq!(aura_raw, expected_aura); + + let sudo_hex = top + .get(&storage_key("Sudo", "Key")) + .and_then(Value::as_str) + .expect("sudo key should exist"); + assert_eq!( + sudo_hex, + to_hex(&hex::decode(VALIDATORS[0].sr25519_hex).expect("decode")).as_str() + ); + + assert!(!top.contains_key(&storage_key("Grandpa", "PendingChange"))); + assert!(!top.contains_key(&storage_key("Grandpa", "NextForced"))); + assert!(!top.contains_key(&storage_key("Grandpa", "Stalled"))); + assert!( + top.keys() + .all(|k| !k.starts_with(&storage_prefix("Session"))) + ); + + assert_eq!(spec.get("chainType"), Some(&json!("Local"))); + assert_eq!(spec.get("bootNodes"), Some(&json!([]))); + assert_eq!(spec.get("codeSubstitutes"), Some(&json!({}))); + } + + #[test] + fn patch_raw_spec_fails_when_top_missing() { + let mut spec = json!({}); + let err = patch_raw_spec(&mut spec, &[VALIDATORS[0]]).expect_err("must fail"); + assert!( + err.to_string() + .contains("missing or invalid genesis.raw.top"), + "unexpected error: {err}" + ); + } } From b9b0e7a0431b2028752ab7216d734f70101945ab Mon Sep 17 00:00:00 2001 From: Aliaksandr Tsurko Date: Tue, 10 Mar 2026 19:21:31 +0100 Subject: [PATCH 03/13] Clean up --- node/Cargo.toml | 2 + node/src/cli.rs | 87 +++++++++--------------------------------- node/src/clone_spec.rs | 71 
++++++++++++++++++++++++---------- node/src/command.rs | 24 ++++++++++-- 4 files changed, 92 insertions(+), 92 deletions(-) diff --git a/node/Cargo.toml b/node/Cargo.toml index 3e76a53d51..e787009d9a 100644 --- a/node/Cargo.toml +++ b/node/Cargo.toml @@ -157,6 +157,7 @@ runtime-benchmarks = [ "node-subtensor-runtime/runtime-benchmarks", "frame-benchmarking/runtime-benchmarks", "frame-benchmarking-cli/runtime-benchmarks", + "frame-support/runtime-benchmarks", "frame-system/runtime-benchmarks", "sc-service/runtime-benchmarks", "sp-runtime/runtime-benchmarks", @@ -175,6 +176,7 @@ pow-faucet = [] # in the near future. try-runtime = [ "node-subtensor-runtime/try-runtime", + "frame-support/try-runtime", "frame-system/try-runtime", "pallet-transaction-payment/try-runtime", "sp-runtime/try-runtime", diff --git a/node/src/cli.rs b/node/src/cli.rs index 175a9b41a6..341969eb9e 100644 --- a/node/src/cli.rs +++ b/node/src/cli.rs @@ -36,11 +36,13 @@ pub struct Cli { #[command(flatten)] pub eth: EthConfiguration, - /// Skip creating historical gap-backfill during initial/catch-up sync. + /// Control historical gap-backfill during initial/catch-up sync. /// - /// This reduces sync time/disk usage but historical block data may be incomplete. - #[arg(long, default_value_t = false)] - pub skip_history_backfill: bool, + /// `keep` preserves complete history (default for normal node runs). + /// `skip` is faster/lighter but historical block data may be incomplete. + /// For `build-test-clone`, the implicit default is `skip` unless this flag is explicitly set. + #[arg(long, value_enum, default_value_t = HistoryBackfill::Keep)] + pub history_backfill: HistoryBackfill, } #[allow(clippy::large_enum_variant)] @@ -100,16 +102,12 @@ pub struct CloneStateCmd { pub output: PathBuf, /// Sync mode for the temporary sync node. - #[arg(long, value_enum, default_value_t = CloneSyncMode::Warp)] - pub sync: CloneSyncMode, + #[arg(long, value_enum, default_value_t = sc_cli::SyncMode::Warp)] + pub sync: sc_cli::SyncMode, /// Database backend for the temporary sync/export node. - #[arg(long, value_enum, default_value_t = CloneDatabase::ParityDb)] - pub database: CloneDatabase, - - /// Whether to keep or skip history backfill after state sync. - #[arg(long, value_enum, default_value_t = CloneHistoryBackfill::Skip)] - pub history_backfill: CloneHistoryBackfill, + #[arg(long, value_enum, default_value_t = sc_cli::Database::ParityDb)] + pub database: sc_cli::Database, /// RPC port used by the temporary sync node. #[arg(long, default_value_t = 9966)] @@ -131,83 +129,36 @@ pub struct CloneStateCmd { #[arg(long, value_name = "BOOTNODE")] pub bootnodes: Vec, - /// Include Alice in patched validator authorities. + /// Include Alice in patched validator authorities (default if no validator flags are passed). #[arg(long, default_value_t = false)] pub alice: bool, - /// Include Bob in patched validator authorities. + /// Include Bob in patched validator authorities (if any validator flag is set, only selected validators are used). #[arg(long, default_value_t = false)] pub bob: bool, - /// Include Charlie in patched validator authorities. + /// Include Charlie in patched validator authorities (if any validator flag is set, only selected validators are used). 
#[arg(long, default_value_t = false)] pub charlie: bool, } -#[derive(Debug, Clone, Copy, clap::ValueEnum)] -pub enum CloneSyncMode { - Warp, - Full, -} - -impl AsRef for CloneSyncMode { - fn as_ref(&self) -> &str { - match self { - CloneSyncMode::Warp => "warp", - CloneSyncMode::Full => "full", - } - } -} - -impl fmt::Display for CloneSyncMode { - fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { - f.write_str(self.as_ref()) - } -} - -#[derive(Debug, Clone, Copy, clap::ValueEnum)] -pub enum CloneDatabase { - #[value(name = "auto")] - Auto, - #[value(name = "rocksdb")] - RocksDb, - #[value(name = "paritydb")] - ParityDb, -} - -impl AsRef for CloneDatabase { - fn as_ref(&self) -> &str { - match self { - CloneDatabase::Auto => "auto", - CloneDatabase::RocksDb => "rocksdb", - CloneDatabase::ParityDb => "paritydb", - } - } -} - -impl fmt::Display for CloneDatabase { - fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { - f.write_str(self.as_ref()) - } -} - #[derive(Debug, Clone, Copy, clap::ValueEnum, Default)] -pub enum CloneHistoryBackfill { - Keep, +pub enum HistoryBackfill { #[default] + Keep, Skip, } -impl AsRef for CloneHistoryBackfill { +impl AsRef for HistoryBackfill { fn as_ref(&self) -> &str { match self { - CloneHistoryBackfill::Keep => "keep", - CloneHistoryBackfill::Skip => "skip", + HistoryBackfill::Keep => "keep", + HistoryBackfill::Skip => "skip", } } } -impl fmt::Display for CloneHistoryBackfill { +impl fmt::Display for HistoryBackfill { fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { f.write_str(self.as_ref()) } diff --git a/node/src/clone_spec.rs b/node/src/clone_spec.rs index f2da421318..10577585b7 100644 --- a/node/src/clone_spec.rs +++ b/node/src/clone_spec.rs @@ -13,7 +13,7 @@ use jsonrpsee::{ use serde_json::{Value, json}; use sp_runtime::codec::Encode; -use crate::cli::{CloneHistoryBackfill, CloneStateCmd}; +use crate::cli::{CloneStateCmd, HistoryBackfill}; type CloneResult = Result>; @@ -46,7 +46,7 @@ static VALIDATORS: &[Validator] = &[ ]; /// Execute `build-test-clone`: sync network state, export raw chainspec, apply clone patch. 
-pub fn run(cmd: &CloneStateCmd) -> sc_cli::Result<()> { +pub fn run(cmd: &CloneStateCmd, history_backfill: HistoryBackfill) -> sc_cli::Result<()> { let runtime = tokio::runtime::Builder::new_current_thread() .enable_io() .enable_time() @@ -54,11 +54,11 @@ pub fn run(cmd: &CloneStateCmd) -> sc_cli::Result<()> { .map_err(|err| sc_cli::Error::Application(Box::new(err)))?; runtime - .block_on(async_run(cmd)) + .block_on(async_run(cmd, history_backfill)) .map_err(sc_cli::Error::Application) } -async fn async_run(cmd: &CloneStateCmd) -> CloneResult<()> { +async fn async_run(cmd: &CloneStateCmd, history_backfill: HistoryBackfill) -> CloneResult<()> { let validators = selected_validators(cmd); let selected_names = validators .iter() @@ -73,11 +73,14 @@ async fn async_run(cmd: &CloneStateCmd) -> CloneResult<()> { } let current_exe = std::env::current_exe()?; - let database_arg = cmd.database.as_ref(); - let sync_arg = cmd.sync.as_ref(); - let skip_backfill = matches!(cmd.history_backfill, CloneHistoryBackfill::Skip); + let database_arg = database_arg(cmd.database); + let sync_arg = sync_arg(cmd.sync); - log::info!("build-test-clone: validators={selected_names}"); + log::info!( + "build-test-clone: validators={} history_backfill={}", + selected_names, + history_backfill + ); let mut sync_args = vec![ "--base-path".to_string(), @@ -99,6 +102,8 @@ async fn async_run(cmd: &CloneStateCmd) -> CloneResult<()> { "--no-mdns".to_string(), "--name".to_string(), "build-test-clone-sync".to_string(), + "--history-backfill".to_string(), + history_backfill.to_string(), ]; for bootnode in &cmd.bootnodes { @@ -106,10 +111,6 @@ async fn async_run(cmd: &CloneStateCmd) -> CloneResult<()> { sync_args.push(bootnode.clone()); } - if skip_backfill { - sync_args.push("--skip-history-backfill".to_string()); - } - log::info!("build-test-clone: starting sync node"); let mut sync_child = Command::new(¤t_exe) @@ -129,7 +130,7 @@ async fn async_run(cmd: &CloneStateCmd) -> CloneResult<()> { log::info!("build-test-clone: exporting raw state"); - export_raw_state(¤t_exe, cmd, database_arg, &raw_tmp)?; + export_raw_state(¤t_exe, cmd, database_arg, history_backfill, &raw_tmp)?; log::info!("build-test-clone: applying clone patch"); @@ -161,9 +162,10 @@ async fn wait_for_sync_completion(sync_child: &mut Child, cmd: &CloneStateCmd) - cmd.sync_timeout_sec ); - while let None = sync_child + while sync_child .try_wait() .map_err(|err| std::io::Error::other(format!("Failed to poll sync node process: {err}")))? 
+ .is_none() { if start.elapsed() > timeout { return Err(format!( @@ -236,6 +238,7 @@ fn export_raw_state( current_exe: &Path, cmd: &CloneStateCmd, database_arg: &str, + history_backfill: HistoryBackfill, raw_tmp: &Path, ) -> CloneResult<()> { let stdout = File::create(raw_tmp)?; @@ -248,6 +251,8 @@ fn export_raw_state( &cmd.base_path.display().to_string(), "--database", database_arg, + "--history-backfill", + history_backfill.as_ref(), ]) .stdin(Stdio::null()) .stdout(Stdio::from(stdout)) @@ -301,8 +306,8 @@ async fn rpc_call(rpc_client: &HttpClient, method: &str) -> CloneResult { fn parse_u64_field(value: &Value, field: &str) -> Option { let field_value = value.get(field)?; - if let Some(n) = field_value.as_u64() { - return Some(n); + if let Value::Number(number) = field_value { + return number.to_string().parse::().ok(); } let s = field_value.as_str()?; @@ -317,6 +322,26 @@ fn temp_raw_path() -> CloneResult { Ok(std::env::temp_dir().join(format!("subtensor-clone-export-{epoch}.json"))) } +fn sync_arg(mode: sc_cli::SyncMode) -> &'static str { + match mode { + sc_cli::SyncMode::Full => "full", + sc_cli::SyncMode::Fast => "fast", + sc_cli::SyncMode::FastUnsafe => "fast-unsafe", + sc_cli::SyncMode::Warp => "warp", + } +} + +fn database_arg(database: sc_cli::Database) -> &'static str { + match database { + #[cfg(feature = "rocksdb")] + sc_cli::Database::RocksDb => "rocksdb", + sc_cli::Database::ParityDb => "paritydb", + sc_cli::Database::Auto => "auto", + sc_cli::Database::ParityDbDeprecated => "paritydb-experimental", + } +} + +#[allow(clippy::indexing_slicing)] fn selected_validators(cmd: &CloneStateCmd) -> Vec { let explicit = cmd.alice || cmd.bob || cmd.charlie; let mut selected = Vec::new(); @@ -355,6 +380,10 @@ fn patch_raw_chainspec_file( } fn patch_raw_spec(spec: &mut Value, validators: &[Validator]) -> CloneResult<()> { + let sudo = validators + .first() + .ok_or_else(|| "at least one validator must be selected".to_string())?; + let top = spec .pointer_mut("/genesis/raw/top") .and_then(Value::as_object_mut) @@ -402,7 +431,7 @@ fn patch_raw_spec(spec: &mut Value, validators: &[Validator]) -> CloneResult<()> top.insert( storage_key("Sudo", "Key"), - Value::String(to_hex(&hex::decode(validators[0].sr25519_hex)?)), + Value::String(to_hex(&hex::decode(sudo.sr25519_hex)?)), ); remove_by_prefix(top, &storage_prefix("Session")); @@ -459,9 +488,10 @@ fn decode_hex_32(value: &str) -> CloneResult<[u8; 32]> { } #[cfg(test)] +#[allow(clippy::indexing_slicing)] +#[allow(clippy::expect_used)] mod tests { use super::*; - use crate::cli::{CloneDatabase, CloneHistoryBackfill, CloneSyncMode}; fn target_artifact_path(name: &str) -> PathBuf { let target_dir = std::env::var_os("CARGO_TARGET_DIR") @@ -475,9 +505,8 @@ mod tests { chain: "finney".to_string(), base_path: target_artifact_path("base"), output: target_artifact_path("out.json"), - sync: CloneSyncMode::Warp, - database: CloneDatabase::ParityDb, - history_backfill: CloneHistoryBackfill::Skip, + sync: sc_cli::SyncMode::Warp, + database: sc_cli::Database::ParityDb, rpc_port: 9966, port: 30466, sync_timeout_sec: 10, diff --git a/node/src/command.rs b/node/src/command.rs index 731b3532d7..ca4dc94908 100644 --- a/node/src/command.rs +++ b/node/src/command.rs @@ -2,7 +2,7 @@ use std::sync::{Arc, atomic::AtomicBool}; use crate::{ chain_spec, - cli::{Cli, Subcommand, SupportedConsensusMechanism}, + cli::{Cli, HistoryBackfill, Subcommand, SupportedConsensusMechanism}, consensus::BabeConsensus, ethereum::db_config_dir, service, sync_options, @@ -62,7 +62,8 
@@ pub fn run() -> sc_cli::Result<()> { let cmd = Cli::command(); let arg_matches = cmd.get_matches(); let cli = Cli::from_arg_matches(&arg_matches)?; - sync_options::set_skip_history_backfill(cli.skip_history_backfill); + let history_backfill = effective_history_backfill(&cli, &arg_matches); + sync_options::set_skip_history_backfill(matches!(history_backfill, HistoryBackfill::Skip)); match &cli.subcommand { Some(Subcommand::Key(cmd)) => cmd.run(&cli), @@ -237,7 +238,7 @@ pub fn run() -> sc_cli::Result<()> { Some(Subcommand::CloneState(cmd)) => { let runner = cli.create_runner(&cli.run)?; let cmd = cmd.clone(); - runner.sync_run(move |_| crate::clone_spec::run(&cmd)) + runner.sync_run(move |_| crate::clone_spec::run(&cmd, history_backfill)) } // Start with the initial consensus type asked. None => { @@ -251,6 +252,23 @@ pub fn run() -> sc_cli::Result<()> { } } +fn effective_history_backfill(cli: &Cli, arg_matches: &ArgMatches) -> HistoryBackfill { + // We keep a single global `--history-backfill` flag, but `build-test-clone` should default to + // `skip` when the operator didn't set the flag explicitly. This preserves `keep` as the default + // for normal node runs. + if matches!( + arg_matches.value_source("history_backfill"), + Some(ValueSource::CommandLine) + ) { + return cli.history_backfill; + } + + match &cli.subcommand { + Some(Subcommand::CloneState(_)) => HistoryBackfill::Skip, + _ => HistoryBackfill::Keep, + } +} + #[allow(clippy::expect_used)] fn start_babe_service(arg_matches: &ArgMatches) -> Result<(), sc_cli::Error> { let cli = Cli::from_arg_matches(arg_matches).expect("Bad arg_matches"); From 2b7803211f9de9f639028d13599f781e88de8b5d Mon Sep 17 00:00:00 2001 From: Aliaksandr Tsurko Date: Wed, 11 Mar 2026 13:59:21 +0100 Subject: [PATCH 04/13] Remove --history-backfill from export-state stage in build-test-clone --- node/src/clone_spec.rs | 5 +---- 1 file changed, 1 insertion(+), 4 deletions(-) diff --git a/node/src/clone_spec.rs b/node/src/clone_spec.rs index 10577585b7..7ac8fb703a 100644 --- a/node/src/clone_spec.rs +++ b/node/src/clone_spec.rs @@ -130,7 +130,7 @@ async fn async_run(cmd: &CloneStateCmd, history_backfill: HistoryBackfill) -> Cl log::info!("build-test-clone: exporting raw state"); - export_raw_state(¤t_exe, cmd, database_arg, history_backfill, &raw_tmp)?; + export_raw_state(¤t_exe, cmd, database_arg, &raw_tmp)?; log::info!("build-test-clone: applying clone patch"); @@ -238,7 +238,6 @@ fn export_raw_state( current_exe: &Path, cmd: &CloneStateCmd, database_arg: &str, - history_backfill: HistoryBackfill, raw_tmp: &Path, ) -> CloneResult<()> { let stdout = File::create(raw_tmp)?; @@ -251,8 +250,6 @@ fn export_raw_state( &cmd.base_path.display().to_string(), "--database", database_arg, - "--history-backfill", - history_backfill.as_ref(), ]) .stdin(Stdio::null()) .stdout(Stdio::from(stdout)) From 902bd718b7b135094209fc1f1ab76e1017277ad0 Mon Sep 17 00:00:00 2001 From: Aliaksandr Tsurko Date: Wed, 11 Mar 2026 14:13:33 +0100 Subject: [PATCH 05/13] Use authority keys generation helpers in clone spec --- node/src/cli.rs | 6 +-- node/src/clone_spec.rs | 90 +++++++++++++----------------------------- 2 files changed, 31 insertions(+), 65 deletions(-) diff --git a/node/src/cli.rs b/node/src/cli.rs index 341969eb9e..a29d6852c0 100644 --- a/node/src/cli.rs +++ b/node/src/cli.rs @@ -129,15 +129,15 @@ pub struct CloneStateCmd { #[arg(long, value_name = "BOOTNODE")] pub bootnodes: Vec, - /// Include Alice in patched validator authorities (default if no validator flags are passed). 
+ /// Include Alice in patched validator authorities (default if no validator flags are passed; Sudo is assigned to the first selected validator in Alice->Bob->Charlie order). #[arg(long, default_value_t = false)] pub alice: bool, - /// Include Bob in patched validator authorities (if any validator flag is set, only selected validators are used). + /// Include Bob in patched validator authorities (if any validator flag is set, only selected validators are used; Sudo is assigned to the first selected validator in Alice->Bob->Charlie order). #[arg(long, default_value_t = false)] pub bob: bool, - /// Include Charlie in patched validator authorities (if any validator flag is set, only selected validators are used). + /// Include Charlie in patched validator authorities (if any validator flag is set, only selected validators are used; Sudo is assigned to the first selected validator in Alice->Bob->Charlie order). #[arg(long, default_value_t = false)] pub charlie: bool, } diff --git a/node/src/clone_spec.rs b/node/src/clone_spec.rs index 7ac8fb703a..c40dd6b980 100644 --- a/node/src/clone_spec.rs +++ b/node/src/clone_spec.rs @@ -20,31 +20,6 @@ type CloneResult = Result>; const RPC_POLL_INTERVAL: Duration = Duration::from_secs(2); const GRANDPA_AUTHORITIES_WELL_KNOWN_KEY: &[u8] = b":grandpa_authorities"; -#[derive(Clone, Copy)] -struct Validator { - name: &'static str, - sr25519_hex: &'static str, - ed25519_hex: &'static str, -} - -static VALIDATORS: &[Validator] = &[ - Validator { - name: "alice", - sr25519_hex: "d43593c715fdd31c61141abd04a99fd6822c8558854ccde39a5684e7a56da27d", - ed25519_hex: "88dc3417d5058ec4b4503e0c12ea1a0a89be200fe98922423d4334014fa6b0ee", - }, - Validator { - name: "bob", - sr25519_hex: "8eaf04151687736326c9fea17e25fc5287613693c912909cb226aa4794f26a48", - ed25519_hex: "d17c2d7823ebf260fd138f2d7e27d114c0145d968b5ff5006125f2414fadae69", - }, - Validator { - name: "charlie", - sr25519_hex: "90b5ab205c6974c9ea841be688864633dc9ca8a357843eeacf2314649965fe22", - ed25519_hex: "439660b36c6c03afafca027b910b4fecf99801834c62a5e6006f27d978de234f", - }, -]; - /// Execute `build-test-clone`: sync network state, export raw chainspec, apply clone patch. 
pub fn run(cmd: &CloneStateCmd, history_backfill: HistoryBackfill) -> sc_cli::Result<()> { let runtime = tokio::runtime::Builder::new_current_thread() @@ -62,7 +37,7 @@ async fn async_run(cmd: &CloneStateCmd, history_backfill: HistoryBackfill) -> Cl let validators = selected_validators(cmd); let selected_names = validators .iter() - .map(|v| v.name) + .map(|seed| seed.to_ascii_lowercase()) .collect::>() .join(","); @@ -338,23 +313,22 @@ fn database_arg(database: sc_cli::Database) -> &'static str { } } -#[allow(clippy::indexing_slicing)] -fn selected_validators(cmd: &CloneStateCmd) -> Vec { +fn selected_validators(cmd: &CloneStateCmd) -> Vec<&'static str> { let explicit = cmd.alice || cmd.bob || cmd.charlie; let mut selected = Vec::new(); if explicit { if cmd.alice { - selected.push(VALIDATORS[0]); + selected.push("Alice"); } if cmd.bob { - selected.push(VALIDATORS[1]); + selected.push("Bob"); } if cmd.charlie { - selected.push(VALIDATORS[2]); + selected.push("Charlie"); } } else { - selected.push(VALIDATORS[0]); // only alice be default + selected.push("Alice"); // only alice by default } selected @@ -363,7 +337,7 @@ fn selected_validators(cmd: &CloneStateCmd) -> Vec { fn patch_raw_chainspec_file( input: &Path, output: &Path, - validators: &[Validator], + validators: &[&'static str], ) -> CloneResult<()> { let file = File::open(input)?; let reader = BufReader::with_capacity(64 * 1024 * 1024, file); @@ -376,7 +350,7 @@ fn patch_raw_chainspec_file( Ok(()) } -fn patch_raw_spec(spec: &mut Value, validators: &[Validator]) -> CloneResult<()> { +fn patch_raw_spec(spec: &mut Value, validators: &[&'static str]) -> CloneResult<()> { let sudo = validators .first() .ok_or_else(|| "at least one validator must be selected".to_string())?; @@ -386,19 +360,19 @@ fn patch_raw_spec(spec: &mut Value, validators: &[Validator]) -> CloneResult<()> .and_then(Value::as_object_mut) .ok_or_else(|| "missing or invalid genesis.raw.top".to_string())?; - let aura_keys: Vec<[u8; 32]> = validators + let aura_keys = validators .iter() - .map(|v| decode_hex_32(v.sr25519_hex)) - .collect::>()?; + .map(|seed| crate::chain_spec::authority_keys_from_seed(seed).0) + .collect::>(); top.insert( storage_key("Aura", "Authorities"), Value::String(to_hex(&aura_keys.encode())), ); - let grandpa_entries: Vec<([u8; 32], u64)> = validators + let grandpa_entries = validators .iter() - .map(|v| Ok((decode_hex_32(v.ed25519_hex)?, 1u64))) - .collect::>()?; + .map(|seed| (crate::chain_spec::authority_keys_from_seed(seed).1, 1u64)) + .collect::>(); let grandpa_encoded = grandpa_entries.encode(); top.insert( @@ -428,7 +402,9 @@ fn patch_raw_spec(spec: &mut Value, validators: &[Validator]) -> CloneResult<()> top.insert( storage_key("Sudo", "Key"), - Value::String(to_hex(&hex::decode(sudo.sr25519_hex)?)), + Value::String(to_hex( + &crate::chain_spec::get_account_id_from_seed::(sudo).encode(), + )), ); remove_by_prefix(top, &storage_prefix("Session")); @@ -472,20 +448,7 @@ fn to_hex(data: &[u8]) -> String { format!("0x{}", hex::encode(data)) } -fn decode_hex_32(value: &str) -> CloneResult<[u8; 32]> { - let bytes = hex::decode(value)?; - let len = bytes.len(); - let bytes: [u8; 32] = bytes.try_into().map_err(|_| { - std::io::Error::new( - std::io::ErrorKind::InvalidData, - format!("expected 32-byte hex value, got {len} bytes"), - ) - })?; - Ok(bytes) -} - #[cfg(test)] -#[allow(clippy::indexing_slicing)] #[allow(clippy::expect_used)] mod tests { use super::*; @@ -540,7 +503,7 @@ mod tests { let cmd = default_cmd(); let selected = 
selected_validators(&cmd); assert_eq!(selected.len(), 1); - assert_eq!(selected[0].name, "alice"); + assert_eq!(selected[0], "Alice"); } #[test] @@ -550,8 +513,7 @@ mod tests { cmd.charlie = true; let selected = selected_validators(&cmd); - let names = selected.into_iter().map(|v| v.name).collect::>(); - assert_eq!(names, vec!["bob", "charlie"]); + assert_eq!(selected, vec!["Bob", "Charlie"]); } #[test] @@ -571,7 +533,7 @@ mod tests { #[test] fn patch_raw_spec_updates_authorities_sudo_and_top_level() { let mut spec = make_minimal_spec(); - let validators = vec![VALIDATORS[0], VALIDATORS[1]]; + let validators = vec!["Alice", "Bob"]; patch_raw_spec(&mut spec, &validators).expect("patch should succeed"); let top = spec @@ -585,8 +547,8 @@ mod tests { .expect("aura authorities key should exist"); let aura_raw = hex::decode(aura_hex.trim_start_matches("0x")).expect("hex decode aura"); let expected_aura = vec![ - decode_hex_32(VALIDATORS[0].sr25519_hex).expect("decode"), - decode_hex_32(VALIDATORS[1].sr25519_hex).expect("decode"), + crate::chain_spec::authority_keys_from_seed("Alice").0, + crate::chain_spec::authority_keys_from_seed("Bob").0, ] .encode(); assert_eq!(aura_raw, expected_aura); @@ -597,7 +559,11 @@ mod tests { .expect("sudo key should exist"); assert_eq!( sudo_hex, - to_hex(&hex::decode(VALIDATORS[0].sr25519_hex).expect("decode")).as_str() + to_hex( + &crate::chain_spec::get_account_id_from_seed::("Alice") + .encode() + ) + .as_str() ); assert!(!top.contains_key(&storage_key("Grandpa", "PendingChange"))); @@ -616,7 +582,7 @@ mod tests { #[test] fn patch_raw_spec_fails_when_top_missing() { let mut spec = json!({}); - let err = patch_raw_spec(&mut spec, &[VALIDATORS[0]]).expect_err("must fail"); + let err = patch_raw_spec(&mut spec, &["Alice"]).expect_err("must fail"); assert!( err.to_string() .contains("missing or invalid genesis.raw.top"), From abf2fd46fe6869d17ec0705aae72774bd95e78d3 Mon Sep 17 00:00:00 2001 From: Greg Zaitsev Date: Wed, 11 Mar 2026 17:14:02 -0400 Subject: [PATCH 06/13] Patch weights --- pallets/subtensor/src/macros/dispatches.rs | 30 +++++++++++----------- 1 file changed, 15 insertions(+), 15 deletions(-) diff --git a/pallets/subtensor/src/macros/dispatches.rs b/pallets/subtensor/src/macros/dispatches.rs index 93803cfc13..c9d80805ac 100644 --- a/pallets/subtensor/src/macros/dispatches.rs +++ b/pallets/subtensor/src/macros/dispatches.rs @@ -711,7 +711,7 @@ mod dispatches { /// #[pallet::call_index(2)] #[pallet::weight((Weight::from_parts(340_800_000, 0) - .saturating_add(T::DbWeight::get().reads(25_u64)) + .saturating_add(T::DbWeight::get().reads(27_u64)) .saturating_add(T::DbWeight::get().writes(15_u64)), DispatchClass::Normal, Pays::Yes))] pub fn add_stake( origin: OriginFor, @@ -1054,8 +1054,8 @@ mod dispatches { /// The extrinsic for user to change its hotkey in subnet or all subnets. 
#[pallet::call_index(70)] #[pallet::weight((Weight::from_parts(275_300_000, 0) - .saturating_add(T::DbWeight::get().reads(52_u64)) - .saturating_add(T::DbWeight::get().writes(35_u64)), DispatchClass::Normal, Pays::No))] + .saturating_add(T::DbWeight::get().reads(57_u64)) + .saturating_add(T::DbWeight::get().writes(39_u64)), DispatchClass::Normal, Pays::No))] pub fn swap_hotkey( origin: OriginFor, hotkey: T::AccountId, @@ -1491,7 +1491,7 @@ mod dispatches { /// - Thrown if key has hit transaction rate limit #[pallet::call_index(84)] #[pallet::weight((Weight::from_parts(358_500_000, 0) - .saturating_add(T::DbWeight::get().reads(40_u64)) + .saturating_add(T::DbWeight::get().reads(44_u64)) .saturating_add(T::DbWeight::get().writes(24_u64)), DispatchClass::Normal, Pays::Yes))] pub fn unstake_all_alpha(origin: OriginFor, hotkey: T::AccountId) -> DispatchResult { Self::do_unstake_all_alpha(origin, hotkey) @@ -1520,7 +1520,7 @@ mod dispatches { /// #[pallet::call_index(85)] #[pallet::weight((Weight::from_parts(164_300_000, 0) - .saturating_add(T::DbWeight::get().reads(15_u64)) + .saturating_add(T::DbWeight::get().reads(19_u64)) .saturating_add(T::DbWeight::get().writes(7_u64)), DispatchClass::Normal, Pays::Yes))] pub fn move_stake( origin: T::RuntimeOrigin, @@ -1563,7 +1563,7 @@ mod dispatches { /// May emit a `StakeTransferred` event on success. #[pallet::call_index(86)] #[pallet::weight((Weight::from_parts(160_300_000, 0) - .saturating_add(T::DbWeight::get().reads(13_u64)) + .saturating_add(T::DbWeight::get().reads(16_u64)) .saturating_add(T::DbWeight::get().writes(6_u64)), DispatchClass::Normal, Pays::Yes))] pub fn transfer_stake( origin: T::RuntimeOrigin, @@ -1605,7 +1605,7 @@ mod dispatches { #[pallet::call_index(87)] #[pallet::weight(( Weight::from_parts(351_300_000, 0) - .saturating_add(T::DbWeight::get().reads(36_u64)) + .saturating_add(T::DbWeight::get().reads(40_u64)) .saturating_add(T::DbWeight::get().writes(22_u64)), DispatchClass::Normal, Pays::Yes @@ -1670,7 +1670,7 @@ mod dispatches { /// #[pallet::call_index(88)] #[pallet::weight((Weight::from_parts(402_900_000, 0) - .saturating_add(T::DbWeight::get().reads(25_u64)) + .saturating_add(T::DbWeight::get().reads(27_u64)) .saturating_add(T::DbWeight::get().writes(15_u64)), DispatchClass::Normal, Pays::Yes))] pub fn add_stake_limit( origin: OriginFor, @@ -1735,7 +1735,7 @@ mod dispatches { /// #[pallet::call_index(89)] #[pallet::weight((Weight::from_parts(377_400_000, 0) - .saturating_add(T::DbWeight::get().reads(28_u64)) + .saturating_add(T::DbWeight::get().reads(30_u64)) .saturating_add(T::DbWeight::get().writes(13_u64)), DispatchClass::Normal, Pays::Yes))] pub fn remove_stake_limit( origin: OriginFor, @@ -1779,7 +1779,7 @@ mod dispatches { #[pallet::call_index(90)] #[pallet::weight(( Weight::from_parts(411_500_000, 0) - .saturating_add(T::DbWeight::get().reads(36_u64)) + .saturating_add(T::DbWeight::get().reads(40_u64)) .saturating_add(T::DbWeight::get().writes(22_u64)), DispatchClass::Normal, Pays::Yes @@ -1901,7 +1901,7 @@ mod dispatches { /// Emits a `TokensRecycled` event on success. #[pallet::call_index(101)] #[pallet::weight(( - Weight::from_parts(113_400_000, 0).saturating_add(T::DbWeight::get().reads_writes(7, 4)), + Weight::from_parts(113_400_000, 0).saturating_add(T::DbWeight::get().reads_writes(9, 4)), DispatchClass::Normal, Pays::Yes ))] @@ -1926,7 +1926,7 @@ mod dispatches { /// Emits a `TokensBurned` event on success. 
#[pallet::call_index(102)] #[pallet::weight(( - Weight::from_parts(112_200_000, 0).saturating_add(T::DbWeight::get().reads_writes(7, 3)), + Weight::from_parts(112_200_000, 0).saturating_add(T::DbWeight::get().reads_writes(9, 3)), DispatchClass::Normal, Pays::Yes ))] @@ -1957,7 +1957,7 @@ mod dispatches { /// Without limit_price it remove all the stake similar to `remove_stake` extrinsic #[pallet::call_index(103)] #[pallet::weight((Weight::from_parts(395_300_000, 10142) - .saturating_add(T::DbWeight::get().reads(28_u64)) + .saturating_add(T::DbWeight::get().reads(30_u64)) .saturating_add(T::DbWeight::get().writes(13_u64)), DispatchClass::Normal, Pays::Yes))] pub fn remove_stake_full_limit( origin: T::RuntimeOrigin, @@ -2233,7 +2233,7 @@ mod dispatches { #[pallet::call_index(121)] #[pallet::weight(( Weight::from_parts(117_000_000, 7767) - .saturating_add(T::DbWeight::get().reads(12_u64)) + .saturating_add(T::DbWeight::get().reads(16_u64)) .saturating_add(T::DbWeight::get().writes(4_u64)), DispatchClass::Normal, Pays::Yes @@ -2555,7 +2555,7 @@ mod dispatches { #[pallet::call_index(132)] #[pallet::weight(( Weight::from_parts(368_000_000, 8556) - .saturating_add(T::DbWeight::get().reads(28_u64)) + .saturating_add(T::DbWeight::get().reads(30_u64)) .saturating_add(T::DbWeight::get().writes(16_u64)), DispatchClass::Normal, Pays::Yes From 59736cbf0b35753f75008c3754bf379a75351017 Mon Sep 17 00:00:00 2001 From: Aliaksandr Tsurko Date: Wed, 11 Mar 2026 19:06:48 +0100 Subject: [PATCH 07/13] Propagate history-backfill down to import_block --- node/src/cli.rs | 20 ++++-- node/src/clone_spec.rs | 22 ++++-- node/src/command.rs | 87 ++++++++++++++--------- node/src/conditional_evm_block_import.rs | 9 +-- node/src/consensus/aura_consensus.rs | 3 +- node/src/consensus/babe_consensus.rs | 3 +- node/src/consensus/consensus_mechanism.rs | 2 +- node/src/consensus/hybrid_import_queue.rs | 12 ++-- node/src/lib.rs | 1 - node/src/main.rs | 1 - node/src/service.rs | 14 +++- node/src/sync_options.rs | 13 ---- 12 files changed, 110 insertions(+), 77 deletions(-) delete mode 100644 node/src/sync_options.rs diff --git a/node/src/cli.rs b/node/src/cli.rs index a29d6852c0..9f2ef206c0 100644 --- a/node/src/cli.rs +++ b/node/src/cli.rs @@ -129,15 +129,20 @@ pub struct CloneStateCmd { #[arg(long, value_name = "BOOTNODE")] pub bootnodes: Vec, - /// Include Alice in patched validator authorities (default if no validator flags are passed; Sudo is assigned to the first selected validator in Alice->Bob->Charlie order). + /// Include Alice in patched validator authorities (default if no validator flags are passed; + /// Sudo is assigned to the first selected validator in Alice->Bob->Charlie order). #[arg(long, default_value_t = false)] pub alice: bool, - /// Include Bob in patched validator authorities (if any validator flag is set, only selected validators are used; Sudo is assigned to the first selected validator in Alice->Bob->Charlie order). + /// Include Bob in patched validator authorities (if any validator flag is set, only selected + /// validators are used; Sudo is assigned to the first selected validator in Alice->Bob->Charlie + /// order). #[arg(long, default_value_t = false)] pub bob: bool, - /// Include Charlie in patched validator authorities (if any validator flag is set, only selected validators are used; Sudo is assigned to the first selected validator in Alice->Bob->Charlie order). 
+ /// Include Charlie in patched validator authorities (if any validator flag is set, only + /// selected validators are used; Sudo is assigned to the first selected validator in + /// Alice->Bob->Charlie order). #[arg(long, default_value_t = false)] pub charlie: bool, } @@ -191,6 +196,7 @@ impl SupportedConsensusMechanism { &self, config: &mut Configuration, eth_config: &EthConfiguration, + skip_history_backfill: bool, ) -> Result< ( Arc, @@ -202,8 +208,12 @@ impl SupportedConsensusMechanism { sc_service::Error, > { match self { - SupportedConsensusMechanism::Aura => new_chain_ops::(config, eth_config), - SupportedConsensusMechanism::Babe => new_chain_ops::(config, eth_config), + SupportedConsensusMechanism::Aura => { + new_chain_ops::(config, eth_config, skip_history_backfill) + } + SupportedConsensusMechanism::Babe => { + new_chain_ops::(config, eth_config, skip_history_backfill) + } } } } diff --git a/node/src/clone_spec.rs b/node/src/clone_spec.rs index c40dd6b980..016aaaf29f 100644 --- a/node/src/clone_spec.rs +++ b/node/src/clone_spec.rs @@ -13,7 +13,7 @@ use jsonrpsee::{ use serde_json::{Value, json}; use sp_runtime::codec::Encode; -use crate::cli::{CloneStateCmd, HistoryBackfill}; +use crate::cli::CloneStateCmd; type CloneResult = Result>; @@ -21,7 +21,7 @@ const RPC_POLL_INTERVAL: Duration = Duration::from_secs(2); const GRANDPA_AUTHORITIES_WELL_KNOWN_KEY: &[u8] = b":grandpa_authorities"; /// Execute `build-test-clone`: sync network state, export raw chainspec, apply clone patch. -pub fn run(cmd: &CloneStateCmd, history_backfill: HistoryBackfill) -> sc_cli::Result<()> { +pub fn run(cmd: &CloneStateCmd, skip_history_backfill: bool) -> sc_cli::Result<()> { let runtime = tokio::runtime::Builder::new_current_thread() .enable_io() .enable_time() @@ -29,11 +29,11 @@ pub fn run(cmd: &CloneStateCmd, history_backfill: HistoryBackfill) -> sc_cli::Re .map_err(|err| sc_cli::Error::Application(Box::new(err)))?; runtime - .block_on(async_run(cmd, history_backfill)) + .block_on(async_run(cmd, skip_history_backfill)) .map_err(sc_cli::Error::Application) } -async fn async_run(cmd: &CloneStateCmd, history_backfill: HistoryBackfill) -> CloneResult<()> { +async fn async_run(cmd: &CloneStateCmd, skip_history_backfill: bool) -> CloneResult<()> { let validators = selected_validators(cmd); let selected_names = validators .iter() @@ -54,7 +54,11 @@ async fn async_run(cmd: &CloneStateCmd, history_backfill: HistoryBackfill) -> Cl log::info!( "build-test-clone: validators={} history_backfill={}", selected_names, - history_backfill + if skip_history_backfill { + "skip" + } else { + "keep" + } ); let mut sync_args = vec![ @@ -78,7 +82,11 @@ async fn async_run(cmd: &CloneStateCmd, history_backfill: HistoryBackfill) -> Cl "--name".to_string(), "build-test-clone-sync".to_string(), "--history-backfill".to_string(), - history_backfill.to_string(), + if skip_history_backfill { + "skip".to_string() + } else { + "keep".to_string() + }, ]; for bootnode in &cmd.bootnodes { @@ -503,7 +511,7 @@ mod tests { let cmd = default_cmd(); let selected = selected_validators(&cmd); assert_eq!(selected.len(), 1); - assert_eq!(selected[0], "Alice"); + assert_eq!(selected.first(), Some(&"Alice")); } #[test] diff --git a/node/src/command.rs b/node/src/command.rs index ca4dc94908..22280caf98 100644 --- a/node/src/command.rs +++ b/node/src/command.rs @@ -5,7 +5,7 @@ use crate::{ cli::{Cli, HistoryBackfill, Subcommand, SupportedConsensusMechanism}, consensus::BabeConsensus, ethereum::db_config_dir, - service, sync_options, + service, }; 
use fc_db::{DatabaseSource, kv::frontier_database_dir}; @@ -62,8 +62,7 @@ pub fn run() -> sc_cli::Result<()> { let cmd = Cli::command(); let arg_matches = cmd.get_matches(); let cli = Cli::from_arg_matches(&arg_matches)?; - let history_backfill = effective_history_backfill(&cli, &arg_matches); - sync_options::set_skip_history_backfill(matches!(history_backfill, HistoryBackfill::Skip)); + let skip_history_backfill = resolve_skip_history_backfill(&cli, &arg_matches); match &cli.subcommand { Some(Subcommand::Key(cmd)) => cmd.run(&cli), @@ -74,32 +73,40 @@ pub fn run() -> sc_cli::Result<()> { Some(Subcommand::CheckBlock(cmd)) => { let runner = cli.create_runner(cmd)?; runner.async_run(|mut config| { - let (client, _, import_queue, task_manager, _) = - cli.initial_consensus.new_chain_ops(&mut config, &cli.eth)?; + let (client, _, import_queue, task_manager, _) = cli + .initial_consensus + .new_chain_ops(&mut config, &cli.eth, skip_history_backfill)?; Ok((cmd.run(client, import_queue), task_manager)) }) } Some(Subcommand::ExportBlocks(cmd)) => { let runner = cli.create_runner(cmd)?; runner.async_run(|mut config| { - let (client, _, _, task_manager, _) = - cli.initial_consensus.new_chain_ops(&mut config, &cli.eth)?; + let (client, _, _, task_manager, _) = cli.initial_consensus.new_chain_ops( + &mut config, + &cli.eth, + skip_history_backfill, + )?; Ok((cmd.run(client, config.database), task_manager)) }) } Some(Subcommand::ExportState(cmd)) => { let runner = cli.create_runner(cmd)?; runner.async_run(|mut config| { - let (client, _, _, task_manager, _) = - cli.initial_consensus.new_chain_ops(&mut config, &cli.eth)?; + let (client, _, _, task_manager, _) = cli.initial_consensus.new_chain_ops( + &mut config, + &cli.eth, + skip_history_backfill, + )?; Ok((cmd.run(client, config.chain_spec), task_manager)) }) } Some(Subcommand::ImportBlocks(cmd)) => { let runner = cli.create_runner(cmd)?; runner.async_run(|mut config| { - let (client, _, import_queue, task_manager, _) = - cli.initial_consensus.new_chain_ops(&mut config, &cli.eth)?; + let (client, _, import_queue, task_manager, _) = cli + .initial_consensus + .new_chain_ops(&mut config, &cli.eth, skip_history_backfill)?; Ok((cmd.run(client, import_queue), task_manager)) }) } @@ -152,8 +159,11 @@ pub fn run() -> sc_cli::Result<()> { Some(Subcommand::Revert(cmd)) => { let runner = cli.create_runner(cmd)?; runner.async_run(|mut config| { - let (client, backend, _, task_manager, _) = - cli.initial_consensus.new_chain_ops(&mut config, &cli.eth)?; + let (client, backend, _, task_manager, _) = cli.initial_consensus.new_chain_ops( + &mut config, + &cli.eth, + skip_history_backfill, + )?; let aux_revert = Box::new(move |client, _, blocks| { sc_consensus_grandpa::revert(client, blocks)?; Ok(()) @@ -238,21 +248,21 @@ pub fn run() -> sc_cli::Result<()> { Some(Subcommand::CloneState(cmd)) => { let runner = cli.create_runner(&cli.run)?; let cmd = cmd.clone(); - runner.sync_run(move |_| crate::clone_spec::run(&cmd, history_backfill)) + runner.sync_run(move |_| crate::clone_spec::run(&cmd, skip_history_backfill)) } // Start with the initial consensus type asked. 
- None => { - let arg_matches = Cli::command().get_matches(); - let cli = Cli::from_args(); - match cli.initial_consensus { - SupportedConsensusMechanism::Babe => start_babe_service(&arg_matches), - SupportedConsensusMechanism::Aura => start_aura_service(&arg_matches), + None => match cli.initial_consensus { + SupportedConsensusMechanism::Babe => { + start_babe_service(&arg_matches, skip_history_backfill) } - } + SupportedConsensusMechanism::Aura => { + start_aura_service(&arg_matches, skip_history_backfill) + } + }, } } -fn effective_history_backfill(cli: &Cli, arg_matches: &ArgMatches) -> HistoryBackfill { +fn resolve_skip_history_backfill(cli: &Cli, arg_matches: &ArgMatches) -> bool { // We keep a single global `--history-backfill` flag, but `build-test-clone` should default to // `skip` when the operator didn't set the flag explicitly. This preserves `keep` as the default // for normal node runs. @@ -260,22 +270,29 @@ fn effective_history_backfill(cli: &Cli, arg_matches: &ArgMatches) -> HistoryBac arg_matches.value_source("history_backfill"), Some(ValueSource::CommandLine) ) { - return cli.history_backfill; + return matches!(cli.history_backfill, HistoryBackfill::Skip); } - match &cli.subcommand { - Some(Subcommand::CloneState(_)) => HistoryBackfill::Skip, - _ => HistoryBackfill::Keep, - } + matches!(&cli.subcommand, Some(Subcommand::CloneState(_))) } #[allow(clippy::expect_used)] -fn start_babe_service(arg_matches: &ArgMatches) -> Result<(), sc_cli::Error> { +fn start_babe_service( + arg_matches: &ArgMatches, + skip_history_backfill: bool, +) -> Result<(), sc_cli::Error> { let cli = Cli::from_arg_matches(arg_matches).expect("Bad arg_matches"); let runner = cli.create_runner(&cli.run)?; match runner.run_node_until_exit(|config| async move { let config = customise_config(arg_matches, config); - service::build_full::(config, cli.eth, cli.sealing, None).await + service::build_full::( + config, + cli.eth, + cli.sealing, + None, + skip_history_backfill, + ) + .await }) { Ok(_) => Ok(()), Err(e) => { @@ -288,7 +305,7 @@ fn start_babe_service(arg_matches: &ArgMatches) -> Result<(), sc_cli::Error> { log::info!( "💡 Chain is using Aura consensus. Switching to Aura service until Babe block is detected.", ); - start_aura_service(arg_matches) + start_aura_service(arg_matches, skip_history_backfill) // Handle Aura service still has DB lock. This never has been observed to take more // than 1s to drop. } else if matches!(e, sc_service::Error::Client(sp_blockchain::Error::Backend(ref msg)) @@ -296,7 +313,7 @@ fn start_babe_service(arg_matches: &ArgMatches) -> Result<(), sc_cli::Error> { { log::info!("Failed to aquire DB lock, trying again in 1s..."); std::thread::sleep(std::time::Duration::from_secs(1)); - start_babe_service(arg_matches) + start_babe_service(arg_matches, skip_history_backfill) // Unknown error, return it. 
} else { log::error!("Failed to start Babe service: {e:?}"); @@ -307,7 +324,10 @@ fn start_babe_service(arg_matches: &ArgMatches) -> Result<(), sc_cli::Error> { } #[allow(clippy::expect_used)] -fn start_aura_service(arg_matches: &ArgMatches) -> Result<(), sc_cli::Error> { +fn start_aura_service( + arg_matches: &ArgMatches, + skip_history_backfill: bool, +) -> Result<(), sc_cli::Error> { let cli = Cli::from_arg_matches(arg_matches).expect("Bad arg_matches"); let runner = cli.create_runner(&cli.run)?; @@ -325,13 +345,14 @@ fn start_aura_service(arg_matches: &ArgMatches) -> Result<(), sc_cli::Error> { cli.eth, cli.sealing, Some(custom_service_signal_clone), + skip_history_backfill, ) .await }) { Ok(()) => Ok(()), Err(e) => { if custom_service_signal.load(std::sync::atomic::Ordering::Relaxed) { - start_babe_service(arg_matches) + start_babe_service(arg_matches, skip_history_backfill) } else { Err(e.into()) } diff --git a/node/src/conditional_evm_block_import.rs b/node/src/conditional_evm_block_import.rs index def7606297..60cf0da20e 100644 --- a/node/src/conditional_evm_block_import.rs +++ b/node/src/conditional_evm_block_import.rs @@ -6,6 +6,7 @@ use std::marker::PhantomData; pub struct ConditionalEVMBlockImport { inner: I, frontier_block_import: F, + skip_history_backfill: bool, _marker: PhantomData, } @@ -19,6 +20,7 @@ where ConditionalEVMBlockImport { inner: self.inner.clone(), frontier_block_import: self.frontier_block_import.clone(), + skip_history_backfill: self.skip_history_backfill, _marker: PhantomData, } } @@ -32,10 +34,11 @@ where F: BlockImport, F::Error: Into, { - pub fn new(inner: I, frontier_block_import: F) -> Self { + pub fn new(inner: I, frontier_block_import: F, skip_history_backfill: bool) -> Self { Self { inner, frontier_block_import, + skip_history_backfill, _marker: PhantomData, } } @@ -60,9 +63,7 @@ where &self, mut block: BlockImportParams, ) -> Result { - if crate::sync_options::skip_history_backfill() - && matches!(block.origin, BlockOrigin::NetworkInitialSync) - { + if self.skip_history_backfill && matches!(block.origin, BlockOrigin::NetworkInitialSync) { block.create_gap = false; } // 4345556 - mainnet runtime upgrade block with Frontier diff --git a/node/src/consensus/aura_consensus.rs b/node/src/consensus/aura_consensus.rs index ce34e8125a..74ec8fea1e 100644 --- a/node/src/consensus/aura_consensus.rs +++ b/node/src/consensus/aura_consensus.rs @@ -139,7 +139,7 @@ impl ConsensusMechanism for AuraConsensus { Self {} } - fn build_biq(&mut self) -> Result, sc_service::Error> + fn build_biq(&mut self, skip_history_backfill: bool) -> Result, sc_service::Error> where NumberFor: BlockNumberOps, { @@ -157,6 +157,7 @@ impl ConsensusMechanism for AuraConsensus { client.clone(), grandpa_block_import.clone(), expected_babe_config.clone(), + skip_history_backfill, ); let slot_duration = sc_consensus_aura::slot_duration(&*client)?; diff --git a/node/src/consensus/babe_consensus.rs b/node/src/consensus/babe_consensus.rs index 4f84cbb87b..fad204fb48 100644 --- a/node/src/consensus/babe_consensus.rs +++ b/node/src/consensus/babe_consensus.rs @@ -152,7 +152,7 @@ impl ConsensusMechanism for BabeConsensus { } } - fn build_biq(&mut self) -> Result, sc_service::Error> + fn build_biq(&mut self, skip_history_backfill: bool) -> Result, sc_service::Error> where NumberFor: BlockNumberOps, { @@ -188,6 +188,7 @@ impl ConsensusMechanism for BabeConsensus { let conditional_block_import = ConditionalEVMBlockImport::new( babe_import.clone(), FrontierBlockImport::new(babe_import.clone(), 
client.clone()), + skip_history_backfill, ); let slot_duration = babe_link.config().slot_duration(); diff --git a/node/src/consensus/consensus_mechanism.rs b/node/src/consensus/consensus_mechanism.rs index 9fd8cad63b..41cb2fb4a8 100644 --- a/node/src/consensus/consensus_mechanism.rs +++ b/node/src/consensus/consensus_mechanism.rs @@ -78,7 +78,7 @@ pub trait ConsensusMechanism { fn new() -> Self; /// Builds a `BIQ` that uses the ConsensusMechanism. - fn build_biq(&mut self) -> Result, sc_service::Error>; + fn build_biq(&mut self, skip_history_backfill: bool) -> Result, sc_service::Error>; /// Returns the slot duration. fn slot_duration(&self, client: &FullClient) -> Result; diff --git a/node/src/consensus/hybrid_import_queue.rs b/node/src/consensus/hybrid_import_queue.rs index 04de9a59c9..342d67dbc1 100644 --- a/node/src/consensus/hybrid_import_queue.rs +++ b/node/src/consensus/hybrid_import_queue.rs @@ -29,7 +29,6 @@ use sp_api::ProvideRuntimeApi; use sp_block_builder::BlockBuilder as BlockBuilderApi; use sp_blockchain::HeaderBackend; use sp_blockchain::HeaderMetadata; -use sp_consensus::BlockOrigin; use sp_consensus::SelectChain; use sp_consensus::error::Error as ConsensusError; use sp_consensus_aura::AuraApi; @@ -72,10 +71,12 @@ impl HybridBlockImport { client: Arc, grandpa_block_import: GrandpaBlockImport, babe_config: BabeConfiguration, + skip_history_backfill: bool, ) -> Self { let inner_aura = ConditionalEVMBlockImport::new( grandpa_block_import.clone(), FrontierBlockImport::new(grandpa_block_import.clone(), client.clone()), + skip_history_backfill, ); #[allow(clippy::expect_used)] @@ -89,6 +90,7 @@ impl HybridBlockImport { let inner_babe = ConditionalEVMBlockImport::new( babe_import.clone(), FrontierBlockImport::new(babe_import.clone(), client.clone()), + skip_history_backfill, ); HybridBlockImport { @@ -120,14 +122,8 @@ impl BlockImport for HybridBlockImport { async fn import_block( &self, - mut block: BlockImportParams, + block: BlockImportParams, ) -> Result { - // Clone mode can opt into skipping history-gap creation during catch-up. 
- if crate::sync_options::skip_history_backfill() - && matches!(block.origin, BlockOrigin::NetworkInitialSync) - { - block.create_gap = false; - } if is_babe_digest(block.header.digest()) { self.inner_babe .import_block(block) diff --git a/node/src/lib.rs b/node/src/lib.rs index f1c73cc339..4740155f5e 100644 --- a/node/src/lib.rs +++ b/node/src/lib.rs @@ -7,4 +7,3 @@ pub mod consensus; pub mod ethereum; pub mod rpc; pub mod service; -pub mod sync_options; diff --git a/node/src/main.rs b/node/src/main.rs index 2db3f22f42..2766b93054 100644 --- a/node/src/main.rs +++ b/node/src/main.rs @@ -13,7 +13,6 @@ mod consensus; mod ethereum; mod rpc; mod service; -mod sync_options; fn main() -> sc_cli::Result<()> { command::run() diff --git a/node/src/service.rs b/node/src/service.rs index f33931d210..7a745b6c3f 100644 --- a/node/src/service.rs +++ b/node/src/service.rs @@ -241,6 +241,7 @@ pub fn build_manual_seal_import_queue( crate::conditional_evm_block_import::ConditionalEVMBlockImport::new( grandpa_block_import.clone(), fc_consensus::FrontierBlockImport::new(grandpa_block_import.clone(), client.clone()), + false, ); Ok(( sc_consensus_manual_seal::import_queue( @@ -259,6 +260,7 @@ pub async fn new_full( eth_config: EthConfiguration, sealing: Option, custom_service_signal: Option>, + skip_history_backfill: bool, ) -> Result where NumberFor: BlockNumberOps, @@ -275,7 +277,7 @@ where } let mut consensus_mechanism = CM::new(); - let build_import_queue = consensus_mechanism.build_biq()?; + let build_import_queue = consensus_mechanism.build_biq(skip_history_backfill)?; let PartialComponents { client, @@ -660,6 +662,7 @@ pub async fn build_full( eth_config: EthConfiguration, sealing: Option, custom_service_signal: Option>, + skip_history_backfill: bool, ) -> Result { match config.network.network_backend { sc_network::config::NetworkBackendType::Libp2p => { @@ -668,6 +671,7 @@ pub async fn build_full( eth_config, sealing, custom_service_signal, + skip_history_backfill, ) .await } @@ -677,6 +681,7 @@ pub async fn build_full( eth_config, sealing, custom_service_signal, + skip_history_backfill, ) .await } @@ -686,6 +691,7 @@ pub async fn build_full( pub fn new_chain_ops( config: &mut Configuration, eth_config: &EthConfiguration, + skip_history_backfill: bool, ) -> Result< ( Arc, @@ -705,7 +711,11 @@ pub fn new_chain_ops( task_manager, other, .. - } = new_partial(config, eth_config, consensus_mechanism.build_biq()?)?; + } = new_partial( + config, + eth_config, + consensus_mechanism.build_biq(skip_history_backfill)?, + )?; Ok((client, backend, import_queue, task_manager, other.3)) } diff --git a/node/src/sync_options.rs b/node/src/sync_options.rs deleted file mode 100644 index ec5ab40c58..0000000000 --- a/node/src/sync_options.rs +++ /dev/null @@ -1,13 +0,0 @@ -use std::sync::atomic::{AtomicBool, Ordering}; - -static SKIP_HISTORY_BACKFILL: AtomicBool = AtomicBool::new(false); - -/// Enable or disable history backfill skipping for initial sync imports. -pub fn set_skip_history_backfill(enabled: bool) { - SKIP_HISTORY_BACKFILL.store(enabled, Ordering::Relaxed); -} - -/// Returns whether initial-sync imports should avoid creating history gaps. 
-pub fn skip_history_backfill() -> bool { - SKIP_HISTORY_BACKFILL.load(Ordering::Relaxed) -} From d4ea945c93df2f2e3a49b622f445b47aa0aabf4c Mon Sep 17 00:00:00 2001 From: Aliaksandr Tsurko Date: Thu, 12 Mar 2026 14:12:03 +0100 Subject: [PATCH 08/13] Add a comment where we false filling block gaps --- node/src/conditional_evm_block_import.rs | 4 ++++ 1 file changed, 4 insertions(+) diff --git a/node/src/conditional_evm_block_import.rs b/node/src/conditional_evm_block_import.rs index 60cf0da20e..cc09a4d66b 100644 --- a/node/src/conditional_evm_block_import.rs +++ b/node/src/conditional_evm_block_import.rs @@ -64,6 +64,10 @@ where mut block: BlockImportParams, ) -> Result { if self.skip_history_backfill && matches!(block.origin, BlockOrigin::NetworkInitialSync) { + // During initial network sync, Substrate can mark missing historical ranges as "gaps" + // (`create_gap = true`) and then backfill them later. When history backfill is set to + // `skip`, we disable gap creation so no history reconstruction work is scheduled. + // `build-test-clone` just defaults this setting to `skip`. block.create_gap = false; } // 4345556 - mainnet runtime upgrade block with Frontier From a84b62a0e62697f61e525a0526333e1d3b200ae5 Mon Sep 17 00:00:00 2001 From: Aliaksandr Tsurko Date: Thu, 12 Mar 2026 14:15:26 +0100 Subject: [PATCH 09/13] Rename build-test-clone to build-patched-spec --- node/src/cli.rs | 6 +++--- node/src/clone_spec.rs | 20 ++++++++++---------- node/src/command.rs | 2 +- node/src/conditional_evm_block_import.rs | 8 ++++---- 4 files changed, 18 insertions(+), 18 deletions(-) diff --git a/node/src/cli.rs b/node/src/cli.rs index 9f2ef206c0..e7719b619c 100644 --- a/node/src/cli.rs +++ b/node/src/cli.rs @@ -40,7 +40,7 @@ pub struct Cli { /// /// `keep` preserves complete history (default for normal node runs). /// `skip` is faster/lighter but historical block data may be incomplete. - /// For `build-test-clone`, the implicit default is `skip` unless this flag is explicitly set. + /// For `build-patched-spec`, the implicit default is `skip` unless this flag is explicitly set. #[arg(long, value_enum, default_value_t = HistoryBackfill::Keep)] pub history_backfill: HistoryBackfill, } @@ -81,8 +81,8 @@ pub enum Subcommand { // Db meta columns information. ChainInfo(sc_cli::ChainInfoCmd), - // Build a patched test clone chainspec from synced network state. - #[command(name = "build-test-clone")] + // Build a patched test chainspec from synced network state. + #[command(name = "build-patched-spec")] CloneState(CloneStateCmd), } diff --git a/node/src/clone_spec.rs b/node/src/clone_spec.rs index 016aaaf29f..4619335f2d 100644 --- a/node/src/clone_spec.rs +++ b/node/src/clone_spec.rs @@ -20,7 +20,7 @@ type CloneResult = Result>; const RPC_POLL_INTERVAL: Duration = Duration::from_secs(2); const GRANDPA_AUTHORITIES_WELL_KNOWN_KEY: &[u8] = b":grandpa_authorities"; -/// Execute `build-test-clone`: sync network state, export raw chainspec, apply clone patch. +/// Execute `build-patched-spec`: sync network state, export raw chainspec, apply clone patch. 
pub fn run(cmd: &CloneStateCmd, skip_history_backfill: bool) -> sc_cli::Result<()> { let runtime = tokio::runtime::Builder::new_current_thread() .enable_io() @@ -52,7 +52,7 @@ async fn async_run(cmd: &CloneStateCmd, skip_history_backfill: bool) -> CloneRes let sync_arg = sync_arg(cmd.sync); log::info!( - "build-test-clone: validators={} history_backfill={}", + "build-patched-spec: validators={} history_backfill={}", selected_names, if skip_history_backfill { "skip" @@ -80,7 +80,7 @@ async fn async_run(cmd: &CloneStateCmd, skip_history_backfill: bool) -> CloneRes "--no-prometheus".to_string(), "--no-mdns".to_string(), "--name".to_string(), - "build-test-clone-sync".to_string(), + "build-patched-spec-sync".to_string(), "--history-backfill".to_string(), if skip_history_backfill { "skip".to_string() @@ -94,7 +94,7 @@ async fn async_run(cmd: &CloneStateCmd, skip_history_backfill: bool) -> CloneRes sync_args.push(bootnode.clone()); } - log::info!("build-test-clone: starting sync node"); + log::info!("build-patched-spec: starting sync node"); let mut sync_child = Command::new(¤t_exe) .args(&sync_args) @@ -111,22 +111,22 @@ async fn async_run(cmd: &CloneStateCmd, skip_history_backfill: bool) -> CloneRes let raw_tmp = temp_raw_path()?; - log::info!("build-test-clone: exporting raw state"); + log::info!("build-patched-spec: exporting raw state"); export_raw_state(¤t_exe, cmd, database_arg, &raw_tmp)?; - log::info!("build-test-clone: applying clone patch"); + log::info!("build-patched-spec: applying clone patch"); patch_raw_chainspec_file(&raw_tmp, &cmd.output, &validators)?; if let Err(err) = fs::remove_file(&raw_tmp) { log::warn!( - "build-test-clone: warning: failed to remove temp file {}: {err}", + "build-patched-spec: warning: failed to remove temp file {}: {err}", raw_tmp.display() ); } - log::info!("build-test-clone: wrote {}", cmd.output.display()); + log::info!("build-patched-spec: wrote {}", cmd.output.display()); Ok(()) } @@ -141,7 +141,7 @@ async fn wait_for_sync_completion(sync_child: &mut Child, cmd: &CloneStateCmd) - .build(rpc_url)?; log::info!( - "build-test-clone: waiting for sync completion (timeout={}s)", + "build-patched-spec: waiting for sync completion (timeout={}s)", cmd.sync_timeout_sec ); @@ -169,7 +169,7 @@ async fn wait_for_sync_completion(sync_child: &mut Child, cmd: &CloneStateCmd) - if is_ready { stable_ready_checks = stable_ready_checks.saturating_add(1); if stable_ready_checks >= 3 { - log::info!("build-test-clone: sync target reached"); + log::info!("build-patched-spec: sync target reached"); return Ok(()); } } else { diff --git a/node/src/command.rs b/node/src/command.rs index 22280caf98..fd3a122ec5 100644 --- a/node/src/command.rs +++ b/node/src/command.rs @@ -263,7 +263,7 @@ pub fn run() -> sc_cli::Result<()> { } fn resolve_skip_history_backfill(cli: &Cli, arg_matches: &ArgMatches) -> bool { - // We keep a single global `--history-backfill` flag, but `build-test-clone` should default to + // We keep a single global `--history-backfill` flag, but `build-patched-spec` should default to // `skip` when the operator didn't set the flag explicitly. This preserves `keep` as the default // for normal node runs. 
if matches!( diff --git a/node/src/conditional_evm_block_import.rs b/node/src/conditional_evm_block_import.rs index cc09a4d66b..0a69bdc090 100644 --- a/node/src/conditional_evm_block_import.rs +++ b/node/src/conditional_evm_block_import.rs @@ -64,10 +64,10 @@ where mut block: BlockImportParams, ) -> Result { if self.skip_history_backfill && matches!(block.origin, BlockOrigin::NetworkInitialSync) { - // During initial network sync, Substrate can mark missing historical ranges as "gaps" - // (`create_gap = true`) and then backfill them later. When history backfill is set to - // `skip`, we disable gap creation so no history reconstruction work is scheduled. - // `build-test-clone` just defaults this setting to `skip`. + // During initial network sync, Substrate can mark missing historical ranges as "gaps" + // (`create_gap = true`) and then backfill them later. When history backfill is set to + // `skip`, we disable gap creation so no history reconstruction work is scheduled. + // `build-patched-spec` just defaults this setting to `skip`. block.create_gap = false; } // 4345556 - mainnet runtime upgrade block with Frontier From 8d4c06b60bb57923d53b8833dfa67dbe09f139ad Mon Sep 17 00:00:00 2001 From: Aliaksandr Tsurko Date: Thu, 12 Mar 2026 14:17:54 +0100 Subject: [PATCH 10/13] Document clone_spec module --- node/src/clone_spec.rs | 16 ++++++++++++++++ 1 file changed, 16 insertions(+) diff --git a/node/src/clone_spec.rs b/node/src/clone_spec.rs index 4619335f2d..65489d24c6 100644 --- a/node/src/clone_spec.rs +++ b/node/src/clone_spec.rs @@ -1,3 +1,19 @@ +//! Build-and-patch workflow for producing a local test chainspec from live network state. +//! +//! This module implements the `build-patched-spec` subcommand scenario: +//! +//! 1. Start a temporary node and sync it to the requested chain. +//! 2. Wait until sync is considered stable (RPC-reported near-head status). +//! 3. Stop the temporary node and run `export-state` from the synced database. +//! 4. Apply patching to the raw chainspec: +//! - replace validator/authority sets with selected dev authorities, +//! - set Sudo to the first selected validator, +//! - clear session-derived keys and localize top-level chain fields. +//! 5. Write the final patched chainspec JSON to the requested output path. +//! +//! The result is intended for local/mainnet-clone style testing where runtime state is taken from a +//! live network, but governance/validator control is reassigned to test authorities. + use std::collections::VecDeque; use std::fs::{self, File}; use std::io::{BufReader, BufWriter}; From c873cad6288258f65dbf5983ecf3d50185a15ba5 Mon Sep 17 00:00:00 2001 From: Aliaksandr Tsurko Date: Thu, 12 Mar 2026 15:38:34 +0100 Subject: [PATCH 11/13] Bump spec version --- runtime/src/lib.rs | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/runtime/src/lib.rs b/runtime/src/lib.rs index f1231df83d..66ad48fa88 100644 --- a/runtime/src/lib.rs +++ b/runtime/src/lib.rs @@ -268,7 +268,7 @@ pub const VERSION: RuntimeVersion = RuntimeVersion { // `spec_version`, and `authoring_version` are the same between Wasm and native. // This value is set to 100 to notify Polkadot-JS App (https://polkadot.js.org/apps) to use // the compatible custom types. 
- spec_version: 389, + spec_version: 390, impl_version: 1, apis: RUNTIME_API_VERSIONS, transaction_version: 1, From 6147e98faffcc8f88c357d91a597b4e17c76eff8 Mon Sep 17 00:00:00 2001 From: "github-actions[bot]" Date: Thu, 12 Mar 2026 15:27:07 +0000 Subject: [PATCH 12/13] auto-update benchmark weights --- pallets/subtensor/src/macros/dispatches.rs | 32 +++++++++++----------- 1 file changed, 16 insertions(+), 16 deletions(-) diff --git a/pallets/subtensor/src/macros/dispatches.rs b/pallets/subtensor/src/macros/dispatches.rs index c9d80805ac..c050dbd755 100644 --- a/pallets/subtensor/src/macros/dispatches.rs +++ b/pallets/subtensor/src/macros/dispatches.rs @@ -711,7 +711,7 @@ mod dispatches { /// #[pallet::call_index(2)] #[pallet::weight((Weight::from_parts(340_800_000, 0) - .saturating_add(T::DbWeight::get().reads(27_u64)) + .saturating_add(T::DbWeight::get().reads(25_u64)) .saturating_add(T::DbWeight::get().writes(15_u64)), DispatchClass::Normal, Pays::Yes))] pub fn add_stake( origin: OriginFor, @@ -1054,8 +1054,8 @@ mod dispatches { /// The extrinsic for user to change its hotkey in subnet or all subnets. #[pallet::call_index(70)] #[pallet::weight((Weight::from_parts(275_300_000, 0) - .saturating_add(T::DbWeight::get().reads(57_u64)) - .saturating_add(T::DbWeight::get().writes(39_u64)), DispatchClass::Normal, Pays::No))] + .saturating_add(T::DbWeight::get().reads(52_u64)) + .saturating_add(T::DbWeight::get().writes(35_u64)), DispatchClass::Normal, Pays::No))] pub fn swap_hotkey( origin: OriginFor, hotkey: T::AccountId, @@ -1491,7 +1491,7 @@ mod dispatches { /// - Thrown if key has hit transaction rate limit #[pallet::call_index(84)] #[pallet::weight((Weight::from_parts(358_500_000, 0) - .saturating_add(T::DbWeight::get().reads(44_u64)) + .saturating_add(T::DbWeight::get().reads(40_u64)) .saturating_add(T::DbWeight::get().writes(24_u64)), DispatchClass::Normal, Pays::Yes))] pub fn unstake_all_alpha(origin: OriginFor, hotkey: T::AccountId) -> DispatchResult { Self::do_unstake_all_alpha(origin, hotkey) @@ -1520,7 +1520,7 @@ mod dispatches { /// #[pallet::call_index(85)] #[pallet::weight((Weight::from_parts(164_300_000, 0) - .saturating_add(T::DbWeight::get().reads(19_u64)) + .saturating_add(T::DbWeight::get().reads(15_u64)) .saturating_add(T::DbWeight::get().writes(7_u64)), DispatchClass::Normal, Pays::Yes))] pub fn move_stake( origin: T::RuntimeOrigin, @@ -1563,7 +1563,7 @@ mod dispatches { /// May emit a `StakeTransferred` event on success. 
#[pallet::call_index(86)] #[pallet::weight((Weight::from_parts(160_300_000, 0) - .saturating_add(T::DbWeight::get().reads(16_u64)) + .saturating_add(T::DbWeight::get().reads(13_u64)) .saturating_add(T::DbWeight::get().writes(6_u64)), DispatchClass::Normal, Pays::Yes))] pub fn transfer_stake( origin: T::RuntimeOrigin, @@ -1605,7 +1605,7 @@ mod dispatches { #[pallet::call_index(87)] #[pallet::weight(( Weight::from_parts(351_300_000, 0) - .saturating_add(T::DbWeight::get().reads(40_u64)) + .saturating_add(T::DbWeight::get().reads(36_u64)) .saturating_add(T::DbWeight::get().writes(22_u64)), DispatchClass::Normal, Pays::Yes @@ -1670,7 +1670,7 @@ mod dispatches { /// #[pallet::call_index(88)] #[pallet::weight((Weight::from_parts(402_900_000, 0) - .saturating_add(T::DbWeight::get().reads(27_u64)) + .saturating_add(T::DbWeight::get().reads(25_u64)) .saturating_add(T::DbWeight::get().writes(15_u64)), DispatchClass::Normal, Pays::Yes))] pub fn add_stake_limit( origin: OriginFor, @@ -1735,7 +1735,7 @@ mod dispatches { /// #[pallet::call_index(89)] #[pallet::weight((Weight::from_parts(377_400_000, 0) - .saturating_add(T::DbWeight::get().reads(30_u64)) + .saturating_add(T::DbWeight::get().reads(28_u64)) .saturating_add(T::DbWeight::get().writes(13_u64)), DispatchClass::Normal, Pays::Yes))] pub fn remove_stake_limit( origin: OriginFor, @@ -1779,7 +1779,7 @@ mod dispatches { #[pallet::call_index(90)] #[pallet::weight(( Weight::from_parts(411_500_000, 0) - .saturating_add(T::DbWeight::get().reads(40_u64)) + .saturating_add(T::DbWeight::get().reads(36_u64)) .saturating_add(T::DbWeight::get().writes(22_u64)), DispatchClass::Normal, Pays::Yes @@ -1901,7 +1901,7 @@ mod dispatches { /// Emits a `TokensRecycled` event on success. #[pallet::call_index(101)] #[pallet::weight(( - Weight::from_parts(113_400_000, 0).saturating_add(T::DbWeight::get().reads_writes(9, 4)), + Weight::from_parts(113_400_000, 0).saturating_add(T::DbWeight::get().reads_writes(7_u64, 4)), DispatchClass::Normal, Pays::Yes ))] @@ -1926,7 +1926,7 @@ mod dispatches { /// Emits a `TokensBurned` event on success. 
#[pallet::call_index(102)] #[pallet::weight(( - Weight::from_parts(112_200_000, 0).saturating_add(T::DbWeight::get().reads_writes(9, 3)), + Weight::from_parts(112_200_000, 0).saturating_add(T::DbWeight::get().reads_writes(7_u64, 3)), DispatchClass::Normal, Pays::Yes ))] @@ -1957,7 +1957,7 @@ mod dispatches { /// Without limit_price it remove all the stake similar to `remove_stake` extrinsic #[pallet::call_index(103)] #[pallet::weight((Weight::from_parts(395_300_000, 10142) - .saturating_add(T::DbWeight::get().reads(30_u64)) + .saturating_add(T::DbWeight::get().reads(28_u64)) .saturating_add(T::DbWeight::get().writes(13_u64)), DispatchClass::Normal, Pays::Yes))] pub fn remove_stake_full_limit( origin: T::RuntimeOrigin, @@ -2233,7 +2233,7 @@ mod dispatches { #[pallet::call_index(121)] #[pallet::weight(( Weight::from_parts(117_000_000, 7767) - .saturating_add(T::DbWeight::get().reads(16_u64)) + .saturating_add(T::DbWeight::get().reads(12_u64)) .saturating_add(T::DbWeight::get().writes(4_u64)), DispatchClass::Normal, Pays::Yes @@ -2426,7 +2426,7 @@ mod dispatches { /// #[pallet::call_index(127)] #[pallet::weight( - Weight::from_parts(20_750_000, 0) + Weight::from_parts(30_810_000, 0) .saturating_add(T::DbWeight::get().reads(2_u64)) .saturating_add(T::DbWeight::get().writes(1_u64)) )] @@ -2555,7 +2555,7 @@ mod dispatches { #[pallet::call_index(132)] #[pallet::weight(( Weight::from_parts(368_000_000, 8556) - .saturating_add(T::DbWeight::get().reads(30_u64)) + .saturating_add(T::DbWeight::get().reads(28_u64)) .saturating_add(T::DbWeight::get().writes(16_u64)), DispatchClass::Normal, Pays::Yes From 7763927f4774dce6384b2550edc8168dd69cd449 Mon Sep 17 00:00:00 2001 From: Aliaksandr Tsurko Date: Thu, 12 Mar 2026 16:29:02 +0100 Subject: [PATCH 13/13] Bump spec version --- runtime/src/lib.rs | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/runtime/src/lib.rs b/runtime/src/lib.rs index 66ad48fa88..ef4a863ca0 100644 --- a/runtime/src/lib.rs +++ b/runtime/src/lib.rs @@ -268,7 +268,7 @@ pub const VERSION: RuntimeVersion = RuntimeVersion { // `spec_version`, and `authoring_version` are the same between Wasm and native. // This value is set to 100 to notify Polkadot-JS App (https://polkadot.js.org/apps) to use // the compatible custom types. - spec_version: 390, + spec_version: 391, impl_version: 1, apis: RUNTIME_API_VERSIONS, transaction_version: 1,
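
Note: a minimal, illustrative sketch of the weight-annotation pattern that PATCH 06 and
PATCH 12 adjust above. The `RocksDbWeight` backend and the standalone function below are
assumptions made for the example only (the real annotations use the runtime's configured
`T::DbWeight`); the point is that bumping `reads(25)` to `reads(27)` adds exactly two
per-read database weights to the extrinsic's declared weight.

    use frame_support::weights::{constants::RocksDbWeight, Weight};

    // Declared weight of `add_stake` as annotated before/after the patch: a fixed
    // ref_time base plus per-read and per-write database weights.
    fn add_stake_weight(reads: u64) -> Weight {
        Weight::from_parts(340_800_000, 0)
            .saturating_add(RocksDbWeight::get().reads(reads))
            .saturating_add(RocksDbWeight::get().writes(15_u64))
    }

    fn main() {
        let before = add_stake_weight(25);
        let after = add_stake_weight(27);
        // The difference is exactly two DB reads' worth of weight.
        assert_eq!(after.saturating_sub(before), RocksDbWeight::get().reads(2));
    }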