Skip to content
Merged
Show file tree
Hide file tree
Changes from all commits
Commits
File filter

Filter by extension

Filter by extension


Conversations
Failed to load comments.
Loading
Jump to
Jump to file
Failed to load files.
Loading
Diff view
Diff view
1 change: 1 addition & 0 deletions Cargo.lock

Some generated files are not rendered by default. Learn more about how customized files appear on GitHub.

3 changes: 2 additions & 1 deletion Cargo.toml
Original file line number Diff line number Diff line change
Expand Up @@ -122,6 +122,7 @@ cumulus-client-consensus-proposer ={ git = "https://github.com/paritytech/polkad
cumulus-client-consensus-aura = { git = "https://github.com/paritytech/polkadot-sdk", rev = "9136565addc23a552f6960a7581f13c8dfc651f1" }
cumulus-client-consensus-common ={ git = "https://github.com/paritytech/polkadot-sdk", rev = "9136565addc23a552f6960a7581f13c8dfc651f1" }
cumulus-client-service = { git = "https://github.com/paritytech/polkadot-sdk", rev = "9136565addc23a552f6960a7581f13c8dfc651f1" }
cumulus-client-parachain-inherent = { git = "https://github.com/paritytech/polkadot-sdk", rev = "9136565addc23a552f6960a7581f13c8dfc651f1", default-features = false }
cumulus-pallet-aura-ext = { git = "https://github.com/paritytech/polkadot-sdk", rev = "9136565addc23a552f6960a7581f13c8dfc651f1", default-features = false }
cumulus-pallet-dmp-queue = { git = "https://github.com/paritytech/polkadot-sdk", rev = "9136565addc23a552f6960a7581f13c8dfc651f1", default-features = false }
cumulus-pallet-parachain-system = { git = "https://github.com/paritytech/polkadot-sdk", rev = "9136565addc23a552f6960a7581f13c8dfc651f1", default-features = false }
Expand All @@ -136,4 +137,4 @@ cumulus-primitives-utility = { git = "https://github.com/paritytech/polkadot-sdk
cumulus-relay-chain-interface = { git = "https://github.com/paritytech/polkadot-sdk", rev = "9136565addc23a552f6960a7581f13c8dfc651f1" }
pallet-collator-selection = { git = "https://github.com/paritytech/polkadot-sdk", rev = "9136565addc23a552f6960a7581f13c8dfc651f1", default-features = false }
parachain-info = { git = "https://github.com/paritytech/polkadot-sdk", rev = "9136565addc23a552f6960a7581f13c8dfc651f1", package = "staging-parachain-info", default-features = false }
parachains-common = { git = "https://github.com/paritytech/polkadot-sdk", rev = "9136565addc23a552f6960a7581f13c8dfc651f1", default-features = false }
parachains-common = { git = "https://github.com/paritytech/polkadot-sdk", rev = "9136565addc23a552f6960a7581f13c8dfc651f1", default-features = false }
1 change: 1 addition & 0 deletions node/Cargo.toml
Original file line number Diff line number Diff line change
Expand Up @@ -81,6 +81,7 @@ cumulus-client-collator = { workspace = true }
cumulus-client-consensus-aura = { workspace = true }
cumulus-client-consensus-common = { workspace = true }
cumulus-client-consensus-proposer = { workspace = true }
cumulus-client-parachain-inherent = { workspace = true }
cumulus-client-service = { workspace = true }
cumulus-primitives-core = { workspace = true }
cumulus-primitives-parachain-inherent = { workspace = true }
Expand Down
17 changes: 17 additions & 0 deletions node/src/cli.rs
Original file line number Diff line number Diff line change
Expand Up @@ -80,6 +80,23 @@ pub struct Cli {
/// The number of seconds to delay before finalizing blocks.
#[arg(long, default_value_t = 1)]
pub finalize_delay_sec: u8,

/// Start a dev node with instant seal.
///
/// This is a dev option that enables instant sealing, meaning blocks are produced
/// immediately when transactions are received, rather than at fixed intervals.
/// Using this option won't result in starting or connecting to a parachain network.
/// The resulting node will work on its own, running the wasm blob and producing blocks
/// instantly upon receiving transactions.
#[arg(long)]
pub instant_seal: bool,
}

impl Cli {
	/// Whether the node should run in instant-seal dev mode.
	///
	/// Reports the value of the `--instant-seal` CLI flag; when set, the node
	/// authors a block as soon as a transaction reaches the pool instead of
	/// participating in a parachain network.
	pub fn is_instant_seal(&self) -> bool {
		self.instant_seal
	}
}

#[derive(Debug)]
Expand Down
12 changes: 12 additions & 0 deletions node/src/command.rs
Original file line number Diff line number Diff line change
Expand Up @@ -222,6 +222,8 @@ pub fn run() -> Result<()> {
let collator_options = cli.run.collator_options();

runner.run_node_until_exit(|config| async move {
// Check for solochain dev mode first (chain spec "Development")
// The solochain runtime doesn't have Aura, so it needs separate handling
if config.chain_spec.name() == "Development" {
return dev::new_full::<sc_network::NetworkWorker<_, _>>(
config,
Expand All @@ -231,6 +233,16 @@ pub fn run() -> Result<()> {
.map_err(sc_cli::Error::Service);
}

// Check for instant seal mode for parachain (requires parachain runtime with Aura)
if cli.is_instant_seal() {
return crate::service::start_dev_parachain_node(
config,
cli.finalize_delay_sec.into(),
)
.await
.map_err(sc_cli::Error::Service);
}

let hwbench = (!cli.no_hardware_benchmarks)
.then_some(config.database.path().map(|database_path| {
let _ = std::fs::create_dir_all(database_path);
Expand Down
272 changes: 271 additions & 1 deletion node/src/service.rs
Original file line number Diff line number Diff line change
Expand Up @@ -15,6 +15,7 @@ use cumulus_client_cli::CollatorOptions;
use cumulus_client_collator::service::CollatorService;
use cumulus_client_consensus_aura::collators::lookahead::{self as aura, Params as AuraParams};
use cumulus_client_consensus_common::ParachainBlockImport as TParachainBlockImport;
use cumulus_client_parachain_inherent::MockValidationDataInherentDataProvider;
use cumulus_client_service::{
build_network, build_relay_chain_interface, prepare_node_config, start_relay_chain_tasks,
BuildNetworkParams, CollatorSybilResistance, DARecoveryProfile, ParachainHostFunctions,
Expand All @@ -25,18 +26,24 @@ use cumulus_primitives_core::{
ParaId,
};
use cumulus_relay_chain_interface::{OverseerHandle, RelayChainInterface};
use polkadot_primitives::{HeadData, UpgradeGoAhead};

// Substrate Imports
use codec::Encode;
use cumulus_primitives_core::CollectCollationInfo;
use frame_benchmarking_cli::SUBSTRATE_REFERENCE_HARDWARE;
use prometheus_endpoint::Registry;
use sc_client_api::Backend;
use sc_consensus::ImportQueue;
use sc_consensus::{ImportQueue, LongestChain};
use sc_consensus_manual_seal::consensus::aura::AuraConsensusDataProvider;
use sc_executor::{HeapAllocStrategy, WasmExecutor, DEFAULT_HEAP_ALLOC_STRATEGY};
use sc_network::{NetworkBackend, NetworkBlock};
use sc_service::{Configuration, PartialComponents, TFullBackend, TFullClient, TaskManager};
use sc_telemetry::{Telemetry, TelemetryHandle, TelemetryWorker, TelemetryWorkerHandle};
use sc_transaction_pool_api::OffchainTransactionPoolFactory;
use sp_api::ProvideRuntimeApi;
use sp_keystore::KeystorePtr;
use sp_runtime::traits::UniqueSaturatedInto;

type ParachainExecutor = WasmExecutor<ParachainHostFunctions>;

Expand Down Expand Up @@ -406,3 +413,266 @@ pub async fn start_parachain_node(

Ok((task_manager, client))
}

/// Creates the inherent data providers for dev mode (instant/manual seal).
///
/// This function sets up the timestamp and parachain validation data providers
/// required for dev seal block production in a parachain environment without a relay chain.
///
/// # Parameters
/// - `client`: full client, used to look up the parent header and to query the
///   runtime API for pending validation-code upgrades.
/// - `para_id`: parachain id stamped into the mocked validation data.
/// - `slot_duration`: Aura slot duration, used to derive a strictly increasing
///   timestamp so the runtime's Aura slot checks accept the sealed blocks.
///
/// # Returns
/// A closure suitable as manual seal's `create_inherent_data_providers`: given a
/// parent hash (the second `()` argument carries no data) it returns a ready
/// future with `(timestamp provider, mocked validation-data provider)`.
fn create_dev_inherent_data_providers(
	client: Arc<ParachainClient>,
	para_id: ParaId,
	slot_duration: sp_consensus_aura::SlotDuration,
) -> impl Fn(
	Hash,
	(),
) -> std::future::Ready<
	Result<
		(sp_timestamp::InherentDataProvider, MockValidationDataInherentDataProvider<()>),
		Box<dyn std::error::Error + Send + Sync>,
	>,
> + Send
  + Sync {
	move |parent_hash: Hash, ()| {
		// Manual seal always builds on a block this node itself imported, so a
		// missing parent header indicates a broken backend — panicking is the
		// stated invariant here rather than a recoverable error.
		let current_para_head = client
			.header(parent_hash)
			.expect("Header lookup should succeed")
			.expect("Header passed in as parent should be present in backend.");

		// Check if there's pending validation code that needs upgrade signal.
		// `collect_collation_info` reports `new_validation_code` when the runtime
		// has a code upgrade scheduled; a runtime-API error is treated as
		// "no upgrade pending" (`unwrap_or_default` -> false).
		let should_send_go_ahead = client
			.runtime_api()
			.collect_collation_info(parent_hash, &current_para_head)
			.map(|info| info.new_validation_code.is_some())
			.unwrap_or_default();

		let current_para_block_head = Some(HeadData(current_para_head.encode()));
		// The block being authored is one past the parent's number.
		let current_block_number =
			UniqueSaturatedInto::<u32>::unique_saturated_into(current_para_head.number) + 1;

		// Create mock parachain validation data standing in for the relay chain.
		let mocked_parachain = MockValidationDataInherentDataProvider::<()> {
			current_para_block: current_block_number,
			para_id,
			current_para_block_head,
			// One mocked relay block per para block keeps the fake relay chain
			// advancing in lock-step with authored blocks.
			relay_blocks_per_para_block: 1,
			para_blocks_per_relay_epoch: 10,
			// With no real relay chain, the go-ahead signal must be mocked or a
			// scheduled runtime upgrade would never be enacted.
			upgrade_go_ahead: should_send_go_ahead.then(|| {
				log::info!("Detected pending validation code, sending go-ahead signal.");
				UpgradeGoAhead::GoAhead
			}),
			..Default::default()
		};

		// Create timestamp aligned to Aura slot timing: block N is stamped
		// N * slot_duration ms, so each block falls into a distinct, increasing
		// Aura slot regardless of wall-clock time between seals.
		let timestamp = sp_timestamp::InherentDataProvider::new(
			(slot_duration.as_millis() * current_block_number as u64).into(),
		);

		std::future::ready(Ok((timestamp, mocked_parachain)))
	}
}

/// Start a parachain node in dev mode with instant seal.
///
/// This allows the parachain to run standalone without connecting to a relay chain,
/// useful for development and testing.
///
/// Blocks are authored immediately when transactions enter the pool and, when
/// `finalize_delay_sec > 0`, finalized after that many seconds by a background task.
///
/// # Parameters
/// - `config`: node configuration; its peer limits are forced to zero below so
///   the dev node never connects to a real network.
/// - `finalize_delay_sec`: seconds to wait before finalizing authored blocks;
///   `0` disables the delayed-finalization task entirely.
///
/// # Errors
/// Returns a service error if any component (telemetry, client, network, offchain
/// workers, RPC) fails to start.
pub async fn start_dev_parachain_node(
	mut config: Configuration,
	finalize_delay_sec: u64,
) -> sc_service::error::Result<TaskManager> {
	// Since this is a dev node, prevent it from connecting to peers
	config.network.default_peers_set.in_peers = 0;
	config.network.default_peers_set.out_peers = 0;

	// Build client, backend, and other components manually
	// We can't use new_partial() because it creates an Aura import queue
	let telemetry = config
		.telemetry_endpoints
		.clone()
		.filter(|x| !x.is_empty())
		.map(|endpoints| -> Result<_, sc_telemetry::Error> {
			let worker = TelemetryWorker::new(16)?;
			let telemetry = worker.handle().new_telemetry(endpoints);
			Ok((worker, telemetry))
		})
		.transpose()?;

	let heap_pages = config
		.executor
		.default_heap_pages
		.map_or(DEFAULT_HEAP_ALLOC_STRATEGY, |h| HeapAllocStrategy::Static { extra_pages: h as _ });

	let executor = ParachainExecutor::builder()
		.with_execution_method(config.executor.wasm_method)
		.with_onchain_heap_alloc_strategy(heap_pages)
		.with_offchain_heap_alloc_strategy(heap_pages)
		.with_max_runtime_instances(config.executor.max_runtime_instances)
		.with_runtime_cache_size(config.executor.runtime_cache_size)
		.build();

	// NOTE(review): the trailing `true` enables import recording in
	// `new_full_parts_record_import` — presumably needed so the dev client keeps
	// proof/import data; confirm against the sc_service API docs.
	let (client, backend, keystore_container, mut task_manager) =
		sc_service::new_full_parts_record_import::<Block, RuntimeApi, _>(
			&config,
			telemetry.as_ref().map(|(_, telemetry)| telemetry.handle()),
			executor,
			true,
		)?;
	let client = Arc::new(client);

	// Run the telemetry worker in the background; keep only the handle around.
	let mut telemetry = telemetry.map(|(worker, telemetry)| {
		task_manager.spawn_handle().spawn("telemetry", None, worker.run());
		telemetry
	});

	let transaction_pool = Arc::from(
		sc_transaction_pool::Builder::new(
			task_manager.spawn_essential_handle(),
			client.clone(),
			config.role.is_authority().into(),
		)
		.with_options(config.transaction_pool.clone())
		.with_prometheus(config.prometheus_registry())
		.build(),
	);

	// Create manual seal import queue (accepts all blocks without relay chain validation)
	let import_queue = sc_consensus_manual_seal::import_queue(
		Box::new(client.clone()),
		&task_manager.spawn_essential_handle(),
		config.prometheus_registry(),
	);

	let net_config = sc_network::config::FullNetworkConfiguration::<
		_,
		_,
		sc_network::NetworkWorker<Block, Hash>,
	>::new(
		&config.network,
		config.prometheus_config.as_ref().map(|cfg| cfg.registry.clone()),
	);

	// The network stack is still built (RPC and sync services need it) even
	// though peer limits above keep this node isolated.
	let (network, system_rpc_tx, tx_handler_controller, sync_service) =
		sc_service::build_network(sc_service::BuildNetworkParams {
			config: &config,
			client: client.clone(),
			transaction_pool: transaction_pool.clone(),
			spawn_handle: task_manager.spawn_handle(),
			import_queue,
			net_config,
			block_announce_validator_builder: None,
			warp_sync_config: None,
			block_relay: None,
			metrics: sc_network::NetworkWorker::<Block, Hash>::register_notification_metrics(
				config.prometheus_config.as_ref().map(|config| &config.registry),
			),
		})?;

	if config.offchain_worker.enabled {
		use futures::FutureExt;

		let offchain_workers =
			sc_offchain::OffchainWorkers::new(sc_offchain::OffchainWorkerOptions {
				runtime_api_provider: client.clone(),
				keystore: Some(keystore_container.keystore()),
				offchain_db: backend.offchain_storage(),
				transaction_pool: Some(OffchainTransactionPoolFactory::new(
					transaction_pool.clone(),
				)),
				network_provider: Arc::new(network.clone()),
				is_validator: config.role.is_authority(),
				enable_http_requests: true,
				custom_extensions: move |_| vec![],
			})?;
		task_manager.spawn_handle().spawn(
			"offchain-workers-runner",
			"offchain-work",
			offchain_workers.run(client.clone(), task_manager.spawn_handle()).boxed(),
		);
	}

	// Block proposer used by instant seal to build block bodies from the pool.
	let proposer = sc_basic_authorship::ProposerFactory::new(
		task_manager.spawn_handle(),
		client.clone(),
		transaction_pool.clone(),
		None,
		None,
	);

	// Get slot duration from runtime
	let slot_duration = sc_consensus_aura::slot_duration(&*client)
		.expect("Slot duration is always present in Aura runtime; qed.");

	// The aura digest provider will provide digests that match the provided timestamp data.
	// Without this, the AURA parachain runtime complains about slot mismatches.
	let aura_digest_provider = AuraConsensusDataProvider::new_with_slot_duration(slot_duration);

	// Extract para_id from chain spec
	let para_id = crate::chain_spec::Extensions::try_get(&*config.chain_spec)
		.map(|e| e.para_id)
		.ok_or("Could not find parachain ID in chain-spec.")?;
	let para_id = ParaId::from(para_id);

	// Create inherent data providers with mocked relay chain data
	let create_inherent_data_providers =
		create_dev_inherent_data_providers(client.clone(), para_id, slot_duration);

	// Spawn instant seal consensus
	let params = sc_consensus_manual_seal::InstantSealParams {
		block_import: client.clone(),
		env: proposer,
		client: client.clone(),
		pool: transaction_pool.clone(),
		select_chain: LongestChain::new(backend.clone()),
		consensus_data_provider: Some(Box::new(aura_digest_provider)),
		create_inherent_data_providers,
	};

	let authorship_future = sc_consensus_manual_seal::run_instant_seal(params);
	task_manager
		.spawn_essential_handle()
		.spawn_blocking("instant-seal", None, authorship_future);

	// Optional delayed finalization: with instant seal nothing else finalizes
	// blocks, so this background task finalizes them after the configured delay.
	if finalize_delay_sec > 0 {
		let delayed_finalize_params = sc_consensus_manual_seal::DelayedFinalizeParams {
			client: client.clone(),
			spawn_handle: task_manager.spawn_handle(),
			delay_sec: finalize_delay_sec,
		};
		task_manager.spawn_essential_handle().spawn_blocking(
			"delayed_finalize",
			None,
			sc_consensus_manual_seal::run_delayed_finalize(delayed_finalize_params),
		);
	}

	// RPC builder closure: captures client and pool clones; the ignored argument
	// is the subscription executor/deny-unsafe context this node does not use.
	let rpc_builder = {
		let client = client.clone();
		let transaction_pool = transaction_pool.clone();

		Box::new(move |_| {
			let deps =
				crate::rpc::FullDeps { client: client.clone(), pool: transaction_pool.clone() };

			crate::rpc::create_full(deps).map_err(Into::into)
		})
	};

	// Wire everything into the service: spawns RPC, transaction handling, and
	// informant tasks onto the task manager, which is returned to the caller.
	let _rpc_handlers = sc_service::spawn_tasks(sc_service::SpawnTasksParams {
		network,
		client,
		keystore: keystore_container.keystore(),
		task_manager: &mut task_manager,
		transaction_pool,
		rpc_builder,
		backend,
		system_rpc_tx,
		tx_handler_controller,
		sync_service,
		config,
		telemetry: telemetry.as_mut(),
		tracing_execute_block: None,
	})?;

	Ok(task_manager)
}
Loading