Unverified commit 7eabd8d2 authored by Max Inden

Merge branch 'paritytech/master' into mxinden-auth-disc-timing

parents 8b035284 ffedeab4
Pipeline #114761 failed in 18 minutes and 10 seconds
@@ -333,9 +333,9 @@ dependencies = [
[[package]]
name = "async-trait"
version = "0.1.41"
version = "0.1.42"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "b246867b8b3b6ae56035f1eb1ed557c1d8eae97f0d53696138a50fa0e3a3b8c0"
checksum = "8d3a45e77e34375a7923b1e8febb049bb011f064714a8e17a1a616fef01da13d"
dependencies = [
"proc-macro2 1.0.24",
"quote 1.0.7",
@@ -4942,6 +4942,8 @@ dependencies = [
"sp-core",
"sp-keystore",
"tempfile",
"tracing",
"tracing-futures",
]
[[package]]
@@ -4967,6 +4969,8 @@ dependencies = [
"sp-keyring",
"sp-keystore",
"thiserror",
"tracing",
"tracing-futures",
]
[[package]]
@@ -4986,6 +4990,7 @@ dependencies = [
"substrate-browser-utils",
"substrate-build-script-utils",
"thiserror",
"tracing-futures",
"wasm-bindgen",
"wasm-bindgen-futures",
]
@@ -5008,6 +5013,8 @@ dependencies = [
"sp-core",
"sp-keyring",
"thiserror",
"tracing",
"tracing-futures",
]
[[package]]
@@ -5039,7 +5046,6 @@ dependencies = [
"assert_matches",
"async-trait",
"futures 0.3.8",
"log",
"parity-scale-codec",
"parking_lot 0.11.1",
"polkadot-node-network-protocol",
@@ -5050,6 +5056,8 @@ dependencies = [
"sc-network",
"sp-core",
"sp-keyring",
"tracing",
"tracing-futures",
]
[[package]]
@@ -5057,7 +5065,6 @@ name = "polkadot-node-collation-generation"
version = "0.1.0"
dependencies = [
"futures 0.3.8",
"log",
"polkadot-erasure-coding",
"polkadot-node-primitives",
"polkadot-node-subsystem",
@@ -5066,6 +5073,8 @@ dependencies = [
"polkadot-primitives",
"sp-core",
"thiserror",
"tracing",
"tracing-futures",
]
[[package]]
@@ -5091,6 +5100,8 @@ dependencies = [
"smallvec 1.5.0",
"sp-core",
"thiserror",
"tracing",
"tracing-futures",
]
[[package]]
@@ -5100,7 +5111,6 @@ dependencies = [
"assert_matches",
"bitvec",
"futures 0.3.8",
"log",
"polkadot-erasure-coding",
"polkadot-node-primitives",
"polkadot-node-subsystem",
@@ -5114,6 +5124,8 @@ dependencies = [
"sp-keyring",
"sp-keystore",
"thiserror",
"tracing",
"tracing-futures",
]
[[package]]
@@ -5122,12 +5134,13 @@ version = "0.1.0"
dependencies = [
"derive_more",
"futures 0.3.8",
"log",
"polkadot-node-subsystem",
"polkadot-node-subsystem-util",
"polkadot-primitives",
"sp-keystore",
"thiserror",
"tracing",
"tracing-futures",
"wasm-timer",
]
@@ -5136,12 +5149,13 @@ name = "polkadot-node-core-candidate-selection"
version = "0.1.0"
dependencies = [
"futures 0.3.8",
"log",
"polkadot-node-subsystem",
"polkadot-node-subsystem-util",
"polkadot-primitives",
"sp-core",
"thiserror",
"tracing",
"tracing-futures",
]
[[package]]
@@ -5150,7 +5164,6 @@ version = "0.1.0"
dependencies = [
"assert_matches",
"futures 0.3.8",
"log",
"parity-scale-codec",
"polkadot-node-primitives",
"polkadot-node-subsystem",
@@ -5160,6 +5173,8 @@ dependencies = [
"polkadot-primitives",
"sp-core",
"sp-keyring",
"tracing",
"tracing-futures",
]
[[package]]
@@ -5174,6 +5189,8 @@ dependencies = [
"polkadot-primitives",
"sp-blockchain",
"sp-core",
"tracing",
"tracing-futures",
]
[[package]]
@@ -5182,7 +5199,6 @@ version = "0.1.0"
dependencies = [
"futures 0.3.8",
"futures-timer 3.0.2",
"log",
"polkadot-node-subsystem",
"polkadot-overseer",
"polkadot-primitives",
@@ -5197,6 +5213,7 @@ dependencies = [
"sp-runtime",
"sp-transaction-pool",
"substrate-prometheus-endpoint",
"tracing",
]
[[package]]
@@ -5206,13 +5223,14 @@ dependencies = [
"bitvec",
"futures 0.3.8",
"futures-timer 3.0.2",
"log",
"polkadot-node-subsystem",
"polkadot-node-subsystem-util",
"polkadot-primitives",
"sp-application-crypto",
"sp-keystore",
"thiserror",
"tracing",
"tracing-futures",
]
[[package]]
@@ -5226,6 +5244,8 @@ dependencies = [
"polkadot-primitives",
"sp-api",
"sp-core",
"tracing",
"tracing-futures",
]
[[package]]
@@ -5259,7 +5279,6 @@ dependencies = [
"derive_more",
"futures 0.3.8",
"futures-timer 3.0.2",
"log",
"parity-scale-codec",
"parking_lot 0.11.1",
"pin-project 1.0.2",
@@ -5273,6 +5292,8 @@ dependencies = [
"sp-core",
"substrate-prometheus-endpoint",
"thiserror",
"tracing",
"tracing-futures",
]
[[package]]
@@ -5282,7 +5303,6 @@ dependencies = [
"async-trait",
"futures 0.3.8",
"futures-timer 3.0.2",
"log",
"parity-scale-codec",
"parking_lot 0.11.1",
"pin-project 1.0.2",
@@ -5295,6 +5315,8 @@ dependencies = [
"sc-network",
"smallvec 1.5.0",
"sp-core",
"tracing",
"tracing-futures",
]
[[package]]
@@ -5321,6 +5343,8 @@ dependencies = [
"streamunordered",
"substrate-prometheus-endpoint",
"thiserror",
"tracing",
"tracing-futures",
]
[[package]]
@@ -5332,7 +5356,6 @@ dependencies = [
"futures 0.3.8",
"futures-timer 3.0.2",
"kv-log-macro",
"log",
"polkadot-node-network-protocol",
"polkadot-node-primitives",
"polkadot-node-subsystem",
@@ -5341,6 +5364,8 @@ dependencies = [
"sc-client-api",
"sp-core",
"streamunordered",
"tracing",
"tracing-futures",
]
[[package]]
@@ -5371,13 +5396,14 @@ version = "0.1.0"
dependencies = [
"assert_matches",
"futures 0.3.8",
"log",
"polkadot-node-network-protocol",
"polkadot-node-subsystem",
"polkadot-node-subsystem-test-helpers",
"polkadot-node-subsystem-util",
"polkadot-primitives",
"sp-core",
"tracing",
"tracing-futures",
]
[[package]]
@@ -5613,7 +5639,6 @@ dependencies = [
"hex-literal",
"kusama-runtime",
"lazy_static",
"log",
"pallet-babe",
"pallet-im-online",
"pallet-staking",
@@ -5679,6 +5704,8 @@ dependencies = [
"sp-transaction-pool",
"sp-trie",
"substrate-prometheus-endpoint",
"tracing",
"tracing-futures",
"westend-runtime",
]
@@ -5690,7 +5717,6 @@ dependencies = [
"assert_matches",
"futures 0.3.8",
"indexmap",
"log",
"polkadot-node-network-protocol",
"polkadot-node-primitives",
"polkadot-node-subsystem",
@@ -5703,6 +5729,8 @@ dependencies = [
"sp-keyring",
"sp-keystore",
"sp-staking",
"tracing",
"tracing-futures",
]
[[package]]
@@ -5806,7 +5834,6 @@ dependencies = [
"futures 0.1.30",
"futures 0.3.8",
"hex",
"log",
"pallet-balances",
"pallet-staking",
"pallet-transaction-payment",
@@ -5848,6 +5875,8 @@ dependencies = [
"substrate-test-utils",
"tempfile",
"tokio 0.2.22",
"tracing",
"tracing-futures",
]
[[package]]
......
@@ -40,6 +40,9 @@ gpg --export 9D4B2B6EB8F97156D19669A9FF0812D491B96798 > /usr/share/keyrings/pari
# Add the Parity repository and update the package index
echo 'deb [signed-by=/usr/share/keyrings/parity.gpg] https://releases.parity.io/deb release main' > /etc/apt/sources.list.d/parity.list
apt update
# Install the `parity-keyring` package - this ensures the GPG key
# used by APT remains up to date
apt install parity-keyring
# Install polkadot
apt install polkadot
......
@@ -25,6 +25,7 @@ polkadot-parachain = { path = "../parachain", optional = true }
sp-core = { git = "https://github.com/paritytech/substrate", branch = "master" }
sc-tracing = { git = "https://github.com/paritytech/substrate", branch = "master" }
tracing-futures = "0.2.4"
frame-benchmarking-cli = { git = "https://github.com/paritytech/substrate", branch = "master", optional = true }
sc-cli = { git = "https://github.com/paritytech/substrate", branch = "master", optional = true }
sc-service = { git = "https://github.com/paritytech/substrate", branch = "master", optional = true }
......
@@ -59,6 +59,13 @@ pub type Hash = sp_core::H256;
#[derive(Clone, Copy, Encode, Decode, Hash, Eq, PartialEq, Debug, Default)]
pub struct CandidateHash(pub Hash);
#[cfg(feature="std")]
impl std::fmt::Display for CandidateHash {
fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
self.0.fmt(f)
}
}
/// Index of a transaction in the relay chain. 32-bit should be plenty.
pub type Nonce = u32;
......
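For reference, the new Display impl simply forwards to the inner hash's Display. Among other things this lets a CandidateHash be captured with tracing's `%` (Display) sigil, as done elsewhere in this change. A self-contained sketch of the forwarding pattern (the local Hash type is a stand-in for sp_core::H256):

use std::fmt;

// Stand-in for sp_core::H256 so the sketch compiles on its own.
#[derive(Clone, Copy, Debug, Default)]
struct Hash([u8; 32]);

impl fmt::Display for Hash {
    fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
        // Hex-encode the bytes; the real H256 Display is similar (it abbreviates).
        for byte in &self.0 {
            write!(f, "{:02x}", byte)?;
        }
        Ok(())
    }
}

// The same newtype-forwarding pattern as CandidateHash above.
#[derive(Clone, Copy, Debug, Default)]
struct CandidateHash(Hash);

impl fmt::Display for CandidateHash {
    fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
        self.0.fmt(f)
    }
}

fn main() {
    // `{}` now works wherever a candidate hash is printed or traced.
    println!("candidate {}", CandidateHash::default());
}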
@@ -6,7 +6,8 @@ edition = "2018"
[dependencies]
futures = "0.3.8"
log = "0.4.11"
tracing = "0.1.21"
tracing-futures = "0.2.4"
polkadot-erasure-coding = { path = "../../erasure-coding" }
polkadot-node-primitives = { path = "../primitives" }
polkadot-node-subsystem = { path = "../subsystem" }
......
@@ -74,6 +74,7 @@ impl CollationGenerationSubsystem {
///
/// If `err_tx` is not `None`, errors are forwarded onto that channel as they occur.
/// Otherwise, most are logged and then discarded.
#[tracing::instrument(skip(self, ctx), fields(subsystem = LOG_TARGET))]
async fn run<Context>(mut self, mut ctx: Context)
where
Context: SubsystemContext<Message = CollationGenerationMessage>,
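The `#[tracing::instrument]` attribute added above wraps the annotated async function in a tracing span carrying the listed fields; `skip(self, ctx)` keeps those arguments out of the span. Conceptually it is roughly equivalent to the following hand-written wrapping (a sketch, not the actual macro expansion):

use tracing::Instrument; // also provided by the tracing-futures crate added in this change

const LOG_TARGET: &str = "collation_generation";

async fn run() {
    // #[tracing::instrument] defaults to an info-level span named after the fn.
    let span = tracing::info_span!("run", subsystem = LOG_TARGET);
    async move {
        // ... original function body; every event emitted here
        // is recorded inside the span and inherits its fields ...
        tracing::info!("subsystem running");
    }
    .instrument(span)
    .await;
}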
@@ -94,10 +95,7 @@ impl CollationGenerationSubsystem {
},
msg = receiver.next().fuse() => {
if let Some(msg) = msg {
if let Err(err) = ctx.send_message(msg).await {
log::warn!(target: LOG_TARGET, "failed to forward message to overseer: {:?}", err);
break;
}
ctx.send_message(msg).await;
}
},
}
@@ -108,6 +106,7 @@ impl CollationGenerationSubsystem {
// Note: this doesn't strictly need to be a separate function; it's an administrative function,
// split out so that we don't clutter the run loop. It could in principle be inlined there,
// so it should be fine that it's an async function mutably borrowing self.
#[tracing::instrument(level = "trace", skip(self, ctx, sender), fields(subsystem = LOG_TARGET))]
async fn handle_incoming<Context>(
&mut self,
incoming: SubsystemResult<FromOverseer<Context::Message>>,
@@ -129,7 +128,7 @@ impl CollationGenerationSubsystem {
if let Err(err) =
handle_new_activations(config.clone(), &activated, ctx, metrics, sender).await
{
log::warn!(target: LOG_TARGET, "failed to handle new activations: {}", err);
tracing::warn!(target: LOG_TARGET, err = ?err, "failed to handle new activations");
};
}
false
@@ -139,7 +138,7 @@ impl CollationGenerationSubsystem {
msg: CollationGenerationMessage::Initialize(config),
}) => {
if self.config.is_some() {
log::error!(target: LOG_TARGET, "double initialization");
tracing::error!(target: LOG_TARGET, "double initialization");
} else {
self.config = Some(Arc::new(config));
}
@@ -147,8 +146,9 @@
}
Ok(Signal(BlockFinalized(_))) => false,
Err(err) => {
log::error!(
tracing::error!(
target: LOG_TARGET,
err = ?err,
"error receiving message from subsystem context: {:?}",
err
);
@@ -175,6 +175,7 @@ where
}
}
#[tracing::instrument(level = "trace", skip(ctx, metrics, sender), fields(subsystem = LOG_TARGET))]
async fn handle_new_activations<Context: SubsystemContext>(
config: Arc<CollationGenerationConfig>,
activated: &[Hash],
@@ -185,7 +186,11 @@ async fn handle_new_activations<Context: SubsystemContext>(
// follow the procedure from the guide:
// https://w3f.github.io/parachain-implementers-guide/node/collators/collation-generation.html
let _overall_timer = metrics.time_new_activations();
for relay_parent in activated.iter().copied() {
let _relay_parent_timer = metrics.time_new_activations_relay_parent();
// double-future magic happens here: the first layer of requests takes a mutable borrow of the context, and
// returns a receiver. The second layer of requests actually polls those receivers to completion.
let (availability_cores, validators) = join!(
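The two layers the comment refers to are the subsystem-util request helpers: the first await borrows the context mutably only long enough to send the request and returns a oneshot receiver; the receivers are then polled to completion together. A minimal sketch of the shape, with hypothetical stand-in types:

use futures::channel::oneshot;

struct Context; // stand-in for the real subsystem context
type Response = u64; // stand-in for the real response type

// Layer one: fire off the request, hand back a receiver that
// holds no borrow of the context.
async fn request(_ctx: &mut Context) -> oneshot::Receiver<Response> {
    let (tx, rx) = oneshot::channel();
    // ... in the real code, a runtime-API request carrying `tx` is sent here ...
    let _ = tx.send(42);
    rx
}

async fn example(ctx: &mut Context) {
    // The first await ends the &mut borrow, so a second request
    // can be issued before the first response has arrived.
    let rx_a = request(ctx).await;
    let rx_b = request(ctx).await;
    // Layer two: poll both receivers to completion concurrently.
    let (a, b) = futures::join!(rx_a, rx_b);
    assert_eq!(a.unwrap() + b.unwrap(), 84);
}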
@@ -197,6 +202,8 @@ async fn handle_new_activations<Context: SubsystemContext>(
let n_validators = validators??.len();
for core in availability_cores {
let _availability_core_timer = metrics.time_new_activations_availability_core();
let (scheduled_core, assumption) = match core {
CoreState::Scheduled(scheduled_core) => {
(scheduled_core, OccupiedCoreAssumption::Free)
@@ -237,10 +244,10 @@ async fn handle_new_activations<Context: SubsystemContext>(
let collation = match (task_config.collator)(relay_parent, &validation_data).await {
Some(collation) => collation,
None => {
log::debug!(
tracing::debug!(
target: LOG_TARGET,
"collator returned no collation on collate for para_id {}.",
scheduled_core.para_id,
para_id = %scheduled_core.para_id,
"collator returned no collation on collate",
);
return
}
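The rewritten events use tracing's structured-field syntax: a leading `%` captures the value via its Display impl, `?` via Debug, and the human-readable message comes last. A small sketch (LOG_TARGET and the argument types are illustrative):

const LOG_TARGET: &str = "collation_generation";

fn report(para_id: u32, err: &std::io::Error) {
    // para_id is recorded via Display (%), err via Debug (?).
    tracing::debug!(
        target: LOG_TARGET,
        para_id = %para_id,
        err = ?err,
        "collator returned no collation on collate",
    );
}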
@@ -262,11 +269,11 @@ async fn handle_new_activations<Context: SubsystemContext>(
) {
Ok(erasure_root) => erasure_root,
Err(err) => {
log::error!(
tracing::error!(
target: LOG_TARGET,
"failed to calculate erasure root for para_id {}: {:?}",
scheduled_core.para_id,
err
para_id = %scheduled_core.para_id,
err = ?err,
"failed to calculate erasure root",
);
return
}
@@ -299,11 +306,11 @@ async fn handle_new_activations<Context: SubsystemContext>(
if let Err(err) = task_sender.send(AllMessages::CollatorProtocol(
CollatorProtocolMessage::DistributeCollation(ccr, collation.proof_of_validity)
)).await {
log::warn!(
tracing::warn!(
target: LOG_TARGET,
"failed to send collation result for para_id {}: {:?}",
scheduled_core.para_id,
err
para_id = %scheduled_core.para_id,
err = ?err,
"failed to send collation result",
);
}
})).await?;
@@ -313,6 +320,7 @@ async fn handle_new_activations<Context: SubsystemContext>(
Ok(())
}
#[tracing::instrument(level = "trace", fields(subsystem = LOG_TARGET))]
fn erasure_root(
n_validators: usize,
persisted_validation: PersistedValidationData,
@@ -330,6 +338,9 @@ fn erasure_root(
#[derive(Clone)]
struct MetricsInner {
collations_generated_total: prometheus::Counter<prometheus::U64>,
new_activations_overall: prometheus::Histogram,
new_activations_per_relay_parent: prometheus::Histogram,
new_activations_per_availability_core: prometheus::Histogram,
}
/// CollationGenerationSubsystem metrics.
@@ -342,6 +353,21 @@ impl Metrics {
metrics.collations_generated_total.inc();
}
}
/// Provide a timer for new activations which updates on drop.
fn time_new_activations(&self) -> Option<metrics::prometheus::prometheus::HistogramTimer> {
self.0.as_ref().map(|metrics| metrics.new_activations_overall.start_timer())
}
/// Provide a timer per relay parent which updates on drop.
fn time_new_activations_relay_parent(&self) -> Option<metrics::prometheus::prometheus::HistogramTimer> {
self.0.as_ref().map(|metrics| metrics.new_activations_per_relay_parent.start_timer())
}
/// Provide a timer per availability core which updates on drop.
fn time_new_activations_availability_core(&self) -> Option<metrics::prometheus::prometheus::HistogramTimer> {
self.0.as_ref().map(|metrics| metrics.new_activations_per_availability_core.start_timer())
}
}
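Each of these helpers returns Option<HistogramTimer>: None when metrics aren't registered, otherwise a timer that starts immediately and records the elapsed seconds into its histogram when dropped, so binding it to a `_timer` local times the enclosing scope. The underlying pattern, sketched with the prometheus crate used directly (metric name borrowed from the registration below):

use prometheus::{Histogram, HistogramOpts, Registry};

fn main() -> Result<(), prometheus::Error> {
    let registry = Registry::new();
    let histogram = Histogram::with_opts(HistogramOpts::new(
        "parachain_collation_generation_new_activations",
        "Time spent within fn handle_new_activations",
    ))?;
    registry.register(Box::new(histogram.clone()))?;

    {
        // Equivalent of `let _timer = metrics.time_new_activations();`
        let _timer = histogram.start_timer();
        // ... the work being measured ...
    } // <- elapsed time is observed here, when the timer drops

    Ok(())
}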
impl metrics::Metrics for Metrics {
@@ -354,6 +380,33 @@ impl metrics::Metrics for Metrics {
)?,
registry,
)?,
new_activations_overall: prometheus::register(
prometheus::Histogram::with_opts(
prometheus::HistogramOpts::new(
"parachain_collation_generation_new_activations",
"Time spent within fn handle_new_activations",
)
)?,
registry,
)?,
new_activations_per_relay_parent: prometheus::register(
prometheus::Histogram::with_opts(
prometheus::HistogramOpts::new(
"parachain_collation_generation_per_relay_parent",
"Time spent handling a particular relay parent within fn handle_new_activations"
)
)?,
registry,
)?,
new_activations_per_availability_core: prometheus::register(
prometheus::Histogram::with_opts(
prometheus::HistogramOpts::new(
"parachain_collation_generation_per_availability_core",
"Time spent handling a particular availability core for a relay parent in fn handle_new_activations",
)
)?,
registry,
)?,
};
Ok(Metrics(Some(metrics)))
}
......
@@ -9,8 +9,9 @@ futures = "0.3.8"
futures-timer = "3.0.2"
kvdb = "0.7.0"
kvdb-rocksdb = "0.9.1"
log = "0.4.11"
thiserror = "1.0.22"
tracing = "0.1.21"
tracing-futures = "0.2.4"
parity-scale-codec = { version = "1.3.5", features = ["derive"] }
erasure = { package = "polkadot-erasure-coding", path = "../../../erasure-coding" }
@@ -22,6 +23,7 @@ polkadot-primitives = { path = "../../../primitives" }
sc-service = { git = "https://github.com/paritytech/substrate", branch = "master", default-features = false }
[dev-dependencies]
log = "0.4.11"
env_logger = "0.8.2"
assert_matches = "1.4.0"
smallvec = "1.5.0"
......
@@ -73,13 +73,13 @@ enum Error {
}
impl Error {
fn severity(&self) -> log::Level {
fn trace(&self) {
match self {
// don't spam the log with spurious errors
Self::RuntimeApi(_) |
Self::Oneshot(_) => log::Level::Debug,
Self::Oneshot(_) => tracing::debug!(target: LOG_TARGET, err = ?self),
// it's worth reporting otherwise
_ => log::Level::Warn,
_ => tracing::warn!(target: LOG_TARGET, err = ?self),
}
}
}
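Replacing severity() (which returned a log::Level) with trace() (which emits directly) fits a constraint of tracing: its macros bake the level into static per-callsite metadata, so the level cannot be passed around as a runtime value the way log::log!(level, ...) allows. The resulting shape, sketched with a simplified error type:

const LOG_TARGET: &str = "availability";

#[derive(Debug)]
enum Error {
    RuntimeApi(String),
    Oneshot(String),
    Io(String),
}

impl Error {
    // Each arm names its level statically at the macro call site.
    fn trace(&self) {
        match self {
            // don't spam the log with spurious errors
            Self::RuntimeApi(_) | Self::Oneshot(_) =>
                tracing::debug!(target: LOG_TARGET, err = ?self),
            // it's worth reporting otherwise
            _ => tracing::warn!(target: LOG_TARGET, err = ?self),
        }
    }
}

fn main() {
    Error::Io("disk full".into()).trace();
}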
@@ -311,18 +321,21 @@ pub struct AvailabilityStoreSubsystem {
impl AvailabilityStoreSubsystem {
// Perform pruning of PoVs
#[tracing::instrument(level = "trace", skip(self), fields(subsystem = LOG_TARGET))]
fn prune_povs(&self) -> Result<(), Error> {
let _timer = self.metrics.time_prune_povs();
let mut tx = DBTransaction::new();
let mut pov_pruning = pov_pruning(&self.inner).unwrap_or_default();
let now = PruningDelay::now()?;
log::trace!(target: LOG_TARGET, "Pruning PoVs");
tracing::trace!(target: LOG_TARGET, "Pruning PoVs");
let outdated_records_count = pov_pruning.iter()
.take_while(|r| r.prune_at <= now)
.count();
for record in pov_pruning.drain(..outdated_records_count) {
log::trace!(target: LOG_TARGET, "Removing record {:?}", record);
tracing::trace!(target: LOG_TARGET, record = ?record, "Removing record");
tx.delete(
columns::DATA,
available_data_key(&record.candidate_hash).as_slice(),
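Because the pruning records are kept sorted by prune_at, the expired entries form a prefix: take_while counts them and drain removes exactly that many, visiting each outdated record once. The idiom in isolation (simplified record and timestamp types):

#[derive(Debug)]
struct Record {
    prune_at: u64, // stand-in for the real PruningDelay timestamp
}

fn prune(records: &mut Vec<Record>, now: u64) {
    // Assumes `records` is sorted ascending by prune_at.
    let outdated_records_count = records
        .iter()
        .take_while(|r| r.prune_at <= now)
        .count();
    for record in records.drain(..outdated_records_count) {
        // The subsystem queues a DBTransaction::delete per record here.
        println!("removing record {:?}", record);
    }
}

fn main() {
    let mut records = vec![
        Record { prune_at: 5 },
        Record { prune_at: 9 },
        Record { prune_at: 20 },
    ];
    prune(&mut records, 10);
    assert_eq!(records.len(), 1); // only prune_at = 20 remains
}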
@@ -335,18 +338,21 @@ impl AvailabilityStoreSubsystem {
}
// Perform pruning of chunks.
#[tracing::instrument(level = "trace", skip(self), fields(subsystem = LOG_TARGET))]
fn prune_chunks(&self) -> Result<(), Error> {
let _timer = self.metrics.time_prune_chunks();
let mut tx = DBTransaction::new();
let mut chunk_pruning = chunk_pruning(&self.inner).unwrap_or_default();
let now = PruningDelay::now()?;
log::trace!(target: LOG_TARGET, "Pruning Chunks");
tracing::trace!(target: LOG_TARGET, "Pruning Chunks");
let outdated_records_count = chunk_pruning.iter()
.take_while(|r| r.prune_at <= now)
.count();
for record in chunk_pruning.drain(..outdated_records_count) {
log::trace!(target: LOG_TARGET, "Removing record {:?}", record);
tracing::trace!(target: LOG_TARGET, record = ?record, "Removing record");
tx.delete(
columns::DATA,
erasure_chunk_key(&record.candidate_hash, record.chunk_index).as_slice(),
@@ -361,6 +367,7 @@ impl AvailabilityStoreSubsystem {
// Return a `Future` that either resolves when another PoV pruning has to happen
// or is indefinitely `pending` in case no pruning has to be done.
// Just a helper to `select` over multiple things at once.
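One plausible shape for such a helper, sketched with futures-timer (already a dependency of this crate per the Cargo.toml diff above; the actual implementation may differ): sleep until the next deadline if one exists, otherwise return a never-ready future, which is harmless as one arm of a select!.

use std::time::Instant;
use futures::future::{self, Either};
use futures_timer::Delay;

// Resolves at `deadline`, or never if no pruning is scheduled.
fn wait_until_pruning(deadline: Option<Instant>) -> impl std::future::Future<Output = ()> {
    match deadline {
        Some(at) => {
            let dur = at.saturating_duration_since(Instant::now());
            Either::Left(Delay::new(dur))
        }
        // future::pending() is never ready, so this arm of a
        // select! simply never fires.
        None => Either::Right(future::pending::<()>()),
    }
}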