Commit 48983e74 authored by Cecile Tonglet

Merge commit 9a32ab1d (no conflict)

parents 27e75e3c 9a32ab1d
Pipeline #115041 failed in 7 minutes and 36 seconds
This diff is collapsed.
......@@ -13,14 +13,14 @@ readme = "README.md"
[dependencies]
cli = { package = "polkadot-cli", path = "cli" }
color-eyre = "0.5.7"
color-eyre = "0.5.8"
thiserror = "1.0.22"
futures = "0.3.8"
service = { package = "polkadot-service", path = "node/service" }
parity-util-mem = { version = "*", default-features = false, features = ["jemalloc-global"] }
[dev-dependencies]
assert_cmd = "1.0.1"
assert_cmd = "1.0.2"
nix = "0.19.0"
tempfile = "3.1.0"
......
......@@ -40,6 +40,9 @@ gpg --export 9D4B2B6EB8F97156D19669A9FF0812D491B96798 > /usr/share/keyrings/pari
# Add the Parity repository and update the package index
echo 'deb [signed-by=/usr/share/keyrings/parity.gpg] https://releases.parity.io/deb release main' > /etc/apt/sources.list.d/parity.list
apt update
# Install the `parity-keyring` package - This will ensure the GPG key
# used by APT remains up-to-date
apt install parity-keyring
# Install polkadot
apt install polkadot
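To confirm the package installed correctly, you can query the binary's version (a quick sanity check; the exact output depends on the release you pulled):

```bash
polkadot --version
```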
......
......@@ -25,6 +25,7 @@ polkadot-parachain = { path = "../parachain", optional = true }
sp-core = { git = "https://github.com/paritytech/substrate", branch = "master" }
sc-tracing = { git = "https://github.com/paritytech/substrate", branch = "master" }
tracing-futures = "0.2.4"
frame-benchmarking-cli = { git = "https://github.com/paritytech/substrate", branch = "master", optional = true }
sc-cli = { git = "https://github.com/paritytech/substrate", branch = "master", optional = true }
sc-service = { git = "https://github.com/paritytech/substrate", branch = "master", optional = true }
......
......@@ -148,7 +148,6 @@ pub fn run() -> Result<()> {
config,
service::IsCollator::No,
grandpa_pause,
None,
).map(|full| full.task_manager),
}
})
......
......@@ -59,6 +59,13 @@ pub type Hash = sp_core::H256;
#[derive(Clone, Copy, Encode, Decode, Hash, Eq, PartialEq, Debug, Default)]
pub struct CandidateHash(pub Hash);
#[cfg(feature = "std")]
impl std::fmt::Display for CandidateHash {
	fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
		self.0.fmt(f)
	}
}
/// Index of a transaction in the relay chain. 32-bit should be plenty.
pub type Nonce = u32;
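The new `Display` implementation simply delegates to the inner `H256`'s formatter. A minimal usage sketch (the byte pattern and the log line are hypothetical, not part of this commit):

```rust
use sp_core::H256;

fn main() {
	// `Hash` is the alias from above: pub type Hash = sp_core::H256;
	let candidate_hash = CandidateHash(H256::repeat_byte(0xaa));

	// `%` captures the value via Display (enabled by this impl under the
	// `std` feature); `?` would use the derived Debug impl instead.
	tracing::info!(candidate = %candidate_hash, "made candidate available");
}
```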
......
......@@ -7,29 +7,29 @@ the polkadot binary, pulled from our package repository.
Let's first check the version we have. The first time you run this command, the polkadot Docker image will be downloaded. This takes a bit of time and bandwidth; be patient:
```bash
docker run --rm -it parity/polkadot:latest polkadot --version
docker run --rm -it parity/polkadot:latest --version
```
You can also pass any argument/flag that polkadot supports:
```bash
docker run --rm -it parity/polkadot:latest polkadot --chain westend --name "PolkaDocker"
docker run --rm -it parity/polkadot:latest --chain westend --name "PolkaDocker"
```
Once you are done experimenting and picking the best node name :) you can start polkadot as a daemon, expose the polkadot ports, and mount a volume that will keep your blockchain data locally:
```bash
docker run -d -p 30333:30333 -p 9933:9933 -v /my/local/folder:/data parity/polkadot:latest polkadot --chain westend
docker run -d -p 30333:30333 -p 9933:9933 -v /my/local/folder:/data parity/polkadot:latest --chain westend
```
Additionally, if you want a custom node name, you can add `--name "YourName"` at the end:
```bash
docker run -d -p 30333:30333 -p 9933:9933 -v /my/local/folder:/data parity/polkadot:latest polkadot --chain westend --name "PolkaDocker"
docker run -d -p 30333:30333 -p 9933:9933 -v /my/local/folder:/data parity/polkadot:latest --chain westend --name "PolkaDocker"
```
```bash
docker run -d -p 30333:30333 -p 9933:9933 -v /my/local/folder:/data parity/polkadot:latest polkadot --rpc-external --chain westend
docker run -d -p 30333:30333 -p 9933:9933 -v /my/local/folder:/data parity/polkadot:latest --rpc-external --chain westend
```
If you want to connect to the RPC port 9933, you must add the startup parameter `--rpc-external`, as shown above.
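With `--rpc-external` set, you can check that the HTTP RPC endpoint is reachable from the host, for example with the standard `system_health` method (adjust host and port if you mapped them differently):

```bash
curl -H 'Content-Type: application/json' \
  -d '{"jsonrpc":"2.0","id":1,"method":"system_health","params":[]}' \
  http://localhost:9933
```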
......@@ -70,7 +70,7 @@ If you run into issues with polkadot when using docker, please run the following
(replace the tag with the appropriate one if you do not use latest):
```bash
docker run --rm -it parity/polkadot:latest polkadot --version
docker run --rm -it parity/polkadot:latest --version
```
This will show you the polkadot version as well as the git commit ref that was used to build your container.
......
......@@ -6,7 +6,8 @@ edition = "2018"
[dependencies]
futures = "0.3.8"
log = "0.4.11"
tracing = "0.1.22"
tracing-futures = "0.2.4"
polkadot-erasure-coding = { path = "../../erasure-coding" }
polkadot-node-primitives = { path = "../primitives" }
polkadot-node-subsystem = { path = "../subsystem" }
......
......@@ -74,6 +74,7 @@ impl CollationGenerationSubsystem {
///
/// If `err_tx` is not `None`, errors are forwarded onto that channel as they occur.
/// Otherwise, most are logged and then discarded.
#[tracing::instrument(skip(self, ctx), fields(subsystem = LOG_TARGET))]
async fn run<Context>(mut self, mut ctx: Context)
where
Context: SubsystemContext<Message = CollationGenerationMessage>,
......@@ -94,10 +95,7 @@ impl CollationGenerationSubsystem {
},
msg = receiver.next().fuse() => {
if let Some(msg) = msg {
if let Err(err) = ctx.send_message(msg).await {
log::warn!(target: LOG_TARGET, "failed to forward message to overseer: {:?}", err);
break;
}
ctx.send_message(msg).await;
}
},
}
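The `#[tracing::instrument]` attributes added throughout this commit wrap each function in a tracing span: `skip(...)` keeps the listed arguments out of the span's recorded fields (non-skipped arguments must implement `Debug`), and `fields(...)` attaches extra key-value context to every event emitted inside the span. A self-contained sketch of the pattern, with hypothetical function and argument names:

```rust
use tracing::instrument;

const LOG_TARGET: &str = "collation_generation";

// `skip(blob)` keeps the large argument out of the span's fields;
// `fields(subsystem = LOG_TARGET)` adds context that every event
// emitted inside the function will carry.
#[instrument(level = "trace", skip(blob), fields(subsystem = LOG_TARGET))]
fn process(id: u32, blob: Vec<u8>) {
	tracing::trace!(len = blob.len(), "processing"); // carries `id` and `subsystem`
}

fn main() {
	process(7, vec![0u8; 16]);
}
```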
......@@ -108,6 +106,7 @@ impl CollationGenerationSubsystem {
// note: this doesn't strictly need to be a separate function; it's more an administrative function
// so that we don't clutter the run loop. It could in principle be inlined directly into there.
// it should hopefully therefore be ok that it's an async function mutably borrowing self.
#[tracing::instrument(level = "trace", skip(self, ctx, sender), fields(subsystem = LOG_TARGET))]
async fn handle_incoming<Context>(
&mut self,
incoming: SubsystemResult<FromOverseer<Context::Message>>,
......@@ -129,7 +128,7 @@ impl CollationGenerationSubsystem {
if let Err(err) =
handle_new_activations(config.clone(), &activated, ctx, metrics, sender).await
{
log::warn!(target: LOG_TARGET, "failed to handle new activations: {}", err);
tracing::warn!(target: LOG_TARGET, err = ?err, "failed to handle new activations");
};
}
false
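This is the conversion pattern applied across the whole commit: `log`'s positional format arguments become structured `tracing` fields, where the `?` sigil records a value with its `Debug` impl and `%` with its `Display` impl. Side by side (both lines taken from the hunk above):

```rust
// before: the error is interpolated into the message string
log::warn!(target: LOG_TARGET, "failed to handle new activations: {}", err);

// after: the error is a structured field that log processors can
// filter and index on, separate from the static message
tracing::warn!(target: LOG_TARGET, err = ?err, "failed to handle new activations");
```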
......@@ -139,7 +138,7 @@ impl CollationGenerationSubsystem {
msg: CollationGenerationMessage::Initialize(config),
}) => {
if self.config.is_some() {
log::error!(target: LOG_TARGET, "double initialization");
tracing::error!(target: LOG_TARGET, "double initialization");
} else {
self.config = Some(Arc::new(config));
}
......@@ -147,8 +146,9 @@ impl CollationGenerationSubsystem {
}
Ok(Signal(BlockFinalized(_))) => false,
Err(err) => {
log::error!(
tracing::error!(
target: LOG_TARGET,
err = ?err,
"error receiving message from subsystem context: {:?}",
err
);
......@@ -175,6 +175,7 @@ where
}
}
#[tracing::instrument(level = "trace", skip(ctx, metrics, sender), fields(subsystem = LOG_TARGET))]
async fn handle_new_activations<Context: SubsystemContext>(
config: Arc<CollationGenerationConfig>,
activated: &[Hash],
......@@ -185,7 +186,11 @@ async fn handle_new_activations<Context: SubsystemContext>(
// follow the procedure from the guide:
// https://w3f.github.io/parachain-implementers-guide/node/collators/collation-generation.html
let _overall_timer = metrics.time_new_activations();
for relay_parent in activated.iter().copied() {
let _relay_parent_timer = metrics.time_new_activations_relay_parent();
// double-future magic happens here: the first layer of requests takes a mutable borrow of the context, and
// returns a receiver. The second layer of requests actually polls those receivers to completion.
let (availability_cores, validators) = join!(
......@@ -197,6 +202,8 @@ async fn handle_new_activations<Context: SubsystemContext>(
let n_validators = validators??.len();
for core in availability_cores {
let _availability_core_timer = metrics.time_new_activations_availability_core();
let (scheduled_core, assumption) = match core {
CoreState::Scheduled(scheduled_core) => {
(scheduled_core, OccupiedCoreAssumption::Free)
......@@ -237,10 +244,10 @@ async fn handle_new_activations<Context: SubsystemContext>(
let collation = match (task_config.collator)(relay_parent, &validation_data).await {
Some(collation) => collation,
None => {
log::debug!(
tracing::debug!(
target: LOG_TARGET,
"collator returned no collation on collate for para_id {}.",
scheduled_core.para_id,
para_id = %scheduled_core.para_id,
"collator returned no collation on collate",
);
return
}
......@@ -262,11 +269,11 @@ async fn handle_new_activations<Context: SubsystemContext>(
) {
Ok(erasure_root) => erasure_root,
Err(err) => {
log::error!(
tracing::error!(
target: LOG_TARGET,
"failed to calculate erasure root for para_id {}: {:?}",
scheduled_core.para_id,
err
para_id = %scheduled_core.para_id,
err = ?err,
"failed to calculate erasure root",
);
return
}
......@@ -299,11 +306,11 @@ async fn handle_new_activations<Context: SubsystemContext>(
if let Err(err) = task_sender.send(AllMessages::CollatorProtocol(
CollatorProtocolMessage::DistributeCollation(ccr, collation.proof_of_validity)
)).await {
log::warn!(
tracing::warn!(
target: LOG_TARGET,
"failed to send collation result for para_id {}: {:?}",
scheduled_core.para_id,
err
para_id = %scheduled_core.para_id,
err = ?err,
"failed to send collation result",
);
}
})).await?;
......@@ -313,6 +320,7 @@ async fn handle_new_activations<Context: SubsystemContext>(
Ok(())
}
#[tracing::instrument(level = "trace", fields(subsystem = LOG_TARGET))]
fn erasure_root(
n_validators: usize,
persisted_validation: PersistedValidationData,
......@@ -330,6 +338,9 @@ fn erasure_root(
#[derive(Clone)]
struct MetricsInner {
collations_generated_total: prometheus::Counter<prometheus::U64>,
new_activations_overall: prometheus::Histogram,
new_activations_per_relay_parent: prometheus::Histogram,
new_activations_per_availability_core: prometheus::Histogram,
}
/// CollationGenerationSubsystem metrics.
......@@ -342,6 +353,21 @@ impl Metrics {
metrics.collations_generated_total.inc();
}
}
/// Provide a timer for new activations which updates on drop.
fn time_new_activations(&self) -> Option<metrics::prometheus::prometheus::HistogramTimer> {
self.0.as_ref().map(|metrics| metrics.new_activations_overall.start_timer())
}
/// Provide a timer per relay parent which updates on drop.
fn time_new_activations_relay_parent(&self) -> Option<metrics::prometheus::prometheus::HistogramTimer> {
self.0.as_ref().map(|metrics| metrics.new_activations_per_relay_parent.start_timer())
}
/// Provide a timer per availability core which updates on drop.
fn time_new_activations_availability_core(&self) -> Option<metrics::prometheus::prometheus::HistogramTimer> {
self.0.as_ref().map(|metrics| metrics.new_activations_per_availability_core.start_timer())
}
}
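All three helpers rely on the same drop-timer idiom: `Histogram::start_timer` returns a `prometheus::HistogramTimer` that observes the elapsed seconds into the histogram when it is dropped, so binding it to `let _timer = ...` times the rest of the enclosing scope. A minimal standalone sketch using the `prometheus` crate directly (registry wiring omitted):

```rust
use prometheus::{Histogram, HistogramOpts};

fn timed_section(hist: &Histogram) {
	let _timer = hist.start_timer(); // measurement starts here
	// ... the work being measured ...
} // `_timer` dropped here; the duration is observed into `hist`

fn main() {
	let hist = Histogram::with_opts(HistogramOpts::new(
		"example_duration_seconds",
		"Time spent in timed_section",
	)).unwrap();
	timed_section(&hist);
	println!("samples recorded: {}", hist.get_sample_count());
}
```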
impl metrics::Metrics for Metrics {
......@@ -354,6 +380,33 @@ impl metrics::Metrics for Metrics {
)?,
registry,
)?,
new_activations_overall: prometheus::register(
prometheus::Histogram::with_opts(
prometheus::HistogramOpts::new(
"parachain_collation_generation_new_activations",
"Time spent within fn handle_new_activations",
)
)?,
registry,
)?,
new_activations_per_relay_parent: prometheus::register(
prometheus::Histogram::with_opts(
prometheus::HistogramOpts::new(
"parachain_collation_generation_per_relay_parent",
"Time spent handling a particular relay parent within fn handle_new_activations"
)
)?,
registry,
)?,
new_activations_per_availability_core: prometheus::register(
prometheus::Histogram::with_opts(
prometheus::HistogramOpts::new(
"parachain_collation_generation_per_availability_core",
"Time spent handling a particular availability core for a relay parent in fn handle_new_activations",
)
)?,
registry,
)?,
};
Ok(Metrics(Some(metrics)))
}
......
......@@ -9,8 +9,9 @@ futures = "0.3.8"
futures-timer = "3.0.2"
kvdb = "0.7.0"
kvdb-rocksdb = "0.9.1"
log = "0.4.11"
thiserror = "1.0.22"
tracing = "0.1.22"
tracing-futures = "0.2.4"
parity-scale-codec = { version = "1.3.5", features = ["derive"] }
erasure = { package = "polkadot-erasure-coding", path = "../../../erasure-coding" }
......@@ -22,7 +23,8 @@ polkadot-primitives = { path = "../../../primitives" }
sc-service = { git = "https://github.com/paritytech/substrate", branch = "master", default-features = false }
[dev-dependencies]
env_logger = "0.8.1"
log = "0.4.11"
env_logger = "0.8.2"
assert_matches = "1.4.0"
smallvec = "1.5.0"
kvdb-memorydb = "0.7.0"
......
This diff is collapsed.
......@@ -73,6 +73,7 @@ impl Default for TestState {
block_number: 5,
hrmp_mqc_heads: Vec::new(),
dmq_mqc_head: Default::default(),
max_pov_size: 1024,
};
let pruning_config = PruningConfig {
......@@ -127,7 +128,7 @@ async fn overseer_send(
overseer: &mut test_helpers::TestSubsystemContextHandle<AvailabilityStoreMessage>,
msg: AvailabilityStoreMessage,
) {
log::trace!("Sending message:\n{:?}", &msg);
tracing::trace!(msg = ?msg, "sending message");
overseer
.send(FromOverseer::Communication { msg })
.timeout(TIMEOUT)
......@@ -142,7 +143,7 @@ async fn overseer_recv(
.await
.expect(&format!("{:?} is more than enough to receive messages", TIMEOUT));
log::trace!("Received message:\n{:?}", &msg);
tracing::trace!(msg = ?msg, "received message");
msg
}
......@@ -151,7 +152,7 @@ async fn overseer_recv_with_timeout(
overseer: &mut test_helpers::TestSubsystemContextHandle<AvailabilityStoreMessage>,
timeout: Duration,
) -> Option<AllMessages> {
log::trace!("Waiting for message...");
tracing::trace!("waiting for message...");
overseer
.recv()
.timeout(timeout)
......
......@@ -14,7 +14,8 @@ polkadot-node-subsystem-util = { path = "../../subsystem-util" }
erasure-coding = { package = "polkadot-erasure-coding", path = "../../../erasure-coding" }
statement-table = { package = "polkadot-statement-table", path = "../../../statement-table" }
bitvec = { version = "0.17.4", default-features = false, features = ["alloc"] }
log = "0.4.11"
tracing = "0.1.22"
tracing-futures = "0.2.4"
thiserror = "1.0.22"
[dev-dependencies]
......
This diff is collapsed.
......@@ -6,7 +6,8 @@ edition = "2018"
[dependencies]
futures = "0.3.8"
log = "0.4.11"
tracing = "0.1.22"
tracing-futures = "0.2.4"
polkadot-primitives = { path = "../../../primitives" }
polkadot-node-subsystem = { path = "../../subsystem" }
polkadot-node-subsystem-util = { path = "../../subsystem-util" }
......
......@@ -140,6 +140,7 @@ pub enum Error {
/// If there is a candidate pending availability, query the Availability Store
/// for whether we have the availability chunk for our validator index.
#[tracing::instrument(level = "trace", skip(sender), fields(subsystem = LOG_TARGET))]
async fn get_core_availability(
relay_parent: Hash,
core: CoreState,
......@@ -164,7 +165,7 @@ async fn get_core_availability(
Ok(None) => return Ok(false),
Err(e) => {
// Don't take down the node on runtime API errors.
log::warn!(target: LOG_TARGET, "Encountered a runtime API error: {:?}", e);
tracing::warn!(target: LOG_TARGET, err = ?e, "Encountered a runtime API error");
return Ok(false);
}
};
......@@ -201,6 +202,7 @@ async fn get_availability_cores(relay_parent: Hash, sender: &mut mpsc::Sender<Fr
/// - for each core, concurrently determine chunk availability (see `get_core_availability`)
/// - return the bitfield if there were no errors at any point in this process
/// (otherwise, it's prone to false negatives)
#[tracing::instrument(level = "trace", skip(sender), fields(subsystem = LOG_TARGET))]
async fn construct_availability_bitfield(
relay_parent: Hash,
validator_idx: ValidatorIndex,
......@@ -228,6 +230,7 @@ async fn construct_availability_bitfield(
#[derive(Clone)]
struct MetricsInner {
bitfields_signed_total: prometheus::Counter<prometheus::U64>,
run: prometheus::Histogram,
}
/// Bitfield signing metrics.
......@@ -240,6 +243,11 @@ impl Metrics {
metrics.bitfields_signed_total.inc();
}
}
/// Provide a timer for `run` which observes on drop.
fn time_run(&self) -> Option<metrics::prometheus::prometheus::HistogramTimer> {
self.0.as_ref().map(|metrics| metrics.run.start_timer())
}
}
impl metrics::Metrics for Metrics {
......@@ -252,6 +260,15 @@ impl metrics::Metrics for Metrics {
)?,
registry,
)?,
run: prometheus::register(
prometheus::Histogram::with_opts(
prometheus::HistogramOpts::new(
"parachain_bitfield_signing_run",
"Time spent within `bitfield_signing::run`",
)
)?,
registry,
)?,
};
Ok(Metrics(Some(metrics)))
}
......@@ -267,6 +284,7 @@ impl JobTrait for BitfieldSigningJob {
const NAME: &'static str = "BitfieldSigningJob";
/// Run a job for the parent block indicated
#[tracing::instrument(skip(keystore, metrics, _receiver, sender), fields(subsystem = LOG_TARGET))]
fn run(
relay_parent: Hash,
keystore: Self::RunArgs,
......@@ -274,6 +292,7 @@ impl JobTrait for BitfieldSigningJob {
_receiver: mpsc::Receiver<ToJob>,
mut sender: mpsc::Sender<FromJob>,
) -> Pin<Box<dyn Future<Output = Result<(), Self::Error>> + Send>> {
let metrics = metrics.clone();
async move {
let wait_until = Instant::now() + JOB_DELAY;
......@@ -288,12 +307,16 @@ impl JobTrait for BitfieldSigningJob {
// wait a bit before doing anything else
Delay::new_at(wait_until).await?;
// this timer does not appear at the head of the function because we don't want to include
// JOB_DELAY each time.
let _timer = metrics.time_run();
let bitfield =
match construct_availability_bitfield(relay_parent, validator.index(), &mut sender).await
{
Err(Error::Runtime(runtime_err)) => {
// Don't take down the node on runtime API errors.
log::warn!(target: LOG_TARGET, "Encountered a runtime API error: {:?}", runtime_err);
tracing::warn!(target: LOG_TARGET, err = ?runtime_err, "Encountered a runtime API error");
return Ok(());
}
Err(err) => return Err(err),
......
......@@ -6,7 +6,8 @@ edition = "2018"
[dependencies]
futures = "0.3.8"
log = "0.4.11"
tracing = "0.1.22"
tracing-futures = "0.2.4"
thiserror = "1.0.22"
polkadot-primitives = { path = "../../../primitives" }
polkadot-node-subsystem = { path = "../../subsystem" }
......
This diff is collapsed.
......@@ -6,7 +6,8 @@ edition = "2018"
[dependencies]
futures = "0.3.8"
log = "0.4.11"
tracing = "0.1.22"
tracing-futures = "0.2.4"
sp-core = { package = "sp-core", git = "https://github.com/paritytech/substrate", branch = "master" }
parity-scale-codec = { version = "1.3.5", default-features = false, features = ["bit-vec", "derive"] }
......
This diff is collapsed.