parity / polkadot: commit 9b5a6560

Commit 9b5a6560, authored Mar 10, 2021 by Cecile Tonglet
Merge commit 95b2e18e (no conflict)
Parents: d5e196f4, 95b2e18e
Pipeline #127718 failed after 7 minutes and 11 seconds
Changes: 50, Pipelines: 1

Cargo.lock

(This diff is collapsed in the original view; the lockfile changes are not shown.)

bridges/bin/millau/runtime/src/lib.rs

@@ -458,7 +458,7 @@ impl_runtime_apis! {
 		}

 		fn random_seed() -> <Block as BlockT>::Hash {
-			RandomnessCollectiveFlip::random_seed()
+			RandomnessCollectiveFlip::random_seed().0
 		}
 	}

bridges/bin/rialto/runtime/src/lib.rs

@@ -569,7 +569,7 @@ impl_runtime_apis! {
 		}

 		fn random_seed() -> <Block as BlockT>::Hash {
-			RandomnessCollectiveFlip::random_seed()
+			RandomnessCollectiveFlip::random_seed().0
 		}
 	}
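
The two runtime hunks above make the same change: the value returned by `random_seed()` gains a `.0`. A plausible reading (not stated in the commit itself) is that the upstream `Randomness` trait behind `RandomnessCollectiveFlip` now returns the seed together with the block number at which it became available, so taking the first element keeps the runtime API's `Hash` return type unchanged. A minimal sketch of that assumed shape:

// Sketch only: an assumed shape for the upstream trait, not the actual
// frame_support definition. The point is why `.0` preserves the old `-> Hash` API.
pub trait Randomness<Output, BlockNumber> {
    /// Returns the seed together with the block number after which it became usable.
    fn random_seed() -> (Output, BlockNumber);
}

fn random_seed_compat<R, H, N>() -> H
where
    R: Randomness<H, N>,
{
    // Discard the block number to keep the previous hash-only signature.
    R::random_seed().0
}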

node/collation-generation/src/lib.rs

@@ -46,7 +46,7 @@ use std::sync::Arc;
 mod error;

-const LOG_TARGET: &'static str = "collation_generation";
+const LOG_TARGET: &'static str = "parachain::collation-generation";

 /// Collation Generation Subsystem
 pub struct CollationGenerationSubsystem {
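
This is the first of many hunks in this commit that move a subsystem's `LOG_TARGET` under a shared "parachain::" namespace and switch the suffix to kebab-case. Assuming these targets feed a tracing/env-filter style subscriber, as in other Substrate-based code, the shared prefix lets a single directive enable or silence every parachain subsystem at once. An illustrative sketch, not code from this commit:

// Illustrative only: shows how a `::`-namespaced target lets one
// "parachain=debug" directive cover all parachain subsystems. Assumes the
// tracing and tracing-subscriber crates with the "env-filter" feature.
use tracing_subscriber::EnvFilter;

fn main() {
    tracing_subscriber::fmt()
        .with_env_filter(EnvFilter::new("parachain=debug"))
        .init();

    // Enabled: the target sits under the "parachain" prefix.
    tracing::debug!(target: "parachain::collation-generation", "collator initialized");
    // Not enabled at debug level: a different namespace.
    tracing::debug!(target: "some_other_target", "this one is filtered out");
}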

node/core/approval-voting/Cargo.toml

@@ -17,7 +17,8 @@ kvdb = "0.9.0"
 kvdb-rocksdb = "0.11.0"
 derive_more = "0.99.1"

-polkadot-subsystem = { package = "polkadot-node-subsystem", path = "../../subsystem" }
+polkadot-node-subsystem = { path = "../../subsystem" }
 polkadot-node-subsystem-util = { path = "../../subsystem-util" }
+polkadot-overseer = { path = "../../overseer" }
 polkadot-primitives = { path = "../../../primitives" }
 polkadot-node-primitives = { path = "../../primitives" }

node/core/approval-voting/src/import.rs

@@ -28,7 +28,7 @@
 //!
 //! We maintain a rolling window of session indices. This starts as empty

-use polkadot_subsystem::{
+use polkadot_node_subsystem::{
 	messages::{
 		RuntimeApiMessage, RuntimeApiRequest, ChainApiMessage, ApprovalDistributionMessage,
 	},

@@ -708,10 +708,12 @@ mod tests {
 	use polkadot_node_subsystem_test_helpers::make_subsystem_context;
 	use polkadot_node_primitives::approval::{VRFOutput, VRFProof};
 	use polkadot_primitives::v1::ValidatorIndex;
-	use polkadot_subsystem::messages::AllMessages;
+	use polkadot_node_subsystem::messages::AllMessages;
 	use sp_core::testing::TaskExecutor;
 	use sp_runtime::{Digest, DigestItem};
-	use sp_consensus_babe::Epoch as BabeEpoch;
+	use sp_consensus_babe::{
+		Epoch as BabeEpoch, BabeEpochConfiguration, AllowedSlots,
+	};
 	use sp_consensus_babe::digests::{CompatibleDigestItem, PreDigest, SecondaryVRFPreDigest};
 	use sp_keyring::sr25519::Keyring as Sr25519Keyring;
 	use assert_matches::assert_matches;

@@ -1358,6 +1360,10 @@ mod tests {
 				duration: 200,
 				authorities: vec![(Sr25519Keyring::Alice.public().into(), 1)],
 				randomness: [0u8; 32],
+				config: BabeEpochConfiguration {
+					c: (1, 4),
+					allowed_slots: AllowedSlots::PrimarySlots,
+				},
 			}));
 		}
 	);

@@ -1463,6 +1469,10 @@ mod tests {
 				duration: 200,
 				authorities: vec![(Sr25519Keyring::Alice.public().into(), 1)],
 				randomness: [0u8; 32],
+				config: BabeEpochConfiguration {
+					c: (1, 4),
+					allowed_slots: AllowedSlots::PrimarySlots,
+				},
 			}));
 		}
 	);

@@ -1714,6 +1724,10 @@ mod tests {
 				duration: 200,
 				authorities: vec![(Sr25519Keyring::Alice.public().into(), 1)],
 				randomness: [0u8; 32],
+				config: BabeEpochConfiguration {
+					c: (1, 4),
+					allowed_slots: AllowedSlots::PrimarySlots,
+				},
 			}));
 		}
 	);

node/core/approval-voting/src/lib.rs

@@ -21,7 +21,7 @@
 //! of others. It uses this information to determine when candidates and blocks have
 //! been sufficiently approved to finalize.

-use polkadot_subsystem::{
+use polkadot_node_subsystem::{
 	messages::{
 		AssignmentCheckResult, ApprovalCheckResult, ApprovalVotingMessage,
 		RuntimeApiMessage, RuntimeApiRequest, ChainApiMessage, ApprovalDistributionMessage,

@@ -31,6 +31,9 @@ use polkadot_subsystem::{
 	Subsystem, SubsystemContext, SubsystemError, SubsystemResult, SpawnedSubsystem,
 	FromOverseer, OverseerSignal,
 };
+use polkadot_node_subsystem_util::{
+	metrics::{self, prometheus},
+};
 use polkadot_primitives::v1::{
 	ValidatorIndex, Hash, SessionIndex, SessionInfo, CandidateHash,
 	CandidateReceipt, BlockNumber, PersistedValidationData,

@@ -72,7 +75,7 @@ mod persisted_entries;
 mod tests;

 const APPROVAL_SESSIONS: SessionIndex = 6;
-const LOG_TARGET: &str = "approval_voting";
+const LOG_TARGET: &str = "parachain::approval-voting";

 /// Configuration for the approval voting subsystem
 pub struct Config {

@@ -94,15 +97,79 @@ pub struct ApprovalVotingSubsystem {
 	keystore: Arc<LocalKeystore>,
 	slot_duration_millis: u64,
 	db: Arc<dyn KeyValueDB>,
+	metrics: Metrics,
 }
+
+#[derive(Clone)]
+struct MetricsInner {
+	imported_candidates_total: prometheus::Counter<prometheus::U64>,
+	assignments_produced_total: prometheus::Counter<prometheus::U64>,
+	approvals_produced_total: prometheus::Counter<prometheus::U64>,
+}
+
+/// Aproval Voting metrics.
+#[derive(Default, Clone)]
+pub struct Metrics(Option<MetricsInner>);
+
+impl Metrics {
+	fn on_candidate_imported(&self) {
+		if let Some(metrics) = &self.0 {
+			metrics.imported_candidates_total.inc();
+		}
+	}
+
+	fn on_assignment_produced(&self) {
+		if let Some(metrics) = &self.0 {
+			metrics.assignments_produced_total.inc();
+		}
+	}
+
+	fn on_approval_produced(&self) {
+		if let Some(metrics) = &self.0 {
+			metrics.approvals_produced_total.inc();
+		}
+	}
+}
+
+impl metrics::Metrics for Metrics {
+	fn try_register(
+		registry: &prometheus::Registry,
+	) -> std::result::Result<Self, prometheus::PrometheusError> {
+		let metrics = MetricsInner {
+			imported_candidates_total: prometheus::register(
+				prometheus::Counter::new(
+					"parachain_imported_candidates_total",
+					"Number of candidates imported by the approval voting subsystem",
+				)?,
+				registry,
+			)?,
+			assignments_produced_total: prometheus::register(
+				prometheus::Counter::new(
+					"parachain_assignments_produced_total",
+					"Number of assignments produced by the approval voting subsystem",
+				)?,
+				registry,
+			)?,
+			approvals_produced_total: prometheus::register(
+				prometheus::Counter::new(
+					"parachain_approvals_produced_total",
+					"Number of approvals produced by the approval voting subsystem",
+				)?,
+				registry,
+			)?,
+		};
+
+		Ok(Metrics(Some(metrics)))
+	}
+}

 impl ApprovalVotingSubsystem {
-	/// Create a new approval voting subsystem with the given keystore, slot duration,
+	/// Create a new approval voting subsystem with the given keystore and config,
 	/// which creates a DB at the given path. This function will delete the directory
 	/// at the given path if it already exists.
 	pub fn with_config(
 		config: Config,
 		keystore: Arc<LocalKeystore>,
+		metrics: Metrics,
 	) -> std::io::Result<Self> {
 		const DEFAULT_CACHE_SIZE: usize = 100 * 1024 * 1024; // 100MiB default should be fine unless finality stalls.
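
The `Metrics` type added above follows a common subsystem pattern: a cheaply cloneable handle wrapping `Option<MetricsInner>`, so the `on_*` calls are no-ops until `try_register` has run against a Prometheus registry. A stripped-down sketch of the same pattern, written against the plain `prometheus` crate with illustrative names rather than Polkadot's own types:

// Minimal sketch of the optional-metrics-handle pattern, using the plain
// `prometheus` crate for illustration; the subsystem itself goes through the
// re-exports in polkadot-node-subsystem-util.
use prometheus::{IntCounter, Registry};

#[derive(Default, Clone)]
struct ExampleMetrics(Option<IntCounter>);

impl ExampleMetrics {
    fn try_register(registry: &Registry) -> prometheus::Result<Self> {
        let counter = IntCounter::new("example_events_total", "Number of example events")?;
        registry.register(Box::new(counter.clone()))?;
        Ok(ExampleMetrics(Some(counter)))
    }

    // Cheap no-op when metrics were never registered (e.g. in tests).
    fn on_event(&self) {
        if let Some(counter) = &self.0 {
            counter.inc();
        }
    }
}

fn main() -> prometheus::Result<()> {
    let registry = Registry::new();
    let metrics = ExampleMetrics::try_register(&registry)?;
    metrics.on_event();

    // An unregistered handle simply does nothing.
    ExampleMetrics::default().on_event();
    Ok(())
}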

@@ -115,6 +182,7 @@ impl ApprovalVotingSubsystem {
 			keystore,
 			slot_duration_millis: config.slot_duration_millis,
 			db,
+			metrics,
 		})
 	}
 }

@@ -343,6 +411,7 @@ async fn run<C>(
 				handle_from_overseer(
 					&mut ctx,
 					&mut state,
+					&subsystem.metrics,
 					db_writer,
 					next_msg?,
 					&mut last_finalized_height,

@@ -353,6 +422,7 @@ async fn run<C>(
 				handle_background_request(
 					&mut ctx,
 					&mut state,
+					&subsystem.metrics,
 					req,
 				).await?
 			} else {

@@ -363,6 +433,7 @@ async fn run<C>(
 			if handle_actions(
 				&mut ctx,
+				&subsystem.metrics,
 				&mut wakeups,
 				db_writer,
 				&background_tx,

@@ -378,6 +449,7 @@ async fn run<C>(
 // returns `true` if any of the actions was a `Conclude` command.
 async fn handle_actions(
 	ctx: &mut impl SubsystemContext,
+	metrics: &Metrics,
 	wakeups: &mut Wakeups,
 	db: &dyn KeyValueDB,
 	background_tx: &mpsc::Sender<BackgroundRequest>,

@@ -406,6 +478,7 @@ async fn handle_actions(
 				candidate,
 				backing_group,
 			} => {
+				metrics.on_assignment_produced();
 				let block_hash = indirect_cert.block_hash;
 				let validator_index = indirect_cert.validator;

@@ -439,6 +512,7 @@ async fn handle_actions(
 async fn handle_from_overseer(
 	ctx: &mut impl SubsystemContext,
 	state: &mut State<impl DBReader>,
+	metrics: &Metrics,
 	db_writer: &dyn KeyValueDB,
 	x: FromOverseer<ApprovalVotingMessage>,
 	last_finalized_height: &mut Option<BlockNumber>,

@@ -468,6 +542,8 @@ async fn handle_from_overseer(
 			);
 			for (c_hash, c_entry) in block_batch.imported_candidates {
+				metrics.on_candidate_imported();
+
 				let our_tranche = c_entry
 					.approval_entry(&block_batch.block_hash)
 					.and_then(|a| a.our_assignment().map(|a| a.tranche()));

@@ -542,11 +618,12 @@ async fn handle_from_overseer(
 async fn handle_background_request(
 	ctx: &mut impl SubsystemContext,
 	state: &State<impl DBReader>,
+	metrics: &Metrics,
 	request: BackgroundRequest,
 ) -> SubsystemResult<Vec<Action>> {
 	match request {
 		BackgroundRequest::ApprovalVote(vote_request) => {
-			issue_approval(ctx, state, vote_request).await
+			issue_approval(ctx, state, metrics, vote_request).await
 		}
 		BackgroundRequest::CandidateValidation(
 			validation_data,

@@ -1443,6 +1520,7 @@ async fn launch_approval(
 async fn issue_approval(
 	ctx: &mut impl SubsystemContext,
 	state: &State<impl DBReader>,
+	metrics: &Metrics,
 	request: ApprovalVoteRequest,
 ) -> SubsystemResult<Vec<Action>> {
 	let ApprovalVoteRequest { validator_index, block_hash, candidate_index } = request;

@@ -1541,6 +1619,8 @@ async fn issue_approval(
 		validator_index as _,
 	)?;

+	metrics.on_approval_produced();
+
 	// dispatch to approval distribution.
 	ctx.send_message(ApprovalDistributionMessage::DistributeApproval(IndirectSignedApprovalVote {
 		block_hash,

node/core/approval-voting/src/tests.rs

@@ -21,7 +21,7 @@ use polkadot_node_primitives::approval::{
 	RELAY_VRF_MODULO_CONTEXT, DelayTranche,
 };
 use polkadot_node_subsystem_test_helpers::make_subsystem_context;
-use polkadot_subsystem::messages::AllMessages;
+use polkadot_node_subsystem::messages::AllMessages;
 use sp_core::testing::TaskExecutor;
 use parking_lot::Mutex;

node/core/av-store/src/lib.rs

@@ -49,7 +49,7 @@ use bitvec::{vec::BitVec, order::Lsb0 as BitOrderLsb0};
 #[cfg(test)]
 mod tests;

-const LOG_TARGET: &str = "availability";
+const LOG_TARGET: &str = "parachain::availability";

 mod columns {
 	pub const DATA: u32 = 0;

node/core/backing/src/lib.rs

@@ -66,7 +66,7 @@ use statement_table::{
 };
 use thiserror::Error;

-const LOG_TARGET: &str = "candidate_backing";
+const LOG_TARGET: &str = "parachain::candidate-backing";

 #[derive(Debug, Error)]
 enum Error {

@@ -197,7 +197,6 @@ fn primitive_statement_to_table(s: &SignedFullStatement) -> TableSignedStatement
 	let statement = match s.payload() {
 		Statement::Seconded(c) => TableStatement::Seconded(c.clone()),
 		Statement::Valid(h) => TableStatement::Valid(h.clone()),
-		Statement::Invalid(h) => TableStatement::Invalid(h.clone()),
 	};

 	TableSignedStatement {

@@ -555,14 +554,11 @@ impl CandidateBackingJob {
 			ValidatedCandidateCommand::Attest(res) => {
 				// sanity check.
 				if !self.issued_statements.contains(&candidate_hash) {
-					let statement = if res.is_ok() {
-						Statement::Valid(candidate_hash)
-					} else {
-						Statement::Invalid(candidate_hash)
-					};
+					if res.is_ok() {
+						let statement = Statement::Valid(candidate_hash);
+						self.sign_import_and_distribute_statement(statement, &parent_span).await?;
+					}
 					self.issued_statements.insert(candidate_hash);
-					self.sign_import_and_distribute_statement(statement, &parent_span).await?;
 				}
 			}
 		}

@@ -1241,7 +1237,6 @@ mod tests {
 		match statement {
 			TableStatement::Seconded(committed_candidate_receipt) => Statement::Seconded(committed_candidate_receipt),
 			TableStatement::Valid(candidate_hash) => Statement::Valid(candidate_hash),
-			TableStatement::Invalid(candidate_hash) => Statement::Invalid(candidate_hash),
 		}
 	}

@@ -1883,15 +1878,11 @@ mod tests {
 		}.build();

 		let candidate_a_hash = candidate_a.hash();
-		let public0 = CryptoStore::sr25519_generate_new(
-			&*test_state.keystore,
-			ValidatorId::ID,
-			Some(&test_state.validators[0].to_seed())
-		).await.expect("Insert key into keystore");
 		let public2 = CryptoStore::sr25519_generate_new(
 			&*test_state.keystore,
 			ValidatorId::ID,
 			Some(&test_state.validators[2].to_seed())
 		).await.expect("Insert key into keystore");

-		let signed_a = SignedFullStatement::sign(
+		let seconded_2 = SignedFullStatement::sign(
 			&test_state.keystore,
 			Statement::Seconded(candidate_a.clone()),
 			&test_state.signing_context,

@@ -1899,25 +1890,17 @@ mod tests {
 			&public2.into(),
 		).await.ok().flatten().expect("should be signed");

-		let signed_b = SignedFullStatement::sign(
+		let valid_2 = SignedFullStatement::sign(
 			&test_state.keystore,
-			Statement::Invalid(candidate_a_hash),
+			Statement::Valid(candidate_a_hash),
 			&test_state.signing_context,
 			ValidatorIndex(2),
 			&public2.into(),
 		).await.ok().flatten().expect("should be signed");

-		let signed_c = SignedFullStatement::sign(
-			&test_state.keystore,
-			Statement::Invalid(candidate_a_hash),
-			&test_state.signing_context,
-			ValidatorIndex(0),
-			&public0.into(),
-		).await.ok().flatten().expect("should be signed");
-
-		let statement = CandidateBackingMessage::Statement(test_state.relay_parent, signed_a.clone());
+		let statement = CandidateBackingMessage::Statement(test_state.relay_parent, seconded_2.clone());

 		virtual_overseer.send(FromOverseer::Communication { msg: statement }).await;

 		assert_matches!(
 			virtual_overseer.recv().await,

@@ -1976,51 +1959,10 @@ mod tests {
 			}
 		);

-		// This `Invalid` statement contradicts the `Candidate` statement
-		// sent at first.
-		let statement = CandidateBackingMessage::Statement(test_state.relay_parent, signed_b.clone());
+		// This `Valid` statement is redundant after the `Seconded` statement already sent.
+		let statement = CandidateBackingMessage::Statement(test_state.relay_parent, valid_2.clone());

 		virtual_overseer.send(FromOverseer::Communication { msg: statement }).await;

-		assert_matches!(
-			virtual_overseer.recv().await,
-			AllMessages::Provisioner(
-				ProvisionerMessage::ProvisionableData(
-					_,
-					ProvisionableData::MisbehaviorReport(
-						relay_parent,
-						validator_index,
-						Misbehavior::ValidityDoubleVote(vdv),
-					)
-				)
-			) if relay_parent == test_state.relay_parent => {
-				let ((t1, s1), (t2, s2)) = vdv.deconstruct::<TableContext>();
-				let t1 = table_statement_to_primitive(t1);
-				let t2 = table_statement_to_primitive(t2);
-
-				SignedFullStatement::new(
-					t1,
-					validator_index,
-					s1,
-					&test_state.signing_context,
-					&test_state.validator_public[validator_index.0 as usize],
-				).expect("signature must be valid");
-
-				SignedFullStatement::new(
-					t2,
-					validator_index,
-					s2,
-					&test_state.signing_context,
-					&test_state.validator_public[validator_index.0 as usize],
-				).expect("signature must be valid");
-			}
-		);
-
-		// This `Invalid` statement contradicts the `Valid` statement the subsystem
-		// should have issued behind the scenes.
-		let statement = CandidateBackingMessage::Statement(test_state.relay_parent, signed_c.clone());
-
-		virtual_overseer.send(FromOverseer::Communication { msg: statement }).await;

 		assert_matches!(
 			virtual_overseer.recv().await,

@@ -2192,7 +2134,7 @@ mod tests {
 	// Test that if we have already issued a statement (in this case `Invalid`) about a
 	// candidate we will not be issuing a `Seconded` statement on it.
 	#[test]
-	fn backing_multiple_statements_work() {
+	fn backing_second_after_first_fails_works() {
 		let test_state = TestState::default();
 		test_harness(test_state.keystore.clone(), |test_harness| async move {
 			let TestHarness { mut virtual_overseer } = test_harness;

@@ -2213,8 +2155,6 @@ mod tests {
 			..Default::default()
 		}.build();

-		let candidate_hash = candidate.hash();
-
 		let validator2 = CryptoStore::sr25519_generate_new(
 			&*test_state.keystore,
 			ValidatorId::ID,
 			Some(&test_state.validators[2].to_seed())

@@ -2262,24 +2202,6 @@ mod tests {
 			}
 		);

-		// The invalid message is shared.
-		assert_matches!(
-			virtual_overseer.recv().await,
-			AllMessages::StatementDistribution(
-				StatementDistributionMessage::Share(
-					relay_parent,
-					signed_statement,
-				)
-			) => {
-				assert_eq!(relay_parent, test_state.relay_parent);
-				signed_statement.check_signature(
-					&test_state.signing_context,
-					&test_state.validator_public[0],
-				).unwrap();
-				assert_eq!(*signed_statement.payload(), Statement::Invalid(candidate_hash));
-			}
-		);
-
 		// Ask subsystem to `Second` a candidate that already has a statement issued about.
 		// This should emit no actions from subsystem.
 		let second = CandidateBackingMessage::Second(

node/core/bitfield-signing/src/lib.rs

@@ -39,7 +39,7 @@ use wasm_timer::{Delay, Instant};
 /// Delay between starting a bitfield signing job and its attempting to create a bitfield.
 const JOB_DELAY: Duration = Duration::from_millis(1500);

-const LOG_TARGET: &str = "bitfield_signing";
+const LOG_TARGET: &str = "parachain::bitfield-signing";

 /// Each `BitfieldSigningJob` prepares a signed bitfield for a single relay parent.
 pub struct BitfieldSigningJob;

node/core/candidate-selection/src/lib.rs

@@ -43,7 +43,7 @@ use polkadot_node_primitives::SignedFullStatement;
 use std::{pin::Pin, sync::Arc};
 use thiserror::Error;

-const LOG_TARGET: &'static str = "candidate_selection";
+const LOG_TARGET: &'static str = "parachain::candidate-selection";

 struct CandidateSelectionJob {
 	assignment: ParaId,

node/core/candidate-validation/src/lib.rs

@@ -51,7 +51,7 @@ use futures::prelude::*;
 use std::sync::Arc;

-const LOG_TARGET: &'static str = "candidate_validation";
+const LOG_TARGET: &'static str = "parachain::candidate-validation";

 /// The candidate validation subsystem.
 pub struct CandidateValidationSubsystem<S> {

node/core/chain-api/src/lib.rs

@@ -44,7 +44,7 @@ use std::sync::Arc;
 use futures::prelude::*;

-const LOG_TARGET: &str = "chain_api";
+const LOG_TARGET: &str = "parachain::chain-api";

 /// The Chain API Subsystem implementation.
 pub struct ChainApiSubsystem<Client> {

node/core/provisioner/src/lib.rs

@@ -46,7 +46,7 @@ use futures_timer::Delay;
 /// How long to wait before proposing.
 const PRE_PROPOSE_TIMEOUT: std::time::Duration = core::time::Duration::from_millis(2000);

-const LOG_TARGET: &str = "provisioner";
+const LOG_TARGET: &str = "parachain::provisioner";

 enum InherentAfter {
 	Ready,

node/core/runtime-api/src/lib.rs

@@ -43,7 +43,7 @@ use cache::{RequestResult, RequestResultCache};
 mod cache;

-const LOG_TARGET: &str = "runtime_api";
+const LOG_TARGET: &str = "parachain::runtime-api";

 /// The number of maximum runtime api requests can be executed in parallel. Further requests will be buffered.
 const MAX_PARALLEL_REQUESTS: usize = 4;
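
The context lines above document the request cap: at most MAX_PARALLEL_REQUESTS runtime API calls run concurrently and further requests wait their turn. A generic sketch of that buffering behaviour, not the subsystem's actual implementation, could look like this:

// Generic sketch of "run at most N at once, buffer the rest"; this is not the
// runtime-api subsystem's code, just the behaviour its comment describes.
use futures::{future::BoxFuture, stream::FuturesUnordered, StreamExt};
use std::collections::VecDeque;

const MAX_PARALLEL_REQUESTS: usize = 4;

async fn drive(mut buffered: VecDeque<BoxFuture<'static, ()>>) {
    let mut active = FuturesUnordered::new();

    loop {
        // Top up the active set from the buffer, up to the cap.
        while active.len() < MAX_PARALLEL_REQUESTS {
            match buffered.pop_front() {
                Some(request) => active.push(request),
                None => break,
            }
        }

        // Wait for one in-flight request to finish; stop once everything has drained.
        if active.next().await.is_none() && buffered.is_empty() {
            break;
        }
    }
}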

@@ -422,7 +422,9 @@ mod tests {
 	use sp_core::testing::TaskExecutor;
 	use std::{collections::{HashMap, BTreeMap}, sync::{Arc, Mutex}};
 	use futures::channel::oneshot;
-	use polkadot_node_primitives::BabeEpoch;
+	use polkadot_node_primitives::{
+		BabeEpoch, BabeEpochConfiguration, BabeAllowedSlots,
+	};

 	#[derive(Default, Clone)]
 	struct MockRuntimeApi {

@@ -1158,6 +1160,10 @@ mod tests {
 			duration: 10,
 			authorities: Vec::new(),
 			randomness: [1u8; 32],
+			config: BabeEpochConfiguration {
+				c: (1, 4),
+				allowed_slots: BabeAllowedSlots::PrimarySlots,
+			},
 		};
 		runtime_api.babe_epoch = Some(epoch.clone());
 		let runtime_api = Arc::new(runtime_api);

node/network/approval-distribution/src/lib.rs

@@ -45,7 +45,7 @@ use polkadot_node_network_protocol::{
 };

-const LOG_TARGET: &str = "approval_distribution";
+const LOG_TARGET: &str = "parachain::approval-distribution";

 const COST_UNEXPECTED_MESSAGE: Rep = Rep::CostMinor("Peer sent an out-of-view assignment or approval");
 const COST_DUPLICATE_MESSAGE: Rep = Rep::CostMinorRepeated("Peer sent identical messages");

node/network/availability-distribution/src/lib.rs

@@ -46,7 +46,7 @@ pub use metrics::Metrics;
 #[cfg(test)]
 mod tests;

-const LOG_TARGET: &'static str = "availability_distribution";
+const LOG_TARGET: &'static str = "parachain::availability-distribution";

 /// The availability distribution subsystem.
 pub struct AvailabilityDistributionSubsystem {

node/network/availability-recovery/src/lib.rs

@@ -56,7 +56,7 @@ mod error;
 #[cfg(test)]
 mod tests;

-const LOG_TARGET: &str = "availability_recovery";
+const LOG_TARGET: &str = "parachain::availability-recovery";