parity / polkadot (mirrored project)

Commit 8be63cd6 (Unverified)
Authored Jun 09, 2021 by Andronik Ordian, committed by GitHub on Jun 09, 2021
remove tracing::instrument annotations (#3197)

* remove tracing::instrument annotations
* remove unused param and leftover
* more leftovers
Parent: a803f872
Pipeline #141419 passed with stages in 36 minutes and 29 seconds
Changes: 22 files · Pipelines: 1
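For context on the pattern being removed throughout this diff: `#[tracing::instrument]` is an attribute macro from the `tracing` crate that wraps a function so that every call opens a span, recording the non-`skip`ped arguments as span fields. After removal, events logged inside the function still fire, just without a per-call span. A minimal before/after sketch (names are illustrative, not from this commit; only the `tracing` crate is assumed):

    use tracing::instrument;

    const LOG_TARGET: &str = "parachain::example";

    // Before: the attribute opens a span for every call and records the
    // non-skipped arguments (here: `id`) as span fields.
    #[instrument(level = "trace", skip(payload), fields(subsystem = LOG_TARGET))]
    fn process(id: u32, payload: Vec<u8>) {
        tracing::trace!(target: LOG_TARGET, id, len = payload.len(), "processing");
    }

    // After this commit's pattern: no per-call span; events are emitted directly.
    fn process_untraced(id: u32, payload: Vec<u8>) {
        tracing::trace!(target: LOG_TARGET, id, len = payload.len(), "processing");
    }

    fn main() {
        process(1, vec![0u8; 8]);
        process_untraced(2, vec![0u8; 8]);
    }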
node/collation-generation/src/lib.rs

@@ -77,7 +77,6 @@ impl CollationGenerationSubsystem {
     ///
     /// If `err_tx` is not `None`, errors are forwarded onto that channel as they occur.
     /// Otherwise, most are logged and then discarded.
-    #[tracing::instrument(skip(self, ctx), fields(subsystem = LOG_TARGET))]
     async fn run<Context>(mut self, mut ctx: Context)
     where
         Context: SubsystemContext<Message = CollationGenerationMessage>,

@@ -110,7 +109,6 @@ impl CollationGenerationSubsystem {
     // note: this doesn't strictly need to be a separate function; it's more an administrative function
     // so that we don't clutter the run loop. It could in principle be inlined directly into there.
     // it should hopefully therefore be ok that it's an async function mutably borrowing self.
-    #[tracing::instrument(level = "trace", skip(self, ctx, sender), fields(subsystem = LOG_TARGET))]
     async fn handle_incoming<Context>(
         &mut self,
         incoming: SubsystemResult<FromOverseer<Context::Message>>,

@@ -184,7 +182,6 @@ where
     }
 }

-#[tracing::instrument(level = "trace", skip(ctx, metrics, sender, activated), fields(subsystem = LOG_TARGET))]
 async fn handle_new_activations<Context: SubsystemContext>(
     config: Arc<CollationGenerationConfig>,
     activated: impl IntoIterator<Item = Hash>,

@@ -419,7 +416,6 @@ async fn handle_new_activations<Context: SubsystemContext>(
     Ok(())
 }

-#[tracing::instrument(level = "trace", fields(subsystem = LOG_TARGET))]
 fn erasure_root(
     n_validators: usize,
     persisted_validation: PersistedValidationData,
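For readers unfamiliar with `erasure_root` (the function itself is unchanged here): it erasure-codes the available data across all validators and returns the root of the Merkle trie over the chunks. A rough sketch, under the assumption that the `polkadot-erasure-coding` crate's `obtain_chunks_v1`/`branches` API and these type paths match this version of the codebase:

    // Sketch only: assumes polkadot-erasure-coding's obtain_chunks_v1/branches
    // and the node-primitives types; exact paths may differ between versions.
    use std::sync::Arc;

    use polkadot_erasure_coding as erasure_coding;
    use polkadot_node_primitives::{AvailableData, PoV};
    use polkadot_primitives::v1::{Hash, PersistedValidationData};

    fn erasure_root(
        n_validators: usize,
        persisted_validation: PersistedValidationData,
        pov: PoV,
    ) -> Result<Hash, erasure_coding::Error> {
        let available_data = AvailableData {
            validation_data: persisted_validation,
            pov: Arc::new(pov),
        };

        // Split the available data into one erasure chunk per validator ...
        let chunks = erasure_coding::obtain_chunks_v1(n_validators, &available_data)?;
        // ... and return the root of the Merkle trie built over those chunks.
        Ok(erasure_coding::branches(&chunks).root())
    }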
node/core/av-store/src/lib.rs

@@ -498,7 +498,6 @@ where
     }
 }

-#[tracing::instrument(skip(subsystem, ctx), fields(subsystem = LOG_TARGET))]
 async fn run<Context>(mut subsystem: AvailabilityStoreSubsystem, mut ctx: Context)
 where
     Context: SubsystemContext<Message = AvailabilityStoreMessage>,

@@ -524,7 +523,6 @@ where
     }
 }

-#[tracing::instrument(level = "trace", skip(subsystem, ctx), fields(subsystem = LOG_TARGET))]
 async fn run_iteration<Context>(
     ctx: &mut Context,
     subsystem: &mut AvailabilityStoreSubsystem,
node/core/backing/src/lib.rs

@@ -241,7 +241,6 @@ fn primitive_statement_to_table(s: &SignedFullStatement) -> TableSignedStatement
     }
 }

-#[tracing::instrument(level = "trace", skip(attested, table_context), fields(subsystem = LOG_TARGET))]
 fn table_attested_to_backed(
     attested: TableAttestedCandidate<
         ParaId,

@@ -317,7 +316,6 @@ async fn store_available_data(
 //
 // This will compute the erasure root internally and compare it to the expected erasure root.
 // This returns `Err()` iff there is an internal error. Otherwise, it returns either `Ok(Ok(()))` or `Ok(Err(_))`.
-#[tracing::instrument(level = "trace", skip(sender, pov, span), fields(subsystem = LOG_TARGET))]
 async fn make_pov_available(
     sender: &mut JobSender<impl SubsystemSender>,
     validator_index: Option<ValidatorIndex>,
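The `Ok(Ok(()))`/`Ok(Err(_))` convention in the comment above separates internal failures (outer `Err`) from a failed check (inner `Err`). A self-contained illustration of the pattern, with hypothetical error types:

    /// Hypothetical error types illustrating the two failure layers.
    #[derive(Debug)]
    struct InternalError;    // e.g. a channel to another subsystem closed
    #[derive(Debug)]
    struct ValidationFailed; // e.g. the recomputed erasure root did not match

    /// Outer Err: internal error. Inner Err: the check itself failed.
    fn check(expected: u64, actual: Option<u64>) -> Result<Result<(), ValidationFailed>, InternalError> {
        // Losing the value entirely is an internal error ...
        let actual = actual.ok_or(InternalError)?;
        // ... while a mismatch is a normal, reportable outcome.
        if actual == expected {
            Ok(Ok(()))
        } else {
            Ok(Err(ValidationFailed))
        }
    }

    fn main() {
        assert!(matches!(check(1, Some(1)), Ok(Ok(()))));
        assert!(matches!(check(1, Some(2)), Ok(Err(ValidationFailed))));
        assert!(matches!(check(1, None), Err(InternalError)));
    }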
@@ -570,7 +568,6 @@ impl CandidateBackingJob {
         Ok(())
     }

-    #[tracing::instrument(level = "trace", skip(self, root_span, sender), fields(subsystem = LOG_TARGET))]
     async fn handle_validated_candidate_command(
         &mut self,
         root_span: &jaeger::Span,

@@ -647,7 +644,6 @@ impl CandidateBackingJob {
         Ok(())
     }

-    #[tracing::instrument(level = "trace", skip(self, sender, params), fields(subsystem = LOG_TARGET))]
     async fn background_validate_and_make_available(
         &mut self,
         sender: &mut JobSender<impl SubsystemSender>,

@@ -671,7 +667,6 @@ impl CandidateBackingJob {
     }

     /// Kick off background validation with intent to second.
-    #[tracing::instrument(level = "trace", skip(self, parent_span, sender, pov), fields(subsystem = LOG_TARGET))]
     async fn validate_and_second(
         &mut self,
         parent_span: &jaeger::Span,

@@ -743,7 +738,6 @@ impl CandidateBackingJob {
     }

     /// Check if there have happened any new misbehaviors and issue necessary messages.
-    #[tracing::instrument(level = "trace", skip(self, sender), fields(subsystem = LOG_TARGET))]
     async fn issue_new_misbehaviors(&mut self, sender: &mut JobSender<impl SubsystemSender>) {
         // collect the misbehaviors to avoid double mutable self borrow issues
         let misbehaviors: Vec<_> = self.table.drain_misbehaviors().collect();
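The `// collect the misbehaviors ...` comment in the hunk above refers to a standard borrow-checker workaround: draining the table borrows `self` mutably, so the results are collected into a `Vec` first, ending that borrow before `self` is borrowed mutably again inside the loop. A self-contained sketch with illustrative types:

    struct Table { misbehaviors: Vec<u32> }

    impl Table {
        fn drain_misbehaviors(&mut self) -> impl Iterator<Item = u32> + '_ {
            self.misbehaviors.drain(..)
        }
    }

    struct Job { table: Table, sent: Vec<u32> }

    impl Job {
        fn send(&mut self, m: u32) { self.sent.push(m); }

        fn issue_new_misbehaviors(&mut self) {
            // Collecting first ends the borrow of self.table, so calling
            // self.send(..) (another mutable borrow) is allowed in the loop.
            let misbehaviors: Vec<_> = self.table.drain_misbehaviors().collect();
            for m in misbehaviors {
                self.send(m);
            }
        }
    }

    fn main() {
        let mut job = Job { table: Table { misbehaviors: vec![1, 2, 3] }, sent: Vec::new() };
        job.issue_new_misbehaviors();
        assert_eq!(job.sent, vec![1, 2, 3]);
    }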
@@ -758,7 +752,6 @@ impl CandidateBackingJob {
     }

     /// Import a statement into the statement table and return the summary of the import.
-    #[tracing::instrument(level = "trace", skip(self, sender), fields(subsystem = LOG_TARGET))]
     async fn import_statement(
         &mut self,
         sender: &mut JobSender<impl SubsystemSender>,

@@ -828,7 +821,6 @@ impl CandidateBackingJob {
         Ok(summary)
     }

-    #[tracing::instrument(level = "trace", skip(self, root_span, sender), fields(subsystem = LOG_TARGET))]
     async fn process_msg(
         &mut self,
         root_span: &jaeger::Span,

@@ -895,7 +887,6 @@ impl CandidateBackingJob {
     }

     /// Kick off validation work and distribute the result as a signed statement.
-    #[tracing::instrument(level = "trace", skip(self, sender, attesting, span), fields(subsystem = LOG_TARGET))]
     async fn kick_off_validation_work(
         &mut self,
         sender: &mut JobSender<impl SubsystemSender>,

@@ -951,7 +942,6 @@ impl CandidateBackingJob {
     }

     /// Import the statement and kick off validation work if it is a part of our assignment.
-    #[tracing::instrument(level = "trace", skip(self, root_span, sender), fields(subsystem = LOG_TARGET))]
     async fn maybe_validate_and_import(
         &mut self,
         root_span: &jaeger::Span,

@@ -1014,7 +1004,6 @@ impl CandidateBackingJob {
         Ok(())
     }

-    #[tracing::instrument(level = "trace", skip(self), fields(subsystem = LOG_TARGET))]
     async fn sign_statement(&self, statement: Statement) -> Option<SignedFullStatement> {
         let signed = self.table_context.validator

@@ -1090,7 +1079,6 @@ impl util::JobTrait for CandidateBackingJob {
     const NAME: &'static str = "CandidateBackingJob";

-    #[tracing::instrument(skip(span, keystore, metrics, rx_to, sender), fields(subsystem = LOG_TARGET))]
     fn run<S: SubsystemSender>(
         parent: Hash,
         span: Arc<jaeger::Span>,
node/core/bitfield-signing/src/lib.rs

@@ -70,7 +70,6 @@ pub enum Error {
 /// If there is a candidate pending availability, query the Availability Store
 /// for whether we have the availability chunk for our validator index.
-#[tracing::instrument(level = "trace", skip(sender, span), fields(subsystem = LOG_TARGET))]
 async fn get_core_availability(
     core: &CoreState,
     validator_idx: ValidatorIndex,

@@ -132,7 +131,6 @@ async fn get_availability_cores(
 /// - for each core, concurrently determine chunk availability (see `get_core_availability`)
 /// - return the bitfield if there were no errors at any point in this process
 ///   (otherwise, it's prone to false negatives)
-#[tracing::instrument(level = "trace", skip(sender, span), fields(subsystem = LOG_TARGET))]
 async fn construct_availability_bitfield(
     relay_parent: Hash,
     span: &jaeger::Span,
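The doc comment above describes the shape of `construct_availability_bitfield`: fan out one availability query per core, and fail the whole bitfield on any error. A simplified, self-contained sketch of that shape using `futures::future::join_all`, with the per-core Availability Store query stubbed out:

    use futures::future::join_all;

    /// Stub for `get_core_availability`: the real subsystem asks the
    /// Availability Store whether we hold our chunk for the candidate
    /// occupying this core.
    async fn core_is_available(core_idx: usize) -> Result<bool, ()> {
        Ok(core_idx % 2 == 0) // placeholder answer
    }

    /// Query all cores concurrently; any error poisons the whole bitfield,
    /// which avoids signing a false-negative bitfield.
    async fn construct_bitfield(n_cores: usize) -> Result<Vec<bool>, ()> {
        join_all((0..n_cores).map(core_is_available))
            .await
            .into_iter()
            .collect()
    }

    fn main() {
        let bitfield = futures::executor::block_on(construct_bitfield(4)).unwrap();
        assert_eq!(bitfield, vec![true, false, true, false]);
    }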
@@ -226,7 +224,6 @@ impl JobTrait for BitfieldSigningJob {
     const NAME: &'static str = "BitfieldSigningJob";

     /// Run a job for the parent block indicated
-    #[tracing::instrument(skip(span, keystore, metrics, _receiver, sender), fields(subsystem = LOG_TARGET))]
     fn run<S: SubsystemSender>(
         relay_parent: Hash,
         span: Arc<jaeger::Span>,
node/core/candidate-validation/src/lib.rs

@@ -94,7 +94,6 @@ impl<C> Subsystem<C> for CandidateValidationSubsystem where
     }
 }

-#[tracing::instrument(skip(ctx, metrics), fields(subsystem = LOG_TARGET))]
 async fn run(
     mut ctx: impl SubsystemContext<Message = CandidateValidationMessage>,
     metrics: Metrics,

@@ -194,7 +193,6 @@ enum AssumptionCheckOutcome {
     BadRequest,
 }

-#[tracing::instrument(level = "trace", skip(ctx), fields(subsystem = LOG_TARGET))]
 async fn check_assumption_validation_data(
     ctx: &mut impl SubsystemContext<Message = CandidateValidationMessage>,
     descriptor: &CandidateDescriptor,

@@ -245,7 +243,6 @@ async fn check_assumption_validation_data(
     })
 }

-#[tracing::instrument(level = "trace", skip(ctx), fields(subsystem = LOG_TARGET))]
 async fn find_assumed_validation_data(
     ctx: &mut impl SubsystemContext<Message = CandidateValidationMessage>,
     descriptor: &CandidateDescriptor,

@@ -277,11 +274,6 @@ async fn find_assumed_validation_data(
     Ok(AssumptionCheckOutcome::DoesNotMatch)
 }

-#[tracing::instrument(
-    level = "trace",
-    skip(ctx, validation_host, pov, metrics),
-    fields(subsystem = LOG_TARGET),
-)]
 async fn spawn_validate_from_chain_state(
     ctx: &mut impl SubsystemContext<Message = CandidateValidationMessage>,
     validation_host: &mut ValidationHost,

@@ -340,11 +332,6 @@ async fn spawn_validate_from_chain_state(
     validation_result
 }

-#[tracing::instrument(
-    level = "trace",
-    skip(validation_backend, validation_code, pov, metrics),
-    fields(subsystem = LOG_TARGET),
-)]
 async fn validate_candidate_exhaustive(
     mut validation_backend: impl ValidationBackend,
     persisted_validation_data: PersistedValidationData,

@@ -478,7 +465,6 @@ impl ValidationBackend for &'_ mut ValidationHost {
 /// Does basic checks of a candidate. Provide the encoded PoV-block. Returns `Ok` if basic checks
 /// are passed, `Err` otherwise.
-#[tracing::instrument(level = "trace", skip(pov, validation_code), fields(subsystem = LOG_TARGET))]
 fn perform_basic_checks(
     candidate: &CandidateDescriptor,
     max_pov_size: u32,
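`perform_basic_checks` gates out obviously-bad candidates before any expensive execution. A simplified, self-contained sketch of that kind of pre-check (types reduced to plain data and hashing stubbed; the real checks and error variants differ):

    /// Simplified stand-ins for the real candidate types.
    struct Descriptor { pov_hash: u64 }
    struct Pov { bytes: Vec<u8> }

    #[derive(Debug, PartialEq)]
    enum InvalidCandidate { ParamsTooLarge(u64), PoVHashMismatch }

    fn hash(bytes: &[u8]) -> u64 {
        // Stub: the real code uses the candidate's hashing scheme.
        bytes.iter().map(|b| *b as u64).sum()
    }

    /// Reject before doing any expensive execution.
    fn perform_basic_checks(
        descriptor: &Descriptor,
        max_pov_size: u32,
        pov: &Pov,
    ) -> Result<(), InvalidCandidate> {
        let encoded_size = pov.bytes.len() as u64;
        if encoded_size > max_pov_size as u64 {
            return Err(InvalidCandidate::ParamsTooLarge(encoded_size));
        }
        if hash(&pov.bytes) != descriptor.pov_hash {
            return Err(InvalidCandidate::PoVHashMismatch);
        }
        Ok(())
    }

    fn main() {
        let pov = Pov { bytes: vec![1, 2, 3] };
        let descriptor = Descriptor { pov_hash: 6 };
        assert_eq!(perform_basic_checks(&descriptor, 1024, &pov), Ok(()));
    }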
node/core/chain-api/src/lib.rs

@@ -77,7 +77,6 @@ impl<Client, Context> Subsystem<Context> for ChainApiSubsystem<Client> where
     }
 }

-#[tracing::instrument(skip(ctx, subsystem), fields(subsystem = LOG_TARGET))]
 async fn run<Client>(
     mut ctx: impl SubsystemContext<Message = ChainApiMessage>,
     subsystem: ChainApiSubsystem<Client>,
node/core/provisioner/src/lib.rs

@@ -140,7 +140,6 @@ impl JobTrait for ProvisioningJob {
     /// Run a job for the parent block indicated
     //
     // this function is in charge of creating and executing the job's main loop
-    #[tracing::instrument(skip(span, _run_args, metrics, receiver, sender), fields(subsystem = LOG_TARGET))]
     fn run<S: SubsystemSender>(
         relay_parent: Hash,
         span: Arc<jaeger::Span>,

@@ -242,7 +241,6 @@ impl ProvisioningJob {
         }
     }

-    #[tracing::instrument(level = "trace", skip(self), fields(subsystem = LOG_TARGET))]
     fn note_provisionable_data(&mut self, span: &jaeger::Span, provisionable_data: ProvisionableData) {
         match provisionable_data {
             ProvisionableData::Bitfield(_, signed_bitfield) => {

@@ -277,7 +275,6 @@ type CoreAvailability = BitVec<bitvec::order::Lsb0, u8>;
 /// When we're choosing bitfields to include, the rule should be simple:
 /// maximize availability. So basically, include all bitfields. And then
 /// choose a coherent set of candidates along with that.
-#[tracing::instrument(level = "trace", skip(return_senders, from_job), fields(subsystem = LOG_TARGET))]
 async fn send_inherent_data(
     relay_parent: Hash,
     bitfields: &[SignedAvailabilityBitfield],

@@ -321,7 +318,6 @@ async fn send_inherent_data(
 ///
 /// Note: This does not enforce any sorting precondition on the output; the ordering there will be unrelated
 /// to the sorting of the input.
-#[tracing::instrument(level = "trace", fields(subsystem = LOG_TARGET))]
 fn select_availability_bitfields(
     cores: &[CoreState],
     bitfields: &[SignedAvailabilityBitfield],

@@ -353,7 +349,6 @@ fn select_availability_bitfields(
 }

 /// Determine which cores are free, and then to the degree possible, pick a candidate appropriate to each free core.
-#[tracing::instrument(level = "trace", skip(sender), fields(subsystem = LOG_TARGET))]
 async fn select_candidates(
     availability_cores: &[CoreState],
     bitfields: &[SignedAvailabilityBitfield],

@@ -475,7 +470,6 @@ async fn select_candidates(
 /// Produces a block number 1 higher than that of the relay parent
 /// in the event of an invalid `relay_parent`, returns `Ok(0)`
-#[tracing::instrument(level = "trace", skip(sender), fields(subsystem = LOG_TARGET))]
 async fn get_block_number_under_construction(
     relay_parent: Hash,
     sender: &mut impl SubsystemSender,
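The doc comment above fully specifies the contract: the block under construction has the relay parent's number plus one, and an unknown `relay_parent` yields `Ok(0)` rather than an error. A minimal sketch of that contract with the chain lookup stubbed (the real code queries the ChainApi subsystem):

    use std::collections::HashMap;

    type Hash = u64;
    type BlockNumber = u32;

    /// Stub chain view; the real code sends a ChainApi block-number request.
    fn block_number(chain: &HashMap<Hash, BlockNumber>, hash: Hash) -> Option<BlockNumber> {
        chain.get(&hash).copied()
    }

    /// Number of the block under construction: parent number + 1,
    /// or 0 if the relay parent is unknown.
    fn get_block_number_under_construction(
        chain: &HashMap<Hash, BlockNumber>,
        relay_parent: Hash,
    ) -> Result<BlockNumber, ()> {
        match block_number(chain, relay_parent) {
            Some(n) => Ok(n + 1),
            None => Ok(0),
        }
    }

    fn main() {
        let chain: HashMap<Hash, BlockNumber> = [(0xaa, 41)].into_iter().collect();
        assert_eq!(get_block_number_under_construction(&chain, 0xaa), Ok(42));
        assert_eq!(get_block_number_under_construction(&chain, 0xbb), Ok(0));
    }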
@@ -501,7 +495,6 @@ async fn get_block_number_under_construction(
 /// - construct a transverse slice along `core_idx`
 /// - bitwise-or it with the availability slice
 /// - count the 1 bits, compare to the total length; true on 2/3+
-#[tracing::instrument(level = "trace", fields(subsystem = LOG_TARGET))]
 fn bitfields_indicate_availability(
     core_idx: usize,
     bitfields: &[SignedAvailabilityBitfield],
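The three bullets above are a complete spec for `bitfields_indicate_availability`: take the column at `core_idx` across all validators' bitfields and report the core available on a 2/3+ vote. A direct sketch over plain `Vec<bool>` bitfields (whether the real bound is strict is not visible in this diff; the doc comment's "2/3+" is taken at face value):

    /// One bitfield per validator; bit i covers availability core i.
    fn bitfields_indicate_availability(core_idx: usize, bitfields: &[Vec<bool>]) -> bool {
        let total = bitfields.len();
        // Transverse slice along core_idx: count validators voting "available".
        let votes = bitfields
            .iter()
            .filter(|bits| bits.get(core_idx).copied().unwrap_or(false))
            .count();
        // At least 2/3 of validators must have seen their chunk.
        3 * votes >= 2 * total
    }

    fn main() {
        let bitfields = vec![
            vec![true, false],
            vec![true, true],
            vec![true, false],
        ];
        assert!(bitfields_indicate_availability(0, &bitfields));  // 3/3 voted
        assert!(!bitfields_indicate_availability(1, &bitfields)); // 1/3 voted
    }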
node/core/runtime-api/src/lib.rs

@@ -262,7 +262,6 @@ impl<Client> RuntimeApiSubsystem<Client> where
     }
 }

-#[tracing::instrument(skip(ctx, subsystem), fields(subsystem = LOG_TARGET))]
 async fn run<Client>(
     mut ctx: impl SubsystemContext<Message = RuntimeApiMessage>,
     mut subsystem: RuntimeApiSubsystem<Client>,

@@ -287,7 +286,6 @@ async fn run<Client>(
     }
 }

-#[tracing::instrument(level = "trace", skip(client, metrics), fields(subsystem = LOG_TARGET))]
 fn make_runtime_api_request<Client>(
     client: Arc<Client>,
     metrics: Metrics,
node/network/approval-distribution/src/lib.rs

@@ -1102,7 +1102,6 @@ impl State {
 /// Modify the reputation of a peer based on its behavior.
-#[tracing::instrument(level = "trace", skip(ctx), fields(subsystem = LOG_TARGET))]
 async fn modify_reputation(
     ctx: &mut impl SubsystemContext<Message = ApprovalDistributionMessage>,
     peer_id: PeerId,
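`modify_reputation` is a thin wrapper: it forwards a reputation change for `peer_id` towards the network bridge. A rough sketch of the shape with the context and message type reduced to local stand-ins (the real code sends a `NetworkBridgeMessage` variant):

    type PeerId = u64;

    /// Local stand-in for the network-bridge message.
    #[derive(Debug, PartialEq)]
    enum BridgeMessage {
        ReportPeer(PeerId, i32),
    }

    /// Stand-in context that queues outgoing messages.
    struct Ctx { out: Vec<BridgeMessage> }

    impl Ctx {
        fn send_message(&mut self, msg: BridgeMessage) {
            self.out.push(msg);
        }
    }

    /// Modify the reputation of a peer based on its behavior.
    fn modify_reputation(ctx: &mut Ctx, peer_id: PeerId, rep: i32) {
        // The real function also emits a trace event before sending.
        ctx.send_message(BridgeMessage::ReportPeer(peer_id, rep));
    }

    fn main() {
        let mut ctx = Ctx { out: Vec::new() };
        modify_reputation(&mut ctx, 7, -100);
        assert_eq!(ctx.out, vec![BridgeMessage::ReportPeer(7, -100)]);
    }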
@@ -1126,7 +1125,6 @@ impl ApprovalDistribution {
         Self { metrics }
     }

-    #[tracing::instrument(skip(self, ctx), fields(subsystem = LOG_TARGET))]
     async fn run<Context>(self, ctx: Context)
     where
         Context: SubsystemContext<Message = ApprovalDistributionMessage>,

@@ -1136,7 +1134,6 @@ impl ApprovalDistribution {
     }

     /// Used for testing.
-    #[tracing::instrument(skip(self, ctx, state), fields(subsystem = LOG_TARGET))]
     async fn run_inner<Context>(self, mut ctx: Context, state: &mut State)
     where
         Context: SubsystemContext<Message = ApprovalDistributionMessage>,
node/network/availability-distribution/src/requester/fetch_task/mod.rs

@@ -176,7 +176,6 @@ impl FetchTask {
     /// Start fetching a chunk.
     ///
     /// A task handling the fetching of the configured chunk will be spawned.
-    #[tracing::instrument(level = "trace", skip(config, ctx), fields(subsystem = LOG_TARGET))]
     pub async fn start<Context>(config: FetchTaskConfig, ctx: &mut Context) -> Result<Self>
     where
         Context: SubsystemContext,

@@ -249,7 +248,6 @@ enum TaskError {
 }

 impl RunningTask {
-    #[tracing::instrument(level = "trace", skip(self, kill), fields(subsystem = LOG_TARGET))]
     async fn run(self, kill: oneshot::Receiver<()>) {
         // Wait for completion/or cancel.
         let run_it = self.run_inner();
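The `// Wait for completion/or cancel.` comment above is implemented as a race between the task future and the kill signal. A self-contained sketch of that pattern with `futures::select!` and a oneshot channel (simplified from the real `RunningTask::run`):

    use futures::{channel::oneshot, future::FutureExt, pin_mut, select};

    async fn run_inner() {
        // stand-in for the actual chunk-fetching loop
    }

    /// Run until the task completes or the kill signal fires, whichever is first.
    async fn run(kill: oneshot::Receiver<()>) {
        let run_it = run_inner().fuse();
        let kill = kill.fuse();
        pin_mut!(run_it, kill);

        select! {
            () = run_it => { /* finished normally */ }
            _ = kill => { /* cancelled */ }
        }
    }

    fn main() {
        let (tx, rx) = oneshot::channel::<()>();
        drop(tx); // dropping the sender also resolves the receiver (with Err)
        futures::executor::block_on(run(rx));
    }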
node/network/availability-distribution/src/requester/mod.rs

@@ -78,7 +78,6 @@ impl Requester {
     ///
     /// You must feed it with `ActiveLeavesUpdate` via `update_fetching_heads` and make it progress
     /// by advancing the stream.
-    #[tracing::instrument(level = "trace", skip(metrics), fields(subsystem = LOG_TARGET))]
     pub fn new(metrics: Metrics) -> Self {
         let (tx, rx) = mpsc::channel(1);
         Requester {

@@ -92,7 +91,6 @@ impl Requester {
     /// Update heads that need availability distribution.
     ///
     /// For all active heads we will be fetching our chunks for availability distribution.
-    #[tracing::instrument(level = "trace", skip(self, ctx, runtime, update), fields(subsystem = LOG_TARGET))]
     pub async fn update_fetching_heads<Context>(
         &mut self,
         ctx: &mut Context,
node/network/availability-distribution/src/requester/session_cache.rs

@@ -95,7 +95,6 @@ impl SessionCache {
     ///
     /// Use this function over any `fetch_session_info` if all you need is a reference to
     /// `SessionInfo`, as it avoids an expensive clone.
-    #[tracing::instrument(level = "trace", skip(self, ctx, runtime, with_info), fields(subsystem = LOG_TARGET))]
     pub async fn with_session_info<Context, F, R>(
         &mut self,
         ctx: &mut Context,
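The clone-avoidance described above is the usual pass-a-closure accessor pattern: rather than returning `SessionInfo` by value, the caller hands in a function that runs against a cached reference. A minimal sketch with simplified types (the real cache also fetches and inserts on a miss):

    use std::collections::HashMap;

    #[derive(Debug)]
    struct SessionInfo { validators: Vec<String> }

    struct SessionCache { by_index: HashMap<u32, SessionInfo> }

    impl SessionCache {
        /// Run `with_info` against a cached reference instead of cloning
        /// the (potentially large) SessionInfo out to the caller.
        fn with_session_info<F, R>(&mut self, session_index: u32, with_info: F) -> Option<R>
        where
            F: FnOnce(&SessionInfo) -> R,
        {
            self.by_index.get(&session_index).map(with_info)
        }
    }

    fn main() {
        let mut cache = SessionCache {
            by_index: [(5, SessionInfo { validators: vec!["alice".into()] })].into_iter().collect(),
        };
        let n = cache.with_session_info(5, |info| info.validators.len());
        assert_eq!(n, Some(1));
    }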
@@ -146,7 +145,6 @@ impl SessionCache {
     ///
     /// We assume validators in a group are tried in reverse order, so the reported bad validators
     /// will be put at the beginning of the group.
-    #[tracing::instrument(level = "trace", skip(self, report), fields(subsystem = LOG_TARGET))]
     pub fn report_bad(&mut self, report: BadValidators) -> crate::Result<()> {
         let session = self.session_info_cache
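The invariant documented above, that groups are tried from the back and known-bad validators are rotated to the front, can be shown with plain `Vec` manipulation. A sketch of the effective reordering (names illustrative):

    /// Move the reported-bad validators to the front of the group, so that the
    /// back of the list (tried first, since fetching walks it in reverse)
    /// holds the validators not yet known to be bad.
    fn report_bad(group: &mut Vec<&'static str>, bad: &[&'static str]) {
        group.retain(|v| !bad.contains(v));
        let mut reordered = bad.to_vec();
        reordered.extend(group.iter().copied());
        *group = reordered;
    }

    fn main() {
        let mut group = vec!["a", "b", "c", "d"];
        report_bad(&mut group, &["d", "c"]);
        // bad validators now sit at the beginning, good ones at the end
        assert_eq!(group, vec!["d", "c", "a", "b"]);
    }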
node/network/availability-distribution/src/responder.rs

@@ -149,7 +149,6 @@ where
 }

 /// Query chunk from the availability store.
-#[tracing::instrument(level = "trace", skip(ctx), fields(subsystem = LOG_TARGET))]
 async fn query_chunk<Context>(
     ctx: &mut Context,
     candidate_hash: CandidateHash,

@@ -178,7 +177,6 @@ where
 }

 /// Query PoV from the availability store.
-#[tracing::instrument(level = "trace", skip(ctx), fields(subsystem = LOG_TARGET))]
 async fn query_available_data<Context>(
     ctx: &mut Context,
     candidate_hash: CandidateHash,
node/network/availability-recovery/src/lib.rs

@@ -608,11 +608,9 @@ async fn handle_signal(
 }

 /// Machinery around launching interactions into the background.
-#[tracing::instrument(level = "trace", skip(ctx, state), fields(subsystem = LOG_TARGET))]
 async fn launch_interaction(
     state: &mut State,
     ctx: &mut impl SubsystemContext<Message = AvailabilityRecoveryMessage>,
     session_index: SessionIndex,
     session_info: SessionInfo,
     receipt: CandidateReceipt,
     backing_group: Option<GroupIndex>,

@@ -663,7 +661,6 @@ async fn launch_interaction(
 }

 /// Handles an availability recovery request.
-#[tracing::instrument(level = "trace", skip(ctx, state), fields(subsystem = LOG_TARGET))]
 async fn handle_recover(
     state: &mut State,
     ctx: &mut impl SubsystemContext<Message = AvailabilityRecoveryMessage>,

@@ -706,7 +703,6 @@ async fn handle_recover(
     launch_interaction(
         state,
         ctx,
         session_index,
         session_info,
         receipt,
         backing_group,

@@ -727,7 +723,6 @@ async fn handle_recover(
 }

 /// Queries a chunk from av-store.
-#[tracing::instrument(level = "trace", skip(ctx), fields(subsystem = LOG_TARGET))]
 async fn query_full_data(
     ctx: &mut impl SubsystemContext<Message = AvailabilityRecoveryMessage>,
     candidate_hash: CandidateHash,
node/network/bitfield-distribution/src/lib.rs

@@ -152,7 +152,6 @@ impl BitfieldDistribution {
     }

     /// Start processing work as passed on from the Overseer.
-    #[tracing::instrument(skip(self, ctx), fields(subsystem = LOG_TARGET))]
     async fn run<Context>(self, mut ctx: Context)
     where
         Context: SubsystemContext<Message = BitfieldDistributionMessage>,

@@ -234,7 +233,6 @@ impl BitfieldDistribution {
 }

 /// Modify the reputation of a peer based on its behavior.
-#[tracing::instrument(level = "trace", skip(ctx), fields(subsystem = LOG_TARGET))]
 async fn modify_reputation<Context>(
     ctx: &mut Context,
     peer: PeerId,

@@ -254,7 +252,6 @@ where
 /// Distribute a given valid and signature checked bitfield message.
 ///
 /// For this variant the source is this node.
-#[tracing::instrument(level = "trace", skip(ctx, metrics), fields(subsystem = LOG_TARGET))]
 async fn handle_bitfield_distribution<Context>(
     ctx: &mut Context,
     state: &mut ProtocolState,

@@ -308,7 +305,6 @@ where
 /// Distribute a given valid and signature checked bitfield message.
 ///
 /// Can be originated by another subsystem or received via network from another peer.
-#[tracing::instrument(level = "trace", skip(ctx), fields(subsystem = LOG_TARGET))]
 async fn relay_message<Context>(
     ctx: &mut Context,
     job_data: &mut PerRelayParentData,

@@ -385,7 +381,6 @@ where
 }

 /// Handle an incoming message from a peer.
-#[tracing::instrument(level = "trace", skip(ctx, metrics), fields(subsystem = LOG_TARGET))]
 async fn process_incoming_peer_message<Context>(
     ctx: &mut Context,
     state: &mut ProtocolState,

@@ -506,7 +501,6 @@ where
 /// Deal with network bridge updates and track what needs to be tracked
 /// which depends on the message type received.
-#[tracing::instrument(level = "trace", skip(ctx, metrics), fields(subsystem = LOG_TARGET))]
 async fn handle_network_msg<Context>(
     ctx: &mut Context,
     state: &mut ProtocolState,

@@ -561,7 +555,6 @@ where
 }

 /// Handle the changes necessary when our view changes.
-#[tracing::instrument(level = "trace", fields(subsystem = LOG_TARGET))]
 fn handle_our_view_change(state: &mut ProtocolState, view: OurView) {
     let old_view = std::mem::replace(&mut (state.view), view);
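The swap in `handle_our_view_change` is exactly what `std::mem::replace` is for: install the new view and get the old one back in a single move, so old and new can be diffed afterwards. A tiny standalone sketch:

    #[derive(Debug, PartialEq, Clone)]
    struct View(Vec<u64>);

    struct State { view: View }

    fn handle_our_view_change(state: &mut State, view: View) -> View {
        // Install the new view and recover the previous one in one move,
        // without cloning and without leaving state.view temporarily invalid.
        std::mem::replace(&mut state.view, view)
    }

    fn main() {
        let mut state = State { view: View(vec![1]) };
        let old = handle_our_view_change(&mut state, View(vec![1, 2]));
        assert_eq!(old, View(vec![1]));
        assert_eq!(state.view, View(vec![1, 2]));
    }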
@@ -584,7 +577,6 @@ fn handle_our_view_change(state: &mut ProtocolState, view: OurView) {
 // Send the difference between two views which were not sent
 // to that particular peer.
-#[tracing::instrument(level = "trace", skip(ctx), fields(subsystem = LOG_TARGET))]
 async fn handle_peer_view_change<Context>(
     ctx: &mut Context,
     state: &mut ProtocolState,
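The comment above ("send the difference between two views") is a per-peer set difference: after a view change, only gossip the heads that are in the new view but were not already sent under the old one. A sketch over hash sets:

    use std::collections::HashSet;

    type Hash = u64;

    /// Heads that are in `new` but were not in `old`: only these need
    /// to be (re)sent to the peer after a view change.
    fn view_difference(old: &HashSet<Hash>, new: &HashSet<Hash>) -> Vec<Hash> {
        new.difference(old).copied().collect()
    }

    fn main() {
        let old: HashSet<Hash> = [1, 2].into_iter().collect();
        let new: HashSet<Hash> = [2, 3].into_iter().collect();
        let mut fresh = view_difference(&old, &new);
        fresh.sort();
        assert_eq!(fresh, vec![3]);
    }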
@@ -637,7 +629,6 @@ where
 }

 /// Send a gossip message and track it in the per relay parent data.
-#[tracing::instrument(level = "trace", skip(ctx), fields(subsystem = LOG_TARGET))]
 async fn send_tracked_gossip_message<Context>(
     ctx: &mut Context,
     state: &mut ProtocolState,

@@ -693,7 +684,6 @@ where
 }

 /// Query our validator set and signing context for a particular relay parent.
-#[tracing::instrument(level = "trace", skip(ctx), fields(subsystem = LOG_TARGET))]
 async fn query_basics<Context>(
     ctx: &mut Context,
     relay_parent: Hash,
node/network/bridge/src/lib.rs

@@ -827,7 +827,6 @@ async fn handle_network_messages<AD: validator_discovery::AuthorityDiscovery>(
 /// #fn is_send<T: Send>();
 /// #is_send::<parking_lot::MutexGuard<'static, ()>();
 /// ```
-#[tracing::instrument(skip(bridge, ctx, network_stream), fields(subsystem = LOG_TARGET))]
 async fn run_network<N, AD>(
     bridge: NetworkBridge<N, AD>,
     mut ctx: impl SubsystemContext<Message = NetworkBridgeMessage>,

@@ -924,7 +923,6 @@ fn construct_view(live_heads: impl DoubleEndedIterator<Item = Hash>, finalized_n
     )
 }

-#[tracing::instrument(level = "trace", skip(net, ctx, shared, metrics), fields(subsystem = LOG_TARGET))]
 async fn update_our_view(
     net: &mut impl Network,
     ctx: &mut impl SubsystemContext<Message = NetworkBridgeMessage>,

@@ -997,7 +995,6 @@ async fn update_our_view(
 // Handle messages on a specific peer-set. The peer is expected to be connected on that
 // peer-set.
-#[tracing::instrument(level = "trace", skip(peers, messages, metrics), fields(subsystem = LOG_TARGET))]
 fn handle_peer_messages<M>(
     peer: PeerId,
     peer_set: PeerSet,

@@ -1048,7 +1045,6 @@ fn handle_peer_messages<M>(
     (outgoing_messages, reports)
 }

-#[tracing::instrument(level = "trace", skip(net, peers, metrics), fields(subsystem = LOG_TARGET))]
 async fn send_validation_message<I>(
     net: &mut impl Network,
     peers: I,

@@ -1062,7 +1058,6 @@ async fn send_validation_message<I>(
     send_message(net, peers, PeerSet::Validation, message, metrics).await
 }

-#[tracing::instrument(level = "trace", skip(net, peers, metrics), fields(subsystem = LOG_TARGET))]
 async fn send_collation_message<I>(
     net: &mut impl Network,
     peers: I,

@@ -1109,7 +1104,6 @@ fn dispatch_collation_event_to_all_unbounded(
     }
 }

-#[tracing::instrument(level = "trace", skip(events, ctx), fields(subsystem = LOG_TARGET))]
 async fn dispatch_validation_events_to_all<I>(
     events: I,
     ctx: &mut impl SubsystemSender

@@ -1121,7 +1115,6 @@ async fn dispatch_validation_events_to_all<I>(
     ctx.send_messages(events.into_iter().flat_map(AllMessages::dispatch_iter)).await
 }

-#[tracing::instrument(level = "trace", skip(events, ctx), fields(subsystem = LOG_TARGET))]
 async fn dispatch_collation_events_to_all<I>(
     events: I,
     ctx: &mut impl SubsystemSender
node/network/bridge/src/network.rs

@@ -192,7 +192,6 @@ impl Network for Arc<NetworkService<Block, Hash>> {
         sc_network::NetworkService::remove_from_peers_set(&**self, protocol, multiaddresses)
     }

-    #[tracing::instrument(level = "trace", skip(self), fields(subsystem = LOG_TARGET))]
     fn action_sink<'a>(
         &'a mut self,
     ) -> Pin<Box<dyn Sink<NetworkAction, Error = SubsystemError> + Send + 'a>> {
node/network/collator-protocol/src/collator_side.rs

@@ -258,7 +258,6 @@ impl State {
 /// or the relay-parent isn't in the active-leaves set, we ignore the message
 /// as it must be invalid in that case - although this indicates a logic error
 /// elsewhere in the node.
-#[tracing::instrument(level = "trace", skip(ctx, runtime, state, pov), fields(subsystem = LOG_TARGET))]
 async fn distribute_collation(
     ctx: &mut impl SubsystemContext,
     runtime: &mut RuntimeInfo,

@@ -357,7 +356,6 @@ async fn distribute_collation(
 /// Get the Id of the Core that is assigned to the para being collated on if any
 /// and the total number of cores.
-#[tracing::instrument(level = "trace", skip(ctx), fields(subsystem = LOG_TARGET))]
 async fn determine_core(
     ctx: &mut impl SubsystemContext,
     para_id: ParaId,

@@ -387,7 +385,6 @@ struct GroupValidators {
 /// Figure out current and next group of validators assigned to the para being collated on.
 ///
 /// Returns [`ValidatorId`]'s of current and next group as determined based on the `relay_parent`.
-#[tracing::instrument(level = "trace", skip(ctx, runtime), fields(subsystem = LOG_TARGET))]
 async fn determine_our_validators(
     ctx: &mut impl SubsystemContext,
     runtime: &mut RuntimeInfo,

@@ -424,7 +421,6 @@ async fn determine_our_validators(
 }

 /// Issue a `Declare` collation message to the given `peer`.
-#[tracing::instrument(level = "trace", skip(ctx, state), fields(subsystem = LOG_TARGET))]
 async fn declare(
     ctx: &mut impl SubsystemContext<Message = CollatorProtocolMessage>,
     state: &mut State,

@@ -450,7 +446,6 @@ async fn declare(
 /// Issue a connection request to a set of validators and
 /// revoke the previous connection request.
-#[tracing::instrument(level = "trace", skip(ctx), fields(subsystem = LOG_TARGET))]
 async fn connect_to_validators(
     ctx: &mut impl SubsystemContext,
     validator_ids: Vec<AuthorityDiscoveryId>,

@@ -467,7 +462,6 @@ async fn connect_to_validators(
 ///
 /// This will only advertise a collation if there exists one for the given `relay_parent` and the given `peer` is
 /// set as validator for our para at the given `relay_parent`.
-#[tracing::instrument(level = "trace", skip(ctx, state), fields(subsystem = LOG_TARGET))]
 async fn advertise_collation(
     ctx: &mut impl SubsystemContext,
     state: &mut State,

@@ -528,7 +522,6 @@ async fn advertise_collation(
 }

 /// The main incoming message dispatching switch.
-#[tracing::instrument(level = "trace", skip(ctx, runtime, state), fields(subsystem = LOG_TARGET))]
 async fn process_msg(
     ctx: &mut impl SubsystemContext<Message = CollatorProtocolMessage>,
     runtime: &mut RuntimeInfo,

@@ -635,7 +628,6 @@ async fn process_msg(
 }