Commit 7caa0bd1 authored by Robert Klotzner's avatar Robert Klotzner
Browse files

Fix instrumentation to use log target properly.

parent 78e6a1cd
......@@ -74,7 +74,7 @@ impl CollationGenerationSubsystem {
///
/// If `err_tx` is not `None`, errors are forwarded onto that channel as they occur.
/// Otherwise, most are logged and then discarded.
#[tracing::instrument(skip(self, ctx), fields(subsystem = LOG_TARGET))]
#[tracing::instrument(skip(self, ctx), fields(target = LOG_TARGET))]
async fn run<Context>(mut self, mut ctx: Context)
where
Context: SubsystemContext<Message = CollationGenerationMessage>,
......@@ -107,7 +107,7 @@ impl CollationGenerationSubsystem {
// note: this doesn't strictly need to be a separate function; it's more an administrative function
// so that we don't clutter the run loop. It could in principle be inlined directly into there.
// it should hopefully therefore be ok that it's an async function mutably borrowing self.
#[tracing::instrument(level = "trace", skip(self, ctx, sender), fields(subsystem = LOG_TARGET))]
#[tracing::instrument(level = "trace", skip(self, ctx, sender), fields(target = LOG_TARGET))]
async fn handle_incoming<Context>(
&mut self,
incoming: SubsystemResult<FromOverseer<Context::Message>>,
......@@ -181,7 +181,7 @@ where
}
}
#[tracing::instrument(level = "trace", skip(ctx, metrics, sender, activated), fields(subsystem = LOG_TARGET))]
#[tracing::instrument(level = "trace", skip(ctx, metrics, sender, activated), fields(target = LOG_TARGET))]
async fn handle_new_activations<Context: SubsystemContext>(
config: Arc<CollationGenerationConfig>,
activated: impl IntoIterator<Item = Hash>,
......@@ -364,7 +364,7 @@ async fn handle_new_activations<Context: SubsystemContext>(
Ok(())
}
#[tracing::instrument(level = "trace", fields(subsystem = LOG_TARGET))]
#[tracing::instrument(level = "trace", fields(target = LOG_TARGET))]
fn erasure_root(
n_validators: usize,
persisted_validation: PersistedValidationData,
......
......@@ -508,7 +508,7 @@ where
}
}
#[tracing::instrument(skip(subsystem, ctx), fields(subsystem = LOG_TARGET))]
#[tracing::instrument(skip(subsystem, ctx), fields(target = LOG_TARGET))]
async fn run<Context>(mut subsystem: AvailabilityStoreSubsystem, mut ctx: Context)
where
Context: SubsystemContext<Message=AvailabilityStoreMessage>,
......@@ -534,7 +534,7 @@ where
}
}
#[tracing::instrument(level = "trace", skip(subsystem, ctx), fields(subsystem = LOG_TARGET))]
#[tracing::instrument(level = "trace", skip(subsystem, ctx), fields(target = LOG_TARGET))]
async fn run_iteration<Context>(
ctx: &mut Context,
subsystem: &mut AvailabilityStoreSubsystem,
......
......@@ -865,7 +865,7 @@ impl CandidateBackingJob {
}
/// Import the statement and kick off validation work if it is a part of our assignment.
#[tracing::instrument(level = "trace", skip(self, parent_span), fields(subsystem = LOG_TARGET))]
#[tracing::instrument(level = "trace", skip(self, parent_span), fields(target = LOG_TARGET))]
async fn maybe_validate_and_import(
&mut self,
parent_span: &jaeger::Span,
......@@ -884,7 +884,7 @@ impl CandidateBackingJob {
Ok(())
}
#[tracing::instrument(level = "trace", skip(self), fields(subsystem = LOG_TARGET))]
#[tracing::instrument(level = "trace", skip(self), fields(target = LOG_TARGET))]
async fn sign_statement(&self, statement: Statement) -> Option<SignedFullStatement> {
let signed = self.table_context
.validator
......@@ -897,7 +897,7 @@ impl CandidateBackingJob {
Some(signed)
}
#[tracing::instrument(level = "trace", skip(self), fields(subsystem = LOG_TARGET))]
#[tracing::instrument(level = "trace", skip(self), fields(target = LOG_TARGET))]
fn check_statement_signature(&self, statement: &SignedFullStatement) -> Result<(), Error> {
let idx = statement.validator_index().0 as usize;
......@@ -987,7 +987,7 @@ impl util::JobTrait for CandidateBackingJob {
const NAME: &'static str = "CandidateBackingJob";
#[tracing::instrument(skip(span, keystore, metrics, rx_to, tx_from), fields(subsystem = LOG_TARGET))]
#[tracing::instrument(skip(span, keystore, metrics, rx_to, tx_from), fields(target = LOG_TARGET))]
fn run(
parent: Hash,
span: Arc<jaeger::Span>,
......
......@@ -93,7 +93,7 @@ impl JobTrait for CandidateSelectionJob {
const NAME: &'static str = "CandidateSelectionJob";
#[tracing::instrument(skip(keystore, metrics, receiver, sender), fields(subsystem = LOG_TARGET))]
#[tracing::instrument(skip(keystore, metrics, receiver, sender), fields(target = LOG_TARGET))]
fn run(
relay_parent: Hash,
span: Arc<jaeger::Span>,
......@@ -222,7 +222,7 @@ impl CandidateSelectionJob {
Ok(())
}
#[tracing::instrument(level = "trace", skip(self), fields(subsystem = LOG_TARGET))]
#[tracing::instrument(level = "trace", skip(self), fields(target = LOG_TARGET))]
async fn handle_collation(
&mut self,
relay_parent: Hash,
......@@ -280,7 +280,7 @@ impl CandidateSelectionJob {
}
}
#[tracing::instrument(level = "trace", skip(self), fields(subsystem = LOG_TARGET))]
#[tracing::instrument(level = "trace", skip(self), fields(target = LOG_TARGET))]
async fn handle_invalid(&mut self, candidate_receipt: CandidateReceipt) {
let _timer = self.metrics.time_handle_invalid();
......@@ -358,7 +358,7 @@ impl CandidateSelectionJob {
// get a collation from the Collator Protocol subsystem
//
// note that this gets an owned clone of the sender; that's because unlike `forward_invalidity_note`, it's expected to take a while longer
#[tracing::instrument(level = "trace", skip(sender), fields(subsystem = LOG_TARGET))]
#[tracing::instrument(level = "trace", skip(sender), fields(target = LOG_TARGET))]
async fn get_collation(
relay_parent: Hash,
para_id: ParaId,
......
......@@ -85,7 +85,7 @@ impl<S, C> Subsystem<C> for CandidateValidationSubsystem<S> where
}
}
#[tracing::instrument(skip(ctx, spawn, metrics), fields(subsystem = LOG_TARGET))]
#[tracing::instrument(skip(ctx, spawn, metrics), fields(target = LOG_TARGET))]
async fn run(
mut ctx: impl SubsystemContext<Message = CandidateValidationMessage>,
spawn: impl SpawnNamed + Clone + 'static,
......@@ -183,7 +183,7 @@ enum AssumptionCheckOutcome {
BadRequest,
}
#[tracing::instrument(level = "trace", skip(ctx), fields(subsystem = LOG_TARGET))]
#[tracing::instrument(level = "trace", skip(ctx), fields(target = LOG_TARGET))]
async fn check_assumption_validation_data(
ctx: &mut impl SubsystemContext<Message = CandidateValidationMessage>,
descriptor: &CandidateDescriptor,
......@@ -234,7 +234,7 @@ async fn check_assumption_validation_data(
})
}
#[tracing::instrument(level = "trace", skip(ctx), fields(subsystem = LOG_TARGET))]
#[tracing::instrument(level = "trace", skip(ctx), fields(target = LOG_TARGET))]
async fn find_assumed_validation_data(
ctx: &mut impl SubsystemContext<Message = CandidateValidationMessage>,
descriptor: &CandidateDescriptor,
......@@ -266,7 +266,7 @@ async fn find_assumed_validation_data(
Ok(AssumptionCheckOutcome::DoesNotMatch)
}
#[tracing::instrument(level = "trace", skip(ctx, pov, spawn, metrics), fields(subsystem = LOG_TARGET))]
#[tracing::instrument(level = "trace", skip(ctx, pov, spawn, metrics), fields(target = LOG_TARGET))]
async fn spawn_validate_from_chain_state(
ctx: &mut impl SubsystemContext<Message = CandidateValidationMessage>,
isolation_strategy: IsolationStrategy,
......@@ -328,7 +328,7 @@ async fn spawn_validate_from_chain_state(
validation_result
}
#[tracing::instrument(level = "trace", skip(ctx, validation_code, pov, spawn, metrics), fields(subsystem = LOG_TARGET))]
#[tracing::instrument(level = "trace", skip(ctx, validation_code, pov, spawn, metrics), fields(target = LOG_TARGET))]
async fn spawn_validate_exhaustive(
ctx: &mut impl SubsystemContext<Message = CandidateValidationMessage>,
isolation_strategy: IsolationStrategy,
......@@ -361,7 +361,7 @@ async fn spawn_validate_exhaustive(
/// Does basic checks of a candidate. Provide the encoded PoV-block. Returns `Ok` if basic checks
/// are passed, `Err` otherwise.
#[tracing::instrument(level = "trace", skip(pov), fields(subsystem = LOG_TARGET))]
#[tracing::instrument(level = "trace", skip(pov), fields(target = LOG_TARGET))]
fn perform_basic_checks(
candidate: &CandidateDescriptor,
max_pov_size: u32,
......@@ -419,7 +419,7 @@ impl ValidationBackend for RealValidationBackend {
/// Validates the candidate from exhaustive parameters.
///
/// Sends the result of validation on the channel once complete.
#[tracing::instrument(level = "trace", skip(backend_arg, validation_code, pov, spawn, metrics), fields(subsystem = LOG_TARGET))]
#[tracing::instrument(level = "trace", skip(backend_arg, validation_code, pov, spawn, metrics), fields(target = LOG_TARGET))]
fn validate_candidate_exhaustive<B: ValidationBackend, S: SpawnNamed + 'static>(
backend_arg: B::Arg,
persisted_validation_data: PersistedValidationData,
......
......@@ -77,7 +77,7 @@ impl<Client, Context> Subsystem<Context> for ChainApiSubsystem<Client> where
}
}
#[tracing::instrument(skip(ctx, subsystem), fields(subsystem = LOG_TARGET))]
#[tracing::instrument(skip(ctx, subsystem), fields(target = LOG_TARGET))]
async fn run<Client>(
mut ctx: impl SubsystemContext<Message = ChainApiMessage>,
subsystem: ChainApiSubsystem<Client>,
......
......@@ -138,7 +138,7 @@ impl JobTrait for ProvisioningJob {
/// Run a job for the parent block indicated
//
// this function is in charge of creating and executing the job's main loop
#[tracing::instrument(skip(span, _run_args, metrics, receiver, sender), fields(subsystem = LOG_TARGET))]
#[tracing::instrument(skip(span, _run_args, metrics, receiver, sender), fields(target = LOG_TARGET))]
fn run(
relay_parent: Hash,
span: Arc<jaeger::Span>,
......@@ -238,7 +238,7 @@ impl ProvisioningJob {
}
}
#[tracing::instrument(level = "trace", skip(self), fields(subsystem = LOG_TARGET))]
#[tracing::instrument(level = "trace", skip(self), fields(target = LOG_TARGET))]
fn note_provisionable_data(&mut self, provisionable_data: ProvisionableData) {
match provisionable_data {
ProvisionableData::Bitfield(_, signed_bitfield) => {
......@@ -271,7 +271,7 @@ type CoreAvailability = BitVec<bitvec::order::Lsb0, u8>;
/// When we're choosing bitfields to include, the rule should be simple:
/// maximize availability. So basically, include all bitfields. And then
/// choose a coherent set of candidates along with that.
#[tracing::instrument(level = "trace", skip(return_senders, from_job), fields(subsystem = LOG_TARGET))]
#[tracing::instrument(level = "trace", skip(return_senders, from_job), fields(target = LOG_TARGET))]
async fn send_inherent_data(
relay_parent: Hash,
bitfields: &[SignedAvailabilityBitfield],
......@@ -311,7 +311,7 @@ async fn send_inherent_data(
///
/// Note: This does not enforce any sorting precondition on the output; the ordering there will be unrelated
/// to the sorting of the input.
#[tracing::instrument(level = "trace", fields(subsystem = LOG_TARGET))]
#[tracing::instrument(level = "trace", fields(target = LOG_TARGET))]
fn select_availability_bitfields(
cores: &[CoreState],
bitfields: &[SignedAvailabilityBitfield],
......@@ -343,7 +343,7 @@ fn select_availability_bitfields(
}
/// Determine which cores are free, and then to the degree possible, pick a candidate appropriate to each free core.
#[tracing::instrument(level = "trace", skip(sender), fields(subsystem = LOG_TARGET))]
#[tracing::instrument(level = "trace", skip(sender), fields(target = LOG_TARGET))]
async fn select_candidates(
availability_cores: &[CoreState],
bitfields: &[SignedAvailabilityBitfield],
......@@ -465,7 +465,7 @@ async fn select_candidates(
/// Produces a block number 1 higher than that of the relay parent
/// in the event of an invalid `relay_parent`, returns `Ok(0)`
#[tracing::instrument(level = "trace", skip(sender), fields(subsystem = LOG_TARGET))]
#[tracing::instrument(level = "trace", skip(sender), fields(target = LOG_TARGET))]
async fn get_block_number_under_construction(
relay_parent: Hash,
sender: &mut mpsc::Sender<FromJobCommand>,
......@@ -491,7 +491,7 @@ async fn get_block_number_under_construction(
/// - construct a transverse slice along `core_idx`
/// - bitwise-or it with the availability slice
/// - count the 1 bits, compare to the total length; true on 2/3+
#[tracing::instrument(level = "trace", fields(subsystem = LOG_TARGET))]
#[tracing::instrument(level = "trace", fields(target = LOG_TARGET))]
fn bitfields_indicate_availability(
core_idx: usize,
bitfields: &[SignedAvailabilityBitfield],
......
......@@ -257,7 +257,7 @@ impl<Client> RuntimeApiSubsystem<Client> where
}
}
#[tracing::instrument(skip(ctx, subsystem), fields(subsystem = LOG_TARGET))]
#[tracing::instrument(skip(ctx, subsystem), fields(target = LOG_TARGET))]
async fn run<Client>(
mut ctx: impl SubsystemContext<Message = RuntimeApiMessage>,
mut subsystem: RuntimeApiSubsystem<Client>,
......@@ -282,7 +282,7 @@ async fn run<Client>(
}
}
#[tracing::instrument(level = "trace", skip(client, metrics), fields(subsystem = LOG_TARGET))]
#[tracing::instrument(level = "trace", skip(client, metrics), fields(target = LOG_TARGET))]
fn make_runtime_api_request<Client>(
client: Arc<Client>,
metrics: Metrics,
......
......@@ -830,7 +830,7 @@ impl State {
/// Modify the reputation of a peer based on its behavior.
#[tracing::instrument(level = "trace", skip(ctx), fields(subsystem = LOG_TARGET))]
#[tracing::instrument(level = "trace", skip(ctx), fields(target = LOG_TARGET))]
async fn modify_reputation(
ctx: &mut impl SubsystemContext<Message = ApprovalDistributionMessage>,
peer_id: PeerId,
......@@ -854,7 +854,7 @@ impl ApprovalDistribution {
Self { metrics }
}
#[tracing::instrument(skip(self, ctx), fields(subsystem = LOG_TARGET))]
#[tracing::instrument(skip(self, ctx), fields(target = LOG_TARGET))]
async fn run<Context>(self, ctx: Context)
where
Context: SubsystemContext<Message = ApprovalDistributionMessage>,
......@@ -864,7 +864,7 @@ impl ApprovalDistribution {
}
/// Used for testing.
#[tracing::instrument(skip(self, ctx, state), fields(subsystem = LOG_TARGET))]
#[tracing::instrument(skip(self, ctx, state), fields(target = LOG_TARGET))]
async fn run_inner<Context>(self, mut ctx: Context, state: &mut State)
where
Context: SubsystemContext<Message = ApprovalDistributionMessage>,
......
......@@ -213,7 +213,7 @@ impl Stream for Requester {
}
/// Query all hashes and descriptors of candidates pending availability at a particular block.
#[tracing::instrument(level = "trace", skip(ctx), fields(subsystem = LOG_TARGET))]
#[tracing::instrument(level = "trace", skip(ctx), fields(target = LOG_TARGET))]
async fn query_occupied_cores<Context>(
ctx: &mut Context,
relay_parent: Hash,
......
......@@ -57,6 +57,7 @@ where
/// Answer an incoming chunk request by querying the av store.
///
/// Returns: Ok(true) if chunk was found and served.
#[tracing::instrument(level = "trace", skip(ctx), fields(target = LOG_TARGET))]
pub async fn answer_request<Context>(
ctx: &mut Context,
req: IncomingRequest<v1::AvailabilityFetchingRequest>,
......@@ -78,7 +79,7 @@ where
}
/// Query chunk from the availability store.
#[tracing::instrument(level = "trace", skip(ctx), fields(subsystem = LOG_TARGET))]
#[tracing::instrument(level = "trace", skip(ctx), fields(target = LOG_TARGET))]
async fn query_chunk<Context>(
ctx: &mut Context,
candidate_hash: CandidateHash,
......
......@@ -182,7 +182,7 @@ impl SessionCache {
///
/// We assume validators in a group are tried in reverse order, so the reported bad validators
/// will be put at the beginning of the group.
#[tracing::instrument(level = "trace", skip(self, report), fields(subsystem = LOG_TARGET))]
#[tracing::instrument(level = "trace", skip(self, report), fields(target = LOG_TARGET))]
pub fn report_bad(&mut self, report: BadValidators) -> Result<()> {
let session = self
.session_info_cache
......
......@@ -589,7 +589,7 @@ async fn report_peer(
}
/// Machinery around launching interactions into the background.
#[tracing::instrument(level = "trace", skip(ctx, state), fields(subsystem = LOG_TARGET))]
#[tracing::instrument(level = "trace", skip(ctx, state), fields(target = LOG_TARGET))]
async fn launch_interaction(
state: &mut State,
ctx: &mut impl SubsystemContext<Message = AvailabilityRecoveryMessage>,
......@@ -654,7 +654,7 @@ async fn launch_interaction(
}
/// Handles an availability recovery request.
#[tracing::instrument(level = "trace", skip(ctx, state), fields(subsystem = LOG_TARGET))]
#[tracing::instrument(level = "trace", skip(ctx, state), fields(target = LOG_TARGET))]
async fn handle_recover(
state: &mut State,
ctx: &mut impl SubsystemContext<Message = AvailabilityRecoveryMessage>,
......@@ -718,7 +718,7 @@ async fn handle_recover(
}
/// Queries a chunk from av-store.
#[tracing::instrument(level = "trace", skip(ctx), fields(subsystem = LOG_TARGET))]
#[tracing::instrument(level = "trace", skip(ctx), fields(target = LOG_TARGET))]
async fn query_chunk(
ctx: &mut impl SubsystemContext<Message = AvailabilityRecoveryMessage>,
candidate_hash: CandidateHash,
......@@ -733,7 +733,7 @@ async fn query_chunk(
}
/// Queries a chunk from av-store.
#[tracing::instrument(level = "trace", skip(ctx), fields(subsystem = LOG_TARGET))]
#[tracing::instrument(level = "trace", skip(ctx), fields(target = LOG_TARGET))]
async fn query_full_data(
ctx: &mut impl SubsystemContext<Message = AvailabilityRecoveryMessage>,
candidate_hash: CandidateHash,
......@@ -747,7 +747,7 @@ async fn query_full_data(
}
/// Handles message from interaction.
#[tracing::instrument(level = "trace", skip(ctx, state), fields(subsystem = LOG_TARGET))]
#[tracing::instrument(level = "trace", skip(ctx, state), fields(target = LOG_TARGET))]
async fn handle_from_interaction(
state: &mut State,
ctx: &mut impl SubsystemContext<Message = AvailabilityRecoveryMessage>,
......@@ -827,7 +827,7 @@ async fn handle_from_interaction(
}
/// Handles a network bridge update.
#[tracing::instrument(level = "trace", skip(ctx, state), fields(subsystem = LOG_TARGET))]
#[tracing::instrument(level = "trace", skip(ctx, state), fields(target = LOG_TARGET))]
async fn handle_network_update(
state: &mut State,
ctx: &mut impl SubsystemContext<Message = AvailabilityRecoveryMessage>,
......
......@@ -142,7 +142,7 @@ struct PeerData {
}
/// Main driver, processing network events and messages from other subsystems.
#[tracing::instrument(skip(bridge, ctx), fields(subsystem = LOG_TARGET))]
#[tracing::instrument(skip(bridge, ctx), fields(target = LOG_TARGET))]
async fn run_network<N, AD>(
mut bridge: NetworkBridge<N, AD>,
mut ctx: impl SubsystemContext<Message=NetworkBridgeMessage>,
......@@ -417,7 +417,7 @@ fn construct_view(live_heads: impl DoubleEndedIterator<Item = Hash>, finalized_n
)
}
#[tracing::instrument(level = "trace", skip(net, ctx, validation_peers, collation_peers), fields(subsystem = LOG_TARGET))]
#[tracing::instrument(level = "trace", skip(net, ctx, validation_peers, collation_peers), fields(target = LOG_TARGET))]
async fn update_our_view(
net: &mut impl Network,
ctx: &mut impl SubsystemContext<Message = NetworkBridgeMessage>,
......@@ -460,7 +460,7 @@ async fn update_our_view(
// Handle messages on a specific peer-set. The peer is expected to be connected on that
// peer-set.
#[tracing::instrument(level = "trace", skip(peers, messages, net), fields(subsystem = LOG_TARGET))]
#[tracing::instrument(level = "trace", skip(peers, messages, net), fields(target = LOG_TARGET))]
async fn handle_peer_messages<M>(
peer: PeerId,
peers: &mut HashMap<PeerId, PeerData>,
......@@ -516,7 +516,7 @@ async fn handle_peer_messages<M>(
Ok(outgoing_messages)
}
#[tracing::instrument(level = "trace", skip(net, peers), fields(subsystem = LOG_TARGET))]
#[tracing::instrument(level = "trace", skip(net, peers), fields(target = LOG_TARGET))]
async fn send_validation_message<I>(
net: &mut impl Network,
peers: I,
......@@ -529,7 +529,7 @@ async fn send_validation_message<I>(
send_message(net, peers, PeerSet::Validation, message).await
}
#[tracing::instrument(level = "trace", skip(net, peers), fields(subsystem = LOG_TARGET))]
#[tracing::instrument(level = "trace", skip(net, peers), fields(target = LOG_TARGET))]
async fn send_collation_message<I>(
net: &mut impl Network,
peers: I,
......@@ -557,7 +557,7 @@ async fn dispatch_collation_event_to_all(
dispatch_collation_events_to_all(std::iter::once(event), ctx).await
}
#[tracing::instrument(level = "trace", skip(events, ctx), fields(subsystem = LOG_TARGET))]
#[tracing::instrument(level = "trace", skip(events, ctx), fields(target = LOG_TARGET))]
async fn dispatch_validation_events_to_all<I>(
events: I,
ctx: &mut impl SubsystemContext<Message=NetworkBridgeMessage>,
......@@ -569,7 +569,7 @@ async fn dispatch_validation_events_to_all<I>(
ctx.send_messages(events.into_iter().flat_map(AllMessages::dispatch_iter)).await
}
#[tracing::instrument(level = "trace", skip(events, ctx), fields(subsystem = LOG_TARGET))]
#[tracing::instrument(level = "trace", skip(events, ctx), fields(target = LOG_TARGET))]
async fn dispatch_collation_events_to_all<I>(
events: I,
ctx: &mut impl SubsystemContext<Message=NetworkBridgeMessage>,
......
......@@ -151,7 +151,7 @@ impl Network for Arc<NetworkService<Block, Hash>> {
NetworkService::event_stream(self, "polkadot-network-bridge").boxed()
}
#[tracing::instrument(level = "trace", skip(self), fields(subsystem = LOG_TARGET))]
#[tracing::instrument(level = "trace", skip(self), fields(target = LOG_TARGET))]
fn action_sink<'a>(
&'a mut self,
) -> Pin<Box<dyn Sink<NetworkAction, Error = SubsystemError> + Send + 'a>> {
......
......@@ -169,7 +169,7 @@ impl<N: Network, AD: AuthorityDiscovery> Service<N, AD> {
/// Find connected validators using the given `validator_ids`.
///
/// Returns a [`HashMap`] that contains the found [`AuthorityDiscoveryId`]'s and their associated [`PeerId`]'s.
#[tracing::instrument(level = "trace", skip(self, authority_discovery_service), fields(subsystem = LOG_TARGET))]
#[tracing::instrument(level = "trace", skip(self, authority_discovery_service), fields(target = LOG_TARGET))]
async fn find_connected_validators(
&mut self,
validator_ids: &[AuthorityDiscoveryId],
......@@ -216,7 +216,7 @@ impl<N: Network, AD: AuthorityDiscovery> Service<N, AD> {
/// This method will also clean up all previously revoked requests.
/// it takes `network_service` and `authority_discovery_service` by value
/// and returns them as a workaround for the Future: Send requirement imposed by async fn impl.
#[tracing::instrument(level = "trace", skip(self, connected, network_service, authority_discovery_service), fields(subsystem = LOG_TARGET))]
#[tracing::instrument(level = "trace", skip(self, connected, network_service, authority_discovery_service), fields(target = LOG_TARGET))]
pub async fn on_request(
&mut self,
validator_ids: Vec<AuthorityDiscoveryId>,
......@@ -335,7 +335,7 @@ impl<N: Network, AD: AuthorityDiscovery> Service<N, AD> {
}
/// Should be called when a peer connected.
#[tracing::instrument(level = "trace", skip(self, authority_discovery_service), fields(subsystem = LOG_TARGET))]
#[tracing::instrument(level = "trace", skip(self, authority_discovery_service), fields(target = LOG_TARGET))]
pub async fn on_peer_connected(
&mut self,
peer_id: PeerId,
......
......@@ -260,7 +260,7 @@ impl State {
/// or the relay-parent isn't in the active-leaves set, we ignore the message
/// as it must be invalid in that case - although this indicates a logic error
/// elsewhere in the node.
#[tracing::instrument(level = "trace", skip(ctx, state, pov), fields(subsystem = LOG_TARGET))]
#[tracing::instrument(level = "trace", skip(ctx, state, pov), fields(target = LOG_TARGET))]
async fn distribute_collation(
ctx: &mut impl SubsystemContext<Message = CollatorProtocolMessage>,
state: &mut State,
......@@ -338,7 +338,7 @@ async fn distribute_collation(
/// Get the Id of the Core that is assigned to the para being collated on if any
/// and the total number of cores.
#[tracing::instrument(level = "trace", skip(ctx), fields(subsystem = LOG_TARGET))]
#[tracing::instrument(level = "trace", skip(ctx), fields(target = LOG_TARGET))]
async fn determine_core(
ctx: &mut impl SubsystemContext<Message = CollatorProtocolMessage>,
para_id: ParaId,
......@@ -360,7 +360,7 @@ async fn determine_core(
/// Figure out current and next group of validators assigned to the para being collated on.
///
/// Returns [`ValidatorId`]'s of current and next group as determined based on the `relay_parent`.
#[tracing::instrument(level = "trace", skip(ctx), fields(subsystem = LOG_TARGET))]
#[tracing::instrument(level = "trace", skip(ctx), fields(target = LOG_TARGET))]
async fn determine_our_validators(
ctx: &mut impl SubsystemContext<Message = CollatorProtocolMessage>,
core_index: CoreIndex,
......@@ -386,7 +386,7 @@ async fn determine_our_validators(
}
/// Issue a `Declare` collation message to the given `peer`.
#[tracing::instrument(level = "trace", skip(ctx, state), fields(subsystem = LOG_TARGET))]
#[tracing::instrument(level = "trace", skip(ctx, state), fields(target = LOG_TARGET))]
async fn declare(
ctx: &mut impl SubsystemContext<Message = CollatorProtocolMessage>,
state: &mut State,
......@@ -404,7 +404,7 @@ async fn declare(
/// Issue a connection request to a set of validators and
/// revoke the previous connection request.
#[tracing::instrument(level = "trace", skip(ctx, state), fields(subsystem = LOG_TARGET))]
#[tracing::instrument(level = "trace", skip(ctx, state), fields(target = LOG_TARGET))]
async fn connect_to_validators(
ctx: &mut impl SubsystemContext<Message = CollatorProtocolMessage>,
relay_parent: Hash,
......@@ -428,7 +428,7 @@ async fn connect_to_validators(
///
/// This will only advertise a collation if there exists one for the given `relay_parent` and the given `peer` is
/// set as validator for our para at the given `relay_parent`.
#[tracing::instrument(level = "trace", skip(ctx, state), fields(subsystem = LOG_TARGET))]
#[tracing::instrument(level = "trace", skip(ctx, state), fields(target = LOG_TARGET))]
async fn advertise_collation(
ctx: &mut impl SubsystemContext<Message = CollatorProtocolMessage>,
state: &mut State,
......@@ -484,7 +484,7 @@ async fn advertise_collation(
}
/// The main incoming message dispatching switch.
#[tracing::instrument(level = "trace", skip(ctx, state), fields(subsystem = LOG_TARGET))]
#[tracing::instrument(level = "trace", skip(ctx, state), fields(target = LOG_TARGET))]
async fn process_msg(
ctx: &mut impl SubsystemContext<Message = CollatorProtocolMessage>,
state: &mut State,
......@@ -568,7 +568,7 @@ async fn process_msg(
}
/// Issue a response to a previously requested collation.
#[tracing::instrument(level = "trace", skip(ctx, state, pov), fields(subsystem = LOG_TARGET))]
#[tracing::instrument(level = "trace", skip(ctx, state, pov), fields(target = LOG_TARGET))]
async fn send_collation(
ctx: &mut impl SubsystemContext<Message = CollatorProtocolMessage>,
state: &mut State,
......@@ -602,7 +602,7 @@ async fn send_collation(
}
/// A networking messages switch.
#[tracing::instrument(level = "trace", skip(ctx, state), fields(subsystem = LOG_TARGET))]
#[tracing::instrument(level = "trace", skip(ctx, state), fields(target = LOG_TARGET))]
async fn handle_incoming_peer_message(
ctx: &mut impl SubsystemContext<Message = CollatorProtocolMessage>,
state: &mut State,
......@@ -685,7 +685,7 @@ async fn handle_incoming_peer_message(
}
/// Our view has changed.
#[tracing::instrument(level = "trace", skip(ctx, state), fields(subsystem = LOG_TARGET))]
#[tracing::instrument(level = "trace", skip(ctx, state), fields(target = LOG_TARGET))]
async fn handle_peer_view_change(
ctx: &mut impl SubsystemContext<Message = CollatorProtocolMessage>,
state: &mut State,
......@@ -706,7 +706,7 @@ async fn handle_peer_view_change(
/// A validator is connected.
///
/// `Declare` that we are a collator with a given `CollatorId`.
#[tracing::instrument(level = "trace", skip(ctx, state), fields(subsystem = LOG_TARGET))]
#[tracing::instrument(level = "trace", skip(ctx, state), fields(target = LOG_TARGET))]
async fn handle_validator_connected(
ctx: &mut impl SubsystemContext<Message = CollatorProtocolMessage>,
state: &mut State,
......@@ -735,7 +735,7 @@ async fn handle_validator_connected(
}
/// Bridge messages switch.
#[tracing::instrument(level = "trace", skip(ctx, state), fields(subsystem = LOG_TARGET))]
#[tracing::instrument(level = "trace", skip(ctx, state), fields(target = LOG_TARGET))]
async fn handle_network_msg(
ctx: &mut impl SubsystemContext<Message = CollatorProtocolMessage>,
state: &mut State,
......@@ -767,7 +767,7 @@ async fn handle_network_msg(
}
/// Handles our view changes.
#[tracing::instrument(level = "trace", skip(state), fields(subsystem = LOG_TARGET))]
#[tracing::instrument(level = "trace", skip(state), fields(target = LOG_TARGET))]
async fn handle_our_view_change(
state: &mut State,
view: OurView,
......@@ -810,7 +810,7 @@ async fn handle_our_view_change(
}
/// The collator protocol collator side main loop.
#[tracing::instrument(skip(ctx, metrics), fields(subsystem = LOG_TARGET))]