diff --git a/polkadot/Cargo.lock b/polkadot/Cargo.lock
index 1143c521b42ad1e55185472f626c942895cad510..1ebd93aaf460d35af678ee40104f62a5fb67abab 100644
--- a/polkadot/Cargo.lock
+++ b/polkadot/Cargo.lock
@@ -1877,6 +1877,7 @@ dependencies = [
  "fs-err",
  "proc-macro2",
  "quote",
+ "syn",
 ]
 
 [[package]]
@@ -4464,6 +4465,7 @@ dependencies = [
  "log",
  "thiserror",
  "tracing",
+ "tracing-gum",
 ]
 
 [[package]]
@@ -6271,7 +6273,7 @@ dependencies = [
  "rand_core 0.5.1",
  "schnorrkel",
  "sp-core",
- "tracing",
+ "tracing-gum",
 ]
 
 [[package]]
@@ -6292,7 +6294,7 @@ dependencies = [
  "sp-application-crypto",
  "sp-core",
  "sp-keystore",
- "tracing",
+ "tracing-gum",
 ]
 
 [[package]]
@@ -6321,7 +6323,7 @@ dependencies = [
  "sp-keystore",
  "sp-tracing",
  "thiserror",
- "tracing",
+ "tracing-gum",
 ]
 
 [[package]]
@@ -6350,7 +6352,7 @@ dependencies = [
  "sp-core",
  "sp-keyring",
  "thiserror",
- "tracing",
+ "tracing-gum",
 ]
 
 [[package]]
@@ -6433,7 +6435,7 @@ dependencies = [
  "sp-keystore",
  "sp-runtime",
  "thiserror",
- "tracing",
+ "tracing-gum",
 ]
 
 [[package]]
@@ -6476,7 +6478,7 @@ dependencies = [
  "sp-keystore",
  "sp-tracing",
  "thiserror",
- "tracing",
+ "tracing-gum",
 ]
 
 [[package]]
@@ -6515,7 +6517,7 @@ dependencies = [
  "sp-keyring",
  "sp-keystore",
  "sp-tracing",
- "tracing",
+ "tracing-gum",
 ]
 
 [[package]]
@@ -6539,7 +6541,7 @@ dependencies = [
  "sp-consensus",
  "sp-core",
  "sp-keyring",
- "tracing",
+ "tracing-gum",
 ]
 
 [[package]]
@@ -6558,7 +6560,7 @@ dependencies = [
  "sp-core",
  "sp-maybe-compressed-blob",
  "thiserror",
- "tracing",
+ "tracing-gum",
 ]
 
 [[package]]
@@ -6595,7 +6597,7 @@ dependencies = [
  "sp-keyring",
  "sp-keystore",
  "sp-runtime",
- "tracing",
+ "tracing-gum",
 ]
 
 [[package]]
@@ -6623,7 +6625,7 @@ dependencies = [
  "sp-core",
  "sp-keyring",
  "thiserror",
- "tracing",
+ "tracing-gum",
 ]
 
 [[package]]
@@ -6648,7 +6650,7 @@ dependencies = [
  "sp-keystore",
  "sp-tracing",
  "thiserror",
- "tracing",
+ "tracing-gum",
 ]
 
 [[package]]
@@ -6663,7 +6665,7 @@ dependencies = [
  "polkadot-primitives-test-helpers",
  "sp-keystore",
  "thiserror",
- "tracing",
+ "tracing-gum",
  "wasm-timer",
 ]
 
@@ -6686,7 +6688,7 @@ dependencies = [
  "sp-core",
  "sp-keyring",
  "sp-maybe-compressed-blob",
- "tracing",
+ "tracing-gum",
 ]
 
 [[package]]
@@ -6705,7 +6707,7 @@ dependencies = [
  "sc-consensus-babe",
  "sp-blockchain",
  "sp-core",
- "tracing",
+ "tracing-gum",
 ]
 
 [[package]]
@@ -6726,7 +6728,7 @@ dependencies = [
  "polkadot-primitives",
  "sp-core",
  "thiserror",
- "tracing",
+ "tracing-gum",
 ]
 
 [[package]]
@@ -6751,7 +6753,7 @@ dependencies = [
  "sp-keyring",
  "sp-keystore",
  "thiserror",
- "tracing",
+ "tracing-gum",
 ]
 
 [[package]]
@@ -6767,7 +6769,7 @@ dependencies = [
  "sp-inherents",
  "sp-runtime",
  "thiserror",
- "tracing",
+ "tracing-gum",
 ]
 
 [[package]]
@@ -6787,7 +6789,7 @@ dependencies = [
  "sp-application-crypto",
  "sp-keystore",
  "thiserror",
- "tracing",
+ "tracing-gum",
 ]
 
 [[package]]
@@ -6820,7 +6822,7 @@ dependencies = [
  "tempfile",
  "test-parachain-adder",
  "test-parachain-halt",
- "tracing",
+ "tracing-gum",
 ]
 
 [[package]]
@@ -6843,7 +6845,7 @@ dependencies = [
  "sp-keystore",
  "sp-runtime",
  "thiserror",
- "tracing",
+ "tracing-gum",
 ]
 
 [[package]]
@@ -6864,7 +6866,7 @@ dependencies = [
  "sp-consensus-babe",
  "sp-core",
  "sp-keyring",
- "tracing",
+ "tracing-gum",
 ]
 
 [[package]]
@@ -6909,7 +6911,7 @@ dependencies = [
  "substrate-test-utils",
  "tempfile",
  "tokio",
- "tracing",
+ "tracing-gum",
 ]
 
 [[package]]
@@ -7033,7 +7035,7 @@ dependencies = [
  "sp-keystore",
  "tempfile",
  "thiserror",
- "tracing",
+ "tracing-gum",
 ]
 
 [[package]]
@@ -7058,7 +7060,7 @@ dependencies = [
  "sc-client-api",
  "sp-api",
  "sp-core",
- "tracing",
+ "tracing-gum",
 ]
 
 [[package]]
@@ -7074,7 +7076,7 @@ dependencies = [
  "polkadot-node-primitives",
  "polkadot-overseer-gen-proc-macro",
  "thiserror",
- "tracing",
+ "tracing-gum",
  "trybuild",
 ]
 
@@ -7505,7 +7507,7 @@ dependencies = [
  "sp-trie",
  "substrate-prometheus-endpoint",
  "thiserror",
- "tracing",
+ "tracing-gum",
  "westend-runtime",
  "westend-runtime-constants",
 ]
@@ -7537,7 +7539,7 @@ dependencies = [
  "sp-staking",
  "sp-tracing",
  "thiserror",
- "tracing",
+ "tracing-gum",
 ]
 
 [[package]]
@@ -7599,7 +7601,7 @@ dependencies = [
  "polkadot-primitives",
  "sp-core",
  "sp-keystore",
- "tracing",
+ "tracing-gum",
 ]
 
 [[package]]
@@ -7720,7 +7722,7 @@ dependencies = [
  "tempfile",
  "test-runtime-constants",
  "tokio",
- "tracing",
+ "tracing-gum",
 ]
 
 [[package]]
@@ -11502,6 +11504,28 @@ dependencies = [
  "tracing",
 ]
 
+[[package]]
+name = "tracing-gum"
+version = "0.9.17"
+dependencies = [
+ "polkadot-node-jaeger",
+ "polkadot-primitives",
+ "tracing",
+ "tracing-gum-proc-macro",
+]
+
+[[package]]
+name = "tracing-gum-proc-macro"
+version = "0.9.17"
+dependencies = [
+ "assert_matches",
+ "expander 0.0.6",
+ "proc-macro-crate 1.1.3",
+ "proc-macro2",
+ "quote",
+ "syn",
+]
+
 [[package]]
 name = "tracing-log"
 version = "0.1.2"
@@ -12721,7 +12745,7 @@ dependencies = [
  "thiserror",
  "tokio",
  "tokio-tungstenite",
- "tracing",
+ "tracing-gum",
  "url 2.2.2",
 ]
 
diff --git a/polkadot/Cargo.toml b/polkadot/Cargo.toml
index fb45e396523ce10ffabe7d74aaa90b8da91214dd..4443e9c8f881da27407d49a86c232f2b09a70197 100644
--- a/polkadot/Cargo.toml
+++ b/polkadot/Cargo.toml
@@ -93,6 +93,8 @@ members = [
 	"node/subsystem-test-helpers",
 	"node/subsystem-util",
 	"node/jaeger",
+	"node/gum",
+	"node/gum/proc-macro",
 	"node/metrics",
 	"node/metered-channel",
 	"node/test/client",
diff --git a/polkadot/core-primitives/src/lib.rs b/polkadot/core-primitives/src/lib.rs
index d1b21256f046498946ee791f86998fec030ae86c..4fdfe146c5665f5bf396e0249362a2354bd690de 100644
--- a/polkadot/core-primitives/src/lib.rs
+++ b/polkadot/core-primitives/src/lib.rs
@@ -69,6 +69,14 @@ pub type Hash = sp_core::H256;
 #[cfg_attr(feature = "std", derive(MallocSizeOf))]
 pub struct CandidateHash(pub Hash);
 
+#[cfg(feature = "std")]
+impl std::ops::Deref for CandidateHash {
+	type Target = Hash;
+	fn deref(&self) -> &Self::Target {
+		&self.0
+	}
+}
+
 #[cfg(feature = "std")]
 impl std::fmt::Display for CandidateHash {
 	fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
diff --git a/polkadot/node/collation-generation/Cargo.toml b/polkadot/node/collation-generation/Cargo.toml
index eeedd2c448e26a70e01d33366b779a1418bb8a17..bb0a4fc4b0e3cb65f039cac34e016e19ccba5b19 100644
--- a/polkadot/node/collation-generation/Cargo.toml
+++ b/polkadot/node/collation-generation/Cargo.toml
@@ -6,7 +6,7 @@ edition = "2021"
 
 [dependencies]
 futures = "0.3.21"
-tracing = "0.1.32"
+gum = { package = "tracing-gum", path = "../gum" }
 polkadot-erasure-coding = { path = "../../erasure-coding" }
 polkadot-node-primitives = { path = "../primitives" }
 polkadot-node-subsystem = { path = "../subsystem" }
diff --git a/polkadot/node/collation-generation/src/lib.rs b/polkadot/node/collation-generation/src/lib.rs
index 5cb888e100b4e4bbfb39ad6c5a3f074a5e37d808..8d6ed419e6557886df6f1fa692838b64aa421470 100644
--- a/polkadot/node/collation-generation/src/lib.rs
+++ b/polkadot/node/collation-generation/src/lib.rs
@@ -129,7 +129,7 @@ impl CollationGenerationSubsystem {
 					)
 					.await
 					{
-						tracing::warn!(target: LOG_TARGET, err = ?err, "failed to handle new activations");
+						gum::warn!(target: LOG_TARGET, err = ?err, "failed to handle new activations");
 					}
 				}
 
@@ -140,7 +140,7 @@ impl CollationGenerationSubsystem {
 				msg: CollationGenerationMessage::Initialize(config),
 			}) => {
 				if self.config.is_some() {
-					tracing::error!(target: LOG_TARGET, "double initialization");
+					gum::error!(target: LOG_TARGET, "double initialization");
 				} else {
 					self.config = Some(Arc::new(config));
 				}
@@ -148,7 +148,7 @@ impl CollationGenerationSubsystem {
 			},
 			Ok(FromOverseer::Signal(OverseerSignal::BlockFinalized(..))) => false,
 			Err(err) => {
-				tracing::error!(
+				gum::error!(
 					target: LOG_TARGET,
 					err = ?err,
 					"error receiving message from subsystem context: {:?}",
@@ -207,7 +207,7 @@ async fn handle_new_activations<Context: SubsystemContext>(
 					(scheduled_core, OccupiedCoreAssumption::Free),
 				CoreState::Occupied(_occupied_core) => {
 					// TODO: https://github.com/paritytech/polkadot/issues/1573
-					tracing::trace!(
+					gum::trace!(
 						target: LOG_TARGET,
 						core_idx = %core_idx,
 						relay_parent = ?relay_parent,
@@ -216,7 +216,7 @@ async fn handle_new_activations<Context: SubsystemContext>(
 					continue
 				},
 				CoreState::Free => {
-					tracing::trace!(
+					gum::trace!(
 						target: LOG_TARGET,
 						core_idx = %core_idx,
 						"core is free. Keep going.",
@@ -226,7 +226,7 @@ async fn handle_new_activations<Context: SubsystemContext>(
 			};
 
 			if scheduled_core.para_id != config.para_id {
-				tracing::trace!(
+				gum::trace!(
 					target: LOG_TARGET,
 					core_idx = %core_idx,
 					relay_parent = ?relay_parent,
@@ -252,7 +252,7 @@ async fn handle_new_activations<Context: SubsystemContext>(
 			{
 				Some(v) => v,
 				None => {
-					tracing::trace!(
+					gum::trace!(
 						target: LOG_TARGET,
 						core_idx = %core_idx,
 						relay_parent = ?relay_parent,
@@ -274,7 +274,7 @@ async fn handle_new_activations<Context: SubsystemContext>(
 			{
 				Some(v) => v,
 				None => {
-					tracing::trace!(
+					gum::trace!(
 						target: LOG_TARGET,
 						core_idx = %core_idx,
 						relay_parent = ?relay_parent,
@@ -298,7 +298,7 @@ async fn handle_new_activations<Context: SubsystemContext>(
 						match (task_config.collator)(relay_parent, &validation_data).await {
 							Some(collation) => collation.into_inner(),
 							None => {
-								tracing::debug!(
+								gum::debug!(
 									target: LOG_TARGET,
 									para_id = %scheduled_core.para_id,
 									"collator returned no collation on collate",
@@ -318,7 +318,7 @@ async fn handle_new_activations<Context: SubsystemContext>(
 						// As such, honest collators never produce an uncompressed PoV which starts with
 						// a compression magic number, which would lead validators to reject the collation.
 						if encoded_size > validation_data.max_pov_size as usize {
-							tracing::debug!(
+							gum::debug!(
 								target: LOG_TARGET,
 								para_id = %scheduled_core.para_id,
 								size = encoded_size,
@@ -346,7 +346,7 @@ async fn handle_new_activations<Context: SubsystemContext>(
 						match erasure_root(n_validators, validation_data, pov.clone()) {
 							Ok(erasure_root) => erasure_root,
 							Err(err) => {
-								tracing::error!(
+								gum::error!(
 									target: LOG_TARGET,
 									para_id = %scheduled_core.para_id,
 									err = ?err,
@@ -380,7 +380,7 @@ async fn handle_new_activations<Context: SubsystemContext>(
 						},
 					};
 
-					tracing::debug!(
+					gum::debug!(
 						target: LOG_TARGET,
 						candidate_hash = ?ccr.hash(),
 						?pov_hash,
@@ -396,7 +396,7 @@ async fn handle_new_activations<Context: SubsystemContext>(
 						))
 						.await
 					{
-						tracing::warn!(
+						gum::warn!(
 							target: LOG_TARGET,
 							para_id = %scheduled_core.para_id,
 							err = ?err,
diff --git a/polkadot/node/core/approval-voting/Cargo.toml b/polkadot/node/core/approval-voting/Cargo.toml
index 7146a2b3648d5343fd2a95435d20279d2f5363a3..c9139c785506c3b86c7ddeef20b0b366ae53cb18 100644
--- a/polkadot/node/core/approval-voting/Cargo.toml
+++ b/polkadot/node/core/approval-voting/Cargo.toml
@@ -8,7 +8,7 @@ edition = "2021"
 futures = "0.3.21"
 futures-timer = "3.0.2"
 parity-scale-codec = { version = "3.1.0", default-features = false, features = ["bit-vec", "derive"] }
-tracing = "0.1.32"
+gum = { package = "tracing-gum", path = "../../gum" }
 bitvec = { version = "1.0.0", default-features = false, features = ["alloc"] }
 lru = "0.7"
 merlin = "2.0"
diff --git a/polkadot/node/core/approval-voting/src/criteria.rs b/polkadot/node/core/approval-voting/src/criteria.rs
index 7630a038a94406a9119382877cbdd49b158004b0..a2bd0889f034b57d83150b9b45d13d6c7b639cdb 100644
--- a/polkadot/node/core/approval-voting/src/criteria.rs
+++ b/polkadot/node/core/approval-voting/src/criteria.rs
@@ -236,7 +236,7 @@ pub(crate) fn compute_assignments(
 		config.assignment_keys.is_empty() ||
 		config.validator_groups.is_empty()
 	{
-		tracing::trace!(
+		gum::trace!(
 			target: LOG_TARGET,
 			n_cores = config.n_cores,
 			has_assignment_keys = !config.assignment_keys.is_empty(),
@@ -255,7 +255,7 @@ pub(crate) fn compute_assignments(
 				Err(sc_keystore::Error::Unavailable) => None,
 				Err(sc_keystore::Error::Io(e)) if e.kind() == std::io::ErrorKind::NotFound => None,
 				Err(e) => {
-					tracing::warn!(target: LOG_TARGET, "Encountered keystore error: {:?}", e);
+					gum::warn!(target: LOG_TARGET, "Encountered keystore error: {:?}", e);
 					None
 				},
 			}
@@ -263,7 +263,7 @@ pub(crate) fn compute_assignments(
 
 		match key {
 			None => {
-				tracing::trace!(target: LOG_TARGET, "No assignment key");
+				gum::trace!(target: LOG_TARGET, "No assignment key");
 				return HashMap::new()
 			},
 			Some(k) => k,
@@ -277,7 +277,7 @@ pub(crate) fn compute_assignments(
 		.map(|(c_hash, core, _)| (c_hash, core))
 		.collect::<Vec<_>>();
 
-	tracing::trace!(
+	gum::trace!(
 		target: LOG_TARGET,
 		assignable_cores = leaving_cores.len(),
 		"Assigning to candidates from different backing groups"
@@ -333,7 +333,7 @@ fn compute_relay_vrf_modulo_assignments(
 					if let Some((candidate_hash, _)) =
 						leaving_cores.clone().into_iter().find(|(_, c)| c == core)
 					{
-						tracing::trace!(
+						gum::trace!(
 							target: LOG_TARGET,
 							?candidate_hash,
 							?core,
@@ -415,7 +415,7 @@ fn compute_relay_vrf_delay_assignments(
 		};
 
 		if used {
-			tracing::trace!(
+			gum::trace!(
 				target: LOG_TARGET,
 				?candidate_hash,
 				?core,
diff --git a/polkadot/node/core/approval-voting/src/import.rs b/polkadot/node/core/approval-voting/src/import.rs
index 86152bfb55b2e65458f0a7121fbe198d95610302..cb8dea17a51e170c776c0624ee4c81d9fbd7dfe0 100644
--- a/polkadot/node/core/approval-voting/src/import.rs
+++ b/polkadot/node/core/approval-voting/src/import.rs
@@ -138,7 +138,7 @@ async fn imported_block_info(
 			.as_ref()
 			.map_or(true, |s| session_index < s.earliest_session())
 		{
-			tracing::debug!(
+			gum::debug!(
 				target: LOG_TARGET,
 				"Block {} is from ancient session {}. Skipping",
 				block_hash,
@@ -188,11 +188,7 @@ async fn imported_block_info(
 	{
 		Some(s) => s,
 		None => {
-			tracing::debug!(
-				target: LOG_TARGET,
-				"Session info unavailable for block {}",
-				block_hash,
-			);
+			gum::debug!(target: LOG_TARGET, "Session info unavailable for block {}", block_hash,);
 
 			return Ok(None)
 		},
@@ -227,7 +223,7 @@ async fn imported_block_info(
 				}
 			},
 			None => {
-				tracing::debug!(
+				gum::debug!(
 					target: LOG_TARGET,
 					"BABE VRF info unavailable for block {}",
 					block_hash,
@@ -238,12 +234,12 @@ async fn imported_block_info(
 		}
 	};
 
-	tracing::trace!(target: LOG_TARGET, n_assignments = assignments.len(), "Produced assignments");
+	gum::trace!(target: LOG_TARGET, n_assignments = assignments.len(), "Produced assignments");
 
 	let force_approve =
 		block_header.digest.convert_first(|l| match ConsensusLog::from_digest_item(l) {
 			Ok(Some(ConsensusLog::ForceApprove(num))) if num < block_header.number => {
-				tracing::trace!(
+				gum::trace!(
 					target: LOG_TARGET,
 					?block_hash,
 					current_number = block_header.number,
@@ -256,7 +252,7 @@ async fn imported_block_info(
 			Ok(Some(_)) => None,
 			Ok(None) => None,
 			Err(err) => {
-				tracing::warn!(
+				gum::warn!(
 					target: LOG_TARGET,
 					?err,
 					?block_hash,
@@ -313,7 +309,7 @@ pub(crate) async fn handle_new_head(
 
 		match h_rx.await? {
 			Err(e) => {
-				tracing::debug!(
+				gum::debug!(
 					target: LOG_TARGET,
 					"Chain API subsystem temporarily unreachable {}",
 					e,
@@ -322,7 +318,7 @@ pub(crate) async fn handle_new_head(
 				return Ok(Vec::new())
 			},
 			Ok(None) => {
-				tracing::warn!(target: LOG_TARGET, "Missing header for new head {}", head);
+				gum::warn!(target: LOG_TARGET, "Missing header for new head {}", head);
 				return Ok(Vec::new())
 			},
 			Ok(Some(h)) => h,
@@ -332,7 +328,7 @@ pub(crate) async fn handle_new_head(
 	// Update session info based on most recent head.
 	match state.cache_session_info_for_head(ctx, head).await {
 		Err(e) => {
-			tracing::debug!(
+			gum::debug!(
 				target: LOG_TARGET,
 				?head,
 				?e,
@@ -342,7 +338,7 @@ pub(crate) async fn handle_new_head(
 			return Ok(Vec::new())
 		},
 		Ok(Some(a @ SessionWindowUpdate::Advanced { .. })) => {
-			tracing::info!(
+			gum::info!(
 				target: LOG_TARGET,
 				update = ?a,
 				"Advanced session window for approvals",
@@ -404,7 +400,7 @@ pub(crate) async fn handle_new_head(
 					if !lost_to_finality {
 						// Such errors are likely spurious, but this prevents us from getting gaps
 						// in the approval-db.
-						tracing::warn!(
+						gum::warn!(
 							target: LOG_TARGET,
 							"Unable to gather info about imported block {:?}. Skipping chain.",
 							(block_hash, block_header.number),
@@ -419,7 +415,7 @@ pub(crate) async fn handle_new_head(
 		imported_blocks_and_info
 	};
 
-	tracing::trace!(
+	gum::trace!(
 		target: LOG_TARGET,
 		imported_blocks = imported_blocks_and_info.len(),
 		"Inserting imported blocks into database"
@@ -458,7 +454,7 @@ pub(crate) async fn handle_new_head(
 		let num_candidates = included_candidates.len();
 		let approved_bitfield = {
 			if needed_approvals == 0 {
-				tracing::debug!(
+				gum::debug!(
 					target: LOG_TARGET,
 					block_hash = ?block_hash,
 					"Insta-approving all candidates",
@@ -476,7 +472,7 @@ pub(crate) async fn handle_new_head(
 					}
 				}
 				if result.any() {
-					tracing::debug!(
+					gum::debug!(
 						target: LOG_TARGET,
 						block_hash = ?block_hash,
 						"Insta-approving {}/{} candidates as the number of validators is too low",
@@ -510,7 +506,7 @@ pub(crate) async fn handle_new_head(
 		};
 
 		if let Some(up_to) = force_approve {
-			tracing::debug!(target: LOG_TARGET, ?block_hash, up_to, "Enacting force-approve");
+			gum::debug!(target: LOG_TARGET, ?block_hash, up_to, "Enacting force-approve");
 
 			let approved_hashes = crate::ops::force_approve(db, block_hash, up_to)
 				.map_err(|e| SubsystemError::with_origin("approval-voting", e))?;
@@ -521,7 +517,7 @@ pub(crate) async fn handle_new_head(
 			}
 		}
 
-		tracing::trace!(
+		gum::trace!(
 			target: LOG_TARGET,
 			?block_hash,
 			block_number = block_header.number,
@@ -561,7 +557,7 @@ pub(crate) async fn handle_new_head(
 		});
 	}
 
-	tracing::trace!(
+	gum::trace!(
 		target: LOG_TARGET,
 		head = ?head,
 		chain_length = approval_meta.len(),
diff --git a/polkadot/node/core/approval-voting/src/lib.rs b/polkadot/node/core/approval-voting/src/lib.rs
index 7d0a4a1e573a371b6550c783ed6cd9692b310b36..a75ffcdac8ad44072fff04ae01d965f14e2cc076 100644
--- a/polkadot/node/core/approval-voting/src/lib.rs
+++ b/polkadot/node/core/approval-voting/src/lib.rs
@@ -621,7 +621,7 @@ impl State {
 		let session_info = match self.session_info(block_entry.session()) {
 			Some(s) => s,
 			None => {
-				tracing::warn!(
+				gum::warn!(
 					target: LOG_TARGET,
 					"Unknown session info for {}",
 					block_entry.session()
@@ -718,7 +718,7 @@ where
 		match rx.await? {
 			Ok(number) => Some(number),
 			Err(err) => {
-				tracing::warn!(target: LOG_TARGET, ?err, "Failed fetching finalized number");
+				gum::warn!(target: LOG_TARGET, ?err, "Failed fetching finalized number");
 				None
 			},
 		}
@@ -962,13 +962,11 @@ async fn handle_actions(
 
 				match confirmation_rx.await {
 					Err(oneshot::Canceled) => {
-						tracing::debug!(target: LOG_TARGET, "Dispute coordinator confirmation lost",)
+						gum::debug!(target: LOG_TARGET, "Dispute coordinator confirmation lost",)
 					},
 					Ok(ImportStatementsResult::ValidImport) => {},
-					Ok(ImportStatementsResult::InvalidImport) => tracing::warn!(
-						target: LOG_TARGET,
-						"Failed to import statements of validity",
-					),
+					Ok(ImportStatementsResult::InvalidImport) =>
+						gum::warn!(target: LOG_TARGET, "Failed to import statements of validity",),
 				}
 			},
 			Action::NoteApprovedInChainSelection(block_hash) => {
@@ -1004,7 +1002,7 @@ fn distribution_messages_for_activation(
 		let block_entry = match db.load_block_entry(&block_hash)? {
 			Some(b) => b,
 			None => {
-				tracing::warn!(target: LOG_TARGET, ?block_hash, "Missing block entry");
+				gum::warn!(target: LOG_TARGET, ?block_hash, "Missing block entry");
 
 				continue
 			},
@@ -1021,7 +1019,7 @@ fn distribution_messages_for_activation(
 			let candidate_entry = match db.load_candidate_entry(&candidate_hash)? {
 				Some(c) => c,
 				None => {
-					tracing::warn!(
+					gum::warn!(
 						target: LOG_TARGET,
 						?block_hash,
 						?candidate_hash,
@@ -1068,7 +1066,7 @@ fn distribution_messages_for_activation(
 					}
 				},
 				None => {
-					tracing::warn!(
+					gum::warn!(
 						target: LOG_TARGET,
 						?block_hash,
 						?candidate_hash,
@@ -1105,7 +1103,7 @@ async fn handle_from_overseer(
 					Ok(block_imported_candidates) => {
 						// Schedule wakeups for all imported candidates.
 						for block_batch in block_imported_candidates {
-							tracing::debug!(
+							gum::debug!(
 								target: LOG_TARGET,
 								block_number = ?block_batch.block_number,
 								block_hash = ?block_batch.block_hash,
@@ -1122,7 +1120,7 @@ async fn handle_from_overseer(
 
 								if let Some(our_tranche) = our_tranche {
 									let tick = our_tranche as Tick + block_batch.block_tick;
-									tracing::trace!(
+									gum::trace!(
 										target: LOG_TARGET,
 										tranche = our_tranche,
 										candidate_hash = ?c_hash,
@@ -1150,7 +1148,7 @@ async fn handle_from_overseer(
 			actions
 		},
 		FromOverseer::Signal(OverseerSignal::BlockFinalized(block_hash, block_number)) => {
-			tracing::debug!(target: LOG_TARGET, ?block_hash, ?block_number, "Block finalized");
+			gum::debug!(target: LOG_TARGET, ?block_hash, ?block_number, "Block finalized");
 			*last_finalized_height = Some(block_number);
 
 			crate::ops::canonicalize(db, block_number, block_hash)
@@ -1262,7 +1260,7 @@ async fn handle_approved_ancestor(
 		let entry = match db.load_block_entry(&block_hash)? {
 			None => {
 				let block_number = target_number.saturating_sub(i as u32);
-				tracing::info!(
+				gum::info!(
 					target: LOG_TARGET,
 					unknown_number = ?block_number,
 					unknown_hash = ?block_hash,
@@ -1303,7 +1301,7 @@ async fn handle_approved_ancestor(
 			block_descriptions.clear();
 
 			let unapproved: Vec<_> = entry.unapproved_candidates().collect();
-			tracing::debug!(
+			gum::debug!(
 				target: LOG_TARGET,
 				"Block {} is {} blocks deep and has {}/{} candidates unapproved",
 				block_hash,
@@ -1315,7 +1313,7 @@ async fn handle_approved_ancestor(
 			for candidate_hash in unapproved {
 				match db.load_candidate_entry(&candidate_hash)? {
 					None => {
-						tracing::warn!(
+						gum::warn!(
 							target: LOG_TARGET,
 							?candidate_hash,
 							"Missing expected candidate in DB",
@@ -1325,7 +1323,7 @@ async fn handle_approved_ancestor(
 					},
 					Some(c_entry) => match c_entry.approval_entry(&block_hash) {
 						None => {
-							tracing::warn!(
+							gum::warn!(
 								target: LOG_TARGET,
 								?candidate_hash,
 								?block_hash,
@@ -1357,7 +1355,7 @@ async fn handle_approved_ancestor(
 							};
 
 							match a_entry.our_assignment() {
-								None => tracing::debug!(
+								None => gum::debug!(
 									target: LOG_TARGET,
 									?candidate_hash,
 									?block_hash,
@@ -1374,7 +1372,7 @@ async fn handle_approved_ancestor(
 									let approved =
 										triggered && { a_entry.local_statements().1.is_some() };
 
-									tracing::debug!(
+									gum::debug!(
 										target: LOG_TARGET,
 										?candidate_hash,
 										?block_hash,
@@ -1394,7 +1392,7 @@ async fn handle_approved_ancestor(
 		}
 	}
 
-	tracing::debug!(
+	gum::debug!(
 		target: LOG_TARGET,
 		"approved blocks {}-[{}]-{}",
 		target_number,
@@ -1507,7 +1505,7 @@ fn schedule_wakeup_action(
 	};
 
 	match maybe_action {
-		Some(Action::ScheduleWakeup { ref tick, .. }) => tracing::trace!(
+		Some(Action::ScheduleWakeup { ref tick, .. }) => gum::trace!(
 			target: LOG_TARGET,
 			tick,
 			?candidate_hash,
@@ -1515,7 +1513,7 @@ fn schedule_wakeup_action(
 			block_tick,
 			"Scheduling next wakeup.",
 		),
-		None => tracing::trace!(
+		None => gum::trace!(
 			target: LOG_TARGET,
 			?candidate_hash,
 			?block_hash,
@@ -1633,7 +1631,7 @@ fn check_and_import_assignment(
 		if is_duplicate {
 			AssignmentCheckResult::AcceptedDuplicate
 		} else {
-			tracing::trace!(
+			gum::trace!(
 				target: LOG_TARGET,
 				validator = assignment.validator.0,
 				candidate_hash = ?assigned_candidate_hash,
@@ -1756,7 +1754,7 @@ fn check_and_import_approval<T>(
 	// importing the approval can be heavy as it may trigger acceptance for a series of blocks.
 	let t = with_response(ApprovalCheckResult::Accepted);
 
-	tracing::trace!(
+	gum::trace!(
 		target: LOG_TARGET,
 		validator_index = approval.validator.0,
 		validator = ?pubkey,
@@ -1877,7 +1875,7 @@ fn advance_approval_state(
 		let is_approved = check.is_approved(tick_now.saturating_sub(APPROVAL_DELAY));
 
 		if is_approved {
-			tracing::trace!(
+			gum::trace!(
 				target: LOG_TARGET,
 				?candidate_hash,
 				?block_hash,
@@ -1906,7 +1904,7 @@ fn advance_approval_state(
 
 		(is_approved, status)
 	} else {
-		tracing::warn!(
+		gum::warn!(
 			target: LOG_TARGET,
 			?candidate_hash,
 			?block_hash,
@@ -2021,7 +2019,7 @@ fn process_wakeup(
 	let session_info = match state.session_info(block_entry.session()) {
 		Some(i) => i,
 		None => {
-			tracing::warn!(
+			gum::warn!(
 				target: LOG_TARGET,
 				"Missing session info for live block {} in session {}",
 				relay_block,
@@ -2040,7 +2038,7 @@ fn process_wakeup(
 
 	let tranche_now = state.clock.tranche_now(state.slot_duration_millis, block_entry.slot());
 
-	tracing::trace!(
+	gum::trace!(
 		target: LOG_TARGET,
 		tranche = tranche_now,
 		?candidate_hash,
@@ -2100,7 +2098,7 @@ fn process_wakeup(
 			block_entry.candidates().iter().position(|(_, h)| &candidate_hash == h);
 
 		if let Some(i) = index_in_candidate {
-			tracing::trace!(
+			gum::trace!(
 				target: LOG_TARGET,
 				?candidate_hash,
 				para_id = ?candidate_receipt.descriptor.para_id,
@@ -2185,7 +2183,7 @@ async fn launch_approval(
 	let candidate_hash = candidate.hash();
 	let para_id = candidate.descriptor.para_id;
 
-	tracing::trace!(target: LOG_TARGET, ?candidate_hash, ?para_id, "Recovering data.");
+	gum::trace!(target: LOG_TARGET, ?candidate_hash, ?para_id, "Recovering data.");
 
 	let timer = metrics.time_recover_and_approve();
 	ctx.send_message(AvailabilityRecoveryMessage::RecoverAvailableData(
@@ -2219,7 +2217,7 @@ async fn launch_approval(
 			Ok(Err(e)) => {
 				match &e {
 					&RecoveryError::Unavailable => {
-						tracing::warn!(
+						gum::warn!(
 							target: LOG_TARGET,
 							?para_id,
 							?candidate_hash,
@@ -2230,7 +2228,7 @@ async fn launch_approval(
 						metrics_guard.take().on_approval_unavailable();
 					},
 					&RecoveryError::Invalid => {
-						tracing::warn!(
+						gum::warn!(
 							target: LOG_TARGET,
 							?para_id,
 							?candidate_hash,
@@ -2261,7 +2259,7 @@ async fn launch_approval(
 			Ok(Err(_)) => return ApprovalState::failed(validator_index, candidate_hash),
 			Ok(Ok(Some(code))) => code,
 			Ok(Ok(None)) => {
-				tracing::warn!(
+				gum::warn!(
 					target: LOG_TARGET,
 					"Validation code unavailable for block {:?} in the state of block {:?} (a recent descendant)",
 					candidate.descriptor.relay_parent,
@@ -2297,7 +2295,7 @@ async fn launch_approval(
 				// Validation checked out. Issue an approval command. If the underlying service is unreachable,
 				// then there isn't anything we can do.
 
-				tracing::trace!(target: LOG_TARGET, ?candidate_hash, ?para_id, "Candidate Valid");
+				gum::trace!(target: LOG_TARGET, ?candidate_hash, ?para_id, "Candidate Valid");
 
 				let expected_commitments_hash = candidate.commitments_hash;
 				if commitments.hash() == expected_commitments_hash {
@@ -2322,7 +2320,7 @@ async fn launch_approval(
 				}
 			},
 			Ok(Ok(ValidationResult::Invalid(reason))) => {
-				tracing::warn!(
+				gum::warn!(
 					target: LOG_TARGET,
 					?reason,
 					?candidate_hash,
@@ -2346,7 +2344,7 @@ async fn launch_approval(
 				return ApprovalState::failed(validator_index, candidate_hash)
 			},
 			Ok(Err(e)) => {
-				tracing::error!(
+				gum::error!(
 					target: LOG_TARGET,
 					err = ?e,
 					?candidate_hash,
@@ -2385,7 +2383,7 @@ async fn issue_approval(
 	let candidate_index = match block_entry.candidates().iter().position(|e| e.1 == candidate_hash)
 	{
 		None => {
-			tracing::warn!(
+			gum::warn!(
 				target: LOG_TARGET,
 				"Candidate hash {} is not present in the block entry's candidates for relay block {}",
 				candidate_hash,
@@ -2401,7 +2399,7 @@ async fn issue_approval(
 	let session_info = match state.session_info(block_entry.session()) {
 		Some(s) => s,
 		None => {
-			tracing::warn!(
+			gum::warn!(
 				target: LOG_TARGET,
 				"Missing session info for live block {} in session {}",
 				block_hash,
@@ -2416,7 +2414,7 @@ async fn issue_approval(
 	let candidate_hash = match block_entry.candidate(candidate_index as usize) {
 		Some((_, h)) => h.clone(),
 		None => {
-			tracing::warn!(
+			gum::warn!(
 				target: LOG_TARGET,
 				"Received malformed request to approve out-of-bounds candidate index {} included at block {:?}",
 				candidate_index,
@@ -2431,7 +2429,7 @@ async fn issue_approval(
 	let candidate_entry = match db.load_candidate_entry(&candidate_hash)? {
 		Some(c) => c,
 		None => {
-			tracing::warn!(
+			gum::warn!(
 				target: LOG_TARGET,
 				"Missing entry for candidate index {} included at block {:?}",
 				candidate_index,
@@ -2446,7 +2444,7 @@ async fn issue_approval(
 	let validator_pubkey = match session_info.validators.get(validator_index.0 as usize) {
 		Some(p) => p,
 		None => {
-			tracing::warn!(
+			gum::warn!(
 				target: LOG_TARGET,
 				"Validator index {} out of bounds in session {}",
 				validator_index.0,
@@ -2462,7 +2460,7 @@ async fn issue_approval(
 	let sig = match sign_approval(&state.keystore, &validator_pubkey, candidate_hash, session) {
 		Some(sig) => sig,
 		None => {
-			tracing::warn!(
+			gum::warn!(
 				target: LOG_TARGET,
 				validator_index = ?validator_index,
 				session,
@@ -2485,7 +2483,7 @@ async fn issue_approval(
 	)
 	.expect("Statement just signed; should pass checks; qed");
 
-	tracing::trace!(
+	gum::trace!(
 		target: LOG_TARGET,
 		?candidate_hash,
 		?block_hash,
diff --git a/polkadot/node/core/approval-voting/src/tests.rs b/polkadot/node/core/approval-voting/src/tests.rs
index b40a8ed40b9cfc8152d35b6a07402bc5e144606b..e546668ab1c6b368d273b2e81c518cbfa1a95816 100644
--- a/polkadot/node/core/approval-voting/src/tests.rs
+++ b/polkadot/node/core/approval-voting/src/tests.rs
@@ -512,7 +512,7 @@ fn test_harness<T: Future<Output = VirtualOverseer>>(
 }
 
 async fn overseer_send(overseer: &mut VirtualOverseer, msg: FromOverseer<ApprovalVotingMessage>) {
-	tracing::trace!("Sending message:\n{:?}", &msg);
+	gum::trace!("Sending message:\n{:?}", &msg);
 	overseer
 		.send(msg)
 		.timeout(TIMEOUT)
@@ -525,7 +525,7 @@ async fn overseer_recv(overseer: &mut VirtualOverseer) -> AllMessages {
 		.await
 		.expect(&format!("{:?} is enough to receive messages.", TIMEOUT));
 
-	tracing::trace!("Received message:\n{:?}", &msg);
+	gum::trace!("Received message:\n{:?}", &msg);
 
 	msg
 }
@@ -534,7 +534,7 @@ async fn overseer_recv_with_timeout(
 	overseer: &mut VirtualOverseer,
 	timeout: Duration,
 ) -> Option<AllMessages> {
-	tracing::trace!("Waiting for message...");
+	gum::trace!("Waiting for message...");
 	overseer.recv().timeout(timeout).await
 }
 
diff --git a/polkadot/node/core/av-store/Cargo.toml b/polkadot/node/core/av-store/Cargo.toml
index 1a3788dd5f18bd1d816d6f60d7b91c6498063b8a..0b40b44e7d125bd79f62ff5332a35375f80321b7 100644
--- a/polkadot/node/core/av-store/Cargo.toml
+++ b/polkadot/node/core/av-store/Cargo.toml
@@ -9,7 +9,7 @@ futures = "0.3.21"
 futures-timer = "3.0.2"
 kvdb = "0.11.0"
 thiserror = "1.0.30"
-tracing = "0.1.32"
+gum = { package = "tracing-gum", path = "../../gum" }
 bitvec = "1.0.0"
 
 parity-scale-codec = { version = "3.1.0", features = ["derive"] }
diff --git a/polkadot/node/core/av-store/src/lib.rs b/polkadot/node/core/av-store/src/lib.rs
index 4bdb952bfe55ec231ed0e6b8ca2f22ca48273bda..34b027242339b99983e69801e113e5dae53a6ae4 100644
--- a/polkadot/node/core/av-store/src/lib.rs
+++ b/polkadot/node/core/av-store/src/lib.rs
@@ -163,7 +163,7 @@ fn query_inner<D: Decode>(
 		},
 		Ok(None) => Ok(None),
 		Err(err) => {
-			tracing::warn!(target: LOG_TARGET, ?err, "Error reading from the availability store");
+			gum::warn!(target: LOG_TARGET, ?err, "Error reading from the availability store");
 			Err(err.into())
 		},
 	}
@@ -384,10 +384,10 @@ impl Error {
 		match self {
 			// don't spam the log with spurious errors
 			Self::RuntimeApi(_) | Self::Oneshot(_) => {
-				tracing::debug!(target: LOG_TARGET, err = ?self)
+				gum::debug!(target: LOG_TARGET, err = ?self)
 			},
 			// it's worth reporting otherwise
-			_ => tracing::warn!(target: LOG_TARGET, err = ?self),
+			_ => gum::warn!(target: LOG_TARGET, err = ?self),
 		}
 	}
 }
@@ -544,7 +544,7 @@ where
 				}
 			},
 			Ok(true) => {
-				tracing::info!(target: LOG_TARGET, "received `Conclude` signal, exiting");
+				gum::info!(target: LOG_TARGET, "received `Conclude` signal, exiting");
 				break
 			},
 			Ok(false) => continue,
@@ -721,7 +721,7 @@ fn note_block_backed(
 ) -> Result<(), Error> {
 	let candidate_hash = candidate.hash();
 
-	tracing::debug!(target: LOG_TARGET, ?candidate_hash, "Candidate backed");
+	gum::debug!(target: LOG_TARGET, ?candidate_hash, "Candidate backed");
 
 	if load_meta(db, config, &candidate_hash)?.is_none() {
 		let meta = CandidateMeta {
@@ -753,7 +753,7 @@ fn note_block_included(
 		None => {
 			// This is alarming. We've observed a block being included without ever seeing it backed.
 			// Warn and ignore.
-			tracing::warn!(
+			gum::warn!(
 				target: LOG_TARGET,
 				?candidate_hash,
 				"Candidate included without being backed?",
@@ -762,7 +762,7 @@ fn note_block_included(
 		Some(mut meta) => {
 			let be_block = (BEBlockNumber(block.0), block.1);
 
-			tracing::debug!(target: LOG_TARGET, ?candidate_hash, "Candidate included");
+			gum::debug!(target: LOG_TARGET, ?candidate_hash, "Candidate included");
 
 			meta.state = match meta.state {
 				State::Unavailable(at) => {
@@ -856,7 +856,7 @@ where
 
 			match rx.await? {
 				Err(err) => {
-					tracing::warn!(
+					gum::warn!(
 						target: LOG_TARGET,
 						batch_num,
 						?err,
@@ -866,7 +866,7 @@ where
 					break
 				},
 				Ok(None) => {
-					tracing::warn!(
+					gum::warn!(
 						target: LOG_TARGET,
 						"Availability store was informed that block #{} is finalized, \
 						but chain API has no finalized hash.",
@@ -944,7 +944,7 @@ fn update_blocks_at_finalized_height(
 	for (candidate_hash, is_finalized) in candidates {
 		let mut meta = match load_meta(&subsystem.db, &subsystem.config, &candidate_hash)? {
 			None => {
-				tracing::warn!(
+				gum::warn!(
 					target: LOG_TARGET,
 					"Dangling candidate metadata for {}",
 					candidate_hash,
@@ -1061,7 +1061,7 @@ fn process_message(
 						)? {
 							Some(c) => chunks.push(c),
 							None => {
-								tracing::warn!(
+								gum::warn!(
 									target: LOG_TARGET,
 									?candidate,
 									index,
@@ -1151,7 +1151,7 @@ fn store_chunk(
 		None => return Ok(false), // out of bounds.
 	}
 
-	tracing::debug!(
+	gum::debug!(
 		target: LOG_TARGET,
 		?candidate_hash,
 		chunk_index = %chunk.index.0,
@@ -1217,7 +1217,7 @@ fn store_available_data(
 
 	subsystem.db.write(tx)?;
 
-	tracing::debug!(target: LOG_TARGET, ?candidate_hash, "Stored data and chunks");
+	gum::debug!(target: LOG_TARGET, ?candidate_hash, "Stored data and chunks");
 
 	Ok(())
 }
diff --git a/polkadot/node/core/av-store/src/tests.rs b/polkadot/node/core/av-store/src/tests.rs
index 70374c873bb29c0c71cb6e5d18ce27ef02fce826..76b35607638a62afa0805aa908ecc00304878f77 100644
--- a/polkadot/node/core/av-store/src/tests.rs
+++ b/polkadot/node/core/av-store/src/tests.rs
@@ -146,7 +146,7 @@ fn test_harness<T: Future<Output = VirtualOverseer>>(
 const TIMEOUT: Duration = Duration::from_millis(100);
 
 async fn overseer_send(overseer: &mut VirtualOverseer, msg: AvailabilityStoreMessage) {
-	tracing::trace!(meg = ?msg, "sending message");
+	gum::trace!(msg = ?msg, "sending message");
 	overseer
 		.send(FromOverseer::Communication { msg })
 		.timeout(TIMEOUT)
@@ -159,7 +159,7 @@ async fn overseer_recv(overseer: &mut VirtualOverseer) -> AllMessages {
 		.await
 		.expect(&format!("{:?} is more than enough to receive messages", TIMEOUT));
 
-	tracing::trace!(msg = ?msg, "received message");
+	gum::trace!(msg = ?msg, "received message");
 
 	msg
 }
@@ -168,7 +168,7 @@ async fn overseer_recv_with_timeout(
 	overseer: &mut VirtualOverseer,
 	timeout: Duration,
 ) -> Option<AllMessages> {
-	tracing::trace!("waiting for message...");
+	gum::trace!("waiting for message...");
 	overseer.recv().timeout(timeout).await
 }
 
diff --git a/polkadot/node/core/backing/Cargo.toml b/polkadot/node/core/backing/Cargo.toml
index 0f78ebe925916db6e012036005af7cd77e917cd3..df6b684069f1a5b12da673809c0070721a0309e8 100644
--- a/polkadot/node/core/backing/Cargo.toml
+++ b/polkadot/node/core/backing/Cargo.toml
@@ -14,7 +14,7 @@ polkadot-node-subsystem-util = { path = "../../subsystem-util" }
 erasure-coding = { package = "polkadot-erasure-coding", path = "../../../erasure-coding" }
 statement-table = { package = "polkadot-statement-table", path = "../../../statement-table" }
 bitvec = { version = "1.0.0", default-features = false, features = ["alloc"] }
-tracing = "0.1.32"
+gum = { package = "tracing-gum", path = "../../gum" }
 thiserror = "1.0.30"
 
 [dev-dependencies]
diff --git a/polkadot/node/core/backing/src/lib.rs b/polkadot/node/core/backing/src/lib.rs
index 0d3008bbeef8053051c0afe1d09ee016561daac3..d91968a2fa4c5639003afa2f2f1382c62a272c61 100644
--- a/polkadot/node/core/backing/src/lib.rs
+++ b/polkadot/node/core/backing/src/lib.rs
@@ -276,7 +276,7 @@ fn table_attested_to_backed(
 			validator_indices.set(position, true);
 			vote_positions.push((orig_idx, position));
 		} else {
-			tracing::warn!(
+			gum::warn!(
 				target: LOG_TARGET,
 				"Logic error: Validity vote from table does not correspond to group",
 			);
@@ -463,7 +463,7 @@ async fn validate_and_make_available(
 
 	let res = match v {
 		ValidationResult::Valid(commitments, validation_data) => {
-			tracing::debug!(
+			gum::debug!(
 				target: LOG_TARGET,
 				candidate_hash = ?candidate.hash(),
 				"Validation successful",
@@ -471,7 +471,7 @@ async fn validate_and_make_available(
 
 			// If validation produces a new set of commitments, we vote the candidate as invalid.
 			if commitments.hash() != expected_commitments_hash {
-				tracing::debug!(
+				gum::debug!(
 					target: LOG_TARGET,
 					candidate_hash = ?candidate.hash(),
 					actual_commitments = ?commitments,
@@ -493,7 +493,7 @@ async fn validate_and_make_available(
 				match erasure_valid {
 					Ok(()) => Ok((candidate, commitments, pov.clone())),
 					Err(InvalidErasureRoot) => {
-						tracing::debug!(
+						gum::debug!(
 							target: LOG_TARGET,
 							candidate_hash = ?candidate.hash(),
 							actual_commitments = ?commitments,
@@ -505,7 +505,7 @@ async fn validate_and_make_available(
 			}
 		},
 		ValidationResult::Invalid(reason) => {
-			tracing::debug!(
+			gum::debug!(
 				target: LOG_TARGET,
 				candidate_hash = ?candidate.hash(),
 				reason = ?reason,
@@ -621,7 +621,7 @@ impl CandidateBackingJob {
 						self.kick_off_validation_work(sender, attesting, c_span).await?
 					}
 				} else {
-					tracing::warn!(
+					gum::warn!(
 						target: LOG_TARGET,
 						"AttestNoPoV was triggered without fallback being available."
 					);
@@ -647,13 +647,13 @@ impl CandidateBackingJob {
 			let bg = async move {
 				if let Err(e) = validate_and_make_available(params).await {
 					if let Error::BackgroundValidationMpsc(error) = e {
-						tracing::debug!(
+						gum::debug!(
 							target: LOG_TARGET,
 							?error,
 							"Mpsc background validation mpsc died during validation- leaf no longer active?"
 						);
 					} else {
-						tracing::error!(
+						gum::error!(
 							target: LOG_TARGET,
 							"Failed to validate and make available: {:?}",
 							e
@@ -699,7 +699,7 @@ impl CandidateBackingJob {
 
 		span.as_mut().map(|span| span.add_follows_from(parent_span));
 
-		tracing::debug!(
+		gum::debug!(
 			target: LOG_TARGET,
 			candidate_hash = ?candidate_hash,
 			candidate_receipt = ?candidate,
@@ -763,7 +763,7 @@ impl CandidateBackingJob {
 		statement: &SignedFullStatement,
 		root_span: &jaeger::Span,
 	) -> Result<Option<TableSummary>, Error> {
-		tracing::debug!(
+		gum::debug!(
 			target: LOG_TARGET,
 			statement = ?statement.payload().to_compact(),
 			validator_index = statement.validator_index().0,
@@ -784,7 +784,7 @@ impl CandidateBackingJob {
 			.dispatch_new_statement_to_dispute_coordinator(sender, candidate_hash, &statement)
 			.await
 		{
-			tracing::warn!(
+			gum::warn!(
 				target: LOG_TARGET,
 				session_index = ?self.session_index,
 				relay_parent = ?self.parent,
@@ -809,7 +809,7 @@ impl CandidateBackingJob {
 				let span = self.remove_unbacked_span(&candidate_hash);
 
 				if let Some(backed) = table_attested_to_backed(attested, &self.table_context) {
-					tracing::debug!(
+					gum::debug!(
 						target: LOG_TARGET,
 						candidate_hash = ?candidate_hash,
 						relay_parent = ?self.parent,
@@ -904,11 +904,11 @@ impl CandidateBackingJob {
 
 			match confirmation_rx.await {
 				Err(oneshot::Canceled) => {
-					tracing::debug!(target: LOG_TARGET, "Dispute coordinator confirmation lost",)
+					gum::debug!(target: LOG_TARGET, "Dispute coordinator confirmation lost",)
 				},
 				Ok(ImportStatementsResult::ValidImport) => {},
 				Ok(ImportStatementsResult::InvalidImport) => {
-					tracing::warn!(target: LOG_TARGET, "Failed to import statements of validity",)
+					gum::warn!(target: LOG_TARGET, "Failed to import statements of validity",)
 				},
 			}
 		}
@@ -935,7 +935,7 @@ impl CandidateBackingJob {
 
 				// Sanity check that candidate is from our assignment.
 				if Some(candidate.descriptor().para_id) != self.assignment {
-					tracing::debug!(
+					gum::debug!(
 						target: LOG_TARGET,
 						our_assignment = ?self.assignment,
 						collation = ?candidate.descriptor().para_id,
@@ -1006,7 +1006,7 @@ impl CandidateBackingJob {
 
 		let descriptor = attesting.candidate.descriptor().clone();
 
-		tracing::debug!(
+		gum::debug!(
 			target: LOG_TARGET,
 			candidate_hash = ?candidate_hash,
 			candidate_receipt = ?attesting.candidate,
@@ -1196,7 +1196,7 @@ impl util::JobTrait for CandidateBackingJob {
 					match $x {
 						Ok(x) => x,
 						Err(e) => {
-							tracing::warn!(
+							gum::warn!(
 								target: LOG_TARGET,
 								err = ?e,
 								"Failed to fetch runtime API data for job",
@@ -1241,7 +1241,7 @@ impl util::JobTrait for CandidateBackingJob {
 					Ok(v) => Some(v),
 					Err(util::Error::NotAValidator) => None,
 					Err(e) => {
-						tracing::warn!(
+						gum::warn!(
 							target: LOG_TARGET,
 							err = ?e,
 							"Cannot participate in candidate backing",
diff --git a/polkadot/node/core/bitfield-signing/Cargo.toml b/polkadot/node/core/bitfield-signing/Cargo.toml
index 27b93adcf26077677326a878fd553ae5d6e170d7..423ed649bd27150b4c2aed0c1072070f0890c3d5 100644
--- a/polkadot/node/core/bitfield-signing/Cargo.toml
+++ b/polkadot/node/core/bitfield-signing/Cargo.toml
@@ -6,7 +6,7 @@ edition = "2021"
 
 [dependencies]
 futures = "0.3.21"
-tracing = "0.1.32"
+gum = { package = "tracing-gum", path = "../../gum" }
 polkadot-primitives = { path = "../../../primitives" }
 polkadot-node-subsystem = { path = "../../subsystem" }
 polkadot-node-subsystem-util = { path = "../../subsystem-util" }
diff --git a/polkadot/node/core/bitfield-signing/src/lib.rs b/polkadot/node/core/bitfield-signing/src/lib.rs
index 4bb2c51828db9d8feeb9a4f7c3f1101f88ae4435..9e7ccd66c1c6b3bdd38ef914273afa5ed3bb87c2 100644
--- a/polkadot/node/core/bitfield-signing/src/lib.rs
+++ b/polkadot/node/core/bitfield-signing/src/lib.rs
@@ -106,7 +106,7 @@ async fn get_core_availability(
 
 		let res = rx.await.map_err(Into::into);
 
-		tracing::trace!(
+		gum::trace!(
 			target: LOG_TARGET,
 			para_id = %core.para_id(),
 			availability = ?res,
@@ -172,7 +172,7 @@ async fn construct_availability_bitfield(
 	.await?;
 
 	let core_bits = FromIterator::from_iter(results.into_iter());
-	tracing::debug!(
+	gum::debug!(
 		target: LOG_TARGET,
 		?relay_parent,
 		"Signing Bitfield for {core_count} cores: {core_bits}",
@@ -247,7 +247,7 @@ impl JobTrait for BitfieldSigningJob {
 		let metrics = metrics.clone();
 		async move {
 			if let LeafStatus::Stale = leaf.status {
-				tracing::debug!(
+				gum::debug!(
 					target: LOG_TARGET,
 					hash = ?leaf.hash,
 					block_number =  ?leaf.number,
@@ -288,7 +288,7 @@ impl JobTrait for BitfieldSigningJob {
 			{
 				Err(Error::Runtime(runtime_err)) => {
 					// Don't take down the node on runtime API errors.
-					tracing::warn!(target: LOG_TARGET, err = ?runtime_err, "Encountered a runtime API error");
+					gum::warn!(target: LOG_TARGET, err = ?runtime_err, "Encountered a runtime API error");
 					return Ok(())
 				},
 				Err(err) => return Err(err),
@@ -305,7 +305,7 @@ impl JobTrait for BitfieldSigningJob {
 			{
 				Some(b) => b,
 				None => {
-					tracing::error!(
+					gum::error!(
 						target: LOG_TARGET,
 						"Key was found at construction, but while signing it could not be found.",
 					);
diff --git a/polkadot/node/core/candidate-validation/Cargo.toml b/polkadot/node/core/candidate-validation/Cargo.toml
index b3de8fa6d9d61f9389190320974480469517beab..7502d124046bd3c77b202cd8b624816fb3936552 100644
--- a/polkadot/node/core/candidate-validation/Cargo.toml
+++ b/polkadot/node/core/candidate-validation/Cargo.toml
@@ -7,7 +7,7 @@ edition = "2021"
 [dependencies]
 async-trait = "0.1.52"
 futures = "0.3.21"
-tracing = "0.1.32"
+gum = { package = "tracing-gum", path = "../../gum" }
 
 sp-maybe-compressed-blob = { package = "sp-maybe-compressed-blob", git = "https://github.com/paritytech/substrate", branch = "master" }
 parity-scale-codec = { version = "3.1.0", default-features = false, features = ["bit-vec", "derive"] }
diff --git a/polkadot/node/core/candidate-validation/src/lib.rs b/polkadot/node/core/candidate-validation/src/lib.rs
index 2dafe481b1b702b4199207f62415e9a9f8e1f3e7..8256690bac47080e0f3343b8af942963839802de 100644
--- a/polkadot/node/core/candidate-validation/src/lib.rs
+++ b/polkadot/node/core/candidate-validation/src/lib.rs
@@ -242,13 +242,13 @@ where
 	receiver
 		.await
 		.map_err(|_| {
-			tracing::debug!(target: LOG_TARGET, ?relay_parent, "Runtime API request dropped");
+			gum::debug!(target: LOG_TARGET, ?relay_parent, "Runtime API request dropped");
 
 			RuntimeRequestFailed
 		})
 		.and_then(|res| {
 			res.map_err(|e| {
-				tracing::debug!(
+				gum::debug!(
 					target: LOG_TARGET,
 					?relay_parent,
 					err = ?e,
@@ -295,7 +295,7 @@ where
 				// during pre-checking voting the relay-chain will pin the code. In case the code
 				// actually is not there, we issue failed since this looks more like a bug. This
 				// leads to us abstaining.
-				tracing::warn!(
+				gum::warn!(
 					target: LOG_TARGET,
 					?relay_parent,
 					?validation_code_hash,
@@ -311,7 +311,7 @@ where
 	) {
 		Ok(code) => Pvf::from_code(code.into_owned()),
 		Err(e) => {
-			tracing::debug!(target: LOG_TARGET, err=?e, "precheck: cannot decompress validation code");
+			gum::debug!(target: LOG_TARGET, err=?e, "precheck: cannot decompress validation code");
 			return PreCheckOutcome::Invalid
 		},
 	};
@@ -481,7 +481,7 @@ async fn validate_candidate_exhaustive(
 	let _timer = metrics.time_validate_candidate_exhaustive();
 
 	let validation_code_hash = validation_code.hash();
-	tracing::debug!(
+	gum::debug!(
 		target: LOG_TARGET,
 		?validation_code_hash,
 		para_id = ?descriptor.para_id,
@@ -503,7 +503,7 @@ async fn validate_candidate_exhaustive(
 	) {
 		Ok(code) => code,
 		Err(e) => {
-			tracing::debug!(target: LOG_TARGET, err=?e, "Invalid validation code");
+			gum::debug!(target: LOG_TARGET, err=?e, "Invalid validation code");
 
 			// If the validation code is invalid, the candidate certainly is.
 			return Ok(ValidationResult::Invalid(InvalidCandidate::CodeDecompressionFailure))
@@ -514,7 +514,7 @@ async fn validate_candidate_exhaustive(
 		match sp_maybe_compressed_blob::decompress(&pov.block_data.0, POV_BOMB_LIMIT) {
 			Ok(block_data) => BlockData(block_data.to_vec()),
 			Err(e) => {
-				tracing::debug!(target: LOG_TARGET, err=?e, "Invalid PoV code");
+				gum::debug!(target: LOG_TARGET, err=?e, "Invalid PoV code");
 
 				// If the PoV is invalid, the candidate certainly is.
 				return Ok(ValidationResult::Invalid(InvalidCandidate::PoVDecompressionFailure))
@@ -533,7 +533,7 @@ async fn validate_candidate_exhaustive(
 		.await;
 
 	if let Err(ref e) = result {
-		tracing::debug!(
+		gum::debug!(
 			target: LOG_TARGET,
 			error = ?e,
 			"Failed to validate candidate",
diff --git a/polkadot/node/core/chain-api/Cargo.toml b/polkadot/node/core/chain-api/Cargo.toml
index d4b712a935ee000c04ec44e6b5f5256e7af057f5..5fb36aa542bc77ab6501bd76fe78c504fe7ef0e6 100644
--- a/polkadot/node/core/chain-api/Cargo.toml
+++ b/polkadot/node/core/chain-api/Cargo.toml
@@ -6,7 +6,7 @@ edition = "2021"
 
 [dependencies]
 futures = "0.3.21"
-tracing = "0.1.32"
+gum = { package = "tracing-gum", path = "../../gum" }
 sp-blockchain = { git = "https://github.com/paritytech/substrate", branch = "master" }
 polkadot-primitives = { path = "../../../primitives" }
 polkadot-subsystem = { package = "polkadot-node-subsystem", path = "../../subsystem" }
diff --git a/polkadot/node/core/chain-api/src/lib.rs b/polkadot/node/core/chain-api/src/lib.rs
index 307a6620ecb9eec35e0fbc32392670eb6c73b6a5..c7533021f6f8b939255db31194c81fdc85ba3719 100644
--- a/polkadot/node/core/chain-api/src/lib.rs
+++ b/polkadot/node/core/chain-api/src/lib.rs
@@ -129,7 +129,7 @@ where
 				},
 				ChainApiMessage::Ancestors { hash, k, response_channel } => {
 					let _timer = subsystem.metrics.time_ancestors();
-					tracing::span!(tracing::Level::TRACE, "ChainApiMessage::Ancestors", subsystem=LOG_TARGET, hash=%hash, k=k);
+					gum::trace!(target: LOG_TARGET, hash=%hash, k=k, "ChainApiMessage::Ancestors");
 
 					let mut hash = hash;
 
diff --git a/polkadot/node/core/chain-selection/Cargo.toml b/polkadot/node/core/chain-selection/Cargo.toml
index 83662995756eb23e1bf1e4cf48eea4c8c387628f..f2f87f33208bbe7ffb9e62bf73447b0149d2d175 100644
--- a/polkadot/node/core/chain-selection/Cargo.toml
+++ b/polkadot/node/core/chain-selection/Cargo.toml
@@ -8,7 +8,7 @@ edition = "2021"
 [dependencies]
 futures = "0.3.21"
 futures-timer = "3"
-tracing = "0.1.32"
+gum = { package = "tracing-gum", path = "../../gum" }
 polkadot-primitives = { path = "../../../primitives" }
 polkadot-node-primitives = { path = "../../primitives" }
 polkadot-node-subsystem = { path = "../../subsystem" }
diff --git a/polkadot/node/core/chain-selection/src/lib.rs b/polkadot/node/core/chain-selection/src/lib.rs
index 3453224239742df9f6ee2b88dddb95b18e295a34..172de99d34ee256bd86b19c18ef71ba83e401973 100644
--- a/polkadot/node/core/chain-selection/src/lib.rs
+++ b/polkadot/node/core/chain-selection/src/lib.rs
@@ -207,9 +207,9 @@ impl Error {
 	fn trace(&self) {
 		match self {
 			// don't spam the log with spurious errors
-			Self::Oneshot(_) => tracing::debug!(target: LOG_TARGET, err = ?self),
+			Self::Oneshot(_) => gum::debug!(target: LOG_TARGET, err = ?self),
 			// it's worth reporting otherwise
-			_ => tracing::warn!(target: LOG_TARGET, err = ?self),
+			_ => gum::warn!(target: LOG_TARGET, err = ?self),
 		}
 	}
 }
@@ -235,7 +235,7 @@ impl Clock for SystemClock {
 		match SystemTime::now().duration_since(UNIX_EPOCH) {
 			Ok(d) => d.as_secs(),
 			Err(e) => {
-				tracing::warn!(
+				gum::warn!(
 					target: LOG_TARGET,
 					err = ?e,
 					"Current time is before unix epoch. Validation will not work correctly."
@@ -356,7 +356,7 @@ async fn run<Context, B>(
 				break
 			},
 			Ok(()) => {
-				tracing::info!(target: LOG_TARGET, "received `Conclude` signal, exiting");
+				gum::info!(target: LOG_TARGET, "received `Conclude` signal, exiting");
 				break
 			},
 		}
@@ -444,7 +444,7 @@ async fn fetch_finalized(
 	let number = match number_rx.await? {
 		Ok(number) => number,
 		Err(err) => {
-			tracing::warn!(target: LOG_TARGET, ?err, "Fetching finalized number failed");
+			gum::warn!(target: LOG_TARGET, ?err, "Fetching finalized number failed");
 			return Ok(None)
 		},
 	};
@@ -455,16 +455,11 @@ async fn fetch_finalized(
 
 	match hash_rx.await? {
 		Err(err) => {
-			tracing::warn!(
-				target: LOG_TARGET,
-				number,
-				?err,
-				"Fetching finalized block number failed"
-			);
+			gum::warn!(target: LOG_TARGET, number, ?err, "Fetching finalized block number failed");
 			Ok(None)
 		},
 		Ok(None) => {
-			tracing::warn!(target: LOG_TARGET, number, "Missing hash for finalized block number");
+			gum::warn!(target: LOG_TARGET, number, "Missing hash for finalized block number");
 			Ok(None)
 		},
 		Ok(Some(h)) => Ok(Some((h, number))),
@@ -479,7 +474,7 @@ async fn fetch_header(
 	ctx.send_message(ChainApiMessage::BlockHeader(hash, tx)).await;
 
 	Ok(rx.await?.unwrap_or_else(|err| {
-		tracing::warn!(target: LOG_TARGET, ?hash, ?err, "Missing hash for finalized block number");
+		gum::warn!(target: LOG_TARGET, ?hash, ?err, "Missing hash for finalized block number");
 		None
 	}))
 }
@@ -494,7 +489,7 @@ async fn fetch_block_weight(
 	let res = rx.await?;
 
 	Ok(res.unwrap_or_else(|err| {
-		tracing::warn!(target: LOG_TARGET, ?hash, ?err, "Missing hash for finalized block number");
+		gum::warn!(target: LOG_TARGET, ?hash, ?err, "Missing hash for finalized block number");
 		None
 	}))
 }
@@ -518,7 +513,7 @@ async fn handle_active_leaf(
 
 	let header = match fetch_header(ctx, hash).await? {
 		None => {
-			tracing::warn!(target: LOG_TARGET, ?hash, "Missing header for new head");
+			gum::warn!(target: LOG_TARGET, ?hash, "Missing header for new head");
 			return Ok(Vec::new())
 		},
 		Some(h) => h,
@@ -540,7 +535,7 @@ async fn handle_active_leaf(
 	for (hash, header) in new_blocks.into_iter().rev() {
 		let weight = match fetch_block_weight(ctx, hash).await? {
 			None => {
-				tracing::warn!(
+				gum::warn!(
 					target: LOG_TARGET,
 					?hash,
 					"Missing block weight for new head. Skipping chain.",
@@ -580,7 +575,7 @@ fn extract_reversion_logs(header: &Header) -> Vec<BlockNumber> {
 		.enumerate()
 		.filter_map(|(i, d)| match ConsensusLog::from_digest_item(d) {
 			Err(e) => {
-				tracing::warn!(
+				gum::warn!(
 					target: LOG_TARGET,
 					err = ?e,
 					index = i,
@@ -592,7 +587,7 @@ fn extract_reversion_logs(header: &Header) -> Vec<BlockNumber> {
 			},
 			Ok(Some(ConsensusLog::Revert(b))) if b < number => Some(b),
 			Ok(Some(ConsensusLog::Revert(b))) => {
-				tracing::warn!(
+				gum::warn!(
 					target: LOG_TARGET,
 					revert_target = b,
 					block_number = number,
diff --git a/polkadot/node/core/chain-selection/src/tree.rs b/polkadot/node/core/chain-selection/src/tree.rs
index 4645ec78a27f26646343418f00dd6b7ba294eea7..23613c4b607c020f628ab10df4304c4f0535c8b9 100644
--- a/polkadot/node/core/chain-selection/src/tree.rs
+++ b/polkadot/node/core/chain-selection/src/tree.rs
@@ -140,7 +140,7 @@ fn propagate_viability_update(
 			BlockEntryRef::Explicit(entry) => entry,
 			BlockEntryRef::Hash(hash) => match backend.load_block_entry(&hash)? {
 				None => {
-					tracing::warn!(
+					gum::warn!(
 						target: LOG_TARGET,
 						block_hash = ?hash,
 						"Missing expected block entry"
@@ -360,7 +360,7 @@ fn apply_reversions(
 		let mut ancestor_entry =
 			match load_ancestor(backend, block_hash, block_number, revert_number)? {
 				None => {
-					tracing::warn!(
+					gum::warn!(
 						target: LOG_TARGET,
 						?block_hash,
 						block_number,
@@ -373,7 +373,7 @@ fn apply_reversions(
 					continue
 				},
 				Some(ancestor_entry) => {
-					tracing::info!(
+					gum::info!(
 						target: LOG_TARGET,
 						?block_hash,
 						block_number,
@@ -480,7 +480,7 @@ pub(super) fn finalize_block<'a, B: Backend + 'a>(
 
 			propagate_viability_update(&mut backend, child)?;
 		} else {
-			tracing::debug!(
+			gum::debug!(
 				target: LOG_TARGET,
 				?finalized_hash,
 				finalized_number,
@@ -515,7 +515,7 @@ pub(super) fn approve_block(
 			backend.write_block_entry(entry);
 		}
 	} else {
-		tracing::debug!(
+		gum::debug!(
 			target: LOG_TARGET,
 			block_hash = ?approved_hash,
 			"Missing entry for freshly-approved block. Ignoring"
diff --git a/polkadot/node/core/dispute-coordinator/Cargo.toml b/polkadot/node/core/dispute-coordinator/Cargo.toml
index 4d3aeb5564a6a1a95984f2806c750000fb9cea41..3c6f837ce7ddc34f7e29284c429ba2253ecb4b53 100644
--- a/polkadot/node/core/dispute-coordinator/Cargo.toml
+++ b/polkadot/node/core/dispute-coordinator/Cargo.toml
@@ -6,7 +6,7 @@ edition = "2021"
 
 [dependencies]
 futures = "0.3.21"
-tracing = "0.1.32"
+gum = { package = "tracing-gum", path = "../../gum" }
 parity-scale-codec = "3.1.0"
 kvdb = "0.11.0"
 thiserror = "1.0.30"
diff --git a/polkadot/node/core/dispute-coordinator/src/dummy.rs b/polkadot/node/core/dispute-coordinator/src/dummy.rs
index c16d0c762cde864ea0d4639d4da34be0521dfa03..a6a0bec8d5941768903e450998f92a371d118a0e 100644
--- a/polkadot/node/core/dispute-coordinator/src/dummy.rs
+++ b/polkadot/node/core/dispute-coordinator/src/dummy.rs
@@ -65,14 +65,14 @@ where
 		let res = run_until_error(&mut ctx, &subsystem).await;
 		match res.into_nested() {
 			Err(fatal) => {
-				tracing::error!(target: LOG_TARGET, "Observed fatal issue: {:?}", fatal);
+				gum::error!(target: LOG_TARGET, "Observed fatal issue: {:?}", fatal);
 				break
 			},
 			Ok(Err(jfyi)) => {
-				tracing::debug!(target: LOG_TARGET, "Observed issue: {:?}", jfyi);
+				gum::debug!(target: LOG_TARGET, "Observed issue: {:?}", jfyi);
 			},
 			Ok(Ok(())) => {
-				tracing::info!(target: LOG_TARGET, "Received `Conclude` signal, exiting");
+				gum::info!(target: LOG_TARGET, "Received `Conclude` signal, exiting");
 				break
 			},
 		}
diff --git a/polkadot/node/core/dispute-coordinator/src/error.rs b/polkadot/node/core/dispute-coordinator/src/error.rs
index f2445640a2395ca28cc8d657d3a04ccbfab46301..9415bc1a36d0cf6997a38f7fdeb5c1d3b1a752eb 100644
--- a/polkadot/node/core/dispute-coordinator/src/error.rs
+++ b/polkadot/node/core/dispute-coordinator/src/error.rs
@@ -119,10 +119,10 @@ impl JfyiError {
 		match self {
 			// don't spam the log with spurious errors
 			Self::Runtime(_) | Self::Oneshot(_) => {
-				tracing::debug!(target: LOG_TARGET, error = ?self)
+				gum::debug!(target: LOG_TARGET, error = ?self)
 			},
 			// it's worth reporting otherwise
-			_ => tracing::warn!(target: LOG_TARGET, error = ?self),
+			_ => gum::warn!(target: LOG_TARGET, error = ?self),
 		}
 	}
 }
diff --git a/polkadot/node/core/dispute-coordinator/src/real/initialized.rs b/polkadot/node/core/dispute-coordinator/src/real/initialized.rs
index 779ec57bfbc4e467170a3e380f40421297c43119..ceb59e0e869d0a67bc2ca5c8e7b1a0c512306823 100644
--- a/polkadot/node/core/dispute-coordinator/src/real/initialized.rs
+++ b/polkadot/node/core/dispute-coordinator/src/real/initialized.rs
@@ -149,7 +149,7 @@ impl Initialized {
 				)
 				.await;
 			if let Ok(()) = res {
-				tracing::info!(target: LOG_TARGET, "received `Conclude` signal, exiting");
+				gum::info!(target: LOG_TARGET, "received `Conclude` signal, exiting");
 				return Ok(())
 			}
 			log_error(res)?;
@@ -268,7 +268,7 @@ impl Initialized {
 				.await
 			{
 				Err(e) => {
-					tracing::warn!(
+					gum::warn!(
 						target: LOG_TARGET,
 						err = ?e,
 						"Failed to update session cache for disputes",
@@ -283,11 +283,7 @@ impl Initialized {
 					self.error = None;
 					let session = window_end;
 					if self.highest_session < session {
-						tracing::trace!(
-							target: LOG_TARGET,
-							session,
-							"Observed new session. Pruning"
-						);
+						gum::trace!(target: LOG_TARGET, session, "Observed new session. Pruning");
 
 						self.highest_session = session;
 
@@ -304,7 +300,7 @@ impl Initialized {
 					.scrape_on_chain_votes(ctx, overlay_db, new_leaf.hash, now)
 					.await
 					.map_err(|err| {
-						tracing::warn!(
+						gum::warn!(
 							target: LOG_TARGET,
 							"Skipping scraping block #{}({}) due to error: {}",
 							new_leaf.number,
@@ -335,7 +331,7 @@ impl Initialized {
 					)
 					.await
 					.unwrap_or_else(|err| {
-						tracing::debug!(
+						gum::debug!(
 							target: LOG_TARGET,
 							activated_leaf = ?new_leaf,
 							error = ?err,
@@ -347,7 +343,7 @@ impl Initialized {
 					})
 				},
 				Err(err) => {
-					tracing::debug!(
+					gum::debug!(
 						target: LOG_TARGET,
 						activated_leaf = ?new_leaf,
 						error = ?err,
@@ -364,7 +360,7 @@ impl Initialized {
 			for ancestor in ancestors {
 				let _ = self.scrape_on_chain_votes(ctx, overlay_db, ancestor, now).await.map_err(
 					|err| {
-						tracing::warn!(
+						gum::warn!(
 							target: LOG_TARGET,
 							hash = ?ancestor,
 							error = ?err,
@@ -405,14 +401,14 @@ impl Initialized {
 			match rx.await {
 				Ok(Ok(Some(val))) => val,
 				Ok(Ok(None)) => {
-					tracing::trace!(
+					gum::trace!(
 						target: LOG_TARGET,
 						relay_parent = ?new_leaf,
 						"No on chain votes stored for relay chain leaf");
 					return Ok(())
 				},
 				Ok(Err(e)) => {
-					tracing::debug!(
+					gum::debug!(
 						target: LOG_TARGET,
 						relay_parent = ?new_leaf,
 						error = ?e,
@@ -420,7 +416,7 @@ impl Initialized {
 					return Ok(())
 				},
 				Err(e) => {
-					tracing::debug!(
+					gum::debug!(
 						target: LOG_TARGET,
 						relay_parent = ?new_leaf,
 						error = ?e,
@@ -446,7 +442,7 @@ impl Initialized {
 			if let Some(session_info) = self.rolling_session_window.session_info(session) {
 				session_info.clone()
 			} else {
-				tracing::warn!(
+				gum::warn!(
 					target: LOG_TARGET,
 					relay_parent = ?new_leaf,
 					?session,
@@ -466,7 +462,7 @@ impl Initialized {
 						.validators
 						.get(validator_index.0 as usize)
 						.or_else(|| {
-							tracing::error!(
+							gum::error!(
 							target: LOG_TARGET,
 							relay_parent = ?new_leaf,
 							"Missing public key for validator {:?}",
@@ -506,11 +502,11 @@ impl Initialized {
 				)
 				.await?;
 			match import_result {
-				ImportStatementsResult::ValidImport => tracing::trace!(target: LOG_TARGET,
+				ImportStatementsResult::ValidImport => gum::trace!(target: LOG_TARGET,
 																	   relay_parent = ?new_leaf,
 																	   ?session,
 																	   "Imported backing vote from on-chain"),
-				ImportStatementsResult::InvalidImport => tracing::warn!(target: LOG_TARGET,
+				ImportStatementsResult::InvalidImport => gum::warn!(target: LOG_TARGET,
 																		relay_parent = ?new_leaf,
 																		?session,
 																		"Attempted import of on-chain backing votes failed"),
@@ -537,7 +533,7 @@ impl Initialized {
 					{
 						session_info.clone()
 					} else {
-						tracing::warn!(
+						gum::warn!(
 								target: LOG_TARGET,
 								relay_parent = ?new_leaf,
 								?session,
@@ -549,7 +545,7 @@ impl Initialized {
 						.validators
 						.get(validator_index.0 as usize)
 						.or_else(|| {
-							tracing::error!(
+							gum::error!(
 								target: LOG_TARGET,
 								relay_parent = ?new_leaf,
 								?session,
@@ -584,12 +580,12 @@ impl Initialized {
 				)
 				.await?;
 			match import_result {
-				ImportStatementsResult::ValidImport => tracing::trace!(target: LOG_TARGET,
+				ImportStatementsResult::ValidImport => gum::trace!(target: LOG_TARGET,
 																	   relay_parent = ?new_leaf,
 																	   ?candidate_hash,
 																	   ?session,
 																	   "Imported statement of concluded dispute from on-chain"),
-				ImportStatementsResult::InvalidImport => tracing::warn!(target: LOG_TARGET,
+				ImportStatementsResult::InvalidImport => gum::warn!(target: LOG_TARGET,
 																		relay_parent = ?new_leaf,
 																		?candidate_hash,
 																		?session,
@@ -679,7 +675,7 @@ impl Initialized {
 					{
 						query_output.push((session_index, candidate_hash, v.into()));
 					} else {
-						tracing::debug!(
+						gum::debug!(
 							target: LOG_TARGET,
 							session_index,
 							"No votes found for candidate",
@@ -753,7 +749,7 @@ impl Initialized {
 
 		let session_info = match self.rolling_session_window.session_info(session) {
 			None => {
-				tracing::warn!(
+				gum::warn!(
 					target: LOG_TARGET,
 					session,
 					"Importing statement lacks info for session which has an active dispute",
@@ -794,7 +790,7 @@ impl Initialized {
 						true,
 					)
 				} else {
-					tracing::warn!(
+					gum::warn!(
 						target: LOG_TARGET,
 						session,
 						"Not seen backing vote for candidate which has an active dispute",
@@ -837,7 +833,7 @@ impl Initialized {
 				.get(val_index.0 as usize)
 				.map_or(true, |v| v != statement.validator_public())
 			{
-				tracing::debug!(
+				gum::debug!(
 				target: LOG_TARGET,
 				?val_index,
 				session,
@@ -907,7 +903,7 @@ impl Initialized {
 			}
 			// Only validity stating votes or validator had free spam slot?
 			if !free_spam_slots_available {
-				tracing::debug!(
+				gum::debug!(
 					target: LOG_TARGET,
 					?candidate_hash,
 					?session,
@@ -931,7 +927,7 @@ impl Initialized {
 		// Participate in dispute if the imported vote was not local, we did not vote before either
 		// and we actually have keys to issue a local vote.
 		if !is_local && !voted_already && is_disputed && !controlled_indices.is_empty() {
-			tracing::trace!(
+			gum::trace!(
 				target: LOG_TARGET,
 				candidate_hash = ?candidate_receipt.hash(),
 				priority = ?comparator.is_some(),
@@ -959,7 +955,7 @@ impl Initialized {
 
 		let status = if is_disputed {
 			let status = recent_disputes.entry((session, candidate_hash)).or_insert_with(|| {
-				tracing::info!(
+				gum::info!(
 					target: LOG_TARGET,
 					?candidate_hash,
 					session,
@@ -994,7 +990,7 @@ impl Initialized {
 			}
 
 			if !was_concluded_valid && concluded_valid {
-				tracing::info!(
+				gum::info!(
 					target: LOG_TARGET,
 					?candidate_hash,
 					session,
@@ -1004,7 +1000,7 @@ impl Initialized {
 			}
 
 			if !was_concluded_invalid && concluded_invalid {
-				tracing::info!(
+				gum::info!(
 					target: LOG_TARGET,
 					?candidate_hash,
 					session,
@@ -1038,7 +1034,7 @@ impl Initialized {
 		// Load session info.
 		let info = match self.rolling_session_window.session_info(session) {
 			None => {
-				tracing::warn!(
+				gum::warn!(
 					target: LOG_TARGET,
 					session,
 					"Missing info for session which has an active dispute",
@@ -1088,7 +1084,7 @@ impl Initialized {
 				},
 				Ok(None) => {},
 				Err(e) => {
-					tracing::error!(
+					gum::error!(
 					target: LOG_TARGET,
 					err = ?e,
 					"Encountered keystore error while signing dispute statement",
@@ -1102,11 +1098,7 @@ impl Initialized {
 			let dispute_message =
 				match make_dispute_message(info, &votes, statement.clone(), *index) {
 					Err(err) => {
-						tracing::debug!(
-							target: LOG_TARGET,
-							?err,
-							"Creating dispute message failed."
-						);
+						gum::debug!(target: LOG_TARGET, ?err, "Creating dispute message failed.");
 						continue
 					},
 					Ok(dispute_message) => dispute_message,
@@ -1130,7 +1122,7 @@ impl Initialized {
 				.await?
 			{
 				ImportStatementsResult::InvalidImport => {
-					tracing::error!(
+					gum::error!(
 						target: LOG_TARGET,
 						?candidate_hash,
 						?session,
@@ -1138,7 +1130,7 @@ impl Initialized {
 					);
 				},
 				ImportStatementsResult::ValidImport => {
-					tracing::trace!(
+					gum::trace!(
 						target: LOG_TARGET,
 						?candidate_hash,
 						?session,
diff --git a/polkadot/node/core/dispute-coordinator/src/real/mod.rs b/polkadot/node/core/dispute-coordinator/src/real/mod.rs
index b962c6f488b244adf6d111d3a6896224f2134862..9304ba620b078111454e5858853e77fc82866ccf 100644
--- a/polkadot/node/core/dispute-coordinator/src/real/mod.rs
+++ b/polkadot/node/core/dispute-coordinator/src/real/mod.rs
@@ -194,7 +194,7 @@ impl DisputeCoordinatorSubsystem {
 			let (first_leaf, rolling_session_window) = match get_rolling_session_window(ctx).await {
 				Ok(Some(update)) => update,
 				Ok(None) => {
-					tracing::info!(target: LOG_TARGET, "received `Conclude` signal, exiting");
+					gum::info!(target: LOG_TARGET, "received `Conclude` signal, exiting");
 					return Ok(None)
 				},
 				Err(e) => {
@@ -267,11 +267,7 @@ impl DisputeCoordinatorSubsystem {
 				get_active_with_status(disputes.into_iter(), clock.now()).collect(),
 			Ok(None) => Vec::new(),
 			Err(e) => {
-				tracing::error!(
-					target: LOG_TARGET,
-					"Failed initial load of recent disputes: {:?}",
-					e
-				);
+				gum::error!(target: LOG_TARGET, "Failed initial load of recent disputes: {:?}", e);
 				return Err(e.into())
 			},
 		};
@@ -285,7 +281,7 @@ impl DisputeCoordinatorSubsystem {
 					Ok(Some(votes)) => votes.into(),
 					Ok(None) => continue,
 					Err(e) => {
-						tracing::error!(
+						gum::error!(
 							target: LOG_TARGET,
 							"Failed initial load of candidate votes: {:?}",
 							e
@@ -296,7 +292,7 @@ impl DisputeCoordinatorSubsystem {
 
 			let validators = match rolling_session_window.session_info(session) {
 				None => {
-					tracing::warn!(
+					gum::warn!(
 						target: LOG_TARGET,
 						session,
 						"Missing info for session which has an active dispute",
@@ -401,7 +397,7 @@ where
 			// hour old database state, we should rather cancel contained oneshots and delay
 			// finality until we are fully functional.
 			{
-				tracing::warn!(
+				gum::warn!(
 					target: LOG_TARGET,
 					?msg,
 					"Received msg before first active leaves update. This is not expected - message will be dropped."
diff --git a/polkadot/node/core/dispute-coordinator/src/real/ordering/mod.rs b/polkadot/node/core/dispute-coordinator/src/real/ordering/mod.rs
index 3bd939158c57c55387ec7e93b1daf40e1ca1fbf2..b7250aed561c81dd284d5fe8181520708ec98b77 100644
--- a/polkadot/node/core/dispute-coordinator/src/real/ordering/mod.rs
+++ b/polkadot/node/core/dispute-coordinator/src/real/ordering/mod.rs
@@ -162,7 +162,7 @@ impl OrderingProvider {
 		}
 		let n = match get_block_number(sender, candidate.descriptor().relay_parent).await? {
 			None => {
-				tracing::warn!(
+				gum::warn!(
 					target: LOG_TARGET,
 					candidate_hash = ?candidate_hash,
 					"Candidate's relay_parent could not be found via chain API, but we saw candidate included?!"
@@ -197,7 +197,7 @@ impl OrderingProvider {
 					)
 					.await
 					.unwrap_or_else(|err| {
-						tracing::debug!(
+						gum::debug!(
 							target: LOG_TARGET,
 							activated_leaf = ?activated,
 							error = ?err,
@@ -209,7 +209,7 @@ impl OrderingProvider {
 					})
 				},
 				Err(err) => {
-					tracing::debug!(
+					gum::debug!(
 						target: LOG_TARGET,
 						activated_leaf = ?activated,
 						error = ?err,
@@ -309,7 +309,7 @@ impl OrderingProvider {
 				None => {
 					// It's assumed that it's impossible to retrieve
 					// more than N ancestors for block number N.
-					tracing::error!(
+					gum::error!(
 						target: LOG_TARGET,
 						"Received {} ancestors for block number {} from Chain API",
 						hashes.len(),
diff --git a/polkadot/node/core/dispute-coordinator/src/real/participation/mod.rs b/polkadot/node/core/dispute-coordinator/src/real/participation/mod.rs
index 27e2350e1f51d5768eeaa68d07dd3ae7cdf42f55..4ed4015cd6db28da10e8480ace9c0edfd29dce55 100644
--- a/polkadot/node/core/dispute-coordinator/src/real/participation/mod.rs
+++ b/polkadot/node/core/dispute-coordinator/src/real/participation/mod.rs
@@ -266,7 +266,7 @@ async fn participate(
 
 	let available_data = match recover_available_data_rx.await {
 		Err(oneshot::Canceled) => {
-			tracing::warn!(
+			gum::warn!(
 				target: LOG_TARGET,
 				"`Oneshot` got cancelled when recovering available data {:?}",
 				req.candidate_hash(),
@@ -298,7 +298,7 @@ async fn participate(
 	{
 		Ok(Some(code)) => code,
 		Ok(None) => {
-			tracing::warn!(
+			gum::warn!(
 				target: LOG_TARGET,
 				"Validation code unavailable for code hash {:?} in the state of block {:?}",
 				req.candidate_receipt().descriptor.validation_code_hash,
@@ -309,7 +309,7 @@ async fn participate(
 			return
 		},
 		Err(err) => {
-			tracing::warn!(target: LOG_TARGET, ?err, "Error when fetching validation code.");
+			gum::warn!(target: LOG_TARGET, ?err, "Error when fetching validation code.");
 			send_result(&mut result_sender, req, ParticipationOutcome::Error).await;
 			return
 		},
@@ -333,14 +333,14 @@ async fn participate(
 
 	match store_available_data_rx.await {
 		Err(oneshot::Canceled) => {
-			tracing::warn!(
+			gum::warn!(
 				target: LOG_TARGET,
 				"`Oneshot` got cancelled when storing available data {:?}",
 				req.candidate_hash(),
 			);
 		},
 		Ok(Err(err)) => {
-			tracing::warn!(
+			gum::warn!(
 				target: LOG_TARGET,
 				?err,
 				"Failed to store available data for candidate {:?}",
@@ -375,7 +375,7 @@ async fn participate(
 	// the validation and if valid, whether the commitments hash matches
 	match validation_rx.await {
 		Err(oneshot::Canceled) => {
-			tracing::warn!(
+			gum::warn!(
 				target: LOG_TARGET,
 				"`Oneshot` got cancelled when validating candidate {:?}",
 				req.candidate_hash(),
@@ -384,7 +384,7 @@ async fn participate(
 			return
 		},
 		Ok(Err(err)) => {
-			tracing::warn!(
+			gum::warn!(
 				target: LOG_TARGET,
 				"Candidate {:?} validation failed with: {:?}",
 				req.candidate_hash(),
@@ -394,7 +394,7 @@ async fn participate(
 			send_result(&mut result_sender, req, ParticipationOutcome::Invalid).await;
 		},
 		Ok(Ok(ValidationResult::Invalid(invalid))) => {
-			tracing::warn!(
+			gum::warn!(
 				target: LOG_TARGET,
 				"Candidate {:?} considered invalid: {:?}",
 				req.candidate_hash(),
@@ -405,7 +405,7 @@ async fn participate(
 		},
 		Ok(Ok(ValidationResult::Valid(commitments, _))) => {
 			if commitments.hash() != req.candidate_receipt().commitments_hash {
-				tracing::warn!(
+				gum::warn!(
 					target: LOG_TARGET,
 					expected = ?req.candidate_receipt().commitments_hash,
 					got = ?commitments.hash(),
@@ -427,7 +427,7 @@ async fn send_result(
 	outcome: ParticipationOutcome,
 ) {
 	if let Err(err) = sender.feed(WorkerMessage::from_request(req, outcome)).await {
-		tracing::error!(
+		gum::error!(
 			target: LOG_TARGET,
 			?err,
 			"Sending back participation result failed. Dispute coordinator not working properly!"
diff --git a/polkadot/node/core/dispute-coordinator/src/real/spam_slots.rs b/polkadot/node/core/dispute-coordinator/src/real/spam_slots.rs
index a9327dadeb0e3e62d04df24d308587a91e478998..5b4435121524c89859204eb61665a59798913efa 100644
--- a/polkadot/node/core/dispute-coordinator/src/real/spam_slots.rs
+++ b/polkadot/node/core/dispute-coordinator/src/real/spam_slots.rs
@@ -67,7 +67,7 @@ impl SpamSlots {
 				let spam_vote_count = slots.entry((*session, *validator)).or_default();
 				*spam_vote_count += 1;
 				if *spam_vote_count > MAX_SPAM_VOTES {
-					tracing::debug!(
+					gum::debug!(
 						target: LOG_TARGET,
 						?session,
 						?validator,
diff --git a/polkadot/node/core/dispute-coordinator/src/status.rs b/polkadot/node/core/dispute-coordinator/src/status.rs
index f07c6a7d26843363d542aea15c5b833a82e82650..d2ad551bd9ad8feffcc4f5a6b89658795ad47ee9 100644
--- a/polkadot/node/core/dispute-coordinator/src/status.rs
+++ b/polkadot/node/core/dispute-coordinator/src/status.rs
@@ -152,7 +152,7 @@ impl Clock for SystemClock {
 		match SystemTime::now().duration_since(UNIX_EPOCH) {
 			Ok(d) => d.as_secs(),
 			Err(e) => {
-				tracing::warn!(
+				gum::warn!(
 					target: LOG_TARGET,
 					err = ?e,
 					"Current time is before unix epoch. Validation will not work correctly."
diff --git a/polkadot/node/core/parachains-inherent/Cargo.toml b/polkadot/node/core/parachains-inherent/Cargo.toml
index a3c5b51e33f21eadca4f837f416287d50f59660b..702af0844b9441b2554710088bffb458e6f1f8a7 100644
--- a/polkadot/node/core/parachains-inherent/Cargo.toml
+++ b/polkadot/node/core/parachains-inherent/Cargo.toml
@@ -7,7 +7,7 @@ edition = "2021"
 [dependencies]
 futures = "0.3.21"
 futures-timer = "3.0.2"
-tracing = "0.1.32"
+gum = { package = "tracing-gum", path = "../../gum" }
 thiserror = "1.0.30"
 async-trait = "0.1.52"
 polkadot-node-subsystem = { path = "../../subsystem" }
diff --git a/polkadot/node/core/parachains-inherent/src/lib.rs b/polkadot/node/core/parachains-inherent/src/lib.rs
index 7ab8360451f3874278cc333804a88be9dc229fd8..1feb7c5ffe3d63c996050ea330c79cdc4a5ebfd4 100644
--- a/polkadot/node/core/parachains-inherent/src/lib.rs
+++ b/polkadot/node/core/parachains-inherent/src/lib.rs
@@ -88,7 +88,7 @@ impl ParachainsInherentDataProvider {
 				parent_header,
 			},
 			Err(err) => {
-				tracing::debug!(
+				gum::debug!(
 					?err,
 					"Could not get provisioner inherent data; injecting default data",
 				);
diff --git a/polkadot/node/core/provisioner/Cargo.toml b/polkadot/node/core/provisioner/Cargo.toml
index 3d4cc5c9cb138be3a6edf6a054b2d98c3d9afe7f..c56431d3fcf2008003205810adb92f5c9903f4b2 100644
--- a/polkadot/node/core/provisioner/Cargo.toml
+++ b/polkadot/node/core/provisioner/Cargo.toml
@@ -7,7 +7,7 @@ edition = "2021"
 [dependencies]
 bitvec = { version = "1.0.0", default-features = false, features = ["alloc"] }
 futures = "0.3.21"
-tracing = "0.1.32"
+gum = { package = "tracing-gum", path = "../../gum" }
 thiserror = "1.0.30"
 polkadot-primitives = { path = "../../../primitives" }
 polkadot-node-primitives = { path = "../../primitives" }
diff --git a/polkadot/node/core/provisioner/src/lib.rs b/polkadot/node/core/provisioner/src/lib.rs
index 22c59103897626db30678bc37bb9a16732e1cd7b..5593ab2a7b235fcd54b77e8f6ff442d6af31e4c8 100644
--- a/polkadot/node/core/provisioner/src/lib.rs
+++ b/polkadot/node/core/provisioner/src/lib.rs
@@ -265,7 +265,7 @@ impl ProvisionerJob {
 		)
 		.await
 		{
-			tracing::warn!(target: LOG_TARGET, err = ?err, "failed to assemble or send inherent data");
+			gum::warn!(target: LOG_TARGET, err = ?err, "failed to assemble or send inherent data");
 			self.metrics.on_inherent_data_request(Err(()));
 		} else {
 			self.metrics.on_inherent_data_request(Ok(()));
@@ -451,7 +451,7 @@ async fn select_candidates(
 				descriptor.persisted_validation_data_hash == computed_validation_data_hash
 		}) {
 			let candidate_hash = candidate.hash();
-			tracing::trace!(
+			gum::trace!(
 				target: LOG_TARGET,
 				"Selecting candidate {}. para_id={} core={}",
 				candidate_hash,
@@ -509,7 +509,7 @@ async fn select_candidates(
 		true
 	});
 
-	tracing::debug!(
+	gum::debug!(
 		target: LOG_TARGET,
 		n_candidates = candidates.len(),
 		n_cores = availability_cores.len(),
@@ -557,7 +557,7 @@ fn bitfields_indicate_availability(
 				// in principle, this function might return a `Result<bool, Error>` so that we can more clearly express this error condition
 				// however, in practice, that would just push off an error-handling routine which would look a whole lot like this one.
 				// simpler to just handle the error internally here.
-				tracing::warn!(
+				gum::warn!(
 					target: LOG_TARGET,
 					validator_idx = %validator_idx,
 					availability_len = %availability_len,
@@ -598,7 +598,7 @@ async fn request_disputes(
 	let recent_disputes = match rx.await {
 		Ok(r) => r,
 		Err(oneshot::Canceled) => {
-			tracing::warn!(target: LOG_TARGET, "Unable to gather {:?} disputes", active_or_recent);
+			gum::warn!(target: LOG_TARGET, "Unable to gather {:?} disputes", active_or_recent);
 			Vec::new()
 		},
 	};
@@ -618,7 +618,7 @@ async fn request_votes(
 	match rx.await {
 		Ok(v) => v,
 		Err(oneshot::Canceled) => {
-			tracing::warn!(target: LOG_TARGET, "Unable to query candidate votes");
+			gum::warn!(target: LOG_TARGET, "Unable to query candidate votes");
 			Vec::new()
 		},
 	}
@@ -666,7 +666,7 @@ async fn select_disputes(
 	// If the active ones are already exceeding the bounds, randomly select a subset.
 	let recent = request_disputes(sender, RequestType::Recent).await;
 	let disputes = if recent.len() > MAX_DISPUTES_FORWARDED_TO_RUNTIME {
-		tracing::warn!(
+		gum::warn!(
 			target: LOG_TARGET,
 			"Recent disputes are excessive ({} > {}), reduce to active ones, and selected",
 			recent.len(),
diff --git a/polkadot/node/core/pvf-checker/Cargo.toml b/polkadot/node/core/pvf-checker/Cargo.toml
index 3d5b8737396554502ac312392647e2b54c00b345..c892462aec7e94aa5f44abc658f403856d807fb4 100644
--- a/polkadot/node/core/pvf-checker/Cargo.toml
+++ b/polkadot/node/core/pvf-checker/Cargo.toml
@@ -7,7 +7,7 @@ edition = "2021"
 [dependencies]
 futures = "0.3.21"
 thiserror = "1.0.30"
-tracing = "0.1.32"
+gum = { package = "tracing-gum", path = "../../gum" }
 
 polkadot-node-primitives = { path = "../../primitives" }
 polkadot-node-subsystem = { path = "../../subsystem" }
diff --git a/polkadot/node/core/pvf-checker/src/lib.rs b/polkadot/node/core/pvf-checker/src/lib.rs
index 8eaf898f07b1f25261c6b2c59277f90130cae66e..5d6f544e39aedde0d89842ca478c6139575251bb 100644
--- a/polkadot/node/core/pvf-checker/src/lib.rs
+++ b/polkadot/node/core/pvf-checker/src/lib.rs
@@ -185,7 +185,7 @@ async fn handle_pvf_check(
 	outcome: PreCheckOutcome,
 	validation_code_hash: ValidationCodeHash,
 ) {
-	tracing::debug!(
+	gum::debug!(
 		target: LOG_TARGET,
 		?validation_code_hash,
 		"Received pre-check result: {:?}",
@@ -200,7 +200,7 @@ async fn handle_pvf_check(
 			//
 			// Returning here will leave the PVF in the view dangling. Since it is there, no new
 			// pre-checking request will be sent.
-			tracing::info!(
+			gum::info!(
 				target: LOG_TARGET,
 				?validation_code_hash,
 				"Pre-check failed, abstaining from voting",
@@ -212,7 +212,7 @@ async fn handle_pvf_check(
 	match state.view.on_judgement(validation_code_hash, judgement) {
 		Ok(()) => (),
 		Err(()) => {
-			tracing::debug!(
+			gum::debug!(
 				target: LOG_TARGET,
 				?validation_code_hash,
 				"received judgement for an unknown (or removed) PVF hash",
@@ -254,7 +254,7 @@ async fn handle_from_overseer(
 ) -> Option<Conclude> {
 	match from_overseer {
 		FromOverseer::Signal(OverseerSignal::Conclude) => {
-			tracing::info!(target: LOG_TARGET, "Received `Conclude` signal, exiting");
+			gum::info!(target: LOG_TARGET, "Received `Conclude` signal, exiting");
 			Some(Conclude)
 		},
 		FromOverseer::Signal(OverseerSignal::BlockFinalized(_, _)) => {
@@ -360,7 +360,7 @@ async fn examine_activation(
 	leaf_hash: Hash,
 	leaf_number: BlockNumber,
 ) -> Option<ActivationEffect> {
-	tracing::debug!(
+	gum::debug!(
 		target: LOG_TARGET,
 		"Examining activation of leaf {:?} ({})",
 		leaf_hash,
@@ -370,7 +370,7 @@ async fn examine_activation(
 	let pending_pvfs = match runtime_api::pvfs_require_precheck(sender, leaf_hash).await {
 		Err(runtime_api::RuntimeRequestError::NotSupported) => return None,
 		Err(_) => {
-			tracing::debug!(
+			gum::debug!(
 				target: LOG_TARGET,
 				relay_parent = ?leaf_hash,
 				"cannot fetch PVFs that require pre-checking from runtime API",
@@ -398,7 +398,7 @@ async fn examine_activation(
 				None
 			},
 		Err(e) => {
-			tracing::warn!(
+			gum::warn!(
 				target: LOG_TARGET,
 				relay_parent = ?leaf_hash,
 				"cannot fetch session index from runtime API: {:?}",
@@ -421,7 +421,7 @@ async fn check_signing_credentials(
 	let validators = match runtime_api::validators(sender, leaf).await {
 		Ok(v) => v,
 		Err(e) => {
-			tracing::warn!(
+			gum::warn!(
 				target: LOG_TARGET,
 				relay_parent = ?leaf,
 				"error occured during requesting validators: {:?}",
@@ -453,7 +453,7 @@ async fn sign_and_submit_pvf_check_statement(
 	judgement: Judgement,
 	validation_code_hash: ValidationCodeHash,
 ) {
-	tracing::debug!(
+	gum::debug!(
 		target: LOG_TARGET,
 		?validation_code_hash,
 		?relay_parent,
@@ -464,7 +464,7 @@ async fn sign_and_submit_pvf_check_statement(
 	metrics.on_vote_submission_started();
 
 	if voted.contains(&validation_code_hash) {
-		tracing::trace!(
+		gum::trace!(
 			target: LOG_TARGET,
 			relay_parent = ?relay_parent,
 			?validation_code_hash,
@@ -491,7 +491,7 @@ async fn sign_and_submit_pvf_check_statement(
 	{
 		Ok(Some(signature)) => signature,
 		Ok(None) => {
-			tracing::warn!(
+			gum::warn!(
 				target: LOG_TARGET,
 				?relay_parent,
 				validator_index = ?credentials.validator_index,
@@ -501,7 +501,7 @@ async fn sign_and_submit_pvf_check_statement(
 			return
 		},
 		Err(e) => {
-			tracing::warn!(
+			gum::warn!(
 				target: LOG_TARGET,
 				?relay_parent,
 				validator_index = ?credentials.validator_index,
@@ -518,7 +518,7 @@ async fn sign_and_submit_pvf_check_statement(
 			metrics.on_vote_submitted();
 		},
 		Err(e) => {
-			tracing::warn!(
+			gum::warn!(
 				target: LOG_TARGET,
 				?relay_parent,
 				?validation_code_hash,
@@ -540,12 +540,7 @@ async fn initiate_precheck(
 	validation_code_hash: ValidationCodeHash,
 	metrics: &Metrics,
 ) {
-	tracing::debug!(
-		target: LOG_TARGET,
-		?validation_code_hash,
-		?relay_parent,
-		"initiating a precheck",
-	);
+	gum::debug!(target: LOG_TARGET, ?validation_code_hash, ?relay_parent, "initiating a precheck",);
 
 	let (tx, rx) = oneshot::channel();
 	sender
@@ -563,7 +558,7 @@ async fn initiate_precheck(
 				// Pre-checking request dropped before replying. That can happen in case the
 				// overseer is shutting down. Our part of shutdown will be handled by the
 				// overseer conclude signal. Log it here just in case.
-				tracing::debug!(
+				gum::debug!(
 					target: LOG_TARGET,
 					?validation_code_hash,
 					?relay_parent,
diff --git a/polkadot/node/core/pvf-checker/src/runtime_api.rs b/polkadot/node/core/pvf-checker/src/runtime_api.rs
index 91b6e52d9973f6ce89b7ea2d3571b212c0b0a229..798fb3f1abfbd5f0a5f1860b629a924cdf8691d8 100644
--- a/polkadot/node/core/pvf-checker/src/runtime_api.rs
+++ b/polkadot/node/core/pvf-checker/src/runtime_api.rs
@@ -85,7 +85,7 @@ pub(crate) async fn runtime_api_request<T>(
 	receiver
 		.await
 		.map_err(|_| {
-			tracing::debug!(target: LOG_TARGET, ?relay_parent, "Runtime API request dropped");
+			gum::debug!(target: LOG_TARGET, ?relay_parent, "Runtime API request dropped");
 			RuntimeRequestError::CommunicationError
 		})
 		.and_then(|res| {
@@ -93,7 +93,7 @@ pub(crate) async fn runtime_api_request<T>(
 				use RuntimeApiSubsystemError::*;
 				match e {
 					Execution { .. } => {
-						tracing::debug!(
+						gum::debug!(
 							target: LOG_TARGET,
 							?relay_parent,
 							err = ?e,
diff --git a/polkadot/node/core/pvf/Cargo.toml b/polkadot/node/core/pvf/Cargo.toml
index d1d89d52411098d3ce10a51a08d3e937ae6854bc..4f8aadbaaa1659ad904a7997ad50e4d5cacc5500 100644
--- a/polkadot/node/core/pvf/Cargo.toml
+++ b/polkadot/node/core/pvf/Cargo.toml
@@ -16,7 +16,7 @@ assert_matches = "1.4.0"
 futures = "0.3.21"
 futures-timer = "3.0.2"
 slotmap = "1.0"
-tracing = "0.1.32"
+gum = { package = "tracing-gum", path = "../../gum" }
 pin-project = "1.0.9"
 rand = "0.8.5"
 parity-scale-codec = { version = "3.1.0", default-features = false, features = ["derive"] }
diff --git a/polkadot/node/core/pvf/src/execute/queue.rs b/polkadot/node/core/pvf/src/execute/queue.rs
index c8d75fb0398780b5c1fb73bcd7612811486cc684..23ed115b7f095e98ec84558fed7b0a57b7d582a0 100644
--- a/polkadot/node/core/pvf/src/execute/queue.rs
+++ b/polkadot/node/core/pvf/src/execute/queue.rs
@@ -174,7 +174,7 @@ async fn purge_dead(metrics: &Metrics, workers: &mut Workers) {
 
 fn handle_to_queue(queue: &mut Queue, to_queue: ToQueue) {
 	let ToQueue::Enqueue { artifact, execution_timeout, params, result_tx } = to_queue;
-	tracing::debug!(
+	gum::debug!(
 		target: LOG_TARGET,
 		validation_code_hash = ?artifact.id.code_hash,
 		"enqueueing an artifact for execution",
@@ -208,7 +208,7 @@ fn handle_worker_spawned(queue: &mut Queue, idle: IdleWorker, handle: WorkerHand
 	queue.workers.spawn_inflight -= 1;
 	let worker = queue.workers.running.insert(WorkerData { idle: Some(idle), handle });
 
-	tracing::debug!(target: LOG_TARGET, ?worker, "execute worker spawned");
+	gum::debug!(target: LOG_TARGET, ?worker, "execute worker spawned");
 
 	if let Some(job) = queue.queue.pop_front() {
 		assign(queue, worker, job);
@@ -244,7 +244,7 @@ fn handle_job_finish(
 	};
 
 	queue.metrics.execute_finished();
-	tracing::debug!(
+	gum::debug!(
 		target: LOG_TARGET,
 		validation_code_hash = ?artifact_id.code_hash,
 		worker_rip = idle_worker.is_none(),
@@ -288,7 +288,7 @@ fn handle_job_finish(
 
 fn spawn_extra_worker(queue: &mut Queue) {
 	queue.metrics.execute_worker().on_begin_spawn();
-	tracing::debug!(target: LOG_TARGET, "spawning an extra worker");
+	gum::debug!(target: LOG_TARGET, "spawning an extra worker");
 
 	queue
 		.mux
@@ -303,7 +303,7 @@ async fn spawn_worker_task(program_path: PathBuf, spawn_timeout: Duration) -> Qu
 		match super::worker::spawn(&program_path, spawn_timeout).await {
 			Ok((idle, handle)) => break QueueEvent::Spawn(idle, handle),
 			Err(err) => {
-				tracing::warn!(target: LOG_TARGET, "failed to spawn an execute worker: {:?}", err);
+				gum::warn!(target: LOG_TARGET, "failed to spawn an execute worker: {:?}", err);
 
 				// Assume that the failure intermittent and retry after a delay.
 				Delay::new(Duration::from_secs(3)).await;
@@ -316,7 +316,7 @@ async fn spawn_worker_task(program_path: PathBuf, spawn_timeout: Duration) -> Qu
 ///
 /// The worker must be running and idle.
 fn assign(queue: &mut Queue, worker: Worker, job: ExecuteJob) {
-	tracing::debug!(
+	gum::debug!(
 		target: LOG_TARGET,
 		validation_code_hash = ?job.artifact.id,
 		?worker,
diff --git a/polkadot/node/core/pvf/src/execute/worker.rs b/polkadot/node/core/pvf/src/execute/worker.rs
index a1c4e6d55788569fc224727ab5dbd73556bf408f..1140758e6aee790b1599e6253bc6d23dfcef695f 100644
--- a/polkadot/node/core/pvf/src/execute/worker.rs
+++ b/polkadot/node/core/pvf/src/execute/worker.rs
@@ -72,7 +72,7 @@ pub async fn start_work(
 ) -> Outcome {
 	let IdleWorker { mut stream, pid } = worker;
 
-	tracing::debug!(
+	gum::debug!(
 		target: LOG_TARGET,
 		worker_pid = %pid,
 		validation_code_hash = ?artifact.id.code_hash,
@@ -81,7 +81,7 @@ pub async fn start_work(
 	);
 
 	if let Err(error) = send_request(&mut stream, &artifact.path, &validation_params).await {
-		tracing::warn!(
+		gum::warn!(
 			target: LOG_TARGET,
 			worker_pid = %pid,
 			validation_code_hash = ?artifact.id.code_hash,
@@ -95,7 +95,7 @@ pub async fn start_work(
 		response = recv_response(&mut stream).fuse() => {
 			match response {
 				Err(error) => {
-					tracing::warn!(
+					gum::warn!(
 						target: LOG_TARGET,
 						worker_pid = %pid,
 						validation_code_hash = ?artifact.id.code_hash,
@@ -108,7 +108,7 @@ pub async fn start_work(
 			}
 		},
 		_ = Delay::new(execution_timeout).fuse() => {
-			tracing::warn!(
+			gum::warn!(
 				target: LOG_TARGET,
 				worker_pid = %pid,
 				validation_code_hash = ?artifact.id.code_hash,
@@ -189,7 +189,7 @@ pub fn worker_entrypoint(socket_path: &str) {
 		})?;
 		loop {
 			let (artifact_path, params) = recv_request(&mut stream).await?;
-			tracing::debug!(
+			gum::debug!(
 				target: LOG_TARGET,
 				worker_pid = %std::process::id(),
 				"worker: validating artifact {}",
diff --git a/polkadot/node/core/pvf/src/host.rs b/polkadot/node/core/pvf/src/host.rs
index e62bedbcd6aab9d817de0f400f00d3a9c8739fc0..64bdc6db750c84bb8cf49e03835df8adcbdec314 100644
--- a/polkadot/node/core/pvf/src/host.rs
+++ b/polkadot/node/core/pvf/src/host.rs
@@ -639,13 +639,13 @@ async fn handle_cleanup_pulse(
 	artifact_ttl: Duration,
 ) -> Result<(), Fatal> {
 	let to_remove = artifacts.prune(artifact_ttl);
-	tracing::debug!(
+	gum::debug!(
 		target: LOG_TARGET,
 		"PVF pruning: {} artifacts reached their end of life",
 		to_remove.len(),
 	);
 	for artifact_id in to_remove {
-		tracing::debug!(
+		gum::debug!(
 			target: LOG_TARGET,
 			validation_code_hash = ?artifact_id.code_hash,
 			"pruning artifact",
@@ -664,7 +664,7 @@ async fn sweeper_task(mut sweeper_rx: mpsc::Receiver<PathBuf>) {
 			None => break,
 			Some(condemned) => {
 				let result = async_std::fs::remove_file(&condemned).await;
-				tracing::trace!(
+				gum::trace!(
 					target: LOG_TARGET,
 					?result,
 					"Sweeping the artifact file {}",
diff --git a/polkadot/node/core/pvf/src/prepare/pool.rs b/polkadot/node/core/pvf/src/prepare/pool.rs
index a51b79dbcc3cb3344ab048120dca619c36d8118d..4902c4c7e3b3f9679e3d5103c7d6aff1e4a9cf21 100644
--- a/polkadot/node/core/pvf/src/prepare/pool.rs
+++ b/polkadot/node/core/pvf/src/prepare/pool.rs
@@ -201,7 +201,7 @@ fn handle_to_pool(
 ) {
 	match to_pool {
 		ToPool::Spawn => {
-			tracing::debug!(target: LOG_TARGET, "spawning a new prepare worker");
+			gum::debug!(target: LOG_TARGET, "spawning a new prepare worker");
 			metrics.prepare_worker().on_begin_spawn();
 			mux.push(spawn_worker_task(program_path.to_owned(), spawn_timeout).boxed());
 		},
@@ -234,7 +234,7 @@ fn handle_to_pool(
 			}
 		},
 		ToPool::Kill(worker) => {
-			tracing::debug!(target: LOG_TARGET, ?worker, "killing prepare worker");
+			gum::debug!(target: LOG_TARGET, ?worker, "killing prepare worker");
 			// It may be absent if it were previously already removed by `purge_dead`.
 			let _ = attempt_retire(metrics, spawned, worker);
 		},
@@ -248,7 +248,7 @@ async fn spawn_worker_task(program_path: PathBuf, spawn_timeout: Duration) -> Po
 		match worker::spawn(&program_path, spawn_timeout).await {
 			Ok((idle, handle)) => break PoolEvent::Spawn(idle, handle),
 			Err(err) => {
-				tracing::warn!(target: LOG_TARGET, "failed to spawn a prepare worker: {:?}", err);
+				gum::warn!(target: LOG_TARGET, "failed to spawn a prepare worker: {:?}", err);
 
 				// Assume that the failure intermittent and retry after a delay.
 				Delay::new(Duration::from_secs(3)).await;
diff --git a/polkadot/node/core/pvf/src/prepare/queue.rs b/polkadot/node/core/pvf/src/prepare/queue.rs
index 4e226bd3032e030bbd48d40e6c2f8bfcd2797d83..5aa1402916d667b92e2207aa98ca9c9c30efc91a 100644
--- a/polkadot/node/core/pvf/src/prepare/queue.rs
+++ b/polkadot/node/core/pvf/src/prepare/queue.rs
@@ -211,7 +211,7 @@ async fn handle_to_queue(queue: &mut Queue, to_queue: ToQueue) -> Result<(), Fat
 }
 
 async fn handle_enqueue(queue: &mut Queue, priority: Priority, pvf: Pvf) -> Result<(), Fatal> {
-	tracing::debug!(
+	gum::debug!(
 		target: LOG_TARGET,
 		validation_code_hash = ?pvf.code_hash,
 		?priority,
@@ -228,7 +228,7 @@ async fn handle_enqueue(queue: &mut Queue, priority: Priority, pvf: Pvf) -> Resu
 		// Precondtion for `Enqueue` is that it is sent only once for a PVF;
 		// Thus this should always be `false`;
 		// qed.
-		tracing::warn!(
+		gum::warn!(
 			target: LOG_TARGET,
 			"duplicate `enqueue` command received for {:?}",
 			artifact_id,
@@ -331,7 +331,7 @@ async fn handle_worker_concluded(
 
 	queue.artifact_id_to_job.remove(&artifact_id);
 
-	tracing::debug!(
+	gum::debug!(
 		target: LOG_TARGET,
 		validation_code_hash = ?artifact_id.code_hash,
 		?worker,
@@ -370,7 +370,7 @@ async fn handle_worker_concluded(
 }
 
 async fn handle_worker_rip(queue: &mut Queue, worker: Worker) -> Result<(), Fatal> {
-	tracing::debug!(target: LOG_TARGET, ?worker, "prepare worker ripped");
+	gum::debug!(target: LOG_TARGET, ?worker, "prepare worker ripped");
 
 	let worker_data = queue.workers.remove(worker);
 	if let Some(WorkerData { job: Some(job), .. }) = worker_data {
diff --git a/polkadot/node/core/pvf/src/prepare/worker.rs b/polkadot/node/core/pvf/src/prepare/worker.rs
index 1913f79d8222f50d8ab4e72bca638f71030db3de..d83346946f6222b7842b9ac2d5556410e5a39d26 100644
--- a/polkadot/node/core/pvf/src/prepare/worker.rs
+++ b/polkadot/node/core/pvf/src/prepare/worker.rs
@@ -72,7 +72,7 @@ pub async fn start_work(
 ) -> Outcome {
 	let IdleWorker { mut stream, pid } = worker;
 
-	tracing::debug!(
+	gum::debug!(
 		target: LOG_TARGET,
 		worker_pid = %pid,
 		"starting prepare for {}",
@@ -81,7 +81,7 @@ pub async fn start_work(
 
 	with_tmp_file(pid, cache_path, |tmp_file| async move {
 		if let Err(err) = send_request(&mut stream, code, &tmp_file).await {
-			tracing::warn!(
+			gum::warn!(
 				target: LOG_TARGET,
 				worker_pid = %pid,
 				"failed to send a prepare request: {:?}",
@@ -109,7 +109,7 @@ pub async fn start_work(
 					// By convention we expect encoded `PrepareResult`.
 					if let Ok(result) = PrepareResult::decode(&mut response_bytes.as_slice()) {
 						if result.is_ok() {
-							tracing::debug!(
+							gum::debug!(
 								target: LOG_TARGET,
 								worker_pid = %pid,
 								"promoting WIP artifact {} to {}",
@@ -121,7 +121,7 @@ pub async fn start_work(
 								.await
 								.map(|_| Selected::Done(result))
 								.unwrap_or_else(|err| {
-									tracing::warn!(
+									gum::warn!(
 										target: LOG_TARGET,
 										worker_pid = %pid,
 										"failed to rename the artifact from {} to {}: {:?}",
@@ -137,7 +137,7 @@ pub async fn start_work(
 					} else {
 						// We received invalid bytes from the worker.
 						let bound_bytes = &response_bytes[..response_bytes.len().min(4)];
-						tracing::warn!(
+						gum::warn!(
 							target: LOG_TARGET,
 							worker_pid = %pid,
 							"received unexpected response from the prepare worker: {}",
@@ -148,7 +148,7 @@ pub async fn start_work(
 				},
 				Ok(Err(err)) => {
 					// Communication error within the time limit.
-					tracing::warn!(
+					gum::warn!(
 						target: LOG_TARGET,
 						worker_pid = %pid,
 						"failed to recv a prepare response: {:?}",
@@ -184,7 +184,7 @@ where
 	let tmp_file = match tmpfile_in("prepare-artifact-", cache_path).await {
 		Ok(f) => f,
 		Err(err) => {
-			tracing::warn!(
+			gum::warn!(
 				target: LOG_TARGET,
 				worker_pid = %pid,
 				"failed to create a temp file for the artifact: {:?}",
@@ -205,7 +205,7 @@ where
 		Ok(()) => (),
 		Err(err) if err.kind() == std::io::ErrorKind::NotFound => (),
 		Err(err) => {
-			tracing::warn!(
+			gum::warn!(
 				target: LOG_TARGET,
 				worker_pid = %pid,
 				"failed to remove the tmp file: {:?}",
@@ -246,7 +246,7 @@ pub fn worker_entrypoint(socket_path: &str) {
 		loop {
 			let (code, dest) = recv_request(&mut stream).await?;
 
-			tracing::debug!(
+			gum::debug!(
 				target: LOG_TARGET,
 				worker_pid = %std::process::id(),
 				"worker: preparing artifact",
@@ -267,7 +267,7 @@ pub fn worker_entrypoint(socket_path: &str) {
 
 					let artifact_bytes = compiled_artifact.encode();
 
-					tracing::debug!(
+					gum::debug!(
 						target: LOG_TARGET,
 						worker_pid = %std::process::id(),
 						"worker: writing artifact to {}",
diff --git a/polkadot/node/core/pvf/src/worker_common.rs b/polkadot/node/core/pvf/src/worker_common.rs
index 534bcb61798998d68c349e40143668c902c9bded..8614b07d8166aa53cb226e6f0d7fc3de55143621 100644
--- a/polkadot/node/core/pvf/src/worker_common.rs
+++ b/polkadot/node/core/pvf/src/worker_common.rs
@@ -48,7 +48,7 @@ pub async fn spawn_with_program_path(
 		let socket_path = socket_path.to_owned();
 		async move {
 			let listener = UnixListener::bind(&socket_path).await.map_err(|err| {
-				tracing::warn!(
+				gum::warn!(
 					target: LOG_TARGET,
 					%debug_id,
 					"cannot bind unix socket: {:?}",
@@ -59,7 +59,7 @@ pub async fn spawn_with_program_path(
 
 			let handle =
 				WorkerHandle::spawn(program_path, extra_args, socket_path).map_err(|err| {
-					tracing::warn!(
+					gum::warn!(
 						target: LOG_TARGET,
 						%debug_id,
 						"cannot spawn a worker: {:?}",
@@ -71,7 +71,7 @@ pub async fn spawn_with_program_path(
 			futures::select! {
 				accept_result = listener.accept().fuse() => {
 					let (stream, _) = accept_result.map_err(|err| {
-						tracing::warn!(
+						gum::warn!(
 							target: LOG_TARGET,
 							%debug_id,
 							"cannot accept a worker: {:?}",
@@ -160,7 +160,7 @@ where
 	})
 	.unwrap_err(); // it's never `Ok` because it's `Ok(Never)`
 
-	tracing::debug!(
+	gum::debug!(
 		target: LOG_TARGET,
 		worker_pid = %std::process::id(),
 		"pvf worker ({}): {:?}",
diff --git a/polkadot/node/core/runtime-api/Cargo.toml b/polkadot/node/core/runtime-api/Cargo.toml
index 4fe1b450d1d0585e2d031d36653a88b797a13f38..4e31ad0daabe4922a68737c97ccf0d238c32beea 100644
--- a/polkadot/node/core/runtime-api/Cargo.toml
+++ b/polkadot/node/core/runtime-api/Cargo.toml
@@ -6,7 +6,7 @@ edition = "2021"
 
 [dependencies]
 futures = "0.3.21"
-tracing = "0.1.32"
+gum = { package = "tracing-gum", path = "../../gum" }
 memory-lru = "0.1.0"
 parity-util-mem = { version = "0.11.0", default-features = false }
 
diff --git a/polkadot/node/core/runtime-api/src/lib.rs b/polkadot/node/core/runtime-api/src/lib.rs
index a9e806b2fddef3f813dc18411488c382b0509a8c..bd2da3772c84caa2e4b89f9b8149a2a879d97fae 100644
--- a/polkadot/node/core/runtime-api/src/lib.rs
+++ b/polkadot/node/core/runtime-api/src/lib.rs
@@ -291,7 +291,7 @@ where
 			self.waiting_requests.push_back((request, receiver));
 
 			if self.waiting_requests.len() > MAX_PARALLEL_REQUESTS * 10 {
-				tracing::warn!(
+				gum::warn!(
 					target: LOG_TARGET,
 					"{} runtime API requests waiting to be executed.",
 					self.waiting_requests.len(),
@@ -372,7 +372,7 @@ where
 
 			let runtime_version = api.api_version::<dyn ParachainHost<Block>>(&BlockId::Hash(relay_parent))
 				.unwrap_or_else(|e| {
-					tracing::warn!(
+					gum::warn!(
 						target: LOG_TARGET,
 						"cannot query the runtime API version: {}",
 						e,
@@ -380,7 +380,7 @@ where
 					Some(0)
 				})
 				.unwrap_or_else(|| {
-					tracing::warn!(
+					gum::warn!(
 						target: LOG_TARGET,
 						"no runtime version is reported"
 					);
diff --git a/polkadot/node/gum/Cargo.toml b/polkadot/node/gum/Cargo.toml
new file mode 100644
index 0000000000000000000000000000000000000000..24d3faae5968b60ab46f4694180087a8078ab03d
--- /dev/null
+++ b/polkadot/node/gum/Cargo.toml
@@ -0,0 +1,12 @@
+[package]
+name = "tracing-gum"
+version = "0.9.17"
+authors = ["Parity Technologies <admin@parity.io>"]
+edition = "2021"
+description = "Stick logs together with the TraceID as provided by tempo"
+
+[dependencies]
+tracing = "0.1.31"
+jaeger = { path = "../jaeger", package = "polkadot-node-jaeger" }
+gum-proc-macro = { path = "./proc-macro", package = "tracing-gum-proc-macro" }
+polkadot-primitives = { path = "../../primitives", features = ["std"] }
diff --git a/polkadot/node/gum/README.md b/polkadot/node/gum/README.md
new file mode 100644
index 0000000000000000000000000000000000000000..9d2cc3168cdd78a112148ad6913298104f095af8
--- /dev/null
+++ b/polkadot/node/gum/README.md
@@ -0,0 +1,53 @@
+# tracing-gum
+
+"gum" makes `tracing::{warn,info,..}` and `mick-jaeger` stick together, so that
+logs and spans can be cross-referenced in Grafana with zero additional lines of
+code at the call site.
+
+## Architecture Decision Record (ADR)
+
+### Context
+
+For cross-referencing spans and logs in Grafana Loki and Tempo, a shared
+`traceID` or `TraceIdentifier` is required. All logs must be annotated with
+this piece of metadata.
+
+In most cases `CandidateHash` is the primary identifier of the `jaeger::Span`
+and hence the source from which the `traceID` is derived. For cases where it is
+_not_ the primary identifier, a helper tag named `traceID` is added to those
+spans (out of scope, this is already present as a convenience measure).
+
+Log lines, on the other hand, use the `warn!`, `info!`, `debug!`, `trace!`, ..
+macros provided by the `tracing` crate. Many of these contain a
+`candidate_hash`, which is _not_ equivalent to the `traceID` (256 bits vs
+128 bits) and hence must be derived from it.
+
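+As an illustration only (the actual `hash_to_trace_identifier` helper used by
+the generated code may differ), the derivation can be thought of as truncating
+the 256 bit hash to its first 128 bits:
+
+```rust
+// Sketch: a 256 bit hash, represented here as a raw byte array standing in
+// for the `Hash` type, is truncated to its first 16 bytes to obtain the
+// 128 bit trace identifier.
+fn hash_to_trace_identifier(hash: [u8; 32]) -> u128 {
+    let mut raw = [0u8; 16];
+    raw.copy_from_slice(&hash[..16]);
+    u128::from_be_bytes(raw)
+}
+
+fn main() {
+    let candidate_hash = [0xF0u8; 32];
+    println!("traceID = {:x}", hash_to_trace_identifier(candidate_hash));
+}
+```
+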
+To achieve the cross-reference, either every log line containing a
+`candidate_hash` could be annotated by hand, or this could be approached more
+systematically by providing a `macro` that does so automatically.
+
+Related issues:
+
+* <https://github.com/paritytech/polkadot/issues/5045>
+
+### Decision
+
+The manual alternative would add approx. 2 lines per tracing line that includes
+a `candidate_hash` reference: one to derive the `TraceIdentifier` from it, and
+one to print it as part of the key-value section of the `tracing::*` macros.
+The visual overhead, friction, and diligence required to keep the 100s of
+`tracing::{warn!,info!,debug!,..}` call sites up to date is unreasonably high
+in the mid/long run, especially as more people join the team. Hence a
+proc-macro is introduced which abstracts this away and does so automagically,
+at the cost of one more proc-macro in the codebase.
+
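+To make the trade-off concrete, the two call site styles would look roughly as
+follows (a sketch only, reusing the hypothetical helper from above and assuming
+`LOG_TARGET` and a `candidate_hash` binding are in scope):
+
+```rust
+// Manual alternative: every call site derives and prints the trace
+// identifier itself, i.e. an extra derivation plus an extra key-value
+// pair per log statement.
+let trace_id = hash_to_trace_identifier(candidate_hash);
+tracing::warn!(target: LOG_TARGET, ?candidate_hash, traceID = %trace_id, "import failed");
+
+// With the proc-macro, the call site stays as it was; the `candidate_hash`
+// key alone is enough for `traceID` to be injected automatically.
+gum::warn!(target: LOG_TARGET, ?candidate_hash, "import failed");
+```
+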
+### Consequences
+
+Minimal training/impact is required: a `CandidateHash` has to be passed under
+the name `candidate_hash` when provided to any of the log macros (`warn!`,
+`info!`, etc.).
+
+The crate has to be used throughout the entire codebase to work consistently;
+to disambiguate it from `tracing`, the prefix `gum::` is used.
+
+Feature parity with `tracing::{warn!,..}` is not desired. We want consistency
+more than anything. All currently used features _are_ supported with _gum_ as
+well.
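+
+## Usage
+
+A minimal sketch of a call site (`LOG_TARGET`, `candidate_receipt` and `error`
+are placeholders for whatever is in scope in the calling subsystem):
+
+```rust
+gum::warn!(
+	target: LOG_TARGET,
+	candidate_hash = ?candidate_receipt.hash(),
+	err = ?error,
+	"Failed to import statement",
+);
+```
+
+Because a key named `candidate_hash` is present, the macro derives the
+`TraceIdentifier` from it and appends a `traceID` key-value pair to the emitted
+event; log lines without a `candidate_hash` are forwarded as regular `tracing`
+events.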
diff --git a/polkadot/node/gum/proc-macro/Cargo.toml b/polkadot/node/gum/proc-macro/Cargo.toml
new file mode 100644
index 0000000000000000000000000000000000000000..def95ecf500369df9c7caba1af184803cb34c505
--- /dev/null
+++ b/polkadot/node/gum/proc-macro/Cargo.toml
@@ -0,0 +1,29 @@
+[package]
+name = "tracing-gum-proc-macro"
+version = "0.9.17"
+authors = ["Parity Technologies <admin@parity.io>"]
+edition = "2021"
+description = "Proc macro implementation of the tracing-gum logging crate."
+
+[package.metadata.docs.rs]
+targets = ["x86_64-unknown-linux-gnu"]
+
+[lib]
+proc-macro = true
+
+[dependencies]
+syn = { version = "1.0.86", features = ["full", "extra-traits"] }
+quote = "1.0.15"
+proc-macro2 = "1.0.36"
+proc-macro-crate = "1.1.3"
+expander = "0.0.6"
+
+[dev-dependencies]
+assert_matches = "1.5.0"
+
+
+[features]
+default = []
+# write the expanded version to a `gum.[a-f0-9]{10}.rs`
+# in the `OUT_DIR` as defined by `cargo` for the `expander` crate.
+expand = []
diff --git a/polkadot/node/gum/proc-macro/src/lib.rs b/polkadot/node/gum/proc-macro/src/lib.rs
new file mode 100644
index 0000000000000000000000000000000000000000..f513e25e3a798268bc67082d424a6af7ba39e6b1
--- /dev/null
+++ b/polkadot/node/gum/proc-macro/src/lib.rs
@@ -0,0 +1,171 @@
+// Copyright 2022 Parity Technologies (UK) Ltd.
+// This file is part of Polkadot.
+
+// Polkadot is free software: you can redistribute it and/or modify
+// it under the terms of the GNU General Public License as published by
+// the Free Software Foundation, either version 3 of the License, or
+// (at your option) any later version.
+
+// Polkadot is distributed in the hope that it will be useful,
+// but WITHOUT ANY WARRANTY; without even the implied warranty of
+// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+// GNU General Public License for more details.
+
+// You should have received a copy of the GNU General Public License
+// along with Polkadot.  If not, see <http://www.gnu.org/licenses/>.
+
+#![deny(unused_crate_dependencies)]
+#![deny(missing_docs)]
+#![deny(clippy::dbg_macro)]
+
+//! Generative part of `tracing-gum`. See `tracing-gum` for usage documentation.
+
+use proc_macro2::{Ident, Span, TokenStream};
+use quote::{quote, ToTokens};
+use syn::{parse2, parse_quote, punctuated::Punctuated, Result, Token};
+
+mod types;
+
+use self::types::*;
+
+#[cfg(test)]
+mod tests;
+
+/// Print an error message.
+#[proc_macro]
+pub fn error(item: proc_macro::TokenStream) -> proc_macro::TokenStream {
+	gum(item, Level::Error)
+}
+
+/// Print a warning level message.
+#[proc_macro]
+pub fn warn(item: proc_macro::TokenStream) -> proc_macro::TokenStream {
+	gum(item, Level::Warn)
+}
+
+/// Print an info level message.
+#[proc_macro]
+pub fn info(item: proc_macro::TokenStream) -> proc_macro::TokenStream {
+	gum(item, Level::Info)
+}
+
+/// Print a debug level message.
+#[proc_macro]
+pub fn debug(item: proc_macro::TokenStream) -> proc_macro::TokenStream {
+	gum(item, Level::Debug)
+}
+
+/// Print a trace level message.
+#[proc_macro]
+pub fn trace(item: proc_macro::TokenStream) -> proc_macro::TokenStream {
+	gum(item, Level::Trace)
+}
+
+/// One-size-fits-all internal implementation that produces the actual code.
+pub(crate) fn gum(item: proc_macro::TokenStream, level: Level) -> proc_macro::TokenStream {
+	let item: TokenStream = item.into();
+
+	let res = expander::Expander::new("gum")
+		.add_comment("Generated code by `gum::warn!(..)`".to_owned())
+		.dry(!cfg!(feature = "expand"))
+		.verbose(false)
+		.fmt(expander::Edition::_2021)
+		.maybe_write_to_out_dir(impl_gum2(item, level))
+		.expect("Expander does not fail due to IO in OUT_DIR. qed");
+
+	res.unwrap_or_else(|err| err.to_compile_error()).into()
+}
+
+/// Does the actual parsing and token generation based on `proc_macro2` types.
+///
+/// Required for unit tests.
+pub(crate) fn impl_gum2(orig: TokenStream, level: Level) -> Result<TokenStream> {
+	let args: Args = parse2(orig)?;
+
+	let krate = support_crate();
+	let span = Span::call_site();
+
+	let Args { target, comma, mut values, fmt } = args;
+
+	// find a value or alias called `candidate_hash`.
+	let maybe_candidate_hash = values.iter_mut().find(|value| value.as_ident() == "candidate_hash");
+
+	if let Some(kv) = maybe_candidate_hash {
+		let (ident, rhs_expr, replace_with) = match kv {
+			Value::Alias(alias) => {
+				let ValueWithAliasIdent { alias, marker, expr, .. } = alias.clone();
+				(
+					alias.clone(),
+					expr.to_token_stream(),
+					Some(Value::Value(ValueWithFormatMarker {
+						marker,
+						ident: alias,
+						dot: None,
+						inner: Punctuated::new(),
+					})),
+				)
+			},
+			Value::Value(value) => (value.ident.clone(), value.ident.to_token_stream(), None),
+		};
+
+		// we generate a local value with the same alias name
+		// so replace the expr with just a value
+		if let Some(replace_with) = replace_with {
+			let _old = std::mem::replace(kv, replace_with);
+		};
+
+		// Inject the additional `traceID = % trace_id` identifier
+		// while maintaining trailing comma semantics.
+		let had_trailing_comma = values.trailing_punct();
+		if !had_trailing_comma {
+			values.push_punct(Token![,](span));
+		}
+
+		values.push_value(parse_quote! {
+			traceID = % trace_id
+		});
+		if had_trailing_comma {
+			values.push_punct(Token![,](span));
+		}
+
+		Ok(quote! {
+			if #krate :: enabled!(#target #comma #level) {
+				use ::std::ops::Deref;
+
+				// create a scoped let binding of something that `deref`s to
+				// `Hash`.
+				let value = #rhs_expr;
+				let value = &value;
+				let value: & #krate:: Hash = value.deref();
+				// Do the `deref` to `Hash` and convert to a `TraceIdentifier`.
+				let #ident: #krate:: Hash = * value;
+				let trace_id = #krate:: hash_to_trace_identifier ( #ident );
+				#krate :: event!(
+					#target #comma #level, #values #fmt
+				)
+			}
+		})
+	} else {
+		Ok(quote! {
+				#krate :: event!(
+					#target #comma #level, #values #fmt
+				)
+		})
+	}
+}
+
+/// Extract the support crate path.
+fn support_crate() -> TokenStream {
+	let support_crate_name = if cfg!(test) {
+		quote! {crate}
+	} else {
+		use proc_macro_crate::{crate_name, FoundCrate};
+		let crate_name = crate_name("tracing-gum")
+			.expect("Support crate `tracing-gum` is present in `Cargo.toml`. qed");
+		match crate_name {
+			FoundCrate::Itself => quote! {crate},
+			FoundCrate::Name(name) => Ident::new(&name, Span::call_site()).to_token_stream(),
+		}
+	};
+	support_crate_name
+}
diff --git a/polkadot/node/gum/proc-macro/src/tests.rs b/polkadot/node/gum/proc-macro/src/tests.rs
new file mode 100644
index 0000000000000000000000000000000000000000..ffe332e066532b93c13470d40505326ac23eb1c8
--- /dev/null
+++ b/polkadot/node/gum/proc-macro/src/tests.rs
@@ -0,0 +1,207 @@
+// Copyright 2022 Parity Technologies (UK) Ltd.
+// This file is part of Polkadot.
+
+// Polkadot is free software: you can redistribute it and/or modify
+// it under the terms of the GNU General Public License as published by
+// the Free Software Foundation, either version 3 of the License, or
+// (at your option) any later version.
+
+// Polkadot is distributed in the hope that it will be useful,
+// but WITHOUT ANY WARRANTY; without even the implied warranty of
+// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+// GNU General Public License for more details.
+
+// You should have received a copy of the GNU General Public License
+// along with Polkadot.  If not, see <http://www.gnu.org/licenses/>.
+
+use super::*;
+
+use assert_matches::assert_matches;
+use quote::quote;
+
+#[test]
+fn smoke() {
+	assert_matches!(
+		impl_gum2(
+			quote! {
+				target: "xyz",
+				x = Foo::default(),
+				z = ?Game::new(),
+				"Foo {p} x {q}",
+				p,
+				q,
+			},
+			Level::Warn
+		),
+		Ok(_)
+	);
+}
+
+mod roundtrip {
+	use super::*;
+
+	macro_rules! roundtrip {
+		($whatty:ty | $ts:expr) => {
+			let input = $ts;
+			assert_matches!(
+				::syn::parse2::<$whatty>(input),
+				Ok(typed) => {
+					let downgraded = dbg!(typed.to_token_stream());
+					assert_matches!(::syn::parse2::<$whatty>(downgraded),
+					Ok(reparsed) => {
+						assert_eq!(
+							dbg!(typed.into_token_stream().to_string()),
+							reparsed.into_token_stream().to_string(),
+						)
+					});
+				}
+			);
+		}
+	}
+
+	#[test]
+	fn u_target() {
+		roundtrip! {Target | quote! {target: "foo" } };
+	}
+
+	#[test]
+	fn u_format_marker() {
+		roundtrip! {FormatMarker | quote! {?} };
+		roundtrip! {FormatMarker | quote! {%} };
+		roundtrip! {FormatMarker | quote! {} };
+	}
+
+	#[test]
+	fn u_value_w_alias() {
+		roundtrip! {Value | quote! {x = y} };
+		roundtrip! {Value | quote! {f = f} };
+		roundtrip! {Value | quote! {ff = ?ff} };
+		roundtrip! {Value | quote! {fff = %fff} };
+	}
+
+	#[test]
+	fn u_value_bare_w_format_marker() {
+		roundtrip! {Value | quote! {?q} };
+		roundtrip! {Value | quote! {%etcpp} };
+
+		roundtrip! {ValueWithFormatMarker | quote! {?q} };
+		roundtrip! {ValueWithFormatMarker | quote! {%etcpp} };
+	}
+
+	#[test]
+	fn u_value_bare_w_field_access() {
+		roundtrip! {ValueWithFormatMarker | quote! {a.b} };
+		roundtrip! {ValueWithFormatMarker | quote! {a.b.cdef.ghij} };
+		roundtrip! {ValueWithFormatMarker | quote! {?a.b.c} };
+	}
+
+	#[test]
+	fn u_args() {
+		roundtrip! {Args | quote! {target: "yes", k=?v, candidate_hash, "But why? {a}", a} };
+		roundtrip! {Args | quote! {target: "also", candidate_hash = ?c_hash, "But why?"} };
+		roundtrip! {Args | quote! {"Nope? {}", candidate_hash} };
+	}
+
+	#[test]
+	fn e2e() {
+		roundtrip! {Args | quote! {target: "yes", k=?v, candidate_hash, "But why? {a}", a} };
+		roundtrip! {Args | quote! {target: "also", candidate_hash = ?c_hash, "But why?"} };
+		roundtrip! {Args | quote! { "Nope? But yes {}", candidate_hash} };
+	}
+
+	#[test]
+	fn sample_w_candidate_hash_aliased() {
+		dbg!(impl_gum2(
+			quote! {
+				target: "bar",
+				a = a,
+				candidate_hash = %Hash::repeat_byte(0xF0),
+				b = ?Y::default(),
+				c = ?a,
+				"xxx"
+			},
+			Level::Info
+		)
+		.unwrap()
+		.to_string());
+	}
+
+	#[test]
+	fn sample_w_candidate_hash_aliased_unnecessary() {
+		assert_matches!(impl_gum2(
+			quote! {
+				"bar",
+				a = a,
+				candidate_hash = ?candidate_hash,
+				b = ?Y::default(),
+				c = ?a,
+				"xxx {} {}",
+				a,
+				a,
+			},
+			Level::Info
+		), Ok(x) => {
+			dbg!(x.to_string())
+		});
+	}
+
+	#[test]
+	fn no_fmt_str_args() {
+		assert_matches!(impl_gum2(
+			quote! {
+				target: "bar",
+				a = a,
+				candidate_hash = ?candidate_hash,
+				b = ?Y::default(),
+				c = a,
+				"xxx",
+			},
+			Level::Trace
+		), Ok(x) => {
+			dbg!(x.to_string())
+		});
+	}
+
+	#[test]
+	fn no_fmt_str() {
+		assert_matches!(impl_gum2(
+			quote! {
+				target: "bar",
+				a = a,
+				candidate_hash = ?candidate_hash,
+				b = ?Y::default(),
+				c = a,
+			},
+			Level::Trace
+		), Ok(x) => {
+			dbg!(x.to_string())
+		});
+	}
+
+	#[test]
+	fn field_member_as_kv() {
+		assert_matches!(impl_gum2(
+			quote! {
+				target: "z",
+				?y.x,
+			},
+			Level::Info
+		), Ok(x) => {
+			dbg!(x.to_string())
+		});
+	}
+
+	#[test]
+	fn nested_field_member_as_kv() {
+		assert_matches!(impl_gum2(
+			quote! {
+				target: "z",
+				?a.b.c.d.e.f.g,
+			},
+			Level::Info
+		), Ok(x) => {
+			dbg!(x.to_string())
+		});
+	}
+}
diff --git a/polkadot/node/gum/proc-macro/src/types.rs b/polkadot/node/gum/proc-macro/src/types.rs
new file mode 100644
index 0000000000000000000000000000000000000000..7a2f663fee389379e1bb14f487aae81835cdb83e
--- /dev/null
+++ b/polkadot/node/gum/proc-macro/src/types.rs
@@ -0,0 +1,336 @@
+// Copyright 2022 Parity Technologies (UK) Ltd.
+// This file is part of Polkadot.
+
+// Polkadot is free software: you can redistribute it and/or modify
+// it under the terms of the GNU General Public License as published by
+// the Free Software Foundation, either version 3 of the License, or
+// (at your option) any later version.
+
+// Polkadot is distributed in the hope that it will be useful,
+// but WITHOUT ANY WARRANTY; without even the implied warranty of
+// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+// GNU General Public License for more details.
+
+// You should have received a copy of the GNU General Public License
+// along with Polkadot.  If not, see <http://www.gnu.org/licenses/>.
+
+use super::*;
+
+use syn::{
+	parse::{Parse, ParseStream},
+	Token,
+};
+
+pub(crate) mod kw {
+	syn::custom_keyword!(target);
+}
+
+#[derive(Debug, Clone, PartialEq, Eq)]
+pub(crate) struct Target {
+	kw: kw::target,
+	colon: Token![:],
+	expr: syn::Expr,
+}
+
+impl Parse for Target {
+	fn parse(input: ParseStream) -> Result<Self> {
+		Ok(Self { kw: input.parse()?, colon: input.parse()?, expr: input.parse()? })
+	}
+}
+
+impl ToTokens for Target {
+	fn to_tokens(&self, tokens: &mut TokenStream) {
+		let kw = &self.kw;
+		let colon = &self.colon;
+		let expr = &self.expr;
+		tokens.extend(quote! {
+			#kw #colon #expr
+		})
+	}
+}
+
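+/// How a bound value is to be rendered, following the `tracing` conventions:
+/// `?` renders it via its `Debug` implementation, `%` via `Display`, and the
+/// absence of a marker records the value as-is.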
+#[derive(Debug, Clone, Copy, PartialEq, Eq)]
+pub(crate) enum FormatMarker {
+	Questionmark(Token![?]),
+	Percentage(Token![%]),
+	None,
+}
+
+impl Parse for FormatMarker {
+	fn parse(input: ParseStream) -> Result<Self> {
+		let lookahead = input.lookahead1();
+		if lookahead.peek(Token![?]) {
+			input.parse().map(Self::Questionmark)
+		} else if lookahead.peek(Token![%]) {
+			input.parse().map(Self::Percentage)
+		} else {
+			Ok(Self::None)
+		}
+	}
+}
+
+impl ToTokens for FormatMarker {
+	fn to_tokens(&self, tokens: &mut TokenStream) {
+		tokens.extend(match self {
+			Self::Percentage(p) => p.to_token_stream(),
+			Self::Questionmark(q) => q.to_token_stream(),
+			Self::None => TokenStream::new(),
+		})
+	}
+}
+
+#[derive(Debug, Clone, PartialEq, Eq)]
+pub(crate) struct ValueWithAliasIdent {
+	pub alias: Ident,
+	pub eq: Token![=],
+	pub marker: FormatMarker,
+	pub expr: syn::Expr,
+}
+
+impl Parse for ValueWithAliasIdent {
+	fn parse(input: ParseStream) -> Result<Self> {
+		Ok(Self {
+			alias: input.parse()?,
+			eq: input.parse()?,
+			marker: input.parse()?,
+			expr: input.parse()?,
+		})
+	}
+}
+
+impl ToTokens for ValueWithAliasIdent {
+	fn to_tokens(&self, tokens: &mut TokenStream) {
+		let alias = &self.alias;
+		let eq = &self.eq;
+		let marker = &self.marker;
+		let expr = &self.expr;
+		tokens.extend(quote! {
+			#alias #eq #marker #expr
+		})
+	}
+}
+
+#[derive(Debug, Clone, PartialEq, Eq)]
+pub(crate) struct ValueWithFormatMarker {
+	pub marker: FormatMarker,
+	pub ident: Ident,
+	pub dot: Option<Token![.]>,
+	pub inner: Punctuated<syn::Member, Token![.]>,
+}
+
+impl Parse for ValueWithFormatMarker {
+	fn parse(input: ParseStream) -> Result<Self> {
+		let marker = input.parse::<FormatMarker>()?;
+		let ident = input.parse::<syn::Ident>()?;
+
+		let mut inner = Punctuated::<syn::Member, Token![.]>::new();
+
+		let lookahead = input.lookahead1();
+		let dot = if lookahead.peek(Token![.]) {
+			let dot = Some(input.parse::<Token![.]>()?);
+
+			loop {
+				let member = input.parse::<syn::Member>()?;
+				inner.push_value(member);
+
+				let lookahead = input.lookahead1();
+				if !lookahead.peek(Token![.]) {
+					break
+				}
+
+				let token = input.parse::<Token![.]>()?;
+				inner.push_punct(token);
+			}
+
+			dot
+		} else {
+			None
+		};
+		Ok(Self { marker, ident, dot, inner })
+	}
+}
+
+impl ToTokens for ValueWithFormatMarker {
+	fn to_tokens(&self, tokens: &mut TokenStream) {
+		let marker = &self.marker;
+		let ident = &self.ident;
+		let dot = &self.dot;
+		let inner = &self.inner;
+		tokens.extend(quote! {
+			#marker #ident #dot #inner
+		})
+	}
+}
+
+/// A value as passed to the macro, appearing _before_ the format string.
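+///
+/// Illustrative forms, as exercised by the unit tests:
+///
+/// ```text
+/// x = y      // aliased value
+/// ff = ?ff   // aliased value with a `Debug` format marker
+/// %etcpp     // bare value with a `Display` format marker
+/// ?a.b.c     // bare value with field access
+/// ```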
+#[derive(Debug, Clone, PartialEq, Eq)]
+pub(crate) enum Value {
+	Alias(ValueWithAliasIdent),
+	Value(ValueWithFormatMarker),
+}
+
+impl Value {
+	pub fn as_ident(&self) -> &Ident {
+		match self {
+			Self::Alias(alias) => &alias.alias,
+			Self::Value(value) => &value.ident,
+		}
+	}
+}
+
+impl Parse for Value {
+	fn parse(input: ParseStream) -> Result<Self> {
+		if input.fork().parse::<ValueWithAliasIdent>().is_ok() {
+			input.parse().map(Self::Alias)
+		} else if input.fork().parse::<ValueWithFormatMarker>().is_ok() {
+			input.parse().map(Self::Value)
+		} else {
+			Err(syn::Error::new(Span::call_site(), "Neither value nor aliased value."))
+		}
+	}
+}
+
+impl ToTokens for Value {
+	fn to_tokens(&self, tokens: &mut TokenStream) {
+		tokens.extend(match self {
+			Self::Alias(alias) => quote! { #alias },
+			Self::Value(value) => quote! { #value },
+		})
+	}
+}
+
+/// Defines the token stream consisting of a format string and its arguments.
+///
+/// Attention: Currently the correctness of the arguments is not checked as
+/// part of the parsing logic. It would be possible to use `parse_fmt_str:2.0.0`
+/// to do so and possibly improve the error messages, but for the time being
+/// it is not clear whether this yields any practical benefit, so it is left
+/// for future consideration.
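+///
+/// An illustrative instance would be the trailing format string plus
+/// arguments of an invocation, e.g.:
+///
+/// ```text
+/// "Foo {p} x {q}", p, q
+/// ```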
+#[derive(Debug, Clone)]
+pub(crate) struct FmtGroup {
+	pub format_str: syn::LitStr,
+	pub maybe_comma: Option<Token![,]>,
+	pub rest: TokenStream,
+}
+
+impl Parse for FmtGroup {
+	fn parse(input: ParseStream) -> Result<Self> {
+		let format_str = input
+			.parse()
+			.map_err(|e| syn::Error::new(e.span(), "Expected format specifier"))?;
+
+		let (maybe_comma, rest) = if input.peek(Token![,]) {
+			let comma = input.parse::<Token![,]>()?;
+			let rest = input.parse()?;
+			(Some(comma), rest)
+		} else {
+			(None, TokenStream::new())
+		};
+
+		if !input.is_empty() {
+			return Err(syn::Error::new(input.span(), "Unexpected data, expected closing `)`."))
+		}
+
+		Ok(Self { format_str, maybe_comma, rest })
+	}
+}
+
+impl ToTokens for FmtGroup {
+	fn to_tokens(&self, tokens: &mut TokenStream) {
+		let format_str = &self.format_str;
+		let maybe_comma = &self.maybe_comma;
+		let rest = &self.rest;
+
+		tokens.extend(quote! { #format_str #maybe_comma #rest });
+	}
+}
+
+/// Full set of arguments as provided to the `gum::warn!` call.
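+///
+/// For illustration, a full argument list (taken from the unit tests) could be:
+///
+/// ```text
+/// target: "yes", k = ?v, candidate_hash, "But why? {a}", a
+/// ```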
+#[derive(Debug, Clone)]
+pub(crate) struct Args {
+	pub target: Option<Target>,
+	pub comma: Option<Token![,]>,
+	pub values: Punctuated<Value, Token![,]>,
+	pub fmt: Option<FmtGroup>,
+}
+
+impl Parse for Args {
+	fn parse(input: ParseStream) -> Result<Self> {
+		let lookahead = input.lookahead1();
+		let (target, comma) = if lookahead.peek(kw::target) {
+			let target = input.parse()?;
+			let comma = input.parse::<Token![,]>()?;
+			(Some(target), Some(comma))
+		} else {
+			(None, None)
+		};
+
+		let mut values = Punctuated::new();
+		loop {
+			if input.fork().parse::<Value>().is_ok() {
+				values.push_value(input.parse::<Value>()?);
+			} else {
+				break
+			}
+			if input.peek(Token![,]) {
+				values.push_punct(input.parse::<Token![,]>()?);
+			} else {
+				break
+			}
+		}
+
+		let fmt = if values.empty_or_trailing() && !input.is_empty() {
+			let fmt = input.parse::<FmtGroup>()?;
+			Some(fmt)
+		} else {
+			None
+		};
+
+		Ok(Self { target, comma, values, fmt })
+	}
+}
+
+impl ToTokens for Args {
+	fn to_tokens(&self, tokens: &mut TokenStream) {
+		let target = &self.target;
+		let comma = &self.comma;
+		let values = &self.values;
+		let fmt = &self.fmt;
+		tokens.extend(quote! {
+			#target #comma #values #fmt
+		})
+	}
+}
+
+/// Supported tracing levels, passed on to `tracing::event!`.
+///
+/// Note: The level is not parsed from the input stream, but implicitly defined
+/// by the macro name, i.e. `gum::debug!` implies `Level::Debug`.
+#[derive(Debug, Clone, Copy, PartialEq, Eq)]
+pub(crate) enum Level {
+	Error,
+	Warn,
+	Info,
+	Debug,
+	Trace,
+}
+
+impl ToTokens for Level {
+	fn to_tokens(&self, tokens: &mut TokenStream) {
+		let span = Span::call_site();
+		let variant = match self {
+			Self::Error => Ident::new("ERROR", span),
+			Self::Warn => Ident::new("WARN", span),
+			Self::Info => Ident::new("INFO", span),
+			Self::Debug => Ident::new("DEBUG", span),
+			Self::Trace => Ident::new("TRACE", span),
+		};
+		let krate = support_crate();
+		tokens.extend(quote! {
+			#krate :: Level :: #variant
+		})
+	}
+}
diff --git a/polkadot/node/gum/src/lib.rs b/polkadot/node/gum/src/lib.rs
new file mode 100644
index 0000000000000000000000000000000000000000..261f1037de7fe39f045b2a1ea6c8b1cededf7695
--- /dev/null
+++ b/polkadot/node/gum/src/lib.rs
@@ -0,0 +1,35 @@
+// Copyright 2022 Parity Technologies (UK) Ltd.
+// This file is part of Polkadot.
+
+// Polkadot is free software: you can redistribute it and/or modify
+// it under the terms of the GNU General Public License as published by
+// the Free Software Foundation, either version 3 of the License, or
+// (at your option) any later version.
+
+// Polkadot is distributed in the hope that it will be useful,
+// but WITHOUT ANY WARRANTY; without even the implied warranty of
+// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+// GNU General Public License for more details.
+
+// You should have received a copy of the GNU General Public License
+// along with Polkadot.  If not, see <http://www.gnu.org/licenses/>.
+
+#![deny(unused_crate_dependencies)]
+#![deny(missing_docs)]
+#![deny(clippy::dbg_macro)]
+
+//! A wrapper around `tracing` macros, to provide semi-automatic
+//! `traceID` annotation without codebase turnover.
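+//!
+//! A minimal usage sketch, mirroring the tests in this crate (`"some-target"`
+//! and the message are illustrative; `candidate_hash` is assumed to be in
+//! scope):
+//!
+//! ```rust,ignore
+//! gum::debug!(
+//! 	target: "some-target",
+//! 	?candidate_hash,
+//! 	"a message with trace ID annotation",
+//! );
+//! ```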
+
+pub use tracing::{enabled, event, Level};
+
+#[doc(hidden)]
+pub use jaeger::hash_to_trace_identifier;
+
+#[doc(hidden)]
+pub use polkadot_primitives::v2::{CandidateHash, Hash};
+
+pub use gum_proc_macro::{debug, error, info, trace, warn};
+
+#[cfg(test)]
+mod tests;
diff --git a/polkadot/node/gum/src/tests.rs b/polkadot/node/gum/src/tests.rs
new file mode 100644
index 0000000000000000000000000000000000000000..4b52147f294854d804d73b5fad3a64838aebdf83
--- /dev/null
+++ b/polkadot/node/gum/src/tests.rs
@@ -0,0 +1,104 @@
+// Copyright 2022 Parity Technologies (UK) Ltd.
+// This file is part of Polkadot.
+
+// Polkadot is free software: you can redistribute it and/or modify
+// it under the terms of the GNU General Public License as published by
+// the Free Software Foundation, either version 3 of the License, or
+// (at your option) any later version.
+
+// Polkadot is distributed in the hope that it will be useful,
+// but WITHOUT ANY WARRANTY; without even the implied warranty of
+// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+// GNU General Public License for more details.
+
+// You should have received a copy of the GNU General Public License
+// along with Polkadot.  If not, see <http://www.gnu.org/licenses/>.
+
+use super::*;
+pub use polkadot_primitives::v2::{CandidateHash, Hash};
+
+#[derive(Default, Debug)]
+struct Y {
+	#[allow(dead_code)]
+	x: u8,
+}
+
+#[test]
+fn plain() {
+	error!("plain");
+}
+
+#[test]
+fn wo_alias() {
+	let a: i32 = 7;
+	error!(target: "foo",
+		"Something something {}, {:?}, or maybe {}",
+		a,
+		b = Y::default(),
+		c = a
+	);
+}
+
+#[test]
+fn wo_unnecessary() {
+	let a: i32 = 7;
+	warn!(target: "bar",
+		a = a,
+		b = ?Y::default(),
+		"fff {c}",
+		c = a,
+	);
+}
+
+#[test]
+fn w_candidate_hash_value_assignment() {
+	let a: i32 = 7;
+	info!(target: "bar",
+		a = a,
+		// ad-hoc value
+		candidate_hash = %CandidateHash(Hash::repeat_byte(0xF0)),
+		b = ?Y::default(),
+		c = ?a,
+		"xxx",
+	);
+}
+
+#[test]
+fn w_candidate_hash_from_scope() {
+	let a: i32 = 7;
+	let candidate_hash = CandidateHash(Hash::repeat_byte(0xF1));
+	debug!(target: "bar",
+		a = a,
+		?candidate_hash,
+		b = ?Y::default(),
+		c = ?a,
+		"xxx",
+	);
+}
+
+#[test]
+fn w_candidate_hash_aliased() {
+	let a: i32 = 7;
+	let c_hash = Hash::repeat_byte(0xFA);
+	trace!(target: "bar",
+		a = a,
+		candidate_hash = ?c_hash,
+		b = ?Y::default(),
+		c = a,
+		"xxx",
+	);
+}
+
+#[test]
+fn w_candidate_hash_aliased_unnecessary() {
+	let a: i32 = 7;
+	let candidate_hash = CandidateHash(Hash::repeat_byte(0xFA));
+	info!(
+		target: "bar",
+		a = a,
+		candidate_hash = ?candidate_hash,
+		b = ?Y::default(),
+		c = a,
+		"xxx",
+	);
+}
diff --git a/polkadot/node/jaeger/Cargo.toml b/polkadot/node/jaeger/Cargo.toml
index f5b1a64b3dac4c94df00247fdcd97b750d12b0d2..2edc201f04fe50b9440cbb86426d29e328f3e636 100644
--- a/polkadot/node/jaeger/Cargo.toml
+++ b/polkadot/node/jaeger/Cargo.toml
@@ -3,7 +3,7 @@ name = "polkadot-node-jaeger"
 version = "0.9.18"
 authors = ["Parity Technologies <admin@parity.io>"]
 edition = "2021"
-description = "Polkadot Jaeger primitives"
+description = "Polkadot Jaeger primitives, but equally useful for Grafana/Tempo"
 
 [dependencies]
 async-std = "1.8.0"
diff --git a/polkadot/node/jaeger/src/lib.rs b/polkadot/node/jaeger/src/lib.rs
index 2fa135f890628732eac494cb47d841bccd0f9283..5af77764d9c87be78c60541089055c1df43ec6c7 100644
--- a/polkadot/node/jaeger/src/lib.rs
+++ b/polkadot/node/jaeger/src/lib.rs
@@ -53,7 +53,7 @@ mod spans;
 pub use self::{
 	config::{JaegerConfig, JaegerConfigBuilder},
 	errors::JaegerError,
-	spans::{PerLeafSpan, Span, Stage},
+	spans::{hash_to_trace_identifier, PerLeafSpan, Span, Stage},
 };
 
 use self::spans::TraceIdentifier;
diff --git a/polkadot/node/jaeger/src/spans.rs b/polkadot/node/jaeger/src/spans.rs
index b062708c526e4273bf17a41c6f144b6ebc65bdea..56902571b88c579c185fb0e51b27c36a626b9e68 100644
--- a/polkadot/node/jaeger/src/spans.rs
+++ b/polkadot/node/jaeger/src/spans.rs
@@ -174,7 +174,7 @@ pub(crate) type TraceIdentifier = u128;
 /// A helper to convert the hash to the fixed size representation
 /// needed for jaeger.
 #[inline]
-fn hash_to_identifier(hash: Hash) -> TraceIdentifier {
+pub fn hash_to_trace_identifier(hash: Hash) -> TraceIdentifier {
 	let mut buf = [0u8; 16];
 	buf.copy_from_slice(&hash.as_ref()[0..16]);
 	// The slice bytes are copied in reading order, so if interpreted
@@ -197,13 +197,13 @@ pub trait LazyIdent {
 
 impl<'a> LazyIdent for &'a [u8] {
 	fn eval(&self) -> TraceIdentifier {
-		hash_to_identifier(BlakeTwo256::hash_of(self))
+		hash_to_trace_identifier(BlakeTwo256::hash_of(self))
 	}
 }
 
 impl LazyIdent for &PoV {
 	fn eval(&self) -> TraceIdentifier {
-		hash_to_identifier(self.hash())
+		hash_to_trace_identifier(self.hash())
 	}
 
 	fn extra_tags(&self, span: &mut Span) {
@@ -213,7 +213,7 @@ impl LazyIdent for &PoV {
 
 impl LazyIdent for Hash {
 	fn eval(&self) -> TraceIdentifier {
-		hash_to_identifier(*self)
+		hash_to_trace_identifier(*self)
 	}
 
 	fn extra_tags(&self, span: &mut Span) {
@@ -223,7 +223,7 @@ impl LazyIdent for Hash {
 
 impl LazyIdent for &Hash {
 	fn eval(&self) -> TraceIdentifier {
-		hash_to_identifier(**self)
+		hash_to_trace_identifier(**self)
 	}
 
 	fn extra_tags(&self, span: &mut Span) {
@@ -233,7 +233,7 @@ impl LazyIdent for &Hash {
 
 impl LazyIdent for CandidateHash {
 	fn eval(&self) -> TraceIdentifier {
-		hash_to_identifier(self.0)
+		hash_to_trace_identifier(self.0)
 	}
 
 	fn extra_tags(&self, span: &mut Span) {
@@ -472,7 +472,7 @@ mod tests {
 	#[test]
 	fn hash_derived_identifier_is_leading_16bytes() {
 		let candidate_hash = dbg!(Hash::from(&RAW));
-		let trace_id = dbg!(hash_to_identifier(candidate_hash));
+		let trace_id = dbg!(hash_to_trace_identifier(candidate_hash));
 		for (idx, (a, b)) in candidate_hash
 			.as_bytes()
 			.iter()
@@ -488,7 +488,7 @@ mod tests {
 	fn extra_tags_do_not_change_trace_id() {
 		Jaeger::test_setup();
 		let candidate_hash = dbg!(Hash::from(&RAW));
-		let trace_id = hash_to_identifier(candidate_hash);
+		let trace_id = hash_to_trace_identifier(candidate_hash);
 
 		let span = Span::new(candidate_hash, "foo");
 
diff --git a/polkadot/node/malus/Cargo.toml b/polkadot/node/malus/Cargo.toml
index e0fa7fd0ecf8654dbf411adf9e3b9ba20759c7a0..042573b6f0e4a97d1019872e6b48d8c5fc02a5a4 100644
--- a/polkadot/node/malus/Cargo.toml
+++ b/polkadot/node/malus/Cargo.toml
@@ -31,7 +31,7 @@ sp-keystore = { git = "https://github.com/paritytech/substrate", branch = "maste
 clap = { version = "3.1", features = ["derive"] }
 futures = "0.3.21"
 futures-timer = "3.0.2"
-tracing = "0.1.32"
+gum = { package = "tracing-gum", path = "../gum/" }
 
 [features]
 default = []
diff --git a/polkadot/node/malus/src/variants/back_garbage_candidate.rs b/polkadot/node/malus/src/variants/back_garbage_candidate.rs
index 210a03c0447fd14f65b8abe8e004ef132e307380..5cca3144a8a1e2a34d2ba889752155aa3f4cff23 100644
--- a/polkadot/node/malus/src/variants/back_garbage_candidate.rs
+++ b/polkadot/node/malus/src/variants/back_garbage_candidate.rs
@@ -170,15 +170,15 @@ where
 									response_sender,
 								);
 							} else {
-								tracing::info!(
-									target = MALUS,
+								gum::info!(
+									target: MALUS,
 									"Could not get availability data, can't back"
 								);
 							}
 						}),
 					);
 				} else {
-					tracing::info!(target = MALUS, "No CandidateReceipt available to work with");
+					gum::info!(target: MALUS, "No CandidateReceipt available to work with");
 				}
 				None
 			},
diff --git a/polkadot/node/malus/src/variants/suggest_garbage_candidate.rs b/polkadot/node/malus/src/variants/suggest_garbage_candidate.rs
index 3d43f1c0054a54fc83745733a96614dcc8cdda63..64e8629edc50dbb6a67b0ba7f1e81e84bbc00fd1 100644
--- a/polkadot/node/malus/src/variants/suggest_garbage_candidate.rs
+++ b/polkadot/node/malus/src/variants/suggest_garbage_candidate.rs
@@ -132,8 +132,8 @@ impl OverseerGen for SuggestGarbageCandidate {
 			move |(mut subsystem_sender, hash, candidate_receipt): (_, Hash, CandidateReceipt)| {
 				let keystore = keystore.clone();
 				async move {
-					tracing::info!(
-						target = MALUS,
+					gum::info!(
+						target: MALUS,
 						"Replacing seconded candidate pov with something else"
 					);
 
@@ -160,7 +160,7 @@ impl OverseerGen for SuggestGarbageCandidate {
 							))
 							.await;
 					} else {
-						tracing::info!("We are not a validator. Not siging anything.");
+						gum::info!("We are not a validator. Not signing anything.");
 					}
 				}
 			},
diff --git a/polkadot/node/metered-channel/Cargo.toml b/polkadot/node/metered-channel/Cargo.toml
index 07d935a5896df868070969f3e2f114fc197ebda0..9681345f07fdeb1f7c1767ed1f3e5ef3c98cd3dd 100644
--- a/polkadot/node/metered-channel/Cargo.toml
+++ b/polkadot/node/metered-channel/Cargo.toml
@@ -9,7 +9,7 @@ description = "Channels with attached Meters"
 futures = "0.3.21"
 futures-timer = "3.0.2"
 derive_more = "0.99"
-tracing = "0.1.32"
+gum = { package = "tracing-gum", path = "../gum" }
 thiserror = "1.0.30"
 
 [dev-dependencies]
diff --git a/polkadot/node/metered-channel/src/oneshot.rs b/polkadot/node/metered-channel/src/oneshot.rs
index af162c267f77ea856415446de24777cabfcbb980..3ccd8eead264821b762dc3f83393498e1b9c41e6 100644
--- a/polkadot/node/metered-channel/src/oneshot.rs
+++ b/polkadot/node/metered-channel/src/oneshot.rs
@@ -224,7 +224,7 @@ impl<T> Future for MeteredReceiver<T> {
 			.get_or_insert_with(move || Delay::new(soft_timeout).fuse());
 
 		if Pin::new(soft_timeout).poll(ctx).is_ready() {
-			tracing::warn!("Oneshot `{name}` exceeded the soft threshold", name = &self.name);
+			gum::warn!("Oneshot `{name}` exceeded the soft threshold", name = &self.name);
 		}
 
 		let hard_timeout = self.hard_timeout.clone();
diff --git a/polkadot/node/metrics/Cargo.toml b/polkadot/node/metrics/Cargo.toml
index 80ac119a19f98464686a6a955b3000d91de4b1cf..33cea4c4c42d38074dc6c74108e29f249b36c0b9 100644
--- a/polkadot/node/metrics/Cargo.toml
+++ b/polkadot/node/metrics/Cargo.toml
@@ -8,7 +8,7 @@ description = "Subsystem metric helpers"
 [dependencies]
 futures = "0.3.21"
 futures-timer = "3.0.2"
-tracing = "0.1.32"
+gum = { package = "tracing-gum", path = "../gum" }
 
 metered-channel = { path = "../metered-channel" }
 
diff --git a/polkadot/node/metrics/src/runtime/mod.rs b/polkadot/node/metrics/src/runtime/mod.rs
index 0b862445ac24ed7b7b8987694f4630c924b62575..2497d63703c86b993d4c11f42c9b9f71994cbe3c 100644
--- a/polkadot/node/metrics/src/runtime/mod.rs
+++ b/polkadot/node/metrics/src/runtime/mod.rs
@@ -106,11 +106,7 @@ impl RuntimeMetricsProvider {
 		F: FnOnce(MutexGuard<'_, HashMap<String, Counter<U64>>>) -> Result<(), PrometheusError>,
 	{
 		let _ = self.1.counters.lock().map(do_something).or_else(|error| {
-			tracing::error!(
-				target: LOG_TARGET,
-				"Cannot acquire the counter hashmap lock: {:?}",
-				error
-			);
+			gum::error!(target: LOG_TARGET, "Cannot acquire the counter hashmap lock: {:?}", error);
 			Err(error)
 		});
 	}
@@ -120,7 +116,7 @@ impl RuntimeMetricsProvider {
 		F: FnOnce(MutexGuard<'_, HashMap<String, CounterVec<U64>>>) -> Result<(), PrometheusError>,
 	{
 		let _ = self.1.counter_vecs.lock().map(do_something).or_else(|error| {
-			tracing::error!(
+			gum::error!(
 				target: LOG_TARGET,
 				"Cannot acquire the countervec hashmap lock: {:?}",
 				error
@@ -154,7 +150,7 @@ impl sc_tracing::TraceHandler for RuntimeMetricsProvider {
 					self.parse_metric_update(update_op);
 				},
 				Err(e) => {
-					tracing::error!(target: LOG_TARGET, "TraceEvent decode failed: {:?}", e);
+					gum::error!(target: LOG_TARGET, "TraceEvent decode failed: {:?}", e);
 				},
 			}
 		}
@@ -195,7 +191,7 @@ impl RuntimeMetricsProvider {
 pub fn logger_hook() -> impl FnOnce(&mut sc_cli::LoggerBuilder, &sc_service::Configuration) -> () {
 	|logger_builder, config| {
 		if config.prometheus_registry().is_none() {
-			tracing::debug!(target: LOG_TARGET, "Prometheus registry is not configured.",);
+			gum::debug!(target: LOG_TARGET, "Prometheus registry is not configured.",);
 			return
 		}
 		let registry = config.prometheus_registry().cloned().unwrap();
diff --git a/polkadot/node/network/approval-distribution/Cargo.toml b/polkadot/node/network/approval-distribution/Cargo.toml
index 4b7844d3525844d998915205c990c3cf4e3b66f0..749e47227a126446cf4c2ef4a53e53ee1ddfe98b 100644
--- a/polkadot/node/network/approval-distribution/Cargo.toml
+++ b/polkadot/node/network/approval-distribution/Cargo.toml
@@ -12,7 +12,7 @@ polkadot-node-subsystem-util = { path = "../../subsystem-util" }
 polkadot-primitives = { path = "../../../primitives" }
 
 futures = "0.3.21"
-tracing = "0.1.32"
+gum = { package = "tracing-gum", path = "../../gum" }
 
 [dev-dependencies]
 sp-core = { git = "https://github.com/paritytech/substrate", branch = "master", features = ["std"] }
diff --git a/polkadot/node/network/approval-distribution/src/lib.rs b/polkadot/node/network/approval-distribution/src/lib.rs
index a2740326fbc58261828f17f3ffdb3859350dab06..bf5835016ddb35e6496a1a5c96ff0dc94fab3342 100644
--- a/polkadot/node/network/approval-distribution/src/lib.rs
+++ b/polkadot/node/network/approval-distribution/src/lib.rs
@@ -206,11 +206,11 @@ impl State {
 		match event {
 			NetworkBridgeEvent::PeerConnected(peer_id, role, _) => {
 				// insert a blank view if none already present
-				tracing::trace!(target: LOG_TARGET, ?peer_id, ?role, "Peer connected");
+				gum::trace!(target: LOG_TARGET, ?peer_id, ?role, "Peer connected");
 				self.peer_views.entry(peer_id).or_default();
 			},
 			NetworkBridgeEvent::PeerDisconnected(peer_id) => {
-				tracing::trace!(target: LOG_TARGET, ?peer_id, "Peer disconnected");
+				gum::trace!(target: LOG_TARGET, ?peer_id, "Peer disconnected");
 				self.peer_views.remove(&peer_id);
 				self.blocks.iter_mut().for_each(|(_hash, entry)| {
 					entry.known_by.remove(&peer_id);
@@ -230,7 +230,7 @@ impl State {
 				self.handle_peer_view_change(ctx, metrics, peer_id, view).await;
 			},
 			NetworkBridgeEvent::OurViewChange(view) => {
-				tracing::trace!(target: LOG_TARGET, ?view, "Own view change");
+				gum::trace!(target: LOG_TARGET, ?view, "Own view change");
 				for head in view.iter() {
 					if !self.blocks.contains_key(head) {
 						self.pending_known.entry(*head).or_default();
@@ -240,7 +240,7 @@ impl State {
 				self.pending_known.retain(|h, _| {
 					let live = view.contains(h);
 					if !live {
-						tracing::trace!(
+						gum::trace!(
 							target: LOG_TARGET,
 							block_hash = ?h,
 							"Cleaning up stale pending messages",
@@ -287,7 +287,7 @@ impl State {
 			}
 		}
 
-		tracing::debug!(
+		gum::debug!(
 			target: LOG_TARGET,
 			"Got new blocks {:?}",
 			metas.iter().map(|m| (m.hash, m.number)).collect::<Vec<_>>(),
@@ -318,7 +318,7 @@ impl State {
 			let to_import = pending_now_known
 				.into_iter()
 				.inspect(|h| {
-					tracing::trace!(
+					gum::trace!(
 						target: LOG_TARGET,
 						block_hash = ?h,
 						"Extracting pending messages for new block"
@@ -329,7 +329,7 @@ impl State {
 				.collect::<Vec<_>>();
 
 			if !to_import.is_empty() {
-				tracing::debug!(
+				gum::debug!(
 					target: LOG_TARGET,
 					num = to_import.len(),
 					"Processing pending assignment/approvals",
@@ -374,7 +374,7 @@ impl State {
 	) {
 		match msg {
 			protocol_v1::ApprovalDistributionMessage::Assignments(assignments) => {
-				tracing::trace!(
+				gum::trace!(
 					target: LOG_TARGET,
 					peer_id = %peer_id,
 					num = assignments.len(),
@@ -388,7 +388,7 @@ impl State {
 							assignment.validator,
 						);
 
-						tracing::trace!(
+						gum::trace!(
 							target: LOG_TARGET,
 							%peer_id,
 							?fingerprint,
@@ -414,7 +414,7 @@ impl State {
 				}
 			},
 			protocol_v1::ApprovalDistributionMessage::Approvals(approvals) => {
-				tracing::trace!(
+				gum::trace!(
 					target: LOG_TARGET,
 					peer_id = %peer_id,
 					num = approvals.len(),
@@ -428,7 +428,7 @@ impl State {
 							approval_vote.validator,
 						);
 
-						tracing::trace!(
+						gum::trace!(
 							target: LOG_TARGET,
 							%peer_id,
 							?fingerprint,
@@ -460,7 +460,7 @@ impl State {
 		peer_id: PeerId,
 		view: View,
 	) {
-		tracing::trace!(target: LOG_TARGET, ?view, "Peer view change");
+		gum::trace!(target: LOG_TARGET, ?view, "Peer view change");
 		let finalized_number = view.finalized_number;
 		let old_view = self.peer_views.insert(peer_id.clone(), view.clone());
 		let old_finalized_number = old_view.map(|v| v.finalized_number).unwrap_or(0);
@@ -525,7 +525,7 @@ impl State {
 			Some(entry) => entry,
 			None => {
 				if let Some(peer_id) = source.peer_id() {
-					tracing::trace!(
+					gum::trace!(
 						target: LOG_TARGET,
 						?peer_id,
 						?block_hash,
@@ -549,7 +549,7 @@ impl State {
 					let peer_knowledge = peer_knowledge.get_mut();
 					if peer_knowledge.contains(&fingerprint) {
 						if peer_knowledge.received.contains(&fingerprint) {
-							tracing::debug!(
+							gum::debug!(
 								target: LOG_TARGET,
 								?peer_id,
 								?fingerprint,
@@ -562,7 +562,7 @@ impl State {
 					}
 				},
 				hash_map::Entry::Vacant(_) => {
-					tracing::debug!(
+					gum::debug!(
 						target: LOG_TARGET,
 						?peer_id,
 						?fingerprint,
@@ -576,7 +576,7 @@ impl State {
 			if entry.knowledge.known_messages.contains(&fingerprint) {
 				modify_reputation(ctx, peer_id.clone(), BENEFIT_VALID_MESSAGE).await;
 				if let Some(peer_knowledge) = entry.known_by.get_mut(&peer_id) {
-					tracing::trace!(target: LOG_TARGET, ?peer_id, ?fingerprint, "Known assignment");
+					gum::trace!(target: LOG_TARGET, ?peer_id, ?fingerprint, "Known assignment");
 					peer_knowledge.received.insert(fingerprint.clone());
 				}
 				return
@@ -595,19 +595,13 @@ impl State {
 			let result = match rx.await {
 				Ok(result) => result,
 				Err(_) => {
-					tracing::debug!(target: LOG_TARGET, "The approval voting subsystem is down");
+					gum::debug!(target: LOG_TARGET, "The approval voting subsystem is down");
 					return
 				},
 			};
 			drop(timer);
 
-			tracing::trace!(
-				target: LOG_TARGET,
-				?source,
-				?fingerprint,
-				?result,
-				"Checked assignment",
-			);
+			gum::trace!(target: LOG_TARGET, ?source, ?fingerprint, ?result, "Checked assignment",);
 			match result {
 				AssignmentCheckResult::Accepted => {
 					modify_reputation(ctx, peer_id.clone(), BENEFIT_VALID_MESSAGE_FIRST).await;
@@ -623,7 +617,7 @@ impl State {
 					if let Some(peer_knowledge) = entry.known_by.get_mut(&peer_id) {
 						peer_knowledge.received.insert(fingerprint);
 					}
-					tracing::debug!(
+					gum::debug!(
 						target: LOG_TARGET,
 						?peer_id,
 						"Got an `AcceptedDuplicate` assignment",
@@ -631,7 +625,7 @@ impl State {
 					return
 				},
 				AssignmentCheckResult::TooFarInFuture => {
-					tracing::debug!(
+					gum::debug!(
 						target: LOG_TARGET,
 						?peer_id,
 						"Got an assignment too far in the future",
@@ -640,7 +634,7 @@ impl State {
 					return
 				},
 				AssignmentCheckResult::Bad(error) => {
-					tracing::info!(
+					gum::info!(
 						target: LOG_TARGET,
 						?peer_id,
 						%error,
@@ -653,18 +647,14 @@ impl State {
 		} else {
 			if !entry.knowledge.known_messages.insert(fingerprint.clone()) {
 				// if we already imported an assignment, there is no need to distribute it again
-				tracing::warn!(
+				gum::warn!(
 					target: LOG_TARGET,
 					?fingerprint,
 					"Importing locally an already known assignment",
 				);
 				return
 			} else {
-				tracing::debug!(
-					target: LOG_TARGET,
-					?fingerprint,
-					"Importing locally a new assignment",
-				);
+				gum::debug!(target: LOG_TARGET, ?fingerprint, "Importing locally a new assignment",);
 			}
 		}
 
@@ -682,7 +672,7 @@ impl State {
 				});
 			},
 			None => {
-				tracing::warn!(
+				gum::warn!(
 					target: LOG_TARGET,
 					hash = ?block_hash,
 					?claimed_candidate_index,
@@ -716,7 +706,7 @@ impl State {
 		}
 
 		if !peers.is_empty() {
-			tracing::trace!(
+			gum::trace!(
 				target: LOG_TARGET,
 				?block_hash,
 				?claimed_candidate_index,
@@ -769,7 +759,7 @@ impl State {
 			);
 
 			if !entry.knowledge.known_messages.contains(&assignment_fingerprint) {
-				tracing::debug!(
+				gum::debug!(
 					target: LOG_TARGET,
 					?peer_id,
 					?fingerprint,
@@ -785,7 +775,7 @@ impl State {
 					let peer_knowledge = knowledge.get_mut();
 					if peer_knowledge.contains(&fingerprint) {
 						if peer_knowledge.received.contains(&fingerprint) {
-							tracing::debug!(
+							gum::debug!(
 								target: LOG_TARGET,
 								?peer_id,
 								?fingerprint,
@@ -799,7 +789,7 @@ impl State {
 					}
 				},
 				hash_map::Entry::Vacant(_) => {
-					tracing::debug!(
+					gum::debug!(
 						target: LOG_TARGET,
 						?peer_id,
 						?fingerprint,
@@ -811,7 +801,7 @@ impl State {
 
 			// if the approval is known to be valid, reward the peer
 			if entry.knowledge.contains(&fingerprint) {
-				tracing::trace!(target: LOG_TARGET, ?peer_id, ?fingerprint, "Known approval");
+				gum::trace!(target: LOG_TARGET, ?peer_id, ?fingerprint, "Known approval");
 				modify_reputation(ctx, peer_id.clone(), BENEFIT_VALID_MESSAGE).await;
 				if let Some(peer_knowledge) = entry.known_by.get_mut(&peer_id) {
 					peer_knowledge.received.insert(fingerprint.clone());
@@ -828,19 +818,13 @@ impl State {
 			let result = match rx.await {
 				Ok(result) => result,
 				Err(_) => {
-					tracing::debug!(target: LOG_TARGET, "The approval voting subsystem is down");
+					gum::debug!(target: LOG_TARGET, "The approval voting subsystem is down");
 					return
 				},
 			};
 			drop(timer);
 
-			tracing::trace!(
-				target: LOG_TARGET,
-				?peer_id,
-				?fingerprint,
-				?result,
-				"Checked approval",
-			);
+			gum::trace!(target: LOG_TARGET, ?peer_id, ?fingerprint, ?result, "Checked approval",);
 			match result {
 				ApprovalCheckResult::Accepted => {
 					modify_reputation(ctx, peer_id.clone(), BENEFIT_VALID_MESSAGE_FIRST).await;
@@ -852,7 +836,7 @@ impl State {
 				},
 				ApprovalCheckResult::Bad(error) => {
 					modify_reputation(ctx, peer_id, COST_INVALID_MESSAGE).await;
-					tracing::info!(
+					gum::info!(
 						target: LOG_TARGET,
 						?peer_id,
 						%error,
@@ -864,18 +848,14 @@ impl State {
 		} else {
 			if !entry.knowledge.insert(fingerprint.clone()) {
 				// if we already imported an approval, there is no need to distribute it again
-				tracing::warn!(
+				gum::warn!(
 					target: LOG_TARGET,
 					?fingerprint,
 					"Importing locally an already known approval",
 				);
 				return
 			} else {
-				tracing::debug!(
-					target: LOG_TARGET,
-					?fingerprint,
-					"Importing locally a new approval",
-				);
+				gum::debug!(target: LOG_TARGET, ?fingerprint, "Importing locally a new approval",);
 			}
 		}
 
@@ -902,7 +882,7 @@ impl State {
 					},
 					None => {
 						// this would indicate a bug in approval-voting
-						tracing::warn!(
+						gum::warn!(
 							target: LOG_TARGET,
 							hash = ?block_hash,
 							?candidate_index,
@@ -913,7 +893,7 @@ impl State {
 				}
 			},
 			None => {
-				tracing::warn!(
+				gum::warn!(
 					target: LOG_TARGET,
 					hash = ?block_hash,
 					?candidate_index,
@@ -948,7 +928,7 @@ impl State {
 
 		let approvals = vec![vote];
 		if !peers.is_empty() {
-			tracing::trace!(
+			gum::trace!(
 				target: LOG_TARGET,
 				?block_hash,
 				?candidate_index,
@@ -1004,7 +984,7 @@ impl State {
 						// This safeguard is needed primarily in case of long finality stalls
 						// so we don't waste time in a loop for every peer.
 						if missing.is_empty() {
-							tracing::trace!(
+							gum::trace!(
 								target: LOG_TARGET,
 								?block,
 								?peer_id,
@@ -1036,7 +1016,7 @@ impl State {
 				util::MIN_GOSSIP_PEERS,
 			);
 		if !lucky {
-			tracing::trace!(target: LOG_TARGET, ?peer_id, "Unlucky peer");
+			gum::trace!(target: LOG_TARGET, ?peer_id, "Unlucky peer");
 			return
 		}
 
@@ -1062,7 +1042,7 @@ impl State {
 				None => continue, // should be unreachable
 			};
 
-			tracing::trace!(
+			gum::trace!(
 				target: LOG_TARGET,
 				"Sending all assignments and approvals in block {} to peer {}",
 				block,
@@ -1083,7 +1063,7 @@ impl State {
 					match approval_state {
 						ApprovalState::Assigned(cert) => {
 							if !missing.contains(&assignment_fingerprint) {
-								tracing::trace!(
+								gum::trace!(
 									target: LOG_TARGET,
 									?block,
 									?validator_index,
@@ -1123,7 +1103,7 @@ impl State {
 									candidate_index.clone(),
 								));
 							} else {
-								tracing::trace!(
+								gum::trace!(
 									target: LOG_TARGET,
 									?block,
 									?validator_index,
@@ -1142,7 +1122,7 @@ impl State {
 									signature: signature.clone(),
 								});
 							} else {
-								tracing::trace!(
+								gum::trace!(
 									target: LOG_TARGET,
 									?block,
 									?validator_index,
@@ -1157,7 +1137,7 @@ impl State {
 		}
 
 		if !assignments.is_empty() {
-			tracing::trace!(
+			gum::trace!(
 				target: LOG_TARGET,
 				num = assignments.len(),
 				?num_blocks,
@@ -1175,7 +1155,7 @@ impl State {
 		}
 
 		if !approvals.is_empty() {
-			tracing::trace!(
+			gum::trace!(
 				target: LOG_TARGET,
 				num = approvals.len(),
 				?num_blocks,
@@ -1201,7 +1181,7 @@ async fn modify_reputation(
 	peer_id: PeerId,
 	rep: Rep,
 ) {
-	tracing::trace!(
+	gum::trace!(
 		target: LOG_TARGET,
 		reputation = ?rep,
 		?peer_id,
@@ -1236,7 +1216,7 @@ impl ApprovalDistribution {
 			let message = match ctx.recv().await {
 				Ok(message) => message,
 				Err(e) => {
-					tracing::debug!(target: LOG_TARGET, err = ?e, "Failed to receive a message from Overseer, exiting");
+					gum::debug!(target: LOG_TARGET, err = ?e, "Failed to receive a message from Overseer, exiting");
 					return
 				},
 			};
@@ -1249,13 +1229,13 @@ impl ApprovalDistribution {
 				FromOverseer::Communication {
 					msg: ApprovalDistributionMessage::NewBlocks(metas),
 				} => {
-					tracing::debug!(target: LOG_TARGET, "Processing NewBlocks");
+					gum::debug!(target: LOG_TARGET, "Processing NewBlocks");
 					state.handle_new_blocks(&mut ctx, &self.metrics, metas).await;
 				},
 				FromOverseer::Communication {
 					msg: ApprovalDistributionMessage::DistributeAssignment(cert, candidate_index),
 				} => {
-					tracing::debug!(
+					gum::debug!(
 						target: LOG_TARGET,
 						"Distributing our assignment on candidate (block={}, index={})",
 						cert.block_hash,
@@ -1275,7 +1255,7 @@ impl ApprovalDistribution {
 				FromOverseer::Communication {
 					msg: ApprovalDistributionMessage::DistributeApproval(vote),
 				} => {
-					tracing::debug!(
+					gum::debug!(
 						target: LOG_TARGET,
 						"Distributing our approval vote on candidate (block={}, index={})",
 						vote.block_hash,
@@ -1294,11 +1274,11 @@ impl ApprovalDistribution {
 				FromOverseer::Signal(OverseerSignal::ActiveLeaves(ActiveLeavesUpdate {
 					..
 				})) => {
-					tracing::trace!(target: LOG_TARGET, "active leaves signal (ignored)");
+					gum::trace!(target: LOG_TARGET, "active leaves signal (ignored)");
 					// handled by NewBlocks
 				},
 				FromOverseer::Signal(OverseerSignal::BlockFinalized(_hash, number)) => {
-					tracing::trace!(target: LOG_TARGET, number = %number, "finalized signal");
+					gum::trace!(target: LOG_TARGET, number = %number, "finalized signal");
 					state.handle_block_finalized(number);
 				},
 				FromOverseer::Signal(OverseerSignal::Conclude) => return,
diff --git a/polkadot/node/network/approval-distribution/src/tests.rs b/polkadot/node/network/approval-distribution/src/tests.rs
index 062385da5e24f30e5d78de75ac5d72027e17830e..6f08b2a8523acf1768ccadb04b52aba3769652a8 100644
--- a/polkadot/node/network/approval-distribution/src/tests.rs
+++ b/polkadot/node/network/approval-distribution/src/tests.rs
@@ -72,7 +72,7 @@ fn test_harness<T: Future<Output = VirtualOverseer>>(
 const TIMEOUT: Duration = Duration::from_millis(100);
 
 async fn overseer_send(overseer: &mut VirtualOverseer, msg: ApprovalDistributionMessage) {
-	tracing::trace!(msg = ?msg, "Sending message");
+	gum::trace!(msg = ?msg, "Sending message");
 	overseer
 		.send(FromOverseer::Communication { msg })
 		.timeout(TIMEOUT)
@@ -81,7 +81,7 @@ async fn overseer_send(overseer: &mut VirtualOverseer, msg: ApprovalDistribution
 }
 
 async fn overseer_signal_block_finalized(overseer: &mut VirtualOverseer, number: BlockNumber) {
-	tracing::trace!(?number, "Sending a finalized signal");
+	gum::trace!(?number, "Sending a finalized signal");
 	// we don't care about the block hash
 	overseer
 		.send(FromOverseer::Signal(OverseerSignal::BlockFinalized(Hash::zero(), number)))
@@ -91,10 +91,10 @@ async fn overseer_signal_block_finalized(overseer: &mut VirtualOverseer, number:
 }
 
 async fn overseer_recv(overseer: &mut VirtualOverseer) -> AllMessages {
-	tracing::trace!("Waiting for a message");
+	gum::trace!("Waiting for a message");
 	let msg = overseer.recv().timeout(TIMEOUT).await.expect("msg recv timeout");
 
-	tracing::trace!(msg = ?msg, "Received message");
+	gum::trace!(msg = ?msg, "Received message");
 
 	msg
 }
diff --git a/polkadot/node/network/availability-distribution/Cargo.toml b/polkadot/node/network/availability-distribution/Cargo.toml
index 16be328a84eeebbdfb68306bd98fff90925dc645..f6875b748e8e34af8d7b79a11a50d364f0eb0139 100644
--- a/polkadot/node/network/availability-distribution/Cargo.toml
+++ b/polkadot/node/network/availability-distribution/Cargo.toml
@@ -6,7 +6,7 @@ edition = "2021"
 
 [dependencies]
 futures = "0.3.21"
-tracing = "0.1.32"
+gum = { package = "tracing-gum", path = "../../gum" }
 parity-scale-codec = { version = "3.1.0", features = ["std"] }
 polkadot-primitives = { path = "../../../primitives" }
 polkadot-erasure-coding = { path = "../../../erasure-coding" }
diff --git a/polkadot/node/network/availability-distribution/src/error.rs b/polkadot/node/network/availability-distribution/src/error.rs
index a336230fa7a6ff6ed7be6f5abb0b851bbafb824a..8f6fd9987748cbe42a95593acce4ac5ce580a0fb 100644
--- a/polkadot/node/network/availability-distribution/src/error.rs
+++ b/polkadot/node/network/availability-distribution/src/error.rs
@@ -100,12 +100,11 @@ pub fn log_error(result: Result<()>, ctx: &'static str) -> std::result::Result<(
 				JfyiError::InvalidValidatorIndex |
 				JfyiError::NoSuchCachedSession { .. } |
 				JfyiError::QueryAvailableDataResponseChannel(_) |
-				JfyiError::QueryChunkResponseChannel(_) =>
-					tracing::warn!(target: LOG_TARGET, error = %jfyi, ctx),
+				JfyiError::QueryChunkResponseChannel(_) => gum::warn!(target: LOG_TARGET, error = %jfyi, ctx),
 				JfyiError::FetchPoV(_) |
 				JfyiError::SendResponse |
 				JfyiError::NoSuchPoV |
-				JfyiError::Runtime(_) => tracing::debug!(target: LOG_TARGET, error = ?jfyi, ctx),
+				JfyiError::Runtime(_) => gum::debug!(target: LOG_TARGET, error = ?jfyi, ctx),
 			}
 			Ok(())
 		},
diff --git a/polkadot/node/network/availability-distribution/src/pov_requester/mod.rs b/polkadot/node/network/availability-distribution/src/pov_requester/mod.rs
index d46e58f1f271f407bacb8cd42f599a2498951d82..bc9f60a4fd621969902a9d6b7f4581b0aa52ecdc 100644
--- a/polkadot/node/network/availability-distribution/src/pov_requester/mod.rs
+++ b/polkadot/node/network/availability-distribution/src/pov_requester/mod.rs
@@ -91,7 +91,7 @@ async fn fetch_pov_job(
 	metrics: Metrics,
 ) {
 	if let Err(err) = do_fetch_pov(pov_hash, pending_response, span, tx, metrics).await {
-		tracing::warn!(target: LOG_TARGET, ?err, ?pov_hash, ?authority_id, "fetch_pov_job");
+		gum::warn!(target: LOG_TARGET, ?err, ?pov_hash, ?authority_id, "fetch_pov_job");
 	}
 }
 
@@ -207,7 +207,7 @@ mod tests {
 							.unwrap();
 						break
 					},
-					msg => tracing::debug!(target: LOG_TARGET, msg = ?msg, "Received msg"),
+					msg => gum::debug!(target: LOG_TARGET, msg = ?msg, "Received msg"),
 				}
 			}
 			if pov.hash() == pov_hash {
diff --git a/polkadot/node/network/availability-distribution/src/requester/fetch_task/mod.rs b/polkadot/node/network/availability-distribution/src/requester/fetch_task/mod.rs
index f5b57bdc81907fe6ab4bbfdab998a59953a0a186..8f1a034cc5521ab7d4fd9616df75eede2b909232 100644
--- a/polkadot/node/network/availability-distribution/src/requester/fetch_task/mod.rs
+++ b/polkadot/node/network/availability-distribution/src/requester/fetch_task/mod.rs
@@ -271,7 +271,7 @@ impl RunningTask {
 			let resp = match self.do_request(&validator).await {
 				Ok(resp) => resp,
 				Err(TaskError::ShuttingDown) => {
-					tracing::info!(
+					gum::info!(
 						target: LOG_TARGET,
 						"Node seems to be shutting down, canceling fetch task"
 					);
@@ -286,7 +286,7 @@ impl RunningTask {
 			let chunk = match resp {
 				ChunkFetchingResponse::Chunk(resp) => resp.recombine_into_chunk(&self.request),
 				ChunkFetchingResponse::NoSuchChunk => {
-					tracing::debug!(
+					gum::debug!(
 						target: LOG_TARGET,
 						validator = ?validator,
 						"Validator did not have our chunk"
@@ -337,7 +337,7 @@ impl RunningTask {
 		match response_recv.await {
 			Ok(resp) => Ok(resp),
 			Err(RequestError::InvalidResponse(err)) => {
-				tracing::warn!(
+				gum::warn!(
 					target: LOG_TARGET,
 					origin= ?validator,
 					err= ?err,
@@ -346,7 +346,7 @@ impl RunningTask {
 				Err(TaskError::PeerError)
 			},
 			Err(RequestError::NetworkError(err)) => {
-				tracing::debug!(
+				gum::debug!(
 					target: LOG_TARGET,
 					origin= ?validator,
 					err= ?err,
@@ -355,7 +355,7 @@ impl RunningTask {
 				Err(TaskError::PeerError)
 			},
 			Err(RequestError::Canceled(oneshot::Canceled)) => {
-				tracing::debug!(target: LOG_TARGET,
+				gum::debug!(target: LOG_TARGET,
 							   origin= ?validator,
 							   "Erasure chunk request got canceled");
 				Err(TaskError::PeerError)
@@ -368,7 +368,7 @@ impl RunningTask {
 			match branch_hash(&self.erasure_root, chunk.proof(), chunk.index.0 as usize) {
 				Ok(hash) => hash,
 				Err(e) => {
-					tracing::warn!(
+					gum::warn!(
 						target: LOG_TARGET,
 						candidate_hash = ?self.request.candidate_hash,
 						origin = ?validator,
@@ -380,7 +380,7 @@ impl RunningTask {
 			};
 		let erasure_chunk_hash = BlakeTwo256::hash(&chunk.chunk);
 		if anticipated_hash != erasure_chunk_hash {
-			tracing::warn!(target: LOG_TARGET, origin = ?validator,  "Received chunk does not match merkle tree");
+			gum::warn!(target: LOG_TARGET, origin = ?validator,  "Received chunk does not match merkle tree");
 			return false
 		}
 		true
@@ -400,11 +400,11 @@ impl RunningTask {
 			)))
 			.await;
 		if let Err(err) = r {
-			tracing::error!(target: LOG_TARGET, err= ?err, "Storing erasure chunk failed, system shutting down?");
+			gum::error!(target: LOG_TARGET, err= ?err, "Storing erasure chunk failed, system shutting down?");
 		}
 
 		if let Err(oneshot::Canceled) = rx.await {
-			tracing::error!(target: LOG_TARGET, "Storing erasure chunk failed");
+			gum::error!(target: LOG_TARGET, "Storing erasure chunk failed");
 		}
 	}
 
@@ -420,7 +420,7 @@ impl RunningTask {
 			})
 		};
 		if let Err(err) = self.sender.send(FromFetchTask::Concluded(payload)).await {
-			tracing::warn!(
+			gum::warn!(
 				target: LOG_TARGET,
 				err= ?err,
 				"Sending concluded message for task failed"
@@ -431,7 +431,7 @@ impl RunningTask {
 	async fn conclude_fail(&mut self) {
 		if let Err(err) = self.sender.send(FromFetchTask::Failed(self.request.candidate_hash)).await
 		{
-			tracing::warn!(target: LOG_TARGET, ?err, "Sending `Failed` message for task failed");
+			gum::warn!(target: LOG_TARGET, ?err, "Sending `Failed` message for task failed");
 		}
 	}
 }
diff --git a/polkadot/node/network/availability-distribution/src/requester/fetch_task/tests.rs b/polkadot/node/network/availability-distribution/src/requester/fetch_task/tests.rs
index 7f667bd8872dba264ee052fd6041584114908e23..1b035fe8dcb596318077068527b3e15f72993437 100644
--- a/polkadot/node/network/availability-distribution/src/requester/fetch_task/tests.rs
+++ b/polkadot/node/network/availability-distribution/src/requester/fetch_task/tests.rs
@@ -262,7 +262,7 @@ impl TestRun {
 				return true
 			},
 			_ => {
-				tracing::debug!(target: LOG_TARGET, "Unexpected message");
+				gum::debug!(target: LOG_TARGET, "Unexpected message");
 				return false
 			},
 		}
diff --git a/polkadot/node/network/availability-distribution/src/requester/mod.rs b/polkadot/node/network/availability-distribution/src/requester/mod.rs
index f288f1ac9fe9cc0b3a633780b9e55f7c1b6259bf..95bae7c618dc6f77d7e0877e61d59c5a49b3ff9c 100644
--- a/polkadot/node/network/availability-distribution/src/requester/mod.rs
+++ b/polkadot/node/network/availability-distribution/src/requester/mod.rs
@@ -103,7 +103,7 @@ impl Requester {
 	where
 		Context: SubsystemContext,
 	{
-		tracing::trace!(target: LOG_TARGET, ?update, "Update fetching heads");
+		gum::trace!(target: LOG_TARGET, ?update, "Update fetching heads");
 		let ActiveLeavesUpdate { activated, deactivated } = update;
 		// Stale leaves happen after a reversion - we don't want to re-run availability there.
 		if let Some(leaf) = activated.filter(|leaf| leaf.status == LeafStatus::Fresh) {
@@ -140,7 +140,7 @@ impl Requester {
 		// Also spawn or bump tasks for candidates in ancestry in the same session.
 		for hash in std::iter::once(leaf).chain(ancestors_in_session) {
 			let cores = get_occupied_cores(ctx, hash).await?;
-			tracing::trace!(
+			gum::trace!(
 				target: LOG_TARGET,
 				occupied_cores = ?cores,
 				"Query occupied core"
@@ -212,7 +212,7 @@ impl Requester {
 						)
 						.await
 						.map_err(|err| {
-							tracing::warn!(
+							gum::warn!(
 								target: LOG_TARGET,
 								error = ?err,
 								"Failed to spawn a fetch task"
diff --git a/polkadot/node/network/availability-distribution/src/requester/session_cache.rs b/polkadot/node/network/availability-distribution/src/requester/session_cache.rs
index 38c3c3fa83617f28afb34d025d8a5cfd30402ff9..78b4da36058dd17eb1c63c27f6d3c7ba1b7d9f81 100644
--- a/polkadot/node/network/availability-distribution/src/requester/session_cache.rs
+++ b/polkadot/node/network/availability-distribution/src/requester/session_cache.rs
@@ -107,16 +107,16 @@ impl SessionCache {
 		F: FnOnce(&SessionInfo) -> R,
 	{
 		if let Some(o_info) = self.session_info_cache.get(&session_index) {
-			tracing::trace!(target: LOG_TARGET, session_index, "Got session from lru");
+			gum::trace!(target: LOG_TARGET, session_index, "Got session from lru");
 			return Ok(Some(with_info(o_info)))
 		}
 
 		if let Some(info) =
 			self.query_info_from_runtime(ctx, runtime, parent, session_index).await?
 		{
-			tracing::trace!(target: LOG_TARGET, session_index, "Calling `with_info`");
+			gum::trace!(target: LOG_TARGET, session_index, "Calling `with_info`");
 			let r = with_info(&info);
-			tracing::trace!(target: LOG_TARGET, session_index, "Storing session info in lru!");
+			gum::trace!(target: LOG_TARGET, session_index, "Storing session info in lru!");
 			self.session_info_cache.put(session_index, info);
 			Ok(Some(r))
 		} else {
@@ -130,7 +130,7 @@ impl SessionCache {
 	/// subsystem on this.
 	pub fn report_bad_log(&mut self, report: BadValidators) {
 		if let Err(err) = self.report_bad(report) {
-			tracing::warn!(
+			gum::warn!(
 				target: LOG_TARGET,
 				err = ?err,
 				"Reporting bad validators failed with error"
diff --git a/polkadot/node/network/availability-distribution/src/responder.rs b/polkadot/node/network/availability-distribution/src/responder.rs
index 467d090ac5fa3d756d39e24bed9884df6387c5a9..f3691b5d0266404fba92e185eb616996d155be08 100644
--- a/polkadot/node/network/availability-distribution/src/responder.rs
+++ b/polkadot/node/network/availability-distribution/src/responder.rs
@@ -51,7 +51,7 @@ pub async fn run_pov_receiver<Sender>(
 				answer_pov_request_log(&mut sender, msg, &metrics).await;
 			},
 			Err(fatal) => {
-				tracing::debug!(
+				gum::debug!(
 					target: LOG_TARGET,
 					error = ?fatal,
 					"Shutting down POV receiver."
@@ -59,7 +59,7 @@ pub async fn run_pov_receiver<Sender>(
 				return
 			},
 			Ok(Err(jfyi)) => {
-				tracing::debug!(target: LOG_TARGET, error = ?jfyi, "Error decoding incoming PoV request.");
+				gum::debug!(target: LOG_TARGET, error = ?jfyi, "Error decoding incoming PoV request.");
 			},
 		}
 	}
@@ -79,7 +79,7 @@ pub async fn run_chunk_receiver<Sender>(
 				answer_chunk_request_log(&mut sender, msg, &metrics).await;
 			},
 			Err(fatal) => {
-				tracing::debug!(
+				gum::debug!(
 					target: LOG_TARGET,
 					error = ?fatal,
 					"Shutting down chunk receiver."
@@ -87,7 +87,7 @@ pub async fn run_chunk_receiver<Sender>(
 				return
 			},
 			Ok(Err(jfyi)) => {
-				tracing::debug!(
+				gum::debug!(
 					target: LOG_TARGET,
 					error = ?jfyi,
 					"Error decoding incoming chunk request."
@@ -111,7 +111,7 @@ pub async fn answer_pov_request_log<Sender>(
 	match res {
 		Ok(result) => metrics.on_served_pov(if result { SUCCEEDED } else { NOT_FOUND }),
 		Err(err) => {
-			tracing::warn!(
+			gum::warn!(
 				target: LOG_TARGET,
 				err= ?err,
 				"Serving PoV failed with error"
@@ -136,7 +136,7 @@ where
 	match res {
 		Ok(result) => metrics.on_served_chunk(if result { SUCCEEDED } else { NOT_FOUND }),
 		Err(err) => {
-			tracing::warn!(
+			gum::warn!(
 				target: LOG_TARGET,
 				err= ?err,
 				"Serving chunk failed with error"
@@ -192,7 +192,7 @@ where
 
 	let result = chunk.is_some();
 
-	tracing::trace!(
+	gum::trace!(
 		target: LOG_TARGET,
 		hash = ?req.payload.candidate_hash,
 		index = ?req.payload.index,
@@ -227,7 +227,7 @@ where
 		.await;
 
 	let result = rx.await.map_err(|e| {
-		tracing::trace!(
+		gum::trace!(
 			target: LOG_TARGET,
 			?validator_index,
 			?candidate_hash,
diff --git a/polkadot/node/network/availability-distribution/src/tests/state.rs b/polkadot/node/network/availability-distribution/src/tests/state.rs
index 651e85b9d46b3f16ec12fcebd03556a328e547ec..ced1fe37a49c95604008aaf2112dcb611ac6dd12 100644
--- a/polkadot/node/network/availability-distribution/src/tests/state.rs
+++ b/polkadot/node/network/availability-distribution/src/tests/state.rs
@@ -211,7 +211,7 @@ impl TestState {
 		);
 
 		while remaining_stores > 0 {
-			tracing::trace!(target: LOG_TARGET, remaining_stores, "Stores left to go");
+			gum::trace!(target: LOG_TARGET, remaining_stores, "Stores left to go");
 			let msg = overseer_recv(&mut rx).await;
 			match msg {
 				AllMessages::NetworkBridge(NetworkBridgeMessage::SendRequests(
@@ -255,7 +255,7 @@ impl TestState {
 						"Only valid chunks should ever get stored."
 					);
 					tx.send(Ok(())).expect("Receiver is expected to be alive");
-					tracing::trace!(target: LOG_TARGET, "'Stored' fetched chunk.");
+					gum::trace!(target: LOG_TARGET, "'Stored' fetched chunk.");
 					remaining_stores -= 1;
 				},
 				AllMessages::RuntimeApi(RuntimeApiMessage::Request(hash, req)) => {
@@ -269,7 +269,7 @@ impl TestState {
 								.expect("Receiver should be alive.");
 						},
 						RuntimeApiRequest::AvailabilityCores(tx) => {
-							tracing::trace!(target: LOG_TARGET, cores= ?self.cores[&hash], hash = ?hash, "Sending out cores for hash");
+							gum::trace!(target: LOG_TARGET, cores= ?self.cores[&hash], hash = ?hash, "Sending out cores for hash");
 							tx.send(Ok(self.cores[&hash].clone()))
 								.expect("Receiver should still be alive");
 						},
@@ -299,12 +299,12 @@ async fn overseer_signal(
 	msg: impl Into<OverseerSignal>,
 ) {
 	let msg = msg.into();
-	tracing::trace!(target: LOG_TARGET, msg = ?msg, "sending message");
+	gum::trace!(target: LOG_TARGET, msg = ?msg, "sending message");
 	tx.send(FromOverseer::Signal(msg)).await.expect("Test subsystem no longer live");
 }
 
 async fn overseer_recv(rx: &mut mpsc::UnboundedReceiver<AllMessages>) -> AllMessages {
-	tracing::trace!(target: LOG_TARGET, "waiting for message ...");
+	gum::trace!(target: LOG_TARGET, "waiting for message ...");
 	rx.next().await.expect("Test subsystem no longer live")
 }
 
diff --git a/polkadot/node/network/availability-recovery/Cargo.toml b/polkadot/node/network/availability-recovery/Cargo.toml
index 85614fe08e295e3a92971a0e7529bace17efb2ab..8536ef609ffad074ce424e54e1a4aa9c8a556793 100644
--- a/polkadot/node/network/availability-recovery/Cargo.toml
+++ b/polkadot/node/network/availability-recovery/Cargo.toml
@@ -10,7 +10,7 @@ lru = "0.7.3"
 rand = "0.8.5"
 fatality = "0.0.6"
 thiserror = "1.0.30"
-tracing = "0.1.32"
+gum = { package = "tracing-gum", path = "../../gum" }
 
 polkadot-erasure-coding = { path = "../../../erasure-coding" }
 polkadot-primitives = { path = "../../../primitives" }
diff --git a/polkadot/node/network/availability-recovery/src/lib.rs b/polkadot/node/network/availability-recovery/src/lib.rs
index 870fad87ffae6a6233184a9d1ee1fc9c69a3ff18..c3d27e5ea3d11effba073404d49ed37eb9db58a0 100644
--- a/polkadot/node/network/availability-recovery/src/lib.rs
+++ b/polkadot/node/network/availability-recovery/src/lib.rs
@@ -180,7 +180,7 @@ impl RequestFromBackers {
 		params: &RecoveryParams,
 		sender: &mut impl SubsystemSender,
 	) -> Result<AvailableData, RecoveryError> {
-		tracing::trace!(
+		gum::trace!(
 			target: LOG_TARGET,
 			candidate_hash = ?params.candidate_hash,
 			erasure_root = ?params.erasure_root,
@@ -216,7 +216,7 @@ impl RequestFromBackers {
 						&params.erasure_root,
 						&data,
 					) {
-						tracing::trace!(
+						gum::trace!(
 							target: LOG_TARGET,
 							candidate_hash = ?params.candidate_hash,
 							"Received full data",
@@ -224,7 +224,7 @@ impl RequestFromBackers {
 
 						return Ok(data)
 					} else {
-						tracing::debug!(
+						gum::debug!(
 							target: LOG_TARGET,
 							candidate_hash = ?params.candidate_hash,
 							?validator_index,
@@ -235,7 +235,7 @@ impl RequestFromBackers {
 					}
 				},
 				Ok(req_res::v1::AvailableDataFetchingResponse::NoSuchData) => {},
-				Err(e) => tracing::debug!(
+				Err(e) => gum::debug!(
 					target: LOG_TARGET,
 					candidate_hash = ?params.candidate_hash,
 					?validator_index,
@@ -310,7 +310,7 @@ impl RequestChunksFromValidators {
 		while self.requesting_chunks.len() < num_requests {
 			if let Some(validator_index) = self.shuffling.pop_back() {
 				let validator = params.validator_authority_keys[validator_index.0 as usize].clone();
-				tracing::trace!(
+				gum::trace!(
 					target: LOG_TARGET,
 					?validator,
 					?validator_index,
@@ -379,7 +379,7 @@ impl RequestChunksFromValidators {
 							metrics.on_chunk_request_invalid();
 							self.error_count += 1;
 
-							tracing::debug!(
+							gum::debug!(
 								target: LOG_TARGET,
 								candidate_hash = ?params.candidate_hash,
 								?validator_index,
@@ -388,7 +388,7 @@ impl RequestChunksFromValidators {
 						} else {
 							metrics.on_chunk_request_succeeded();
 
-							tracing::trace!(
+							gum::trace!(
 								target: LOG_TARGET,
 								candidate_hash = ?params.candidate_hash,
 								?validator_index,
@@ -400,7 +400,7 @@ impl RequestChunksFromValidators {
 						metrics.on_chunk_request_invalid();
 						self.error_count += 1;
 
-						tracing::debug!(
+						gum::debug!(
 							target: LOG_TARGET,
 							candidate_hash = ?params.candidate_hash,
 							?validator_index,
@@ -415,7 +415,7 @@ impl RequestChunksFromValidators {
 				Err((validator_index, e)) => {
 					self.error_count += 1;
 
-					tracing::debug!(
+					gum::debug!(
 						target: LOG_TARGET,
 						candidate_hash= ?params.candidate_hash,
 						err = ?e,
@@ -479,7 +479,7 @@ impl RequestChunksFromValidators {
 					}
 				},
 				Err(oneshot::Canceled) => {
-					tracing::warn!(
+					gum::warn!(
 						target: LOG_TARGET,
 						candidate_hash = ?params.candidate_hash,
 						"Failed to reach the availability store"
@@ -490,7 +490,7 @@ impl RequestChunksFromValidators {
 
 		loop {
 			if self.is_unavailable(&params) {
-				tracing::debug!(
+				gum::debug!(
 					target: LOG_TARGET,
 					candidate_hash = ?params.candidate_hash,
 					erasure_root = ?params.erasure_root,
@@ -521,7 +521,7 @@ impl RequestChunksFromValidators {
 							&params.erasure_root,
 							&data,
 						) {
-							tracing::trace!(
+							gum::trace!(
 								target: LOG_TARGET,
 								candidate_hash = ?params.candidate_hash,
 								erasure_root = ?params.erasure_root,
@@ -530,7 +530,7 @@ impl RequestChunksFromValidators {
 
 							Ok(data)
 						} else {
-							tracing::trace!(
+							gum::trace!(
 								target: LOG_TARGET,
 								candidate_hash = ?params.candidate_hash,
 								erasure_root = ?params.erasure_root,
@@ -541,7 +541,7 @@ impl RequestChunksFromValidators {
 						}
 					},
 					Err(err) => {
-						tracing::trace!(
+						gum::trace!(
 							target: LOG_TARGET,
 							candidate_hash = ?params.candidate_hash,
 							erasure_root = ?params.erasure_root,
@@ -589,7 +589,7 @@ fn reconstructed_data_matches_root(
 	let chunks = match obtain_chunks_v1(n_validators, data) {
 		Ok(chunks) => chunks,
 		Err(e) => {
-			tracing::debug!(
+			gum::debug!(
 				target: LOG_TARGET,
 				err = ?e,
 				"Failed to obtain chunks",
@@ -619,7 +619,7 @@ impl<S: SubsystemSender> RecoveryTask<S> {
 				Ok(Some(data)) => return Ok(data),
 				Ok(None) => {},
 				Err(oneshot::Canceled) => {
-					tracing::warn!(
+					gum::warn!(
 						target: LOG_TARGET,
 						candidate_hash = ?self.params.candidate_hash,
 						"Failed to reach the availability store",
@@ -669,7 +669,7 @@ impl Future for RecoveryHandle {
 
 		// these are reverse order, so remove is fine.
 		for index in indices_to_remove {
-			tracing::debug!(
+			gum::debug!(
 				target: LOG_TARGET,
 				candidate_hash = ?self.candidate_hash,
 				"Receiver for available data dropped.",
@@ -679,7 +679,7 @@ impl Future for RecoveryHandle {
 		}
 
 		if self.awaiting.is_empty() {
-			tracing::debug!(
+			gum::debug!(
 				target: LOG_TARGET,
 				candidate_hash = ?self.candidate_hash,
 				"All receivers for available data dropped.",
@@ -829,7 +829,7 @@ where
 	});
 
 	if let Err(e) = ctx.spawn("recovery-task", Box::pin(remote)) {
-		tracing::warn!(
+		gum::warn!(
 			target: LOG_TARGET,
 			err = ?e,
 			"Failed to spawn a recovery task",
@@ -862,7 +862,7 @@ where
 		state.availability_lru.get(&candidate_hash).cloned().map(|v| v.into_result())
 	{
 		if let Err(e) = response_sender.send(result) {
-			tracing::warn!(
+			gum::warn!(
 				target: LOG_TARGET,
 				err = ?e,
 				"Error responding with an availability recovery result",
@@ -898,7 +898,7 @@ where
 			)
 			.await,
 		None => {
-			tracing::warn!(target: LOG_TARGET, "SessionInfo is `None` at {:?}", state.live_block);
+			gum::warn!(target: LOG_TARGET, "SessionInfo is `None` at {:?}", state.live_block);
 			response_sender
 				.send(Err(RecoveryError::Unavailable))
 				.map_err(|_| error::Error::CanceledResponseSender)?;
@@ -978,7 +978,7 @@ impl AvailabilityRecoverySubsystem {
 										response_sender,
 										&metrics,
 									).await {
-										tracing::warn!(
+										gum::warn!(
 											target: LOG_TARGET,
 											err = ?e,
 											"Error handling a recovery request",
@@ -997,7 +997,7 @@ impl AvailabilityRecoverySubsystem {
 									let _ = req.send_response(res.into());
 								}
 								Err(e) => {
-									tracing::debug!(
+									gum::debug!(
 										target: LOG_TARGET,
 										err = ?e,
 										"Failed to query available data.",
@@ -1008,7 +1008,7 @@ impl AvailabilityRecoverySubsystem {
 							}
 						}
 						Err(jfyi) => {
-							tracing::debug!(
+							gum::debug!(
 								target: LOG_TARGET,
 								error = ?jfyi,
 								"Decoding incoming request failed"
diff --git a/polkadot/node/network/availability-recovery/src/tests.rs b/polkadot/node/network/availability-recovery/src/tests.rs
index ccdc277cd1a29442ea4e923efa6247a9e08f18ce..507941fc9ed1b3f3a1e9ae982da1a9f535d84fe9 100644
--- a/polkadot/node/network/availability-recovery/src/tests.rs
+++ b/polkadot/node/network/availability-recovery/src/tests.rs
@@ -134,7 +134,7 @@ async fn overseer_send(
 	overseer: &mut TestSubsystemContextHandle<AvailabilityRecoveryMessage>,
 	msg: AvailabilityRecoveryMessage,
 ) {
-	tracing::trace!(msg = ?msg, "sending message");
+	gum::trace!(msg = ?msg, "sending message");
 	overseer
 		.send(FromOverseer::Communication { msg })
 		.timeout(TIMEOUT)
@@ -145,9 +145,9 @@ async fn overseer_send(
 async fn overseer_recv(
 	overseer: &mut TestSubsystemContextHandle<AvailabilityRecoveryMessage>,
 ) -> AllMessages {
-	tracing::trace!("waiting for message ...");
+	gum::trace!("waiting for message ...");
 	let msg = overseer.recv().timeout(TIMEOUT).await.expect("TIMEOUT is enough to recv.");
-	tracing::trace!(msg = ?msg, "received message");
+	gum::trace!(msg = ?msg, "received message");
 	msg
 }
 
diff --git a/polkadot/node/network/bitfield-distribution/Cargo.toml b/polkadot/node/network/bitfield-distribution/Cargo.toml
index 3e8fc1e04d84adcdd96e8409b956277dd626865d..f8007e2c9f907313c374e09ac2abecfe7cf9de82 100644
--- a/polkadot/node/network/bitfield-distribution/Cargo.toml
+++ b/polkadot/node/network/bitfield-distribution/Cargo.toml
@@ -6,7 +6,7 @@ edition = "2021"
 
 [dependencies]
 futures = "0.3.21"
-tracing = "0.1.32"
+gum = { package = "tracing-gum", path = "../../gum" }
 polkadot-primitives = { path = "../../../primitives" }
 polkadot-subsystem = { package = "polkadot-node-subsystem", path = "../../subsystem" }
 polkadot-node-subsystem-util = { path = "../../subsystem-util" }
diff --git a/polkadot/node/network/bitfield-distribution/src/lib.rs b/polkadot/node/network/bitfield-distribution/src/lib.rs
index 988126dd6936b133f59a62a80c83899462f7d832..1aee6dc06cdbbd5b0b3a18587854823d0a8abbf0 100644
--- a/polkadot/node/network/bitfield-distribution/src/lib.rs
+++ b/polkadot/node/network/bitfield-distribution/src/lib.rs
@@ -179,7 +179,7 @@ impl BitfieldDistribution {
 			let message = match ctx.recv().await {
 				Ok(message) => message,
 				Err(e) => {
-					tracing::debug!(target: LOG_TARGET, err = ?e, "Failed to receive a message from Overseer, exiting");
+					gum::debug!(target: LOG_TARGET, err = ?e, "Failed to receive a message from Overseer, exiting");
 					return
 				},
 			};
@@ -187,7 +187,7 @@ impl BitfieldDistribution {
 				FromOverseer::Communication {
 					msg: BitfieldDistributionMessage::DistributeBitfield(hash, signed_availability),
 				} => {
-					tracing::trace!(target: LOG_TARGET, ?hash, "Processing DistributeBitfield");
+					gum::trace!(target: LOG_TARGET, ?hash, "Processing DistributeBitfield");
 					handle_bitfield_distribution(
 						&mut ctx,
 						&mut state,
@@ -200,7 +200,7 @@ impl BitfieldDistribution {
 				FromOverseer::Communication {
 					msg: BitfieldDistributionMessage::NetworkBridgeUpdateV1(event),
 				} => {
-					tracing::trace!(target: LOG_TARGET, "Processing NetworkMessage");
+					gum::trace!(target: LOG_TARGET, "Processing NetworkMessage");
 					// a network message was received
 					handle_network_msg(&mut ctx, &mut state, &self.metrics, event).await;
 				},
@@ -213,7 +213,7 @@ impl BitfieldDistribution {
 					for activated in activated {
 						let relay_parent = activated.hash;
 
-						tracing::trace!(target: LOG_TARGET, relay_parent = %relay_parent, "activated");
+						gum::trace!(target: LOG_TARGET, relay_parent = %relay_parent, "activated");
 						let span = PerLeafSpan::new(activated.span, "bitfield-distribution");
 						let _span = span.child("query-basics");
 
@@ -231,17 +231,17 @@ impl BitfieldDistribution {
 								);
 							},
 							Err(e) => {
-								tracing::warn!(target: LOG_TARGET, err = ?e, "query_basics has failed");
+								gum::warn!(target: LOG_TARGET, err = ?e, "query_basics has failed");
 							},
 							_ => {},
 						}
 					}
 				},
 				FromOverseer::Signal(OverseerSignal::BlockFinalized(hash, number)) => {
-					tracing::trace!(target: LOG_TARGET, hash = %hash, number = %number, "block finalized");
+					gum::trace!(target: LOG_TARGET, hash = %hash, number = %number, "block finalized");
 				},
 				FromOverseer::Signal(OverseerSignal::Conclude) => {
-					tracing::trace!(target: LOG_TARGET, "Conclude");
+					gum::trace!(target: LOG_TARGET, "Conclude");
 					return
 				},
 			}
@@ -254,7 +254,7 @@ async fn modify_reputation<Context>(ctx: &mut Context, peer: PeerId, rep: Rep)
 where
 	Context: SubsystemContext<Message = BitfieldDistributionMessage>,
 {
-	tracing::trace!(target: LOG_TARGET, ?rep, peer_id = %peer, "reputation change");
+	gum::trace!(target: LOG_TARGET, ?rep, peer_id = %peer, "reputation change");
 
 	ctx.send_message(NetworkBridgeMessage::ReportPeer(peer, rep)).await
 }
@@ -278,7 +278,7 @@ async fn handle_bitfield_distribution<Context>(
 	let job_data: &mut _ = if let Some(ref mut job_data) = job_data {
 		job_data
 	} else {
-		tracing::trace!(
+		gum::trace!(
 			target: LOG_TARGET,
 			relay_parent = %relay_parent,
 			"Not supposed to work on relay parent related data",
@@ -288,7 +288,7 @@ async fn handle_bitfield_distribution<Context>(
 	};
 	let validator_set = &job_data.validator_set;
 	if validator_set.is_empty() {
-		tracing::trace!(target: LOG_TARGET, relay_parent = %relay_parent, "validator set is empty");
+		gum::trace!(target: LOG_TARGET, relay_parent = %relay_parent, "validator set is empty");
 		return
 	}
 
@@ -296,11 +296,7 @@ async fn handle_bitfield_distribution<Context>(
 	let validator = if let Some(validator) = validator_set.get(validator_index) {
 		validator.clone()
 	} else {
-		tracing::trace!(
-			target: LOG_TARGET,
-			"Could not find a validator for index {}",
-			validator_index
-		);
+		gum::trace!(target: LOG_TARGET, "Could not find a validator for index {}", validator_index);
 		return
 	};
 
@@ -374,7 +370,7 @@ async fn relay_message<Context>(
 	drop(_span);
 
 	if interested_peers.is_empty() {
-		tracing::trace!(
+		gum::trace!(
 			target: LOG_TARGET,
 			relay_parent = %message.relay_parent,
 			"no peers are interested in gossip for relay parent",
@@ -400,7 +396,7 @@ async fn process_incoming_peer_message<Context>(
 	Context: SubsystemContext<Message = BitfieldDistributionMessage>,
 {
 	let protocol_v1::BitfieldDistributionMessage::Bitfield(relay_parent, bitfield) = message;
-	tracing::trace!(
+	gum::trace!(
 		target: LOG_TARGET,
 		peer_id = %origin,
 		?relay_parent,
@@ -432,7 +428,7 @@ async fn process_incoming_peer_message<Context>(
 
 	let validator_set = &job_data.validator_set;
 	if validator_set.is_empty() {
-		tracing::trace!(
+		gum::trace!(
 			target: LOG_TARGET,
 			relay_parent = %relay_parent,
 			?origin,
@@ -460,7 +456,7 @@ async fn process_incoming_peer_message<Context>(
 	if !received_set.contains(&validator) {
 		received_set.insert(validator.clone());
 	} else {
-		tracing::trace!(target: LOG_TARGET, ?validator_index, ?origin, "Duplicate message");
+		gum::trace!(target: LOG_TARGET, ?validator_index, ?origin, "Duplicate message");
 		modify_reputation(ctx, origin, COST_PEER_DUPLICATE_MESSAGE).await;
 		return
 	};
@@ -469,7 +465,7 @@ async fn process_incoming_peer_message<Context>(
 
 	// only relay_message a message of a validator once
 	if let Some(old_message) = one_per_validator.get(&validator) {
-		tracing::trace!(
+		gum::trace!(
 			target: LOG_TARGET,
 			?validator_index,
 			"already received a message for validator",
@@ -512,12 +508,12 @@ async fn handle_network_msg<Context>(
 
 	match bridge_message {
 		NetworkBridgeEvent::PeerConnected(peerid, role, _) => {
-			tracing::trace!(target: LOG_TARGET, ?peerid, ?role, "Peer connected");
+			gum::trace!(target: LOG_TARGET, ?peerid, ?role, "Peer connected");
 			// insert if none already present
 			state.peer_views.entry(peerid).or_default();
 		},
 		NetworkBridgeEvent::PeerDisconnected(peerid) => {
-			tracing::trace!(target: LOG_TARGET, ?peerid, "Peer disconnected");
+			gum::trace!(target: LOG_TARGET, ?peerid, "Peer disconnected");
 			// get rid of superfluous data
 			state.peer_views.remove(&peerid);
 		},
@@ -531,11 +527,11 @@ async fn handle_network_msg<Context>(
 			}
 		},
 		NetworkBridgeEvent::PeerViewChange(peerid, view) => {
-			tracing::trace!(target: LOG_TARGET, ?peerid, ?view, "Peer view change");
+			gum::trace!(target: LOG_TARGET, ?peerid, ?view, "Peer view change");
 			handle_peer_view_change(ctx, state, peerid, view).await;
 		},
 		NetworkBridgeEvent::OurViewChange(view) => {
-			tracing::trace!(target: LOG_TARGET, ?view, "Our view change");
+			gum::trace!(target: LOG_TARGET, ?view, "Our view change");
 			handle_our_view_change(state, view);
 		},
 		NetworkBridgeEvent::PeerMessage(remote, message) =>
@@ -549,7 +545,7 @@ fn handle_our_view_change(state: &mut ProtocolState, view: OurView) {
 
 	for added in state.view.difference(&old_view) {
 		if !state.per_relay_parent.contains_key(&added) {
-			tracing::warn!(
+			gum::warn!(
 				target: LOG_TARGET,
 				added = %added,
 				"Our view contains {} but the overseer never told use we should work on this",
@@ -589,7 +585,7 @@ async fn handle_peer_view_change<Context>(
 		);
 
 	if !lucky {
-		tracing::trace!(target: LOG_TARGET, ?origin, "Peer view change is ignored");
+		gum::trace!(target: LOG_TARGET, ?origin, "Peer view change is ignored");
 		return
 	}
 
@@ -637,7 +633,7 @@ async fn send_tracked_gossip_message<Context>(
 	};
 
 	let _span = job_data.span.child("gossip");
-	tracing::trace!(
+	gum::trace!(
 		target: LOG_TARGET,
 		?dest,
 		?validator,
@@ -699,7 +695,7 @@ where
 		(Ok(v), Ok(s)) =>
 			Ok(Some((v, SigningContext { parent_hash: relay_parent, session_index: s }))),
 		(Err(e), _) | (_, Err(e)) => {
-			tracing::warn!(target: LOG_TARGET, err = ?e, "Failed to fetch basics from runtime API");
+			gum::warn!(target: LOG_TARGET, err = ?e, "Failed to fetch basics from runtime API");
 			Ok(None)
 		},
 	}
diff --git a/polkadot/node/network/bridge/Cargo.toml b/polkadot/node/network/bridge/Cargo.toml
index 232dd1b48117e8888c932b322aa314a68688d9e0..346f3464c0d178172d93f07685c37b9e04002c67 100644
--- a/polkadot/node/network/bridge/Cargo.toml
+++ b/polkadot/node/network/bridge/Cargo.toml
@@ -7,7 +7,7 @@ edition = "2021"
 [dependencies]
 async-trait = "0.1.52"
 futures = "0.3.21"
-tracing = "0.1.32"
+gum = { package = "tracing-gum", path = "../../gum" }
 polkadot-primitives = { path = "../../../primitives" }
 parity-scale-codec = { version = "3.1.0", default-features = false, features = ["derive"] }
 sc-network = { git = "https://github.com/paritytech/substrate", branch = "master" }
diff --git a/polkadot/node/network/bridge/src/lib.rs b/polkadot/node/network/bridge/src/lib.rs
index 8be504da40baec7c9218dae711fe213cfa7fe45b..b3cf6452d38ae8a8f70c949a7214932b92c2319a 100644
--- a/polkadot/node/network/bridge/src/lib.rs
+++ b/polkadot/node/network/bridge/src/lib.rs
@@ -374,7 +374,7 @@ where
 			msg = ctx.recv().fuse() => match msg {
 				Ok(FromOverseer::Signal(OverseerSignal::ActiveLeaves(active_leaves))) => {
 					let ActiveLeavesUpdate { activated, deactivated } = active_leaves;
-					tracing::trace!(
+					gum::trace!(
 						target: LOG_TARGET,
 						action = "ActiveLeaves",
 						has_activated = activated.is_some(),
@@ -413,7 +413,7 @@ where
 					}
 				}
 				Ok(FromOverseer::Signal(OverseerSignal::BlockFinalized(_hash, number))) => {
-					tracing::trace!(
+					gum::trace!(
 						target: LOG_TARGET,
 						action = "BlockFinalized"
 					);
@@ -431,7 +431,7 @@ where
 				Ok(FromOverseer::Communication { msg }) => match msg {
 					NetworkBridgeMessage::ReportPeer(peer, rep) => {
 						if !rep.is_benefit() {
-							tracing::debug!(
+							gum::debug!(
 								target: LOG_TARGET,
 								?peer,
 								?rep,
@@ -441,7 +441,7 @@ where
 						network_service.report_peer(peer, rep);
 					}
 					NetworkBridgeMessage::DisconnectPeer(peer, peer_set) => {
-						tracing::trace!(
+						gum::trace!(
 							target: LOG_TARGET,
 							action = "DisconnectPeer",
 							?peer,
@@ -450,7 +450,7 @@ where
 						network_service.disconnect_peer(peer, peer_set);
 					}
 					NetworkBridgeMessage::SendValidationMessage(peers, msg) => {
-						tracing::trace!(
+						gum::trace!(
 							target: LOG_TARGET,
 							action = "SendValidationMessages",
 							num_messages = 1,
@@ -465,7 +465,7 @@ where
 						);
 					}
 					NetworkBridgeMessage::SendValidationMessages(msgs) => {
-						tracing::trace!(
+						gum::trace!(
 							target: LOG_TARGET,
 							action = "SendValidationMessages",
 							num_messages = %msgs.len(),
@@ -482,7 +482,7 @@ where
 						}
 					}
 					NetworkBridgeMessage::SendCollationMessage(peers, msg) => {
-						tracing::trace!(
+						gum::trace!(
 							target: LOG_TARGET,
 							action = "SendCollationMessages",
 							num_messages = 1,
@@ -497,7 +497,7 @@ where
 						);
 					}
 					NetworkBridgeMessage::SendCollationMessages(msgs) => {
-						tracing::trace!(
+						gum::trace!(
 							target: LOG_TARGET,
 							action = "SendCollationMessages",
 							num_messages = %msgs.len(),
@@ -514,7 +514,7 @@ where
 						}
 					}
 					NetworkBridgeMessage::SendRequests(reqs, if_disconnected) => {
-						tracing::trace!(
+						gum::trace!(
 							target: LOG_TARGET,
 							action = "SendRequests",
 							num_requests = %reqs.len(),
@@ -531,7 +531,7 @@ where
 						peer_set,
 						failed,
 					} => {
-						tracing::trace!(
+						gum::trace!(
 							target: LOG_TARGET,
 							action = "ConnectToValidators",
 							peer_set = ?peer_set,
@@ -556,7 +556,7 @@ where
 						validator_addrs,
 						peer_set,
 					} => {
-						tracing::trace!(
+						gum::trace!(
 							target: LOG_TARGET,
 							action = "ConnectToPeers",
 							peer_set = ?peer_set,
@@ -576,7 +576,7 @@ where
 					NetworkBridgeMessage::NewGossipTopology {
 						our_neighbors,
 					} => {
-						tracing::debug!(
+						gum::debug!(
 							target: LOG_TARGET,
 							action = "NewGossipTopology",
 							neighbors = our_neighbors.len(),
@@ -632,7 +632,7 @@ async fn handle_network_messages<AD: validator_discovery::AuthorityDiscovery>(
 					Some(peer_set) => peer_set,
 				};
 
-				tracing::debug!(
+				gum::debug!(
 					target: LOG_TARGET,
 					action = "PeerConnected",
 					peer_set = ?peer_set,
@@ -716,7 +716,7 @@ async fn handle_network_messages<AD: validator_discovery::AuthorityDiscovery>(
 					Some(peer_set) => peer_set,
 				};
 
-				tracing::debug!(
+				gum::debug!(
 					target: LOG_TARGET,
 					action = "PeerDisconnected",
 					peer_set = ?peer_set,
@@ -766,7 +766,7 @@ async fn handle_network_messages<AD: validator_discovery::AuthorityDiscovery>(
 
 				let v_messages = match v_messages {
 					Err(_) => {
-						tracing::debug!(target: LOG_TARGET, action = "ReportPeer");
+						gum::debug!(target: LOG_TARGET, action = "ReportPeer");
 
 						network_service.report_peer(remote, MALFORMED_MESSAGE_COST);
 						continue
@@ -784,7 +784,7 @@ async fn handle_network_messages<AD: validator_discovery::AuthorityDiscovery>(
 
 				match c_messages {
 					Err(_) => {
-						tracing::debug!(target: LOG_TARGET, action = "ReportPeer");
+						gum::debug!(target: LOG_TARGET, action = "ReportPeer");
 
 						network_service.report_peer(remote, MALFORMED_MESSAGE_COST);
 						continue
@@ -793,7 +793,7 @@ async fn handle_network_messages<AD: validator_discovery::AuthorityDiscovery>(
 						if v_messages.is_empty() && c_messages.is_empty() {
 							continue
 						} else {
-							tracing::trace!(
+							gum::trace!(
 								target: LOG_TARGET,
 								action = "PeerMessages",
 								peer = ?remote,
@@ -896,7 +896,7 @@ where
 	{
 		Ok(()) => Ok(()),
 		Err(UnexpectedAbort::SubsystemError(err)) => {
-			tracing::warn!(
+			gum::warn!(
 				target: LOG_TARGET,
 				err = ?err,
 				"Shutting down Network Bridge due to error"
@@ -908,7 +908,7 @@ where
 			)))
 		},
 		Err(UnexpectedAbort::EventStreamConcluded) => {
-			tracing::info!(
+			gum::info!(
 				target: LOG_TARGET,
 				"Shutting down Network Bridge: underlying request stream concluded"
 			);
diff --git a/polkadot/node/network/bridge/src/network.rs b/polkadot/node/network/bridge/src/network.rs
index 3d29e73fa2292a766584be6541aee2b4f8e68417..e5ce3effefb1ea91395db825cd77d81716a658c3 100644
--- a/polkadot/node/network/bridge/src/network.rs
+++ b/polkadot/node/network/bridge/src/network.rs
@@ -176,14 +176,12 @@ impl Network for Arc<NetworkService<Block, Hash>> {
 
 		let peer_id = match peer_id {
 			None => {
-				tracing::debug!(target: LOG_TARGET, "Discovering authority failed");
+				gum::debug!(target: LOG_TARGET, "Discovering authority failed");
 				match pending_response
 					.send(Err(RequestFailure::Network(OutboundFailure::DialFailure)))
 				{
-					Err(_) => tracing::debug!(
-						target: LOG_TARGET,
-						"Sending failed request response failed."
-					),
+					Err(_) =>
+						gum::debug!(target: LOG_TARGET, "Sending failed request response failed."),
 					Ok(_) => {},
 				}
 				return
diff --git a/polkadot/node/network/bridge/src/validator_discovery.rs b/polkadot/node/network/bridge/src/validator_discovery.rs
index 11c6f9c5271b49fc78abdbdeb695640213c0e74d..69f4fabb4283862fc51d2f4981ab837081c22d70 100644
--- a/polkadot/node/network/bridge/src/validator_discovery.rs
+++ b/polkadot/node/network/bridge/src/validator_discovery.rs
@@ -66,7 +66,7 @@ impl<N: Network, AD: AuthorityDiscovery> Service<N, AD> {
 		let removed = peers_to_remove.len();
 		state.previously_requested = new_peer_ids;
 
-		tracing::debug!(
+		gum::debug!(
 			target: LOG_TARGET,
 			?peer_set,
 			?num_peers,
@@ -79,7 +79,7 @@ impl<N: Network, AD: AuthorityDiscovery> Service<N, AD> {
 			.set_reserved_peers(peer_set.into_protocol_name(), newly_requested)
 			.await
 		{
-			tracing::warn!(target: LOG_TARGET, err = ?e, "AuthorityDiscoveryService returned an invalid multiaddress");
+			gum::warn!(target: LOG_TARGET, err = ?e, "AuthorityDiscoveryService returned an invalid multiaddress");
 		}
 		// the addresses are known to be valid
 		let _ = network_service
@@ -116,7 +116,7 @@ impl<N: Network, AD: AuthorityDiscovery> Service<N, AD> {
 				newly_requested.extend(addresses);
 			} else {
 				failed_to_resolve += 1;
-				tracing::debug!(
+				gum::debug!(
 					target: LOG_TARGET,
 					"Authority Discovery couldn't resolve {:?}",
 					authority
@@ -124,7 +124,7 @@ impl<N: Network, AD: AuthorityDiscovery> Service<N, AD> {
 			}
 		}
 
-		tracing::debug!(
+		gum::debug!(
 			target: LOG_TARGET,
 			?peer_set,
 			?requested,
diff --git a/polkadot/node/network/collator-protocol/Cargo.toml b/polkadot/node/network/collator-protocol/Cargo.toml
index 5ceb871f956e698c929965f5750fdfda021dbc74..a13bc9adb4548d19fc9c0d3105c10a1078e4c8e4 100644
--- a/polkadot/node/network/collator-protocol/Cargo.toml
+++ b/polkadot/node/network/collator-protocol/Cargo.toml
@@ -8,7 +8,7 @@ edition = "2021"
 always-assert = "0.1.2"
 futures = "0.3.21"
 futures-timer = "3"
-tracing = "0.1.32"
+gum = { package = "tracing-gum", path = "../../gum" }
 
 sp-core = { git = "https://github.com/paritytech/substrate", branch = "master" }
 sp-runtime = { git = "https://github.com/paritytech/substrate", branch = "master" }
diff --git a/polkadot/node/network/collator-protocol/src/collator_side/mod.rs b/polkadot/node/network/collator-protocol/src/collator_side/mod.rs
index d1e5c5add718cc1e925b884e970cc55d9694e68c..8cea56d846e5f044e470c218855b7a99b607ee0b 100644
--- a/polkadot/node/network/collator-protocol/src/collator_side/mod.rs
+++ b/polkadot/node/network/collator-protocol/src/collator_side/mod.rs
@@ -345,7 +345,7 @@ where
 
 	// This collation is not in the active-leaves set.
 	if !state.view.contains(&relay_parent) {
-		tracing::warn!(
+		gum::warn!(
 			target: LOG_TARGET,
 			?relay_parent,
 			"distribute collation message parent is outside of our view",
@@ -364,7 +364,7 @@ where
 	let (our_core, num_cores) = match determine_core(ctx, id, relay_parent).await? {
 		Some(core) => core,
 		None => {
-			tracing::warn!(
+			gum::warn!(
 				target: LOG_TARGET,
 				para_id = %id,
 				?relay_parent,
@@ -380,7 +380,7 @@ where
 		determine_our_validators(ctx, runtime, our_core, num_cores, relay_parent).await?;
 
 	if current_validators.validators.is_empty() {
-		tracing::warn!(
+		gum::warn!(
 			target: LOG_TARGET,
 			core = ?our_core,
 			"there are no validators assigned to core",
@@ -389,7 +389,7 @@ where
 		return Ok(())
 	}
 
-	tracing::debug!(
+	gum::debug!(
 		target: LOG_TARGET,
 		para_id = %id,
 		relay_parent = %relay_parent,
@@ -472,7 +472,7 @@ where
 		.get_session_info_by_index(ctx.sender(), relay_parent, session_index)
 		.await?
 		.session_info;
-	tracing::debug!(target: LOG_TARGET, ?session_index, "Received session info");
+	gum::debug!(target: LOG_TARGET, ?session_index, "Received session info");
 	let groups = &info.validator_groups;
 	let rotation_info = get_group_rotation_info(ctx, relay_parent).await?;
 
@@ -554,7 +554,7 @@ async fn advertise_collation<Context>(
 
 	match (state.collations.get_mut(&relay_parent), should_advertise) {
 		(None, _) => {
-			tracing::trace!(
+			gum::trace!(
 				target: LOG_TARGET,
 				?relay_parent,
 				peer_id = %peer,
@@ -563,7 +563,7 @@ async fn advertise_collation<Context>(
 			return
 		},
 		(_, false) => {
-			tracing::debug!(
+			gum::debug!(
 				target: LOG_TARGET,
 				?relay_parent,
 				peer_id = %peer,
@@ -572,7 +572,7 @@ async fn advertise_collation<Context>(
 			return
 		},
 		(Some(collation), true) => {
-			tracing::debug!(
+			gum::debug!(
 				target: LOG_TARGET,
 				?relay_parent,
 				peer_id = %peer,
@@ -626,7 +626,7 @@ where
 				Some(id) if receipt.descriptor.para_id != id => {
 					// If the ParaId of a collation requested to be distributed does not match
 					// the one we expect, we ignore the message.
-					tracing::warn!(
+					gum::warn!(
 						target: LOG_TARGET,
 						para_id = %receipt.descriptor.para_id,
 						collating_on = %id,
@@ -638,7 +638,7 @@ where
 						.await?;
 				},
 				None => {
-					tracing::warn!(
+					gum::warn!(
 						target: LOG_TARGET,
 						para_id = %receipt.descriptor.para_id,
 						"DistributeCollation message while not collating on any",
@@ -647,14 +647,14 @@ where
 			}
 		},
 		ReportCollator(_) => {
-			tracing::warn!(
+			gum::warn!(
 				target: LOG_TARGET,
 				"ReportCollator message is not expected on the collator side of the protocol",
 			);
 		},
 		NetworkBridgeUpdateV1(event) => {
 			if let Err(e) = handle_network_msg(ctx, runtime, state, event).await {
-				tracing::warn!(
+				gum::warn!(
 					target: LOG_TARGET,
 					err = ?e,
 					"Failed to handle incoming network message",
@@ -686,14 +686,14 @@ async fn send_collation(
 	};
 
 	if let Err(_) = request.send_outgoing_response(response) {
-		tracing::warn!(target: LOG_TARGET, "Sending collation response failed");
+		gum::warn!(target: LOG_TARGET, "Sending collation response failed");
 	}
 
 	state.active_collation_fetches.push(
 		async move {
 			let r = rx.timeout(MAX_UNSHARED_UPLOAD_TIME).await;
 			if r.is_none() {
-				tracing::debug!(
+				gum::debug!(
 					target: LOG_TARGET,
 					?relay_parent,
 					?peer_id,
@@ -724,7 +724,7 @@ where
 
 	match msg {
 		Declare(_, _, _) => {
-			tracing::trace!(
+			gum::trace!(
 				target: LOG_TARGET,
 				?origin,
 				"Declare message is not expected on the collator side of the protocol",
@@ -735,7 +735,7 @@ where
 				.await;
 		},
 		AdvertiseCollation(_) => {
-			tracing::trace!(
+			gum::trace!(
 				target: LOG_TARGET,
 				?origin,
 				"AdvertiseCollation message is not expected on the collator side of the protocol",
@@ -753,7 +753,7 @@ where
 		},
 		CollationSeconded(relay_parent, statement) => {
 			if !matches!(statement.unchecked_payload(), Statement::Seconded(_)) {
-				tracing::warn!(
+				gum::warn!(
 					target: LOG_TARGET,
 					?statement,
 					?origin,
@@ -769,7 +769,7 @@ where
 					state.collation_result_senders.remove(&statement.payload().candidate_hash());
 
 				if let Some(sender) = removed {
-					tracing::trace!(
+					gum::trace!(
 						target: LOG_TARGET,
 						?statement,
 						?origin,
@@ -806,7 +806,7 @@ where
 					collation.status.advance_to_requested();
 					(collation.receipt.clone(), collation.pov.clone())
 				} else {
-					tracing::warn!(
+					gum::warn!(
 						target: LOG_TARGET,
 						relay_parent = %req.payload.relay_parent,
 						"received a `RequestCollation` for a relay parent we don't have collation stored.",
@@ -823,7 +823,7 @@ where
 				state.waiting_collation_fetches.entry(req.payload.relay_parent).or_default();
 
 			if !waiting.waiting_peers.insert(req.peer) {
-				tracing::debug!(
+				gum::debug!(
 					target: LOG_TARGET,
 					"Dropping incoming request as peer has a request in flight already."
 				);
@@ -840,7 +840,7 @@ where
 			}
 		},
 		Some(our_para_id) => {
-			tracing::warn!(
+			gum::warn!(
 				target: LOG_TARGET,
 				for_para_id = %req.payload.para_id,
 				our_para_id = %our_para_id,
@@ -848,7 +848,7 @@ where
 			);
 		},
 		None => {
-			tracing::warn!(
+			gum::warn!(
 				target: LOG_TARGET,
 				for_para_id = %req.payload.para_id,
 				"received a `RequestCollation` while not collating on any para",
@@ -896,9 +896,9 @@ where
 		PeerConnected(peer_id, observed_role, maybe_authority) => {
 			// If it is possible that a disconnected validator would attempt a reconnect
 			// it should be handled here.
-			tracing::trace!(target: LOG_TARGET, ?peer_id, ?observed_role, "Peer connected");
+			gum::trace!(target: LOG_TARGET, ?peer_id, ?observed_role, "Peer connected");
 			if let Some(authority_ids) = maybe_authority {
-				tracing::trace!(
+				gum::trace!(
 					target: LOG_TARGET,
 					?authority_ids,
 					?peer_id,
@@ -910,16 +910,16 @@ where
 			}
 		},
 		PeerViewChange(peer_id, view) => {
-			tracing::trace!(target: LOG_TARGET, ?peer_id, ?view, "Peer view change");
+			gum::trace!(target: LOG_TARGET, ?peer_id, ?view, "Peer view change");
 			handle_peer_view_change(ctx, state, peer_id, view).await;
 		},
 		PeerDisconnected(peer_id) => {
-			tracing::trace!(target: LOG_TARGET, ?peer_id, "Peer disconnected");
+			gum::trace!(target: LOG_TARGET, ?peer_id, "Peer disconnected");
 			state.peer_views.remove(&peer_id);
 			state.peer_ids.remove(&peer_id);
 		},
 		OurViewChange(view) => {
-			tracing::trace!(target: LOG_TARGET, ?view, "Own view change");
+			gum::trace!(target: LOG_TARGET, ?view, "Own view change");
 			handle_our_view_change(state, view).await?;
 		},
 		PeerMessage(remote, msg) => {
@@ -936,25 +936,25 @@ where
 /// Handles our view changes.
 async fn handle_our_view_change(state: &mut State, view: OurView) -> Result<()> {
 	for removed in state.view.difference(&view) {
-		tracing::debug!(target: LOG_TARGET, relay_parent = ?removed, "Removing relay parent because our view changed.");
+		gum::debug!(target: LOG_TARGET, relay_parent = ?removed, "Removing relay parent because our view changed.");
 
 		if let Some(collation) = state.collations.remove(removed) {
 			state.collation_result_senders.remove(&collation.receipt.hash());
 
 			match collation.status {
-				CollationStatus::Created => tracing::warn!(
+				CollationStatus::Created => gum::warn!(
 					target: LOG_TARGET,
 					candidate_hash = ?collation.receipt.hash(),
 					pov_hash = ?collation.pov.hash(),
 					"Collation wasn't advertised to any validator.",
 				),
-				CollationStatus::Advertised => tracing::debug!(
+				CollationStatus::Advertised => gum::debug!(
 					target: LOG_TARGET,
 					candidate_hash = ?collation.receipt.hash(),
 					pov_hash = ?collation.pov.hash(),
 					"Collation was advertised but not requested by any validator.",
 				),
-				CollationStatus::Requested => tracing::debug!(
+				CollationStatus::Requested => gum::debug!(
 					target: LOG_TARGET,
 					candidate_hash = ?collation.receipt.hash(),
 					pov_hash = ?collation.pov.hash(),
@@ -1035,7 +1035,7 @@ where
 					}
 					Err(error) => {
 						let jfyi = error.split().map_err(incoming::Error::from)?;
-						tracing::debug!(
+						gum::debug!(
 							target: LOG_TARGET,
 							error = ?jfyi,
 							"Decoding incoming request failed"
diff --git a/polkadot/node/network/collator-protocol/src/collator_side/tests.rs b/polkadot/node/network/collator-protocol/src/collator_side/tests.rs
index 10c1a032694d4d53d2ecd16a873914bc6a122444..f8f985cd4c66d7ca668efd55e5ea8f74c606591d 100644
--- a/polkadot/node/network/collator-protocol/src/collator_side/tests.rs
+++ b/polkadot/node/network/collator-protocol/src/collator_side/tests.rs
@@ -226,7 +226,7 @@ fn test_harness<T: Future<Output = TestHarness>>(
 const TIMEOUT: Duration = Duration::from_millis(100);
 
 async fn overseer_send(overseer: &mut VirtualOverseer, msg: CollatorProtocolMessage) {
-	tracing::trace!(?msg, "sending message");
+	gum::trace!(?msg, "sending message");
 	overseer
 		.send(FromOverseer::Communication { msg })
 		.timeout(TIMEOUT)
@@ -239,7 +239,7 @@ async fn overseer_recv(overseer: &mut VirtualOverseer) -> AllMessages {
 		.await
 		.expect(&format!("{:?} is more than enough to receive messages", TIMEOUT));
 
-	tracing::trace!(?msg, "received message");
+	gum::trace!(?msg, "received message");
 
 	msg
 }
@@ -248,7 +248,7 @@ async fn overseer_recv_with_timeout(
 	overseer: &mut VirtualOverseer,
 	timeout: Duration,
 ) -> Option<AllMessages> {
-	tracing::trace!("waiting for message...");
+	gum::trace!("waiting for message...");
 	overseer.recv().timeout(timeout).await
 }
 
diff --git a/polkadot/node/network/collator-protocol/src/error.rs b/polkadot/node/network/collator-protocol/src/error.rs
index 2606b44bdc50f12aba3a40c1d5773b4195e80f48..4f0b0921a05a36ab2acc4df79a15cb276fbdcbfa 100644
--- a/polkadot/node/network/collator-protocol/src/error.rs
+++ b/polkadot/node/network/collator-protocol/src/error.rs
@@ -56,7 +56,7 @@ pub fn log_error(result: Result<()>, ctx: &'static str) -> std::result::Result<(
 	match result.into_nested()? {
 		Ok(()) => Ok(()),
 		Err(jfyi) => {
-			tracing::warn!(target: LOG_TARGET, error = ?jfyi, ctx);
+			gum::warn!(target: LOG_TARGET, error = ?jfyi, ctx);
 			Ok(())
 		},
 	}
diff --git a/polkadot/node/network/collator-protocol/src/lib.rs b/polkadot/node/network/collator-protocol/src/lib.rs
index eb6106b4e07f4c21d37a8d8a2eeb4b142843b207..975d60e553fe45e8097299afd839e218e9af0f60 100644
--- a/polkadot/node/network/collator-protocol/src/lib.rs
+++ b/polkadot/node/network/collator-protocol/src/lib.rs
@@ -133,7 +133,7 @@ async fn modify_reputation<Context>(ctx: &mut Context, peer: PeerId, rep: Rep)
 where
 	Context: SubsystemContext,
 {
-	tracing::trace!(
+	gum::trace!(
 		target: LOG_TARGET,
 		rep = ?rep,
 		peer_id = %peer,
diff --git a/polkadot/node/network/collator-protocol/src/validator_side/mod.rs b/polkadot/node/network/collator-protocol/src/validator_side/mod.rs
index 9b464749d188461f4eed94d9e203c3d77d79f145..084448b4b70b1faef5fcaa0b24ed2f33d843d7f3 100644
--- a/polkadot/node/network/collator-protocol/src/validator_side/mod.rs
+++ b/polkadot/node/network/collator-protocol/src/validator_side/mod.rs
@@ -371,7 +371,7 @@ impl ActiveParas {
 			let (validators, groups, rotation_info, cores) = match (mv, mg, mc) {
 				(Some(v), Some((g, r)), Some(c)) => (v, g, r, c),
 				_ => {
-					tracing::debug!(
+					gum::debug!(
 						target: LOG_TARGET,
 						?relay_parent,
 						"Failed to query runtime API for relay-parent",
@@ -393,7 +393,7 @@ impl ActiveParas {
 						cores.get(core_now.0 as usize).and_then(|c| c.para_id())
 					},
 					None => {
-						tracing::trace!(target: LOG_TARGET, ?relay_parent, "Not a validator");
+						gum::trace!(target: LOG_TARGET, ?relay_parent, "Not a validator");
 
 						continue
 					},
@@ -411,7 +411,7 @@ impl ActiveParas {
 				let entry = self.current_assignments.entry(para_now).or_default();
 				*entry += 1;
 				if *entry == 1 {
-					tracing::debug!(
+					gum::debug!(
 						target: LOG_TARGET,
 						?relay_parent,
 						para_id = ?para_now,
@@ -435,7 +435,7 @@ impl ActiveParas {
 						*occupied.get_mut() -= 1;
 						if *occupied.get() == 0 {
 							occupied.remove_entry();
-							tracing::debug!(
+							gum::debug!(
 								target: LOG_TARGET,
 								para_id = ?cur,
 								"Unassigned from a parachain",
@@ -533,7 +533,7 @@ impl CollationsPerRelayParent {
 		// If finished one does not match waiting_collation, then we already dequeued another fetch
 		// to replace it.
 		if self.waiting_collation != finished_one {
-			tracing::trace!(
+			gum::trace!(
 				target: LOG_TARGET,
 				waiting_collation = ?self.waiting_collation,
 				?finished_one,
@@ -728,7 +728,7 @@ async fn request_collation<Context>(
 	Context: SubsystemContext<Message = CollatorProtocolMessage>,
 {
 	if !state.view.contains(&relay_parent) {
-		tracing::debug!(
+		gum::debug!(
 			target: LOG_TARGET,
 			peer_id = %peer_id,
 			para_id = %para_id,
@@ -739,7 +739,7 @@ async fn request_collation<Context>(
 	}
 	let pending_collation = PendingCollation::new(relay_parent, &para_id, &peer_id);
 	if state.requested_collations.contains_key(&pending_collation) {
-		tracing::warn!(
+		gum::warn!(
 			target: LOG_TARGET,
 			peer_id = %pending_collation.peer_id,
 			%pending_collation.para_id,
@@ -768,7 +768,7 @@ async fn request_collation<Context>(
 		.requested_collations
 		.insert(PendingCollation::new(relay_parent, &para_id, &peer_id), per_request);
 
-	tracing::debug!(
+	gum::debug!(
 		target: LOG_TARGET,
 		peer_id = %peer_id,
 		%para_id,
@@ -821,7 +821,7 @@ async fn process_incoming_peer_message<Context>(
 			}
 
 			if state.active_paras.is_current(&para_id) {
-				tracing::debug!(
+				gum::debug!(
 					target: LOG_TARGET,
 					peer_id = ?origin,
 					?collator_id,
@@ -831,7 +831,7 @@ async fn process_incoming_peer_message<Context>(
 
 				peer_data.set_collating(collator_id, para_id);
 			} else {
-				tracing::debug!(
+				gum::debug!(
 					target: LOG_TARGET,
 					peer_id = ?origin,
 					?collator_id,
@@ -840,7 +840,7 @@ async fn process_incoming_peer_message<Context>(
 				);
 
 				modify_reputation(ctx, origin.clone(), COST_UNNEEDED_COLLATOR).await;
-				tracing::trace!(target: LOG_TARGET, "Disconnecting unneeded collator");
+				gum::trace!(target: LOG_TARGET, "Disconnecting unneeded collator");
 				disconnect_peer(ctx, origin).await;
 			}
 		},
@@ -850,7 +850,7 @@ async fn process_incoming_peer_message<Context>(
 				.get(&relay_parent)
 				.map(|s| s.child("advertise-collation"));
 			if !state.view.contains(&relay_parent) {
-				tracing::debug!(
+				gum::debug!(
 					target: LOG_TARGET,
 					peer_id = ?origin,
 					?relay_parent,
@@ -871,7 +871,7 @@ async fn process_incoming_peer_message<Context>(
 
 			match peer_data.insert_advertisement(relay_parent, &state.view) {
 				Ok((id, para_id)) => {
-					tracing::debug!(
+					gum::debug!(
 						target: LOG_TARGET,
 						peer_id = ?origin,
 						%para_id,
@@ -897,7 +897,7 @@ async fn process_incoming_peer_message<Context>(
 					}
 				},
 				Err(error) => {
-					tracing::debug!(
+					gum::debug!(
 						target: LOG_TARGET,
 						peer_id = ?origin,
 						?relay_parent,
@@ -910,7 +910,7 @@ async fn process_incoming_peer_message<Context>(
 			}
 		},
 		CollationSeconded(_, _) => {
-			tracing::warn!(
+			gum::warn!(
 				target: LOG_TARGET,
 				peer_id = ?origin,
 				"Unexpected `CollationSeconded` message, decreasing reputation",
@@ -976,7 +976,7 @@ where
 		// declare.
 		if let Some(para_id) = peer_data.collating_para() {
 			if !state.active_paras.is_current(&para_id) {
-				tracing::trace!(target: LOG_TARGET, "Disconnecting peer on view change");
+				gum::trace!(target: LOG_TARGET, "Disconnecting peer on view change");
 				disconnect_peer(ctx, peer_id.clone()).await;
 			}
 		}
@@ -1040,14 +1040,14 @@ async fn process_msg<Context>(
 
 	match msg {
 		CollateOn(id) => {
-			tracing::warn!(
+			gum::warn!(
 				target: LOG_TARGET,
 				para_id = %id,
 				"CollateOn message is not expected on the validator side of the protocol",
 			);
 		},
 		DistributeCollation(_, _, _) => {
-			tracing::warn!(
+			gum::warn!(
 				target: LOG_TARGET,
 				"DistributeCollation message is not expected on the validator side of the protocol",
 			);
@@ -1057,7 +1057,7 @@ async fn process_msg<Context>(
 		},
 		NetworkBridgeUpdateV1(event) => {
 			if let Err(e) = handle_network_msg(ctx, state, keystore, event).await {
-				tracing::warn!(
+				gum::warn!(
 					target: LOG_TARGET,
 					err = ?e,
 					"Failed to handle incoming network message",
@@ -1075,7 +1075,7 @@ async fn process_msg<Context>(
 					collations.status = CollationStatus::Seconded;
 				}
 			} else {
-				tracing::debug!(
+				gum::debug!(
 					target: LOG_TARGET,
 					relay_parent = ?parent,
 					"Collation has been seconded, but the relay parent is deactivated",
@@ -1089,7 +1089,7 @@ async fn process_msg<Context>(
 						Some(candidate_receipt.commitments_hash) =>
 					entry.remove().0,
 				Entry::Occupied(_) => {
-					tracing::error!(
+					gum::error!(
 						target: LOG_TARGET,
 						relay_parent = ?parent,
 						candidate = ?candidate_receipt.hash(),
@@ -1150,7 +1150,7 @@ where
 			res = ctx.recv().fuse() => {
 				match res {
 					Ok(FromOverseer::Communication { msg }) => {
-						tracing::trace!(target: LOG_TARGET, msg = ?msg, "received a message");
+						gum::trace!(target: LOG_TARGET, msg = ?msg, "received a message");
 						process_msg(
 							&mut ctx,
 							&keystore,
@@ -1170,7 +1170,7 @@ where
 			}
 			res = state.collation_fetch_timeouts.select_next_some() => {
 				let (collator_id, relay_parent) = res;
-				tracing::debug!(
+				gum::debug!(
 					target: LOG_TARGET,
 					?relay_parent,
 					?collator_id,
@@ -1233,7 +1233,7 @@ async fn dequeue_next_collation_and_fetch(
 		.get_mut(&relay_parent)
 		.and_then(|c| c.get_next_collation_to_fetch(Some(previous_fetch)))
 	{
-		tracing::debug!(
+		gum::debug!(
 			target: LOG_TARGET,
 			?relay_parent,
 			?id,
@@ -1260,7 +1260,7 @@ async fn handle_collation_fetched_result<Context>(
 	let (candidate_receipt, pov) = match res {
 		Ok(res) => res,
 		Err(e) => {
-			tracing::debug!(
+			gum::debug!(
 				target: LOG_TARGET,
 				relay_parent = ?collation_event.1.relay_parent,
 				para_id = ?collation_event.1.para_id,
@@ -1277,7 +1277,7 @@ async fn handle_collation_fetched_result<Context>(
 
 	if let Some(collations) = state.collations_per_relay_parent.get_mut(&relay_parent) {
 		if let CollationStatus::Seconded = collations.status {
-			tracing::debug!(
+			gum::debug!(
 				target: LOG_TARGET,
 				?relay_parent,
 				"Already seconded - no longer interested in collation fetch result."
@@ -1298,7 +1298,7 @@ async fn handle_collation_fetched_result<Context>(
 
 		entry.insert(collation_event);
 	} else {
-		tracing::trace!(
+		gum::trace!(
 			target: LOG_TARGET,
 			?relay_parent,
 			candidate = ?candidate_receipt.hash(),
@@ -1320,7 +1320,7 @@ async fn disconnect_inactive_peers<Context>(
 {
 	for (peer, peer_data) in peers {
 		if peer_data.is_inactive(&eviction_policy) {
-			tracing::trace!(target: LOG_TARGET, "Disconnecting inactive peer");
+			gum::trace!(target: LOG_TARGET, "Disconnecting inactive peer");
 			disconnect_peer(ctx, peer.clone()).await;
 		}
 	}
@@ -1353,7 +1353,7 @@ async fn poll_collation_response(
 	per_req: &mut PerRequest,
 ) -> CollationFetchResult {
 	if never!(per_req.from_collator.is_terminated()) {
-		tracing::error!(
+		gum::error!(
 			target: LOG_TARGET,
 			"We remove pending responses once received, this should not happen."
 		);
@@ -1371,7 +1371,7 @@ async fn poll_collation_response(
 
 		let result = match response {
 			Err(RequestError::InvalidResponse(err)) => {
-				tracing::warn!(
+				gum::warn!(
 					target: LOG_TARGET,
 					hash = ?pending_collation.relay_parent,
 					para_id = ?pending_collation.para_id,
@@ -1382,7 +1382,7 @@ async fn poll_collation_response(
 				CollationFetchResult::Error(Some(COST_CORRUPTED_MESSAGE))
 			},
 			Err(err) if err.is_timed_out() => {
-				tracing::debug!(
+				gum::debug!(
 					target: LOG_TARGET,
 					hash = ?pending_collation.relay_parent,
 					para_id = ?pending_collation.para_id,
@@ -1394,7 +1394,7 @@ async fn poll_collation_response(
 				CollationFetchResult::Error(None)
 			},
 			Err(RequestError::NetworkError(err)) => {
-				tracing::debug!(
+				gum::debug!(
 					target: LOG_TARGET,
 					hash = ?pending_collation.relay_parent,
 					para_id = ?pending_collation.para_id,
@@ -1409,7 +1409,7 @@ async fn poll_collation_response(
 				CollationFetchResult::Error(Some(COST_NETWORK_ERROR))
 			},
 			Err(RequestError::Canceled(err)) => {
-				tracing::debug!(
+				gum::debug!(
 					target: LOG_TARGET,
 					hash = ?pending_collation.relay_parent,
 					para_id = ?pending_collation.para_id,
@@ -1422,7 +1422,7 @@ async fn poll_collation_response(
 			Ok(CollationFetchingResponse::Collation(receipt, _))
 				if receipt.descriptor().para_id != pending_collation.para_id =>
 			{
-				tracing::debug!(
+				gum::debug!(
 					target: LOG_TARGET,
 					expected_para_id = ?pending_collation.para_id,
 					got_para_id = ?receipt.descriptor().para_id,
@@ -1433,7 +1433,7 @@ async fn poll_collation_response(
 				CollationFetchResult::Error(Some(COST_WRONG_PARA))
 			},
 			Ok(CollationFetchingResponse::Collation(receipt, pov)) => {
-				tracing::debug!(
+				gum::debug!(
 					target: LOG_TARGET,
 					para_id = %pending_collation.para_id,
 					hash = ?pending_collation.relay_parent,
@@ -1447,7 +1447,7 @@ async fn poll_collation_response(
 				let result = tx.send((receipt, pov));
 
 				if let Err(_) = result {
-					tracing::warn!(
+					gum::warn!(
 						target: LOG_TARGET,
 						hash = ?pending_collation.relay_parent,
 						para_id = ?pending_collation.para_id,
diff --git a/polkadot/node/network/collator-protocol/src/validator_side/tests.rs b/polkadot/node/network/collator-protocol/src/validator_side/tests.rs
index 2978a5c76e89ce63e54d08bf5a14068570c1c2ae..913575469579e1c2d22ccf61772c8c773f643e23 100644
--- a/polkadot/node/network/collator-protocol/src/validator_side/tests.rs
+++ b/polkadot/node/network/collator-protocol/src/validator_side/tests.rs
@@ -166,7 +166,7 @@ fn test_harness<T: Future<Output = VirtualOverseer>>(test: impl FnOnce(TestHarne
 const TIMEOUT: Duration = Duration::from_millis(200);
 
 async fn overseer_send(overseer: &mut VirtualOverseer, msg: CollatorProtocolMessage) {
-	tracing::trace!("Sending message:\n{:?}", &msg);
+	gum::trace!("Sending message:\n{:?}", &msg);
 	overseer
 		.send(FromOverseer::Communication { msg })
 		.timeout(TIMEOUT)
@@ -179,7 +179,7 @@ async fn overseer_recv(overseer: &mut VirtualOverseer) -> AllMessages {
 		.await
 		.expect(&format!("{:?} is enough to receive messages.", TIMEOUT));
 
-	tracing::trace!("Received message:\n{:?}", &msg);
+	gum::trace!("Received message:\n{:?}", &msg);
 
 	msg
 }
@@ -188,7 +188,7 @@ async fn overseer_recv_with_timeout(
 	overseer: &mut VirtualOverseer,
 	timeout: Duration,
 ) -> Option<AllMessages> {
-	tracing::trace!("Waiting for message...");
+	gum::trace!("Waiting for message...");
 	overseer.recv().timeout(timeout).await
 }
 
@@ -350,7 +350,7 @@ fn act_on_advertisement() {
 		let TestHarness { mut virtual_overseer } = test_harness;
 
 		let pair = CollatorPair::generate().0;
-		tracing::trace!("activating");
+		gum::trace!("activating");
 
 		overseer_send(
 			&mut virtual_overseer,
diff --git a/polkadot/node/network/dispute-distribution/Cargo.toml b/polkadot/node/network/dispute-distribution/Cargo.toml
index 6e832794d913bf64e93ddb2858b49ba0db94c82d..65ca08b2e0193ae7b86012bc507e41c8e99e03f9 100644
--- a/polkadot/node/network/dispute-distribution/Cargo.toml
+++ b/polkadot/node/network/dispute-distribution/Cargo.toml
@@ -6,7 +6,7 @@ edition = "2021"
 
 [dependencies]
 futures = "0.3.21"
-tracing = "0.1.32"
+gum = { package = "tracing-gum", path = "../../gum" }
 derive_more = "0.99.17"
 parity-scale-codec = { version = "3.1.0", features = ["std"] }
 polkadot-primitives = { path = "../../../primitives" }
diff --git a/polkadot/node/network/dispute-distribution/src/error.rs b/polkadot/node/network/dispute-distribution/src/error.rs
index 4410c8fc6719ded742dac05511055c789c0989b5..053802c897adb653803086e861aeddd9e971665b 100644
--- a/polkadot/node/network/dispute-distribution/src/error.rs
+++ b/polkadot/node/network/dispute-distribution/src/error.rs
@@ -64,7 +64,7 @@ pub type FatalResult<T> = std::result::Result<T, FatalError>;
 pub fn log_error(result: Result<()>, ctx: &'static str) -> std::result::Result<(), FatalError> {
 	match result.into_nested()? {
 		Err(jfyi) => {
-			tracing::warn!(target: LOG_TARGET, error = ?jfyi, ctx);
+			gum::warn!(target: LOG_TARGET, error = ?jfyi, ctx);
 			Ok(())
 		},
 		Ok(()) => Ok(()),
diff --git a/polkadot/node/network/dispute-distribution/src/receiver/error.rs b/polkadot/node/network/dispute-distribution/src/receiver/error.rs
index 5fb76fff78638be2e3ca643177bdaa760d71c18a..ce578cc8e0f979592f35e864ebafd89a429c66da 100644
--- a/polkadot/node/network/dispute-distribution/src/receiver/error.rs
+++ b/polkadot/node/network/dispute-distribution/src/receiver/error.rs
@@ -62,11 +62,11 @@ pub type JfyiErrorResult<T> = std::result::Result<T, JfyiError>;
 pub fn log_error(result: Result<()>) -> std::result::Result<(), FatalError> {
 	match result.into_nested()? {
 		Err(error @ JfyiError::ImportCanceled(_)) => {
-			tracing::debug!(target: LOG_TARGET, error = ?error);
+			gum::debug!(target: LOG_TARGET, error = ?error);
 			Ok(())
 		},
 		Err(JfyiError::NotAValidator(peer)) => {
-			tracing::debug!(
+			gum::debug!(
 				target: LOG_TARGET,
 				?peer,
 				"Dropping message from peer (unknown authority id)"
@@ -74,7 +74,7 @@ pub fn log_error(result: Result<()>) -> std::result::Result<(), FatalError> {
 			Ok(())
 		},
 		Err(error) => {
-			tracing::warn!(target: LOG_TARGET, error = ?error);
+			gum::warn!(target: LOG_TARGET, error = ?error);
 			Ok(())
 		},
 		Ok(()) => Ok(()),
diff --git a/polkadot/node/network/dispute-distribution/src/receiver/mod.rs b/polkadot/node/network/dispute-distribution/src/receiver/mod.rs
index 7166b24d3d55211684f09721a5db85a1764dd7b3..252ec7c4301afe007e4ad0df5b03464fe34fc355 100644
--- a/polkadot/node/network/dispute-distribution/src/receiver/mod.rs
+++ b/polkadot/node/network/dispute-distribution/src/receiver/mod.rs
@@ -168,7 +168,7 @@ where
 			match log_error(self.run_inner().await) {
 				Ok(()) => {},
 				Err(fatal) => {
-					tracing::debug!(
+					gum::debug!(
 						target: LOG_TARGET,
 						error = ?fatal,
 						"Shutting down"
@@ -212,7 +212,7 @@ where
 		// Immediately drop requests from peers that already have requests in flight or have
 		// been banned recently (flood protection):
 		if self.pending_imports.peer_is_pending(&peer) || self.banned_peers.contains(&peer) {
-			tracing::trace!(
+			gum::trace!(
 				target: LOG_TARGET,
 				?peer,
 				"Dropping message from peer (banned/pending import)"
diff --git a/polkadot/node/network/dispute-distribution/src/sender/mod.rs b/polkadot/node/network/dispute-distribution/src/sender/mod.rs
index 814e0b430067263318f5a942c0080bf3d9d644c4..98632fba3b60c67d75149538db2bb9fdb227aac2 100644
--- a/polkadot/node/network/dispute-distribution/src/sender/mod.rs
+++ b/polkadot/node/network/dispute-distribution/src/sender/mod.rs
@@ -89,11 +89,7 @@ impl DisputeSender {
 		let candidate_hash = req.0.candidate_receipt.hash();
 		match self.disputes.entry(candidate_hash) {
 			Entry::Occupied(_) => {
-				tracing::trace!(
-					target: LOG_TARGET,
-					?candidate_hash,
-					"Dispute sending already active."
-				);
+				gum::trace!(target: LOG_TARGET, ?candidate_hash, "Dispute sending already active.");
 				return Ok(())
 			},
 			Entry::Vacant(vacant) => {
@@ -168,7 +164,7 @@ impl DisputeSender {
 		let task = match self.disputes.get_mut(&candidate_hash) {
 			None => {
 				// Can happen when a dispute ends, with messages still in queue:
-				tracing::trace!(
+				gum::trace!(
 					target: LOG_TARGET,
 					?result,
 					"Received `FromSendingTask::Finished` for non existing dispute."
@@ -215,7 +211,7 @@ impl DisputeSender {
 			.await?;
 		let our_index = match info.validator_info.our_index {
 			None => {
-				tracing::trace!(
+				gum::trace!(
 					target: LOG_TARGET,
 					"Not a validator in that session - not starting dispute sending."
 				);
@@ -226,7 +222,7 @@ impl DisputeSender {
 
 		let votes = match get_candidate_votes(ctx, session_index, candidate_hash).await? {
 			None => {
-				tracing::debug!(
+				gum::debug!(
 					target: LOG_TARGET,
 					?session_index,
 					?candidate_hash,
diff --git a/polkadot/node/network/dispute-distribution/src/sender/send_task.rs b/polkadot/node/network/dispute-distribution/src/sender/send_task.rs
index d42289a49ccbda9ce68de247d8004ae641da1c27..4c032990aa099c0968bc121b313eb40895951075 100644
--- a/polkadot/node/network/dispute-distribution/src/sender/send_task.rs
+++ b/polkadot/node/network/dispute-distribution/src/sender/send_task.rs
@@ -159,7 +159,7 @@ impl SendTask {
 	pub fn on_finished_send(&mut self, authority: &AuthorityDiscoveryId, result: TaskResult) {
 		match result {
 			TaskResult::Failed(err) => {
-				tracing::trace!(
+				gum::trace!(
 					target: LOG_TARGET,
 					?authority,
 					candidate_hash = %self.request.0.candidate_receipt.hash(),
@@ -176,7 +176,7 @@ impl SendTask {
 					None => {
 						// Can happen when a sending became irrelevant while the response was already
 						// queued.
-						tracing::debug!(
+						gum::debug!(
 							target: LOG_TARGET,
 							candidate = ?self.request.0.candidate_receipt.hash(),
 							?authority,
@@ -290,7 +290,7 @@ async fn wait_response_task(
 			TaskFinish { candidate_hash, receiver, result: TaskResult::Succeeded },
 	};
 	if let Err(err) = tx.feed(msg).await {
-		tracing::debug!(
+		gum::debug!(
 			target: LOG_TARGET,
 			%err,
 			"Failed to notify susystem about dispute sending result."
diff --git a/polkadot/node/network/dispute-distribution/src/tests/mod.rs b/polkadot/node/network/dispute-distribution/src/tests/mod.rs
index 269176299559141efd9f11ca79134ad063fd4331..9b0e4037a38e62226d3073ef8aa7d18e477ae251 100644
--- a/polkadot/node/network/dispute-distribution/src/tests/mod.rs
+++ b/polkadot/node/network/dispute-distribution/src/tests/mod.rs
@@ -175,7 +175,7 @@ fn received_request_triggers_import() {
 							assert_matches!(
 								rx_response.await,
 								Err(err) => {
-									tracing::trace!(
+									gum::trace!(
 										target: LOG_TARGET,
 										?err,
 										"Request got dropped - other request already in flight"
@@ -197,7 +197,7 @@ fn received_request_triggers_import() {
 							assert_matches!(
 								rx_response.await,
 								Err(err) => {
-									tracing::trace!(
+									gum::trace!(
 										target: LOG_TARGET,
 										?err,
 										"Request got dropped - other request already in flight"
@@ -223,7 +223,7 @@ fn received_request_triggers_import() {
 			assert_matches!(
 				rx_response.await,
 				Err(err) => {
-					tracing::trace!(
+					gum::trace!(
 						target: LOG_TARGET,
 						?err,
 						"Request got dropped - peer is banned."
@@ -244,7 +244,7 @@ fn received_request_triggers_import() {
 		)
 		.await;
 
-		tracing::trace!(target: LOG_TARGET, "Concluding.");
+		gum::trace!(target: LOG_TARGET, "Concluding.");
 		conclude(&mut handle).await;
 	};
 	test_harness(test);
@@ -562,7 +562,7 @@ async fn nested_network_dispute_request<'a, F, O>(
 					if let Some(sent_feedback) = sent_feedback {
 						sent_feedback.send(()).unwrap();
 					}
-					tracing::trace!(
+					gum::trace!(
 						target: LOG_TARGET,
 						"Valid import happened."
 					);
@@ -735,7 +735,7 @@ where
 		match subsystem.run(ctx).await {
 			Ok(()) => {},
 			Err(fatal) => {
-				tracing::debug!(
+				gum::debug!(
 					target: LOG_TARGET,
 					?fatal,
 					"Dispute distribution exited with fatal error."
diff --git a/polkadot/node/network/gossip-support/Cargo.toml b/polkadot/node/network/gossip-support/Cargo.toml
index 957bce7f31fae411aef368c6bcc9af4ebff0d628..f37a530d03b3326ee4dea33a1f14db1cfea44786 100644
--- a/polkadot/node/network/gossip-support/Cargo.toml
+++ b/polkadot/node/network/gossip-support/Cargo.toml
@@ -19,7 +19,7 @@ futures = "0.3.21"
 futures-timer = "3.0.2"
 rand = { version = "0.8.5", default-features = false }
 rand_chacha = { version = "0.3.1", default-features = false }
-tracing = "0.1.32"
+gum = { package = "tracing-gum", path = "../../gum" }
 
 [dev-dependencies]
 sp-keyring = { git = "https://github.com/paritytech/substrate", branch = "master" }
diff --git a/polkadot/node/network/gossip-support/src/lib.rs b/polkadot/node/network/gossip-support/src/lib.rs
index ee5aef8ee525922ac9db9b3a38a838371c66598d..9db1e9050df2f41be3d0b80a7094d4554ba0928c 100644
--- a/polkadot/node/network/gossip-support/src/lib.rs
+++ b/polkadot/node/network/gossip-support/src/lib.rs
@@ -156,7 +156,7 @@ where
 					match result {
 						Ok(message) => message,
 						Err(e) => {
-							tracing::debug!(
+							gum::debug!(
 								target: LOG_TARGET,
 								err = ?e,
 								"Failed to receive a message from Overseer, exiting",
@@ -173,11 +173,11 @@ where
 					activated,
 					..
 				})) => {
-					tracing::trace!(target: LOG_TARGET, "active leaves signal");
+					gum::trace!(target: LOG_TARGET, "active leaves signal");
 
 					let leaves = activated.into_iter().map(|a| a.hash);
 					if let Err(e) = self.handle_active_leaves(&mut ctx, leaves).await {
-						tracing::debug!(target: LOG_TARGET, error = ?e);
+						gum::debug!(target: LOG_TARGET, error = ?e);
 					}
 				},
 				FromOverseer::Signal(OverseerSignal::BlockFinalized(_hash, _number)) => {},
@@ -215,7 +215,7 @@ where
 			if let Some((session_index, relay_parent)) = maybe_issue_connection {
 				let is_new_session = maybe_new_session.is_some();
 				if is_new_session {
-					tracing::debug!(
+					gum::debug!(
 						target: LOG_TARGET,
 						%session_index,
 						"New session detected",
@@ -312,7 +312,7 @@ where
 				resolved.insert(authority, addrs);
 			} else {
 				failures += 1;
-				tracing::debug!(
+				gum::debug!(
 					target: LOG_TARGET,
 					"Couldn't resolve addresses of authority: {:?}",
 					authority
@@ -320,7 +320,7 @@ where
 			}
 		}
 		self.resolved_authorities = resolved;
-		tracing::debug!(target: LOG_TARGET, %num, "Issuing a connection request");
+		gum::debug!(target: LOG_TARGET, %num, "Issuing a connection request");
 
 		ctx.send_message(NetworkBridgeMessage::ConnectToResolvedValidators {
 			validator_addrs,
@@ -335,7 +335,7 @@ where
 			match self.failure_start {
 				None => self.failure_start = Some(timestamp),
 				Some(first) if first.elapsed() >= LOW_CONNECTIVITY_WARN_DELAY => {
-					tracing::warn!(
+					gum::warn!(
 						target: LOG_TARGET,
 						connected = ?(num - failures),
 						target = ?num,
@@ -343,7 +343,7 @@ where
 					);
 				},
 				Some(_) => {
-					tracing::debug!(
+					gum::debug!(
 						target: LOG_TARGET,
 						connected = ?(num - failures),
 						target = ?num,
@@ -399,13 +399,13 @@ where
 		// we already know it is broken.
 		// https://github.com/paritytech/polkadot/issues/3921
 		if connected_ratio <= LOW_CONNECTIVITY_WARN_THRESHOLD {
-			tracing::debug!(
+			gum::debug!(
 				target: LOG_TARGET,
 				"Connectivity seems low, we are only connected to {}% of available validators (see debug logs for details)", connected_ratio
 			);
 		}
 		let pretty = PrettyAuthorities(unconnected_authorities);
-		tracing::debug!(
+		gum::debug!(
 			target: LOG_TARGET,
 			?connected_ratio,
 			?absolute_connected,
@@ -425,7 +425,7 @@ where
 	Context: overseer::SubsystemContext<Message = GossipSupportMessage>,
 {
 	let authorities = util::request_authorities(relay_parent, ctx.sender()).await.await??;
-	tracing::debug!(
+	gum::debug!(
 		target: LOG_TARGET,
 		authority_count = ?authorities.len(),
 		"Determined relevant authorities",
diff --git a/polkadot/node/network/gossip-support/src/tests.rs b/polkadot/node/network/gossip-support/src/tests.rs
index c1ed929d843b3c938d2580b6d1081660b25ccf46..6302772501ff8eea7decbe56e86d78437af13048 100644
--- a/polkadot/node/network/gossip-support/src/tests.rs
+++ b/polkadot/node/network/gossip-support/src/tests.rs
@@ -372,7 +372,7 @@ fn test_log_output() {
 		m
 	};
 	let pretty = PrettyAuthorities(unconnected_authorities.iter());
-	tracing::debug!(
+	gum::debug!(
 		target: LOG_TARGET,
 		unconnected_authorities = %pretty,
 		"Connectivity Report"
diff --git a/polkadot/node/network/statement-distribution/Cargo.toml b/polkadot/node/network/statement-distribution/Cargo.toml
index 79a999a4acfc0562fb04b3891fde0aad6c618e69..ee00f5416a6c445a9b85155346c44be6c233dce2 100644
--- a/polkadot/node/network/statement-distribution/Cargo.toml
+++ b/polkadot/node/network/statement-distribution/Cargo.toml
@@ -7,7 +7,7 @@ edition = "2021"
 
 [dependencies]
 futures = "0.3.21"
-tracing = "0.1.32"
+gum = { package = "tracing-gum", path = "../../gum" }
 polkadot-primitives = { path = "../../../primitives" }
 sp-staking = { git = "https://github.com/paritytech/substrate", branch = "master", default-features = false }
 sp-keystore = { git = "https://github.com/paritytech/substrate", branch = "master" }
diff --git a/polkadot/node/network/statement-distribution/src/error.rs b/polkadot/node/network/statement-distribution/src/error.rs
index 9f0e30c730c3dccb9db10d5fb81bec8fd8c0fe60..be4e8ffebeb15fac2e8e430d81952ff41d1af2eb 100644
--- a/polkadot/node/network/statement-distribution/src/error.rs
+++ b/polkadot/node/network/statement-distribution/src/error.rs
@@ -87,8 +87,8 @@ pub fn log_error(result: Result<()>, ctx: &'static str) -> std::result::Result<(
 		Err(jfyi) => {
 			match jfyi {
 				JfyiError::RequestedUnannouncedCandidate(_, _) =>
-					tracing::warn!(target: LOG_TARGET, error = %jfyi, ctx),
-				_ => tracing::debug!(target: LOG_TARGET, error = %jfyi, ctx),
+					gum::warn!(target: LOG_TARGET, error = %jfyi, ctx),
+				_ => gum::debug!(target: LOG_TARGET, error = %jfyi, ctx),
 			}
 			Ok(())
 		},
diff --git a/polkadot/node/network/statement-distribution/src/lib.rs b/polkadot/node/network/statement-distribution/src/lib.rs
index f68860c1529f17170c479a8f22b94e4baac4c206..ebeae87d9c03ae76fa36424c07fbd43595409cc0 100644
--- a/polkadot/node/network/statement-distribution/src/lib.rs
+++ b/polkadot/node/network/statement-distribution/src/lib.rs
@@ -169,7 +169,7 @@ impl VcPerPeerTracker {
 	/// based on a message that we have sent it from our local pool.
 	fn note_local(&mut self, h: CandidateHash) {
 		if !note_hash(&mut self.local_observed, h) {
-			tracing::warn!(
+			gum::warn!(
 				target: LOG_TARGET,
 				"Statement distribution is erroneously attempting to distribute more \
 				than {} candidate(s) per validator index. Ignoring",
@@ -703,7 +703,7 @@ impl ActiveHeadData {
 			CompactStatement::Seconded(h) => {
 				let seconded_so_far = self.seconded_counts.entry(validator_index).or_insert(0);
 				if *seconded_so_far >= VC_THRESHOLD {
-					tracing::trace!(
+					gum::trace!(
 						target: LOG_TARGET,
 						?validator_index,
 						?statement,
@@ -714,7 +714,7 @@ impl ActiveHeadData {
 
 				self.candidates.insert(h);
 				if let Some(old) = self.statements.insert(comparator.clone(), statement) {
-					tracing::trace!(
+					gum::trace!(
 						target: LOG_TARGET,
 						?validator_index,
 						statement = ?old,
@@ -724,7 +724,7 @@ impl ActiveHeadData {
 				} else {
 					*seconded_so_far += 1;
 
-					tracing::trace!(
+					gum::trace!(
 						target: LOG_TARGET,
 						?validator_index,
 						statement = ?self.statements.last().expect("Just inserted").1,
@@ -741,7 +741,7 @@ impl ActiveHeadData {
 			},
 			CompactStatement::Valid(h) => {
 				if !self.candidates.contains(&h) {
-					tracing::trace!(
+					gum::trace!(
 						target: LOG_TARGET,
 						?validator_index,
 						?statement,
@@ -751,7 +751,7 @@ impl ActiveHeadData {
 				}
 
 				if let Some(old) = self.statements.insert(comparator.clone(), statement) {
-					tracing::trace!(
+					gum::trace!(
 						target: LOG_TARGET,
 						?validator_index,
 						statement = ?old,
@@ -759,7 +759,7 @@ impl ActiveHeadData {
 					);
 					NotedStatement::UsefulButKnown
 				} else {
-					tracing::trace!(
+					gum::trace!(
 						target: LOG_TARGET,
 						?validator_index,
 						statement = ?self.statements.last().expect("Just inserted").1,
@@ -795,7 +795,7 @@ impl ActiveHeadData {
 			CompactStatement::Seconded(_) => {
 				let seconded_so_far = self.seconded_counts.get(&validator_index).unwrap_or(&0);
 				if *seconded_so_far >= VC_THRESHOLD {
-					tracing::trace!(
+					gum::trace!(
 						target: LOG_TARGET,
 						?validator_index,
 						?statement,
@@ -805,7 +805,7 @@ impl ActiveHeadData {
 				}
 
 				if self.statements.contains_key(&comparator) {
-					tracing::trace!(
+					gum::trace!(
 						target: LOG_TARGET,
 						?validator_index,
 						?statement,
@@ -816,7 +816,7 @@ impl ActiveHeadData {
 			},
 			CompactStatement::Valid(h) => {
 				if !self.candidates.contains(&h) {
-					tracing::trace!(
+					gum::trace!(
 						target: LOG_TARGET,
 						?validator_index,
 						?statement,
@@ -826,7 +826,7 @@ impl ActiveHeadData {
 				}
 
 				if self.statements.contains_key(&comparator) {
-					tracing::trace!(
+					gum::trace!(
 						target: LOG_TARGET,
 						?validator_index,
 						?statement,
@@ -1033,7 +1033,7 @@ async fn circulate_statement<'a>(
 	// Send all these peers the initial statement.
 	if !peers_to_send.is_empty() {
 		let payload = statement_message(relay_parent, stored.statement.clone());
-		tracing::trace!(
+		gum::trace!(
 			target: LOG_TARGET,
 			?peers_to_send,
 			?relay_parent,
@@ -1071,7 +1071,7 @@ async fn send_statements_about(
 		peer_data.send(&relay_parent, &fingerprint);
 		let payload = statement_message(relay_parent, statement.statement.clone());
 
-		tracing::trace!(
+		gum::trace!(
 			target: LOG_TARGET,
 			?peer,
 			?relay_parent,
@@ -1106,7 +1106,7 @@ async fn send_statements(
 		peer_data.send(&relay_parent, &fingerprint);
 		let payload = statement_message(relay_parent, statement.statement.clone());
 
-		tracing::trace!(
+		gum::trace!(
 			target: LOG_TARGET,
 			?peer,
 			?relay_parent,
@@ -1244,7 +1244,7 @@ async fn launch_request(
 
 	let result = ctx.spawn("large-statement-fetcher", task.boxed());
 	if let Err(err) = result {
-		tracing::error!(target: LOG_TARGET, ?err, "Spawning task failed.");
+		gum::error!(target: LOG_TARGET, ?err, "Spawning task failed.");
 		return None
 	}
 	let available_peers = {
@@ -1312,7 +1312,7 @@ async fn handle_incoming_message<'a>(
 	let active_head = match active_heads.get_mut(&relay_parent) {
 		Some(h) => h,
 		None => {
-			tracing::debug!(
+			gum::debug!(
 				target: LOG_TARGET,
 				%relay_parent,
 				"our view out-of-sync with active heads; head not found",
@@ -1324,13 +1324,7 @@ async fn handle_incoming_message<'a>(
 
 	if let protocol_v1::StatementDistributionMessage::LargeStatement(_) = message {
 		if let Err(rep) = peer_data.receive_large_statement(&relay_parent) {
-			tracing::debug!(
-				target: LOG_TARGET,
-				?peer,
-				?message,
-				?rep,
-				"Unexpected large statement.",
-			);
+			gum::debug!(target: LOG_TARGET, ?peer, ?message, ?rep, "Unexpected large statement.",);
 			report_peer(ctx, peer, rep).await;
 			return None
 		}
@@ -1355,7 +1349,7 @@ async fn handle_incoming_message<'a>(
 		// reputation change flood.
 		let unexpected_count = peer_data.receive_unexpected(&relay_parent);
 
-		tracing::debug!(
+		gum::debug!(
 			target: LOG_TARGET,
 			?relay_parent,
 			?peer,
@@ -1406,12 +1400,7 @@ async fn handle_incoming_message<'a>(
 		// check the signature on the statement.
 		match check_statement_signature(&active_head, relay_parent, unchecked_compact) {
 			Err(statement) => {
-				tracing::debug!(
-					target: LOG_TARGET,
-					?peer,
-					?statement,
-					"Invalid statement signature"
-				);
+				gum::debug!(target: LOG_TARGET, ?peer, ?statement, "Invalid statement signature");
 				report_peer(ctx, peer, COST_INVALID_SIGNATURE).await;
 				return None
 			},
@@ -1431,7 +1420,7 @@ async fn handle_incoming_message<'a>(
 	// This fails if the payload doesn't encode correctly.
 	let statement: SignedFullStatement = match checked_compact.convert_to_superpayload(payload) {
 		Err((compact, _)) => {
-			tracing::debug!(
+			gum::debug!(
 				target: LOG_TARGET,
 				?peer,
 				?compact,
@@ -1453,7 +1442,7 @@ async fn handle_incoming_message<'a>(
 			unreachable!("checked in `check_can_receive` above; qed");
 		},
 		Ok(true) => {
-			tracing::trace!(target: LOG_TARGET, ?peer, ?statement, "Statement accepted");
+			gum::trace!(target: LOG_TARGET, ?peer, ?statement, "Statement accepted");
 			// Send the peer all statements concerning the candidate that we have,
 			// since it appears to have just learned about the candidate.
 			send_statements_about(
@@ -1544,7 +1533,7 @@ async fn handle_network_update(
 ) {
 	match update {
 		NetworkBridgeEvent::PeerConnected(peer, role, maybe_authority) => {
-			tracing::trace!(target: LOG_TARGET, ?peer, ?role, "Peer connected");
+			gum::trace!(target: LOG_TARGET, ?peer, ?role, "Peer connected");
 			peers.insert(
 				peer,
 				PeerData {
@@ -1560,7 +1549,7 @@ async fn handle_network_update(
 			}
 		},
 		NetworkBridgeEvent::PeerDisconnected(peer) => {
-			tracing::trace!(target: LOG_TARGET, ?peer, "Peer disconnected");
+			gum::trace!(target: LOG_TARGET, ?peer, "Peer disconnected");
 			if let Some(auth_ids) = peers.remove(&peer).and_then(|p| p.maybe_authority) {
 				auth_ids.into_iter().for_each(|a| {
 					authorities.remove(&a);
@@ -1600,7 +1589,7 @@ async fn handle_network_update(
 			.await;
 		},
 		NetworkBridgeEvent::PeerViewChange(peer, view) => {
-			tracing::trace!(target: LOG_TARGET, ?peer, ?view, "Peer view change");
+			gum::trace!(target: LOG_TARGET, ?peer, ?view, "Peer view change");
 			match peers.get_mut(&peer) {
 				Some(data) =>
 					update_peer_view_and_maybe_send_unlocked(
@@ -1670,7 +1659,7 @@ impl StatementDistributionSubsystem {
 					match result.into_nested()? {
 						Ok(true) => break,
 						Ok(false) => {},
-						Err(jfyi) => tracing::debug!(target: LOG_TARGET, error = ?jfyi),
+						Err(jfyi) => gum::debug!(target: LOG_TARGET, error = ?jfyi),
 					}
 				},
 				MuxedMessage::Requester(result) => {
@@ -1820,7 +1809,7 @@ impl StatementDistributionSubsystem {
 					Some(LargeStatementStatus::Fetching(info)) => info,
 					Some(LargeStatementStatus::FetchedOrShared(_)) => {
 						// This task is going to die soon - no need to send it anything.
-						tracing::debug!(target: LOG_TARGET, "Zombie task wanted more peers.");
+						gum::debug!(target: LOG_TARGET, "Zombie task wanted more peers.");
 						return Ok(())
 					},
 					None =>
@@ -1867,7 +1856,7 @@ impl StatementDistributionSubsystem {
 
 				for deactivated in deactivated {
 					if active_heads.remove(&deactivated).is_some() {
-						tracing::trace!(
+						gum::trace!(
 							target: LOG_TARGET,
 							hash = ?deactivated,
 							"Deactivating leaf",
@@ -1878,7 +1867,7 @@ impl StatementDistributionSubsystem {
 				for activated in activated {
 					let relay_parent = activated.hash;
 					let span = PerLeafSpan::new(activated.span, "statement-distribution");
-					tracing::trace!(
+					gum::trace!(
 						target: LOG_TARGET,
 						hash = ?relay_parent,
 						"New active leaf",
diff --git a/polkadot/node/network/statement-distribution/src/requester.rs b/polkadot/node/network/statement-distribution/src/requester.rs
index 7bc70ea0505ab778a0ab49e3ea7e76b56d9f61c8..904333f046b9a597ea34cbabbea3e7fb0b3f7f9a 100644
--- a/polkadot/node/network/statement-distribution/src/requester.rs
+++ b/polkadot/node/network/statement-distribution/src/requester.rs
@@ -103,7 +103,7 @@ pub async fn fetch(
 				.feed(RequesterMessage::SendRequest(Requests::StatementFetching(outgoing)))
 				.await
 			{
-				tracing::info!(
+				gum::info!(
 					target: LOG_TARGET,
 					?err,
 					"Sending request failed, node might be shutting down - exiting."
@@ -122,7 +122,7 @@ pub async fn fetch(
 						if let Err(err) =
 							sender.feed(RequesterMessage::ReportPeer(peer, COST_WRONG_HASH)).await
 						{
-							tracing::warn!(
+							gum::warn!(
 								target: LOG_TARGET,
 								?err,
 								"Sending reputation change failed: This should not happen."
@@ -142,7 +142,7 @@ pub async fn fetch(
 						})
 						.await
 					{
-						tracing::warn!(
+						gum::warn!(
 							target: LOG_TARGET,
 							?err,
 							"Sending task response failed: This should not happen."
@@ -155,7 +155,7 @@ pub async fn fetch(
 					return
 				},
 				Err(err) => {
-					tracing::debug!(
+					gum::debug!(
 						target: LOG_TARGET,
 						?err,
 						"Receiving response failed with error - trying next peer."
@@ -174,7 +174,7 @@ pub async fn fetch(
 		// All our peers failed us - try getting new ones before trying again:
 		match try_get_new_peers(relay_parent, candidate_hash, &mut sender, &span).await {
 			Ok(Some(mut peers)) => {
-				tracing::trace!(target: LOG_TARGET, ?peers, "Received new peers.");
+				gum::trace!(target: LOG_TARGET, ?peers, "Received new peers.");
 				// New arrivals will be tried first:
 				new_peers.append(&mut peers);
 			},
@@ -205,7 +205,7 @@ async fn try_get_new_peers(
 		.send(RequesterMessage::GetMorePeers { relay_parent, candidate_hash, tx })
 		.await
 	{
-		tracing::debug!(
+		gum::debug!(
 			target: LOG_TARGET,
 			?err,
 			"Failed sending background task message, subsystem probably moved on."
@@ -215,7 +215,7 @@ async fn try_get_new_peers(
 
 	match rx.timeout(RETRY_TIMEOUT).await.transpose() {
 		Err(_) => {
-			tracing::debug!(target: LOG_TARGET, "Failed fetching more peers.");
+			gum::debug!(target: LOG_TARGET, "Failed fetching more peers.");
 			Err(())
 		},
 		Ok(val) => Ok(val),
diff --git a/polkadot/node/network/statement-distribution/src/responder.rs b/polkadot/node/network/statement-distribution/src/responder.rs
index 6aca72bbabed5fdd7607a883cf0f1147bc9e1357..776652c04e7a626ddd1e14f31796fc41fbe3bc3e 100644
--- a/polkadot/node/network/statement-distribution/src/responder.rs
+++ b/polkadot/node/network/statement-distribution/src/responder.rs
@@ -78,11 +78,11 @@ pub async fn respond(
 		let req = match receiver.recv(|| vec![COST_INVALID_REQUEST]).await.into_nested() {
 			Ok(Ok(v)) => v,
 			Err(fatal) => {
-				tracing::debug!(target: LOG_TARGET, error = ?fatal, "Shutting down request responder");
+				gum::debug!(target: LOG_TARGET, error = ?fatal, "Shutting down request responder");
 				return
 			},
 			Ok(Err(jfyi)) => {
-				tracing::debug!(target: LOG_TARGET, error = ?jfyi, "Decoding request failed");
+				gum::debug!(target: LOG_TARGET, error = ?jfyi, "Decoding request failed");
 				continue
 			},
 		};
@@ -97,12 +97,12 @@ pub async fn respond(
 			})
 			.await
 		{
-			tracing::debug!(target: LOG_TARGET, ?err, "Shutting down responder");
+			gum::debug!(target: LOG_TARGET, ?err, "Shutting down responder");
 			return
 		}
 		let response = match rx.await {
 			Err(err) => {
-				tracing::debug!(target: LOG_TARGET, ?err, "Requested data not found.");
+				gum::debug!(target: LOG_TARGET, ?err, "Requested data not found.");
 				Err(())
 			},
 			Ok(v) => Ok(StatementFetchingResponse::Statement(v)),
@@ -115,7 +115,7 @@ pub async fn respond(
 		};
 		pending_out.push(pending_sent_rx);
 		if let Err(_) = req.send_outgoing_response(response) {
-			tracing::debug!(target: LOG_TARGET, "Sending response failed");
+			gum::debug!(target: LOG_TARGET, "Sending response failed");
 		}
 	}
 }
diff --git a/polkadot/node/network/statement-distribution/src/tests.rs b/polkadot/node/network/statement-distribution/src/tests.rs
index eda99cae1d365efa56ad8eff487236b507dfdc7d..a9a534bec2458230d0f22edb453dec532f2aacb1 100644
--- a/polkadot/node/network/statement-distribution/src/tests.rs
+++ b/polkadot/node/network/statement-distribution/src/tests.rs
@@ -1275,7 +1275,7 @@ fn receiving_large_statement_from_one_sends_to_another_and_to_candidate_backing(
 					),
 				)
 			) => {
-				tracing::debug!(
+				gum::debug!(
 					target: LOG_TARGET,
 					?recipients,
 					"Recipients received"
@@ -1613,7 +1613,7 @@ fn share_prioritizes_backing_group() {
 					),
 				)
 			) => {
-				tracing::debug!(
+				gum::debug!(
 					target: LOG_TARGET,
 					?recipients,
 					"Recipients received"
diff --git a/polkadot/node/overseer/Cargo.toml b/polkadot/node/overseer/Cargo.toml
index ee775f146ec6dcd0c0265100f21a2262cf7ec14b..2c10d16a780dac3e4c628f43af21e40fa853adb2 100644
--- a/polkadot/node/overseer/Cargo.toml
+++ b/polkadot/node/overseer/Cargo.toml
@@ -16,7 +16,7 @@ polkadot-node-subsystem-types = { path = "../subsystem-types" }
 polkadot-node-metrics = { path = "../metrics" }
 polkadot-primitives = { path = "../../primitives" }
 polkadot-overseer-gen = { path = "./overseer-gen" }
-tracing = "0.1.32"
+gum = { package = "tracing-gum", path = "../gum" }
 lru = "0.7"
 parity-util-mem = { version = "0.11.0", default-features = false }
 
diff --git a/polkadot/node/overseer/examples/minimal-example.rs b/polkadot/node/overseer/examples/minimal-example.rs
index 14f8cf9d4ed7f1eab0428201109e00bce49b53a1..20141fad8555baea8fcbe529cb77a5419c644d26 100644
--- a/polkadot/node/overseer/examples/minimal-example.rs
+++ b/polkadot/node/overseer/examples/minimal-example.rs
@@ -59,13 +59,13 @@ impl Subsystem1 {
 			match ctx.try_recv().await {
 				Ok(Some(msg)) => {
 					if let FromOverseer::Communication { msg } = msg {
-						tracing::info!("msg {:?}", msg);
+						gum::info!("msg {:?}", msg);
 					}
 					continue 'louy
 				},
 				Ok(None) => (),
 				Err(_) => {
-					tracing::info!("exiting");
+					gum::info!("exiting");
 					break 'louy
 				},
 			}
@@ -121,7 +121,7 @@ impl Subsystem2 {
 			"subsystem-2-job",
 			Box::pin(async {
 				loop {
-					tracing::info!("Job tick");
+					gum::info!("Job tick");
 					Delay::new(Duration::from_secs(1)).await;
 				}
 			}),
@@ -131,14 +131,14 @@ impl Subsystem2 {
 		loop {
 			match ctx.try_recv().await {
 				Ok(Some(msg)) => {
-					tracing::info!("Subsystem2 received message {:?}", msg);
+					gum::info!("Subsystem2 received message {:?}", msg);
 					continue
 				},
 				Ok(None) => {
 					pending!();
 				},
 				Err(_) => {
-					tracing::info!("exiting");
+					gum::info!("exiting");
 					return
 				},
 			}
@@ -189,7 +189,7 @@ fn main() {
 			select! {
 				_ = overseer_fut => break,
 				_ = timer_stream.next() => {
-					tracing::info!("tick");
+					gum::info!("tick");
 				}
 				complete => break,
 			}
diff --git a/polkadot/node/overseer/overseer-gen/Cargo.toml b/polkadot/node/overseer/overseer-gen/Cargo.toml
index 30bddfc90f702d21f24043f8cc3635571067b822..14e0d9b743b92dae52dcfa577964e752b902282d 100644
--- a/polkadot/node/overseer/overseer-gen/Cargo.toml
+++ b/polkadot/node/overseer/overseer-gen/Cargo.toml
@@ -6,7 +6,7 @@ edition = "2021"
 description = "Generate an overseer including builder pattern and message wrapper from a single struct."
 
 [dependencies]
-tracing = "0.1"
+gum = { package = "tracing-gum", path = "../../gum" }
 futures = "0.3"
 async-trait = "0.1"
 thiserror = "1"
diff --git a/polkadot/node/overseer/overseer-gen/proc-macro/src/impl_builder.rs b/polkadot/node/overseer/overseer-gen/proc-macro/src/impl_builder.rs
index be2b00715fc42017fd79f48efe5e3d48b90a50dc..f669519e8a6b890f7b749bc2b4fb0219b155c603 100644
--- a/polkadot/node/overseer/overseer-gen/proc-macro/src/impl_builder.rs
+++ b/polkadot/node/overseer/overseer-gen/proc-macro/src/impl_builder.rs
@@ -624,9 +624,9 @@ pub(crate) fn impl_task_kind(info: &OverseerInfo) -> proc_macro2::TokenStream {
 
 			let fut = Box::pin(async move {
 				if let Err(e) = future.await {
-					#support_crate ::tracing::error!(subsystem=name, err = ?e, "subsystem exited with error");
+					#support_crate ::gum::error!(subsystem=name, err = ?e, "subsystem exited with error");
 				} else {
-					#support_crate ::tracing::debug!(subsystem=name, "subsystem exited without an error");
+					#support_crate ::gum::debug!(subsystem=name, "subsystem exited without an error");
 				}
 				let _ = tx.send(());
 			});
@@ -635,7 +635,7 @@ pub(crate) fn impl_task_kind(info: &OverseerInfo) -> proc_macro2::TokenStream {
 
 			futures.push(Box::pin(
 				rx.map(|e| {
-					tracing::warn!(err = ?e, "dropping error");
+					gum::warn!(err = ?e, "dropping error");
 					Ok(())
 				})
 			));
diff --git a/polkadot/node/overseer/overseer-gen/proc-macro/src/impl_channels_out.rs b/polkadot/node/overseer/overseer-gen/proc-macro/src/impl_channels_out.rs
index e2ec1752ce78370589072200d87544086e7af3db..f61071abc8e11a2d3a6f6e425e53c2af56085a1d 100644
--- a/polkadot/node/overseer/overseer-gen/proc-macro/src/impl_channels_out.rs
+++ b/polkadot/node/overseer/overseer-gen/proc-macro/src/impl_channels_out.rs
@@ -82,7 +82,7 @@ pub(crate) fn impl_channels_out_struct(info: &OverseerInfo) -> Result<proc_macro
 				};
 
 				if let Err(subsystem_name) = res {
-					#support_crate ::tracing::debug!(
+					#support_crate ::gum::debug!(
 						target: LOG_TARGET,
 						"Failed to send (bounded) a message to {} subsystem",
 						subsystem_name
@@ -114,7 +114,7 @@ pub(crate) fn impl_channels_out_struct(info: &OverseerInfo) -> Result<proc_macro
 				};
 
 				if let Err(subsystem_name) = res {
-					#support_crate ::tracing::debug!(
+					#support_crate ::gum::debug!(
 						target: LOG_TARGET,
 						"Failed to send_unbounded a message to {} subsystem",
 						subsystem_name
diff --git a/polkadot/node/overseer/overseer-gen/proc-macro/src/impl_overseer.rs b/polkadot/node/overseer/overseer-gen/proc-macro/src/impl_overseer.rs
index 54b207eb1960904ea7bcead16386cd0197c17d5f..4721eb5373e1116b0dfd5e68ae33e4c57f334aef 100644
--- a/polkadot/node/overseer/overseer-gen/proc-macro/src/impl_overseer.rs
+++ b/polkadot/node/overseer/overseer-gen/proc-macro/src/impl_overseer.rs
@@ -212,7 +212,7 @@ pub(crate) fn impl_overseen_subsystem(info: &OverseerInfo) -> proc_macro2::Token
 					}).timeout(MESSAGE_TIMEOUT).await
 					{
 						None => {
-							#support_crate ::tracing::error!(
+							#support_crate ::gum::error!(
 								target: LOG_TARGET,
 								%origin,
 								"Subsystem {} appears unresponsive.",
diff --git a/polkadot/node/overseer/overseer-gen/src/lib.rs b/polkadot/node/overseer/overseer-gen/src/lib.rs
index c233f20d0e2647cbc4497ce6621c5d0344052f15..95eb56166d5a426e4eef6562c4bd3ac824f52984 100644
--- a/polkadot/node/overseer/overseer-gen/src/lib.rs
+++ b/polkadot/node/overseer/overseer-gen/src/lib.rs
@@ -62,12 +62,12 @@
 
 pub use polkadot_overseer_gen_proc_macro::overlord;
 
+#[doc(hidden)]
+pub use gum;
 #[doc(hidden)]
 pub use metered;
 #[doc(hidden)]
 pub use polkadot_node_primitives::SpawnNamed;
-#[doc(hidden)]
-pub use tracing;
 
 #[doc(hidden)]
 pub use async_trait::async_trait;
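The re-export swap above matters because code emitted by the `overlord` proc macro (see the `impl_builder.rs`, `impl_channels_out.rs` and `impl_overseer.rs` hunks earlier in this patch) reaches the logging macros through the support crate rather than through a direct `gum` dependency of the generated crate. A sketch of the expanded call shape, assuming `#support_crate` resolves to `polkadot_overseer_gen` and using an assumed `LOG_TARGET` value:

    // Illustrative sketch only, not part of the patch.
    const LOG_TARGET: &str = "overseer"; // assumed value

    fn note_unresponsive(origin: &str) {
        // `#support_crate ::gum::error!(...)` in the macro output expands to a
        // path invocation like this, which works only because of the
        // `pub use gum;` re-export above.
        polkadot_overseer_gen::gum::error!(
            target: LOG_TARGET,
            %origin,
            "Subsystem {} appears unresponsive.",
            "example-subsystem",
        );
    }
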
diff --git a/polkadot/node/overseer/src/dummy.rs b/polkadot/node/overseer/src/dummy.rs
index dba531cbf07c9bcf36840f5f8b47fcc6849d0997..eb6b9099a8bbb556dc0fb40ee3a0775aaa48e5f7 100644
--- a/polkadot/node/overseer/src/dummy.rs
+++ b/polkadot/node/overseer/src/dummy.rs
@@ -43,7 +43,7 @@ where
 					Err(_) => return Ok(()),
 					Ok(FromOverseer::Signal(OverseerSignal::Conclude)) => return Ok(()),
 					Ok(overseer_msg) => {
-						tracing::debug!(
+						gum::debug!(
 							target: "dummy-subsystem",
 							"Discarding a message sent from overseer {:?}",
 							overseer_msg
diff --git a/polkadot/node/overseer/src/lib.rs b/polkadot/node/overseer/src/lib.rs
index 5fabcf018c17a69d01b42135356c92b8138e08d9..dc44abc799f39f04fea332e1d3a427a6a4e5f4f6 100644
--- a/polkadot/node/overseer/src/lib.rs
+++ b/polkadot/node/overseer/src/lib.rs
@@ -199,7 +199,7 @@ impl Handle {
 	/// Most basic operation, to stop a server.
 	async fn send_and_log_error(&mut self, event: Event) {
 		if self.0.send(event).await.is_err() {
-			tracing::info!(target: LOG_TARGET, "Failed to send an event to Overseer");
+			gum::info!(target: LOG_TARGET, "Failed to send an event to Overseer");
 		}
 	}
 }
@@ -529,21 +529,18 @@ where
 			Ok(memory_stats) =>
 				Box::new(move |metrics: &OverseerMetrics| match memory_stats.snapshot() {
 					Ok(memory_stats_snapshot) => {
-						tracing::trace!(
+						gum::trace!(
 							target: LOG_TARGET,
 							"memory_stats: {:?}",
 							&memory_stats_snapshot
 						);
 						metrics.memory_stats_snapshot(memory_stats_snapshot);
 					},
-					Err(e) => tracing::debug!(
-						target: LOG_TARGET,
-						"Failed to obtain memory stats: {:?}",
-						e
-					),
+					Err(e) =>
+						gum::debug!(target: LOG_TARGET, "Failed to obtain memory stats: {:?}", e),
 				}),
 			Err(_) => {
-				tracing::debug!(
+				gum::debug!(
 					target: LOG_TARGET,
 					"Memory allocation tracking is not supported by the allocator.",
 				);
@@ -634,7 +631,7 @@ where
 					}
 				},
 				res = self.running_subsystems.select_next_some() => {
-					tracing::error!(
+					gum::error!(
 						target: LOG_TARGET,
 						subsystem = ?res,
 						"subsystem finished unexpectedly",
diff --git a/polkadot/node/service/Cargo.toml b/polkadot/node/service/Cargo.toml
index 1e7ad76b87706a037b5697a1500e422e5a70d31c..5925a0c356d4f6abb48fed630bf1b20b56022095 100644
--- a/polkadot/node/service/Cargo.toml
+++ b/polkadot/node/service/Cargo.toml
@@ -63,7 +63,7 @@ prometheus-endpoint = { package = "substrate-prometheus-endpoint", git = "https:
 # External Crates
 futures = "0.3.21"
 hex-literal = "0.3.4"
-tracing = "0.1.32"
+gum = { package = "tracing-gum", path = "../gum/" }
 serde = { version = "1.0.136", features = ["derive"] }
 thiserror = "1.0.30"
 kvdb = "0.11.0"
diff --git a/polkadot/node/service/src/lib.rs b/polkadot/node/service/src/lib.rs
index fd660f01f7fdf9da187fe7f60a432e69653ce6c5..a563e95a70877a9e5bb2915b2d9c8c8c25c54bb0 100644
--- a/polkadot/node/service/src/lib.rs
+++ b/polkadot/node/service/src/lib.rs
@@ -36,6 +36,7 @@ mod tests;
 use {
 	beefy_gadget::notification::{BeefyBestBlockSender, BeefySignedCommitmentSender},
 	grandpa::{self, FinalityProofProvider as GrandpaFinalityProofProvider},
+	gum::info,
 	polkadot_node_core_approval_voting::Config as ApprovalVotingConfig,
 	polkadot_node_core_av_store::Config as AvailabilityConfig,
 	polkadot_node_core_av_store::Error as AvailabilityError,
@@ -47,7 +48,6 @@ use {
 	polkadot_overseer::BlockInfo,
 	sc_client_api::{BlockBackend, ExecutorProvider},
 	sp_trie::PrefixedMemoryDB,
-	tracing::info,
 };
 
 pub use sp_core::traits::SpawnNamed;
@@ -975,7 +975,7 @@ where
 	};
 
 	if local_keystore.is_none() {
-		tracing::info!("Cannot run as validator without local keystore.");
+		gum::info!("Cannot run as validator without local keystore.");
 	}
 
 	let maybe_params =
@@ -1011,7 +1011,7 @@ where
 				},
 			)
 			.map_err(|e| {
-				tracing::error!("Failed to init overseer: {}", e);
+				gum::error!("Failed to init overseer: {}", e);
 				e
 			})?;
 		let handle = Handle::new(overseer_handle.clone());
diff --git a/polkadot/node/service/src/relay_chain_selection.rs b/polkadot/node/service/src/relay_chain_selection.rs
index c6c021dba2d198328afcb5b993b765bf6b1f188b..9f6cd50342ecba2f842d87ecdc6d78ad16768db7 100644
--- a/polkadot/node/service/src/relay_chain_selection.rs
+++ b/polkadot/node/service/src/relay_chain_selection.rs
@@ -153,7 +153,7 @@ where
 {
 	/// Use the plain longest chain algorithm exclusively.
 	pub fn new_longest_chain(backend: Arc<B>) -> Self {
-		tracing::debug!(target: LOG_TARGET, "Using {} chain selection algorithm", "longest");
+		gum::debug!(target: LOG_TARGET, "Using {} chain selection algorithm", "longest");
 
 		Self {
 			longest_chain: sc_consensus::LongestChain::new(backend.clone()),
@@ -169,7 +169,7 @@ where
 		metrics: Metrics,
 		disputes_enabled: bool,
 	) -> Self {
-		tracing::debug!(
+		gum::debug!(
 			target: LOG_TARGET,
 			"Using {} chain selection algorithm",
 			if disputes_enabled {
@@ -351,7 +351,7 @@ where
 			.map_err(Error::LeavesCanceled)
 			.map_err(|e| ConsensusError::Other(Box::new(e)))?;
 
-		tracing::trace!(target: LOG_TARGET, ?leaves, "Chain selection leaves");
+		gum::trace!(target: LOG_TARGET, ?leaves, "Chain selection leaves");
 
 		Ok(leaves)
 	}
@@ -368,7 +368,7 @@ where
 			.ok_or_else(|| ConsensusError::Other(Box::new(Error::EmptyLeaves)))?
 			.clone();
 
-		tracing::trace!(target: LOG_TARGET, ?best_leaf, "Best chain");
+		gum::trace!(target: LOG_TARGET, ?best_leaf, "Best chain");
 
 		self.block_header(best_leaf)
 	}
@@ -389,7 +389,7 @@ where
 		maybe_max_number: Option<BlockNumber>,
 	) -> Result<Hash, ConsensusError> {
 		let mut overseer = self.overseer.clone();
-		tracing::trace!(target: LOG_TARGET, ?best_leaf, "Longest chain");
+		gum::trace!(target: LOG_TARGET, ?best_leaf, "Longest chain");
 
 		let subchain_head = if self.disputes_enabled {
 			let (tx, rx) = oneshot::channel();
@@ -405,7 +405,7 @@ where
 				.map_err(Error::BestLeafContainingCanceled)
 				.map_err(|e| ConsensusError::Other(Box::new(e)))?;
 
-			tracing::trace!(target: LOG_TARGET, ?best, "Best leaf containing");
+			gum::trace!(target: LOG_TARGET, ?best, "Best leaf containing");
 
 			match best {
 				// No viable leaves containing the block.
@@ -413,7 +413,7 @@ where
 				Some(best) => best,
 			}
 		} else {
-			tracing::trace!(target: LOG_TARGET, ?best_leaf, "Dummy disputes active");
+			gum::trace!(target: LOG_TARGET, ?best_leaf, "Dummy disputes active");
 			if best_leaf == target_hash {
 				return Ok(target_hash)
 			} else {
@@ -429,7 +429,7 @@ where
 			Some(max) => {
 				if max <= target_number {
 					if max < target_number {
-						tracing::warn!(
+						gum::warn!(
 							LOG_TARGET,
 							max_number = max,
 							target_number,
@@ -442,7 +442,7 @@ where
 				let subchain_header = self.block_header(subchain_head)?;
 
 				if subchain_header.number <= max {
-					tracing::trace!(target: LOG_TARGET, ?best_leaf, "Constrained sub-chain head",);
+					gum::trace!(target: LOG_TARGET, ?best_leaf, "Constrained sub-chain head",);
 					subchain_head
 				} else {
 					let (ancestor_hash, _) =
@@ -452,7 +452,7 @@ where
 							&subchain_header,
 						)
 						.map_err(|e| ConsensusError::ChainLookup(format!("{:?}", e)))?;
-					tracing::trace!(
+					gum::trace!(
 						target: LOG_TARGET,
 						?ancestor_hash,
 						"Grandpa walk backwards sub-chain head"
@@ -487,11 +487,7 @@ where
 			}
 		};
 
-		tracing::trace!(
-			target: LOG_TARGET,
-			?subchain_head,
-			"Ancestor approval restriction applied",
-		);
+		gum::trace!(target: LOG_TARGET, ?subchain_head, "Ancestor approval restriction applied",);
 
 		let lag = initial_leaf_number.saturating_sub(subchain_number);
 		self.metrics.note_approval_checking_finality_lag(lag);
@@ -501,7 +497,7 @@ where
 			if Some(subchain_block_descriptions.len() as _) !=
 				subchain_number.checked_sub(target_number)
 			{
-				tracing::error!(
+				gum::error!(
 					LOG_TARGET,
 					present_block_descriptions = subchain_block_descriptions.len(),
 					target_number,
@@ -536,7 +532,7 @@ where
 						(lag_disputes, subchain_head)
 					},
 					Err(e) => {
-						tracing::error!(
+						gum::error!(
 							target: LOG_TARGET,
 							error = ?e,
 							"Call to `DetermineUndisputedChain` failed",
@@ -552,7 +548,7 @@ where
 			(lag, subchain_head)
 		};
 
-		tracing::trace!(
+		gum::trace!(
 			target: LOG_TARGET,
 			?subchain_head,
 			"Disputed blocks in ancestry restriction applied",
@@ -565,7 +561,7 @@ where
 			let safe_target = initial_leaf_number - MAX_FINALITY_LAG;
 
 			if safe_target <= target_number {
-				tracing::warn!(target: LOG_TARGET, ?target_hash, "Safeguard enforced finalization");
+				gum::warn!(target: LOG_TARGET, ?target_hash, "Safeguard enforced finalization");
 				// Minimal vote needs to be on the target number.
 				Ok(target_hash)
 			} else {
@@ -578,7 +574,7 @@ where
 				)
 				.map_err(|e| ConsensusError::ChainLookup(format!("{:?}", e)))?;
 
-				tracing::warn!(
+				gum::warn!(
 					target: LOG_TARGET,
 					?forced_target,
 					"Safeguard enforced finalization of child"
diff --git a/polkadot/node/service/src/tests.rs b/polkadot/node/service/src/tests.rs
index 5fdf3ce5d47868b2b4bf2dd101254a1ab80b9c4a..fe6b699469434c15bce54707be68d117c1ceaad5 100644
--- a/polkadot/node/service/src/tests.rs
+++ b/polkadot/node/service/src/tests.rs
@@ -115,7 +115,7 @@ async fn overseer_recv(overseer: &mut VirtualOverseer) -> AllMessages {
 		.await
 		.expect(&format!("{:?} is enough to receive messages.", TIMEOUT));
 
-	tracing::trace!("Received message:\n{:?}", &msg);
+	gum::trace!("Received message:\n{:?}", &msg);
 
 	msg
 }
@@ -123,7 +123,7 @@ async fn overseer_recv_with_timeout(
 	overseer: &mut VirtualOverseer,
 	timeout: Duration,
 ) -> Option<AllMessages> {
-	tracing::trace!("Waiting for message...");
+	gum::trace!("Waiting for message...");
 	overseer.recv().timeout(timeout).await
 }
 
@@ -353,7 +353,7 @@ async fn test_skeleton(
 ) {
 	let undisputed_chain = undisputed_chain.map(|x| (chain.number(x).unwrap().unwrap(), x));
 
-	tracing::trace!("best leaf response: {:?}", undisputed_chain);
+	gum::trace!("best leaf response: {:?}", undisputed_chain);
 	assert_matches!(
 		overseer_recv(
 			virtual_overseer
@@ -372,7 +372,7 @@ async fn test_skeleton(
 		return
 	}
 
-	tracing::trace!("approved ancestor response: {:?}", undisputed_chain);
+	gum::trace!("approved ancestor response: {:?}", undisputed_chain);
 	assert_matches!(
 		overseer_recv(
 			virtual_overseer
@@ -383,7 +383,7 @@ async fn test_skeleton(
 		}
 	);
 
-	tracing::trace!("determine undisputed chain response: {:?}", undisputed_chain);
+	gum::trace!("determine undisputed chain response: {:?}", undisputed_chain);
 
 	let target_block_number = chain.number(target_block_hash).unwrap().unwrap();
 	assert_matches!(
@@ -724,8 +724,8 @@ fn chain_6() -> CaseVars {
 
 	let chain = builder.init();
 
-	tracing::trace!(highest_approved = ?chain.highest_approved_ancestors(1, leaf));
-	tracing::trace!(undisputed = ?chain.undisputed_chain(1, approved));
+	gum::trace!(highest_approved = ?chain.highest_approved_ancestors(1, leaf));
+	gum::trace!(undisputed = ?chain.undisputed_chain(1, approved));
 	CaseVars {
 		chain,
 		target_block: b1,
diff --git a/polkadot/node/subsystem-util/Cargo.toml b/polkadot/node/subsystem-util/Cargo.toml
index f5be9018833a463feb52a6975db7a281533034d9..96bf8366ba2c483f1815655934d6e5302494de27 100644
--- a/polkadot/node/subsystem-util/Cargo.toml
+++ b/polkadot/node/subsystem-util/Cargo.toml
@@ -15,7 +15,7 @@ pin-project = "1.0.9"
 rand = "0.8.5"
 thiserror = "1.0.30"
 fatality = "0.0.6"
-tracing = "0.1.32"
+gum = { package = "tracing-gum", path = "../gum" }
 derive_more = "0.99.17"
 lru = "0.7.3"
 
diff --git a/polkadot/node/subsystem-util/src/lib.rs b/polkadot/node/subsystem-util/src/lib.rs
index e7c7a1851323bb8dbbe39dce992535cce2db2d50..ccdfe7982b5930052a4af1718e1d4b73e9a258fc 100644
--- a/polkadot/node/subsystem-util/src/lib.rs
+++ b/polkadot/node/subsystem-util/src/lib.rs
@@ -586,7 +586,7 @@ where
 			)
 			.await
 			{
-				tracing::error!(
+				gum::error!(
 					job = Job::NAME,
 					parent_hash = %hash,
 					err = ?e,
@@ -732,7 +732,7 @@ impl<Job: JobTrait, Spawner> JobSubsystem<Job, Spawner> {
 							}
 						}
 						Err(err) => {
-							tracing::error!(
+							gum::error!(
 								job = Job::NAME,
 								err = ?err,
 								"error receiving message from subsystem context for job",
@@ -751,7 +751,7 @@ impl<Job: JobTrait, Spawner> JobSubsystem<Job, Spawner> {
 					};
 
 					if let Err(e) = res {
-						tracing::warn!(err = ?e, "failed to handle command from job");
+						gum::warn!(err = ?e, "failed to handle command from job");
 					}
 				}
 				complete => break,
diff --git a/polkadot/node/test/service/Cargo.toml b/polkadot/node/test/service/Cargo.toml
index a7d236bc64715c4905a823c7ac02860e3d22a7a8..524fb69dca068208dad2633493dfab2705297702 100644
--- a/polkadot/node/test/service/Cargo.toml
+++ b/polkadot/node/test/service/Cargo.toml
@@ -8,7 +8,7 @@ edition = "2021"
 futures = "0.3.21"
 futures01 = { package = "futures", version = "0.1.29" }
 hex = "0.4.3"
-tracing = "0.1.32"
+gum = { package = "tracing-gum", path = "../../gum" }
 rand = "0.8.5"
 tempfile = "3.2.0"
 tokio = "1.17.0"
diff --git a/polkadot/node/zombienet-backchannel/Cargo.toml b/polkadot/node/zombienet-backchannel/Cargo.toml
index 620f399ca83ae08261223864790fef6327008206..e1f8813c30c7d55042b7bc8e4dc86ebad5780475 100644
--- a/polkadot/node/zombienet-backchannel/Cargo.toml
+++ b/polkadot/node/zombienet-backchannel/Cargo.toml
@@ -17,6 +17,6 @@ lazy_static = "1.4.0"
 parity-scale-codec = { version = "3.1.0", features = ["derive"] }
 reqwest = "0.11"
 thiserror = "1.0.30"
-tracing = "0.1.32"
+gum = { package = "tracing-gum", path = "../gum/" }
 serde = { version = "1.0", features = ["derive"] }
 serde_json = "1"
diff --git a/polkadot/node/zombienet-backchannel/src/lib.rs b/polkadot/node/zombienet-backchannel/src/lib.rs
index c489ee0e7f962eed1e3776f440f7394240aaea68..d5298515e762911c8f216155d9915450760b626d 100644
--- a/polkadot/node/zombienet-backchannel/src/lib.rs
+++ b/polkadot/node/zombienet-backchannel/src/lib.rs
@@ -77,7 +77,7 @@ impl Broadcaster {
 
 		let sender = zombienet_bkc.ws_tx.clone();
 		sender.send(backchannel_item).map_err(|e| {
-			tracing::error!(target = ZOMBIENET, "Error sending new item: {}", e);
+			gum::error!(target: ZOMBIENET, "Error sending new item: {}", e);
 			BackchannelError::SendItemFail
 		})?;
 
@@ -102,7 +102,7 @@ impl ZombienetBackchannel {
 			};
 
 			let ws_url = format!("ws://{}:{}/ws", backchannel_host, backchannel_port);
-			tracing::debug!(target = ZOMBIENET, "Connecting to : {}", &ws_url);
+			gum::debug!(target: ZOMBIENET, "Connecting to : {}", &ws_url);
 			let (ws_stream, _) =
 				connect_async(ws_url).await.map_err(|_| BackchannelError::CantConnectToWS)?;
 			let (mut write, mut read) = ws_stream.split();
@@ -117,11 +117,11 @@ impl ZombienetBackchannel {
 					match serde_json::from_str::<BackchannelItem>(&text) {
 						Ok(backchannel_item) =>
 							if tx1.send(backchannel_item).is_err() {
-								tracing::error!("Error sending through the channel");
+								gum::error!(target: ZOMBIENET, "Error sending through the channel");
 								return
 							},
 						Err(_) => {
-							tracing::error!("Invalid payload received");
+							gum::error!(target: ZOMBIENET, "Invalid payload received");
 						},
 					}
 				}
@@ -135,7 +135,7 @@ impl ZombienetBackchannel {
 						.await
 						.is_err()
 					{
-						tracing::error!("Error sending through ws");
+						gum::error!(target: ZOMBIENET, "Error sending through ws");
 					}
 				}
 			});
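Note that the zombienet-backchannel hunks above also change `target = ZOMBIENET` into `target: ZOMBIENET`. The distinction matters under the `tracing`-style macro syntax that `gum` follows: `target:` sets the event's log target, while `target = ...` only records an ordinary field named `target`. A minimal sketch of the corrected form, with an assumed value for the `ZOMBIENET` constant:

    // Illustrative sketch only, not part of the patch.
    const ZOMBIENET: &str = "zombienet"; // assumed value

    fn report_send_error(e: &str) {
        // Sets the event target, so target-based filters (e.g. RUST_LOG=zombienet=error)
        // pick it up.
        gum::error!(target: ZOMBIENET, "Error sending new item: {}", e);
        // The pre-patch `tracing::error!(target = ZOMBIENET, ...)` instead recorded a
        // field named `target` and kept the default module-path target.
    }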