// Copyright 2020 Parity Technologies (UK) Ltd.
// This file is part of Polkadot.

// Polkadot is free software: you can redistribute it and/or modify
// it under the terms of the GNU General Public License as published by
// the Free Software Foundation, either version 3 of the License, or
// (at your option) any later version.

// Polkadot is distributed in the hope that it will be useful,
// but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
// GNU General Public License for more details.

// You should have received a copy of the GNU General Public License
// along with Polkadot.  If not, see <http://www.gnu.org/licenses/>.

//! Availability Recovery Subsystem of Polkadot.
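//!
//! This subsystem recovers the full `AvailableData` for a candidate: it first tries the
//! fast path of asking a backing validator for the complete data and, if that fails, falls
//! back to fetching erasure chunks from validators and reconstructing the data, verifying
//! the result against the candidate's erasure root.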

#![warn(missing_docs)]

use std::collections::HashMap;
use std::pin::Pin;

use futures::{channel::oneshot, prelude::*, stream::FuturesUnordered};
use futures::future::{BoxFuture, RemoteHandle, FutureExt};
use futures::task::{Context, Poll};
use lru::LruCache;
use rand::seq::SliceRandom;

use polkadot_primitives::v1::{
	AuthorityDiscoveryId, CandidateReceipt, CandidateHash,
	Hash, ValidatorId, ValidatorIndex,
	SessionInfo, SessionIndex, BlakeTwo256, HashT, GroupIndex, BlockNumber,
};
use polkadot_node_primitives::{ErasureChunk, AvailableData};
use polkadot_subsystem::{
	SubsystemContext, SubsystemResult, SubsystemError, Subsystem, SpawnedSubsystem, FromOverseer,
	OverseerSignal, ActiveLeavesUpdate, SubsystemSender,
	errors::RecoveryError,
	jaeger,
	messages::{
		AvailabilityStoreMessage, AvailabilityRecoveryMessage, AllMessages, NetworkBridgeMessage,
	},
};
use polkadot_node_network_protocol::{
	IfDisconnected,
	request_response::{
		self as req_res, OutgoingRequest, Recipient, Requests,
		request::RequestError,
	},
};
use polkadot_node_subsystem_util::request_session_info_ctx;
use polkadot_erasure_coding::{branches, branch_hash, recovery_threshold, obtain_chunks_v1};
mod error;

#[cfg(test)]
mod tests;

const LOG_TARGET: &str = "parachain::availability-recovery";

// How many parallel requests an interaction should have going at once.
const N_PARALLEL: usize = 50;

// Size of the LRU cache where we keep recovered data.
const LRU_SIZE: usize = 16;

/// The Availability Recovery Subsystem.
pub struct AvailabilityRecoverySubsystem {
	fast_path: bool,
}

struct RequestFromBackersPhase {
	// a random shuffling of the validators from the backing group which indicates the order
	// in which we connect to them and request the full available data.
	shuffled_backers: Vec<ValidatorIndex>,
}

struct RequestChunksPhase {
	// a random shuffling of the validators which indicates the order in which we connect
	// to them and request their chunk.
	shuffling: Vec<ValidatorIndex>,
	received_chunks: HashMap<ValidatorIndex, ErasureChunk>,
	requesting_chunks: FuturesUnordered<BoxFuture<
		'static,
		Result<Option<ErasureChunk>, (ValidatorIndex, RequestError)>,
	>>,
}

struct InteractionParams {
	/// Discovery ids of `validators`.
	validator_authority_keys: Vec<AuthorityDiscoveryId>,

	/// Validators relevant to this `Interaction`.
	validators: Vec<ValidatorId>,

	/// The number of pieces needed.
	threshold: usize,

	/// A hash of the relevant candidate.
	candidate_hash: CandidateHash,

	/// The root of the erasure encoding of the para block.
	erasure_root: Hash,
}

enum InteractionPhase {
	RequestFromBackers(RequestFromBackersPhase),
	RequestChunks(RequestChunksPhase),
}

/// The state of a single interaction reconstructing available data.
struct Interaction<S> {
	sender: S,

	/// The parameters of the interaction.
	params: InteractionParams,

	/// The phase of the interaction.
	phase: InteractionPhase,
}

impl RequestFromBackersPhase {
	fn new(mut backers: Vec<ValidatorIndex>) -> Self {
		backers.shuffle(&mut rand::thread_rng());

		RequestFromBackersPhase {
			shuffled_backers: backers,
		}
	}

	// Run this phase to completion.
	async fn run(
		&mut self,
		params: &InteractionParams,
		sender: &mut impl SubsystemSender,
	) -> Result<AvailableData, RecoveryError> {
		tracing::trace!(
			target: LOG_TARGET,
			candidate_hash = ?params.candidate_hash,
			erasure_root = ?params.erasure_root,
			"Requesting from backers",
		);
		loop {
			// Pop the next backer, and proceed to next phase if we're out.
			let validator_index = match self.shuffled_backers.pop() {
				None => return Err(RecoveryError::Unavailable),
				Some(i) => i,
			};

			// Request data.
			let (req, res) = OutgoingRequest::new(
				Recipient::Authority(params.validator_authority_keys[validator_index.0 as usize].clone()),
				req_res::v1::AvailableDataFetchingRequest { candidate_hash: params.candidate_hash },
			);

			sender.send_message(NetworkBridgeMessage::SendRequests(
				vec![Requests::AvailableDataFetching(req)],
				IfDisconnected::TryConnect,
			).into()).await;

			match res.await {
				Ok(req_res::v1::AvailableDataFetchingResponse::AvailableData(data)) => {
					if reconstructed_data_matches_root(params.validators.len(), &params.erasure_root, &data) {
						tracing::trace!(
							target: LOG_TARGET,
							candidate_hash = ?params.candidate_hash,
							"Received full data",
						);

						return Ok(data);
					} else {
						tracing::debug!(
							target: LOG_TARGET,
							candidate_hash = ?params.candidate_hash,
							?validator_index,
							"Invalid data response",
						);

						// it doesn't help to report the peer with req/res.
					}
				}
				Ok(req_res::v1::AvailableDataFetchingResponse::NoSuchData) => {}
				Err(e) => tracing::debug!(
					target: LOG_TARGET,
					candidate_hash = ?params.candidate_hash,
					?validator_index,
					err = ?e,
					"Error fetching full available data."
				),
			}
		}
	}
}

impl RequestChunksPhase {
	fn new(n_validators: u32) -> Self {
		let mut shuffling: Vec<_> = (0..n_validators).map(ValidatorIndex).collect();
		shuffling.shuffle(&mut rand::thread_rng());

		RequestChunksPhase {
			shuffling,
			received_chunks: HashMap::new(),
			requesting_chunks: FuturesUnordered::new(),
		}
	}

	async fn launch_parallel_requests(
		&mut self,
		params: &InteractionParams,
		sender: &mut impl SubsystemSender,
	) {
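		// Cap the number of in-flight requests; there is no benefit in having more than
		// `threshold` chunk requests outstanding at any one time.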
		let max_requests = std::cmp::min(N_PARALLEL, params.threshold);
		while self.requesting_chunks.len() < max_requests {
			if let Some(validator_index) = self.shuffling.pop() {
				let validator = params.validator_authority_keys[validator_index.0 as usize].clone();
				tracing::trace!(
					target: LOG_TARGET,
					?validator,
					?validator_index,
					candidate_hash = ?params.candidate_hash,
					"Requesting chunk",
				);

				// Request data.
				let raw_request = req_res::v1::ChunkFetchingRequest {
					candidate_hash: params.candidate_hash,
					index: validator_index,
				};

				let (req, res) = OutgoingRequest::new(
					Recipient::Authority(validator),
					raw_request.clone(),
				);

				sender.send_message(NetworkBridgeMessage::SendRequests(
					vec![Requests::ChunkFetching(req)],
					IfDisconnected::TryConnect,
				).into()).await;

				self.requesting_chunks.push(Box::pin(async move {
					match res.await {
						Ok(req_res::v1::ChunkFetchingResponse::Chunk(chunk))
							=> Ok(Some(chunk.recombine_into_chunk(&raw_request))),
						Ok(req_res::v1::ChunkFetchingResponse::NoSuchChunk) => Ok(None),
						Err(e) => Err((validator_index, e)),
					}
				}));
			} else {
				break;
			}
		}
	}

	async fn wait_for_chunks(
		&mut self,
		params: &InteractionParams,
	) {
		// Poll for new updates from requesting_chunks.
		while let Poll::Ready(Some(request_result))
			= futures::poll!(self.requesting_chunks.next())
		{
			match request_result {
				Ok(Some(chunk)) => {
					// Check merkle proofs of any received chunks.

					let validator_index = chunk.index;

					if let Ok(anticipated_hash) = branch_hash(
						&params.erasure_root,
						&chunk.proof,
						chunk.index.0 as usize,
					) {
						let erasure_chunk_hash = BlakeTwo256::hash(&chunk.chunk);

						if erasure_chunk_hash != anticipated_hash {
							tracing::debug!(
								target: LOG_TARGET,
								?validator_index,
								"Merkle proof mismatch",
							);
						} else {
							tracing::trace!(
								target: LOG_TARGET,
								?validator_index,
								"Received valid chunk.",
							);

							self.received_chunks.insert(validator_index, chunk);
						}
					} else {
						tracing::debug!(
							target: LOG_TARGET,
							?validator_index,
							"Invalid Merkle proof",
						);
					}
				}
				Ok(None) => {}
				Err((validator_index, e)) => {
					tracing::debug!(
						target: LOG_TARGET,
						err = ?e,
						?validator_index,
						"Failure requesting chunk",
					);

					match e {
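						// An invalid response disqualifies the validator for this recovery,
						// while transport failures (network error or cancellation) put the
						// validator back into the shuffling so the chunk can be retried.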
						RequestError::InvalidResponse(_) => {}
						RequestError::NetworkError(_) | RequestError::Canceled(_) => {
							self.shuffling.push(validator_index);
						}
					}
				}
			}
		}
	}

	async fn run(
		&mut self,
		params: &InteractionParams,
		sender: &mut impl SubsystemSender,
	) -> Result<AvailableData, RecoveryError> {
		loop {
			if is_unavailable(
				self.received_chunks.len(),
				self.requesting_chunks.len(),
				self.shuffling.len(),
				params.threshold,
			) {
				tracing::debug!(
					target: LOG_TARGET,
					candidate_hash = ?params.candidate_hash,
					erasure_root = ?params.erasure_root,
					received = %self.received_chunks.len(),
					requesting = %self.requesting_chunks.len(),
					n_validators = %params.validators.len(),
					"Data recovery is not possible",
				);

				return Err(RecoveryError::Unavailable);
			}

			self.launch_parallel_requests(params, sender).await;
			self.wait_for_chunks(params).await;

			// If received_chunks has more than threshold entries, attempt to recover the data.
			// If that fails, or a re-encoding of it doesn't match the expected erasure root,
			// return Err(RecoveryError::Invalid)
			if self.received_chunks.len() >= params.threshold {
				return match polkadot_erasure_coding::reconstruct_v1(
					params.validators.len(),
					self.received_chunks.values().map(|c| (&c.chunk[..], c.index.0 as usize)),
				) {
					Ok(data) => {
						if reconstructed_data_matches_root(params.validators.len(), &params.erasure_root, &data) {
							tracing::trace!(
								target: LOG_TARGET,
								candidate_hash = ?params.candidate_hash,
								erasure_root = ?params.erasure_root,
								"Data recovery complete",
							);

							Ok(data)
						} else {
							tracing::trace!(
								target: LOG_TARGET,
								candidate_hash = ?params.candidate_hash,
								erasure_root = ?params.erasure_root,
								"Data recovery - root mismatch",
							);

							Err(RecoveryError::Invalid)
						}
					}
					Err(err) => {
						tracing::trace!(
							target: LOG_TARGET,
							candidate_hash = ?params.candidate_hash,
							erasure_root = ?params.erasure_root,
							?err,
							"Data recovery error",
						);

						Err(RecoveryError::Invalid)
					},
				};
			}
		}
	}
}

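/// Recovery becomes impossible once the chunks already received, those still being requested,
/// and those of validators not yet asked can no longer add up to the recovery threshold.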
const fn is_unavailable(
	received_chunks: usize,
	requesting_chunks: usize,
	unrequested_validators: usize,
	threshold: usize,
) -> bool {
	received_chunks + requesting_chunks + unrequested_validators < threshold
}

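/// Re-encode the reconstructed data into erasure chunks and check that the merkle trie root
/// over those chunks matches the candidate's expected erasure root.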
fn reconstructed_data_matches_root(
	n_validators: usize,
	expected_root: &Hash,
	data: &AvailableData,
) -> bool {
	let chunks = match obtain_chunks_v1(n_validators, data) {
		Ok(chunks) => chunks,
		Err(e) => {
			tracing::debug!(
				target: LOG_TARGET,
				err = ?e,
				"Failed to obtain chunks",
			);
			return false;
		}
	};

	let branches = branches(&chunks);

	branches.root() == *expected_root
}

impl<S: SubsystemSender> Interaction<S> {
	async fn run(mut self) -> Result<AvailableData, RecoveryError> {
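		// Try the fast path (full data from a backer) first and fall back to requesting
		// chunks from all validators if the data turns out to be unavailable there.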
		loop {
			// These only fail if we cannot reach the underlying subsystem, in which case
			// there is nothing meaningful we can do.
			match self.phase {
				InteractionPhase::RequestFromBackers(ref mut from_backers) => {
					match from_backers.run(&self.params, &mut self.sender).await {
						Ok(data) => break Ok(data),
						Err(RecoveryError::Invalid) => break Err(RecoveryError::Invalid),
						Err(RecoveryError::Unavailable) => {
							self.phase = InteractionPhase::RequestChunks(
								RequestChunksPhase::new(self.params.validators.len() as _)
							)
						}
					}
				}
				InteractionPhase::RequestChunks(ref mut from_all) => {
					break from_all.run(&self.params, &mut self.sender).await;
				}
			}
		}
	}
}

/// Accumulate all awaiting sides for some particular `AvailableData`.
struct InteractionHandle {
	candidate_hash: CandidateHash,
	remote: RemoteHandle<Result<AvailableData, RecoveryError>>,
	awaiting: Vec<oneshot::Sender<Result<AvailableData, RecoveryError>>>,
}

impl Future for InteractionHandle {
	type Output = Option<(CandidateHash, Result<AvailableData, RecoveryError>)>;

	fn poll(mut self: Pin<&mut Self>, cx: &mut Context<'_>) -> Poll<Self::Output> {
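		// Prune awaiting senders whose receivers were dropped. If none remain, resolve to
		// `None` so the background interaction can be dropped as well; otherwise drive the
		// interaction and fan its result out to every remaining sender.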
		let mut indices_to_remove = Vec::new();
		for (i, awaiting) in self.awaiting.iter_mut().enumerate().rev() {
			if let Poll::Ready(()) = awaiting.poll_canceled(cx) {
				indices_to_remove.push(i);
			}
		}

		// Indices are in descending order, so `swap_remove` leaves the remaining indices valid.
		for index in indices_to_remove {
			tracing::debug!(
				target: LOG_TARGET,
				candidate_hash = ?self.candidate_hash,
				"Receiver for available data dropped.",
			);

			self.awaiting.swap_remove(index);
		}

		if self.awaiting.is_empty() {
			tracing::debug!(
				target: LOG_TARGET,
				candidate_hash = ?self.candidate_hash,
				"All receivers for available data dropped.",
			);

			return Poll::Ready(None);
		}

		let remote = &mut self.remote;
		futures::pin_mut!(remote);
		let result = futures::ready!(remote.poll(cx));

		for awaiting in self.awaiting.drain(..) {
			let _ = awaiting.send(result.clone());
		}

		Poll::Ready(Some((self.candidate_hash, result)))
	}
}

498
499
500
struct State {
	/// Each interaction is implemented as its own async task,
	/// and these handles are for communicating with them.
	interactions: FuturesUnordered<InteractionHandle>,

	/// A recent block hash for which state should be available.
	live_block: (BlockNumber, Hash),

	/// An LRU cache of recently recovered data.
	availability_lru: LruCache<CandidateHash, Result<AvailableData, RecoveryError>>,
}

impl Default for State {
	fn default() -> Self {
		Self {
			interactions: FuturesUnordered::new(),
			live_block: (0, Hash::default()),
			availability_lru: LruCache::new(LRU_SIZE),
		}
	}
}

impl<C> Subsystem<C> for AvailabilityRecoverySubsystem
	where C: SubsystemContext<Message = AvailabilityRecoveryMessage>
{
	fn start(self, ctx: C) -> SpawnedSubsystem {
		let future = self.run(ctx)
			.map_err(|e| SubsystemError::with_origin("availability-recovery", e))
			.boxed();
		SpawnedSubsystem {
			name: "availability-recovery-subsystem",
			future,
		}
	}
}

/// Handles a signal from the overseer.
async fn handle_signal(
	state: &mut State,
	signal: OverseerSignal,
) -> SubsystemResult<bool> {
	match signal {
		OverseerSignal::Conclude => Ok(true),
		OverseerSignal::ActiveLeaves(ActiveLeavesUpdate { activated, .. }) => {
			// if activated is non-empty, set state.live_block to the highest block in `activated`
			for activated in activated {
				if activated.number > state.live_block.0 {
					state.live_block = (activated.number, activated.hash)
				}
			}

			Ok(false)
		}
		OverseerSignal::BlockFinalized(_, _) => Ok(false)
	}
}

/// Machinery around launching interactions into the background.
#[tracing::instrument(level = "trace", skip(ctx, state), fields(subsystem = LOG_TARGET))]
async fn launch_interaction(
	state: &mut State,
	ctx: &mut impl SubsystemContext<Message = AvailabilityRecoveryMessage>,
	session_index: SessionIndex,
	session_info: SessionInfo,
	receipt: CandidateReceipt,
	backing_group: Option<GroupIndex>,
	response_sender: oneshot::Sender<Result<AvailableData, RecoveryError>>,
) -> error::Result<()> {
	let candidate_hash = receipt.hash();

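	// `recovery_threshold` is the minimum number of chunks needed to reconstruct the data
	// (assumed here to be f + 1 for n = 3f + k validators).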
	let params = InteractionParams {
		validator_authority_keys: session_info.discovery_keys.clone(),
		validators: session_info.validators.clone(),
		threshold: recovery_threshold(session_info.validators.len())?,
		candidate_hash,
		erasure_root: receipt.descriptor.erasure_root,
	};

	let phase = backing_group
		.and_then(|g| session_info.validator_groups.get(g.0 as usize))
		.map(|group| InteractionPhase::RequestFromBackers(
			RequestFromBackersPhase::new(group.clone())
		))
		.unwrap_or_else(|| InteractionPhase::RequestChunks(
			RequestChunksPhase::new(params.validators.len() as _)
		));

	let interaction = Interaction {
		sender: ctx.sender().clone(),
		params,
		phase,
	};

	let (remote, remote_handle) = interaction.run().remote_handle();

	state.interactions.push(InteractionHandle {
		candidate_hash,
		remote: remote_handle,
		awaiting: vec![response_sender],
	});

	if let Err(e) = ctx.spawn("recovery interaction", Box::pin(remote)).await {
		tracing::warn!(
			target: LOG_TARGET,
			err = ?e,
			"Failed to spawn a recovery interaction task",
		);
	}

	Ok(())
}

/// Handles an availability recovery request.
#[tracing::instrument(level = "trace", skip(ctx, state), fields(subsystem = LOG_TARGET))]
async fn handle_recover(
	state: &mut State,
	ctx: &mut impl SubsystemContext<Message = AvailabilityRecoveryMessage>,
	receipt: CandidateReceipt,
	session_index: SessionIndex,
	backing_group: Option<GroupIndex>,
	response_sender: oneshot::Sender<Result<AvailableData, RecoveryError>>,
) -> error::Result<()> {
	let candidate_hash = receipt.hash();

	let span = jaeger::Span::new(candidate_hash, "availbility-recovery")
		.with_stage(jaeger::Stage::AvailabilityRecovery);

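	// Answer from the LRU cache if a recent interaction already produced a result for this candidate.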
	if let Some(result) = state.availability_lru.get(&candidate_hash) {
		if let Err(e) = response_sender.send(result.clone()) {
			tracing::warn!(
				target: LOG_TARGET,
				err = ?e,
				"Error responding with an availability recovery result",
			);
		}
		return Ok(());
	}

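	// If an interaction for this candidate is already running, attach to its result instead
	// of launching a new one.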
	if let Some(i) = state.interactions.iter_mut().find(|i| i.candidate_hash == candidate_hash) {
		i.awaiting.push(response_sender);
		return Ok(());
	}

	let _span = span.child("not-cached");
	let session_info = request_session_info_ctx(
		state.live_block.1,
		session_index,
		ctx,
	).await?.await.map_err(error::Error::CanceledSessionInfo)??;

	let _span = span.child("session-info-ctx-received");
	match session_info {
		Some(session_info) => {
			launch_interaction(
				state,
				ctx,
				session_index,
				session_info,
				receipt,
				backing_group,
				response_sender,
			).await
		}
		None => {
			tracing::warn!(
				target: LOG_TARGET,
				"SessionInfo is `None` at {:?}", state.live_block,
			);
			response_sender
				.send(Err(RecoveryError::Unavailable))
				.map_err(|_| error::Error::CanceledResponseSender)?;
			Ok(())
		}
	}
}

/// Queries the full `AvailableData` for a candidate from the av-store.
#[tracing::instrument(level = "trace", skip(ctx), fields(subsystem = LOG_TARGET))]
async fn query_full_data(
	ctx: &mut impl SubsystemContext<Message = AvailabilityRecoveryMessage>,
	candidate_hash: CandidateHash,
) -> error::Result<Option<AvailableData>> {
	let (tx, rx) = oneshot::channel();
	ctx.send_message(AllMessages::AvailabilityStore(
		AvailabilityStoreMessage::QueryAvailableData(candidate_hash, tx),
	)).await;

	Ok(rx.await.map_err(error::Error::CanceledQueryFullData)?)
}

impl AvailabilityRecoverySubsystem {
	/// Create a new instance of `AvailabilityRecoverySubsystem` which starts with a fast path to request data from backers.
	pub fn with_fast_path() -> Self {
		Self { fast_path: true }
	}

	/// Create a new instance of `AvailabilityRecoverySubsystem` which requests only chunks.
	pub fn with_chunks_only() -> Self {
		Self { fast_path: false }
	}

	async fn run(
		self,
		mut ctx: impl SubsystemContext<Message = AvailabilityRecoveryMessage>,
	) -> SubsystemResult<()> {
		let mut state = State::default();

		loop {
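			// Race incoming overseer messages against interactions completing in the background.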
			futures::select! {
				v = ctx.recv().fuse() => {
					match v? {
						FromOverseer::Signal(signal) => if handle_signal(
							&mut state,
							signal,
						).await? {
							return Ok(());
						}
						FromOverseer::Communication { msg } => {
							match msg {
								AvailabilityRecoveryMessage::RecoverAvailableData(
									receipt,
									session_index,
									maybe_backing_group,
									response_sender,
								) => {
									if let Err(e) = handle_recover(
										&mut state,
										&mut ctx,
										receipt,
										session_index,
										maybe_backing_group.filter(|_| self.fast_path),
										response_sender,
									).await {
										tracing::warn!(
											target: LOG_TARGET,
											err = ?e,
											"Error handling a recovery request",
										);
									}
								}
								AvailabilityRecoveryMessage::AvailableDataFetchingRequest(req) => {
									match query_full_data(&mut ctx, req.payload.candidate_hash).await {
										Ok(res) => {
											let _ = req.send_response(res.into());
										}
										Err(e) => {
											tracing::debug!(
												target: LOG_TARGET,
												err = ?e,
												"Failed to query available data.",
											);

											let _ = req.send_response(None.into());
										}
									}
								}
							}
						}
					}
				}
				output = state.interactions.next() => {
					if let Some((candidate_hash, result)) = output.flatten() {
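						// Cache the outcome (success or failure) so repeated requests for this
						// candidate are answered from the LRU.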
						state.availability_lru.put(candidate_hash, result);
					}
				}
			}
		}
	}
}