Newer
Older
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
76
77
78
79
80
81
82
83
84
85
86
87
88
89
90
91
92
93
94
95
96
97
98
99
100
101
102
103
104
105
106
107
108
109
110
111
112
113
114
115
116
117
118
119
120
121
122
123
124
125
126
127
128
129
130
131
132
133
134
135
136
137
138
139
140
141
142
143
144
145
146
147
148
149
150
151
152
153
154
155
156
157
158
159
160
161
162
163
164
165
166
167
168
169
170
171
172
173
174
175
176
177
178
179
180
181
182
183
184
185
186
187
188
189
190
191
192
193
194
195
196
197
198
199
200
201
202
203
204
205
206
207
208
209
210
211
212
213
214
215
216
217
218
219
220
221
222
223
224
225
226
227
228
229
230
231
232
233
234
235
236
237
238
239
240
241
242
243
244
245
246
247
248
249
250
251
252
253
254
255
256
257
258
259
260
261
262
263
264
265
266
267
268
269
270
271
272
273
274
275
276
277
278
279
280
281
282
283
284
285
286
287
288
289
290
291
292
293
294
295
296
297
298
299
300
301
302
303
304
305
306
307
308
309
310
311
312
313
314
315
316
317
318
319
320
321
322
323
324
325
326
327
328
329
330
331
332
333
334
335
336
337
338
339
340
341
342
343
344
345
346
347
348
349
350
351
352
353
354
355
356
357
358
359
360
361
362
363
364
365
366
367
368
369
370
371
372
373
374
375
376
377
378
379
380
381
382
383
384
385
386
387
388
389
390
391
392
393
394
395
396
397
398
399
400
401
402
403
404
405
406
407
408
409
410
411
412
413
414
415
416
417
418
419
420
421
422
423
424
425
426
427
428
429
430
431
432
433
434
435
436
437
438
439
440
441
442
443
444
445
446
447
448
449
450
451
452
453
454
455
456
457
458
459
460
461
462
463
464
465
466
467
468
469
470
471
472
473
474
475
476
477
478
479
480
481
482
483
484
485
486
487
488
489
490
491
492
493
494
495
496
497
498
499
500
501
502
503
504
505
506
507
508
509
510
511
512
513
514
515
516
517
518
519
520
521
522
523
524
525
526
527
528
529
530
531
532
533
534
535
536
537
538
539
540
541
542
543
544
545
546
547
548
549
550
551
552
553
554
555
556
557
558
559
560
561
562
563
564
565
566
567
568
569
570
571
572
573
574
575
576
577
578
579
580
581
582
583
584
585
586
587
588
589
590
591
592
593
594
595
596
597
598
599
600
601
602
603
604
605
606
607
608
609
610
611
612
613
614
615
616
617
618
619
620
621
622
623
624
625
626
627
628
629
630
631
632
633
634
635
636
637
638
639
640
641
642
643
644
645
646
647
648
649
650
651
652
653
654
655
656
657
658
659
660
661
662
663
664
665
666
667
668
669
670
671
672
673
674
675
676
677
678
679
680
681
682
683
684
685
686
687
688
689
690
691
692
693
694
695
696
697
698
699
700
701
702
703
704
705
706
707
708
709
710
711
712
713
714
715
716
717
718
719
720
721
722
723
724
725
726
727
728
729
730
731
732
733
734
735
736
737
738
739
740
741
742
743
744
745
746
747
748
749
750
751
752
753
754
755
756
757
758
759
760
761
762
763
764
765
766
767
768
769
770
771
772
773
774
775
776
777
778
779
780
781
782
783
784
785
786
787
788
789
790
791
792
793
794
795
796
797
// Copyright 2019-2021 Parity Technologies (UK) Ltd.
// This file is part of Parity Bridges Common.
// Parity Bridges Common is free software: you can redistribute it and/or modify
// it under the terms of the GNU General Public License as published by
// the Free Software Foundation, either version 3 of the License, or
// (at your option) any later version.
// Parity Bridges Common is distributed in the hope that it will be useful,
// but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
// GNU General Public License for more details.
// You should have received a copy of the GNU General Public License
// along with Parity Bridges Common. If not, see <http://www.gnu.org/licenses/>.
//! On-demand Substrate -> Substrate parachain finality relay.
use crate::{
messages_source::best_finalized_peer_header_at_self,
on_demand::OnDemandRelay,
parachains::{
source::ParachainsSource, target::ParachainsTarget, ParachainsPipelineAdapter,
SubstrateParachainsPipeline,
},
TransactionParams,
};
use async_std::{
channel::{unbounded, Receiver, Sender},
sync::{Arc, Mutex},
};
use async_trait::async_trait;
use bp_polkadot_core::parachains::ParaHash;
use futures::{select, FutureExt};
use num_traits::Zero;
use pallet_bridge_parachains::{RelayBlockHash, RelayBlockHasher, RelayBlockNumber};
use parachains_relay::parachains_loop::{ParachainSyncParams, TargetClient};
use relay_substrate_client::{
AccountIdOf, AccountKeyPairOf, BlockNumberOf, Chain, Client, Error as SubstrateError,
TransactionSignScheme,
};
use relay_utils::{
metrics::MetricsParams, relay_loop::Client as RelayClient, FailedClient, HeaderId,
};
use sp_runtime::traits::Header as HeaderT;
use std::{cmp::Ordering, collections::BTreeMap};
/// On-demand Substrate <-> Substrate parachain finality relay.
///
/// This relay may be requested to sync more parachain headers, whenever some other relay
/// (e.g. messages relay) needs it to continue its regular work. When enough parachain headers
/// are relayed, on-demand stops syncing headers.
///
/// The struct itself is only a lightweight handle: the actual relaying is performed by a
/// detached background task (see `background_task`), spawned in `Self::new`.
#[derive(Clone)]
pub struct OnDemandParachainsRelay<SourceParachain: Chain> {
	/// Relay task name (used in log messages only).
	relay_task_name: String,
	/// Channel used to communicate with background task and ask for relay of parachain heads.
	/// The receiving end is owned by the background task.
	required_header_number_sender: Sender<BlockNumberOf<SourceParachain>>,
}
impl<SourceParachain: Chain> OnDemandParachainsRelay<SourceParachain> {
	/// Create new on-demand parachains relay.
	///
	/// Note that the argument is the source relay chain client, not the parachain client.
	/// That's because parachain finality is determined by the relay chain and we don't
	/// need to connect to the parachain itself here.
	///
	/// Spawns the background relaying task as a side effect; the task keeps running for as
	/// long as at least one clone of the returned handle (and hence of the sender) is alive.
	pub fn new<P: SubstrateParachainsPipeline<SourceParachain = SourceParachain>>(
		source_relay_client: Client<P::SourceRelayChain>,
		target_client: Client<P::TargetChain>,
		target_transaction_params: TransactionParams<AccountKeyPairOf<P::TransactionSignScheme>>,
		on_demand_source_relay_to_target_headers: Arc<
			dyn OnDemandRelay<BlockNumberOf<P::SourceRelayChain>>,
		>,
	) -> Self
	where
		P::SourceParachain: Chain<Hash = ParaHash>,
		P::SourceRelayChain:
			Chain<BlockNumber = RelayBlockNumber, Hash = RelayBlockHash, Hasher = RelayBlockHasher>,
		AccountIdOf<P::TargetChain>:
			From<<AccountKeyPairOf<P::TransactionSignScheme> as sp_core::Pair>::Public>,
		P::TransactionSignScheme: TransactionSignScheme<Chain = P::TargetChain>,
	{
		// the channel is unbounded, so `require_more_headers` never blocks the caller
		let (required_header_number_sender, required_header_number_receiver) = unbounded();
		let this = OnDemandParachainsRelay {
			relay_task_name: on_demand_parachains_relay_name::<SourceParachain, P::TargetChain>(),
			required_header_number_sender,
		};
		// detached background task: it exits on its own when the receiver errors, i.e. when
		// all senders (all clones of `this`) have been dropped
		async_std::task::spawn(async move {
			background_task::<P>(
				source_relay_client,
				target_client,
				target_transaction_params,
				on_demand_source_relay_to_target_headers,
				required_header_number_receiver,
			)
			.await;
		});

		this
	}
}
#[async_trait]
impl<SourceParachain> OnDemandRelay<BlockNumberOf<SourceParachain>>
for OnDemandParachainsRelay<SourceParachain>
where
SourceParachain: Chain,
{
async fn require_more_headers(&self, required_header: BlockNumberOf<SourceParachain>) {
if let Err(e) = self.required_header_number_sender.send(required_header).await {
log::trace!(
target: "bridge",
"Failed to request {} header {:?} in {:?}: {:?}",
SourceParachain::NAME,
required_header,
self.relay_task_name,
e,
);
}
}
}
/// Background task that is responsible for starting parachain headers relay.
///
/// Runs an endless loop that (a) receives "please relay parachain header #N" requests over
/// `required_parachain_header_number_receiver`, (b) reads the current state of source/target
/// clients, (c) advances the `RelayState` machine (see `select_headers_to_relay`) and
/// (d) keeps the underlying parachains finality relay loop running. The task exits only
/// when the request channel is closed (all senders dropped).
async fn background_task<P: SubstrateParachainsPipeline>(
	source_relay_client: Client<P::SourceRelayChain>,
	target_client: Client<P::TargetChain>,
	target_transaction_params: TransactionParams<AccountKeyPairOf<P::TransactionSignScheme>>,
	on_demand_source_relay_to_target_headers: Arc<
		dyn OnDemandRelay<BlockNumberOf<P::SourceRelayChain>>,
	>,
	required_parachain_header_number_receiver: Receiver<BlockNumberOf<P::SourceParachain>>,
) where
	P::SourceParachain: Chain<Hash = ParaHash>,
	P::SourceRelayChain:
		Chain<BlockNumber = RelayBlockNumber, Hash = RelayBlockHash, Hasher = RelayBlockHasher>,
	AccountIdOf<P::TargetChain>:
		From<<AccountKeyPairOf<P::TransactionSignScheme> as sp_core::Pair>::Public>,
	P::TransactionSignScheme: TransactionSignScheme<Chain = P::TargetChain>,
{
	let relay_task_name = on_demand_parachains_relay_name::<P::SourceParachain, P::TargetChain>();
	let target_transactions_mortality = target_transaction_params.mortality;

	let mut relay_state = RelayState::Idle;
	// relay-header-number -> para-header-number cache, shared with `select_headers_to_relay`
	let mut headers_map_cache = BTreeMap::new();
	// maximal parachain header number we have been asked to relay so far
	let mut required_parachain_header_number = Zero::zero();
	// shared with `ParachainsSource`: the parachains relay loop will not submit heads past
	// this number; note the `Mutex` holds a *copy*, updated explicitly below
	let required_para_header_number_ref = Arc::new(Mutex::new(required_parachain_header_number));

	let mut restart_relay = true;
	// the relay loop future; starts `terminated` and is (re)armed under `restart_relay`
	let parachains_relay_task = futures::future::Fuse::terminated();
	futures::pin_mut!(parachains_relay_task);

	let mut parachains_source = ParachainsSource::<P>::new(
		source_relay_client.clone(),
		Some(required_para_header_number_ref.clone()),
	);
	let mut parachains_target =
		ParachainsTarget::<P>::new(target_client.clone(), target_transaction_params.clone());

	loop {
		select! {
			new_required_parachain_header_number = required_parachain_header_number_receiver.recv().fuse() => {
				let new_required_parachain_header_number = match new_required_parachain_header_number {
					Ok(new_required_parachain_header_number) => new_required_parachain_header_number,
					Err(e) => {
						// channel closed => no more requests will ever arrive => we're done
						log::error!(
							target: "bridge",
							"Background task of {} has exited with error: {:?}",
							relay_task_name,
							e,
						);

						return;
					},
				};

				// keep in mind that we are not updating `required_para_header_number_ref` here, because
				// then we'll be submitting all previous headers as well (while required relay headers are
				// delivered) and we want to avoid that (to reduce cost)
				required_parachain_header_number = std::cmp::max(
					required_parachain_header_number,
					new_required_parachain_header_number,
				);
			},
			_ = parachains_relay_task => {
				// this should never happen in practice given the current code
				restart_relay = true;
			},
		}

		// the workflow of the on-demand parachains relay is:
		//
		// 1) message relay (or any other dependent relay) sees new message at parachain header
		// `PH`; 2) it sees that the target chain does not know `PH`;
		// 3) it asks on-demand parachains relay to relay `PH` to the target chain;
		//
		// Phase#1: relaying relay chain header
		//
		// 4) on-demand parachains relay waits for GRANDPA-finalized block of the source relay chain
		// `RH` that is storing `PH` or its descendant. Let it be `PH'`;
		// 5) it asks on-demand headers relay to relay `RH` to the target chain;
		// 6) it waits until `RH` (or its descendant) is relayed to the target chain;
		//
		// Phase#2: relaying parachain header
		//
		// 7) on-demand parachains relay sets `ParachainsSource::maximal_header_number` to the
		// `PH'.number()`. 8) parachains finality relay sees that the parachain head has been
		// updated and relays `PH'` to the target chain.

		// select headers to relay
		let relay_data = read_relay_data(
			&parachains_source,
			&parachains_target,
			required_parachain_header_number,
			&mut headers_map_cache,
		)
		.await;
		match relay_data {
			Ok(mut relay_data) => {
				let prev_relay_state = relay_state;
				relay_state = select_headers_to_relay(&mut relay_data, relay_state);
				log::trace!(
					target: "bridge",
					"Selected new relay state in {}: {:?} using old state {:?} and data {:?}",
					relay_task_name,
					relay_state,
					prev_relay_state,
					relay_data,
				);
			},
			Err(failed_client) => {
				// reading clients failed => reconnect the failed side and retry the iteration
				relay_utils::relay_loop::reconnect_failed_client(
					failed_client,
					relay_utils::relay_loop::RECONNECT_DELAY,
					&mut parachains_source,
					&mut parachains_target,
				)
				.await;
				continue
			},
		}

		// we have selected our new 'state' => let's notify our source clients about our new
		// requirements
		match relay_state {
			RelayState::Idle => (),
			RelayState::RelayingRelayHeader(required_relay_header, _) => {
				on_demand_source_relay_to_target_headers
					.require_more_headers(required_relay_header)
					.await;
			},
			RelayState::RelayingParaHeader(required_para_header) => {
				// this is the only place where the shared maximum is advanced; it unblocks
				// the parachains relay loop for headers up to `required_para_header`
				*required_para_header_number_ref.lock().await = required_para_header;
			},
		}

		// start/restart relay
		if restart_relay {
			let stall_timeout = relay_substrate_client::transaction_stall_timeout(
				target_transactions_mortality,
				P::TargetChain::AVERAGE_BLOCK_INTERVAL,
				crate::STALL_TIMEOUT,
			);

			log::info!(
				target: "bridge",
				"Starting {} relay\n\t\
					Tx mortality: {:?} (~{}m)\n\t\
					Stall timeout: {:?}",
				relay_task_name,
				target_transactions_mortality,
				stall_timeout.as_secs_f64() / 60.0f64,
				stall_timeout,
			);

			parachains_relay_task.set(
				parachains_relay::parachains_loop::run(
					parachains_source.clone(),
					parachains_target.clone(),
					ParachainSyncParams {
						parachains: vec![P::SOURCE_PARACHAIN_PARA_ID.into()],
						stall_timeout: std::time::Duration::from_secs(60),
						strategy: parachains_relay::parachains_loop::ParachainSyncStrategy::Any,
					},
					MetricsParams::disabled(),
					futures::future::pending(),
				)
				.fuse(),
			);

			restart_relay = false;
		}
	}
}
/// On-demand parachains relay task name.
///
/// Built as `on-demand-<source>-to-<target>`, using the chains' `NAME` constants.
fn on_demand_parachains_relay_name<SourceChain: Chain, TargetChain: Chain>() -> String {
	let mut task_name = String::from("on-demand-");
	task_name.push_str(SourceChain::NAME);
	task_name.push_str("-to-");
	task_name.push_str(TargetChain::NAME);
	task_name
}
/// On-demand relay state.
#[derive(Clone, Copy, Debug, PartialEq)]
enum RelayState<SourceParaBlock, SourceRelayBlock> {
	/// On-demand relay is not doing anything.
	Idle,
	/// Relaying given relay header to relay given parachain header later.
	///
	/// The first field is the relay chain header number we are waiting for at the target
	/// chain; the second is the parachain header number we shall relay once it arrives.
	RelayingRelayHeader(SourceRelayBlock, SourceParaBlock),
	/// Relaying given parachain header.
	RelayingParaHeader(SourceParaBlock),
}

/// Data gathered from source and target clients, used by on-demand relay.
#[derive(Debug)]
struct RelayData<'a, SourceParaBlock, SourceRelayBlock> {
	/// Parachain header number that is required at the target chain.
	pub required_para_header: SourceParaBlock,
	/// Parachain header number, known to the target chain.
	pub para_header_at_target: SourceParaBlock,
	/// Parachain header number, known to the source (relay) chain.
	pub para_header_at_source: Option<SourceParaBlock>,
	/// Relay header number at the source chain.
	pub relay_header_at_source: SourceRelayBlock,
	/// Relay header number at the target chain.
	pub relay_header_at_target: SourceRelayBlock,
	/// Map of relay to para header block numbers for recent relay headers.
	///
	/// Even if we have been trying to relay relay header #100 to relay parachain header #50
	/// afterwards, it may happen that the relay header #200 may be relayed instead - either
	/// by us (e.g. if GRANDPA justification is generated for #200, or if we are only syncing
	/// mandatory headers), or by other relayer. Then, instead of parachain header #50 we may
	/// relay parachain header #70.
	///
	/// This cache is especially important, given that we assume that the nodes we're connected
	/// to are not necessarily archive nodes. Then, if current relay chain block is #210 and #200
	/// has been delivered to the target chain, we have more chances to generate storage proof
	/// at relay block #200 than on relay block #100, which is most likely has pruned state
	/// already.
	pub headers_map_cache: &'a mut BTreeMap<SourceRelayBlock, SourceParaBlock>,
}

/// Maximal number of entries the headers map cache may hold before the oldest entry is pruned.
///
/// This number is bigger than the session length of any well-known Substrate-based relay
/// chain. We expect that the underlying on-demand relay will submit at least 1 header per
/// session.
const MAX_HEADERS_MAP_CACHE_ENTRIES: usize = 4096;

/// Select relay and parachain headers that need to be relayed.
///
/// Advances the on-demand state machine: updates the headers map cache, transitions
/// `RelayingRelayHeader` -> `RelayingParaHeader` once the required relay header (or its
/// descendant) reaches the target chain, and decides whether a new round of relaying is
/// required at all. Returns the new state.
fn select_headers_to_relay<SourceParaBlock, SourceRelayBlock>(
	data: &mut RelayData<'_, SourceParaBlock, SourceRelayBlock>,
	mut state: RelayState<SourceParaBlock, SourceRelayBlock>,
) -> RelayState<SourceParaBlock, SourceRelayBlock>
where
	SourceParaBlock: Copy + PartialOrd,
	SourceRelayBlock: Copy + Ord,
{
	// regardless of our current state, we want to update the headers map cache, so that we
	// can later pick the best parachain header for whatever relay header gets delivered
	if let Some(para_header_at_source) = data.para_header_at_source {
		data.headers_map_cache
			.insert(data.relay_header_at_source, para_header_at_source);
		if data.headers_map_cache.len() > MAX_HEADERS_MAP_CACHE_ENTRIES {
			// `BTreeMap` keys are sorted => the first key is the oldest relay header
			let first_key = *data.headers_map_cache.keys().next().expect("map is not empty; qed");
			data.headers_map_cache.remove(&first_key);
		}
	}

	// this switch is responsible for processing `RelayingRelayHeader` state
	match state {
		RelayState::Idle | RelayState::RelayingParaHeader(_) => (),
		RelayState::RelayingRelayHeader(relay_header_number, para_header_number) => {
			match data.relay_header_at_target.cmp(&relay_header_number) {
				Ordering::Less => {
					// relay header hasn't yet been relayed
					return RelayState::RelayingRelayHeader(relay_header_number, para_header_number)
				},
				Ordering::Equal => {
					// relay header has been relayed and we may continue with parachain header
					state = RelayState::RelayingParaHeader(para_header_number);
				},
				Ordering::Greater => {
					// relay header descendant has been relayed and we may need to change parachain
					// header that we want to relay
					let next_para_header_number = data
						.headers_map_cache
						.range(..=data.relay_header_at_target)
						.next_back()
						.map(|(_, next_para_header_number)| *next_para_header_number)
						.unwrap_or(para_header_number);
					state = RelayState::RelayingParaHeader(next_para_header_number);
				},
			}
		},
	}

	// this switch is responsible for processing `RelayingParaHeader` state
	match state {
		RelayState::Idle => (),
		RelayState::RelayingRelayHeader(_, _) => unreachable!("processed by previous match; qed"),
		RelayState::RelayingParaHeader(para_header_number) => {
			if data.para_header_at_target < para_header_number {
				// parachain header hasn't yet been relayed
				return RelayState::RelayingParaHeader(para_header_number)
			}
		},
	}

	// if we have already satisfied our "customer", do nothing
	if data.required_para_header <= data.para_header_at_target {
		return RelayState::Idle
	}

	// if required header is not available even at the source chain, let's wait
	// (`Some(_) > None` holds for `Option`'s `PartialOrd`, so a missing source head means wait)
	if Some(data.required_para_header) > data.para_header_at_source {
		return RelayState::Idle
	}

	// we will always try to sync latest parachain/relay header, even if we've been asked for some
	// its ancestor

	// we need relay chain header first
	if data.relay_header_at_target < data.relay_header_at_source {
		return RelayState::RelayingRelayHeader(
			data.relay_header_at_source,
			data.required_para_header,
		)
	}

	// if all relay headers synced, we may start directly with parachain header
	RelayState::RelayingParaHeader(data.required_para_header)
}
#[cfg(test)]
mod tests {
	//! Unit tests for the `select_headers_to_relay` state machine, using plain integers
	//! as parachain/relay block numbers.

	use super::*;

	#[test]
	fn relay_waits_for_relay_header_to_be_delivered() {
		// target relay header (700) < required relay header (750) => keep waiting
		assert_eq!(
			select_headers_to_relay(
				&mut RelayData {
					required_para_header: 100,
					para_header_at_target: 50,
					para_header_at_source: Some(110),
					relay_header_at_source: 800,
					relay_header_at_target: 700,
					headers_map_cache: &mut BTreeMap::new(),
				},
				RelayState::RelayingRelayHeader(750, 100),
			),
			RelayState::RelayingRelayHeader(750, 100),
		);
	}

	#[test]
	fn relay_starts_relaying_requested_para_header_after_relay_header_is_delivered() {
		// target relay header == required relay header => switch to para header
		assert_eq!(
			select_headers_to_relay(
				&mut RelayData {
					required_para_header: 100,
					para_header_at_target: 50,
					para_header_at_source: Some(110),
					relay_header_at_source: 800,
					relay_header_at_target: 750,
					headers_map_cache: &mut BTreeMap::new(),
				},
				RelayState::RelayingRelayHeader(750, 100),
			),
			RelayState::RelayingParaHeader(100),
		);
	}

	#[test]
	fn relay_selects_same_para_header_after_better_relay_header_is_delivered_1() {
		// descendant relay header delivered; cache maps it to the same para header
		assert_eq!(
			select_headers_to_relay(
				&mut RelayData {
					required_para_header: 100,
					para_header_at_target: 50,
					para_header_at_source: Some(110),
					relay_header_at_source: 800,
					relay_header_at_target: 780,
					headers_map_cache: &mut vec![(700, 90), (750, 100)].into_iter().collect(),
				},
				RelayState::RelayingRelayHeader(750, 100),
			),
			RelayState::RelayingParaHeader(100),
		);
	}

	#[test]
	fn relay_selects_same_para_header_after_better_relay_header_is_delivered_2() {
		// descendant relay header delivered with empty cache => fall back to the stored
		// para header number
		assert_eq!(
			select_headers_to_relay(
				&mut RelayData {
					required_para_header: 100,
					para_header_at_target: 50,
					para_header_at_source: Some(110),
					relay_header_at_source: 800,
					relay_header_at_target: 780,
					headers_map_cache: &mut BTreeMap::new(),
				},
				RelayState::RelayingRelayHeader(750, 100),
			),
			RelayState::RelayingParaHeader(100),
		);
	}

	#[test]
	fn relay_selects_better_para_header_after_better_relay_header_is_delivered() {
		// descendant relay header delivered; cache offers a better para header (110 at 780)
		assert_eq!(
			select_headers_to_relay(
				&mut RelayData {
					required_para_header: 100,
					para_header_at_target: 50,
					para_header_at_source: Some(120),
					relay_header_at_source: 800,
					relay_header_at_target: 780,
					headers_map_cache: &mut vec![(700, 90), (750, 100), (780, 110), (790, 120)]
						.into_iter()
						.collect(),
				},
				RelayState::RelayingRelayHeader(750, 100),
			),
			RelayState::RelayingParaHeader(110),
		);
	}

	#[test]
	fn relay_waits_for_para_header_to_be_delivered() {
		// para header at target (50) < required (100) => keep relaying para header
		assert_eq!(
			select_headers_to_relay(
				&mut RelayData {
					required_para_header: 100,
					para_header_at_target: 50,
					para_header_at_source: Some(110),
					relay_header_at_source: 800,
					relay_header_at_target: 700,
					headers_map_cache: &mut BTreeMap::new(),
				},
				RelayState::RelayingParaHeader(100),
			),
			RelayState::RelayingParaHeader(100),
		);
	}

	#[test]
	fn relay_stays_idle_if_required_para_header_is_already_delivered() {
		assert_eq!(
			select_headers_to_relay(
				&mut RelayData {
					required_para_header: 100,
					para_header_at_target: 100,
					para_header_at_source: Some(110),
					relay_header_at_source: 800,
					relay_header_at_target: 700,
					headers_map_cache: &mut BTreeMap::new(),
				},
				RelayState::Idle,
			),
			RelayState::Idle,
		);
	}

	#[test]
	fn relay_waits_for_required_para_header_to_appear_at_source_1() {
		// para header is not yet known to the source chain at all
		assert_eq!(
			select_headers_to_relay(
				&mut RelayData {
					required_para_header: 110,
					para_header_at_target: 100,
					para_header_at_source: None,
					relay_header_at_source: 800,
					relay_header_at_target: 700,
					headers_map_cache: &mut BTreeMap::new(),
				},
				RelayState::Idle,
			),
			RelayState::Idle,
		);
	}

	#[test]
	fn relay_waits_for_required_para_header_to_appear_at_source_2() {
		// source chain only knows an older para header (100 < 110)
		assert_eq!(
			select_headers_to_relay(
				&mut RelayData {
					required_para_header: 110,
					para_header_at_target: 100,
					para_header_at_source: Some(100),
					relay_header_at_source: 800,
					relay_header_at_target: 700,
					headers_map_cache: &mut BTreeMap::new(),
				},
				RelayState::Idle,
			),
			RelayState::Idle,
		);
	}

	#[test]
	fn relay_starts_relaying_relay_header_when_new_para_header_is_requested() {
		// relay headers lag at the target => phase 1 (relay chain header) comes first
		assert_eq!(
			select_headers_to_relay(
				&mut RelayData {
					required_para_header: 110,
					para_header_at_target: 100,
					para_header_at_source: Some(110),
					relay_header_at_source: 800,
					relay_header_at_target: 700,
					headers_map_cache: &mut BTreeMap::new(),
				},
				RelayState::Idle,
			),
			RelayState::RelayingRelayHeader(800, 110),
		);
	}

	#[test]
	fn relay_starts_relaying_para_header_when_new_para_header_is_requested() {
		// relay headers already synced => go straight to phase 2 (para header)
		assert_eq!(
			select_headers_to_relay(
				&mut RelayData {
					required_para_header: 110,
					para_header_at_target: 100,
					para_header_at_source: Some(110),
					relay_header_at_source: 800,
					relay_header_at_target: 800,
					headers_map_cache: &mut BTreeMap::new(),
				},
				RelayState::Idle,
			),
			RelayState::RelayingParaHeader(110),
		);
	}

	#[test]
	fn headers_map_cache_is_updated() {
		let mut headers_map_cache = BTreeMap::new();

		// when parachain header is known, map is updated
		select_headers_to_relay(
			&mut RelayData {
				required_para_header: 0,
				para_header_at_target: 50,
				para_header_at_source: Some(110),
				relay_header_at_source: 800,
				relay_header_at_target: 700,
				headers_map_cache: &mut headers_map_cache,
			},
			RelayState::RelayingRelayHeader(750, 100),
		);
		assert_eq!(headers_map_cache.clone().into_iter().collect::<Vec<_>>(), vec![(800, 110)],);

		// when parachain header is not known, map is NOT updated
		select_headers_to_relay(
			&mut RelayData {
				required_para_header: 0,
				para_header_at_target: 50,
				para_header_at_source: None,
				relay_header_at_source: 800,
				relay_header_at_target: 700,
				headers_map_cache: &mut headers_map_cache,
			},
			RelayState::RelayingRelayHeader(750, 100),
		);
		assert_eq!(headers_map_cache.clone().into_iter().collect::<Vec<_>>(), vec![(800, 110)],);

		// map auto-deduplicates equal entries
		select_headers_to_relay(
			&mut RelayData {
				required_para_header: 0,
				para_header_at_target: 50,
				para_header_at_source: Some(110),
				relay_header_at_source: 800,
				relay_header_at_target: 700,
				headers_map_cache: &mut headers_map_cache,
			},
			RelayState::RelayingRelayHeader(750, 100),
		);
		assert_eq!(headers_map_cache.clone().into_iter().collect::<Vec<_>>(), vec![(800, 110)],);

		// nothing is pruned if number of map entries is < MAX_HEADERS_MAP_CACHE_ENTRIES
		for i in 1..MAX_HEADERS_MAP_CACHE_ENTRIES {
			select_headers_to_relay(
				&mut RelayData {
					required_para_header: 0,
					para_header_at_target: 50,
					para_header_at_source: Some(110 + i),
					relay_header_at_source: 800 + i,
					relay_header_at_target: 700,
					headers_map_cache: &mut headers_map_cache,
				},
				RelayState::RelayingRelayHeader(750, 100),
			);
			assert_eq!(headers_map_cache.len(), i + 1);
		}

		// when we add next entry, the oldest one is pruned
		assert!(headers_map_cache.contains_key(&800));
		assert_eq!(headers_map_cache.len(), MAX_HEADERS_MAP_CACHE_ENTRIES);
		select_headers_to_relay(
			&mut RelayData {
				required_para_header: 0,
				para_header_at_target: 50,
				para_header_at_source: Some(110 + MAX_HEADERS_MAP_CACHE_ENTRIES),
				relay_header_at_source: 800 + MAX_HEADERS_MAP_CACHE_ENTRIES,
				relay_header_at_target: 700,
				headers_map_cache: &mut headers_map_cache,
			},
			RelayState::RelayingRelayHeader(750, 100),
		);
		assert!(!headers_map_cache.contains_key(&800));
		assert_eq!(headers_map_cache.len(), MAX_HEADERS_MAP_CACHE_ENTRIES);
	}
}