// Copyright 2018-2020 Parity Technologies (UK) Ltd.
// This file is part of Polkadot.

// Polkadot is free software: you can redistribute it and/or modify
// it under the terms of the GNU General Public License as published by
// the Free Software Foundation, either version 3 of the License, or
// (at your option) any later version.

// Polkadot is distributed in the hope that it will be useful,
// but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
// GNU General Public License for more details.

// You should have received a copy of the GNU General Public License
// along with Polkadot.  If not, see <http://www.gnu.org/licenses/>.

//! As part of Polkadot's availability system, certain pieces of data
//! for each block are required to be kept available.
//!
//! We accomplish this by erasure coding the data into n pieces
//! and constructing a merkle root over the hashes of those pieces.
//!
//! Each of the n validators stores its piece of data. We assume n = 3f + k, 0 < k ≤ 3,
//! where f is the maximum number of faulty validators in the system.
//! The data is coded so that any f + 1 chunks can be used to reconstruct the full data.
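//!
//! # Example
//!
//! A minimal sketch of the intended flow; `n_validators`, `available_data`
//! and `received_chunks` are assumed to be in scope (not compiled here):
//!
//! ```ignore
//! // one erasure-coded chunk per validator, plus a merkle branch for each.
//! let chunks = obtain_chunks_v1(n_validators, &available_data)?;
//! let branches = branches(&chunks);
//! let root = branches.root();
//! // ... distribute each (proof, chunk) pair to its validator ...
//! // later, any f + 1 indexed chunks suffice to recover the data:
//! let recovered = reconstruct_v1(n_validators, received_chunks)?;
//! ```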

use codec::{Encode, Decode};
use reed_solomon::galois_16::{self, ReedSolomon};
use primitives::v0::{self, Hash as H256, BlakeTwo256, HashT};
use primitives::v1;
use sp_core::Blake2Hasher;
use trie::{EMPTY_PREFIX, MemoryDB, Trie, TrieMut, trie_types::{TrieDBMut, TrieDB}};

use self::wrapped_shard::WrappedShard;

mod wrapped_shard;

// we are limited to the field order of GF(2^16), which is 65536
const MAX_VALIDATORS: usize = <galois_16::Field as reed_solomon::Field>::ORDER;

/// Errors in erasure coding.
#[derive(Debug, Clone, PartialEq, derive_more::Display)]
pub enum Error {
	/// Returned when there are too many validators.
	TooManyValidators,
	/// Cannot encode anything when there are no validators.
	EmptyValidators,
	/// Cannot reconstruct: wrong number of validators.
	WrongValidatorCount,
	/// Not enough chunks present.
	NotEnoughChunks,
	/// Too many chunks present.
	TooManyChunks,
	/// Chunks not of uniform length or the chunks are empty.
	NonUniformChunks,
	/// An uneven byte-length of a shard is not valid for GF(2^16) encoding.
	UnevenLength,
	/// Chunk index out of bounds.
	#[display(fmt = "Chunk is out of bounds: {} {}", _0, _1)]
	ChunkIndexOutOfBounds(usize, usize),
	/// Bad payload in reconstructed bytes.
	BadPayload,
	/// Invalid branch proof.
	InvalidBranchProof,
	/// Branch out of bounds.
	BranchOutOfBounds,
}

impl std::error::Error for Error { }

#[derive(Debug, PartialEq)]
struct CodeParams {
	data_shards: usize,
	parity_shards: usize,
}

impl CodeParams {
	// the shard length needed for a payload with initial size `base_len`.
	fn shard_len(&self, base_len: usize) -> usize {
		// how many bytes we actually need.
		let needed_shard_len = base_len / self.data_shards
			+ (base_len % self.data_shards != 0) as usize;

		// round up to next even number
		// (no actual space overhead since we are working in GF(2^16)).
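		// e.g. a 19-byte payload over 7 data shards needs ceil(19 / 7) = 3
		// bytes per shard, which rounds up to 4 (cf. `shard_len_is_reasonable`).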
		needed_shard_len + needed_shard_len % 2
	}

	fn make_shards_for(&self, payload: &[u8]) -> Vec<WrappedShard> {
		let shard_len = self.shard_len(payload.len());
		let mut shards = vec![
			WrappedShard::new(vec![0; shard_len]);
			self.data_shards + self.parity_shards
		];

		for (data_chunk, blank_shard) in payload.chunks(shard_len).zip(&mut shards) {
			// fill the empty shards with the corresponding piece of the payload,
			// zero-padded to fit in the shards.
			let len = std::cmp::min(shard_len, data_chunk.len());
			let blank_shard: &mut [u8] = blank_shard.as_mut();
			blank_shard[..len].copy_from_slice(&data_chunk[..len]);
		}

		shards
	}

	// make a reed-solomon instance.
	fn make_encoder(&self) -> ReedSolomon {
		ReedSolomon::new(self.data_shards, self.parity_shards)
			.expect("this struct is not created with invalid shard number; qed")
	}
}

fn code_params(n_validators: usize) -> Result<CodeParams, Error> {
	if n_validators > MAX_VALIDATORS { return Err(Error::TooManyValidators) }
	if n_validators == 0 { return Err(Error::EmptyValidators) }

	let n_faulty = n_validators.saturating_sub(1) / 3;
	let n_good = n_validators - n_faulty;
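	// e.g. with 10 validators: n_faulty = 3, giving 4 data shards and
	// 6 parity shards; any 4 of the 10 chunks can reconstruct the payload.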

	Ok(CodeParams {
		data_shards: n_faulty + 1,
		parity_shards: n_good - 1,
	})
}

/// Obtain erasure-coded chunks for v0 `AvailableData`, one for each validator.
///
/// Works only up to 65536 validators, and `n_validators` must be non-zero.
pub fn obtain_chunks_v0(n_validators: usize, data: &v0::AvailableData)
	-> Result<Vec<Vec<u8>>, Error>
{
	obtain_chunks(n_validators, data)
}

/// Obtain erasure-coded chunks for v1 `AvailableData`, one for each validator.
///
/// Works only up to 65536 validators, and `n_validators` must be non-zero.
pub fn obtain_chunks_v1(n_validators: usize, data: &v1::AvailableData)
	-> Result<Vec<Vec<u8>>, Error>
{
	obtain_chunks(n_validators, data)
}

/// Obtain erasure-coded chunks, one for each validator.
///
/// Works only up to 65536 validators, and `n_validators` must be non-zero.
fn obtain_chunks<T: Encode>(n_validators: usize, data: &T)
	-> Result<Vec<Vec<u8>>, Error>
{
	let params = code_params(n_validators)?;
	let encoded = data.encode();

	if encoded.is_empty() {
		return Err(Error::BadPayload);
	}

	let mut shards = params.make_shards_for(&encoded[..]);

	params.make_encoder().encode(&mut shards[..])
		.expect("Payload non-empty, shard sizes are uniform, and validator numbers checked; qed");

	Ok(shards.into_iter().map(|w| w.into_inner()).collect())
}

/// Reconstruct the v0 available data from a set of chunks.
///
/// Provide an iterator containing chunk data and the corresponding index.
/// The indices of the present chunks must be indicated. If too few chunks
/// are provided, recovery is not possible.
///
/// Works only up to 65536 validators, and `n_validators` must be non-zero.
pub fn reconstruct_v0<'a, I: 'a>(n_validators: usize, chunks: I)
	-> Result<v0::AvailableData, Error>
	where I: IntoIterator<Item=(&'a [u8], usize)>
{
	reconstruct(n_validators, chunks)
}

/// Reconstruct the v1 available data from a set of chunks.
///
/// Provide an iterator containing chunk data and the corresponding index.
/// The indices of the present chunks must be indicated. If too few chunks
/// are provided, recovery is not possible.
///
/// Works only up to 65536 validators, and `n_validators` must be non-zero.
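///
/// A sketch of the call shape, with `chunks` being the `Vec<Vec<u8>>` returned
/// by `obtain_chunks_v1` (for 10 validators, any 4 indexed chunks suffice):
///
/// ```ignore
/// let data: v1::AvailableData = reconstruct_v1(
/// 	10,
/// 	[(&*chunks[1], 1), (&*chunks[4], 4), (&*chunks[6], 6), (&*chunks[9], 9)]
/// 		.iter().cloned(),
/// )?;
/// ```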
pub fn reconstruct_v1<'a, I: 'a>(n_validators: usize, chunks: I)
	-> Result<v1::AvailableData, Error>
	where I: IntoIterator<Item=(&'a [u8], usize)>
{
	reconstruct(n_validators, chunks)
}

/// Reconstruct decodable data from a set of chunks.
///
/// Provide an iterator containing chunk data and the corresponding index.
/// The indices of the present chunks must be indicated. If too few chunks
/// are provided, recovery is not possible.
///
/// Works only up to 65536 validators, and `n_validators` must be non-zero.
fn reconstruct<'a, I: 'a, T: Decode>(n_validators: usize, chunks: I) -> Result<T, Error>
	where I: IntoIterator<Item=(&'a [u8], usize)>
{
	let params = code_params(n_validators)?;
	let mut shards: Vec<Option<WrappedShard>> = vec![None; n_validators];
	let mut shard_len = None;
	for (chunk_data, chunk_idx) in chunks.into_iter().take(n_validators) {
		if chunk_idx >= n_validators {
			return Err(Error::ChunkIndexOutOfBounds(chunk_idx, n_validators));
		}

		let shard_len = shard_len.get_or_insert_with(|| chunk_data.len());

		if *shard_len % 2 != 0 {
			return Err(Error::UnevenLength);
		}

		if *shard_len != chunk_data.len() || *shard_len == 0 {
			return Err(Error::NonUniformChunks);
		}

		shards[chunk_idx] = Some(WrappedShard::new(chunk_data.to_vec()));
	}

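	// if reconstruction succeeds, the missing (`None`) shards, and in
	// particular every data shard, have been filled in.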
	if let Err(e) = params.make_encoder().reconstruct(&mut shards[..]) {
		match e {
			reed_solomon::Error::TooFewShardsPresent => Err(Error::NotEnoughChunks)?,
			reed_solomon::Error::InvalidShardFlags => Err(Error::WrongValidatorCount)?,
			reed_solomon::Error::TooManyShards => Err(Error::TooManyChunks)?,
			reed_solomon::Error::EmptyShard => panic!("chunks are all non-empty; this is checked above; qed"),
			reed_solomon::Error::IncorrectShardSize => panic!("chunks are all same len; this is checked above; qed"),
			_ => panic!("reed_solomon encoder returns no more variants for this function; qed"),
		}
	}

	// lazily decode from the data shards.
	Decode::decode(&mut ShardInput {
		remaining_len: shard_len.map(|s| s * params.data_shards).unwrap_or(0),
		cur_shard: None,
		shards: shards.iter()
			.map(|x| x.as_ref())
			.take(params.data_shards)
			.map(|x| x.expect("all data shards have been recovered; qed"))
			.map(|x| x.as_ref()),
	}).map_err(|_| Error::BadPayload)
}

/// An iterator that yields merkle branches and chunk data for all chunks to
/// be sent to other validators.
pub struct Branches<'a, I> {
	trie_storage: MemoryDB<Blake2Hasher>,
	root: H256,
	chunks: &'a [I],
	current_pos: usize,
}

impl<'a, I: AsRef<[u8]>> Branches<'a, I> {
	/// Get the trie root.
	pub fn root(&self) -> H256 { self.root.clone() }
}

impl<'a, I: AsRef<[u8]>> Iterator for Branches<'a, I> {
	type Item = (Vec<Vec<u8>>, &'a [u8]);

	fn next(&mut self) -> Option<Self::Item> {
		use trie::Recorder;

		let trie = TrieDB::new(&self.trie_storage, &self.root)
			.expect("`Branches` is only created with a valid memorydb that contains all nodes for the trie with given root; qed");

		let mut recorder = Recorder::new();
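		// look up the chunk hash at the current index, recording the trie
		// nodes traversed; the recorded nodes form this chunk's merkle branch.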
		let res = (self.current_pos as u32).using_encoded(|s|
			trie.get_with(s, &mut recorder)
		);

		match res.expect("all nodes in trie present; qed") {
			Some(_) => {
				let nodes = recorder.drain().into_iter().map(|r| r.data).collect();
				let chunk = self.chunks.get(self.current_pos)
					.expect("there is a one-to-one mapping of chunks to valid merkle branches; qed");

				self.current_pos += 1;
				Some((nodes, chunk.as_ref()))
			}
			None => None,
		}
	}
}

/// Construct a trie from chunks of an erasure-coded value. This returns the root hash and an
/// iterator of merkle proofs, one for each validator.
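///
/// A sketch of the expected use (mirroring `construct_valid_branches` in the
/// tests below):
///
/// ```ignore
/// let branches = branches(chunks.as_ref());
/// let root = branches.root();
/// for (proof, chunk) in branches {
/// 	// hand (proof, chunk) to the validator for this index; the proof
/// 	// can later be checked against `root` via `branch_hash`.
/// }
/// ```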
pub fn branches<'a, I: 'a>(chunks: &'a [I]) -> Branches<'a, I>
	where I: AsRef<[u8]>,
{
	let mut trie_storage: MemoryDB<Blake2Hasher> = MemoryDB::default();
	let mut root = H256::default();

	// construct trie mapping each chunk's index to its hash.
	{
		let mut trie = TrieDBMut::new(&mut trie_storage, &mut root);
		for (i, chunk) in chunks.as_ref().iter().enumerate() {
			(i as u32).using_encoded(|encoded_index| {
				let chunk_hash = BlakeTwo256::hash(chunk.as_ref());
				trie.insert(encoded_index, chunk_hash.as_ref())
					.expect("a fresh trie stored in memory cannot have errors loading nodes; qed");
			})
		}
	}

	Branches {
		trie_storage,
		root,
		chunks,
		current_pos: 0,
	}
}

/// Verify a merkle branch, yielding the chunk hash meant to be present at that
/// index.
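///
/// A sketch of checking a received `(proof, chunk)` pair against a known root:
///
/// ```ignore
/// let expected = branch_hash(&root, &proof, index)?;
/// assert_eq!(expected, BlakeTwo256::hash(&chunk));
/// ```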
pub fn branch_hash(root: &H256, branch_nodes: &[Vec<u8>], index: usize) -> Result<H256, Error> {
	let mut trie_storage: MemoryDB<Blake2Hasher> = MemoryDB::default();
	for node in branch_nodes.iter() {
		(&mut trie_storage as &mut trie::HashDB<_>).insert(EMPTY_PREFIX, node.as_slice());
	}

	let trie = TrieDB::new(&trie_storage, &root).map_err(|_| Error::InvalidBranchProof)?;
	let res = (index as u32).using_encoded(|key|
		trie.get_with(key, |raw_hash: &[u8]| H256::decode(&mut &raw_hash[..]))
	);

	match res {
		Ok(Some(Ok(hash))) => Ok(hash),
		Ok(Some(Err(_))) => Err(Error::InvalidBranchProof), // hash failed to decode
		Ok(None) => Err(Error::BranchOutOfBounds),
		Err(_) => Err(Error::InvalidBranchProof),
	}
}

// input for `codec` which draws data from the data shards
struct ShardInput<'a, I> {
	remaining_len: usize,
	shards: I,
	cur_shard: Option<(&'a [u8], usize)>,
}

impl<'a, I: Iterator<Item=&'a [u8]>> codec::Input for ShardInput<'a, I> {
	fn remaining_len(&mut self) -> Result<Option<usize>, codec::Error> {
		Ok(Some(self.remaining_len))
	}

	fn read(&mut self, into: &mut [u8]) -> Result<(), codec::Error> {
		let mut read_bytes = 0;

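		// copy into `into` from the current shard, pulling the next shard from
		// the iterator whenever the current one is exhausted.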
		loop {
			if read_bytes == into.len() { break }

			let cur_shard = self.cur_shard.take().or_else(|| self.shards.next().map(|s| (s, 0)));
			let (active_shard, mut in_shard) = match cur_shard {
				Some((s, i)) => (s, i),
				None => break,
			};

			if in_shard >= active_shard.len() {
				continue;
			}

			let remaining_len_out = into.len() - read_bytes;
			let remaining_len_shard = active_shard.len() - in_shard;

			let write_len = std::cmp::min(remaining_len_out, remaining_len_shard);
			into[read_bytes..][..write_len]
				.copy_from_slice(&active_shard[in_shard..][..write_len]);

			in_shard += write_len;
			read_bytes += write_len;
			self.cur_shard = Some((active_shard, in_shard))
		}

		self.remaining_len -= read_bytes;
		if read_bytes == into.len() {
			Ok(())
		} else {
			Err("slice provided too big for input".into())
		}
	}
}

#[cfg(test)]
mod tests {
	use super::*;
	use primitives::v0::{AvailableData, BlockData, PoVBlock};

	#[test]
	fn field_order_is_right_size() {
		assert_eq!(MAX_VALIDATORS, 65536);
	}
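
	// a small sanity check (a sketch): `ShardInput` should read the shards
	// back-to-back through the `codec::Input` interface.
	#[test]
	fn shard_input_reads_across_shards() {
		use codec::Input;

		let shards: Vec<&[u8]> = vec![&[1, 2], &[3, 4]];
		let mut input = ShardInput {
			remaining_len: 4,
			cur_shard: None,
			shards: shards.into_iter(),
		};

		let mut out = [0u8; 4];
		input.read(&mut out).unwrap();
		assert_eq!(out, [1, 2, 3, 4]);
		assert_eq!(input.remaining_len().unwrap(), Some(0));
	}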

	#[test]
	fn test_code_params() {
		assert_eq!(code_params(0), Err(Error::EmptyValidators));

		assert_eq!(code_params(1), Ok(CodeParams {
			data_shards: 1,
			parity_shards: 0,
		}));

		assert_eq!(code_params(2), Ok(CodeParams {
			data_shards: 1,
			parity_shards: 1,
		}));

		assert_eq!(code_params(3), Ok(CodeParams {
			data_shards: 1,
			parity_shards: 2,
		}));

		assert_eq!(code_params(4), Ok(CodeParams {
			data_shards: 2,
			parity_shards: 2,
		}));

		assert_eq!(code_params(100), Ok(CodeParams {
			data_shards: 34,
			parity_shards: 66,
		}));
	}

	#[test]
	fn shard_len_is_reasonable() {
		let mut params = CodeParams {
			data_shards: 5,
			parity_shards: 0, // doesn't affect calculation.
		};

		assert_eq!(params.shard_len(100), 20);
		assert_eq!(params.shard_len(99), 20);

		// see if it rounds up to the next even number.
		assert_eq!(params.shard_len(95), 20);
		assert_eq!(params.shard_len(94), 20);

		assert_eq!(params.shard_len(89), 18);

		params.data_shards = 7;

		// needs 3 bytes to fit, rounded up to next even number.
		assert_eq!(params.shard_len(19), 4);
	}

	#[test]
	fn round_trip_works() {
		let pov_block = PoVBlock {
			block_data: BlockData((0..255).collect()),
		};

		let available_data = AvailableData {
			pov_block,
			omitted_validation: Default::default(),
		};
		let chunks = obtain_chunks(
			10,
			&available_data,
		).unwrap();

		assert_eq!(chunks.len(), 10);

		// any 4 chunks should work.
		let reconstructed: AvailableData = reconstruct(
			10,
			[
				(&*chunks[1], 1),
				(&*chunks[4], 4),
				(&*chunks[6], 6),
				(&*chunks[9], 9),
			].iter().cloned(),
		).unwrap();

		assert_eq!(reconstructed, available_data);
	}

	#[test]
	fn construct_valid_branches() {
		let pov_block = PoVBlock {
			block_data: BlockData(vec![2; 256]),
		};

		let available_data = AvailableData {
			pov_block,
			omitted_validation: Default::default(),
		};

		let chunks = obtain_chunks(
			10,
			&available_data,
		).unwrap();

		assert_eq!(chunks.len(), 10);

		let branches = branches(chunks.as_ref());
		let root = branches.root();

		let proofs: Vec<_> = branches.map(|(proof, _)| proof).collect();

		assert_eq!(proofs.len(), 10);

		for (i, proof) in proofs.into_iter().enumerate() {
			assert_eq!(branch_hash(&root, &proof, i).unwrap(), BlakeTwo256::hash(&chunks[i]));
		}
	}
}