parity / polkadot, commit 0357d231 (Unverified)
Authored Aug 30, 2018 by Gav Wood; committed by GitHub on Aug 30, 2018.

Merge branch 'master' into a-wasm-authoring

Parents: 1576ad9d, ab08eb78
Changes: 15 files
Cargo.lock  View file @ 0357d231
This diff is collapsed.
api/Cargo.toml  View file @ 0357d231

@@ -17,6 +17,7 @@ substrate-client = { git = "https://github.com/paritytech/substrate" }
substrate-primitives = { git = "https://github.com/paritytech/substrate" }
substrate-executor = { git = "https://github.com/paritytech/substrate" }
substrate-state-machine = { git = "https://github.com/paritytech/substrate" }
log = "0.3"

[dev-dependencies]
substrate-keyring = { git = "https://github.com/paritytech/substrate" }
api/src/full.rs  View file @ 0357d231

@@ -119,7 +119,7 @@ impl<B: LocalBackend<Block, KeccakHasher, RlpCodec>> PolkadotApi for Client<B, L
        let encoded = block.encode();
        let res: Result<()> = call(self, at, "execute_block", &encoded);
        match res {
-            Ok(()) => Ok(true),
+            Ok(_) => Ok(true),
            Err(err) => match err.kind() {
                &ErrorKind::Execution(_) => Ok(false),
                _ => Err(err)
...
@@ -165,7 +165,8 @@ impl<B: LocalBackend<Block, KeccakHasher, RlpCodec>> PolkadotApi for Client<B, L
    }

    fn inherent_extrinsics(&self, at: &BlockId, inherent_data: InherentData) -> Result<Vec<UncheckedExtrinsic>> {
-        inherent_data.using_encoded(|encoded| {
+        let runtime_version = self.runtime_version_at(at)?;
+        (inherent_data, runtime_version.spec_version).using_encoded(|encoded| {
            call(self, at, "inherent_extrinsics", encoded)
        })
...
@@ -241,6 +242,7 @@ mod tests {
        assert_eq!(block.header.number, 1);
        assert!(block.header.extrinsics_root != Default::default());
        assert!(client.evaluate_block(&id, block).unwrap());
    }

    #[test]
...
@@ -263,6 +265,7 @@ mod tests {
        assert_eq!(block.header.number, 1);
        assert!(block.header.extrinsics_root != Default::default());
        assert!(client.evaluate_block(&id, block).unwrap());
    }

    #[test]
...
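The client-side change above encodes the inherent data together with the runtime's spec_version; the matching dispatch change appears further down in runtime/src/lib.rs, where the entry point now destructures the same (inherent, version) pair. A minimal sketch of that pairing in plain std Rust follows; the byte layout and function names are illustrative only, not the actual substrate-codec (SCALE) encoding:

// Toy sketch (plain std Rust, NOT the real substrate-codec/SCALE format): the client now
// sends the inherent data together with the runtime's spec_version, and the receiving side
// splits the same pair back apart. Function names and the byte layout are illustrative.
fn encode_call_args(inherent_data: &[u8], spec_version: u32) -> Vec<u8> {
    let mut out = inherent_data.to_vec();
    out.extend_from_slice(&spec_version.to_le_bytes()); // the version travels with the data
    out
}

fn decode_call_args(encoded: &[u8]) -> Option<(Vec<u8>, u32)> {
    if encoded.len() < 4 {
        return None;
    }
    // peel the version off the end, mirroring the order used when encoding
    let (data, version_bytes) = encoded.split_at(encoded.len() - 4);
    let mut buf = [0u8; 4];
    buf.copy_from_slice(version_bytes);
    Some((data.to_vec(), u32::from_le_bytes(buf)))
}

fn main() {
    let encoded = encode_call_args(b"timestamp + parachain heads", 4);
    let (data, version) = decode_call_args(&encoded).expect("round-trips");
    assert_eq!(version, 4);
    assert_eq!(data, b"timestamp + parachain heads".to_vec());
}

The point is only the ordering contract: whatever the caller appends after the data, the callee must peel off in the same way, which is why both sides of this merge change together.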
cli/Cargo.toml  View file @ 0357d231

@@ -5,29 +5,9 @@ authors = ["Parity Technologies <admin@parity.io>"]
description = "Polkadot node implementation in Rust."

[dependencies]
clap = { version = "~2.32", features = ["yaml"] }
error-chain = "0.12"
log = "0.3"
slog = "^2"
lazy_static = "1.0"
tokio = "0.1.7"
futures = "0.1.17"
parking_lot = "0.4"
exit-future = "0.1"
substrate-cli = { git = "https://github.com/paritytech/substrate" }
substrate-client = { git = "https://github.com/paritytech/substrate" }
substrate-codec = { git = "https://github.com/paritytech/substrate" }
substrate-extrinsic-pool = { git = "https://github.com/paritytech/substrate" }
substrate-network = { git = "https://github.com/paritytech/substrate" }
substrate-primitives = { git = "https://github.com/paritytech/substrate" }
substrate-rpc = { git = "https://github.com/paritytech/substrate" }
substrate-rpc-servers = { git = "https://github.com/paritytech/substrate" }
substrate-runtime-primitives = { git = "https://github.com/paritytech/substrate" }
substrate-service = { git = "https://github.com/paritytech/substrate" }
substrate-state-machine = { git = "https://github.com/paritytech/substrate" }
substrate-telemetry = { git = "https://github.com/paritytech/substrate" }
polkadot-primitives = { path = "../primitives" }
polkadot-runtime = { path = "../runtime" }
polkadot-service = { path = "../service" }
polkadot-transaction-pool = { path = "../transaction-pool" }
consensus/src/lib.rs  View file @ 0357d231

@@ -231,7 +231,10 @@ fn make_group_info(roster: DutyRoster, authorities: &[AuthorityId], local_id: Au
}

/// Polkadot proposer factory.
-pub struct ProposerFactory<C, N, P> {
+pub struct ProposerFactory<C, N, P> where P: PolkadotApi + Send + Sync + 'static {
    /// The client instance.
    pub client: Arc<P>,
    /// The transaction pool.
...
@@ -407,7 +410,7 @@ struct LocalDuty {
}

/// The Polkadot proposer logic.
-pub struct Proposer<C: PolkadotApi> {
+pub struct Proposer<C: PolkadotApi + Send + Sync> {
    client: Arc<C>,
    dynamic_inclusion: DynamicInclusion,
    local_key: Arc<ed25519::Pair>,
...
@@ -587,10 +590,10 @@ impl<C> bft::Proposer<Block> for Proposer<C>
        let local_id = self.local_key.public().0.into();
        let mut next_index = {
-            let cur_index = self.transaction_pool.cull_and_get_pending(BlockId::hash(self.parent_hash), |pending| pending
-                .filter(|tx| tx.sender().map(|s| s == local_id).unwrap_or(false))
+            let cur_index = self.transaction_pool.cull_and_get_pending(&BlockId::hash(self.parent_hash), |pending| pending
+                .filter(|tx| tx.verified.sender().map(|s| s == local_id).unwrap_or(false))
                .last()
-                .map(|tx| Ok(tx.index()))
+                .map(|tx| Ok(tx.verified.index()))
                .unwrap_or_else(|| self.client.index(&self.parent_id, local_id))
            );
...
@@ -636,9 +639,8 @@ impl<C> bft::Proposer<Block> for Proposer<C>
                index: extrinsic.index,
                function: extrinsic.function,
            };
-            let uxt = UncheckedExtrinsic::new(extrinsic, signature);
-            self.transaction_pool.import_unchecked_extrinsic(BlockId::hash(self.parent_hash), uxt)
+            let uxt: Vec<u8> = Decode::decode(&mut UncheckedExtrinsic::new(extrinsic, signature).encode().as_slice()).expect("Encoded extrinsic is valid");
+            self.transaction_pool.submit_one(&BlockId::hash(self.parent_hash), uxt)
                .expect("locally signed extrinsic is valid; qed");
        }
    }
...
@@ -720,7 +722,7 @@ impl ProposalTiming {
}

/// Future which resolves upon the creation of a proposal.
-pub struct CreateProposal<C: PolkadotApi> {
+pub struct CreateProposal<C: PolkadotApi + Send + Sync> {
    parent_hash: Hash,
    parent_number: BlockNumber,
    parent_id: BlockId,
...
@@ -732,7 +734,7 @@ pub struct CreateProposal<C: PolkadotApi> {
    offline: SharedOfflineTracker,
}

-impl<C> CreateProposal<C> where C: PolkadotApi {
+impl<C> CreateProposal<C> where C: PolkadotApi + Send + Sync {
    fn propose_with(&self, candidates: Vec<CandidateReceipt>) -> Result<Block, Error> {
        use polkadot_api::BlockBuilder;
        use runtime_primitives::traits::{Hash as HashT, BlakeTwo256};
...
@@ -767,18 +769,18 @@ impl<C> CreateProposal<C> where C: PolkadotApi {
        {
            let mut unqueue_invalid = Vec::new();
-            let result = self.transaction_pool.cull_and_get_pending(BlockId::hash(self.parent_hash), |pending_iterator| {
+            let result = self.transaction_pool.cull_and_get_pending(&BlockId::hash(self.parent_hash), |pending_iterator| {
                let mut pending_size = 0;
                for pending in pending_iterator {
-                    if pending_size + pending.encoded_size() >= MAX_TRANSACTIONS_SIZE { break }
+                    if pending_size + pending.verified.encoded_size() >= MAX_TRANSACTIONS_SIZE { break }

-                    match block_builder.push_extrinsic(pending.primitive_extrinsic()) {
+                    match block_builder.push_extrinsic(pending.original.clone()) {
                        Ok(()) => {
-                            pending_size += pending.encoded_size();
+                            pending_size += pending.verified.encoded_size();
                        }
                        Err(e) => {
                            trace!(target: "transaction-pool", "Invalid transaction: {}", e);
-                            unqueue_invalid.push(pending.hash().clone());
+                            unqueue_invalid.push(pending.verified.hash().clone());
                        }
                    }
                }
...
@@ -819,7 +821,7 @@ impl<C> CreateProposal<C> where C: PolkadotApi {
    }
}

-impl<C> Future for CreateProposal<C> where C: PolkadotApi {
+impl<C> Future for CreateProposal<C> where C: PolkadotApi + Send + Sync {
    type Item = Block;
    type Error = Error;
...
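The proposer now reads sender, index, size and hash from a pending transaction's verified view and pushes its original encoding into the block builder. Below is a rough sketch of that original/verified shape and of the current-index lookup from the hunk above; the structs and fields are illustrative stand-ins, not the real substrate extrinsic-pool types:

// Illustrative only: a minimal stand-in for the shape the new pool API implies.
// Each pending entry keeps the original, still-encoded extrinsic alongside its
// verified form, so the proposer reads sender/index/size from `verified` and
// pushes `original` into the block builder.
#[derive(Clone, Debug)]
struct Original(Vec<u8>); // opaque encoded extrinsic, illustrative

#[derive(Clone, Debug)]
struct Verified { sender: u64, index: u64 } // illustrative fields

#[derive(Clone, Debug)]
struct Pending { original: Original, verified: Verified }

impl Pending {
    fn encoded_size(&self) -> usize { self.original.0.len() }
}

fn main() {
    let local_id = 7u64;
    let pending = vec![
        Pending { original: Original(vec![1, 2, 3]), verified: Verified { sender: 7, index: 0 } },
        Pending { original: Original(vec![4, 5]), verified: Verified { sender: 7, index: 1 } },
        Pending { original: Original(vec![6]), verified: Verified { sender: 9, index: 0 } },
    ];

    // mirrors the cull_and_get_pending closure: last index our local key has in the pool
    let cur_index = pending.iter()
        .filter(|tx| tx.verified.sender == local_id)
        .last()
        .map(|tx| tx.verified.index)
        .unwrap_or(0); // the real code falls back to an on-chain account-index query here
    assert_eq!(cur_index, 1);

    // mirrors propose_with: the block receives the original encodings, bounded by total size
    let total_size: usize = pending.iter().map(Pending::encoded_size).sum();
    println!("cur_index = {}, pending bytes = {}", cur_index, total_size);
}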
consensus/src/offline_tracker.rs  View file @ 0357d231

@@ -21,14 +21,18 @@ use polkadot_primitives::AccountId;
use std::collections::HashMap;
use std::time::{Instant, Duration};

// time before we report a validator.
const REPORT_TIME: Duration = Duration::from_secs(60 * 5);

struct Observed {
    last_round_end: Instant,
    offline_since: Instant,
}

+#[derive(Eq, PartialEq)]
+enum Activity {
+    Offline,
+    StillOffline(Duration),
+    Online,
+}

impl Observed {
    fn new() -> Observed {
        let now = Instant::now();
...
@@ -38,31 +42,32 @@ impl Observed {
        }
    }

-    fn note_round_end(&mut self, was_online: bool) {
-        let now = Instant::now();
+    fn note_round_end(&mut self, now: Instant, was_online: Option<bool>) {
        self.last_round_end = now;
-        if was_online {
+        if let Some(false) = was_online {
            self.offline_since = now;
        }
    }

-    fn is_active(&self) -> bool {
+    /// Returns what we have observed about the online/offline state of the validator.
+    fn activity(&self) -> Activity {
        // can happen if clocks are not monotonic
-        if self.offline_since > self.last_round_end { return true }
-        self.last_round_end.duration_since(self.offline_since) < REPORT_TIME
+        if self.offline_since > self.last_round_end { return Activity::Online }
+        if self.offline_since == self.last_round_end { return Activity::Offline }
+        Activity::StillOffline(self.last_round_end.duration_since(self.offline_since))
    }
}

/// Tracks offline validators and can issue a report for those offline.
pub struct OfflineTracker {
    observed: HashMap<AccountId, Observed>,
+    block_instant: Instant,
}

impl OfflineTracker {
    /// Create a new tracker.
    pub fn new() -> Self {
-        OfflineTracker { observed: HashMap::new() }
+        OfflineTracker { observed: HashMap::new(), block_instant: Instant::now() }
    }

    /// Note new consensus is starting with the given set of validators.
...
@@ -71,23 +76,33 @@ impl OfflineTracker {
        let set: HashSet<_> = validators.iter().cloned().collect();
        self.observed.retain(|k, _| set.contains(k));
+        self.block_instant = Instant::now();
    }

    /// Note that a round has ended.
    pub fn note_round_end(&mut self, validator: AccountId, was_online: bool) {
-        self.observed.entry(validator)
-            .or_insert_with(Observed::new)
-            .note_round_end(was_online);
+        self.observed.entry(validator)
+            .or_insert_with(Observed::new);
+
+        for (val, obs) in self.observed.iter_mut() {
+            obs.note_round_end(
+                self.block_instant,
+                if val == &validator { Some(was_online) } else { None }
+            )
+        }
    }

    /// Generate a vector of indices for offline account IDs.
    pub fn reports(&self, validators: &[AccountId]) -> Vec<u32> {
        validators.iter()
            .enumerate()
-            .filter_map(|(i, v)| if self.is_online(v) {
-                None
-            } else {
-                Some(i as u32)
-            })
+            .filter_map(|(i, v)| if self.is_known_offline_now(v) {
+                Some(i as u32)
+            } else {
+                None
+            })
            .collect()
    }
...
@@ -101,13 +116,15 @@ impl OfflineTracker {
        };

        // we must think all validators reported externally are offline.
-        let thinks_online = self.is_online(v);
-        !thinks_online
+        self.is_known_offline_now(v)
        })
    }

-    fn is_online(&self, v: &AccountId) -> bool {
-        self.observed.get(v).map(Observed::is_active).unwrap_or(true)
+    /// Returns true only if we have seen the validator miss the last round. For further
+    /// rounds where we can't say for sure that they're still offline, we give them the
+    /// benefit of the doubt.
+    fn is_known_offline_now(&self, v: &AccountId) -> bool {
+        self.observed.get(v).map(|o| o.activity() == Activity::Offline).unwrap_or(false)
    }
}
...
@@ -121,17 +138,30 @@ mod tests {
        let v = [0; 32].into();
        let v2 = [1; 32].into();
        let v3 = [2; 32].into();
        tracker.note_new_block(&[v, v2, v3]);
        tracker.note_round_end(v, true);
        tracker.note_round_end(v2, true);
        tracker.note_round_end(v3, true);
        assert_eq!(tracker.reports(&[v, v2, v3]), vec![0u32; 0]);

        tracker.note_new_block(&[v, v2, v3]);
        tracker.note_round_end(v, true);
        tracker.note_round_end(v2, false);
        tracker.note_round_end(v3, true);
        assert_eq!(tracker.reports(&[v, v2, v3]), vec![1]);

        let slash_time = REPORT_TIME + Duration::from_secs(5);
        tracker.observed.get_mut(&v).unwrap().offline_since -= slash_time;
        tracker.observed.get_mut(&v2).unwrap().offline_since -= slash_time;

        tracker.note_new_block(&[v, v2, v3]);
        tracker.note_round_end(v, false);
-        assert_eq!(tracker.reports(&[v, v2, v3]), vec![0]);
+        assert_eq!(tracker.reports(&[v, v2, v3]), vec![0, 1]);

        tracker.note_new_block(&[v, v2, v3]);
        tracker.note_round_end(v, false);
        tracker.note_round_end(v2, true);
        tracker.note_round_end(v3, false);
        assert_eq!(tracker.reports(&[v, v2, v3]), vec![0, 2]);

-        tracker.note_new_block(&[v, v3]);
+        tracker.note_new_block(&[v, v2]);
        tracker.note_round_end(v, false);
        assert_eq!(tracker.reports(&[v, v2, v3]), vec![0]);
    }
}
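For readability, here is a condensed, self-contained restatement of the new Observed::activity logic from the hunks above, keeping only the two Instant fields the diff uses; is_known_offline_now then reports a validator only when this returns Activity::Offline:

// Condensed sketch of the activity decision introduced above (names follow the diff,
// everything else stripped down so it compiles on its own).
use std::time::{Duration, Instant};

#[derive(Debug, PartialEq, Eq)]
enum Activity {
    Offline,
    StillOffline(Duration),
    Online,
}

struct Observed {
    last_round_end: Instant,
    offline_since: Instant,
}

impl Observed {
    fn activity(&self) -> Activity {
        // can happen if clocks are not monotonic
        if self.offline_since > self.last_round_end {
            return Activity::Online;
        }
        // offline_since was reset in the most recent round: the validator just missed it
        if self.offline_since == self.last_round_end {
            return Activity::Offline;
        }
        // offline_since predates the last round end
        Activity::StillOffline(self.last_round_end.duration_since(self.offline_since))
    }
}

fn main() {
    let round_start = Instant::now();
    let round_end = round_start + Duration::from_secs(30);

    let just_missed = Observed { last_round_end: round_end, offline_since: round_end };
    assert_eq!(just_missed.activity(), Activity::Offline);

    let marked_offline_earlier = Observed { last_round_end: round_end, offline_since: round_start };
    assert_eq!(marked_offline_earlier.activity(), Activity::StillOffline(Duration::from_secs(30)));
}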
consensus/src/service.rs  View file @ 0357d231

@@ -62,32 +62,26 @@ fn start_bft<F, C>(
    const DELAY_UNTIL: Duration = Duration::from_millis(5000);

    let mut handle = LocalThreadHandle::current();
-    let work = Delay::new(Instant::now() + DELAY_UNTIL)
-        .then(move |res| {
-            if let Err(e) = res {
-                warn!(target: "bft", "Failed to force delay of consensus: {:?}", e);
-            }
-            match bft_service.build_upon(&header) {
-                Ok(maybe_bft_work) => {
-                    if maybe_bft_work.is_some() {
-                        debug!(target: "bft", "Starting agreement. After forced delay for {:?}", DELAY_UNTIL);
-                    }
-                    maybe_bft_work
-                }
-                Err(e) => {
-                    warn!(target: "bft", "BFT agreement error: {}", e);
-                    None
-                }
-            }
-        })
-        .map(|_| ());
-    if let Err(e) = handle.spawn_local(Box::new(work)) {
-        debug!(target: "bft", "Couldn't initialize BFT agreement: {:?}", e);
-    }
+    match bft_service.build_upon(&header) {
+        Ok(Some(bft_work)) => {
+            // do not poll work for some amount of time.
+            let work = Delay::new(Instant::now() + DELAY_UNTIL)
+                .then(move |res| {
+                    if let Err(e) = res {
+                        warn!(target: "bft", "Failed to force delay of consensus: {:?}", e);
+                    }
+                    debug!(target: "bft", "Starting agreement. After forced delay for {:?}", DELAY_UNTIL);
+                    bft_work
+                });
+            if let Err(e) = handle.spawn_local(Box::new(work)) {
+                warn!(target: "bft", "Couldn't initialize BFT agreement: {:?}", e);
+            }
+        }
+        Ok(None) => trace!(target: "bft", "Could not start agreement on top of {}", header.hash()),
+        Err(e) => warn!(target: "bft", "BFT agreement error: {}", e),
+    }
}

// creates a task to prune redundant entries in availability store upon block finalization
...
@@ -198,6 +192,7 @@ impl Service {
        client.import_notification_stream().for_each(move |notification| {
            if notification.is_new_best {
+                trace!(target: "bft", "Attempting to start new consensus round after import notification of {:?}", notification.hash);
                start_bft(notification.header, bft_service.clone());
            }
            Ok(())
...
@@ -221,15 +216,12 @@ impl Service {
        let c = client.clone();
        let s = bft_service.clone();

-        interval.map_err(|e| debug!("Timer error: {:?}", e)).for_each(move |_| {
+        interval.map_err(|e| debug!(target: "bft", "Timer error: {:?}", e)).for_each(move |_| {
            if let Ok(best_block) = c.best_block_header() {
                let hash = best_block.hash();
-                let last_agreement = s.last_agreement();
-                let can_build_upon = last_agreement.map_or(true, |x| !x.live || x.parent_hash != hash);
-                if hash == prev_best && can_build_upon {
-                    debug!("Starting consensus round after a timeout");
+                if hash == prev_best {
+                    debug!(target: "bft", "Starting consensus round after a timeout");
                    start_bft(best_block, s.clone());
                }
                prev_best = hash;
...
network/src/lib.rs  View file @ 0357d231

@@ -73,7 +73,7 @@ pub const DOT_PROTOCOL_ID: ::substrate_network::ProtocolId = *b"dot";
type FullStatus = GenericFullStatus<Block>;

/// Specialization of the network service for the polkadot protocol.
-pub type NetworkService = ::substrate_network::Service<Block, PolkadotProtocol>;
+pub type NetworkService = ::substrate_network::Service<Block, PolkadotProtocol, Hash>;

/// Status of a Polkadot node.
#[derive(Debug, PartialEq, Eq, Clone, Encode, Decode)]
...
primitives/src/lib.rs  View file @ 0357d231

@@ -120,4 +120,4 @@ pub struct InherentData {
    pub parachain_heads: Vec<::parachain::CandidateReceipt>,
    /// Indices of offline validators.
    pub offline_indices: Vec<u32>,
}
\ No newline at end of file
runtime/src/lib.rs  View file @ 0357d231

@@ -250,7 +250,7 @@ pub mod api {
        apply_extrinsic => |extrinsic| super::Executive::apply_extrinsic(extrinsic),
        execute_block => |block| super::Executive::execute_block(block),
        finalise_block => |()| super::Executive::finalise_block(),
-        inherent_extrinsics => |inherent| super::inherent_extrinsics(inherent),
+        inherent_extrinsics => |(inherent, version)| super::inherent_extrinsics(inherent, version),
        validator_count => |()| super::Session::validator_count(),
        validators => |()| super::Session::validators(),
        duty_roster => |()| super::Parachains::calculate_duty_roster(),
...
runtime/src/utils.rs  View file @ 0357d231

@@ -22,9 +22,10 @@ use runtime_primitives::traits::{Checkable, AuxLookup};
use timestamp::Call as TimestampCall;
use parachains::Call as ParachainsCall;
use session::Call as SessionCall;
+use version::RuntimeVersion;

/// Produces the list of inherent extrinsics.
-pub fn inherent_extrinsics(data: ::primitives::InherentData) -> Vec<UncheckedExtrinsic> {
+pub fn inherent_extrinsics(data: ::primitives::InherentData, runtime_version: RuntimeVersion) -> Vec<UncheckedExtrinsic> {
    let make_inherent = |function| UncheckedExtrinsic::new(
        Extrinsic {
            signed: Default::default(),
...
@@ -39,7 +40,7 @@ pub fn inherent_extrinsics(data: ::primitives::InherentData) -> Vec<UncheckedExt
        make_inherent(Call::Parachains(ParachainsCall::set_heads(data.parachain_heads))),
    ];

-    if !data.offline_indices.is_empty() {
+    if !data.offline_indices.is_empty() && runtime_version.spec_version == 4 {
        inherent.push(make_inherent(Call::Session(SessionCall::note_offline(data.offline_indices))));
...
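A small self-contained restatement of the gating added above: the offline report is only emitted when there are offline indices and the runtime's spec_version equals 4, as in the diff. The enum and constructor below are simplified stand-ins for the real Call and UncheckedExtrinsic types:

// Illustrative only: simplified stand-ins for the runtime's inherent construction.
#[derive(Debug, PartialEq)]
enum Inherent {
    Timestamp(u64),
    ParachainHeads(usize),
    NoteOffline(Vec<u32>),
}

fn inherent_extrinsics(timestamp: u64, heads: usize, offline_indices: Vec<u32>, spec_version: u32) -> Vec<Inherent> {
    let mut inherent = vec![
        Inherent::Timestamp(timestamp),
        Inherent::ParachainHeads(heads),
    ];
    // same shape as the diff: skip the offline report unless spec_version == 4
    if !offline_indices.is_empty() && spec_version == 4 {
        inherent.push(Inherent::NoteOffline(offline_indices));
    }
    inherent
}

fn main() {
    // other spec_versions: the report is omitted (mirrors the `spec_version == 4` check)
    assert_eq!(inherent_extrinsics(10, 2, vec![1], 3).len(), 2);
    // spec_version 4: the offline report is included
    let with_report = inherent_extrinsics(10, 2, vec![1], 4);
    assert_eq!(with_report.len(), 3);
    println!("{:?}", with_report);
}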
service/Cargo.toml  View file @ 0357d231

@@ -24,6 +24,5 @@ substrate-runtime-io = { git = "https://github.com/paritytech/substrate" }
substrate-primitives = { git = "https://github.com/paritytech/substrate" }
substrate-network = { git = "https://github.com/paritytech/substrate" }
substrate-client = { git = "https://github.com/paritytech/substrate" }
substrate-codec = { git = "https://github.com/paritytech/substrate" }
substrate-service = { git = "https://github.com/paritytech/substrate" }
substrate-telemetry = { git = "https://github.com/paritytech/substrate" }
service/src/lib.rs  View file @ 0357d231

@@ -29,7 +29,6 @@ extern crate polkadot_transaction_pool as transaction_pool;
extern crate polkadot_network;
extern crate substrate_primitives as primitives;
extern crate substrate_network as network;
extern crate substrate_codec as codec;
extern crate substrate_client as client;
extern crate substrate_service as service;
extern crate tokio;
...
@@ -42,14 +41,12 @@ extern crate hex_literal;
pub mod chain_spec;

use std::sync::Arc;
use std::collections::HashMap;
use codec::{Encode, Decode};
use tokio::prelude::{Stream, Future};
use transaction_pool::TransactionPool;
use polkadot_api::{PolkadotApi, light::RemotePolkadotApiWrapper};
use polkadot_primitives::{parachain, AccountId, Block, BlockId, Hash};
use polkadot_runtime::GenesisConfig;
-use client::Client;
+use client::{Client, BlockchainEvents};
use polkadot_network::{PolkadotProtocol, consensus::ConsensusNetwork};
use tokio::runtime::TaskExecutor;
use service::FactoryFullConfiguration;
...
@@ -63,7 +60,7 @@ pub use client::ExecutionStrategy;
pub type ChainSpec = service::ChainSpec<GenesisConfig>;

/// Polkadot client type for specialised `Components`.
pub type ComponentClient<C> = Client<<C as Components>::Backend, <C as Components>::Executor, Block>;

-pub type NetworkService = network::Service<Block, <Factory as service::ServiceFactory>::NetworkProtocol>;
+pub type NetworkService = network::Service<Block, <Factory as service::ServiceFactory>::NetworkProtocol, Hash>;

/// A collection of type to generalise Polkadot specific components over full / light client.
pub trait Components: service::Components {
...
@@ -106,16 +103,11 @@ pub struct Factory;
impl service::ServiceFactory for Factory {
    type Block = Block;
+    type ExtrinsicHash = Hash;
    type NetworkProtocol = PolkadotProtocol;
    type RuntimeDispatch = polkadot_executor::Executor;
-    type FullExtrinsicPool = TransactionPoolAdapter<
-        service::FullBackend<Self>,
-        service::FullExecutor<Self>,
-        service::FullClient<Self>
-    >;
-    type LightExtrinsicPool = TransactionPoolAdapter<
-        service::LightBackend<Self>,
-        service::LightExecutor<Self>
+    type FullExtrinsicPoolApi = transaction_pool::ChainApi<service::FullClient<Self>>;
+    type LightExtrinsicPoolApi = transaction_pool::ChainApi<
+        RemotePolkadotApiWrapper<service::LightBackend<Self>, service::LightExecutor<Self>>
+    >;
    type Genesis = GenesisConfig;
...
@@ -124,25 +116,17 @@ impl service::ServiceFactory for Factory {
    const NETWORK_PROTOCOL_ID: network::ProtocolId = ::polkadot_network::DOT_PROTOCOL_ID;

    fn build_full_extrinsic_pool(config: ExtrinsicPoolOptions, client: Arc<service::FullClient<Self>>)
-        -> Result<Self::FullExtrinsicPool, Error>
+        -> Result<TransactionPool<service::FullClient<Self>>, Error>
    {
        let api = client.clone();
-        Ok(TransactionPoolAdapter {
-            pool: Arc::new(TransactionPool::new(config, api)),
-            client: client,
-            imports_external_transactions: true,
-        })
+        Ok(TransactionPool::new(config, transaction_pool::ChainApi::new(api)))
    }

    fn build_light_extrinsic_pool(config: ExtrinsicPoolOptions, client: Arc<service::LightClient<Self>>)
-        -> Result<Self::LightExtrinsicPool, Error>
+        -> Result<TransactionPool<RemotePolkadotApiWrapper<service::LightBackend<Self>, service::LightExecutor<Self>>>, Error>
    {
        let api = Arc::new(RemotePolkadotApiWrapper(client.clone()));
-        Ok(TransactionPoolAdapter {
-            pool: Arc::new(TransactionPool::new(config, api)),
-            client: client,
-            imports_external_transactions: false,
-        })
+        Ok(TransactionPool::new(config, transaction_pool::ChainApi::new(api)))
    }

    fn build_network_protocol(config: &Configuration)
...
@@ -182,8 +166,18 @@ impl <C: Components> Service<C> {
    pub fn new_light(config: Configuration, executor: TaskExecutor)
        -> Result<Service<LightComponents<Factory>>, Error>
    {
-        let service = service::Service::<LightComponents<Factory>>::new(config, executor)?;
+        let service = service::Service::<LightComponents<Factory>>::new(config, executor.clone())?;
        let api = Arc::new(RemotePolkadotApiWrapper(service.client()));
+        let pool = service.extrinsic_pool();
+        let events = service.client().import_notification_stream()
+            .for_each(move |notification| {
+                // re-verify all transactions without the sender.
+                pool.retry_verification(&BlockId::hash(notification.hash), None)
+                    .map_err(|e| warn!("Error re-verifying transactions: {:?}", e))?;
+                Ok(())
+            })
+            .then(|_| Ok(()));
+        executor.spawn(events);
        Ok(Service {
            client: service.client(),
            network: service.network(),
...
@@ -212,7 +206,16 @@ pub fn new_full(config: Configuration, executor: TaskExecutor)
    let is_validator = (config.roles & Roles::AUTHORITY) == Roles::AUTHORITY;
    let service = service::Service::<FullComponents<Factory>>::new(config, executor.clone())?;
+    let pool = service.extrinsic_pool();
+    let events = service.client().import_notification_stream()
+        .for_each(move |notification| {
+            // re-verify all transactions without the sender.
+            pool.retry_verification(&BlockId::hash(notification.hash), None)
+                .map_err(|e| warn!("Error re-verifying transactions: {:?}", e))?;
+            Ok(())
+        })
+        .then(|_| Ok(()));
+    executor.spawn(events);

    // Spin consensus service if configured
    let consensus = if is_validator {
        // Load the first available key
...
@@ -261,103 +264,3 @@ impl<C: Components> ::std::ops::Deref for Service<C> {
        &self.inner
    }
}

-/// Transaction pool adapter.
-pub struct TransactionPoolAdapter<B, E, A> where A: Send + Sync, E: Send + Sync {
-    imports_external_transactions: bool,
-    pool: Arc<TransactionPool<A>>,
-    client: Arc<Client<B, E, Block>>,
-}
-
-impl<B, E, A> TransactionPoolAdapter<B, E, A>
-    where
-        A: Send + Sync,
-        B: client::backend::Backend<Block, KeccakHasher, RlpCodec> + Send + Sync,
-        E: client::CallExecutor<Block, KeccakHasher, RlpCodec> + Send + Sync,
-{
-    fn best_block_id(&self) -> Option<BlockId> {
-        self.client.info().map(|info| BlockId::hash(info.chain.best_hash))