parity / polkadot

Commit 6088315b authored Feb 17, 2021 by Cecile Tonglet
Merge commit 71475c85 (no conflict)
Parents: 32e6235e, 71475c85
Pipeline #124431 failed with stages in 7 minutes and 34 seconds
57 changed files
.github/ISSUE_TEMPLATE/bug_report.md

@@ -9,5 +9,5 @@ assignees: ''
 - It would help if you submit info about the system you are running, e.g.: operating system, kernel version, amount of available memory and swap, etc.
 - Logs could be very helpful. If possible, submit the whole log. Please format it as ```code blocks```.
-- Describe the role your node plays, e.g. validator, sentry, full node or light client.
+- Describe the role your node plays, e.g. validator, full node or light client.
 - Any command-line options were passed?
Cargo.lock (diff collapsed)
docker/sentry-docker-compose.yml (deleted, 100644 → 0)

# Docker compose file to simulate a sentry node setup.
#
#
# Setup:
#
# Validator A is not supposed to be connected to the public internet. Instead it
# connects to a sentry node (sentry-a) which connects to the public internet.
#
#
# Usage:
#
# 1. Build `target/release/substrate` binary: `cargo build --release`
#
# 2. Start networks and containers: `sudo docker-compose -f scripts/sentry-node/docker-compose.yml up`
#
# 3. Reach:
#  - validator-a: localhost:9944
#  - sentry-a: localhost:9946

version: "3.7"
services:
  validator:
    ports:
      - "9944:9944"
    # volumes:
    #   - ../../target/release/substrate:/usr/local/bin/substrate
    image: parity/polkadot
    networks:
      - network-a
    environment:
      - VALIDATOR_NODE_KEY
      - VALIDATOR_BASE_PATH
      - VALIDATOR_CHAIN
      - VALIDATOR_PORT
      - VALIDATOR_NAME
      - VALIDATOR_RESERVED_NODES
    command:
      # Local node id: QmRpheLN4JWdAnY7HGJfWFNbfkQCb6tFf4vvA6hgjMZKrR
      - "--node-key"
      - "${VALIDATOR_NODE_KEY:-0000000000000000000000000000000000000000000000000000000000000001}"
      - "--base-path"
      - "${VALIDATOR_BASE_PATH:-/tmp/alice}"
      - "--chain=${VALIDATOR_CHAIN:-local}"
      - "--port"
      - "${VALIDATOR_PORT:-30333}"
      - "--validator"
      - "--name"
      - "${VALIDATOR_NANE:-AlicesNode}"
      - "--reserved-nodes"
      - "${VALIDATOR_RESERVED_NODES:-/dns/sentry/tcp/30333/p2p/QmV7EhW6J6KgmNdr558RH1mPx2xGGznW7At4BhXzntRFsi}"
      # Not only bind to localhost.
      - "--ws-external"
      - "--rpc-external"
      # - "--log"
      # - "sub-libp2p=trace"
      # - "--log"
      # - "afg=trace"
      - "--no-telemetry"
      - "--rpc-cors"
      - "all"

  sentry:
    image: parity/polkadot
    ports:
      - "9946:9944"
    # volumes:
    #   - ../../target/release/substrate:/usr/local/bin/substrate
    networks:
      - network-a
      - internet
    environment:
      - SENTRY_NODE_KEY
      - SENTRY_BASE_PATH
      - SENTRY_CHAIN
      - SENTRY_PORT
      - SENTRY_NAME
      - SENTRY_BOOTNODES
    command:
      # Local node id: QmV7EhW6J6KgmNdr558RH1mPx2xGGznW7At4BhXzntRFsi
      - "--node-key"
      - "${SENTRY_NODE_KEY:-0000000000000000000000000000000000000000000000000000000000000003}"
      - "--base-path"
      - "${SENTRY_BASE_PATH:-/tmp/charlies}"
      - "--chain=${SENTRY_CHAIN:-local}"
      # Don't configure a key, as sentry is not a validator.
      - "--port"
      - "${SENTRY_PORT:-30333}"
      # sentry-a is not a validator.
      # - "--validator"
      - "--name"
      - "${SENTRY_NAME:-CharliesNode}"
      - "--bootnodes"
      - "${SENTRY_BOOTNODES:-/dns/validator-a/tcp/30333/p2p/QmRpheLN4JWdAnY7HGJfWFNbfkQCb6tFf4vvA6hgjMZKrR}"
      - "--no-telemetry"
      - "--rpc-cors"
      - "all"
      # Not only bind to localhost.
      - "--ws-external"
      - "--rpc-external"

networks:
  network-a:
  internet:
node/core/approval-voting/src/import.rs
@@ -121,13 +121,13 @@ async fn determine_new_blocks(
		return Ok(ancestry);
	}

-	loop {
+	'outer: loop {
		let &(ref last_hash, ref last_header) = ancestry.last()
			.expect("ancestry has length 1 at initialization and is only added to; qed");

		// If we iterated back to genesis, which can happen at the beginning of chains.
		if last_header.number <= 1 {
-			break
+			break 'outer
		}

		let (tx, rx) = oneshot::channel();
@@ -139,7 +139,7 @@ async fn determine_new_blocks(
		// Continue past these errors.
		let batch_hashes = match rx.await {
-			Err(_) | Ok(Err(_)) => break,
+			Err(_) | Ok(Err(_)) => break 'outer,
			Ok(Ok(ancestors)) => ancestors,
		};
@@ -179,14 +179,13 @@ async fn determine_new_blocks(
		let is_relevant = header.number > finalized_number;

		if is_known || !is_relevant {
-			break
+			break 'outer
		}

		ancestry.push((hash, header));
	}
	}

	ancestry.reverse();
	Ok(ancestry)
}
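The hunks above replace the unlabeled `loop` with a labeled `'outer: loop` so that `break` statements further down can name the loop they exit. A minimal, standalone sketch of the pattern (the function and names below are illustrative, not taken from `import.rs`):

```rust
fn first_pair_summing_to(target: i32, xs: &[i32]) -> Option<(i32, i32)> {
    let mut found = None;

    // Label the outer loop so code inside the inner loop can terminate both at once.
    'outer: for &a in xs {
        for &b in xs {
            if a + b == target {
                found = Some((a, b));
                // An unlabeled `break` would only leave the inner loop;
                // `break 'outer` leaves the labeled (outer) loop as well.
                break 'outer;
            }
        }
    }

    found
}

fn main() {
    assert_eq!(first_pair_summing_to(7, &[1, 2, 3, 4]), Some((3, 4)));
    println!("labeled break works as expected");
}
```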
@@ -313,7 +312,8 @@ async fn cache_session_info_for_head(
			return Ok(Err(SessionsUnavailable));
		}
		Some(s) => {
-			session_window.session_info.drain(..overlap_start as usize);
+			let outdated = std::cmp::min(overlap_start as usize, session_window.session_info.len());
+			session_window.session_info.drain(..outdated);
			session_window.session_info.extend(s);
			session_window.earliest_session = Some(window_start);
		}
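Clamping the drain bound matters because `Vec::drain` panics when the end of the range is greater than the vector's length. A small self-contained sketch of the failure mode and the fix (variable names are illustrative):

```rust
fn main() {
    let mut session_info = vec!["s0", "s1", "s2"];
    let overlap_start: usize = 5; // can exceed the stored window after a large session jump

    // session_info.drain(..overlap_start); // would panic: end (5) > len (3)

    // Clamping the bound first turns the drain into a safe removal instead of a panic.
    let outdated = std::cmp::min(overlap_start, session_info.len());
    session_info.drain(..outdated);

    assert!(session_info.is_empty());
    println!("drained {} outdated entries without panicking", outdated);
}
```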
@@ -813,7 +813,7 @@ mod tests {
		// Finalized block should be omitted. The head provided to `determine_new_blocks`
		// should be included.
-		let expected_ancestry = (13..18)
+		let expected_ancestry = (13..=18)
			.map(|n| chain.header_by_number(n).map(|h| (h.hash(), h.clone())).unwrap())
			.rev()
			.collect::<Vec<_>>();
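The switch from `(13..18)` to `(13..=18)` makes the expected ancestry include block 18 itself, matching the comment that the head passed to `determine_new_blocks` should be included. A tiny illustration of the difference between the two range forms:

```rust
fn main() {
    // Half-open range: stops before 18, so the head would be missing.
    let exclusive: Vec<u32> = (13..18).collect();
    assert_eq!(exclusive, vec![13, 14, 15, 16, 17]);

    // Inclusive range: 18 (the head in the test) is part of the expected ancestry.
    let inclusive: Vec<u32> = (13..=18).collect();
    assert_eq!(inclusive, vec![13, 14, 15, 16, 17, 18]);
}
```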
@@ -880,7 +880,7 @@ mod tests {
		});

-		futures::executor::block_on(futures::future::select(test_fut, aux_fut));
+		futures::executor::block_on(futures::future::join(test_fut, aux_fut));
	}

	#[test]
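Replacing `futures::future::select` with `futures::future::join` changes what the test actually waits for: `select` resolves as soon as either the test future or the auxiliary test future finishes, while `join` drives both to completion, so assertions on both sides are guaranteed to run. A minimal sketch of the difference, independent of the subsystem code:

```rust
use futures::executor::block_on;
use futures::future;

fn main() {
    let fast = async { "fast done" };
    let slow = async {
        // Pretend this is the auxiliary future that still has expectations to check.
        futures::future::ready(()).await;
        "slow done"
    };

    // `join` waits for BOTH futures and yields both outputs.
    let (a, b) = block_on(future::join(fast, slow));
    assert_eq!((a, b), ("fast done", "slow done"));

    // `select` completes as soon as ONE side finishes; the other may be dropped unfinished.
    let fast = future::ready(1u32);
    let slow = future::pending::<u32>();
    match block_on(future::select(fast, slow)) {
        future::Either::Left((value, _unfinished_slow)) => assert_eq!(value, 1),
        future::Either::Right(_) => unreachable!("the pending future never completes"),
    }
}
```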
@@ -913,7 +913,7 @@ mod tests {
		// Known block should be omitted. The head provided to `determine_new_blocks`
		// should be included.
-		let expected_ancestry = (16..18)
+		let expected_ancestry = (16..=18)
			.map(|n| chain.header_by_number(n).map(|h| (h.hash(), h.clone())).unwrap())
			.rev()
			.collect::<Vec<_>>();

@@ -957,7 +957,7 @@ mod tests {
			}
		});

-		futures::executor::block_on(futures::future::select(test_fut, aux_fut));
+		futures::executor::block_on(futures::future::join(test_fut, aux_fut));
	}

	#[test]

@@ -1266,7 +1266,7 @@ mod tests {
			);
		});

-		futures::executor::block_on(futures::future::select(test_fut, aux_fut));
+		futures::executor::block_on(futures::future::join(test_fut, aux_fut));
	}

	#[test]

@@ -1371,7 +1371,7 @@ mod tests {
			);
		});

-		futures::executor::block_on(futures::future::select(test_fut, aux_fut));
+		futures::executor::block_on(futures::future::join(test_fut, aux_fut));
	}

	#[test]

@@ -1451,7 +1451,7 @@ mod tests {
			);
		});

-		futures::executor::block_on(futures::future::select(test_fut, aux_fut));
+		futures::executor::block_on(futures::future::join(test_fut, aux_fut));
	}

	fn cache_session_info_test(

@@ -1484,7 +1484,7 @@ mod tests {
		&header,
	)
	.await
	.unwrap()
	.unwrap();

-	assert_eq!(window.earliest_session, Some(0));
+	assert_eq!(window.earliest_session, Some(start_session));
	assert_eq!(
		window.session_info,
		(start_session..=session).map(dummy_session_info).collect::<Vec<_>>(),

@@ -1519,7 +1519,7 @@ mod tests {
			}
		});

-		futures::executor::block_on(futures::future::select(test_fut, aux_fut));
+		futures::executor::block_on(futures::future::join(test_fut, aux_fut));
	}

	#[test]

@@ -1679,7 +1679,7 @@ mod tests {
			}
		});

-		futures::executor::block_on(futures::future::select(test_fut, aux_fut));
+		futures::executor::block_on(futures::future::join(test_fut, aux_fut));
	}

	#[test]

@@ -1744,6 +1744,6 @@ mod tests {
			);
		});

-		futures::executor::block_on(futures::future::select(test_fut, aux_fut));
+		futures::executor::block_on(futures::future::join(test_fut, aux_fut));
	}
}
node/core/approval-voting/src/lib.rs
@@ -76,11 +76,23 @@ const LOG_TARGET: &str = "approval_voting";
/// The approval voting subsystem.
pub struct ApprovalVotingSubsystem<T> {
-	keystore: LocalKeystore,
+	keystore: Arc<LocalKeystore>,
	slot_duration_millis: u64,
	db: Arc<T>,
}

+impl<T> ApprovalVotingSubsystem<T> {
+	/// Create a new approval voting subsystem with the given keystore, slot duration,
+	/// and underlying DB.
+	pub fn new(keystore: Arc<LocalKeystore>, slot_duration_millis: u64, db: Arc<T>) -> Self {
+		ApprovalVotingSubsystem {
+			keystore,
+			slot_duration_millis,
+			db,
+		}
+	}
+}
+
impl<T, C> Subsystem<C> for ApprovalVotingSubsystem<T>
	where
		T: AuxStore + Send + Sync + 'static,
		C: SubsystemContext<Message = ApprovalVotingMessage>
{
	fn start(self, ctx: C) -> SpawnedSubsystem {
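Switching the `keystore` field from `LocalKeystore` to `Arc<LocalKeystore>` lets one keystore instance be handed to the subsystem and shared with its internal `State` (which gets the same change below) without moving or re-opening it. A small sketch of that ownership pattern, using a placeholder `Keystore` type rather than the real `LocalKeystore`:

```rust
use std::sync::Arc;

// Stand-in for `LocalKeystore`; the real type lives in `sc-keystore`.
struct Keystore {
    name: &'static str,
}

struct Subsystem {
    keystore: Arc<Keystore>,
}

struct State {
    keystore: Arc<Keystore>,
}

impl Subsystem {
    fn new(keystore: Arc<Keystore>) -> Self {
        Subsystem { keystore }
    }

    // Both the subsystem and the running state hold the same underlying keystore.
    fn run(&self) -> State {
        State { keystore: self.keystore.clone() }
    }
}

fn main() {
    let keystore = Arc::new(Keystore { name: "in-memory" });
    let subsystem = Subsystem::new(keystore.clone());
    let state = subsystem.run();

    // Three handles (local, subsystem, state), one keystore.
    assert_eq!(Arc::strong_count(&keystore), 3);
    println!("shared keystore: {}", state.keystore.name);
}
```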
@@ -229,7 +241,7 @@ use approval_db_v1_reader::ApprovalDBV1Reader;
struct State<T> {
	session_window: import::RollingSessionWindow,
-	keystore: LocalKeystore,
+	keystore: Arc<LocalKeystore>,
	slot_duration_millis: u64,
	db: T,
	clock: Box<dyn Clock + Send + Sync>,
@@ -529,10 +541,10 @@ async fn handle_approved_ancestor(
	db: &impl DBReader,
	target: Hash,
	lower_bound: BlockNumber,
-) -> SubsystemResult<Option<Hash>> {
+) -> SubsystemResult<Option<(Hash, BlockNumber)>> {
	let mut all_approved_max = None;

-	let block_number = {
+	let target_number = {
		let (tx, rx) = oneshot::channel();

		ctx.send_message(ChainApiMessage::BlockNumber(target, tx).into()).await;
@@ -544,17 +556,17 @@ async fn handle_approved_ancestor(
		}
	};

-	if block_number <= lower_bound { return Ok(None) }
+	if target_number <= lower_bound { return Ok(None) }

	// request ancestors up to but not including the lower bound,
	// as a vote on the lower bound is implied if we cannot find
	// anything else.
-	let ancestry = if block_number > lower_bound + 1 {
+	let ancestry = if target_number > lower_bound + 1 {
		let (tx, rx) = oneshot::channel();

		ctx.send_message(ChainApiMessage::Ancestors {
			hash: target,
-			k: (block_number - (lower_bound + 1)) as usize,
+			k: (target_number - (lower_bound + 1)) as usize,
			response_channel: tx,
		}.into()).await;
@@ -566,7 +578,7 @@ async fn handle_approved_ancestor(
		Vec::new()
	};

-	for block_hash in std::iter::once(target).chain(ancestry) {
+	for (i, block_hash) in std::iter::once(target).chain(ancestry).enumerate() {
		// Block entries should be present as the assumption is that
		// nothing here is finalized. If we encounter any missing block
		// entries we can fail.
@@ -577,7 +589,9 @@ async fn handle_approved_ancestor(
		if entry.is_fully_approved() {
			if all_approved_max.is_none() {
-				all_approved_max = Some(block_hash);
+				// First iteration of the loop is target, i = 0. After that,
+				// ancestry is moving backwards.
+				all_approved_max = Some((block_hash, target_number - i as BlockNumber));
			}
		} else {
			all_approved_max = None;
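With the return type widened to `Option<(Hash, BlockNumber)>`, the loop needs the block number of each entry it inspects; since the iterator starts at `target` and walks backwards through the ancestry, the number of the `i`-th element is `target_number - i`, as the new comment states. A self-contained sketch of that indexing (plain strings and integers stand in for hashes and block entries):

```rust
type BlockNumber = u32;

fn main() {
    let target_number: BlockNumber = 18;
    // Hashes of the target and its ancestors, newest first, as the loop would see them.
    let chain: Vec<&str> = vec!["hash_18", "hash_17", "hash_16", "hash_15"];

    let mut numbered = Vec::new();
    for (i, hash) in chain.iter().enumerate() {
        // i == 0 is the target itself; each further step walks one block back.
        let number = target_number - i as BlockNumber;
        numbered.push((*hash, number));
    }

    assert_eq!(numbered[0], ("hash_18", 18));
    assert_eq!(numbered[3], ("hash_15", 15));
    println!("{:?}", numbered);
}
```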
node/core/approval-voting/src/tests.rs

@@ -184,7 +184,7 @@ impl DBReader for TestStore {
fn blank_state() -> State<TestStore> {
	State {
		session_window: import::RollingSessionWindow::default(),
-		keystore: LocalKeystore::in_memory(),
+		keystore: Arc::new(LocalKeystore::in_memory()),
		slot_duration_millis: SLOT_DURATION_MILLIS,
		db: TestStore::default(),
		clock: Box::new(MockClock::default()),

@@ -1490,7 +1490,7 @@ fn approved_ancestor_all_approved() {
	let test_fut = Box::pin(async move {
		assert_eq!(
			handle_approved_ancestor(&mut ctx, &state.db, block_hash_4, 0).await.unwrap(),
-			Some(block_hash_4),
+			Some((block_hash_4, 4)),
		)
	});

@@ -1572,7 +1572,7 @@ fn approved_ancestor_missing_approval() {
	let test_fut = Box::pin(async move {
		assert_eq!(
			handle_approved_ancestor(&mut ctx, &state.db, block_hash_4, 0).await.unwrap(),
-			Some(block_hash_2),
+			Some((block_hash_2, 2)),
		)
	});
node/core/candidate-validation/src/lib.rs

@@ -445,9 +445,9 @@ fn validate_candidate_exhaustive<B: ValidationBackend, S: SpawnNamed + 'static>(
	match B::validate(backend_arg, &validation_code, params, spawn) {
		Err(ValidationError::InvalidCandidate(WasmInvalidCandidate::Timeout)) =>
			Ok(ValidationResult::Invalid(InvalidCandidate::Timeout)),
-		Err(ValidationError::InvalidCandidate(WasmInvalidCandidate::ParamsTooLarge(l))) =>
+		Err(ValidationError::InvalidCandidate(WasmInvalidCandidate::ParamsTooLarge(l, _))) =>
			Ok(ValidationResult::Invalid(InvalidCandidate::ParamsTooLarge(l as u64))),
-		Err(ValidationError::InvalidCandidate(WasmInvalidCandidate::CodeTooLarge(l))) =>
+		Err(ValidationError::InvalidCandidate(WasmInvalidCandidate::CodeTooLarge(l, _))) =>
			Ok(ValidationResult::Invalid(InvalidCandidate::CodeTooLarge(l as u64))),
		Err(ValidationError::InvalidCandidate(WasmInvalidCandidate::BadReturn)) =>
			Ok(ValidationResult::Invalid(InvalidCandidate::BadReturn)),
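The `ParamsTooLarge(l, _)` and `CodeTooLarge(l, _)` patterns suggest the upstream error variants gained a second field; binding the extra value with `_` keeps the arm compiling while still forwarding the size `l`. A generic sketch of updating a match after such a variant change (the enums below are placeholders, not the real `WasmInvalidCandidate`/`InvalidCandidate` types):

```rust
// Placeholder for an upstream error whose variant grew an extra field (e.g. the allowed limit).
enum WasmError {
    ParamsTooLarge(usize, usize),
}

// Placeholder for the local error type the subsystem reports.
#[derive(Debug, PartialEq)]
enum LocalInvalid {
    ParamsTooLarge(u64),
}

fn convert(err: WasmError) -> LocalInvalid {
    match err {
        // Bind the size we care about and ignore the new field with `_`.
        WasmError::ParamsTooLarge(actual, _) => LocalInvalid::ParamsTooLarge(actual as u64),
    }
}

fn main() {
    let got = convert(WasmError::ParamsTooLarge(5 * 1024 * 1024, 4 * 1024 * 1024));
    assert_eq!(got, LocalInvalid::ParamsTooLarge(5 * 1024 * 1024));
}
```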
node/core/proposer/src/lib.rs

@@ -79,7 +79,7 @@ where
		+ Send + Sync,
-	Client::Api: BlockBuilderApi<Block> + ApiExt<Block, Error = sp_blockchain::Error>,
+	Client::Api: BlockBuilderApi<Block> + ApiExt<Block>,
	Backend: 'static + sc_client_api::Backend<Block, State = sp_api::StateBackendFor<Client, Block>>,
	// Rust bug: https://github.com/rust-lang/rust/issues/24159

@@ -133,7 +133,7 @@ where
		+ Send + Sync,
-	Client::Api: BlockBuilderApi<Block> + ApiExt<Block, Error = sp_blockchain::Error>,
+	Client::Api: BlockBuilderApi<Block> + ApiExt<Block>,
	Backend: 'static + sc_client_api::Backend<Block, State = sp_api::StateBackendFor<Client, Block>>,
	// Rust bug: https://github.com/rust-lang/rust/issues/24159

@@ -179,7 +179,7 @@ where
		+ Send + Sync,
-	Client::Api: BlockBuilderApi<Block> + ApiExt<Block, Error = sp_blockchain::Error>,
+	Client::Api: BlockBuilderApi<Block> + ApiExt<Block>,
	Backend: 'static + sc_client_api::Backend<Block, State = sp_api::StateBackendFor<Client, Block>>,
	// Rust bug: https://github.com/rust-lang/rust/issues/24159
node/core/runtime-api/src/lib.rs

@@ -453,8 +453,6 @@ mod tests {
	sp_api::mock_impl_runtime_apis! {
		impl ParachainHost<Block> for MockRuntimeApi {
-			type Error = sp_api::ApiError;
-
			fn validators(&self) -> Vec<ValidatorId> {
				self.validators.clone()
			}
node/network/protocol/src/lib.rs

@@ -70,10 +70,7 @@ impl From<sc_network::ObservedRole> for ObservedRole {
		match role {
			sc_network::ObservedRole::Light => ObservedRole::Light,
			sc_network::ObservedRole::Authority => ObservedRole::Authority,
-			sc_network::ObservedRole::Full
-				| sc_network::ObservedRole::OurSentry
-				| sc_network::ObservedRole::OurGuardedAuthority
-				=> ObservedRole::Full,
+			sc_network::ObservedRole::Full => ObservedRole::Full,
		}
	}
}
node/overseer/src/lib.rs (diff collapsed)
node/service/Cargo.toml

@@ -19,6 +19,7 @@ sc-executor = { git = "https://github.com/paritytech/substrate", branch = "maste
sc-finality-grandpa-warp-sync = { git = "https://github.com/paritytech/substrate", branch = "master", optional = true }
sc-network = { git = "https://github.com/paritytech/substrate", branch = "master" }
sc-transaction-pool = { git = "https://github.com/paritytech/substrate", branch = "master" }
+sc-keystore = { git = "https://github.com/paritytech/substrate", branch = "master" }
service = { package = "sc-service", git = "https://github.com/paritytech/substrate", branch = "master", default-features = false }
telemetry = { package = "sc-telemetry", git = "https://github.com/paritytech/substrate", branch = "master" }

@@ -95,6 +96,7 @@ polkadot-node-core-runtime-api = { path = "../core/runtime-api", optional = true
polkadot-pov-distribution = { path = "../network/pov-distribution", optional = true }
polkadot-statement-distribution = { path = "../network/statement-distribution", optional = true }
polkadot-approval-distribution = { path = "../network/approval-distribution", optional = true }
+polkadot-node-core-approval-voting = { path = "../core/approval-voting", optional = true }

[dev-dependencies]
polkadot-test-client = { path = "../test/client" }

@@ -132,4 +134,5 @@ real-overseer = [
	"polkadot-pov-distribution",
	"polkadot-statement-distribution",
	"polkadot-approval-distribution",
+	"polkadot-node-core-approval-voting",
]
node/service/src/chain_spec.rs

@@ -870,7 +870,6 @@ fn rococo_staging_testnet_config_genesis(wasm_binary: &[u8]) -> rococo_runtime::
			group_rotation_frequency: 20,
			chain_availability_period: 4,
			thread_availability_period: 4,
-			no_show_slots: 10,
			max_upward_queue_count: 8,
			max_upward_queue_size: 8 * 1024,
			max_downward_message_size: 1024,

@@ -893,6 +892,11 @@ fn rococo_staging_testnet_config_genesis(wasm_binary: &[u8]) -> rococo_runtime::
			hrmp_max_parachain_outbound_channels: 4,
			hrmp_max_parathread_outbound_channels: 4,
			hrmp_max_message_num_per_candidate: 5,
+			no_show_slots: 2,
+			n_delay_tranches: 25,
+			needed_approvals: 2,
+			relay_vrf_modulo_samples: 10,
+			zeroth_delay_tranche_width: 0,
			..Default::default()
		},
	}),

@@ -1376,7 +1380,6 @@ pub fn rococo_testnet_genesis(
			group_rotation_frequency: 20,
			chain_availability_period: 4,
			thread_availability_period: 4,
-			no_show_slots: 10,
			max_upward_queue_count: 8,
			max_upward_queue_size: 8 * 1024,
			max_downward_message_size: 1024,

@@ -1399,6 +1402,11 @@ pub fn rococo_testnet_genesis(
			hrmp_max_parachain_outbound_channels: 4,
			hrmp_max_parathread_outbound_channels: 4,
			hrmp_max_message_num_per_candidate: 5,
+			no_show_slots: 2,
+			n_delay_tranches: 25,
+			needed_approvals: 2,
+			relay_vrf_modulo_samples: 10,
+			zeroth_delay_tranche_width: 0,
			..Default::default()
		},
	}),
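Both genesis hunks rely on struct update syntax: the configuration spells out only the parameters it overrides (the new approval-related fields among them) and pulls everything else from `..Default::default()`. A hedged sketch of the idiom with a hypothetical config struct, not the actual `rococo_runtime` type:

```rust
// Hypothetical stand-in for a parachains host configuration struct.
#[derive(Debug, Default, PartialEq)]
struct HostConfig {
    group_rotation_frequency: u32,
    no_show_slots: u32,
    n_delay_tranches: u32,
    needed_approvals: u32,
    relay_vrf_modulo_samples: u32,
    zeroth_delay_tranche_width: u32,
    max_downward_message_size: u32,
}

fn main() {
    // Only the fields being overridden are listed; the rest come from `Default`.
    let config = HostConfig {
        group_rotation_frequency: 20,
        no_show_slots: 2,
        n_delay_tranches: 25,
        needed_approvals: 2,
        relay_vrf_modulo_samples: 10,
        zeroth_delay_tranche_width: 0,
        ..Default::default()
    };

    assert_eq!(config.max_downward_message_size, 0); // filled in by Default
    assert_eq!(config.needed_approvals, 2);
    println!("{:?}", config);
}
```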
node/service/src/client.rs

@@ -30,7 +30,7 @@ use consensus_common::BlockStatus;
/// A set of APIs that polkadot-like runtimes must implement.
pub trait RuntimeApiCollection:
	sp_transaction_pool::runtime_api::TaggedTransactionQueue<Block>
-	+ sp_api::ApiExt<Block, Error = sp_blockchain::Error>
+	+ sp_api::ApiExt<Block>
	+ babe_primitives::BabeApi<Block>
	+ grandpa_primitives::GrandpaApi<Block>
	+ ParachainHost<Block>

@@ -48,7 +48,7 @@ where
impl<Api> RuntimeApiCollection for Api
where
	Api: sp_transaction_pool::runtime_api::TaggedTransactionQueue<Block>
-		+ sp_api::ApiExt<Block, Error = sp_blockchain::Error>
+		+ sp_api::ApiExt<Block>
		+ babe_primitives::BabeApi<Block>
		+ grandpa_primitives::GrandpaApi<Block>
		+ ParachainHost<Block>

@@ -71,7 +71,6 @@ pub trait AbstractClient<Block, Backend>:
	+ HeaderBackend<Block>
	+ CallApiAt<
		Block,
-		Error = sp_blockchain::Error,
		StateBackend = Backend::State
	>
	where

@@ -90,7 +89,6 @@ impl<Block, Backend, Client> AbstractClient<Block, Backend> for Client
		+ Sized + Send + Sync
		+ CallApiAt<
			Block,
-			Error = sp_blockchain::Error,
			StateBackend = Backend::State
		>,
	Client::Api: RuntimeApiCollection<StateBackend = Backend::State>,
node/service/src/grandpa_support.rs

@@ -16,14 +16,146 @@
//! Polkadot-specific GRANDPA integration utilities.

-#[cfg(feature = "full-node")]
-use polkadot_primitives::v1::Hash;

use std::sync::Arc;

use polkadot_primitives::v1::{Block as PolkadotBlock, Header as PolkadotHeader, BlockNumber, Hash};
use polkadot_subsystem::messages::ApprovalVotingMessage;
use sp_runtime::traits::{Block as BlockT, NumberFor};
use sp_runtime::generic::BlockId;
use sp_runtime::traits::Header as _;
use prometheus_endpoint::{self, Registry};
use polkadot_overseer::OverseerHandler;

use futures::channel::oneshot;

/// A custom GRANDPA voting rule that acts as a diagnostic for the approval
/// voting subsystem's desired votes.
///
/// The practical effect of this voting rule is to implement a fixed delay of
/// blocks and to issue a prometheus metric on the lag behind the head that
/// approval checking would indicate.
#[cfg(feature = "full-node")]
#[derive(Clone)]
pub(crate) struct ApprovalCheckingDiagnostic {
	checking_lag: Option<prometheus_endpoint::Histogram>,
	overseer: OverseerHandler,
}

#[cfg(feature = "full-node")]
impl ApprovalCheckingDiagnostic {
	/// Create a new approval checking diagnostic voting rule.
	pub fn new(overseer: OverseerHandler, registry: Option<&Registry>)
		-> Result<Self, prometheus_endpoint::PrometheusError>
	{
		Ok(ApprovalCheckingDiagnostic {
			checking_lag: if let Some(registry) = registry {
				Some(prometheus_endpoint::register(
					prometheus_endpoint::Histogram::with_opts(
						prometheus_endpoint::HistogramOpts::new(
							"approval_checking_finality_lag",
							"How far behind the head of the chain the Approval Checking protocol wants to vote",
						).buckets(vec![1.0, 2.0, 3.0, 4.0, 5.0, 10.0, 20.0, 30.0, 40.0, 50.0])
					)?,
					registry,
				)?)
			} else {
				None
			},
			overseer,
		})
	}
}

#[cfg(feature = "full-node")]
impl<B> grandpa::VotingRule<PolkadotBlock, B> for ApprovalCheckingDiagnostic
	where B: sp_blockchain::HeaderBackend<PolkadotBlock>
{
	fn restrict_vote(
		&self,
		backend: Arc<B>,
		base: &PolkadotHeader,
		_best_target: &PolkadotHeader,
		current_target: &PolkadotHeader,
	) -> grandpa::VotingRuleResult<PolkadotBlock> {
		// always wait 50 blocks behind the head to finalize.
		const DIAGNOSTIC_GRANDPA_DELAY: BlockNumber = 50;

		let aux = || {
			let find_target = |target_number: BlockNumber, current_header: &PolkadotHeader| {
				let mut target_hash = current_header.hash();
				let mut target_header = current_header.clone();

				loop {
					if *target_header.number() < target_number {
						unreachable!(
							"we are traversing backwards from a known block; \