Commit 8c2199dd authored by David's avatar David Committed by GitHub

Snapshot restoration overhaul (#11219)

* Comments and todos
Use `snapshot_sync` as logging target

* fix compilation

* More todos, more logs

* Fix picking snapshot peer: prefer the one with the highest block number
More docs, comments, todos

* Adjust WAIT_PEERS_TIMEOUT to be a multiple of MAINTAIN_SYNC_TIMER to try to fix snapshot startup problems
Docs, todos, comments

* Tabs

* Formatting

* Don't build new rlp::EMPTY_LIST_RLP instances

* Dial down debug logging

* Don't warn about missing hashes in the manifest: it's normal
Log client version on peer connect

* Cleanup

* Do not skip snapshots further away than 30k block from the highest block seen

Currently we look for peers that seed snapshots that are close to the highest block seen on the network (where "close" means within 30k blocks). When a node starts up we wait for some time (5sec, increased here to 10sec) to let peers connect and if we have found a suitable peer to sync a snapshot from at the end of that delay, we start the download; if none is found and --warp-barrier is used we stall, otherwise we start a slow-sync.
When looking for a suitable snapshot, we use the highest block seen on the network to check if a peer has a snapshot that is within 30k blocks of that highest block number. This means that in a situation where all available snapshots are older than that, we will often fail to start a snapshot at all. What's worse is that the longer we delay starting a snapshot sync (to let more peers connect, in the hope of finding a good snapshot), the more likely we are to have seen a high block and thus the more likely we become to accept a snapshot.
This commit removes the comparison with the highest block number seen entirely and picks the best snapshot we find within 10sec.

* lockfile

* Add a `ChunkType::Dupe` variant so that we do not disconnect a peer if they happen to send us a duplicate chunk (just ignore the chunk and keep going)
Resolve some documentation todos, add more

* tweak log message

* Don't warp sync twice
Check if our own block is beyond the given warp barrier (can happen after we've completed a warp sync but are not quite yet synced up to the tip) and if so, don't sync.
More docs, resolve todos.
Dial down some `sync` debug level logging to trace

* Avoid iterating over all snapshot block/state hashes to find the next work item

Use a HashSet instead of a Vec and remove items from the set as chunks are processed. Calculate and store the total number of chunks in the `Snapshot` struct instead of counting pending chunks each time.

* Address review grumbles

* Log correct number of bytes written to disk

* Revert ChunkType::Dup change

* whitespace grumble

* Cleanup debugging code

* Fix docs

* Fix import and a typo

* Fix test impl

* Use `indexmap::IndexSet` to ensure chunk hashes are accessed in order

* Revert increased SNAPSHOT_MANIFEST_TIMEOUT: 5sec should be enough
parent 6b17e321
Pipeline #55641 passed with stages
in 17 minutes and 17 seconds
This diff is collapsed.
......@@ -452,7 +452,6 @@ impl StateRebuilder {
StateDB::commit_bloom(&mut batch, bloom_journal)?;
self.db.inject(&mut batch)?;
backing.write_buffered(batch);
trace!(target: "snapshot", "current state root: {:?}", self.state_root);
Ok(())
}
......
......@@ -161,6 +161,7 @@ impl Restoration {
if let Some(ref mut writer) = self.writer.as_mut() {
writer.write_state_chunk(hash, chunk)?;
trace!(target: "snapshot", "Wrote {}/{} bytes of state to db/disk. Current state root: {:?}", len, chunk.len(), self.state.state_root());
}
self.state_chunks_left.remove(&hash);
......@@ -676,7 +677,6 @@ impl<C> Service<C> where C: SnapshotClient + ChainInfo {
} else if manifest.state_hashes.contains(&hash) {
true
} else {
warn!(target: "snapshot", "Hash of the content of {:?} not present in the manifest block/state hashes.", path);
return Ok(false);
};
......@@ -788,7 +788,7 @@ impl<C> Service<C> where C: SnapshotClient + ChainInfo {
false => Ok(())
}
}
other => other.map(drop),
Err(e) => Err(e)
};
(res, db)
}
......
......@@ -52,8 +52,7 @@ pub trait SnapshotService : Sync + Send {
fn status(&self) -> RestorationStatus;
/// Begin snapshot restoration.
/// If restoration in-progress, this will reset it.
/// From this point on, any previous snapshot may become unavailable.
/// If a restoration is in progress, this will reset it and clear all data.
fn begin_restore(&self, manifest: ManifestData);
/// Abort an in-progress restoration if there is one.
......
......@@ -19,6 +19,7 @@ ethcore-private-tx = { path = "../private-tx" }
ethereum-types = "0.8.0"
fastmap = { path = "../../util/fastmap" }
futures = "0.1"
indexmap = "1.3.0"
keccak-hash = "0.4.0"
light = { package = "ethcore-light", path = "../light" }
log = "0.4"
......
......@@ -295,7 +295,7 @@ pub struct EthSync {
light_subprotocol_name: [u8; 3],
/// Priority tasks notification channel
priority_tasks: Mutex<mpsc::Sender<PriorityTask>>,
/// for state tracking
/// Track the sync state: are we importing or verifying blocks?
is_major_syncing: Arc<AtomicBool>
}
......
......@@ -309,7 +309,7 @@ impl BlockDownloader {
}
}
}
// Update the highest block number seen on the network from the header.
if let Some((number, _)) = last_header {
if self.highest_block.as_ref().map_or(true, |n| number > *n) {
self.highest_block = Some(number);
......
......@@ -43,7 +43,7 @@ use ethereum_types::{H256, U256};
use keccak_hash::keccak;
use network::PeerId;
use network::client_version::ClientVersion;
use log::{debug, trace, error};
use log::{debug, trace, error, warn};
use rlp::Rlp;
use common_types::{
BlockNumber,
......@@ -76,14 +76,14 @@ impl SyncHandler {
SignedPrivateTransactionPacket => SyncHandler::on_signed_private_transaction(sync, io, peer, &rlp),
PrivateStatePacket => SyncHandler::on_private_state_data(sync, io, peer, &rlp),
_ => {
debug!(target: "sync", "{}: Unknown packet {}", peer, packet_id.id());
trace!(target: "sync", "{}: Unknown packet {}", peer, packet_id.id());
Ok(())
}
};
match result {
Err(DownloaderImportError::Invalid) => {
debug!(target:"sync", "{} -> Invalid packet {}", peer, packet_id.id());
trace!(target:"sync", "{} -> Invalid packet {}", peer, packet_id.id());
io.disable_peer(peer);
sync.deactivate_peer(io, peer);
},
......@@ -96,7 +96,7 @@ impl SyncHandler {
},
}
} else {
debug!(target: "sync", "{}: Unknown packet {}", peer, packet_id);
trace!(target: "sync", "{}: Unknown packet {}", peer, packet_id);
}
}
......@@ -117,14 +117,14 @@ impl SyncHandler {
sync.active_peers.remove(&peer_id);
if sync.state == SyncState::SnapshotManifest {
// Check if we are asking other peers for
// the snapshot manifest as well.
// If not, return to initial state
let still_asking_manifest = sync.peers.iter()
// Check if we are asking other peers for a snapshot manifest as well. If not,
// set our state to initial state (`Idle` or `WaitingPeers`).
let still_seeking_manifest = sync.peers.iter()
.filter(|&(id, p)| sync.active_peers.contains(id) && p.asking == PeerAsking::SnapshotManifest)
.next().is_none();
.next().is_some();
if still_asking_manifest {
if !still_seeking_manifest {
warn!(target: "snapshot_sync", "The peer we were downloading a snapshot from ({}) went away. Retrying.", peer_id);
sync.state = ChainSync::get_init_state(sync.warp_sync, io.chain());
}
}
......@@ -371,18 +371,18 @@ impl SyncHandler {
let block_set = sync.peers.get(&peer_id).and_then(|p| p.block_set).unwrap_or(BlockSet::NewBlocks);
if !sync.reset_peer_asking(peer_id, PeerAsking::BlockHeaders) {
debug!(target: "sync", "{}: Ignored unexpected headers", peer_id);
trace!(target: "sync", "{}: Ignored unexpected headers", peer_id);
return Ok(());
}
let expected_hash = match expected_hash {
Some(hash) => hash,
None => {
debug!(target: "sync", "{}: Ignored unexpected headers (expected_hash is None)", peer_id);
trace!(target: "sync", "{}: Ignored unexpected headers (expected_hash is None)", peer_id);
return Ok(());
}
};
if !allowed {
debug!(target: "sync", "{}: Ignored unexpected headers (peer not allowed)", peer_id);
trace!(target: "sync", "{}: Ignored unexpected headers (peer not allowed)", peer_id);
return Ok(());
}
......@@ -466,12 +466,12 @@ impl SyncHandler {
/// Called when snapshot manifest is downloaded from a peer.
fn on_snapshot_manifest(sync: &mut ChainSync, io: &mut dyn SyncIo, peer_id: PeerId, r: &Rlp) -> Result<(), DownloaderImportError> {
if !sync.peers.get(&peer_id).map_or(false, |p| p.can_sync()) {
trace!(target: "sync", "Ignoring snapshot manifest from unconfirmed peer {}", peer_id);
trace!(target: "snapshot_sync", "Ignoring snapshot manifest from unconfirmed peer {}", peer_id);
return Ok(());
}
sync.clear_peer_download(peer_id);
if !sync.reset_peer_asking(peer_id, PeerAsking::SnapshotManifest) || sync.state != SyncState::SnapshotManifest {
trace!(target: "sync", "{}: Ignored unexpected/expired manifest", peer_id);
trace!(target: "snapshot_sync", "{}: Ignored unexpected/expired manifest", peer_id);
return Ok(());
}
......@@ -482,10 +482,12 @@ impl SyncHandler {
.map_or(false, |(l, h)| manifest.version >= l && manifest.version <= h);
if !is_supported_version {
trace!(target: "sync", "{}: Snapshot manifest version not supported: {}", peer_id, manifest.version);
warn!(target: "snapshot_sync", "{}: Snapshot manifest version not supported: {}", peer_id, manifest.version);
return Err(DownloaderImportError::Invalid);
}
sync.snapshot.reset_to(&manifest, &keccak(manifest_rlp.as_raw()));
debug!(target: "snapshot_sync", "{}: Peer sent a snapshot manifest we can use. Block number #{}, block chunks: {}, state chunks: {}",
peer_id, manifest.block_number, manifest.block_hashes.len(), manifest.state_hashes.len());
io.snapshot_service().begin_restore(manifest);
sync.state = SyncState::SnapshotData;
......@@ -495,12 +497,12 @@ impl SyncHandler {
/// Called when snapshot data is downloaded from a peer.
fn on_snapshot_data(sync: &mut ChainSync, io: &mut dyn SyncIo, peer_id: PeerId, r: &Rlp) -> Result<(), DownloaderImportError> {
if !sync.peers.get(&peer_id).map_or(false, |p| p.can_sync()) {
trace!(target: "sync", "Ignoring snapshot data from unconfirmed peer {}", peer_id);
trace!(target: "snapshot_sync", "Ignoring snapshot data from unconfirmed peer {}", peer_id);
return Ok(());
}
sync.clear_peer_download(peer_id);
if !sync.reset_peer_asking(peer_id, PeerAsking::SnapshotData) || (sync.state != SyncState::SnapshotData && sync.state != SyncState::SnapshotWaiting) {
trace!(target: "sync", "{}: Ignored unexpected snapshot data", peer_id);
trace!(target: "snapshot_sync", "{}: Ignored unexpected snapshot data", peer_id);
return Ok(());
}
......@@ -508,12 +510,12 @@ impl SyncHandler {
let status = io.snapshot_service().status();
match status {
RestorationStatus::Inactive | RestorationStatus::Failed => {
trace!(target: "sync", "{}: Snapshot restoration aborted", peer_id);
trace!(target: "snapshot_sync", "{}: Snapshot restoration status: {:?}", peer_id, status);
sync.state = SyncState::WaitingPeers;
// only note bad if restoration failed.
if let (Some(hash), RestorationStatus::Failed) = (sync.snapshot.snapshot_hash(), status) {
trace!(target: "sync", "Noting snapshot hash {} as bad", hash);
debug!(target: "snapshot_sync", "Marking snapshot manifest hash {} as bad", hash);
sync.snapshot.note_bad(hash);
}
......@@ -521,30 +523,30 @@ impl SyncHandler {
return Ok(());
},
RestorationStatus::Initializing { .. } => {
trace!(target: "warp", "{}: Snapshot restoration is initializing", peer_id);
trace!(target: "snapshot_sync", "{}: Snapshot restoration is initializing. Can't accept data right now.", peer_id);
return Ok(());
}
RestorationStatus::Finalizing => {
trace!(target: "warp", "{}: Snapshot finalizing restoration", peer_id);
trace!(target: "snapshot_sync", "{}: Snapshot finalizing restoration. Can't accept data right now.", peer_id);
return Ok(());
}
RestorationStatus::Ongoing { .. } => {
trace!(target: "sync", "{}: Snapshot restoration is ongoing", peer_id);
trace!(target: "snapshot_sync", "{}: Snapshot restoration is ongoing", peer_id);
},
}
let snapshot_data: Bytes = r.val_at(0)?;
match sync.snapshot.validate_chunk(&snapshot_data) {
Ok(ChunkType::Block(hash)) => {
trace!(target: "sync", "{}: Processing block chunk", peer_id);
trace!(target: "snapshot_sync", "{}: Processing block chunk", peer_id);
io.snapshot_service().restore_block_chunk(hash, snapshot_data);
}
Ok(ChunkType::State(hash)) => {
trace!(target: "sync", "{}: Processing state chunk", peer_id);
trace!(target: "snapshot_sync", "{}: Processing state chunk", peer_id);
io.snapshot_service().restore_state_chunk(hash, snapshot_data);
}
Err(()) => {
trace!(target: "sync", "{}: Got bad snapshot chunk", peer_id);
trace!(target: "snapshot_sync", "{}: Got bad snapshot chunk", peer_id);
io.disconnect_peer(peer_id);
return Ok(());
}
......@@ -566,7 +568,7 @@ impl SyncHandler {
let warp_protocol = warp_protocol_version != 0;
let private_tx_protocol = warp_protocol_version >= PAR_PROTOCOL_VERSION_3.0;
let peer = PeerInfo {
protocol_version: protocol_version,
protocol_version,
network_id: r.val_at(1)?,
difficulty: Some(r.val_at(2)?),
latest_hash: r.val_at(3)?,
......@@ -595,7 +597,8 @@ impl SyncHandler {
latest:{}, \
genesis:{}, \
snapshot:{:?}, \
private_tx_enabled:{})",
private_tx_enabled:{}, \
client_version: {})",
peer_id,
peer.protocol_version,
peer.network_id,
......@@ -603,7 +606,8 @@ impl SyncHandler {
peer.latest_hash,
peer.genesis,
peer.snapshot_number,
peer.private_tx_enabled
peer.private_tx_enabled,
peer.client_version,
);
if io.is_expired() {
trace!(target: "sync", "Status packet from expired session {}:{}", peer_id, io.peer_version(peer_id));
......
This diff is collapsed.
......@@ -87,11 +87,11 @@ impl SyncRequester {
SyncRequester::send_request(sync, io, peer_id, PeerAsking::ForkHeader, GetBlockHeadersPacket, rlp.out());
}
/// Find some headers or blocks to download for a peer.
/// Find some headers or blocks to download from a peer.
pub fn request_snapshot_data(sync: &mut ChainSync, io: &mut dyn SyncIo, peer_id: PeerId) {
// find chunk data to download
if let Some(hash) = sync.snapshot.needed_chunk() {
if let Some(ref mut peer) = sync.peers.get_mut(&peer_id) {
if let Some(mut peer) = sync.peers.get_mut(&peer_id) {
peer.asking_snapshot_data = Some(hash.clone());
}
SyncRequester::request_snapshot_chunk(sync, io, peer_id, &hash);
......@@ -100,9 +100,8 @@ impl SyncRequester {
/// Request snapshot manifest from a peer.
pub fn request_snapshot_manifest(sync: &mut ChainSync, io: &mut dyn SyncIo, peer_id: PeerId) {
trace!(target: "sync", "{} <- GetSnapshotManifest", peer_id);
let rlp = RlpStream::new_list(0);
SyncRequester::send_request(sync, io, peer_id, PeerAsking::SnapshotManifest, GetSnapshotManifestPacket, rlp.out());
trace!(target: "sync", "{}: requesting a snapshot manifest", peer_id);
SyncRequester::send_request(sync, io, peer_id, PeerAsking::SnapshotManifest, GetSnapshotManifestPacket, rlp::EMPTY_LIST_RLP.to_vec());
}
pub fn request_private_state(sync: &mut ChainSync, io: &mut dyn SyncIo, peer_id: PeerId, hash: &H256) {
......
......@@ -116,7 +116,7 @@ impl SyncSupplier {
debug!(target:"sync", "Unexpected packet {} from unregistered peer: {}:{}", packet_id, peer, io.peer_version(peer));
return;
}
debug!(target: "sync", "{} -> Dispatching packet: {}", peer, packet_id);
trace!(target: "sync", "{} -> Dispatching packet: {}", peer, packet_id);
match id {
ConsensusDataPacket => {
......
This diff is collapsed.
......@@ -50,6 +50,8 @@ pub trait SyncIo {
fn peer_version(&self, peer_id: PeerId) -> ClientVersion {
ClientVersion::from(peer_id.to_string())
}
/// Returns the peer enode string
fn peer_enode(&self, peer_id: PeerId) -> Option<String>;
/// Returns information on p2p session
fn peer_session_info(&self, peer_id: PeerId) -> Option<SessionInfo>;
/// Maximum mutually supported ETH protocol version
......@@ -115,10 +117,6 @@ impl<'s> SyncIo for NetSyncIo<'s> {
self.chain
}
fn chain_overlay(&self) -> &RwLock<HashMap<BlockNumber, Bytes>> {
self.chain_overlay
}
fn snapshot_service(&self) -> &dyn SnapshotService {
self.snapshot_service
}
......@@ -127,12 +125,20 @@ impl<'s> SyncIo for NetSyncIo<'s> {
self.private_state.clone()
}
fn peer_session_info(&self, peer_id: PeerId) -> Option<SessionInfo> {
self.network.session_info(peer_id)
/// Look up the client version string advertised by `peer_id`,
/// delegating to the underlying network layer.
fn peer_version(&self, peer_id: PeerId) -> ClientVersion {
self.network.peer_client_version(peer_id)
}
fn is_expired(&self) -> bool {
self.network.is_expired()
/// Build the enode URL (`enode://<node-id>@<remote-address>`) for `peer_id`.
/// Returns `None` when there is no session for the peer, or the session has
/// no node id yet (e.g. the handshake has not completed).
fn peer_enode(&self, peer_id: PeerId) -> Option<String> {
self.network.session_info(peer_id).and_then(|info| {
info.id.map(|node_id| {
// Fix: scheme is `enode://` — the previous format string had a
// stray extra colon (`enode:://`), producing malformed URLs.
format!("enode://{}@{}", node_id, info.remote_address)
})
})
}
/// Fetch p2p session information for `peer_id` from the network layer;
/// `None` if no session exists for that peer.
fn peer_session_info(&self, peer_id: PeerId) -> Option<SessionInfo> {
self.network.session_info(peer_id)
}
fn eth_protocol_version(&self, peer_id: PeerId) -> u8 {
......@@ -143,8 +149,12 @@ impl<'s> SyncIo for NetSyncIo<'s> {
self.network.protocol_version(*protocol, peer_id).unwrap_or(0)
}
fn peer_version(&self, peer_id: PeerId) -> ClientVersion {
self.network.peer_client_version(peer_id)
/// Whether the underlying network session has expired.
fn is_expired(&self) -> bool {
self.network.is_expired()
}
/// Access the shared chain overlay map (block number -> raw block bytes).
fn chain_overlay(&self) -> &RwLock<HashMap<BlockNumber, Bytes>> {
self.chain_overlay
}
fn payload_soft_limit(&self) -> usize {
......
......@@ -114,25 +114,17 @@ impl<'p, C> SyncIo for TestIo<'p, C> where C: FlushingBlockChainClient, C: 'p {
self.to_disconnect.insert(peer_id);
}
fn is_expired(&self) -> bool {
false
}
fn respond(&mut self, packet_id: PacketId, data: Vec<u8>) -> Result<(), network::Error> {
self.packets.push(TestPacket {
data: data,
packet_id: packet_id,
recipient: self.sender.unwrap()
});
self.packets.push(
TestPacket { data, packet_id, recipient: self.sender.unwrap() }
);
Ok(())
}
fn send(&mut self,peer_id: PeerId, packet_id: SyncPacket, data: Vec<u8>) -> Result<(), network::Error> {
self.packets.push(TestPacket {
data,
packet_id: packet_id.id(),
recipient: peer_id,
});
self.packets.push(
TestPacket { data, packet_id: packet_id.id(), recipient: peer_id }
);
Ok(())
}
......@@ -140,6 +132,14 @@ impl<'p, C> SyncIo for TestIo<'p, C> where C: FlushingBlockChainClient, C: 'p {
&*self.chain
}
/// Returns the snapshot service backing this test IO instance.
fn snapshot_service(&self) -> &dyn SnapshotService {
self.snapshot_service
}
/// Returns a handle to the private state DB, if one was configured.
fn private_state(&self) -> Option<Arc<PrivateStateDB>> {
self.private_state_db.clone()
}
fn peer_version(&self, peer_id: PeerId) -> ClientVersion {
self.peers_info.get(&peer_id)
.cloned()
......@@ -147,12 +147,8 @@ impl<'p, C> SyncIo for TestIo<'p, C> where C: FlushingBlockChainClient, C: 'p {
.into()
}
fn snapshot_service(&self) -> &dyn SnapshotService {
self.snapshot_service
}
fn private_state(&self) -> Option<Arc<PrivateStateDB>> {
self.private_state_db.clone()
/// Enode URLs are not modelled by this test harness; calling this is a
/// test bug, hence the deliberate panic rather than returning `None`.
fn peer_enode(&self, _peer_id: usize) -> Option<String> {
unimplemented!()
}
fn peer_session_info(&self, _peer_id: PeerId) -> Option<SessionInfo> {
......@@ -167,6 +163,10 @@ impl<'p, C> SyncIo for TestIo<'p, C> where C: FlushingBlockChainClient, C: 'p {
if protocol == &WARP_SYNC_PROTOCOL_ID { PAR_PROTOCOL_VERSION_4.0 } else { self.eth_protocol_version(peer_id) }
}
/// Test sessions never expire.
fn is_expired(&self) -> bool {
false
}
/// Access the in-memory chain overlay map (block number -> raw bytes)
/// owned by this test IO.
fn chain_overlay(&self) -> &RwLock<HashMap<BlockNumber, Bytes>> {
&self.overlay
}
......
......@@ -170,9 +170,9 @@ pub type ChunkSink<'a> = dyn FnMut(&[u8]) -> std::io::Result<()> + 'a;
/// Statuses for snapshot restoration.
#[derive(PartialEq, Eq, Clone, Copy, Debug)]
pub enum RestorationStatus {
/// No restoration.
/// No restoration activity currently.
Inactive,
/// Restoration is initializing
/// Restoration is initializing.
Initializing {
/// Total number of state chunks.
state_chunks: u32,
......@@ -192,7 +192,7 @@ pub enum RestorationStatus {
/// Number of block chunks completed.
block_chunks_done: u32,
},
/// Finalizing restoration
/// Finalizing restoration.
Finalizing,
/// Failed restoration.
Failed,
......
......@@ -37,7 +37,6 @@ use mio::{
use parity_path::restrict_permissions_owner;
use parking_lot::{Mutex, RwLock};
use rlp::{Encodable, RlpStream};
use rustc_hex::ToHex;
use ethcore_io::{IoContext, IoHandler, IoManager, StreamToken, TimerToken};
use parity_crypto::publickey::{Generator, KeyPair, Random, Secret};
......
......@@ -393,42 +393,42 @@ impl NonReservedPeerMode {
#[derive(Clone, Debug, PartialEq, Eq)]
pub struct IpFilter {
pub predefined: AllowIP,
pub custom_allow: Vec<IpNetwork>,
pub custom_block: Vec<IpNetwork>,
pub predefined: AllowIP,
pub custom_allow: Vec<IpNetwork>,
pub custom_block: Vec<IpNetwork>,
}
impl Default for IpFilter {
fn default() -> Self {
IpFilter {
predefined: AllowIP::All,
custom_allow: vec![],
custom_block: vec![],
}
}
fn default() -> Self {
IpFilter {
predefined: AllowIP::All,
custom_allow: vec![],
custom_block: vec![],
}
}
}
impl IpFilter {
/// Attempt to parse the peer mode from a string.
pub fn parse(s: &str) -> Result<IpFilter, IpNetworkError> {
let mut filter = IpFilter::default();
for f in s.split_whitespace() {
match f {
"all" => filter.predefined = AllowIP::All,
"private" => filter.predefined = AllowIP::Private,
"public" => filter.predefined = AllowIP::Public,
"none" => filter.predefined = AllowIP::None,
custom => {
if custom.starts_with("-") {
filter.custom_block.push(IpNetwork::from_str(&custom.to_owned().split_off(1))?)
} else {
filter.custom_allow.push(IpNetwork::from_str(custom)?)
}
}
}
}
Ok(filter)
}
/// Attempt to parse the peer mode from a string.
///
/// Recognized whitespace-separated tokens: `all`, `private`, `public` and
/// `none` set the predefined policy (the last one seen wins); any other
/// token is treated as a custom network entry. A custom entry prefixed
/// with `-` is appended to the block list, otherwise to the allow list.
/// Returns the error of the first custom entry that fails to parse.
pub fn parse(s: &str) -> Result<IpFilter, IpNetworkError> {
let mut filter = IpFilter::default();
for f in s.split_whitespace() {
match f {
"all" => filter.predefined = AllowIP::All,
"private" => filter.predefined = AllowIP::Private,
"public" => filter.predefined = AllowIP::Public,
"none" => filter.predefined = AllowIP::None,
custom => {
// Slice off the leading '-' directly instead of the needless
// `String` allocation of `custom.to_owned().split_off(1)`.
if custom.starts_with('-') {
filter.custom_block.push(IpNetwork::from_str(&custom[1..])?)
} else {
filter.custom_allow.push(IpNetwork::from_str(custom)?)
}
}
}
}
Ok(filter)
}
}
/// IP filter
......@@ -440,6 +440,6 @@ pub enum AllowIP {
Private,
/// Connect to public network only
Public,
/// Block all addresses
None,
/// Block all addresses
None,
}
Markdown is supported
0% or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or to comment