Unverified Commit 8a6af441 authored by Denis_P's avatar Denis_P 🏑 Committed by GitHub
Browse files

WIP: CI: add spellcheck (#3421)



* CI: add spellcheck

* revert me

* CI: explicit command for spellchecker

* spellcheck: edit misspells

* CI: run spellcheck on diff

* spellcheck: edits

* spellcheck: edit misspells

* spellcheck: add rules

* spellcheck: mv configs

* spellcheck: more edits

* spellcheck: chore

* spellcheck: one more thing

* spellcheck: and another one

* spellcheck: seems like it doesn't get to an end

* spellcheck: new words after rebase

* spellcheck: new words appearing out of nowhere

* chore

* review edits

* more review edits

* more edits

* wonky behavior

* wonky behavior 2

* wonky behavior 3

* change git behavior

* spellcheck: another bunch of new edits

* spellcheck: new words are coming out of nowhere

* CI: finding the master

* CI: fetching master implicitly

* CI: undebug

* new errors

* a bunch of new edits

* and some more

* Update node/core/approval-voting/src/approval_db/v1/mod.rs
Co-authored-by: Andronik Ordian's avatarAndronik Ordian <write@reusable.software>

* Update xcm/xcm-executor/src/assets.rs
Co-authored-by: Andronik Ordian's avatarAndronik Ordian <write@reusable.software>

* Apply suggestions from code review
Co-authored-by: Andronik Ordian's avatarAndronik Ordian <write@reusable.software>

* Suggestions from the code review

* CI: scan only changed files
Co-authored-by: Andronik Ordian's avatarAndronik Ordian <write@reusable.software>
parent 43920cd7
Pipeline #147422 canceled with stages
in 7 minutes and 46 seconds
......@@ -273,7 +273,7 @@ fn renice(pid: u32, niceness: i32) {
}
}
/// The entrypoint that the spawned prepare worker should start with. The socket_path specifies
/// The entrypoint that the spawned prepare worker should start with. The `socket_path` specifies
/// the path to the socket used to communicate with the host.
pub fn worker_entrypoint(socket_path: &str) {
worker_event_loop("prepare", socket_path, |mut stream| async move {
......
......@@ -42,7 +42,7 @@ impl Pvf {
Self { code, code_hash }
}
/// Creates a new pvf which artifact id can be uniquely identified by the given number.
	/// Creates a new PVF whose artifact id can be uniquely identified by the given number.
#[cfg(test)]
pub(crate) fn from_discriminator(num: u32) -> Self {
let descriminator_buf = num.to_le_bytes().to_vec();
......
......@@ -177,7 +177,7 @@ pub enum SpawnErr {
Accept,
/// An error happened during spawning the process.
ProcessSpawn,
/// The deadline alloted for the worker spawning and connecting to the socket has elapsed.
/// The deadline allotted for the worker spawning and connecting to the socket has elapsed.
AcceptTimeout,
}
......@@ -187,7 +187,7 @@ pub enum SpawnErr {
/// has been terminated. Since the worker is running in another process it is obviously not necessary
/// to poll this future to make the worker run, it's only for termination detection.
///
/// This future relies on the fact that a child process's stdout fd is closed upon it's termination.
/// This future relies on the fact that a child process's stdout `fd` is closed upon its termination.
#[pin_project]
pub struct WorkerHandle {
child: async_process::Child,
......
......@@ -51,10 +51,10 @@ mod tests;
const LOG_TARGET: &str = "parachain::runtime-api";
/// The number of maximum runtime api requests can be executed in parallel. Further requests will be buffered.
/// The maximum number of runtime API requests that can be executed in parallel. Further requests will be buffered.
const MAX_PARALLEL_REQUESTS: usize = 4;
/// The name of the blocking task that executes a runtime api request.
/// The name of the blocking task that executes a runtime API request.
const API_REQUEST_TASK_NAME: &str = "polkadot-runtime-api-request";
/// The `RuntimeApiSubsystem`. See module docs for more details.
......@@ -67,7 +67,7 @@ pub struct RuntimeApiSubsystem<Client> {
Pin<Box<dyn Future<Output = ()> + Send>>,
oneshot::Receiver<Option<RequestResult>>,
)>,
/// All the active runtime api requests that are currently being executed.
/// All the active runtime API requests that are currently being executed.
active_requests: FuturesUnordered<oneshot::Receiver<Option<RequestResult>>>,
/// Requests results cache
requests_cache: RequestResultCache,
......@@ -210,7 +210,7 @@ impl<Client> RuntimeApiSubsystem<Client> where
}
}
/// Spawn a runtime api request.
/// Spawn a runtime API request.
///
/// If there are already [`MAX_PARALLEL_REQUESTS`] requests being executed, the request will be buffered.
fn spawn_request(&mut self, relay_parent: Hash, request: Request) {
......@@ -239,7 +239,7 @@ impl<Client> RuntimeApiSubsystem<Client> where
if self.waiting_requests.len() > MAX_PARALLEL_REQUESTS * 10 {
tracing::warn!(
target: LOG_TARGET,
"{} runtime api requests waiting to be executed.",
"{} runtime API requests waiting to be executed.",
self.waiting_requests.len(),
)
}
......@@ -249,7 +249,7 @@ impl<Client> RuntimeApiSubsystem<Client> where
}
}
/// Poll the active runtime api requests.
/// Poll the active runtime API requests.
async fn poll_requests(&mut self) {
// If there are no active requests, this future should be pending forever.
if self.active_requests.len() == 0 {
......
......@@ -84,13 +84,13 @@ impl Jaeger {
Jaeger::Prep(cfg)
}
/// Spawn the background task in order to send the tracing information out via udp
/// Spawn the background task in order to send the tracing information out via UDP
#[cfg(target_os = "unknown")]
pub fn launch<S: SpawnNamed>(self, _spawner: S) -> result::Result<(), JaegerError> {
Ok(())
}
/// Spawn the background task in order to send the tracing information out via udp
/// Spawn the background task in order to send the tracing information out via UDP
#[cfg(not(target_os = "unknown"))]
pub fn launch<S: SpawnNamed>(self, spawner: S) -> result::Result<(), JaegerError> {
let cfg = match self {
......
......@@ -326,7 +326,7 @@ impl Span {
/// Add an additional int tag to the span without consuming.
///
/// Should be used sparingly, introduction of new types is prefered.
/// Should be used sparingly, introduction of new types is preferred.
#[inline(always)]
pub fn with_int_tag(mut self, tag: &'static str, i: i64) -> Self {
self.add_int_tag(tag, i);
......@@ -354,11 +354,11 @@ impl Span {
}
}
/// Add a pov hash meta tag with lazy hash eval, without consuming the span.
/// Add a PoV hash meta tag with lazy hash evaluation, without consuming the span.
#[inline(always)]
pub fn add_pov(&mut self, pov: &PoV) {
if self.is_enabled() {
// avoid computing the pov hash if jaeger is not enabled
// avoid computing the PoV hash if jaeger is not enabled
self.add_string_fmt_debug_tag("pov", pov.hash());
}
}
......
......@@ -8,7 +8,7 @@ path = "src/variant-a.rs"
[package]
name = "polkadot-test-malus"
description = "Misbehaving nodes for local testnets, system and simnet tests."
description = "Misbehaving nodes for local testnets, system and Simnet tests."
license = "GPL-3.0-only"
version = "0.9.8"
authors = ["Parity Technologies <admin@parity.io>"]
......
......@@ -27,7 +27,7 @@ use std::pin::Pin;
/// Filter incoming and outgoing messages.
pub trait MsgFilter: Send + Sync + Clone + 'static {
/// The message type the original subsystm handles incoming.
/// The message type the original subsystem handles incoming.
type Message: Send + 'static;
/// Filter messages that are to be received by
......
......@@ -18,7 +18,7 @@
//!
//! An example on how to use the `OverseerGen` pattern to
//! instantiate a modified subsystem implementation
//! for usage with simnet/gurke.
//! for usage with `simnet`/Gurke.
#![allow(missing_docs)]
......
......@@ -49,7 +49,7 @@ pub mod metrics {
/// Try to register metrics in the Prometheus registry.
fn try_register(registry: &prometheus::Registry) -> Result<Self, prometheus::PrometheusError>;
/// Convenience method to register metrics in the optional Promethius registry.
/// Convenience method to register metrics in the optional Prometheus registry.
///
/// If no registry is provided, returns `Default::default()`. Otherwise, returns the same
/// thing that `try_register` does.
......
......@@ -274,11 +274,11 @@ fn try_import_the_same_assignment() {
});
}
/// https://github.com/paritytech/polkadot/pull/2160#discussion_r547594835
/// <https://github.com/paritytech/polkadot/pull/2160#discussion_r547594835>
///
/// 1. Send a view update that removes block B from their view.
/// 2. Send a message from B that they incur COST_UNEXPECTED_MESSAGE for,
/// but then they receive BENEFIT_VALID_MESSAGE.
/// 2. Send a message from B that they incur `COST_UNEXPECTED_MESSAGE` for,
/// but then they receive `BENEFIT_VALID_MESSAGE`.
/// 3. Send all other messages related to B.
#[test]
fn spam_attack_results_in_negative_reputation_change() {
......@@ -360,7 +360,7 @@ fn spam_attack_results_in_negative_reputation_change() {
/// Upon receiving them, they both will try to send the message each other.
/// This test makes sure they will not punish each other for such duplicate messages.
///
/// See https://github.com/paritytech/polkadot/issues/2499.
/// See <https://github.com/paritytech/polkadot/issues/2499>.
#[test]
fn peer_sending_us_the_same_we_just_sent_them_is_ok() {
let parent_hash = Hash::repeat_byte(0xFF);
......
......@@ -72,7 +72,7 @@ enum FetchedState {
///
/// Once the contained `Sender` is dropped, any still running task will be canceled.
Started(oneshot::Sender<()>),
/// All relevant live_in have been removed, before we were able to get our chunk.
/// All relevant `live_in` have been removed, before we were able to get our chunk.
Canceled,
}
......@@ -118,7 +118,7 @@ struct RunningTask {
/// Sender for communicating with other subsystems and reporting results.
sender: mpsc::Sender<FromFetchTask>,
/// Prometheues metrics for reporting results.
/// Prometheus metrics for reporting results.
metrics: Metrics,
/// Span tracking the fetching of this chunk.
......
......@@ -199,7 +199,7 @@ fn task_stores_valid_chunk_if_there_is_one() {
struct TestRun {
/// Response to deliver for a given validator index.
/// None means, answer with NetworkError.
/// None means, answer with `NetworkError`.
chunk_responses: HashMap<Recipient, ChunkFetchingResponse>,
/// Set of chunks that should be considered valid:
valid_chunks: HashSet<Vec<u8>>,
......@@ -238,7 +238,7 @@ impl TestRun {
});
}
/// Returns true, if after processing of the given message it would be ok for the stream to
/// Returns true, if after processing of the given message it would be OK for the stream to
/// end.
async fn handle_message(&self, msg: AllMessages) -> bool {
match msg {
......
......@@ -153,8 +153,8 @@ impl Requester {
///
/// Starting requests where necessary.
///
/// Note: The passed in `leaf` is not the same as CandidateDescriptor::relay_parent in the
/// given cores. The latter is the relay_parent this candidate considers its parent, while the
/// Note: The passed in `leaf` is not the same as `CandidateDescriptor::relay_parent` in the
/// given cores. The latter is the `relay_parent` this candidate considers its parent, while the
/// passed in leaf might be some later block where the candidate is still pending availability.
async fn add_cores<Context>(
&mut self,
......
......@@ -35,7 +35,7 @@ use crate::{
/// It should be ensured that a cached session stays live in the cache as long as we might need it.
pub struct SessionCache {
/// Look up cached sessions by SessionIndex.
/// Look up cached sessions by `SessionIndex`.
///
/// Note: Performance of fetching is really secondary here, but we need to ensure we are going
/// to get any existing cache entry, before fetching new information, as we should not mess up
......
......@@ -85,7 +85,7 @@ where
/// Answer an incoming PoV fetch request by querying the av store.
///
/// Returns: Ok(true) if chunk was found and served.
/// Returns: `Ok(true)` if chunk was found and served.
pub async fn answer_pov_request<Context>(
ctx: &mut Context,
req: IncomingRequest<v1::PoVFetchingRequest>,
......@@ -113,7 +113,7 @@ where
/// Answer an incoming chunk request by querying the av store.
///
/// Returns: Ok(true) if chunk was found and served.
/// Returns: `Ok(true)` if chunk was found and served.
pub async fn answer_chunk_request<Context>(
ctx: &mut Context,
req: IncomingRequest<v1::ChunkFetchingRequest>,
......
......@@ -57,7 +57,7 @@ pub struct TestHarness {
pub pool: TaskExecutor,
}
/// TestState for mocking execution of this subsystem.
/// `TestState` for mocking execution of this subsystem.
///
/// The `Default` instance provides data, which makes the system succeed by providing a couple of
/// valid occupied cores. You can tune the data before calling `TestState::run`. E.g. modify some
......
......@@ -53,7 +53,7 @@ use polkadot_node_network_protocol::{
};
use polkadot_node_subsystem_util::metrics::{self, prometheus};
/// Peer set infos for network initialization.
/// Peer set info for network initialization.
///
/// To be added to [`NetworkConfiguration::extra_sets`].
pub use polkadot_node_network_protocol::peer_set::{peer_sets_info, IsAuthority};
......
......@@ -39,7 +39,7 @@ use polkadot_overseer::AllMessages;
///
/// The resulting stream will end once any of its input ends.
///
/// TODO: Get rid of this: https://github.com/paritytech/polkadot/issues/2842
// TODO: Get rid of this: <https://github.com/paritytech/polkadot/issues/2842>
pub struct RequestMultiplexer {
receivers: Vec<(Protocol, mpsc::Receiver<network::IncomingRequest>)>,
statement_fetching: Option<mpsc::Receiver<network::IncomingRequest>>,
......
......@@ -232,7 +232,7 @@ impl Network for Arc<NetworkService<Block, Hash>> {
}
}
/// We assume one peer_id per authority_id.
/// We assume one `peer_id` per `authority_id`.
pub async fn get_peer_id_by_authority_id<AD: AuthorityDiscovery>(
authority_discovery: &mut AD,
authority: AuthorityDiscoveryId,
......
Supports Markdown
0% or .
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or to comment