diff --git a/.github/workflows/ci.yml b/.github/workflows/ci.yml
index ed725d5a3dce2171f99ac212942846780fb928d5..e193ed4d5db2469b6b99a1ef9f1738dc8b48a3f4 100644
--- a/.github/workflows/ci.yml
+++ b/.github/workflows/ci.yml
@@ -55,7 +55,7 @@ jobs:
   coverage:
     name: Zombienet SDK - coverage
     needs: build
-    runs-on: ubuntu-latest
+    runs-on: ubuntu-20.04
     if: github.event_name == 'pull_request'
 
     permissions:
@@ -83,7 +83,7 @@ jobs:
         uses: taiki-e/install-action@cargo-llvm-cov
 
       - name: Collect coverage data
-        run: cargo llvm-cov nextest --lcov --output-path lcov.info
+        run: cargo llvm-cov nextest --workspace --exclude zombienet-sdk --test-threads 1 --lcov --output-path lcov.info
 
       - name: Report code coverage
         uses: Nef10/lcov-reporter-action@v0.4.0
diff --git a/crates/orchestrator/src/generators/chain_spec.rs b/crates/orchestrator/src/generators/chain_spec.rs
index faf9d223527fbcf84170883c0b2ef3243bd0ab2e..fd9ed40e7d9860ef7c6626d8ac0b1c0220779db2 100644
--- a/crates/orchestrator/src/generators/chain_spec.rs
+++ b/crates/orchestrator/src/generators/chain_spec.rs
@@ -4,7 +4,7 @@ use std::{
 };
 
 use anyhow::anyhow;
-use configuration::{types::AssetLocation, HrmpChannelConfig};
+use configuration::{shared::constants::THIS_IS_A_BUG, types::AssetLocation, HrmpChannelConfig};
 use provider::{
     constants::NODE_CONFIG_DIR,
     types::{GenerateFileCommand, GenerateFilesOptions, TransferedFile},
@@ -835,7 +835,9 @@ fn add_aura_authorities(
                 node.accounts
                     .accounts
                     .get("sr")
-                    .expect("'sr' account should be set at spec computation, this is a bug")
+                    .expect(&format!(
+                        "'sr' account should be set at spec computation {THIS_IS_A_BUG}"
+                    ))
                     .address
                     .clone()
             })
@@ -880,7 +882,9 @@ fn add_collator_selection(
                 node.accounts
                     .accounts
                     .get("sr")
-                    .expect("'sr' account should be set at spec computation, this is a bug")
+                    .expect(&format!(
+                        "'sr' account should be set at spec computation {THIS_IS_A_BUG}"
+                    ))
                     .address
                     .clone()
             })
diff --git a/crates/orchestrator/src/generators/command.rs b/crates/orchestrator/src/generators/command.rs
index 0b648c430d23deb321bdc74d5c34a25355de48e3..1baaec866ef3f9ca353491505de3c83417104867 100644
--- a/crates/orchestrator/src/generators/command.rs
+++ b/crates/orchestrator/src/generators/command.rs
@@ -1,4 +1,4 @@
-use configuration::types::Arg;
+use configuration::{shared::constants::THIS_IS_A_BUG, types::Arg};
 
 use crate::{network_spec::node::NodeSpec, shared::constants::*};
 
@@ -232,8 +232,9 @@ pub fn generate_for_node(
 
     if *is_validator && !args.contains(&Arg::Flag("--validator".into())) {
         tmp_args.push("--validator".into());
-        // TODO: we need to impl cli args checking
-        tmp_args.push("--insecure-validator-i-know-what-i-do".into());
+        if node.supports_arg("--insecure-validator-i-know-what-i-do") {
+            tmp_args.push("--insecure-validator-i-know-what-i-do".into());
+        }
     }
 
     if !bootnodes_addresses.is_empty() {
@@ -285,7 +286,7 @@ pub fn generate_for_node(
         // TODO: move this to error
         let port_part = parts
             .get_mut(4)
-            .expect("should have at least 5 parts, this is a bug");
+            .expect(&format!("should have at least 5 parts {THIS_IS_A_BUG}"));
         let port_to_use = p2p_port.to_string();
         *port_part = port_to_use.as_str();
         parts.join("/")
diff --git a/crates/orchestrator/src/generators/keystore.rs b/crates/orchestrator/src/generators/keystore.rs
index 91ac4b80cd148a08af6633944a620c8df2b32637..01edca2d2268ef126aa1a81d1c19a548c36881aa 100644
--- a/crates/orchestrator/src/generators/keystore.rs
+++ b/crates/orchestrator/src/generators/keystore.rs
@@ -3,6 +3,7 @@ use std::{
     vec,
 };
 
+use configuration::shared::constants::THIS_IS_A_BUG;
 use hex::encode;
 use support::fs::FileSystem;
 
@@ -40,7 +41,7 @@ where
                 let pk = acc
                     .accounts
                     .get("sr")
-                    .expect("Key 'sr' should be set for node, this is a bug.")
+                    .expect(&format!("Key 'sr' should be set for node {THIS_IS_A_BUG}"))
                     .public_key
                     .as_str();
                 format!("{}{}", encode(k), pk)
@@ -49,7 +50,7 @@ where
                 let pk = acc
                     .accounts
                     .get("ed")
-                    .expect("Key 'ed' should be set for node, this is a bug.")
+                    .expect(&format!("Key 'ed' should be set for node {THIS_IS_A_BUG}"))
                     .public_key
                     .as_str();
                 format!("{}{}", encode(k), pk)
@@ -58,7 +59,7 @@ where
                 let pk = acc
                     .accounts
                     .get("ec")
-                    .expect("Key 'ec' should be set for node, this is a bug.")
+                    .expect(&format!("Key 'ec' should be set for node {THIS_IS_A_BUG}"))
                     .public_key
                     .as_str();
                 format!("{}{}", encode(k), pk)
diff --git a/crates/orchestrator/src/generators/port.rs b/crates/orchestrator/src/generators/port.rs
index f6bd519fba8247fca65f09fee363ad4d703e035d..e2175e91a1250d666e8c0e3a1e91bc329cd55827 100644
--- a/crates/orchestrator/src/generators/port.rs
+++ b/crates/orchestrator/src/generators/port.rs
@@ -1,6 +1,6 @@
 use std::net::TcpListener;
 
-use configuration::shared::types::Port;
+use configuration::shared::{constants::THIS_IS_A_BUG, types::Port};
 
 use super::errors::GeneratorError;
 use crate::shared::types::ParkedPort;
@@ -18,7 +18,9 @@ pub fn generate(port: Option<Port>) -> Result<ParkedPort, GeneratorError> {
         .map_err(|_e| GeneratorError::PortGeneration(port, "Can't bind".into()))?;
     let port = listener
         .local_addr()
-        .expect("We should always get the local_addr from the listener, please report as bug")
+        .expect(&format!(
+            "We should always get the local_addr from the listener {THIS_IS_A_BUG}"
+        ))
         .port();
     Ok(ParkedPort::new(port, listener))
 }
diff --git a/crates/orchestrator/src/lib.rs b/crates/orchestrator/src/lib.rs
index d823261ab1ec433547de8f3bf1c2080f50d35bf5..633257deb3c8d67c6ee65cbcc6577efe84a6ef6d 100644
--- a/crates/orchestrator/src/lib.rs
+++ b/crates/orchestrator/src/lib.rs
@@ -1,5 +1,5 @@
 // TODO(Javier): Remove when we implement the logic in the orchestrator to spawn with the provider.
-#![allow(dead_code)]
+#![allow(dead_code, clippy::expect_fun_call)]
 
 pub mod errors;
 mod generators;
@@ -85,6 +85,10 @@ where
         // create namespace
         let ns = self.provider.create_namespace().await?;
 
+        network_spec
+            .populate_nodes_available_args(ns.clone())
+            .await?;
+
         info!("🧰 ns: {}", ns.name());
         info!("🧰 base_dir: {:?}", ns.base_dir());
 
diff --git a/crates/orchestrator/src/network/parachain.rs b/crates/orchestrator/src/network/parachain.rs
index 3d05ab4c89f73cb59b1a334a1e3a23159979cebc..16e38cddbec284d9a8e791c2ae62d4d97a6da986 100644
--- a/crates/orchestrator/src/network/parachain.rs
+++ b/crates/orchestrator/src/network/parachain.rs
@@ -3,6 +3,7 @@ use std::{
     str::FromStr,
 };
 
+use configuration::shared::constants::THIS_IS_A_BUG;
 use provider::types::TransferedFile;
 use subxt::{dynamic::Value, tx::TxStatus, OnlineClient, SubstrateConfig};
 use subxt_signer::{sr25519::Keypair, SecretUri};
@@ -98,7 +99,8 @@ impl Parachain {
         // get the seed
         let sudo: Keypair;
         if let Some(possible_seed) = options.seed {
-            sudo = Keypair::from_seed(possible_seed).expect("seed should return a Keypair.");
+            sudo = Keypair::from_seed(possible_seed)
+                .expect(&format!("seed should return a Keypair {THIS_IS_A_BUG}"));
         } else {
             let uri = SecretUri::from_str("//Alice")?;
             sudo = Keypair::from_uri(&uri)?;
@@ -107,11 +109,15 @@ impl Parachain {
         let genesis_state = scoped_fs
             .read_to_string(options.state_path)
             .await
-            .expect("State Path should be ok by this point.");
+            .expect(&format!(
+                "State Path should be ok by this point {THIS_IS_A_BUG}"
+            ));
         let wasm_data = scoped_fs
             .read_to_string(options.wasm_path)
             .await
-            .expect("Wasm Path should be ok by this point.");
+            .expect(&format!(
+                "Wasm Path should be ok by this point {THIS_IS_A_BUG}"
+            ));
 
         let api = OnlineClient::<SubstrateConfig>::from_url(options.node_ws_url).await?;
 
diff --git a/crates/orchestrator/src/network_spec.rs b/crates/orchestrator/src/network_spec.rs
index a38a4f26f7bc5e2a6d0d9bbe7920a75b63ede64b..b07d0b0f9be2d79aae485981dd04bce1549dc6e0 100644
--- a/crates/orchestrator/src/network_spec.rs
+++ b/crates/orchestrator/src/network_spec.rs
@@ -1,4 +1,14 @@
-use configuration::{GlobalSettings, HrmpChannelConfig, NetworkConfig};
+use std::{
+    collections::{hash_map::Entry, HashMap},
+    sync::Arc,
+};
+
+use configuration::{
+    shared::constants::THIS_IS_A_BUG, GlobalSettings, HrmpChannelConfig, NetworkConfig,
+};
+use futures::future::try_join_all;
+use provider::ProviderNamespace;
+use tracing::debug;
 
 use crate::errors::OrchestratorError;
 
@@ -6,7 +16,7 @@ pub mod node;
 pub mod parachain;
 pub mod relaychain;
 
-use self::{parachain::ParachainSpec, relaychain::RelaychainSpec};
+use self::{node::NodeSpec, parachain::ParachainSpec, relaychain::RelaychainSpec};
 
 #[derive(Debug, Clone)]
 pub struct NetworkSpec {
@@ -50,6 +60,115 @@ impl NetworkSpec {
             global_settings: network_config.global_settings().clone(),
         })
     }
+
+    pub async fn populate_nodes_available_args(
+        &mut self,
+        ns: Arc<dyn ProviderNamespace + Send + Sync>,
+    ) -> Result<(), OrchestratorError> {
+        let network_nodes = self.collect_network_nodes();
+
+        let mut image_command_to_nodes_mapping =
+            Self::create_image_command_to_nodes_mapping(network_nodes);
+
+        let available_args_outputs =
+            Self::retrieve_all_nodes_available_args_output(ns, &image_command_to_nodes_mapping)
+                .await?;
+
+        Self::update_nodes_available_args_output(
+            &mut image_command_to_nodes_mapping,
+            available_args_outputs,
+        );
+
+        Ok(())
+    }
+
+    // collect mutable references to all nodes from relaychain and parachains
+    fn collect_network_nodes(&mut self) -> Vec<&mut NodeSpec> {
+        vec![
+            self.relaychain.nodes.iter_mut().collect::<Vec<_>>(),
+            self.parachains
+                .iter_mut()
+                .flat_map(|para| para.collators.iter_mut())
+                .collect(),
+        ]
+        .into_iter()
+        .flatten()
+        .collect::<Vec<_>>()
+    }
+
+    // initialize the mapping of all possible node image/commands to corresponding nodes
+    fn create_image_command_to_nodes_mapping(
+        network_nodes: Vec<&mut NodeSpec>,
+    ) -> HashMap<(Option<String>, String), Vec<&mut NodeSpec>> {
+        network_nodes.into_iter().fold(
+            HashMap::new(),
+            |mut acc: HashMap<(Option<String>, String), Vec<&mut node::NodeSpec>>, node| {
+                // build mapping key using image and command if image is present or command only
+                let key = node
+                    .image
+                    .as_ref()
+                    .map(|image| {
+                        (
+                            Some(image.as_str().to_string()),
+                            node.command.as_str().to_string(),
+                        )
+                    })
+                    .unwrap_or_else(|| (None, node.command.as_str().to_string()));
+
+                // append the node to the vector of nodes for this image/command tuple
+                if let Entry::Vacant(entry) = acc.entry(key.clone()) {
+                    entry.insert(vec![node]);
+                } else {
+                    acc.get_mut(&key).unwrap().push(node);
+                }
+
+                acc
+            },
+        )
+    }
+
+    async fn retrieve_all_nodes_available_args_output(
+        ns: Arc<dyn ProviderNamespace + Send + Sync>,
+        image_command_to_nodes_mapping: &HashMap<(Option<String>, String), Vec<&mut NodeSpec>>,
+    ) -> Result<Vec<(Option<String>, String, String)>, OrchestratorError> {
+        try_join_all(
+            image_command_to_nodes_mapping
+                .keys()
+                .cloned()
+                .map(|(image, command)| async {
+                    // get node available args output from image/command
+                    let available_args = ns
+                        .get_node_available_args((command.clone(), image.clone()))
+                        .await?;
+                    debug!(
+                        "retrieved available args for image: {:?}, command: {}",
+                        image, command
+                    );
+
+                    // map the result to include image and command
+                    Ok::<_, OrchestratorError>((image, command, available_args))
+                })
+                .collect::<Vec<_>>(),
+        )
+        .await
+    }
+
+    fn update_nodes_available_args_output(
+        image_command_to_nodes_mapping: &mut HashMap<(Option<String>, String), Vec<&mut NodeSpec>>,
+        available_args_outputs: Vec<(Option<String>, String, String)>,
+    ) {
+        for (image, command, available_args_output) in available_args_outputs {
+            let nodes = image_command_to_nodes_mapping
+                .get_mut(&(image, command))
+                .expect(&format!(
+                    "node image/command key should exist {THIS_IS_A_BUG}"
+                ));
+
+            for node in nodes {
+                node.available_args_output = Some(available_args_output.clone());
+            }
+        }
+    }
 }
 
 #[cfg(test)]
diff --git a/crates/orchestrator/src/network_spec/node.rs b/crates/orchestrator/src/network_spec/node.rs
index 5a3dda3bb78fef983d194756021b110ecdbd4c08..f44c531d5fe9e7d6cd3636d4ffb495f7f9d3000d 100644
--- a/crates/orchestrator/src/network_spec/node.rs
+++ b/crates/orchestrator/src/network_spec/node.rs
@@ -1,4 +1,5 @@
 use configuration::shared::{
+    constants::THIS_IS_A_BUG,
     node::{EnvVar, NodeConfig},
     resources::Resources,
     types::{Arg, AssetLocation, Command, Image},
@@ -69,6 +70,9 @@ pub struct NodeSpec {
     /// Arguments to use for node. Appended to default.
     pub(crate) args: Vec<Arg>,
 
+    // The help command output containing the available arguments.
+    pub(crate) available_args_output: Option<String>,
+
     /// Wether the node is a validator.
     pub(crate) is_validator: bool,
 
@@ -164,6 +168,7 @@ impl NodeSpec {
             command,
             subcommand,
             args,
+            available_args_output: None,
             is_validator: node_config.is_validator(),
             is_invulnerable: node_config.is_invulnerable(),
             is_bootnode: node_config.is_bootnode(),
@@ -243,6 +248,7 @@ impl NodeSpec {
             command,
             subcommand,
             args,
+            available_args_output: None,
             is_validator: options.is_validator,
             is_invulnerable: false,
             is_bootnode: false,
@@ -260,4 +266,13 @@ impl NodeSpec {
             p2p_port: generators::generate_node_port(options.p2p_port)?,
         })
     }
+
+    pub(crate) fn supports_arg(&self, arg: impl AsRef<str>) -> bool {
+        self.available_args_output
+            .as_ref()
+            .expect(&format!(
+                "available args should be present at this point {THIS_IS_A_BUG}"
+            ))
+            .contains(arg.as_ref())
+    }
 }
diff --git a/crates/orchestrator/src/spawner.rs b/crates/orchestrator/src/spawner.rs
index 97a959445c3ef9ca12526a41d25048a26d21d27a..c5e9dd782753c51887a271c5bfa020a4a5f20026 100644
--- a/crates/orchestrator/src/spawner.rs
+++ b/crates/orchestrator/src/spawner.rs
@@ -1,6 +1,7 @@
 use std::path::PathBuf;
 
 use anyhow::Context;
+use configuration::shared::constants::THIS_IS_A_BUG;
 use provider::{
     constants::{LOCALHOST, NODE_CONFIG_DIR, NODE_DATA_DIR, NODE_RELAY_DATA_DIR},
     shared::helpers::running_in_ci,
@@ -130,9 +131,9 @@ where
             generators::generate_node_command(node, gen_opts, maybe_para_id)
         },
         ZombieRole::CumulusCollator => {
-            let para = ctx
-                .parachain
-                .expect("parachain must be part of the context, this is a bug");
+            let para = ctx.parachain.expect(&format!(
+                "parachain must be part of the context {THIS_IS_A_BUG}"
+            ));
             let full_p2p = generators::generate_node_port(None)?;
             generators::generate_node_command_cumulus(node, gen_opts, para.id, full_p2p.0)
         },
diff --git a/crates/orchestrator/src/tx_helper/register_para.rs b/crates/orchestrator/src/tx_helper/register_para.rs
index d9e19abdd7340d3cdd5b596bca2fcf8e37f4caaa..eeb6ecf87dd27c66114989e21cd0a098622e8772 100644
--- a/crates/orchestrator/src/tx_helper/register_para.rs
+++ b/crates/orchestrator/src/tx_helper/register_para.rs
@@ -1,5 +1,6 @@
 use std::str::FromStr;
 
+use configuration::shared::constants::THIS_IS_A_BUG;
 use subxt::{dynamic::Value, OnlineClient, SubstrateConfig};
 use subxt_signer::{sr25519::Keypair, SecretUri};
 use support::fs::FileSystem;
@@ -7,7 +8,6 @@ use support::fs::FileSystem;
 use crate::{shared::types::RegisterParachainOptions, ScopedFilesystem};
 use tracing::{debug, info, trace};
 
-
 pub async fn register(
     options: RegisterParachainOptions,
     scoped_fs: &ScopedFilesystem<'_, impl FileSystem>,
@@ -16,7 +16,8 @@ pub async fn register(
     // get the seed
     let sudo: Keypair;
     if let Some(possible_seed) = options.seed {
-        sudo = Keypair::from_seed(possible_seed).expect("seed should return a Keypair.");
+        sudo = Keypair::from_seed(possible_seed)
+            .expect(&format!("seed should return a Keypair {THIS_IS_A_BUG}"));
     } else {
         let uri = SecretUri::from_str("//Alice")?;
         sudo = Keypair::from_uri(&uri)?;
@@ -25,11 +26,15 @@ pub async fn register(
     let genesis_state = scoped_fs
         .read_to_string(options.state_path)
         .await
-        .expect("State Path should be ok by this point.");
+        .expect(&format!(
+            "State Path should be ok by this point {THIS_IS_A_BUG}"
+        ));
     let wasm_data = scoped_fs
         .read_to_string(options.wasm_path)
         .await
-        .expect("Wasm Path should be ok by this point.");
+        .expect(&format!(
+            "Wasm Path should be ok by this point {THIS_IS_A_BUG}"
+        ));
 
     let api = OnlineClient::<SubstrateConfig>::from_url(options.node_ws_url).await?;
 
@@ -64,4 +69,4 @@ pub async fn register(
     let result = result.wait_for_in_block().await?;
     debug!("In block: {:#?}", result.block_hash());
     Ok(())
-}
\ No newline at end of file
+}
diff --git a/crates/provider/src/kubernetes/client.rs b/crates/provider/src/kubernetes/client.rs
index e20a15e5522693adf78852e634af8dd9dc20dc5a..e9d5306fdbc4b06ce30a8b9b4d920f76afee385e 100644
--- a/crates/provider/src/kubernetes/client.rs
+++ b/crates/provider/src/kubernetes/client.rs
@@ -3,6 +3,7 @@ use std::{
 };
 
 use anyhow::anyhow;
+use configuration::shared::constants::THIS_IS_A_BUG;
 use futures::{StreamExt, TryStreamExt};
 use k8s_openapi::api::core::v1::{
     ConfigMap, Namespace, Pod, PodSpec, PodStatus, Service, ServiceSpec,
@@ -241,17 +242,17 @@ impl KubernetesClient {
             .await
             .map_err(|err| Error::from(anyhow!("error while exec in the pod {name}: {err}")))?;
 
-        let stdout_stream = process
-            .stdout()
-            .expect("stdout shouldn't be None when true passed to exec");
+        let stdout_stream = process.stdout().expect(&format!(
+            "stdout shouldn't be None when true passed to exec {THIS_IS_A_BUG}"
+        ));
         let stdout = tokio_util::io::ReaderStream::new(stdout_stream)
             .filter_map(|r| async { r.ok().and_then(|v| String::from_utf8(v.to_vec()).ok()) })
             .collect::<Vec<_>>()
             .await
             .join("");
-        let stderr_stream = process
-            .stderr()
-            .expect("stderr shouldn't be None when true passed to exec");
+        let stderr_stream = process.stderr().expect(&format!(
+            "stderr shouldn't be None when true passed to exec {THIS_IS_A_BUG}"
+        ));
         let stderr = tokio_util::io::ReaderStream::new(stderr_stream)
             .filter_map(|r| async { r.ok().and_then(|v| String::from_utf8(v.to_vec()).ok()) })
             .collect::<Vec<_>>()
@@ -260,7 +261,9 @@ impl KubernetesClient {
 
         let status = process
             .take_status()
-            .expect("first call to status shouldn't fail")
+            .expect(&format!(
+                "first call to status shouldn't fail {THIS_IS_A_BUG}"
+            ))
             .await;
 
         // await process to finish
@@ -292,7 +295,7 @@ impl KubernetesClient {
                                 })
                             })
                             .expect(
-                                "command with non-zero exit code should have exit code present",
+                                &format!("command with non-zero exit code should have exit code present {THIS_IS_A_BUG}")
                             );
 
                         Ok(Err((exit_status, stderr)))
@@ -464,7 +467,10 @@ impl KubernetesClient {
                 WatchEvent::Error(err) => Err(Error::from(anyhow!(
                     "error while awaiting resource {name} is created: {err}"
                 )))?,
-                _ => panic!("Unexpected event happened while creating '{}'", name),
+                _ => panic!(
+                    "Unexpected event happened while creating '{}' {THIS_IS_A_BUG}",
+                    name
+                ),
             }
         }
 
diff --git a/crates/provider/src/kubernetes/namespace.rs b/crates/provider/src/kubernetes/namespace.rs
index dd28ab6660791a33f8d12c1366c2e62580f71e98..0a5888f5ab1efbf04ba2763bf1f3c6d8321b3bf3 100644
--- a/crates/provider/src/kubernetes/namespace.rs
+++ b/crates/provider/src/kubernetes/namespace.rs
@@ -7,6 +7,7 @@ use std::{
 
 use anyhow::anyhow;
 use async_trait::async_trait;
+use configuration::shared::constants::THIS_IS_A_BUG;
 use k8s_openapi::{
     api::core::v1::{
         Container, ContainerPort, HTTPGetAction, PodSpec, Probe, ServicePort, ServiceSpec,
@@ -363,6 +364,30 @@ where
             .collect()
     }
 
+    async fn get_node_available_args(
+        &self,
+        (command, image): (String, Option<String>),
+    ) -> Result<String, ProviderError> {
+        let node_image = image.expect(&format!("image should be present when getting node available args with kubernetes provider {THIS_IS_A_BUG}"));
+
+        // run dummy command in new pod
+        let temp_node = self
+            .spawn_node(
+                &SpawnNodeOptions::new(format!("temp-{}", Uuid::new_v4()), "cat".to_string())
+                    .image(node_image.clone()),
+            )
+            .await?;
+
+        let available_args_output = temp_node
+            .run_command(RunCommandOptions::new(command.clone()).args(vec!["--help"]))
+            .await?
+            .map_err(|(_exit, status)| {
+                ProviderError::NodeAvailableArgsError(node_image, command, status)
+            })?;
+        temp_node.destroy().await?; // don't leave the throwaway pod running
+        Ok(available_args_output)
+    }
+
     async fn spawn_node(&self, options: &SpawnNodeOptions) -> Result<DynNode, ProviderError> {
         trace!("spawn option {:?}", options);
         if self.nodes.read().await.contains_key(&options.name) {
@@ -401,7 +426,7 @@ where
             .unwrap_or_else(|| format!("temp-{}", Uuid::new_v4()));
         let node_image = options
             .image
-            .expect("image should be present when generating files with kubernetes provider");
+            .expect(&format!("image should be present when generating files with kubernetes provider {THIS_IS_A_BUG}"));
 
         // run dummy command in new pod
         let temp_node = self
diff --git a/crates/provider/src/kubernetes/node.rs b/crates/provider/src/kubernetes/node.rs
index 75c21dff23134e142d90280cb4d62e181567973f..8a158d83ef0fdca8a7b184674c9f715f494f5a05 100644
--- a/crates/provider/src/kubernetes/node.rs
+++ b/crates/provider/src/kubernetes/node.rs
@@ -516,7 +516,9 @@ where
         let file_name = options
             .local_script_path
             .file_name()
-            .expect("file name should be present at this point")
+            .expect(&format!(
+                "file name should be present at this point {THIS_IS_A_BUG}"
+            ))
             .to_string_lossy();
 
         self.run_command(RunCommandOptions {
diff --git a/crates/provider/src/lib.rs b/crates/provider/src/lib.rs
index dd19a0e26525d38d36819d2dc8b6de3087f68767..0e72d2ff693620ba6a046789ed0b168c9115334c 100644
--- a/crates/provider/src/lib.rs
+++ b/crates/provider/src/lib.rs
@@ -1,3 +1,4 @@
+#![allow(clippy::expect_fun_call)]
 mod kubernetes;
 mod native;
 pub mod shared;
@@ -41,6 +42,9 @@ pub enum ProviderError {
     #[error("Invalid network configuration field {0}")]
     InvalidConfig(String),
 
+    #[error("Failed to retrieve node available args using image {0} and command {1}: {2}")]
+    NodeAvailableArgsError(String, String, String),
+
     #[error("Can not recover node: {0}")]
     MissingNode(String),
 
@@ -136,6 +140,11 @@ pub trait ProviderNamespace {
 
     async fn nodes(&self) -> HashMap<String, DynNode>;
 
+    async fn get_node_available_args(
+        &self,
+        options: (String, Option<String>),
+    ) -> Result<String, ProviderError>;
+
     async fn spawn_node(&self, options: &SpawnNodeOptions) -> Result<DynNode, ProviderError>;
 
     async fn generate_files(&self, options: GenerateFilesOptions) -> Result<(), ProviderError>;
diff --git a/crates/provider/src/native/namespace.rs b/crates/provider/src/native/namespace.rs
index 83ac4af40d356b625e75712be6fdcc683204a73b..33886cbc69c4c46136f7d5e8e5bd824b94408f94 100644
--- a/crates/provider/src/native/namespace.rs
+++ b/crates/provider/src/native/namespace.rs
@@ -86,6 +86,27 @@ where
             .collect()
     }
 
+    async fn get_node_available_args(
+        &self,
+        (command, _image): (String, Option<String>),
+    ) -> Result<String, ProviderError> {
+        // spawn a placeholder process we can run the node binary inside of
+        let temp_node = self
+            .spawn_node(
+                &SpawnNodeOptions::new(format!("temp-{}", Uuid::new_v4()), "bash".to_string())
+                    .args(vec!["-c", "while :; do sleep 1; done"]),
+            )
+            .await?;
+        let available_args_output = temp_node
+            .run_command(RunCommandOptions::new(command.clone()).args(vec!["--help"]))
+            .await?
+            .map_err(|(_exit, status)| {
+                ProviderError::NodeAvailableArgsError("".to_string(), command, status)
+            })?;
+        temp_node.destroy().await?; // the bash loop never exits on its own
+        Ok(available_args_output)
+    }
+
     async fn spawn_node(&self, options: &SpawnNodeOptions) -> Result<DynNode, ProviderError> {
         if self.nodes.read().await.contains_key(&options.name) {
             return Err(ProviderError::DuplicatedNodeName(options.name.clone()));
diff --git a/crates/provider/src/native/node.rs b/crates/provider/src/native/node.rs
index f199f98e099054f6fa9931e8022eb09472345018..37e73438fd8d3757e75801915da5c691fb0b4c38 100644
--- a/crates/provider/src/native/node.rs
+++ b/crates/provider/src/native/node.rs
@@ -226,9 +226,14 @@ where
             .current_dir(&self.base_dir)
             .spawn()
             .map_err(|err| ProviderError::NodeSpawningFailed(self.name.to_string(), err.into()))?;
-
-        let stdout = process.stdout.take().expect("infaillible, stdout is piped");
-        let stderr = process.stderr.take().expect("infaillible, stderr is piped");
+        let stdout = process
+            .stdout
+            .take()
+            .expect(&format!("infallible, stdout is piped {THIS_IS_A_BUG}"));
+        let stderr = process
+            .stderr
+            .take()
+            .expect(&format!("infallible, stderr is piped {THIS_IS_A_BUG}"));
 
         self.process.write().await.replace(process);