diff --git a/.gitlab-ci.yaml b/.gitlab-ci.yaml
new file mode 100644
index 0000000000000000000000000000000000000000..5c73169fe59470196c1c0bb83e4a78d120a0830c
--- /dev/null
+++ b/.gitlab-ci.yaml
@@ -0,0 +1,20 @@
+stages:
+  - integration-test
+
+include:
+  # ci image
+  - project: parity/infrastructure/ci_cd/shared
+    ref: main
+    file: /common/ci-unified.yml
+
+polkadot-integration-test:
+  stage: integration-test
+  image: "${CI_IMAGE}"
+  tags:
+    - zombienet-polkadot-integration-test
+  script:
+    - kubectl get ns
+  rules:
+    - when: manual
+  variables:
+    CI_IMAGE: !reference [.ci-unified, variables, CI_IMAGE]
diff --git a/crates/orchestrator/src/network.rs b/crates/orchestrator/src/network.rs
index 66643b95d85fc3d7d9538ef1afd0459e4a437603..ae0fc3e90546cad5b6c97f2883c504ab6c57a87a 100644
--- a/crates/orchestrator/src/network.rs
+++ b/crates/orchestrator/src/network.rs
@@ -10,7 +10,7 @@ use configuration::{
     types::{Arg, Command, Image, Port},
     ParachainConfig, ParachainConfigBuilder,
 };
-use provider::{types::TransferedFile, DynNamespace};
+use provider::{types::TransferedFile, DynNamespace, ProviderError};
 use support::fs::FileSystem;
 
 use self::{node::NetworkNode, parachain::Parachain, relaychain::Relaychain};
@@ -73,13 +73,18 @@ impl<T: FileSystem> Network<T> {
     }
 
     // Pubic API
+    pub fn ns_name(&self) -> String {
+        self.ns.name().to_string()
+    }
 
     pub fn relaychain(&self) -> &Relaychain {
         &self.relay
     }
 
     // Teardown the network
-    // destroy()
+    pub async fn destroy(self) -> Result<(), ProviderError> {
+        self.ns.destroy().await
+    }
 
     /// Add a node to the relaychain
     ///
diff --git a/crates/orchestrator/src/network/node.rs b/crates/orchestrator/src/network/node.rs
index e5ac2256bc3bc78f33d53f2fd4a8a5a635cf8da3..5c4a4cd1ce2c9147a8456887946d08d730b7db60 100644
--- a/crates/orchestrator/src/network/node.rs
+++ b/crates/orchestrator/src/network/node.rs
@@ -1,4 +1,4 @@
-use std::{path::Path, sync::Arc, thread, time::Duration};
+use std::{path::Path, sync::Arc, time::Duration};
 
 use anyhow::anyhow;
 use pjs_rs::ReturnValue;
@@ -7,6 +7,7 @@ use provider::DynNode;
 use serde_json::json;
 use subxt::{backend::rpc::RpcClient, OnlineClient};
 use tokio::sync::RwLock;
+use tracing::trace;
 
 use crate::{network_spec::node::NodeSpec, shared::types::PjsResult};
 
@@ -79,10 +80,8 @@ impl NetworkNode {
         args: Vec<serde_json::Value>,
     ) -> Result<PjsResult, anyhow::Error> {
         let code = pjs_build_template(self.ws_uri(), code.as_ref(), args);
-        let value = match thread::spawn(|| pjs_inner(code))
-            .join()
-            .map_err(|_| anyhow!("[pjs] Thread panicked"))??
-        {
+        trace!("Code to execute: {code}");
+        let value = match pjs_inner(code)? {
             ReturnValue::Deserialized(val) => Ok(val),
             ReturnValue::CantDeserialize(msg) => Err(msg),
         };
@@ -102,10 +101,9 @@ impl NetworkNode {
     ) -> Result<PjsResult, anyhow::Error> {
         let content = std::fs::read_to_string(file)?;
         let code = pjs_build_template(self.ws_uri(), content.as_ref(), args);
-        let value = match thread::spawn(|| pjs_inner(code))
-            .join()
-            .map_err(|_| anyhow!("[pjs] Thread panicked"))??
-        {
+        trace!("Code to execute: {code}");
+
+        let value = match pjs_inner(code)? {
             ReturnValue::Deserialized(val) => Ok(val),
             ReturnValue::CantDeserialize(msg) => Err(msg),
         };
@@ -222,8 +220,18 @@ fn pjs_build_template(ws_uri: &str, content: &str, args: Vec<serde_json::Value>)
 
 // Since pjs-rs run a custom javascript runtime (using deno_core) we need to
 // execute in an isolated thread.
-#[tokio::main(flavor = "current_thread")]
-async fn pjs_inner(code: String) -> Result<ReturnValue, anyhow::Error> {
-    // Arguments are already encoded in the code built from the template.
-    pjs_rs::run_ts_code(code, None).await
+fn pjs_inner(code: String) -> Result<ReturnValue, anyhow::Error> {
+    let rt = tokio::runtime::Builder::new_current_thread()
+        .enable_all()
+        .build()?;
+
+    std::thread::spawn(move || {
+        rt.block_on(async move {
+            let value = pjs_rs::run_ts_code(code, None).await;
+            trace!("ts_code return: {:?}", value);
+            value
+        })
+    })
+    .join()
+    .map_err(|_| anyhow!("[pjs] Thread panicked"))?
 }
diff --git a/crates/provider/src/kubernetes/client.rs b/crates/provider/src/kubernetes/client.rs
index 7f007886feec4ca27faf4d3e02e4f9be1f7312ec..b447e3cfb37fc7e841c84edc98c15e46956a7142 100644
--- a/crates/provider/src/kubernetes/client.rs
+++ b/crates/provider/src/kubernetes/client.rs
@@ -86,6 +86,17 @@ impl KubernetesClient {
         Ok(namespace)
     }
 
+    pub(super) async fn delete_namespace(&self, name: &str) -> Result<()> {
+        let namespaces = Api::<Namespace>::all(self.inner.clone());
+
+        namespaces
+            .delete(name, &DeleteParams::default())
+            .await
+            .map_err(|err| Error::from(anyhow!("error while deleting namespace {name}: {err}")))?;
+
+        Ok(())
+    }
+
     pub(super) async fn create_config_map_from_file(
         &self,
         namespace: &str,
diff --git a/crates/provider/src/kubernetes/namespace.rs b/crates/provider/src/kubernetes/namespace.rs
index 2171671b45d9167c2b56479b636a40f3af0c6568..b4ffe3f8765f27db20043faa303a5df401712cac 100644
--- a/crates/provider/src/kubernetes/namespace.rs
+++ b/crates/provider/src/kubernetes/namespace.rs
@@ -421,9 +421,11 @@ where
     }
 
     async fn destroy(&self) -> Result<(), ProviderError> {
-        for node in self.nodes.read().await.values() {
-            node.destroy().await?;
-        }
+        let _ = self
+            .k8s_client
+            .delete_namespace(&self.name)
+            .await
+            .map_err(|err| ProviderError::DeleteNamespaceFailed(self.name.clone(), err.into()))?;
 
         if let Some(provider) = self.provider.upgrade() {
             provider.namespaces.write().await.remove(&self.name);
diff --git a/crates/provider/src/lib.rs b/crates/provider/src/lib.rs
index 9c13d0c2caede61b2d175b19b51e467a1bc5e10c..da238652a5eef7223d225058b8dd8d1b223127ae 100644
--- a/crates/provider/src/lib.rs
+++ b/crates/provider/src/lib.rs
@@ -97,6 +97,9 @@ pub enum ProviderError {
 
     #[error("Error creating port-forward '{0}:{1}': {2}")]
     PortForwardError(u16, u16, anyhow::Error),
+
+    #[error("Failed to delete namespace '{0}': {1}")]
+    DeleteNamespaceFailed(String, anyhow::Error),
 }
 
 #[async_trait]
diff --git a/crates/sdk/Cargo.toml b/crates/sdk/Cargo.toml
index dcba750f206ecc6f82c55b61a222fda02c82991b..00e338707cb84f5a9c140580bb902e6473369289 100644
--- a/crates/sdk/Cargo.toml
+++ b/crates/sdk/Cargo.toml
@@ -23,3 +23,12 @@ configuration = { workspace = true }
 orchestrator = { workspace = true }
 provider = { workspace = true }
 support = { workspace = true }
+
+[features]
+ci-k8s = []
+
+[dev-dependencies]
+tracing-subscriber = "0.3"
+kube = { workspace = true, features = ["ws", "runtime"] }
+k8s-openapi = { workspace = true, features = ["v1_27"] }
+serde_json = { workspace = true }
diff --git a/crates/sdk/src/lib.rs b/crates/sdk/src/lib.rs
index 5933a722c09f7e275db15cdd3aef37e326c04ddb..9e06661c63d1fe1bb7ad97bb7a31f5ed82816ac6 100644
--- a/crates/sdk/src/lib.rs
+++ b/crates/sdk/src/lib.rs
@@ -10,7 +10,7 @@ use support::fs::local::LocalFileSystem;
 
 #[async_trait]
 pub trait NetworkConfigExt {
-    /// Spawns a network using the native provider.
+    /// Spawns a network using the native or k8s provider.
     ///
     /// # Example:
     /// ```rust
diff --git a/crates/sdk/tests/smoke.rs b/crates/sdk/tests/smoke.rs
new file mode 100644
index 0000000000000000000000000000000000000000..ce0c2bfae1cac5c13295a877bac3c44fc2eb704b
--- /dev/null
+++ b/crates/sdk/tests/smoke.rs
@@ -0,0 +1,142 @@
+use std::{panic, pin::Pin};
+
+use configuration::{NetworkConfig, NetworkConfigBuilder};
+use futures::{stream::StreamExt, Future};
+use k8s_openapi::api::core::v1::Namespace;
+use kube::{api::DeleteParams, Api};
+use serde_json::json;
+use support::fs::local::LocalFileSystem;
+use zombienet_sdk::{Network, NetworkConfigExt};
+
+fn small_network() -> NetworkConfig {
+    NetworkConfigBuilder::new()
+        .with_relaychain(|r| {
+            r.with_chain("rococo-local")
+                .with_default_command("polkadot")
+                .with_default_image("docker.io/parity/polkadot:v1.4.0")
+                .with_node(|node| node.with_name("alice"))
+                .with_node(|node| node.with_name("bob"))
+        })
+        .with_parachain(|p| {
+            p.with_id(2000).cumulus_based(true).with_collator(|n| {
+                n.with_name("collator")
+                    .with_command("test-parachain")
+                    .with_image(
+                    "docker.io/paritypr/test-parachain:c90f9713b5bc73a9620b2e72b226b4d11e018190",
+                )
+            })
+        })
+        .build()
+        .unwrap()
+}
+
+pub fn run_k8s_test<T>(config: NetworkConfig, test: T)
+where
+    T: panic::UnwindSafe,
+    T: FnOnce(Network<LocalFileSystem>) -> Pin<Box<dyn Future<Output = ()> + 'static + Send>>,
+{
+    use std::time::Instant;
+
+    let mut ns_name: Option<String> = None;
+    let runtime = tokio::runtime::Builder::new_multi_thread()
+        .worker_threads(2)
+        .enable_all()
+        .build()
+        .unwrap();
+
+    let result = std::panic::catch_unwind(std::panic::AssertUnwindSafe(|| {
+        runtime.block_on(async {
+            let now = Instant::now();
+
+            #[allow(unused_mut)]
+            let mut network = config.spawn_k8s().await.unwrap();
+
+            let elapsed = now.elapsed();
+            println!("🚀🚀🚀🚀 network deployed in {:.2?}", elapsed);
+
+            // get ns name to cleanup if test fails
+            ns_name = Some(network.ns_name());
+
+            // run some tests on the newly started network
+            test(network).await;
+        })
+    }));
+
+    // If we created a new namespace, always clean it up (even if the test panicked)
+    if let Some(ns_name) = ns_name {
+        // remove the ns
+        runtime.block_on(async {
+            let k8s_client = kube::Client::try_default().await.unwrap();
+            let namespaces = Api::<Namespace>::all(k8s_client);
+
+            _ = namespaces.delete(&ns_name, &DeleteParams::default()).await;
+        })
+    }
+
+    assert!(result.is_ok());
+}
+
+#[test]
+#[cfg_attr(not(feature = "ci-k8s"), ignore = "Run with k8s")]
+fn basic_functionalities_should_works() {
+    tracing_subscriber::fmt::init();
+    let config = small_network();
+    run_k8s_test(config, |network| {
+        Box::pin(async move {
+            // Get a ref to the node
+            let alice = network.get_node("alice").unwrap();
+
+            let role = alice.reports("node_roles").await.unwrap();
+            println!("Role is {role}");
+            assert_eq!(role, 4.0);
+
+            // subxt
+            let client = alice.client::<subxt::PolkadotConfig>().await.unwrap();
+
+            // wait 3 blocks
+            let mut blocks = client.blocks().subscribe_finalized().await.unwrap().take(3);
+            while let Some(block) = blocks.next().await {
+                println!("Block #{}", block.unwrap().header().number);
+            }
+
+            // drop the client
+            drop(client);
+
+            // check best block through metrics
+            let best_block = alice
+                .reports("block_height{status=\"best\"}")
+                .await
+                .unwrap();
+
+            assert!(best_block >= 2.0, "Current best {}", best_block);
+
+            // pjs
+            let para_is_registered = r#"
+            const paraId = arguments[0];
+            const parachains: number[] = (await api.query.paras.parachains()) || [];
+            const isRegistered = parachains.findIndex((id) => id.toString() == paraId.toString()) >= 0;
+            return isRegistered;
+            "#;
+
+            let is_registered = alice
+                .pjs(para_is_registered, vec![json!(2000)])
+                .await
+                .unwrap()
+                .unwrap();
+            assert_eq!(is_registered, json!(true));
+
+            // run pjs with code
+            let query_paras = r#"
+            const parachains: number[] = (await api.query.paras.parachains()) || [];
+            return parachains.toJSON()
+            "#;
+
+            let paras = alice.pjs(query_paras, vec![]).await.unwrap();
+
+            println!("parachains registered: {:?}", paras);
+
+            // tear down
+            network.destroy().await.unwrap();
+        })
+    });
+}