From 77d99cd0f2e49b723e69d372b990f75b2175ea98 Mon Sep 17 00:00:00 2001
From: Hamid <33328203+emamihe@users.noreply.github.com>
Date: Tue, 20 Feb 2024 17:08:03 +0300
Subject: [PATCH] chore: gitlab runner test for new repo (#163)

This is a very simple GitLab CI manifest that checks whether the tag on
this pipeline schedules it on the Zombienet k8s runner, and whether the
ServiceAccount (SA) injected into the runners is privileged enough to
run commands within the cluster.
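
A minimal sketch of how to exercise the new smoke test locally
(assumptions: kubectl already points at the target cluster, and the sdk
package is named `zombienet-sdk`; the `ci-k8s` cargo feature gate is the
one added in this patch):

    kubectl get ns                                 # same check the CI job runs
    cargo test -p zombienet-sdk --features ci-k8s  # un-ignores the smoke test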

---------

Co-authored-by: Javier Viola <javier@parity.io>
---
 .gitlab-ci.yaml                             |  20 +++
 crates/orchestrator/src/network.rs          |   9 +-
 crates/orchestrator/src/network/node.rs     |  34 +++--
 crates/provider/src/kubernetes/client.rs    |  11 ++
 crates/provider/src/kubernetes/namespace.rs |   8 +-
 crates/provider/src/lib.rs                  |   3 +
 crates/sdk/Cargo.toml                       |   9 ++
 crates/sdk/src/lib.rs                       |   2 +-
 crates/sdk/tests/smoke.rs                   | 142 ++++++++++++++++++++
 9 files changed, 219 insertions(+), 19 deletions(-)
 create mode 100644 .gitlab-ci.yaml
 create mode 100644 crates/sdk/tests/smoke.rs

diff --git a/.gitlab-ci.yaml b/.gitlab-ci.yaml
new file mode 100644
index 0000000..5c73169
--- /dev/null
+++ b/.gitlab-ci.yaml
@@ -0,0 +1,20 @@
+stages:
+  - integration-test
+
+include:
+  # ci image
+  - project: parity/infrastructure/ci_cd/shared
+    ref: main
+    file: /common/ci-unified.yml
+
+polkadot-integration-test:
+  stage: integration-test
+  image: "${CI_IMAGE}"
+  tags:
+    - zombienet-polkadot-integration-test
+  script:
+    - kubectl get ns
+  rules:
+    - when: manual
+  variables:
+    CI_IMAGE: !reference [.ci-unified, variables, CI_IMAGE]
diff --git a/crates/orchestrator/src/network.rs b/crates/orchestrator/src/network.rs
index 66643b9..ae0fc3e 100644
--- a/crates/orchestrator/src/network.rs
+++ b/crates/orchestrator/src/network.rs
@@ -10,7 +10,7 @@ use configuration::{
     types::{Arg, Command, Image, Port},
     ParachainConfig, ParachainConfigBuilder,
 };
-use provider::{types::TransferedFile, DynNamespace};
+use provider::{types::TransferedFile, DynNamespace, ProviderError};
 use support::fs::FileSystem;
 
 use self::{node::NetworkNode, parachain::Parachain, relaychain::Relaychain};
@@ -73,13 +73,18 @@ impl<T: FileSystem> Network<T> {
     }
 
     // Public API
+    pub fn ns_name(&self) -> String {
+        self.ns.name().to_string()
+    }
 
     pub fn relaychain(&self) -> &Relaychain {
         &self.relay
     }
 
     // Teardown the network
-    // destroy()
+    pub async fn destroy(self) -> Result<(), ProviderError> {
+        self.ns.destroy().await
+    }
 
     /// Add a node to the relaychain
     ///
diff --git a/crates/orchestrator/src/network/node.rs b/crates/orchestrator/src/network/node.rs
index e5ac225..5c4a4cd 100644
--- a/crates/orchestrator/src/network/node.rs
+++ b/crates/orchestrator/src/network/node.rs
@@ -1,4 +1,4 @@
-use std::{path::Path, sync::Arc, thread, time::Duration};
+use std::{path::Path, sync::Arc, time::Duration};
 
 use anyhow::anyhow;
 use pjs_rs::ReturnValue;
@@ -7,6 +7,7 @@ use provider::DynNode;
 use serde_json::json;
 use subxt::{backend::rpc::RpcClient, OnlineClient};
 use tokio::sync::RwLock;
+use tracing::trace;
 
 use crate::{network_spec::node::NodeSpec, shared::types::PjsResult};
 
@@ -79,10 +80,8 @@ impl NetworkNode {
         args: Vec<serde_json::Value>,
     ) -> Result<PjsResult, anyhow::Error> {
         let code = pjs_build_template(self.ws_uri(), code.as_ref(), args);
-        let value = match thread::spawn(|| pjs_inner(code))
-            .join()
-            .map_err(|_| anyhow!("[pjs] Thread panicked"))??
-        {
+        trace!("Code to execute: {code}");
+        let value = match pjs_inner(code)? {
             ReturnValue::Deserialized(val) => Ok(val),
             ReturnValue::CantDeserialize(msg) => Err(msg),
         };
@@ -102,10 +101,9 @@ impl NetworkNode {
     ) -> Result<PjsResult, anyhow::Error> {
         let content = std::fs::read_to_string(file)?;
         let code = pjs_build_template(self.ws_uri(), content.as_ref(), args);
-        let value = match thread::spawn(|| pjs_inner(code))
-            .join()
-            .map_err(|_| anyhow!("[pjs] Thread panicked"))??
-        {
+        trace!("Code to execute: {code}");
+
+        let value = match pjs_inner(code)? {
             ReturnValue::Deserialized(val) => Ok(val),
             ReturnValue::CantDeserialize(msg) => Err(msg),
         };
@@ -222,8 +220,18 @@ fn pjs_build_template(ws_uri: &str, content: &str, args: Vec<serde_json::Value>)
 
 // Since pjs-rs runs a custom JavaScript runtime (using deno_core) we need to
 // execute it in an isolated thread.
-#[tokio::main(flavor = "current_thread")]
-async fn pjs_inner(code: String) -> Result<ReturnValue, anyhow::Error> {
-    // Arguments are already encoded in the code built from the template.
-    pjs_rs::run_ts_code(code, None).await
+fn pjs_inner(code: String) -> Result<ReturnValue, anyhow::Error> {
+    let rt = tokio::runtime::Builder::new_current_thread()
+        .enable_all()
+        .build()?;
+
+    std::thread::spawn(move || {
+        rt.block_on(async move {
+            let value = pjs_rs::run_ts_code(code, None).await;
+            trace!("ts_code return: {:?}", value);
+            value
+        })
+    })
+    .join()
+    .map_err(|_| anyhow!("[pjs] Thread panicked"))?
 }
diff --git a/crates/provider/src/kubernetes/client.rs b/crates/provider/src/kubernetes/client.rs
index 7f00788..b447e3c 100644
--- a/crates/provider/src/kubernetes/client.rs
+++ b/crates/provider/src/kubernetes/client.rs
@@ -86,6 +86,17 @@ impl KubernetesClient {
         Ok(namespace)
     }
 
+    pub(super) async fn delete_namespace(&self, name: &str) -> Result<()> {
+        let namespaces = Api::<Namespace>::all(self.inner.clone());
+
+        namespaces
+            .delete(name, &DeleteParams::default())
+            .await
+            .map_err(|err| Error::from(anyhow!("error while deleting namespace {name}: {err}")))?;
+
+        Ok(())
+    }
+
     pub(super) async fn create_config_map_from_file(
         &self,
         namespace: &str,
diff --git a/crates/provider/src/kubernetes/namespace.rs b/crates/provider/src/kubernetes/namespace.rs
index 2171671..b4ffe3f 100644
--- a/crates/provider/src/kubernetes/namespace.rs
+++ b/crates/provider/src/kubernetes/namespace.rs
@@ -421,9 +421,11 @@ where
     }
 
     async fn destroy(&self) -> Result<(), ProviderError> {
-        for node in self.nodes.read().await.values() {
-            node.destroy().await?;
-        }
+        let _ = self
+            .k8s_client
+            .delete_namespace(&self.name)
+            .await
+            .map_err(|err| ProviderError::DeleteNamespaceFailed(self.name.clone(), err.into()))?;
 
         if let Some(provider) = self.provider.upgrade() {
             provider.namespaces.write().await.remove(&self.name);
diff --git a/crates/provider/src/lib.rs b/crates/provider/src/lib.rs
index 9c13d0c..da23865 100644
--- a/crates/provider/src/lib.rs
+++ b/crates/provider/src/lib.rs
@@ -97,6 +97,9 @@ pub enum ProviderError {
 
     #[error("Error creating port-forward '{0}:{1}': {2}")]
     PortForwardError(u16, u16, anyhow::Error),
+
+    #[error("Failed to delete namespace '{0}': {1}")]
+    DeleteNamespaceFailed(String, anyhow::Error),
 }
 
 #[async_trait]
diff --git a/crates/sdk/Cargo.toml b/crates/sdk/Cargo.toml
index dcba750..00e3387 100644
--- a/crates/sdk/Cargo.toml
+++ b/crates/sdk/Cargo.toml
@@ -23,3 +23,12 @@ configuration = { workspace = true }
 orchestrator = { workspace = true }
 provider = { workspace = true }
 support = { workspace = true }
+
+[features]
+ci-k8s = []
+
+[dev-dependencies]
+tracing-subscriber = "0.3"
+kube = { workspace = true, features = ["ws", "runtime"] }
+k8s-openapi = { workspace = true, features = ["v1_27"] }
+serde_json = { workspace = true }
diff --git a/crates/sdk/src/lib.rs b/crates/sdk/src/lib.rs
index 5933a72..9e06661 100644
--- a/crates/sdk/src/lib.rs
+++ b/crates/sdk/src/lib.rs
@@ -10,7 +10,7 @@ use support::fs::local::LocalFileSystem;
 
 #[async_trait]
 pub trait NetworkConfigExt {
-    /// Spawns a network using the native provider.
+    /// Spawns a network using the native or k8s provider.
     ///
     /// # Example:
     /// ```rust
diff --git a/crates/sdk/tests/smoke.rs b/crates/sdk/tests/smoke.rs
new file mode 100644
index 0000000..ce0c2bf
--- /dev/null
+++ b/crates/sdk/tests/smoke.rs
@@ -0,0 +1,142 @@
+use std::{panic, pin::Pin};
+
+use configuration::{NetworkConfig, NetworkConfigBuilder};
+use futures::{stream::StreamExt, Future};
+use k8s_openapi::api::core::v1::Namespace;
+use kube::{api::DeleteParams, Api};
+use serde_json::json;
+use support::fs::local::LocalFileSystem;
+use zombienet_sdk::{Network, NetworkConfigExt};
+
+fn small_network() -> NetworkConfig {
+    NetworkConfigBuilder::new()
+        .with_relaychain(|r| {
+            r.with_chain("rococo-local")
+                .with_default_command("polkadot")
+                .with_default_image("docker.io/parity/polkadot:v1.4.0")
+                .with_node(|node| node.with_name("alice"))
+                .with_node(|node| node.with_name("bob"))
+        })
+        .with_parachain(|p| {
+            p.with_id(2000).cumulus_based(true).with_collator(|n| {
+                n.with_name("collator")
+                    .with_command("test-parachain")
+                    .with_image(
+                    "docker.io/paritypr/test-parachain:c90f9713b5bc73a9620b2e72b226b4d11e018190",
+                )
+            })
+        })
+        .build()
+        .unwrap()
+}
+
+pub fn run_k8s_test<T>(config: NetworkConfig, test: T)
+where
+    T: panic::UnwindSafe,
+    T: FnOnce(Network<LocalFileSystem>) -> Pin<Box<dyn Future<Output = ()> + 'static + Send>>,
+{
+    use std::time::Instant;
+
+    let mut ns_name: Option<String> = None;
+    let runtime = tokio::runtime::Builder::new_multi_thread()
+        .worker_threads(2)
+        .enable_all()
+        .build()
+        .unwrap();
+
+    let result = std::panic::catch_unwind(std::panic::AssertUnwindSafe(|| {
+        runtime.block_on(async {
+            let now = Instant::now();
+
+            #[allow(unused_mut)]
+            let mut network = config.spawn_k8s().await.unwrap();
+
+            let elapsed = now.elapsed();
+            println!("🚀🚀🚀🚀 network deployed in {:.2?}", elapsed);
+
+            // get the ns name so we can clean up if the test fails
+            ns_name = Some(network.ns_name());
+
+            // run some tests on the newly started network
+            test(network).await;
+        })
+    }));
+
+    // If we created a new namespace, always clean up
+    if let Some(ns_name) = ns_name {
+        // remove the ns
+        runtime.block_on(async {
+            let k8s_client = kube::Client::try_default().await.unwrap();
+            let namespaces = Api::<Namespace>::all(k8s_client);
+
+            _ = namespaces.delete(&ns_name, &DeleteParams::default()).await;
+        })
+    }
+
+    assert!(result.is_ok());
+}
+
+#[test]
+#[cfg_attr(not(feature = "ci-k8s"), ignore = "requires a k8s cluster; run with --features ci-k8s")]
+fn basic_functionalities_should_works() {
+    tracing_subscriber::fmt::init();
+    let config = small_network();
+    run_k8s_test(config, |network| {
+        Box::pin(async move {
+            // Get a ref to the node
+            let alice = network.get_node("alice").unwrap();
+
+            let role = alice.reports("node_roles").await.unwrap();
+            println!("Role is {role}");
+            assert_eq!(role, 4.0);
+
+            // subxt
+            let client = alice.client::<subxt::PolkadotConfig>().await.unwrap();
+
+            // wait 3 blocks
+            let mut blocks = client.blocks().subscribe_finalized().await.unwrap().take(3);
+            while let Some(block) = blocks.next().await {
+                println!("Block #{}", block.unwrap().header().number);
+            }
+
+            // drop the client
+            drop(client);
+
+            // check best block through metrics
+            let best_block = alice
+                .reports("block_height{status=\"best\"}")
+                .await
+                .unwrap();
+
+            assert!(best_block >= 2.0, "Current best {}", best_block);
+
+            // pjs
+            let para_is_registered = r#"
+            const paraId = arguments[0];
+            const parachains: number[] = (await api.query.paras.parachains()) || [];
+            const isRegistered = parachains.findIndex((id) => id.toString() == paraId.toString()) >= 0;
+            return isRegistered;
+            "#;
+
+            let is_registered = alice
+                .pjs(para_is_registered, vec![json!(2000)])
+                .await
+                .unwrap()
+                .unwrap();
+            assert_eq!(is_registered, json!(true));
+
+            // run pjs with code
+            let query_paras = r#"
+            const parachains: number[] = (await api.query.paras.parachains()) || [];
+            return parachains.toJSON()
+            "#;
+
+            let paras = alice.pjs(query_paras, vec![]).await.unwrap();
+
+            println!("parachains registered: {:?}", paras);
+
+            // tear down
+            network.destroy().await.unwrap();
+        })
+    });
+}
-- 
GitLab