# .gitlab-ci.yml
#
# substrate
#
# pipelines can be triggered manually in the web UI
# setting DEPLOY_TAG will deploy only the tagged image
stages:
- test
- build
- publish
- kubernetes
- flaming-fir
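# the flaming-fir stage deploys validators to the Flaming Fir testnet (see the jobs at the bottom)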
variables:
GIT_STRATEGY: fetch
CARGO_HOME: "/ci-cache/${CI_PROJECT_NAME}/cargo/${CI_JOB_NAME}"
SCCACHE_DIR: "/ci-cache/${CI_PROJECT_NAME}/sccache"
CARGO_INCREMENTAL: 0
CI_SERVER_NAME: "GitLab CI"
DOCKER_OS: "debian:stretch"
ARCH: "x86_64"
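# CARGO_HOME and SCCACHE_DIR point into a cache volume on the runner host;
# CARGO_INCREMENTAL is disabled since sccache cannot cache incremental
# compilation artifacts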
.collect-artifacts: &collect-artifacts
artifacts:
name: "${CI_JOB_NAME}_${CI_COMMIT_REF_NAME}"
when: on_success
expire_in: 7 days
paths:
- artifacts/
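# top-level keys starting with a dot are hidden jobs; they serve as templates
# and are merged into real jobs below via YAML anchors, e.g. "<<: *collect-artifacts"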
.kubernetes-build: &kubernetes-build
tags:
- kubernetes-parity-build
environment:
name: parity-build
.docker-env: &docker-env
image: parity/rust-builder:latest
before_script:
- rustup show
- cargo --version
- sccache -s
only:
- tags
- master
- /^v[0-9]+\.[0-9]+.*$/ # e.g. v1.0, v2.1rc1
- schedules
- web
- /^[0-9]+$/ # PRs
tags:
- linux-docker
#### stage: test
check-runtime:
stage: test
image: parity/tools:latest
<<: *kubernetes-build
only:
- /^[0-9]+$/
variables:
GITLAB_API: "https://gitlab.parity.io/api/v4"
GITHUB_API_PROJECT: "parity%2Finfrastructure%2Fgithub-api"
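# "%2F" is a URL-encoded "/": parity/infrastructure/github-api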
script:
- ./scripts/gitlab/check_runtime.sh
allow_failure: true
check-line-width:
stage: test
image: parity/tools:latest
<<: *kubernetes-build
only:
- /^[0-9]+$/
script:
- ./scripts/gitlab/check_line_width.sh
allow_failure: true
cargo-audit:
stage: test
<<: *docker-env
except:
- /^[0-9]+$/
script:
- cargo audit
allow_failure: true
cargo-check-benches:
stage: test
<<: *docker-env
script:
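# BUILD_DUMMY_WASM_BINARY=1 makes wasm-builder substitute a dummy WASM
# runtime instead of compiling the real one, which is enough for cargo check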
- BUILD_DUMMY_WASM_BINARY=1 time cargo check --benches
- sccache -s
cargo-check-subkey:
stage: test
<<: *docker-env
except:
- /^v[0-9]+\.[0-9]+.*$/ # e.g. v1.0, v2.1rc1
script:
- cd ./subkey
- BUILD_DUMMY_WASM_BINARY=1 time cargo check --release # check in release mode so the cached artifacts can be reused when building subkey
- sccache -s
test-linux-stable: &test-linux
stage: test
<<: *docker-env
variables:
# Enable debug assertions since we are running optimized builds for testing
# but still want to have debug assertions.
RUSTFLAGS: -Cdebug-assertions=y
except:
variables:
- $DEPLOY_TAG
script:
- time cargo test --all --release --verbose --locked
- sccache -s
test-srml-staking: &test-srml-staking
stage: test
<<: *docker-env
variables:
# Enable debug assertions since we are running optimized builds for testing
# but still want to have debug assertions.
RUSTFLAGS: -Cdebug-assertions=y
RUST_BACKTRACE: 1
except:
variables:
- $DEPLOY_TAG
only:
changes:
- .gitlab-ci.yml
- srml/staking/*
script:
- cd srml/staking/
- time cargo test --release --verbose --no-default-features --features std
- sccache -s
test-linux-stable-int:
<<: *test-linux
except:
refs:
- /^v[0-9]+\.[0-9]+.*$/ # e.g. v1.0, v2.1rc1
variables:
- $DEPLOY_TAG
script:
- echo "___Logs will be partly shown at the end in case of failure.___"
- echo "___Full log will be saved to the job artifacts only in case of failure.___"
- RUST_LOG=sync=trace,consensus=trace,client=trace,state-db=trace,db=trace,forks=trace,state_db=trace,storage_cache=trace
time cargo test -p node-cli --release --verbose --locked -- --ignored --test-threads=1
&> ${CI_COMMIT_SHORT_SHA}_int_failure.log
- sccache -s
after_script:
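# print everything from the first line matching FAILED to the end of the log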
- awk '/FAILED/,0' ${CI_COMMIT_SHORT_SHA}_int_failure.log
artifacts:
name: $CI_COMMIT_SHORT_SHA
when: on_failure
expire_in: 24 hrs
paths:
- ${CI_COMMIT_SHORT_SHA}_int_failure.log
check-web-wasm:
stage: test
<<: *docker-env
except:
- /^v[0-9]+\.[0-9]+.*$/ # e.g. v1.0, v2.1rc1
script:
# WASM support is in progress. As more and more crates support WASM, we
# should add entries here. See https://github.com/paritytech/substrate/issues/2416
- time cargo web build -p sr-io
- time cargo web build -p sr-primitives
- time cargo web build -p sr-std
- time cargo web build -p substrate-client
- time cargo web build -p substrate-consensus-aura
- time cargo web build -p substrate-consensus-babe
- time cargo web build -p substrate-consensus-common
- time cargo web build -p substrate-keyring
- time cargo web build -p substrate-keystore
- time cargo web build -p substrate-executor
- time cargo web build -p substrate-network
- time cargo web build -p substrate-offchain
- time cargo web build -p substrate-panic-handler
- time cargo web build -p substrate-peerset
- time cargo web build -p substrate-primitives
# TODO: we can't use cargo web until https://github.com/paritytech/jsonrpc/pull/436 is deployed
- time cargo build -p substrate-rpc-servers --target wasm32-unknown-unknown
- time cargo web build -p substrate-serializer
- time cargo web build -p substrate-state-db
- time cargo web build -p substrate-state-machine
- time cargo web build -p substrate-telemetry
- time cargo web build -p substrate-trie
- sccache -s
.build-only: &build-only
only:
- master
- tags
- web
#### stage: build
build-linux-release:
stage: build
<<: *collect-artifacts
<<: *docker-env
<<: *build-only
except:
variables:
- $DEPLOY_TAG
script:
- time cargo build --release --verbose
- mkdir -p ./artifacts
- mv ./target/release/substrate ./artifacts/.
- echo -n "Substrate version = "
- if [ "${CI_COMMIT_TAG}" ]; then
echo "${CI_COMMIT_TAG}" | tee ./artifacts/VERSION;
else
./artifacts/substrate --version |
sed -n -r 's/^substrate ([0-9.]+.*-[0-9a-f]{7,13})-.*$/\1/p' |
tee ./artifacts/VERSION;
fi
- sha256sum ./artifacts/substrate | tee ./artifacts/substrate.sha256
- printf '\n# building node-template\n\n'
- ./scripts/node-template-release.sh ./artifacts/substrate-node-template.tar.gz
- cp -r scripts/docker/* ./artifacts
- sccache -s
build-linux-subkey:
stage: build
<<: *collect-artifacts
<<: *docker-env
# <<: *build-only
except:
variables:
- $DEPLOY_TAG
script:
- cd ./subkey
- BUILD_DUMMY_WASM_BINARY=1 time cargo build --release --verbose
- cd ..
# - time cargo build --release
- sccache -s
- mkdir -p ./artifacts
- mv ./target/release/subkey ./artifacts/.
- echo -n "Subkey version = "
- ./artifacts/subkey --version |
sed -n -r 's/^subkey ([0-9.]+.*)/\1/p' |
tee ./artifacts/SUBKEY-VERSION;
- sha256sum ./artifacts/subkey | tee ./artifacts/subkey.sha256
build-rust-doc-release:
stage: build
<<: *docker-env
allow_failure: true
artifacts:
name: "${CI_JOB_NAME}_${CI_COMMIT_REF_NAME}-doc"
when: on_success
expire_in: 7 days
paths:
- ./crate-docs
<<: *build-only
script:
- rm -f ./crate-docs/index.html # its reappearance serves as an indicator that the doc build succeeded
- BUILD_DUMMY_WASM_BINARY=1 time cargo +nightly doc --release --all --verbose
- cp -R ./target/doc ./crate-docs
- echo "" > ./crate-docs/index.html
- sccache -s
#### stage: publish
.publish-build: &publish-build
stage: publish
dependencies:
- build-linux-release
- build-linux-subkey
<<: *build-only
<<: *kubernetes-build
publish-docker-release:
<<: *publish-build
image: docker:stable
services:
- docker:dind
# collect VERSION artifact here to pass it on to kubernetes
<<: *collect-artifacts
variables:
DOCKER_HOST: tcp://localhost:2375
DOCKER_DRIVER: overlay2
GIT_STRATEGY: none
# DOCKERFILE: scripts/docker/Dockerfile
CONTAINER_IMAGE: parity/substrate
before_script:
- test "$Docker_Hub_User_Parity" -a "$Docker_Hub_Pass_Parity"
|| ( echo "no docker credentials provided"; exit 1 )
- docker login -u "$Docker_Hub_User_Parity" -p "$Docker_Hub_Pass_Parity"
- docker info
script:
- VERSION="$(cat ./artifacts/VERSION)"
- echo "Substrate version = ${VERSION}"
- test -z "${VERSION}" && exit 1
- cd ./artifacts
- docker build
--build-arg VCS_REF="${CI_COMMIT_SHA}"
--build-arg BUILD_DATE="$(date -u '+%Y-%m-%dT%H:%M:%SZ')"
--tag $CONTAINER_IMAGE:$VERSION
--tag $CONTAINER_IMAGE:latest .
- docker push $CONTAINER_IMAGE:$VERSION
- docker push $CONTAINER_IMAGE:latest
after_script:
- docker logout
# only VERSION information is needed for the deployment
- find ./artifacts/ -depth -not -name VERSION -not -name artifacts -delete
publish-s3-release:
<<: *publish-build
image: parity/awscli:latest
variables:
GIT_STRATEGY: none
BUCKET: "releases.parity.io"
PREFIX: "substrate/${ARCH}-${DOCKER_OS}"
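# everything is uploaded to s3://${BUCKET}/${PREFIX}/<VERSION>/, then copied
# server-side into .../latest/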
script:
- aws s3 sync ./artifacts/ s3://${BUCKET}/${PREFIX}/$(cat ./artifacts/VERSION)/
- echo "update objects in latest path"
- for file in ./artifacts/*; do
name="$(basename ${file})";
aws s3api copy-object
--copy-source ${BUCKET}/${PREFIX}/$(cat ./artifacts/VERSION)/${name}
--bucket ${BUCKET} --key ${PREFIX}/latest/${name};
done
after_script:
- aws s3 ls s3://${BUCKET}/${PREFIX}/latest/
--recursive --human-readable --summarize
publish-s3-doc:
stage: publish
image: parity/awscli:latest
allow_failure: true
dependencies:
- build-rust-doc-release
cache: {}
<<: *build-only
<<: *kubernetes-build
variables:
GIT_STRATEGY: none
BUCKET: "releases.parity.io"
PREFIX: "substrate-rustdoc"
script:
- test -r ./crate-docs/index.html || (
echo "./crate-docs/index.html not present, build:rust:doc:release job not complete";
exit 1
)
- aws s3 sync --delete --size-only --only-show-errors
./crate-docs/ s3://${BUCKET}/${PREFIX}/
after_script:
- aws s3 ls s3://${BUCKET}/${PREFIX}/
--human-readable --summarize
publish-gh-doc:
stage: publish
image: parity/tools:latest
allow_failure: true
dependencies:
- build-rust-doc-release
cache: {}
<<: *build-only
<<: *kubernetes-build
variables:
GIT_STRATEGY: none
GITHUB_API: "https://api.github.com"
script:
- test -r ./crate-docs/index.html || (
echo "./crate-docs/index.html not present, build:rust:doc:release job not complete";
exit 1
)
- test "${GITHUB_USER}" -a "${GITHUB_EMAIL}" -a "${GITHUB_TOKEN}" || (
echo "environment variables for github insufficient";
exit 1
)
- |
  cat > ${HOME}/.gitconfig <<EOC
  [user]
  name = "${GITHUB_USER}"
  email = "${GITHUB_EMAIL}"

  [url "https://${GITHUB_USER}:${GITHUB_TOKEN}@github.com"]
  insteadOf = "https://github.com"
  EOC
- unset GITHUB_TOKEN
- git clone --depth 1 https://github.com/substrate-developer-hub/rustdocs.git
- rm -rf ./rustdocs/${CI_COMMIT_REF_NAME}/
- mkdir -p ./rustdocs/${CI_COMMIT_REF_NAME}/
- mv -v ./crate-docs/* ./rustdocs/${CI_COMMIT_REF_NAME}/
- cd ./rustdocs/
- git add --all .
- git commit -m "update rustdocs for ${CI_COMMIT_REF_NAME}"
# redact the access token from git's output before it reaches the job log
- git push origin master 2>&1 | sed -r "s|(${GITHUB_USER}):[a-f0-9]+@|\1:REDACTED@|g"
after_script:
- rm -vrf ${HOME}/.gitconfig
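#### stage: kubernetes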
.deploy-template: &deploy
stage: kubernetes
when: manual
retry: 1
image: parity/kubetools:latest
<<: *build-only
tags:
# this runner is used for the deployment
- kubernetes-parity-build
before_script:
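# when the pipeline was not triggered with DEPLOY_TAG set, fall back to the
# VERSION artifact produced by build-linux-release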
- test -z "${DEPLOY_TAG}" &&
test -f ./artifacts/VERSION &&
DEPLOY_TAG="$(cat ./artifacts/VERSION)"
- test "${DEPLOY_TAG}" || ( echo "Neither DEPLOY_TAG nor VERSION information available"; exit 1 )
script:
- echo "Substrate version = ${DEPLOY_TAG}"
# use helm to render the template, then apply it with kubectl
- helm template
--values ./scripts/kubernetes/values.yaml
--set image.tag=${DEPLOY_TAG}
--set validator.keys=${VALIDATOR_KEYS}
./scripts/kubernetes | kubectl apply -f - --dry-run=false
- echo "# substrate namespace ${KUBE_NAMESPACE}"
- kubectl -n ${KUBE_NAMESPACE} get all
- echo "# substrate's nodes' external ip addresses:"
- kubectl get nodes -l node=substrate
-o jsonpath='{range .items[*]}{.metadata.name}{"\t"}{range @.status.addresses[?(@.type=="ExternalIP")]}{.address}{"\n"}{end}'
- echo "# substrate' nodes"
- kubectl -n ${KUBE_NAMESPACE} get pods
-o jsonpath='{range .items[*]}{.metadata.name}{"\t"}{.spec.nodeName}{"\n"}{end}'
- echo "# wait for the rollout to complete"
- kubectl -n ${KUBE_NAMESPACE} rollout status statefulset/substrate
# have environment:url eventually point to the logs
.deploy-cibuild: &deploy-cibuild
<<: *deploy
dependencies:
- publish-docker-release
.deploy-tag: &deploy-tag
<<: *deploy
only:
variables:
- $DEPLOY_TAG
# have environment:url eventually point to the logs
deploy-ew3:
<<: *deploy-cibuild
environment:
name: parity-prod-ew3
deploy-ue1:
<<: *deploy-cibuild
environment:
name: parity-prod-ue1
deploy-ew3-tag:
<<: *deploy-tag
environment:
name: parity-prod-ew3
deploy-ue1-tag:
<<: *deploy-tag
environment:
name: parity-prod-ue1
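#### stage: flaming-fir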
.validator-deploy: &validator-deploy
stage: flaming-fir
dependencies:
- build-linux-release
image: parity/azure-ansible:v1
allow_failure: true
when: manual
tags:
- linux-docker
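# GitLab groups jobs whose names differ only in numbers, so "validator N M"
# is shown as "validator N/M" in the pipeline graph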
validator 1 4:
<<: *validator-deploy
script:
- ./scripts/flamingfir-deploy.sh flamingfir-validator1
validator 2 4:
<<: *validator-deploy
script:
- ./scripts/flamingfir-deploy.sh flamingfir-validator2
validator 3 4:
<<: *validator-deploy
script:
- ./scripts/flamingfir-deploy.sh flamingfir-validator3
validator 4 4:
<<: *validator-deploy
script:
- ./scripts/flamingfir-deploy.sh flamingfir-validator4