# .gitlab-ci.yml
#
# substrate
#
# pipelines can be triggered manually in the web
# setting DEPLOY_TAG will only deploy the tagged image
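#
# example (illustrative; TRIGGER_TOKEN and PROJECT_ID are placeholders, not
# variables defined in this file): a deploy-only pipeline can be triggered
# through GitLab's pipeline trigger API by passing DEPLOY_TAG as a variable:
#
#   curl -X POST \
#     -F "token=${TRIGGER_TOKEN}" \
#     -F "ref=master" \
#     -F "variables[DEPLOY_TAG]=v1.0.0" \
#     "https://gitlab.parity.io/api/v4/projects/${PROJECT_ID}/trigger/pipeline"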
stages:
  - pre-test
  - test
  - build
  - publish
  - kubernetes
  - flaming-fir
variables:
  CARGO_HOME: "/ci-cache/${CI_PROJECT_NAME}/cargo/${CI_JOB_NAME}"
  SCCACHE_DIR: "/ci-cache/${CI_PROJECT_NAME}/sccache"
  CI_SERVER_NAME: "GitLab CI"
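# CARGO_HOME is namespaced per job name, presumably so concurrent jobs do not
# contend for a single cargo cache; the sccache directory is shared across the
# project's jobs.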
.collect-artifacts: &collect-artifacts
  artifacts:
    name: "${CI_JOB_NAME}_${CI_COMMIT_REF_NAME}"
    when: on_success
    expire_in: 7 days
    paths:
      - artifacts/
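# a job opts in to artifact collection by merging this map into its own
# definition with `<<: *collect-artifacts` (as build-linux-release does below)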
.kubernetes-build: &kubernetes-build
tags:
- kubernetes-parity-build
environment:
name: parity-build
.docker-env: &docker-env
image: parity/rust-builder:latest
before_script:
- rustup show
- cargo --version
- sccache -s
only:
- tags
- master
- /^v[0-9]+\.[0-9]+.*$/ # i.e. v1.0, v2.1rc1
- schedules
- web
- /^[0-9]+$/ # PRs
tags:
- linux-docker
#### stage: test

check-runtime:
  stage: test
  image: parity/tools:latest
  <<: *kubernetes-build
  only:
    - /^[0-9]+$/
  variables:
    GITLAB_API: "https://gitlab.parity.io/api/v4"
    GITHUB_API_PROJECT: "parity%2Finfrastructure%2Fgithub-api"
  script:
    - ./scripts/gitlab/check_runtime.sh
check-line-width:
image: parity/tools:latest
<<: *kubernetes-build
only:
- /^[0-9]+$/
script:
- ./scripts/gitlab/check_line_width.sh
cargo-audit:
stage: pre-test
<<: *docker-env
except:
- /^[0-9]+$/
script:
- cargo audit
allow_failure: true
cargo-check-subkey:
stage: test
<<: *docker-env
except:
- /^v[0-9]+\.[0-9]+.*$/ # i.e. v1.0, v2.1rc1
script:
- cd ./subkey
- time cargo check --release # makes sense to save artifacts for building it
- sccache -s
test-linux-stable: &test-linux
  stage: test
  <<: *docker-env
  variables:
    # Enable debug assertions since we are running optimized builds for testing
    # but still want to have debug assertions.
    RUSTFLAGS: -Cdebug-assertions=y
  except:
    variables:
      - $DEPLOY_TAG
  script:
    - ./scripts/build.sh --locked
    - time cargo test --all --release --verbose --locked

test-linux-stable-int:
  <<: *test-linux
  except:
    variables:
      - $DEPLOY_TAG
  script:
    - ./scripts/build.sh --locked
    - time RUST_LOG=sync=trace,consensus=trace,client=trace,state-db=trace,db=trace,forks=trace,state_db=trace,storage_cache=trace cargo test -p node-cli --release --verbose --locked -- --ignored --test-threads=1
    - sccache -s
  allow_failure: true
check-web-wasm:
  stage: test
  <<: *docker-env
  allow_failure: true
except:
- /^v[0-9]+\.[0-9]+.*$/ # i.e. v1.0, v2.1rc1
script:
# WASM support is in progress. As more and more crates support WASM, we
# should add entries here. See https://github.com/paritytech/substrate/issues/2416
- time cargo web build -p sr-io
- time cargo web build -p sr-primitives
- time cargo web build -p sr-std
- time cargo web build -p substrate-client
- time cargo web build -p substrate-consensus-aura
- time cargo web build -p substrate-consensus-babe
- time cargo web build -p substrate-consensus-common
- time cargo web build -p substrate-keyring
- time cargo web build -p substrate-keystore
- time cargo web build -p substrate-executor
- time cargo web build -p substrate-network
- time cargo web build -p substrate-network-libp2p
- time cargo web build -p substrate-panic-handler
- time cargo web build -p substrate-peerset
- time cargo web build -p substrate-primitives
- time cargo web build -p substrate-serializer
- time cargo web build -p substrate-state-db
- time cargo web build -p substrate-state-machine
- time cargo web build -p substrate-telemetry
- time cargo web build -p substrate-trie
.build-only: &build-only
  only:
    - master
    - tags
    - web

#### stage: build

build-linux-release:
  stage: build
  <<: *collect-artifacts
  <<: *docker-env
  <<: *build-only
except:
variables:
- $DEPLOY_TAG
script:
- ./scripts/build.sh --locked
- mkdir -p ./artifacts
- mv ./target/release/substrate ./artifacts/.
- echo -n "Substrate version = "
- if [ "${CI_COMMIT_TAG}" ]; then
echo "${CI_COMMIT_TAG}" | tee ./artifacts/VERSION;
./artifacts/substrate --version |
sed -n -r 's/^substrate ([0-9.]+.*-[0-9a-f]{7,13})-.*$/\1/p' |
tee ./artifacts/VERSION;
fi
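    # for untagged builds, the sed above reduces the `substrate --version`
    # output to "<version>-<short sha>"; illustrative example:
    #   "substrate 2.0.0-79b6ba5-x86_64-linux-gnu"  ->  "2.0.0-79b6ba5"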
- sha256sum ./artifacts/substrate | tee ./artifacts/substrate.sha256
- ./scripts/node-template-release.sh ./artifacts/substrate-node-template.tar.gz
- cp -r scripts/docker/* ./artifacts
build-rust-doc-release:
  stage: build
  <<: *docker-env
  <<: *build-only
  allow_failure: true
  artifacts:
    name: "${CI_JOB_NAME}_${CI_COMMIT_REF_NAME}-doc"
    when: on_success
    expire_in: 7 days
    paths:
      - ./crate-docs
  script:
    - ./scripts/build.sh --locked
    - rm -f ./crate-docs/index.html # use its presence as an indicator that the job succeeded
    - time cargo +nightly doc --release --all --verbose
    - cp -R ./target/doc ./crate-docs
    - echo "<meta http-equiv=refresh content=0;url=substrate_service/index.html>" > ./crate-docs/index.html
#### stage: publish

.publish-build: &publish-build
  stage: publish
  dependencies:
    - build-linux-release
  <<: *build-only
  <<: *kubernetes-build
publish-docker-release:
<<: *publish-build
image: docker:stable
services:
- docker:dind
  # collect VERSION artifact here to pass it on to kubernetes
  <<: *collect-artifacts
variables:
DOCKER_HOST: tcp://localhost:2375
DOCKER_DRIVER: overlay2
# DOCKERFILE: scripts/docker/Dockerfile
CONTAINER_IMAGE: parity/substrate
- test "$Docker_Hub_User_Parity" -a "$Docker_Hub_Pass_Parity"
|| ( echo "no docker credentials provided"; exit 1 )
- docker login -u "$Docker_Hub_User_Parity" -p "$Docker_Hub_Pass_Parity"
- docker info
script:
- VERSION="$(cat ./artifacts/VERSION)"
- echo "Substrate version = ${VERSION}"
- test -z "${VERSION}" && exit 1
- cd ./artifacts
    - docker build
      --build-arg VCS_REF="${CI_COMMIT_SHA}"
      --build-arg BUILD_DATE="$(date -u '+%Y-%m-%dT%H:%M:%SZ')"
      --tag $CONTAINER_IMAGE:$VERSION
      --tag $CONTAINER_IMAGE:latest .
- docker push $CONTAINER_IMAGE:$VERSION
- docker push $CONTAINER_IMAGE:latest
after_script:
- docker logout
# only VERSION information is needed for the deployment
- find ./artifacts/ -depth -not -name VERSION -not -name artifacts -delete
publish-s3-release:
<<: *publish-build
image: parity/awscli:latest
variables:
GIT_STRATEGY: none
BUCKET: "releases.parity.io"
PREFIX: "substrate/${ARCH}-${DOCKER_OS}"
script:
- aws s3 sync ./artifacts/ s3://${BUCKET}/${PREFIX}/$(cat ./artifacts/VERSION)/
- echo "update objects in latest path"
- for file in ./artifacts/*; do
name="$(basename ${file})";
aws s3api copy-object
--copy-source ${BUCKET}/${PREFIX}/$(cat ./artifacts/VERSION)/${name}
--bucket ${BUCKET} --key ${PREFIX}/latest/${name};
done
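    # each versioned object is copied to the matching key under
    # ${PREFIX}/latest/ so that "latest" always mirrors the newest upload,
    # e.g. (hypothetical key):
    #   ${PREFIX}/2.0.0/substrate  ->  ${PREFIX}/latest/substrate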
- aws s3 ls s3://${BUCKET}/${PREFIX}/latest/
--recursive --human-readable --summarize
publish-s3-doc:
  stage: publish
  image: parity/awscli:latest
  dependencies:
    - build-rust-doc-release
  <<: *build-only
  <<: *kubernetes-build
variables:
GIT_STRATEGY: none
BUCKET: "releases.parity.io"
PREFIX: "substrate-rustdoc"
script:
- test -r ./crate-docs/index.html || (
echo "./crate-docs/index.html not present, build:rust:doc:release job not complete";
exit 1
)
- aws s3 sync --delete --size-only --only-show-errors
./crate-docs/ s3://${BUCKET}/${PREFIX}/
after_script:
- aws s3 ls s3://${BUCKET}/${PREFIX}/
--human-readable --summarize
#### stage: kubernetes

.deploy-template: &deploy
  stage: kubernetes
  when: manual
  retry: 1
  image: parity/kubetools:latest
  tags:
    # this is the runner that is used to deploy it
    - kubernetes-parity-build
before_script:
- test -z "${DEPLOY_TAG}" &&
test -f ./artifacts/VERSION &&
DEPLOY_TAG="$(cat ./artifacts/VERSION)"
- test "${DEPLOY_TAG}" || ( echo "Neither DEPLOY_TAG nor VERSION information available"; exit 1 )
script:
- echo "Substrate version = ${DEPLOY_TAG}"
# or use helm to render the template
- helm template
--values ./scripts/kubernetes/values.yaml
--set image.tag=${DEPLOY_TAG}
--set validator.keys=${VALIDATOR_KEYS}
./scripts/kubernetes | kubectl apply -f - --dry-run=false
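    # `helm template` renders the chart client-side; the rendered manifests
    # are applied by piping them into kubectl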
- echo "# substrate namespace ${KUBE_NAMESPACE}"
- kubectl -n ${KUBE_NAMESPACE} get all
- echo "# substrate's nodes' external ip addresses:"
- kubectl get nodes -l node=substrate
-o jsonpath='{range .items[*]}{.metadata.name}{"\t"}{range @.status.addresses[?(@.type=="ExternalIP")]}{.address}{"\n"}{end}'
- echo "# substrate' nodes"
- kubectl -n ${KUBE_NAMESPACE} get pods
-o jsonpath='{range .items[*]}{.metadata.name}{"\t"}{.spec.nodeName}{"\n"}{end}'
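    # prints one "<pod name>  <node name>" line per pod; illustrative output:
    #   substrate-0   parity-prod-node-1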
- echo "# wait for the rollout to complete"
- kubectl -n ${KUBE_NAMESPACE} rollout status statefulset/substrate
# have environment:url eventually point to the logs
.deploy-cibuild: &deploy-cibuild
<<: *deploy
dependencies:
- publish-docker-release
.deploy-tag: &deploy-tag
<<: *deploy
only:
variables:
- $DEPLOY_TAG
deploy-ew3:
  <<: *deploy-cibuild
  environment:
    name: parity-prod-ew3

deploy-ue1:
  <<: *deploy-cibuild
  environment:
    name: parity-prod-ue1

deploy-ew3-tag:
  <<: *deploy-tag
  environment:
    name: parity-prod-ew3

deploy-ue1-tag:
  <<: *deploy-tag
  environment:
    name: parity-prod-ue1
#### stage: flaming-fir

.validator-deploy: &validator-deploy
stage: flaming-fir
dependencies:
- build-linux-release
image: parity/azure-ansible:v1
allow_failure: true
when: manual
validator-1:
  <<: *validator-deploy
  script:
    - ./scripts/flamingfir-deploy.sh flamingfir-validator1

validator-2:
  <<: *validator-deploy
  script:
    - ./scripts/flamingfir-deploy.sh flamingfir-validator2

validator-3:
  <<: *validator-deploy
  script:
    - ./scripts/flamingfir-deploy.sh flamingfir-validator3

validator-4:
  <<: *validator-deploy
  script:
    - ./scripts/flamingfir-deploy.sh flamingfir-validator4