#
# substrate
#
# pipelines can be triggered manually in the web
# setting DEPLOY_TAG will only deploy the tagged image
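#
# a DEPLOY_TAG pipeline can also be started through the pipeline-trigger API;
# a sketch, assuming a valid trigger token and project id:
#
#   curl -X POST \
#     -F token=<TRIGGER_TOKEN> -F ref=master \
#     -F "variables[DEPLOY_TAG]=v1.0.0" \
#     https://gitlab.parity.io/api/v4/projects/<PROJECT_ID>/trigger/pipeline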
stages:
  - test
  - build
  - publish
  - deploy
image:                             parity/rust-builder:latest

variables:
  GIT_STRATEGY:                    fetch
  CARGO_HOME:                      "/ci-cache/${CI_PROJECT_NAME}/cargo/${CI_JOB_NAME}"
  SCCACHE_DIR:                     "/ci-cache/${CI_PROJECT_NAME}/sccache"
  CI_SERVER_NAME:                  "GitLab CI"
  DOCKER_OS:                       "debian:stretch"
  # ARCH is referenced by publish-s3-release below; x86_64 is an assumption
  ARCH:                            "x86_64"

.collect-artifacts:                &collect-artifacts
  artifacts:
    name:                          "${CI_JOB_NAME}_${CI_COMMIT_REF_NAME}"
    when:                          on_success
    expire_in:                     7 days
    paths:
    - artifacts/
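# jobs pull the block above in through the YAML merge key, e.g.:
#   build-linux-release:
#     <<:                          *collect-artifacts
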
.kubernetes-build:                 &kubernetes-build
  tags:
    - kubernetes-parity-build
  environment:
    name: parity-build

#### stage:                        test

check-runtime:
  stage:                           test
  image:                           parity/tools:latest
  <<:                              *kubernetes-build
  only:
    - /^[0-9]+$/
  variables:
    GITLAB_API:                    "https://gitlab.parity.io/api/v4"
    GITHUB_API_PROJECT:            "parity%2Finfrastructure%2Fgithub-api"
  script:
    - ./scripts/gitlab/check_runtime.sh


check-line-width:
  stage:                           test
  image:                           parity/tools:latest
  <<:                              *kubernetes-build
  only:
    - /^[0-9]+$/
  script:
    - ./scripts/gitlab/check_line_width.sh
  allow_failure:                   false


test-linux-stable:                 &test
  stage:                           test
  variables:
    RUST_TOOLCHAIN: stable
    # Enable debug assertions since we are running optimized builds for testing
    # but still want to have debug assertions.
    RUSTFLAGS: -Cdebug-assertions=y
    TARGET: native
  except:
    variables:
      - $DEPLOY_TAG
  tags:
    - linux-docker
  before_script:
    - sccache -s
    - ./scripts/build.sh
  script:
    - time cargo test --all --release --verbose --locked
    - sccache -s

check-web-wasm:
  stage:                           test
  only:
    - /^[0-9]+$/
  script:
    # WASM support is in progress. As more and more crates support WASM, we
    # should add entries here. See https://github.com/paritytech/substrate/issues/2416
    - time cargo web build -p sr-io
    - time cargo web build -p sr-primitives
    - time cargo web build -p sr-std
    - time cargo web build -p substrate-consensus-common
    - time cargo web build -p substrate-executor
    - time cargo web build -p substrate-network-libp2p
    - time cargo web build -p substrate-panic-handler
    - time cargo web build -p substrate-peerset
    - time cargo web build -p substrate-primitives
    - time cargo web build -p substrate-serializer
    - time cargo web build -p substrate-state-db
    - time cargo web build -p substrate-state-machine
    - time cargo web build -p substrate-trie
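    # the builder image is assumed to ship the cargo-web tool; to reproduce a
    # failure locally you would first `cargo install cargo-web`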
  tags:
    - linux-docker

.build-only:                       &build-only
  only:
    # assumed ref filter: master, release tags and manual web pipelines
    - master
    - tags
    - web

#### stage:                        build

build-linux-release:               &build
  stage:                           build
  <<:                              *collect-artifacts
  <<:                              *build-only
  except:
    variables:
      - $DEPLOY_TAG
  tags:
    - linux-docker
  before_script:
    - sccache -s
    - ./scripts/build.sh
  script:
    - time cargo build --release --verbose
    - mkdir -p ./artifacts
    - mv ./target/release/substrate ./artifacts/.
    - echo -n "Substrate version = "
    - if [ "${CI_COMMIT_TAG}" ]; then
        echo "${CI_COMMIT_TAG}" | tee ./artifacts/VERSION;
        ./artifacts/substrate --version |
        sed -n -r 's/^substrate ([0-9.]+.*-[0-9a-f]{7,13})-.*$/\1/p' |
        tee ./artifacts/VERSION;
      fi
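    # the sed in the branch above turns version output like
    # "substrate 2.0.0-1a2b3c4-x86_64-linux-gnu" into "2.0.0-1a2b3c4"
    # (example string only; the real output format may differ)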
    - sha256sum ./artifacts/substrate | tee ./artifacts/substrate.sha256
    - printf '\n# building node-template\n\n'
    - ./scripts/node-template-release.sh ./artifacts/substrate-node-template.tar.gz
    - cp -r scripts/docker/* ./artifacts
    - sccache -s

build-rust-doc-release:
  stage:                           build
  artifacts:
    name:                          "${CI_JOB_NAME}_${CI_COMMIT_REF_NAME}-doc"
    when:                          on_success
    expire_in:                     7 days
    paths:
    - ./crate-docs
  <<:                              *build-only
  tags:
    - linux-docker
  before_script:
    - sccache -s
    - ./scripts/build.sh
  script:
    - rm -f ./crate-docs/index.html # use it as an indicator if the job succeeds
    - time cargo +nightly doc --release --verbose
    - cp -R ./target/doc ./crate-docs
    - echo "<meta http-equiv=refresh content=0;url=substrate_service/index.html>" > ./crate-docs/index.html
    - sccache -s

#### stage:                        publish

.publish-build:                    &publish-build
  stage:                           publish
  dependencies:
    - build-linux-release
  <<:                              *build-only
  <<:                              *kubernetes-build

publish-docker-release:
  <<:                              *publish-build
  image:                           docker:stable
  services:
    - docker:dind
  # collect VERSION artifact here to pass it on to kubernetes
  <<:                              *collect-artifacts
  variables:
    DOCKER_HOST:                   tcp://localhost:2375
    DOCKER_DRIVER:                 overlay2
    GIT_STRATEGY:                  none
    # DOCKERFILE:                  scripts/docker/Dockerfile
    CONTAINER_IMAGE:               parity/substrate
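    # DOCKER_HOST points the docker client at the docker:dind service declared
    # above; DOCKER_DRIVER selects the overlay2 storage driver for it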
  before_script:
    - test "$Docker_Hub_User_Parity" -a "$Docker_Hub_Pass_Parity"
        || ( echo "no docker credentials provided"; exit 1 )
    - docker login -u "$Docker_Hub_User_Parity" -p "$Docker_Hub_Pass_Parity"
    - docker info
  script:
    - VERSION="$(cat ./artifacts/VERSION)"
    - echo "Substrate version = ${VERSION}"
    - test -z "${VERSION}" && exit 1
    - docker build
      --build-arg VCS_REF="${CI_COMMIT_SHORT_SHA}"
      --build-arg BUILD_DATE="$(date -u '+%Y-%m-%dT%H:%M:%SZ')"
      --tag $CONTAINER_IMAGE:$VERSION
      --tag $CONTAINER_IMAGE:latest .
    - docker push $CONTAINER_IMAGE:$VERSION
    - docker push $CONTAINER_IMAGE:latest
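    # e.g. this publishes parity/substrate:2.0.0-1a2b3c4 and updates
    # parity/substrate:latest (version value is illustrative)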
  after_script:
    - docker logout
    # only VERSION information is needed for the deployment
    - find ./artifacts/ -depth -not -name VERSION -not -name artifacts -delete

publish-s3-release:
  <<:                              *publish-build
  image:                           parity/awscli:latest
  variables:
    GIT_STRATEGY:                  none
    BUCKET:                        "releases.parity.io"
    PREFIX:                        "substrate/${ARCH}-${DOCKER_OS}"
  script:
    - aws s3 sync ./artifacts/ s3://${BUCKET}/${PREFIX}/$(cat ./artifacts/VERSION)/
    - echo "update objects in latest path"
    - for file in ./artifacts/*; do
      name="$(basename ${file})";
      aws s3api copy-object
        --copy-source ${BUCKET}/${PREFIX}/$(cat ./artifacts/VERSION)/${name}
        --bucket ${BUCKET} --key ${PREFIX}/latest/${name};
      done
    - aws s3 ls s3://${BUCKET}/${PREFIX}/latest/
        --recursive --human-readable --summarize
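    # resulting layout (sketch): s3://${BUCKET}/${PREFIX}/<VERSION>/<file>,
    # with every object mirrored under ${PREFIX}/latest/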

publish-s3-doc:
  stage:                           publish
  image:                           parity/awscli:latest
  allow_failure:                   true
  dependencies:
    - build-rust-doc-release
  cache:                           {}
  <<:                              *build-only
  <<:                              *kubernetes-build
  variables:
    GIT_STRATEGY:                  none
    BUCKET:                        "releases.parity.io"
    PREFIX:                        "substrate-rustdoc"
  script:
    - test -r ./crate-docs/index.html || (
        echo "./crate-docs/index.html not present, build:rust:doc:release job not complete";
        exit 1
      )
    - aws s3 sync --delete --size-only --only-show-errors
        ./crate-docs/ s3://${BUCKET}/${PREFIX}/
  after_script:
    - aws s3 ls s3://${BUCKET}/${PREFIX}/
        --human-readable --summarize

#### stage:                        deploy

.deploy-template:                  &deploy
  stage:                           deploy
  when:                            manual
  retry:                           1
  image:                           parity/kubetools:latest
  <<:                              *build-only
  tags:
    # this is the runner that is used to deploy it
    - kubernetes-parity-build
  before_script:
    - test -z "${DEPLOY_TAG}" &&
      test -f ./artifacts/VERSION &&
      DEPLOY_TAG="$(cat ./artifacts/VERSION)"
    - test "${DEPLOY_TAG}" || ( echo "Neither DEPLOY_TAG nor VERSION information available"; exit 1 )
  script:
    - echo "Substrate version = ${DEPLOY_TAG}"
    # or use helm to render the template
    - helm template
      --values ./scripts/kubernetes/values.yaml
      --set image.tag=${DEPLOY_TAG}
      --set validator.keys=${VALIDATOR_KEYS}
      ./scripts/kubernetes | kubectl apply -f - --dry-run=false
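    # helm only renders the chart here; the manifests are applied by the
    # kubectl on the receiving end of the pipe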
    - echo "# substrate namespace ${KUBE_NAMESPACE}"
    - kubectl -n ${KUBE_NAMESPACE} get all
    - echo "# substrate's nodes' external ip addresses:"
    - kubectl get nodes -l node=substrate
      -o jsonpath='{range .items[*]}{.metadata.name}{"\t"}{range @.status.addresses[?(@.type=="ExternalIP")]}{.address}{"\n"}{end}'
    - echo "# substrate' nodes"
    - kubectl -n ${KUBE_NAMESPACE} get pods
      -o jsonpath='{range .items[*]}{.metadata.name}{"\t"}{.spec.nodeName}{"\n"}{end}'
    - echo "# wait for the rollout to complete"
    - kubectl -n ${KUBE_NAMESPACE} rollout status statefulset/substrate
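    # `rollout status` blocks until every pod of the statefulset runs the new
    # version, so the job only succeeds once the deployment is actually live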

.deploy-cibuild:                   &deploy-cibuild
  <<:                              *deploy
  dependencies:
    - publish-docker-release

.deploy-tag:                       &deploy-tag
  <<:                              *deploy
  only:
    variables:
      - $DEPLOY_TAG
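# e.g. a manual web pipeline run with DEPLOY_TAG=v1.0.0 (value illustrative)
# skips test and build entirely (they carry `except: variables: - $DEPLOY_TAG`)
# and deploys the already-published image with that tag directly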

# have environment:url eventually point to the logs
deploy-ew3:
  <<:                              *deploy-cibuild
  environment:
    name: parity-prod-ew3

deploy-ue1:
  <<:                              *deploy-cibuild
  environment:
    name: parity-prod-ue1

deploy-ew3-tag:
  <<:                              *deploy-tag
  environment:
    name: parity-prod-ew3

deploy-ue1-tag:
  <<:                              *deploy-tag
  environment:
    name: parity-prod-ue1

.validator-deploy: &validator-deploy
  stage: publish
  dependencies:
    - build-linux-release
  image: parity/azure-ansible:v1
  allow_failure: true
  when: manual
  tags:
    - linux-docker

validator1:
  <<: *validator-deploy
  script:
    - ansible-playbook -i scripts/ansible/inventory.ini -u gitlab scripts/ansible/alexander.yml -l validator1

validator2:
  <<: *validator-deploy
  script:
    - ansible-playbook -i scripts/ansible/inventory.ini -u gitlab scripts/ansible/alexander.yml -l validator2