Skip to content
Snippets Groups Projects
Commit 181f7f13 authored by command-bot's avatar command-bot
Browse files

"$PIPELINE_SCRIPTS_DIR/commands/bench/bench.sh" --subcommand=pallet...

"$PIPELINE_SCRIPTS_DIR/commands/bench/bench.sh" --subcommand=pallet --runtime=westend --target_dir=polkadot --pallet=pallet_beefy_mmr
parent 9420dac5
Branches cmd-bot/6732-6
No related merge requests found
Pipeline #507786 passed with stage
in 41 minutes and 12 seconds
# polkadot-sdk | CI definitions (via GitLab CI)
#
# FYI: Pipelines can be triggered manually through the web UI (if you have enough permissions)
#
# Currently, entire CI instructions are split into different subfiles. Each CI stage has a corresponding
# file which can be found here: .gitlab/pipeline/<stage_name>.yml
# Pipeline stages, executed in order; each stage's jobs live in
# .gitlab/pipeline/<stage_name>.yml (see `include:` below).
stages:
  - check
  - test
  - build
  - publish
  - short-benchmarks
  - zombienet
  - deploy
  - notify
# Run pipelines for tags and branches (default polkadot-sdk CI behaviour).
# NOTE(review): this span was a collapsed side-by-side diff; reconstructed from
# the left (pre-commit) column. The right column was the command-bot override
# (`$CI_PIPELINE_SOURCE == "api" / "web"`).
workflow:
  rules:
    - if: $CI_COMMIT_TAG
    - if: $CI_COMMIT_BRANCH
# Global variables inherited by all jobs.
# NOTE(review): this span was a collapsed side-by-side diff; reconstructed from
# the left (pre-commit) column. The interleaved `command:` job fragments from
# the right column belong to the command-bot replacement pipeline.
variables:
  # CI_IMAGE: !reference [ .ci-unified, variables, CI_IMAGE ]
  CI_IMAGE: "docker.io/paritytech/ci-unified:bullseye-1.81.0-2024-11-19-v202411281558"
  # BUILDAH_IMAGE is defined in group variables
  BUILDAH_COMMAND: "buildah --storage-driver overlay2"
  RELENG_SCRIPTS_BRANCH: "master"
  RUSTY_CACHIER_SINGLE_BRANCH: "master"
  RUSTY_CACHIER_DONT_OPERATE_ON_MAIN_BRANCH: "true"
  RUSTY_CACHIER_COMPRESSION_METHOD: "zstd"
  NEXTEST_FAILURE_OUTPUT: "immediate-final"
  NEXTEST_SUCCESS_OUTPUT: "final"
  DOCKER_IMAGES_VERSION: "${CI_COMMIT_REF_NAME}-${CI_COMMIT_SHORT_SHA}"
# Defaults applied to every job: retry on infrastructure failures only,
# no implicit cache, and allow new pipelines to interrupt running ones.
default:
  retry:
    max: 2
    when:
      - runner_system_failure
      - unknown_failure
      - api_failure
  cache: {}
  interruptible: true
# Reusable artifact definition: keep ./artifacts for a day on success.
.collect-artifacts:
  artifacts:
    name: "${CI_JOB_NAME}_${CI_COMMIT_REF_NAME}"
    when: on_success
    expire_in: 1 days
    paths:
      - artifacts/
# Short-lived variant: keep ./artifacts for 3 hours, only when the job fails.
.collect-artifacts-short:
  artifacts:
    name: "${CI_JOB_NAME}_${CI_COMMIT_REF_NAME}"
    when: on_failure
    expire_in: 3 hours
    paths:
      - artifacts/
# Common environment setup shared by the docker/kubernetes job templates.
.prepare-env:
  before_script:
    # $WASM_BUILD_WORKSPACE_HINT enables wasm-builder to find the Cargo.lock from within generated
    # packages
    - export WASM_BUILD_WORKSPACE_HINT="$PWD"
    # ensure that RUSTFLAGS are set correctly
    - echo $RUSTFLAGS
# Shared before_script: job kill-switch check plus pipeline-stopper env vars.
.common-before-script:
  before_script:
    - !reference [.job-switcher, before_script]
    - !reference [.pipeline-stopper-vars, script]
# Allows disabling individual jobs via the $CI_DISABLED_JOBS CI/CD variable
# (exact job-name match); a disabled job exits 0 immediately.
.job-switcher:
  before_script:
    - if echo "$CI_DISABLED_JOBS" | grep -xF "$CI_JOB_NAME"; then echo "The job has been cancelled in CI settings"; exit 0; fi
# Template for jobs running on the kubernetes executor.
# NOTE(review): L88-L140 of the scrape were a collapsed side-by-side diff;
# reconstructed from the left (pre-commit) column. The right column was the
# command-bot `command:` job (GH_* variables, bench.sh script, .git/.artifacts).
.kubernetes-env:
  image: "${CI_IMAGE}"
  before_script:
    - !reference [.common-before-script, before_script]
    - !reference [.prepare-env, before_script]
  tags:
    - kubernetes-parity-build

# Prints the active rust toolchain versions (stable and nightly) for debugging.
.rust-info-script:
  script:
    - rustup show
    - cargo --version
    - rustup +nightly show
    - cargo +nightly --version

# collecting vars for pipeline stopper
# they will be used if the job fails
.pipeline-stopper-vars:
  script:
    - echo "Collecting env variables for the cancel-pipeline job"
    - echo "FAILED_JOB_URL=${CI_JOB_URL}" > pipeline-stopper.env
    - echo "FAILED_JOB_NAME=${CI_JOB_NAME}" >> pipeline-stopper.env
    - echo "PR_NUM=${CI_COMMIT_REF_NAME}" >> pipeline-stopper.env

# Exposes pipeline-stopper.env to downstream jobs via a dotenv report.
.pipeline-stopper-artifacts:
  artifacts:
    reports:
      dotenv: pipeline-stopper.env

# Template for jobs running on the docker (linux) executor.
.docker-env:
  image: "${CI_IMAGE}"
  variables:
    FL_FORKLIFT_VERSION: !reference [.forklift, variables, FL_FORKLIFT_VERSION]
  before_script:
    - !reference [.common-before-script, before_script]
    - !reference [.prepare-env, before_script]
    - !reference [.rust-info-script, script]
    - !reference [.forklift-cache, before_script]
  tags:
    - linux-docker
#
# Installs and activates the forklift build cache unless $FORKLIFT_BYPASS is set.
# The folded scalar keeps inner lines more-indented so their newlines are
# preserved (folding them onto one line would produce invalid shell, e.g. `fi echo`).
.forklift-cache:
  before_script:
    - mkdir ~/.forklift
    - cp .forklift/config-gitlab.toml ~/.forklift/config.toml
    - cat .forklift/config-gitlab.toml > .forklift/config.toml
    - >
      if [ "$FORKLIFT_BYPASS" != "true" ]; then
        echo "FORKLIFT_BYPASS not set";
        if command -v forklift >/dev/null 2>&1; then
          echo "forklift already exists";
          forklift version
        else
          echo "forklift does not exist, downloading";
          curl --header "PRIVATE-TOKEN: $FL_CI_GROUP_TOKEN" -o forklift -L "${CI_API_V4_URL}/projects/676/packages/generic/forklift/${FL_FORKLIFT_VERSION}/forklift_${FL_FORKLIFT_VERSION}_linux_amd64";
          chmod +x forklift;
          export PATH=$PATH:$(pwd);
          echo ${FL_FORKLIFT_VERSION};
        fi
        echo "Creating alias cargo='forklift cargo'";
        shopt -s expand_aliases;
        alias cargo="forklift cargo";
      fi
#
# Standard ref filter: manual/scheduled runs, master, PRs, merge queues, release tags.
.common-refs:
  rules:
    - if: $CI_PIPELINE_SOURCE == "web"
    - if: $CI_PIPELINE_SOURCE == "schedule"
    - if: $CI_COMMIT_REF_NAME == "master"
    - if: $CI_COMMIT_REF_NAME =~ /^[0-9]+$/ # PRs
    - if: $CI_COMMIT_REF_NAME =~ /^gh-readonly-queue.*$/ # merge queues
    - if: $CI_COMMIT_REF_NAME =~ /^v[0-9]+\.[0-9]+.*$/ # i.e. v1.0, v2.1rc1
# Run only for PRs and merge queues.
.test-pr-refs:
  rules:
    - if: $CI_COMMIT_REF_NAME =~ /^[0-9]+$/ # PRs
    - if: $CI_COMMIT_REF_NAME =~ /^gh-readonly-queue.*$/ # merge queues
# Publish GitHub Pages only from master; never from downstream-triggered pipelines.
.publish-gh-pages-refs:
  rules:
    - if: $CI_PIPELINE_SOURCE == "pipeline"
      when: never
    - if: $CI_PIPELINE_SOURCE == "web" && $CI_COMMIT_REF_NAME == "master"
    - if: $CI_COMMIT_REF_NAME == "master"
# handle the specific case where benches could store incorrect bench data because of the downstream staging runs
# exclude cargo-check-benches from such runs
.test-refs-check-benches:
  rules:
    - if: $CI_COMMIT_REF_NAME == "master" && $CI_PIPELINE_SOURCE == "pipeline" && $CI_IMAGE =~ /staging$/
      when: never
    - if: $CI_PIPELINE_SOURCE == "web"
    - if: $CI_PIPELINE_SOURCE == "schedule"
    - if: $CI_COMMIT_REF_NAME == "master"
    - if: $CI_COMMIT_REF_NAME =~ /^[0-9]+$/ # PRs
    - if: $CI_COMMIT_REF_NAME =~ /^gh-readonly-queue.*$/ # merge queues
    - if: $CI_COMMIT_REF_NAME =~ /^v[0-9]+\.[0-9]+.*$/ # i.e. v1.0, v2.1rc1
# Like .common-refs, but excludes downstream-triggered pipelines and adds ci-release branches.
.test-refs-no-trigger:
  rules:
    - if: $CI_PIPELINE_SOURCE == "pipeline"
      when: never
    - if: $CI_PIPELINE_SOURCE == "web"
    - if: $CI_PIPELINE_SOURCE == "schedule"
    - if: $CI_COMMIT_REF_NAME == "master"
    - if: $CI_COMMIT_REF_NAME =~ /^[0-9]+$/ # PRs
    - if: $CI_COMMIT_REF_NAME =~ /^gh-readonly-queue.*$/ # merge queues
    - if: $CI_COMMIT_REF_NAME =~ /^v[0-9]+\.[0-9]+.*$/ # i.e. v1.0, v2.1rc1
    - if: $CI_COMMIT_REF_NAME =~ /^ci-release-.*$/
# PRs and merge queues only; also allows manual/scheduled runs, never downstream triggers.
.test-refs-no-trigger-prs-only:
  rules:
    - if: $CI_PIPELINE_SOURCE == "pipeline"
      when: never
    - if: $CI_PIPELINE_SOURCE == "web"
    - if: $CI_PIPELINE_SOURCE == "schedule"
    - if: $CI_COMMIT_REF_NAME =~ /^[0-9]+$/ # PRs
    - if: $CI_COMMIT_REF_NAME =~ /^gh-readonly-queue.*$/ # merge queues
# Publish from master and release tags; manual/scheduled allowed, downstream triggers excluded.
.publish-refs:
  rules:
    - if: $CI_PIPELINE_SOURCE == "pipeline"
      when: never
    - if: $CI_PIPELINE_SOURCE == "web"
    - if: $CI_PIPELINE_SOURCE == "schedule"
    - if: $CI_COMMIT_REF_NAME == "master"
    - if: $CI_COMMIT_REF_NAME =~ /^v[0-9]+\.[0-9]+.*$/ # i.e. v1.0, v2.1rc1
.build-refs:
  # publish-refs + PRs
  rules:
    - if: $CI_PIPELINE_SOURCE == "pipeline"
      when: never
    - if: $CI_PIPELINE_SOURCE == "web"
    - if: $CI_PIPELINE_SOURCE == "schedule"
    - if: $CI_COMMIT_REF_NAME == "master"
    - if: $CI_COMMIT_REF_NAME =~ /^v[0-9]+\.[0-9]+.*$/ # i.e. v1.0, v2.1rc1
    - if: $CI_COMMIT_REF_NAME =~ /^[0-9]+$/ # PRs
    - if: $CI_COMMIT_REF_NAME =~ /^gh-readonly-queue.*$/ # merge queues
# Per-stage job definitions plus shared infrastructure templates.
include:
  # check jobs
  - .gitlab/pipeline/check.yml
  # test jobs
  - .gitlab/pipeline/test.yml
  # build jobs
  - .gitlab/pipeline/build.yml
  # publish jobs
  - .gitlab/pipeline/publish.yml
  # zombienet jobs
  - .gitlab/pipeline/zombienet.yml
  # ci image
  - project: parity/infrastructure/ci_cd/shared
    ref: main
    file: /common/ci-unified.yml
  - project: parity/infrastructure/ci_cd/shared
    ref: main
    file: /common/forklift.yml
# This job cancels the whole pipeline if any of provided jobs fail.
# In a DAG, every jobs chain is executed independently of others. The `fail_fast` principle suggests
# to fail the pipeline as soon as possible to shorten the feedback loop.
.cancel-pipeline-template:
  stage: .post
  rules:
    - if: $CI_COMMIT_REF_NAME =~ /^[0-9]+$/ # PRs
      when: on_failure
  variables:
    PROJECT_ID: "${CI_PROJECT_ID}"
    PROJECT_NAME: "${CI_PROJECT_NAME}"
    PIPELINE_ID: "${CI_PIPELINE_ID}"
    # FAILED_JOB_URL / FAILED_JOB_NAME / PR_NUM come from the dotenv report
    # produced by .pipeline-stopper-vars in the failing job.
    FAILED_JOB_URL: "${FAILED_JOB_URL}"
    FAILED_JOB_NAME: "${FAILED_JOB_NAME}"
    PR_NUM: "${PR_NUM}"
  trigger:
    project: "parity/infrastructure/ci_cd/pipeline-stopper"
# Triggers the pipeline-stopper with placeholder values to clear any previously
# posted cancellation message on the PR.
remove-cancel-pipeline-message:
  stage: .post
  rules:
    - if: $CI_COMMIT_REF_NAME =~ /^[0-9]+$/ # PRs
  variables:
    PROJECT_ID: "${CI_PROJECT_ID}"
    PROJECT_NAME: "${CI_PROJECT_NAME}"
    PIPELINE_ID: "${CI_PIPELINE_ID}"
    FAILED_JOB_URL: "https://gitlab.com"
    FAILED_JOB_NAME: "nope"
    PR_NUM: "${CI_COMMIT_REF_NAME}"
  trigger:
    project: "parity/infrastructure/ci_cd/pipeline-stopper"
# Cancel the pipeline if build-linux-stable fails.
cancel-pipeline-build-linux-stable:
  extends: .cancel-pipeline-template
  needs:
    - job: build-linux-stable
# Cancel the pipeline if build-linux-stable-cumulus fails.
cancel-pipeline-build-linux-stable-cumulus:
  extends: .cancel-pipeline-template
  needs:
    - job: build-linux-stable-cumulus
# Cancel the pipeline if build-linux-substrate fails.
cancel-pipeline-build-linux-substrate:
  extends: .cancel-pipeline-template
  needs:
    - job: build-linux-substrate
0% or .
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or to comment