Commit f5c32f71 authored by gabriel klawitter, committed by GitHub
ci: add kubernetes helm chart and gcp deployment (#1854)

* ci: add kubernetes helm chart and gcp deployment

* use official or parity's docker images only
parent 52826154
Showing 426 additions and 38 deletions
@@ -7,7 +7,7 @@ stages:
- test
- build
- publish
- deploy
image: parity/rust:nightly
@@ -20,11 +20,7 @@ variables:
cache: {}
.collect_artifacts: &collect_artifacts
artifacts:
@@ -36,14 +32,21 @@ cache:
.kubernetes_build: &kubernetes_build
tags:
- kubernetes-parity-build
environment:
name: parity-build
#### stage: merge-test
check:merge:conflict:
stage: merge-test
image: parity/tools:latest
cache: {}
<<: *kubernetes_build
only:
- /^[0-9]+$/
variables:
@@ -62,8 +65,7 @@ check:runtime:
stage: test
image: parity/tools:latest
cache: {}
<<: *kubernetes_build
only:
- /^[0-9]+$/
variables:
@@ -77,25 +79,29 @@ check:runtime:
test:rust:stable: &test
stage: test
cache:
key: "${CI_JOB_NAME}-test"
paths:
- ${CARGO_HOME}
- ./target
variables:
RUST_TOOLCHAIN: stable
# Enable debug assertions since we are running optimized builds for testing
# but still want to have debug assertions.
RUSTFLAGS: -Cdebug-assertions=y
TARGET: native
only:
- tags
- master
- schedules
- web
- /^pr-[0-9]+$/
- /^[0-9]+$/
tags:
- linux-docker
before_script:
- test -d ${CARGO_HOME} -a -d ./target &&
echo "build cache size:" &&
du -h --max-depth=2 ${CARGO_HOME} ./target
- ./scripts/build.sh
script:
- time cargo test --all --release --verbose --locked
@@ -115,6 +121,11 @@ test:rust:stable: &test
build:rust:linux:release: &build
stage: build
cache:
key: "${CI_JOB_NAME}-build"
paths:
- ${CARGO_HOME}
- ./target
<<: *collect_artifacts
<<: *build_only
tags:
@@ -126,15 +137,17 @@ build:rust:linux:release: &build
- mkdir -p ./artifacts
- mv ./target/release/substrate ./artifacts/.
- echo -n "Substrate version = "
- if [ "${CI_COMMIT_TAG}" ]; then
- if [ "${CI_COMMIT_TAG}" ]; then
echo "${CI_COMMIT_TAG}" | tee ./artifacts/VERSION;
else
else
./artifacts/substrate --version |
sed -n -r 's/^substrate ([0-9.]+.*-[0-9a-f]{7,13})-.*$/\1/p' |
tee ./artifacts/VERSION;
fi
- sha256sum ./artifacts/substrate | tee ./artifacts/substrate.sha256
- echo "\n# building node-template\n"
- ./scripts/node-template-release.sh ./artifacts/substrate-node-template.tar.gz
- cp -r scripts/docker/* ./artifacts
@@ -167,16 +180,20 @@ build:rust:doc:release: &build
- build:rust:linux:release
cache: {}
<<: *build_only
<<: *kubernetes_build
publish:docker:release:
<<: *publish_build
tags:
- shell
image: docker:stable
services:
- docker:dind
variables:
DOCKER_HOST: tcp://localhost:2375
DOCKER_DRIVER: overlay2
GIT_STRATEGY: none
# DOCKERFILE: scripts/docker/Dockerfile
CONTAINER_IMAGE: parity/substrate
script:
- VERSION="$(cat ./artifacts/VERSION)"
@@ -184,7 +201,8 @@ publish:docker:release:
|| ( echo "no docker credentials provided"; exit 1 )
- docker login -u "$Docker_Hub_User_Parity" -p "$Docker_Hub_Pass_Parity"
- docker info
- cd ./artifacts
- docker build --tag $CONTAINER_IMAGE:$VERSION --tag $CONTAINER_IMAGE:latest .
- docker push $CONTAINER_IMAGE:$VERSION
- docker push $CONTAINER_IMAGE:latest
after_script:
@@ -213,8 +231,6 @@ publish:s3:release:
after_script:
- aws s3 ls s3://${BUCKET}/${PREFIX}/latest/
--recursive --human-readable --summarize
@@ -223,11 +239,8 @@ publish:s3:doc:
dependencies:
- build:rust:doc:release
cache: {}
<<: *build_only
<<: *kubernetes_build
variables:
GIT_STRATEGY: none
BUCKET: "releases.parity.io"
@@ -242,10 +255,59 @@ publish:s3:doc:
after_script:
- aws s3 ls s3://${BUCKET}/${PREFIX}/
--human-readable --summarize
.deploy:template: &deploy
stage: deploy
when: manual
cache: {}
retry: 1
image: parity/kubectl-helm:$HELM_VERSION
<<: *build_only
# variables:
# DEPLOY_TAG: "latest"
tags:
- kubernetes-parity-build
before_script:
- test -z "${DEPLOY_TAG}" &&
test -f ./target/release/VERSION &&
DEPLOY_TAG="$(cat ./target/release/VERSION)"
- test "${DEPLOY_TAG}" || ( echo "Neither DEPLOY_TAG nor VERSION information available"; exit 1 )
script:
- echo "Substrate version = ${DEPLOY_TAG}"
# or use helm to render the template
- helm template
--values ./scripts/kubernetes/values.yaml
--set image.tag=${DEPLOY_TAG}
--set validator.keys=${VALIDATOR_KEYS}
./scripts/kubernetes | kubectl apply -f - --dry-run=false
- echo "# substrate namespace"
- kubectl -n substrate get all
- echo "# substrate's nodes' external ip addresses:"
- kubectl get nodes -l node=substrate
-o jsonpath='{range .items[*]}{.metadata.name}{"\t"}{range @.status.addresses[?(@.type=="ExternalIP")]}{.address}{"\n"}{end}'
- echo "# substrate' nodes"
- kubectl -n substrate get pods
-o jsonpath='{range .items[*]}{.metadata.name}{"\t"}{.spec.nodeName}{"\n"}{end}'
# have environment:url eventually point to the logs
deploy:ew3:
<<: *deploy
environment:
name: parity-prod-ew3
deploy:ue1:
<<: *deploy
environment:
name: parity-prod-ue1
@@ -45,6 +45,11 @@ EOT
test "${mergeable}" = "true" && echo "| yes, it is." && exit 0
if [ "${baseref}" = "null" -o "${baserepo}" = "null" ]
then
echo "| either connectivity issues with github or pull request not existant"
exit 3
fi
cat <<-EOT
| not mergeable
@@ -94,7 +99,6 @@ curl -sS -X POST \
-F "token=${CI_JOB_TOKEN}" \
-F "ref=master" \
-F "variables[REBUILD_WASM]=\"${baserepo}:${baseref}\"" \
-F "variables[PRNO]=${CI_COMMIT_REF_NAME}" \
${GITLAB_API}/projects/${GITHUB_API_PROJECT}/trigger/pipeline \
| jq -r .web_url
......
#!/bin/sh
#
#
# check for any changes in the node/src/runtime, srml/ and core/sr_* trees. if
# there are any changes found, it should mark the PR breaksconsensus and
# "auto-fail" the PR in some way unless a) the runtime is rebuilt and b) there
# isn't a change in the runtime/src/lib.rs file that alters the version.

set -e # fail on any error
@@ -66,14 +66,14 @@ then
then
cat <<-EOT
changes to the runtime sources and changes in the spec version. Wasm
binary blob is rebuilt. Looks good.
spec_version: ${sub_spec_version} -> ${add_spec_version}
EOT
exit 0
else
cat <<-EOT
changes to the runtime sources and changes in the spec version. Wasm
@@ -122,6 +122,11 @@ else
versions file: ${VERSIONS_FILE}
note: if the master branch was merged in, as automated wasm rebuilds do it,
a {spec,impl}_version may already have been changed. but for pull requests
that involve wasm source file changes, a version has to be changed in the
pull request itself.
EOT
# drop through into pushing `gotissues` and exit 1...
......
name: substrate
version: 0.1
appVersion: 0.9.1
description: "Substrate: The platform for blockchain innovators"
home: https://substrate.network/
icon: https://substrate.network/favicon.ico
sources:
- https://github.com/paritytech/substrate/
maintainers:
- name: Paritytech Devops Team
email: devops-team@parity.io
tillerVersion: ">=2.8.0"
# Substrate Kubernetes Helm Chart
This [Helm Chart](https://helm.sh/) can be used for deploying containerized
**Substrate** to a [Kubernetes](https://kubernetes.io/) cluster.
## Prerequisites
- Tested on Kubernetes 1.10.7-gke.6
## Installation
To install the chart with the release name `my-release` into namespace
`my-namespace` from within this directory:
```console
$ helm install --namespace my-namespace --name my-release --values values.yaml ./
```
The command deploys Substrate on the Kubernetes cluster with the configuration
given in `values.yaml`. If the namespace is omitted, the chart is installed
into the `default` namespace.
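Individual settings can be overridden at install time without editing
`values.yaml`. A minimal sketch, assuming the defaults shipped with this chart
(the replica count of `3` is only illustrative; `image.tag=latest` is the
chart's default):

```console
$ helm install --namespace my-namespace --name my-release \
    --set nodes.replicas=3 --set image.tag=latest ./
```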
## Removal of the Chart
To uninstall/delete the `my-release` deployment:
```console
$ helm delete --namespace my-namespace my-release
```
The command removes all the Kubernetes components associated with the chart and deletes the release.
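One caveat worth knowing: the persistent volume claims created from the
StatefulSet's `volumeClaimTemplates` survive the deletion of the release.
Kubernetes names them `<template>-<statefulset>-<ordinal>`, so with this
chart's defaults (two replicas) they can be removed manually:

```console
$ kubectl -n my-namespace delete pvc substratedir-substrate-0 substratedir-substrate-1
```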
## Upgrading
Once the chart is installed, Helm takes care of deploying a new version:
```console
$ helm upgrade --namespace my-namespace --values values.yaml my-release ./
```
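If the new version misbehaves, Helm keeps a revision history per release, so
the previous state can be restored (the revision number `1` is only an
example; `helm history my-release` lists the available revisions):

```console
$ helm rollback my-release 1
```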
apiVersion: policy/v1beta1
kind: PodDisruptionBudget
metadata:
name: substrate
spec:
selector:
matchLabels:
app: substrate
maxUnavailable: 1
{{- if .Values.validator.keys }}
apiVersion: v1
kind: Secret
metadata:
name: substrate-secrets
labels:
app: substrate
type: Opaque
data:
secrets: {{ .Values.validator.keys | default "" }}
{{- end }}
# see:
# https://kubernetes.io/docs/tutorials/services/
# https://kubernetes.io/docs/concepts/services-networking/service/
# headless service for rpc
apiVersion: v1
kind: Service
metadata:
name: substrate-rpc
labels:
app: substrate
spec:
ports:
- port: 9933
name: http-rpc
- port: 9944
name: websocket-rpc
selector:
app: substrate
sessionAffinity: None
type: ClusterIP
clusterIP: None
---
apiVersion: v1
kind: Service
metadata:
name: substrate
spec:
ports:
- port: 30333
name: p2p
nodePort: 30333
protocol: TCP
selector:
app: substrate
sessionAffinity: None
type: NodePort
# don't route external traffic to non-local pods
externalTrafficPolicy: Local
{{- if .Values.rbac.enable }}
# service account for substrate pods themselves
# no permissions for the api are required
apiVersion: v1
kind: ServiceAccount
metadata:
labels:
app: substrate
name: {{ .Values.rbac.name }}
{{- end }}
# https://kubernetes.io/docs/tutorials/stateful-application/basic-stateful-set/
# https://cloud.google.com/kubernetes-engine/docs/concepts/statefulset
apiVersion: apps/v1
kind: StatefulSet
metadata:
name: substrate
spec:
selector:
matchLabels:
app: substrate
serviceName: substrate
replicas: {{ .Values.nodes.replicas }}
updateStrategy:
type: RollingUpdate
podManagementPolicy: Parallel
template:
metadata:
labels:
app: substrate
spec:
{{- if .Values.rbac.enable }}
serviceAccountName: {{ .Values.rbac.name }}
{{- else }}
serviceAccountName: default
{{- end }}
affinity:
nodeAffinity:
requiredDuringSchedulingIgnoredDuringExecution:
nodeSelectorTerms:
- matchExpressions:
- key: node
operator: In
values:
- substrate
podAntiAffinity:
requiredDuringSchedulingIgnoredDuringExecution:
- labelSelector:
matchExpressions:
- key: "app"
operator: In
values:
- substrate
topologyKey: "kubernetes.io/hostname"
terminationGracePeriodSeconds: 300
{{- if .Values.validator.keys }}
volumes:
- name: substrate-validator-secrets
secret:
secretName: substrate-secrets
initContainers:
- name: prepare-secrets
image: busybox
command: [ "/bin/sh" ]
args:
- -c
- sed -n -r "s/^${POD_NAME}-key ([^ ]+)$/\1/p" /etc/validator/secrets > {{ .Values.image.basepath }}/key;
sed -n -r "s/^${POD_NAME}-node-key ([^ ]+)$/\1/p" /etc/validator/secrets > {{ .Values.image.basepath }}/node-key
env:
# workaround to get the pod's hostname, from:
# https://kubernetes.io/docs/tasks/inject-data-application/environment-variable-expose-pod-information/
- name: POD_NAME
valueFrom:
fieldRef:
fieldPath: metadata.name
volumeMounts:
- name: substrate-validator-secrets
readOnly: true
mountPath: "/etc/validator"
- name: substratedir
mountPath: {{ .Values.image.basepath }}
{{- end }}
containers:
- name: substrate
imagePullPolicy: "{{ .Values.image.pullPolicy }}"
image: "{{ .Values.image.repository }}:{{ .Values.image.tag }}"
{{- if .Values.resources }}
resources:
requests:
memory: {{ .Values.resources.memory }}
cpu: {{ .Values.resources.cpu }}
{{- end }}
ports:
- containerPort: 30333
name: p2p
- containerPort: 9933
name: http-rpc
- containerPort: 9944
name: websocket-rpc
command: ["/bin/sh"]
args:
- -c
- exec /usr/local/bin/substrate
--base-path {{ .Values.image.basepath }}
--name $(POD_NAME)
{{- if .Values.validator.enable }}
--validator
{{- end }}
{{- if .Values.validator.keys }}
--key $(cat {{ .Values.image.basepath }}/key)
--node-key $(cat {{ .Values.image.basepath }}/node-key)
{{- end }}
{{- range .Values.nodes.args }} {{ . }} {{- end }}
env:
- name: POD_NAME
valueFrom:
fieldRef:
fieldPath: metadata.name
volumeMounts:
- name: substratedir
mountPath: {{ .Values.image.basepath }}
readinessProbe:
httpGet:
path: /health
port: http-rpc
initialDelaySeconds: 10
periodSeconds: 10
livenessProbe:
httpGet:
path: /health
port: http-rpc
initialDelaySeconds: 10
periodSeconds: 10
securityContext:
runAsUser: 1000
fsGroup: 1000
volumeClaimTemplates:
- metadata:
name: substratedir
spec:
accessModes: [ "ReadWriteOnce" ]
storageClassName: ssd
resources:
requests:
storage: 32Gi
# set tag manually --set image.tag=latest
image:
repository: parity/substrate
tag: latest
pullPolicy: Always
basepath: /substrate
# if set to true a service account for substrate will be created
rbac:
enable: true
name: substrate
nodes:
replicas: 2
args:
# name and data directory are set by the chart itself
# key and node-key may be provided on commandline invocation
#
# - --chain
# - krummelanke
# serve rpc within the local network
# - fenced off from the world via firewall
# - used for health checks
- --rpc-external
- --ws-external
# - --log
# - sub-libp2p=trace
validator:
enable: true
# adds --validator commandline option
#
# key and node-key can be given in a base64 encoded keyfile string (at
# validator.keys) which has the following format:
#
# substrate-0-key <key-seed>
# substrate-0-node-key <node-secret-key>
# substrate-1-key <key-seed>
# substrate-1-node-key <node-secret-key>
#
# pod names are canonical. changing these or providing a different number of
# keys than the replicas count will lead to behaviour no one has ever
# experienced before.
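#
# a minimal sketch of producing that base64 string (assumptions: GNU base64;
# the seed values are placeholders, not real keys):
#
#   printf '%s\n' \
#     'substrate-0-key <key-seed>' \
#     'substrate-0-node-key <node-secret-key>' \
#     'substrate-1-key <key-seed>' \
#     'substrate-1-node-key <node-secret-key>' > keyfile
#   helm install ... --set validator.keys="$(base64 -w0 keyfile)"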
# maybe adapt the resource limits here to the nodes of the pool
# resources:
# memory: "5Gi"
# cpu: "1.5"