Use explicit external network for CI (#177)

* Remove unused image build

* Use explicit external network for CI
Matt Pryor 2023-12-13 14:15:18 +00:00 committed by GitHub
parent a6626576cf
commit 00c13f35cc
8 changed files with 3 additions and 441 deletions


@@ -3,46 +3,9 @@ on:
push:
jobs:
build_push_utils_image:
name: Build and push images
runs-on: ubuntu-latest
steps:
- name: Check out the repository
uses: actions/checkout@v3
- name: Login to GitHub Container Registry
uses: docker/login-action@v2
with:
registry: ghcr.io
username: ${{ github.actor }}
password: ${{ secrets.GITHUB_TOKEN }}
- name: Calculate metadata for image
id: image-meta
uses: docker/metadata-action@v4
with:
images: ghcr.io/stackhpc/capi-helm-utils
# Produce the branch name or tag and the SHA as tags
tags: |
type=ref,event=branch
type=ref,event=tag
type=sha,prefix=
- name: Build and push image
uses: stackhpc/github-actions/docker-multiarch-build-push@master
with:
cache-key: utils
context: ./utils
platforms: linux/amd64,linux/arm64
push: true
tags: ${{ steps.image-meta.outputs.tags }}
labels: ${{ steps.image-meta.outputs.labels }}
build_push_chart:
name: Build and push Helm chart
name: Build and push Helm charts
runs-on: ubuntu-latest
# Only build and push the chart if the image built successfully
needs: [build_push_utils_image]
steps:
- name: Check out the repository
uses: actions/checkout@v3
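
For context on what is being removed: the metadata step fed its output into the push step, so for a push to a branch named main at short SHA abc1234 (hypothetical values) the job would have produced the tags:

ghcr.io/stackhpc/capi-helm-utils:main
ghcr.io/stackhpc/capi-helm-utils:abc1234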


@@ -51,6 +51,8 @@ jobs:
registryMirrors:
docker.io:
- ${{ secrets.DOCKER_HUB_MIRROR_URL }}
clusterNetworking:
externalNetworkId: ${{ secrets.EXTERNAL_NETWORK_ID }}
controlPlane:
machineFlavor: ${{ secrets.CONTROL_PLANE_FLAVOR }}
machineCount: 1
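
The new EXTERNAL_NETWORK_ID secret is expected to hold the UUID of an existing external network in the target OpenStack cloud; a sketch of looking it up with the OpenStack CLI, assuming credentials are already configured:

openstack network list --external -f value -c ID -c Name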


@@ -1,139 +0,0 @@
{{/*
Job that cleans up artifacts from the previous job-based addon installation
in preparation for creating addon objects.
We only produce the job if jobs from a previous installation exist.
*/}}
{{- $clusterName := include "cluster-addons.clusterName" . }}
{{- $exists := false }}
{{- range $job := (lookup "batch/v1" "Job" .Release.Namespace "").items }}
{{-
$exists = or
$exists
(and
(index $job.metadata.labels "app.kubernetes.io/name" | default "" | eq "addons")
(index $job.metadata.labels "app.kubernetes.io/instance" | default "" | eq $clusterName)
)
}}
{{- end }}
{{- if $exists }}
apiVersion: batch/v1
kind: Job
metadata:
name: {{ include "cluster-addons.componentName" (list . "addons-migrate") }}
labels: {{ include "cluster-addons.componentLabels" (list . "addons-migrate") | nindent 4 }}
annotations:
helm.sh/hook: pre-upgrade
helm.sh/hook-delete-policy: before-hook-creation,hook-succeeded
spec:
backoffLimit: {{ .Values.hooks.backoffLimit }}
activeDeadlineSeconds: {{ .Values.hooks.activeDeadlineSeconds }}
template:
metadata:
labels: {{ include "cluster-addons.componentSelectorLabels" (list . "addons-migrate") | nindent 8 }}
spec:
{{- with .Values.hooks.imagePullSecrets }}
imagePullSecrets: {{ toYaml . | nindent 8 }}
{{- end }}
securityContext: {{ toYaml .Values.hooks.podSecurityContext | nindent 8 }}
restartPolicy: OnFailure
containers:
- name: addons-migrate
image: {{
printf "%s:%s"
.Values.hooks.image.repository
(default .Chart.AppVersion .Values.hooks.image.tag)
}}
imagePullPolicy: {{ .Values.hooks.image.pullPolicy }}
securityContext: {{ toYaml .Values.hooks.securityContext | nindent 12 }}
args:
- /bin/bash
- -c
- |
set -ex
test -f "$KUBECONFIG" || exit 0
kubectl version || exit 0
# Remove all the old kustomize releases where possible
helm status -n kustomize-releases ccm-openstack && \
helm delete -n kustomize-releases ccm-openstack
helm status -n kustomize-releases metrics-server && \
helm delete -n kustomize-releases metrics-server
# The csi-cinder kustomize release contains the Cinder storage class, which we cannot delete
# if there are volumes associated with it
# Instead, we move the release to the new namespace, move the storage class into a separate
# release and annotate the storage class so that it doesn't get removed by the Helm upgrade
if helm status -n kustomize-releases csi-cinder; then
helm-move csi-cinder kustomize-releases {{ .Values.openstack.targetNamespace }}
helm-adopt \
csi-cinder-storageclass \
{{ .Values.openstack.targetNamespace }} \
storageclass/{{ .Values.openstack.csiCinder.storageClass.name }}
kubectl annotate \
storageclass/{{ .Values.openstack.csiCinder.storageClass.name }} \
"helm.sh/resource-policy=keep"
fi
# Adopt resources previously created in post-install scripts into the relevant Helm releases
helm-adopt \
cni-calico \
{{ .Values.cni.calico.release.namespace }} \
installation/default
helm-adopt \
kube-prometheus-stack-dashboards \
{{ .Values.monitoring.kubePrometheusStack.release.namespace }} \
configmap/nvidia-dcgm-exporter-dashboard \
--namespace {{ .Values.monitoring.kubePrometheusStack.release.namespace }}
helm-adopt \
loki-stack-dashboards \
{{ .Values.monitoring.kubePrometheusStack.release.namespace }} \
configmap/loki-stack-grafana-datasource \
--namespace {{ .Values.monitoring.kubePrometheusStack.release.namespace }}
helm-adopt \
loki-stack-dashboards \
{{ .Values.monitoring.kubePrometheusStack.release.namespace }} \
configmap/loki-stack-grafana-dashboard \
--namespace {{ .Values.monitoring.kubePrometheusStack.release.namespace }}
# With the version bump to 40.x, kube-prometheus-stack picks up prometheus-node-exporter 4.x
# This changes the selector labels on the daemonset, which is an immutable field, so we remove
# the daemonset with the old labels before upgrading
# https://github.com/prometheus-community/helm-charts/tree/main/charts/kube-prometheus-stack#from-39x-to-40x
kubectl delete daemonset \
-l release=kube-prometheus-stack,app=prometheus-node-exporter \
-n {{ .Values.monitoring.kubePrometheusStack.release.namespace }}
# With the version bump from 2.6.3 to 2.6.4, loki-stack picks up an updated promtail that
# changes the selector labels on the daemonset, which is an immutable field
# So we remove the daemonset with the old labels before upgrading
kubectl delete daemonset \
-l release=loki-stack,app=promtail \
-n {{ .Values.monitoring.lokiStack.release.namespace }}
env:
- name: KUBECONFIG
value: /etc/kubernetes/config
resources: {{ toYaml .Values.hooks.resources | nindent 12 }}
volumeMounts:
- name: etc-kubernetes
mountPath: /etc/kubernetes
readOnly: true
hostNetwork: {{ .Values.hooks.hostNetwork }}
{{- with .Values.hooks.nodeSelector }}
nodeSelector: {{ toYaml . | nindent 8 }}
{{- end }}
{{- with .Values.hooks.affinity }}
affinity: {{ toYaml . | nindent 8 }}
{{- end }}
{{- with .Values.hooks.tolerations }}
tolerations: {{ toYaml . | nindent 8 }}
{{- end }}
volumes:
- name: etc-kubernetes
secret:
secretName: {{ include "cluster-addons.componentName" (list . "kubeconfig") }}
optional: true
items:
- key: value
path: config
{{- end }}
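
The job removed above was gated on Helm's lookup function, which queries the live cluster at render time and returns nothing under helm template or --dry-run, so the hook only ever rendered against a real cluster. An equivalent check from the command line, as a sketch using the same labels as the template:

kubectl get jobs -n <release-namespace> \
  -l app.kubernetes.io/name=addons,app.kubernetes.io/instance=<cluster-name>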


@@ -4,28 +4,6 @@
# if not given, the release name is used
clusterName:
# Settings for hook jobs
hooks:
image:
repository: ghcr.io/stackhpc/capi-helm-utils
tag: # Defaults to chart appVersion if not given
pullPolicy: IfNotPresent
imagePullSecrets: []
backoffLimit: 1000
activeDeadlineSeconds: 3600
podSecurityContext:
runAsNonRoot: true
securityContext:
allowPrivilegeEscalation: false
capabilities:
drop: [ALL]
readOnlyRootFilesystem: true
resources: {}
hostNetwork: false
tolerations: []
nodeSelector: {}
affinity: {}
# Settings for the CNI addon
cni:
# Indicates if a CNI should be deployed
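
Before this removal, the hook behaviour could be tuned per install via these values; a minimal sketch, with a hypothetical release name and chart path:

helm upgrade --install cluster-addons ./charts/cluster-addons \
  --set hooks.image.tag=main \
  --set hooks.hostNetwork=true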


@@ -1,78 +0,0 @@
FROM debian:bullseye-slim
ENV UTILS_UID 1001
ENV UTILS_GID 1001
ENV UTILS_USER utils
ENV UTILS_GROUP utils
ENV UTILS_HOME /home/utils
RUN groupadd --gid $UTILS_GID $UTILS_GROUP && \
useradd \
--home-dir $UTILS_HOME \
--create-home \
--gid $UTILS_GID \
--shell /sbin/nologin \
--uid $UTILS_UID \
$UTILS_USER
RUN apt-get update && \
apt-get install -y curl git jq python3 python3-pip tini && \
rm -rf /var/lib/apt/lists/*
ARG KUBECTL_VN_1_24=v1.24.11
RUN set -ex; \
OS_ARCH="$(uname -m)"; \
case "$OS_ARCH" in \
x86_64) kubectl_arch=amd64 ;; \
aarch64) kubectl_arch=arm64 ;; \
*) false ;; \
esac; \
curl -fsSL https://dl.k8s.io/release/${KUBECTL_VN_1_24}/bin/linux/${kubectl_arch}/kubectl -o /usr/bin/kubectl-v1.24; \
chmod +x /usr/bin/kubectl-v1.24; \
/usr/bin/kubectl-v1.24 version --client
ARG KUBECTL_VN_1_25=v1.25.7
RUN set -ex; \
OS_ARCH="$(uname -m)"; \
case "$OS_ARCH" in \
x86_64) kubectl_arch=amd64 ;; \
aarch64) kubectl_arch=arm64 ;; \
*) false ;; \
esac; \
curl -fsSL https://dl.k8s.io/release/${KUBECTL_VN_1_25}/bin/linux/${kubectl_arch}/kubectl -o /usr/bin/kubectl-v1.25; \
chmod +x /usr/bin/kubectl-v1.25; \
/usr/bin/kubectl-v1.25 version --client
ARG KUBECTL_VN_1_26=v1.26.2
RUN set -ex; \
OS_ARCH="$(uname -m)"; \
case "$OS_ARCH" in \
x86_64) kubectl_arch=amd64 ;; \
aarch64) kubectl_arch=arm64 ;; \
*) false ;; \
esac; \
curl -fsSL https://dl.k8s.io/release/${KUBECTL_VN_1_26}/bin/linux/${kubectl_arch}/kubectl -o /usr/bin/kubectl-v1.26; \
chmod +x /usr/bin/kubectl-v1.26; \
/usr/bin/kubectl-v1.26 version --client
ENV HELM_CACHE_HOME /tmp/helm/cache
ENV HELM_CONFIG_HOME /tmp/helm/config
ENV HELM_DATA_HOME /tmp/helm/data
ARG HELM_VERSION=v3.11.2
RUN set -ex; \
OS_ARCH="$(uname -m)"; \
case "$OS_ARCH" in \
x86_64) helm_arch=amd64 ;; \
aarch64) helm_arch=arm64 ;; \
*) false ;; \
esac; \
curl -fsSL https://get.helm.sh/helm-${HELM_VERSION}-linux-${helm_arch}.tar.gz | \
tar -xz --strip-components 1 -C /usr/bin linux-${helm_arch}/helm; \
helm version
ENV KUBECTL_VN_LATEST v1.26
COPY ./bin/* /usr/bin/
USER $UTILS_UID
WORKDIR $UTILS_HOME
ENTRYPOINT ["tini", "-g", "--"]
CMD ["bash"]


@@ -1,65 +0,0 @@
#!/usr/bin/env bash
#####
# Script that adopts the specified resource into the specified release
#####
set -eo pipefail
RELEASE_NAME=
RELEASE_NAMESPACE=
OBJECT=
OBJECT_NAMESPACE=
while :; do
case $1 in
--help)
echo "Adopts a resource into a Helm release."
echo ""
echo "Usage: helm-adopt RELEASE-NAME RELEASE-NAMESPACE OBJECT [--namespace OBJECT-NAMESPACE]"
exit
;;
--release-namespace)
RELEASE_NAMESPACE="$2"
shift
;;
--namespace)
OBJECT_NAMESPACE="$2"
shift
;;
?*)
if [ -z "$RELEASE_NAME" ]; then
RELEASE_NAME="$1"
elif [ -z "$RELEASE_NAMESPACE" ]; then
RELEASE_NAMESPACE="$1"
elif [ -z "$OBJECT" ]; then
OBJECT="$1"
else
echo "Too many arguments" >&2
exit 1
fi
;;
*)
break
esac
shift
done
if [ -z "$RELEASE_NAME" ]; then
echo "RELEASE-NAME was not given" >&2
exit 1
elif [ -z "$RELEASE_NAMESPACE" ]; then
echo "RELEASE-NAMESPACE was not given" >&2
exit 1
elif [ -z "$OBJECT" ]; then
echo "OBJECT was not given" >&2
exit 1
fi
KUBECTL_ARGS="$OBJECT"
[ -n "$OBJECT_NAMESPACE" ] && KUBECTL_ARGS="$KUBECTL_ARGS --namespace $OBJECT_NAMESPACE"
kubectl get $KUBECTL_ARGS || exit 0
kubectl label --overwrite $KUBECTL_ARGS "app.kubernetes.io/managed-by=Helm"
kubectl annotate --overwrite $KUBECTL_ARGS "meta.helm.sh/release-name=$RELEASE_NAME"
kubectl annotate --overwrite $KUBECTL_ARGS "meta.helm.sh/release-namespace=$RELEASE_NAMESPACE"


@@ -1,80 +0,0 @@
#!/usr/bin/env bash
#####
# Script that moves the specified Helm release (NOT resources!) from one namespace to another
#####
set -eo pipefail
RELEASE_NAME=
FROM_NAMESPACE=
TO_NAMESPACE=
while :; do
case $1 in
--help)
echo "Moves the specified Helm release from one namespace to another."
echo ""
echo "WARNING: This script does NOT move resources, only the release itself."
echo " It should only be used with charts that explicitly specify resource namespaces."
echo ""
echo "Usage: helm-move RELEASE-NAME FROM-NAMESPACE TO-NAMESPACE"
exit
;;
?*)
if [ -z "$RELEASE_NAME" ]; then
RELEASE_NAME="$1"
elif [ -z "$FROM_NAMESPACE" ]; then
FROM_NAMESPACE="$1"
elif [ -z "$TO_NAMESPACE" ]; then
TO_NAMESPACE="$1"
else
echo "Too many arguments" >&2
exit 1
fi
;;
*)
break
esac
shift
done
if [ -z "$RELEASE_NAME" ]; then
echo "RELEASE-NAME was not given" >&2
exit 1
elif [ -z "$FROM_NAMESPACE" ]; then
echo "FROM-NAMESPACE was not given" >&2
exit 1
elif [ -z "$TO_NAMESPACE" ]; then
echo "TO-NAMESPACE was not given" >&2
exit 1
fi
# Make sure that the target namespace exists
kubectl create ns "$TO_NAMESPACE" || true
# Move each secret that corresponds to a revision of the release to the new namespace
for secret in $(kubectl -n $FROM_NAMESPACE get secret -o name --field-selector "type=helm.sh/release.v1" -l "name=$RELEASE_NAME"); do
# We need to replace the namespace in the release data
release="$(
kubectl -n $FROM_NAMESPACE get $secret -o go-template='{{.data.release}}' |
base64 -d |
base64 -d |
gzip -d |
jq -c ".namespace=\"$TO_NAMESPACE\"" |
gzip |
base64 |
base64
)"
# Copy the secret to a new namespace, modifying it as it goes
kubectl -n $FROM_NAMESPACE get $secret -o json |
jq -c 'del(.metadata.creationTimestamp)' |
jq -c 'del(.metadata.resourceVersion)' |
jq -c 'del(.metadata.uid)' |
jq -c ".metadata.namespace=\"$TO_NAMESPACE\"" |
jq -c ".data.release=\"$release\"" |
kubectl create -f -
# Remove the old secret
kubectl -n $FROM_NAMESPACE delete $secret
done
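
The double base64 decode is deliberate: the go-template output is the API-level base64 encoding of the Secret data, inside which Helm stores the release as a further base64-encoded, gzipped JSON document. A sketch of inspecting one by hand (namespace, release name and revision are hypothetical):

kubectl -n old-ns get secret sh.helm.release.v1.csi-cinder.v1 \
  -o go-template='{{.data.release}}' |
  base64 -d | base64 -d | gzip -d | jq '.name, .namespace, .info.status'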


@@ -1,19 +0,0 @@
#!/usr/bin/env bash
#####
# Script that selects the correct kubectl version for the target API server and
# executes the given kubectl command using it
# 
# This is done by making a pre-flight request with the latest kubectl version to get
# the API server version; as the simplest possible API request, it should work
# despite any version skew... :fingerscrossed:
#####
set -eo pipefail
# Use the latest version of kubectl to detect the server version
kubectl_exe=kubectl-$KUBECTL_VN_LATEST
server_version="$($kubectl_exe version -o json | jq -r '"v" + .serverVersion.major + "." + .serverVersion.minor')"
# Use the kubectl matching the server version if we have it, otherwise fall back to the latest
which "kubectl-$server_version" > /dev/null && kubectl_exe="kubectl-$server_version"
exec $kubectl_exe "$@"
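
A worked example of the selection, assuming (hypothetically) the image above talking to a v1.25 API server:

kubectl-v1.26 version -o json | \
  jq -r '"v" + .serverVersion.major + "." + .serverVersion.minor'
# -> v1.25, so the wrapper execs kubectl-v1.25 if present, else keeps the latest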