New-style addons working with library chart

Matt Pryor 2022-01-19 15:34:54 +00:00
parent 7b9be43d88
commit a525daaad0
76 changed files with 1148 additions and 1808 deletions

View File

@@ -50,6 +50,11 @@ jobs:
# when determining the number of commits since the last tag
fetch-depth: 0
- name: Resolve chart dependencies in order
  run: |
    helm dependency update ./charts/cluster-addons
    helm dependency update ./charts/openstack-cluster
- name: Get SemVer version for current commit
  id: semver
  uses: stackhpc/github-actions/semver@master

View File

@@ -1,6 +0,0 @@
FROM quay.io/operator-framework/helm-operator:v1.16.0
ENV HOME=/opt/helm
COPY watches.yaml ${HOME}/watches.yaml
COPY charts/addon ${HOME}/helm-charts/addon
WORKDIR ${HOME}

View File

@@ -1,8 +1,8 @@
apiVersion: v2
name: addon
description: >
Helm chart that installs an addon into a target cluster using a job.
Helm chart that provides templates for producing jobs that install addons onto a target cluster.
The target cluster can be local (using a service account) or remote (using a kubeconfig file).
type: application
type: library
version: 0.1.0
appVersion: main

View File

@@ -1,6 +0,0 @@
# Just create and delete a namespace
custom:
  install: |
    kubectl create ns test-ns
  delete: |
    kubectl delete ns test-ns

View File

@@ -1,36 +0,0 @@
installType: helm
helm:
  chart:
    repo: https://charts.jetstack.io
    name: cert-manager
    version: v1.6.1
  release:
    namespace: cert-manager
    name: cert-manager
    values:
      installCRDs: true
      prometheus:
        enabled: false
## Use extraFiles and a post-install hook to install an issuer
extraFiles:
  acme-http01-issuer.yaml: |
    apiVersion: cert-manager.io/v1
    kind: ClusterIssuer
    metadata:
      name: letsencrypt-http01
    spec:
      acme:
        server: https://acme-v02.api.letsencrypt.org/directory
        privateKeySecretRef:
          name: letsencrypt-http01-key
        solvers:
          - http01:
              ingress:
                class: nginx
hooks:
  postInstall: |
    kubectl apply -f acme-http01-issuer.yaml

View File

@@ -1,24 +0,0 @@
# We can access this in the kustomization template
# Just make sure it doesn't conflict with any actual chart options!
version: v0.5.2
installType: kustomize
# The URLs of the metrics server manifests
kustomize:
  kustomizationTemplate: |
    resources:
      - https://github.com/kubernetes-sigs/metrics-server/releases/download/{{ .Values.version }}/components.yaml
  kustomization:
    patches:
      - patch: |-
          - op: add
            path: /spec/template/spec/containers/0/args/-
            value: --kubelet-insecure-tls
        target:
          kind: Deployment
          name: metrics-server
  # Define the resources to watch for
  resourceNamespace: kube-system
  resources:
    - deployment/metrics-server

View File

@@ -3,31 +3,45 @@
{{- end }}
{{- define "addon.fullname" -}}
{{- if .Values.fullnameOverride }}
{{- .Values.fullnameOverride | trunc 63 | trimSuffix "-" }}
{{- $ctx := index . 0 }}
{{- $componentName := index . 1 }}
{{- if $ctx.Values.fullnameOverride }}
{{- printf "%s-%s" $ctx.Values.fullnameOverride $componentName | trunc 63 | trimSuffix "-" }}
{{- else }}
{{- $name := default .Chart.Name .Values.nameOverride }}
{{- if contains $name .Release.Name }}
{{- .Release.Name | trunc 63 | trimSuffix "-" }}
{{- $name := default $ctx.Chart.Name $ctx.Values.nameOverride }}
{{- if contains $name $ctx.Release.Name }}
{{- printf "%s-%s" $ctx.Release.Name $componentName | trunc 63 | trimSuffix "-" }}
{{- else }}
{{- printf "%s-%s" .Release.Name $name | trunc 63 | trimSuffix "-" }}
{{- printf "%s-%s-%s" $ctx.Release.Name $name $componentName | trunc 63 | trimSuffix "-" }}
{{- end }}
{{- end }}
{{- end }}
{{- define "addon.job.name" -}}
{{- $ctx := index . 0 }}
{{- $componentName := index . 1 }}
{{- $operation := index . 2 }}
{{- $fullname := include "addon.fullname" (list $ctx $componentName) }}
{{- printf "%s-%s" $fullname $operation | trunc 63 | trimSuffix "-" }}
{{- end }}
{{- define "addon.chart" -}}
{{- printf "%s-%s" .Chart.Name .Chart.Version | replace "+" "_" | trunc 63 | trimSuffix "-" }}
{{- end }}
{{- define "addon.selectorLabels" -}}
app.kubernetes.io/name: {{ include "addon.name" . }}
app.kubernetes.io/instance: {{ .Release.Name }}
{{- $ctx := index . 0 -}}
{{- $componentName := index . 1 -}}
app.kubernetes.io/name: {{ include "addon.name" $ctx }}
app.kubernetes.io/instance: {{ $ctx.Release.Name }}
app.kubernetes.io/component: {{ $componentName }}
{{- end }}
{{- define "addon.jobSelectorLabels" -}}
{{- define "addon.job.selectorLabels" -}}
{{- $ctx := index . 0 -}}
{{- $operation := index . 1 -}}
{{ include "addon.selectorLabels" $ctx }}
{{- $componentName := index . 1 -}}
{{- $operation := index . 2 -}}
{{ include "addon.selectorLabels" (list $ctx $componentName) }}
capi.stackhpc.com/operation: {{ $operation }}
{{- end }}
@@ -40,21 +54,15 @@ app.kubernetes.io/version: {{ .Chart.AppVersion | quote }}
{{- end }}
{{- define "addon.labels" -}}
{{ include "addon.commonLabels" . }}
{{- $ctx := index . 0 -}}
{{ include "addon.commonLabels" $ctx }}
{{ include "addon.selectorLabels" . }}
{{- end }}
{{- define "addon.jobLabels" -}}
{{- define "addon.job.labels" -}}
{{- $ctx := index . 0 -}}
{{ include "addon.commonLabels" $ctx }}
{{ include "addon.jobSelectorLabels" . }}
{{- end }}
{{/*
Template that prints the configured image.
*/}}
{{- define "addon.image" -}}
{{- printf "%s:%s" .Values.image.repository (default .Chart.AppVersion .Values.image.tag) }}
{{ include "addon.job.selectorLabels" . }}
{{- end }}
{{/*
@@ -90,11 +98,13 @@ Template for a Helm values file that consists of the given values merged with the
values obtained from rendering the valuesTemplate.
*/}}
{{- define "addon.helm.values" }}
{{- if .Values.helm.release.valuesTemplate }}
{{- $templateValues := tpl .Values.helm.release.valuesTemplate . | fromYaml }}
{{- include "addon.mergeConcat" (list .Values.helm.release.values $templateValues) }}
{{- $ctx := index . 0 }}
{{- $config := index . 1 }}
{{- if $config.release.valuesTemplate }}
{{- $templateValues := tpl $config.release.valuesTemplate $ctx | fromYaml }}
{{- include "addon.mergeConcat" (list $config.release.values $templateValues) }}
{{- else }}
{{- toYaml .Values.helm.release.values }}
{{- toYaml $config.release.values }}
{{- end }}
{{- end }}
@@ -110,31 +120,30 @@ by checking for the pending-[install,upgrade] status.
{{- define "addon.helm.install" -}}
{{-
$chartRepo := required
".Values.helm.chart.repo is required for a Helm job"
.Values.helm.chart.repo
"chart.repo is required for a Helm job"
.chart.repo
}}
{{-
$chartName := required
".Values.helm.chart.name is required for a Helm job"
.Values.helm.chart.name
"chart.name is required for a Helm job"
.chart.name
}}
{{-
$chartVersion := required
".Values.helm.chart.version is required for a Helm job"
.Values.helm.chart.version
"chart.version is required for a Helm job"
.chart.version
}}
{{-
$releaseNamespace := required
".Values.helm.release.namespace is required for a Helm job"
.Values.helm.release.namespace
"release.namespace is required for a Helm job"
.release.namespace
}}
{{-
$releaseName := required
".Values.helm.release.name is required for a Helm job"
.Values.helm.release.name
"release.name is required for a Helm job"
.release.name
}}
{{- $releaseTimeout := .Values.helm.release.timeout -}}
{{- range .Values.helm.crdManifests }}
{{- range .crdManifests }}
kubectl apply -f {{ . }}
{{- end }}
helm-upgrade {{ $releaseName }} {{ $chartName }} \
@@ -144,13 +153,13 @@ helm-upgrade {{ $releaseName }} {{ $chartName }} \
--create-namespace \
--repo {{ $chartRepo }} \
--version {{ $chartVersion }} \
{{- if .Values.helm.crdManifests -}}
{{- if .crdManifests -}}
--skip-crds \
{{- end }}
--values values.yaml \
--wait \
--wait-for-jobs \
--timeout {{ $releaseTimeout }} \
--timeout {{ .release.timeout }} \
$HELM_EXTRA_ARGS
{{- end }}
@@ -160,20 +169,19 @@ Template for a script that deletes a Helm release.
{{- define "addon.helm.delete" -}}
{{-
$releaseNamespace := required
".Values.helm.release.namespace is required for a Helm job"
.Values.helm.release.namespace
"release.namespace is required for a Helm job"
.release.namespace
}}
{{-
$releaseName := required
".Values.helm.release.name is required for a Helm job"
.Values.helm.release.name
"release.name is required for a Helm job"
.release.name
}}
{{- $releaseTimeout := .Values.helm.release.timeout -}}
helm-delete {{ $releaseName }} \
--namespace {{ $releaseNamespace }} \
--wait \
--timeout {{ $releaseTimeout }}
{{- range .Values.helm.crdManifests }}
--timeout {{ .release.timeout }}
{{- range .crdManifests }}
kubectl delete -f {{ . }}
{{- end }}
{{- end }}
@@ -182,15 +190,17 @@ kubectl delete -f {{ . }}
Template for a kustomization file for use with Kustomize.
*/}}
{{- define "addon.kustomize.kustomization" }}
{{- $ctx := index . 0 }}
{{- $config := index . 1 }}
apiVersion: kustomize.config.k8s.io/v1beta1
kind: Kustomization
{{- if .Values.kustomize.kustomizationTemplate }}
{{- $templateValues := tpl .Values.kustomize.kustomizationTemplate . | fromYaml }}
{{- include "addon.mergeConcat" (list .Values.kustomize.kustomization $templateValues) }}
{{- else if .Values.kustomize.kustomization }}
{{- toYaml .Values.kustomize.kustomization }}
{{- if $config.kustomizationTemplate }}
{{- $templateValues := tpl $config.kustomizationTemplate $ctx | fromYaml }}
{{ include "addon.mergeConcat" (list $config.kustomization $templateValues) }}
{{- else if $config.kustomization }}
{{ toYaml $config.kustomization }}
{{- else }}
{{- fail "One of .Values.kustomize.kustomization or .Values.kustomize.kustomizationTemplate is required for a Kustomize job" }}
{{- fail "One of kustomization or kustomizationTemplate is required for a Kustomize job" }}
{{- end }}
{{- end }}
@@ -199,13 +209,13 @@ Template for a script that installs or upgrades resources using Kustomize.
*/}}
{{- define "addon.kustomize.install" }}
kustomize build . | kubectl apply -f -
{{- if .Values.kustomize.resources }}
{{- if .resources }}
{{-
$namespace := required
".Values.kustomize.resourceNamespace is required for a Kustomize job with resources"
.Values.kustomize.resourceNamespace
"resourceNamespace is required for a Kustomize job with resources"
.resourceNamespace
}}
{{- range .Values.kustomize.resources }}
{{- range .resources }}
kubectl -n {{ $namespace }} rollout status {{ . }}
{{- end }}
{{- end }}
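Rendered against a config like the metrics-server example removed above, the resulting install script is roughly this sketch:

```sh
kustomize build . | kubectl apply -f -
kubectl -n kube-system rollout status deployment/metrics-server
```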
@@ -216,14 +226,118 @@ Template for a script that deletes resources using Kustomize.
*/}}
{{- define "addon.kustomize.delete" }}
kustomize build . | kubectl delete -f -
{{- if .Values.kustomize.resources }}
{{- if .resources }}
{{-
$namespace := required
".Values.kustomize.resourceNamespace is required for a Kustomize job with resources"
.Values.kustomize.resourceNamespace
"resourceNamespace is required for a Kustomize job with resources"
.resourceNamespace
}}
{{- range .Values.kustomize.resources }}
{{- range .resources }}
kubectl -n {{ $namespace }} wait --for=delete {{ . }}
{{- end }}
{{- end }}
{{- end }}
{{/*
Template that produces the default configuration.
*/}}
{{- define "addon.config.defaults" -}}
image:
  repository: ghcr.io/stackhpc/k8s-utils
  tag: # Defaults to chart appVersion if not given
  pullPolicy: IfNotPresent
imagePullSecrets:
kubeconfigSecret:
  name:
  key: value
serviceAccountName:
# One of helm, kustomize or custom
installType: custom
helm:
  crdManifests: []
  chart:
    repo:
    name:
    version:
  release:
    namespace:
    name:
    timeout: 60m
    # The template is rendered with the root context, then the result is merged into the dict
    # Values from the template take precedence over the dict
    values: {}
    valuesTemplate:
kustomize:
  # The template is rendered with the root context, then the result is merged into the dict
  # Values from the template take precedence over the dict
  kustomization: {}
  kustomizationTemplate:
  resourceNamespace:
  resources: []
custom:
  # Scripts are treated as templates during rendering
  install:
  delete:
extraVolumes: []
extraFiles: {}
# The hook scripts are treated as templates during rendering
hooks:
  preInstall:
  postInstall:
  preDelete:
  postDelete:
backoffLimit: 1000
activeDeadlineSeconds: 3600
podSecurityContext:
  runAsNonRoot: true
securityContext:
  allowPrivilegeEscalation: false
resources: {}
hostNetwork: false
tolerations: []
nodeSelector: {}
affinity: {}
{{- end }}
{{/*
Template that produces a config secret, an install job and hooks for the specified addon.
If the addon is enabled, an install job is produced as part of the main release and a pre-delete
hook is also produced.
If the addon is disabled, then we check if the config secret exists for the addon. If it does, a
pre-upgrade hook is produced to uninstall the addon.
*/}}
{{- define "addon.job.fromConfig" -}}
{{- $ctx := index . 0 }}
{{- $name := index . 1 }}
{{- $overrides := index . 2 }}
{{- $enabled := index . 3 }}
{{- $defaults := include "addon.config.defaults" $ctx | fromYaml }}
{{- $config := include "addon.mergeConcat" (list $defaults $overrides) | fromYaml }}
{{- if $enabled }}
{{- include "addon.config.secret" (list $ctx $name $config) }}
---
{{- include "addon.job.install" (list $ctx $name $config) }}
---
{{- include "addon.job.uninstall" (list $ctx $name "pre-delete" $config) }}
{{- else if $ctx.Release.IsUpgrade }}
{{- $secretName := include "addon.fullname" (list $ctx $name) | printf "%s-config" }}
{{- if lookup "v1" "Secret" $ctx.Release.Namespace $secretName }}
{{- include "addon.job.uninstall" (list $ctx $name "pre-upgrade" $config) }}
{{- end }}
{{- end }}
{{- end }}
{{/*
Template that produces a config secret, an install job and a delete hook
for the configuration produced by the specified template.
*/}}
{{- define "addon.job" -}}
{{- $ctx := index . 0 }}
{{- $name := index . 1 }}
{{- $configTemplate := index . 2 }}
{{- $enabled := index . 3 }}
{{- $config := include $configTemplate $ctx | fromYaml }}
{{- include "addon.job.fromConfig" (list $ctx $name $config $enabled) }}
{{- end }}
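For orientation, a downstream chart invokes these entrypoints with a positional argument list, as the cluster-addons templates later in this commit do. A minimal sketch, where the `parent.my-addon.config` template name and the `myAddon.enabled` flag are illustrative rather than part of this commit:

```yaml
{{- /*
"parent.my-addon.config" must render the addon configuration as YAML;
it is merged over the defaults from "addon.config.defaults"
*/}}
{{- include "addon.job" (list . "my-addon" "parent.my-addon.config" .Values.myAddon.enabled) }}
```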

View File

@@ -0,0 +1,80 @@
{{- define "addon.job.install" -}}
{{- $ctx := index . 0 }}
{{- $name := index . 1 }}
{{- $config := index . 2 }}
apiVersion: batch/v1
kind: Job
metadata:
name: {{ include "addon.job.name" (list $ctx $name "install") }}-{{ $ctx.Release.Revision }}
labels: {{ include "addon.job.labels" (list $ctx $name "install") | nindent 4 }}
spec:
backoffLimit: {{ $config.backoffLimit }}
activeDeadlineSeconds: {{ $config.activeDeadlineSeconds }}
template:
metadata:
labels: {{ include "addon.job.selectorLabels" (list $ctx $name "install") | nindent 8 }}
spec:
{{- with $config.imagePullSecrets }}
imagePullSecrets: {{ toYaml . | nindent 8 }}
{{- end }}
securityContext: {{ toYaml $config.podSecurityContext | nindent 8 }}
restartPolicy: OnFailure
{{- if not $config.kubeconfigSecret.name }}
serviceAccountName: {{ tpl $config.serviceAccountName $ctx }}
{{- end }}
containers:
- name: install
image: {{ printf "%s:%s" $config.image.repository (default $ctx.Chart.AppVersion $config.image.tag) }}
imagePullPolicy: {{ $config.image.pullPolicy }}
securityContext: {{ toYaml $config.securityContext | nindent 12 }}
args:
- /bin/bash
- -c
- |
set -exo pipefail
{{- if $config.hooks.preInstall }}
source ./hook-preinstall.sh
{{- end }}
source ./install.sh
{{- if $config.hooks.postInstall }}
source ./hook-postinstall.sh
{{- end }}
{{- if $config.kubeconfigSecret.name }}
env:
- name: KUBECONFIG
value: /config/kubeconfig
{{- end }}
# Set the working directory to the directory containing the config
workingDir: /config
resources: {{ toYaml $config.resources | nindent 12 }}
volumeMounts:
- name: config
mountPath: /config
readOnly: true
hostNetwork: {{ $config.hostNetwork }}
{{- with $config.nodeSelector }}
nodeSelector: {{ toYaml . | nindent 8 }}
{{- end }}
{{- with $config.affinity }}
affinity: {{ toYaml . | nindent 8 }}
{{- end }}
{{- with $config.tolerations }}
tolerations: {{ toYaml . | nindent 8 }}
{{- end }}
volumes:
- name: config
projected:
sources:
- secret:
name: {{ include "addon.fullname" (list $ctx $name) }}-config
{{- if $config.kubeconfigSecret.name }}
- secret:
name: {{ tpl $config.kubeconfigSecret.name $ctx }}
items:
- key: {{ $config.kubeconfigSecret.key }}
path: kubeconfig
{{- end }}
{{- range $config.extraVolumes }}
- {{ toYaml . | nindent 16 }}
{{- end }}
{{- end }}

View File

@@ -0,0 +1,139 @@
{{- define "addon.job.uninstall" -}}
{{- $ctx := index . 0 }}
{{- $name := index . 1 }}
{{- $hook := index . 2 }}
{{- $config := index . 3 }}
apiVersion: batch/v1
kind: Job
metadata:
name: {{ include "addon.job.name" (list $ctx $name "uninstall") }}
labels: {{ include "addon.job.labels" (list $ctx $name "uninstall") | nindent 4 }}
annotations:
helm.sh/hook: {{ $hook }}
helm.sh/hook-delete-policy: before-hook-creation,hook-succeeded
spec:
backoffLimit: {{ $config.backoffLimit }}
activeDeadlineSeconds: {{ $config.activeDeadlineSeconds }}
template:
metadata:
labels: {{ include "addon.job.selectorLabels" (list $ctx $name "install") | nindent 8 }}
spec:
{{- with $config.imagePullSecrets }}
imagePullSecrets: {{ toYaml . | nindent 8 }}
{{- end }}
securityContext: {{ toYaml $config.podSecurityContext | nindent 8 }}
restartPolicy: OnFailure
serviceAccountName: {{ tpl $config.serviceAccountName $ctx }}
# Use init containers to do two things before uninstalling
#
#  1. Suspend any running install jobs for the addon
#  2. Install the kubeconfig file from the secret if required
#
# We don't use a regular volume for (2) because we need the hook not to block in the
# case where the secret is not available
initContainers:
- name: suspend-install-jobs
image: {{ printf "%s:%s" $config.image.repository (default $ctx.Chart.AppVersion $config.image.tag) }}
imagePullPolicy: {{ $config.image.pullPolicy }}
securityContext: {{ toYaml $config.securityContext | nindent 12 }}
args:
- /bin/bash
- -c
- |
set -ex
{{- $labels := include "addon.job.selectorLabels" (list $ctx $name "install") | fromYaml }}
{{- range $i, $label := keys $labels -}}
{{- if $i }}
LABELS="$LABELS,{{ $label }}={{ index $labels $label }}"
{{- else }}
LABELS="{{ $label }}={{ index $labels $label }}"
{{- end }}
{{- end }}
for job in $(kubectl get job -n {{ $ctx.Release.Namespace }} -l "$LABELS" -o name); do
kubectl patch $job -n {{ $ctx.Release.Namespace }} -p '{"spec":{"suspend":true}}'
done
resources: {{ toYaml $config.resources | nindent 12 }}
{{- if $config.kubeconfigSecret.name }}
- name: install-kubeconfig
image: {{ printf "%s:%s" $config.image.repository (default $ctx.Chart.AppVersion $config.image.tag) }}
imagePullPolicy: {{ $config.image.pullPolicy }}
securityContext: {{ toYaml $config.securityContext | nindent 12 }}
args:
- /bin/bash
- -c
- |
set -ex
kubectl_get_secret() {
kubectl get secret {{ tpl $config.kubeconfigSecret.name $ctx }} -n {{ $ctx.Release.Namespace }} "$@"
}
if kubectl_get_secret; then
{{- $template := printf "{{ index .data \"%s\" | base64decode }}" $config.kubeconfigSecret.key }}
kubectl_get_secret -o go-template='{{ $template }}' > /config/auth/kubeconfig
fi
resources: {{ toYaml $config.resources | nindent 12 }}
volumeMounts:
- name: kubeconfig
mountPath: /config/auth
{{- end }}
containers:
- name: uninstall
image: {{ printf "%s:%s" $config.image.repository (default $ctx.Chart.AppVersion $config.image.tag) }}
imagePullPolicy: {{ $config.image.pullPolicy }}
securityContext: {{ toYaml $config.securityContext | nindent 12 }}
# We can only make a best effort to delete the addon as we don't want the hook to block
# So we bail without an error if the kubeconfig doesn't exist or the API is not reachable
# and we allow the scripts to fail without preventing execution of the following scripts
args:
- /bin/bash
- -c
- |
{{- if $config.kubeconfigSecret.name }}
test -f "$KUBECONFIG" || exit 0
{{- end }}
kubectl version || exit 0
{{- if $config.hooks.preDelete }}
source ./hook-predelete.sh || true
{{- end }}
source ./delete.sh || true
{{- if $config.hooks.postDelete }}
source ./hook-postdelete.sh || true
{{- end }}
{{- if $config.kubeconfigSecret.name }}
env:
- name: KUBECONFIG
value: /config/auth/kubeconfig
{{- end }}
# Set the working directory to the directory containing the config
workingDir: /config
resources: {{ toYaml $config.resources | nindent 12 }}
volumeMounts:
- name: config
mountPath: /config
readOnly: true
- name: kubeconfig
mountPath: /config/auth
readOnly: true
hostNetwork: {{ $config.hostNetwork }}
{{- with $config.nodeSelector }}
nodeSelector: {{ toYaml . | nindent 8 }}
{{- end }}
{{- with $config.affinity }}
affinity: {{ toYaml . | nindent 8 }}
{{- end }}
{{- with $config.tolerations }}
tolerations: {{ toYaml . | nindent 8 }}
{{- end }}
volumes:
- name: config
projected:
sources:
- secret:
name: {{ include "addon.fullname" (list $ctx $name) }}-config
{{- range $config.extraVolumes }}
- {{ toYaml . | nindent 16 }}
{{- end }}
{{- if $config.kubeconfigSecret.name }}
- name: kubeconfig
emptyDir: {}
{{- end }}
{{- end }}

View File

@@ -0,0 +1,59 @@
{{- define "addon.config.secret" -}}
{{- $ctx := index . 0 }}
{{- $name := index . 1 }}
{{- $config := index . 2 }}
apiVersion: v1
kind: Secret
metadata:
name: {{ include "addon.fullname" (list $ctx $name) }}-config
labels: {{ include "addon.labels" (list $ctx $name) | nindent 4 }}
stringData:
{{- range $filename, $content := $config.extraFiles }}
{{ $filename }}: |
{{- $content | nindent 4 }}
{{- end }}
{{- with $config.hooks.preInstall }}
hook-preinstall.sh: |
{{- tpl . $ctx | nindent 4 }}
{{- end }}
{{- with $config.hooks.postInstall }}
hook-postinstall.sh: |
{{- tpl . $ctx | nindent 4 }}
{{- end }}
{{- with $config.hooks.preDelete }}
hook-predelete.sh: |
{{- tpl . $ctx | nindent 4 }}
{{- end }}
{{- with $config.hooks.postDelete }}
hook-postdelete.sh: |
{{- tpl . $ctx | nindent 4 }}
{{- end }}
{{- if eq $config.installType "helm" }}
values.yaml: |
{{- include "addon.helm.values" (list $ctx $config.helm) | nindent 4 }}
install.sh: |
{{- include "addon.helm.install" $config.helm | nindent 4 }}
delete.sh: |
{{- include "addon.helm.delete" $config.helm | nindent 4 }}
{{- else if eq $config.installType "kustomize" }}
kustomization.yaml: |
{{- include "addon.kustomize.kustomization" (list $ctx $config.kustomize) | nindent 4 }}
install.sh: |
{{- include "addon.kustomize.install" $config.kustomize | nindent 4 }}
delete.sh: |
{{- include "addon.kustomize.delete" $config.kustomize | nindent 4 }}
{{- else if eq $config.installType "custom" }}
install.sh: |
{{-
tpl (required "custom.install is required for a custom job" $config.custom.install) $ctx |
nindent 4
}}
delete.sh: |
{{-
tpl (required "custom.delete is required for a custom job" $config.custom.delete) $ctx |
nindent 4
}}
{{- else }}
{{- fail (printf "Unrecognised install type '%s'" $config.installType) }}
{{- end }}
{{- end }}

View File

@@ -1,102 +0,0 @@
apiVersion: batch/v1
kind: Job
metadata:
# Produce a new job for each release
name: {{ include "addon.fullname" . }}-delete
labels: {{ include "addon.jobLabels" (list . "delete") | nindent 4 }}
annotations:
helm.sh/hook: pre-delete
helm.sh/hook-delete-policy: before-hook-creation,hook-succeeded
spec:
template:
metadata:
labels: {{ include "addon.jobSelectorLabels" (list . "delete") | nindent 8 }}
spec:
{{- with .Values.imagePullSecrets }}
imagePullSecrets: {{ toYaml . | nindent 8 }}
{{- end }}
securityContext: {{ toYaml .Values.podSecurityContext | nindent 8 }}
restartPolicy: OnFailure
# The delete hook pod always uses the service account
serviceAccountName: {{ tpl .Values.serviceAccount.name . }}
# Use an init container to suspend any existing install jobs for the release
# The init container does not include the kubeconfig, even if defined, as it
# only needs to target install jobs on the same cluster as this job
# This functionality only exists from 1.21 (alpha opt-in, beta from 1.22)
# If it is not present, we have no option but to gamble
{{- if semverCompare ">=1.21" .Capabilities.KubeVersion.Version }}
initContainers:
- name: suspend
image: {{ include "addon.image" . }}
imagePullPolicy: {{ .Values.image.pullPolicy }}
securityContext: {{ toYaml .Values.securityContext | nindent 12 }}
args:
- /bin/bash
- -c
- |
set -exo pipefail
{{- $labels := include "addon.jobSelectorLabels" (list . "install") | fromYaml }}
LABELS='{{ range $i, $label := keys $labels }}{{ if $i }},{{ end }}{{ $label }}={{ index $labels $label }}{{ end }}'
for job in $(kubectl get job -n {{ .Release.Namespace }} -l "$LABELS" -o name); do
kubectl patch $job -n {{ .Release.Namespace }} -p '{"spec":{"suspend":true}}'
done
resources: {{ toYaml .Values.resources | nindent 12 }}
{{- end }}
containers:
- name: delete
image: {{ include "addon.image" . }}
imagePullPolicy: {{ .Values.image.pullPolicy }}
securityContext: {{ toYaml .Values.securityContext | nindent 12 }}
args:
- /bin/bash
- -c
- |
set -exo pipefail
if [ -f ./hook-predelete.sh ]; then
source ./hook-predelete.sh
fi
source ./delete.sh
if [ -f ./hook-postdelete.sh ]; then
source ./hook-postdelete.sh
fi
{{- if .Values.kubeconfigSecret.name }}
# The delete container itself uses the kubeconfig if given, so that it
# targets the remote cluster
env:
- name: KUBECONFIG
value: /config/kubeconfig
{{- end }}
# Set the working directory to the directory containing the config
workingDir: /config
resources: {{ toYaml .Values.resources | nindent 12 }}
volumeMounts:
- name: config
mountPath: /config
readOnly: true
hostNetwork: {{ .Values.hostNetwork }}
{{- with .Values.nodeSelector }}
nodeSelector: {{ toYaml . | nindent 8 }}
{{- end }}
{{- with .Values.affinity }}
affinity: {{ toYaml . | nindent 8 }}
{{- end }}
{{- with .Values.tolerations }}
tolerations: {{ toYaml . | nindent 8 }}
{{- end }}
volumes:
- name: config
projected:
sources:
- secret:
name: {{ include "addon.fullname" . }}-config
{{- if .Values.kubeconfigSecret.name }}
- secret:
name: {{ tpl .Values.kubeconfigSecret.name . }}
items:
- key: {{ .Values.kubeconfigSecret.key }}
path: kubeconfig
{{- end }}
{{- range .Values.extraVolumes }}
- {{ toYaml . | nindent 16 }}
{{- end }}

View File

@@ -1,106 +0,0 @@
{{/*
Jobs are not re-run when their spec changes, so whenever the spec changes we need to create a new job.
To do this, we create a new job whenever the checksum of the spec changes and include a fragment of
the checksum in the job name.
The other possible approach would be to include .Release.Revision in the job name, so that a new
job is created each time "helm upgrade" is run regardless of any changes to the values. We cannot
use this approach because this Helm chart is used to create an operator and the way the reconciliation
loop works in the operator means this will spin into an infinite loop of new releases. In any case,
it means that a job will be run when there are no changes to be made, which is unnecessary.
*/}}
{{- define "addon.install-job.spec" -}}
backoffLimit: {{ .Values.backoffLimit }}
activeDeadlineSeconds: {{ .Values.activeDeadlineSeconds }}
template:
metadata:
labels: {{ include "addon.jobSelectorLabels" (list . "install") | nindent 6 }}
annotations:
# Include an annotation containing the checksum of the configuration
# This ensures that the spec of the job changes when the configuration changes,
# resulting in a new job being produced
capi.stackhpc.com/config-checksum: {{ include (print $.Template.BasePath "/secret.yaml") . | sha256sum }}
spec:
{{- with .Values.imagePullSecrets }}
imagePullSecrets: {{ toYaml . | nindent 6 }}
{{- end }}
securityContext: {{ toYaml .Values.podSecurityContext | nindent 6 }}
restartPolicy: OnFailure
{{- if not .Values.kubeconfigSecret.name }}
serviceAccountName: {{ tpl .Values.serviceAccount.name . }}
{{- end }}
containers:
- name: install
image: {{ include "addon.image" . }}
imagePullPolicy: {{ .Values.image.pullPolicy }}
securityContext: {{ toYaml .Values.securityContext | nindent 10 }}
args:
- /bin/bash
- -c
- |
set -exo pipefail
if [ -f ./hook-preinstall.sh ]; then
source ./hook-preinstall.sh
fi
source ./install.sh
if [ -f ./hook-postinstall.sh ]; then
source ./hook-postinstall.sh
fi
{{- if .Values.kubeconfigSecret.name }}
env:
- name: KUBECONFIG
value: /config/kubeconfig
{{- end }}
# Set the working directory to the directory containing the config
workingDir: /config
resources: {{ toYaml .Values.resources | nindent 10 }}
volumeMounts:
- name: config
mountPath: /config
readOnly: true
hostNetwork: {{ .Values.hostNetwork }}
{{- with .Values.nodeSelector }}
nodeSelector: {{ toYaml . | nindent 6 }}
{{- end }}
{{- with .Values.affinity }}
affinity: {{ toYaml . | nindent 6 }}
{{- end }}
{{- with .Values.tolerations }}
tolerations: {{ toYaml . | nindent 6 }}
{{- end }}
volumes:
- name: config
projected:
sources:
- secret:
name: {{ include "addon.fullname" . }}-config
{{- if .Values.kubeconfigSecret.name }}
- secret:
name: {{ tpl .Values.kubeconfigSecret.name . }}
items:
- key: {{ .Values.kubeconfigSecret.key }}
path: kubeconfig
{{- end }}
{{- range .Values.extraVolumes }}
- {{ toYaml . | nindent 14 }}
{{- end }}
{{- end }}
{{- define "addon.install-job.spec.checksum" -}}
{{- include "addon.install-job.spec" . | sha256sum }}
{{- end }}
{{- define "addon.install-job.name" -}}
{{- $checksum := include "addon.install-job.spec.checksum" . }}
{{- include "addon.fullname" . }}-{{ trunc 8 $checksum }}
{{- end }}
apiVersion: batch/v1
kind: Job
metadata:
name: {{ include "addon.install-job.name" . }}
labels: {{ include "addon.jobLabels" (list . "install") | nindent 4 }}
annotations:
capi.stackhpc.com/spec-checksum: {{ include "addon.install-job.spec.checksum" . }}
spec:
{{- include "addon.install-job.spec" . | nindent 2 }}

View File

@@ -1,54 +0,0 @@
apiVersion: v1
kind: Secret
metadata:
name: {{ include "addon.fullname" . }}-config
labels: {{ include "addon.labels" . | nindent 4 }}
stringData:
{{- range $filename, $content := .Values.extraFiles }}
{{ $filename }}: |
{{- $content | nindent 4 }}
{{- end }}
{{- with .Values.hooks.preInstall }}
hook-preinstall.sh: |
{{- tpl . $ | nindent 4 }}
{{- end }}
{{- with .Values.hooks.postInstall }}
hook-postinstall.sh: |
{{- tpl . $ | nindent 4 }}
{{- end }}
{{- with .Values.hooks.preDelete }}
hook-predelete.sh: |
{{- tpl . $ | nindent 4 }}
{{- end }}
{{- with .Values.hooks.postDelete }}
hook-postdelete.sh: |
{{- tpl . $ | nindent 4 }}
{{- end }}
{{- if eq .Values.installType "helm" }}
values.yaml: |
{{- include "addon.helm.values" . | nindent 4 }}
install.sh: |
{{- include "addon.helm.install" . | nindent 4 }}
delete.sh: |
{{- include "addon.helm.delete" . | nindent 4 }}
{{- else if eq .Values.installType "kustomize" }}
kustomization.yaml: |
{{- include "addon.kustomize.kustomization" . | nindent 4 }}
install.sh: |
{{- include "addon.kustomize.install" . | nindent 4 }}
delete.sh: |
{{- include "addon.kustomize.delete" . | nindent 4 }}
{{- else if eq .Values.installType "custom" }}
install.sh: |
{{-
tpl (required ".Values.custom.install is required for a custom job" .Values.custom.install) . |
nindent 4
}}
delete.sh: |
{{-
tpl (required ".Values.custom.delete is required for a custom job" .Values.custom.delete) . |
nindent 4
}}
{{- else }}
{{- fail (printf "Unrecognised install type '%s'" .Values.installType) }}
{{- end }}

View File

@@ -1,7 +0,0 @@
{{- if .Values.serviceAccount.create }}
apiVersion: v1
kind: ServiceAccount
metadata:
name: {{ tpl .Values.serviceAccount.name . }}
labels: {{ include "addon.labels" . | nindent 4 }}
{{- end }}

View File

@@ -4,3 +4,8 @@ description: Helm chart that deploys cluster addons for a CAPI cluster.
type: application
version: 0.1.0
appVersion: main
dependencies:
  - name: addon
    version: "*"
    repository: file://../addon

View File

@@ -6,23 +6,31 @@ the cluster management charts from this repository, e.g.
[openstack-cluster](../openstack-cluster), but should work for any Kubernetes cluster.
The addons are deployed by launching
[Kubernetes jobs](https://kubernetes.io/docs/concepts/workloads/controllers/job/) on the
target cluster, each of which is responsible for installing or updating a single addon.
[Kubernetes jobs](https://kubernetes.io/docs/concepts/workloads/controllers/job/), each of
which is responsible for installing or updating a single addon. These jobs can
install the addons either into the local cluster, using a service account, or into a
remote cluster, using a `kubeconfig` file from a pre-existing secret. By default, the
local cluster is the target.
The jobs use the [utils image](../../utils) from this repository, which bundles some
useful tools like [jq](https://stedolan.github.io/jq/),
[kubectl](https://kubernetes.io/docs/reference/kubectl/overview/),
[kustomize](https://kustomize.io/) and [helm](https://helm.sh), and the jobs execute
with full permissions on the cluster using the `cluster-admin` cluster role. This is
used rather than a more restrictive role for a few reasons:
[kustomize](https://kustomize.io/) and [helm](https://helm.sh).
1. This chart provides a mechanism to apply custom addons, and there is no way to
When targeting the local cluster, the service account used to run the jobs must have
enough permissions to create all the objects that the addon will create. In practice,
this means that the service account will usually require the `cluster-admin` cluster role
for two reasons:
1. This chart provides a mechanism to specify custom addons, and there is no way to
know in advance what resources those custom addons may need to manage.
1. Addons may need to manage
1. This may even include instances of a
[CRD](https://kubernetes.io/docs/tasks/extend-kubernetes/custom-resources/custom-resource-definitions/)
instances that are not covered by a more restrictive role.
that is installed by another addon.
1. Several addons need to create
[RBAC](https://kubernetes.io/docs/reference/access-authn-authz/rbac/) resources,
and so could elevate their permissions anyway by creating new roles.
and Kubernetes requires that the account creating RBAC resources has at least the
permissions that it is attempting to apply to another account.
There are two patterns used in this chart for managing addons:
@@ -41,20 +49,38 @@ This chart also allows custom addons to be managed using the Helm values, either
specifying manifest content inline, or by specifying a Helm chart to install with the
corresponding values.
## Targeting a remote cluster
By default, the jobs that install the addons target the local cluster using a service account.
It is also possible to target a remote cluster, using a `kubeconfig` file. This must first
be uploaded to the cluster as a secret:
```sh
kubectl create secret generic target-kubeconfig --from-file=kubeconfig=$PWD/kubeconfig
```
Then you can tell the addons to use that `kubeconfig` file using the Helm values:
```yaml
kubeconfigSecret:
  name: target-kubeconfig
  key: kubeconfig
```
## Container Network Interface (CNI) plugins
This chart can install either
[Calico](https://docs.projectcalico.org/about/about-calico) or
[Weave](https://www.weave.works/docs/net/latest/kubernetes/kube-addon/) as a
This chart can install either [Calico](https://docs.projectcalico.org/about/about-calico) or
[Cilium](https://cilium.io/) as a
[CNI plugin](https://kubernetes.io/docs/concepts/extend-kubernetes/compute-storage-net/network-plugins/)
to provide the pod networking in a Kubernetes cluster. By default, the Calico CNI will be
to provide the pod networking in a Kubernetes cluster. By default, the Cilium CNI will be
installed.
To switch the CNI to Weave, use the following in your Helm values:
To switch the CNI to Calico, use the following in your Helm values:
```yaml
cni:
  type: weave
  type: calico
```
And to disable the installation of a CNI completely:
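A sketch, assuming the chart exposes a `cni.enabled` flag alongside `cni.type`:

```yaml
cni:
  enabled: false
```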
@@ -94,16 +120,19 @@ sections of the cloud-config file, you can use Helm values, e.g.:
```yaml
openstack:
  cloudConfig:
    networking:
      public-network-name: public-internet
    loadBalancer:
      lb-method: LEAST_CONNECTIONS
      create-monitor: "true"
    blockStorage:
      ignore-volume-az: "true"
    metadata:
      search-order: metadataService
  cloudConfig: |
    [Networking]
    public-network-name=public-internet
    [LoadBalancer]
    lb-method=LEAST_CONNECTIONS
    create-monitor=true
    [BlockStorage]
    ignore-volume-az=true
    [Metadata]
    search-order=metadataService
```
The `[Globals]` section is populated using the given `clouds.yaml` (see "OpenStack credentials" below).
@@ -124,9 +153,8 @@ manage OpenStack resources on behalf of the cluster. The recommended way to do t
to avoid your password being stored on the cluster. Application credentials are project-scoped,
and ideally you should use a separate application credential for each cluster in a project.
For ease of use, this chart is written so that a `clouds.yaml` file can be given directly
to the chart as a configuration file. When an application credential is created in Horizon,
the corresponding `clouds.yaml` file can be downloaded, and should look something like this:
When an application credential is created in Horizon, the corresponding `clouds.yaml` file can be
downloaded, and should look something like this:
```yaml
clouds:
@@ -141,10 +169,42 @@ clouds:
auth_type: "v3applicationcredential"
```
This file can then be passed to the chart using the `-f|--values` option, e.g.:
The credentials are provided to this Helm chart by putting them into a secret:
```sh
helm install cluster-addons capi/cluster-addons --values ./clouds.yaml [...options]
kubectl create secret generic my-cloud-credential --from-file=clouds.yaml=$PWD/clouds.yaml
```
That secret can then be configured in the Helm values:
```yaml
openstack:
  cloudCredentialsSecretName: my-cloud-credential
```
The secret can also contain a certificate file that is used to validate the SSL certificate from
the target cloud:
```sh
kubectl create secret generic my-cloud-credential \
  --from-file=clouds.yaml=$PWD/clouds.yaml \
  --from-file=cacert=$PWD/ca.crt
```
Alternatively, certificate verification can be disabled in the `clouds.yaml`:
```yaml
clouds:
  openstack:
    auth:
      auth_url: https://my.cloud:5000
      application_credential_id: "<app cred id>"
      application_credential_secret: "<app cred secret>"
    region_name: "RegionOne"
    interface: "public"
    identity_api_version: 3
    auth_type: "v3applicationcredential"
    verify: false
```
## cert-manager
@@ -159,13 +219,16 @@ the required challenges, and can
[Ingress resources](https://kubernetes.io/docs/concepts/services-networking/ingress/)
using annotations.
cert-manager is enabled by default. To disable it, use the following Helm values:
cert-manager is disabled by default. To enable it, use the following Helm values:
```yaml
certManager:
  enabled: false
  enabled: true
```
By default, the installation includes a cluster issuer called `letsencrypt-http01` that
targets [Let's Encrypt](https://letsencrypt.org/) for certificate issuing.
Additional configuration options are available for cert-manager - see
[values.yaml](./values.yaml).
@@ -188,98 +251,144 @@ particular Ingress resource using
This chart can install the [Nginx Ingress Controller](https://kubernetes.github.io/ingress-nginx/)
onto the target cluster.
The Nginx Ingress Controller is enabled by default. To disable it, use the following Helm values:
The Nginx Ingress Controller is disabled by default. To enable it, use the following Helm values:
```yaml
ingress:
  enabled: false
  enabled: true
```
## NVIDIA GPU operator
This chart is able to install the
[NVIDIA GPU operator](https://docs.nvidia.com/datacenter/cloud-native/gpu-operator/overview.html)
to provide access to NVIDIA GPUs from Kubernetes pods using the
[device plugin framework](https://kubernetes.io/docs/concepts/extend-kubernetes/compute-storage-net/device-plugins/).
When deployed, the GPU operator will detect nodes with NVIDIA GPUs and automatically install the
NVIDIA software components required to make the GPUs available to Kubernetes. This does not
require any special modifications to the image used to deploy the nodes.
The GPU operator is not enabled by default. To enable it, use the following Helm values:
```yaml
nvidiaGPUOperator:
  enabled: true
```
Because of the automatic detection and labelling of nodes with GPUs, there is no need to
manually label nodes. In the case where some nodes have GPUs and some do not, the GPU
operator will do the right thing without the need for manual intervention.
## Extra addons
Additional configuration options are available for the NVIDIA GPU operator - see
[values.yaml](./values.yaml).
This chart is able to manage the application of additional user-specified addons to the target
cluster. These can use Helm, Kustomize or a custom script to install and uninstall the addon,
and can even use a custom image containing specialist tools if required.
## Custom manifests
This chart is able to manage the application of custom user-specified manifests to the
cluster using `kubectl apply`. This can be useful to install cluster-specific resources
such as additional
[storage classes](https://kubernetes.io/docs/concepts/storage/storage-classes/)
or [RBAC rules](https://kubernetes.io/docs/reference/access-authn-authz/rbac/).
To apply custom manifests to the cluster as part of the addon installation, use something
similar to the following in your Helm values:
Each addon should have the form (not all options are required at all times):
```yaml
# This should be a mapping of filenames to manifest content
customManifests:
  storageclass-standard.yaml: |
    apiVersion: storage.k8s.io/v1
    kind: StorageClass
    metadata:
      name: standard
    provisioner: my-storage-provisioner
  pod-reader.yaml: |
    apiVersion: rbac.authorization.k8s.io/v1
    kind: ClusterRole
    metadata:
      name: pod-reader
    rules:
      - apiGroups: [""]
        resources: ["pods"]
        verbs: ["get", "watch", "list"]
# One of helm, kustomize or custom
installType: custom
# Options for a Helm addon
helm:
  # List of URLs of manifests containing CRDs
  # Helm's handling of CRDs is not great - this helps if CRDs require updates
  crdManifests: []
  # The information for the Helm chart
  chart:
    # The URL of the chart repository
    repo:
    # The name of the chart
    name:
    # The version of the chart to use
    version:
  # Information about the Helm release
  release:
    # The namespace for the release on the target cluster
    namespace:
    # The name of the release
    name:
    # The time to wait for the Helm release to install correctly
    timeout: 60m
    # The values for the release
    # These can come from a dict or a template
    # The template is rendered with the root context, then the result is merged into the dict
    # Values from the template take precedence over the dict
    values: {}
    valuesTemplate:
# Options for a kustomize addon
kustomize:
  # The kustomize configuration
  # This can come from a dict or a template
  # The template is rendered with the root context, then the result is merged into the dict
  # Values from the template take precedence over the dict
  kustomization: {}
  kustomizationTemplate:
  # A list of resources to watch to determine when the addon has installed
  # These should be resources that can be used with "kubectl rollout status"
  # E.g. "deployment/my-deployment" or "statefulset/my-statefulset"
  resources: []
  # The namespace on the target cluster to watch resources in
  resourceNamespace:
# Options for a custom addon
custom:
  # Script that installs the addon
  # It is treated as a template, and rendered with the root context
  install:
  # Script that deletes the addon
  # It is also treated as a template and rendered with the root context
  delete:
# A list of extra sources to be added to the projected volume used for configuration
# The secrets and configmaps must already exist in the namespace
# https://kubernetes.io/docs/concepts/storage/projected-volumes/
extraVolumes: []
# A map of filename -> content of additional files to include in the config directory
extraFiles: {}
# Hook scripts that execute at certain times in the addon's lifecycle
# Hook scripts are treated as templates during rendering, and are rendered with the root context
hooks:
  # Executed before the addon is installed or upgraded
  preInstall:
  # Executed after the addon is installed or upgraded
  postInstall:
  # Executed before the addon is deleted
  preDelete:
  # Executed after the addon is deleted
  postDelete:
# Details of a custom image to use, if required
image:
  # The repository of the image
  repository:
  # The tag to use from the repository
  tag:
```
## Custom Helm charts
In addition to simple custom manifests, this chart is also able to manage additional
cluster-specific Helm releases.
To deploy a custom Helm release as part of the addon installation, use something similar
to the following in your Helm values:
For example, the following extra addon will install a couple of additional manifests
into the cluster using Kustomize:
```yaml
customHelmReleases:
  # This is the name of the release
extraAddons:
  custom-manifests:
    installType: kustomize
    kustomize:
      kustomization:
        resources:
          - ./storageclass-standard.yaml
          - ./pod-reader.yaml
    extraFiles:
      storageclass-standard.yaml: |
        apiVersion: storage.k8s.io/v1
        kind: StorageClass
        metadata:
          name: standard
        provisioner: my-storage-provisioner
      pod-reader.yaml: |
        apiVersion: rbac.authorization.k8s.io/v1
        kind: ClusterRole
        metadata:
          name: pod-reader
        rules:
          - apiGroups: [""]
            resources: ["pods"]
            verbs: ["get", "watch", "list"]
```
Or to deploy a custom Helm release as part of the addon installation:
```yaml
extraAddons:
  my-wordpress:
    chart:
      # The repository that the chart is in
      repo: https://charts.bitnami.com/bitnami
      # The name of the chart
      name: wordpress
      # The version of the chart to use
      # NOTE: THIS IS REQUIRED
      version: 12.1.6
    # The namespace for the release
    # If not given, this defaults to the release name
    namespace: wordpress
    # The amount of time to wait for the chart to deploy before rolling back
    timeout: 5m
    # The values for the chart
    values:
      wordpressUsername: jbloggs
      wordpressPassword: supersecretpassword
      wordpressBlogName: JBloggs Awesome Blog!
    installType: helm
    helm:
      chart:
        repo: https://charts.bitnami.com/bitnami
        name: wordpress
        version: 12.1.6
      release:
        namespace: wordpress
        name: my-wordpress
        values:
          wordpressUsername: jbloggs
          wordpressPassword: supersecretpassword
          wordpressBlogName: JBloggs Awesome Blog!
```

View File

@@ -16,19 +16,6 @@ Create a default fully qualified app name.
{{- end }}
{{- end }}
{{/*
Create a fully qualified component name.
*/}}
{{- define "cluster-addons.componentName" -}}
{{- $ctx := index . 0 }}
{{- $componentName := index . 1 }}
{{- if contains $ctx.Chart.Name $ctx.Release.Name }}
{{- printf "%s-%s" $ctx.Release.Name $componentName | trunc 63 | trimSuffix "-" }}
{{- else }}
{{- printf "%s-%s-%s" $ctx.Release.Name $ctx.Chart.Name $componentName | trunc 63 | trimSuffix "-" }}
{{- end }}
{{- end }}
{{/*
Create chart name and version as used by the chart label.
*/}}
@@ -45,306 +32,30 @@ app.kubernetes.io/instance: {{ .Release.Name }}
{{- end }}
{{/*
Selector labels for a component-level resource.
Labels for a chart-level resource.
*/}}
{{- define "cluster-addons.componentSelectorLabels" -}}
{{- $ctx := index . 0 }}
{{- $componentName := index . 1 }}
{{- include "cluster-addons.selectorLabels" $ctx }}
app.kubernetes.io/component: {{ $componentName }}
{{- end }}
{{/*
Common labels for all resources.
*/}}
{{- define "cluster-addons.commonLabels" -}}
{{- define "cluster-addons.labels" -}}
helm.sh/chart: {{ include "cluster-addons.chart" . }}
app.kubernetes.io/managed-by: {{ .Release.Service }}
{{- if .Chart.AppVersion }}
app.kubernetes.io/version: {{ .Chart.AppVersion | quote }}
{{- end }}
{{- end }}
{{/*
Labels for a chart-level resource.
*/}}
{{- define "cluster-addons.labels" -}}
{{ include "cluster-addons.commonLabels" . }}
{{ include "cluster-addons.selectorLabels" . }}
{{- end }}
{{/*
Component labels
Renders the default job configuration.
*/}}
{{- define "cluster-addons.componentLabels" -}}
{{ include "cluster-addons.commonLabels" (index . 0) }}
{{ include "cluster-addons.componentSelectorLabels" . }}
{{- define "cluster-addons.job.defaults" -}}
{{- with .Values.jobDefaults }}
{{- toYaml . }}
{{- end }}
{{/*
Template for a config secret for use by a job that deploys an addon.
*/}}
{{- define "cluster-addons.job.config" -}}
{{- $ctx := index . 0 }}
{{- $componentName := index . 1 }}
{{- $options := slice (append . dict) 2 | first }}
{{- $configSecretName := printf "%s-config" $componentName }}
{{- $configDataTemplate := printf "cluster-addons.%s.config" $componentName }}
apiVersion: v1
kind: Secret
metadata:
name: {{ include "cluster-addons.componentName" (list $ctx $configSecretName) }}
labels: {{ include "cluster-addons.componentLabels" (list $ctx $componentName) | nindent 4 }}
stringData:
{{- if hasKey $options "configData" }}
{{- nindent 2 $options.configData }}
{{- else }}
{{- include $configDataTemplate $ctx | nindent 2 }}
{{- end }}
{{- end }}
{{/*
Base template for a job that deploys an addon.
*/}}
{{- define "cluster-addons.job.base" -}}
{{- $ctx := index . 0 }}
{{- $componentName := index . 1 }}
{{- $options := slice (append . dict) 2 | first }}
{{- $configSecretName := printf "%s-config" $componentName }}
{{- $scriptTemplate := printf "cluster-addons.%s.script" $componentName }}
{{- $bootstrap := dig "bootstrap" false $options }}
apiVersion: batch/v1
kind: Job
metadata:
name: {{ include "cluster-addons.componentName" (list $ctx $componentName) }}-{{ $ctx.Release.Revision }}
labels: {{ include "cluster-addons.componentLabels" (list $ctx $componentName) | nindent 4 }}
spec:
# Keep trying for a decent amount of time before failing
backoffLimit: 1000
# Keep succeeded jobs for 2h after finishing
ttlSecondsAfterFinished: 7200
template:
metadata:
labels: {{ include "cluster-addons.componentSelectorLabels" (list $ctx $componentName) | nindent 8 }}
spec:
# Ensure that we run as a non-root user
securityContext:
runAsUser: 1001
serviceAccountName: {{ include "cluster-addons.componentName" (list $ctx "deployer") }}
restartPolicy: OnFailure
containers:
- name: {{ $componentName }}
image: {{ printf "%s:%s" $ctx.Values.jobImage.repository (default $ctx.Chart.AppVersion $ctx.Values.jobImage.tag) }}
imagePullPolicy: {{ $ctx.Values.jobImage.pullPolicy }}
args:
- /bin/sh
- -c
- |
set -exo pipefail
{{- if hasKey $options "script" }}
{{- nindent 16 $options.script }}
{{- else }}
{{- include $scriptTemplate $ctx | nindent 16 }}
{{- end }}
volumeMounts:
- name: config
mountPath: /config
readOnly: true
volumes:
- name: config
secret:
secretName: {{ include "cluster-addons.componentName" (list $ctx $configSecretName) }}
{{- if $bootstrap }}
hostNetwork: true
tolerations: {{ toYaml $ctx.Values.bootstrapTolerations | nindent 8 }}
{{- end }}
{{- end }}
{{/*
Template for a job that deploys an addon.
*/}}
{{- define "cluster-addons.job" -}}
{{- $ctx := index . 0 }}
{{- $options := slice (append . dict) 2 | first }}
{{- $bootstrap := dig "bootstrap" false $options }}
{{- if or $bootstrap (not $ctx.Values.bootstrapOnly) }}
{{- include "cluster-addons.job.config" . }}
---
{{- include "cluster-addons.job.base" . }}
{{- end }}
{{- end }}
{{/*
Template for producing the configuration required for a Helm release.
*/}}
{{- define "cluster-addons.job.helm.config" -}}
values.yaml: |
{{- toYaml .values | nindent 2 }}
{{- end }}
{{/*
Template for a script that installs or upgrades a Helm release.
Because Helm has poor support for CRDs, there is an option to apply CRD manifests
before installing or upgrading the release.
There is also support for rolling back an interrupted install or upgrade before proceeding
by checking for the pending-[install,upgrade] status.
*/}}
{{- define "cluster-addons.job.helm.script" -}}
{{- if hasKey . "crdManifests" -}}
get_chart_version() {
helm show chart --repo {{ .chart.repo }} --version {{ .chart.version }} {{ .chart.name }} | \
grep -e "^$1" | \
cut -d ":" -f 2 | \
tr -d '[:space:]'
}
CHART_VERSION="$(get_chart_version "version")"
CHART_APPVERSION="$(get_chart_version "appVersion")"
{{- range $manifestName := .crdManifests }}
kubectl apply -f {{ $.crdManifestsBaseURL }}/{{ $manifestName }}
{{- end }}
{{- end }}
helm_release_exists() {
helm status {{ .release.name }} --namespace {{ .release.namespace }}
}
helm_release_status() {
helm status {{ .release.name }} --namespace {{ .release.namespace }} --output json | \
jq -r '.info.status'
}
if helm_release_exists; then
status="$(helm_release_status)"
if [ "$status" = "pending-install" ]; then
helm delete {{ .release.name }} \
--namespace {{ .release.namespace }} \
--wait --timeout {{ .release.timeout }}
elif [ "$status" = "pending-upgrade" ]; then
helm rollback {{ .release.name }} \
--namespace {{ .release.namespace }} \
--cleanup-on-fail \
--wait --wait-for-jobs --timeout {{ .release.timeout }}
fi
fi
helm upgrade {{ .release.name }} {{ .chart.name }} \
--atomic --install \
--namespace {{ .release.namespace }} --create-namespace \
--repo {{ .chart.repo }} \
--version {{ .chart.version }} \
{{- if hasKey . "crdManifests" -}}
--skip-crds \
{{- end }}
--values /config/values.yaml \
--wait --wait-for-jobs --timeout {{ .release.timeout }} \
$HELM_EXTRA_ARGS
{{- end }}
{{/*
Template for a job that deploys an addon using a Helm chart.
*/}}
{{- define "cluster-addons.job.helm" -}}
{{- $ctx := index . 0 }}
{{- $componentName := index . 1 }}
{{- $helmSpec := index . 2 }}
{{- $options := slice (append . dict) 3 | first }}
{{-
include
"cluster-addons.job"
(list
$ctx
$componentName
(merge
(dict
"configData" (include "cluster-addons.job.helm.config" $helmSpec)
"script" (include "cluster-addons.job.helm.script" $helmSpec)
)
$options
)
)
}}
{{- end }}
{{/*
Template for producing the configuration for an addon that uses kustomize.
*/}}
{{- define "cluster-addons.job.kustomize.config" -}}
{{- $ctx := index . 0 }}
{{- $kustomize := index . 1 }}
kustomization.yaml: |
apiVersion: kustomize.config.k8s.io/v1beta1
kind: Kustomization
resources:
{{- range $kustomize.manifests }}
- {{ tpl . $ctx }}
{{- end }}
{{- with $kustomize.kustomization }}
{{- toYaml . | nindent 2 }}
{{- if .Values.kubeconfigSecret.name }}
kubeconfigSecret:
name: {{ tpl .Values.kubeconfigSecret.name . }}
{{- with .Values.kubeconfigSecret.key }}
key: {{ . }}
{{- end }}
{{- end }}
{{/*
Template for producing a script for an addon that uses kustomize.
*/}}
{{- define "cluster-addons.job.kustomize.script" -}}
kustomize build /config | kubectl apply -f -
{{- range . }}
kubectl -n {{ index . 0 }} rollout status {{ index . 1 }}
{{- end }}
{{- end }}
{{/*
Template for a job that deploys an addon using kustomize.
*/}}
{{- define "cluster-addons.job.kustomize" -}}
{{- $ctx := index . 0 }}
{{- $componentName := index . 1 }}
{{- $kustomize := index . 2 }}
{{- $resources := index . 3 }}
{{- $options := slice (append . dict) 4 | first }}
{{-
include
"cluster-addons.job"
(list
$ctx
$componentName
(merge
(dict
"configData" (include "cluster-addons.job.kustomize.config" (list $ctx $kustomize))
"script" (include "cluster-addons.job.kustomize.script" $resources)
)
$options
)
)
}}
{{- end }}
{{/*
Template that merges two variables with the latter taking precedence and outputs the result as YAML.
Lists are merged by concatenating them rather than overwriting.
*/}}
{{- define "cluster-addons.mergeConcat" -}}
{{- $left := index . 0 }}
{{- if kindIs (kindOf list) $left }}
{{- index . 1 | default list | concat $left | toYaml }}
{{- else if kindIs (kindOf dict) $left }}
{{- $right := index . 1 | default dict }}
{{- range $key := concat (keys $left) (keys $right) | uniq }}
{{ $key }}:
{{- if and (hasKey $left $key) (hasKey $right $key) }}
{{-
include "cluster-addons.mergeConcat" (list (index $left $key) (index $right $key)) |
nindent 2
}}
{{- else if hasKey $left $key }}
{{- index $left $key | toYaml | nindent 2 }}
{{- else }}
{{- index $right $key | toYaml | nindent 2 }}
{{- end }}
{{- end }}
{{- else }}
{{- default $left (index . 1) | toYaml }}
{{- end }}
serviceAccountName: {{ tpl .Values.serviceAccount.name . }}
{{- end }}
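The merge-concat semantics above (now provided by `addon.mergeConcat` in the library chart) are relied on wherever defaults, template output and user values are combined. A sketch of the behaviour, with illustrative inputs:

```yaml
# Given these two inputs:
left:
  tags: [a]
  replicas: 1
right:
  tags: [b]
  replicas: 2
# mergeConcat(left, right) produces:
tags: [a, b]  # lists are concatenated rather than replaced
replicas: 2   # mappings merge recursively; for scalars, the right-hand value wins
```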

View File

@@ -1,39 +1,47 @@
{{- define "cluster-addons.ccm-openstack.config.patches" -}}
kustomization:
patches:
- patch: |-
- op: add
path: /spec/template/spec/containers/0/args/-
value: --cluster-name={{ .Values.clusterName }}
target:
kind: DaemonSet
name: openstack-cloud-controller-manager
{{- if semverCompare "~1.21.0" .Capabilities.KubeVersion.Version }}
- patch: |-
- op: add
path: /rules/-
value:
apiGroups: [""]
resources: ["serviceaccounts/token"]
verbs: ["create"]
target:
group: rbac.authorization.k8s.io
version: v1
kind: ClusterRole
name: system:cloud-controller-manager
{{- end }}
{{- define "cluster-addons.ccm-openstack.config" -}}
{{- include "cluster-addons.job.defaults" . }}
installType: kustomize
kustomize:
kustomizationTemplate: |
resources:
{{- range .Values.openstack.ccm.manifests }}
- {{ tpl . $ }}
{{- end }}
patches:
- patch: |-
- op: add
path: /spec/template/spec/containers/0/args/-
value: --cluster-name={{ tpl .Values.clusterName . }}
target:
kind: DaemonSet
name: openstack-cloud-controller-manager
{{- if semverCompare "~1.21.0" (tpl .Values.kubernetesVersion . | trimPrefix "v") }}
- patch: |-
- op: add
path: /rules/-
value:
apiGroups: [""]
resources: ["serviceaccounts/token"]
verbs: ["create"]
target:
group: rbac.authorization.k8s.io
version: v1
kind: ClusterRole
name: system:cloud-controller-manager
{{- end }}
{{- with .Values.openstack.ccm.kustomization }}
kustomization: {{ toYaml . | nindent 4 }}
{{- end }}
resourceNamespace: kube-system
resources:
- daemonset/openstack-cloud-controller-manager
{{- end }}
{{- if and .Values.openstack.enabled .Values.openstack.ccm.enabled }}
{{- $patches := include "cluster-addons.ccm-openstack.config.patches" . | fromYaml }}
{{- $config := include "cluster-addons.mergeConcat" (list $patches .Values.openstack.ccm) | fromYaml }}
{{-
include "cluster-addons.job.kustomize" (list
include "addon.job" (list
.
"ccm-openstack"
$config
(list (list "kube-system" "daemonset/openstack-cloud-controller-manager"))
(dict "bootstrap" true)
"cluster-addons.ccm-openstack.config"
(and .Values.openstack.enabled .Values.openstack.ccm.enabled)
)
}}
{{- end }}
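
For orientation, a values snippet that would drive this config template might look like the following sketch; the manifest URL mirrors the defaults added elsewhere in this commit, everything else is illustrative.

```yaml
openstack:
  enabled: true
  ccm:
    enabled: true
    manifests:
      - "{{ tpl .Values.openstack.manifestsBaseURL . }}/manifests/controller-manager/openstack-cloud-controller-manager-ds.yaml"
    # Merged into the generated kustomization.yaml
    kustomization: {}
```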

View File

@ -1,34 +1,38 @@
{{- define "cluster-addons.cert-manager.config" -}}
{{- include "cluster-addons.job.helm.config" .Values.certManager }}
{{- include "cluster-addons.job.defaults" . }}
installType: helm
helm: {{ omit .Values.certManager "enabled" "acmeHttp01Issuer" | toYaml | nindent 2 }}
{{- if and .Values.ingress.enabled .Values.certManager.acmeHttp01Issuer.enabled }}
acme-http01-issuer.yaml: |
apiVersion: cert-manager.io/v1
kind: ClusterIssuer
metadata:
name: {{ .Values.certManager.acmeHttp01Issuer.name }}
spec:
acme:
server: {{ .Values.certManager.acmeHttp01Issuer.server }}
privateKeySecretRef:
name: {{ .Values.certManager.acmeHttp01Issuer.name }}-key
solvers:
- http01:
ingress:
{{- if .Values.ingress.nginx.enabled }}
class: {{ dig "controller" "ingressClassResource" "name" "nginx" .Values.ingress.nginx.values }}
{{- else }}
{{- fail "Ingress is enabled but no controllers are selected" }}
{{- end }}
extraFiles:
acme-http01-issuer.yaml: |
apiVersion: cert-manager.io/v1
kind: ClusterIssuer
metadata:
name: {{ .Values.certManager.acmeHttp01Issuer.name }}
spec:
acme:
server: {{ .Values.certManager.acmeHttp01Issuer.server }}
privateKeySecretRef:
name: {{ .Values.certManager.acmeHttp01Issuer.name }}-key
solvers:
- http01:
ingress:
{{- if .Values.ingress.nginx.enabled }}
class: {{ dig "controller" "ingressClassResource" "name" "nginx" .Values.ingress.nginx.release.values }}
{{- else }}
{{- fail "Ingress is enabled but no ingress controllers are enabled" }}
{{- end }}
hooks:
postInstall: |
kubectl apply -f ./acme-http01-issuer.yaml
{{- end }}
{{- end }}
{{- define "cluster-addons.cert-manager.script" -}}
{{- include "cluster-addons.job.helm.script" .Values.certManager }}
{{- if and .Values.ingress.enabled .Values.certManager.acmeHttp01Issuer.enabled }}
kubectl apply -f /config/acme-http01-issuer.yaml
{{- end }}
{{- end }}
{{- if .Values.certManager.enabled }}
{{- include "cluster-addons.job" (list . "cert-manager") }}
{{- end }}
{{-
include "addon.job" (list
.
"cert-manager"
"cluster-addons.cert-manager.config"
.Values.certManager.enabled
)
}}
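
A plausible values snippet exercising this template (field names are taken from this commit's values files; ingress must be enabled for the issuer to render):

```yaml
certManager:
  enabled: true
  acmeHttp01Issuer:
    enabled: true
    name: letsencrypt-http01
ingress:
  enabled: true
  nginx:
    enabled: true
```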

View File

@ -0,0 +1,53 @@
{{- define "cluster-addons.cloud-config.config" -}}
{{-
$secretNameTemplate := required
".Values.openstack.cloudCredentialsSecretName is required"
.Values.openstack.cloudCredentialsSecretName
}}
{{- $secretName := tpl $secretNameTemplate . }}
{{- include "cluster-addons.job.defaults" . }}
installType: custom
custom:
install: |
gomplate --file secret.yaml.tpl | kubectl apply -f -
delete: |
gomplate --file secret.yaml.tpl | kubectl delete -f -
extraVolumes:
- secret:
name: {{ $secretName }}
extraFiles:
secret.yaml.tpl: |
apiVersion: v1
kind: Secret
metadata:
name: cloud-config
namespace: kube-system
stringData:
# Just include the data for the cloud we will be using
clouds.yaml: |
{{ "{{" }} file.Read "./clouds.yaml" | indent 4 | trimSpace {{ "}}" }}
{{ "{{-" }} if file.Exists "./cacert" {{ "}}" }}
cacert: |
{{ "{{" }} file.Read "./cacert" | indent 4 | trimSpace {{ "}}" }}
{{ "{{-" }} end {{ "}}" }}
cloud.conf: |
[Global]
use-clouds=true
clouds-file=/etc/config/clouds.yaml
cloud={{ .Values.openstack.cloudName }}
{{ "{{-" }} if file.Exists "./cacert" {{ "}}" }}
ca-file=/etc/config/cacert
{{ "{{-" }} end {{ "}}" }}
{{- with .Values.openstack.cloudConfig }}
{{- nindent 8 . }}
{{- end }}
{{- end }}
{{-
include "addon.job" (list
.
"cloud-config"
"cluster-addons.cloud-config.config"
.Values.openstack.enabled
)
}}
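
Note the two-stage templating: Helm renders the `{{ "{{" }}` / `{{ "}}" }}` escapes down to literal gomplate directives, and the job evaluates those against the mounted credentials at install time. After the Helm pass, the core of `secret.yaml.tpl` should look roughly like this (assuming the default cloud name):

```yaml
stringData:
  clouds.yaml: |
    {{ file.Read "./clouds.yaml" | indent 4 | trimSpace }}
  cloud.conf: |
    [Global]
    use-clouds=true
    clouds-file=/etc/config/clouds.yaml
    cloud=openstack
```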

View File

@ -1,14 +0,0 @@
---
apiVersion: rbac.authorization.k8s.io/v1
kind: ClusterRoleBinding
metadata:
name: "{{ include "cluster-addons.fullname" . }}:deployer"
labels: {{ include "cluster-addons.labels" . | nindent 4 }}
subjects:
- kind: ServiceAccount
name: {{ include "cluster-addons.componentName" (list . "deployer") }}
namespace: {{ .Release.Namespace }}
roleRef:
apiGroup: rbac.authorization.k8s.io
kind: ClusterRole
name: cluster-admin

View File

@ -1,13 +1,33 @@
{{- if and .Values.cni.enabled (eq .Values.cni.type "calico") }}
{{-
include
"cluster-addons.job.kustomize"
(list
.
"cni-calico"
.Values.cni.calico
(list (list "kube-system" "daemonset/calico-node"))
(dict "bootstrap" true)
)
}}
{{- define "cluster-addons.cni-calico.config" -}}
{{- include "cluster-addons.job.defaults" . }}
installType: kustomize
kustomize:
kustomizationTemplate: |
resources:
{{- range .Values.cni.calico.manifests }}
- {{ tpl . $ }}
{{- end }}
{{- with .Values.cni.calico.kustomization }}
kustomization: {{ toYaml . | nindent 4 }}
{{- end }}
resourceNamespace: kube-system
resources:
- daemonset/calico-node
{{- end }}
{{-
include "addon.job" (list
.
"cni-calico"
"cluster-addons.cni-calico.config"
(and .Values.cni.enabled (eq .Values.cni.type "calico"))
)
}}
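
An illustrative values snippet for this addon (the manifest URL is a placeholder for whichever Calico version is wanted):

```yaml
cni:
  enabled: true
  type: calico
  calico:
    manifests:
      - https://docs.projectcalico.org/manifests/calico.yaml  # placeholder URL
    kustomization: {}
```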

View File

@ -1,10 +1,14 @@
{{- if and .Values.cni.enabled (eq .Values.cni.type "cilium") }}
{{- define "cluster-addons.cni-cilium.config" -}}
{{- include "cluster-addons.job.defaults" . }}
installType: helm
helm: {{ toYaml .Values.cni.cilium | nindent 2 }}
{{- end }}
{{-
include "cluster-addons.job.helm" (list
include "addon.job" (list
.
"cni-cilium"
.Values.cni.cilium
(dict "bootstrap" true)
"cluster-addons.cni-cilium.config"
(and .Values.cni.enabled (eq .Values.cni.type "cilium"))
)
}}
{{- end }}

View File

@ -1,13 +0,0 @@
{{- if and .Values.cni.enabled (eq .Values.cni.type "weave") }}
{{-
include
"cluster-addons.job.kustomize"
(list
.
"cni-weave"
.Values.cni.weave
(list (list "kube-system" "daemonset/weave-net"))
(dict "bootstrap" true)
)
}}
{{- end }}

View File

@ -1,38 +1,46 @@
{{- define "cluster-addons.csi-cinder.config" -}}
{{- $config := deepCopy .Values.openstack.csiCinder }}
{{- if .Values.openstack.csiCinder.storageClass.enabled }}
{{- $_ := set $config "manifests" (append $config.manifests "./storageclass.yaml") }}
{{- end }}
{{- include "cluster-addons.job.kustomize.config" (list . $config) }}
{{- include "cluster-addons.job.defaults" . }}
installType: kustomize
kustomize:
kustomizationTemplate: |
resources:
{{- range .Values.openstack.csiCinder.manifests }}
- {{ tpl . $ }}
{{- end }}
{{- if .Values.openstack.csiCinder.storageClass.enabled }}
- ./storageclass.yaml
{{- end }}
{{- with .Values.openstack.csiCinder.kustomization }}
kustomization: {{ toYaml . | nindent 4 }}
{{- end }}
resourceNamespace: kube-system
resources:
- statefulset/csi-cinder-controllerplugin
- daemonset/csi-cinder-nodeplugin
{{- with .Values.openstack.csiCinder.storageClass }}
{{- if .enabled }}
storageclass.yaml: |
apiVersion: storage.k8s.io/v1
kind: StorageClass
metadata:
name: {{ .name }}
{{- if .isDefault }}
annotations:
storageclass.kubernetes.io/is-default-class: "true"
{{- end }}
provisioner: cinder.csi.openstack.org
reclaimPolicy: {{ .reclaimPolicy }}
allowVolumeExpansion: {{ .allowVolumeExpansion }}
extraFiles:
storageclass.yaml: |
apiVersion: storage.k8s.io/v1
kind: StorageClass
metadata:
name: {{ .name }}
{{- if .isDefault }}
annotations:
storageclass.kubernetes.io/is-default-class: "true"
{{- end }}
provisioner: cinder.csi.openstack.org
reclaimPolicy: {{ .reclaimPolicy }}
allowVolumeExpansion: {{ .allowVolumeExpansion }}
{{- end }}
{{- end }}
{{- end }}
{{- define "cluster-addons.csi-cinder.script" -}}
{{-
include
"cluster-addons.job.kustomize.script"
(list
(list "kube-system" "statefulset/csi-cinder-controllerplugin")
(list "kube-system" "daemonset/csi-cinder-nodeplugin")
)
include "addon.job" (list
.
"csi-cinder"
"cluster-addons.csi-cinder.config"
(and .Values.openstack.enabled .Values.openstack.csiCinder.enabled)
)
}}
{{- end }}
{{- if and .Values.openstack.enabled .Values.openstack.csiCinder.enabled }}
{{- include "cluster-addons.job" (list . "csi-cinder") }}
{{- end }}
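
As a sketch, the storage class block consumed above could be configured like this (values are illustrative):

```yaml
openstack:
  enabled: true
  csiCinder:
    enabled: true
    storageClass:
      enabled: true
      name: csi-cinder
      isDefault: true
      reclaimPolicy: Delete
      allowVolumeExpansion: true
```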

View File

@ -1,19 +0,0 @@
{{- range $releaseName, $releaseSpec := .Values.customHelmReleases }}
{{-
include
"cluster-addons.job.helm"
(list
$
(printf "helm-%s" $releaseName)
(dict
"chart" $releaseSpec.chart
"release" (dict
"name" $releaseName
"namespace" (dig "namespace" $releaseName $releaseSpec)
"timeout" (dig "timeout" "5m" $releaseSpec)
)
"values" (default dict $releaseSpec.values)
)
)
}}
{{- end }}

View File

@ -1,14 +0,0 @@
{{- define "cluster-addons.custom-manifests.config" -}}
{{- range $fileName, $manifestTemplate := .Values.customManifests }}
{{ $fileName }}: |
{{- tpl $manifestTemplate $ | nindent 2 }}
{{- end }}
{{- end }}
{{- define "cluster-addons.custom-manifests.script" -}}
kubectl apply -f /config
{{- end }}
{{- if .Values.customManifests }}
{{- include "cluster-addons.job" (list . "custom-manifests") }}
{{- end }}

View File

@ -0,0 +1,22 @@
{{- define "cluster-addons.extra-addons.config" -}}
{{- $ctx := index . 0 }}
{{- $config := index . 1 }}
{{-
include "cluster-addons.job.defaults" $ctx |
fromYaml |
merge $config |
toYaml
}}
{{- end }}
{{- range $name, $config := .Values.extraAddons -}}
{{- $config := include "cluster-addons.extra-addons.config" (list $ $config) | fromYaml }}
{{-
  include "addon.job.fromConfig" (list
    $
$name
(omit $config "enabled")
(default true $config.enabled)
)
}}
{{- end }}
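
A hypothetical `extraAddons` entry, following the addon spec format used elsewhere in this commit (chart details invented for illustration):

```yaml
extraAddons:
  my-nginx:
    enabled: true
    installType: helm
    helm:
      chart:
        repo: https://charts.bitnami.com/bitnami  # hypothetical
        name: nginx
        version: 9.7.0  # hypothetical
      release:
        namespace: my-nginx
        name: my-nginx
```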

View File

@ -1,3 +1,14 @@
{{- if and .Values.ingress.enabled .Values.ingress.nginx.enabled }}
{{- include "cluster-addons.job.helm" (list . "ingress-nginx" .Values.ingress.nginx) }}
{{- define "cluster-addons.ingress-nginx.config" -}}
{{- include "cluster-addons.job.defaults" . }}
installType: helm
helm: {{ omit .Values.ingress.nginx "enabled" | toYaml | nindent 2 }}
{{- end }}
{{-
include "addon.job" (list
.
"ingress-nginx"
"cluster-addons.ingress-nginx.config"
(and .Values.ingress.enabled .Values.ingress.nginx.enabled)
)
}}

View File

@ -1,12 +1,25 @@
{{- if .Values.metricsServer.enabled }}
{{-
include
"cluster-addons.job.kustomize"
(list
.
"metrics-server"
.Values.metricsServer
(list (list "kube-system" "deployment/metrics-server"))
)
}}
{{- define "cluster-addons.metrics-server.config" -}}
{{- include "cluster-addons.job.defaults" . }}
installType: kustomize
kustomize:
kustomizationTemplate: |
resources:
{{- range .Values.metricsServer.manifests }}
- {{ tpl . $ }}
{{- end }}
{{- with .Values.metricsServer.kustomization }}
kustomization: {{ toYaml . | nindent 4 }}
{{- end }}
resourceNamespace: kube-system
resources:
- deployment/metrics-server
{{- end }}
{{-
include "addon.job" (list
.
"metrics-server"
"cluster-addons.metrics-server.config"
.Values.metricsServer.enabled
)
}}
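
For example, the values consumed here could pin a released manifest and re-apply the kubelet TLS patch used in the operator samples (URL and patch as they appear elsewhere in this commit):

```yaml
metricsServer:
  enabled: true
  manifests:
    - https://github.com/kubernetes-sigs/metrics-server/releases/download/v0.5.2/components.yaml
  kustomization:
    patches:
      - patch: |-
          - op: add
            path: /spec/template/spec/containers/0/args/-
            value: --kubelet-insecure-tls
        target:
          kind: Deployment
          name: metrics-server
```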

View File

@ -1,3 +1,14 @@
{{- if .Values.monitoring.enabled }}
{{- include "cluster-addons.job.helm" (list . "monitoring" .Values.monitoring) }}
{{- define "cluster-addons.monitoring.config" -}}
{{- include "cluster-addons.job.defaults" . }}
installType: helm
helm: {{ omit .Values.monitoring "enabled" | toYaml | nindent 2 }}
{{- end }}
{{-
include "addon.job" (list
.
"monitoring"
"cluster-addons.monitoring.config"
.Values.monitoring.enabled
)
}}
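
Everything under `monitoring` other than `enabled` is passed through to the Helm job, so chart overrides go under `values`; a minimal sketch (the Grafana key is an assumed kube-prometheus-stack option):

```yaml
monitoring:
  enabled: true
  values:
    grafana:
      adminPassword: changeme  # assumed chart value, shown for illustration
```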

View File

@ -1,3 +0,0 @@
{{- if .Values.nvidiaGPUOperator.enabled }}
{{- include "cluster-addons.job.helm" (list . "nvidia-gpu-operator" .Values.nvidiaGPUOperator) }}
{{- end }}

View File

@ -2,8 +2,8 @@
apiVersion: rbac.authorization.k8s.io/v1
kind: ClusterRoleBinding
metadata:
name: {{ include "addon.fullname" . }}
labels: {{ include "addon.labels" . | nindent 4 }}
name: {{ include "cluster-addons.fullname" . }}
labels: {{ include "cluster-addons.labels" . | nindent 4 }}
subjects:
- kind: ServiceAccount
namespace: {{ .Release.Namespace }}

View File

@ -2,9 +2,16 @@
apiVersion: rbac.authorization.k8s.io/v1
kind: Role
metadata:
name: {{ include "addon.fullname" . }}-manage-jobs
labels: {{ include "addon.labels" . | nindent 4 }}
name: {{ include "cluster-addons.fullname" . }}-manage-jobs
labels: {{ include "cluster-addons.labels" . | nindent 4 }}
rules:
- apiGroups:
- ""
resources:
- secrets
verbs:
- list
- get
- apiGroups:
- batch
resources:

View File

@ -2,8 +2,8 @@
apiVersion: rbac.authorization.k8s.io/v1
kind: RoleBinding
metadata:
name: {{ include "addon.fullname" . }}
labels: {{ include "addon.labels" . | nindent 4 }}
name: {{ include "cluster-addons.fullname" . }}-manage-jobs
labels: {{ include "cluster-addons.labels" . | nindent 4 }}
subjects:
- kind: ServiceAccount
namespace: {{ .Release.Namespace }}
@ -11,5 +11,5 @@ subjects:
roleRef:
apiGroup: rbac.authorization.k8s.io
kind: Role
name: {{ include "addon.fullname" . }}-manage-jobs
name: {{ include "cluster-addons.fullname" . }}-manage-jobs
{{- end }}

View File

@ -1,6 +1,7 @@
---
{{- if .Values.serviceAccount.create -}}
apiVersion: v1
kind: ServiceAccount
metadata:
name: {{ include "cluster-addons.componentName" (list . "deployer") }}
name: {{ tpl .Values.serviceAccount.name . }}
labels: {{ include "cluster-addons.labels" . | nindent 4 }}
{{- end }}

View File

@ -1,37 +0,0 @@
{{- if .Values.openstack.enabled }}
apiVersion: v1
kind: Secret
metadata:
# The namespace and name must match those expected by the manifests
name: cloud-config
namespace: kube-system
labels: {{ include "cluster-addons.componentLabels" (list . "openstack") | nindent 4 }}
# The addons helm release might be deleted without deleting the OpenStack integrations
# So leave this behind in order for them to continue functioning
annotations:
"helm.sh/resource-policy": keep
stringData:
# Just include the data for the cloud we will be using
clouds.yaml: |
clouds:
openstack:
{{ index .Values.clouds .Values.cloudName | toYaml | indent 8 | trim }}
# Include the certificate if specified
{{- with .Values.cloudCACert }}
cacert: |
{{ . | indent 4 | trim }}
{{- end }}
# In the cloud.conf, reference the clouds.yaml for authentication params
cloud.conf: |
[Global]
use-clouds=true
clouds-file=/etc/config/clouds.yaml
cloud=openstack
{{- if .Values.cloudCACert }}
ca-file=/etc/config/cacert
{{- end }}
{{- with .Values.openstack.cloudConfig }}
{{- nindent 4 . }}
{{- end }}
{{- end }}

View File

@ -1,39 +1,65 @@
# The name of the Kubernetes cluster we are deploying to
clusterName: kubernetes
# Defaults to the release name if not given, which is convenient when this chart is used as a dependency of openstack-cluster
clusterName: "{{ .Release.Name }}"
# Content for an OpenStack clouds.yaml file
# Having this as a top-level item allows a clouds.yaml file from OpenStack to be used as a values file
clouds:
# The name of the cloud to use from the specified clouds
cloudName: openstack
# The PEM-encoded CA certificate for the specified cloud
cloudCACert:
# The Kubernetes version of the target cluster
# This is treated as a template at rendering time
kubernetesVersion: v1.22
jobImage:
repository: ghcr.io/stackhpc/k8s-utils
tag: # defaults to chart appVersion
pullPolicy: IfNotPresent
# Details of a secret containing a kubeconfig file for a remote cluster
# If given, this is used in preference to a service account
kubeconfigSecret:
# The name of the secret
# This is treated as a template during rendering
name:
# The key of the kubeconfig file in the secret
key: value
# Indicates whether to install bootstrap charts only
# If this is set to true, then only the CNI and CCM will be installed
bootstrapOnly: false
# Options for the service account to use
# A pre-existing service account can be used, or a new one can be created
#
# A service account is always required, as it is used by the pre-delete hook
# to suspend any install jobs that are still running before the deletion proceeds
#
# The permissions required by the service account depend on whether the installation
# is targeting a remote cluster or the local cluster
#
# Whether the installation target is local or remote, the service account needs
# permission to list and patch jobs in the release namespace so that the delete hook
# can suspend any running install jobs
#
# When the installation targets the local cluster, the service account must also have
# permission to create any resources that need to be installed, which could be into
# other namespaces - the cluster-admin cluster role is normally used for this
serviceAccount:
# Indicates whether to create a new service account
create: true
# The name of the cluster role to bind the created service account to
clusterRoleName: cluster-admin
# The name of the service account
# If create = true, this is the name of the created service account
# If create = false, this is the name of an existing service account to use
# This is treated as a template during rendering
name: "{{ include \"cluster-addons.fullname\" . }}-deployer"
# The tolerations to use for jobs that are part of bootstrapping the cluster
# e.g. CNI, external cloud provider
bootstrapTolerations:
# Allow the job to run on a control plane node if required
- key: node-role.kubernetes.io/master
operator: Exists
effect: NoSchedule
# Allow the job to run on a node awaiting initialisation by an external cloud provider
- key: node.cloudprovider.kubernetes.io/uninitialized
operator: Equal
value: "true"
effect: NoSchedule
# Allow the job to run on nodes that are not ready
- key: node.kubernetes.io/not-ready
operator: Exists
effect: NoSchedule
# Defaults for job settings
# If not specified, the defaults for the version of the addons chart in use apply
# See the values of the addons chart for details
jobDefaults: {}
# image:
# repository:
# tag:
# pullPolicy:
# imagePullSecrets:
# backoffLimit:
# activeDeadlineSeconds:
# podSecurityContext:
# securityContext:
# resources:
# hostNetwork:
# tolerations:
# nodeSelector:
# affinity:
# Settings for the CNI addon
cni:
@ -62,20 +88,10 @@ cni:
namespace: kube-system
name: cilium
timeout: 5m
# See https://docs.cilium.io/en/stable/gettingstarted/k8s-install-helm/ for details
values:
ipam:
mode: kubernetes
# Settings for the Weave CNI
weave:
# The version of Weave to deploy
# There is one manifest for Kubernetes >= 1.16 and one for the rest
version: "{{ ternary \"v1.16\" \"v1.13\" (semverCompare \">=1.16\" .Capabilities.KubeVersion.Version) }}"
# The URLs of the Weave manifests
manifests:
- https://cloud.weave.works/k8s/{{ tpl .Values.cni.weave.version . }}/net
# Any kustomization to be applied to the Weave manifests
kustomization: {}
# See https://docs.cilium.io/en/stable/gettingstarted/k8s-install-helm/ for details
values:
ipam:
mode: kubernetes
# Settings for the OpenStack integrations
openstack:
@ -83,10 +99,16 @@ openstack:
enabled: false
# The version of the OpenStack cloud provider to install
# By default, use the release branch for the Kubernetes version of the target cluster
version: release-{{ .Capabilities.KubeVersion.Major }}.{{ .Capabilities.KubeVersion.Minor }}
version: release-{{ tpl .Values.kubernetesVersion . | trimPrefix "v" }}
# The base URL for OpenStack cloud provider manifests
# By default, pull the manifests from GitHub
# By default, pull the manifests from GitHub at the specified version
manifestsBaseURL: https://raw.githubusercontent.com/kubernetes/cloud-provider-openstack/{{ tpl .Values.openstack.version . }}
# The name of a secret containing a clouds.yaml file and optional cacert
# If the cacert is present, it should be referred to in the clouds.yaml file as /etc/config/cacert
# See https://docs.openstack.org/openstacksdk/latest/user/config/configuration.html#ssl-settings
cloudCredentialsSecretName:
# The name of the cloud to use in the clouds.yaml
cloudName: openstack
# cloud-config options for the OpenStack integrations
# The [Global] section is configured to use the specified cloud from .Values.clouds
# See https://github.com/kubernetes/cloud-provider-openstack/blob/master/docs/openstack-cloud-controller-manager/using-openstack-cloud-controller-manager.md#config-openstack-cloud-controller-manager
@ -99,11 +121,17 @@ openstack:
enabled: true
# The prefix for RBAC manifests
# Unfortunately, this changes for different Kubernetes versions
rbacManifestsPrefix: "{{ ternary \"/manifests/controller-manager\" \"/cluster/addons/rbac\" (semverCompare \">=1.22\" .Capabilities.KubeVersion.Version) }}"
rbacManifestsPrefix: >-
{{
tpl .Values.kubernetesVersion . |
trimPrefix "v" |
semverCompare ">=1.22" |
ternary "manifests/controller-manager" "cluster/addons/rbac"
}}
# The URLs to use for the manifests
manifests:
- "{{ tpl .Values.openstack.manifestsBaseURL . }}{{ tpl .Values.openstack.ccm.rbacManifestsPrefix . }}/cloud-controller-manager-roles.yaml"
- "{{ tpl .Values.openstack.manifestsBaseURL . }}{{ tpl .Values.openstack.ccm.rbacManifestsPrefix . }}/cloud-controller-manager-role-bindings.yaml"
- "{{ tpl .Values.openstack.manifestsBaseURL . }}/{{ tpl .Values.openstack.ccm.rbacManifestsPrefix . }}/cloud-controller-manager-roles.yaml"
- "{{ tpl .Values.openstack.manifestsBaseURL . }}/{{ tpl .Values.openstack.ccm.rbacManifestsPrefix . }}/cloud-controller-manager-role-bindings.yaml"
- "{{ tpl .Values.openstack.manifestsBaseURL . }}/manifests/controller-manager/openstack-cloud-controller-manager-ds.yaml"
# Any kustomization to apply to the OpenStack CCM manifests
kustomization: {}
@ -157,7 +185,7 @@ metricsServer:
# Settings for cert-manager
certManager:
# Indicates if cert-manager should be enabled
enabled: true
enabled: false
chart:
repo: https://charts.jetstack.io
name: cert-manager
@ -166,14 +194,15 @@ certManager:
namespace: cert-manager
name: cert-manager
timeout: 5m
# See https://cert-manager.io/docs/installation/helm/ for available values
values:
# By default, make sure the cert-manager CRDs are installed
installCRDs: true
# Disable Prometheus support for now
prometheus:
enabled: false
# See https://cert-manager.io/docs/installation/helm/ for available values
values:
# By default, make sure the cert-manager CRDs are installed
installCRDs: true
# Disable Prometheus support for now
prometheus:
enabled: false
# Settings for automatic ACME HTTP01 support using Let's Encrypt
# This is only enabled if ingress is also enabled
acmeHttp01Issuer:
enabled: true
name: letsencrypt-http01
@ -182,7 +211,7 @@ certManager:
# Settings for ingress controllers
ingress:
# Indicates if ingress controllers should be enabled
enabled: true
enabled: false
# Settings for the Nginx ingress controller
nginx:
# Indicates if the Nginx ingress controller should be enabled
@ -195,73 +224,31 @@ ingress:
namespace: ingress-nginx
name: ingress-nginx
timeout: 5m
# See https://github.com/kubernetes/ingress-nginx/tree/main/charts/ingress-nginx#configuration
values: {}
# Settings for the NVIDIA GPU operator
nvidiaGPUOperator:
# Indicates if the NVIDIA GPU operator should be enabled
enabled: false
chart:
repo: https://nvidia.github.io/gpu-operator
name: gpu-operator
version: v1.8.1
release:
namespace: gpu-operator
name: gpu-operator
timeout: 5m
values:
# By default, CAPI clusters use containerd
operator:
defaultRuntime: containerd
# See https://github.com/kubernetes/ingress-nginx/tree/main/charts/ingress-nginx#configuration
values: {}
# Settings for cluster monitoring
monitoring:
# Indicates if the cluster monitoring should be enabled
enabled: true
# Because Helm doesn't upgrade CRDs, we install them manually instead
# In the manifest base URL, "${CHART_VERSION}" and "${CHART_APPVERSION}" are replaced with the
# version and appVersion of the Helm chart respectively
crdManifestsBaseURL: https://raw.githubusercontent.com/prometheus-operator/prometheus-operator/v${CHART_APPVERSION}/example/prometheus-operator-crd
enabled: false
crdManifests:
- monitoring.coreos.com_alertmanagerconfigs.yaml
- monitoring.coreos.com_alertmanagers.yaml
- monitoring.coreos.com_podmonitors.yaml
- monitoring.coreos.com_probes.yaml
- monitoring.coreos.com_prometheuses.yaml
- monitoring.coreos.com_prometheusrules.yaml
- monitoring.coreos.com_servicemonitors.yaml
- monitoring.coreos.com_thanosrulers.yaml
- https://raw.githubusercontent.com/prometheus-operator/prometheus-operator/v0.53.1/example/prometheus-operator-crd/monitoring.coreos.com_alertmanagerconfigs.yaml
- https://raw.githubusercontent.com/prometheus-operator/prometheus-operator/v0.53.1/example/prometheus-operator-crd/monitoring.coreos.com_alertmanagers.yaml
- https://raw.githubusercontent.com/prometheus-operator/prometheus-operator/v0.53.1/example/prometheus-operator-crd/monitoring.coreos.com_podmonitors.yaml
- https://raw.githubusercontent.com/prometheus-operator/prometheus-operator/v0.53.1/example/prometheus-operator-crd/monitoring.coreos.com_probes.yaml
- https://raw.githubusercontent.com/prometheus-operator/prometheus-operator/v0.53.1/example/prometheus-operator-crd/monitoring.coreos.com_prometheuses.yaml
- https://raw.githubusercontent.com/prometheus-operator/prometheus-operator/v0.53.1/example/prometheus-operator-crd/monitoring.coreos.com_prometheusrules.yaml
- https://raw.githubusercontent.com/prometheus-operator/prometheus-operator/v0.53.1/example/prometheus-operator-crd/monitoring.coreos.com_servicemonitors.yaml
- https://raw.githubusercontent.com/prometheus-operator/prometheus-operator/v0.53.1/example/prometheus-operator-crd/monitoring.coreos.com_thanosrulers.yaml
chart:
repo: https://prometheus-community.github.io/helm-charts
name: kube-prometheus-stack
version: 18.0.8
version: 30.0.1
release:
namespace: monitoring-system
name: kube-prometheus-stack
timeout: 5m
values:
values: {}
# Custom manifests to apply
# This should be a map of filenames to manifest content
customManifests:
# Custom Helm releases to apply
# This should be a map of release name to release parameters
customHelmReleases:
# my-wordpress:
# chart:
# # The repository that the chart is in
# repo: https://charts.bitnami.com/bitnami
# # The name of the chart
# name: wordpress
# # The version of the chart to install
# # NOTE: THIS IS REQUIRED
# version: 12.1.6
# # The namespace for the release
# # If not given, this defaults to the release name
# namespace: my-wordpress
# # The amount of time to wait for the chart to install before rolling back
# timeout: 5m
# # The values for the chart
# values: {}
# Map of extra addons in the form "component name" -> "addon spec"
extraAddons: {}

View File

@ -4,3 +4,10 @@ description: Helm chart for deploying a cluster on an OpenStack cloud using Clus
type: application
version: 0.1.0
appVersion: main
dependencies:
- name: cluster-addons
version: "*"
repository: file://../cluster-addons
alias: addons
condition: addons.enabled

View File

@ -110,7 +110,8 @@ working cluster:
```yaml
# The target Kubernetes version
kubernetesVersion: 1.22.1
global:
kubernetesVersion: 1.22.1
# An image with the required software installed at the target version
machineImage: ubuntu-2004-kube-v{{ .Values.kubernetesVersion }}
@ -171,21 +172,22 @@ command again. Some examples of updates that can be performed are:
### Cluster addons
The cluster addons are enabled by default, however by default only a CNI and the
OpenStack CCM are enabled.
The cluster addons are enabled by default; however, only a CNI, the
[Metrics Server](https://github.com/kubernetes-sigs/metrics-server), the
OpenStack CCM and the Cinder CSI are enabled out of the box.
You can configure which addons are deployed and the configuration of those addons
by specifying values for the addons Helm chart:
```yaml
addons:
values:
nvidiaGPUOperator:
enabled: true
# Enable the Nginx ingress controller
ingress:
enabled: true
```
The available options under `addons.values` correspond to the available options
for the [cluster-addons chart](../cluster-addons).
The available options under `addons` correspond to the chart values for the
[cluster-addons chart](../cluster-addons).
The cluster addons can also be disabled completely using the following configuration:
@ -200,10 +202,6 @@ addons:
enabled: false
```
Note that changing this after the initial deployment will **not** uninstall any
addons that have already been installed, but it will prevent updates to addons
from being applied.
## Accessing a workload cluster
To access the cluster, use `clusterctl` to generate a kubeconfig file:

View File

@ -72,12 +72,12 @@ capi.stackhpc.com/node-group: {{ $nodeGroupName }}
{{- end -}}
{{/*
Name of the cloud-config secret.
Name of the secret containing the cloud credentials.
*/}}
{{- define "openstack-cluster.cloudConfigSecretName" -}}
{{- if .Values.cloudConfigSecretName -}}
{{- .Values.cloudConfigSecretName -}}
{{- define "openstack-cluster.cloudCredentialsSecretName" -}}
{{- if .Values.global.cloudCredentialsSecretName -}}
{{- .Values.global.cloudCredentialsSecretName -}}
{{- else -}}
{{ include "openstack-cluster.componentName" (list . "cloud-config") -}}
{{ include "openstack-cluster.componentName" (list . "cloud-credentials") -}}
{{- end -}}
{{- end -}}

View File

@ -1,130 +0,0 @@
{{- if .Values.addons.enabled }}
apiVersion: batch/v1
kind: Job
metadata:
name: {{ include "openstack-cluster.componentName" (list . "addons") }}-{{ .Release.Revision }}
labels: {{ include "openstack-cluster.componentLabels" (list . "addons") | nindent 4 }}
spec:
# Keep trying for a decent amount of time before failing
backoffLimit: 1000
template:
metadata:
labels:
capi.stackhpc.com/cluster: {{ include "openstack-cluster.clusterName" . }}
capi.stackhpc.com/component: addons
spec:
serviceAccountName: {{ include "openstack-cluster.componentName" (list . "addon-deployer") }}
# Ensure that we run as a non-root user
securityContext:
runAsUser: 1001
restartPolicy: OnFailure
containers:
- name: addons
image: {{ printf "%s:%s" .Values.addons.jobImage.repository (default .Chart.AppVersion .Values.addons.jobImage.tag) }}
imagePullPolicy: {{ .Values.addons.jobImage.pullPolicy }}
args:
- /bin/sh
- -c
# On the first deployment, we need to install the CNI and CCM before the control plane
# can be "Ready". However the control plane will become "Available" once "kubeadm init"
# has run successfully on the first control plane node - we can watch for this condition
# and install the bootstrap components.
# We wait for the control plane to be "Ready" before installing all other addons.
#
# On subsequent deployments, we wait for the control plane to be "Ready" before updating
# any addons.
- |
set -exo pipefail
controlplane_condition() {
kubectl get kubeadmcontrolplane \
{{ include "openstack-cluster.componentName" (list . "control-plane") }} \
-o json 2>/dev/null | \
jq -e ".status.conditions[] | select(.type == \"$1\") | .status == \"True\"" > /dev/null 2>&1
}
helm_release_exists() {
helm status {{ .Values.addons.release.name }} \
--kubeconfig /kubeconfig/value \
--namespace {{ .Values.addons.release.namespace }}
}
helm_release_status() {
helm status {{ .Values.addons.release.name }} \
--kubeconfig /kubeconfig/value \
--namespace {{ .Values.addons.release.namespace }} \
--output json | \
jq -r '.info.status'
}
helm_release() {
if helm_release_exists; then
status="$(helm_release_status)"
if [ "$status" = "pending-install" ]; then
helm delete {{ .Values.addons.release.name }} \
--kubeconfig /kubeconfig/value \
--namespace {{ .Values.addons.release.namespace }} \
--wait --timeout {{ .Values.addons.release.timeout }}
elif [ "$status" = "pending-upgrade" ]; then
helm rollback {{ .Values.addons.release.name }} \
--kubeconfig /kubeconfig/value \
--namespace {{ .Values.addons.release.namespace }} \
--cleanup-on-fail \
--wait --wait-for-jobs --timeout {{ .Values.addons.release.timeout }}
fi
fi
helm upgrade {{ .Values.addons.release.name }} {{ .Values.addons.chart.name }} \
--kubeconfig /kubeconfig/value \
--atomic --install \
--namespace {{ .Values.addons.release.namespace }} --create-namespace \
--repo {{ .Values.addons.chart.repo }} \
--version {{ default .Chart.Version .Values.addons.chart.version }} \
--values /cloud-config/clouds.yaml \
{{- if .Values.cloudCACert }}
--set-file cloudCACert=/cloud-config/cacert \
{{- end }}
--values /config/values.yaml \
--set clusterName={{ include "openstack-cluster.clusterName" . }} \
--wait --wait-for-jobs --timeout {{ .Values.addons.release.timeout }} \
"$@"
}
{{- if .Release.IsInstall }}
while ! controlplane_condition "Available" ; do
echo "Waiting for control plane to become available..."
sleep 10
done
echo "Installing bootstrap addon components..."
helm_release --set bootstrapOnly=true
{{- end }}
while ! controlplane_condition "Ready" ; do
echo "Waiting for control plane to become ready..."
sleep 10
done
echo "{{ if .Release.IsInstall }}Installing{{ else }}Updating{{ end }} addon components..."
helm_release
volumeMounts:
- name: kubeconfig
mountPath: /kubeconfig
readOnly: true
- name: cloud-config
mountPath: /cloud-config
readOnly: true
- name: helm-values
mountPath: /config
readOnly: true
volumes:
- name: kubeconfig
secret:
secretName: {{ include "openstack-cluster.componentName" (list . "kubeconfig") }}
- name: cloud-config
secret:
secretName: {{ include "openstack-cluster.cloudConfigSecretName" . }}
- name: helm-values
secret:
secretName: {{ include "openstack-cluster.componentName" (list . "addons-values") }}
{{- end }}

View File

@ -1,14 +0,0 @@
{{- if .Values.addons.enabled }}
apiVersion: rbac.authorization.k8s.io/v1
kind: RoleBinding
metadata:
name: {{ include "openstack-cluster.componentName" (list . "addon-deployer") }}
labels: {{ include "openstack-cluster.componentLabels" (list . "addons") | nindent 4 }}
subjects:
- kind: ServiceAccount
name: {{ include "openstack-cluster.componentName" (list . "addon-deployer") }}
roleRef:
apiGroup: rbac.authorization.k8s.io
kind: Role
name: {{ include "openstack-cluster.componentName" (list . "addon-deployer") }}
{{- end }}

View File

@ -1,14 +0,0 @@
{{- if .Values.addons.enabled }}
apiVersion: rbac.authorization.k8s.io/v1
kind: Role
metadata:
name: {{ include "openstack-cluster.componentName" (list . "addon-deployer") }}
labels: {{ include "openstack-cluster.componentLabels" (list . "addons") | nindent 4 }}
rules:
# Just grant access to the control plane for this cluster
- apiGroups: ["controlplane.cluster.x-k8s.io"]
resources: ["kubeadmcontrolplanes"]
resourceNames:
- {{ include "openstack-cluster.componentName" (list . "control-plane") }}
verbs: ["get"]
{{- end }}

View File

@ -1,10 +0,0 @@
{{- if .Values.addons.enabled }}
apiVersion: v1
kind: Secret
metadata:
name: {{ include "openstack-cluster.componentName" (list . "addons-values") }}
labels: {{ include "openstack-cluster.componentLabels" (list . "addons") | nindent 4 }}
stringData:
values.yaml: |
{{- toYaml .Values.addons.values | nindent 4 }}
{{- end }}

View File

@ -1,7 +0,0 @@
{{- if .Values.addons.enabled }}
apiVersion: v1
kind: ServiceAccount
metadata:
name: {{ include "openstack-cluster.componentName" (list . "addon-deployer") }}
labels: {{ include "openstack-cluster.componentLabels" (list . "addons") | nindent 4 }}
{{- end }}

View File

@ -7,7 +7,7 @@ metadata:
spec:
identityRef:
kind: Secret
name: {{ include "openstack-cluster.cloudConfigSecretName" . }}
name: {{ include "openstack-cluster.cloudCredentialsSecretName" . }}
cloudName: openstack
{{- with .Values.controlPlaneEndpoint }}
controlPlaneEndpoint: {{ . | toYaml | nindent 4 }}

View File

@ -5,7 +5,7 @@ metadata:
name: {{ include "openstack-cluster.componentName" (list . "control-plane") }}
labels: {{ include "openstack-cluster.componentLabels" (list . "control-plane") | nindent 4 }}
spec:
version: {{ .Values.kubernetesVersion | required ".Values.kubernetesVersion is required" }}
version: {{ .Values.global.kubernetesVersion | required ".Values.global.kubernetesVersion is required" }}
replicas: {{ .Values.controlPlane.machineCount }}
rolloutStrategy: {{ toYaml .Values.controlPlane.rolloutStrategy | nindent 4 }}
machineTemplate:

View File

@ -7,7 +7,7 @@ template:
spec:
identityRef:
kind: Secret
name: {{ include "openstack-cluster.cloudConfigSecretName" . }}
name: {{ include "openstack-cluster.cloudCredentialsSecretName" . }}
cloudName: openstack
flavor: {{ .Values.controlPlane.machineFlavor | required ".Values.controlPlane.machineFlavor is required" }}
{{- with .Values.machineSSHKeyName }}

View File

@ -17,7 +17,7 @@ spec:
labels: {{ include "openstack-cluster.nodeGroupSelectorLabels" (list $ $nodeGroup.name) | nindent 8 }}
spec:
clusterName: {{ include "openstack-cluster.clusterName" $ }}
version: {{ $.Values.kubernetesVersion }}
version: {{ $.Values.global.kubernetesVersion }}
failureDomain: {{ $nodeGroup.failureDomain }}
bootstrap:
configRef:

View File

@ -9,7 +9,7 @@ template:
spec:
identityRef:
kind: Secret
name: {{ include "openstack-cluster.cloudConfigSecretName" $ctx }}
name: {{ include "openstack-cluster.cloudCredentialsSecretName" $ctx }}
cloudName: openstack
flavor: {{ $nodeGroup.machineFlavor | required (printf "no flavor specified for node group '%s'" $nodeGroup.name) }}
{{- with $ctx.Values.machineSSHKeyName }}

View File

@ -1,4 +1,4 @@
{{- if not .Values.cloudConfigSecretName }}
{{- if not .Values.global.cloudCredentialsSecretName }}
{{- $cloud := index .Values.clouds .Values.cloudName }}
{{- if not (dig "auth" "project_id" nil $cloud) }}
{{- fail "clouds.yaml must contain the project ID" }}
@ -7,9 +7,9 @@
apiVersion: v1
kind: Secret
metadata:
name: {{ include "openstack-cluster.cloudConfigSecretName" . }}
labels: {{ include "openstack-cluster.componentLabels" (list . "cloud-config") | nindent 4 }}
# If the cloud-config is deleted before the cluster has finished deleting, then the cluster
name: {{ include "openstack-cluster.cloudCredentialsSecretName" . }}
labels: {{ include "openstack-cluster.componentLabels" (list . "cloud-credentials") | nindent 4 }}
# If the cloud credentials are deleted before the cluster has finished deleting, then the cluster
# deletion cannot proceed any further. So prevent Helm from deleting it.
annotations:
"helm.sh/resource-policy": keep

View File

@ -1,5 +1,10 @@
# The name of an existing secret containing the cloud-config
cloudConfigSecretName:
# Configuration that is shared between the cluster and the addons
global:
# The Kubernetes version of the cluster
# This should match the version of kubelet and kubeadm in the image
kubernetesVersion:
# The name of an existing secret containing a clouds.yaml and optional cacert
cloudCredentialsSecretName:
# Content for the clouds.yaml file
# Having this as a top-level item allows a clouds.yaml file from OpenStack to be used as a values file
@ -19,10 +24,6 @@ machineImageId:
# The name of the SSH key to inject into cluster machines
machineSSHKeyName:
# The Kubernetes version to deploy
# This should match the version of kubelet in the image
kubernetesVersion:
# Values for the Kubernetes cluster network
kubeNetwork:
# By default, use the private network range 172.16.0.0/12 for the cluster network
@ -222,25 +223,14 @@ nodeGroups:
addons:
# Indicates if cluster addons should be deployed
enabled: true
# The image to use for the job that deploys the addons
jobImage:
repository: ghcr.io/stackhpc/k8s-utils
tag: # defaults to chart appVersion
pullPolicy: IfNotPresent
# The chart to use for deploying addons
chart:
repo: https://stackhpc.github.io/capi-helm-charts
name: cluster-addons
version: # This defaults to the version that matches this chart
# Release details for the addons deployment
release:
namespace: capi-system
name: cluster-addons
timeout: 60m
# Values for the addons
# See https://github.com/stackhpc/capi-helm-charts/blob/main/charts/cluster-addons for details
# The clouds.yaml used for cluster deployment will be given in addition to these
values:
# By default, enable the OpenStack integrations
openstack:
enabled: true
clusterName: "{{ include \"openstack-cluster.clusterName\" . }}"
# The Kubernetes version for the addons should be v<major>.<minor>
kubernetesVersion: "v{{ .Values.global.kubernetesVersion | splitList \".\" | reverse | rest | reverse | join \".\" }}"
# Launch addons on the workload cluster using the kubeconfig file created by CAPI
kubeconfigSecret:
name: "{{ include \"openstack-cluster.componentName\" (list . \"kubeconfig\") }}"
key: value
# By default, enable the OpenStack integrations
openstack:
enabled: true
cloudCredentialsSecretName: "{{ include \"openstack-cluster.cloudCredentialsSecretName\" . }}"
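
The `kubernetesVersion` pipeline above simply drops the patch component of the version; a worked example, assuming `global.kubernetesVersion` is `1.22.3`:

```yaml
# "1.22.3" | splitList "."  ->  ["1", "22", "3"]
#          | reverse        ->  ["3", "22", "1"]
#          | rest           ->  ["22", "1"]
#          | reverse        ->  ["1", "22"]
#          | join "."       ->  "1.22"
# prefixed with "v"         ->  "v1.22"
```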

View File

@ -1,23 +0,0 @@
# Patterns to ignore when building packages.
# This supports shell glob matching, relative path matching, and
# negation (prefixed with !). Only one pattern per line.
.DS_Store
# Common VCS dirs
.git/
.gitignore
.bzr/
.bzrignore
.hg/
.hgignore
.svn/
# Common backup files
*.swp
*.bak
*.tmp
*.orig
*~
# Various IDEs
.project
.idea/
*.tmproj
.vscode/

View File

@ -1,6 +0,0 @@
apiVersion: v2
name: capi-operator
description: Helm chart for deploying the StackHPC CAPI operator.
type: application
version: 0.1.0
appVersion: main

View File

@ -1,28 +0,0 @@
apiVersion: apiextensions.k8s.io/v1
kind: CustomResourceDefinition
metadata:
name: addons.capi.stackhpc.com
spec:
group: capi.stackhpc.com
names:
plural: addons
kind: Addon
scope: Namespaced
versions:
- name: v1alpha1
schema:
openAPIV3Schema:
properties:
spec:
description: The configuration for the addon.
type: object
x-kubernetes-preserve-unknown-fields: true
status:
description: The observed state of the addon.
type: object
x-kubernetes-preserve-unknown-fields: true
type: object
served: true
storage: true
subresources:
status: {}

View File

@ -1,34 +0,0 @@
{{- define "operator.name" -}}
{{- default .Chart.Name .Values.nameOverride | trunc 63 | trimSuffix "-" }}
{{- end }}
{{- define "operator.fullname" -}}
{{- if .Values.fullnameOverride }}
{{- .Values.fullnameOverride | trunc 63 | trimSuffix "-" }}
{{- else }}
{{- $name := default .Chart.Name .Values.nameOverride }}
{{- if contains $name .Release.Name }}
{{- .Release.Name | trunc 63 | trimSuffix "-" }}
{{- else }}
{{- printf "%s-%s" .Release.Name $name | trunc 63 | trimSuffix "-" }}
{{- end }}
{{- end }}
{{- end }}
{{- define "operator.chart" -}}
{{- printf "%s-%s" .Chart.Name .Chart.Version | replace "+" "_" | trunc 63 | trimSuffix "-" }}
{{- end }}
{{- define "operator.selectorLabels" -}}
app.kubernetes.io/name: {{ include "operator.name" . }}
app.kubernetes.io/instance: {{ .Release.Name }}
{{- end }}
{{- define "operator.labels" -}}
helm.sh/chart: {{ include "operator.chart" . }}
app.kubernetes.io/managed-by: {{ .Release.Service }}
{{- if .Chart.AppVersion }}
app.kubernetes.io/version: {{ .Chart.AppVersion | quote }}
{{- end }}
{{ include "operator.selectorLabels" . }}
{{- end }}

View File

@ -1,10 +0,0 @@
apiVersion: rbac.authorization.k8s.io/v1
kind: ClusterRole
metadata:
name: {{ include "operator.fullname" . }}-metrics-reader
labels: {{ include "operator.labels" . | nindent 4 }}
rules:
- nonResourceURLs:
- /metrics
verbs:
- get

View File

@ -1,18 +0,0 @@
apiVersion: rbac.authorization.k8s.io/v1
kind: ClusterRole
metadata:
name: {{ include "operator.fullname" . }}-proxy-role
labels: {{ include "operator.labels" . | nindent 4 }}
rules:
- apiGroups:
- authentication.k8s.io
resources:
- tokenreviews
verbs:
- create
- apiGroups:
- authorization.k8s.io
resources:
- subjectaccessreviews
verbs:
- create

View File

@ -1,13 +0,0 @@
apiVersion: rbac.authorization.k8s.io/v1
kind: ClusterRoleBinding
metadata:
name: {{ include "operator.fullname" . }}-manager-rolebinding
labels: {{ include "operator.labels" . | nindent 4 }}
roleRef:
apiGroup: rbac.authorization.k8s.io
kind: ClusterRole
name: cluster-admin
subjects:
- kind: ServiceAccount
name: {{ include "operator.fullname" . }}-controller-manager
namespace: {{ .Release.Namespace }}

View File

@ -1,13 +0,0 @@
apiVersion: rbac.authorization.k8s.io/v1
kind: ClusterRoleBinding
metadata:
name: {{ include "operator.fullname" . }}-proxy-rolebinding
labels: {{ include "operator.labels" . | nindent 4 }}
roleRef:
apiGroup: rbac.authorization.k8s.io
kind: ClusterRole
name: {{ include "operator.fullname" . }}-proxy-role
subjects:
- kind: ServiceAccount
name: {{ include "operator.fullname" . }}-controller-manager
namespace: {{ .Release.Namespace }}

View File

@ -1,16 +0,0 @@
apiVersion: v1
kind: ConfigMap
metadata:
name: {{ include "operator.fullname" . }}-manager-config
labels: {{ include "operator.labels" . | nindent 4 }}
data:
controller_manager_config.yaml: |
apiVersion: controller-runtime.sigs.k8s.io/v1alpha1
kind: ControllerManagerConfig
health:
healthProbeBindAddress: :8081
metrics:
bindAddress: 127.0.0.1:8080
leaderElection:
leaderElect: true
resourceName: 811c9dc5.stackhpc.com

View File

@ -1,68 +0,0 @@
apiVersion: apps/v1
kind: Deployment
metadata:
name: {{ include "operator.fullname" . }}-controller-manager
labels: {{ include "operator.labels" . | nindent 4 }}
# control-plane: controller-manager
spec:
replicas: 1
selector:
matchLabels: {{ include "operator.selectorLabels" . | nindent 6 }}
template:
metadata:
annotations:
kubectl.kubernetes.io/default-container: manager
labels: {{ include "operator.selectorLabels" . | nindent 8 }}
spec:
{{- with .Values.imagePullSecrets }}
imagePullSecrets: {{ toYaml . | nindent 8 }}
{{- end }}
securityContext: {{ toYaml .Values.podSecurityContext | nindent 8 }}
serviceAccountName: {{ include "operator.fullname" . }}-controller-manager
terminationGracePeriodSeconds: 10
containers:
- name: kube-rbac-proxy
image: {{ printf "%s:%s" .Values.kubeRbacProxy.image.repository .Values.kubeRbacProxy.image.tag }}
imagePullPolicy: {{ .Values.kubeRbacProxy.image.pullPolicy }}
securityContext: {{ toYaml .Values.securityContext | nindent 12 }}
args:
- --secure-listen-address=0.0.0.0:8443
- --upstream=http://127.0.0.1:8080/
- --logtostderr=true
- --v=10
ports:
- containerPort: 8443
name: https
protocol: TCP
resources: {{ toYaml .Values.kubeRbacProxy.resources | nindent 12 }}
- name: manager
image: {{ printf "%s:%s" .Values.image.repository (default .Chart.AppVersion .Values.image.tag) }}
imagePullPolicy: {{ .Values.image.pullPolicy }}
securityContext: {{ toYaml .Values.securityContext | nindent 12 }}
args:
- --health-probe-bind-address=:8081
- --metrics-bind-address=127.0.0.1:8080
- --leader-elect
- --leader-election-id={{ include "operator.fullname" . }}-leader-election
livenessProbe:
httpGet:
path: /healthz
port: 8081
initialDelaySeconds: 15
periodSeconds: 20
readinessProbe:
httpGet:
path: /readyz
port: 8081
initialDelaySeconds: 5
periodSeconds: 10
resources: {{ toYaml .Values.resources | nindent 12 }}
{{- with .Values.nodeSelector }}
nodeSelector: {{ toYaml . | nindent 8 }}
{{- end }}
{{- with .Values.affinity }}
affinity: {{ toYaml . | nindent 8 }}
{{- end }}
{{- with .Values.tolerations }}
tolerations: {{ toYaml . | nindent 8 }}
{{- end }}

View File

@ -1,37 +0,0 @@
apiVersion: rbac.authorization.k8s.io/v1
kind: Role
metadata:
name: {{ include "operator.fullname" . }}-leader-election-role
labels: {{ include "operator.labels" . | nindent 4 }}
rules:
- apiGroups:
- ""
resources:
- configmaps
verbs:
- get
- list
- watch
- create
- update
- patch
- delete
- apiGroups:
- coordination.k8s.io
resources:
- leases
verbs:
- get
- list
- watch
- create
- update
- patch
- delete
- apiGroups:
- ""
resources:
- events
verbs:
- create
- patch

View File

@ -1,13 +0,0 @@
apiVersion: rbac.authorization.k8s.io/v1
kind: RoleBinding
metadata:
name: {{ include "operator.fullname" . }}-leader-election-rolebinding
labels: {{ include "operator.labels" . | nindent 4 }}
roleRef:
apiGroup: rbac.authorization.k8s.io
kind: Role
name: {{ include "operator.fullname" . }}-leader-election-role
subjects:
- kind: ServiceAccount
name: {{ include "operator.fullname" . }}-controller-manager
namespace: {{ .Release.Namespace }}

View File

@ -1,13 +0,0 @@
apiVersion: v1
kind: Service
metadata:
name: {{ include "operator.fullname" . }}-controller-manager-metrics-service
labels: {{ include "operator.labels" . | nindent 4 }}
# control-plane: controller-manager
spec:
ports:
- name: https
port: 8443
protocol: TCP
targetPort: https
selector: {{ include "operator.selectorLabels" . | nindent 4 }}

View File

@ -1,5 +0,0 @@
apiVersion: v1
kind: ServiceAccount
metadata:
name: {{ include "operator.fullname" . }}-controller-manager
labels: {{ include "operator.labels" . | nindent 4 }}

View File

@ -1,30 +0,0 @@
image:
repository: ghcr.io/stackhpc/capi-operator
tag:
pullPolicy: IfNotPresent
imagePullSecrets: []
podSecurityContext:
runAsNonRoot: true
securityContext:
allowPrivilegeEscalation: false
resources: {}
# Node selector for operator pods
nodeSelector: {}
# Affinity rules for operator pods
affinity: {}
# Tolerations for operator pods
tolerations: []
kubeRbacProxy:
image:
repository: gcr.io/kubebuilder/kube-rbac-proxy
tag: v0.8.0
pullPolicy: IfNotPresent
resources: {}

View File

@ -1,37 +0,0 @@
apiVersion: capi.stackhpc.com/v1alpha1
kind: Addon
metadata:
name: cert-manager
spec:
installType: helm
helm:
chart:
repo: https://charts.jetstack.io
name: cert-manager
version: v1.6.1
release:
namespace: cert-manager
name: cert-manager
values:
installCRDs: true
prometheus:
enabled: false
# Use extraFiles and afterScript to install an issuer
extraFiles:
acme-http01-issuer.yaml: |
apiVersion: cert-manager.io/v1
kind: ClusterIssuer
metadata:
name: letsencrypt-http01
spec:
acme:
server: https://acme-v02.api.letsencrypt.org/directory
privateKeySecretRef:
name: letsencrypt-http01-key
solvers:
- http01:
ingress:
class: nginx
hooks:
postInstall: |
kubectl apply -f acme-http01-issuer.yaml

View File

@ -1,26 +0,0 @@
apiVersion: capi.stackhpc.com/v1alpha1
kind: Addon
metadata:
name: metrics-server
spec:
# We can access this in the kustomization template
# Just make sure it doesn't conflict with any actual chart options!
version: v0.5.2
installType: kustomize
kustomize:
kustomizationTemplate: |
resources:
- https://github.com/kubernetes-sigs/metrics-server/releases/download/{{ .Values.version }}/components.yaml
kustomization:
patches:
- patch: |-
- op: add
path: /spec/template/spec/containers/0/args/-
value: --kubelet-insecure-tls
target:
kind: Deployment
name: metrics-server
# Define the resources to watch for
resourceNamespace: kube-system
resources:
- deployment/metrics-server

View File

@ -17,6 +17,8 @@ RUN apt-get update && \
apt-get install -y curl jq tini && \
rm -rf /var/lib/apt/lists/*
COPY --from=hairyhenderson/gomplate:v3.10.0 /gomplate /usr/bin/gomplate
ARG KUBECTL_VN_1_20=v1.20.14
RUN set -ex; \
OS_ARCH="$(uname -m)"; \

View File

@ -45,6 +45,9 @@ if helm-exists $RELEASE $NAMESPACE_ARG; then
elif [ "$status" = "pending-upgrade" ]; then
echo "Rolling back failed upgrade..."
helm rollback $RELEASE $NAMESPACE_ARG --cleanup-on-fail --wait --wait-for-jobs $TIMEOUT_ARG
elif [ "$status" = "pending-rollback" ]; then
echo "Competing pending rollback..."
helm rollback $RELEASE $NAMESPACE_ARG --cleanup-on-fail --wait --wait-for-jobs $TIMEOUT_ARG
fi
fi

View File

@ -9,32 +9,11 @@
# the simplest possible API request... :fingerscrossed:
#####
KUBECONFIG_ARG=
KUBECTL_ARGS=
while :; do
case $1 in
--kubeconfig)
KUBECONFIG_ARG="$1 $2"
shift
;;
--kubeconfig=?*)
KUBECONFIG_ARG="$1"
;;
?*)
KUBECTL_ARGS="$KUBECTL_ARGS $1"
;;
*)
break
esac
shift
done
set -eo pipefail
# Use the latest version of kubectl to detect the server version
kubectl_exe=kubectl-$KUBECTL_VN_LATEST
server_version="$($kubectl_exe $KUBECONFIG_ARG version -o json | jq -r '"v" + .serverVersion.major + "." + .serverVersion.minor')"
server_version="$($kubectl_exe version -o json | jq -r '"v" + .serverVersion.major + "." + .serverVersion.minor')"
# Account for the case where we don't have the correct kubectl version by falling back to using the latest
which "kubectl-$server_version" > /dev/null && kubectl_exe="kubectl-$server_version"
exec $kubectl_exe $KUBECONFIG_ARG $KUBECTL_ARGS
exec $kubectl_exe "$@"

View File

@ -1,4 +0,0 @@
- group: capi.stackhpc.com
version: v1alpha1
kind: Addon
chart: helm-charts/addon