Use Helm to manage resources templated by kustomize in releases

Matt Pryor 2022-01-28 12:44:44 +00:00
parent 8f32d7a912
commit d172024e54
16 changed files with 150 additions and 138 deletions

View File

@ -101,7 +101,7 @@ values obtained from rendering the valuesTemplate.
*/}}
{{- define "addon.helm.values" }}
{{- $ctx := index . 0 }}
{{- $config := index . 1 }}
{{- $config := index . 2 }}
{{- if $config.release.valuesTemplate }}
{{- $templateValues := tpl $config.release.valuesTemplate $ctx | fromYaml }}
{{- include "addon.mergeConcat" (list $config.release.values $templateValues) }}
@ -122,49 +122,46 @@ There is also support for rolling back an interrupted install or upgrade before
proceeding by checking for the pending-[install,upgrade] status.
*/}}
{{- define "addon.helm.install" -}}
{{- $name := index . 0 }}
{{- $config := index . 1 }}
{{-
$chartRepo := required
"chart.repo is required for a Helm job"
.chart.repo
$config.chart.repo
}}
{{-
$chartName := required
"chart.name is required for a Helm job"
.chart.name
$config.chart.name
}}
{{-
$chartVersion := required
"chart.version is required for a Helm job"
.chart.version
$config.chart.version
}}
{{-
$releaseNamespace := required
"release.namespace is required for a Helm job"
.release.namespace
$config.release.namespace
}}
{{-
$releaseName := required
"release.name is required for a Helm job"
.release.name
}}
{{- range .crdManifests }}
{{- range $config.crdManifests }}
kubectl create -f {{ . }} || \
kubectl replace -f {{ . }}
{{- end }}
helm-upgrade {{ $releaseName }} {{ $chartName }} \
helm-upgrade {{ $name }} {{ $chartName }} \
--atomic \
--install \
--namespace {{ $releaseNamespace }} \
--create-namespace \
--repo {{ $chartRepo }} \
--version {{ $chartVersion }} \
{{- if .crdManifests -}}
{{- if $config.crdManifests -}}
--skip-crds \
{{- end }}
--values values.yaml \
--wait \
--wait-for-jobs \
--timeout {{ .release.timeout }} \
--timeout 24h \
$HELM_EXTRA_ARGS
{{- end }}
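As an aside on the pending-[install,upgrade] handling mentioned in the comment above: the helm-upgrade wrapper itself is not part of this diff, so the following is only a minimal sketch of what such a pre-flight check might look like (RELEASE and NAMESPACE are assumed variables, not names from the commit):

# Hypothetical pre-flight check before running "helm upgrade" (sketch only)
STATUS="$(helm status "$RELEASE" --namespace "$NAMESPACE" -o json | jq -r '.info.status')"
if [ "$STATUS" = "pending-install" ]; then
    # An interrupted first install has no previous revision, so remove it
    helm uninstall "$RELEASE" --namespace "$NAMESPACE" --wait
elif [ "$STATUS" = "pending-upgrade" ]; then
    # Roll back to the previous revision before attempting the upgrade again
    helm rollback "$RELEASE" --namespace "$NAMESPACE" --wait
fi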
@ -172,21 +169,18 @@ helm-upgrade {{ $releaseName }} {{ $chartName }} \
Template for a script that deletes a Helm release.
*/}}
{{- define "addon.helm.delete" -}}
{{- $name := index . 0 }}
{{- $config := index . 1 }}
{{-
$releaseNamespace := required
"release.namespace is required for a Helm job"
.release.namespace
$config.release.namespace
}}
{{-
$releaseName := required
"release.name is required for a Helm job"
.release.name
}}
helm-delete {{ $releaseName }} \
helm-delete {{ $name }} \
--namespace {{ $releaseNamespace }} \
--wait \
--timeout {{ .release.timeout }}
{{- range .crdManifests }}
--timeout 24h
{{- range $config.crdManifests }}
kubectl delete -f {{ . }}
{{- end }}
{{- end }}
@ -196,7 +190,7 @@ Template for a kustomization file for use with Kustomize.
*/}}
{{- define "addon.kustomize.kustomization" }}
{{- $ctx := index . 0 }}
{{- $config := index . 1 }}
{{- $config := index . 2 }}
apiVersion: kustomize.config.k8s.io/v1beta1
kind: Kustomization
{{- if $config.kustomizationTemplate }}
@ -211,52 +205,51 @@ kind: Kustomization
{{/*
Template for a script that installs or upgrades resources using Kustomize.
Because kustomize lacks the release semantics that we want, what we actually do
is convert the output of kustomize into an ephemeral Helm chart, which is then
installed with no values.
*/}}
{{- define "addon.kustomize.install" }}
kustomize build . | kubectl apply -f -
{{- range .watches }}
{{-
$namespace := required
"namespace is required for a resource to watch"
.namespace
}}
{{-
$kind := required
"kind is required for a resource to watch"
.kind
}}
{{-
$name := required
"name is required for a resource to watch"
.name
}}
kubectl -n {{ $namespace }} rollout status {{ $kind }}/{{ $name }}
{{- end }}
{{- $name := index . 0 }}
{{- $config := index . 1 }}
CHART_DIR="$(mktemp -d)"
kustomize build . | make-chart {{ $name }} "$CHART_DIR"
# Install the CRDs separately, as Helm doesn't apply updates to CRDs
for crdfile in $(find "$CHART_DIR/crds" -name '*.yaml'); do
    kubectl create -f "$crdfile" || kubectl replace -f "$crdfile"
done
helm-upgrade {{ $name }} "$CHART_DIR" \
--atomic \
--install \
--namespace kustomize-releases \
--create-namespace \
--skip-crds \
--wait \
--wait-for-jobs \
--timeout 24h \
$HELM_EXTRA_ARGS
{{- end }}
{{/*
Template for a script that deletes resources using Kustomize.
Because we are using Helm releases to manage resources templated by kustomize,
this just means deleting the Helm release. However, we still want to run
kustomize in order to generate the CRDs that need deleting.
*/}}
{{- define "addon.kustomize.delete" }}
kustomize build . | kubectl delete -f -
{{- range .watches }}
{{-
$namespace := required
"namespace is required for a resource to watch"
.namespace
}}
{{-
$kind := required
"kind is required for a resource to watch"
.kind
}}
{{-
$name := required
"name is required for a resource to watch"
.name
}}
kubectl -n {{ $namespace }} wait --for=delete {{ $kind }}/{{ $name }}
{{- end }}
{{- $name := index . 0 }}
{{- $config := index . 1 }}
CHART_DIR="$(mktemp -d)"
kustomize build . | make-chart {{ $name }} "$CHART_DIR"
helm-delete {{ $name }} \
--namespace kustomize-releases \
--wait \
--timeout 24h
for crdfile in $(find "$CHART_DIR/crds" -name '*.yaml'); do
    kubectl delete -f "$crdfile"
done
{{- end }}
{{/*
@ -289,8 +282,6 @@ helm:
version:
release:
namespace:
name:
timeout: 60m
# The template is rendered with the root context, then the result is merged into the dict
# Values from the template take precedence over the dict
values: {}
@ -300,13 +291,6 @@ kustomize:
# Values from the template take precedence over the dict
kustomization: {}
kustomizationTemplate:
# List of resources to watch to determine if the rollout is complete
# Resources should be usable with "kubectl rollout status"
watches: []
# The resources should be specified in the form
# namespace:
# kind:
# name:
custom:
# Scripts are treated as templates during rendering
install:

View File

@ -4,9 +4,13 @@
checksum of the configuration as an annotation, so that the job spec changes when
the configuration does.
Most elements of the job spec are immutable, so this guarantees we get new jobs
at the right times. Job labels are OK to update, and we include the revision there
in order to be able to order the jobs in time.
This guarantees we get new jobs only when there is a change to make. Even if there is
not a new job, the job labels are updated to include the current revision so that we
can order the jobs in time.
The job spec is immutable, which can cause issues with updates. To mitigate this, we
use the spec from the existing job when a job exists with the same name (and hence
the same checksum).
*/}}
{{- define "addon.job.install.spec" -}}
{{- $ctx := index . 0 -}}
@ -60,7 +64,7 @@ template:
- |
set -ex
{{- $labels := include "addon.job.selectorLabels" (list $ctx $dep "install") | fromYaml }}
{{- range $i, $label := keys $labels -}}
{{- range $i, $label := (keys $labels | sortAlpha) -}}
{{- if $i }}
LABELS="$LABELS,{{ $label }}={{ index $labels $label }}"
{{- else }}
@ -134,7 +138,14 @@ apiVersion: batch/v1
kind: Job
metadata:
{{- $checksum := include "addon.job.install.spec" . | sha256sum }}
name: {{ include "addon.job.name" (list $ctx $name "install") }}-{{ trunc 5 $checksum }}
{{- $jobName := printf "%s-%s" (include "addon.job.name" (list $ctx $name "install")) (trunc 5 $checksum) }}
name: {{ $jobName }}
labels: {{ include "addon.job.labels" (list $ctx $name "install") | nindent 4 }}
spec: {{ include "addon.job.install.spec" . | nindent 2 }}
spec:
{{- $existingJob := lookup "batch/v1" "Job" $ctx.Release.Namespace $jobName }}
{{- if $existingJob }}
{{- toYaml $existingJob.spec | nindent 2 }}
{{- else }}
{{- include "addon.job.install.spec" . | nindent 2 }}
{{- end }}
{{- end }}
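For context on why the existing job's spec is reused: almost all of a Job's spec, including the pod template, is immutable once the job has been created, so any attempt to patch it is rejected by the API server. A quick illustration with kubectl (resource names here are purely illustrative):

kubectl create job demo --image=busybox -- echo hello
# Patching the pod template fails with an error along the lines of:
#   Job.batch "demo" is invalid: spec.template: ... field is immutable
kubectl patch job demo --type merge \
    -p '{"spec":{"template":{"spec":{"containers":[{"name":"demo","image":"alpine"}]}}}}'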

View File

@ -30,18 +30,18 @@ stringData:
{{- end }}
{{- if eq $config.installType "helm" }}
values.yaml: |
{{- include "addon.helm.values" (list $ctx $config.helm) | nindent 4 }}
{{- include "addon.helm.values" (list $ctx $name $config.helm) | nindent 4 }}
install.sh: |
{{- include "addon.helm.install" $config.helm | nindent 4 }}
{{- include "addon.helm.install" (list $name $config.helm) | nindent 4 }}
delete.sh: |
{{- include "addon.helm.delete" $config.helm | nindent 4 }}
{{- include "addon.helm.delete" (list $name $config.helm) | nindent 4 }}
{{- else if eq $config.installType "kustomize" }}
kustomization.yaml: |
{{- include "addon.kustomize.kustomization" (list $ctx $config.kustomize) | nindent 4 }}
{{- include "addon.kustomize.kustomization" (list $ctx $name $config.kustomize) | nindent 4 }}
install.sh: |
{{- include "addon.kustomize.install" $config.kustomize | nindent 4 }}
{{- include "addon.kustomize.install" (list $name $config.kustomize) | nindent 4 }}
delete.sh: |
{{- include "addon.kustomize.delete" $config.kustomize | nindent 4 }}
{{- include "addon.kustomize.delete" (list $name $config.kustomize) | nindent 4 }}
{{- else if eq $config.installType "custom" }}
install.sh: |
{{-

View File

@ -304,10 +304,6 @@ helm:
release:
# The namespace for the release on the target cluster
namespace:
# The name of the release
name:
# The time to wait for the Helm release to install correctly
timeout: 60m
# The values for the release
# These can come from a dict or a template
# The template is rendered with the root context, then the result is merged into the dict
@ -322,13 +318,6 @@ kustomize:
# Values from the template take precedence over the dict
kustomization: {}
kustomizationTemplate:
# A list of resources to watch to determine when the addon has installed
watches: []
# These should be of the form
# - namespace: my-namespace
# kind: Deployment
# name: my-deployment
# condition: Available
# Options for a custom addon
custom:
# Script that installs the addon

View File

@ -152,16 +152,21 @@ value:
{{- end }}
{{/*
Produces the dependencies for an addon, ensuring uniqueness and only including
those that are enabled.
Produces the dependencies for an addon, ensuring uniqueness and a consistent ordering
and only including those that are enabled.
The result is returned as an object so it can be used with fromYaml.
*/}}
{{- define "cluster-addons.dependsOn.enabled" -}}
{{- $ctx := index . 0 }}
{{- $unique := (include "cluster-addons.dependsOn.all" . | fromYaml).value | default list | uniq }}
{{-
$sortedUnique := (include "cluster-addons.dependsOn.all" . | fromYaml).value |
default list |
uniq |
sortAlpha
}}
value:
{{- range $unique }}
{{- range $sortedUnique }}
{{- if eq (include "cluster-addons.enabled" (list $ctx .)) "true" }}
- {{ . }}
{{- end }}

View File

@ -32,10 +32,6 @@ kustomize:
{{- with .Values.openstack.ccm.kustomization }}
kustomization: {{ toYaml . | nindent 4 }}
{{- end }}
watches:
- namespace: kube-system
kind: DaemonSet
name: openstack-cloud-controller-manager
{{- end }}
{{-

View File

@ -18,10 +18,6 @@ kustomize:
{{- with .Values.cni.calico.kustomization }}
kustomization: {{ toYaml . | nindent 4 }}
{{- end }}
watches:
- namespace: kube-system
kind: DaemonSet
name: calico-node
{{- end }}
{{-

View File

@ -13,17 +13,6 @@ kustomize:
{{- with .Values.openstack.csiCinder.kustomization }}
kustomization: {{ toYaml . | nindent 4 }}
{{- end }}
watches:
- namespace: kube-system
{{- if semverCompare ">=1.23" (tpl .Values.kubernetesVersion . | trimPrefix "v") }}
kind: Deployment
{{- else }}
kind: StatefulSet
{{- end }}
name: csi-cinder-controllerplugin
- namespace: kube-system
kind: Daemonset
name: csi-cinder-nodeplugin
{{- with .Values.openstack.csiCinder.storageClass }}
{{- if .enabled }}
extraFiles:

View File

@ -10,10 +10,6 @@ kustomize:
{{- with .Values.metricsServer.kustomization }}
kustomization: {{ toYaml . | nindent 4 }}
{{- end }}
watches:
- namespace: kube-system
kind: Deployment
name: metrics-server
{{- end }}
{{-

View File

@ -92,8 +92,6 @@ cni:
version: 1.10.4
release:
namespace: kube-system
name: cilium
timeout: 30m
# See https://docs.cilium.io/en/stable/gettingstarted/k8s-install-helm/ for details
values:
ipam:
@ -198,8 +196,6 @@ certManager:
version: v1.5.3
release:
namespace: cert-manager
name: cert-manager
timeout: 30m
# See https://cert-manager.io/docs/installation/helm/ for available values
values:
# By default, make sure the cert-manager CRDs are installed
@ -228,8 +224,6 @@ ingress:
version: 4.0.1
release:
namespace: ingress-nginx
name: ingress-nginx
timeout: 30m
# See https://github.com/kubernetes/ingress-nginx/tree/main/charts/ingress-nginx#configuration
values: {}
@ -252,8 +246,6 @@ monitoring:
version: 30.0.1
release:
namespace: monitoring-system
name: kube-prometheus-stack
timeout: 30m
values: {}
# Map of extra addons in the form "component name" -> "addon spec"

View File

@ -14,8 +14,9 @@ RUN groupadd --gid $UTILS_GID $UTILS_GROUP && \
$UTILS_USER
RUN apt-get update && \
apt-get install -y curl jq tini && \
rm -rf /var/lib/apt/lists/*
apt-get install -y curl git jq python3 python3-pip tini && \
rm -rf /var/lib/apt/lists/* && \
pip install --no-cache-dir pyyaml
COPY --from=hairyhenderson/gomplate:v3.10.0 /gomplate /usr/bin/gomplate
@ -96,7 +97,7 @@ RUN set -ex; \
kustomize version
ENV KUBECTL_VN_LATEST v1.23
COPY ./scripts/* /usr/bin/
COPY ./bin/* /usr/bin/
USER $UTILS_UID
ENTRYPOINT ["tini", "-g", "--"]

View File

@ -9,7 +9,6 @@ RELEASE=$1
shift
NAMESPACE_ARG=
TIMEOUT_ARG=
HELM_ARGS=
while :; do
@ -22,10 +21,6 @@ while :; do
NAMESPACE_ARG="$1 $2"
shift
;;
--timeout)
TIMEOUT_ARG="$1 $2"
shift
;;
?*)
HELM_ARGS="$HELM_ARGS $1"
;;
@ -38,5 +33,5 @@ done
set -e
if helm-exists $RELEASE $NAMESPACE_ARG; then
exec helm delete $RELEASE $NAMESPACE_ARG $TIMEOUT_ARG $HELM_ARGS
exec helm delete $RELEASE $NAMESPACE_ARG $HELM_ARGS
fi
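With the --timeout case removed, a --timeout flag now simply falls through into HELM_ARGS and is forwarded to helm delete directly, along with any other unrecognised flags. For example, the invocation generated by the kustomize delete template earlier in this commit would be (my-addon is an illustrative release name):

helm-delete my-addon \
    --namespace kustomize-releases \
    --wait \
    --timeout 24h

Since helm delete accepts --wait and --timeout itself, no special handling is needed in the wrapper.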

utils/bin/make-chart (new executable file, 58 lines)
View File

@ -0,0 +1,58 @@
#!/usr/bin/env python3
#####
# This script accepts a name, a target directory and a stream of manifests
# on stdin and creates an ephemeral Helm chart in the specified directory
#
# This allows Helm release semantics to be used with manifests generated by
# other tools that are missing that functionality, e.g. kustomize
#
# In particular, the Helm release semantics give us:
# * The tracking of resources across releases
# * Removal of resources that no longer exist
# * Logic for waiting for resources to become ready
#####
import pathlib
import sys

import yaml


CHART_YAML = """\
apiVersion: v2
name: {name}
version: 0.1.0
"""


def main(name, directory):
    # Make sure that the crds and templates directories exist within the chart directory
    chart_directory = pathlib.Path(directory).resolve()
    crds_directory = chart_directory / "crds"
    crds_directory.mkdir(parents = True, exist_ok = True)
    templates_directory = chart_directory / "templates"
    templates_directory.mkdir(parents = True, exist_ok = True)
    # Write the Chart.yaml file
    chart_file = chart_directory / "Chart.yaml"
    with chart_file.open("w") as f:
        f.write(CHART_YAML.format(name = name))
    # For each YAML document on stdin, write it to a separate file in the chart directory
    # CRDs go in the crds directory and everything else in the templates directory
    for document in yaml.safe_load_all(sys.stdin):
        # Skip empty documents, e.g. from a trailing document separator
        if not document:
            continue
        filename = "{}_{}_{}.yaml".format(
            document["apiVersion"].replace("/", "_"),
            document["kind"].lower(),
            document["metadata"]["name"]
        )
        if document["kind"] == "CustomResourceDefinition":
            path = crds_directory / filename
        else:
            path = templates_directory / filename
        with path.open("w") as f:
            yaml.safe_dump(document, f)


if __name__ == "__main__":
    name, directory = sys.argv[1:]
    main(name, directory)
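As a quick end-to-end check of the script (the manifest, names and paths here are illustrative, not from the commit), a single Deployment piped through make-chart should yield a chart skeleton like this:

echo 'apiVersion: apps/v1
kind: Deployment
metadata:
  name: demo
spec: {}' | make-chart demo /tmp/demo-chart
find /tmp/demo-chart -type f
# Expected output (in some order):
#   /tmp/demo-chart/Chart.yaml
#   /tmp/demo-chart/templates/apps_v1_deployment_demo.yaml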