{{/*
In order for changes to the kubeadm config to be applied to existing machines,
kubeadm config templates must be treated as immutable. This means that we must
create a new one when the spec changes and update the ref in the corresponding
machine deployment, which will trigger a rollout of new machines with the new
kubeadm config.

If the kubeadm config is updated in place, it will apply to new machines (e.g. on
a scale out operation), but will not be applied to existing machines.

To do this, we create a new template whenever the checksum of the spec changes.
*/}}

{{- /*
Renders the joinConfiguration fragment carrying the node labels for a node group.

Takes a two-element list: the root template context and the node group object.
*/}}
{{- define "openstack-cluster.nodegroup.kct.spec.nodeLabels" -}}
{{- $ctx := index . 0 }}
{{- $nodeGroup := index . 1 }}
{{- /* Every node gets a "<projectPrefix>/node-group" label identifying its group */}}
{{- $defaultLabels := dict (printf "%s/node-group" $ctx.Values.projectPrefix) $nodeGroup.name }}
{{- /* User-specified nodeLabels (if any) take precedence over the default label on key collisions */}}
{{- $nodeLabels := dig "nodeLabels" dict $nodeGroup | mergeOverwrite $defaultLabels }}
joinConfiguration: {{ include "openstack-cluster.nodeRegistration.nodeLabels" $nodeLabels | nindent 2 }}
{{- end }}
{{- /*
Produces the full KubeadmConfigTemplate spec for a node group by combining the
partial specs: the node-label joinConfiguration, the group's own
kubeadmConfigSpec, the OS distro defaults (minus initConfiguration, which is
not relevant for joining worker nodes) and the preKubeadmCommands from the
patch config spec. The combination semantics are those of
"openstack-cluster.mergeConcatMany"; the trailing fromYaml | toYaml round-trip
normalises the rendered output.

Takes a two-element list: the root template context and the node group object.
*/}}
{{- define "openstack-cluster.nodegroup.kct.spec" -}}
{{- $ctx := index . 0 }}
{{- $nodeGroup := index . 1 }}
{{-
  list
    (include "openstack-cluster.nodegroup.kct.spec.nodeLabels" (list $ctx $nodeGroup) | fromYaml)
    (include "openstack-cluster.kubeadmConfigSpec" (list $ctx $nodeGroup.kubeadmConfigSpec) | fromYaml)
    (omit (include "openstack-cluster.osDistroKubeadmConfigSpec" (list $ctx) | fromYaml) "initConfiguration")
    (pick (include "openstack-cluster.patchConfigSpec" (list $ctx) | fromYaml) "preKubeadmCommands") |
  include "openstack-cluster.mergeConcatMany" |
  fromYaml |
  toYaml
}}
{{- end }}
{{- /*
SHA-256 checksum of the rendered spec for a node group, used to derive an
immutable, content-addressed template name. Takes the same two-element list
as "openstack-cluster.nodegroup.kct.spec".
*/}}
{{- define "openstack-cluster.nodegroup.kct.checksum" -}}
{{- include "openstack-cluster.nodegroup.kct.spec" . | sha256sum }}
{{- end }}
{{- /*
Name for the KubeadmConfigTemplate of a node group: the component name
suffixed with the first 8 hex characters of the spec checksum, so that any
spec change yields a new resource name (and hence a machine rollout).

Takes a two-element list: the root template context and the node group object.
*/}}
{{- define "openstack-cluster.nodegroup.kct.name" -}}
{{- $ctx := index . 0 }}
{{- $nodeGroup := index . 1 }}
{{- $checksum := include "openstack-cluster.nodegroup.kct.checksum" . }}
{{- include "openstack-cluster.componentName" (list $ctx $nodeGroup.name) }}-{{ trunc 8 $checksum }}
{{- end }}
{{- /* Emit one KubeadmConfigTemplate document per node group */}}
{{- range $nodeGroupOverrides := .Values.nodeGroups }}
{{- /* Overrides are merged onto a deep copy of the defaults so the shared defaults are not mutated between iterations */}}
{{- $nodeGroup := deepCopy $.Values.nodeGroupDefaults | mustMerge $nodeGroupOverrides }}
---
apiVersion: bootstrap.cluster.x-k8s.io/v1beta1
kind: KubeadmConfigTemplate
metadata:
  name: {{ include "openstack-cluster.nodegroup.kct.name" (list $ $nodeGroup) }}
  labels:
    {{- include "openstack-cluster.componentLabels" (list $ "worker") | nindent 4 }}
    {{ $.Values.projectPrefix }}/node-group: {{ $nodeGroup.name }}
  annotations:
    {{ $.Values.projectPrefix }}/template-checksum: {{ include "openstack-cluster.nodegroup.kct.checksum" (list $ $nodeGroup) }}
    # Cluster API complains when old templates disappear before it has rolled all the machines over
    # When deploying with Helm, leave the resource behind and let Cluster API clean it up
    helm.sh/resource-policy: keep
    # Allow Argo to clean up old templates by ignoring the non-controller owner references,
    # but only after the cluster has become healthy again
    argocd.argoproj.io/sync-options: "ControllerReferencesOnly=true,PruneLast=true"
spec:
  template:
    spec: {{ include "openstack-cluster.nodegroup.kct.spec" (list $ $nodeGroup) | nindent 6 }}
{{- end }}