Add NoSchedule tolerations for application charts
Add a toleration for the node-role.kubernetes.io/master:NoSchedule taint. This taint will be restored to all standard (non-AIO) master nodes to prevent user pods from being scheduled and run. These workloads will be scheduled and run on a worker node. This change will ensure that the rbd/cephfs provisioner and ceph audit pods will continue to run on the master nodes (as designed). The following tests were executed: - Using the existing app, remove the application, add the node-role.kubernetes.io/master:NoSchedule taint and confirmed that upon application-apply that the application will not apply as the pods will be in a Pending state. - With the existing application applied, add the node-role.kubernetes.io/master:NoSchedule taint and execute an application-update to a new application with these changes. Confirmed that the application updates successfully. Confirmed that toleration is present when describing the pod(s). Change-Id: I0a6368c717d336ac6c024bda596c283d2943285b Depends-On: https://review.opendev.org/c/starlingx/config/+/812629 Story: 2009232 Task: 43346 Signed-off-by: Robert Church <robert.church@windriver.com>
This commit is contained in:
parent
660266cff0
commit
e20c067e50
@ -1,4 +1,6 @@
|
||||
#
|
||||
# Copyright (c) 2021 Wind River Systems, Inc.
|
||||
#
|
||||
# SPDX-License-Identifier: Apache-2.0
|
||||
#
|
||||
|
||||
|
@ -48,10 +48,10 @@ spec:
|
||||
restartPolicy: OnFailure
|
||||
nodeSelector:
|
||||
{{ .Values.labels.job.node_selector_key }}: {{ .Values.labels.job.node_selector_value }}
|
||||
{{- with .Values.tolerations }}
|
||||
{{- with .Values.tolerations }}
|
||||
tolerations:
|
||||
{{ toYaml . | indent 12 }}
|
||||
{{- end }}
|
||||
{{- end }}
|
||||
volumes:
|
||||
- name: ceph-pools-bin
|
||||
configMap:
|
||||
|
@ -92,7 +92,11 @@ spec:
|
||||
readOnly: true
|
||||
{{- end }}
|
||||
restartPolicy: OnFailure
|
||||
{{- if .Values.global.nodeSelector }}
|
||||
{{- if .Values.global.nodeSelector }}
|
||||
nodeSelector:
|
||||
{{ .Values.global.nodeSelector | toYaml | trim | indent 8 }}
|
||||
{{- end }}
|
||||
{{ .Values.global.nodeSelector | toYaml | trim | indent 8 }}
|
||||
{{- end }}
|
||||
{{- with .Values.global.tolerations }}
|
||||
tolerations:
|
||||
{{ toYaml . | indent 8 }}
|
||||
{{- end}}
|
||||
|
@ -25,13 +25,20 @@ global:
|
||||
#
|
||||
# Node tolerations for cephfs-provisioner scheduling to nodes with taints.
|
||||
# Ref: https://kubernetes.io/docs/concepts/configuration/taint-and-toleration/
|
||||
# Example:
|
||||
# [
|
||||
# {
|
||||
# "key": "node-role.kubernetes.io/master",
|
||||
# "operator": "Exists"
|
||||
# }
|
||||
# ]
|
||||
# Examples :
|
||||
# tolerations:
|
||||
# [
|
||||
# {
|
||||
# key: "node-role.kubernetes.io/master",
|
||||
# operator: "Exists",
|
||||
# effect: "NoSchedule"
|
||||
# }
|
||||
# ]
|
||||
#
|
||||
# tolerations:
|
||||
# - key: "node-role.kubernetes.io/master"
|
||||
# operator: "Exists"
|
||||
# effect: "NoSchedule"
|
||||
#
|
||||
tolerations: []
|
||||
# If configured, resources will set the requests/limits field to the Pod.
|
||||
|
@ -26,9 +26,9 @@ spec:
|
||||
{{- if or .Values.global.rbac .Values.global.reuseRbac }}
|
||||
serviceAccountName: {{ .Values.rbac.serviceAccount }}
|
||||
{{- end }}
|
||||
{{- if .Values.global.tolerations }}
|
||||
{{- with .Values.global.tolerations }}
|
||||
tolerations:
|
||||
{{ .Values.global.tolerations | toYaml | trim | indent 8 }}
|
||||
{{ toYaml . | indent 8 }}
|
||||
{{- end }}
|
||||
{{- if .Values.global.nodeSelector }}
|
||||
nodeSelector:
|
||||
|
@ -47,10 +47,10 @@ spec:
|
||||
nodeSelector:
|
||||
{{ .Values.global.nodeSelector | toYaml | trim | indent 8 }}
|
||||
{{- end }}
|
||||
{{- if .Values.global.tolerations }}
|
||||
{{- with .Values.global.tolerations }}
|
||||
tolerations:
|
||||
{{ .Values.global.tolerations | toYaml | trim | indent 8 }}
|
||||
{{- end}}
|
||||
{{ toYaml . | indent 8 }}
|
||||
{{- end }}
|
||||
{{- if .Values.global.resources }}
|
||||
resources:
|
||||
{{ .Values.global.resources | toYaml | trim | indent 8 }}
|
||||
|
@ -192,10 +192,14 @@ spec:
|
||||
- name: config-volume-{{- $root.Values.global.name }}
|
||||
mountPath: {{ $mount }}
|
||||
{{- end }}
|
||||
{{- if .Values.global.nodeSelector }}
|
||||
{{- if $root.Values.global.nodeSelector }}
|
||||
nodeSelector:
|
||||
{{ .Values.global.nodeSelector | toYaml | trim | indent 8 }}
|
||||
{{- end }}
|
||||
{{ $root.Values.global.nodeSelector | toYaml | trim | indent 8 }}
|
||||
{{- end }}
|
||||
{{- with $root.Values.global.tolerations }}
|
||||
tolerations:
|
||||
{{ toYaml . | indent 8 }}
|
||||
{{- end}}
|
||||
|
||||
---
|
||||
# This ConfigMap is needed because we're not using ceph's helm chart
|
||||
|
@ -51,15 +51,23 @@ global:
|
||||
#
|
||||
# Node tolerations for rbd-volume-provisioner scheduling to nodes with taints.
|
||||
# Ref: https://kubernetes.io/docs/concepts/configuration/taint-and-toleration/
|
||||
# Example:
|
||||
# [
|
||||
# {
|
||||
# "key": "node-role.kubernetes.io/master",
|
||||
# "operator": "Exists"
|
||||
# }
|
||||
# ]
|
||||
# Examples :
|
||||
# tolerations:
|
||||
# [
|
||||
# {
|
||||
# key: "node-role.kubernetes.io/master",
|
||||
# operator: "Exists",
|
||||
# effect: "NoSchedule"
|
||||
# }
|
||||
# ]
|
||||
#
|
||||
# tolerations:
|
||||
# - key: "node-role.kubernetes.io/master"
|
||||
# operator: "Exists"
|
||||
# effect: "NoSchedule"
|
||||
#
|
||||
tolerations: []
|
||||
#
|
||||
# If configured, resources will set the requests/limits field to the Pod.
|
||||
# Ref: https://kubernetes.io/docs/concepts/configuration/manage-compute-resources-container/
|
||||
# Example:
|
||||
|
@ -36,6 +36,12 @@ data:
|
||||
- type: job
|
||||
labels:
|
||||
app: rbd-provisioner
|
||||
values:
|
||||
global:
|
||||
tolerations:
|
||||
- key: "node-role.kubernetes.io/master"
|
||||
operator: "Exists"
|
||||
effect: "NoSchedule"
|
||||
source:
|
||||
type: tar
|
||||
location: http://172.17.0.1:8080/helm_charts/stx-platform/rbd-provisioner-0.1.0.tgz
|
||||
@ -65,6 +71,12 @@ data:
|
||||
- type: job
|
||||
labels:
|
||||
app: cephfs-provisioner
|
||||
values:
|
||||
global:
|
||||
tolerations:
|
||||
- key: "node-role.kubernetes.io/master"
|
||||
operator: "Exists"
|
||||
effect: "NoSchedule"
|
||||
source:
|
||||
type: tar
|
||||
location: http://172.17.0.1:8080/helm_charts/stx-platform/cephfs-provisioner-0.1.0.tgz
|
||||
@ -94,6 +106,11 @@ data:
|
||||
- type: job
|
||||
labels:
|
||||
app: ceph-pools-audit
|
||||
values:
|
||||
tolerations:
|
||||
- key: "node-role.kubernetes.io/master"
|
||||
operator: "Exists"
|
||||
effect: "NoSchedule"
|
||||
source:
|
||||
type: tar
|
||||
location: http://172.17.0.1:8080/helm_charts/stx-platform/ceph-pools-audit-0.1.0.tgz
|
||||
|
Loading…
x
Reference in New Issue
Block a user