Add NoSchedule tolerations for application charts
Add a toleration for the node-role.kubernetes.io/master:NoSchedule
taint. This taint will be restored on all standard (non-AIO) master
nodes to prevent user pods from being scheduled and run there; those
workloads will be scheduled and run on worker nodes instead. This
change ensures that the rbd/cephfs provisioner and ceph audit pods
continue to run on the master nodes (as designed).

The following tests were executed:
- Using the existing app: remove the application, add the
  node-role.kubernetes.io/master:NoSchedule taint, and confirm on
  application-apply that the application does not apply because its
  pods stay in a Pending state.
- With the existing application applied: add the
  node-role.kubernetes.io/master:NoSchedule taint and execute an
  application-update to a new application containing these changes.
  Confirmed that the application updates successfully and that the
  toleration is present when describing the pod(s).

Change-Id: I0a6368c717d336ac6c024bda596c283d2943285b
Depends-On: https://review.opendev.org/c/starlingx/config/+/812629
Story: 2009232
Task: 43346
Signed-off-by: Robert Church <robert.church@windriver.com>
parent 660266cff0
commit e20c067e50
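In short, every affected chart gains the standard Kubernetes toleration
below, which allows its pods to be scheduled onto master nodes carrying
the node-role.kubernetes.io/master:NoSchedule taint. This is the
pod-spec fragment as rendered from the overrides in this diff:

tolerations:
- key: "node-role.kubernetes.io/master"
  operator: "Exists"
  effect: "NoSchedule"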
@@ -1,4 +1,6 @@
 #
+# Copyright (c) 2021 Wind River Systems, Inc.
+#
 # SPDX-License-Identifier: Apache-2.0
 #
 
@@ -48,10 +48,10 @@ spec:
           restartPolicy: OnFailure
           nodeSelector:
             {{ .Values.labels.job.node_selector_key }}: {{ .Values.labels.job.node_selector_value }}
 {{- with .Values.tolerations }}
           tolerations:
 {{ toYaml . | indent 12 }}
 {{- end }}
           volumes:
           - name: ceph-pools-bin
             configMap:
@@ -92,7 +92,11 @@ spec:
           readOnly: true
 {{- end }}
       restartPolicy: OnFailure
 {{- if .Values.global.nodeSelector }}
       nodeSelector:
 {{ .Values.global.nodeSelector | toYaml | trim | indent 8 }}
 {{- end }}
+{{- with .Values.global.tolerations }}
+      tolerations:
+{{ toYaml . | indent 8 }}
+{{- end}}
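When global.tolerations is non-empty, the added block renders the list
straight into the pod spec. A sketch of input and output, assuming the
master toleration from this change (output indentation follows the
literal "tolerations:" line plus "indent 8"; toYaml may drop the
quotes around plain strings):

# values override (input):
global:
  tolerations:
  - key: "node-role.kubernetes.io/master"
    operator: "Exists"
    effect: "NoSchedule"

# rendered pod-spec fragment (output):
      tolerations:
        - key: "node-role.kubernetes.io/master"
          operator: "Exists"
          effect: "NoSchedule"

With the default empty list, "with" skips the block entirely, so no
dangling "tolerations:" key is emitted.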
@@ -25,13 +25,20 @@ global:
   #
   # Node tolerations for cephfs-provisioner scheduling to nodes with taints.
   # Ref: https://kubernetes.io/docs/concepts/configuration/taint-and-toleration/
-  # Example:
-  #  [
-  #    {
-  #      "key": "node-role.kubernetes.io/master",
-  #      "operator": "Exists"
-  #    }
-  #  ]
+  # Examples:
+  #  tolerations:
+  #  [
+  #    {
+  #      key: "node-role.kubernetes.io/master",
+  #      operator: "Exists",
+  #      effect: "NoSchedule"
+  #    }
+  #  ]
+  #
+  #  tolerations:
+  #    - key: "node-role.kubernetes.io/master"
+  #      operator: "Exists"
+  #      effect: "NoSchedule"
   #
   tolerations: []
   # If configured, resources will set the requests/limits field to the Pod.
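The updated comment examples add effect: "NoSchedule" so the
toleration matches the master taint specifically; with
operator: "Exists" and no effect, a toleration would match taints
with any effect. The two documented forms are the same list, once in
YAML flow style and once in block style:

# flow style
tolerations: [{key: "node-role.kubernetes.io/master", operator: "Exists", effect: "NoSchedule"}]

# block style
tolerations:
- key: "node-role.kubernetes.io/master"
  operator: "Exists"
  effect: "NoSchedule"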
@@ -26,9 +26,9 @@ spec:
 {{- if or .Values.global.rbac .Values.global.reuseRbac }}
       serviceAccountName: {{ .Values.rbac.serviceAccount }}
 {{- end }}
-{{- if .Values.global.tolerations }}
+{{- with .Values.global.tolerations }}
       tolerations:
-{{ .Values.global.tolerations | toYaml | trim | indent 8 }}
+{{ toYaml . | indent 8 }}
 {{- end }}
 {{- if .Values.global.nodeSelector }}
       nodeSelector:
@@ -47,10 +47,10 @@ spec:
       nodeSelector:
 {{ .Values.global.nodeSelector | toYaml | trim | indent 8 }}
 {{- end }}
-{{- if .Values.global.tolerations }}
+{{- with .Values.global.tolerations }}
       tolerations:
-{{ .Values.global.tolerations | toYaml | trim | indent 8 }}
-{{- end}}
+{{ toYaml . | indent 8 }}
+{{- end }}
 {{- if .Values.global.resources }}
       resources:
 {{ .Values.global.resources | toYaml | trim | indent 8 }}
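The if-to-with switches above are behavior-preserving: "with" guards
the block just as "if" did (Go templates treat an empty list as
false, so the default tolerations: [] still renders nothing), but it
also rebinds the dot to .Values.global.tolerations, which is why the
pipeline shrinks to {{ toYaml . | indent 8 }}. A minimal sketch of the
equivalence for non-empty lists:

{{- /* old form: explicit guard plus the full value path */}}
{{- if .Values.global.tolerations }}
      tolerations:
{{ .Values.global.tolerations | toYaml | trim | indent 8 }}
{{- end }}

{{- /* new form: "." inside the block is the list itself */}}
{{- with .Values.global.tolerations }}
      tolerations:
{{ toYaml . | indent 8 }}
{{- end }}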
@@ -192,10 +192,14 @@ spec:
       - name: config-volume-{{- $root.Values.global.name }}
         mountPath: {{ $mount }}
 {{- end }}
-{{- if .Values.global.nodeSelector }}
+{{- if $root.Values.global.nodeSelector }}
       nodeSelector:
-{{ .Values.global.nodeSelector | toYaml | trim | indent 8 }}
+{{ $root.Values.global.nodeSelector | toYaml | trim | indent 8 }}
 {{- end }}
+{{- with $root.Values.global.tolerations }}
+      tolerations:
+{{ toYaml . | indent 8 }}
+{{- end}}
 
 ---
 # This ConfigMap is needed because we're not using ceph's helm chart
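The $root change in this hunk is a genuine bug fix, not just style:
the block sits inside a range loop (note the $mount variable above),
where the dot is rebound to the loop element, so a bare
.Values.global.nodeSelector would fail to resolve. A minimal sketch of
the pattern, assuming $root is captured before the loop and a
hypothetical mountPaths list:

{{- $root := . }}
{{- range $mount := $root.Values.global.mountPaths }}
{{- /* "." is now the current list element, not the chart root,
       so chart values must be reached through $root */}}
{{- if $root.Values.global.nodeSelector }}
      nodeSelector:
{{ $root.Values.global.nodeSelector | toYaml | trim | indent 8 }}
{{- end }}
{{- end }}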
@@ -51,15 +51,23 @@ global:
   #
   # Node tolerations for rbd-volume-provisioner scheduling to nodes with taints.
   # Ref: https://kubernetes.io/docs/concepts/configuration/taint-and-toleration/
-  # Example:
-  #  [
-  #    {
-  #      "key": "node-role.kubernetes.io/master",
-  #      "operator": "Exists"
-  #    }
-  #  ]
+  # Examples:
+  #  tolerations:
+  #  [
+  #    {
+  #      key: "node-role.kubernetes.io/master",
+  #      operator: "Exists",
+  #      effect: "NoSchedule"
+  #    }
+  #  ]
+  #
+  #  tolerations:
+  #    - key: "node-role.kubernetes.io/master"
+  #      operator: "Exists"
+  #      effect: "NoSchedule"
   #
   tolerations: []
+  #
   # If configured, resources will set the requests/limits field to the Pod.
   # Ref: https://kubernetes.io/docs/concepts/configuration/manage-compute-resources-container/
   # Example:
@@ -36,6 +36,12 @@ data:
       - type: job
         labels:
           app: rbd-provisioner
+      values:
+        global:
+          tolerations:
+          - key: "node-role.kubernetes.io/master"
+            operator: "Exists"
+            effect: "NoSchedule"
       source:
         type: tar
         location: http://172.17.0.1:8080/helm_charts/stx-platform/rbd-provisioner-0.1.0.tgz
@@ -65,6 +71,12 @@ data:
       - type: job
         labels:
           app: cephfs-provisioner
+      values:
+        global:
+          tolerations:
+          - key: "node-role.kubernetes.io/master"
+            operator: "Exists"
+            effect: "NoSchedule"
       source:
         type: tar
         location: http://172.17.0.1:8080/helm_charts/stx-platform/cephfs-provisioner-0.1.0.tgz
@@ -94,6 +106,11 @@ data:
       - type: job
         labels:
           app: ceph-pools-audit
+      values:
+        tolerations:
+        - key: "node-role.kubernetes.io/master"
+          operator: "Exists"
+          effect: "NoSchedule"
       source:
         type: tar
         location: http://172.17.0.1:8080/helm_charts/stx-platform/ceph-pools-audit-0.1.0.tgz
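Note that the ceph-pools-audit override sets tolerations at the top
level of its values (matching the .Values.tolerations reference in its
template), while the two provisioner charts set global.tolerations.
The testing notes above verify the result by describing the pods; a
sketch of the expected fragment, e.g. from
"kubectl get pod <pod> -o yaml" (the not-ready/unreachable entries are
the standard tolerations Kubernetes adds automatically):

tolerations:
- key: "node-role.kubernetes.io/master"
  operator: "Exists"
  effect: "NoSchedule"
- key: "node.kubernetes.io/not-ready"
  operator: "Exists"
  effect: "NoExecute"
  tolerationSeconds: 300
- key: "node.kubernetes.io/unreachable"
  operator: "Exists"
  effect: "NoExecute"
  tolerationSeconds: 300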