
Add new manifest files for the FluxCD version of platform-integ-apps

This creates two tarballs, one for armada and one for FluxCD

* job-ceph-pools-audit.yaml: changed to support helm v3

Differences from armada-app:
* Helm-toolkit release is disabled as it is included in other charts

Test (AIO-SX):
PASS: Application is applied

Story: 2009138
Task: 45118

Depends-on: https://review.opendev.org/c/starlingx/config/+/838321

Signed-off-by: Lucas Cavalcante <lucasmedeiros.cavalcante@windriver.com>
Change-Id: I417d6b7cf6a12968f36a1045dc98c2c26579a9b8
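For context, the template below only renders when .Values.manifests.job_ceph_pools_audit is true and is driven entirely by a handful of values paths. A minimal overrides sketch follows; the keys mirror what the template references, but every value (node-selector label, image tag, configmap name, cron schedule, storage tier settings) is purely illustrative, and the real defaults live in the chart's values.yaml:

# Illustrative values only; keys match the template's .Values references,
# but the concrete values here are assumptions, not the chart defaults.
manifests:
  job_ceph_pools_audit: true
labels:
  job:
    node_selector_key: node-role.kubernetes.io/control-plane   # assumed label
    node_selector_value: ""
images:
  tags:
    ceph_config_helper: docker.io/openstackhelm/ceph-config-helper:latest   # placeholder image
ceph_client:
  configmap: ceph-etc   # assumed configmap name
jobs:
  job_ceph_pools_audit:
    cron: "*/5 * * * *"
    startingDeadlineSeconds: 200
    history:
      success: 1
      failed: 1
conf:
  ceph:
    storage_tiers:
      - name: storage-tier
        replication: 2
        min_replication: 1
        crush_rule_name: storage_tier_ruleset
tolerations: []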
{{/*
#
# Copyright (c) 2020 Wind River Systems, Inc.
#
# SPDX-License-Identifier: Apache-2.0
#
*/}}

{{- if .Values.manifests.job_ceph_pools_audit }}
{{- $envAll := . }}

{{- $serviceAccountName := "ceph-pools-audit" }}
{{ tuple $envAll "job_ceph_pools_audit" $serviceAccountName | include "helm-toolkit.snippets.kubernetes_pod_rbac_serviceaccount" }}
---
#
# The CronJob makes sure all the Ceph pools have the right replication,
# as present in the attributes of the Ceph backends.
# This is needed for:
#   - charts that don't manage pool configuration
#   - pools created dynamically by services that may not have the current
#     pool configuration uploaded (ex: swift)
#   - when replication is changed and we don't want to reinstall all the
#     charts that created Ceph pools
#
apiVersion: batch/v1beta1
kind: CronJob
metadata:
  name: ceph-pools-audit
spec:
  schedule: {{ .Values.jobs.job_ceph_pools_audit.cron | quote }}
  successfulJobsHistoryLimit: {{ .Values.jobs.job_ceph_pools_audit.history.success }}
  failedJobsHistoryLimit: {{ .Values.jobs.job_ceph_pools_audit.history.failed }}
  concurrencyPolicy: Forbid
  startingDeadlineSeconds: {{ .Values.jobs.job_ceph_pools_audit.startingDeadlineSeconds }}
  jobTemplate:
    metadata:
      name: "{{$envAll.Release.Name}}"
      namespace: {{ $envAll.Release.Namespace }}
      labels:
        app: ceph-pools-audit
    spec:
      template:
        metadata:
          labels:
            app: ceph-pools-audit
        spec:
          serviceAccountName: {{ $serviceAccountName }}
          restartPolicy: OnFailure
          nodeSelector:
            {{ .Values.labels.job.node_selector_key }}: {{ .Values.labels.job.node_selector_value | toString | quote }}
          {{- with .Values.tolerations }}
          tolerations:
{{ toYaml . | indent 12 }}
          {{- end }}
          volumes:
          - name: ceph-pools-bin
            configMap:
              name: ceph-pools-bin
              defaultMode: 0555
          - name: etcceph
            emptyDir: {}
          - name: ceph-etc
            configMap:
              name: {{ $envAll.Values.ceph_client.configmap }}
              defaultMode: 0444
          containers:
          {{- range $tierConfig := $envAll.Values.conf.ceph.storage_tiers }}
            - name: ceph-pools-audit-{{- $tierConfig.name }}
              image: {{ $envAll.Values.images.tags.ceph_config_helper | quote }}
              env:
                - name: RBD_POOL_REPLICATION
                  value: {{ $tierConfig.replication | quote }}
                - name: RBD_POOL_MIN_REPLICATION
                  value: {{ $tierConfig.min_replication | quote }}
                - name: RBD_POOL_CRUSH_RULE_NAME
                  value: {{ $tierConfig.crush_rule_name | quote }}
              command:
                - /tmp/ceph-pools-audit.sh
              volumeMounts:
                - name: ceph-pools-bin
                  mountPath: /tmp/ceph-pools-audit.sh
                  subPath: ceph-pools-audit.sh
                  readOnly: true
                - name: etcceph
                  mountPath: /etc/ceph
                - name: ceph-etc
                  mountPath: /etc/ceph/ceph.conf
                  subPath: ceph.conf
                  readOnly: true
          {{- end }}
{{- end }}
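In the FluxCD flavour of platform-integ-apps, a chart like this is expected to be reconciled by helm-controller through a HelmRelease resource rather than an Armada chart entry. The sketch below is only an assumption of how such a resource could look; the release name, namespace, chart source, and overrides Secret are hypothetical and not taken from this change:

# Hypothetical HelmRelease sketch; names, namespace, and Secret are illustrative.
apiVersion: helm.toolkit.fluxcd.io/v2beta1
kind: HelmRelease
metadata:
  name: ceph-pools-audit          # hypothetical name
  namespace: kube-system          # hypothetical namespace
spec:
  releaseName: ceph-pools-audit
  interval: 5m
  chart:
    spec:
      chart: ceph-pools-audit
      sourceRef:
        kind: HelmRepository
        name: stx-platform        # assumed chart source
  valuesFrom:
    - kind: Secret
      name: ceph-pools-audit-static-overrides        # assumed overrides Secret
      valuesKey: ceph-pools-audit-static-overrides.yaml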