platform-armada-app/platform-helm/debian/deb_folder/patches/0008-ceph-csi-rbd-add-storage-init.yaml.patch
Migration to ceph-csi for RBD/CephFS provisioners (commit 69c37e9978)
Remove the old RBD/CephFS provisioners and replace them with a
currently supported and actively evolving set of provisioners based on
https://github.com/ceph/ceph-csi version 3.6.2.

Test Plan:
PASS: AIO-SX app upload/apply/remove/delete/update
PASS: AIO-DX app upload/apply/remove/delete
PASS: Storage 2+2+2 app upload/apply/remove/delete
PASS: Create pvc using storageclass general (rbd) on SX/DX/Storage
PASS: Create pod using rbd pvc on SX/DX/Storage
PASS: Create pvc using storageclass cephfs on SX/DX/Storage
PASS: Create pod using cephfs pvc on SX/DX/Storage
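
A minimal example of the kind of PVC used in the RBD test steps above
(the claim name and size are illustrative, not part of the app):

    kind: PersistentVolumeClaim
    apiVersion: v1
    metadata:
      name: test-rbd-pvc
    spec:
      accessModes:
        - ReadWriteOnce
      resources:
        requests:
          storage: 1Gi
      storageClassName: general

A pod then mounts the claim by referencing it in its volumes section,
matching the SX/DX/Storage pod tests above.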

Story: 2009987
Task: 45050

Signed-off-by: Hediberto Cavalcante da Silva <hediberto.cavalcantedasilva@windriver.com>
Change-Id: Iffcd56f689aa70788c4c2abbbf2c9a02b5a797cf

From d58e048aea5ec70f830f1703245b811d1ee54a7b Mon Sep 17 00:00:00 2001
From: Hediberto Cavalcante da Silva
<hediberto.cavalcantedasilva@windriver.com>
Date: Thu, 3 Nov 2022 19:54:49 -0300
Subject: [PATCH] ceph-csi-rbd: add storage-init.yaml

Signed-off-by: Hediberto Cavalcante da Silva <hediberto.cavalcantedasilva@windriver.com>
---
.../ceph-csi-rbd/templates/storage-init.yaml | 292 ++++++++++++++++++
1 file changed, 292 insertions(+)
create mode 100644 charts/ceph-csi-rbd/templates/storage-init.yaml

diff --git a/charts/ceph-csi-rbd/templates/storage-init.yaml b/charts/ceph-csi-rbd/templates/storage-init.yaml
new file mode 100644
index 0000000..8e8c4de
--- /dev/null
+++ b/charts/ceph-csi-rbd/templates/storage-init.yaml
@@ -0,0 +1,292 @@
+{{/*
+#
+# Copyright (c) 2020-2022 Wind River Systems, Inc.
+#
+# SPDX-License-Identifier: Apache-2.0
+#
+*/}}
+
+kind: ClusterRole
+apiVersion: rbac.authorization.k8s.io/v1
+metadata:
+  name: rbd-rbac-secrets-namespaces
+  labels:
+    app: {{ include "ceph-csi-rbd.name" . }}
+    chart: {{ include "ceph-csi-rbd.chart" . }}
+    component: {{ .Values.provisioner.name }}
+    release: {{ .Release.Name }}
+    heritage: {{ .Release.Service }}
+  annotations:
+    "meta.helm.sh/release-name": {{ .Release.Name }}
+    "meta.helm.sh/release-namespace": {{ .Release.Namespace }}
+    "helm.sh/hook": "pre-upgrade, pre-install"
+    "helm.sh/hook-delete-policy": "before-hook-creation"
+rules:
+  - apiGroups: [""]
+    resources: ["secrets"]
+    verbs: ["get", "list", "watch", "create", "delete"]
+  - apiGroups: [""]
+    resources: ["namespaces"]
+    verbs: ["get", "create", "list", "update"]
+
+---
+
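+# Bind the ClusterRole above to the provisioner service account so the
+# storage-init Job can manage secrets and namespaces across the cluster.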
+kind: ClusterRoleBinding
+apiVersion: rbac.authorization.k8s.io/v1
+metadata:
+  name: rbd-rbac-secrets-namespaces
+  labels:
+    app: {{ include "ceph-csi-rbd.name" . }}
+    chart: {{ include "ceph-csi-rbd.chart" . }}
+    component: {{ .Values.provisioner.name }}
+    release: {{ .Release.Name }}
+    heritage: {{ .Release.Service }}
+  annotations:
+    "meta.helm.sh/release-name": {{ .Release.Name }}
+    "meta.helm.sh/release-namespace": {{ .Release.Namespace }}
+    "helm.sh/hook": "pre-upgrade, pre-install"
+    "helm.sh/hook-delete-policy": "before-hook-creation"
+subjects:
+  - kind: ServiceAccount
+    name: {{ include "ceph-csi-rbd.serviceAccountName.provisioner" . }}
+    namespace: {{ .Release.Namespace }}
+roleRef:
+  kind: ClusterRole
+  name: rbd-rbac-secrets-namespaces
+  apiGroup: rbac.authorization.k8s.io
+
+---
+
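+# ConfigMap providing a minimal ceph.conf and the storage-init.sh script
+# that the storage-init Job below executes.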
+apiVersion: v1
+kind: ConfigMap
+metadata:
+  name: rbd-storage-init
+  namespace: {{ .Release.Namespace }}
+  labels:
+    app: {{ include "ceph-csi-rbd.name" . }}
+    chart: {{ include "ceph-csi-rbd.chart" . }}
+    component: {{ .Values.provisioner.name }}
+    release: {{ .Release.Name }}
+    heritage: {{ .Release.Service }}
+  annotations:
+    "meta.helm.sh/release-name": {{ .Release.Name }}
+    "meta.helm.sh/release-namespace": {{ .Release.Namespace }}
+    "helm.sh/hook": "pre-upgrade, pre-install"
+    "helm.sh/hook-delete-policy": "before-hook-creation"
+data:
+  ceph.conf: |
+    #
+    # Copyright (c) 2020-2022 Wind River Systems, Inc.
+    #
+    # SPDX-License-Identifier: Apache-2.0
+    #
+
+    [global]
+    # For version 0.55 and beyond, you must explicitly enable
+    # or disable authentication with "auth" entries in [global].
+    auth_cluster_required = none
+    auth_service_required = none
+    auth_client_required = none
+
+    {{ $monitors := .Values.classDefaults.monitors }}
+    {{ range $index, $monitor := $monitors }}
+    [mon.{{- $index }}]
+    mon_addr = {{ $monitor }}
+    {{- end }}
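+    # The range above renders one [mon.N] section per monitor listed in
+    # .Values.classDefaults.monitors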
+
+  storage-init.sh: |
+    #!/bin/bash
+
+    #
+    # Copyright (c) 2020-2022 Wind River Systems, Inc.
+    #
+    # SPDX-License-Identifier: Apache-2.0
+    #
+
+    # Copy from read-only mount to Ceph config folder
+    cp /tmp/ceph.conf /etc/ceph/
+
+    if [ -n "${CEPH_ADMIN_SECRET}" ]; then
+      kubectl get secret -n ${NAMESPACE} ${CEPH_ADMIN_SECRET} 2>/dev/null
+      if [ $? -ne 0 ]; then
+        echo "Create ${CEPH_ADMIN_SECRET} secret"
+        # Cephx auth is disabled in ceph.conf above, so an empty key works as a placeholder
+        kubectl create secret generic ${CEPH_ADMIN_SECRET} --type="kubernetes.io/rbd" --from-literal=key= --namespace=${NAMESPACE}
+        if [ $? -ne 0 ]; then
+          echo "Error creating secret ${CEPH_ADMIN_SECRET}, exit"
+          exit 1
+        fi
+      fi
+    fi
+
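+    # Ceph CLI tools expect the admin keyring file to exist, even with Cephx disabled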
+    touch /etc/ceph/ceph.client.admin.keyring
+
+    # Check if ceph is accessible
+    echo "===================================="
+    ceph -s
+    if [ $? -ne 0 ]; then
+      echo "Error: Ceph cluster is not accessible, check Pod logs for details."
+      exit 1
+    fi
+
+    set -ex
+    # Make sure the pool exists.
+    ceph osd pool stats ${POOL_NAME} || ceph osd pool create ${POOL_NAME} ${POOL_CHUNK_SIZE}
+    # Set pool configuration.
+    ceph osd pool application enable ${POOL_NAME} rbd
+    ceph osd pool set ${POOL_NAME} size ${POOL_REPLICATION}
+    ceph osd pool set ${POOL_NAME} crush_rule ${POOL_CRUSH_RULE_NAME}
+    set +ex
+
+    if [[ -z "${USER_ID}" && -z "${CEPH_USER_SECRET}" ]]; then
+      echo "No need to create secrets for pool ${POOL_NAME}"
+      exit 0
+    fi
+
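+    # get-or-create returns a full keyring block; sed extracts just the key value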
+    set -ex
+    KEYRING=$(ceph auth get-or-create client.${USER_ID} mon "allow r" osd "allow rwx pool=${POOL_NAME}" | sed -n 's/^[[:blank:]]*key[[:blank:]]\+=[[:blank:]]\(.*\)/\1/p')
+    # Set up pool key in Ceph format
+    CEPH_USER_KEYRING=/etc/ceph/ceph.client.${USER_ID}.keyring
+    echo $KEYRING > $CEPH_USER_KEYRING
+    set +ex
+
+ if [ -n "${CEPH_USER_SECRET}" ]; then
+ kubectl get secret -n ${NAMESPACE} ${CEPH_USER_SECRET} 2>/dev/null
+ if [ $? -ne 0 ]; then
+ echo "Create ${CEPH_USER_SECRET} secret"
+ kubectl create secret generic -n ${NAMESPACE} ${CEPH_USER_SECRET} --type="kubernetes.io/rbd" --from-literal=key=$KEYRING
+ if [ $? -ne 0 ]; then
+ echo"Error creating secret ${CEPH_USER_SECRET} in ${NAMESPACE}, exit"
+ exit 1
+ fi
+ else
+ echo "Secret ${CEPH_USER_SECRET} already exists"
+ fi
+
+ # Support creating namespaces and Ceph user secrets for additional
+ # namespaces other than that which the provisioner is installed. This
+ # allows the provisioner to set up and provide PVs for multiple
+ # applications across many namespaces.
+ if [ -n "${ADDITIONAL_NAMESPACES}" ]; then
+ for ns in $(IFS=,; echo ${ADDITIONAL_NAMESPACES}); do
+ kubectl get namespace $ns 2>/dev/null
+ if [ $? -ne 0 ]; then
+ kubectl create namespace $ns
+ if [ $? -ne 0 ]; then
+ echo "Error creating namespace $ns, exit"
+ continue
+ fi
+ fi
+
+ kubectl get secret -n $ns ${CEPH_USER_SECRET} 2>/dev/null
+ if [ $? -ne 0 ]; then
+ echo "Creating secret ${CEPH_USER_SECRET} for namespace $ns"
+ kubectl create secret generic -n $ns ${CEPH_USER_SECRET} --type="kubernetes.io/rbd" --from-literal=key=$KEYRING
+ if [ $? -ne 0 ]; then
+ echo "Error creating secret ${CEPH_USER_SECRET} in $ns, exit"
+ fi
+ else
+ echo "Secret ${CEPH_USER_SECRET} for namespace $ns already exists"
+ fi
+ done
+ fi
+ fi
+
+    # Check if pool is accessible using provided credentials
+    echo "====================================="
+    timeout --preserve-status 10 rbd -p ${POOL_NAME} --user ${USER_ID} ls -K $CEPH_USER_KEYRING
+    RET=$?
+    # 143 = 128 + SIGTERM(15): the rbd command was killed by timeout
+    if [ ${RET} -eq 143 ]; then
+      echo "rbd command timed out and was sent a SIGTERM. Make sure OSDs have been provisioned."
+    elif [ ${RET} -ne 0 ]; then
+      echo "Error: Ceph pool ${POOL_NAME} is not accessible using credentials for user ${USER_ID}, check Pod logs for details."
+      exit 1
+    else
+      echo "Pool ${POOL_NAME} accessible"
+    fi
+
+    # Log the final cluster status for the Pod logs
+    ceph -s
+
+---
+
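+# Job that runs storage-init.sh from the ConfigMap above on install,
+# upgrade and rollback (see the hook annotations) to prepare the RBD pool.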
+apiVersion: batch/v1
+kind: Job
+metadata:
+  name: rbd-storage-init
+  namespace: {{ .Release.Namespace }}
+  labels:
+    app: {{ include "ceph-csi-rbd.name" . }}
+    chart: {{ include "ceph-csi-rbd.chart" . }}
+    component: {{ .Values.provisioner.name }}
+    release: {{ .Release.Name }}
+    heritage: {{ .Release.Service }}
+  annotations:
+    "meta.helm.sh/release-name": {{ .Release.Name }}
+    "meta.helm.sh/release-namespace": {{ .Release.Namespace }}
+    "helm.sh/hook": "post-install, pre-upgrade, pre-rollback"
+    "helm.sh/hook-delete-policy": "before-hook-creation"
+spec:
+  backoffLimit: 5
+  activeDeadlineSeconds: 360
+  template:
+    metadata:
+      name: "{{ .Release.Name }}"
+      namespace: {{ .Release.Namespace }}
+      labels:
+        heritage: {{ .Release.Service | quote }}
+        release: {{ .Release.Name | quote }}
+        chart: "{{ .Chart.Name }}-{{- .Chart.Version }}"
+    spec:
+      serviceAccountName: {{ include "ceph-csi-rbd.serviceAccountName.provisioner" . }}
+      restartPolicy: OnFailure
+      volumes:
+        - name: rbd-storage-init-configmap-volume
+          configMap:
+            name: rbd-storage-init
+      containers:
+        - name: storage-init-{{- .Values.storageClass.name }}
+          image: {{ .Values.images.tags.rbd_provisioner_storage_init | quote }}
+          command: [ "/bin/bash", "/tmp/storage-init.sh" ]
+          env:
+            - name: NAMESPACE
+              value: {{ .Release.Namespace }}
+            - name: ADDITIONAL_NAMESPACES
+              value: {{ join "," .Values.storageClass.additionalNamespaces | quote }}
+            - name: CEPH_ADMIN_SECRET
+              value: {{ .Values.classDefaults.adminSecretName }}
+            - name: CEPH_USER_SECRET
+              value: {{ .Values.storageClass.userSecretName }}
+            - name: USER_ID
+              value: {{ .Values.storageClass.userId }}
+            - name: POOL_NAME
+              value: {{ .Values.storageClass.pool }}
+            - name: POOL_REPLICATION
+              value: {{ .Values.storageClass.replication | quote }}
+            - name: POOL_CRUSH_RULE_NAME
+              value: {{ .Values.storageClass.crush_rule_name | quote }}
+            - name: POOL_CHUNK_SIZE
+              value: {{ .Values.storageClass.chunk_size | quote }}
+          volumeMounts:
+            - name: rbd-storage-init-configmap-volume
+              mountPath: /tmp
+{{- if .Values.provisioner.nodeSelector }}
+      nodeSelector:
+{{ .Values.provisioner.nodeSelector | toYaml | trim | indent 8 }}
+{{- end }}
+{{- with .Values.provisioner.tolerations }}
+      tolerations:
+{{ toYaml . | indent 8 }}
+{{- end }}
--
2.17.1