
Remove old RBD/CephFS provisioners and replace with a currently supported and evolving set of provisioners based on https://github.com/ceph/ceph-csi version 3.6.2. Test Plan: PASS: AIO-SX app upload/apply/remove/delete/update PASS: AIO-DX app upload/apply/remove/delete PASS: Storage 2+2+2 app upload/apply/remove/delete PASS: Create pvc using storageclass general (rbd) on SX/DX/Storage PASS: Create pod using rbd pvc on SX/DX/Storage PASS: Create pvc using storageclass cephfs on SX/DX/Storage PASS: Create pod using cephfs pvc on SX/DX/Storage Story: 2009987 Task: 45050 Signed-off-by: Hediberto Cavalcante da Silva <hediberto.cavalcantedasilva@windriver.com> Change-Id: Iffcd56f689aa70788c4c2abbbf2c9a02b5a797cf
275 lines
9.7 KiB
Diff
275 lines
9.7 KiB
Diff
From 30a69b72f9367802b4ebeb2667db921420328de0 Mon Sep 17 00:00:00 2001
|
|
From: Hediberto Cavalcante da Silva
|
|
<hediberto.cavalcantedasilva@windriver.com>
|
|
Date: Thu, 3 Nov 2022 19:56:35 -0300
|
|
Subject: [PATCH] ceph-csi-cephfs: add storage-init.yaml
|
|
|
|
Signed-off-by: Hediberto Cavalcante da Silva <hediberto.cavalcantedasilva@windriver.com>
|
|
---
|
|
.../templates/storage-init.yaml | 254 ++++++++++++++++++
|
|
1 file changed, 254 insertions(+)
|
|
create mode 100644 charts/ceph-csi-cephfs/templates/storage-init.yaml
|
|
|
|
diff --git a/charts/ceph-csi-cephfs/templates/storage-init.yaml b/charts/ceph-csi-cephfs/templates/storage-init.yaml
|
|
new file mode 100644
|
|
index 0000000..5c0f00d
|
|
--- /dev/null
|
|
+++ b/charts/ceph-csi-cephfs/templates/storage-init.yaml
|
|
@@ -0,0 +1,254 @@
|
|
{{/*
#
# Copyright (c) 2020-2022 Wind River Systems, Inc.
#
# SPDX-License-Identifier: Apache-2.0
#
*/}}

# ClusterRole granting the provisioner service account the permissions the
# storage-init Job needs: managing Ceph user secrets and creating any
# additional namespaces requested via values.
kind: ClusterRole
apiVersion: rbac.authorization.k8s.io/v1
metadata:
  name: cephfs-rbac-secrets-namespaces
  labels:
    app: {{ include "ceph-csi-cephfs.name" . }}
    chart: {{ include "ceph-csi-cephfs.chart" . }}
    component: {{ .Values.provisioner.name }}
    release: {{ .Release.Name }}
    heritage: {{ .Release.Service }}
  annotations:
    "meta.helm.sh/release-name": {{ .Release.Name }}
    "meta.helm.sh/release-namespace": {{ .Release.Namespace }}
    # Installed as a hook so the RBAC exists before the storage-init Job runs.
    "helm.sh/hook": "pre-upgrade, pre-install"
    "helm.sh/hook-delete-policy": "before-hook-creation"
rules:
  - apiGroups: [""]
    resources: ["secrets"]
    verbs: ["get", "list", "watch", "create", "delete"]
  - apiGroups: [""]
    resources: ["namespaces"]
    verbs: ["get", "create", "list", "update"]
---

# Binds the cephfs-rbac-secrets-namespaces ClusterRole to the provisioner
# service account so the storage-init Job can manage secrets and namespaces
# cluster-wide.
kind: ClusterRoleBinding
apiVersion: rbac.authorization.k8s.io/v1
metadata:
  name: cephfs-rbac-secrets-namespaces
  labels:
    app: {{ include "ceph-csi-cephfs.name" . }}
    chart: {{ include "ceph-csi-cephfs.chart" . }}
    component: {{ .Values.provisioner.name }}
    release: {{ .Release.Name }}
    heritage: {{ .Release.Service }}
  annotations:
    "meta.helm.sh/release-name": {{ .Release.Name }}
    "meta.helm.sh/release-namespace": {{ .Release.Namespace }}
    "helm.sh/hook": "pre-upgrade, pre-install"
    "helm.sh/hook-delete-policy": "before-hook-creation"
subjects:
  - kind: ServiceAccount
    name: {{ include "ceph-csi-cephfs.serviceAccountName.provisioner" . }}
    namespace: {{ .Values.classDefaults.cephFSNamespace }}
roleRef:
  kind: ClusterRole
  name: cephfs-rbac-secrets-namespaces
  apiGroup: rbac.authorization.k8s.io
---

# ConfigMap holding the minimal ceph.conf and the storage-init.sh script that
# the cephfs-storage-init Job mounts at /tmp and executes to prepare the Ceph
# cluster (pools, filesystem, user keyring, and Kubernetes secrets).
apiVersion: v1
kind: ConfigMap
metadata:
  name: cephfs-storage-init
  namespace: {{ .Values.classDefaults.cephFSNamespace }}
  labels:
    app: {{ include "ceph-csi-cephfs.name" . }}
    chart: {{ include "ceph-csi-cephfs.chart" . }}
    component: {{ .Values.provisioner.name }}
    release: {{ .Release.Name }}
    heritage: {{ .Release.Service }}
  annotations:
    "meta.helm.sh/release-name": {{ .Release.Name }}
    "meta.helm.sh/release-namespace": {{ .Release.Namespace }}
    "helm.sh/hook": "pre-upgrade, pre-install"
    "helm.sh/hook-delete-policy": "before-hook-creation"
data:
  ceph.conf: |
    #
    # Copyright (c) 2020-2022 Wind River Systems, Inc.
    #
    # SPDX-License-Identifier: Apache-2.0
    #

    [global]
    # For version 0.55 and beyond, you must explicitly enable
    # or disable authentication with "auth" entries in [global].
    auth_cluster_required = none
    auth_service_required = none
    auth_client_required = none

    {{ $monitors := .Values.classDefaults.monitors }}
    {{ range $index, $monitor := $monitors}}
    [mon.{{- $index }}]
    mon_addr = {{ $monitor }}
    {{- end }}

  storage-init.sh: |
    #!/bin/bash
    #
    # Copyright (c) 2020-2022 Wind River Systems, Inc.
    #
    # SPDX-License-Identifier: Apache-2.0
    #

    # Copy from read only mount to Ceph config folder
    cp /tmp/ceph.conf /etc/ceph/

    set -x

    touch /etc/ceph/ceph.client.admin.keyring

    # Check if ceph is accessible
    echo "===================================="
    ceph -s
    if [ $? -ne 0 ]; then
      echo "Error: Ceph cluster is not accessible, check Pod logs for details."
      exit 1
    fi

    # Create (or fetch) the pool user and extract only the key value.
    set -ex
    KEYRING=$(ceph auth get-or-create client.${USER_ID} mon "allow r" osd "allow rwx pool=${POOL_NAME}" | sed -n 's/^[[:blank:]]*key[[:blank:]]\+=[[:blank:]]\(.*\)/\1/p')
    # Set up pool key in Ceph format
    CEPH_USER_KEYRING=/etc/ceph/ceph.client.${USER_ID}.keyring
    echo $KEYRING > $CEPH_USER_KEYRING
    set +ex

    if [ -n "${CEPH_USER_SECRET}" ]; then
      kubectl get secret -n ${NAMESPACE} ${CEPH_USER_SECRET} 2>/dev/null
      if [ $? -ne 0 ]; then
        echo "Create ${CEPH_USER_SECRET} secret"
        kubectl create secret generic -n ${NAMESPACE} ${CEPH_USER_SECRET} --type="kubernetes.io/cephfs" --from-literal=adminKey=$KEYRING --from-literal=adminID=${ADMIN_ID}
        if [ $? -ne 0 ]; then
          echo "Error creating secret ${CEPH_USER_SECRET} in ${NAMESPACE}, exit"
          exit 1
        fi
      else
        echo "Secret ${CEPH_USER_SECRET} already exists"
      fi

      # Support creating namespaces and Ceph user secrets for additional
      # namespaces other than that which the provisioner is installed. This
      # allows the provisioner to set up and provide PVs for multiple
      # applications across many namespaces.
      if [ -n "${ADDITIONAL_NAMESPACES}" ]; then
        for ns in $(
          IFS=,
          echo ${ADDITIONAL_NAMESPACES}
        ); do
          kubectl get namespace $ns 2>/dev/null
          if [ $? -ne 0 ]; then
            kubectl create namespace $ns
            if [ $? -ne 0 ]; then
              # Best-effort: move on to the next namespace on failure.
              echo "Error creating namespace $ns, skipping"
              continue
            fi
          fi

          kubectl get secret -n $ns ${CEPH_USER_SECRET} 2>/dev/null
          if [ $? -ne 0 ]; then
            echo "Creating secret ${CEPH_USER_SECRET} for namespace $ns"
            kubectl create secret generic -n $ns ${CEPH_USER_SECRET} --type="kubernetes.io/cephfs" --from-literal=adminKey=$KEYRING --from-literal=adminID=${ADMIN_ID}
            if [ $? -ne 0 ]; then
              echo "Error creating secret ${CEPH_USER_SECRET} in $ns, exit"
            fi
          else
            echo "Secret ${CEPH_USER_SECRET} for namespace $ns already exists"
          fi
        done
      fi
    fi

    # Create/configure the data and metadata pools backing the filesystem.
    ceph osd pool stats ${POOL_NAME} || ceph osd pool create ${POOL_NAME} ${CHUNK_SIZE}
    ceph osd pool application enable ${POOL_NAME} cephfs
    ceph osd pool set ${POOL_NAME} size ${POOL_REPLICATION}
    ceph osd pool set ${POOL_NAME} crush_rule ${POOL_CRUSH_RULE_NAME}

    ceph osd pool stats ${METADATA_POOL_NAME} || ceph osd pool create ${METADATA_POOL_NAME} ${CHUNK_SIZE}
    ceph osd pool application enable ${METADATA_POOL_NAME} cephfs
    ceph osd pool set ${METADATA_POOL_NAME} size ${POOL_REPLICATION}
    ceph osd pool set ${METADATA_POOL_NAME} crush_rule ${POOL_CRUSH_RULE_NAME}

    # Create the CephFS filesystem only if it does not already exist.
    ceph fs ls | grep ${FS_NAME} || ceph fs new ${FS_NAME} ${METADATA_POOL_NAME} ${POOL_NAME}

    ceph -s
---

# Hook Job that runs storage-init.sh (from the cephfs-storage-init ConfigMap)
# to provision the CephFS pools, filesystem, and user secrets.
apiVersion: batch/v1
kind: Job
metadata:
  name: cephfs-storage-init
  namespace: {{ .Values.classDefaults.cephFSNamespace }}
  labels:
    app: {{ include "ceph-csi-cephfs.name" . }}
    chart: {{ include "ceph-csi-cephfs.chart" . }}
    component: {{ .Values.provisioner.name }}
    release: {{ .Release.Name }}
    heritage: {{ .Release.Service }}
  annotations:
    "meta.helm.sh/release-name": {{ .Release.Name }}
    "meta.helm.sh/release-namespace": {{ .Release.Namespace }}
    "helm.sh/hook": "post-install, pre-upgrade, pre-rollback"
    "helm.sh/hook-delete-policy": "before-hook-creation"
spec:
  backoffLimit: 5
  template:
    spec:
      serviceAccountName: {{ include "ceph-csi-cephfs.serviceAccountName.provisioner" . }}
      volumes:
        - name: cephfs-storage-init-configmap-volume
          configMap:
            name: cephfs-storage-init
            # 0555 (r-xr-xr-x) so the mounted script is executable.
            defaultMode: 0555
      containers:
        - name: storage-init-{{- .Values.storageClass.name }}
          image: {{ .Values.images.tags.cephfs_provisioner_storage_init | quote }}
          command: ["/bin/bash", "/tmp/storage-init.sh"]
          env:
            # All templated values are piped through `quote`: Kubernetes
            # requires EnvVar values to be strings, and an unquoted
            # numeric-looking value (e.g. a numeric userId) would render as a
            # YAML integer and make the manifest fail to apply.
            - name: NAMESPACE
              value: {{ .Values.classDefaults.cephFSNamespace | quote }}
            - name: ADDITIONAL_NAMESPACES
              value: {{ join "," .Values.storageClass.additionalNamespaces | quote }}
            - name: CEPH_USER_SECRET
              value: {{ .Values.storageClass.userSecretName | quote }}
            - name: USER_ID
              value: {{ .Values.storageClass.userId | quote }}
            - name: ADMIN_ID
              value: {{ .Values.classDefaults.adminId | quote }}
            - name: POOL_NAME
              value: {{ .Values.storageClass.pool | quote }}
            - name: METADATA_POOL_NAME
              value: {{ .Values.storageClass.metadata_pool | quote }}
            - name: FS_NAME
              value: {{ .Values.storageClass.fsName | quote }}
            - name: CHUNK_SIZE
              value: {{ .Values.storageClass.chunk_size | quote }}
            - name: POOL_REPLICATION
              value: {{ .Values.storageClass.replication | quote }}
            - name: POOL_CRUSH_RULE_NAME
              value: {{ .Values.storageClass.crush_rule_name | quote }}
          volumeMounts:
            - name: cephfs-storage-init-configmap-volume
              mountPath: /tmp
      restartPolicy: OnFailure
{{- if .Values.provisioner.nodeSelector }}
      nodeSelector:
{{ .Values.provisioner.nodeSelector | toYaml | trim | indent 8 }}
{{- end }}
{{- with .Values.provisioner.tolerations }}
      tolerations:
{{ toYaml . | indent 8 }}
{{- end}}
--
|
|
2.17.1
|