Merge remote-tracking branch 'openstack-helm-infra/master'

The openstack-helm and openstack-helm-infra repositories are merged into one. See
https://lists.openstack.org/archives/list/openstack-discuss@lists.openstack.org/thread/YRWSN6X2MTVGFPCULJ344RSDMCQDO7ZG/
for the discussion that led up to this.

commit b95bb67678

CONTRIBUTING.rst (new file, 19 lines)
@@ -0,0 +1,19 @@
The source repository for this project can be found at:

   https://opendev.org/openstack/openstack-helm-infra

Pull requests submitted through GitHub are not monitored.

To start contributing to OpenStack, follow the steps in the contribution guide
to set up and use Gerrit:

   https://docs.openstack.org/contributors/code-and-documentation/quick-start.html

Bugs should be filed on StoryBoard:

   https://storyboard.openstack.org/#!/project/openstack/openstack-helm-infra

For more specific information about contributing to this repository, see the
openstack-helm infra contributor guide:

   https://docs.openstack.org/openstack-helm-infra/latest/contributor/contributing.html

ca-clusterissuer/Chart.yaml (new file, 24 lines)
@@ -0,0 +1,24 @@
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#    http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

---
apiVersion: v2
appVersion: "1.0"
description: Certificate Issuer chart for OSH
home: https://cert-manager.io/
name: ca-clusterissuer
version: 2024.2.0
dependencies:
  - name: helm-toolkit
    repository: file://../helm-toolkit
    version: ">= 0.1.0"
...

ca-clusterissuer/templates/clusterissuer-ca.yaml (new file, 28 lines)
@@ -0,0 +1,28 @@
{{/*
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at

   http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/}}

{{- if .Values.manifests.clusterissuer }}
{{- $envAll := . }}
---
apiVersion: cert-manager.io/v1
kind: ClusterIssuer
metadata:
  name: {{ .Values.conf.ca.issuer.name }}
  labels:
{{ tuple $envAll "cert-manager" "clusterissuer" | include "helm-toolkit.snippets.kubernetes_metadata_labels" | indent 4 }}
spec:
  ca:
    secretName: {{ .Values.conf.ca.secret.name }}
...
{{- end }}

ca-clusterissuer/templates/secret-ca.yaml (new file, 26 lines)
@@ -0,0 +1,26 @@
{{/*
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at

   http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/}}

{{- if .Values.manifests.secret_ca }}
---
apiVersion: v1
kind: Secret
metadata:
  name: {{ .Values.conf.ca.secret.name }}
  namespace: {{ .Values.conf.ca.secret.namespace }}
data:
  tls.crt: {{ .Values.conf.ca.secret.crt | default "" | b64enc }}
  tls.key: {{ .Values.conf.ca.secret.key | default "" | b64enc }}
...
{{- end }}

ca-clusterissuer/values.yaml (new file, 27 lines)
@@ -0,0 +1,27 @@
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#    http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
---
conf:
  ca:
    issuer:
      name: ca-clusterissuer
    secret:
      name: secret-name
      # Namespace where cert-manager is deployed.
      namespace: cert-manager
      crt: null
      key: null

manifests:
  clusterissuer: true
  secret_ca: true
...

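The secret template base64-encodes whatever PEM material is supplied through conf.ca.secret.crt and conf.ca.secret.key, so the overrides are plain PEM strings. A minimal sketch of wiring in an existing CA pair at install time — the local file names ca.crt and ca.key and the secret name ca-key-pair are illustrative, not part of the chart:

```
# Hypothetical local PEM files; the chart only requires that these values hold PEM data.
# The Secret itself is created in conf.ca.secret.namespace (cert-manager by default).
helm upgrade --install ca-clusterissuer ./ca-clusterissuer \
  --namespace=openstack \
  --set conf.ca.secret.name=ca-key-pair \
  --set-file conf.ca.secret.crt=ca.crt \
  --set-file conf.ca.secret.key=ca.key
```
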
ca-issuer/Chart.yaml (new file, 24 lines)
@@ -0,0 +1,24 @@
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#    http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

---
apiVersion: v2
appVersion: "1.0"
description: Certificate Issuer chart for OSH
home: https://cert-manager.io/
name: ca-issuer
version: 2024.2.0
dependencies:
  - name: helm-toolkit
    repository: file://../helm-toolkit
    version: ">= 0.1.0"
...

ca-issuer/templates/issuer-ca.yaml (new file, 33 lines)
@@ -0,0 +1,33 @@
{{/*
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at

   http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/}}

{{- if .Values.manifests.issuer }}
{{- $envAll := . }}
---
{{- if semverCompare "< v1.0.0" .Values.cert_manager_version }}
apiVersion: cert-manager.io/v1alpha3
{{- else }}
apiVersion: cert-manager.io/v1
{{- end }}
kind: Issuer
metadata:
  name: {{ .Values.conf.ca.issuer.name }}
  namespace: {{ .Release.Namespace }}
  labels:
{{ tuple $envAll "cert-manager" "issuer" | include "helm-toolkit.snippets.kubernetes_metadata_labels" | indent 4 }}
spec:
  ca:
    secretName: {{ .Values.conf.ca.secret.name }}
...
{{- end }}

ca-issuer/templates/secret-ca.yaml (new file, 26 lines)
@@ -0,0 +1,26 @@
{{/*
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at

   http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/}}

{{- if .Values.manifests.secret_ca }}
---
apiVersion: v1
kind: Secret
metadata:
  name: {{ .Values.conf.ca.secret.name }}
  namespace: {{ .Release.Namespace }}
data:
  tls.crt: {{ .Values.conf.ca.secret.crt | default "" | b64enc }}
  tls.key: {{ .Values.conf.ca.secret.key | default "" | b64enc }}
...
{{- end }}

ca-issuer/values.yaml (new file, 30 lines)
@@ -0,0 +1,30 @@
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#    http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
---
conf:
  ca:
    issuer:
      name: ca-issuer
    secret:
      name: secret-name
      crt: null
      key: null

# Default version of jetstack/cert-manager being deployed.
# Starting at v1.0.0, apiVersion cert-manager.io/v1 is used.
# For the previous apiVersion, cert-manager.io/v1alpha3, change this to an older version (such as v0.15.0).
cert_manager_version: v1.0.0

manifests:
  issuer: true
  secret_ca: true
...

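Because issuer-ca.yaml selects the apiVersion with semverCompare, pointing the chart at a pre-1.0 cert-manager only requires overriding this one value. A sketch of previewing the effect locally with helm template (assuming the chart's helm-toolkit dependency has been built):

```
# Renders apiVersion: cert-manager.io/v1alpha3 instead of cert-manager.io/v1
helm template ca-issuer ./ca-issuer \
  --set cert_manager_version=v0.15.0 \
  --show-only templates/issuer-ca.yaml
```
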
ceph-adapter-rook/Chart.yaml (new file, 24 lines)
@@ -0,0 +1,24 @@
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#    http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

---
apiVersion: v2
appVersion: v1.0.0
description: OpenStack-Helm Ceph Adapter Rook
name: ceph-adapter-rook
version: 2024.2.0
home: https://github.com/ceph/ceph
dependencies:
  - name: helm-toolkit
    repository: file://../helm-toolkit
    version: ">= 0.1.0"
...

ceph-adapter-rook/README.md (new file, 18 lines)
@@ -0,0 +1,18 @@
# Summary

This is the minimal set of templates necessary to make the rest
of the OpenStack-Helm charts work with Ceph clusters managed by the
Rook operator. The Rook operator not only deploys Ceph clusters but
also provides a convenient interface to those clusters
via CRDs, which can be used to manage pools/keys/users etc.
However, the OpenStack-Helm charts do not use Rook CRDs; instead they
manage Ceph assets like pools/keyrings/users/buckets etc. by
running bootstrap scripts. Before using the OpenStack-Helm charts we
have to provision a minimal set of assets like the Ceph admin key and
the Ceph client config.

# Usage

```
helm upgrade --install ceph-adapter-rook ./ceph-adapter-rook \
  --namespace=openstack
```

Once all the jobs are finished you can deploy the other OpenStack-Helm charts.

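A quick way to confirm that the provisioning finished, assuming the default release, namespace, secret_name, and configmap_name from the usage example above:

```
# Wait for the two bootstrap jobs, then check that the key and config landed.
kubectl --namespace openstack wait --for=condition=complete job --all --timeout=300s
kubectl --namespace openstack get secret/pvc-ceph-client-key configmap/ceph-etc
```
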
ceph-adapter-rook/templates/bin/_config-manager.sh.tpl (new file, 26 lines)
@@ -0,0 +1,26 @@
#!/bin/bash

{{/*
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at

   http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/}}

set -ex
{{- $envAll := . }}

ENDPOINTS=$(kubectl --namespace ${CEPH_CLUSTER_NAMESPACE} get configmap rook-ceph-mon-endpoints -o jsonpath='{.data.data}' | sed 's/.=//g')

kubectl get cm ${CEPH_CONF_ETC} -n ${DEPLOYMENT_NAMESPACE} -o yaml | \
  sed "s#mon_host.*#mon_host = ${ENDPOINTS}#g" | \
  kubectl apply -f -

kubectl get cm ${CEPH_CONF_ETC} -n ${DEPLOYMENT_NAMESPACE} -o yaml

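The sed expression strips the `a=`, `b=`, ... monitor-name prefixes that Rook uses in its rook-ceph-mon-endpoints ConfigMap, leaving a bare mon_host list. A standalone sketch of the same transformation with made-up endpoint data:

```
# Hypothetical ConfigMap payload; only the 'name=' prefixes are removed.
echo 'a=192.168.0.10:6789,b=192.168.0.11:6789,c=192.168.0.12:6789' | sed 's/.=//g'
# -> 192.168.0.10:6789,192.168.0.11:6789,192.168.0.12:6789
```
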
ceph-adapter-rook/templates/bin/_key-manager.sh.tpl (new file, 44 lines)
@@ -0,0 +1,44 @@
#!/bin/bash

{{/*
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at

   http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/}}

set -ex
{{- $envAll := . }}

# We expect rook-ceph-tools pod to be up and running
ROOK_CEPH_TOOLS_POD=$(kubectl -n ${CEPH_CLUSTER_NAMESPACE} get pods --no-headers | awk '/rook-ceph-tools/{print $1}')
CEPH_ADMIN_KEY=$(kubectl -n ${CEPH_CLUSTER_NAMESPACE} exec ${ROOK_CEPH_TOOLS_POD} -- ceph auth ls | grep -A1 "client.admin" | awk '/key:/{print $2}')

ceph_activate_namespace() {
  kube_namespace=$1
  secret_type=$2
  secret_name=$3
  ceph_key=$4
  {
    cat <<EOF
apiVersion: v1
kind: Secret
metadata:
  name: "${secret_name}"
  labels:
{{ tuple $envAll "ceph" "rbd" | include "helm-toolkit.snippets.kubernetes_metadata_labels" | indent 4 }}
type: "${secret_type}"
data:
  key: $( echo ${ceph_key} | base64 | tr -d '\n' )
EOF
  } | kubectl apply --namespace ${kube_namespace} -f -
}

ceph_activate_namespace ${DEPLOYMENT_NAMESPACE} "kubernetes.io/rbd" ${SECRET_NAME} "${CEPH_ADMIN_KEY}"

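Once the job has run, the copied admin key can be inspected in the deployment namespace; a sketch assuming the default secret_name and an openstack deployment namespace:

```
# Decode the admin key that the job copied out of the rook-ceph-tools pod.
kubectl --namespace openstack get secret pvc-ceph-client-key \
  -o jsonpath='{.data.key}' | base64 -d
```
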
ceph-adapter-rook/templates/configmap-bin.yaml (new file, 28 lines)
@@ -0,0 +1,28 @@
{{/*
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at

   http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/}}

{{- if .Values.manifests.configmap_bin }}
{{- $envAll := . }}
---
apiVersion: v1
kind: ConfigMap
metadata:
  name: {{ printf "%s-%s" $envAll.Release.Name "bin" | quote }}
data:
  key-manager.sh: |
{{ tuple "bin/_key-manager.sh.tpl" . | include "helm-toolkit.utils.template" | indent 4 }}
  config-manager.sh: |
{{ tuple "bin/_config-manager.sh.tpl" . | include "helm-toolkit.utils.template" | indent 4 }}

{{- end }}

ceph-adapter-rook/templates/configmap-etc-client.yaml (new file, 49 lines)
@@ -0,0 +1,49 @@
{{/*
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at

   http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/}}

{{- define "ceph.configmap.etc" }}
{{- $configMapName := index . 0 }}
{{- $envAll := index . 1 }}
{{- with $envAll }}

{{/*
{{- if empty .Values.conf.ceph.global.mon_host -}}
{{- $monHost := tuple "ceph_mon" "internal" "mon_msgr2" . | include "helm-toolkit.endpoints.host_and_port_endpoint_uri_lookup" }}
{{- $_ := $monHost | set .Values.conf.ceph.global "mon_host" -}}
{{- end -}}

{{- if empty .Values.conf.ceph.osd.cluster_network -}}
{{- $_ := .Values.network.cluster | set .Values.conf.ceph.osd "cluster_network" -}}
{{- end -}}

{{- if empty .Values.conf.ceph.osd.public_network -}}
{{- $_ := .Values.network.public | set .Values.conf.ceph.osd "public_network" -}}
{{- end -}}
*/}}

---
apiVersion: v1
kind: ConfigMap
metadata:
  name: {{ $configMapName }}
data:
  ceph.conf: |
{{ include "helm-toolkit.utils.to_ini" .Values.conf.ceph | indent 4 }}

{{- end }}
{{- end }}

{{- if .Values.manifests.configmap_etc_client }}
{{- list .Values.configmap_name . | include "ceph.configmap.etc" }}
{{- end }}

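Since the ConfigMap body is just .Values.conf.ceph serialized through helm-toolkit's to_ini helper, the rendered ceph.conf can be previewed without a cluster. A sketch, assuming the chart's helm-toolkit dependency has been built and using an illustrative mon_host override:

```
helm template ceph-adapter-rook ./ceph-adapter-rook \
  --show-only templates/configmap-etc-client.yaml \
  --set conf.ceph.global.mon_host=192.168.0.10:6789
```
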
ceph-adapter-rook/templates/job-namespace-client-ceph-config.yaml (new file, 134 lines)
@@ -0,0 +1,134 @@
{{/*
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at

   http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/}}

{{- if and .Values.manifests.job_namespace_client_ceph_config }}
{{- $envAll := . }}

{{- $randStringSuffix := randAlphaNum 5 | lower }}

{{- $serviceAccountName := print $envAll.Release.Name "-namespace-client-ceph-config" }}
{{ tuple $envAll "namespace_client_ceph_config" $serviceAccountName | include "helm-toolkit.snippets.kubernetes_pod_rbac_serviceaccount" }}
---
apiVersion: rbac.authorization.k8s.io/v1
kind: Role
metadata:
  name: {{ $serviceAccountName }}
rules:
  - apiGroups:
      - ""
    resources:
      - configmaps
    verbs:
      - get
      - create
      - update
      - patch
---
apiVersion: rbac.authorization.k8s.io/v1
kind: RoleBinding
metadata:
  name: {{ $serviceAccountName }}
roleRef:
  apiGroup: rbac.authorization.k8s.io
  kind: Role
  name: {{ $serviceAccountName }}
subjects:
  - kind: ServiceAccount
    name: {{ $serviceAccountName }}
    namespace: {{ $envAll.Release.Namespace }}
---
apiVersion: rbac.authorization.k8s.io/v1
kind: Role
metadata:
  name: {{ printf "%s-%s" $serviceAccountName $randStringSuffix }}
  namespace: {{ .Values.ceph_cluster_namespace }}
rules:
  - apiGroups:
      - ""
    resources:
      - configmaps
    verbs:
      - get
      - list
---
apiVersion: rbac.authorization.k8s.io/v1
kind: RoleBinding
metadata:
  name: {{ printf "%s-%s" $serviceAccountName $randStringSuffix }}
  namespace: {{ .Values.ceph_cluster_namespace }}
roleRef:
  apiGroup: rbac.authorization.k8s.io
  kind: Role
  name: {{ printf "%s-%s" $serviceAccountName $randStringSuffix }}
subjects:
  - kind: ServiceAccount
    name: {{ $serviceAccountName }}
    namespace: {{ $envAll.Release.Namespace }}
---
apiVersion: batch/v1
kind: Job
metadata:
  name: {{ $serviceAccountName }}
  labels:
{{ tuple $envAll "ceph" "namespace-client-ceph-config" | include "helm-toolkit.snippets.kubernetes_metadata_labels" | indent 4 }}
spec:
  template:
    metadata:
      labels:
{{ tuple $envAll "ceph" "namespace-client-ceph-config" | include "helm-toolkit.snippets.kubernetes_metadata_labels" | indent 8 }}
      annotations:
{{ dict "envAll" $envAll "podName" $serviceAccountName "containerNames" (list "ceph-storage-keys-generator" "init") | include "helm-toolkit.snippets.kubernetes_mandatory_access_control_annotation" | indent 8 }}
    spec:
{{ dict "envAll" $envAll "application" "namespace_client_ceph_config" | include "helm-toolkit.snippets.kubernetes_pod_security_context" | indent 6 }}
      serviceAccountName: {{ $serviceAccountName }}
      restartPolicy: OnFailure
      nodeSelector:
        {{ $envAll.Values.labels.job.node_selector_key }}: {{ $envAll.Values.labels.job.node_selector_value }}
      initContainers:
{{ tuple $envAll "namespace-client-ceph-config-init" list | include "helm-toolkit.snippets.kubernetes_entrypoint_init_container" | indent 8 }}
      containers:
        - name: namespace-client-ceph-config
{{ tuple $envAll "ceph_config_helper" | include "helm-toolkit.snippets.image" | indent 10 }}
{{ tuple $envAll $envAll.Values.pod.resources.jobs.namespace_client_ceph_config | include "helm-toolkit.snippets.kubernetes_resources" | indent 10 }}
{{ dict "envAll" $envAll "application" "namespace_client_ceph_config" "container" "ceph_storage_keys_generator" | include "helm-toolkit.snippets.kubernetes_container_security_context" | indent 10 }}
          env:
            - name: CEPH_CONF_ETC
              value: {{ .Values.configmap_name }}
            - name: DEPLOYMENT_NAMESPACE
              valueFrom:
                fieldRef:
                  fieldPath: metadata.namespace
            - name: CEPH_CLUSTER_NAMESPACE
              value: {{ .Values.ceph_cluster_namespace }}
          command:
            - /tmp/config-manager.sh
          volumeMounts:
            - name: pod-tmp
              mountPath: /tmp
            - name: pod-etc-ceph
              mountPath: /etc/ceph
            - name: bin
              mountPath: /tmp/config-manager.sh
              subPath: config-manager.sh
              readOnly: true
      volumes:
        - name: pod-tmp
          emptyDir: {}
        - name: pod-etc-ceph
          emptyDir: {}
        - name: bin
          configMap:
            name: {{ printf "%s-%s" $envAll.Release.Name "bin" | quote }}
            defaultMode: 0555
{{- end }}

ceph-adapter-rook/templates/job-namespace-client-key.yaml (new file, 140 lines)
@@ -0,0 +1,140 @@
{{/*
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at

   http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/}}

{{- if .Values.manifests.job_namespace_client_key }}
{{- $envAll := . }}

{{- $randStringSuffix := randAlphaNum 5 | lower }}

{{- $serviceAccountName := print $envAll.Release.Name "-namespace-client-key" }}
{{ tuple $envAll "namespace-client-key" $serviceAccountName | include "helm-toolkit.snippets.kubernetes_pod_rbac_serviceaccount" }}
---
apiVersion: rbac.authorization.k8s.io/v1
kind: Role
metadata:
  name: {{ $serviceAccountName }}
rules:
  - apiGroups:
      - ""
    resources:
      - secrets
    verbs:
      - get
      - create
      - update
      - patch
---
apiVersion: rbac.authorization.k8s.io/v1
kind: RoleBinding
metadata:
  name: {{ $serviceAccountName }}
roleRef:
  apiGroup: rbac.authorization.k8s.io
  kind: Role
  name: {{ $serviceAccountName }}
subjects:
  - kind: ServiceAccount
    name: {{ $serviceAccountName }}
    namespace: {{ $envAll.Release.Namespace }}
---
apiVersion: rbac.authorization.k8s.io/v1
kind: Role
metadata:
  name: {{ printf "%s-%s" $serviceAccountName $randStringSuffix }}
  namespace: {{ .Values.ceph_cluster_namespace }}
rules:
  - apiGroups:
      - ""
    resources:
      - pods
    verbs:
      - get
      - list
  - apiGroups:
      - ""
    resources:
      - pods/exec
    verbs:
      - create
---
apiVersion: rbac.authorization.k8s.io/v1
kind: RoleBinding
metadata:
  name: {{ printf "%s-%s" $serviceAccountName $randStringSuffix }}
  namespace: {{ .Values.ceph_cluster_namespace }}
roleRef:
  apiGroup: rbac.authorization.k8s.io
  kind: Role
  name: {{ printf "%s-%s" $serviceAccountName $randStringSuffix }}
subjects:
  - kind: ServiceAccount
    name: {{ $serviceAccountName }}
    namespace: {{ $envAll.Release.Namespace }}
---
apiVersion: batch/v1
kind: Job
metadata:
  name: {{ $serviceAccountName }}
  labels:
{{ tuple $envAll "ceph" "namespace-client-key" | include "helm-toolkit.snippets.kubernetes_metadata_labels" | indent 4 }}
spec:
  template:
    metadata:
      labels:
{{ tuple $envAll "ceph" "namespace-client-key" | include "helm-toolkit.snippets.kubernetes_metadata_labels" | indent 8 }}
      annotations:
{{ dict "envAll" $envAll "podName" $serviceAccountName "containerNames" (list "ceph-storage-keys-generator" "init") | include "helm-toolkit.snippets.kubernetes_mandatory_access_control_annotation" | indent 8 }}
    spec:
{{ dict "envAll" $envAll "application" "namespace-client-key" | include "helm-toolkit.snippets.kubernetes_pod_security_context" | indent 6 }}
      serviceAccountName: {{ $serviceAccountName }}
      restartPolicy: OnFailure
      nodeSelector:
        {{ $envAll.Values.labels.job.node_selector_key }}: {{ $envAll.Values.labels.job.node_selector_value }}
      initContainers:
{{ tuple $envAll "namespace-client-key-init" list | include "helm-toolkit.snippets.kubernetes_entrypoint_init_container" | indent 8 }}
      containers:
        - name: namespace-client-key
{{ tuple $envAll "ceph_config_helper" | include "helm-toolkit.snippets.image" | indent 10 }}
{{ tuple $envAll $envAll.Values.pod.resources.jobs.namespace_client_key | include "helm-toolkit.snippets.kubernetes_resources" | indent 10 }}
{{ dict "envAll" $envAll "application" "namespace-client-key" "container" "namespace-client-key" | include "helm-toolkit.snippets.kubernetes_container_security_context" | indent 10 }}
          env:
            - name: DEPLOYMENT_NAMESPACE
              valueFrom:
                fieldRef:
                  fieldPath: metadata.namespace
            - name: SECRET_NAME
              value: {{ .Values.secret_name }}
            - name: CEPH_CLUSTER_NAMESPACE
              value: {{ .Values.ceph_cluster_namespace }}
          command:
            - /tmp/key-manager.sh
          volumeMounts:
            - name: pod-tmp
              mountPath: /tmp
            - name: pod-etc-ceph
              mountPath: /etc/ceph
            - name: bin
              mountPath: /tmp/key-manager.sh
              subPath: key-manager.sh
              readOnly: true
      volumes:
        - name: pod-tmp
          emptyDir: {}
        - name: pod-etc-ceph
          emptyDir: {}
        - name: bin
          configMap:
            name: {{ printf "%s-%s" $envAll.Release.Name "bin" | quote }}
            defaultMode: 0555
{{- end }}

ceph-adapter-rook/values.yaml (new file, 71 lines)
@@ -0,0 +1,71 @@
---
images:
  pull_policy: IfNotPresent
  tags:
    ceph_config_helper: 'docker.io/openstackhelm/ceph-config-helper:ubuntu_jammy_19.2.1-1-20250207'
    dep_check: 'quay.io/airshipit/kubernetes-entrypoint:latest-ubuntu_focal'
    image_repo_sync: 'docker.io/library/docker:17.07.0'
  local_registry:
    active: false
    exclude:
      - dep_check
      - image_repo_sync

labels:
  job:
    node_selector_key: openstack-control-plane
    node_selector_value: enabled

pod:
  security_context:
    namespace_client_key:
      pod:
        runAsUser: 99
      container:
        namespace_client_key:
          allowPrivilegeEscalation: false
          readOnlyRootFilesystem: true
  dns_policy: "ClusterFirstWithHostNet"
  resources:
    enabled: false
    jobs:
      namespace_client_key:
        limits:
          memory: "1024Mi"
          cpu: "2000m"
        requests:
          memory: "128Mi"
          cpu: "500m"
      namespace_client_ceph_config:
        limits:
          memory: "1024Mi"
          cpu: "2000m"
        requests:
          memory: "128Mi"
          cpu: "500m"

ceph_cluster_namespace: ceph

secret_name: pvc-ceph-client-key
configmap_name: ceph-etc

conf:
  ceph:
    global:
      # TODO: Get mon host from rook-ceph-mon-endpoints configmap
      mon_host: "will be discovered"

dependencies:
  static:
    namespace_client_key:
      jobs: null
    namespace_client_ceph_config:
      jobs: null

manifests:
  configmap_bin: true
  configmap_etc_client: true
  job_namespace_client_ceph_config: true
  job_namespace_client_key: true
...

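Rook's own examples typically deploy the Ceph cluster into a rook-ceph namespace rather than ceph, in which case only the lookup namespace needs overriding; a sketch:

```
helm upgrade --install ceph-adapter-rook ./ceph-adapter-rook \
  --namespace=openstack \
  --set ceph_cluster_namespace=rook-ceph
```
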
ceph-client/Chart.yaml (new file, 24 lines)
@@ -0,0 +1,24 @@
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#    http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

---
apiVersion: v2
appVersion: v1.0.0
description: OpenStack-Helm Ceph Client
name: ceph-client
version: 2024.2.0
home: https://github.com/ceph/ceph-client
dependencies:
  - name: helm-toolkit
    repository: file://../helm-toolkit
    version: ">= 0.1.0"
...

ceph-client/templates/bin/_bootstrap.sh.tpl (new file, 18 lines)
@@ -0,0 +1,18 @@
#!/bin/bash

{{/*
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at

   http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/}}

set -ex
{{ .Values.bootstrap.script | default "echo 'Not Enabled'" }}

ceph-client/templates/bin/_helm-tests.sh.tpl (new executable file, 429 lines)
@@ -0,0 +1,429 @@
#!/bin/bash

{{/*
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at

   http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/}}

set -ex

function check_cluster_status() {
  echo "#### Start: Checking Ceph cluster status ####"
  ceph_status_output=$(ceph -s -f json | jq -r '.health')
  ceph_health_status=$(echo $ceph_status_output | jq -r '.status')

  if [ "x${ceph_health_status}" == "xHEALTH_OK" ]; then
    echo "Ceph status is HEALTH_OK"
  else
    echo "Ceph cluster status is not HEALTH_OK, checking PG states"
    pg_validation
  fi
}

function check_recovery_flags() {
  echo "### Start: Checking for flags that will prevent recovery"

  # Ensure there are no flags set that will prevent recovery of degraded PGs
  if [[ $(ceph osd stat | grep "norecover\|nobackfill\|norebalance") ]]; then
    ceph osd stat
    echo "Flags are set that prevent recovery of degraded PGs"
    exit 1
  fi
}

function check_osd_count() {
  echo "#### Start: Checking OSD count ####"
  noup_flag=$(ceph osd stat | awk '/noup/ {print $2}')
  osd_stat=$(ceph osd stat -f json-pretty)
  num_osd=$(awk '/"num_osds"/{print $2}' <<< "$osd_stat" | cut -d, -f1)
  num_in_osds=$(awk '/"num_in_osds"/{print $2}' <<< "$osd_stat" | cut -d, -f1)
  num_up_osds=$(awk '/"num_up_osds"/{print $2}' <<< "$osd_stat" | cut -d, -f1)

  MIN_OSDS=$((${num_osd}*$REQUIRED_PERCENT_OF_OSDS/100))
  if [ ${MIN_OSDS} -lt 1 ]; then
    MIN_OSDS=1
  fi

  if [ "${noup_flag}" ]; then
    osd_status=$(ceph osd dump -f json | jq -c '.osds[] | .state')
    count=0
    for osd in $osd_status; do
      if [[ "$osd" == *"up"* || "$osd" == *"new"* ]]; then
        ((count=count+1))
      fi
    done
    echo "Caution: noup flag is set. ${count} OSDs in up/new state. Required number of OSDs: ${MIN_OSDS}."
    if [ $MIN_OSDS -gt $count ]; then
      exit 1
    fi
  else
    if [ "${num_osd}" -eq 0 ]; then
      echo "There are no osds in the cluster"
      exit 1
    elif [ "${num_in_osds}" -ge "${MIN_OSDS}" ] && [ "${num_up_osds}" -ge "${MIN_OSDS}" ]; then
      echo "Required number of OSDs (${MIN_OSDS}) are UP and IN status"
    else
      echo "Required number of OSDs (${MIN_OSDS}) are NOT UP and IN status. Cluster shows OSD count=${num_osd}, UP=${num_up_osds}, IN=${num_in_osds}"
      exit 1
    fi
  fi
}

function check_failure_domain_count_per_pool() {
  echo "#### Start: Checking failure domain count per pool ####"
  pools=$(ceph osd pool ls)
  for pool in ${pools}
  do
    crush_rule=$(ceph osd pool get ${pool} crush_rule | awk '{print $2}')
    bucket_type=$(ceph osd crush rule dump ${crush_rule} | grep '"type":' | awk -F'"' 'NR==2 {print $4}')
    num_failure_domains=$(ceph osd tree | grep ${bucket_type} | wc -l)
    pool_replica_size=$(ceph osd pool get ${pool} size | awk '{print $2}')
    if [[ ${num_failure_domains} -ge ${pool_replica_size} ]]; then
      echo "--> Info: Pool ${pool} is configured with enough failure domains ${num_failure_domains} to satisfy pool replica size ${pool_replica_size}"
    else
      echo "--> Error : Pool ${pool} is NOT configured with enough failure domains ${num_failure_domains} to satisfy pool replica size ${pool_replica_size}"
      exit 1
    fi
  done
}

function mgr_validation() {
  echo "#### Start: MGR validation ####"
  mgr_dump=$(ceph mgr dump -f json-pretty)
  echo "Checking for ${MGR_COUNT} MGRs"

  mgr_avl=$(echo ${mgr_dump} | jq -r '.["available"]')

  if [ "x${mgr_avl}" == "xtrue" ]; then
    mgr_active=$(echo ${mgr_dump} | jq -r '.["active_name"]')
    echo "Out of ${MGR_COUNT}, 1 MGR is active"

    # Now let's check for standby managers
    mgr_stdby_count=$(echo ${mgr_dump} | jq -r '.["standbys"]' | jq length)

    # Total MGR count - 1 active = expected standby MGRs
    expected_standbys=$(( MGR_COUNT -1 ))

    if [ $mgr_stdby_count -eq $expected_standbys ]
    then
      echo "Cluster has 1 Active MGR, $mgr_stdby_count Standbys MGR"
    else
      echo "Warning. Cluster Standbys MGR: Expected count= $expected_standbys Available=$mgr_stdby_count"
      echo "If this is not expected behavior, please investigate and take some additional actions."
    fi

  else
    echo "No Active Manager found, Expected 1 MGR to be active out of ${MGR_COUNT}"
    retcode=1
  fi

  if [ "x${retcode}" == "x1" ]
  then
    exit 1
  fi
}

function pool_validation() {

  echo "#### Start: Checking Ceph pools ####"

  echo "From env variables, RBD pool replication count is: ${RBD}"

  # Assuming all pools have the same replication count as RBD.
  # If the RBD replication count is greater than 1, POOLMINSIZE should be 1 less than the replication count.
  # If the RBD replication count is not greater than 1, then POOLMINSIZE should be 1.

  if [ ${RBD} -gt 1 ]; then
    EXPECTED_POOLMINSIZE=$[${RBD}-1]
  else
    EXPECTED_POOLMINSIZE=1
  fi

  echo "EXPECTED_POOLMINSIZE: ${EXPECTED_POOLMINSIZE}"

  expectedCrushRuleId=""
  nrules=$(echo ${OSD_CRUSH_RULE_DUMP} | jq length)
  c=$[nrules-1]
  for n in $(seq 0 ${c})
  do
    osd_crush_rule_obj=$(echo ${OSD_CRUSH_RULE_DUMP} | jq -r .[${n}])

    name=$(echo ${osd_crush_rule_obj} | jq -r .rule_name)
    echo "Expected Crushrule: ${EXPECTED_CRUSHRULE}, Pool Crushmap: ${name}"

    if [ "x${EXPECTED_CRUSHRULE}" == "x${name}" ]; then
      expectedCrushRuleId=$(echo ${osd_crush_rule_obj} | jq .rule_id)
      echo "Checking against rule: id: ${expectedCrushRuleId}, name:${name}"
    else
      echo "Didn't match"
    fi
  done
  echo "Checking cluster for size:${RBD}, min_size:${EXPECTED_POOLMINSIZE}, crush_rule:${EXPECTED_CRUSHRULE}, crush_rule_id:${expectedCrushRuleId}"

  npools=$(echo ${OSD_POOLS_DETAILS} | jq length)
  i=$[npools - 1]
  for n in $(seq 0 ${i})
  do
    pool_obj=$(echo ${OSD_POOLS_DETAILS} | jq -r ".[${n}]")

    size=$(echo ${pool_obj} | jq -r .size)
    min_size=$(echo ${pool_obj} | jq -r .min_size)
    pg_num=$(echo ${pool_obj} | jq -r .pg_num)
    pg_placement_num=$(echo ${pool_obj} | jq -r .pg_placement_num)
    crush_rule=$(echo ${pool_obj} | jq -r .crush_rule)
    name=$(echo ${pool_obj} | jq -r .pool_name)
    pg_autoscale_mode=$(echo ${pool_obj} | jq -r .pg_autoscale_mode)
    if [[ "${ENABLE_AUTOSCALER}" == "true" ]]; then
      if [[ "${pg_autoscale_mode}" != "on" ]]; then
        echo "pg autoscaler not enabled on ${name} pool"
        exit 1
      fi
    fi
    if [[ $(ceph mon versions | awk '/version/{print $3}' | cut -d. -f1) -ge 14 ]]; then
      if [ "x${size}" != "x${RBD}" ] || [ "x${min_size}" != "x${EXPECTED_POOLMINSIZE}" ] \
      || [ "x${crush_rule}" != "x${expectedCrushRuleId}" ]; then
        echo "Pool ${name} has incorrect parameters!!! Size=${size}, Min_Size=${min_size}, Rule=${crush_rule}, PG_Autoscale_Mode=${pg_autoscale_mode}"
        exit 1
      else
        echo "Pool ${name} seems configured properly. Size=${size}, Min_Size=${min_size}, Rule=${crush_rule}, PG_Autoscale_Mode=${pg_autoscale_mode}"
      fi
    else
      if [ "x${size}" != "x${RBD}" ] || [ "x${min_size}" != "x${EXPECTED_POOLMINSIZE}" ] \
      || [ "x${pg_num}" != "x${pg_placement_num}" ] || [ "x${crush_rule}" != "x${expectedCrushRuleId}" ]; then
        echo "Pool ${name} has incorrect parameters!!! Size=${size}, Min_Size=${min_size}, PG=${pg_num}, PGP=${pg_placement_num}, Rule=${crush_rule}"
        exit 1
      else
        echo "Pool ${name} seems configured properly. Size=${size}, Min_Size=${min_size}, PG=${pg_num}, PGP=${pg_placement_num}, Rule=${crush_rule}"
      fi
    fi
  done
}

function pool_failuredomain_validation() {
  echo "#### Start: Checking Pools are configured with specific failure domain ####"

  expectedCrushRuleId=""
  nrules=$(echo ${OSD_CRUSH_RULE_DUMP} | jq length)
  c=$[nrules-1]
  for n in $(seq 0 ${c})
  do
    osd_crush_rule_obj=$(echo ${OSD_CRUSH_RULE_DUMP} | jq -r .[${n}])

    name=$(echo ${osd_crush_rule_obj} | jq -r .rule_name)

    if [ "x${EXPECTED_CRUSHRULE}" == "x${name}" ]; then
      expectedCrushRuleId=$(echo ${osd_crush_rule_obj} | jq .rule_id)
      echo "Checking against rule: id: ${expectedCrushRuleId}, name:${name}"
    fi
  done

  echo "Checking OSD pools are configured with Crush rule name:${EXPECTED_CRUSHRULE}, id:${expectedCrushRuleId}"

  npools=$(echo ${OSD_POOLS_DETAILS} | jq length)
  i=$[npools-1]
  for p in $(seq 0 ${i})
  do
    pool_obj=$(echo ${OSD_POOLS_DETAILS} | jq -r ".[${p}]")

    pool_crush_rule_id=$(echo $pool_obj | jq -r .crush_rule)
    pool_name=$(echo $pool_obj | jq -r .pool_name)

    if [ "x${pool_crush_rule_id}" == "x${expectedCrushRuleId}" ]; then
      echo "--> Info: Pool ${pool_name} is configured with the correct rule ${pool_crush_rule_id}"
    else
      echo "--> Error : Pool ${pool_name} is NOT configured with the correct rule ${pool_crush_rule_id}"
      exit 1
    fi
  done
}

function check_transient_pgs_file() {
  current_time=$1
  pg_failed_list=()

  # Remove the lines NOT having the word "current" as these are the old
  # PGs that are no longer in transition.
  sed -i '/current/!d' ${transient_pgs_file}

  # For all remaining lines (PGs currently inactive), check for PGs which
  # are older than the limit.
  IFS=$'\n' read -d '' -r -a lines < ${transient_pgs_file} || true
  for pg_data in "${lines[@]}"; do
    pg=$(echo ${pg_data} | awk '{print $1}')
    pg_ts=$(echo ${pg_data} | awk '{print $2}')
    if [[ $((${current_time} - ${pg_ts})) -gt ${pg_inactive_timeout} ]]; then
      pg_failed_list+=("${pg}")
    fi
  done

  # Remove the current designation for all PGs, as we no longer need it
  # for this check.
  sed -i 's/ current//g' ${transient_pgs_file}

  cat ${transient_pgs_file}
  if [[ ${#pg_failed_list[@]} -gt 0 ]]; then
    echo "The following PGs have been in a transient state for longer than ${pg_inactive_timeout} seconds:"
    echo ${pg_failed_list[*]}
    exit 1
  fi
}

function update_transient_pgs_file() {
  pg=$1
  current_ts=$2

  pg_data=$(grep "${pg} " ${transient_pgs_file} || true)
  if [[ "${pg_data}" == "" ]]; then
    echo "${pg} ${current_ts} current" >> ${transient_pgs_file}
  else
    # Add the word "current" to the end of the line which has this PG
    sed -i '/^'"${pg} "'/s/$/ current/' ${transient_pgs_file}
  fi
}

function check_transient_pgs() {
  local -n pg_array=$1

  # Use a temporary transient PGs file to track the amount of time PGs
  # are spending in a transitional state.
  now=$(date +%s)
  for pg in "${pg_array[@]}"; do
    update_transient_pgs_file ${pg} ${now}
  done
  check_transient_pgs_file ${now}
}

function check_pgs() {
  pgs_transitioning=false

  ceph --cluster ${CLUSTER} pg dump_stuck inactive -f json-pretty > ${stuck_pgs_file}

  # Check if there are any stuck PGs, which could indicate a serious problem
  # if it does not resolve itself soon.
  stuck_pgs=(`cat ${stuck_pgs_file} | awk -F "\"" '/pgid/{print $4}'`)
  if [[ ${#stuck_pgs[*]} -gt 0 ]]; then
    # We have at least one stuck pg
    echo "Some PGs are stuck: "
    echo ${stuck_pgs[*]}
    # Not a critical error - yet
    pgs_transitioning=true

    # Check to see if any transitioning PG has been stuck for too long
    check_transient_pgs stuck_pgs
  else
    # Examine the PGs that have non-active states. Consider those PGs that
    # are in a "premerge" state to be similar to active. "premerge" PGs may
    # stay in that state for several minutes, and this is considered ok.
    ceph --cluster ${CLUSTER} pg ls -f json-pretty | grep '"pgid":\|"state":' | grep -v -E "active|premerge" | grep -B1 '"state":' > ${inactive_pgs_file} || true

    # If the inactive pgs file is non-empty, there are some inactive pgs in the cluster.
    inactive_pgs=(`cat ${inactive_pgs_file} | awk -F "\"" '/pgid/{print $4}'`)
    echo "This is the list of inactive pgs in the cluster: "
    echo ${inactive_pgs[*]}

    echo "Checking to see if the cluster is rebalancing or recovering some PG's..."

    # Check for PGs that are down. These are critical errors.
    down_pgs=(`cat ${inactive_pgs_file} | grep -B1 'down' | awk -F "\"" '/pgid/{print $4}'`)
    if [[ ${#down_pgs[*]} -gt 0 ]]; then
      # Some PGs could be down. This is a really bad situation and the test must fail.
      echo "Some PGs are down: "
      echo ${down_pgs[*]}
      echo "This is a critical error, exiting. "
      exit 1
    fi

    # Check for PGs that are in some transient state due to rebalancing,
    # peering or backfilling. If we see other states which are not in the
    # following list of states, then we likely have a problem and need to
    # exit.
    transient_states='peer|recover|activating|creating|unknown'
    non_transient_pgs=(`cat ${inactive_pgs_file} | grep '"state":' | grep -v -E "${transient_states}" || true`)
    if [[ ${#non_transient_pgs[*]} -gt 0 ]]; then
      # Some PGs could be inactive and not peering. Better we fail.
      echo "We don't have down/stuck PGs, but we have some inactive pgs that"
      echo "are not in the list of allowed transient states: "
      pg_list=(`sed -n '/peer\|recover\|activating\|creating\|unknown/{s/.*//;x;d;};x;p;${x;p;}' ${inactive_pgs_file} | sed '/^$/d' | awk -F "\"" '/pgid/{print $4}'`)
      echo ${pg_list[*]}
      echo ${non_transient_pgs[*]}
      # Critical error. Fail/exit the script
      exit 1
    fi

    # Check and note which PGs are in a transient state. This script
    # will allow these transient states for a period of time
    # (time_between_retries * max_retries seconds).
    transient_pgs=(`cat ${inactive_pgs_file} | grep -B1 -E "${transient_states}" | awk -F "\"" '/pgid/{print $4}'`)
    if [[ ${#transient_pgs[*]} -gt 0 ]]; then
      # Some PGs are not in an active state but peering and/or cluster is recovering
      echo "Some PGs are peering and/or cluster is recovering: "
      echo ${transient_pgs[*]}
      echo "This is normal but will wait a while to verify the PGs are not stuck in a transient state."
      # not critical, just wait
      pgs_transitioning=true

      # Check to see if any transitioning PG has been stuck for too long
      check_transient_pgs transient_pgs
    fi
  fi
}

function pg_validation() {
  retries=0
  time_between_retries=3
  max_retries=60
  pg_inactive_timeout=30
  pgs_transitioning=false
  stuck_pgs_file=$(mktemp -p /tmp)
  inactive_pgs_file=$(mktemp -p /tmp)
  transient_pgs_file=$(mktemp -p /tmp)

  # Check this over a period of retries. Fail/stop if any critical errors found.
  while check_pgs && [[ "${pgs_transitioning}" == "true" ]] && [[ retries -lt ${max_retries} ]]; do
    echo "Sleep for a bit waiting on the pg(s) to become active/unstuck..."
    sleep ${time_between_retries}
    ((retries=retries+1))
  done

  # Check if transitioning PGs have gone active after retries have expired
  if [[ retries -ge ${max_retries} ]]; then
    ((timeout_sec=${time_between_retries}*${max_retries}))
    echo "Some PGs have not become active after ${timeout_sec} seconds. Exiting..."
    # This is ok, as the autoscaler might still be adjusting the PGs.
  fi
}

function check_ceph_osd_crush_weight(){
  OSDS_WITH_ZERO_WEIGHT=(`ceph --cluster ${CLUSTER} osd df -f json-pretty | awk -F"[, ]*" '/"crush_weight":/{if ($3 == 0) print $3}'`)
  if [[ ${#OSDS_WITH_ZERO_WEIGHT[*]} -eq 0 ]]; then
    echo "All OSDs from namespace have crush weight!"
  else
    echo "OSDs from namespace have zero crush weight"
    exit 1
  fi
}

check_osd_count
mgr_validation

OSD_POOLS_DETAILS=$(ceph osd pool ls detail -f json-pretty)
OSD_CRUSH_RULE_DUMP=$(ceph osd crush rule dump -f json-pretty)
PG_STAT=$(ceph pg stat -f json-pretty)

ceph -s
pg_validation
pool_validation
pool_failuredomain_validation
check_failure_domain_count_per_pool
check_cluster_status
check_recovery_flags
check_ceph_osd_crush_weight

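This script backs the chart's test pod; assuming it is registered as a Helm test hook (as OpenStack-Helm charts conventionally do) and a release named ceph-client in the ceph namespace, the whole validation suite can be triggered after deployment with:

```
helm test ceph-client --namespace ceph
```
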
ceph-client/templates/bin/_init-dirs.sh.tpl (new file, 44 lines)
@@ -0,0 +1,44 @@
#!/bin/bash

{{/*
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at

   http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/}}

set -ex
export LC_ALL=C
: "${HOSTNAME:=$(uname -n)}"
: "${MGR_NAME:=${HOSTNAME}}"
: "${MDS_NAME:=mds-${HOSTNAME}}"
: "${MDS_BOOTSTRAP_KEYRING:=/var/lib/ceph/bootstrap-mds/${CLUSTER}.keyring}"
: "${OSD_BOOTSTRAP_KEYRING:=/var/lib/ceph/bootstrap-osd/${CLUSTER}.keyring}"

for keyring in ${OSD_BOOTSTRAP_KEYRING} ${MDS_BOOTSTRAP_KEYRING}; do
  mkdir -p "$(dirname "$keyring")"
done

# Let's create the ceph directories
for DIRECTORY in mds tmp mgr crash; do
  mkdir -p "/var/lib/ceph/${DIRECTORY}"
done

# Create socket directory
mkdir -p /run/ceph

# Create the MDS directory
mkdir -p "/var/lib/ceph/mds/${CLUSTER}-${MDS_NAME}"

# Create the MGR directory
mkdir -p "/var/lib/ceph/mgr/${CLUSTER}-${MGR_NAME}"

# Adjust the owner of all those directories
chown -R ceph. /run/ceph/ /var/lib/ceph/*

86
ceph-client/templates/bin/mds/_start.sh.tpl
Normal file
86
ceph-client/templates/bin/mds/_start.sh.tpl
Normal file
@ -0,0 +1,86 @@
|
||||
#!/bin/bash
|
||||
set -ex
|
||||
export LC_ALL=C
|
||||
: "${HOSTNAME:=$(uname -n)}"
|
||||
: "${CEPHFS_CREATE:=0}"
|
||||
: "${CEPHFS_NAME:=cephfs}"
|
||||
: "${CEPHFS_DATA_POOL:=${CEPHFS_NAME}_data}"
|
||||
: "${CEPHFS_DATA_POOL_PG:=8}"
|
||||
: "${CEPHFS_METADATA_POOL:=${CEPHFS_NAME}_metadata}"
|
||||
: "${CEPHFS_METADATA_POOL_PG:=8}"
|
||||
: "${MDS_NAME:=mds-${HOSTNAME}}"
|
||||
: "${ADMIN_KEYRING:=/etc/ceph/${CLUSTER}.client.admin.keyring}"
|
||||
: "${MDS_KEYRING:=/var/lib/ceph/mds/${CLUSTER}-${MDS_NAME}/keyring}"
|
||||
: "${MDS_BOOTSTRAP_KEYRING:=/var/lib/ceph/bootstrap-mds/${CLUSTER}.keyring}"
|
||||
: "${CEPH_CONF:="/etc/ceph/${CLUSTER}.conf"}"
|
||||
|
||||
{{ include "helm-toolkit.snippets.mon_host_from_k8s_ep" . }}
|
||||
|
||||
if [[ ! -e ${CEPH_CONF}.template ]]; then
|
||||
echo "ERROR- ${CEPH_CONF}.template must exist; get it from your existing mon"
|
||||
exit 1
|
||||
else
|
||||
ENDPOINT=$(mon_host_from_k8s_ep "${NAMESPACE}" ceph-mon-discovery)
|
||||
if [[ "${ENDPOINT}" == "" ]]; then
|
||||
/bin/sh -c -e "cat ${CEPH_CONF}.template | tee ${CEPH_CONF}" || true
|
||||
else
|
||||
/bin/sh -c -e "cat ${CEPH_CONF}.template | sed 's#mon_host.*#mon_host = ${ENDPOINT}#g' | tee ${CEPH_CONF}" || true
|
||||
fi
|
||||
fi
|
||||
|
||||
# Check to see if we are a new MDS
|
||||
if [ ! -e "${MDS_KEYRING}" ]; then
|
||||
|
||||
if [ -e "${ADMIN_KEYRING}" ]; then
|
||||
KEYRING_OPT=(--name client.admin --keyring "${ADMIN_KEYRING}")
|
||||
elif [ -e "${MDS_BOOTSTRAP_KEYRING}" ]; then
|
||||
KEYRING_OPT=(--name client.bootstrap-mds --keyring "${MDS_BOOTSTRAP_KEYRING}")
|
||||
else
|
||||
echo "ERROR- Failed to bootstrap MDS: could not find admin or bootstrap-mds keyring. You can extract it from your current monitor by running 'ceph auth get client.bootstrap-mds -o ${MDS_BOOTSTRAP_KEYRING}"
|
||||
exit 1
|
||||
fi
|
||||
|
||||
timeout 10 ceph --cluster "${CLUSTER}" "${KEYRING_OPT[@]}" health || exit 1
|
||||
|
||||
# Generate the MDS key
|
||||
ceph --cluster "${CLUSTER}" "${KEYRING_OPT[@]}" auth get-or-create "mds.${MDS_NAME}" osd 'allow rwx' mds 'allow' mon 'allow profile mds' -o "${MDS_KEYRING}"
|
||||
chown ceph. "${MDS_KEYRING}"
|
||||
chmod 600 "${MDS_KEYRING}"
|
||||
|
||||
fi
|
||||
|
||||
# NOTE (leseb): having the admin keyring is really a security issue
|
||||
# If we need to bootstrap a MDS we should probably create the following on the monitors
|
||||
# I understand that this handy to do this here
|
||||
# but having the admin key inside every container is a concern
|
||||
|
||||
# Create the Ceph filesystem, if necessary
|
||||
if [ $CEPHFS_CREATE -eq 1 ]; then
|
||||
|
||||
if [[ ! -e ${ADMIN_KEYRING} ]]; then
|
||||
echo "ERROR- ${ADMIN_KEYRING} must exist; get it from your existing mon"
|
||||
exit 1
|
||||
fi
|
||||
|
||||
if [[ "$(ceph --cluster "${CLUSTER}" fs ls | grep -c name:.${CEPHFS_NAME},)" -eq 0 ]]; then
|
||||
# Make sure the specified data pool exists
|
||||
if ! ceph --cluster "${CLUSTER}" osd pool stats ${CEPHFS_DATA_POOL} > /dev/null 2>&1; then
|
||||
ceph --cluster "${CLUSTER}" osd pool create ${CEPHFS_DATA_POOL} ${CEPHFS_DATA_POOL_PG}
|
||||
fi
|
||||
|
||||
# Make sure the specified metadata pool exists
|
||||
if ! ceph --cluster "${CLUSTER}" osd pool stats ${CEPHFS_METADATA_POOL} > /dev/null 2>&1; then
|
||||
ceph --cluster "${CLUSTER}" osd pool create ${CEPHFS_METADATA_POOL} ${CEPHFS_METADATA_POOL_PG}
|
||||
fi
|
||||
|
||||
ceph --cluster "${CLUSTER}" fs new ${CEPHFS_NAME} ${CEPHFS_METADATA_POOL} ${CEPHFS_DATA_POOL}
|
||||
fi
|
||||
fi
|
||||
|
||||
# NOTE: prefixing this with exec causes it to die (commit suicide)
|
||||
/usr/bin/ceph-mds \
|
||||
--cluster "${CLUSTER}" \
|
||||
--setuser "ceph" \
|
||||
--setgroup "ceph" \
|
||||
-d \
|
||||
-i "${MDS_NAME}"
|
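
For reference, the auth get-or-create call above leaves an entry of roughly this shape in the cluster (a sketch only; the key is generated by the monitors and the MDS name shown here is hypothetical). It can be inspected with "ceph auth get mds.mds-host1":

    [mds.mds-host1]
        key = AQD...==
        caps mds = "allow"
        caps mon = "allow profile mds"
        caps osd = "allow rwx"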
44
ceph-client/templates/bin/pool/_calc.py.tpl
Normal file
@ -0,0 +1,44 @@
#!/usr/bin/env python3
# -*- coding: utf-8 -*-

{{/*
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at

http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/}}

# NOTE(portdirect): this is a simple approximation of https://ceph.com/pgcalc/

import math
import sys

replication = int(sys.argv[1])
number_of_osds = int(sys.argv[2])
percentage_data = float(sys.argv[3])
target_pgs_per_osd = int(sys.argv[4])

raw_pg_num_opt = target_pgs_per_osd * number_of_osds \
    * (math.ceil(percentage_data) / 100.0) / replication

raw_pg_num_min = number_of_osds / replication

if raw_pg_num_min >= raw_pg_num_opt:
    raw_pg_num = raw_pg_num_min
else:
    raw_pg_num = raw_pg_num_opt

max_pg_num = int(math.pow(2, math.ceil(math.log(raw_pg_num, 2))))
min_pg_num = int(math.pow(2, math.floor(math.log(raw_pg_num, 2))))

if min_pg_num >= (raw_pg_num * 0.75):
    print(min_pg_num)
else:
    print(max_pg_num)
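
A worked run of the approximation above, with hypothetical inputs (replication 3, 12 OSDs, a pool carrying 40% of the data, 100 target PGs per OSD):

    $ python3 pool-calc.py 3 12 40 100
    128

raw_pg_num_opt = 100 * 12 * 0.40 / 3 = 160; the neighbouring powers of two are 128 and 256, and since 128 >= 0.75 * 160 = 120, the lower one is chosen.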
460
ceph-client/templates/bin/pool/_init.sh.tpl
Normal file
@ -0,0 +1,460 @@
#!/bin/bash

{{/*
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at

http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/}}

set -ex
export LC_ALL=C

: "${ADMIN_KEYRING:=/etc/ceph/${CLUSTER}.client.admin.keyring}"
: "${CEPH_CONF:="/etc/ceph/${CLUSTER}.conf"}"

{{ include "helm-toolkit.snippets.mon_host_from_k8s_ep" . }}

if [[ ! -e ${CEPH_CONF}.template ]]; then
  echo "ERROR- ${CEPH_CONF}.template must exist; get it from your existing mon"
  exit 1
else
  ENDPOINT=$(mon_host_from_k8s_ep "${NAMESPACE}" ceph-mon-discovery)
  if [[ "${ENDPOINT}" == "" ]]; then
    /bin/sh -c -e "cat ${CEPH_CONF}.template | tee ${CEPH_CONF}" || true
  else
    /bin/sh -c -e "cat ${CEPH_CONF}.template | sed 's#mon_host.*#mon_host = ${ENDPOINT}#g' | tee ${CEPH_CONF}" || true
  fi
fi

if [[ ! -e ${ADMIN_KEYRING} ]]; then
  echo "ERROR- ${ADMIN_KEYRING} must exist; get it from your existing mon"
  exit 1
fi

function wait_for_pid() {
  tail --pid=$1 -f /dev/null
}

function wait_for_pgs () {
  echo "#### Start: Checking pgs ####"

  pgs_ready=0
  query='map({state: .state}) | group_by(.state) | map({state: .[0].state, count: length}) | .[] | select(.state | contains("active") or contains("premerge") | not)'

  if [[ $(ceph mon versions | awk '/version/{print $3}' | sort -n | head -n 1 | cut -d. -f1) -ge 14 ]]; then
    query=".pg_stats | ${query}"
  fi

  # Loop until all PGs have been active (or premerge) for three consecutive checks
  while [[ $pgs_ready -lt 3 ]]; do
    pgs_state=$(ceph --cluster "${CLUSTER}" pg ls -f json | jq -c "${query}")
    if [[ $(jq -c '. | select(.state | contains("peer") or contains("activating") or contains("recover") or contains("unknown") or contains("creating") | not)' <<< "${pgs_state}") ]]; then
      # If inactive PGs aren't in the allowed set of states above, fail
      echo "Failure, found inactive PGs that aren't in the allowed set of states"
      exit 1
    fi
    if [[ "${pgs_state}" ]]; then
      pgs_ready=0
    else
      (( pgs_ready+=1 ))
    fi
    sleep 3
  done
}
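
To make the jq pipeline above concrete, here is a hypothetical run against a minimal Nautilus-style payload (sample data, not from a live cluster; on Nautilus and later the query is prefixed with .pg_stats as shown):

    $ echo '{"pg_stats":[{"state":"active+clean"},{"state":"peering"}]}' | \
        jq -c '.pg_stats | map({state: .state}) | group_by(.state) | map({state: .[0].state, count: length}) | .[] | select(.state | contains("active") or contains("premerge") | not)'
    {"state":"peering","count":1}

An empty result means every PG is active (or premerge), which is what lets pgs_ready increment.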

function check_recovery_flags () {
  echo "### Start: Checking for flags that will prevent recovery"

  # Ensure there are no flags set that will prevent recovery of degraded PGs
  if [[ $(ceph osd stat | grep "norecover\|nobackfill\|norebalance") ]]; then
    ceph osd stat
    echo "Flags are set that prevent recovery of degraded PGs"
    exit 1
  fi
}

function check_osd_count() {
  echo "#### Start: Checking OSD count ####"
  noup_flag=$(ceph osd stat | awk '/noup/ {print $2}')
  osd_stat=$(ceph osd stat -f json-pretty)
  num_osd=$(awk '/"num_osds"/{print $2}' <<< "$osd_stat" | cut -d, -f1)
  num_in_osds=$(awk '/"num_in_osds"/{print $2}' <<< "$osd_stat" | cut -d, -f1)
  num_up_osds=$(awk '/"num_up_osds"/{print $2}' <<< "$osd_stat" | cut -d, -f1)

  EXPECTED_OSDS={{.Values.conf.pool.target.osd}}
  EXPECTED_FINAL_OSDS={{.Values.conf.pool.target.final_osd}}
  REQUIRED_PERCENT_OF_OSDS={{.Values.conf.pool.target.required_percent_of_osds}}

  if [ ${num_up_osds} -gt ${EXPECTED_FINAL_OSDS} ]; then
    echo "More running OSDs (${num_up_osds}) than expected (${EXPECTED_FINAL_OSDS}). Please correct the expected value (.Values.conf.pool.target.final_osd)."
    exit 1
  fi

  MIN_OSDS=$(($EXPECTED_OSDS*$REQUIRED_PERCENT_OF_OSDS/100))
  if [ ${MIN_OSDS} -lt 1 ]; then
    MIN_OSDS=1
  fi

  if [ "${noup_flag}" ]; then
    osd_status=$(ceph osd dump -f json | jq -c '.osds[] | .state')
    count=0
    for osd in $osd_status; do
      if [[ "$osd" == *"up"* || "$osd" == *"new"* ]]; then
        ((count=count+1))
      fi
    done
    echo "Caution: noup flag is set. ${count} OSDs in up/new state. Required number of OSDs: ${MIN_OSDS}."
    if [ $MIN_OSDS -gt $count ]; then
      exit 1
    fi
  else
    if [ "${num_osd}" -eq 0 ]; then
      echo "There are no osds in the cluster"
      exit 1
    elif [ "${num_in_osds}" -ge "${MIN_OSDS}" ] && [ "${num_up_osds}" -ge "${MIN_OSDS}" ]; then
      echo "Required number of OSDs (${MIN_OSDS}) are UP and IN status"
    else
      echo "Required number of OSDs (${MIN_OSDS}) are NOT UP and IN status. Cluster shows OSD count=${num_osd}, UP=${num_up_osds}, IN=${num_in_osds}"
      exit 1
    fi
  fi
}
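
Because $(( ... )) is integer arithmetic, the percentage computation truncates; for example, with a hypothetical 3-OSD target and a 75% requirement:

    $ echo $((3*75/100))
    2

so two of the three OSDs must be up and in. The MIN_OSDS=1 guard covers single-OSD deployments, where the truncated result would otherwise be 0.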

function create_crushrule () {
  CRUSH_NAME=$1
  CRUSH_RULE=$2
  CRUSH_FAILURE_DOMAIN=$3
  CRUSH_DEVICE_CLASS=$4
  if ! ceph --cluster "${CLUSTER}" osd crush rule ls | grep -q "^${CRUSH_NAME}$"; then
    ceph --cluster "${CLUSTER}" osd crush rule $CRUSH_RULE $CRUSH_NAME default $CRUSH_FAILURE_DOMAIN $CRUSH_DEVICE_CLASS || true
  fi
}

# Set mons to use the msgr2 protocol on nautilus and later
if [[ $(ceph mon versions | awk '/version/{print $3}' | cut -d. -f1) -ge 14 ]]; then
  ceph --cluster "${CLUSTER}" mon enable-msgr2
fi
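
The version check used here (and repeatedly below) parses output of roughly this shape (abbreviated, hypothetical Nautilus cluster; the build hash and mon count vary):

    $ ceph mon versions
    {
        "ceph version 14.2.22 (...) nautilus (stable)": 3
    }
    $ ceph mon versions | awk '/version/{print $3}' | cut -d. -f1
    14

The wait_for_pgs variant adds sort -n | head -n 1 so that, in a mixed-version cluster, the lowest mon major version is the one compared.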

check_osd_count
{{- range $crush_rule := .Values.conf.pool.crush_rules -}}
{{- with $crush_rule }}
create_crushrule {{ .name }} {{ .crush_rule }} {{ .failure_domain }} {{ .device_class }}
{{- end }}
{{- end }}

function reweight_osds () {
  OSD_DF_OUTPUT=$(ceph --cluster "${CLUSTER}" osd df --format json-pretty)
  for OSD_ID in $(ceph --cluster "${CLUSTER}" osd ls); do
    OSD_EXPECTED_WEIGHT=$(echo "${OSD_DF_OUTPUT}" | grep -A7 "\bosd.${OSD_ID}\b" | awk '/"kb"/{ gsub(",",""); d= $2/1073741824 ; r = sprintf("%.2f", d); print r }');
    OSD_WEIGHT=$(echo "${OSD_DF_OUTPUT}" | grep -A3 "\bosd.${OSD_ID}\b" | awk '/crush_weight/{print $2}' | cut -d',' -f1)
    if [[ "${OSD_EXPECTED_WEIGHT}" != "0.00" ]] && [[ "${OSD_WEIGHT}" != "${OSD_EXPECTED_WEIGHT}" ]]; then
      ceph --cluster "${CLUSTER}" osd crush reweight osd.${OSD_ID} ${OSD_EXPECTED_WEIGHT};
    fi
  done
}
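
In the awk expression above, "kb" is the OSD's capacity in KiB and 1073741824 = 2^30, so the division yields TiB, the unit CRUSH weights are expressed in. A quick sanity check with a hypothetical 1 TiB OSD:

    $ echo '"kb": 1073741824,' | awk '/"kb"/{ gsub(",",""); d = $2/1073741824; r = sprintf("%.2f", d); print r }'
    1.00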

function enable_autoscaling () {
  CEPH_MAJOR_VERSION=$(ceph mgr versions | awk '/version/{print $3}' | cut -d. -f1)

  if [[ ${CEPH_MAJOR_VERSION} -ge 16 ]]; then
    # Pacific introduced the noautoscale flag to make this simpler
    ceph osd pool unset noautoscale
  else
    if [[ ${CEPH_MAJOR_VERSION} -eq 14 ]]; then
      ceph mgr module enable pg_autoscaler # only required for nautilus
    fi
    ceph config set global osd_pool_default_pg_autoscale_mode on
  fi
}

function disable_autoscaling () {
  CEPH_MAJOR_VERSION=$(ceph mgr versions | awk '/version/{print $3}' | cut -d. -f1)

  if [[ ${CEPH_MAJOR_VERSION} -ge 16 ]]; then
    # Pacific introduced the noautoscale flag to make this simpler
    ceph osd pool set noautoscale
  else
    if [[ ${CEPH_MAJOR_VERSION} -eq 14 ]]; then
      ceph mgr module disable pg_autoscaler # only required for nautilus
    fi
    ceph config set global osd_pool_default_pg_autoscale_mode off
  fi
}

function set_cluster_flags () {
  if [[ -n "${CLUSTER_SET_FLAGS}" ]]; then
    for flag in ${CLUSTER_SET_FLAGS}; do
      ceph osd set ${flag}
    done
  fi
}

function unset_cluster_flags () {
  if [[ -n "${CLUSTER_UNSET_FLAGS}" ]]; then
    for flag in ${CLUSTER_UNSET_FLAGS}; do
      ceph osd unset ${flag}
    done
  fi
}

function run_cluster_commands () {
{{- range .Values.conf.features.cluster_commands }}
  ceph --cluster "${CLUSTER}" {{ . }}
{{- end }}
}

# Helper function to set pool properties only if the target value differs from
# the current value, to avoid needless osd pool set calls; it echoes the target
# value so callers can capture the property's new state
function set_pool_property() {
  POOL_NAME=$1
  PROPERTY_NAME=$2
  CURRENT_PROPERTY_VALUE=$3
  TARGET_PROPERTY_VALUE=$4
  REALLY_MEAN_IT=""

  if [[ "${PROPERTY_NAME}" == "size" ]]; then
    REALLY_MEAN_IT="--yes-i-really-mean-it"
  fi

  if [[ "${CURRENT_PROPERTY_VALUE}" != "${TARGET_PROPERTY_VALUE}" ]]; then
    ceph --cluster "${CLUSTER}" osd pool set "${POOL_NAME}" "${PROPERTY_NAME}" "${TARGET_PROPERTY_VALUE}" ${REALLY_MEAN_IT}
  fi

  echo "${TARGET_PROPERTY_VALUE}"
}

function create_pool () {
  POOL_APPLICATION=$1
  POOL_NAME=$2
  POOL_REPLICATION=$3
  POOL_PLACEMENT_GROUPS=$4
  POOL_CRUSH_RULE=$5
  POOL_PROTECTION=$6
  PG_NUM_MIN=$7
  if ! ceph --cluster "${CLUSTER}" osd pool stats "${POOL_NAME}" > /dev/null 2>&1; then
    if [[ ${POOL_PLACEMENT_GROUPS} -gt 0 ]]; then
      ceph --cluster "${CLUSTER}" osd pool create "${POOL_NAME}" ${POOL_PLACEMENT_GROUPS}
    else
      ceph --cluster "${CLUSTER}" osd pool create "${POOL_NAME}" ${PG_NUM_MIN} --pg-num-min ${PG_NUM_MIN}
    fi
    while [ $(ceph --cluster "${CLUSTER}" -s | grep creating -c) -gt 0 ]; do echo -n .;sleep 1; done
    ceph --cluster "${CLUSTER}" osd pool application enable "${POOL_NAME}" "${POOL_APPLICATION}"
  fi

  # 'tr' and 'awk' are needed here to strip off text that is echoed before the JSON string.
  # In some cases, errors/warnings are written to stdout and the JSON doesn't parse correctly.
  pool_values=$(ceph --cluster "${CLUSTER}" osd pool get "${POOL_NAME}" all -f json | tr -d '\n' | awk -F{ '{print "{" $2}')

  if [[ $(ceph mgr versions | awk '/version/{print $3}' | cut -d. -f1) -ge 14 ]]; then
    if [[ "${ENABLE_AUTOSCALER}" == "true" ]]; then
      pg_num=$(jq -r '.pg_num' <<< "${pool_values}")
      pgp_num=$(jq -r '.pgp_num' <<< "${pool_values}")
      pg_num_min=$(jq -r '.pg_num_min' <<< "${pool_values}")
      pg_autoscale_mode=$(jq -r '.pg_autoscale_mode' <<< "${pool_values}")
      # set pg_num_min to PG_NUM_MIN before enabling the autoscaler
      if [[ ${pg_num} -lt ${PG_NUM_MIN} ]]; then
        pg_autoscale_mode=$(set_pool_property "${POOL_NAME}" pg_autoscale_mode "${pg_autoscale_mode}" "off")
        pg_num=$(set_pool_property "${POOL_NAME}" pg_num "${pg_num}" "${PG_NUM_MIN}")
        pgp_num=$(set_pool_property "${POOL_NAME}" pgp_num "${pgp_num}" "${PG_NUM_MIN}")
      fi
      pg_num_min=$(set_pool_property "${POOL_NAME}" pg_num_min "${pg_num_min}" "${PG_NUM_MIN}")
      pg_autoscale_mode=$(set_pool_property "${POOL_NAME}" pg_autoscale_mode "${pg_autoscale_mode}" "on")
    else
      pg_autoscale_mode=$(jq -r '.pg_autoscale_mode' <<< "${pool_values}")
      pg_autoscale_mode=$(set_pool_property "${POOL_NAME}" pg_autoscale_mode "${pg_autoscale_mode}" "off")
    fi
  fi
  #
  # Make sure pool is not protected after creation AND expansion so we can manipulate its settings.
  # Final protection settings are applied once parameters (size, pg) have been adjusted.
  #
  nosizechange=$(jq -r '.nosizechange' <<< "${pool_values}")
  nopgchange=$(jq -r '.nopgchange' <<< "${pool_values}")
  nodelete=$(jq -r '.nodelete' <<< "${pool_values}")
  size=$(jq -r '.size' <<< "${pool_values}")
  crush_rule=$(jq -r '.crush_rule' <<< "${pool_values}")
  nosizechange=$(set_pool_property "${POOL_NAME}" nosizechange "${nosizechange}" "false")
  nopgchange=$(set_pool_property "${POOL_NAME}" nopgchange "${nopgchange}" "false")
  nodelete=$(set_pool_property "${POOL_NAME}" nodelete "${nodelete}" "false")
  size=$(set_pool_property "${POOL_NAME}" size "${size}" "${POOL_REPLICATION}")
  crush_rule=$(set_pool_property "${POOL_NAME}" crush_rule "${crush_rule}" "${POOL_CRUSH_RULE}")
  # set pg_num on the pool
  if [[ ${POOL_PLACEMENT_GROUPS} -gt 0 ]]; then
    pg_num=$(jq -r ".pg_num" <<< "${pool_values}")
    pgp_num=$(jq -r ".pgp_num" <<< "${pool_values}")
    pg_num=$(set_pool_property "${POOL_NAME}" pg_num "${pg_num}" "${POOL_PLACEMENT_GROUPS}")
    pgp_num=$(set_pool_property "${POOL_NAME}" pgp_num "${pgp_num}" "${POOL_PLACEMENT_GROUPS}")
  fi

  # This handles the cluster expansion case, where replication may have changed since initialization
  if [ ${POOL_REPLICATION} -gt 1 ]; then
    min_size=$(jq -r '.min_size' <<< "${pool_values}")
    EXPECTED_POOLMINSIZE=$((POOL_REPLICATION-1))
    min_size=$(set_pool_property "${POOL_NAME}" min_size "${min_size}" "${EXPECTED_POOLMINSIZE}")
  fi
  #
  # Handling of .Values.conf.pool.target.protected:
  # Possible settings
  # - true  | 1 = Protect the pools after they get created
  # - false | 0 = Do not protect the pools once they get created and let Ceph defaults apply
  # - Absent    = Do not protect the pools once they get created and let Ceph defaults apply
  #
  # If protection is not requested through values.yaml, just use the Ceph defaults. With Luminous we do not
  # apply any protection to the pools when they get created.
  #
  # Note: If the /etc/ceph/ceph.conf file modifies the defaults the deployment will fail on pool creation
  # - nosizechange = Do not allow size and min_size changes on the pool
  # - nodelete = Do not allow deletion of the pool
  #
  if [ "x${POOL_PROTECTION}" == "xtrue" ] || [ "x${POOL_PROTECTION}" == "x1" ]; then
    nosizechange=$(set_pool_property "${POOL_NAME}" nosizechange "${nosizechange}" "true")
    nodelete=$(set_pool_property "${POOL_NAME}" nodelete "${nodelete}" "true")
  fi
}
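
The protection block above is driven by chart values; a minimal values.yaml sketch (key paths taken from the .Values.conf.pool.target.* references in this script, the value itself hypothetical):

    conf:
      pool:
        target:
          protected: true   # apply nosizechange and nodelete to each pool after tuning

With protected absent or false, the pools are left with the Ceph defaults, as the comment block describes.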

function manage_pool () {
  POOL_APPLICATION=$1
  POOL_NAME=$2
  POOL_REPLICATION=$3
  TOTAL_DATA_PERCENT=$4
  TARGET_PG_PER_OSD=$5
  POOL_CRUSH_RULE=$6
  POOL_QUOTA=$7
  POOL_PROTECTION=$8
  CLUSTER_CAPACITY=$9
  POOL_PG_NUM_MIN=${10}
  TOTAL_OSDS={{.Values.conf.pool.target.osd}}
  POOL_PLACEMENT_GROUPS=0
  if [[ -n "${TOTAL_DATA_PERCENT}" ]]; then
    if [[ "${ENABLE_AUTOSCALER}" == "false" ]] || [[ $(ceph mgr versions | awk '/version/{print $3}' | cut -d. -f1) -lt 14 ]]; then
      POOL_PLACEMENT_GROUPS=$(python3 /tmp/pool-calc.py ${POOL_REPLICATION} ${TOTAL_OSDS} ${TOTAL_DATA_PERCENT} ${TARGET_PG_PER_OSD})
    fi
  fi
  create_pool "${POOL_APPLICATION}" "${POOL_NAME}" "${POOL_REPLICATION}" "${POOL_PLACEMENT_GROUPS}" "${POOL_CRUSH_RULE}" "${POOL_PROTECTION}" "${POOL_PG_NUM_MIN}"
  ceph --cluster "${CLUSTER}" osd pool set-quota "${POOL_NAME}" max_bytes "${POOL_QUOTA}"
}

# Helper to convert TiB, TB, GiB, GB, MiB, MB, KiB, KB, or bytes to bytes
function convert_to_bytes() {
  value=${1}
  value="$(echo "${value}" | sed 's/TiB/ \* 1024GiB/g')"
  value="$(echo "${value}" | sed 's/TB/ \* 1000GB/g')"
  value="$(echo "${value}" | sed 's/GiB/ \* 1024MiB/g')"
  value="$(echo "${value}" | sed 's/GB/ \* 1000MB/g')"
  value="$(echo "${value}" | sed 's/MiB/ \* 1024KiB/g')"
  value="$(echo "${value}" | sed 's/MB/ \* 1000KB/g')"
  value="$(echo "${value}" | sed 's/KiB/ \* 1024/g')"
  value="$(echo "${value}" | sed 's/KB/ \* 1000/g')"
  python3 -c "print(int(${value}))"
}
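
A quick trace of the helper above (hypothetical input) shows how the chained sed substitutions reduce a unit suffix to plain arithmetic before python evaluates it:

    $ convert_to_bytes 1GiB
    # "1GiB" -> "1 * 1024MiB" -> "1 * 1024 * 1024KiB" -> "1 * 1024 * 1024 * 1024"
    1073741824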

set_cluster_flags
unset_cluster_flags
run_cluster_commands
reweight_osds

{{ $targetOSDCount := .Values.conf.pool.target.osd }}
{{ $targetFinalOSDCount := .Values.conf.pool.target.final_osd }}
{{ $targetPGperOSD := .Values.conf.pool.target.pg_per_osd }}
{{ $crushRuleDefault := .Values.conf.pool.default.crush_rule }}
{{ $targetQuota := .Values.conf.pool.target.quota | default 100 }}
{{ $targetProtection := .Values.conf.pool.target.protected | default "false" | quote | lower }}
{{ $targetPGNumMin := .Values.conf.pool.target.pg_num_min }}
cluster_capacity=$(ceph --cluster "${CLUSTER}" df -f json-pretty | grep '"total_bytes":' | head -n1 | awk '{print $2}' | tr -d ',')

# Check to make sure pool quotas don't exceed the expected cluster capacity in its final state
target_quota=$(python3 -c "print(int(${cluster_capacity} * {{ $targetFinalOSDCount }} / {{ $targetOSDCount }} * {{ $targetQuota }} / 100))")
quota_sum=0

{{- range $pool := .Values.conf.pool.spec -}}
{{- with $pool }}
# Read the pool quota from the pool spec (no quota if absent)
# Set pool_quota to 0 if target_quota is 0
[[ ${target_quota} -eq 0 ]] && pool_quota=0 || pool_quota="$(convert_to_bytes {{ .pool_quota | default 0 }})"
quota_sum=$(python3 -c "print(int(${quota_sum} + (${pool_quota} * {{ .replication }})))")
{{- end }}
{{- end }}

if [[ ${quota_sum} -gt ${target_quota} ]]; then
  echo "The sum of all pool quotas exceeds the target quota for the cluster"
  exit 1
fi
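
A worked example of the target_quota arithmetic (hypothetical values): a cluster currently reporting 12 TiB (cluster_capacity = 13194139533312 bytes) that was deployed with osd = 6 out of final_osd = 12 and quota = 80 yields

    $ python3 -c "print(int(13194139533312 * 12 / 6 * 80 / 100))"
    21110623253299

i.e. roughly 19.2 TiB of allowable quota against the projected full-size cluster; each pool's quota then counts replication times toward quota_sum.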

if [[ $(ceph mgr versions | awk '/version/{print $3}' | cut -d. -f1) -ge 14 ]] && [[ "${ENABLE_AUTOSCALER}" != "true" ]]; then
  disable_autoscaling
fi

# Track the manage_pool() PIDs in an array so we can wait for them to finish
MANAGE_POOL_PIDS=()

{{- range $pool := .Values.conf.pool.spec -}}
{{- with $pool }}
pool_name="{{ .name }}"
{{- if .rename }}
# If a renamed pool exists, that name should be used for idempotence
if [[ -n "$(ceph --cluster ${CLUSTER} osd pool ls | grep ^{{ .rename }}$)" ]]; then
  pool_name="{{ .rename }}"
fi
{{- end }}
# Read the pool quota from the pool spec (no quota if absent)
# Set pool_quota to 0 if target_quota is 0
[[ ${target_quota} -eq 0 ]] && pool_quota=0 || pool_quota="$(convert_to_bytes {{ .pool_quota | default 0 }})"
pool_crush_rule="{{ $crushRuleDefault }}"
{{- if .crush_rule }}
pool_crush_rule="{{ .crush_rule }}"
{{- end }}
pool_pg_num_min={{ $targetPGNumMin }}
{{- if .pg_num_min }}
pool_pg_num_min={{ .pg_num_min }}
{{- end }}
manage_pool {{ .application }} ${pool_name} {{ .replication }} {{ .percent_total_data }} {{ $targetPGperOSD }} $pool_crush_rule $pool_quota {{ $targetProtection }} ${cluster_capacity} ${pool_pg_num_min} &
MANAGE_POOL_PID=$!
MANAGE_POOL_PIDS+=( $MANAGE_POOL_PID )
{{- if .rename }}
# Wait for manage_pool() to finish for this pool before trying to rename the pool
wait_for_pid $MANAGE_POOL_PID
# If a rename value exists, the pool exists, and a pool with the rename value doesn't exist, rename the pool
pool_list=$(ceph --cluster ${CLUSTER} osd pool ls)
if [[ -n $(grep ^{{ .name }}$ <<< "${pool_list}") ]] &&
   [[ -z $(grep ^{{ .rename }}$ <<< "${pool_list}") ]]; then
  ceph --cluster "${CLUSTER}" osd pool rename "{{ .name }}" "{{ .rename }}"
  pool_name="{{ .rename }}"
fi
{{- end }}
{{- if and .delete .delete_all_pool_data }}
# Wait for manage_pool() to finish for this pool before trying to delete the pool
wait_for_pid $MANAGE_POOL_PID
# If delete is set to true and delete_all_pool_data is also true, delete the pool
if [[ "true" == "{{ .delete }}" ]] &&
   [[ "true" == "{{ .delete_all_pool_data }}" ]]; then
  ceph --cluster "${CLUSTER}" tell mon.* injectargs '--mon-allow-pool-delete=true'
  ceph --cluster "${CLUSTER}" osd pool set "${pool_name}" nodelete false
  ceph --cluster "${CLUSTER}" osd pool delete "${pool_name}" "${pool_name}" --yes-i-really-really-mean-it
  ceph --cluster "${CLUSTER}" tell mon.* injectargs '--mon-allow-pool-delete=false'
fi
{{- end }}
{{- end }}
{{- end }}

# Wait for all manage_pool() instances to finish before proceeding
for pool_pid in "${MANAGE_POOL_PIDS[@]}"; do
  wait_for_pid $pool_pid
done

if [[ $(ceph mgr versions | awk '/version/{print $3}' | cut -d. -f1) -ge 14 ]] && [[ "${ENABLE_AUTOSCALER}" == "true" ]]; then
  enable_autoscaling
fi

{{- if .Values.conf.pool.crush.tunables }}
ceph --cluster "${CLUSTER}" osd crush tunables {{ .Values.conf.pool.crush.tunables }}
{{- end }}

wait_for_pgs
check_recovery_flags
38
ceph-client/templates/bin/utils/_checkDNS.sh.tpl
Normal file
@ -0,0 +1,38 @@
#!/bin/bash

{{/*
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at

http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/}}

: "${CEPH_CONF:="/etc/ceph/${CLUSTER}.conf"}"
ENDPOINT="{$1}"

function check_mon_dns () {
  GREP_CMD=$(grep -rl 'ceph-mon' "${CEPH_CONF}")

  if [[ "${ENDPOINT}" == "{up}" ]]; then
    echo "If DNS is working, we are good here"
  elif [[ "${ENDPOINT}" != "" ]]; then
    if [[ ${GREP_CMD} != "" ]]; then
      # No DNS; write the Ceph mon IPs into ${CEPH_CONF}
      sh -c -e "cat ${CEPH_CONF}.template | sed 's/mon_host.*/mon_host = ${ENDPOINT}/g' | tee ${CEPH_CONF}" > /dev/null 2>&1
    else
      echo "endpoints are already cached in ${CEPH_CONF}"
      exit
    fi
  fi
}

check_mon_dns

exit
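
For context, the companion _checkDNS_start.sh script below invokes this helper in two ways; because $1 is wrapped in braces on assignment, the literal argument "up" becomes "{up}" and matches the first branch (endpoint value hypothetical):

    /tmp/utils-checkDNS.sh up                # DNS resolves; leave ceph.conf alone
    /tmp/utils-checkDNS.sh "1.2.3.4:6789"    # no DNS; rewrite mon_host with the endpoint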
70
ceph-client/templates/bin/utils/_checkDNS_start.sh.tpl
Normal file
@ -0,0 +1,70 @@
#!/bin/bash

{{/*
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at

http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/}}

set -xe

{{ include "helm-toolkit.snippets.mon_host_from_k8s_ep" . }}

{{- $rgwNameSpaces := "" }}
{{- $sep := "" }}
{{- range $_, $ns := .Values.endpoints.ceph_object_store.endpoint_namespaces }}
{{- $rgwNameSpaces = printf "%s%s%s" $rgwNameSpaces $sep $ns }}
{{- $sep = " " }}
{{- end }}

rgwNameSpaces={{- printf "\"%s\"" $rgwNameSpaces }}

function check_mon_dns {
  NS=${1}
  # RGWs and the rgw namespace may not exist; check for this so the script does not fail
  if [[ $(kubectl get ns ${NS} -o json | jq -r '.status.phase') == "Active" ]]; then
    DNS_CHECK=$(getent hosts ceph-mon | head -n1)
    PODS=$(kubectl get pods --namespace=${NS} --selector=application=ceph --field-selector=status.phase=Running \
      --output=jsonpath='{range .items[*]}{.metadata.name}{"\n"}{end}' | grep -E 'ceph-mon|ceph-osd|ceph-mgr|ceph-mds|ceph-rgw')
    ENDPOINT=$(mon_host_from_k8s_ep "${NAMESPACE}" ceph-mon-discovery)

    if [[ ${PODS} == "" || "${ENDPOINT}" == "" ]]; then
      echo "Something went wrong; no pods or endpoints are available!"
    elif [[ ${DNS_CHECK} == "" ]]; then
      for POD in ${PODS}; do
        kubectl exec -t ${POD} --namespace=${NS} -- \
          sh -c -e "/tmp/utils-checkDNS.sh \"${ENDPOINT}\""
      done
    else
      for POD in ${PODS}; do
        kubectl exec -t ${POD} --namespace=${NS} -- \
          sh -c -e "/tmp/utils-checkDNS.sh up"
      done
    fi
  else
    echo "The namespace ${NS} is not ready, yet"
  fi
}

function watch_mon_dns {
  while true; do
    echo "checking DNS health"
    for myNS in ${NAMESPACE} ${rgwNameSpaces}; do
      check_mon_dns ${myNS} || true
    done
    echo "sleep 300 sec"
    sleep 300
  done
}

watch_mon_dns

exit
21
ceph-client/templates/bin/utils/_checkPGs.sh.tpl
Normal file
@ -0,0 +1,21 @@
#!/bin/bash

{{/*
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at

http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/}}

set -ex

mgrPod=$(kubectl get pods --namespace=${DEPLOYMENT_NAMESPACE} --selector=application=ceph --selector=component=mgr --output=jsonpath={.items[0].metadata.name} 2>/dev/null)

kubectl exec -t ${mgrPod} --namespace=${DEPLOYMENT_NAMESPACE} -- python3 /tmp/utils-checkPGs.py All 2>/dev/null
29
ceph-client/templates/bin/utils/_defragOSDs.sh.tpl
Normal file
@ -0,0 +1,29 @@
#!/bin/bash

{{/*
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at

http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/}}

set -ex

PODS=$(kubectl get pods --namespace=${NAMESPACE} \
  --selector=application=ceph,component=osd --field-selector=status.phase=Running \
  '--output=jsonpath={range .items[*]}{.metadata.name}{"\n"}{end}')

for POD in ${PODS}; do
  kubectl exec -t ${POD} -c ceph-osd-default --namespace=${NAMESPACE} -- \
    sh -c -e "/tmp/utils-defragOSDs.sh"
done


exit 0
57
ceph-client/templates/configmap-bin.yaml
Normal file
@ -0,0 +1,57 @@
{{/*
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at

http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/}}

{{- if and .Values.manifests.configmap_bin .Values.deployment.ceph }}
{{- $envAll := . }}
---
apiVersion: v1
kind: ConfigMap
metadata:
  name: ceph-client-bin
data:
{{- if .Values.images.local_registry.active }}
  image-repo-sync.sh: |
{{- include "helm-toolkit.scripts.image_repo_sync" . | indent 4 }}
{{- end }}

{{- if .Values.bootstrap.enabled }}
  bootstrap.sh: |
{{ tuple "bin/_bootstrap.sh.tpl" . | include "helm-toolkit.utils.template" | indent 4 }}
{{- end }}

  init-dirs.sh: |
{{ tuple "bin/_init-dirs.sh.tpl" . | include "helm-toolkit.utils.template" | indent 4 }}

  pool-init.sh: |
{{ tuple "bin/pool/_init.sh.tpl" . | include "helm-toolkit.utils.template" | indent 4 }}
  pool-calc.py: |
{{ tuple "bin/pool/_calc.py.tpl" . | include "helm-toolkit.utils.template" | indent 4 }}

  mds-start.sh: |
{{ tuple "bin/mds/_start.sh.tpl" . | include "helm-toolkit.utils.template" | indent 4 }}

  helm-tests.sh: |
{{ tuple "bin/_helm-tests.sh.tpl" . | include "helm-toolkit.utils.template" | indent 4 }}
  utils-checkDNS.sh: |
{{ tuple "bin/utils/_checkDNS.sh.tpl" . | include "helm-toolkit.utils.template" | indent 4 }}
  utils-checkDNS_start.sh: |
{{ tuple "bin/utils/_checkDNS_start.sh.tpl" . | include "helm-toolkit.utils.template" | indent 4 }}

  utils-checkPGs.sh: |
{{ tuple "bin/utils/_checkPGs.sh.tpl" . | include "helm-toolkit.utils.template" | indent 4 }}

  utils-defragOSDs.sh: |
{{ tuple "bin/utils/_defragOSDs.sh.tpl" . | include "helm-toolkit.utils.template" | indent 4 }}

{{- end }}
50
ceph-client/templates/configmap-etc-client.yaml
Normal file
@ -0,0 +1,50 @@
{{/*
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at

http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/}}

{{- define "ceph.configmap.etc" }}
{{- $configMapName := index . 0 }}
{{- $envAll := index . 1 }}
{{- with $envAll }}

{{- if .Values.deployment.ceph }}

{{- if empty .Values.conf.ceph.global.mon_host -}}
{{- $monHost := tuple "ceph_mon" "internal" "mon_msgr2" . | include "helm-toolkit.endpoints.host_and_port_endpoint_uri_lookup" }}
{{- $_ := $monHost | set .Values.conf.ceph.global "mon_host" -}}
{{- end -}}


{{- if empty .Values.conf.ceph.osd.cluster_network -}}
{{- $_ := .Values.network.cluster | set .Values.conf.ceph.osd "cluster_network" -}}
{{- end -}}

{{- if empty .Values.conf.ceph.osd.public_network -}}
{{- $_ := .Values.network.public | set .Values.conf.ceph.osd "public_network" -}}
{{- end -}}

---
apiVersion: v1
kind: ConfigMap
metadata:
  name: {{ $configMapName }}
data:
  ceph.conf: |
{{ include "helm-toolkit.utils.to_ini" .Values.conf.ceph | indent 4 }}

{{- end }}
{{- end }}
{{- end }}
{{- if .Values.manifests.configmap_etc }}
{{- list "ceph-client-etc" . | include "ceph.configmap.etc" }}
{{- end }}
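
The define-plus-list pattern above makes the ConfigMap reusable under other names; hypothetically, another template could render the same ceph.conf under a different name like so (the name "ceph-client-etc-secondary" is illustrative only):

    {{- list "ceph-client-etc-secondary" . | include "ceph.configmap.etc" }}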
150
ceph-client/templates/cronjob-checkPGs.yaml
Normal file
@ -0,0 +1,150 @@
{{/*
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at

http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/}}

{{- if .Values.manifests.cronjob_checkPGs }}
{{- $envAll := . }}

{{- $serviceAccountName := "ceph-pool-checkpgs" }}
{{ tuple $envAll "pool_checkpgs" $serviceAccountName | include "helm-toolkit.snippets.kubernetes_pod_rbac_serviceaccount" }}
---
apiVersion: rbac.authorization.k8s.io/v1
kind: Role
metadata:
  name: {{ $serviceAccountName }}
rules:
  - apiGroups:
      - ""
    resources:
      - pods
      - pods/exec
    verbs:
      - get
      - list
      - watch
      - create
---
apiVersion: rbac.authorization.k8s.io/v1
kind: RoleBinding
metadata:
  name: {{ $serviceAccountName }}
roleRef:
  apiGroup: rbac.authorization.k8s.io
  kind: Role
  name: {{ $serviceAccountName }}
subjects:
  - kind: ServiceAccount
    name: {{ $serviceAccountName }}
    namespace: {{ $envAll.Release.Namespace }}
---
apiVersion: batch/v1
kind: CronJob
metadata:
  name: {{ $serviceAccountName }}
  annotations:
{{ tuple $envAll | include "helm-toolkit.snippets.release_uuid" }}
  labels:
{{ tuple $envAll "ceph" "pool-checkpgs" | include "helm-toolkit.snippets.kubernetes_metadata_labels" | indent 4 }}
spec:
  schedule: {{ .Values.jobs.pool_checkPGs.cron | quote }}
  successfulJobsHistoryLimit: {{ .Values.jobs.pool_checkPGs.history.successJob }}
  failedJobsHistoryLimit: {{ .Values.jobs.pool_checkPGs.history.failJob }}
  concurrencyPolicy: {{ .Values.jobs.pool_checkPGs.concurrency.execPolicy }}
  startingDeadlineSeconds: {{ .Values.jobs.pool_checkPGs.startingDeadlineSecs }}
  jobTemplate:
    metadata:
      labels:
{{ tuple $envAll "ceph" "pool-checkpgs" | include "helm-toolkit.snippets.kubernetes_metadata_labels" | indent 8 }}
    spec:
      template:
        metadata:
          labels:
{{ tuple $envAll "ceph" "pool-checkpgs" | include "helm-toolkit.snippets.kubernetes_metadata_labels" | indent 12 }}
        spec:
          serviceAccountName: {{ $serviceAccountName }}
          nodeSelector:
            {{ .Values.labels.mgr.node_selector_key }}: {{ .Values.labels.mgr.node_selector_value }}
          initContainers:
{{ tuple $envAll "pool_checkpgs" list | include "helm-toolkit.snippets.kubernetes_entrypoint_init_container" | indent 12 }}
          containers:
            - name: {{ $serviceAccountName }}
{{ tuple $envAll "ceph_config_helper" | include "helm-toolkit.snippets.image" | indent 12 }}
              env:
                - name: DEPLOYMENT_NAMESPACE
                  valueFrom:
                    fieldRef:
                      fieldPath: metadata.namespace
              command:
                - /tmp/utils-checkPGs.sh
              volumeMounts:
                - name: pod-tmp
                  mountPath: /tmp
                - name: pod-etc-ceph
                  mountPath: /etc/ceph
                - name: ceph-client-bin
                  mountPath: /tmp/utils-checkPGs.sh
                  subPath: utils-checkPGs.sh
                  readOnly: true
                - name: ceph-client-etc
                  mountPath: /etc/ceph/ceph.conf
                  subPath: ceph.conf
                  readOnly: true
                - mountPath: /etc/ceph/ceph.client.admin.keyring
                  name: ceph-client-admin-keyring
                  readOnly: true
                  subPath: ceph.client.admin.keyring
                - mountPath: /etc/ceph/ceph.mon.keyring.seed
                  name: ceph-mon-keyring
                  readOnly: true
                  subPath: ceph.mon.keyring
                - mountPath: /var/lib/ceph/bootstrap-osd/ceph.keyring
                  name: ceph-bootstrap-osd-keyring
                  readOnly: true
                  subPath: ceph.keyring
                - mountPath: /var/lib/ceph/bootstrap-mds/ceph.keyring
                  name: ceph-bootstrap-mds-keyring
                  readOnly: true
                  subPath: ceph.keyring
          restartPolicy: Never
          hostNetwork: true
          volumes:
            - name: pod-tmp
              emptyDir: {}
            - name: pod-etc-ceph
              emptyDir: {}
            - name: ceph-client-bin
              configMap:
                name: ceph-client-bin
                defaultMode: 0555
            - name: ceph-client-etc
              configMap:
                name: ceph-client-etc
                defaultMode: 0444
            - name: ceph-client-admin-keyring
              secret:
                defaultMode: 420
                secretName: ceph-client-admin-keyring
            - name: ceph-mon-keyring
              secret:
                defaultMode: 420
                secretName: ceph-mon-keyring
            - name: ceph-bootstrap-osd-keyring
              secret:
                defaultMode: 420
                secretName: ceph-bootstrap-osd-keyring
            - name: ceph-bootstrap-mds-keyring
              secret:
                defaultMode: 420
                secretName: ceph-bootstrap-mds-keyring

{{- end }}
110
ceph-client/templates/cronjob-defragosds.yaml
Normal file
@ -0,0 +1,110 @@
{{/*
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at

http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/}}

{{- if .Values.manifests.cronjob_defragosds }}
{{- $envAll := . }}

{{- $serviceAccountName := "ceph-defragosds" }}
{{ tuple $envAll "ceph_defragosds" $serviceAccountName | include "helm-toolkit.snippets.kubernetes_pod_rbac_serviceaccount" }}
---
apiVersion: rbac.authorization.k8s.io/v1
kind: Role
metadata:
  name: {{ $serviceAccountName }}
rules:
  - apiGroups:
      - ""
    resources:
      - pods
      - pods/exec
    verbs:
      - get
      - list
      - watch
      - create
---
apiVersion: rbac.authorization.k8s.io/v1
kind: RoleBinding
metadata:
  name: {{ $serviceAccountName }}
roleRef:
  apiGroup: rbac.authorization.k8s.io
  kind: Role
  name: {{ $serviceAccountName }}
subjects:
  - kind: ServiceAccount
    name: {{ $serviceAccountName }}
    namespace: {{ $envAll.Release.Namespace }}
---
apiVersion: batch/v1
kind: CronJob
metadata:
  name: {{ $serviceAccountName }}
  annotations:
{{ tuple $envAll | include "helm-toolkit.snippets.release_uuid" }}
  labels:
{{ tuple $envAll "ceph" "ceph-defragosds" | include "helm-toolkit.snippets.kubernetes_metadata_labels" | indent 4 }}
spec:
  schedule: {{ .Values.jobs.ceph_defragosds.cron | quote }}
  successfulJobsHistoryLimit: {{ .Values.jobs.ceph_defragosds.history.successJob }}
  failedJobsHistoryLimit: {{ .Values.jobs.ceph_defragosds.history.failJob }}
  concurrencyPolicy: {{ .Values.jobs.ceph_defragosds.concurrency.execPolicy }}
  startingDeadlineSeconds: {{ .Values.jobs.ceph_defragosds.startingDeadlineSecs }}
  jobTemplate:
    metadata:
      labels:
{{ tuple $envAll "ceph" "ceph-defragosds" | include "helm-toolkit.snippets.kubernetes_metadata_labels" | indent 8 }}
    spec:
      template:
        metadata:
          labels:
{{ tuple $envAll "ceph" "ceph-defragosds" | include "helm-toolkit.snippets.kubernetes_metadata_labels" | indent 12 }}
        spec:
          serviceAccountName: {{ $serviceAccountName }}
          nodeSelector:
            {{ .Values.labels.mgr.node_selector_key }}: {{ .Values.labels.mgr.node_selector_value }}
          containers:
            - name: {{ $serviceAccountName }}
{{ tuple $envAll "ceph_config_helper" | include "helm-toolkit.snippets.image" | indent 12 }}
              env:
                - name: NAMESPACE
                  valueFrom:
                    fieldRef:
                      fieldPath: metadata.namespace
                - name: KUBECTL_PARAM
                  value: {{ tuple $envAll "ceph" "ceph-defragosd" | include "helm-toolkit.snippets.kubernetes_kubectl_params" }}
              command:
                - /tmp/utils-defragOSDs.sh
                - cron
              volumeMounts:
                - name: pod-tmp
                  mountPath: /tmp
                - name: pod-etc-ceph
                  mountPath: /etc/ceph
                - name: ceph-client-bin
                  mountPath: /tmp/utils-defragOSDs.sh
                  subPath: utils-defragOSDs.sh
                  readOnly: true
          restartPolicy: Never
          hostNetwork: true
          volumes:
            - name: pod-tmp
              emptyDir: {}
            - name: pod-etc-ceph
              emptyDir: {}
            - name: ceph-client-bin
              configMap:
                name: ceph-client-bin
                defaultMode: 0555
{{- end }}
130
ceph-client/templates/deployment-checkdns.yaml
Normal file
@ -0,0 +1,130 @@
{{/*
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at

http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/}}

{{- if and .Values.manifests.deployment_checkdns .Values.deployment.ceph }}
{{- $envAll := . }}

{{- $serviceAccountName := "ceph-checkdns" }}
{{/*
We give a different name to the RoleBinding resource (see the $cephRoleBindingName variable below).
This is necessary because the RoleBinding with the default name "ceph-checkdns" already exists in
the system, and its roleRef cannot be changed.
*/}}
{{- $cephRoleBindingName := "ceph-checkdns-rolebinding" }}

{{ tuple $envAll "checkdns" $serviceAccountName | include "helm-toolkit.snippets.kubernetes_pod_rbac_serviceaccount" }}
---
apiVersion: rbac.authorization.k8s.io/v1
kind: ClusterRole
metadata:
  name: {{ printf "%s-%s" $envAll.Release.Name "clusterrole-checkdns" | quote }}
rules:
  - apiGroups:
      - ""
    resources:
      - pods
      - endpoints
      - pods/exec
      - namespaces
    verbs:
      - get
      - list
      - watch
      - create
---

apiVersion: rbac.authorization.k8s.io/v1
kind: RoleBinding
metadata:
  name: {{ printf "%s-for-%s" $cephRoleBindingName $envAll.Release.Namespace }}
  namespace: {{ $envAll.Release.Namespace }}
roleRef:
  apiGroup: rbac.authorization.k8s.io
  kind: ClusterRole
  name: {{ printf "%s-%s" $envAll.Release.Name "clusterrole-checkdns" | quote }}
subjects:
  - kind: ServiceAccount
    name: {{ $serviceAccountName }}
    namespace: {{ $envAll.Release.Namespace }}
---

kind: Deployment
apiVersion: apps/v1
metadata:
  name: ceph-checkdns
  annotations:
    configmap-bin-hash: {{ tuple "configmap-bin.yaml" . | include "helm-toolkit.utils.hash" }}
  labels:
{{ tuple $envAll "ceph" "checkdns" | include "helm-toolkit.snippets.kubernetes_metadata_labels" | indent 4 }}
spec:
  selector:
    matchLabels:
{{ tuple $envAll "ceph" "checkdns" | include "helm-toolkit.snippets.kubernetes_metadata_labels" | indent 6 }}
  template:
    metadata:
      labels:
{{ tuple $envAll "ceph" "checkdns" | include "helm-toolkit.snippets.kubernetes_metadata_labels" | indent 8 }}
      annotations:
{{ tuple $envAll | include "helm-toolkit.snippets.release_uuid" | indent 8 }}
{{ dict "envAll" $envAll "podName" "ceph-checkdns" "containerNames" (list "ceph-checkdns" "init") | include "helm-toolkit.snippets.kubernetes_mandatory_access_control_annotation" | indent 8 }}
    spec:
{{ dict "envAll" $envAll "application" "checkdns" | include "helm-toolkit.snippets.kubernetes_pod_security_context" | indent 6 }}
      serviceAccountName: {{ $serviceAccountName }}
      affinity:
{{ tuple $envAll "ceph" "checkdns" | include "helm-toolkit.snippets.kubernetes_pod_anti_affinity" | indent 8 }}
{{ tuple $envAll "checkdns" | include "helm-toolkit.snippets.kubernetes_tolerations" | indent 6 }}
      nodeSelector:
        {{ .Values.labels.checkdns.node_selector_key }}: {{ .Values.labels.checkdns.node_selector_value }}
      initContainers:
{{ tuple $envAll "checkdns" list | include "helm-toolkit.snippets.kubernetes_entrypoint_init_container" | indent 8 }}
      hostNetwork: true
      dnsPolicy: {{ .Values.pod.dns_policy }}
      containers:
        - name: ceph-checkdns
{{ tuple $envAll "ceph_config_helper" | include "helm-toolkit.snippets.image" | indent 10 }}
{{ tuple $envAll $envAll.Values.pod.resources.checkdns | include "helm-toolkit.snippets.kubernetes_resources" | indent 10 }}
{{ dict "envAll" $envAll "application" "checkdns" "container" "checkdns" | include "helm-toolkit.snippets.kubernetes_container_security_context" | indent 10 }}
          env:
            - name: CLUSTER
              value: "ceph"
            - name: K8S_HOST_NETWORK
              value: "1"
            - name: NAMESPACE
              valueFrom:
                fieldRef:
                  apiVersion: v1
                  fieldPath: metadata.namespace
            - name: MON_PORT
              value: {{ tuple "ceph_mon" "internal" "mon" $envAll | include "helm-toolkit.endpoints.endpoint_port_lookup" | quote }}
            - name: MON_PORT_V2
              value: {{ tuple "ceph_mon" "internal" "mon_msgr2" $envAll | include "helm-toolkit.endpoints.endpoint_port_lookup" | quote }}
            - name: KUBECTL_PARAM
              value: {{ tuple $envAll "ceph" "checkdns" | include "helm-toolkit.snippets.kubernetes_kubectl_params" }}
          command:
            - /tmp/_start.sh
          volumeMounts:
            - name: pod-tmp
              mountPath: /tmp
            - name: ceph-client-bin
              mountPath: /tmp/_start.sh
              subPath: utils-checkDNS_start.sh
              readOnly: true
      volumes:
        - name: pod-tmp
          emptyDir: {}
        - name: ceph-client-bin
          configMap:
            name: ceph-client-bin
            defaultMode: 0555
{{- end }}
176
ceph-client/templates/deployment-mds.yaml
Normal file
@ -0,0 +1,176 @@
{{/*
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at

http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/}}

{{- define "livenessProbeTemplate" }}
tcpSocket:
  port: 6800
{{- end }}

{{- define "readinessProbeTemplate" }}
tcpSocket:
  port: 6800
{{- end }}

{{- if and .Values.manifests.deployment_mds ( and .Values.deployment.ceph .Values.conf.features.mds) }}
{{- $envAll := . }}

{{- $serviceAccountName := "ceph-mds" }}
{{ tuple $envAll "mds" $serviceAccountName | include "helm-toolkit.snippets.kubernetes_pod_rbac_serviceaccount" }}
---
kind: Deployment
apiVersion: apps/v1
metadata:
  name: ceph-mds
  annotations:
{{ tuple $envAll | include "helm-toolkit.snippets.release_uuid" }}
  labels:
{{ tuple $envAll "ceph" "mds" | include "helm-toolkit.snippets.kubernetes_metadata_labels" | indent 4 }}
spec:
  replicas: {{ .Values.pod.replicas.mds }}
  selector:
    matchLabels:
{{ tuple $envAll "ceph" "mds" | include "helm-toolkit.snippets.kubernetes_metadata_labels" | indent 6 }}
{{ tuple $envAll | include "helm-toolkit.snippets.kubernetes_upgrades_deployment" | indent 2 }}
  template:
    metadata:
      name: ceph-mds
      labels:
{{ tuple $envAll "ceph" "mds" | include "helm-toolkit.snippets.kubernetes_metadata_labels" | indent 8 }}
      annotations:
{{ tuple $envAll | include "helm-toolkit.snippets.release_uuid" | indent 8 }}
        configmap-bin-hash: {{ tuple "configmap-bin.yaml" . | include "helm-toolkit.utils.hash" }}
        configmap-etc-client-hash: {{ tuple "configmap-etc-client.yaml" . | include "helm-toolkit.utils.hash" }}
{{ dict "envAll" $envAll "podName" "ceph-mds" "containerNames" (list "ceph-mds" "ceph-init-dirs") | include "helm-toolkit.snippets.kubernetes_mandatory_access_control_annotation" | indent 8 }}
    spec:
{{ dict "envAll" $envAll "application" "mds" | include "helm-toolkit.snippets.kubernetes_pod_security_context" | indent 6 }}
      serviceAccountName: {{ $serviceAccountName }}
      affinity:
{{ tuple $envAll "ceph" "mds" | include "helm-toolkit.snippets.kubernetes_pod_anti_affinity" | indent 8 }}
{{ tuple $envAll "mds" | include "helm-toolkit.snippets.kubernetes_tolerations" | indent 6 }}
      nodeSelector:
        {{ .Values.labels.mds.node_selector_key }}: {{ .Values.labels.mds.node_selector_value }}
      initContainers:
{{ tuple $envAll "mds" list | include "helm-toolkit.snippets.kubernetes_entrypoint_init_container" | indent 8 }}
        - name: ceph-init-dirs
{{ tuple $envAll "ceph_mds" | include "helm-toolkit.snippets.image" | indent 10 }}
{{ dict "envAll" $envAll "application" "mds" "container" "init_dirs" | include "helm-toolkit.snippets.kubernetes_container_security_context" | indent 10 }}
          command:
            - /tmp/init-dirs.sh
          env:
            - name: CLUSTER
              value: "ceph"
          volumeMounts:
            - name: pod-tmp
              mountPath: /tmp
            - name: pod-run
              mountPath: /run
            - name: pod-etc-ceph
              mountPath: /etc/ceph
            - name: ceph-client-bin
              mountPath: /tmp/init-dirs.sh
              subPath: init-dirs.sh
              readOnly: true
            - name: pod-var-lib-ceph
              mountPath: /var/lib/ceph
              readOnly: false
            - name: pod-var-lib-ceph-crash
              mountPath: /var/lib/ceph/crash
              readOnly: false
      containers:
        - name: ceph-mds
{{ tuple $envAll "ceph_mds" | include "helm-toolkit.snippets.image" | indent 10 }}
{{ tuple $envAll $envAll.Values.pod.resources.mds | include "helm-toolkit.snippets.kubernetes_resources" | indent 10 }}
{{ dict "envAll" $envAll "application" "mds" "container" "mds" | include "helm-toolkit.snippets.kubernetes_container_security_context" | indent 10 }}
          command:
            - /tmp/mds-start.sh
          env:
            - name: CLUSTER
              value: "ceph"
            - name: CEPHFS_CREATE
              value: "1"
            - name: NAMESPACE
              valueFrom:
                fieldRef:
                  apiVersion: v1
                  fieldPath: metadata.namespace
            - name: MON_PORT
              value: {{ tuple "ceph_mon" "internal" "mon" $envAll | include "helm-toolkit.endpoints.endpoint_port_lookup" | quote }}
            - name: MON_PORT_V2
              value: {{ tuple "ceph_mon" "internal" "mon_msgr2" $envAll | include "helm-toolkit.endpoints.endpoint_port_lookup" | quote }}
          ports:
            - containerPort: 6800
{{ dict "envAll" . "component" "ceph" "container" "ceph-mds" "type" "liveness" "probeTemplate" (include "livenessProbeTemplate" . | fromYaml) | include "helm-toolkit.snippets.kubernetes_probe" | trim | indent 10 }}
{{ dict "envAll" . "component" "ceph" "container" "ceph-mds" "type" "readiness" "probeTemplate" (include "readinessProbeTemplate" . | fromYaml) | include "helm-toolkit.snippets.kubernetes_probe" | trim | indent 10 }}
          volumeMounts:
            - name: pod-tmp
              mountPath: /tmp
            - name: pod-run
              mountPath: /run
            - name: pod-etc-ceph
              mountPath: /etc/ceph
            - name: ceph-client-bin
              mountPath: /tmp/mds-start.sh
              subPath: mds-start.sh
              readOnly: true
            - name: ceph-client-bin
              mountPath: /tmp/utils-checkDNS.sh
              subPath: utils-checkDNS.sh
              readOnly: true
            - name: ceph-client-etc
              mountPath: /etc/ceph/ceph.conf.template
              subPath: ceph.conf
              readOnly: true
            - name: ceph-client-admin-keyring
              mountPath: /etc/ceph/ceph.client.admin.keyring
              subPath: ceph.client.admin.keyring
              readOnly: true
            - name: ceph-bootstrap-mds-keyring
              mountPath: /var/lib/ceph/bootstrap-mds/ceph.keyring
              subPath: ceph.keyring
              readOnly: false
            - name: pod-var-lib-ceph
              mountPath: /var/lib/ceph
              readOnly: false
            - name: pod-var-lib-ceph-crash
              mountPath: /var/lib/ceph/crash
              readOnly: false
      volumes:
        - name: pod-tmp
          emptyDir: {}
        - name: pod-run
          emptyDir:
            medium: "Memory"
        - name: pod-etc-ceph
          emptyDir: {}
        - name: ceph-client-etc
          configMap:
            name: ceph-client-etc
            defaultMode: 0444
        - name: ceph-client-bin
          configMap:
            name: ceph-client-bin
            defaultMode: 0555
        - name: pod-var-lib-ceph
          emptyDir: {}
        - name: pod-var-lib-ceph-crash
          hostPath:
            path: /var/lib/openstack-helm/ceph/crash
            type: DirectoryOrCreate
        - name: ceph-client-admin-keyring
          secret:
            secretName: {{ .Values.secrets.keyrings.admin }}
        - name: ceph-bootstrap-mds-keyring
          secret:
            secretName: {{ .Values.secrets.keyrings.mds }}
{{- end }}
83
ceph-client/templates/job-bootstrap.yaml
Normal file
@ -0,0 +1,83 @@
|
||||
{{/*
|
||||
Licensed under the Apache License, Version 2.0 (the "License");
|
||||
you may not use this file except in compliance with the License.
|
||||
You may obtain a copy of the License at
|
||||
|
||||
http://www.apache.org/licenses/LICENSE-2.0
|
||||
|
||||
Unless required by applicable law or agreed to in writing, software
|
||||
distributed under the License is distributed on an "AS IS" BASIS,
|
||||
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
See the License for the specific language governing permissions and
|
||||
limitations under the License.
|
||||
*/}}
|
||||
|
||||
{{- if and .Values.manifests.job_bootstrap .Values.bootstrap.enabled }}
|
||||
{{- $envAll := . }}
|
||||
|
||||
{{- $serviceAccountName := "ceph-client-bootstrap" }}
|
||||
{{ tuple $envAll "bootstrap" $serviceAccountName | include "helm-toolkit.snippets.kubernetes_pod_rbac_serviceaccount" }}
|
||||
---
|
||||
apiVersion: batch/v1
|
||||
kind: Job
|
||||
metadata:
|
||||
name: ceph-client-bootstrap
|
||||
labels:
|
||||
{{ tuple $envAll "ceph" "bootstrap" | include "helm-toolkit.snippets.kubernetes_metadata_labels" | indent 4 }}
|
||||
annotations:
|
||||
{{ tuple $envAll | include "helm-toolkit.snippets.release_uuid" }}
|
||||
spec:
|
||||
template:
|
||||
metadata:
|
||||
labels:
|
||||
{{ tuple $envAll "ceph" "bootstrap" | include "helm-toolkit.snippets.kubernetes_metadata_labels" | indent 8 }}
|
||||
annotations:
|
||||
{{ tuple $envAll | include "helm-toolkit.snippets.release_uuid" | indent 8 }}
|
||||
{{ dict "envAll" $envAll "podName" "ceph-client-bootstrap" "containerNames" (list "ceph-client-bootstrap" "init") | include "helm-toolkit.snippets.kubernetes_mandatory_access_control_annotation" | indent 8 }}
|
||||
spec:
|
||||
{{ dict "envAll" $envAll "application" "bootstrap" | include "helm-toolkit.snippets.kubernetes_pod_security_context" | indent 6 }}
|
||||
serviceAccountName: {{ $serviceAccountName }}
|
||||
restartPolicy: OnFailure
|
||||
nodeSelector:
|
||||
{{ .Values.labels.job.node_selector_key }}: {{ .Values.labels.job.node_selector_value }}
|
||||
initContainers:
|
||||
{{ tuple $envAll "bootstrap" list | include "helm-toolkit.snippets.kubernetes_entrypoint_init_container" | indent 8 }}
|
||||
containers:
|
||||
- name: ceph-client-bootstrap
|
||||
{{ tuple $envAll "ceph_bootstrap" | include "helm-toolkit.snippets.image" | indent 10 }}
|
||||
{{ tuple $envAll $envAll.Values.pod.resources.jobs.bootstrap | include "helm-toolkit.snippets.kubernetes_resources" | indent 10 }}
|
||||
{{ dict "envAll" $envAll "application" "bootstrap" "container" "bootstrap" | include "helm-toolkit.snippets.kubernetes_container_security_context" | indent 10 }}
|
||||
command:
|
||||
- /tmp/bootstrap.sh
|
||||
volumeMounts:
|
||||
- name: pod-tmp
|
||||
mountPath: /tmp
|
||||
- name: ceph-client-bin
|
||||
mountPath: /tmp/bootstrap.sh
|
||||
subPath: bootstrap.sh
|
||||
readOnly: true
|
||||
- name: ceph-client-etc
|
||||
mountPath: /etc/ceph/ceph.conf
|
||||
subPath: ceph.conf
|
||||
readOnly: true
|
||||
- name: ceph-client-admin-keyring
|
||||
mountPath: /etc/ceph/ceph.client.admin.keyring
|
||||
subPath: ceph.client.admin.keyring
|
||||
readOnly: true
|
||||
volumes:
|
||||
- name: pod-tmp
|
||||
emptyDir: {}
|
||||
- name: pod-etc-ceph
|
||||
emptyDir: {}
|
||||
- name: ceph-client-bin
|
||||
configMap:
|
||||
name: ceph-client-bin
|
||||
defaultMode: 0555
|
||||
- name: ceph-client-etc
|
||||
configMap:
|
||||
name: ceph-client-etc
|
||||
defaultMode: 0444
|
||||
- name: ceph-client-admin-keyring
|
||||
secret:
|
||||
secretName: {{ .Values.secrets.keyrings.admin }}
|
||||
{{- end }}
|
18
ceph-client/templates/job-image-repo-sync.yaml
Normal file
@ -0,0 +1,18 @@
{{/*
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at

http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/}}

{{- if and .Values.manifests.job_image_repo_sync .Values.images.local_registry.active }}
{{- $imageRepoSyncJob := dict "envAll" . "serviceName" "ceph-client" -}}
{{ $imageRepoSyncJob | include "helm-toolkit.manifests.job_image_repo_sync" }}
{{- end }}
117
ceph-client/templates/job-rbd-pool.yaml
Normal file
@ -0,0 +1,117 @@
{{/*
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at

http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/}}

{{- if and .Values.manifests.job_rbd_pool .Values.deployment.ceph }}
{{- $envAll := . }}

{{- $serviceAccountName := "ceph-rbd-pool" }}
{{ tuple $envAll "rbd_pool" $serviceAccountName | include "helm-toolkit.snippets.kubernetes_pod_rbac_serviceaccount" }}
---
apiVersion: batch/v1
kind: Job
metadata:
  name: ceph-rbd-pool
  labels:
{{ tuple $envAll "ceph" "rbd-pool" | include "helm-toolkit.snippets.kubernetes_metadata_labels" | indent 4 }}
  annotations:
{{ tuple $envAll | include "helm-toolkit.snippets.release_uuid" }}
spec:
  template:
    metadata:
      name: ceph-rbd-pool
      labels:
{{ tuple $envAll "ceph" "rbd-pool" | include "helm-toolkit.snippets.kubernetes_metadata_labels" | indent 8 }}
      annotations:
{{ dict "envAll" $envAll "podName" "ceph-rbd-pool" "containerNames" (list "ceph-rbd-pool" "init") | include "helm-toolkit.snippets.kubernetes_mandatory_access_control_annotation" | indent 8 }}
    spec:
{{ dict "envAll" $envAll "application" "rbd_pool" | include "helm-toolkit.snippets.kubernetes_pod_security_context" | indent 6 }}
      serviceAccountName: {{ $serviceAccountName }}
      restartPolicy: {{ $envAll.Values.jobs.rbd_pool.restartPolicy | quote }}
      affinity:
{{ tuple $envAll "ceph" "rbd-pool" | include "helm-toolkit.snippets.kubernetes_pod_anti_affinity" | indent 8 }}
      nodeSelector:
        {{ $envAll.Values.labels.job.node_selector_key }}: {{ $envAll.Values.labels.job.node_selector_value }}
      initContainers:
{{ tuple $envAll "rbd_pool" list | include "helm-toolkit.snippets.kubernetes_entrypoint_init_container" | indent 8 }}
      containers:
        - name: ceph-rbd-pool
{{ tuple $envAll "ceph_rbd_pool" | include "helm-toolkit.snippets.image" | indent 10 }}
{{ tuple $envAll $envAll.Values.pod.resources.jobs.rbd_pool | include "helm-toolkit.snippets.kubernetes_resources" | indent 10 }}
{{ dict "envAll" $envAll "application" "rbd_pool" "container" "rbd_pool" | include "helm-toolkit.snippets.kubernetes_container_security_context" | indent 10 }}
          env:
            - name: CLUSTER
              value: "ceph"
            - name: NAMESPACE
              valueFrom:
                fieldRef:
                  apiVersion: v1
                  fieldPath: metadata.namespace
            - name: ENABLE_AUTOSCALER
              value: {{ .Values.conf.features.pg_autoscaler | quote }}
            - name: CLUSTER_SET_FLAGS
              value: {{ .Values.conf.features.cluster_flags.set | quote }}
            - name: CLUSTER_UNSET_FLAGS
              value: {{ .Values.conf.features.cluster_flags.unset | quote }}
          command:
            - /tmp/pool-init.sh
          volumeMounts:
            - name: pod-tmp
              mountPath: /tmp
            - name: ceph-client-bin
              mountPath: /tmp/pool-init.sh
              subPath: pool-init.sh
              readOnly: true
            - name: ceph-client-bin
              mountPath: /tmp/pool-calc.py
              subPath: pool-calc.py
              readOnly: true
            - name: pod-etc-ceph
              mountPath: /etc/ceph
              readOnly: false
            - name: ceph-client-etc
              mountPath: /etc/ceph/ceph.conf.template
              subPath: ceph.conf
              readOnly: true
            - name: ceph-client-admin-keyring
              mountPath: /etc/ceph/ceph.client.admin.keyring
              subPath: ceph.client.admin.keyring
              readOnly: true
            - name: pod-var-lib-ceph
              mountPath: /var/lib/ceph
              readOnly: false
            - name: pod-run
              mountPath: /run
              readOnly: false
      volumes:
        - name: pod-tmp
          emptyDir: {}
        - name: pod-etc-ceph
          emptyDir: {}
        - name: ceph-client-etc
          configMap:
            name: ceph-client-etc
            defaultMode: 0444
        - name: ceph-client-bin
          configMap:
            name: ceph-client-bin
            defaultMode: 0555
        - name: pod-var-lib-ceph
          emptyDir: {}
        - name: pod-run
          emptyDir:
            medium: "Memory"
        - name: ceph-client-admin-keyring
          secret:
            secretName: {{ .Values.secrets.keyrings.admin }}
{{- end }}
92
ceph-client/templates/pod-helm-tests.yaml
Normal file
@ -0,0 +1,92 @@
{{/*
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at

http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/}}

{{- if .Values.manifests.helm_tests }}
{{- $envAll := . }}
{{- $serviceAccountName := printf "%s-%s" $envAll.Release.Name "test" }}
{{ tuple $envAll "tests" $serviceAccountName | include "helm-toolkit.snippets.kubernetes_pod_rbac_serviceaccount" }}
---
apiVersion: v1
kind: Pod
metadata:
  name: {{ $serviceAccountName }}
  labels:
{{ tuple $envAll "ceph-client" "test" | include "helm-toolkit.snippets.kubernetes_metadata_labels" | indent 4 }}
  annotations:
    "helm.sh/hook": test-success
{{ dict "envAll" $envAll "podName" "ceph-client-test" "containerNames" (list "init" "ceph-cluster-helm-test") | include "helm-toolkit.snippets.kubernetes_mandatory_access_control_annotation" | indent 4 }}
spec:
{{ dict "envAll" $envAll "application" "test" | include "helm-toolkit.snippets.kubernetes_pod_security_context" | indent 2 }}
  restartPolicy: Never
  serviceAccountName: {{ $serviceAccountName }}
  nodeSelector:
    {{ .Values.labels.test.node_selector_key }}: {{ .Values.labels.test.node_selector_value }}
  initContainers:
{{ tuple $envAll "tests" list | include "helm-toolkit.snippets.kubernetes_entrypoint_init_container" | indent 4 }}
  containers:
    - name: ceph-cluster-helm-test
{{ tuple $envAll "ceph_config_helper" | include "helm-toolkit.snippets.image" | indent 6 }}
{{ tuple $envAll $envAll.Values.pod.resources.jobs.tests | include "helm-toolkit.snippets.kubernetes_resources" | indent 6 }}
{{ dict "envAll" $envAll "application" "test" "container" "test" | include "helm-toolkit.snippets.kubernetes_container_security_context" | indent 6 }}
      env:
        - name: CLUSTER
          value: "ceph"
        - name: CEPH_DEPLOYMENT_NAMESPACE
          value: {{ .Release.Namespace }}
        - name: REQUIRED_PERCENT_OF_OSDS
          value: {{ .Values.conf.pool.target.required_percent_of_osds | ceil | quote }}
        - name: EXPECTED_CRUSHRULE
          value: {{ .Values.conf.pool.default.crush_rule | default "replicated_rule" | quote }}
        - name: MGR_COUNT
          value: {{ .Values.pod.replicas.mgr | default "1" | quote }}
        - name: ENABLE_AUTOSCALER
          value: {{ .Values.conf.features.pg_autoscaler | quote }}
{{- range $pool := .Values.conf.pool.spec -}}
{{- with $pool }}
        - name: {{ .name | upper | replace "." "_" }}
          value: {{ .replication | quote }}
{{- end }}
{{- end }}
      command:
        - /tmp/helm-tests.sh
      volumeMounts:
        - name: pod-tmp
          mountPath: /tmp
        - name: ceph-client-bin
          mountPath: /tmp/helm-tests.sh
          subPath: helm-tests.sh
          readOnly: true
        - name: ceph-client-admin-keyring
          mountPath: /etc/ceph/ceph.client.admin.keyring
          subPath: ceph.client.admin.keyring
          readOnly: true
        - name: ceph-client-etc
          mountPath: /etc/ceph/ceph.conf
          subPath: ceph.conf
          readOnly: true
  volumes:
    - name: pod-tmp
      emptyDir: {}
    - name: ceph-client-bin
      configMap:
        name: ceph-client-bin
        defaultMode: 0555
    - name: ceph-client-admin-keyring
      secret:
        secretName: {{ .Values.secrets.keyrings.admin }}
    - name: ceph-client-etc
      configMap:
        name: ceph-client-etc
        defaultMode: 0444
{{- end }}
17
ceph-client/templates/secret-registry.yaml
Normal file
@ -0,0 +1,17 @@
{{/*
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at

http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/}}

{{- if and .Values.manifests.secret_registry .Values.endpoints.oci_image_registry.auth.enabled }}
{{ include "helm-toolkit.manifests.secret_registry" ( dict "envAll" . "registryUser" .Chart.Name ) }}
{{- end }}
603
ceph-client/values.yaml
Normal file
@ -0,0 +1,603 @@
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#    http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

# Default values for ceph-client.
# This is a YAML-formatted file.
# Declare name/value pairs to be passed into your templates.
# name: value

---
deployment:
  ceph: true

release_group: null

images:
  pull_policy: IfNotPresent
  tags:
    ceph_bootstrap: 'docker.io/openstackhelm/ceph-daemon:ubuntu_jammy_19.2.1-1-20250207'
    ceph_config_helper: 'docker.io/openstackhelm/ceph-config-helper:ubuntu_jammy_19.2.1-1-20250207'
    ceph_mds: 'docker.io/openstackhelm/ceph-daemon:ubuntu_jammy_19.2.1-1-20250207'
    ceph_rbd_pool: 'docker.io/openstackhelm/ceph-config-helper:ubuntu_jammy_19.2.1-1-20250207'
    dep_check: 'quay.io/airshipit/kubernetes-entrypoint:latest-ubuntu_focal'
    image_repo_sync: 'docker.io/library/docker:17.07.0'
  local_registry:
    active: false
    exclude:
      - dep_check
      - image_repo_sync

labels:
  job:
    node_selector_key: openstack-control-plane
    node_selector_value: enabled
  test:
    node_selector_key: openstack-control-plane
    node_selector_value: enabled
  mgr:
    node_selector_key: ceph-mgr
    node_selector_value: enabled
  mds:
    node_selector_key: ceph-mds
    node_selector_value: enabled
  checkdns:
    node_selector_key: ceph-mon
    node_selector_value: enabled

pod:
  security_context:
    checkdns:
      pod:
        runAsUser: 65534
      container:
        checkdns:
          allowPrivilegeEscalation: false
          readOnlyRootFilesystem: true
    mds:
      pod:
        runAsUser: 65534
      container:
        init_dirs:
          runAsUser: 0
          readOnlyRootFilesystem: true
        mds:
          runAsUser: 64045
          readOnlyRootFilesystem: true
          allowPrivilegeEscalation: false
    bootstrap:
      pod:
        runAsUser: 65534
      container:
        bootstrap:
          allowPrivilegeEscalation: false
          readOnlyRootFilesystem: true
    rbd_pool:
      pod:
        runAsUser: 65534
      container:
        rbd_pool:
          allowPrivilegeEscalation: false
          readOnlyRootFilesystem: true
    test:
      pod:
        runAsUser: 65534
      container:
        test:
          allowPrivilegeEscalation: false
          readOnlyRootFilesystem: true
  dns_policy: "ClusterFirstWithHostNet"
  replicas:
    mds: 2
  lifecycle:
    upgrades:
      deployments:
        pod_replacement_strategy: RollingUpdate
        revision_history: 3
        rolling_update:
          max_surge: 25%
          max_unavailable: 25%
  affinity:
    anti:
      type:
        default: preferredDuringSchedulingIgnoredDuringExecution
      topologyKey:
        default: kubernetes.io/hostname
      weight:
        default: 10
  resources:
    enabled: false
    mds:
      requests:
        memory: "10Mi"
        cpu: "250m"
      limits:
        memory: "50Mi"
        cpu: "500m"
    checkdns:
      requests:
        memory: "5Mi"
        cpu: "250m"
      limits:
        memory: "50Mi"
        cpu: "500m"
    jobs:
      bootstrap:
        limits:
          memory: "1024Mi"
          cpu: "2000m"
        requests:
          memory: "128Mi"
          cpu: "500m"
      image_repo_sync:
        requests:
          memory: "128Mi"
          cpu: "100m"
        limits:
          memory: "1024Mi"
          cpu: "2000m"
      rbd_pool:
        requests:
          memory: "128Mi"
          cpu: "100m"
        limits:
          memory: "1024Mi"
          cpu: "2000m"
      tests:
        requests:
          memory: "10Mi"
          cpu: "250m"
        limits:
          memory: "50Mi"
          cpu: "500m"
  tolerations:
    checkdns:
      tolerations:
        - effect: NoExecute
          key: node.kubernetes.io/not-ready
          operator: Exists
          tolerationSeconds: 60
        - effect: NoExecute
          key: node.kubernetes.io/unreachable
          operator: Exists
          tolerationSeconds: 60
    mds:
      tolerations:
        - effect: NoExecute
          key: node.kubernetes.io/not-ready
          operator: Exists
          tolerationSeconds: 60
        - effect: NoExecute
          key: node.kubernetes.io/unreachable
          operator: Exists
          tolerationSeconds: 60
  probes:
    ceph:
      ceph-mds:
        readiness:
          enabled: true
          params:
            timeoutSeconds: 5
        liveness:
          enabled: true
          params:
            initialDelaySeconds: 60
            timeoutSeconds: 5

secrets:
  keyrings:
    mon: ceph-mon-keyring
    mds: ceph-bootstrap-mds-keyring
    osd: ceph-bootstrap-osd-keyring
    rgw: ceph-bootstrap-rgw-keyring
    mgr: ceph-bootstrap-mgr-keyring
    admin: ceph-client-admin-keyring
  oci_image_registry:
    ceph-client: ceph-client-oci-image-registry

network:
  public: 192.168.0.0/16
  cluster: 192.168.0.0/16

jobs:
  ceph_defragosds:
    # Execute the 1st of each month
    cron: "0 0 1 * *"
    history:
      # Number of successful jobs to keep
      successJob: 1
      # Number of failed jobs to keep
      failJob: 1
    concurrency:
      # Skip a new job if the previous job is still active
      execPolicy: Forbid
    startingDeadlineSecs: 60
  pool_checkPGs:
    # Execute every 15 minutes
    cron: "*/15 * * * *"
    history:
      # Number of successful jobs to keep
      successJob: 1
      # Number of failed jobs to keep
      failJob: 1
    concurrency:
      # Skip a new job if the previous job is still active
      execPolicy: Forbid
    startingDeadlineSecs: 60
  rbd_pool:
    restartPolicy: OnFailure

conf:
  features:
    mds: true
    pg_autoscaler: true
    cluster_flags:
      # List of flags to set or unset, separated by spaces
      set: ""
      unset: ""
  cluster_commands:
    # Add additional commands to run against the Ceph cluster here
    # NOTE: Beginning with Pacific, mon_allow_pool_size_one must be
    # configured here to allow gate scripts to use 1x replication.
    # Adding it to /etc/ceph/ceph.conf doesn't seem to be effective.
    - config set global mon_allow_pool_size_one true
    - osd require-osd-release squid
    - status
  pool:
    # NOTE(portdirect): this drives a simple approximation of
    # https://ceph.com/pgcalc/, the `target.osd` key should be set to match the
    # expected number of osds in a cluster, and the `target.pg_per_osd` should be
    # set to match the desired number of placement groups on each OSD.
    crush:
      # NOTE(portdirect): to use RBD devices with Ubuntu 16.04's 4.4.x series
      # kernel this should be set to `hammer`
      tunables: null
    target:
      # NOTE(portdirect): arbitrarily we set the default number of expected OSDs to 5
      # to match the number of nodes in the OSH gate.
      osd: 5
      # This is the number of OSDs expected in the final state. This is to allow the above
      # target to be smaller initially in the event of a partial deployment. This way
      # helm tests can still pass at deployment time and pool quotas can be set based on
      # the expected final state (actual target quota = final_osd / osd * quota).
      final_osd: 5
      # This allows helm tests to pass when at least the specified percentage
      # of OSDs are up and running.
      required_percent_of_osds: 75
      pg_per_osd: 100
      # NOTE(bw6938): When pools are created with the autoscaler enabled, a pg_num_min
      # value specifies the minimum value of pg_num that the autoscaler will target.
      # That default was recently changed from 8 to 32 which severely limits the number
      # of pools in a small cluster per https://github.com/rook/rook/issues/5091. This change
      # overrides the default pg_num_min value of 32 with a value of 8, matching the default
      # pg_num value of 8.
      pg_num_min: 8
      protected: true
      # NOTE(st053q): target quota should be set to the overall cluster full percentage
      # to be tolerated as a quota (percent full to allow in order to tolerate some
      # level of failure)
      # Set target quota to "0" (must be quoted) to remove quotas for all pools
      quota: 100
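      # Illustrative arithmetic for the formula above (hypothetical numbers):
      # with osd: 5 deployed now, final_osd: 10 expected eventually and
      # quota: 100, quotas are applied as 10 / 5 * 100 = 200, i.e. pools are
      # sized for the final cluster rather than the partial one.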
    default:
      # NOTE(supamatt): Accepted values are taken from `crush_rules` list.
      crush_rule: replicated_rule
    crush_rules:
      # NOTE(supamatt): Device classes must remain undefined if all OSDs use the
      # same device type for backing disks (i.e., all HDD or all SSD).
      - name: same_host
        crush_rule: create-simple
        failure_domain: osd
        device_class:
      - name: replicated_rule
        crush_rule: create-simple
        failure_domain: host
        device_class:
      - name: rack_replicated_rule
        crush_rule: create-simple
        failure_domain: rack
        device_class:
      # - name: replicated_rule-ssd
      #   crush_rule: create-replicated
      #   failure_domain: host
      #   device_class: ssd
      # - name: replicated_rule-hdd
      #   crush_rule: create-replicated
      #   failure_domain: host
      #   device_class: hdd
      # - name: rack_replicated_rule-ssd
      #   crush_rule: create-replicated
      #   failure_domain: rack
      #   device_class: ssd
      # - name: rack_replicated_rule-hdd
      #   crush_rule: create-replicated
      #   failure_domain: rack
      #   device_class: hdd
      # - name: row_replicated_rule
      #   crush_rule: create-simple
      #   failure_domain: row
      #   device_class:

    # NOTE(portdirect): this section describes the pools that will be managed by
    # the ceph pool management job, as it tunes the pgs and crush rule, based on
    # the above.
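    # As a rough illustration of that approximation (hypothetical numbers; the
    # real logic lives in pool-calc.py): with target.osd: 5 and
    # target.pg_per_osd: 100, a pool with percent_total_data: 40 and
    # replication: 3 works out to roughly 5 * 100 * 0.40 / 3 ~= 67 PGs before
    # rounding to a suitable power of two.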
    spec:
      # Health metrics pool
      - name: .mgr
        application: mgr_devicehealth
        replication: 1
        percent_total_data: 5
      # RBD pool
      - name: rbd
        # An optional "rename" value may be used to change the name of an existing pool.
        # If the pool doesn't exist, it will be created and renamed. If the pool exists with
        # the original name, it will be renamed. If the pool exists and has already been
        # renamed, the name will not be changed. If two pools exist with the two names, the
        # pool matching the renamed value will be configured and the other left alone.
        # rename: rbd-new
        # Optional "delete" and "delete_all_pool_data" values may be used to delete an
        # existing pool. Both must exist and must be set to true in order to delete a pool.
        # NOTE: Deleting a pool deletes all of its data and is unrecoverable. This is why
        #       both values are required in order to delete a pool. Neither value does
        #       anything by itself.
        # delete: false
        # delete_all_pool_data: false
        application: rbd
        replication: 3
        percent_total_data: 40
        # Example of 100 GiB pool_quota for rbd pool (no pool quota if absent)
        # May be specified in TiB, TB, GiB, GB, MiB, MB, KiB, KB, or bytes
        # NOTE: This should always be a string value to avoid Helm issues with large integers
        # pool_quota: "100GiB"
        # Example of an overridden pg_num_min value for a single pool
        # pg_num_min: 32
        # NOTE(supamatt): By default the crush rules used to create each pool will be
        # taken from the pool default `crush_rule` unless a pool specific `crush_rule`
        # is specified. The rule MUST exist for it to be defined here.
        # crush_rule: replicated_rule
      # CephFS pools
      - name: cephfs_metadata
        application: cephfs
        replication: 3
        percent_total_data: 5
      - name: cephfs_data
        application: cephfs
        replication: 3
        percent_total_data: 10
      # RadosGW pools
      - name: .rgw.root
        application: rgw
        replication: 3
        percent_total_data: 0.1
      - name: default.rgw.control
        application: rgw
        replication: 3
        percent_total_data: 0.1
      - name: default.rgw.data.root
        application: rgw
        replication: 3
        percent_total_data: 0.1
      - name: default.rgw.gc
        application: rgw
        replication: 3
        percent_total_data: 0.1
      - name: default.rgw.log
        application: rgw
        replication: 3
        percent_total_data: 0.1
      - name: default.rgw.intent-log
        application: rgw
        replication: 3
        percent_total_data: 0.1
      - name: default.rgw.meta
        application: rgw
        replication: 3
        percent_total_data: 0.1
      - name: default.rgw.usage
        application: rgw
        replication: 3
        percent_total_data: 0.1
      - name: default.rgw.users.keys
        application: rgw
        replication: 3
        percent_total_data: 0.1
      - name: default.rgw.users.email
        application: rgw
        replication: 3
        percent_total_data: 0.1
      - name: default.rgw.users.swift
        application: rgw
        replication: 3
        percent_total_data: 0.1
      - name: default.rgw.users.uid
        application: rgw
        replication: 3
        percent_total_data: 0.1
      - name: default.rgw.buckets.extra
        application: rgw
        replication: 3
        percent_total_data: 0.1
      - name: default.rgw.buckets.index
        application: rgw
        replication: 3
        percent_total_data: 3
      - name: default.rgw.buckets.data
        application: rgw
        replication: 3
        percent_total_data: 29

  ceph:
    global:
      # auth
      cephx: true
      cephx_require_signatures: false
      cephx_cluster_require_signatures: true
      cephx_service_require_signatures: false
      objecter_inflight_op_bytes: "1073741824"
      objecter_inflight_ops: 10240
      debug_ms: "0/0"
      log_file: /dev/stdout
      mon_cluster_log_file: /dev/stdout
    osd:
      osd_mkfs_type: xfs
      osd_mkfs_options_xfs: -f -i size=2048
      osd_max_object_name_len: 256
      ms_bind_port_min: 6800
      ms_bind_port_max: 7100

dependencies:
  dynamic:
    common:
      local_image_registry:
        jobs:
          - ceph-client-image-repo-sync
        services:
          - endpoint: node
            service: local_image_registry
  static:
    bootstrap:
      jobs: null
      services:
        - endpoint: internal
          service: ceph_mon
    cephfs_client_key_generator:
      jobs: null
    mds:
      jobs:
        - ceph-storage-keys-generator
        - ceph-mds-keyring-generator
        - ceph-rbd-pool
      services:
        - endpoint: internal
          service: ceph_mon
    pool_checkpgs:
      jobs:
        - ceph-rbd-pool
      services:
        - endpoint: internal
          service: ceph_mgr
    checkdns:
      services:
        - endpoint: internal
          service: ceph_mon
    namespace_client_key_cleaner:
      jobs: null
    namespace_client_key_generator:
      jobs: null
    rbd_pool:
      services:
        - endpoint: internal
          service: ceph_mon
        - endpoint: internal
          service: ceph_mgr
    image_repo_sync:
      services:
        - endpoint: internal
          service: local_image_registry
    tests:
      jobs:
        - ceph-rbd-pool
        - ceph-mgr-keyring-generator
      services:
        - endpoint: internal
          service: ceph_mon
        - endpoint: internal
          service: ceph_mgr

bootstrap:
  enabled: false
  script: |
    ceph -s
    function ensure_pool () {
      ceph osd pool stats $1 || ceph osd pool create $1 $2
      if [[ $(ceph mon versions | awk '/version/{print $3}' | cut -d. -f1) -ge 12 ]]; then
        ceph osd pool application enable $1 $3
      fi
    }
    #ensure_pool volumes 8 cinder

endpoints:
  cluster_domain_suffix: cluster.local
  local_image_registry:
    name: docker-registry
    namespace: docker-registry
    hosts:
      default: localhost
      internal: docker-registry
      node: localhost
    host_fqdn_override:
      default: null
    port:
      registry:
        node: 5000
  oci_image_registry:
    name: oci-image-registry
    namespace: oci-image-registry
    auth:
      enabled: false
      ceph-client:
        username: ceph-client
        password: password
    hosts:
      default: localhost
    host_fqdn_override:
      default: null
    port:
      registry:
        default: null
  ceph_mon:
    namespace: null
    hosts:
      default: ceph-mon
      discovery: ceph-mon-discovery
    host_fqdn_override:
      default: null
    port:
      mon:
        default: 6789
      mon_msgr2:
        default: 3300
  ceph_mgr:
    namespace: null
    hosts:
      default: ceph-mgr
    host_fqdn_override:
      default: null
    port:
      mgr:
        default: 7000
      metrics:
        default: 9283
    scheme:
      default: http
  ceph_object_store:
    endpoint_namespaces:
      - openstack
      - ceph
    # hosts:
    #   default: ceph-rgw
    # host_fqdn_override:
    #   default: null

manifests:
  configmap_bin: true
  configmap_test_bin: true
  configmap_etc: true
  deployment_mds: true
  deployment_checkdns: true
  job_bootstrap: false
  job_cephfs_client_key: true
  job_image_repo_sync: true
  job_rbd_pool: true
  helm_tests: true
  cronjob_checkPGs: true
  cronjob_defragosds: true
  secret_registry: true
...
24
ceph-mon/Chart.yaml
Normal file
@ -0,0 +1,24 @@
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#    http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

---
apiVersion: v2
appVersion: v1.0.0
description: OpenStack-Helm Ceph Mon
name: ceph-mon
version: 2024.2.0
home: https://github.com/ceph/ceph
dependencies:
  - name: helm-toolkit
    repository: file://../helm-toolkit
    version: ">= 0.1.0"
...
18
ceph-mon/templates/bin/_bootstrap.sh.tpl
Normal file
@ -0,0 +1,18 @@
#!/bin/bash

{{/*
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at

http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/}}

set -ex
{{ .Values.bootstrap.script | default "echo 'Not Enabled'" }}
44
ceph-mon/templates/bin/_init-dirs.sh.tpl
Normal file
@ -0,0 +1,44 @@
#!/bin/bash

{{/*
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at

http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/}}

set -ex
export LC_ALL=C
: "${HOSTNAME:=$(uname -n)}"
: "${MGR_NAME:=${HOSTNAME}}"
: "${MDS_NAME:=mds-${HOSTNAME}}"
: "${MDS_BOOTSTRAP_KEYRING:=/var/lib/ceph/bootstrap-mds/${CLUSTER}.keyring}"
: "${OSD_BOOTSTRAP_KEYRING:=/var/lib/ceph/bootstrap-osd/${CLUSTER}.keyring}"

for keyring in ${OSD_BOOTSTRAP_KEYRING} ${MDS_BOOTSTRAP_KEYRING} ; do
  mkdir -p "$(dirname "$keyring")"
done

# Let's create the ceph directories
for DIRECTORY in mon osd mds radosgw tmp mgr crash; do
  mkdir -p "/var/lib/ceph/${DIRECTORY}"
done

# Create socket directory
mkdir -p /run/ceph

# Create the MDS directory
mkdir -p "/var/lib/ceph/mds/${CLUSTER}-${MDS_NAME}"

# Create the MGR directory
mkdir -p "/var/lib/ceph/mgr/${CLUSTER}-${MGR_NAME}"

# Adjust the owner of all those directories
chown -R ceph. /run/ceph/ /var/lib/ceph/*
132
ceph-mon/templates/bin/_post-apply.sh.tpl
Normal file
@ -0,0 +1,132 @@
#!/bin/bash

{{/*
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at

http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/}}

export LC_ALL=C

: "${ADMIN_KEYRING:=/etc/ceph/${CLUSTER}.client.admin.keyring}"

if [[ ! -f /etc/ceph/${CLUSTER}.conf ]]; then
  echo "ERROR- /etc/ceph/${CLUSTER}.conf must exist; get it from your existing mon"
  exit 1
fi

if [[ ! -f ${ADMIN_KEYRING} ]]; then
  echo "ERROR- ${ADMIN_KEYRING} must exist; get it from your existing mon"
  exit 1
fi

ceph --cluster ${CLUSTER} -s
function wait_for_pods() {
  timeout=${2:-1800}
  end=$(date -ud "${timeout} seconds" +%s)
  # Selecting containers with "ceph-mon" name and
  # counting them based on "ready" field.
  count_pods=".items | map(.status.containerStatuses | .[] | \
              select(.name==\"ceph-mon\")) | \
              group_by(.ready) | map({(.[0].ready | tostring): length}) | .[]"
  min_mons="add | if .true >= (.false + .true) \
            then \"pass\" else \"fail\" end"
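  # Illustrative walk-through: with 3 ready mons and 1 not ready, the filter
  # above emits {"false":1} and {"true":3}; "add" merges them and the test
  # 3 >= (1 + 3) fails, so this yields "fail". It yields "pass" only once
  # every ceph-mon container reports ready.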
  while true; do
    # Leave while loop if all mons are ready.
    state=$(kubectl get pods --namespace="${1}" -l component=mon -o json | jq "${count_pods}")
    mon_state=$(jq -s "${min_mons}" <<< "${state}")
    if [[ "${mon_state}" == \"pass\" ]]; then
      break
    fi
    sleep 5

    if [ $(date -u +%s) -gt $end ] ; then
      echo -e "Containers failed to start after $timeout seconds\n"
      kubectl get pods --namespace "${1}" -o wide -l component=mon
      exit 1
    fi
  done
}

function check_ds() {
  for ds in `kubectl get ds --namespace=$CEPH_NAMESPACE -l component=mon --no-headers=true|awk '{print $1}'`
  do
    ds_query=`kubectl get ds -n $CEPH_NAMESPACE $ds -o json|jq -r .status`
    if echo $ds_query |grep -i "numberAvailable" ;then
      currentNumberScheduled=`echo $ds_query|jq -r .currentNumberScheduled`
      desiredNumberScheduled=`echo $ds_query|jq -r .desiredNumberScheduled`
      numberAvailable=`echo $ds_query|jq -r .numberAvailable`
      numberReady=`echo $ds_query|jq -r .numberReady`
      updatedNumberScheduled=`echo $ds_query|jq -r .updatedNumberScheduled`
      ds_check=`echo "$currentNumberScheduled $desiredNumberScheduled $numberAvailable $numberReady $updatedNumberScheduled"| \
                tr ' ' '\n'|sort -u|wc -l`
      if [ $ds_check != 1 ]; then
        echo "Some pods in daemonset $ds are not ready"
        exit
      else
        echo "All pods in daemonset $ds are ready"
      fi
    else
      echo "There are no mons under daemonset $ds"
    fi
  done
}

function restart_mons() {
  mon_pods=`kubectl get po -n $CEPH_NAMESPACE -l component=mon --no-headers | awk '{print $1}'`

  for pod in ${mon_pods}
  do
    if [[ -n "$pod" ]]; then
      echo "Restarting pod $pod"
      kubectl delete pod -n $CEPH_NAMESPACE $pod
    fi
    echo "Waiting for the pod $pod to restart"
    # The pod will not be ready within the first 60 seconds, so waiting here
    # reduces the number of queries against the Kubernetes API.
    sleep 60
    wait_for_pods $CEPH_NAMESPACE
    ceph -s
  done
}

wait_for_pods $CEPH_NAMESPACE

require_upgrade=0
max_release=0

for ds in `kubectl get ds --namespace=$CEPH_NAMESPACE -l component=mon --no-headers=true|awk '{print $1}'`
do
  updatedNumberScheduled=`kubectl get ds -n $CEPH_NAMESPACE $ds -o json|jq -r .status.updatedNumberScheduled`
  desiredNumberScheduled=`kubectl get ds -n $CEPH_NAMESPACE $ds -o json|jq -r .status.desiredNumberScheduled`
  if [[ $updatedNumberScheduled != $desiredNumberScheduled ]]; then
    if kubectl get ds -n $CEPH_NAMESPACE $ds -o json|jq -r .status|grep -i "numberAvailable" ;then
      require_upgrade=$((require_upgrade+1))
      _release=`kubectl get ds -n $CEPH_NAMESPACE $ds -o json|jq -r .status.observedGeneration`
      max_release=$(( max_release > _release ? max_release : _release ))
    fi
  fi
done

echo "Latest revision of the helm chart(s) is : $max_release"

if [[ "$UNCONDITIONAL_MON_RESTART" == "true" ]] || [[ $max_release -gt 1 ]]; then
  if [[ "$UNCONDITIONAL_MON_RESTART" == "true" ]] || [[ $require_upgrade -gt 0 ]]; then
    echo "Restart ceph-mon pods one at a time to prevent disruption"
    restart_mons
  fi

  # Check all the ceph-mon daemonsets
  echo "checking DS"
  check_ds
else
  echo "No revisions found for upgrade"
fi
@ -0,0 +1,14 @@
#!/bin/python
# Generates a random CephX secret: a little-endian binary header (key type,
# creation time, key length) followed by 16 random bytes, base64-encoded.
import os
import struct
import time
import base64
key = os.urandom(16)
header = struct.pack(
    '<hiih',
    1,                 # le16 type: CEPH_CRYPTO_AES
    int(time.time()),  # le32 created: seconds
    0,                 # le32 created: nanoseconds
    len(key),          # le16: len(key)
)
print(base64.b64encode(header + key).decode('ascii'))
@ -0,0 +1,62 @@
#!/bin/bash

{{/*
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at

http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/}}

set -ex

{{ if .Release.IsInstall }}
{{- $envAll := . }}

function ceph_gen_key () {
  python3 ${CEPH_GEN_DIR}/keys-bootstrap-keyring-generator.py
}

function kube_ceph_keyring_gen () {
  CEPH_KEY=$1
  CEPH_KEY_TEMPLATE=$2
  sed "s|{{"{{"}} key {{"}}"}}|${CEPH_KEY}|" ${CEPH_TEMPLATES_DIR}/${CEPH_KEY_TEMPLATE} | base64 -w0 | tr -d '\n'
}

function create_kube_key () {
  CEPH_KEYRING=$1
  CEPH_KEYRING_NAME=$2
  CEPH_KEYRING_TEMPLATE=$3
  KUBE_SECRET_NAME=$4
  if ! kubectl get --namespace ${DEPLOYMENT_NAMESPACE} secrets ${KUBE_SECRET_NAME}; then
    {
      cat <<EOF
---
apiVersion: v1
kind: Secret
metadata:
  name: ${KUBE_SECRET_NAME}
  labels:
{{ tuple $envAll "ceph" "bootstrap" | include "helm-toolkit.snippets.kubernetes_metadata_labels" | indent 4 }}
type: Opaque
data:
  ${CEPH_KEYRING_NAME}: $( kube_ceph_keyring_gen ${CEPH_KEYRING} ${CEPH_KEYRING_TEMPLATE} )
EOF
    } | kubectl apply --namespace ${DEPLOYMENT_NAMESPACE} -f -
  fi
}

#create_kube_key <ceph_key> <ceph_keyring_name> <ceph_keyring_template> <kube_secret_name>
create_kube_key $(ceph_gen_key) ${CEPH_KEYRING_NAME} ${CEPH_KEYRING_TEMPLATE} ${KUBE_SECRET_NAME}

{{ else }}

echo "Not touching ${KUBE_SECRET_NAME} as this is not the initial deployment"

{{- end -}}
100
ceph-mon/templates/bin/keys/_storage-keyring-manager.sh.tpl
Normal file
@ -0,0 +1,100 @@
#!/bin/bash

{{/*
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at

http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/}}

set -ex
{{ if .Release.IsInstall }}
{{- $envAll := . }}

function ceph_gen_key () {
  python3 ${CEPH_GEN_DIR}/keys-bootstrap-keyring-generator.py
}

function kube_ceph_keyring_gen () {
  CEPH_KEY=$1
  CEPH_KEY_TEMPLATE=$2
  sed "s|{{"{{"}} key {{"}}"}}|${CEPH_KEY}|" ${CEPH_TEMPLATES_DIR}/${CEPH_KEY_TEMPLATE} | base64 -w0 | tr -d '\n'
}

CEPH_CLIENT_KEY=""
ROOK_CEPH_TOOLS_POD=$(kubectl -n ${DEPLOYMENT_NAMESPACE} get pods --no-headers | awk '/rook-ceph-tools/{print $1}')

if [[ -n "${ROOK_CEPH_TOOLS_POD}" ]]; then
  CEPH_AUTH_KEY_NAME=$(echo "${CEPH_KEYRING_NAME}" | awk -F. '{print $2 "." $3}')
  CEPH_CLIENT_KEY=$(kubectl -n ${DEPLOYMENT_NAMESPACE} exec ${ROOK_CEPH_TOOLS_POD} -- ceph auth ls | grep -A1 "${CEPH_AUTH_KEY_NAME}" | awk '/key:/{print $2}')
fi

if [[ -z "${CEPH_CLIENT_KEY}" ]]; then
  CEPH_CLIENT_KEY=$(ceph_gen_key)
fi

function create_kube_key () {
  CEPH_KEYRING=$1
  CEPH_KEYRING_NAME=$2
  CEPH_KEYRING_TEMPLATE=$3
  KUBE_SECRET_NAME=$4

  if ! kubectl get --namespace ${DEPLOYMENT_NAMESPACE} secrets ${KUBE_SECRET_NAME}; then
    {
      cat <<EOF
---
apiVersion: v1
kind: Secret
metadata:
  name: ${KUBE_SECRET_NAME}
  labels:
{{ tuple $envAll "ceph" "admin" | include "helm-toolkit.snippets.kubernetes_metadata_labels" | indent 4 }}
type: Opaque
data:
  ${CEPH_KEYRING_NAME}: $( kube_ceph_keyring_gen ${CEPH_KEYRING} ${CEPH_KEYRING_TEMPLATE} )
EOF
    } | kubectl apply --namespace ${DEPLOYMENT_NAMESPACE} -f -
  fi
}
#create_kube_key <ceph_key> <ceph_keyring_name> <ceph_keyring_template> <kube_secret_name>
create_kube_key ${CEPH_CLIENT_KEY} ${CEPH_KEYRING_NAME} ${CEPH_KEYRING_TEMPLATE} ${CEPH_KEYRING_ADMIN_NAME}

function create_kube_storage_key () {
  CEPH_KEYRING=$1
  KUBE_SECRET_NAME=$2

  if ! kubectl get --namespace ${DEPLOYMENT_NAMESPACE} secrets ${KUBE_SECRET_NAME}; then
    {
      cat <<EOF
---
apiVersion: v1
kind: Secret
metadata:
  name: ${KUBE_SECRET_NAME}
  labels:
{{ tuple $envAll "ceph" "admin" | include "helm-toolkit.snippets.kubernetes_metadata_labels" | indent 4 }}
type: kubernetes.io/rbd
data:
  key: $( echo ${CEPH_KEYRING} | base64 | tr -d '\n' )
  userID: $( echo -n "admin" | base64 | tr -d '\n' )
  userKey: $( echo -n ${CEPH_KEYRING} | base64 | tr -d '\n' )
EOF
    } | kubectl apply --namespace ${DEPLOYMENT_NAMESPACE} -f -
  fi
}
#create_kube_storage_key <ceph_key> <kube_secret_name>
create_kube_storage_key ${CEPH_CLIENT_KEY} ${CEPH_STORAGECLASS_ADMIN_SECRET_NAME}
create_kube_storage_key ${CEPH_CLIENT_KEY} ${CEPH_STORAGECLASS_ADMIN_SECRET_NAME_NODE}

{{ else }}

echo "Not touching ${KUBE_SECRET_NAME} as this is not the initial deployment"

{{ end }}
42
ceph-mon/templates/bin/mgr/_check.sh.tpl
Normal file
@ -0,0 +1,42 @@
#!/bin/bash

{{/*
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at

http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/}}

set -ex
export LC_ALL=C

COMMAND="${@:-liveness}"

function health_check () {
  ASOK=$(ls /var/run/ceph/${CLUSTER}-mgr*)
  MGR_NAME=$(basename ${ASOK} | sed -e 's/.asok//' | cut -f 1 -d '.' --complement)
  MGR_STATE=$(ceph --cluster ${CLUSTER} --connect-timeout 1 daemon mgr.${MGR_NAME} status|grep "osd_epoch")
  if [ $? = 0 ]; then
    exit 0
  else
    echo $MGR_STATE
    exit 1
  fi
}

function liveness () {
  health_check
}

function readiness () {
  health_check
}

$COMMAND
79
ceph-mon/templates/bin/mgr/_start.sh.tpl
Normal file
@ -0,0 +1,79 @@
#!/bin/bash
set -ex
: "${CEPH_GET_ADMIN_KEY:=0}"
: "${MGR_NAME:=$(uname -n)}"
: "${MGR_KEYRING:=/var/lib/ceph/mgr/${CLUSTER}-${MGR_NAME}/keyring}"
: "${ADMIN_KEYRING:=/etc/ceph/${CLUSTER}.client.admin.keyring}"
: "${CEPH_CONF:="/etc/ceph/${CLUSTER}.conf"}"

{{ include "helm-toolkit.snippets.mon_host_from_k8s_ep" . }}

if [[ ! -e ${CEPH_CONF}.template ]]; then
  echo "ERROR- ${CEPH_CONF}.template must exist; get it from your existing mon"
  exit 1
else
  ENDPOINT=$(mon_host_from_k8s_ep "${NAMESPACE}" ceph-mon-discovery)
  if [[ "${ENDPOINT}" == "" ]]; then
    /bin/sh -c -e "cat ${CEPH_CONF}.template | tee ${CEPH_CONF}" || true
  else
    /bin/sh -c -e "cat ${CEPH_CONF}.template | sed 's#mon_host.*#mon_host = ${ENDPOINT}#g' | tee ${CEPH_CONF}" || true
  fi
fi

if [ ${CEPH_GET_ADMIN_KEY} -eq 1 ]; then
  if [[ ! -e ${ADMIN_KEYRING} ]]; then
    echo "ERROR- ${ADMIN_KEYRING} must exist; get it from your existing mon"
    exit 1
  fi
fi

# Create a MGR keyring
rm -rf $MGR_KEYRING
if [ ! -e "$MGR_KEYRING" ]; then
  # Create ceph-mgr key
  timeout 10 ceph --cluster "${CLUSTER}" auth get-or-create mgr."${MGR_NAME}" mon 'allow profile mgr' osd 'allow *' mds 'allow *' -o "$MGR_KEYRING"
  chown --verbose ceph. "$MGR_KEYRING"
  chmod 600 "$MGR_KEYRING"
fi

echo "SUCCESS"

ceph --cluster "${CLUSTER}" -v

# Env. variables matching the pattern "<module>_" will be
# found and parsed for config-key settings by
# ceph config set mgr mgr/<module>/<key> <value>
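# For example (hypothetical setting): exporting balancer_mode=upmap with
# "balancer" listed in ENABLED_MODULES results in the loop below running
#   ceph config set mgr mgr/balancer/mode upmap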
MODULES_TO_DISABLE=`ceph mgr dump | python3 -c "import json, sys; print(' '.join(json.load(sys.stdin)['modules']))"`

for module in ${ENABLED_MODULES}; do
  # This module may have been enabled in the past;
  # remove it from the disable list if present
  MODULES_TO_DISABLE=${MODULES_TO_DISABLE/$module/}

  options=`env | grep ^${module}_ || true`
  for option in ${options}; do
    # strip the module name
    option=${option/${module}_/}
    key=`echo $option | cut -d= -f1`
    value=`echo $option | cut -d= -f2`
    if [[ $(ceph mon versions | awk '/version/{print $3}' | cut -d. -f1) -ge 14 ]]; then
      ceph --cluster "${CLUSTER}" config set mgr mgr/$module/$key $value --force
    else
      ceph --cluster "${CLUSTER}" config set mgr mgr/$module/$key $value
    fi
  done
  ceph --cluster "${CLUSTER}" mgr module enable ${module} --force
done

for module in $MODULES_TO_DISABLE; do
  ceph --cluster "${CLUSTER}" mgr module disable ${module}
done

echo "SUCCESS"
# start ceph-mgr
exec /usr/bin/ceph-mgr \
  --cluster "${CLUSTER}" \
  --setuser "ceph" \
  --setgroup "ceph" \
  -d \
  -i "${MGR_NAME}"
61
ceph-mon/templates/bin/mon/_check.sh.tpl
Normal file
@ -0,0 +1,61 @@
#!/bin/bash

{{/*
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at

http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/}}

set -ex
COMMAND="${@:-liveness}"
: ${K8S_HOST_NETWORK:=0}

function health_check () {
  SOCKDIR=${CEPH_SOCKET_DIR:-/run/ceph}
  SBASE=${CEPH_OSD_SOCKET_BASE:-ceph-mon}
  SSUFFIX=${CEPH_SOCKET_SUFFIX:-asok}

  MON_ID=$(ps auwwx | grep ceph-mon | grep -v "$1" | grep -v grep | sed 's/.*-i\ //;s/\ .*//'|awk '{print $1}')

  if [ -z "${MON_ID}" ]; then
    if [[ ${K8S_HOST_NETWORK} -eq 0 ]]; then
      MON_NAME=${POD_NAME}
    else
      MON_NAME=${NODE_NAME}
    fi
  fi

  if [ -S "${SOCKDIR}/${SBASE}.${MON_NAME}.${SSUFFIX}" ]; then
    MON_STATE=$(ceph -f json-pretty --connect-timeout 1 --admin-daemon "${SOCKDIR}/${SBASE}.${MON_NAME}.${SSUFFIX}" mon_status|grep state|sed 's/.*://;s/[^a-z]//g')
    echo "MON ${MON_ID} ${MON_STATE}";
    # this might be a stricter check than we actually want. what are the
    # other values for the "state" field?
    for S in ${MON_LIVE_STATE}; do
      if [ "x${MON_STATE}x" = "x${S}x" ]; then
        exit 0
      fi
    done
  fi
  # if we made it this far, things are not running
  exit 1
}

function liveness () {
  MON_LIVE_STATE="probing electing synchronizing leader peon"
  health_check
}

function readiness () {
  MON_LIVE_STATE="leader peon"
  health_check
}

$COMMAND
114
ceph-mon/templates/bin/mon/_start.sh.tpl
Normal file
@ -0,0 +1,114 @@
#!/bin/bash
set -ex
export LC_ALL=C
: "${K8S_HOST_NETWORK:=0}"
: "${MON_KEYRING:=/etc/ceph/${CLUSTER}.mon.keyring}"
: "${ADMIN_KEYRING:=/etc/ceph/${CLUSTER}.client.admin.keyring}"
: "${MDS_BOOTSTRAP_KEYRING:=/var/lib/ceph/bootstrap-mds/${CLUSTER}.keyring}"
: "${OSD_BOOTSTRAP_KEYRING:=/var/lib/ceph/bootstrap-osd/${CLUSTER}.keyring}"
: "${CEPH_CONF:="/etc/ceph/${CLUSTER}.conf"}"

{{ include "helm-toolkit.snippets.mon_host_from_k8s_ep" . }}

if [[ ! -e ${CEPH_CONF}.template ]]; then
  echo "ERROR- ${CEPH_CONF}.template must exist; get it from your existing mon"
  exit 1
else

  ENDPOINT=$(mon_host_from_k8s_ep "${NAMESPACE}" ceph-mon-discovery)

  if [[ -z "${ENDPOINT}" ]]; then
    /bin/sh -c -e "cat ${CEPH_CONF}.template | tee ${CEPH_CONF}" || true
  else
    /bin/sh -c -e "cat ${CEPH_CONF}.template | sed 's#mon_host.*#mon_host = ${ENDPOINT}#g' | tee ${CEPH_CONF}" || true
  fi
fi
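# The rewritten option then looks like (addresses are illustrative):
#   mon_host = [v1:10.0.0.21:6789/0,v2:10.0.0.21:3300/0],[v1:10.0.0.22:6789/0,v2:10.0.0.22:3300/0]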

if [[ -z "$CEPH_PUBLIC_NETWORK" ]]; then
  echo "ERROR- CEPH_PUBLIC_NETWORK must be defined as the name of the network for the OSDs"
  exit 1
fi

if [[ -z "$MON_IP" ]]; then
  echo "ERROR- MON_IP must be defined as the IP address of the monitor"
  exit 1
fi

if [[ ${K8S_HOST_NETWORK} -eq 0 ]]; then
  MON_NAME=${POD_NAME}
else
  MON_NAME=${NODE_NAME}
fi
MON_DATA_DIR="/var/lib/ceph/mon/${CLUSTER}-${MON_NAME}"
MONMAP="/etc/ceph/monmap-${CLUSTER}"

# Make the monitor directory
/bin/sh -c "mkdir -p \"${MON_DATA_DIR}\""

function get_mon_config {
  # Get fsid from ceph.conf
  local fsid=$(ceph-conf --lookup fsid -c /etc/ceph/${CLUSTER}.conf)

  timeout=10
  MONMAP_ADD=""

  while [[ -z "${MONMAP_ADD// }" && "${timeout}" -gt 0 ]]; do
    # Get the ceph mon pods (name and IP) from the Kubernetes API. Formatted as a set of monmap params
    if [[ ${K8S_HOST_NETWORK} -eq 0 ]]; then
      MONMAP_ADD=$(kubectl get pods --namespace=${NAMESPACE} ${KUBECTL_PARAM} -o template --template="{{`{{range .items}}`}}{{`{{if .status.podIP}}`}}--addv {{`{{.metadata.name}}`}} [v1:{{`{{.status.podIP}}`}}:${MON_PORT},v2:{{`{{.status.podIP}}`}}:${MON_PORT_V2}] {{`{{end}}`}} {{`{{end}}`}}")
    else
      MONMAP_ADD=$(kubectl get pods --namespace=${NAMESPACE} ${KUBECTL_PARAM} -o template --template="{{`{{range .items}}`}}{{`{{if .status.podIP}}`}}--addv {{`{{.spec.nodeName}}`}} [v1:{{`{{.status.podIP}}`}}:${MON_PORT},v2:{{`{{.status.podIP}}`}}:${MON_PORT_V2}] {{`{{end}}`}} {{`{{end}}`}}")
    fi
    (( timeout-- ))
    sleep 1
  done
|
||||
if [[ -z "${MONMAP_ADD// }" ]]; then
|
||||
exit 1
|
||||
fi
|
||||
|
||||
# Create a monmap with the Pod Names and IP
|
||||
monmaptool --create ${MONMAP_ADD} --fsid ${fsid} ${MONMAP} --clobber
|
||||
}
|
||||
|
||||
get_mon_config
|
||||
|
||||
# If we don't have a monitor keyring, this is a new monitor
|
||||
if [ ! -e "${MON_DATA_DIR}/keyring" ]; then
|
||||
if [ ! -e ${MON_KEYRING}.seed ]; then
|
||||
echo "ERROR- ${MON_KEYRING}.seed must exist. You can extract it from your current monitor by running 'ceph auth get mon. -o ${MON_KEYRING}' or use a KV Store"
|
||||
exit 1
|
||||
else
|
||||
cp -vf ${MON_KEYRING}.seed ${MON_KEYRING}
|
||||
fi
|
||||
|
||||
if [ ! -e ${MONMAP} ]; then
|
||||
echo "ERROR- ${MONMAP} must exist. You can extract it from your current monitor by running 'ceph mon getmap -o ${MONMAP}' or use a KV Store"
|
||||
exit 1
|
||||
fi
|
||||
|
||||
# Testing if it's not the first monitor, if one key doesn't exist we assume none of them exist
|
||||
for KEYRING in ${OSD_BOOTSTRAP_KEYRING} ${MDS_BOOTSTRAP_KEYRING} ${ADMIN_KEYRING}; do
|
||||
ceph-authtool ${MON_KEYRING} --import-keyring ${KEYRING}
|
||||
done
|
||||
|
||||
# Prepare the monitor daemon's directory with the map and keyring
|
||||
ceph-mon --setuser ceph --setgroup ceph --cluster "${CLUSTER}" --mkfs -i ${MON_NAME} --inject-monmap ${MONMAP} --keyring ${MON_KEYRING} --mon-data "${MON_DATA_DIR}"
|
||||
else
|
||||
echo "Trying to get the most recent monmap..."
|
||||
# Ignore when we timeout, in most cases that means the cluster has no quorum or
|
||||
# no mons are up and running yet
|
||||
timeout 5 ceph --cluster "${CLUSTER}" mon getmap -o ${MONMAP} || true
|
||||
ceph-mon --setuser ceph --setgroup ceph --cluster "${CLUSTER}" -i ${MON_NAME} --inject-monmap ${MONMAP} --keyring ${MON_KEYRING} --mon-data "${MON_DATA_DIR}"
|
||||
timeout 7 ceph --cluster "${CLUSTER}" mon add "${MON_NAME}" "${MON_IP}:${MON_PORT_V2}" || true
|
||||
fi
|
||||
|
||||
# start MON
|
||||
exec /usr/bin/ceph-mon \
|
||||
--cluster "${CLUSTER}" \
|
||||
--setuser "ceph" \
|
||||
--setgroup "ceph" \
|
||||
-d \
|
||||
-i ${MON_NAME} \
|
||||
--mon-data "${MON_DATA_DIR}" \
|
||||
--public-addr "${MON_IP}:${MON_PORT_V2}"
|
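With K8S_HOST_NETWORK=0 the kubectl template above expands to one --addv flag per running mon pod, which monmaptool turns into a fresh monmap. An illustrative expansion (names, IPs, and fsid assumed):

monmaptool --create \
  --addv ceph-mon-0 [v1:10.0.0.11:6789,v2:10.0.0.11:3300] \
  --addv ceph-mon-1 [v1:10.0.0.12:6789,v2:10.0.0.12:3300] \
  --fsid 4d7acb2c-8a24-4f44-8394-cf9e6a3705b2 /etc/ceph/monmap-ceph --clobber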
14
ceph-mon/templates/bin/mon/_stop.sh.tpl
Normal file
@ -0,0 +1,14 @@
#!/bin/bash

set -ex

NUMBER_OF_MONS=$(ceph mon stat | awk '$3 == "mons" {print $2}')
if [[ "${NUMBER_OF_MONS}" -gt "3" ]]; then
  if [[ ${K8S_HOST_NETWORK} -eq 0 ]]; then
    ceph mon remove "${POD_NAME}"
  else
    ceph mon remove "${NODE_NAME}"
  fi
else
  echo "doing nothing since we are running less than or equal to 3 mons"
fi
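The awk filter keys off the output of ceph mon stat, which begins roughly as shown below (values illustrative); field 2 is the mon count, so a mon is only removed from the map when more than three remain:

echo 'e5: 4 mons at {host1=[v2:10.0.0.1:3300/0,v1:10.0.0.1:6789/0]}' | awk '$3 == "mons" {print $2}'
# prints: 4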
50
ceph-mon/templates/bin/moncheck/_reap-zombies.py.tpl
Normal file
@ -0,0 +1,50 @@
#!/usr/bin/python
import re
import os
import subprocess  # nosec
import json

MON_REGEX = r"^\d: \[((v\d+:([0-9\.]*):\d+\/\d+,*)+)] mon.([^ ]*)$"
# kubectl_command = 'kubectl get pods --namespace=${NAMESPACE} -l component=mon,application=ceph -o template --template="{ {{"}}"}}range .items{{"}}"}} \\"{{"}}"}}.metadata.name{{"}}"}}\\": \\"{{"}}"}}.status.podIP{{"}}"}}\\" , {{"}}"}}end{{"}}"}} }"'
if int(os.getenv('K8S_HOST_NETWORK', 0)) > 0:
    kubectl_command = 'kubectl get pods --namespace=${NAMESPACE} -l component=mon,application=ceph -o template --template="{ {{"{{"}}range \$i, \$v := .items{{"}}"}} {{"{{"}} if \$i{{"}}"}} , {{"{{"}} end {{"}}"}} \\"{{"{{"}}\$v.spec.nodeName{{"}}"}}\\": \\"{{"{{"}}\$v.status.podIP{{"}}"}}\\" {{"{{"}}end{{"}}"}} }"'
else:
    kubectl_command = 'kubectl get pods --namespace=${NAMESPACE} -l component=mon,application=ceph -o template --template="{ {{"{{"}}range \$i, \$v := .items{{"}}"}} {{"{{"}} if \$i{{"}}"}} , {{"{{"}} end {{"}}"}} \\"{{"{{"}}\$v.metadata.name{{"}}"}}\\": \\"{{"{{"}}\$v.status.podIP{{"}}"}}\\" {{"{{"}}end{{"}}"}} }"'

monmap_command = "ceph --cluster=${CLUSTER} mon getmap > /tmp/monmap && monmaptool -f /tmp/monmap --print"


def extract_mons_from_monmap():
    monmap = subprocess.check_output(monmap_command, shell=True).decode('utf-8')  # nosec
    mons = {}
    for line in monmap.split("\n"):
        m = re.match(MON_REGEX, line)
        if m is not None:
            mons[m.group(4)] = m.group(3)
    return mons


def extract_mons_from_kubeapi():
    kubemap = subprocess.check_output(kubectl_command, shell=True).decode('utf-8')  # nosec
    return json.loads(kubemap)


current_mons = extract_mons_from_monmap()
expected_mons = extract_mons_from_kubeapi()

print("current mons: %s" % current_mons)
print("expected mons: %s" % expected_mons)

removed_mon = False
for mon in current_mons:
    if mon not in expected_mons:
        print("removing zombie mon %s" % mon)
        subprocess.call(["ceph", "--cluster", os.environ["NAMESPACE"], "mon", "remove", mon])  # nosec
        removed_mon = True
    elif current_mons[mon] != expected_mons[mon]:  # check if for some reason the ip of the mon changed
        print("ip change detected for pod %s" % mon)
        subprocess.call(["kubectl", "--namespace", os.environ["NAMESPACE"], "delete", "pod", mon])  # nosec
        removed_mon = True
        print("deleted mon %s via the kubernetes api" % mon)

if not removed_mon:
    print("no zombie mons found ...")
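For reference, the monmap side of the comparison comes from output shaped like the line below; MON_REGEX captures the mon name (group 4) and its IP (group 3). Values are illustrative:

ceph --cluster=ceph mon getmap > /tmp/monmap && monmaptool -f /tmp/monmap --print
# 0: [v2:10.0.0.11:3300/0,v1:10.0.0.11:6789/0] mon.ceph-mon-0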
74
ceph-mon/templates/bin/moncheck/_start.sh.tpl
Normal file
@ -0,0 +1,74 @@
#!/bin/bash
set -ex
export LC_ALL=C
: "${CEPH_CONF:="/etc/ceph/${CLUSTER}.conf"}"

{{ include "helm-toolkit.snippets.mon_host_from_k8s_ep" . }}

if [[ ! -e ${CEPH_CONF}.template ]]; then
  echo "ERROR- ${CEPH_CONF}.template must exist; get it from your existing mon"
  exit 1
else
  ENDPOINT=$(mon_host_from_k8s_ep ${NAMESPACE} ceph-mon-discovery)
  if [[ "${ENDPOINT}" == "" ]]; then
    /bin/sh -c -e "cat ${CEPH_CONF}.template | tee ${CEPH_CONF}" || true
  else
    /bin/sh -c -e "cat ${CEPH_CONF}.template | sed 's#mon_host.*#mon_host = ${ENDPOINT}#g' | tee ${CEPH_CONF}" || true
  fi
fi

function check_mon_msgr2 {
  if [[ $(ceph mon versions | awk '/version/{print $3}' | cut -d. -f1) -ge 14 ]]; then
    if ceph health detail|grep -i "MON_MSGR2_NOT_ENABLED"; then
      echo "ceph-mon msgr v2 not enabled on all ceph mons so enabling"
      ceph mon enable-msgr2
    fi
  fi
}

function get_mon_count {
  ceph mon count-metadata hostname | jq '. | length'
}

function check_mon_addrs {
  local mon_dump=$(ceph mon dump)
  local mon_hostnames=$(echo "${mon_dump}" | awk '/mon\./{print $3}' | sed 's/mon\.//g')
  local mon_endpoints=$(kubectl get endpoints ceph-mon-discovery -n ${NAMESPACE} -o json)
  local v1_port=$(jq '.subsets[0].ports[] | select(.name == "mon") | .port' <<< ${mon_endpoints})
  local v2_port=$(jq '.subsets[0].ports[] | select(.name == "mon-msgr2") | .port' <<< ${mon_endpoints})

  for mon in ${mon_hostnames}; do
    local mon_endpoint=$(echo "${mon_dump}" | awk "/${mon}/{print \$2}")
    local mon_ip=$(jq -r ".subsets[0].addresses[] | select(.nodeName == \"${mon}\") | .ip" <<< ${mon_endpoints})

    # Skip this mon if it doesn't appear in the list of kubernetes endpoints
    if [[ -n "${mon_ip}" ]]; then
      local desired_endpoint=$(printf '[v1:%s:%s/0,v2:%s:%s/0]' ${mon_ip} ${v1_port} ${mon_ip} ${v2_port})

      if [[ "${mon_endpoint}" != "${desired_endpoint}" ]]; then
        echo "endpoint for ${mon} is ${mon_endpoint}, setting it to ${desired_endpoint}"
        ceph mon set-addrs ${mon} ${desired_endpoint}
      fi
    fi
  done
}

function watch_mon_health {
  previous_mon_count=$(get_mon_count)
  while true; do
    mon_count=$(get_mon_count)
    if [[ ${mon_count} -ne ${previous_mon_count} ]]; then
      echo "checking for zombie mons"
      python3 /tmp/moncheck-reap-zombies.py || true
    fi
    previous_mon_count=${mon_count}
    echo "checking for ceph-mon msgr v2"
    check_mon_msgr2
    echo "checking mon endpoints in monmap"
    check_mon_addrs
    echo "sleep 30 sec"
    sleep 30
  done
}

watch_mon_health
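check_mon_addrs rebuilds each mon's address vector from the ceph-mon-discovery endpoints and only rewrites the monmap when it disagrees. A sketch of the format it produces (IP and ports illustrative):

printf '[v1:%s:%s/0,v2:%s:%s/0]' 10.0.0.11 6789 10.0.0.11 3300
# -> [v1:10.0.0.11:6789/0,v2:10.0.0.11:3300/0]
# applied as: ceph mon set-addrs <mon-hostname> [v1:10.0.0.11:6789/0,v2:10.0.0.11:3300/0]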
38
ceph-mon/templates/bin/utils/_checkDNS.sh.tpl
Normal file
@ -0,0 +1,38 @@
#!/bin/bash

{{/*
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at

   http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/}}

: "${CEPH_CONF:="/etc/ceph/${CLUSTER}.conf"}"
ENDPOINT="{$1}"

function check_mon_dns () {
  GREP_CMD=$(grep -rl 'ceph-mon' ${CEPH_CONF})

  if [[ "${ENDPOINT}" == "{up}" ]]; then
    echo "If DNS is working, we are good here"
  elif [[ "${ENDPOINT}" != "" ]]; then
    if [[ ${GREP_CMD} != "" ]]; then
      # No DNS, write CEPH MONs IPs into ${CEPH_CONF}
      sh -c -e "cat ${CEPH_CONF}.template | sed 's/mon_host.*/mon_host = ${ENDPOINT}/g' | tee ${CEPH_CONF}" > /dev/null 2>&1
    else
      echo "endpoints are already cached in ${CEPH_CONF}"
      exit
    fi
  fi
}

check_mon_dns

exit
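Callers pass either the literal string up (DNS is resolving) or a mon list; note the script wraps $1 in braces, so the comparison is against {up}. Illustrative invocations:

/tmp/utils-checkDNS.sh up                                # DNS healthy, nothing to do
/tmp/utils-checkDNS.sh "10.0.0.11:6789,10.0.0.12:6789"   # cache mon IPs into ceph.conf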
31
ceph-mon/templates/bin/utils/_checkObjectReplication.py.tpl
Executable file
@ -0,0 +1,31 @@
#!/usr/bin/python3

import subprocess  # nosec
import json
import sys
import collections

if len(sys.argv) == 1:
    print("Please provide a pool name to test, example: checkObjectReplication.py <pool name>")
    sys.exit(1)
else:
    poolName = sys.argv[1]
    cmdRep = 'ceph osd map' + ' ' + str(poolName) + ' ' + 'testreplication -f json-pretty'
    objectRep = subprocess.check_output(cmdRep, shell=True)  # nosec
    repOut = json.loads(objectRep)
    osdNumbers = repOut['up']
    print("Test object got replicated on these osds: %s" % str(osdNumbers))

    osdHosts = []
    for osd in osdNumbers:
        cmdFind = 'ceph osd find' + ' ' + str(osd)
        osdFind = subprocess.check_output(cmdFind, shell=True)  # nosec
        osdHost = json.loads(osdFind)
        osdHostLocation = osdHost['crush_location']
        osdHosts.append(osdHostLocation['host'])

    print("Test object got replicated on these hosts: %s" % str(osdHosts))

    print("Hosts hosting multiple copies of a placement group are: %s" %
          str([item for item, count in collections.Counter(osdHosts).items() if count > 1]))
    sys.exit(0)
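The script is mounted at /tmp/checkObjectReplication.py in the mon pods (see daemonset-mon.yaml below). An illustrative run against an assumed pool named rbd:

python3 /tmp/checkObjectReplication.py rbd
# Test object got replicated on these osds: [3, 7, 11]
# Test object got replicated on these hosts: ['host1', 'host2', 'host3']
# Hosts hosting multiple copies of a placement group are: []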
263
ceph-mon/templates/bin/utils/_checkPGs.py.tpl
Executable file
@ -0,0 +1,263 @@
#!/usr/bin/python

import subprocess  # nosec
import json
import sys
from argparse import ArgumentParser, RawTextHelpFormatter


class cephCRUSH():
    """
    Currently, this script is coded to work with the ceph clusters that have
    these type-ids -- osd, host, rack, root. To add other type_ids to the
    CRUSH map, this script needs enhancements to include the new type_ids.

    type_id  name
    -------  ----
       0     osd
       1     host
       2     chassis
       3     rack
       4     row
       5     pdu
       6     pod
       7     room
       8     datacenter
       9     region
      10     root

    Ceph organizes the CRUSH map in hierarchical topology. At the top, it is
    the root. The next levels are racks, hosts, and OSDs, respectively. The
    OSDs are at the leaf level. This script looks at OSDs in each placement
    group of a ceph pool. For each OSD, starting from the OSD leaf level, this
    script traverses up to the root. Along the way, the host and rack are
    recorded and then verified to make sure the paths to the root are in
    separate failure domains. This script reports the offending PGs to stdout.
    """

    """
    This list stores the ceph crush hierarchy retrieved from
    ceph osd crush tree -f json-pretty
    """
    crushHierarchy = []

    """
    Failure Domains - currently our crush map uses these type IDs - osd,
    host, rack, root
    If we need to add the chassis type (or other types) later on, add the
    type to the if statement in the crushFD construction section.

    crushFD[0] = {'id': -2, 'name': 'host1', 'type': 'host'}
    crushFD[23] = {'id': -5, 'name': 'host2', 'type': 'host'}
    crushFD[68] = {'id': -7, 'name': 'host3', 'type': 'host'}
    rack_FD[-2] = {'id': -9, 'name': 'rack1', 'type': 'rack' }
    rack_FD[-15] = {'id': -17, 'name': 'rack2', 'type': 'rack' }
    root_FD[-17] = {'id': -1, 'name': 'default', 'type': 'root' }
    root_FD[-9] = {'id': -1, 'name': 'default', 'type': 'root' }
    """
    crushFD = {}

    def __init__(self, poolName):
        if 'all' in poolName or 'All' in poolName:
            try:
                poolLs = 'ceph osd pool ls -f json-pretty'
                poolstr = subprocess.check_output(poolLs, shell=True)  # nosec
                self.listPoolName = json.loads(poolstr)
            except subprocess.CalledProcessError as e:
                print('{}'.format(e))
                """Unable to get all pools - cannot proceed"""
                sys.exit(2)
        else:
            self.listPoolName = poolName

        try:
            """Retrieve the crush hierarchies"""
            crushTree = "ceph osd crush tree -f json-pretty | jq .nodes"
            chstr = subprocess.check_output(crushTree, shell=True)  # nosec
            self.crushHierarchy = json.loads(chstr)
        except subprocess.CalledProcessError as e:
            print('{}'.format(e))
            """Unable to get crush hierarchy - cannot proceed"""
            sys.exit(2)

        """
        Number of racks configured in the ceph cluster. The racks that are
        present in the crush hierarchy may not be used. An unused rack
        would not show up in the crushFD.
        """
        self.count_racks = 0

        """depth level - 3 is OSD, 2 is host, 1 is rack, 0 is root"""
        self.osd_depth = 0
        """Construct the Failure Domains - OSD -> Host -> Rack -> Root"""
        for chitem in self.crushHierarchy:
            if chitem['type'] == 'host' or \
               chitem['type'] == 'rack' or \
               chitem['type'] == 'root':
                for child in chitem['children']:
                    self.crushFD[child] = {'id': chitem['id'], 'name': chitem['name'], 'type': chitem['type']}
                if chitem['type'] == 'rack' and len(chitem['children']) > 0:
                    self.count_racks += 1
            elif chitem['type'] == 'osd':
                if self.osd_depth == 0:
                    self.osd_depth = chitem['depth']

        """[ { 'pg-name' : [osd.1, osd.2, osd.3] } ... ]"""
        self.poolPGs = []
        """Replica of the pool. Initialize to 0."""
        self.poolSize = 0

    def isSupportedRelease(self):
        cephMajorVer = int(subprocess.check_output("ceph mon versions | awk '/version/{print $3}' | cut -d. -f1", shell=True))  # nosec
        return cephMajorVer >= 14

    def getPoolSize(self, poolName):
        """
        size (number of replicas) is an attribute of a pool
        { "pool": "rbd", "pool_id": 1, "size": 3 }
        """
        pSize = {}
        """Get the size attribute of the poolName"""
        try:
            poolGet = 'ceph osd pool get ' + poolName + ' size -f json-pretty'
            szstr = subprocess.check_output(poolGet, shell=True)  # nosec
            pSize = json.loads(szstr)
            self.poolSize = pSize['size']
        except subprocess.CalledProcessError as e:
            print('{}'.format(e))
            self.poolSize = 0
            """Continue on"""
        return

    def checkPGs(self, poolName):
        poolPGs = self.poolPGs['pg_stats'] if self.isSupportedRelease() else self.poolPGs
        if not poolPGs:
            return
        print('Checking PGs in pool {} ...'.format(poolName)),
        badPGs = False
        for pg in poolPGs:
            osdUp = pg['up']
            """
            Construct the OSD path from the leaf to the root. If the
            replica is set to 3 and there are 3 racks, each OSD has its
            own rack (failure domain). If more than one OSD has the
            same rack, this is a violation. If the number of racks is
            one, then we need to make sure the hosts for the three OSDs
            are different.
            """
            check_FD = {}
            checkFailed = False
            for osd in osdUp:
                traverseID = osd
                """Start the level with 1 to include the OSD leaf"""
                traverseLevel = 1
                while self.crushFD[traverseID]['type'] != 'root':
                    crushType = self.crushFD[traverseID]['type']
                    crushName = self.crushFD[traverseID]['name']
                    if crushType in check_FD:
                        check_FD[crushType].append(crushName)
                    else:
                        check_FD[crushType] = [crushName]
                    """traverse up (to the root) one level"""
                    traverseID = self.crushFD[traverseID]['id']
                    traverseLevel += 1
                if traverseLevel != self.osd_depth:
                    raise Exception("OSD depth mismatch")
            """
            check_FD should have
            {
              'host': ['host1', 'host2', 'host3', 'host4'],
              'rack': ['rack1', 'rack2', 'rack3']
            }
            Not checking for the 'root' as there is only one root.
            """
            for ktype in check_FD:
                kvalue = check_FD[ktype]
                if ktype == 'host':
                    """
                    At the host level, every OSD should come from a different
                    host. It is a violation if duplicate hosts are found.
                    """
                    if len(kvalue) != len(set(kvalue)):
                        if not badPGs:
                            print('Failed')
                        badPGs = True
                        print('OSDs {} in PG {} failed check in host {}'.format(pg['up'], pg['pgid'], kvalue))
                elif ktype == 'rack':
                    if len(kvalue) == len(set(kvalue)):
                        continue
                    else:
                        """
                        There are duplicate racks. This could be due to a
                        situation like the pool's size being 3 with only
                        two racks (or one rack). OSDs should come from
                        different hosts, as verified in the 'host' section.
                        """
                        if self.count_racks == len(set(kvalue)):
                            continue
                        elif self.count_racks > len(set(kvalue)):
                            """Not all the racks were used to allocate OSDs"""
                            if not badPGs:
                                print('Failed')
                            badPGs = True
                            print('OSDs {} in PG {} failed check in rack {}'.format(pg['up'], pg['pgid'], kvalue))
            check_FD.clear()
        if not badPGs:
            print('Passed')
        return

    def checkPoolPGs(self):
        for pool in self.listPoolName:
            self.getPoolSize(pool)
            if self.poolSize == 1:
                """No need to check a pool with the size set to 1 copy"""
                print('Checking PGs in pool {} ... {}'.format(pool, 'Skipped'))
                continue
            elif self.poolSize == 0:
                print('Pool {} was not found.'.format(pool))
                continue
            if not self.poolSize > 1:
                raise Exception("Pool size was incorrectly set")

            try:
                """Get the list of PGs in the pool"""
                lsByPool = 'ceph pg ls-by-pool ' + pool + ' -f json-pretty'
                pgstr = subprocess.check_output(lsByPool, shell=True)  # nosec
                self.poolPGs = json.loads(pgstr)
                """Check that OSDs in the PG are in separate failure domains"""
                self.checkPGs(pool)
            except subprocess.CalledProcessError as e:
                print('{}'.format(e))
                """Continue to the next pool (if any)"""
        return


def Main():
    parser = ArgumentParser(description='''
Cross-check the OSDs assigned to the Placement Groups (PGs) of a ceph pool
with the CRUSH topology. The cross-check compares the OSDs in a PG and
verifies the OSDs reside in separate failure domains. PGs with OSDs in
the same failure domain are flagged as violations. The offending PGs are
printed to stdout.

This CLI is executed on-demand on a ceph-mon pod. To invoke the CLI, you
can specify one pool or a list of pools to check. The special pool name
All (or all) checks all the pools in the ceph cluster.
''',
                            formatter_class=RawTextHelpFormatter)
    parser.add_argument('PoolName', type=str, nargs='+',
                        help='List of pools (or All) to validate the PGs and OSDs mapping')
    args = parser.parse_args()

    if ('all' in args.PoolName or
            'All' in args.PoolName) and len(args.PoolName) > 1:
        print('You only need to give one pool with special pool All')
        sys.exit(1)

    """
    Retrieve the crush hierarchies and store them. Cross-check the OSDs
    in each PG, searching for failure domain violations.
    """
    ccm = cephCRUSH(args.PoolName)
    ccm.checkPoolPGs()


if __name__ == '__main__':
    Main()
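The CLI is mounted at /tmp/utils-checkPGs.py in the ceph-mgr pod (see deployment-mgr.yaml below). Pool names here are illustrative:

/tmp/utils-checkPGs.py rbd cephfs_data   # validate specific pools
/tmp/utils-checkPGs.py All               # validate every pool in the cluster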
69
ceph-mon/templates/configmap-bin.yaml
Normal file
@ -0,0 +1,69 @@
{{/*
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at

   http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/}}

{{- if and .Values.manifests.configmap_bin .Values.deployment.ceph }}
{{- $envAll := . }}
---
apiVersion: v1
kind: ConfigMap
metadata:
  name: {{ printf "%s-%s" $envAll.Release.Name "bin" | quote }}
data:
{{- if .Values.images.local_registry.active }}
  image-repo-sync.sh: |
{{- include "helm-toolkit.scripts.image_repo_sync" . | indent 4 }}
{{- end }}

{{- if .Values.bootstrap.enabled }}
  bootstrap.sh: |
{{ tuple "bin/_bootstrap.sh.tpl" . | include "helm-toolkit.utils.template" | indent 4 }}
{{- end }}
  post-apply.sh: |
{{ tuple "bin/_post-apply.sh.tpl" . | include "helm-toolkit.utils.template" | indent 4 }}

  init-dirs.sh: |
{{ tuple "bin/_init-dirs.sh.tpl" . | include "helm-toolkit.utils.template" | indent 4 }}

  keys-bootstrap-keyring-generator.py: |
{{ tuple "bin/keys/_bootstrap-keyring-generator.py.tpl" . | include "helm-toolkit.utils.template" | indent 4 }}
  keys-bootstrap-keyring-manager.sh: |
{{ tuple "bin/keys/_bootstrap-keyring-manager.sh.tpl" . | include "helm-toolkit.utils.template" | indent 4 }}
  keys-storage-keyring-manager.sh: |
{{ tuple "bin/keys/_storage-keyring-manager.sh.tpl" . | include "helm-toolkit.utils.template" | indent 4 }}

  mon-start.sh: |
{{ tuple "bin/mon/_start.sh.tpl" . | include "helm-toolkit.utils.template" | indent 4 }}
  mon-stop.sh: |
{{ tuple "bin/mon/_stop.sh.tpl" . | include "helm-toolkit.utils.template" | indent 4 }}
  mon-check.sh: |
{{ tuple "bin/mon/_check.sh.tpl" . | include "helm-toolkit.utils.template" | indent 4 }}

  mgr-start.sh: |
{{ tuple "bin/mgr/_start.sh.tpl" . | include "helm-toolkit.utils.template" | indent 4 }}
  mgr-check.sh: |
{{ tuple "bin/mgr/_check.sh.tpl" . | include "helm-toolkit.utils.template" | indent 4 }}

  moncheck-start.sh: |
{{ tuple "bin/moncheck/_start.sh.tpl" . | include "helm-toolkit.utils.template" | indent 4 }}
  moncheck-reap-zombies.py: |
{{ tuple "bin/moncheck/_reap-zombies.py.tpl" . | include "helm-toolkit.utils.template" | indent 4 }}

  utils-checkObjectReplication.py: |
{{ tuple "bin/utils/_checkObjectReplication.py.tpl" . | include "helm-toolkit.utils.template" | indent 4 }}
  utils-checkDNS.sh: |
{{ tuple "bin/utils/_checkDNS.sh.tpl" . | include "helm-toolkit.utils.template" | indent 4 }}

  utils-checkPGs.py: |
{{ tuple "bin/utils/_checkPGs.py.tpl" . | include "helm-toolkit.utils.template" | indent 4 }}
{{- end }}
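Whether this ConfigMap renders at all is driven by the toggles in the opening IF; bootstrap.sh is only included when bootstrapping is on. A sketch of the relevant overrides (release and namespace names are illustrative):

helm upgrade --install ceph-mon ./ceph-mon --namespace=ceph \
  --set manifests.configmap_bin=true \
  --set deployment.ceph=true \
  --set bootstrap.enabled=true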
51
ceph-mon/templates/configmap-etc.yaml
Normal file
@ -0,0 +1,51 @@
{{/*
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at

   http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/}}

{{- define "ceph.mon.configmap.etc" }}
{{- $configMapName := index . 0 }}
{{- $envAll := index . 1 }}
{{- with $envAll }}

{{- if .Values.deployment.ceph }}

{{- if empty .Values.conf.ceph.global.mon_host -}}
{{- $monHost := tuple "ceph_mon" "discovery" "mon_msgr2" . | include "helm-toolkit.endpoints.host_and_port_endpoint_uri_lookup" }}
{{- $_ := $monHost | set .Values.conf.ceph.global "mon_host" -}}
{{- end -}}

{{- if empty .Values.conf.ceph.global.fsid -}}
{{- $_ := uuidv4 | set .Values.conf.ceph.global "fsid" -}}
{{- end -}}

{{- if empty .Values.conf.ceph.osd.cluster_network -}}
{{- $_ := .Values.network.cluster | set .Values.conf.ceph.osd "cluster_network" -}}
{{- end -}}

{{- if empty .Values.conf.ceph.osd.public_network -}}
{{- $_ := .Values.network.public | set .Values.conf.ceph.osd "public_network" -}}
{{- end -}}
---
apiVersion: v1
kind: ConfigMap
metadata:
  name: {{ $configMapName }}
data:
  ceph.conf: |
{{ include "helm-toolkit.utils.to_ini" .Values.conf.ceph | indent 4 }}
{{- end }}
{{- end }}
{{- end }}
{{- if .Values.manifests.configmap_etc }}
{{- list (printf "%s-%s" .Release.Name "etc") . | include "ceph.mon.configmap.etc" }}
{{- end }}
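helm-toolkit.utils.to_ini serializes .Values.conf.ceph into classic INI, with the defaults above filled in at render time. A sketch of inspecting the result, assuming release name ceph-mon, namespace ceph, and illustrative values:

kubectl -n ceph get configmap ceph-mon-etc -o jsonpath='{.data.ceph\.conf}'
# [global]
# fsid = 4d7acb2c-8a24-4f44-8394-cf9e6a3705b2
# mon_host = ceph-mon-discovery.ceph.svc.cluster.local:3300
# [osd]
# cluster_network = 10.0.0.0/24
# public_network = 10.0.0.0/24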
33
ceph-mon/templates/configmap-templates.yaml
Normal file
@ -0,0 +1,33 @@
{{/*
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at

   http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/}}

{{- if and .Values.manifests.configmap_templates .Values.deployment.storage_secrets }}
{{- $envAll := . }}
---
apiVersion: v1
kind: ConfigMap
metadata:
  name: {{ printf "%s-%s" $envAll.Release.Name "templates" | quote }}
data:
  admin.keyring: |
{{ .Values.conf.templates.keyring.admin | indent 4 }}
  mon.keyring: |
{{ .Values.conf.templates.keyring.mon | indent 4 }}
  bootstrap.keyring.mds: |
{{ .Values.conf.templates.keyring.bootstrap.mds | indent 4 }}
  bootstrap.keyring.mgr: |
{{ .Values.conf.templates.keyring.bootstrap.mgr | indent 4 }}
  bootstrap.keyring.osd: |
{{ .Values.conf.templates.keyring.bootstrap.osd | indent 4 }}
{{- end }}
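The keyring payloads come verbatim from values, so the rendered ConfigMap can be inspected directly. A sketch, assuming the release is named ceph-mon and runs in namespace ceph:

kubectl -n ceph get configmap ceph-mon-templates -o jsonpath='{.data.mon\.keyring}'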
295
ceph-mon/templates/daemonset-mon.yaml
Normal file
@ -0,0 +1,295 @@
{{/*
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at

   http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/}}

{{- define "monLivenessProbeTemplate" -}}
exec:
  command:
    - /tmp/mon-check.sh
{{- end -}}

{{- define "monReadinessProbeTemplate" -}}
exec:
  command:
    - /tmp/mon-check.sh
{{- end -}}

{{- if and .Values.manifests.daemonset_mon .Values.deployment.ceph }}
{{- $envAll := . }}

{{- $serviceAccountName := (printf "%s" .Release.Name) }}
{{ tuple $envAll "mon" $serviceAccountName | include "helm-toolkit.snippets.kubernetes_pod_rbac_serviceaccount" }}
---
apiVersion: rbac.authorization.k8s.io/v1
kind: Role
metadata:
  name: {{ $serviceAccountName }}
rules:
  - apiGroups:
      - ""
    resources:
      - pods
      - endpoints
    verbs:
      - get
      - list
---
apiVersion: rbac.authorization.k8s.io/v1
kind: RoleBinding
metadata:
  name: {{ $serviceAccountName }}
roleRef:
  apiGroup: rbac.authorization.k8s.io
  kind: Role
  name: {{ $serviceAccountName }}
subjects:
  - kind: ServiceAccount
    name: {{ $serviceAccountName }}
    namespace: {{ $envAll.Release.Namespace }}
{{- end }}

{{- define "ceph.mon.daemonset" }}
{{- $daemonset := index . 0 }}
{{- $configMapName := index . 1 }}
{{- $serviceAccountName := index . 2 }}
{{- $envAll := index . 3 }}
{{- with $envAll }}
---
kind: DaemonSet
apiVersion: apps/v1
metadata:
  name: ceph-mon
  annotations:
    {{ tuple $envAll | include "helm-toolkit.snippets.release_uuid" }}
  labels:
{{ tuple $envAll "ceph" "mon" | include "helm-toolkit.snippets.kubernetes_metadata_labels" | indent 4 }}
spec:
  selector:
    matchLabels:
{{ tuple $envAll "ceph" "mon" | include "helm-toolkit.snippets.kubernetes_metadata_labels" | indent 6 }}
{{ tuple $envAll "mon" | include "helm-toolkit.snippets.kubernetes_upgrades_daemonset" | indent 2 }}
  template:
    metadata:
      labels:
{{ tuple $envAll "ceph" "mon" | include "helm-toolkit.snippets.kubernetes_metadata_labels" | indent 8 }}
      annotations:
{{ tuple $envAll | include "helm-toolkit.snippets.release_uuid" | indent 8 }}
        configmap-etc-hash: {{ tuple "configmap-etc.yaml" . | include "helm-toolkit.utils.hash" }}
        configmap-bin-hash: {{ tuple "configmap-bin.yaml" . | include "helm-toolkit.utils.hash" }}
{{ dict "envAll" $envAll "podName" "ceph-mon" "containerNames" (list "ceph-mon" "ceph-init-dirs" "ceph-log-ownership") | include "helm-toolkit.snippets.kubernetes_mandatory_access_control_annotation" | indent 8 }}
    spec:
{{ dict "envAll" $envAll "application" "mon" | include "helm-toolkit.snippets.kubernetes_pod_security_context" | indent 6 }}
      serviceAccountName: {{ $serviceAccountName }}
      nodeSelector:
        {{ .Values.labels.mon.node_selector_key }}: {{ .Values.labels.mon.node_selector_value }}
      hostNetwork: true
      shareProcessNamespace: true
      dnsPolicy: {{ .Values.pod.dns_policy }}
      initContainers:
{{ tuple $envAll "mon" list | include "helm-toolkit.snippets.kubernetes_entrypoint_init_container" | indent 8 }}
        - name: ceph-init-dirs
{{ tuple $envAll "ceph_mon" | include "helm-toolkit.snippets.image" | indent 10 }}
{{ dict "envAll" $envAll "application" "mon" "container" "ceph_init_dirs" | include "helm-toolkit.snippets.kubernetes_container_security_context" | indent 10 }}
          command:
            - /tmp/init-dirs.sh
          env:
            - name: CLUSTER
              value: "ceph"
          volumeMounts:
            - name: pod-tmp
              mountPath: /tmp
            - name: pod-run
              mountPath: /run
            - name: pod-etc-ceph
              mountPath: /etc/ceph
            - name: ceph-mon-bin
              mountPath: /tmp/init-dirs.sh
              subPath: init-dirs.sh
              readOnly: true
            - name: pod-var-lib-ceph
              mountPath: /var/lib/ceph
              readOnly: false
            - name: pod-var-lib-ceph-crash
              mountPath: /var/lib/ceph/crash
              readOnly: false
        - name: ceph-log-ownership
{{ tuple $envAll "ceph_mon" | include "helm-toolkit.snippets.image" | indent 10 }}
{{ dict "envAll" $envAll "application" "mon" "container" "ceph_log_ownership" | include "helm-toolkit.snippets.kubernetes_container_security_context" | indent 10 }}
          command:
            - chown
            - -R
            - ceph:root
            - /var/log/ceph
          volumeMounts:
            - name: pod-tmp
              mountPath: /tmp
            - name: pod-run
              mountPath: /run
            - name: pod-etc-ceph
              mountPath: /etc/ceph
            - name: pod-var-log
              mountPath: /var/log/ceph
              readOnly: false
      containers:
        - name: ceph-mon
{{ tuple $envAll "ceph_mon" | include "helm-toolkit.snippets.image" | indent 10 }}
{{ tuple $envAll $envAll.Values.pod.resources.mon | include "helm-toolkit.snippets.kubernetes_resources" | indent 10 }}
{{ dict "envAll" $envAll "application" "mon" "container" "ceph_mon" | include "helm-toolkit.snippets.kubernetes_container_security_context" | indent 10 }}
          env:
            - name: CLUSTER
              value: "ceph"
            - name: K8S_HOST_NETWORK
              value: "1"
            - name: MONMAP
              value: /var/lib/ceph/mon/monmap
            - name: NAMESPACE
              valueFrom:
                fieldRef:
                  apiVersion: v1
                  fieldPath: metadata.namespace
            - name: CEPH_PUBLIC_NETWORK
              value: {{ .Values.network.public | quote }}
            - name: KUBECTL_PARAM
              value: {{ tuple $envAll "ceph" "mon" | include "helm-toolkit.snippets.kubernetes_kubectl_params" }}
            - name: MON_PORT
              value: {{ tuple "ceph_mon" "internal" "mon" $envAll | include "helm-toolkit.endpoints.endpoint_port_lookup" | quote }}
            - name: MON_PORT_V2
              value: {{ tuple "ceph_mon" "internal" "mon_msgr2" $envAll | include "helm-toolkit.endpoints.endpoint_port_lookup" | quote }}
            - name: MON_IP
              valueFrom:
                fieldRef:
                  fieldPath: status.podIP
            - name: POD_NAME
              valueFrom:
                fieldRef:
                  fieldPath: metadata.name
            - name: NODE_NAME
              valueFrom:
                fieldRef:
                  fieldPath: spec.nodeName
          command:
            - /tmp/mon-start.sh
          lifecycle:
            preStop:
              exec:
                command:
                  - /tmp/mon-stop.sh
          ports:
            - containerPort: {{ tuple "ceph_mon" "internal" "mon" $envAll | include "helm-toolkit.endpoints.endpoint_port_lookup" }}
            - containerPort: {{ tuple "ceph_mon" "internal" "mon_msgr2" $envAll | include "helm-toolkit.endpoints.endpoint_port_lookup" }}
{{ dict "envAll" . "component" "ceph" "container" "ceph-mon" "type" "liveness" "probeTemplate" (include "monLivenessProbeTemplate" . | fromYaml) | include "helm-toolkit.snippets.kubernetes_probe" | trim | indent 10 }}
{{ dict "envAll" . "component" "ceph" "container" "ceph-mon" "type" "readiness" "probeTemplate" (include "monReadinessProbeTemplate" . | fromYaml) | include "helm-toolkit.snippets.kubernetes_probe" | trim | indent 10 }}
          volumeMounts:
            - name: pod-tmp
              mountPath: /tmp
            - name: pod-run
              mountPath: /run
            - name: pod-etc-ceph
              mountPath: /etc/ceph
            - name: ceph-mon-bin
              mountPath: /tmp/mon-start.sh
              subPath: mon-start.sh
              readOnly: true
            - name: ceph-mon-bin
              mountPath: /tmp/mon-stop.sh
              subPath: mon-stop.sh
              readOnly: true
            - name: ceph-mon-bin
              mountPath: /tmp/mon-check.sh
              subPath: mon-check.sh
              readOnly: true
            - name: ceph-mon-bin
              mountPath: /tmp/checkObjectReplication.py
              subPath: utils-checkObjectReplication.py
              readOnly: true
            - name: ceph-mon-bin
              mountPath: /tmp/utils-checkDNS.sh
              subPath: utils-checkDNS.sh
              readOnly: true
            - name: ceph-mon-etc
              mountPath: /etc/ceph/ceph.conf.template
              subPath: ceph.conf
              readOnly: true
            - name: ceph-client-admin-keyring
              mountPath: /etc/ceph/ceph.client.admin.keyring
              subPath: ceph.client.admin.keyring
              readOnly: true
            - name: ceph-mon-keyring
              mountPath: /etc/ceph/ceph.mon.keyring.seed
              subPath: ceph.mon.keyring
              readOnly: true
            - name: ceph-bootstrap-osd-keyring
              mountPath: /var/lib/ceph/bootstrap-osd/ceph.keyring
              subPath: ceph.keyring
              readOnly: true
            - name: ceph-bootstrap-mds-keyring
              mountPath: /var/lib/ceph/bootstrap-mds/ceph.keyring
              subPath: ceph.keyring
              readOnly: true
            - name: pod-var-lib-ceph
              mountPath: /var/lib/ceph
              readOnly: false
            - name: pod-var-lib-ceph-crash
              mountPath: /var/lib/ceph/crash
              readOnly: false
            - name: pod-var-log
              mountPath: /var/log/ceph
              readOnly: false
      volumes:
        - name: pod-tmp
          emptyDir: {}
        - name: pod-run
          emptyDir:
            medium: "Memory"
        - name: pod-etc-ceph
          emptyDir: {}
        - name: pod-var-log
          emptyDir: {}
        - name: ceph-mon-bin
          configMap:
            name: {{ printf "%s-%s" $envAll.Release.Name "bin" | quote }}
            defaultMode: 0555
        - name: ceph-mon-etc
          configMap:
            name: {{ $configMapName }}
            defaultMode: 0444
        - name: pod-var-lib-ceph
          hostPath:
            path: {{ .Values.conf.storage.mon.directory }}
        - name: pod-var-lib-ceph-crash
          hostPath:
            path: /var/lib/openstack-helm/ceph/crash
            type: DirectoryOrCreate
        - name: ceph-client-admin-keyring
          secret:
            secretName: {{ .Values.secrets.keyrings.admin }}
        - name: ceph-mon-keyring
          secret:
            secretName: {{ .Values.secrets.keyrings.mon }}
        - name: ceph-bootstrap-osd-keyring
          secret:
            secretName: {{ .Values.secrets.keyrings.osd }}
        - name: ceph-bootstrap-mds-keyring
          secret:
            secretName: {{ .Values.secrets.keyrings.mds }}
{{- end }}
{{- end }}

{{- if .Values.manifests.daemonset_mon }}
{{- $daemonset := .Values.daemonset.prefix_name }}
{{- $configMapName := (printf "%s-%s" .Release.Name "etc") }}
{{- $serviceAccountName := (printf "%s" .Release.Name) }}
{{- $daemonset_yaml := list $daemonset $configMapName $serviceAccountName . | include "ceph.mon.daemonset" | toString | fromYaml }}
{{- $configmap_yaml := "ceph.mon.configmap.etc" }}
{{- list $daemonset $daemonset_yaml $configmap_yaml $configMapName . | include "ceph.utils.mon_daemonset_overrides" }}
{{- end }}
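The two kubernetes_probe lines pull their timings from values keyed by the component/container/type tuple passed in. A sketch of tuning the mon liveness probe, assuming the pod.probes layout that helm-toolkit conventionally reads (keys and timings illustrative):

helm upgrade --install ceph-mon ./ceph-mon --namespace=ceph \
  --set pod.probes.ceph.ceph-mon.liveness.params.initialDelaySeconds=360 \
  --set pod.probes.ceph.ceph-mon.liveness.params.periodSeconds=180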
208
ceph-mon/templates/deployment-mgr.yaml
Normal file
@ -0,0 +1,208 @@
{{/*
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at

   http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/}}

{{- define "mgrLivenessProbeTemplate" -}}
exec:
  command:
    - /tmp/mgr-check.sh
{{- end }}

{{- define "mgrReadinessProbeTemplate" -}}
exec:
  command:
    - /tmp/mgr-check.sh
{{- end }}

{{- if and .Values.manifests.deployment_mgr (and .Values.deployment.ceph .Values.conf.features.mgr ) }}
{{- $envAll := . }}

{{- $serviceAccountName := "ceph-mgr" }}
# This protective IF prevents repeated creation of the ceph-mgr
# service account.
# To be considered: separating the SA and Deployment manifests.
{{- if .Values.manifests.deployment_mgr_sa }}
{{ tuple $envAll "mgr" $serviceAccountName | include "helm-toolkit.snippets.kubernetes_pod_rbac_serviceaccount" }}
{{- end }}
---
kind: Deployment
apiVersion: apps/v1
metadata:
  name: ceph-mgr
  annotations:
    {{ tuple $envAll | include "helm-toolkit.snippets.release_uuid" }}
  labels:
{{ tuple $envAll "ceph" "mgr" | include "helm-toolkit.snippets.kubernetes_metadata_labels" | indent 4 }}
spec:
  replicas: {{ .Values.pod.replicas.mgr }}
  selector:
    matchLabels:
{{ tuple $envAll "ceph" "mgr" | include "helm-toolkit.snippets.kubernetes_metadata_labels" | indent 6 }}
  strategy:
    type: {{ .Values.pod.updateStrategy.mgr.type }}
  template:
    metadata:
      labels:
{{ tuple $envAll "ceph" "mgr" | include "helm-toolkit.snippets.kubernetes_metadata_labels" | indent 8 }}
      annotations:
{{ tuple $envAll | include "helm-toolkit.snippets.release_uuid" | indent 8 }}
        configmap-bin-hash: {{ tuple "configmap-bin.yaml" . | include "helm-toolkit.utils.hash" }}
        configmap-etc-hash: {{ tuple "configmap-etc.yaml" . | include "helm-toolkit.utils.hash" }}
{{ dict "envAll" $envAll "podName" "ceph-mgr" "containerNames" (list "ceph-mgr" "ceph-init-dirs") | include "helm-toolkit.snippets.kubernetes_mandatory_access_control_annotation" | indent 8 }}
    spec:
{{ dict "envAll" $envAll "application" "mgr" | include "helm-toolkit.snippets.kubernetes_pod_security_context" | indent 6 }}
      serviceAccountName: {{ $serviceAccountName }}
      affinity:
{{ tuple $envAll "ceph" "mgr" | include "helm-toolkit.snippets.kubernetes_pod_anti_affinity" | indent 8 }}
{{ tuple $envAll "mgr" | include "helm-toolkit.snippets.kubernetes_tolerations" | indent 6 }}
      nodeSelector:
        {{ .Values.labels.mgr.node_selector_key }}: {{ .Values.labels.mgr.node_selector_value }}
      hostNetwork: true
      hostPID: true
      dnsPolicy: {{ .Values.pod.dns_policy }}
      initContainers:
{{ tuple $envAll "mgr" list | include "helm-toolkit.snippets.kubernetes_entrypoint_init_container" | indent 8 }}
        - name: ceph-init-dirs
{{ tuple $envAll "ceph_mgr" | include "helm-toolkit.snippets.image" | indent 10 }}
{{ dict "envAll" $envAll "application" "mgr" "container" "init_dirs" | include "helm-toolkit.snippets.kubernetes_container_security_context" | indent 10 }}
          command:
            - /tmp/init-dirs.sh
          env:
            - name: CLUSTER
              value: "ceph"
          volumeMounts:
            - name: pod-tmp
              mountPath: /tmp
            - name: pod-run
              mountPath: /run
            - name: pod-etc-ceph
              mountPath: /etc/ceph
            - name: ceph-mon-bin
              mountPath: /tmp/init-dirs.sh
              subPath: init-dirs.sh
              readOnly: true
            - name: pod-var-lib-ceph
              mountPath: /var/lib/ceph
              readOnly: false
            - name: pod-var-lib-ceph-crash
              mountPath: /var/lib/ceph/crash
              readOnly: false
      containers:
        - name: ceph-mgr
{{ tuple $envAll "ceph_mgr" | include "helm-toolkit.snippets.image" | indent 10 }}
{{ tuple $envAll $envAll.Values.pod.resources.mgr | include "helm-toolkit.snippets.kubernetes_resources" | indent 10 }}
{{ dict "envAll" $envAll "application" "mgr" "container" "mgr" | include "helm-toolkit.snippets.kubernetes_container_security_context" | indent 10 }}
          env:
            - name: CLUSTER
              value: "ceph"
            - name: NAMESPACE
              valueFrom:
                fieldRef:
                  apiVersion: v1
                  fieldPath: metadata.namespace
            - name: MON_PORT
              value: {{ tuple "ceph_mon" "internal" "mon" $envAll | include "helm-toolkit.endpoints.endpoint_port_lookup" | quote }}
            - name: MON_PORT_V2
              value: {{ tuple "ceph_mon" "internal" "mon_msgr2" $envAll | include "helm-toolkit.endpoints.endpoint_port_lookup" | quote }}
{{- if .Values.ceph_mgr_enabled_modules }}
            - name: ENABLED_MODULES
              value: |-
{{- range $value := .Values.ceph_mgr_enabled_modules }}
                {{ $value }}
{{- end }}
{{- end }}
{{- if .Values.ceph_mgr_modules_config }}
{{- range $module,$params := .Values.ceph_mgr_modules_config }}
{{- range $key, $value := $params }}
            - name: {{ $module }}_{{ $key }}
              value: {{ $value | quote }}
{{- end }}
{{- end }}
{{- end }}
          command:
            - /mgr-start.sh
          ports:
            - name: mgr
              containerPort: {{ tuple "ceph_mgr" "internal" "mgr" . | include "helm-toolkit.endpoints.endpoint_port_lookup" }}
{{- if (has "prometheus" .Values.ceph_mgr_enabled_modules) }}
            - name: metrics
              containerPort: {{ tuple "ceph_mgr" "internal" "metrics" . | include "helm-toolkit.endpoints.endpoint_port_lookup" }}
{{ end -}}
          volumeMounts:
            - name: pod-tmp
              mountPath: /tmp
            - name: pod-run
              mountPath: /run
            - name: pod-etc-ceph
              mountPath: /etc/ceph
            - name: ceph-mon-bin
              mountPath: /mgr-start.sh
              subPath: mgr-start.sh
              readOnly: true
            - name: ceph-mon-bin
              mountPath: /tmp/mgr-check.sh
              subPath: mgr-check.sh
              readOnly: true
            - name: ceph-mon-etc
              mountPath: /etc/ceph/ceph.conf.template
              subPath: ceph.conf
              readOnly: true
            - name: ceph-mon-admin-keyring
              mountPath: /etc/ceph/ceph.client.admin.keyring
              subPath: ceph.client.admin.keyring
              readOnly: true
            - name: ceph-bootstrap-mgr-keyring
              mountPath: /var/lib/ceph/bootstrap-mgr/ceph.keyring
              subPath: ceph.keyring
              readOnly: false
            - name: pod-var-lib-ceph
              mountPath: /var/lib/ceph
              readOnly: false
            - name: pod-var-lib-ceph-crash
              mountPath: /var/lib/ceph/crash
              readOnly: false
            - name: ceph-mon-bin
              mountPath: /tmp/utils-checkPGs.py
              subPath: utils-checkPGs.py
              readOnly: true
{{ dict "envAll" . "component" "ceph" "container" "ceph-mgr" "type" "liveness" "probeTemplate" (include "mgrLivenessProbeTemplate" . | fromYaml) | include "helm-toolkit.snippets.kubernetes_probe" | trim | indent 10 }}
{{ dict "envAll" . "component" "ceph" "container" "ceph-mgr" "type" "readiness" "probeTemplate" (include "mgrReadinessProbeTemplate" . | fromYaml) | include "helm-toolkit.snippets.kubernetes_probe" | trim | indent 10 }}
      volumes:
        - name: pod-tmp
          emptyDir: {}
        - name: pod-run
          emptyDir:
            medium: "Memory"
        - name: pod-etc-ceph
          emptyDir: {}
        - name: ceph-mon-bin
          configMap:
            name: {{ printf "%s-%s" $envAll.Release.Name "bin" | quote }}
            defaultMode: 0555
        - name: ceph-mon-etc
          configMap:
            name: {{ printf "%s-%s" $envAll.Release.Name "etc" | quote }}
            defaultMode: 0444
        - name: pod-var-lib-ceph
          emptyDir: {}
        - name: pod-var-lib-ceph-crash
          hostPath:
            path: /var/lib/openstack-helm/ceph/crash
            type: DirectoryOrCreate
        - name: ceph-mon-admin-keyring
          secret:
            secretName: {{ .Values.secrets.keyrings.admin }}
        - name: ceph-bootstrap-mgr-keyring
          secret:
            secretName: {{ .Values.secrets.keyrings.mgr }}
{{- end }}
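Each entry of ceph_mgr_enabled_modules becomes a line of the ENABLED_MODULES env var, and each ceph_mgr_modules_config option becomes a <module>_<key> env var. A sketch, with an illustrative balancer option:

helm upgrade --install ceph-mon ./ceph-mon --namespace=ceph \
  --set 'ceph_mgr_enabled_modules={status,prometheus,balancer}' \
  --set ceph_mgr_modules_config.balancer.mode=upmap
# renders, inside the ceph-mgr container spec:
#   - name: balancer_mode
#     value: "upmap"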
130
ceph-mon/templates/deployment-moncheck.yaml
Normal file
@ -0,0 +1,130 @@
{{/*
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at

   http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/}}

{{- if and .Values.manifests.deployment_moncheck .Values.deployment.ceph }}
{{- $envAll := . }}

{{- $serviceAccountName := "ceph-mon-check" }}
{{ tuple $envAll "moncheck" $serviceAccountName | include "helm-toolkit.snippets.kubernetes_pod_rbac_serviceaccount" }}
---
kind: Deployment
apiVersion: apps/v1
metadata:
  name: ceph-mon-check
  annotations:
    {{ tuple $envAll | include "helm-toolkit.snippets.release_uuid" }}
  labels:
{{ tuple $envAll "ceph" "moncheck" | include "helm-toolkit.snippets.kubernetes_metadata_labels" | indent 4 }}
spec:
  replicas: {{ .Values.pod.replicas.mon_check }}
  selector:
    matchLabels:
{{ tuple $envAll "ceph" "moncheck" | include "helm-toolkit.snippets.kubernetes_metadata_labels" | indent 6 }}
  template:
    metadata:
      labels:
{{ tuple $envAll "ceph" "moncheck" | include "helm-toolkit.snippets.kubernetes_metadata_labels" | indent 8 }}
      annotations:
{{ tuple $envAll | include "helm-toolkit.snippets.release_uuid" | indent 8 }}
{{ dict "envAll" $envAll "podName" "ceph-mon-check" "containerNames" (list "ceph-mon" "init") | include "helm-toolkit.snippets.kubernetes_mandatory_access_control_annotation" | indent 8 }}
    spec:
{{ dict "envAll" $envAll "application" "moncheck" | include "helm-toolkit.snippets.kubernetes_pod_security_context" | indent 6 }}
      serviceAccountName: {{ $serviceAccountName }}
      affinity:
{{ tuple $envAll "ceph" "moncheck" | include "helm-toolkit.snippets.kubernetes_pod_anti_affinity" | indent 8 }}
{{ tuple $envAll "mon_check" | include "helm-toolkit.snippets.kubernetes_tolerations" | indent 6 }}
      nodeSelector:
        {{ .Values.labels.mon.node_selector_key }}: {{ .Values.labels.mon.node_selector_value }}
      initContainers:
{{ tuple $envAll "moncheck" list | include "helm-toolkit.snippets.kubernetes_entrypoint_init_container" | indent 8 }}
      containers:
        - name: ceph-mon
{{ tuple $envAll "ceph_mon_check" | include "helm-toolkit.snippets.image" | indent 10 }}
{{ tuple $envAll $envAll.Values.pod.resources.moncheck | include "helm-toolkit.snippets.kubernetes_resources" | indent 10 }}
{{ dict "envAll" $envAll "application" "moncheck" "container" "ceph_mon" | include "helm-toolkit.snippets.kubernetes_container_security_context" | indent 10 }}
          env:
            - name: CLUSTER
              value: "ceph"
            - name: K8S_HOST_NETWORK
              value: "1"
            - name: NAMESPACE
              valueFrom:
                fieldRef:
                  apiVersion: v1
                  fieldPath: metadata.namespace
            - name: MON_PORT
              value: {{ tuple "ceph_mon" "internal" "mon" $envAll | include "helm-toolkit.endpoints.endpoint_port_lookup" | quote }}
            - name: MON_PORT_V2
              value: {{ tuple "ceph_mon" "internal" "mon_msgr2" $envAll | include "helm-toolkit.endpoints.endpoint_port_lookup" | quote }}
          command:
            - /tmp/moncheck-start.sh
          volumeMounts:
            - name: pod-tmp
              mountPath: /tmp
            - name: pod-run
              mountPath: /run
            - name: pod-etc-ceph
              mountPath: /etc/ceph
            - name: ceph-mon-bin
              mountPath: /tmp/moncheck-start.sh
              subPath: moncheck-start.sh
              readOnly: true
            - name: ceph-mon-bin
              mountPath: /tmp/moncheck-reap-zombies.py
              subPath: moncheck-reap-zombies.py
              readOnly: true
            - name: ceph-mon-bin
              mountPath: /tmp/utils-checkDNS.sh
              subPath: utils-checkDNS.sh
              readOnly: true
            - name: ceph-mon-etc
              mountPath: /etc/ceph/ceph.conf.template
              subPath: ceph.conf
              readOnly: true
            - name: ceph-client-admin-keyring
              mountPath: /etc/ceph/ceph.client.admin.keyring
              subPath: ceph.client.admin.keyring
              readOnly: true
            - name: ceph-mon-keyring
              mountPath: /etc/ceph/ceph.mon.keyring
              subPath: ceph.mon.keyring
              readOnly: true
            - name: pod-var-lib-ceph
              mountPath: /var/lib/ceph
              readOnly: false
      volumes:
        - name: pod-tmp
          emptyDir: {}
        - name: pod-run
          emptyDir:
            medium: "Memory"
        - name: pod-etc-ceph
          emptyDir: {}
        - name: ceph-mon-etc
          configMap:
            name: {{ printf "%s-%s" $envAll.Release.Name "etc" | quote }}
            defaultMode: 0444
        - name: ceph-mon-bin
          configMap:
            name: {{ printf "%s-%s" $envAll.Release.Name "bin" | quote }}
            defaultMode: 0555
        - name: pod-var-lib-ceph
          emptyDir: {}
        - name: ceph-client-admin-keyring
          secret:
            secretName: {{ .Values.secrets.keyrings.admin }}
        - name: ceph-mon-keyring
          secret:
            secretName: {{ .Values.secrets.keyrings.mon }}
{{- end }}
85
ceph-mon/templates/job-bootstrap.yaml
Normal file
@ -0,0 +1,85 @@
{{/*
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at

http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/}}

{{- if and .Values.manifests.job_bootstrap .Values.bootstrap.enabled }}
{{- $envAll := . }}

{{- $serviceAccountName := "ceph-bootstrap" }}
{{ tuple $envAll "bootstrap" $serviceAccountName | include "helm-toolkit.snippets.kubernetes_pod_rbac_serviceaccount" }}
---
apiVersion: batch/v1
kind: Job
metadata:
name: ceph-bootstrap
labels:
{{ tuple $envAll "ceph" "bootstrap" | include "helm-toolkit.snippets.kubernetes_metadata_labels" | indent 4 }}
annotations:
{{ tuple $envAll | include "helm-toolkit.snippets.release_uuid" }}
spec:
template:
metadata:
labels:
{{ tuple $envAll "ceph" "bootstrap" | include "helm-toolkit.snippets.kubernetes_metadata_labels" | indent 8 }}
annotations:
{{ tuple $envAll | include "helm-toolkit.snippets.release_uuid" | indent 8 }}
{{ dict "envAll" $envAll "podName" "ceph-bootstrap" "containerNames" (list "ceph-bootstrap" "init") | include "helm-toolkit.snippets.kubernetes_mandatory_access_control_annotation" | indent 8 }}
spec:
{{ dict "envAll" $envAll "application" "bootstrap" | include "helm-toolkit.snippets.kubernetes_pod_security_context" | indent 6 }}
serviceAccountName: {{ $serviceAccountName }}
restartPolicy: OnFailure
nodeSelector:
{{ .Values.labels.job.node_selector_key }}: {{ .Values.labels.job.node_selector_value }}
initContainers:
{{ tuple $envAll "bootstrap" list | include "helm-toolkit.snippets.kubernetes_entrypoint_init_container" | indent 8 }}
containers:
- name: ceph-bootstrap
{{ tuple $envAll "ceph_bootstrap" | include "helm-toolkit.snippets.image" | indent 10 }}
{{ tuple $envAll $envAll.Values.pod.resources.jobs.bootstrap | include "helm-toolkit.snippets.kubernetes_resources" | indent 10 }}
{{ dict "envAll" $envAll "application" "bootstrap" "container" "ceph_bootstrap" | include "helm-toolkit.snippets.kubernetes_container_security_context" | indent 10 }}
command:
- /tmp/bootstrap.sh
volumeMounts:
- name: pod-tmp
mountPath: /tmp
- name: pod-etc-ceph
mountPath: /etc/ceph
- name: ceph-mon-bin
mountPath: /tmp/bootstrap.sh
subPath: bootstrap.sh
readOnly: true
- name: ceph-mon-etc
mountPath: /etc/ceph/ceph.conf
subPath: ceph.conf
readOnly: true
- name: ceph-client-admin-keyring
mountPath: /etc/ceph/ceph.client.admin.keyring
subPath: ceph.client.admin.keyring
readOnly: true
volumes:
- name: pod-tmp
emptyDir: {}
- name: pod-etc-ceph
emptyDir: {}
- name: ceph-mon-bin
configMap:
name: {{ printf "%s-%s" $envAll.Release.Name "bin" | quote }}
defaultMode: 0555
- name: ceph-mon-etc
configMap:
name: {{ printf "%s-%s" $envAll.Release.Name "etc" | quote }}
defaultMode: 0444
- name: ceph-client-admin-keyring
secret:
secretName: {{ .Values.secrets.keyrings.admin }}
{{- end }}
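The job only renders when both manifests.job_bootstrap and bootstrap.enabled are true. A sketch of enabling it at deploy time; the values paths come from this chart's values.yaml, while the release name, namespace, and chart path are assumptions:

# Enable the bootstrap job for one deployment (paths from values.yaml;
# release/namespace/chart location are illustrative).
helm upgrade --install ceph-mon ./ceph-mon \
  --namespace ceph \
  --set manifests.job_bootstrap=true \
  --set bootstrap.enabled=true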
18
ceph-mon/templates/job-image-repo-sync.yaml
Normal file
@ -0,0 +1,18 @@
{{/*
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at

http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/}}

{{- if and .Values.manifests.job_image_repo_sync .Values.images.local_registry.active }}
{{- $imageRepoSyncJob := dict "envAll" . "serviceName" "ceph-mon" -}}
{{ $imageRepoSyncJob | include "helm-toolkit.manifests.job_image_repo_sync" }}
{{- end }}
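This manifest is emitted only when a local image registry is in use. A quick render check, sketched under the same chart-path assumption as above:

# The job renders only when both toggles are true; inspect the output:
helm template ceph-mon ./ceph-mon \
  --set manifests.job_image_repo_sync=true \
  --set images.local_registry.active=true | grep -B1 -A2 'kind: Job'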
133
ceph-mon/templates/job-keyring.yaml
Normal file
@ -0,0 +1,133 @@
{{/*
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at

http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/}}

{{- if and .Values.manifests.job_keyring .Values.deployment.storage_secrets }}
{{- $envAll := . }}
{{- range $key1, $cephBootstrapKey := tuple "mds" "osd" "mon" "mgr" }}
{{- $component := print $cephBootstrapKey "-keyring-generator" }}
{{- $jobName := print "ceph-" $component }}

{{- $serviceAccountName := $jobName }}
{{ tuple $envAll "job_keyring_generator" $serviceAccountName | include "helm-toolkit.snippets.kubernetes_pod_rbac_serviceaccount" }}
---
apiVersion: rbac.authorization.k8s.io/v1
kind: Role
metadata:
name: {{ $serviceAccountName }}
rules:
- apiGroups:
- ""
resources:
- secrets
verbs:
- get
- create
- patch
---
apiVersion: rbac.authorization.k8s.io/v1
kind: RoleBinding
metadata:
name: {{ $serviceAccountName }}
roleRef:
apiGroup: rbac.authorization.k8s.io
kind: Role
name: {{ $serviceAccountName }}
subjects:
- kind: ServiceAccount
name: {{ $serviceAccountName }}
namespace: {{ $envAll.Release.Namespace }}
---
apiVersion: batch/v1
kind: Job
metadata:
name: {{ $jobName }}
labels:
{{ tuple $envAll "ceph" $jobName | include "helm-toolkit.snippets.kubernetes_metadata_labels" | indent 4 }}
annotations:
{{ tuple $envAll | include "helm-toolkit.snippets.release_uuid" }}
spec:
template:
metadata:
labels:
{{ tuple $envAll "ceph" $jobName | include "helm-toolkit.snippets.kubernetes_metadata_labels" | indent 8 }}
annotations:
{{ dict "envAll" $envAll "podName" $jobName "containerNames" (list $jobName "init") | include "helm-toolkit.snippets.kubernetes_mandatory_access_control_annotation" | indent 8 }}
spec:
{{ dict "envAll" $envAll "application" "ceph" | include "helm-toolkit.snippets.kubernetes_pod_security_context" | indent 6 }}
serviceAccountName: {{ $serviceAccountName }}
restartPolicy: OnFailure
nodeSelector:
{{ $envAll.Values.labels.job.node_selector_key }}: {{ $envAll.Values.labels.job.node_selector_value }}
initContainers:
{{ tuple $envAll "job_keyring_generator" list | include "helm-toolkit.snippets.kubernetes_entrypoint_init_container" | indent 8 }}
containers:
- name: {{ $jobName }}
{{ tuple $envAll "ceph_config_helper" | include "helm-toolkit.snippets.image" | indent 10 }}
{{ tuple $envAll $envAll.Values.pod.resources.jobs.secret_provisioning | include "helm-toolkit.snippets.kubernetes_resources" | indent 10 }}
{{ dict "envAll" $envAll "application" "ceph" "container" $jobName | include "helm-toolkit.snippets.kubernetes_container_security_context" | indent 10 }}
env:
- name: DEPLOYMENT_NAMESPACE
valueFrom:
fieldRef:
fieldPath: metadata.namespace
- name: CEPH_GEN_DIR
value: /tmp
- name: CEPH_TEMPLATES_DIR
value: /tmp/templates
{{- if eq $cephBootstrapKey "mon" }}
- name: CEPH_KEYRING_NAME
value: ceph.mon.keyring
- name: CEPH_KEYRING_TEMPLATE
value: mon.keyring
{{- else }}
- name: CEPH_KEYRING_NAME
value: ceph.keyring
- name: CEPH_KEYRING_TEMPLATE
value: bootstrap.keyring.{{ $cephBootstrapKey }}
{{- end }}
- name: KUBE_SECRET_NAME
value: {{ index $envAll.Values.secrets.keyrings $cephBootstrapKey }}
command:
- /tmp/keys-bootstrap-keyring-manager.sh
volumeMounts:
- name: pod-tmp
mountPath: /tmp
- name: pod-etc-ceph
mountPath: /etc/ceph
- name: ceph-mon-bin
mountPath: /tmp/keys-bootstrap-keyring-manager.sh
subPath: keys-bootstrap-keyring-manager.sh
readOnly: true
- name: ceph-mon-bin
mountPath: /tmp/keys-bootstrap-keyring-generator.py
subPath: keys-bootstrap-keyring-generator.py
readOnly: true
- name: ceph-templates
mountPath: /tmp/templates
readOnly: true
volumes:
- name: pod-tmp
emptyDir: {}
- name: pod-etc-ceph
emptyDir: {}
- name: ceph-mon-bin
configMap:
name: {{ printf "%s-%s" $envAll.Release.Name "bin" | quote }}
defaultMode: 0555
- name: ceph-templates
configMap:
name: {{ printf "%s-%s" $envAll.Release.Name "templates" | quote }}
defaultMode: 0444
{{- end }}
{{- end }}
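The range above stamps out one keyring-generator job per component (mds, osd, mon, mgr). A sketch of confirming the four generated names from a local render; the chart path is an assumption:

# Extract the unique generator names from the rendered manifests:
helm template ceph-mon ./ceph-mon \
  | grep -o 'ceph-[a-z]*-keyring-generator' | sort -u
# ceph-mds-keyring-generator
# ceph-mgr-keyring-generator
# ceph-mon-keyring-generator
# ceph-osd-keyring-generator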
145
ceph-mon/templates/job-post-apply.yaml
Normal file
@ -0,0 +1,145 @@
{{/*
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at

http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/}}

{{- if eq .Values.pod.lifecycle.upgrades.daemonsets.pod_replacement_strategy "OnDelete" }}
{{- if and .Values.manifests.job_post_apply }}
{{- $envAll := . }}

{{- $serviceAccountName := printf "%s-%s" .Release.Name "post-apply" }}
{{ tuple $envAll "post-apply" $serviceAccountName | include "helm-toolkit.snippets.kubernetes_pod_rbac_serviceaccount" }}
---
kind: ClusterRole
apiVersion: rbac.authorization.k8s.io/v1
metadata:
name: {{ $serviceAccountName }}
rules:
- apiGroups:
- ''
resources:
- pods
- events
- jobs
- pods/exec
verbs:
- create
- get
- delete
- list
- apiGroups:
- 'apps'
resources:
- daemonsets
verbs:
- get
- list
- apiGroups:
- 'batch'
resources:
- jobs
verbs:
- get
- list
---
apiVersion: rbac.authorization.k8s.io/v1
kind: ClusterRoleBinding
metadata:
name: {{ $serviceAccountName }}
subjects:
- kind: ServiceAccount
name: {{ $serviceAccountName }}
namespace: {{ $envAll.Release.Namespace }}
roleRef:
kind: ClusterRole
name: {{ $serviceAccountName }}
apiGroup: rbac.authorization.k8s.io
---
apiVersion: batch/v1
kind: Job
metadata:
name: {{ $serviceAccountName }}
labels:
{{ tuple $envAll "ceph-upgrade" "post-apply" | include "helm-toolkit.snippets.kubernetes_metadata_labels" | indent 4 }}
annotations:
{{ tuple $envAll | include "helm-toolkit.snippets.release_uuid" }}
spec:
template:
metadata:
labels:
{{ tuple $envAll "ceph-upgrade" "post-apply" | include "helm-toolkit.snippets.kubernetes_metadata_labels" | indent 8 }}
annotations:
configmap-bin-hash: {{ tuple "configmap-bin.yaml" . | include "helm-toolkit.utils.hash" }}
{{ dict "envAll" $envAll "podName" "ceph-mon-post-apply" "containerNames" (list "ceph-mon-post-apply" "init" ) | include "helm-toolkit.snippets.kubernetes_mandatory_access_control_annotation" | indent 8 }}
spec:
{{ dict "envAll" $envAll "application" "post_apply" | include "helm-toolkit.snippets.kubernetes_pod_security_context" | indent 6 }}
serviceAccountName: {{ $serviceAccountName }}
restartPolicy: OnFailure
nodeSelector:
{{ .Values.labels.job.node_selector_key }}: {{ .Values.labels.job.node_selector_value }}
initContainers:
{{ tuple $envAll "post-apply" list | include "helm-toolkit.snippets.kubernetes_entrypoint_init_container" | indent 8 }}
containers:
- name: ceph-mon-post-apply
{{ tuple $envAll "ceph_config_helper" | include "helm-toolkit.snippets.image" | indent 10 }}
{{ tuple $envAll $envAll.Values.pod.resources.jobs.bootstrap | include "helm-toolkit.snippets.kubernetes_resources" | indent 10 }}
{{ dict "envAll" $envAll "application" "post_apply" "container" "ceph_mon_post_apply" | include "helm-toolkit.snippets.kubernetes_container_security_context" | indent 10 }}
env:
- name: CLUSTER
value: "ceph"
- name: CEPH_NAMESPACE
value: {{ .Release.Namespace }}
- name: RELEASE_GROUP_NAME
value: {{ .Release.Name }}
- name: UNCONDITIONAL_MON_RESTART
value: {{ .Values.conf.storage.unconditional_mon_restart | quote }}
command:
- /tmp/post-apply.sh
volumeMounts:
- name: pod-tmp
mountPath: /tmp
- name: pod-etc-ceph
mountPath: /etc/ceph
- name: ceph-mon-bin
mountPath: /tmp/post-apply.sh
subPath: post-apply.sh
readOnly: true
- name: ceph-mon-bin
mountPath: /tmp/wait-for-pods.sh
subPath: wait-for-pods.sh
readOnly: true
- name: ceph-mon-etc
mountPath: /etc/ceph/ceph.conf
subPath: ceph.conf
readOnly: true
- name: ceph-mon-admin-keyring
mountPath: /etc/ceph/ceph.client.admin.keyring
subPath: ceph.client.admin.keyring
readOnly: true
volumes:
- name: pod-tmp
emptyDir: {}
- name: pod-etc-ceph
emptyDir: {}
- name: ceph-mon-bin
configMap:
name: {{ printf "%s-%s" $envAll.Release.Name "bin" | quote }}
defaultMode: 0555
- name: ceph-mon-etc
configMap:
name: {{ printf "%s-%s" $envAll.Release.Name "etc" | quote }}
defaultMode: 0444
- name: ceph-mon-admin-keyring
secret:
secretName: {{ .Values.secrets.keyrings.admin }}
{{- end }}
{{- end }}
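Note the outer gate: the post-apply job is rendered only when the daemonset pod replacement strategy is OnDelete. A minimal sketch of exercising it, with release name, namespace, and chart path as assumptions; --set-string keeps the restart flag a string as values.yaml expects:

# Switch to OnDelete so the post-apply job handles mon restarts, and
# optionally force unconditional restarts (string-typed value):
helm upgrade --install ceph-mon ./ceph-mon \
  --namespace ceph \
  --set pod.lifecycle.upgrades.daemonsets.pod_replacement_strategy=OnDelete \
  --set-string conf.storage.unconditional_mon_restart=true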
132
ceph-mon/templates/job-storage-admin-keys.yaml
Normal file
@ -0,0 +1,132 @@
{{/*
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at

http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/}}

{{- if and .Values.manifests.job_storage_admin_keys .Values.deployment.storage_secrets }}
{{- $envAll := . }}

{{- $serviceAccountName := "ceph-storage-keys-generator" }}
{{ tuple $envAll "storage_keys_generator" $serviceAccountName | include "helm-toolkit.snippets.kubernetes_pod_rbac_serviceaccount" }}
---
apiVersion: rbac.authorization.k8s.io/v1
kind: Role
metadata:
name: {{ $serviceAccountName }}
rules:
- apiGroups:
- ""
resources:
- pods
- pods/exec
- secrets
verbs:
- get
- create
- patch
- list
---
apiVersion: rbac.authorization.k8s.io/v1
kind: RoleBinding
metadata:
name: {{ $serviceAccountName }}
roleRef:
apiGroup: rbac.authorization.k8s.io
kind: Role
name: {{ $serviceAccountName }}
subjects:
- kind: ServiceAccount
name: {{ $serviceAccountName }}
namespace: {{ $envAll.Release.Namespace }}
---
apiVersion: batch/v1
kind: Job
metadata:
name: ceph-storage-keys-generator
labels:
{{ tuple $envAll "ceph" "storage-keys-generator" | include "helm-toolkit.snippets.kubernetes_metadata_labels" | indent 4 }}
annotations:
{{ tuple $envAll | include "helm-toolkit.snippets.release_uuid" }}
spec:
template:
metadata:
labels:
{{ tuple $envAll "ceph" "storage-keys-generator" | include "helm-toolkit.snippets.kubernetes_metadata_labels" | indent 8 }}
annotations:
{{ tuple $envAll | include "helm-toolkit.snippets.release_uuid" | indent 8 }}
configmap-bin-hash: {{ tuple "configmap-bin.yaml" . | include "helm-toolkit.utils.hash" }}
configmap-etc-hash: {{ tuple "configmap-etc.yaml" . | include "helm-toolkit.utils.hash" }}
{{ dict "envAll" $envAll "podName" "ceph-storage-keys-generator" "containerNames" (list "ceph-storage-keys-generator" "init") | include "helm-toolkit.snippets.kubernetes_mandatory_access_control_annotation" | indent 8 }}
spec:
{{ dict "envAll" $envAll "application" "storage_keys_generator" | include "helm-toolkit.snippets.kubernetes_pod_security_context" | indent 6 }}
serviceAccountName: {{ $serviceAccountName }}
restartPolicy: OnFailure
nodeSelector:
{{ $envAll.Values.labels.job.node_selector_key }}: {{ $envAll.Values.labels.job.node_selector_value }}
initContainers:
{{ tuple $envAll "storage_keys_generator" list | include "helm-toolkit.snippets.kubernetes_entrypoint_init_container" | indent 8 }}
containers:
- name: ceph-storage-keys-generator
{{ tuple $envAll "ceph_config_helper" | include "helm-toolkit.snippets.image" | indent 10 }}
{{ tuple $envAll $envAll.Values.pod.resources.jobs.secret_provisioning | include "helm-toolkit.snippets.kubernetes_resources" | indent 10 }}
{{ dict "envAll" $envAll "application" "storage_keys_generator" "container" "ceph_storage_keys_generator" | include "helm-toolkit.snippets.kubernetes_container_security_context" | indent 10 }}
env:
- name: DEPLOYMENT_NAMESPACE
valueFrom:
fieldRef:
fieldPath: metadata.namespace
- name: CEPH_GEN_DIR
value: /tmp
- name: CEPH_TEMPLATES_DIR
value: /tmp/templates
- name: CEPH_KEYRING_NAME
value: ceph.client.admin.keyring
- name: CEPH_KEYRING_TEMPLATE
value: admin.keyring
- name: CEPH_KEYRING_ADMIN_NAME
value: {{ .Values.secrets.keyrings.admin }}
- name: CEPH_STORAGECLASS_ADMIN_SECRET_NAME
value: {{ .Values.storageclass.rbd.parameters.adminSecretName }}
- name: CEPH_STORAGECLASS_ADMIN_SECRET_NAME_NODE
value: {{ .Values.storageclass.rbd.parameters.adminSecretNameNode }}
command:
- /tmp/keys-storage-keyring-manager.sh
volumeMounts:
- name: pod-tmp
mountPath: /tmp
- name: pod-etc-ceph
mountPath: /etc/ceph
- name: ceph-mon-bin
mountPath: /tmp/keys-storage-keyring-manager.sh
subPath: keys-storage-keyring-manager.sh
readOnly: true
- name: ceph-mon-bin
mountPath: /tmp/keys-bootstrap-keyring-generator.py
subPath: keys-bootstrap-keyring-generator.py
readOnly: true
- name: ceph-templates
mountPath: /tmp/templates
readOnly: true
volumes:
- name: pod-tmp
emptyDir: {}
- name: pod-etc-ceph
emptyDir: {}
- name: ceph-mon-bin
configMap:
name: {{ printf "%s-%s" $envAll.Release.Name "bin" | quote }}
defaultMode: 0555
- name: ceph-templates
configMap:
name: {{ printf "%s-%s" $envAll.Release.Name "templates" | quote }}
defaultMode: 0444
{{- end }}
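Once this job completes, the admin keyring secret and the storageclass admin secrets it provisions should exist in the release namespace. A quick check, assuming the "ceph" namespace and the default secret names from values.yaml:

# Secret names come from secrets.keyrings.admin and
# storageclass.rbd.parameters.* defaults; the namespace is an assumption.
kubectl -n ceph get secret ceph-client-admin-keyring \
  pvc-ceph-conf-combined-storageclass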
17
ceph-mon/templates/secret-registry.yaml
Normal file
@ -0,0 +1,17 @@
{{/*
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at

http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/}}

{{- if and .Values.manifests.secret_registry .Values.endpoints.oci_image_registry.auth.enabled }}
{{ include "helm-toolkit.manifests.secret_registry" ( dict "envAll" . "registryUser" .Chart.Name ) }}
{{- end }}
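The registry user passed to helm-toolkit is the chart name (ceph-mon), keyed under endpoints.oci_image_registry.auth in values.yaml. A sketch of rendering the secret, with the chart path as an assumption:

# Registry auth is off by default; flip both toggles to emit the secret:
helm template ceph-mon ./ceph-mon \
  --set manifests.secret_registry=true \
  --set endpoints.oci_image_registry.auth.enabled=true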
42
ceph-mon/templates/service-mgr.yaml
Normal file
@ -0,0 +1,42 @@
{{/*
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at

http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/}}

{{- if and .Values.manifests.service_mgr ( and .Values.deployment.ceph .Values.conf.features.mgr ) }}
{{- $envAll := . }}
{{- $prometheus_annotations := $envAll.Values.monitoring.prometheus.ceph_mgr }}
---
apiVersion: v1
kind: Service
metadata:
name: ceph-mgr
labels:
{{ tuple $envAll "ceph" "manager" | include "helm-toolkit.snippets.kubernetes_metadata_labels" | indent 4 }}
annotations:
{{- if .Values.monitoring.prometheus.enabled }}
{{ tuple $prometheus_annotations | include "helm-toolkit.snippets.prometheus_service_annotations" | indent 4 }}
{{- end }}
spec:
ports:
- name: ceph-mgr
port: {{ tuple "ceph_mgr" "internal" "mgr" . | include "helm-toolkit.endpoints.endpoint_port_lookup" }}
protocol: TCP
targetPort: {{ tuple "ceph_mgr" "internal" "mgr" . | include "helm-toolkit.endpoints.endpoint_port_lookup" }}
{{ if (has "prometheus" .Values.ceph_mgr_enabled_modules) }}
- name: metrics
protocol: TCP
port: {{ tuple "ceph_mgr" "internal" "metrics" . | include "helm-toolkit.endpoints.endpoint_port_lookup" }}
{{ end }}
selector:
{{ tuple $envAll "ceph" "mgr" | include "helm-toolkit.snippets.kubernetes_metadata_labels" | indent 4 }}
{{- end }}
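When the prometheus module is listed in ceph_mgr_enabled_modules, the service exposes a metrics port (9283 by default per values.yaml). A sketch of probing it from inside the cluster; the "ceph" namespace and the throwaway curl pod are assumptions:

# Check the service and fetch a few metrics lines from inside the cluster:
kubectl -n ceph get svc ceph-mgr
kubectl -n ceph run curl-test --rm -it --restart=Never \
  --image=curlimages/curl -- \
  curl -s http://ceph-mgr.ceph.svc.cluster.local:9283/metrics | head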
41
ceph-mon/templates/service-mon-discovery.yaml
Normal file
@ -0,0 +1,41 @@
{{/*
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at

http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/}}

{{- if and .Values.manifests.service_mon_discovery .Values.deployment.ceph }}
{{- $envAll := . }}
---
kind: Service
apiVersion: v1
metadata:
name: {{ tuple "ceph_mon" "discovery" . | include "helm-toolkit.endpoints.hostname_short_endpoint_lookup" }}
spec:
ports:
- name: mon
port: {{ tuple "ceph_mon" "discovery" "mon" $envAll | include "helm-toolkit.endpoints.endpoint_port_lookup" }}
protocol: TCP
targetPort: {{ tuple "ceph_mon" "discovery" "mon" $envAll | include "helm-toolkit.endpoints.endpoint_port_lookup" }}
- name: mon-msgr2
port: {{ tuple "ceph_mon" "discovery" "mon_msgr2" $envAll | include "helm-toolkit.endpoints.endpoint_port_lookup" }}
protocol: TCP
targetPort: {{ tuple "ceph_mon" "discovery" "mon_msgr2" $envAll | include "helm-toolkit.endpoints.endpoint_port_lookup" }}
selector:
{{- if .Values.manifests.daemonset_mon }}
{{ tuple $envAll "ceph" "mon" | include "helm-toolkit.snippets.kubernetes_metadata_labels" | indent 4 }}
{{- else }}
app: rook-ceph-mon
ceph_daemon_type: mon
{{- end }}
clusterIP: None
publishNotReadyAddresses: true
{{- end }}
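This headless service publishes mon addresses even before the pods report ready (publishNotReadyAddresses), and its named ports are what the mon_host_from_k8s_ep() helper further down consumes. A quick sanity check, assuming the "ceph" namespace and the default endpoint name from values.yaml:

# Confirm the named ports on the discovery endpoint:
kubectl -n ceph get endpoints ceph-mon-discovery \
  -o jsonpath='{.subsets[*].ports[*].name}'
# expected to list: mon mon-msgr2 (order may vary)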
35
ceph-mon/templates/service-mon.yaml
Normal file
@ -0,0 +1,35 @@
{{/*
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at

http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/}}

{{- if and .Values.manifests.service_mon .Values.deployment.ceph }}
{{- $envAll := . }}
---
kind: Service
apiVersion: v1
metadata:
name: {{ tuple "ceph_mon" "internal" . | include "helm-toolkit.endpoints.hostname_short_endpoint_lookup" }}
spec:
ports:
- name: mon
port: {{ tuple "ceph_mon" "internal" "mon" $envAll | include "helm-toolkit.endpoints.endpoint_port_lookup" }}
protocol: TCP
targetPort: {{ tuple "ceph_mon" "internal" "mon" $envAll | include "helm-toolkit.endpoints.endpoint_port_lookup" }}
- name: mon-msgr2
port: {{ tuple "ceph_mon" "internal" "mon_msgr2" $envAll | include "helm-toolkit.endpoints.endpoint_port_lookup" }}
protocol: TCP
targetPort: {{ tuple "ceph_mon" "internal" "mon_msgr2" $envAll | include "helm-toolkit.endpoints.endpoint_port_lookup" }}
selector:
{{ tuple $envAll "ceph" "mon" | include "helm-toolkit.snippets.kubernetes_metadata_labels" | indent 4 }}
clusterIP: None
{{- end }}
68
ceph-mon/templates/snippets/_mon_host_from_k8s_ep.sh.tpl
Normal file
@ -0,0 +1,68 @@
{{- define "ceph-mon.snippets.mon_host_from_k8s_ep" -}}
{{/*

Inserts a bash function definition mon_host_from_k8s_ep() which can be used
to construct a mon_hosts value from the given namespaced endpoint.

Usage (e.g. in _script.sh.tpl):
#!/bin/bash

: "${NS:=ceph}"
: "${EP:=ceph-mon-discovery}"

{{ include "ceph-mon.snippets.mon_host_from_k8s_ep" . }}

MON_HOST=$(mon_host_from_k8s_ep "$NS" "$EP")

if [ -z "$MON_HOST" ]; then
# deal with failure
else
sed -i -e "s/^mon_host = /mon_host = $MON_HOST/" /etc/ceph/ceph.conf
fi
*/}}
{{`
# Construct a mon_hosts value from the given namespaced endpoint
# IP x.x.x.x with port p named "mon-msgr2" will appear as [v2:x.x.x.x:p/0]
# IP x.x.x.x with port q named "mon" will appear as [v1:x.x.x.x:q/0]
# IP x.x.x.x with ports p and q will appear as [v2:x.x.x.x:p/0,v1:x.x.x.x:q/0]
# The entries for all IPs will be joined with commas
mon_host_from_k8s_ep() {
local ns=$1
local ep=$2

if [ -z "$ns" ] || [ -z "$ep" ]; then
return 1
fi

# We don't want shell expansion for the go-template expression
# shellcheck disable=SC2016
kubectl get endpoints -n "$ns" "$ep" -o go-template='
{{- $sep := "" }}
{{- range $_,$s := .subsets }}
{{- $v2port := 0 }}
{{- $v1port := 0 }}
{{- range $_,$port := index $s "ports" }}
{{- if (eq $port.name "mon-msgr2") }}
{{- $v2port = $port.port }}
{{- else if (eq $port.name "mon") }}
{{- $v1port = $port.port }}
{{- end }}
{{- end }}
{{- range $_,$address := index $s "addresses" }}
{{- $v2endpoint := printf "v2:%s:%d/0" $address.ip $v2port }}
{{- $v1endpoint := printf "v1:%s:%d/0" $address.ip $v1port }}
{{- if (and $v2port $v1port) }}
{{- printf "%s[%s,%s]" $sep $v2endpoint $v1endpoint }}
{{- $sep = "," }}
{{- else if $v2port }}
{{- printf "%s[%s]" $sep $v2endpoint }}
{{- $sep = "," }}
{{- else if $v1port }}
{{- printf "%s[%s]" $sep $v1endpoint }}
{{- $sep = "," }}
{{- end }}
{{- end }}
{{- end }}'
}
`}}
{{- end -}}
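A worked illustration of the function's output shape, assuming the snippet has been included into a script (so the function is defined) and that a discovery endpoint exists in the "ceph" namespace; the IPs shown are made up and the default ports come from values.yaml:

# Two mons at 10.0.0.1 and 10.0.0.2 with default ports would yield:
#   [v2:10.0.0.1:3300/0,v1:10.0.0.1:6789/0],[v2:10.0.0.2:3300/0,v1:10.0.0.2:6789/0]
MON_HOST=$(mon_host_from_k8s_ep ceph ceph-mon-discovery)
echo "$MON_HOST"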
287
ceph-mon/templates/utils/_mon_daemonset_overrides.tpl
Normal file
@ -0,0 +1,287 @@
{{/*
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at

http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/}}

{{- define "ceph.utils.match_exprs_hash" }}
{{- $match_exprs := index . 0 }}
{{- $context := index . 1 }}
{{- $_ := set $context.Values "__match_exprs_hash_content" "" }}
{{- range $match_expr := $match_exprs }}
{{- $_ := set $context.Values "__match_exprs_hash_content" (print $context.Values.__match_exprs_hash_content $match_expr.key $match_expr.operator ($match_expr.values | quote)) }}
{{- end }}
{{- $context.Values.__match_exprs_hash_content | sha256sum | trunc 8 }}
{{- $_ := unset $context.Values "__match_exprs_hash_content" }}
{{- end }}

{{- define "ceph.utils.mon_daemonset_overrides" }}
{{- $daemonset := index . 0 }}
{{- $daemonset_yaml := index . 1 }}
{{- $configmap_include := index . 2 }}
{{- $configmap_name := index . 3 }}
{{- $context := index . 4 }}
{{- $_ := unset $context ".Files" }}
{{- $_ := set $context.Values "__daemonset_yaml" $daemonset_yaml }}
{{- $daemonset_root_name := printf "ceph_%s" $daemonset }}
{{- $_ := set $context.Values "__daemonset_list" list }}
{{- $_ := set $context.Values "__default" dict }}
{{- if hasKey $context.Values.conf "overrides" }}
{{- range $key, $val := $context.Values.conf.overrides }}

{{- if eq $key $daemonset_root_name }}
{{- range $type, $type_data := . }}

{{- if eq $type "hosts" }}
{{- range $host_data := . }}
{{/* dictionary that will contain all info needed to generate this
iteration of the daemonset */}}
{{- $current_dict := dict }}

{{/* set daemonset name */}}
{{- $_ := set $current_dict "name" $host_data.name }}

{{/* apply overrides */}}
{{- $override_conf_copy := $host_data.conf }}
{{/* Deep copy to prevent https://storyboard.openstack.org/#!/story/2005936 */}}
{{- $root_conf_copy := omit ($context.Values.conf | toYaml | fromYaml) "overrides" }}
{{- $merged_dict := mergeOverwrite $root_conf_copy $override_conf_copy }}
{{- $root_conf_copy2 := dict "conf" $merged_dict }}
{{- $context_values := omit (omit ($context.Values | toYaml | fromYaml) "conf") "__daemonset_list" }}
{{- $root_conf_copy3 := mergeOverwrite $context_values $root_conf_copy2 }}
{{- $root_conf_copy4 := dict "Values" $root_conf_copy3 }}
{{- $_ := set $current_dict "nodeData" $root_conf_copy4 }}

{{/* Schedule to this host explicitly. */}}
{{- $nodeSelector_dict := dict }}

{{- $_ := set $nodeSelector_dict "key" "kubernetes.io/hostname" }}
{{- $_ := set $nodeSelector_dict "operator" "In" }}

{{- $values_list := list $host_data.name }}
{{- $_ := set $nodeSelector_dict "values" $values_list }}

{{- $list_aggregate := list $nodeSelector_dict }}
{{- $_ := set $current_dict "matchExpressions" $list_aggregate }}

{{/* store completed daemonset entry/info into global list */}}
{{- $list_aggregate := append $context.Values.__daemonset_list $current_dict }}
{{- $_ := set $context.Values "__daemonset_list" $list_aggregate }}

{{- end }}
{{- end }}

{{- if eq $type "labels" }}
{{- $_ := set $context.Values "__label_list" . }}
{{- range $label_data := . }}
{{/* dictionary that will contain all info needed to generate this
iteration of the daemonset. */}}
{{- $_ := set $context.Values "__current_label" dict }}

{{/* set daemonset name */}}
{{- $_ := set $context.Values.__current_label "name" $label_data.label.key }}

{{/* apply overrides */}}
{{- $override_conf_copy := $label_data.conf }}
{{/* Deep copy to prevent https://storyboard.openstack.org/#!/story/2005936 */}}
{{- $root_conf_copy := omit ($context.Values.conf | toYaml | fromYaml) "overrides" }}
{{- $merged_dict := mergeOverwrite $root_conf_copy $override_conf_copy }}
{{- $root_conf_copy2 := dict "conf" $merged_dict }}
{{- $context_values := omit (omit ($context.Values | toYaml | fromYaml) "conf") "__daemonset_list" }}
{{- $root_conf_copy3 := mergeOverwrite $context_values $root_conf_copy2 }}
{{- $root_conf_copy4 := dict "Values" $root_conf_copy3 }}
{{- $_ := set $context.Values.__current_label "nodeData" $root_conf_copy4 }}

{{/* Schedule to the provided label value(s) */}}
{{- $label_dict := omit $label_data.label "NULL" }}
{{- $_ := set $label_dict "operator" "In" }}
{{- $list_aggregate := list $label_dict }}
{{- $_ := set $context.Values.__current_label "matchExpressions" $list_aggregate }}

{{/* Do not schedule to other specified labels, with higher
precedence as the list position increases. Last defined label
is highest priority. */}}
{{- $other_labels := without $context.Values.__label_list $label_data }}
{{- range $label_data2 := $other_labels }}
{{- $label_dict := omit $label_data2.label "NULL" }}

{{- $_ := set $label_dict "operator" "NotIn" }}

{{- $list_aggregate := append $context.Values.__current_label.matchExpressions $label_dict }}
{{- $_ := set $context.Values.__current_label "matchExpressions" $list_aggregate }}
{{- end }}
{{- $_ := set $context.Values "__label_list" $other_labels }}

{{/* Do not schedule to any other specified hosts */}}
{{- range $type, $type_data := $val }}
{{- if eq $type "hosts" }}
{{- range $host_data := . }}
{{- $label_dict := dict }}

{{- $_ := set $label_dict "key" "kubernetes.io/hostname" }}
{{- $_ := set $label_dict "operator" "NotIn" }}

{{- $values_list := list $host_data.name }}
{{- $_ := set $label_dict "values" $values_list }}

{{- $list_aggregate := append $context.Values.__current_label.matchExpressions $label_dict }}
{{- $_ := set $context.Values.__current_label "matchExpressions" $list_aggregate }}
{{- end }}
{{- end }}
{{- end }}

{{/* store completed daemonset entry/info into global list */}}
{{- $list_aggregate := append $context.Values.__daemonset_list $context.Values.__current_label }}
{{- $_ := set $context.Values "__daemonset_list" $list_aggregate }}
{{- $_ := unset $context.Values "__current_label" }}

{{- end }}
{{- end }}
{{- end }}

{{/* scheduler exceptions for the default daemonset */}}
{{- $_ := set $context.Values.__default "matchExpressions" list }}

{{- range $type, $type_data := . }}
{{/* Do not schedule to other specified labels */}}
{{- if eq $type "labels" }}
{{- range $label_data := . }}
{{- $default_dict := omit $label_data.label "NULL" }}

{{- $_ := set $default_dict "operator" "NotIn" }}

{{- $list_aggregate := append $context.Values.__default.matchExpressions $default_dict }}
{{- $_ := set $context.Values.__default "matchExpressions" $list_aggregate }}
{{- end }}
{{- end }}
{{/* Do not schedule to other specified hosts */}}
{{- if eq $type "hosts" }}
{{- range $host_data := . }}
{{- $default_dict := dict }}

{{- $_ := set $default_dict "key" "kubernetes.io/hostname" }}
{{- $_ := set $default_dict "operator" "NotIn" }}

{{- $values_list := list $host_data.name }}
{{- $_ := set $default_dict "values" $values_list }}

{{- $list_aggregate := append $context.Values.__default.matchExpressions $default_dict }}
{{- $_ := set $context.Values.__default "matchExpressions" $list_aggregate }}
{{- end }}
{{- end }}
{{- end }}
{{- end }}
{{- end }}
{{- end }}

{{/* generate the default daemonset */}}

{{/* set name */}}
{{- $_ := set $context.Values.__default "name" "default" }}

{{/* no overrides apply, so copy as-is */}}
{{- $root_conf_copy1 := omit $context.Values.conf "overrides" }}
{{- $root_conf_copy2 := dict "conf" $root_conf_copy1 }}
{{- $context_values := omit $context.Values "conf" }}
{{- $root_conf_copy3 := mergeOverwrite $context_values $root_conf_copy2 }}
{{- $root_conf_copy4 := dict "Values" $root_conf_copy3 }}
{{- $_ := set $context.Values.__default "nodeData" $root_conf_copy4 }}

{{/* add to global list */}}
{{- $list_aggregate := append $context.Values.__daemonset_list $context.Values.__default }}
{{- $_ := set $context.Values "__daemonset_list" $list_aggregate }}

{{- $_ := set $context.Values "__last_configmap_name" $configmap_name }}
{{- range $current_dict := $context.Values.__daemonset_list }}

{{- $context_novalues := omit $context "Values" }}
{{- $merged_dict := mergeOverwrite $context_novalues $current_dict.nodeData }}
{{- $_ := set $current_dict "nodeData" $merged_dict }}

{{/* name needs to be a DNS-1123 compliant name. Ensure lower case */}}
{{- $name_format1 := printf (print $daemonset_root_name "-" $current_dict.name) | lower }}
{{/* labels may contain underscores which would be invalid here, so we replace them with dashes
there may be other valid label names which would make for an invalid DNS-1123 name
but these will be easier to handle in future with sprig regex* functions
(not available in helm 2.5.1) */}}
{{- $name_format2 := $name_format1 | replace "_" "-" | replace "." "-" }}
{{/* To account for the case where the same label is defined multiple times in overrides
(but with different label values), we add a sha of the scheduling data to ensure
name uniqueness */}}
{{- $_ := set $current_dict "dns_1123_name" dict }}
{{- if hasKey $current_dict "matchExpressions" }}
{{- $_ := set $current_dict "dns_1123_name" (printf (print $name_format2 "-" (list $current_dict.matchExpressions $context | include "ceph.utils.match_exprs_hash"))) }}
{{- else }}
{{- $_ := set $current_dict "dns_1123_name" $name_format2 }}
{{- end }}

{{/* set daemonset metadata name */}}
{{- if not $context.Values.__daemonset_yaml.metadata }}{{- $_ := set $context.Values.__daemonset_yaml "metadata" dict }}{{- end }}
{{- if not $context.Values.__daemonset_yaml.metadata.name }}{{- $_ := set $context.Values.__daemonset_yaml.metadata "name" dict }}{{- end }}
{{- $_ := set $context.Values.__daemonset_yaml.metadata "name" $current_dict.dns_1123_name }}

{{/* cross-reference configmap name to container volume definitions */}}
{{- $_ := set $context.Values "__volume_list" list }}
{{- range $current_volume := $context.Values.__daemonset_yaml.spec.template.spec.volumes }}
{{- $_ := set $context.Values "__volume" $current_volume }}
{{- if hasKey $context.Values.__volume "configMap" }}
{{- if eq $context.Values.__volume.configMap.name $context.Values.__last_configmap_name }}
{{- $_ := set $context.Values.__volume.configMap "name" $current_dict.dns_1123_name }}
{{- end }}
{{- end }}
{{- $updated_list := append $context.Values.__volume_list $context.Values.__volume }}
{{- $_ := set $context.Values "__volume_list" $updated_list }}
{{- end }}
{{- $_ := set $context.Values.__daemonset_yaml.spec.template.spec "volumes" $context.Values.__volume_list }}

{{/* populate scheduling restrictions */}}
{{- if hasKey $current_dict "matchExpressions" }}
{{- if not $context.Values.__daemonset_yaml.spec.template.spec }}{{- $_ := set $context.Values.__daemonset_yaml.spec.template "spec" dict }}{{- end }}
{{- if not $context.Values.__daemonset_yaml.spec.template.spec.affinity }}{{- $_ := set $context.Values.__daemonset_yaml.spec.template.spec "affinity" dict }}{{- end }}
{{- if not $context.Values.__daemonset_yaml.spec.template.spec.affinity.nodeAffinity }}{{- $_ := set $context.Values.__daemonset_yaml.spec.template.spec.affinity "nodeAffinity" dict }}{{- end }}
{{- if not $context.Values.__daemonset_yaml.spec.template.spec.affinity.nodeAffinity.requiredDuringSchedulingIgnoredDuringExecution }}{{- $_ := set $context.Values.__daemonset_yaml.spec.template.spec.affinity.nodeAffinity "requiredDuringSchedulingIgnoredDuringExecution" dict }}{{- end }}
{{- $match_exprs := dict }}
{{- $_ := set $match_exprs "matchExpressions" $current_dict.matchExpressions }}
{{- $appended_match_expr := list $match_exprs }}
{{- $_ := set $context.Values.__daemonset_yaml.spec.template.spec.affinity.nodeAffinity.requiredDuringSchedulingIgnoredDuringExecution "nodeSelectorTerms" $appended_match_expr }}
{{- end }}

{{/* input value hash for current set of values overrides */}}
{{- if not $context.Values.__daemonset_yaml.spec }}{{- $_ := set $context.Values.__daemonset_yaml "spec" dict }}{{- end }}
{{- if not $context.Values.__daemonset_yaml.spec.template }}{{- $_ := set $context.Values.__daemonset_yaml.spec "template" dict }}{{- end }}
{{- if not $context.Values.__daemonset_yaml.spec.template.metadata }}{{- $_ := set $context.Values.__daemonset_yaml.spec.template "metadata" dict }}{{- end }}
{{- if not $context.Values.__daemonset_yaml.spec.template.metadata.annotations }}{{- $_ := set $context.Values.__daemonset_yaml.spec.template.metadata "annotations" dict }}{{- end }}
{{- $cmap := list $current_dict.dns_1123_name $current_dict.nodeData | include $configmap_include }}
{{- $values_hash := $cmap | quote | sha256sum }}
{{- $_ := set $context.Values.__daemonset_yaml.spec.template.metadata.annotations "configmap-etc-hash" $values_hash }}

{{/* generate configmap */}}
---
{{ $cmap }}

{{/* generate daemonset yaml */}}
{{ range $k, $v := index $current_dict.nodeData.Values.conf.storage "mon" }}
---
{{- $_ := set $context.Values "__tmpYAML" dict }}

{{ $dsNodeName := index $context.Values.__daemonset_yaml.metadata "name" }}
{{ $localDsNodeName := print (trunc 54 $current_dict.dns_1123_name) "-" (print $dsNodeName $k | quote | sha256sum | trunc 8) }}
{{- if not $context.Values.__tmpYAML.metadata }}{{- $_ := set $context.Values.__tmpYAML "metadata" dict }}{{- end }}
{{- $_ := set $context.Values.__tmpYAML.metadata "name" $localDsNodeName }}

{{ merge $context.Values.__tmpYAML $context.Values.__daemonset_yaml | toYaml }}

{{ end }}

---
{{- $_ := set $context.Values "__last_configmap_name" $current_dict.dns_1123_name }}
{{- end }}
{{- end }}
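To see the per-host and per-label paths of this helper in use, here is a hypothetical conf.overrides block matching the shapes the template reads ($host_data.name/.conf and $label_data.label.key/.values/.conf). The ceph_mon key follows the printf "ceph_%s" root-name convention above; the host name, label key, and file path are made-up illustrations:

# Sketch: one daemonset pinned to a host, one selected by label, plus the
# generated default; all names below are hypothetical.
cat > /tmp/mon-overrides.yaml <<'EOF'
conf:
  overrides:
    ceph_mon:
      hosts:
        - name: node1.example.com
          conf:
            storage:
              mon:
                directory: /srv/ceph/mon
      labels:
        - label:
            key: ceph-mon-tier
            values:
              - ssd
          conf: {}
EOF
helm upgrade --install ceph-mon ./ceph-mon --namespace ceph \
  --values /tmp/mon-overrides.yaml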
530
ceph-mon/values.yaml
Normal file
@ -0,0 +1,530 @@
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

# Default values for ceph-mon.
# This is a YAML-formatted file.
# Declare name/value pairs to be passed into your templates.
# name: value

---
deployment:
ceph: true
storage_secrets: true

images:
pull_policy: IfNotPresent
tags:
ceph_bootstrap: 'docker.io/openstackhelm/ceph-daemon:ubuntu_jammy_19.2.1-1-20250207'
ceph_config_helper: 'docker.io/openstackhelm/ceph-config-helper:ubuntu_jammy_19.2.1-1-20250207'
ceph_mon: 'docker.io/openstackhelm/ceph-daemon:ubuntu_jammy_19.2.1-1-20250207'
ceph_mgr: 'docker.io/openstackhelm/ceph-daemon:ubuntu_jammy_19.2.1-1-20250207'
ceph_mon_check: 'docker.io/openstackhelm/ceph-config-helper:ubuntu_jammy_19.2.1-1-20250207'
dep_check: 'quay.io/airshipit/kubernetes-entrypoint:latest-ubuntu_focal'
image_repo_sync: 'docker.io/library/docker:17.07.0'
local_registry:
active: false
exclude:
- dep_check
- image_repo_sync

labels:
job:
node_selector_key: openstack-control-plane
node_selector_value: enabled
mon:
node_selector_key: ceph-mon
node_selector_value: enabled
mgr:
node_selector_key: ceph-mgr
node_selector_value: enabled

pod:
security_context:
mon:
pod:
runAsUser: 65534
container:
ceph_init_dirs:
runAsUser: 0
readOnlyRootFilesystem: true
ceph_log_ownership:
runAsUser: 0
readOnlyRootFilesystem: true
ceph_mon:
runAsUser: 64045
readOnlyRootFilesystem: true
allowPrivilegeEscalation: false
mgr:
pod:
runAsUser: 65534
container:
init_dirs:
runAsUser: 0
readOnlyRootFilesystem: true
mgr:
runAsUser: 64045
readOnlyRootFilesystem: true
allowPrivilegeEscalation: false
moncheck:
pod:
runAsUser: 65534
container:
ceph_mon:
allowPrivilegeEscalation: false
readOnlyRootFilesystem: true
bootstrap:
pod:
runAsUser: 65534
container:
ceph_bootstrap:
allowPrivilegeEscalation: false
readOnlyRootFilesystem: true
storage_keys_generator:
pod:
runAsUser: 65534
container:
ceph_storage_keys_generator:
allowPrivilegeEscalation: false
readOnlyRootFilesystem: true
ceph:
pod:
runAsUser: 65534
container:
ceph-mds-keyring-generator:
allowPrivilegeEscalation: false
readOnlyRootFilesystem: true
ceph-mgr-keyring-generator:
allowPrivilegeEscalation: false
readOnlyRootFilesystem: true
ceph-mon-keyring-generator:
allowPrivilegeEscalation: false
readOnlyRootFilesystem: true
ceph-osd-keyring-generator:
allowPrivilegeEscalation: false
readOnlyRootFilesystem: true
post_apply:
pod:
runAsUser: 65534
container:
ceph_mon_post_apply:
allowPrivilegeEscalation: false
readOnlyRootFilesystem: true
dns_policy: "ClusterFirstWithHostNet"
replicas:
mgr: 2
mon_check: 1
lifecycle:
upgrades:
daemonsets:
pod_replacement_strategy: RollingUpdate
mon:
enabled: true
min_ready_seconds: 0
max_unavailable: 1
updateStrategy:
mgr:
type: Recreate
affinity:
anti:
type:
default: preferredDuringSchedulingIgnoredDuringExecution
topologyKey:
default: kubernetes.io/hostname
weight:
default: 10
resources:
enabled: false
mon:
requests:
memory: "50Mi"
cpu: "250m"
limits:
memory: "100Mi"
cpu: "500m"
mgr:
requests:
memory: "5Mi"
cpu: "250m"
limits:
memory: "50Mi"
cpu: "500m"
mon_check:
requests:
memory: "5Mi"
cpu: "250m"
limits:
memory: "50Mi"
cpu: "500m"
jobs:
bootstrap:
limits:
memory: "1024Mi"
cpu: "2000m"
requests:
memory: "128Mi"
cpu: "500m"
secret_provisioning:
limits:
memory: "1024Mi"
cpu: "2000m"
requests:
memory: "128Mi"
cpu: "500m"
image_repo_sync:
requests:
memory: "128Mi"
cpu: "100m"
limits:
memory: "1024Mi"
cpu: "2000m"
tolerations:
mgr:
tolerations:
- effect: NoExecute
key: node.kubernetes.io/not-ready
operator: Exists
tolerationSeconds: 60
- effect: NoExecute
key: node.kubernetes.io/unreachable
operator: Exists
tolerationSeconds: 60
mon_check:
tolerations:
- effect: NoExecute
key: node.kubernetes.io/not-ready
operator: Exists
tolerationSeconds: 60
- effect: NoExecute
key: node.kubernetes.io/unreachable
operator: Exists
tolerationSeconds: 60
probes:
ceph:
ceph-mon:
readiness:
enabled: true
params:
initialDelaySeconds: 60
periodSeconds: 60
timeoutSeconds: 5
liveness:
enabled: true
params:
initialDelaySeconds: 360
periodSeconds: 180
timeoutSeconds: 5
ceph-mgr:
readiness:
enabled: true
params:
initialDelaySeconds: 30
timeoutSeconds: 5
liveness:
enabled: true
params:
initialDelaySeconds: 30
timeoutSeconds: 5

secrets:
keyrings:
mon: ceph-mon-keyring
mds: ceph-bootstrap-mds-keyring
osd: ceph-bootstrap-osd-keyring
mgr: ceph-bootstrap-mgr-keyring
admin: ceph-client-admin-keyring
oci_image_registry:
ceph-mon: ceph-mon-oci-image-registry-key

network:
public: 192.168.0.0/16
cluster: 192.168.0.0/16

conf:
features:
mgr: true
templates:
keyring:
admin: |
[client.admin]
key = {{ key }}
auid = 0
caps mds = "allow"
caps mon = "allow *"
caps osd = "allow *"
caps mgr = "allow *"
mon: |
[mon.]
key = {{ key }}
caps mon = "allow *"
bootstrap:
mds: |
[client.bootstrap-mds]
key = {{ key }}
caps mon = "allow profile bootstrap-mds"
mgr: |
[client.bootstrap-mgr]
key = {{ key }}
caps mgr = "allow profile bootstrap-mgr"
osd: |
[client.bootstrap-osd]
key = {{ key }}
caps mon = "allow profile bootstrap-osd"
ceph:
global:
# auth
cephx: true
cephx_require_signatures: false
cephx_cluster_require_signatures: true
cephx_service_require_signatures: false
objecter_inflight_op_bytes: "1073741824"
objecter_inflight_ops: 10240
debug_ms: "0/0"
mon_osd_down_out_interval: 1800
mon_osd_down_out_subtree_limit: root
mon_osd_min_in_ratio: 0
mon_osd_min_up_ratio: 0
mon_data_avail_warn: 15
log_file: /dev/stdout
mon_cluster_log_file: /dev/stdout
# Beginning with the Pacific release, this config setting is necessary
# to allow pools to use 1x replication, which is disabled by default. The
# openstack-helm gate scripts use 1x replication for automated testing,
# so this is required. It doesn't seem to be sufficient to add this to
# /etc/ceph/ceph.conf, however. It must also be set explicitly via the
# 'ceph config' command, so this must also be added to the
# cluster_commands value in the ceph-client chart so it will be set
# before pools are created and configured there.
mon_allow_pool_size_one: true
osd:
osd_mkfs_type: xfs
osd_mkfs_options_xfs: -f -i size=2048
osd_max_object_name_len: 256
ms_bind_port_min: 6800
ms_bind_port_max: 7100
osd_snap_trim_priority: 1
osd_snap_trim_sleep: 0.1
osd_pg_max_concurrent_snap_trims: 1
filestore_merge_threshold: -10
filestore_split_multiple: 12
filestore_max_sync_interval: 10
osd_scrub_begin_hour: 22
osd_scrub_end_hour: 4
osd_scrub_during_recovery: false
osd_scrub_sleep: 0.1
osd_scrub_chunk_min: 1
osd_scrub_chunk_max: 4
osd_scrub_load_threshold: 10.0
osd_deep_scrub_stride: "1048576"
osd_scrub_priority: 1
osd_recovery_op_priority: 1
osd_recovery_max_active: 1
osd_mount_options_xfs: "rw,noatime,largeio,inode64,swalloc,logbufs=8,logbsize=256k,allocsize=4M"
osd_journal_size: 10240
storage:
mon:
directory: /var/lib/openstack-helm/ceph/mon

# The post-apply job will try to determine if mons need to be restarted
# and only restart them if necessary. Set this value to "true" to restart
# mons unconditionally.
unconditional_mon_restart: "false"

daemonset:
prefix_name: "mon"

dependencies:
dynamic:
common:
local_image_registry:
jobs:
- ceph-mon-image-repo-sync
services:
- endpoint: node
service: local_image_registry
static:
bootstrap:
jobs: null
services:
- endpoint: internal
service: ceph_mon
job_keyring_generator:
jobs: null
mon:
jobs:
- ceph-storage-keys-generator
- ceph-mon-keyring-generator
mgr:
jobs:
- ceph-storage-keys-generator
- ceph-mgr-keyring-generator
services:
- endpoint: internal
service: ceph_mon
moncheck:
jobs:
- ceph-storage-keys-generator
- ceph-mon-keyring-generator
services:
- endpoint: discovery
service: ceph_mon
storage_keys_generator:
jobs: null
image_repo_sync:
services:
- endpoint: internal
service: local_image_registry

bootstrap:
enabled: false
script: |
ceph -s
function ensure_pool () {
ceph osd pool stats $1 || ceph osd pool create $1 $2
if [[ $(ceph mon versions | awk '/version/{print $3}' | cut -d. -f1) -ge 12 ]]; then
ceph osd pool application enable $1 $3
fi
}
#ensure_pool volumes 8 cinder

# Uncomment below to enable mgr modules
# For a list of available modules:
# http://docs.ceph.com/docs/master/mgr/
# This overrides mgr_initial_modules (default: restful, status)
# Any module not listed here will be disabled
ceph_mgr_enabled_modules:
- restful
- status
- prometheus
- balancer
- iostat
- pg_autoscaler

# You can configure your mgr modules below. Each module has its own set of
# key/value pairs. Refer to the doc above for more info. For example:
ceph_mgr_modules_config:
# balancer:
#   active: 1
# prometheus:
#   server_port: 9283
#   server_addr: 0.0.0.0
# dashboard:
#   port: 7000
# localpool:
#   failure_domain: host
#   subtree: rack
#   pg_num: "128"
#   num_rep: "3"
#   min_size: "2"

# If you change provision_storage_class to false, it is presumed that you
# manage your own storage class definition externally. We iterate over the
# parameters of each storageclass and derive the manifest.
storageclass:
rbd:
parameters:
adminSecretName: pvc-ceph-conf-combined-storageclass
adminSecretNameNode: pvc-ceph-conf-combined-storageclass
cephfs:
provision_storage_class: true
provisioner: ceph.com/cephfs
metadata:
name: cephfs
parameters:
adminId: admin
userSecretName: pvc-ceph-cephfs-client-key
adminSecretName: pvc-ceph-conf-combined-storageclass
adminSecretNamespace: ceph

endpoints:
cluster_domain_suffix: cluster.local
local_image_registry:
name: docker-registry
namespace: docker-registry
hosts:
default: localhost
internal: docker-registry
node: localhost
host_fqdn_override:
default: null
port:
registry:
node: 5000
oci_image_registry:
name: oci-image-registry
namespace: oci-image-registry
auth:
enabled: false
ceph-mon:
username: ceph-mon
password: password
hosts:
default: localhost
host_fqdn_override:
default: null
port:
registry:
default: null
ceph_mon:
namespace: null
hosts:
default: ceph-mon
discovery: ceph-mon-discovery
host_fqdn_override:
default: null
port:
mon:
default: 6789
mon_msgr2:
default: 3300
ceph_mgr:
namespace: null
hosts:
default: ceph-mgr
host_fqdn_override:
default: null
port:
mgr:
default: 7000
metrics:
default: 9283
scheme:
default: http

monitoring:
prometheus:
enabled: true
ceph_mgr:
scrape: true
port: 9283

manifests:
configmap_bin: true
configmap_etc: true
configmap_templates: true
daemonset_mon: true
deployment_mgr: true
deployment_mgr_sa: true
deployment_moncheck: true
job_image_repo_sync: true
job_bootstrap: true
|
||||
job_keyring: true
|
||||
job_post_apply: true
|
||||
service_mon: true
|
||||
service_mgr: true
|
||||
service_mon_discovery: true
|
||||
job_storage_admin_keys: true
|
||||
secret_registry: true
|
||||
...
|
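The mon_allow_pool_size_one comment above notes that setting the option in ceph.conf alone is not sufficient; it also has to be pushed into the cluster's central config database before any 1x pools are created. As a hedged sketch (not taken from this chart), the equivalent manual steps from a client with admin credentials would look roughly like this; the pool name "rbd" is only illustrative:

    # Persist the option in the mon config database and verify it took effect.
    ceph config set global mon_allow_pool_size_one true
    ceph config dump | grep mon_allow_pool_size_one
    # Shrinking an existing pool to 1x still needs an explicit per-command override.
    ceph osd pool set rbd size 1 --yes-i-really-mean-it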
24
ceph-osd/Chart.yaml
Normal file
24
ceph-osd/Chart.yaml
Normal file
@ -0,0 +1,24 @@
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

---
apiVersion: v2
appVersion: v1.0.0
description: OpenStack-Helm Ceph OSD
name: ceph-osd
version: 2024.2.0
home: https://github.com/ceph/ceph
dependencies:
  - name: helm-toolkit
    repository: file://../helm-toolkit
    version: ">= 0.1.0"
...
18
ceph-osd/templates/bin/_bootstrap.sh.tpl
Normal file
18
ceph-osd/templates/bin/_bootstrap.sh.tpl
Normal file
@ -0,0 +1,18 @@
#!/bin/bash

{{/*
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at

http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/}}

set -ex
{{ .Values.bootstrap.script | default "echo 'Not Enabled'" }}
64
ceph-osd/templates/bin/_helm-tests.sh.tpl
Normal file
64
ceph-osd/templates/bin/_helm-tests.sh.tpl
Normal file
@ -0,0 +1,64 @@
#!/bin/bash

{{/*
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at

http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/}}

set -ex

function check_osd_count() {
  echo "#### Start: Checking OSD count ####"
  noup_flag=$(ceph osd stat | awk '/noup/ {print $2}')
  osd_stat=$(ceph osd stat -f json-pretty)
  num_osd=$(awk '/"num_osds"/{print $2}' <<< "$osd_stat" | cut -d, -f1)
  num_in_osds=$(awk '/"num_in_osds"/{print $2}' <<< "$osd_stat" | cut -d, -f1)
  num_up_osds=$(awk '/"num_up_osds"/{print $2}' <<< "$osd_stat" | cut -d, -f1)

  MIN_OSDS=$((${num_osd}*$REQUIRED_PERCENT_OF_OSDS/100))
  if [ ${MIN_OSDS} -lt 1 ]; then
    MIN_OSDS=1
  fi

  if [ "${noup_flag}" ]; then
    osd_status=$(ceph osd dump -f json | jq -c '.osds[] | .state')
    count=0
    for osd in $osd_status; do
      if [[ "$osd" == *"up"* || "$osd" == *"new"* ]]; then
        ((count=count+1))
      fi
    done
    echo "Caution: noup flag is set. ${count} OSDs in up/new state. Required number of OSDs: ${MIN_OSDS}."
    ceph -s
    exit 0
  else
    if [ "${num_osd}" -eq 0 ]; then
      echo "There are no osds in the cluster"
    elif [ "${num_in_osds}" -ge "${MIN_OSDS}" ] && [ "${num_up_osds}" -ge "${MIN_OSDS}" ]; then
      echo "Required number of OSDs (${MIN_OSDS}) are UP and IN status"
      ceph -s
      exit 0
    else
      echo "Required number of OSDs (${MIN_OSDS}) are NOT UP and IN status. Cluster shows OSD count=${num_osd}, UP=${num_up_osds}, IN=${num_in_osds}"
    fi
  fi
}

# In case the chart has been re-installed in order to make changes to the
# daemonset, we do not need rack-by-rack restarts, but we do need to wait
# until all re-installed ceph-osd pods are healthy and there are no
# degraded objects.
while true; do
  check_osd_count
  sleep 60
done
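The MIN_OSDS computation above uses shell integer arithmetic, so the percentage is floored, and the follow-up check guards the degenerate single-OSD case. A small worked example (the values are illustrative):

    # With 5 OSDs and REQUIRED_PERCENT_OF_OSDS=75: 5*75/100 = 375/100 = 3
    num_osd=5; REQUIRED_PERCENT_OF_OSDS=75
    MIN_OSDS=$((num_osd*REQUIRED_PERCENT_OF_OSDS/100))
    [ "${MIN_OSDS}" -lt 1 ] && MIN_OSDS=1
    echo "${MIN_OSDS}"   # prints 3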
32
ceph-osd/templates/bin/_init-dirs.sh.tpl
Normal file
32
ceph-osd/templates/bin/_init-dirs.sh.tpl
Normal file
@ -0,0 +1,32 @@
#!/bin/bash

{{/*
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at

http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/}}

set -ex
export LC_ALL=C
: "${OSD_BOOTSTRAP_KEYRING:=/var/lib/ceph/bootstrap-osd/${CLUSTER}.keyring}"

mkdir -p "$(dirname "${OSD_BOOTSTRAP_KEYRING}")"

# Let's create the ceph directories
for DIRECTORY in osd tmp crash; do
  mkdir -p "/var/lib/ceph/${DIRECTORY}"
done

# Create socket directory
mkdir -p /run/ceph

# Adjust the owner of all those directories
chown -R ceph. /run/ceph/ /var/lib/ceph/*
227
ceph-osd/templates/bin/_post-apply.sh.tpl
Normal file
227
ceph-osd/templates/bin/_post-apply.sh.tpl
Normal file
@ -0,0 +1,227 @@
#!/bin/bash

{{/*
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at

http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/}}

export LC_ALL=C

: "${ADMIN_KEYRING:=/etc/ceph/${CLUSTER}.client.admin.keyring}"

if [[ ! -f /etc/ceph/${CLUSTER}.conf ]]; then
  echo "ERROR- /etc/ceph/${CLUSTER}.conf must exist; get it from your existing mon"
  exit 1
fi

if [[ ! -f ${ADMIN_KEYRING} ]]; then
  echo "ERROR- ${ADMIN_KEYRING} must exist; get it from your existing mon"
  exit 1
fi

ceph --cluster ${CLUSTER} -s
function wait_for_pods() {
  timeout=${2:-1800}
  end=$(date -ud "${timeout} seconds" +%s)
  # Select containers with the "ceph-osd-default" name and
  # count them based on the "ready" field.
  count_pods=".items | map(.status.containerStatuses | .[] | \
              select(.name==\"ceph-osd-default\")) | \
              group_by(.ready) | map({(.[0].ready | tostring): length}) | .[]"
  min_osds="add | if .true >= (.false + .true)*${REQUIRED_PERCENT_OF_OSDS}/100 \
            then \"pass\" else \"fail\" end"
  while true; do
    # Leave the while loop once the minimum number of OSDs is ready.
    # This allows us to proceed even if some OSDs are not ready
    # or are in a "CrashLoopBackOff" state.
    state=$(kubectl get pods --namespace="${1}" -l component=osd -o json | jq "${count_pods}")
    osd_state=$(jq -s "${min_osds}" <<< "${state}")
    if [[ "${osd_state}" == \"pass\" ]]; then
      break
    fi
    sleep 5

    if [ $(date -u +%s) -gt $end ] ; then
      echo -e "Containers failed to start after $timeout seconds\n"
      kubectl get pods --namespace "${1}" -o wide -l component=osd
      exit 1
    fi
  done
}

function check_ds() {
  for ds in `kubectl get ds --namespace=$CEPH_NAMESPACE -l component=osd --no-headers=true|awk '{print $1}'`
  do
    ds_query=`kubectl get ds -n $CEPH_NAMESPACE $ds -o json|jq -r .status`
    if echo $ds_query |grep -i "numberAvailable" ;then
      currentNumberScheduled=`echo $ds_query|jq -r .currentNumberScheduled`
      desiredNumberScheduled=`echo $ds_query|jq -r .desiredNumberScheduled`
      numberAvailable=`echo $ds_query|jq -r .numberAvailable`
      numberReady=`echo $ds_query|jq -r .numberReady`
      updatedNumberScheduled=`echo $ds_query|jq -r .updatedNumberScheduled`
      ds_check=`echo "$currentNumberScheduled $desiredNumberScheduled $numberAvailable $numberReady $updatedNumberScheduled"| \
                tr ' ' '\n'|sort -u|wc -l`
      if [ $ds_check != 1 ]; then
        echo "some pods under daemonset $ds are not yet ready"
        exit
      else
        echo "all pods under daemonset $ds are ready"
      fi
    else
      echo "there are no osds under daemonset $ds"
    fi
  done
}

function wait_for_pgs () {
  echo "#### Start: Checking pgs ####"

  pgs_ready=0
  pgs_inactive=0
  query='map({state: .state}) | group_by(.state) | map({state: .[0].state, count: length}) | .[] | select(.state | contains("active") | not)'

  if [[ $(ceph mon versions | awk '/version/{print $3}' | cut -d. -f1) -ge 14 ]]; then
    query=".pg_stats | ${query}"
  fi

  # Loop until all pgs are active
  while [[ $pgs_ready -lt 3 ]]; do
    pgs_state=$(ceph --cluster ${CLUSTER} pg ls -f json | jq -c "${query}")
    if [[ $(jq -c '. | select(.state | contains("peering") | not)' <<< "${pgs_state}") ]]; then
      if [[ $pgs_inactive -gt 200 ]]; then
        # If inactive PGs aren't peering after ~10 minutes, fail
        echo "Failure, found inactive PGs that aren't peering"
        exit 1
      fi
      (( pgs_inactive+=1 ))
    else
      pgs_inactive=0
    fi
    if [[ "${pgs_state}" ]]; then
      pgs_ready=0
    else
      (( pgs_ready+=1 ))
    fi
    sleep 30
  done
}

function wait_for_degraded_objects () {
  echo "#### Start: Checking for degraded objects ####"

  # Loop until no degraded objects
  while [[ ! -z "`ceph --cluster ${CLUSTER} -s | grep 'degraded'`" ]]
  do
    sleep 30
    ceph -s
  done
}

function wait_for_degraded_and_misplaced_objects () {
  echo "#### Start: Checking for degraded and misplaced objects ####"

  # Loop until no degraded or misplaced objects
  while [[ ! -z "`ceph --cluster ${CLUSTER} -s | grep 'degraded\|misplaced'`" ]]
  do
    sleep 30
    ceph -s
  done
}

function restart_by_rack() {
  racks=`ceph osd tree | awk '/rack/{print $4}'`
  echo "Racks under ceph cluster are: $racks"
  for rack in $racks
  do
    hosts_in_rack=(`ceph osd tree | sed -n "/rack $rack/,/rack/p" | awk '/host/{print $4}' | tr '\n' ' '|sed 's/ *$//g'`)
    echo "hosts under rack "$rack" are: ${hosts_in_rack[@]}"
    echo "hosts count under $rack are: ${#hosts_in_rack[@]}"
    for host in ${hosts_in_rack[@]}
    do
      echo "host is : $host"
      if [[ ! -z "$host" ]]; then
        pods_on_host=$(kubectl get po -n "$CEPH_NAMESPACE" -l component=osd -o wide |grep "$host"|awk '{print $1}' | tr '\n' ' '|sed 's/ *$//g')
        echo "Restarting the pods under host $host"
        for pod in ${pods_on_host}
        do
          kubectl delete pod -n "$CEPH_NAMESPACE" "${pod}" || true
        done
      fi
    done
    echo "waiting for the pods under host $host to restart"
    # The pods will not be ready in the first 60 seconds, so we can reduce
    # the number of queries to kubernetes.
    sleep 60
    # Degraded objects won't recover with noout set unless pods come back and
    # PGs become healthy, so simply wait for 0 degraded objects
    wait_for_degraded_objects
    ceph -s
  done
}

if [[ "$DISRUPTIVE_OSD_RESTART" != "true" ]]; then
  wait_for_pods $CEPH_NAMESPACE
fi

require_upgrade=0
max_release=0

for ds in `kubectl get ds --namespace=$CEPH_NAMESPACE -l component=osd --no-headers=true|awk '{print $1}'`
do
  updatedNumberScheduled=`kubectl get ds -n $CEPH_NAMESPACE $ds -o json|jq -r .status.updatedNumberScheduled`
  desiredNumberScheduled=`kubectl get ds -n $CEPH_NAMESPACE $ds -o json|jq -r .status.desiredNumberScheduled`
  if [[ $updatedNumberScheduled != $desiredNumberScheduled ]]; then
    if kubectl get ds -n $CEPH_NAMESPACE $ds -o json|jq -r .status|grep -i "numberAvailable" ;then
      require_upgrade=$((require_upgrade+1))
      _release=`kubectl get ds -n $CEPH_NAMESPACE $ds -o json|jq -r .status.observedGeneration`
      max_release=$(( max_release > _release ? max_release : _release ))
    fi
  fi
done

echo "Latest revision of the helm chart(s) is: $max_release"

# If flags are set that will prevent recovery, don't restart OSDs
ceph -s | grep "noup\|noin\|nobackfill\|norebalance\|norecover" > /dev/null
if [[ $? -ne 0 ]]; then
  if [[ "$UNCONDITIONAL_OSD_RESTART" == "true" ]] || [[ $max_release -gt 1 ]]; then
    if [[ "$UNCONDITIONAL_OSD_RESTART" == "true" ]] || [[ $require_upgrade -gt 0 ]]; then
      if [[ "$DISRUPTIVE_OSD_RESTART" == "true" ]]; then
        echo "restarting all osds simultaneously"
        kubectl -n $CEPH_NAMESPACE delete pod -l component=osd
        sleep 60
        echo "waiting for pgs to become active and for degraded objects to recover"
        wait_for_pgs
        wait_for_degraded_objects
        ceph -s
      else
        echo "waiting for inactive pgs and degraded objects before upgrade"
        wait_for_pgs
        wait_for_degraded_and_misplaced_objects
        ceph -s
        ceph osd "set" noout
        echo "let's restart the osds rack by rack"
        restart_by_rack
        ceph osd "unset" noout
      fi
    fi

    # Let's check all the ceph-osd daemonsets
    echo "checking DS"
    check_ds
  else
    echo "No revisions found for upgrade"
  fi
else
  echo "Skipping OSD restarts because flags are set that would prevent recovery"
fi
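The wait_for_pods readiness check above is pure jq over the pod list: container statuses named ceph-osd-default are grouped by their "ready" flag, and the gate passes when the ready share meets REQUIRED_PERCENT_OF_OSDS. A self-contained rerun of the same two filters with canned JSON (the input below is fabricated purely for illustration):

    REQUIRED_PERCENT_OF_OSDS=75
    json='{"items":[
      {"status":{"containerStatuses":[{"name":"ceph-osd-default","ready":true}]}},
      {"status":{"containerStatuses":[{"name":"ceph-osd-default","ready":true}]}},
      {"status":{"containerStatuses":[{"name":"ceph-osd-default","ready":true}]}},
      {"status":{"containerStatuses":[{"name":"ceph-osd-default","ready":false}]}}]}'
    count_pods=".items | map(.status.containerStatuses | .[] | \
      select(.name==\"ceph-osd-default\")) | \
      group_by(.ready) | map({(.[0].ready | tostring): length}) | .[]"
    min_osds="add | if .true >= (.false + .true)*${REQUIRED_PERCENT_OF_OSDS}/100 \
      then \"pass\" else \"fail\" end"
    jq "${count_pods}" <<< "${json}" | jq -s "${min_osds}"   # "pass": 3 of 4 ready >= 75%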
41
ceph-osd/templates/bin/osd/_check.sh.tpl
Normal file
41
ceph-osd/templates/bin/osd/_check.sh.tpl
Normal file
@ -0,0 +1,41 @@
#!/bin/sh

# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

# A liveness check for ceph OSDs: exit 0 if
# all OSDs on this host are in the "active" state
# per their admin sockets.

SOCKDIR=${CEPH_SOCKET_DIR:-/run/ceph}
SBASE=${CEPH_OSD_SOCKET_BASE:-ceph-osd}
SSUFFIX=${CEPH_SOCKET_SUFFIX:-asok}

# default: no sockets, not live
cond=1
for sock in $SOCKDIR/$SBASE.*.$SSUFFIX; do
  if [ -S $sock ]; then
    OSD_ID=$(echo $sock | awk -F. '{print $2}')
    OSD_STATE=$(ceph -f json --connect-timeout 1 --admin-daemon "${sock}" status|jq -r '.state')
    echo "OSD ${OSD_ID} ${OSD_STATE}";
    # Succeed if the OSD state is active (running) or preboot (starting)
    if [ "${OSD_STATE}" = "active" ] || [ "${OSD_STATE}" = "preboot" ]; then
      cond=0
    else
      # Any other state is unexpected and the probe fails
      exit 1
    fi
  else
    echo "No daemon sockets found in $SOCKDIR"
  fi
done
exit $cond
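The probe above keys off the "state" field reported by each OSD's admin socket. A hedged sketch of the underlying query, assuming a socket for OSD 3 exists at the chart's default path:

    sock=/run/ceph/ceph-osd.3.asok
    ceph -f json --connect-timeout 1 --admin-daemon "${sock}" status | jq -r '.state'
    # "active" (running) or "preboot" (starting) keeps the probe alive;
    # any other state fails it.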
106
ceph-osd/templates/bin/osd/_directory.sh.tpl
Normal file
106
ceph-osd/templates/bin/osd/_directory.sh.tpl
Normal file
@ -0,0 +1,106 @@
#!/bin/bash

{{/*
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at

http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/}}

set -ex
export LC_ALL=C

source /tmp/osd-common-ceph-volume.sh

: "${JOURNAL_DIR:=/var/lib/ceph/journal}"

if [[ ! -d /var/lib/ceph/osd ]]; then
  echo "ERROR- could not find the osd directory, did you bind mount the OSD data directory?"
  echo "ERROR- use -v <host_osd_data_dir>:/var/lib/ceph/osd"
  exit 1
fi

# check if anything is present, if not, create an osd and its directory
if [[ -n "$(find /var/lib/ceph/osd -type d -empty ! -name "lost+found")" ]]; then
  echo "Creating osd"
  UUID=$(uuidgen)
  OSD_SECRET=$(ceph-authtool --gen-print-key)
  OSD_ID=$(echo "{\"cephx_secret\": \"${OSD_SECRET}\"}" | ceph osd new ${UUID} -i - -n client.bootstrap-osd -k "$OSD_BOOTSTRAP_KEYRING")

  # test that the OSD_ID is an integer
  if [[ "$OSD_ID" =~ ^-?[0-9]+$ ]]; then
    echo "OSD created with ID: ${OSD_ID}"
  else
    echo "OSD creation failed: ${OSD_ID}"
    exit 1
  fi

  OSD_PATH="$OSD_PATH_BASE-$OSD_ID/"
  if [ -n "${JOURNAL_DIR}" ]; then
    OSD_JOURNAL="${JOURNAL_DIR}/journal.${OSD_ID}"
    chown -R ceph. ${JOURNAL_DIR}
  else
    if [ -n "${JOURNAL}" ]; then
      OSD_JOURNAL=${JOURNAL}
      chown -R ceph. $(dirname ${JOURNAL_DIR})
    else
      OSD_JOURNAL=${OSD_PATH%/}/journal
    fi
  fi
  # create the folder and own it
  mkdir -p "${OSD_PATH}"
  echo "created folder ${OSD_PATH}"
  # write the secret to the osd keyring file
  ceph-authtool --create-keyring ${OSD_PATH%/}/keyring --name osd.${OSD_ID} --add-key ${OSD_SECRET}
  chown -R "${CHOWN_OPT[@]}" ceph. "${OSD_PATH}"
  OSD_KEYRING="${OSD_PATH%/}/keyring"
  # init data directory
  ceph-osd -i ${OSD_ID} --mkfs --osd-uuid ${UUID} --mkjournal --osd-journal ${OSD_JOURNAL} --setuser ceph --setgroup ceph
  # add the osd to the crush map
  crush_location
fi

for OSD_ID in $(ls /var/lib/ceph/osd | sed 's/.*-//'); do
  # NOTE(gagehugo): Writing the OSD_ID to tmp for logging
  echo "${OSD_ID}" > /tmp/osd-id
  OSD_PATH="$OSD_PATH_BASE-$OSD_ID/"
  OSD_KEYRING="${OSD_PATH%/}/keyring"
  if [ -n "${JOURNAL_DIR}" ]; then
    OSD_JOURNAL="${JOURNAL_DIR}/journal.${OSD_ID}"
    chown -R ceph. ${JOURNAL_DIR}
  else
    if [ -n "${JOURNAL}" ]; then
      OSD_JOURNAL=${JOURNAL}
      chown -R ceph. $(dirname ${JOURNAL_DIR})
    else
      OSD_JOURNAL=${OSD_PATH%/}/journal
      chown ceph. ${OSD_JOURNAL}
    fi
  fi
  # log osd filesystem type
  FS_TYPE=`stat --file-system -c "%T" ${OSD_PATH}`
  echo "OSD $OSD_PATH filesystem type: $FS_TYPE"

  # NOTE(supamatt): Just in case permissions do not align up, we recursively set them correctly.
  if [ $(stat -c%U ${OSD_PATH}) != ceph ]; then
    chown -R ceph. ${OSD_PATH};
  fi

  crush_location
done

exec /usr/bin/ceph-osd \
  --cluster ${CLUSTER} \
  -f \
  -i ${OSD_ID} \
  --osd-journal ${OSD_JOURNAL} \
  -k ${OSD_KEYRING} \
  --setuser ceph \
  --setgroup disk & echo $! > /run/ceph-osd.pid
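The directory-backed flow above registers a new OSD by piping a freshly generated cephx secret into ceph osd new, which prints the allocated integer ID on success. Reduced to its essentials as a hedged sketch (the keyring path shown is the chart's default, used here only for illustration):

    UUID=$(uuidgen)
    OSD_SECRET=$(ceph-authtool --gen-print-key)
    # The cluster returns the new OSD's integer ID on stdout.
    echo "{\"cephx_secret\": \"${OSD_SECRET}\"}" | \
      ceph osd new "${UUID}" -i - \
        -n client.bootstrap-osd -k /var/lib/ceph/bootstrap-osd/ceph.keyring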
20
ceph-osd/templates/bin/osd/_init.sh.tpl
Normal file
20
ceph-osd/templates/bin/osd/_init.sh.tpl
Normal file
@ -0,0 +1,20 @@
#!/bin/bash

{{/*
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at

http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/}}

set -ex

echo "Initializing the osd with ${DEPLOY_TOOL}"
exec "/tmp/init-${DEPLOY_TOOL}.sh"
35
ceph-osd/templates/bin/osd/_log-runner-stop.sh.tpl
Normal file
35
ceph-osd/templates/bin/osd/_log-runner-stop.sh.tpl
Normal file
@ -0,0 +1,35 @@
#!/bin/bash

{{/*
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at

http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/}}

set -ex

source /tmp/utils-resolveLocations.sh

touch /tmp/ceph-log-runner.stop

# Read the pid written by the log-tail script
TAIL_PID="$(cat /tmp/ceph-log-runner-tail.pid)"
while kill -0 ${TAIL_PID} >/dev/null 2>&1;
do
  kill -9 ${TAIL_PID};
  sleep 1;
done

SLEEP_PID="$(cat /tmp/ceph-log-runner-sleep.pid)"
while kill -0 ${SLEEP_PID} >/dev/null 2>&1;
do
  kill -9 ${SLEEP_PID};
  sleep 1;
done
53
ceph-osd/templates/bin/osd/_log-tail.sh.tpl
Normal file
53
ceph-osd/templates/bin/osd/_log-tail.sh.tpl
Normal file
@ -0,0 +1,53 @@
#!/bin/bash
set -ex

osd_id_file="/tmp/osd-id"

function wait_for_file() {
  local file="$1"; shift
  local wait_seconds="${1:-30}"; shift

  until test $((wait_seconds--)) -eq 0 -o -f "$file" ; do
    sleep 1
  done

  ((++wait_seconds))
}
wait_for_file "${osd_id_file}" "${WAIT_FOR_OSD_ID_TIMEOUT}"

log_file="/var/log/ceph/${DAEMON_NAME}.$(cat "${osd_id_file}").log"
wait_for_file "${log_file}" "${WAIT_FOR_OSD_ID_TIMEOUT}"

trap "exit" SIGTERM SIGINT
keep_running=true

function tail_file () {
  while $keep_running; do
    tail --retry -f "${log_file}" &
    tail_pid=$!
    echo $tail_pid > /tmp/ceph-log-runner-tail.pid
    wait $tail_pid
    if [ -f /tmp/ceph-log-runner.stop ]; then
      keep_running=false
    fi
    sleep 30
  done
}

function truncate_log () {
  while $keep_running; do
    # Background the sleep so its pid can be recorded for the stop script
    sleep ${TRUNCATE_PERIOD} &
    sleep_pid=$!
    echo $sleep_pid > /tmp/ceph-log-runner-sleep.pid
    wait $sleep_pid
    if [[ -f ${log_file} ]] ; then
      truncate -s "${TRUNCATE_SIZE}" "${log_file}"
    fi
  done
}

tail_file &
truncate_log &

wait -n
keep_running=false
wait
20
ceph-osd/templates/bin/osd/_start.sh.tpl
Normal file
20
ceph-osd/templates/bin/osd/_start.sh.tpl
Normal file
@ -0,0 +1,20 @@
#!/bin/bash

{{/*
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at

http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/}}

set -ex

echo "LAUNCHING OSD: in ${STORAGE_TYPE%-*}:${STORAGE_TYPE#*-} mode"
exec "/tmp/osd-${STORAGE_TYPE%-*}-${DEPLOY_TOOL}.sh"
35
ceph-osd/templates/bin/osd/_stop.sh.tpl
Normal file
35
ceph-osd/templates/bin/osd/_stop.sh.tpl
Normal file
@ -0,0 +1,35 @@
#!/bin/bash

{{/*
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at

http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/}}

set -ex

source /tmp/utils-resolveLocations.sh

CEPH_OSD_PID="$(cat /run/ceph-osd.pid)"
while kill -0 ${CEPH_OSD_PID} >/dev/null 2>&1; do
  kill -SIGTERM ${CEPH_OSD_PID}
  sleep 1
done

if [ "x${STORAGE_TYPE%-*}" == "xblock" ]; then
  OSD_DEVICE=$(readlink -f ${STORAGE_LOCATION})
  OSD_JOURNAL=$(readlink -f ${JOURNAL_LOCATION})
  if [ "x${STORAGE_TYPE#*-}" == "xlogical" ]; then
    umount "$(findmnt -S "${OSD_DEVICE}1" | tail -n +2 | awk '{ print $1 }')"
  fi
fi
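The stop script relies on a common shell pattern: kill -0 sends no signal and only tests whether the PID still exists, so SIGTERM is re-sent once per second until the daemon has actually exited. A standalone sketch of the same loop against a throwaway process:

    sleep 300 & pid=$!
    while kill -0 "${pid}" >/dev/null 2>&1; do
      kill -SIGTERM "${pid}"
      sleep 1
    done
    echo "process ${pid} has exited"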
142
ceph-osd/templates/bin/osd/ceph-volume/_block.sh.tpl
Normal file
142
ceph-osd/templates/bin/osd/ceph-volume/_block.sh.tpl
Normal file
@ -0,0 +1,142 @@
#!/bin/bash

{{/*
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at

http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/}}

source /tmp/osd-common-ceph-volume.sh

set -ex

: "${OSD_SOFT_FORCE_ZAP:=1}"
: "${OSD_JOURNAL_DISK:=}"

if [ "x${STORAGE_TYPE%-*}" == "xdirectory" ]; then
  export OSD_DEVICE="/var/lib/ceph/osd"
else
  export OSD_DEVICE=$(readlink -f ${STORAGE_LOCATION})
fi

if [ "x$JOURNAL_TYPE" == "xdirectory" ]; then
  export OSD_JOURNAL="/var/lib/ceph/journal"
else
  export OSD_JOURNAL=$(readlink -f ${JOURNAL_LOCATION})
fi

if [[ -z "${OSD_DEVICE}" ]];then
  echo "ERROR- You must provide a device to build your OSD ie: /dev/sdb"
  exit 1
fi

if [[ ! -b "${OSD_DEVICE}" ]]; then
  echo "ERROR- The device pointed to by OSD_DEVICE (${OSD_DEVICE}) doesn't exist!"
  exit 1
fi

ACTIVATE_OPTIONS=""
CEPH_OSD_OPTIONS=""

udev_settle

OSD_ID=$(ceph-volume inventory ${OSD_DEVICE} | grep "osd id" | awk '{print $3}')
if [[ -z ${OSD_ID} ]]; then
  echo "OSD_ID not found from device ${OSD_DEVICE}"
  exit 1
fi
OSD_FSID=$(ceph-volume inventory ${OSD_DEVICE} | grep "osd fsid" | awk '{print $3}')
if [[ -z ${OSD_FSID} ]]; then
  echo "OSD_FSID not found from device ${OSD_DEVICE}"
  exit 1
fi
OSD_PATH="${OSD_PATH_BASE}-${OSD_ID}"
OSD_KEYRING="${OSD_PATH}/keyring"

mkdir -p ${OSD_PATH}

ceph-volume lvm -v \
  --setuser ceph \
  --setgroup disk \
  activate ${ACTIVATE_OPTIONS} \
  --auto-detect-objectstore \
  --no-systemd ${OSD_ID} ${OSD_FSID}

# NOTE(stevetaylor): Set the OSD's crush weight (use noin flag to prevent rebalancing if necessary)
OSD_WEIGHT=$(get_osd_crush_weight_from_device ${OSD_DEVICE})
# NOTE(supamatt): add or move the OSD's CRUSH location
crush_location

if [ "${OSD_BLUESTORE:-0}" -ne 1 ]; then
  if [ -n "${OSD_JOURNAL}" ]; then
    if [ -b "${OSD_JOURNAL}" ]; then
      OSD_JOURNAL_DISK="$(readlink -f ${OSD_PATH}/journal)"
      if [ -z "${OSD_JOURNAL_DISK}" ]; then
        echo "ERROR: Unable to find journal device ${OSD_JOURNAL_DISK}"
        exit 1
      else
        OSD_JOURNAL="${OSD_JOURNAL_DISK}"
        if [ -e "${OSD_PATH}/run_mkjournal" ]; then
          ceph-osd -i ${OSD_ID} --mkjournal
          rm -rf ${OSD_PATH}/run_mkjournal
        fi
      fi
    fi
    if [ "x${JOURNAL_TYPE}" == "xdirectory" ]; then
      OSD_JOURNAL="${OSD_JOURNAL}/journal.${OSD_ID}"
      touch ${OSD_JOURNAL}
      wait_for_file "${OSD_JOURNAL}"
    else
      if [ ! -b "${OSD_JOURNAL}" ]; then
        echo "ERROR: Unable to find journal device ${OSD_JOURNAL}"
        exit 1
      else
        chown ceph. "${OSD_JOURNAL}"
      fi
    fi
  else
    wait_for_file "${OSD_JOURNAL}"
    chown ceph. "${OSD_JOURNAL}"
  fi
fi

# NOTE(supamatt): Just in case permissions do not align up, we recursively set them correctly.
if [ $(stat -c%U ${OSD_PATH}) != ceph ]; then
  chown -R ceph. ${OSD_PATH};
fi

# NOTE(gagehugo): Writing the OSD_ID to tmp for logging
echo "${OSD_ID}" > /tmp/osd-id

if [ "x${JOURNAL_TYPE}" == "xdirectory" ]; then
  chown -R ceph. /var/lib/ceph/journal
  ceph-osd \
    --cluster ceph \
    --osd-data ${OSD_PATH} \
    --osd-journal ${OSD_JOURNAL} \
    -f \
    -i ${OSD_ID} \
    --setuser ceph \
    --setgroup disk \
    --mkjournal
fi

exec /usr/bin/ceph-osd \
  --cluster ${CLUSTER} \
  ${CEPH_OSD_OPTIONS} \
  -f \
  -i ${OSD_ID} \
  --setuser ceph \
  --setgroup disk & echo $! > /run/ceph-osd.pid
wait

# Clean up resources held by the common script
common_cleanup
103
ceph-osd/templates/bin/osd/ceph-volume/_bluestore.sh.tpl
Normal file
103
ceph-osd/templates/bin/osd/ceph-volume/_bluestore.sh.tpl
Normal file
@ -0,0 +1,103 @@
#!/bin/bash

{{/*
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at

http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/}}

source /tmp/osd-common-ceph-volume.sh

set -ex

: "${OSD_SOFT_FORCE_ZAP:=1}"

export OSD_DEVICE=$(readlink -f ${STORAGE_LOCATION})

if [[ -z "${OSD_DEVICE}" ]];then
  echo "ERROR- You must provide a device to build your OSD ie: /dev/sdb"
  exit 1
fi

if [[ ! -b "${OSD_DEVICE}" ]]; then
  echo "ERROR- The device pointed to by OSD_DEVICE (${OSD_DEVICE}) doesn't exist!"
  exit 1
fi

ACTIVATE_OPTIONS=""
CEPH_OSD_OPTIONS=""

udev_settle

OSD_ID=$(get_osd_id_from_device ${OSD_DEVICE})
if [[ -z ${OSD_ID} ]]; then
  echo "OSD_ID not found from device ${OSD_DEVICE}"
  exit 1
fi
OSD_FSID=$(get_osd_fsid_from_device ${OSD_DEVICE})
if [[ -z ${OSD_FSID} ]]; then
  echo "OSD_FSID not found from device ${OSD_DEVICE}"
  exit 1
fi
OSD_PATH="${OSD_PATH_BASE}-${OSD_ID}"
OSD_KEYRING="${OSD_PATH}/keyring"

mkdir -p ${OSD_PATH}

ceph-volume lvm -v \
  --setuser ceph \
  --setgroup disk \
  activate ${ACTIVATE_OPTIONS} \
  --auto-detect-objectstore \
  --no-systemd ${OSD_ID} ${OSD_FSID}
# Cross-check the db and wal symlinks in case they are missing
DB_DEV=$(get_osd_db_device_from_device ${OSD_DEVICE})
if [[ ! -z ${DB_DEV} ]]; then
  if [[ ! -h /var/lib/ceph/osd/ceph-${OSD_ID}/block.db ]]; then
    ln -snf ${DB_DEV} /var/lib/ceph/osd/ceph-${OSD_ID}/block.db
    chown -h ceph:ceph ${DB_DEV}
    chown -h ceph:ceph /var/lib/ceph/osd/ceph-${OSD_ID}/block.db
  fi
fi
WAL_DEV=$(get_osd_wal_device_from_device ${OSD_DEVICE})
if [[ ! -z ${WAL_DEV} ]]; then
  if [[ ! -h /var/lib/ceph/osd/ceph-${OSD_ID}/block.wal ]]; then
    ln -snf ${WAL_DEV} /var/lib/ceph/osd/ceph-${OSD_ID}/block.wal
    chown -h ceph:ceph ${WAL_DEV}
    chown -h ceph:ceph /var/lib/ceph/osd/ceph-${OSD_ID}/block.wal
  fi
fi

# NOTE(stevetaylor): Set the OSD's crush weight (use noin flag to prevent rebalancing if necessary)
OSD_WEIGHT=$(get_osd_crush_weight_from_device ${OSD_DEVICE})
# NOTE(supamatt): add or move the OSD's CRUSH location
crush_location


# NOTE(supamatt): Just in case permissions do not align up, we recursively set them correctly.
if [ $(stat -c%U ${OSD_PATH}) != ceph ]; then
  chown -R ceph. ${OSD_PATH};
fi

# NOTE(gagehugo): Writing the OSD_ID to tmp for logging
echo "${OSD_ID}" > /tmp/osd-id

exec /usr/bin/ceph-osd \
  --cluster ${CLUSTER} \
  ${CEPH_OSD_OPTIONS} \
  -f \
  -i ${OSD_ID} \
  --setuser ceph \
  --setgroup disk & echo $! > /run/ceph-osd.pid
wait

# Clean up resources held by the common script
common_cleanup
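Both activation scripts above resolve the OSD ID and FSID from LVM metadata: ceph-volume records them as lv_tags on the OSD's logical volume. A hedged sketch of inspecting those tags directly, assuming the device name /dev/sdb purely for illustration:

    # List what ceph-volume knows about the device, then pull the two tags
    # that the helper functions in the common script read.
    ceph-volume lvm list /dev/sdb --format json \
      | jq -r '.[][].tags | .["ceph.osd_id"], .["ceph.osd_fsid"]'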
562
ceph-osd/templates/bin/osd/ceph-volume/_common.sh.tpl
Normal file
562
ceph-osd/templates/bin/osd/ceph-volume/_common.sh.tpl
Normal file
@ -0,0 +1,562 @@
#!/bin/bash

{{/*
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at

http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/}}

set -ex
shopt -s expand_aliases
export lock_fd=''
export ALREADY_LOCKED=0
export PS4='+${BASH_SOURCE:+$(basename ${BASH_SOURCE}):${LINENO}:}${FUNCNAME:+${FUNCNAME}():} '

source /tmp/utils-resolveLocations.sh

: "${CRUSH_LOCATION:=root=default host=${HOSTNAME}}"
: "${OSD_PATH_BASE:=/var/lib/ceph/osd/${CLUSTER}}"
: "${CEPH_CONF:="/etc/ceph/${CLUSTER}.conf"}"
: "${OSD_BOOTSTRAP_KEYRING:=/var/lib/ceph/bootstrap-osd/${CLUSTER}.keyring}"
: "${OSD_JOURNAL_UUID:=$(uuidgen)}"
: "${OSD_JOURNAL_SIZE:=$(awk '/^osd_journal_size/{print $3}' ${CEPH_CONF}.template)}"
: "${OSD_WEIGHT:=1.0}"

{{ include "helm-toolkit.snippets.mon_host_from_k8s_ep" . }}

# Obtain a global lock on /var/lib/ceph/tmp/init-osd.lock
function lock() {
  # Open a file descriptor for the lock file if there isn't one already
  if [[ -z "${lock_fd}" ]]; then
    exec {lock_fd}>/var/lib/ceph/tmp/init-osd.lock || exit 1
  fi
  flock -w 600 "${lock_fd}" &> /dev/null
  ALREADY_LOCKED=1
}

# Release the global lock on /var/lib/ceph/tmp/init-osd.lock
function unlock() {
  flock -u "${lock_fd}" &> /dev/null
  ALREADY_LOCKED=0
}

# "Destructor" for common.sh, must be called by scripts that source this one
function common_cleanup() {
  # Close the file descriptor for the lock file
  if [[ ! -z "${lock_fd}" ]]; then
    if [[ ${ALREADY_LOCKED} -ne 0 ]]; then
      unlock
    fi
    eval "exec ${lock_fd}>&-"
  fi
}

# Run a command within the global synchronization lock
function locked() {
  # Don't log every command inside locked() to keep logs cleaner
  { set +x; } 2>/dev/null

  local LOCK_SCOPE=0

  # Allow locks to be re-entrant to avoid deadlocks
  if [[ ${ALREADY_LOCKED} -eq 0 ]]; then
    lock
    LOCK_SCOPE=1
  fi

  # Execute the synchronized command
  set -x
  "$@"
  { set +x; } 2>/dev/null

  # Only unlock if the lock was obtained in this scope
  if [[ ${LOCK_SCOPE} -ne 0 ]]; then
    unlock
  fi

  # Re-enable command logging
  set -x
}
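# A usage sketch of the re-entrant lock above (illustrative, not executed):
# an outer locked() call sets ALREADY_LOCKED, so aliased disk commands
# invoked from inside the locked function reuse the same flock instead of
# deadlocking on /var/lib/ceph/tmp/init-osd.lock. The function name and
# device below are hypothetical.
#
#   function zap_and_rescan {
#     wipefs --all /dev/sdb   # alias expands to "locked wipefs"; lock is held
#     lvm_scan                # nested locked call; ALREADY_LOCKED short-circuits
#   }
#   locked zap_and_rescan     # outer call takes and releases the lock once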
|
||||
# Alias commands that interact with disks so they are always synchronized
|
||||
alias dmsetup='locked dmsetup'
|
||||
alias pvs='locked pvs'
|
||||
alias vgs='locked vgs'
|
||||
alias lvs='locked lvs'
|
||||
alias pvdisplay='locked pvdisplay'
|
||||
alias vgdisplay='locked vgdisplay'
|
||||
alias lvdisplay='locked lvdisplay'
|
||||
alias pvcreate='locked pvcreate'
|
||||
alias vgcreate='locked vgcreate'
|
||||
alias lvcreate='locked lvcreate'
|
||||
alias pvremove='locked pvremove'
|
||||
alias vgremove='locked vgremove'
|
||||
alias lvremove='locked lvremove'
|
||||
alias pvrename='locked pvrename'
|
||||
alias vgrename='locked vgrename'
|
||||
alias lvrename='locked lvrename'
|
||||
alias pvchange='locked pvchange'
|
||||
alias vgchange='locked vgchange'
|
||||
alias lvchange='locked lvchange'
|
||||
alias pvscan='locked pvscan'
|
||||
alias vgscan='locked vgscan'
|
||||
alias lvscan='locked lvscan'
|
||||
alias lvm_scan='locked lvm_scan'
|
||||
alias partprobe='locked partprobe'
|
||||
alias ceph-volume='locked ceph-volume'
|
||||
alias disk_zap='locked disk_zap'
|
||||
alias zap_extra_partitions='locked zap_extra_partitions'
|
||||
alias udev_settle='locked udev_settle'
|
||||
alias wipefs='locked wipefs'
|
||||
alias sgdisk='locked sgdisk'
|
||||
alias dd='locked dd'
|
||||
|
||||
eval CRUSH_FAILURE_DOMAIN_TYPE=$(cat /etc/ceph/storage.json | python3 -c 'import sys, json; data = json.load(sys.stdin); print(json.dumps(data["failure_domain"]))')
|
||||
eval CRUSH_FAILURE_DOMAIN_NAME=$(cat /etc/ceph/storage.json | python3 -c 'import sys, json; data = json.load(sys.stdin); print(json.dumps(data["failure_domain_name"]))')
|
||||
eval CRUSH_FAILURE_DOMAIN_NAME=$(cat /etc/ceph/storage.json | python3 -c 'import sys, json; data = json.load(sys.stdin); print(json.dumps(data["failure_domain_name"]))')
|
||||
eval CRUSH_FAILURE_DOMAIN_BY_HOSTNAME=$(cat /etc/ceph/storage.json | python3 -c 'import sys, json; data = json.load(sys.stdin); print(json.dumps(data["failure_domain_by_hostname"]))')
|
||||
eval CRUSH_FAILURE_DOMAIN_FROM_HOSTNAME_MAP=$(cat /etc/ceph/storage.json | jq '.failure_domain_by_hostname_map."'$HOSTNAME'"')
|
||||
eval DEVICE_CLASS=$(cat /etc/ceph/storage.json | python3 -c 'import sys, json; data = json.load(sys.stdin); print(json.dumps(data["device_class"]))')
|
||||
|
||||
if [[ $(ceph -v | awk '/version/{print $3}' | cut -d. -f1) -lt 12 ]]; then
|
||||
echo "ERROR - The minimum Ceph version supported is Luminous 12.x.x"
|
||||
exit 1
|
||||
fi
|
||||
|
||||
if [ -z "${HOSTNAME}" ]; then
|
||||
echo "HOSTNAME not set; This will prevent to add an OSD into the CRUSH map"
|
||||
exit 1
|
||||
fi
|
||||
|
||||
if [[ ! -e ${CEPH_CONF}.template ]]; then
|
||||
echo "ERROR- ${CEPH_CONF}.template must exist; get it from your existing mon"
|
||||
exit 1
|
||||
else
|
||||
ENDPOINT=$(mon_host_from_k8s_ep "${NAMESPACE}" ceph-mon-discovery)
|
||||
if [[ -z "${ENDPOINT}" ]]; then
|
||||
/bin/sh -c -e "cat ${CEPH_CONF}.template | tee ${CEPH_CONF}" || true
|
||||
else
|
||||
/bin/sh -c -e "cat ${CEPH_CONF}.template | sed 's#mon_host.*#mon_host = ${ENDPOINT}#g' | tee ${CEPH_CONF}" || true
|
||||
fi
|
||||
fi
|
||||
|
||||
# Wait for a file to exist, regardless of the type
|
||||
function wait_for_file {
|
||||
timeout 10 bash -c "while [ ! -e ${1} ]; do echo 'Waiting for ${1} to show up' && sleep 1 ; done"
|
||||
}
|
||||
|
||||
function is_available {
|
||||
command -v $@ &>/dev/null
|
||||
}
|
||||
|
||||
function ceph_cmd_retry() {
|
||||
cnt=0
|
||||
until "ceph" "$@" || [ $cnt -ge 6 ]; do
|
||||
sleep 10
|
||||
((cnt++))
|
||||
done
|
||||
}
|
||||
|
||||
function crush_create_or_move {
|
||||
local crush_location=${1}
|
||||
ceph_cmd_retry --cluster "${CLUSTER}" --name="osd.${OSD_ID}" --keyring="${OSD_KEYRING}" \
|
||||
osd crush create-or-move -- "${OSD_ID}" "${OSD_WEIGHT}" ${crush_location}
|
||||
}
|
||||
|
||||
function crush_add_and_move {
|
||||
local crush_failure_domain_type=${1}
|
||||
local crush_failure_domain_name=${2}
|
||||
local crush_location=$(echo "root=default ${crush_failure_domain_type}=${crush_failure_domain_name} host=${HOSTNAME}")
|
||||
crush_create_or_move "${crush_location}"
|
||||
local crush_failure_domain_location_check=$(ceph_cmd_retry --cluster "${CLUSTER}" --name="osd.${OSD_ID}" --keyring="${OSD_KEYRING}" osd find ${OSD_ID} | grep "${crush_failure_domain_type}" | awk -F '"' '{print $4}')
|
||||
if [ "x${crush_failure_domain_location_check}" != "x${crush_failure_domain_name}" ]; then
|
||||
# NOTE(supamatt): Manually move the buckets for previously configured CRUSH configurations
|
||||
# as create-or-move may not appropiately move them.
|
||||
ceph_cmd_retry --cluster "${CLUSTER}" --name="osd.${OSD_ID}" --keyring="${OSD_KEYRING}" \
|
||||
osd crush add-bucket "${crush_failure_domain_name}" "${crush_failure_domain_type}" || true
|
||||
ceph_cmd_retry --cluster "${CLUSTER}" --name="osd.${OSD_ID}" --keyring="${OSD_KEYRING}" \
|
||||
osd crush move "${crush_failure_domain_name}" root=default || true
|
||||
ceph_cmd_retry --cluster "${CLUSTER}" --name="osd.${OSD_ID}" --keyring="${OSD_KEYRING}" \
|
||||
osd crush move "${HOSTNAME}" "${crush_failure_domain_type}=${crush_failure_domain_name}" || true
|
||||
fi
|
||||
}
|
||||
|
||||
function crush_location {
|
||||
set_device_class
|
||||
if [ "x${CRUSH_FAILURE_DOMAIN_TYPE}" != "xhost" ]; then
|
||||
|
||||
echo "Lets check this host is registered in k8s"
|
||||
if kubectl get node ${HOSTNAME}; then
|
||||
CRUSH_FAILURE_DOMAIN_NAME_FROM_NODE_LABEL=$(kubectl get node ${HOSTNAME} -o json| jq -r '.metadata.labels.rack')
|
||||
else
|
||||
echo "It seems there is some issue with setting the hostname on this node hence we didnt found this node in k8s"
|
||||
kubectl get nodes
|
||||
echo ${HOSTNAME}
|
||||
exit 1
|
||||
fi
|
||||
|
||||
if [ ${CRUSH_FAILURE_DOMAIN_NAME_FROM_NODE_LABEL} != "null" ]; then
|
||||
CRUSH_FAILURE_DOMAIN_NAME=${CRUSH_FAILURE_DOMAIN_NAME_FROM_NODE_LABEL}
|
||||
fi
|
||||
|
||||
if [ "x${CRUSH_FAILURE_DOMAIN_NAME}" != "xfalse" ]; then
|
||||
crush_add_and_move "${CRUSH_FAILURE_DOMAIN_TYPE}" "${CRUSH_FAILURE_DOMAIN_NAME}"
|
||||
elif [ "x${CRUSH_FAILURE_DOMAIN_BY_HOSTNAME}" != "xfalse" ]; then
|
||||
crush_add_and_move "${CRUSH_FAILURE_DOMAIN_TYPE}" "$(echo ${CRUSH_FAILURE_DOMAIN_TYPE}_$(echo ${HOSTNAME} | cut -c ${CRUSH_FAILURE_DOMAIN_BY_HOSTNAME}))"
|
||||
elif [ "x${CRUSH_FAILURE_DOMAIN_FROM_HOSTNAME_MAP}" != "xnull" ]; then
|
||||
crush_add_and_move "${CRUSH_FAILURE_DOMAIN_TYPE}" "${CRUSH_FAILURE_DOMAIN_FROM_HOSTNAME_MAP}"
|
||||
else
|
||||
# NOTE(supamatt): neither variables are defined then we fall back to default behavior
|
||||
crush_create_or_move "${CRUSH_LOCATION}"
|
||||
fi
|
||||
else
|
||||
crush_create_or_move "${CRUSH_LOCATION}"
|
||||
fi
|
||||
}
|
||||
|
||||
# Calculate proper device names, given a device and partition number
|
||||
function dev_part {
|
||||
local osd_device=${1}
|
||||
local osd_partition=${2}
|
||||
|
||||
if [[ -L ${osd_device} ]]; then
|
||||
# This device is a symlink. Work out it's actual device
|
||||
local actual_device=$(readlink -f "${osd_device}")
|
||||
local bn=$(basename "${osd_device}")
|
||||
if [[ "${actual_device:0-1:1}" == [0-9] ]]; then
|
||||
local desired_partition="${actual_device}p${osd_partition}"
|
||||
else
|
||||
local desired_partition="${actual_device}${osd_partition}"
|
||||
fi
|
||||
# Now search for a symlink in the directory of $osd_device
|
||||
# that has the correct desired partition, and the longest
|
||||
# shared prefix with the original symlink
|
||||
local symdir=$(dirname "${osd_device}")
|
||||
local link=""
|
||||
local pfxlen=0
|
||||
for option in ${symdir}/*; do
|
||||
[[ -e $option ]] || break
|
||||
if [[ $(readlink -f "${option}") == "${desired_partition}" ]]; then
|
||||
local optprefixlen=$(prefix_length "${option}" "${bn}")
|
||||
if [[ ${optprefixlen} > ${pfxlen} ]]; then
|
||||
link=${symdir}/${option}
|
||||
pfxlen=${optprefixlen}
|
||||
fi
|
||||
fi
|
||||
done
|
||||
if [[ $pfxlen -eq 0 ]]; then
|
||||
>&2 echo "Could not locate appropriate symlink for partition ${osd_partition} of ${osd_device}"
|
||||
exit 1
|
||||
fi
|
||||
echo "$link"
|
||||
elif [[ "${osd_device:0-1:1}" == [0-9] ]]; then
|
||||
echo "${osd_device}p${osd_partition}"
|
||||
else
|
||||
echo "${osd_device}${osd_partition}"
|
||||
fi
|
||||
}
|
||||
|
||||
function zap_extra_partitions {
|
||||
# Examine temp mount and delete any block.db and block.wal partitions
|
||||
mountpoint=${1}
|
||||
journal_disk=""
|
||||
journal_part=""
|
||||
block_db_disk=""
|
||||
block_db_part=""
|
||||
block_wal_disk=""
|
||||
block_wal_part=""
|
||||
|
||||
# Discover journal, block.db, and block.wal partitions first before deleting anything
|
||||
# If the partitions are on the same disk, deleting one can affect discovery of the other(s)
|
||||
if [ -L "${mountpoint}/journal" ]; then
|
||||
journal_disk=$(readlink -m ${mountpoint}/journal | sed 's/[0-9]*//g')
|
||||
journal_part=$(readlink -m ${mountpoint}/journal | sed 's/[^0-9]*//g')
|
||||
fi
|
||||
if [ -L "${mountpoint}/block.db" ]; then
|
||||
block_db_disk=$(readlink -m ${mountpoint}/block.db | sed 's/[0-9]*//g')
|
||||
block_db_part=$(readlink -m ${mountpoint}/block.db | sed 's/[^0-9]*//g')
|
||||
fi
|
||||
if [ -L "${mountpoint}/block.wal" ]; then
|
||||
block_wal_disk=$(readlink -m ${mountpoint}/block.wal | sed 's/[0-9]*//g')
|
||||
block_wal_part=$(readlink -m ${mountpoint}/block.wal | sed 's/[^0-9]*//g')
|
||||
fi
|
||||
|
||||
# Delete any discovered journal, block.db, and block.wal partitions
|
||||
if [ ! -z "${journal_disk}" ]; then
|
||||
sgdisk -d ${journal_part} ${journal_disk}
|
||||
/usr/bin/flock -s ${journal_disk} /sbin/partprobe ${journal_disk}
|
||||
fi
|
||||
if [ ! -z "${block_db_disk}" ]; then
|
||||
sgdisk -d ${block_db_part} ${block_db_disk}
|
||||
/usr/bin/flock -s ${block_db_disk} /sbin/partprobe ${block_db_disk}
|
||||
fi
|
||||
if [ ! -z "${block_wal_disk}" ]; then
|
||||
sgdisk -d ${block_wal_part} ${block_wal_disk}
|
||||
/usr/bin/flock -s ${block_wal_disk} /sbin/partprobe ${block_wal_disk}
|
||||
fi
|
||||
}
|
||||
|
||||
function disk_zap {
|
||||
# Run all the commands to clear a disk
|
||||
local device=${1}
|
||||
local dm_devices=$(get_dm_devices_from_osd_device "${device}" | xargs)
|
||||
for dm_device in ${dm_devices}; do
|
||||
if [[ "$(dmsetup ls | grep ${dm_device})" ]]; then
|
||||
dmsetup remove ${dm_device}
|
||||
fi
|
||||
done
|
||||
local logical_volumes=$(get_lv_paths_from_osd_device "${device}" | xargs)
|
||||
if [[ "${logical_volumes}" ]]; then
|
||||
lvremove -y ${logical_volumes}
|
||||
fi
|
||||
local volume_group=$(pvdisplay -ddd -v ${device} | grep "VG Name" | awk '/ceph/{print $3}' | grep "ceph")
|
||||
if [[ ${volume_group} ]]; then
|
||||
vgremove -y ${volume_group}
|
||||
pvremove -y ${device}
|
||||
ceph-volume lvm zap ${device} --destroy
|
||||
fi
|
||||
wipefs --all ${device}
|
||||
sgdisk --zap-all -- ${device}
|
||||
# Wipe the first 200MB boundary, as Bluestore redeployments will not work otherwise
|
||||
dd if=/dev/zero of=${device} bs=1M count=200
|
||||
}
|
||||
|
||||
# This should be run atomically to prevent unexpected cache states
|
||||
function lvm_scan {
|
||||
pvscan --cache
|
||||
vgscan --cache
|
||||
lvscan --cache
|
||||
pvscan
|
||||
vgscan
|
||||
lvscan
|
||||
}
|
||||
|
||||
function wait_for_device {
|
||||
local device="$1"
|
||||
|
||||
echo "Waiting for block device ${device} to appear"
|
||||
for countdown in {1..600}; do
|
||||
test -b "${device}" && break
|
||||
sleep 1
|
||||
done
|
||||
test -b "${device}" || exit 1
|
||||
}
|
||||
|
||||
function udev_settle {
|
||||
osd_devices="${OSD_DEVICE}"
|
||||
partprobe "${OSD_DEVICE}"
|
||||
lvm_scan
|
||||
if [ "${OSD_BLUESTORE:-0}" -eq 1 ]; then
|
||||
if [ ! -z "$BLOCK_DB" ]; then
|
||||
osd_devices="${osd_devices}\|${BLOCK_DB}"
|
||||
# BLOCK_DB could be a physical or logical device here
|
||||
local block_db="$BLOCK_DB"
|
||||
local db_vg="$(echo $block_db | cut -d'/' -f1)"
|
||||
if [ ! -z "$db_vg" ]; then
|
||||
block_db=$(pvdisplay -ddd -v | grep -B1 "$db_vg" | awk '/PV Name/{print $3}')
|
||||
fi
|
||||
partprobe "${block_db}"
|
||||
fi
|
||||
if [ ! -z "$BLOCK_WAL" ] && [ "$BLOCK_WAL" != "$BLOCK_DB" ]; then
|
||||
osd_devices="${osd_devices}\|${BLOCK_WAL}"
|
||||
# BLOCK_WAL could be a physical or logical device here
|
||||
local block_wal="$BLOCK_WAL"
|
||||
local wal_vg="$(echo $block_wal | cut -d'/' -f1)"
|
||||
if [ ! -z "$wal_vg" ]; then
|
||||
block_wal=$(pvdisplay -ddd -v | grep -B1 "$wal_vg" | awk '/PV Name/{print $3}')
|
||||
fi
|
||||
partprobe "${block_wal}"
|
||||
fi
|
||||
else
|
||||
if [ "x$JOURNAL_TYPE" == "xblock-logical" ] && [ ! -z "$OSD_JOURNAL" ]; then
|
||||
OSD_JOURNAL=$(readlink -f ${OSD_JOURNAL})
|
||||
if [ ! -z "$OSD_JOURNAL" ]; then
|
||||
local JDEV=$(echo ${OSD_JOURNAL} | sed 's/[0-9]//g')
|
||||
osd_devices="${osd_devices}\|${JDEV}"
|
||||
partprobe "${JDEV}"
|
||||
wait_for_device "${JDEV}"
|
||||
fi
|
||||
fi
|
||||
fi
|
||||
|
||||
# On occassion udev may not make the correct device symlinks for Ceph, just in case we make them manually
|
||||
mkdir -p /dev/disk/by-partuuid
|
||||
for dev in $(awk '!/rbd/{print $4}' /proc/partitions | grep "${osd_devices}" | grep "[0-9]"); do
|
||||
diskdev=$(echo "${dev//[!a-z]/}")
|
||||
partnum=$(echo "${dev//[!0-9]/}")
|
||||
symlink="/dev/disk/by-partuuid/$(sgdisk -i ${partnum} /dev/${diskdev} | awk '/Partition unique GUID/{print tolower($4)}')"
|
||||
if [ ! -e "${symlink}" ]; then
|
||||
ln -s "../../${dev}" "${symlink}"
|
||||
fi
|
||||
done
|
||||
}
|
||||
|
||||
# Helper function to get a logical volume from a physical volume
|
||||
function get_lv_from_device {
|
||||
device="$1"
|
||||
|
||||
pvdisplay -ddd -v -m ${device} | awk '/Logical volume/{print $3}'
|
||||
}
|
||||
|
||||
# Helper function to get an lvm tag from a logical volume
|
||||
function get_lvm_tag_from_volume {
|
||||
logical_volume="$1"
|
||||
tag="$2"
|
||||
|
||||
if [[ "$#" -lt 2 ]] || [[ -z "${logical_volume}" ]]; then
|
||||
# Return an empty string if the logical volume doesn't exist
|
||||
echo
|
||||
else
|
||||
# Get and return the specified tag from the logical volume
|
||||
lvs -o lv_tags ${logical_volume} | tr ',' '\n' | grep ${tag} | cut -d'=' -f2
|
||||
fi
|
||||
}
|
||||
|
||||
# Helper function to get an lvm tag from a physical device
|
||||
function get_lvm_tag_from_device {
|
||||
device="$1"
|
||||
tag="$2"
|
||||
# Attempt to get a logical volume for the physical device
|
||||
logical_volume="$(get_lv_from_device ${device})"
|
||||
|
||||
# Use get_lvm_tag_from_volume to get the specified tag from the logical volume
|
||||
get_lvm_tag_from_volume ${logical_volume} ${tag}
|
||||
}

# Helper function to get the size of a logical volume
function get_lv_size_from_device {
  device="$1"
  logical_volume="$(get_lv_from_device ${device})"

  lvs ${logical_volume} -o LV_SIZE --noheadings --units k --nosuffix | xargs | cut -d'.' -f1
}

# Helper function to get the crush weight for an osd device
function get_osd_crush_weight_from_device {
  device="$1"
  lv_size="$(get_lv_size_from_device ${device})" # KiB

  if [[ ! -z "${BLOCK_DB_SIZE}" ]]; then
    db_size=$(echo "${BLOCK_DB_SIZE}" | cut -d'B' -f1 | numfmt --from=iec | awk '{print $1/1024}') # KiB
    lv_size=$((lv_size+db_size)) # KiB
  fi

  echo ${lv_size} | awk '{printf("%.2f\n", $1/1073741824)}' # KiB to TiB
}
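
# Worked example for the conversion above (hypothetical sizes): a 4 TiB data
# LV reports lv_size=4294967296 KiB; with BLOCK_DB_SIZE=64GB, numfmt yields
# 68719476736 bytes, i.e. db_size=67108864 KiB. Dividing the sum by
# 1073741824 (KiB per TiB) gives the crush weight:
#
#   echo $((4294967296 + 67108864)) | awk '{printf("%.2f\n", $1/1073741824)}'
#   # => 4.06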

# Helper function to get a cluster FSID from a physical device
function get_cluster_fsid_from_device {
  device="$1"

  # Use get_lvm_tag_from_device to get the cluster FSID from the device
  get_lvm_tag_from_device ${device} ceph.cluster_fsid
}

# Helper function to get an OSD ID from a logical volume
function get_osd_id_from_volume {
  logical_volume="$1"

  # Use get_lvm_tag_from_volume to get the OSD ID from the logical volume
  get_lvm_tag_from_volume ${logical_volume} ceph.osd_id
}

# Helper function to get an OSD ID from a physical device
function get_osd_id_from_device {
  device="$1"

  # Use get_lvm_tag_from_device to get the OSD ID from the device
  get_lvm_tag_from_device ${device} ceph.osd_id
}

# Helper function to get an OSD FSID from a physical device
function get_osd_fsid_from_device {
  device="$1"

  # Use get_lvm_tag_from_device to get the OSD FSID from the device
  get_lvm_tag_from_device ${device} ceph.osd_fsid
}

# Helper function to get an OSD DB device from a physical device
function get_osd_db_device_from_device {
  device="$1"

  # Use get_lvm_tag_from_device to get the OSD DB device from the device
  get_lvm_tag_from_device ${device} ceph.db_device
}

# Helper function to get an OSD WAL device from a physical device
function get_osd_wal_device_from_device {
  device="$1"

  # Use get_lvm_tag_from_device to get the OSD WAL device from the device
  get_lvm_tag_from_device ${device} ceph.wal_device
}

function get_block_uuid_from_device {
  device="$1"

  get_lvm_tag_from_device ${device} ceph.block_uuid
}

function get_dm_devices_from_osd_device {
  device="$1"
  pv_uuid=$(pvdisplay -ddd -v ${device} | awk '/PV UUID/{print $3}')

  # Return the list of dm devices that belong to the osd
  if [[ "${pv_uuid}" ]]; then
    dmsetup ls | grep "$(echo "${pv_uuid}" | sed 's/-/--/g')" | awk '{print $1}'
  fi
}
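
# Note on the sed above: device-mapper escapes every '-' in a VG or LV name
# by doubling it, so a PV UUID such as ab12-cd34 (hypothetical) appears in
# `dmsetup ls` output inside a name like
# ceph--vg--ab12--cd34-ceph--lv--ab12--cd34. Doubling the dashes in the UUID
# before grepping matches that encoding:
#
#   echo "ab12-cd34" | sed 's/-/--/g'   # => ab12--cd34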

function get_lv_paths_from_osd_device {
  device="$1"
  pv_uuid=$(pvdisplay -ddd -v ${device} | awk '/PV UUID/{print $3}')

  # Return the list of lvs that belong to the osd
  if [[ "${pv_uuid}" ]]; then
    lvdisplay | grep "LV Path" | grep "${pv_uuid}" | awk '{print $3}'
  fi
}

function get_vg_name_from_device {
  device="$1"
  pv_uuid=$(pvdisplay -ddd -v ${device} | awk '/PV UUID/{print $3}')

  if [[ "${pv_uuid}" ]]; then
    echo "ceph-vg-${pv_uuid}"
  fi
}

function get_lv_name_from_device {
  device="$1"
  device_type="$2"
  pv_uuid=$(pvdisplay -ddd -v ${device} | awk '/PV UUID/{print $3}')

  if [[ "${pv_uuid}" ]]; then
    echo "ceph-${device_type}-${pv_uuid}"
  fi
}

function set_device_class {
  if [ ! -z "$DEVICE_CLASS" ]; then
    if [ "x$DEVICE_CLASS" != "x$(get_device_class)" ]; then
      ceph_cmd_retry --cluster "${CLUSTER}" --name="osd.${OSD_ID}" --keyring="${OSD_KEYRING}" \
        osd crush rm-device-class "osd.${OSD_ID}"
      ceph_cmd_retry --cluster "${CLUSTER}" --name="osd.${OSD_ID}" --keyring="${OSD_KEYRING}" \
        osd crush set-device-class "${DEVICE_CLASS}" "osd.${OSD_ID}"
    fi
  fi
}

function get_device_class {
  echo $(ceph_cmd_retry --cluster "${CLUSTER}" --name="osd.${OSD_ID}" --keyring="${OSD_KEYRING}" \
    osd crush get-device-class "osd.${OSD_ID}")
}
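
# Example: set_device_class above reclassifies an OSD only when the desired
# class differs from the current one. The equivalent manual commands, for a
# hypothetical osd.3 being moved to the "ssd" class, would be:
#
#   ceph osd crush rm-device-class osd.3
#   ceph osd crush set-device-class ssd osd.3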
@ -0,0 +1,214 @@
#!/bin/bash

{{/*
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at

  http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/}}

set -ex

# We do not want to zap the journal disk. Track this option separately.
: "${JOURNAL_FORCE_ZAP:=0}"

export OSD_DEVICE=$(readlink -f ${STORAGE_LOCATION})
export OSD_BLUESTORE=0

if [ "x$JOURNAL_TYPE" == "xdirectory" ]; then
  export OSD_JOURNAL="/var/lib/ceph/journal"
else
  export OSD_JOURNAL=$(readlink -f ${JOURNAL_LOCATION})
fi

# Check OSD FSID and journalling metadata.
# Returns 1 if the disk should be zapped; 0 otherwise.
function check_osd_metadata {
  local ceph_fsid=$1
  retcode=0
  local tmpmnt=$(mktemp -d)
  mount ${DM_DEV} ${tmpmnt}

  if [ "x${JOURNAL_TYPE}" != "xdirectory" ]; then
    if [ -f "${tmpmnt}/whoami" ]; then
      OSD_JOURNAL_DISK=$(readlink -f "${tmpmnt}/journal")
      local osd_id=$(cat "${tmpmnt}/whoami")
      if [ ! -b "${OSD_JOURNAL_DISK}" ]; then
        OSD_JOURNAL=$(readlink -f ${OSD_JOURNAL})
        local jdev=$(echo ${OSD_JOURNAL} | sed 's/[0-9]//g')
        if [ ${jdev} == ${OSD_JOURNAL} ]; then
          echo "OSD Init: It appears that ${OSD_DEVICE} is missing the journal at ${OSD_JOURNAL}."
          echo "OSD Init: Because OSD_FORCE_REPAIR is set, we will wipe the metadata of the OSD and zap it."
          rm -rf ${tmpmnt}/ceph_fsid
        else
          echo "OSD Init: It appears that ${OSD_DEVICE} is missing the journal at ${OSD_JOURNAL_DISK}."
          echo "OSD Init: Because OSD_FORCE_REPAIR is set and partitions are manually defined, we will"
          echo "OSD Init: attempt to recreate the missing journal device partitions."
          osd_journal_create ${OSD_JOURNAL}
          ln -sf /dev/disk/by-partuuid/${OSD_JOURNAL_UUID} ${tmpmnt}/journal
          echo ${OSD_JOURNAL_UUID} | tee ${tmpmnt}/journal_uuid
          chown ceph. ${OSD_JOURNAL}
          # During OSD start we will format the journal and set the fsid
          touch ${tmpmnt}/run_mkjournal
        fi
      fi
    else
      echo "OSD Init: It looks like ${OSD_DEVICE} has a ceph data partition but is missing its metadata."
      echo "OSD Init: The device may contain inconsistent metadata or be corrupted."
      echo "OSD Init: Because OSD_FORCE_REPAIR is set, we will wipe the metadata of the OSD and zap it."
      rm -rf ${tmpmnt}/ceph_fsid
    fi
  fi

  if [ -f "${tmpmnt}/ceph_fsid" ]; then
    local osd_fsid=$(cat "${tmpmnt}/ceph_fsid")

    if [ ${osd_fsid} != ${ceph_fsid} ]; then
      echo "OSD Init: ${OSD_DEVICE} is an OSD belonging to a different (or old) ceph cluster."
      echo "OSD Init: The OSD FSID is ${osd_fsid} while this cluster is ${ceph_fsid}"
      echo "OSD Init: Because OSD_FORCE_REPAIR was set, we will zap this device."
      ZAP_EXTRA_PARTITIONS=${tmpmnt}
      retcode=1
    else
      echo "It looks like ${OSD_DEVICE} is an OSD belonging to this ceph cluster."
      echo "OSD_FORCE_REPAIR is set, but it will be ignored and the device will not be zapped."
      echo "Moving on, trying to activate the OSD now."
    fi
  else
    echo "OSD Init: ${OSD_DEVICE} has a ceph data partition but no FSID."
    echo "OSD Init: Because OSD_FORCE_REPAIR was set, we will zap this device."
    ZAP_EXTRA_PARTITIONS=${tmpmnt}
    retcode=1
  fi
  umount ${tmpmnt}
  return ${retcode}
}

function determine_what_needs_zapping {

  if [[ ! -z ${OSD_ID} ]]; then
    local dm_num=$(dmsetup ls | grep $(lsblk -J ${OSD_DEVICE} | jq -r '.blockdevices[].children[].name') | awk '{print $2}' | cut -d':' -f2 | cut -d')' -f1)
    DM_DEV="/dev/dm-"${dm_num}
  elif [[ $(sgdisk --print ${OSD_DEVICE} | grep "F800") ]]; then
    # ceph-disk was used to initialize the disk, but this is no longer supported
    echo "OSD Init: ceph-disk was used to initialize the disk, but this is no longer supported"
    exit 1
  else
    if [[ ${OSD_FORCE_REPAIR} -eq 1 ]]; then
      echo "OSD Init: It looks like ${OSD_DEVICE} isn't consistent, however OSD_FORCE_REPAIR is enabled so we are zapping the device anyway"
      ZAP_DEVICE=1
    else
      echo "OSD Init: According to parted, device ${OSD_DEVICE} is inconsistent/broken/weird."
      echo "OSD Init: It would be too dangerous to destroy it without any notification."
      echo "OSD Init: Please set OSD_FORCE_REPAIR to '1' if you really want to zap this disk."
      exit 1
    fi
  fi

  if [ ${OSD_FORCE_REPAIR} -eq 1 ] && [ ! -z ${DM_DEV} ]; then
    if [ -b ${DM_DEV} ]; then
      local ceph_fsid=$(ceph-conf --lookup fsid)
      if [ ! -z "${ceph_fsid}" ]; then
        # Check the OSD metadata and zap the disk if necessary
        # (check_osd_metadata returns 1 when the disk should be zapped)
        if ! check_osd_metadata ${ceph_fsid}; then
          echo "OSD Init: ${OSD_DEVICE} needs to be zapped..."
          ZAP_DEVICE=1
        fi
      else
        echo "Unable to determine the FSID of the current cluster."
        echo "OSD_FORCE_REPAIR is set, but this OSD will not be zapped."
        echo "Moving on, trying to activate the OSD now."
      fi
    else
      echo "parted says ${DM_DEV} should exist, but we do not see it."
      echo "We will ignore OSD_FORCE_REPAIR and try to use the device as-is"
      echo "Moving on, trying to activate the OSD now."
    fi
  else
    echo "INFO- It looks like ${OSD_DEVICE} is an OSD LVM"
    echo "Moving on, trying to prepare and activate the OSD LVM now."
  fi
}

function osd_journal_create {
  local osd_journal=${1}
  local osd_journal_partition=$(echo ${osd_journal} | sed 's/[^0-9]//g')
  local jdev=$(echo ${osd_journal} | sed 's/[0-9]//g')
  if [ -b "${jdev}" ]; then
    sgdisk --new=${osd_journal_partition}:0:+${OSD_JOURNAL_SIZE}M \
      --change-name="${osd_journal_partition}:ceph journal" \
      --partition-guid=${osd_journal_partition}:${OSD_JOURNAL_UUID} \
      --typecode=${osd_journal_partition}:45b0969e-9b03-4f30-b4c6-b4b80ceff106 --mbrtogpt -- ${jdev}
    OSD_JOURNAL=$(dev_part ${jdev} ${osd_journal_partition})
    udev_settle
  else
    echo "OSD Init: The backing device ${jdev} for ${OSD_JOURNAL} does not exist on this system."
    exit 1
  fi
}
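
# Example of the device/partition split used above: stripping digits yields
# the backing disk and stripping non-digits yields the partition number, so
# for a hypothetical OSD_JOURNAL=/dev/sdc2:
#
#   echo /dev/sdc2 | sed 's/[0-9]//g'    # => /dev/sdc  (jdev)
#   echo /dev/sdc2 | sed 's/[^0-9]//g'   # => 2         (partition number)
#
# Note this simple pattern assumes short device names; NVMe-style names such
# as /dev/nvme0n1p2 contain digits and would not split cleanly.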

function osd_journal_prepare {
  if [ -n "${OSD_JOURNAL}" ]; then
    if [ -b ${OSD_JOURNAL} ]; then
      OSD_JOURNAL=$(readlink -f ${OSD_JOURNAL})
      OSD_JOURNAL_PARTITION=$(echo ${OSD_JOURNAL} | sed 's/[^0-9]//g')
      local jdev=$(echo ${OSD_JOURNAL} | sed 's/[0-9]//g')
      if [ -z "${OSD_JOURNAL_PARTITION}" ]; then
        OSD_JOURNAL=$(dev_part ${jdev} ${OSD_JOURNAL_PARTITION})
      else
        OSD_JOURNAL=${OSD_JOURNAL}
      fi
    elif [ "x${JOURNAL_TYPE}" != "xdirectory" ]; then
      # The block device exists but doesn't appear to be partitioned; proceed with partitioning the device.
      OSD_JOURNAL=$(readlink -f ${OSD_JOURNAL})
      until [ -b ${OSD_JOURNAL} ]; do
        osd_journal_create ${OSD_JOURNAL}
      done
    fi
    chown ceph. ${OSD_JOURNAL};
  elif [ "x${JOURNAL_TYPE}" != "xdirectory" ]; then
    echo "No journal device specified. OSD and journal will share ${OSD_DEVICE}"
    echo "For better performance on HDD, consider moving your journal to a separate device"
  fi
  CLI_OPTS="${CLI_OPTS} --filestore"
}

function osd_disk_prepare {

  if [[ ${CEPH_LVM_PREPARE} -eq 1 ]] || [[ ${DISK_ZAPPED} -eq 1 ]]; then
    udev_settle
    RESULTING_VG=""; RESULTING_LV="";
    create_vg_if_needed "${OSD_DEVICE}"
    create_lv_if_needed "${OSD_DEVICE}" "${RESULTING_VG}" "--yes -l 100%FREE"

    CLI_OPTS="${CLI_OPTS} --data ${RESULTING_LV}"
    CEPH_LVM_PREPARE=1
    udev_settle
  fi
  if pvdisplay -ddd -v ${OSD_DEVICE} | awk '/VG Name/{print $3}' | grep "ceph"; then
    echo "OSD Init: Device is already set up. LVM prepare does not need to be called."
    CEPH_LVM_PREPARE=0
  fi

  osd_journal_prepare
  CLI_OPTS="${CLI_OPTS} --data ${OSD_DEVICE} --journal ${OSD_JOURNAL}"
  udev_settle

  if [ ! -z "$DEVICE_CLASS" ]; then
    CLI_OPTS="${CLI_OPTS} --crush-device-class ${DEVICE_CLASS}"
  fi

  if [[ ${CEPH_LVM_PREPARE} -eq 1 ]]; then
    echo "OSD Init: Calling ceph-volume lvm -v prepare ${CLI_OPTS}"
    ceph-volume lvm -v prepare ${CLI_OPTS}
    udev_settle
  fi
}

@ -0,0 +1,176 @@
#!/bin/bash

{{/*
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at

  http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/}}

set -ex

export OSD_DEVICE=$(readlink -f ${STORAGE_LOCATION})
export OSD_BLUESTORE=1
alias prep_device='locked prep_device'

function check_block_device_for_zap {
  local block_device=$1
  local device_type=$2

  if [[ ${block_device} ]]; then
    local vg_name=$(get_vg_name_from_device ${block_device})
    local lv_name=$(get_lv_name_from_device ${OSD_DEVICE} ${device_type})
    local vg=$(vgs --noheadings -o vg_name -S "vg_name=${vg_name}" | tr -d '[:space:]')
    if [[ "${vg}" ]]; then
      local device_osd_id=$(get_osd_id_from_volume "/dev/${vg_name}/${lv_name}")
      CEPH_LVM_PREPARE=1
      if [[ -n "${device_osd_id}" ]] && [[ -n "${OSD_ID}" ]]; then
        if [[ "${device_osd_id}" == "${OSD_ID}" ]]; then
          echo "OSD Init: OSD ID matches the OSD ID already on the data volume. LVM prepare does not need to be called."
          CEPH_LVM_PREPARE=0
        else
          echo "OSD Init: OSD ID does not match the OSD ID on the data volume. Device needs to be zapped."
          ZAP_DEVICE=1
        fi
      fi

      # Check if this device (db or wal) has no associated data volume
      local logical_volumes="$(lvs --noheadings -o lv_name ${vg} | xargs)"
      for volume in ${logical_volumes}; do
        local data_volume=$(echo ${volume} | sed -E -e 's/-db-|-wal-/-lv-/g')
        if [[ -z $(lvs --noheadings -o lv_name -S "lv_name=${data_volume}") ]]; then
          # DB or WAL volume without a corresponding data volume, remove it
          lvremove -y /dev/${vg}/${volume}
          echo "OSD Init: LV /dev/${vg}/${volume} was removed as it did not have a data volume."
        fi
      done
    else
      if [[ "${vg_name}" ]]; then
        local logical_devices=$(get_dm_devices_from_osd_device "${OSD_DEVICE}")
        local device_filter=$(echo "${vg_name}" | sed 's/-/--/g')
        logical_devices=$(echo "${logical_devices}" | grep "${device_filter}" | xargs)
        if [[ "$logical_devices" ]]; then
          echo "OSD Init: No VG resources found with name ${vg_name}. Device needs to be zapped."
          ZAP_DEVICE=1
        fi
      fi
    fi
  fi
}
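
# Example of the orphan check above: a DB or WAL volume is considered
# orphaned when no data volume shares its PV UUID suffix. For a hypothetical
# volume ceph-db-ab12-cd34, the expected data volume name is derived with:
#
#   echo "ceph-db-ab12-cd34" | sed -E -e 's/-db-|-wal-/-lv-/g'
#   # => ceph-lv-ab12-cd34
#
# If no LV by that name exists, the db/wal volume is removed.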

function determine_what_needs_zapping {

  local osd_fsid=$(get_cluster_fsid_from_device ${OSD_DEVICE})
  local cluster_fsid=$(ceph-conf --lookup fsid)

  # If the OSD FSID is defined within the device, check if we're already bootstrapped.
  if [[ ! -z "${osd_fsid}" ]]; then
    # Check if the OSD FSID is the same as the cluster FSID. If so, then we're
    # already bootstrapped; otherwise, this is an old disk and needs to
    # be zapped.
    if [[ "${osd_fsid}" == "${cluster_fsid}" ]]; then
      if [[ ! -z "${OSD_ID}" ]]; then
        # Check to see what needs to be done to prepare the disk. If the OSD
        # ID is in the Ceph OSD list, then LVM prepare does not need to be done.
        if ceph --name client.bootstrap-osd --keyring $OSD_BOOTSTRAP_KEYRING osd ls | grep -w ${OSD_ID}; then
          echo "OSD Init: Running bluestore mode and ${OSD_DEVICE} already bootstrapped. LVM prepare does not need to be called."
          CEPH_LVM_PREPARE=0
        elif [[ ${OSD_FORCE_REPAIR} -eq 1 ]]; then
          echo "OSD initialized for this cluster, but OSD ID not found in the cluster, reinitializing"
          ZAP_DEVICE=1
        else
          echo "OSD initialized for this cluster, but OSD ID not found in the cluster, repair manually"
        fi
      fi
    else
      echo "OSD Init: OSD FSID ${osd_fsid} initialized for a different cluster. It needs to be zapped."
      ZAP_DEVICE=1
    fi
  elif [[ $(sgdisk --print ${OSD_DEVICE} | grep "F800") ]]; then
    # ceph-disk was used to initialize the disk, but this is no longer supported
    echo "ceph-disk was used to initialize the disk, but this is no longer supported"
    exit 1
  fi

  check_block_device_for_zap "${BLOCK_DB}" db
  check_block_device_for_zap "${BLOCK_WAL}" wal

  # Zapping extra partitions isn't done for bluestore
  ZAP_EXTRA_PARTITIONS=0
}

function prep_device {
  local block_device=$1
  local block_device_size=$2
  local device_type=$3
  local vg_name lv_name vg device_osd_id logical_devices logical_volume
  RESULTING_VG=""; RESULTING_LV="";

  udev_settle
  vg_name=$(get_vg_name_from_device ${block_device})
  lv_name=$(get_lv_name_from_device ${OSD_DEVICE} ${device_type})
  vg=$(vgs --noheadings -o vg_name -S "vg_name=${vg_name}" | tr -d '[:space:]')
  if [[ -z "${vg}" ]]; then
    create_vg_if_needed "${block_device}"
    vg=${RESULTING_VG}
  fi
  udev_settle

  create_lv_if_needed "${block_device}" "${vg}" "--yes -L ${block_device_size}" "${lv_name}"
  if [[ "${device_type}" == "db" ]]; then
    BLOCK_DB=${RESULTING_LV}
  elif [[ "${device_type}" == "wal" ]]; then
    BLOCK_WAL=${RESULTING_LV}
  fi
  udev_settle
}
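
# Sketch of a typical prep_device call (hypothetical values): carving a
# 32GB DB LV for this OSD out of a shared fast device.
#
#   BLOCK_DB=/dev/nvme0n1
#   prep_device "${BLOCK_DB}" "32GB" "db" "${OSD_DEVICE}"
#   # BLOCK_DB now points at the resulting LV, i.e. ceph-vg-<uuid>/ceph-db-<uuid>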

function osd_disk_prepare {

  if [[ ${CEPH_LVM_PREPARE} -eq 1 ]] || [[ ${DISK_ZAPPED} -eq 1 ]]; then
    udev_settle
    RESULTING_VG=""; RESULTING_LV="";
    create_vg_if_needed "${OSD_DEVICE}"
    create_lv_if_needed "${OSD_DEVICE}" "${RESULTING_VG}" "--yes -l 100%FREE"

    CLI_OPTS="${CLI_OPTS} --data ${RESULTING_LV}"
    CEPH_LVM_PREPARE=1
    udev_settle
  fi

  if [[ ${BLOCK_DB} && ${BLOCK_WAL} ]]; then
    prep_device "${BLOCK_DB}" "${BLOCK_DB_SIZE}" "db" "${OSD_DEVICE}"
    prep_device "${BLOCK_WAL}" "${BLOCK_WAL_SIZE}" "wal" "${OSD_DEVICE}"
  elif [[ -z ${BLOCK_DB} && ${BLOCK_WAL} ]]; then
    prep_device "${BLOCK_WAL}" "${BLOCK_WAL_SIZE}" "wal" "${OSD_DEVICE}"
  elif [[ ${BLOCK_DB} && -z ${BLOCK_WAL} ]]; then
    prep_device "${BLOCK_DB}" "${BLOCK_DB_SIZE}" "db" "${OSD_DEVICE}"
  fi

  CLI_OPTS="${CLI_OPTS} --bluestore"

  if [ ! -z "$BLOCK_DB" ]; then
    CLI_OPTS="${CLI_OPTS} --block.db ${BLOCK_DB}"
  fi

  if [ ! -z "$BLOCK_WAL" ]; then
    CLI_OPTS="${CLI_OPTS} --block.wal ${BLOCK_WAL}"
  fi

  if [ ! -z "$DEVICE_CLASS" ]; then
    CLI_OPTS="${CLI_OPTS} --crush-device-class ${DEVICE_CLASS}"
  fi

  if [[ ${CEPH_LVM_PREPARE} -eq 1 ]]; then
    echo "OSD Init: Calling ceph-volume lvm -v prepare ${CLI_OPTS}"
    ceph-volume lvm -v prepare ${CLI_OPTS}
    udev_settle
  fi
}
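
# For reference, with both a DB and a WAL device configured the prepare call
# assembled above expands to something like (hypothetical volume names):
#
#   ceph-volume lvm -v prepare --bluestore \
#     --data ceph-vg-<uuid>/ceph-lv-<uuid> \
#     --block.db ceph-vg-<uuid>/ceph-db-<uuid> \
#     --block.wal ceph-vg-<uuid>/ceph-wal-<uuid> \
#     --crush-device-class ssd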
@ -0,0 +1,23 @@
#!/bin/bash

{{/*
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at

  http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/}}

set -ex

# We do not want to zap the journal disk. Track this option separately.
: "${JOURNAL_FORCE_ZAP:=0}"

export OSD_DEVICE="/var/lib/ceph/osd"
export OSD_JOURNAL="/var/lib/ceph/journal"
@ -0,0 +1,268 @@
#!/bin/bash

{{/*
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at

  http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/}}

set -ex

: "${OSD_FORCE_REPAIR:=0}"

source /tmp/osd-common-ceph-volume.sh

source /tmp/init-ceph-volume-helper-${STORAGE_TYPE}.sh


# Set up aliases for functions that require disk synchronization
alias rename_vg='locked rename_vg'
alias rename_lvs='locked rename_lvs'
alias update_lv_tags='locked update_lv_tags'

# Renames a single VG if necessary
function rename_vg {
  local physical_disk=$1
  local old_vg_name=$(pvdisplay -ddd -v ${physical_disk} | awk '/VG Name/{print $3}')
  local vg_name=$(get_vg_name_from_device ${physical_disk})

  if [[ "${old_vg_name}" ]] && [[ "${vg_name}" != "${old_vg_name}" ]]; then
    vgrename ${old_vg_name} ${vg_name}
    echo "OSD Init: Renamed volume group ${old_vg_name} to ${vg_name}."
  fi
}

# Renames all LVs associated with an OSD as necessary
function rename_lvs {
  local data_disk=$1
  local vg_name=$(pvdisplay -ddd -v ${data_disk} | awk '/VG Name/{print $3}')

  if [[ "${vg_name}" ]]; then
    # Rename the OSD volume if necessary
    local old_lv_name=$(lvdisplay ${vg_name} | awk '/LV Name/{print $3}')
    local lv_name=$(get_lv_name_from_device ${data_disk} lv)

    if [[ "${old_lv_name}" ]] && [[ "${lv_name}" != "${old_lv_name}" ]]; then
      lvrename ${vg_name} ${old_lv_name} ${lv_name}
      echo "OSD Init: Renamed logical volume ${old_lv_name} (from group ${vg_name}) to ${lv_name}."
    fi

    # Rename the OSD's block.db volume if necessary, referenced by UUID
    local lv_tag=$(get_lvm_tag_from_device ${data_disk} ceph.db_uuid)

    if [[ "${lv_tag}" ]]; then
      local lv_device=$(lvdisplay | grep -B4 "${lv_tag}" | awk '/LV Path/{print $3}')

      if [[ "${lv_device}" ]]; then
        local db_vg=$(echo ${lv_device} | awk -F "/" '{print $3}')
        old_lv_name=$(echo ${lv_device} | awk -F "/" '{print $4}')
        local db_name=$(get_lv_name_from_device ${data_disk} db)

        if [[ "${old_lv_name}" ]] && [[ "${db_name}" != "${old_lv_name}" ]]; then
          lvrename ${db_vg} ${old_lv_name} ${db_name}
          echo "OSD Init: Renamed DB logical volume ${old_lv_name} (from group ${db_vg}) to ${db_name}."
        fi
      fi
    fi

    # Rename the OSD's WAL volume if necessary, referenced by UUID
    lv_tag=$(get_lvm_tag_from_device ${data_disk} ceph.wal_uuid)

    if [[ "${lv_tag}" ]]; then
      local lv_device=$(lvdisplay | grep -B4 "${lv_tag}" | awk '/LV Path/{print $3}')

      if [[ "${lv_device}" ]]; then
        local wal_vg=$(echo ${lv_device} | awk -F "/" '{print $3}')
        old_lv_name=$(echo ${lv_device} | awk -F "/" '{print $4}')
        local wal_name=$(get_lv_name_from_device ${data_disk} wal)

        if [[ "${old_lv_name}" ]] && [[ "${wal_name}" != "${old_lv_name}" ]]; then
          lvrename ${wal_vg} ${old_lv_name} ${wal_name}
          echo "OSD Init: Renamed WAL logical volume ${old_lv_name} (from group ${wal_vg}) to ${wal_name}."
        fi
      fi
    fi
  fi
}

# Fixes up the tags that reference block, db, and wal logical volumes.
# NOTE: This updates tags based on current VG and LV names, so any necessary
# renaming should be completed prior to calling this.
function update_lv_tags {
  local data_disk=$1
  local pv_uuid=$(pvdisplay -ddd -v ${data_disk} | awk '/PV UUID/{print $3}')

  if [[ "${pv_uuid}" ]]; then
    local volumes="$(lvs --noheadings | grep -e "${pv_uuid}")"
    local block_device db_device wal_device vg_name
    local old_block_device old_db_device old_wal_device

    # Build OSD device paths from current VG and LV names
    while read lv vg other_stuff; do
      if [[ "${lv}" == "$(get_lv_name_from_device ${data_disk} lv)" ]]; then
        block_device="/dev/${vg}/${lv}"
        old_block_device=$(get_lvm_tag_from_volume ${block_device} ceph.block_device)
      fi
      if [[ "${lv}" == "$(get_lv_name_from_device ${data_disk} db)" ]]; then
        db_device="/dev/${vg}/${lv}"
        old_db_device=$(get_lvm_tag_from_volume ${block_device} ceph.db_device)
      fi
      if [[ "${lv}" == "$(get_lv_name_from_device ${data_disk} wal)" ]]; then
        wal_device="/dev/${vg}/${lv}"
        old_wal_device=$(get_lvm_tag_from_volume ${block_device} ceph.wal_device)
      fi
    done <<< ${volumes}

    # Set new tags on all of the volumes using paths built above
    while read lv vg other_stuff; do
      if [[ "${block_device}" ]]; then
        if [[ "${old_block_device}" ]]; then
          lvchange --deltag "ceph.block_device=${old_block_device}" /dev/${vg}/${lv}
        fi
        lvchange --addtag "ceph.block_device=${block_device}" /dev/${vg}/${lv}
        echo "OSD Init: Updated lv tags for data volume ${block_device}."
      fi
      if [[ "${db_device}" ]]; then
        if [[ "${old_db_device}" ]]; then
          lvchange --deltag "ceph.db_device=${old_db_device}" /dev/${vg}/${lv}
        fi
        lvchange --addtag "ceph.db_device=${db_device}" /dev/${vg}/${lv}
        echo "OSD Init: Updated lv tags for DB volume ${db_device}."
      fi
      if [[ "${wal_device}" ]]; then
        if [[ "${old_wal_device}" ]]; then
          lvchange --deltag "ceph.wal_device=${old_wal_device}" /dev/${vg}/${lv}
        fi
        lvchange --addtag "ceph.wal_device=${wal_device}" /dev/${vg}/${lv}
        echo "OSD Init: Updated lv tags for WAL volume ${wal_device}."
      fi
    done <<< ${volumes}
  fi
}
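
# Example of the tag surgery above (hypothetical names): replacing a stale
# ceph.db_device tag on one volume of the OSD after a rename.
#
#   lvchange --deltag "ceph.db_device=/dev/old-vg/old-db" /dev/ceph-vg-x/ceph-lv-x
#   lvchange --addtag "ceph.db_device=/dev/ceph-vg-y/ceph-db-y" /dev/ceph-vg-x/ceph-lv-x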

function create_vg_if_needed {
  local bl_device=$1
  local vg_name=$(get_vg_name_from_device ${bl_device})
  if [[ -z "${vg_name}" ]]; then
    local random_uuid=$(uuidgen)
    vgcreate ceph-vg-${random_uuid} ${bl_device}
    vg_name=$(get_vg_name_from_device ${bl_device})
    vgrename ceph-vg-${random_uuid} ${vg_name}
    echo "OSD Init: Created volume group ${vg_name} for device ${bl_device}."
  fi
  RESULTING_VG=${vg_name}
}

function create_lv_if_needed {
  local bl_device=$1
  local vg_name=$2
  local options=$3
  local lv_name=${4:-$(get_lv_name_from_device ${bl_device} lv)}

  if [[ ! "$(lvdisplay | awk '/LV Name/{print $3}' | grep ${lv_name})" ]]; then
    lvcreate ${options} -n ${lv_name} ${vg_name}
    echo "OSD Init: Created logical volume ${lv_name} in group ${vg_name} for device ${bl_device}."
  fi
  RESULTING_LV=${vg_name}/${lv_name}
}
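
# Example: the two helpers above are typically called back to back. For a
# hypothetical blank device /dev/sdd this creates ceph-vg-<pv-uuid> and a
# data LV filling it:
#
#   RESULTING_VG=""; RESULTING_LV=""
#   create_vg_if_needed "/dev/sdd"
#   create_lv_if_needed "/dev/sdd" "${RESULTING_VG}" "--yes -l 100%FREE"
#   echo "${RESULTING_LV}"   # => ceph-vg-<pv-uuid>/ceph-lv-<pv-uuid>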

function osd_disk_prechecks {
  if [[ -z "${OSD_DEVICE}" ]]; then
    echo "ERROR- You must provide a device to build your OSD, e.g. /dev/sdb"
    exit 1
  fi

  if [[ ! -b "${OSD_DEVICE}" ]]; then
    echo "ERROR- The device pointed to by OSD_DEVICE (${OSD_DEVICE}) doesn't exist!"
    exit 1
  fi

  if [ ! -e ${OSD_BOOTSTRAP_KEYRING} ]; then
    echo "ERROR- ${OSD_BOOTSTRAP_KEYRING} must exist. You can extract it from your current monitor by running 'ceph auth get client.bootstrap-osd -o ${OSD_BOOTSTRAP_KEYRING}'"
    exit 1
  fi

  timeout 10 ceph --name client.bootstrap-osd --keyring ${OSD_BOOTSTRAP_KEYRING} health || exit 1
}

function perform_zap {
  if [[ ${ZAP_EXTRA_PARTITIONS} != "" ]]; then
    # This is used for filestore (block-logical) OSDs only
    echo "OSD Init: Zapping extra partitions ${ZAP_EXTRA_PARTITIONS}"
    zap_extra_partitions "${ZAP_EXTRA_PARTITIONS}"
  fi
  echo "OSD Init: Zapping device ${OSD_DEVICE}..."
  disk_zap ${OSD_DEVICE}
  DISK_ZAPPED=1
  udev_settle
}


#######################################################################
# Main program
#######################################################################

if [[ "${STORAGE_TYPE}" != "directory" ]]; then

  # Check to make sure we have what we need to continue
  osd_disk_prechecks

  # Settle LVM changes before inspecting volumes
  udev_settle

  # Rename VGs first
  if [[ "${OSD_DEVICE}" ]]; then
    OSD_DEVICE=$(readlink -f ${OSD_DEVICE})
    rename_vg ${OSD_DEVICE}
  fi

  # Rename block DB device VG next
  if [[ "${BLOCK_DB}" ]]; then
    BLOCK_DB=$(readlink -f ${BLOCK_DB})
    rename_vg ${BLOCK_DB}
  fi

  # Rename block WAL device VG next
  if [[ "${BLOCK_WAL}" ]]; then
    BLOCK_WAL=$(readlink -f ${BLOCK_WAL})
    rename_vg ${BLOCK_WAL}
  fi

  # Rename LVs after VGs are correct
  rename_lvs ${OSD_DEVICE}

  # Update tags (all VG and LV names should be correct before calling this)
  update_lv_tags ${OSD_DEVICE}

  # Settle LVM changes again after any changes have been made
  udev_settle

  # Initialize some important global variables
  CEPH_LVM_PREPARE=1
  OSD_ID=$(get_osd_id_from_device ${OSD_DEVICE})
  DISK_ZAPPED=0
  ZAP_DEVICE=0
  ZAP_EXTRA_PARTITIONS=""

  # The disk may need to be zapped or some LVs may need to be deleted before
  # moving on with the disk preparation.
  determine_what_needs_zapping

  if [[ ${ZAP_DEVICE} -eq 1 ]]; then
    perform_zap
  fi

  # Prepare the disk for use
  osd_disk_prepare

  # Clean up resources held by the common script
  common_cleanup
fi
38
ceph-osd/templates/bin/utils/_checkDNS.sh.tpl
Normal file
@ -0,0 +1,38 @@
#!/bin/bash

{{/*
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at

  http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/}}

: "${CEPH_CONF:="/etc/ceph/${CLUSTER}.conf"}"
ENDPOINT="{$1}"

function check_mon_dns () {
  GREP_CMD=$(grep -rl 'ceph-mon' ${CEPH_CONF})

  if [[ "${ENDPOINT}" == "{up}" ]]; then
    echo "If DNS is working, we are good here"
  elif [[ "${ENDPOINT}" != "" ]]; then
    if [[ ${GREP_CMD} != "" ]]; then
      # No DNS, write the Ceph MON IPs into ${CEPH_CONF}
      sh -c -e "cat ${CEPH_CONF}.template | sed 's/mon_host.*/mon_host = ${ENDPOINT}/g' | tee ${CEPH_CONF}" > /dev/null 2>&1
    else
      echo "endpoints are already cached in ${CEPH_CONF}"
      exit
    fi
  fi
}

check_mon_dns

exit
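
# Example invocations (hypothetical script path and monitor addresses): the
# argument is wrapped in braces by the script, so a literal "up" means DNS is
# healthy, while an endpoint list rewrites the mon_host line in ceph.conf
# from the template:
#
#   /tmp/utils-checkDNS.sh up
#   /tmp/utils-checkDNS.sh "192.168.0.10:6789,192.168.0.11:6789"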
41
ceph-osd/templates/bin/utils/_defragOSDs.sh.tpl
Normal file
@ -0,0 +1,41 @@
#!/bin/bash

{{/*
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at

  http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/}}

set -ex

source /tmp/utils-resolveLocations.sh

if [ "x${STORAGE_TYPE%-*}" == "xblock" ]; then
  OSD_DEVICE=$(readlink -f ${STORAGE_LOCATION})
  ODEV=$(echo ${OSD_DEVICE} | sed 's/[0-9]//g' | cut -f 3 -d '/')
  OSD_PATH=$(cat /proc/mounts | awk '/ceph-/{print $2}')
  OSD_STORE=$(cat ${OSD_PATH}/type)
  DATA_PART=$(cat /proc/mounts | awk '/ceph-/{print $1}')

  ODEV_ROTATIONAL=$(cat /sys/block/${ODEV}/queue/rotational)
  # Capture only the currently active scheduler (the bracketed entry)
  ODEV_SCHEDULER=$(sed 's/.*\[\(.*\)\].*/\1/' /sys/block/${ODEV}/queue/scheduler)

  # NOTE(supamatt): TODO implement bluestore defrag options once they are available upstream
  if [ "${ODEV_ROTATIONAL}" -eq "1" ] && [ "x${OSD_STORE}" == "xfilestore" ]; then
    # NOTE(supamatt): Switch to CFQ in order to not block I/O
    echo "cfq" | tee /sys/block/${ODEV}/queue/scheduler || true
    ionice -c 3 xfs_fsr "${OSD_DEVICE}" 2>/dev/null
    # NOTE(supamatt): Switch back to the previous IO scheduler
    echo ${ODEV_SCHEDULER} | tee /sys/block/${ODEV}/queue/scheduler || true
  fi
fi

exit 0
41
ceph-osd/templates/bin/utils/_resolveLocations.sh.tpl
Normal file
@ -0,0 +1,41 @@
#!/bin/bash

{{/*
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at

  http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/}}

set -ex

if [[ "${STORAGE_LOCATION}" ]]; then
  STORAGE_LOCATION=$(ls ${STORAGE_LOCATION})
  if [[ `echo "${STORAGE_LOCATION}" | wc -w` -ge 2 ]]; then
    echo "ERROR- Multiple locations found: ${STORAGE_LOCATION}"
    exit 1
  fi
fi

if [[ "${BLOCK_DB}" ]]; then
  BLOCK_DB=$(ls ${BLOCK_DB})
  if [[ `echo "${BLOCK_DB}" | wc -w` -ge 2 ]]; then
    echo "ERROR- Multiple locations found: ${BLOCK_DB}"
    exit 1
  fi
fi

if [[ "${BLOCK_WAL}" ]]; then
  BLOCK_WAL=$(ls ${BLOCK_WAL})
  if [[ `echo "${BLOCK_WAL}" | wc -w` -ge 2 ]]; then
    echo "ERROR- Multiple locations found: ${BLOCK_WAL}"
    exit 1
  fi
fi
71
ceph-osd/templates/configmap-bin.yaml
Normal file
@ -0,0 +1,71 @@
{{/*
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at

  http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/}}

{{- if .Values.manifests.configmap_bin }}
{{- $envAll := . }}
---
apiVersion: v1
kind: ConfigMap
metadata:
  name: {{ printf "%s-%s" $envAll.Release.Name "bin" | quote }}
data:
{{- if .Values.images.local_registry.active }}
  image-repo-sync.sh: |
{{- include "helm-toolkit.scripts.image_repo_sync" . | indent 4 }}
{{- end }}
{{- if .Values.bootstrap.enabled }}
  bootstrap.sh: |
{{ tuple "bin/_bootstrap.sh.tpl" . | include "helm-toolkit.utils.template" | indent 4 }}
{{- end }}
  post-apply.sh: |
{{ tuple "bin/_post-apply.sh.tpl" . | include "helm-toolkit.utils.template" | indent 4 }}
  osd-start.sh: |
{{ tuple "bin/osd/_start.sh.tpl" . | include "helm-toolkit.utils.template" | indent 4 }}
  log-tail.sh: |
{{ tuple "bin/osd/_log-tail.sh.tpl" . | include "helm-toolkit.utils.template" | indent 4 }}
  osd-directory-ceph-volume.sh: |
{{ tuple "bin/osd/_directory.sh.tpl" . | include "helm-toolkit.utils.template" | indent 4 }}
  osd-block-ceph-volume.sh: |
{{ tuple "bin/osd/ceph-volume/_block.sh.tpl" . | include "helm-toolkit.utils.template" | indent 4 }}
  osd-bluestore-ceph-volume.sh: |
{{ tuple "bin/osd/ceph-volume/_bluestore.sh.tpl" . | include "helm-toolkit.utils.template" | indent 4 }}
  osd-init-ceph-volume-helper-bluestore.sh: |
{{ tuple "bin/osd/ceph-volume/_init-ceph-volume-helper-bluestore.sh.tpl" . | include "helm-toolkit.utils.template" | indent 4 }}
  osd-init-ceph-volume-helper-directory.sh: |
{{ tuple "bin/osd/ceph-volume/_init-ceph-volume-helper-directory.sh.tpl" . | include "helm-toolkit.utils.template" | indent 4 }}
  osd-init-ceph-volume-helper-block-logical.sh: |
{{ tuple "bin/osd/ceph-volume/_init-ceph-volume-helper-block-logical.sh.tpl" . | include "helm-toolkit.utils.template" | indent 4 }}
  osd-init-ceph-volume.sh: |
{{ tuple "bin/osd/ceph-volume/_init-with-ceph-volume.sh.tpl" . | include "helm-toolkit.utils.template" | indent 4 }}
  osd-common-ceph-volume.sh: |
{{ tuple "bin/osd/ceph-volume/_common.sh.tpl" . | include "helm-toolkit.utils.template" | indent 4 }}
  osd-init.sh: |
{{ tuple "bin/osd/_init.sh.tpl" . | include "helm-toolkit.utils.template" | indent 4 }}
  osd-check.sh: |
{{ tuple "bin/osd/_check.sh.tpl" . | include "helm-toolkit.utils.template" | indent 4 }}
  osd-stop.sh: |
{{ tuple "bin/osd/_stop.sh.tpl" . | include "helm-toolkit.utils.template" | indent 4 }}
  log-runner-stop.sh: |
{{ tuple "bin/osd/_log-runner-stop.sh.tpl" . | include "helm-toolkit.utils.template" | indent 4 }}
  init-dirs.sh: |
{{ tuple "bin/_init-dirs.sh.tpl" . | include "helm-toolkit.utils.template" | indent 4 }}
  helm-tests.sh: |
{{ tuple "bin/_helm-tests.sh.tpl" . | include "helm-toolkit.utils.template" | indent 4 }}
  utils-checkDNS.sh: |
{{ tuple "bin/utils/_checkDNS.sh.tpl" . | include "helm-toolkit.utils.template" | indent 4 }}
  utils-defragOSDs.sh: |
{{ tuple "bin/utils/_defragOSDs.sh.tpl" . | include "helm-toolkit.utils.template" | indent 4 }}
  utils-resolveLocations.sh: |
{{ tuple "bin/utils/_resolveLocations.sh.tpl" . | include "helm-toolkit.utils.template" | indent 4 }}
{{- end }}
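
# A quick way to inspect the scripts this ConfigMap renders (hypothetical
# release name and chart path; the helm-toolkit dependency must be present):
#
#   helm template ceph-osd ./ceph-osd --show-only templates/configmap-bin.yaml
#
# Setting manifests.configmap_bin=false skips this manifest entirely.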
50
ceph-osd/templates/configmap-etc.yaml
Normal file
@ -0,0 +1,50 @@
{{/*
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at

  http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/}}

{{- define "ceph.osd.configmap.etc" }}
{{- $configMapName := index . 0 }}
{{- $envAll := index . 1 }}
{{- with $envAll }}

{{- if empty .Values.conf.ceph.global.mon_host -}}
{{- $monHost := tuple "ceph_mon" "internal" "mon_msgr2" . | include "helm-toolkit.endpoints.host_and_port_endpoint_uri_lookup" }}
{{- $_ := $monHost | set .Values.conf.ceph.global "mon_host" -}}
{{- end -}}

{{- if empty .Values.conf.ceph.global.fsid -}}
{{- $_ := uuidv4 | set .Values.conf.ceph.global "fsid" -}}
{{- end -}}

{{- if empty .Values.conf.ceph.osd.cluster_network -}}
{{- $_ := .Values.network.cluster | set .Values.conf.ceph.osd "cluster_network" -}}
{{- end -}}

{{- if empty .Values.conf.ceph.osd.public_network -}}
{{- $_ := .Values.network.public | set .Values.conf.ceph.osd "public_network" -}}
{{- end -}}
---
apiVersion: v1
kind: ConfigMap
metadata:
  name: {{ $configMapName }}
data:
  ceph.conf: |
{{ include "helm-toolkit.utils.to_ini" .Values.conf.ceph | indent 4 }}
  storage.json: |
{{ toPrettyJson .Values.conf.storage | indent 4 }}
{{- end }}
{{- end }}
{{- if .Values.manifests.configmap_etc }}
{{- list (printf "%s-%s" .Release.Name "etc") . | include "ceph.osd.configmap.etc" }}
{{- end }}
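
# Example override (hypothetical values): any of the fields defaulted above
# can be pinned explicitly at render time instead of being derived:
#
#   helm template ceph-osd ./ceph-osd \
#     --set conf.ceph.global.fsid=11111111-2222-3333-4444-555555555555 \
#     --set network.cluster=192.168.0.0/24 \
#     --set network.public=192.168.0.0/24 \
#     --show-only templates/configmap-etc.yaml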