diff --git a/ceph-client/Chart.yaml b/ceph-client/Chart.yaml
new file mode 100644
index 0000000000..225179ea7b
--- /dev/null
+++ b/ceph-client/Chart.yaml
@@ -0,0 +1,18 @@
+# Copyright 2017 The Openstack-Helm Authors.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#     http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+apiVersion: v1
+description: OpenStack-Helm Ceph Client
+name: ceph-client
+version: 0.1.0
diff --git a/ceph-client/requirements.yaml b/ceph-client/requirements.yaml
new file mode 100644
index 0000000000..53782e69b2
--- /dev/null
+++ b/ceph-client/requirements.yaml
@@ -0,0 +1,18 @@
+# Copyright 2017 The Openstack-Helm Authors.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#     http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+dependencies:
+  - name: helm-toolkit
+    repository: http://localhost:8879/charts
+    version: 0.1.0
diff --git a/ceph-client/templates/bin/_bootstrap.sh.tpl b/ceph-client/templates/bin/_bootstrap.sh.tpl
new file mode 100644
index 0000000000..533c0a5a3f
--- /dev/null
+++ b/ceph-client/templates/bin/_bootstrap.sh.tpl
@@ -0,0 +1,20 @@
+#!/bin/bash
+
+{{/*
+Copyright 2017 The Openstack-Helm Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+   http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/}}
+
+set -ex
+{{ .Values.bootstrap.script | default "echo 'Not Enabled'" }}
diff --git a/ceph-client/templates/bin/_init-dirs.sh.tpl b/ceph-client/templates/bin/_init-dirs.sh.tpl
new file mode 100644
index 0000000000..dd186d4c0a
--- /dev/null
+++ b/ceph-client/templates/bin/_init-dirs.sh.tpl
@@ -0,0 +1,46 @@
+#!/bin/bash
+
+{{/*
+Copyright 2017 The Openstack-Helm Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+   http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/}}
+
+set -ex
+export LC_ALL=C
+: "${HOSTNAME:=$(uname -n)}"
+: "${MGR_NAME:=${HOSTNAME}}"
+: "${MDS_NAME:=mds-${HOSTNAME}}"
+: "${MDS_BOOTSTRAP_KEYRING:=/var/lib/ceph/bootstrap-mds/${CLUSTER}.keyring}"
+: "${OSD_BOOTSTRAP_KEYRING:=/var/lib/ceph/bootstrap-osd/${CLUSTER}.keyring}"
+
+for keyring in ${OSD_BOOTSTRAP_KEYRING} ${MDS_BOOTSTRAP_KEYRING}; do
+  mkdir -p "$(dirname "$keyring")"
+done
+
+# Let's create the ceph directories
+for DIRECTORY in mds tmp mgr; do
+  mkdir -p "/var/lib/ceph/${DIRECTORY}"
+done
+
+# Create socket directory
+mkdir -p /run/ceph
+
+# Create the MDS directory
+mkdir -p "/var/lib/ceph/mds/${CLUSTER}-${MDS_NAME}"
+
+# Create the MGR directory
+mkdir -p "/var/lib/ceph/mgr/${CLUSTER}-${MGR_NAME}"
+
+# Adjust the owner of all those directories
+chown -R ceph. /run/ceph/ /var/lib/ceph/*
diff --git a/ceph-client/templates/bin/mds/_start.sh.tpl b/ceph-client/templates/bin/mds/_start.sh.tpl
new file mode 100644
index 0000000000..50128c76cd
--- /dev/null
+++ b/ceph-client/templates/bin/mds/_start.sh.tpl
@@ -0,0 +1,76 @@
+#!/bin/bash
+set -ex
+export LC_ALL=C
+: "${HOSTNAME:=$(uname -n)}"
+: "${CEPHFS_CREATE:=0}"
+: "${CEPHFS_NAME:=cephfs}"
+: "${CEPHFS_DATA_POOL:=${CEPHFS_NAME}_data}"
+: "${CEPHFS_DATA_POOL_PG:=8}"
+: "${CEPHFS_METADATA_POOL:=${CEPHFS_NAME}_metadata}"
+: "${CEPHFS_METADATA_POOL_PG:=8}"
+: "${MDS_NAME:=mds-${HOSTNAME}}"
+: "${ADMIN_KEYRING:=/etc/ceph/${CLUSTER}.client.admin.keyring}"
+: "${MDS_KEYRING:=/var/lib/ceph/mds/${CLUSTER}-${MDS_NAME}/keyring}"
+: "${MDS_BOOTSTRAP_KEYRING:=/var/lib/ceph/bootstrap-mds/${CLUSTER}.keyring}"
+
+if [[ ! -e "/etc/ceph/${CLUSTER}.conf" ]]; then
+  echo "ERROR- /etc/ceph/${CLUSTER}.conf must exist; get it from your existing mon"
+  exit 1
+fi
+
+# Check to see if we are a new MDS
+if [ ! -e "${MDS_KEYRING}" ]; then
+
+  if [ -e "${ADMIN_KEYRING}" ]; then
+     KEYRING_OPT=(--name client.admin --keyring "${ADMIN_KEYRING}")
+  elif [ -e "${MDS_BOOTSTRAP_KEYRING}" ]; then
+     KEYRING_OPT=(--name client.bootstrap-mds --keyring "${MDS_BOOTSTRAP_KEYRING}")
+  else
+    echo "ERROR- Failed to bootstrap MDS: could not find admin or bootstrap-mds keyring.  You can extract it from your current monitor by running 'ceph auth get client.bootstrap-mds -o ${MDS_BOOTSTRAP_KEYRING}"
+    exit 1
+  fi
+
+  timeout 10 ceph --cluster "${CLUSTER}" "${KEYRING_OPT[@]}" health || exit 1
+
+  # Generate the MDS key
+  ceph --cluster "${CLUSTER}" "${KEYRING_OPT[@]}" auth get-or-create "mds.${MDS_NAME}" osd 'allow rwx' mds 'allow' mon 'allow profile mds' -o "${MDS_KEYRING}"
+  chown ceph. "${MDS_KEYRING}"
+  chmod 600 "${MDS_KEYRING}"
+
+fi
+
+# NOTE (leseb): having the admin keyring here is really a security issue.
+# If we need to bootstrap an MDS, we should probably perform the following
+# steps on the monitors instead. It is handy to do this here, but having
+# the admin key inside every container is a concern.
+
+# Create the Ceph filesystem, if necessary
+if [ $CEPHFS_CREATE -eq 1 ]; then
+
+  if [[ ! -e ${ADMIN_KEYRING} ]]; then
+      echo "ERROR- ${ADMIN_KEYRING} must exist; get it from your existing mon"
+      exit 1
+  fi
+
+  if [[ "$(ceph --cluster "${CLUSTER}" fs ls | grep -c name:.${CEPHFS_NAME},)" -eq 0 ]]; then
+     # Make sure the specified data pool exists
+     if ! ceph --cluster "${CLUSTER}" osd pool stats ${CEPHFS_DATA_POOL} > /dev/null 2>&1; then
+        ceph --cluster "${CLUSTER}" osd pool create ${CEPHFS_DATA_POOL} ${CEPHFS_DATA_POOL_PG}
+     fi
+
+     # Make sure the specified metadata pool exists
+     if ! ceph --cluster "${CLUSTER}" osd pool stats ${CEPHFS_METADATA_POOL} > /dev/null 2>&1; then
+        ceph --cluster "${CLUSTER}" osd pool create ${CEPHFS_METADATA_POOL} ${CEPHFS_METADATA_POOL_PG}
+     fi
+
+     ceph --cluster "${CLUSTER}" fs new ${CEPHFS_NAME} ${CEPHFS_METADATA_POOL} ${CEPHFS_DATA_POOL}
+  fi
+fi
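+# NOTE: with the defaults above, this block creates the cephfs_data and
+# cephfs_metadata pools if they do not already exist (the ceph-rbd-pool job
+# normally creates them) and then runs:
+#   ceph --cluster ceph fs new cephfs cephfs_metadata cephfs_data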
+
+# NOTE: prefixing this with exec causes the daemon to terminate itself
+/usr/bin/ceph-mds \
+  --cluster "${CLUSTER}" \
+  --setuser "ceph" \
+  --setgroup "ceph" \
+  -d \
+  -i "${MDS_NAME}"
diff --git a/ceph-client/templates/bin/mgr/_check.sh.tpl b/ceph-client/templates/bin/mgr/_check.sh.tpl
new file mode 100644
index 0000000000..3520a633c6
--- /dev/null
+++ b/ceph-client/templates/bin/mgr/_check.sh.tpl
@@ -0,0 +1,42 @@
+#!/bin/bash
+
+{{/*
+Copyright 2017 The Openstack-Helm Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+   http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/}}
+
+set -ex
+export LC_ALL=C
+
+COMMAND="${@:-liveness}"
+
+function health_check () {
+  IS_MGR_AVAIL=$(ceph --cluster "${CLUSTER}" mgr dump | python -c "import json, sys; print json.load(sys.stdin)['available']")
+
+  if [ "${IS_MGR_AVAIL}" = True ]; then
+    exit 0
+  else
+    exit 1
+  fi
+}
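+# NOTE: "ceph mgr dump" emits JSON containing an "available" boolean that is
+# true once an active mgr is up; health_check above keys off that field.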
+
+function liveness () {
+  health_check
+}
+
+function readiness () {
+  health_check
+}
+
+$COMMAND
diff --git a/ceph-client/templates/bin/mgr/_start.sh.tpl b/ceph-client/templates/bin/mgr/_start.sh.tpl
new file mode 100644
index 0000000000..be622ac317
--- /dev/null
+++ b/ceph-client/templates/bin/mgr/_start.sh.tpl
@@ -0,0 +1,65 @@
+#!/bin/bash
+set -ex
+: "${CEPH_GET_ADMIN_KEY:=0}"
+: "${MGR_NAME:=$(uname -n)}"
+: "${MGR_KEYRING:=/var/lib/ceph/mgr/${CLUSTER}-${MGR_NAME}/keyring}"
+: "${ADMIN_KEYRING:=/etc/ceph/${CLUSTER}.client.admin.keyring}"
+
+if [[ ! -e /etc/ceph/${CLUSTER}.conf ]]; then
+    echo "ERROR- /etc/ceph/${CLUSTER}.conf must exist; get it from your existing mon"
+    exit 1
+fi
+
+if [ ${CEPH_GET_ADMIN_KEY} -eq 1 ]; then
+    if [[ ! -e ${ADMIN_KEYRING} ]]; then
+        echo "ERROR- ${ADMIN_KEYRING} must exist; get it from your existing mon"
+        exit 1
+    fi
+fi
+
+# Always re-create the MGR keyring on start
+rm -rf "$MGR_KEYRING"
+if [ ! -e "$MGR_KEYRING" ]; then
+    # Create ceph-mgr key
+    timeout 10 ceph --cluster "${CLUSTER}" auth get-or-create mgr."${MGR_NAME}" mon 'allow profile mgr' osd 'allow *' mds 'allow *' -o "$MGR_KEYRING"
+    chown --verbose ceph. "$MGR_KEYRING"
+    chmod 600 "$MGR_KEYRING"
+fi
+
+echo "SUCCESS"
+
+ceph --cluster "${CLUSTER}" -v
+
+# Environment variables matching the pattern "<module>_<key>=<value>" are
+# parsed into config-key settings and applied via:
+#   ceph config-key set mgr/<module>/<key> <value>
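+# For example, if the "dashboard" module is added to ceph_mgr_enabled_modules
+# and the commented ceph_mgr_modules_config example in values.yaml is used
+# (dashboard.port: 7000), the container receives dashboard_port=7000 and the
+# loop below runs:
+#   ceph --cluster "${CLUSTER}" config-key set mgr/dashboard/port 7000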
+MODULES_TO_DISABLE=$(ceph --cluster "${CLUSTER}" mgr dump | python -c "import json, sys; print ' '.join(json.load(sys.stdin)['modules'])")
+
+for module in ${ENABLED_MODULES}; do
+    # This module may have been enabled in the past;
+    # remove it from the disable list if present.
+    MODULES_TO_DISABLE=${MODULES_TO_DISABLE/$module/}
+
+    options=$(env | grep "^${module}_" || true)
+    for option in ${options}; do
+        # strip the module name prefix
+        option=${option/${module}_/}
+        key=$(echo "$option" | cut -d= -f1)
+        value=$(echo "$option" | cut -d= -f2)
+        ceph --cluster "${CLUSTER}" config-key set "mgr/$module/$key" "$value"
+    done
+    ceph --cluster "${CLUSTER}" mgr module enable ${module} --force
+done
+
+for module in $MODULES_TO_DISABLE; do
+  ceph --cluster "${CLUSTER}" mgr module disable ${module}
+done
+
+echo "SUCCESS"
+# start ceph-mgr
+exec /usr/bin/ceph-mgr \
+  --cluster "${CLUSTER}" \
+  --setuser "ceph" \
+  --setgroup "ceph" \
+  -d \
+  -i "${MGR_NAME}"
diff --git a/ceph-client/templates/bin/pool/_calc.py.tpl b/ceph-client/templates/bin/pool/_calc.py.tpl
new file mode 100644
index 0000000000..897b0efd3b
--- /dev/null
+++ b/ceph-client/templates/bin/pool/_calc.py.tpl
@@ -0,0 +1,46 @@
+#!/usr/bin/python
+# -*- coding: utf-8 -*-
+
+{{/*
+Copyright 2018 The Openstack-Helm Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+   http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/}}
+
+#NOTE(portdirect): this is a simple approximation of https://ceph.com/pgcalc/
+
+import math
+import sys
+
+replication = int(sys.argv[1])
+number_of_osds = int(sys.argv[2])
+percentage_data = float(sys.argv[3])
+target_pgs_per_osd = int(sys.argv[4])
+
+raw_pg_num_opt = target_pgs_per_osd * number_of_osds \
+    * (math.ceil(percentage_data) / 100.0) / replication
+
+raw_pg_num_min = number_of_osds / replication
+
+if raw_pg_num_min >= raw_pg_num_opt:
+    raw_pg_num = raw_pg_num_min
+else:
+    raw_pg_num = raw_pg_num_opt
+
+max_pg_num = int(math.pow(2, math.ceil(math.log(raw_pg_num, 2))))
+min_pg_num = int(math.pow(2, math.floor(math.log(raw_pg_num, 2))))
+
+if min_pg_num >= (raw_pg_num * 0.75):
+    print min_pg_num
+else:
+    print max_pg_num
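+
+# Worked example with the chart defaults for the rbd pool (replication=3,
+# 5 OSDs, 40% of total data, 100 target PGs per OSD):
+#   raw_pg_num_opt = 100 * 5 * (40 / 100.0) / 3 ~= 66.7
+#   min_pg_num = 2**6 = 64, max_pg_num = 2**7 = 128
+#   64 >= 0.75 * 66.7, so 64 is printed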
diff --git a/ceph-client/templates/bin/pool/_init.sh.tpl b/ceph-client/templates/bin/pool/_init.sh.tpl
new file mode 100644
index 0000000000..5805f44cb5
--- /dev/null
+++ b/ceph-client/templates/bin/pool/_init.sh.tpl
@@ -0,0 +1,87 @@
+#!/bin/bash
+
+{{/*
+Copyright 2018 The Openstack-Helm Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+   http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/}}
+
+set -ex
+export LC_ALL=C
+
+: "${ADMIN_KEYRING:=/etc/ceph/${CLUSTER}.client.admin.keyring}"
+: "${OSD_TARGET_PGS:=100}"
+: "${QUANTITY_OSDS:=15}"
+
+if [[ ! -e /etc/ceph/${CLUSTER}.conf ]]; then
+  echo "ERROR- /etc/ceph/${CLUSTER}.conf must exist; get it from your existing mon"
+  exit 1
+fi
+
+if [[ ! -e ${ADMIN_KEYRING} ]]; then
+   echo "ERROR- ${ADMIN_KEYRING} must exist; get it from your existing mon"
+   exit 1
+fi
+
+if ! ceph --cluster "${CLUSTER}" osd crush rule ls | grep -q "^same_host$"; then
+  ceph --cluster "${CLUSTER}" osd crush rule create-simple same_host default osd
+fi
+
+function create_pool () {
+  POOL_APPLICATION=$1
+  POOL_NAME=$2
+  POOL_REPLICATION=$3
+  POOL_PLACEMENT_GROUPS=$4
+  POOL_CRUSH_RULE=$5
+  if ! ceph --cluster "${CLUSTER}" osd pool stats "${POOL_NAME}" > /dev/null 2>&1; then
+    ceph --cluster "${CLUSTER}" osd pool create "${POOL_NAME}" ${POOL_PLACEMENT_GROUPS}
+    while [ $(ceph --cluster "${CLUSTER}" -s | grep creating -c) -gt 0 ]; do echo -n .;sleep 1; done
+    if [ "x${POOL_NAME}" == "xrbd" ]; then
+      rbd --cluster "${CLUSTER}" pool init ${POOL_NAME}
+    fi
+    ceph --cluster "${CLUSTER}" osd pool application enable "${POOL_NAME}" "${POOL_APPLICATION}"
+  fi
+  ceph --cluster "${CLUSTER}" osd pool set "${POOL_NAME}" size ${POOL_REPLICATION}
+  ceph --cluster "${CLUSTER}" osd pool set "${POOL_NAME}" crush_rule "${POOL_CRUSH_RULE}"
+  for PG_PARAM in pg_num pgp_num; do
+    CURRENT_PG_VALUE=$(ceph --cluster "${CLUSTER}" osd pool get "${POOL_NAME}" "${PG_PARAM}" | awk "/^${PG_PARAM}:/ { print \$NF }")
+    if [ "${POOL_PLACEMENT_GROUPS}" -gt "${CURRENT_PG_VALUE}" ]; then
+      ceph --cluster "${CLUSTER}" osd pool set "${POOL_NAME}" "${PG_PARAM}" "${POOL_PLACEMENT_GROUPS}"
+    fi
+  done
+}
+
+function manage_pool () {
+  POOL_APPLICATION=$1
+  POOL_NAME=$2
+  POOL_REPLICATION=$3
+  TOTAL_OSDS=$4
+  TOTAL_DATA_PERCENT=$5
+  TARGET_PG_PER_OSD=$6
+  POOL_CRUSH_RULE=$7
+  POOL_PLACEMENT_GROUPS=$(/tmp/pool-calc.py ${POOL_REPLICATION} ${TOTAL_OSDS} ${TOTAL_DATA_PERCENT} ${TARGET_PG_PER_OSD})
+  create_pool "${POOL_APPLICATION}" "${POOL_NAME}" "${POOL_REPLICATION}" "${POOL_PLACEMENT_GROUPS}" "${POOL_CRUSH_RULE}"
+}
+
+{{ $targetNumOSD := .Values.conf.pool.target.osd }}
+{{ $targetPGperOSD := .Values.conf.pool.target.pg_per_osd }}
+{{ $crushRuleDefault := .Values.conf.pool.default.crush_rule }}
+{{- range $pool := .Values.conf.pool.spec -}}
+{{- with $pool }}
+manage_pool {{ .application }} {{ .name }} {{ .replication }} {{ $targetNumOSD }} {{ .percent_total_data }} {{ $targetPGperOSD }} {{ $crushRuleDefault }}
+{{- end }}
+{{- end }}
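+# NOTE: with the default values.yaml, the block above renders to calls such as:
+#   manage_pool rbd rbd 3 5 40 100 replicated_rule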
+
+{{- if .Values.conf.pool.crush.tunables }}
+ceph --cluster "${CLUSTER}" osd crush tunables {{ .Values.conf.pool.crush.tunables }}
+{{- end }}
diff --git a/ceph-client/templates/configmap-bin.yaml b/ceph-client/templates/configmap-bin.yaml
new file mode 100644
index 0000000000..d4f31c0a85
--- /dev/null
+++ b/ceph-client/templates/configmap-bin.yaml
@@ -0,0 +1,51 @@
+{{/*
+Copyright 2017 The Openstack-Helm Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+   http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/}}
+
+{{- if and .Values.manifests.configmap_bin .Values.deployment.ceph }}
+{{- $envAll := . }}
+---
+apiVersion: v1
+kind: ConfigMap
+metadata:
+  name: ceph-client-bin
+data:
+{{- if .Values.images.local_registry.active }}
+  image-repo-sync.sh: |
+{{- include "helm-toolkit.scripts.image_repo_sync" . | indent 4 }}
+{{- end }}
+
+{{- if .Values.bootstrap.enabled }}
+  bootstrap.sh: |
+{{ tuple "bin/_bootstrap.sh.tpl" . | include "helm-toolkit.utils.template" | indent 4 }}
+{{- end }}
+
+  init-dirs.sh: |
+{{ tuple "bin/_init-dirs.sh.tpl" . | include "helm-toolkit.utils.template" | indent 4 }}
+
+  pool-init.sh: |
+{{ tuple "bin/pool/_init.sh.tpl" . | include  "helm-toolkit.utils.template" | indent 4 }}
+  pool-calc.py: |
+{{ tuple "bin/pool/_calc.py.tpl" . | include  "helm-toolkit.utils.template" | indent 4 }}
+
+  mds-start.sh: |
+{{ tuple "bin/mds/_start.sh.tpl" . | include "helm-toolkit.utils.template" | indent 4 }}
+
+  mgr-start.sh: |
+{{ tuple "bin/mgr/_start.sh.tpl" . | include "helm-toolkit.utils.template" | indent 4 }}
+  mgr-check.sh: |
+{{ tuple "bin/mgr/_check.sh.tpl" . | include "helm-toolkit.utils.template" | indent 4 }}
+
+{{- end }}
diff --git a/ceph-client/templates/configmap-etc-client.yaml b/ceph-client/templates/configmap-etc-client.yaml
new file mode 100644
index 0000000000..7464532a31
--- /dev/null
+++ b/ceph-client/templates/configmap-etc-client.yaml
@@ -0,0 +1,56 @@
+{{/*
+Copyright 2017 The Openstack-Helm Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+   http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/}}
+
+{{- define "ceph.configmap.etc" }}
+{{- $configMapName := index . 0 }}
+{{- $envAll := index . 1 }}
+{{- with $envAll }}
+
+{{- if or (.Values.deployment.ceph) (.Values.deployment.client_secrets) }}
+
+{{- if empty .Values.conf.ceph.global.mon_host -}}
+{{- $monHost := tuple "ceph_mon" "internal" "mon" . | include "helm-toolkit.endpoints.host_and_port_endpoint_uri_lookup" }}
+{{- $_ := $monHost | set .Values.conf.ceph.global "mon_host" -}}
+{{- end -}}
+
+{{- if empty .Values.conf.ceph.global.mon_addr -}}
+{{- $monPort := tuple "ceph_mon" "internal" "mon" . | include "helm-toolkit.endpoints.endpoint_port_lookup" }}
+{{- $_ := printf ":%s" $monPort | set .Values.conf.ceph.global "mon_addr" -}}
+{{- end -}}
+
+{{- if empty .Values.conf.ceph.osd.cluster_network -}}
+{{- $_ := .Values.network.cluster | set .Values.conf.ceph.osd "cluster_network" -}}
+{{- end -}}
+
+{{- if empty .Values.conf.ceph.osd.public_network -}}
+{{- $_ := .Values.network.public | set .Values.conf.ceph.osd "public_network" -}}
+{{- end -}}
+
+---
+apiVersion: v1
+kind: ConfigMap
+metadata:
+  name: {{ $configMapName }}
+data:
+  ceph.conf: |
+{{ include "helm-toolkit.utils.to_ini" .Values.conf.ceph | indent 4 }}
+
+{{- end }}
+{{- end }}
+{{- end }}
+{{- if .Values.manifests.configmap_etc }}
+{{- list "ceph-client-etc" . | include "ceph.configmap.etc" }}
+{{- end }}
diff --git a/ceph-client/templates/deployment-mds.yaml b/ceph-client/templates/deployment-mds.yaml
new file mode 100644
index 0000000000..2118048e34
--- /dev/null
+++ b/ceph-client/templates/deployment-mds.yaml
@@ -0,0 +1,130 @@
+{{/*
+Copyright 2017 The Openstack-Helm Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+   http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/}}
+
+{{- if and .Values.manifests.deployment_mds ( and .Values.deployment.ceph .Values.conf.features.mds) }}
+{{- $envAll := . }}
+
+{{- $serviceAccountName := "ceph-mds"}}
+{{ tuple $envAll "mds" $serviceAccountName | include "helm-toolkit.snippets.kubernetes_pod_rbac_serviceaccount" }}
+---
+kind: Deployment
+apiVersion: apps/v1
+metadata:
+  name: ceph-mds
+  labels:
+{{ tuple $envAll "ceph" "mds" | include "helm-toolkit.snippets.kubernetes_metadata_labels" | indent 4 }}
+spec:
+  replicas: {{ .Values.pod.replicas.mds }}
+  selector:
+    matchLabels:
+{{ tuple $envAll "ceph" "mds" | include "helm-toolkit.snippets.kubernetes_metadata_labels" | indent 6 }}
+  template:
+    metadata:
+      name: ceph-mds
+      labels:
+{{ tuple $envAll "ceph" "mds" | include "helm-toolkit.snippets.kubernetes_metadata_labels" | indent 8 }}
+    spec:
+      serviceAccountName: {{ $serviceAccountName }}
+      affinity:
+{{ tuple $envAll "ceph" "mds" | include "helm-toolkit.snippets.kubernetes_pod_anti_affinity" | indent 8 }}
+      nodeSelector:
+        {{ .Values.labels.mds.node_selector_key }}: {{ .Values.labels.mds.node_selector_value }}
+      initContainers:
+{{ tuple $envAll "mds" list | include "helm-toolkit.snippets.kubernetes_entrypoint_init_container" | indent 8 }}
+        - name: ceph-init-dirs
+{{ tuple $envAll "ceph_mds" | include "helm-toolkit.snippets.image" | indent 10 }}
+          command:
+            - /tmp/init-dirs.sh
+          env:
+            - name: CLUSTER
+              value: "ceph"
+          volumeMounts:
+            - name: ceph-client-bin
+              mountPath: /tmp/init-dirs.sh
+              subPath: init-dirs.sh
+              readOnly: true
+            - name: pod-var-lib-ceph
+              mountPath: /var/lib/ceph
+              readOnly: false
+            - name: pod-run
+              mountPath: /run
+              readOnly: false
+      containers:
+        - name: ceph-mds
+{{ tuple $envAll "ceph_mds" | include "helm-toolkit.snippets.image" | indent 10 }}
+{{ tuple $envAll $envAll.Values.pod.resources.mds | include "helm-toolkit.snippets.kubernetes_resources" | indent 10 }}
+          command:
+            - /tmp/mds-start.sh
+          env:
+            - name: CLUSTER
+              value: "ceph"
+            - name: CEPHFS_CREATE
+              value: "1"
+          ports:
+            - containerPort: 6800
+          livenessProbe:
+            tcpSocket:
+              port: 6800
+            initialDelaySeconds: 60
+            timeoutSeconds: 5
+          readinessProbe:
+            tcpSocket:
+              port: 6800
+            timeoutSeconds: 5
+          volumeMounts:
+            - name: ceph-client-bin
+              mountPath: /tmp/mds-start.sh
+              subPath: mds-start.sh
+              readOnly: true
+            - name: ceph-client-etc
+              mountPath: /etc/ceph/ceph.conf
+              subPath: ceph.conf
+              readOnly: true
+            - name: ceph-client-admin-keyring
+              mountPath: /etc/ceph/ceph.client.admin.keyring
+              subPath: ceph.client.admin.keyring
+              readOnly: true
+            - name: ceph-bootstrap-mds-keyring
+              mountPath: /var/lib/ceph/bootstrap-mds/ceph.keyring
+              subPath: ceph.keyring
+              readOnly: false
+            - name: pod-var-lib-ceph
+              mountPath: /var/lib/ceph
+              readOnly: false
+            - name: pod-run
+              mountPath: /run
+              readOnly: false
+      volumes:
+        - name: ceph-client-etc
+          configMap:
+            name: ceph-client-etc
+            defaultMode: 0444
+        - name: ceph-client-bin
+          configMap:
+            name: ceph-client-bin
+            defaultMode: 0555
+        - name: pod-var-lib-ceph
+          emptyDir: {}
+        - name: pod-run
+          emptyDir:
+            medium: "Memory"
+        - name: ceph-client-admin-keyring
+          secret:
+            secretName: {{ .Values.secrets.keyrings.admin }}
+        - name: ceph-bootstrap-mds-keyring
+          secret:
+            secretName: {{ .Values.secrets.keyrings.mds }}
+{{- end }}
diff --git a/ceph-client/templates/deployment-mgr.yaml b/ceph-client/templates/deployment-mgr.yaml
new file mode 100644
index 0000000000..d81f7fda8c
--- /dev/null
+++ b/ceph-client/templates/deployment-mgr.yaml
@@ -0,0 +1,166 @@
+{{/*
+Copyright 2017 The Openstack-Helm Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+   http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/}}
+
+{{- if and .Values.manifests.deployment_mgr (and .Values.deployment.ceph .Values.conf.features.mgr ) }}
+{{- $envAll := . }}
+
+{{- $serviceAccountName := "ceph-mgr"}}
+{{ tuple $envAll "mgr" $serviceAccountName | include "helm-toolkit.snippets.kubernetes_pod_rbac_serviceaccount" }}
+---
+kind: Deployment
+apiVersion: apps/v1
+metadata:
+  name: ceph-mgr
+  labels:
+{{ tuple $envAll "ceph" "mgr" | include "helm-toolkit.snippets.kubernetes_metadata_labels" | indent 4 }}
+spec:
+  replicas: {{ .Values.pod.replicas.mgr }}
+  selector:
+    matchLabels:
+{{ tuple $envAll "ceph" "mgr" | include "helm-toolkit.snippets.kubernetes_metadata_labels" | indent 6 }}
+  strategy:
+    type: Recreate
+  template:
+    metadata:
+      labels:
+{{ tuple $envAll "ceph" "mgr" | include "helm-toolkit.snippets.kubernetes_metadata_labels" | indent 8 }}
+    spec:
+      serviceAccountName: {{ $serviceAccountName }}
+      affinity:
+{{ tuple $envAll "ceph" "mgr" | include "helm-toolkit.snippets.kubernetes_pod_anti_affinity" | indent 8 }}
+      nodeSelector:
+        {{ .Values.labels.mgr.node_selector_key }}: {{ .Values.labels.mgr.node_selector_value }}
+      hostNetwork: true
+      dnsPolicy: {{ .Values.pod.dns_policy }}
+      initContainers:
+{{ tuple $envAll "mgr" list | include "helm-toolkit.snippets.kubernetes_entrypoint_init_container" | indent 8 }}
+        - name: ceph-init-dirs
+{{ tuple $envAll "ceph_mds" | include "helm-toolkit.snippets.image" | indent 10 }}
+          command:
+            - /tmp/init-dirs.sh
+          env:
+            - name: CLUSTER
+              value: "ceph"
+          volumeMounts:
+            - name: ceph-client-bin
+              mountPath: /tmp/init-dirs.sh
+              subPath: init-dirs.sh
+              readOnly: true
+            - name: pod-var-lib-ceph
+              mountPath: /var/lib/ceph
+              readOnly: false
+            - name: pod-run
+              mountPath: /run
+              readOnly: false
+            - name: pod-etc-ceph
+              mountPath: /etc/ceph
+      containers:
+        - name: ceph-mgr
+{{ tuple $envAll "ceph_mgr" | include "helm-toolkit.snippets.image" | indent 10 }}
+{{ tuple $envAll $envAll.Values.pod.resources.mgr | include "helm-toolkit.snippets.kubernetes_resources" | indent 10 }}
+          env:
+            - name: CLUSTER
+              value: "ceph"
+            {{- if .Values.ceph_mgr_enabled_modules }}
+            - name: ENABLED_MODULES
+              value: |-
+              {{- range $value := .Values.ceph_mgr_enabled_modules }}
+                {{ $value }}
+              {{- end }}
+            {{- end }}
+            {{- if .Values.ceph_mgr_modules_config }}
+            {{- range $module,$params := .Values.ceph_mgr_modules_config }}
+            {{- range $key, $value := $params }}
+            - name: {{ $module }}_{{ $key }}
+              value: {{ $value | quote }}
+            {{- end }}
+            {{- end }}
+            {{- end }}
+          command:
+            - /mgr-start.sh
+          ports:
+            - name: mgr
+              containerPort: {{ tuple "ceph_mgr" "internal" "mgr" . | include "helm-toolkit.endpoints.endpoint_port_lookup" }}
+          {{- if (has "prometheus" .Values.ceph_mgr_enabled_modules) }}
+            - name: metrics
+              containerPort: {{ tuple "ceph_mgr" "internal" "metrics" . | include "helm-toolkit.endpoints.endpoint_port_lookup" }}
+          {{ end -}}
+          livenessProbe:
+            exec:
+              command:
+                - /tmp/mgr-check.sh
+                - liveness
+            initialDelaySeconds: 30
+            timeoutSeconds: 5
+          readinessProbe:
+            exec:
+              command:
+                - /tmp/mgr-check.sh
+                - readiness
+            initialDelaySeconds: 30
+            timeoutSeconds: 5
+          volumeMounts:
+            - name: ceph-client-bin
+              mountPath: /mgr-start.sh
+              subPath: mgr-start.sh
+              readOnly: true
+            - name: ceph-client-bin
+              mountPath: /tmp/mgr-check.sh
+              subPath: mgr-check.sh
+              readOnly: true
+            - name: pod-etc-ceph
+              mountPath: /etc/ceph
+            - name: ceph-client-etc
+              mountPath: /etc/ceph/ceph.conf
+              subPath: ceph.conf
+              readOnly: true
+            - name: ceph-client-admin-keyring
+              mountPath: /etc/ceph/ceph.client.admin.keyring
+              subPath: ceph.client.admin.keyring
+              readOnly: true
+            - name: ceph-bootstrap-mgr-keyring
+              mountPath: /var/lib/ceph/bootstrap-mgr/ceph.keyring
+              subPath: ceph.keyring
+              readOnly: false
+            - name: pod-var-lib-ceph
+              mountPath: /var/lib/ceph
+              readOnly: false
+            - name: pod-run
+              mountPath: /run
+              readOnly: false
+      volumes:
+        - name: pod-etc-ceph
+          emptyDir: {}
+        - name: ceph-client-bin
+          configMap:
+            name: ceph-client-bin
+            defaultMode: 0555
+        - name: ceph-client-etc
+          configMap:
+            name: ceph-client-etc
+            defaultMode: 0444
+        - name: pod-var-lib-ceph
+          emptyDir: {}
+        - name: pod-run
+          emptyDir:
+            medium: "Memory"
+        - name: ceph-client-admin-keyring
+          secret:
+            secretName: {{ .Values.secrets.keyrings.admin }}
+        - name: ceph-bootstrap-mgr-keyring
+          secret:
+            secretName: {{ .Values.secrets.keyrings.mgr }}
+{{- end }}
diff --git a/ceph-client/templates/job-bootstrap.yaml b/ceph-client/templates/job-bootstrap.yaml
new file mode 100644
index 0000000000..72a935973b
--- /dev/null
+++ b/ceph-client/templates/job-bootstrap.yaml
@@ -0,0 +1,70 @@
+{{/*
+Copyright 2017 The Openstack-Helm Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+   http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/}}
+
+{{- if and .Values.manifests.job_bootstrap .Values.bootstrap.enabled }}
+{{- $envAll := . }}
+
+{{- $serviceAccountName := "ceph-client-bootstrap"}}
+{{ tuple $envAll "bootstrap" $serviceAccountName | include "helm-toolkit.snippets.kubernetes_pod_rbac_serviceaccount" }}
+---
+apiVersion: batch/v1
+kind: Job
+metadata:
+  name: ceph-client-bootstrap
+spec:
+  template:
+    metadata:
+      labels:
+{{ tuple $envAll "ceph" "bootstrap" | include "helm-toolkit.snippets.kubernetes_metadata_labels" | indent 8 }}
+    spec:
+      serviceAccountName: {{ $serviceAccountName }}
+      restartPolicy: OnFailure
+      nodeSelector:
+        {{ .Values.labels.job.node_selector_key }}: {{ .Values.labels.job.node_selector_value }}
+      initContainers:
+{{ tuple $envAll "bootstrap" list | include "helm-toolkit.snippets.kubernetes_entrypoint_init_container"  | indent 8 }}
+      containers:
+        - name: ceph-client-bootstrap
+{{ tuple $envAll "ceph_bootstrap" | include "helm-toolkit.snippets.image" | indent 10 }}
+{{ tuple $envAll $envAll.Values.pod.resources.jobs.bootstrap | include "helm-toolkit.snippets.kubernetes_resources" | indent 10 }}
+          command:
+            - /tmp/bootstrap.sh
+          volumeMounts:
+            - name: ceph-client-bin
+              mountPath: /tmp/bootstrap.sh
+              subPath: bootstrap.sh
+              readOnly: true
+            - name: ceph-client-etc
+              mountPath: /etc/ceph/ceph.conf
+              subPath: ceph.conf
+              readOnly: true
+            - name: ceph-client-admin-keyring
+              mountPath: /etc/ceph/ceph.client.admin.keyring
+              subPath: ceph.client.admin.keyring
+              readOnly: true
+      volumes:
+        - name: ceph-client-bin
+          configMap:
+            name: ceph-client-bin
+            defaultMode: 0555
+        - name: ceph-client-etc
+          configMap:
+            name: ceph-client-etc
+            defaultMode: 0444
+        - name: ceph-client-admin-keyring
+          secret:
+            secretName: {{ .Values.secrets.keyrings.admin }}
+{{- end }}
diff --git a/ceph-client/templates/job-image-repo-sync.yaml b/ceph-client/templates/job-image-repo-sync.yaml
new file mode 100644
index 0000000000..1814e9aef2
--- /dev/null
+++ b/ceph-client/templates/job-image-repo-sync.yaml
@@ -0,0 +1,20 @@
+{{/*
+Copyright 2017 The Openstack-Helm Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+   http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/}}
+
+{{- if and .Values.manifests.job_image_repo_sync .Values.images.local_registry.active }}
+{{- $imageRepoSyncJob := dict "envAll" . "serviceName" "ceph-client" -}}
+{{ $imageRepoSyncJob | include "helm-toolkit.manifests.job_image_repo_sync" }}
+{{- end }}
diff --git a/ceph-client/templates/job-rbd-pool.yaml b/ceph-client/templates/job-rbd-pool.yaml
new file mode 100644
index 0000000000..961321259b
--- /dev/null
+++ b/ceph-client/templates/job-rbd-pool.yaml
@@ -0,0 +1,91 @@
+{{/*
+Copyright 2017 The Openstack-Helm Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+   http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/}}
+
+{{- if and .Values.manifests.job_rbd_pool .Values.deployment.ceph }}
+{{- $envAll := . }}
+
+{{- $serviceAccountName := "ceph-rbd-pool" }}
+{{ tuple $envAll "rbd_pool" $serviceAccountName | include "helm-toolkit.snippets.kubernetes_pod_rbac_serviceaccount" }}
+---
+apiVersion: batch/v1
+kind: Job
+metadata:
+  name: ceph-rbd-pool
+spec:
+  template:
+    metadata:
+      name: ceph-rbd-pool
+      labels:
+{{ tuple $envAll "ceph" "rbd-pool" | include "helm-toolkit.snippets.kubernetes_metadata_labels" | indent 8 }}
+    spec:
+      serviceAccountName: {{ $serviceAccountName }}
+      restartPolicy: OnFailure
+      affinity:
+{{ tuple $envAll "ceph" "rbd-pool" | include "helm-toolkit.snippets.kubernetes_pod_anti_affinity" | indent 8 }}
+      nodeSelector:
+        {{ $envAll.Values.labels.job.node_selector_key }}: {{ $envAll.Values.labels.job.node_selector_value }}
+      initContainers:
+{{ tuple $envAll "rbd_pool" list | include "helm-toolkit.snippets.kubernetes_entrypoint_init_container" | indent 8 }}
+      containers:
+        - name: ceph-rbd-pool
+{{ tuple $envAll "ceph_rbd_pool" | include "helm-toolkit.snippets.image" | indent 10 }}
+{{ tuple $envAll $envAll.Values.pod.resources.mgr | include "helm-toolkit.snippets.kubernetes_resources" | indent 10 }}
+          env:
+            - name: CLUSTER
+              value: "ceph"
+          command:
+            - /tmp/pool-init.sh
+          volumeMounts:
+            - name: ceph-client-bin
+              mountPath: /tmp/pool-init.sh
+              subPath: pool-init.sh
+              readOnly: true
+            - name: ceph-client-bin
+              mountPath: /tmp/pool-calc.py
+              subPath: pool-calc.py
+              readOnly: true
+            - name: ceph-client-etc
+              mountPath: /etc/ceph/ceph.conf
+              subPath: ceph.conf
+              readOnly: true
+            - name: ceph-client-admin-keyring
+              mountPath: /etc/ceph/ceph.client.admin.keyring
+              subPath: ceph.client.admin.keyring
+              readOnly: true
+            - name: pod-var-lib-ceph
+              mountPath: /var/lib/ceph
+              readOnly: false
+            - name: pod-run
+              mountPath: /run
+              readOnly: false
+      volumes:
+        - name: ceph-client-etc
+          configMap:
+            name: ceph-client-etc
+            defaultMode: 0444
+        - name: ceph-client-bin
+          configMap:
+            name: ceph-client-bin
+            defaultMode: 0555
+        - name: pod-var-lib-ceph
+          emptyDir: {}
+        - name: pod-run
+          emptyDir:
+            medium: "Memory"
+        - name: ceph-client-admin-keyring
+          secret:
+            secretName: {{ .Values.secrets.keyrings.admin }}
+{{- end }}
diff --git a/ceph-client/templates/service-mgr.yaml b/ceph-client/templates/service-mgr.yaml
new file mode 100644
index 0000000000..3198e83d4c
--- /dev/null
+++ b/ceph-client/templates/service-mgr.yaml
@@ -0,0 +1,42 @@
+{{/*
+Copyright 2017 The Openstack-Helm Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+   http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/}}
+
+{{- if and .Values.manifests.service_mgr ( and .Values.deployment.ceph .Values.conf.features.mgr )}}
+{{- $envAll := . }}
+{{- $prometheus_annotations := $envAll.Values.monitoring.prometheus.ceph_mgr }}
+---
+apiVersion: v1
+kind: Service
+metadata:
+  name: ceph-mgr
+  annotations:
+{{- if .Values.monitoring.prometheus.enabled }}
+{{ tuple $prometheus_annotations | include "helm-toolkit.snippets.prometheus_service_annotations" | indent 4 }}
+{{- end }}
+spec:
+  ports:
+  - name: ceph-mgr
+    port: {{ tuple "ceph_mgr" "internal" "mgr" . | include "helm-toolkit.endpoints.endpoint_port_lookup" }}
+    protocol: TCP
+    targetPort: {{ tuple "ceph_mgr" "internal" "mgr" . | include "helm-toolkit.endpoints.endpoint_port_lookup" }}
+  {{ if (has "prometheus" .Values.ceph_mgr_enabled_modules) }}
+  - name: metrics
+    protocol: TCP
+    port: {{ tuple "ceph_mgr" "internal" "metrics" . | include "helm-toolkit.endpoints.endpoint_port_lookup" }}
+  {{ end }}
+  selector:
+{{ tuple $envAll "ceph" "mgr" | include "helm-toolkit.snippets.kubernetes_metadata_labels" | indent 4 }}
+{{- end }}
diff --git a/ceph-client/values.yaml b/ceph-client/values.yaml
new file mode 100644
index 0000000000..218cb48796
--- /dev/null
+++ b/ceph-client/values.yaml
@@ -0,0 +1,374 @@
+# Copyright 2017 The Openstack-Helm Authors.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#     http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+# Default values for ceph-client.
+# This is a YAML-formatted file.
+# Declare name/value pairs to be passed into your templates.
+# name: value
+
+deployment:
+  ceph: true
+
+release_group: null
+
+images:
+  pull_policy: IfNotPresent
+  tags:
+    ceph_bootstrap: 'docker.io/ceph/daemon:tag-build-master-luminous-ubuntu-16.04'
+    ceph_config_helper: 'docker.io/port/ceph-config-helper:v1.10.3'
+    ceph_mds: 'docker.io/ceph/daemon:tag-build-master-luminous-ubuntu-16.04'
+    ceph_mgr: 'docker.io/ceph/daemon:tag-build-master-luminous-ubuntu-16.04'
+    ceph_rbd_pool: 'docker.io/port/ceph-config-helper:v1.10.3'
+    dep_check: 'quay.io/stackanetes/kubernetes-entrypoint:v0.3.1'
+    image_repo_sync: docker.io/docker:17.07.0
+  local_registry:
+    active: false
+    exclude:
+      - dep_check
+      - image_repo_sync
+
+labels:
+  job:
+    node_selector_key: openstack-control-plane
+    node_selector_value: enabled
+  provisioner:
+    node_selector_key: openstack-control-plane
+    node_selector_value: enabled
+  mds:
+    node_selector_key: ceph-mds
+    node_selector_value: enabled
+  mgr:
+    node_selector_key: ceph-mgr
+    node_selector_value: enabled
+
+pod:
+  dns_policy: "ClusterFirstWithHostNet"
+  replicas:
+    mds: 2
+    mgr: 2
+  affinity:
+    anti:
+      type:
+        default: preferredDuringSchedulingIgnoredDuringExecution
+      topologyKey:
+        default: kubernetes.io/hostname
+  resources:
+    enabled: false
+    mds:
+      requests:
+        memory: "10Mi"
+        cpu: "250m"
+      limits:
+        memory: "50Mi"
+        cpu: "500m"
+    mgr:
+      requests:
+        memory: "5Mi"
+        cpu: "250m"
+      limits:
+        memory: "50Mi"
+        cpu: "500m"
+    jobs:
+      bootstrap:
+        limits:
+          memory: "1024Mi"
+          cpu: "2000m"
+        requests:
+          memory: "128Mi"
+          cpu: "500m"
+      image_repo_sync:
+        requests:
+          memory: "128Mi"
+          cpu: "100m"
+        limits:
+          memory: "1024Mi"
+          cpu: "2000m"
+
+secrets:
+  keyrings:
+    mon: ceph-mon-keyring
+    mds: ceph-bootstrap-mds-keyring
+    osd: ceph-bootstrap-osd-keyring
+    rgw: ceph-bootstrap-rgw-keyring
+    mgr: ceph-bootstrap-mgr-keyring
+    admin: ceph-client-admin-keyring
+
+network:
+  public: 192.168.0.0/16
+  cluster: 192.168.0.0/16
+
+conf:
+  features:
+    mds: true
+    mgr: true
+  pool:
+  #NOTE(portdirect): this drives a simple approximation of
+  # https://ceph.com/pgcalc/. The `target.osd` key should be set to match the
+  # expected number of OSDs in a cluster, and `target.pg_per_osd` to the
+  # desired number of placement groups per OSD.
+    crush:
+      #NOTE(portdirect): to use RBD devices with Ubuntu 16.04's 4.4.x series
+      # kernel this should be set to `hammer`
+      tunables: null
+    target:
+      #NOTE(portdirect): we arbitrarily set the default number of expected OSDs
+      # to 5 to match the number of nodes in the OSH gate.
+      osd: 5
+      pg_per_osd: 100
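+    # For example, with `osd: 5` and `pg_per_osd: 100`, the rbd pool defined
+    # below (40% of the data, replication 3) ends up with 64 placement groups;
+    # see pool-calc.py for the exact rounding.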
+    default:
+      #NOTE(portdirect): this should be 'same_host' for a single node
+      # cluster to be in a healthy state
+      crush_rule: replicated_rule
+    #NOTE(portdirect): this section describes the pools that will be managed by
+    # the ceph pool management job, which tunes the pg counts and crush rule
+    # based on the settings above.
+    spec:
+      # RBD pool
+      - name: rbd
+        application: rbd
+        replication: 3
+        percent_total_data: 40
+      # CephFS pools
+      - name: cephfs_metadata
+        application: cephfs
+        replication: 3
+        percent_total_data: 5
+      - name: cephfs_data
+        application: cephfs
+        replication: 3
+        percent_total_data: 10
+      # RadosGW pools
+      - name: .rgw.root
+        application: rgw
+        replication: 3
+        percent_total_data: 0.1
+      - name: default.rgw.control
+        application: rgw
+        replication: 3
+        percent_total_data: 0.1
+      - name: default.rgw.data.root
+        application: rgw
+        replication: 3
+        percent_total_data: 0.1
+      - name: default.rgw.gc
+        application: rgw
+        replication: 3
+        percent_total_data: 0.1
+      - name: default.rgw.log
+        application: rgw
+        replication: 3
+        percent_total_data: 0.1
+      - name: default.rgw.intent-log
+        application: rgw
+        replication: 3
+        percent_total_data: 0.1
+      - name: default.rgw.meta
+        application: rgw
+        replication: 3
+        percent_total_data: 0.1
+      - name: default.rgw.usage
+        application: rgw
+        replication: 3
+        percent_total_data: 0.1
+      - name: default.rgw.users.keys
+        application: rgw
+        replication: 3
+        percent_total_data: 0.1
+      - name: default.rgw.users.email
+        application: rgw
+        replication: 3
+        percent_total_data: 0.1
+      - name: default.rgw.users.swift
+        application: rgw
+        replication: 3
+        percent_total_data: 0.1
+      - name: default.rgw.users.uid
+        application: rgw
+        replication: 3
+        percent_total_data: 0.1
+      - name: default.rgw.buckets.extra
+        application: rgw
+        replication: 3
+        percent_total_data: 0.1
+      - name: default.rgw.buckets.index
+        application: rgw
+        replication: 3
+        percent_total_data: 3
+      - name: default.rgw.buckets.data
+        application: rgw
+        replication: 3
+        percent_total_data: 34.8
+  ceph:
+    global:
+      # auth
+      cephx: true
+      cephx_require_signatures: false
+      cephx_cluster_require_signatures: true
+      cephx_service_require_signatures: false
+    osd:
+      osd_mkfs_type: xfs
+      osd_mkfs_options_xfs: -f -i size=2048
+      osd_max_object_name_len: 256
+      ms_bind_port_min: 6800
+      ms_bind_port_max: 7100
+
+dependencies:
+  dynamic:
+    common:
+      local_image_registry:
+        jobs:
+          - ceph-client-image-repo-sync
+        services:
+          - endpoint: node
+            service: local_image_registry
+  static:
+    bootstrap:
+      jobs: null
+      services:
+        - endpoint: internal
+          service: ceph_mon
+    cephfs_client_key_generator:
+      jobs: null
+    cephfs_provisioner:
+      jobs:
+        - ceph-rbd-pool
+      services:
+        - endpoint: internal
+          service: ceph_mon
+    mds:
+      jobs:
+        - ceph-storage-keys-generator
+        - ceph-mds-keyring-generator
+        - ceph-rbd-pool
+      services:
+        - endpoint: internal
+          service: ceph_mon
+    mgr:
+      jobs:
+        - ceph-storage-keys-generator
+        - ceph-mgr-keyring-generator
+      services:
+        - endpoint: internal
+          service: ceph_mon
+    namespace_client_key_cleaner:
+      jobs: null
+    namespace_client_key_generator:
+      jobs: null
+    rbd_pool:
+      services:
+        - endpoint: internal
+          service: ceph_mon
+    rbd_provisioner:
+      jobs:
+        - ceph-rbd-pool
+      services:
+        - endpoint: internal
+          service: ceph_mon
+    image_repo_sync:
+      services:
+        - endpoint: internal
+          service: local_image_registry
+
+bootstrap:
+  enabled: false
+  script: |
+    ceph -s
+    function ensure_pool () {
+      ceph osd pool stats $1 || ceph osd pool create $1 $2
+      local test_luminous=$(ceph tell osd.* version | egrep -c "12.2|luminous" | xargs echo)
+      if [[ ${test_luminous} -gt 0 ]]; then
+        ceph osd pool application enable $1 $3
+      fi
+    }
+    #ensure_pool volumes 8 cinder
+
+# The mgr modules to enable.
+# For a list of available modules:
+#  http://docs.ceph.com/docs/master/mgr/
+# This overrides mgr_initial_modules (default: restful, status).
+# Any module not listed here will be disabled.
+ceph_mgr_enabled_modules:
+  - restful
+  - status
+  - prometheus
+
+# You can configure your mgr modules
+# below. Each module has its own set of
+# key/value pairs. Refer to the docs
+# above for more info. For example:
+#ceph_mgr_modules_config:
+#  dashboard:
+#    port: 7000
+#  localpool:
+#    failure_domain: host
+#    subtree: rack
+#    pg_num: "128"
+#    num_rep: "3"
+#    min_size: "2"
+
+endpoints:
+  cluster_domain_suffix: cluster.local
+  local_image_registry:
+    name: docker-registry
+    namespace: docker-registry
+    hosts:
+      default: localhost
+      internal: docker-registry
+      node: localhost
+    host_fqdn_override:
+      default: null
+    port:
+      registry:
+        node: 5000
+  ceph_mon:
+    namespace: null
+    hosts:
+      default: ceph-mon
+      discovery: ceph-mon-discovery
+    host_fqdn_override:
+      default: null
+    port:
+      mon:
+        default: 6789
+  ceph_mgr:
+    namespace: null
+    hosts:
+      default: ceph-mgr
+    host_fqdn_override:
+      default: null
+    port:
+      mgr:
+        default: 7000
+      metrics:
+        default: 9283
+    scheme:
+      default: http
+
+monitoring:
+  prometheus:
+    enabled: true
+    ceph_mgr:
+      scrape: true
+      port: 9283
+
+manifests:
+  configmap_bin: true
+  configmap_etc: true
+  deployment_mds: true
+  deployment_mgr: true
+  job_bootstrap: false
+  job_cephfs_client_key: true
+  job_image_repo_sync: true
+  job_rbd_pool: true
+  service_mgr: true
diff --git a/ceph-mon/Chart.yaml b/ceph-mon/Chart.yaml
new file mode 100644
index 0000000000..ba425831b1
--- /dev/null
+++ b/ceph-mon/Chart.yaml
@@ -0,0 +1,18 @@
+# Copyright 2017 The Openstack-Helm Authors.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#     http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+apiVersion: v1
+description: OpenStack-Helm Ceph Mon
+name: ceph-mon
+version: 0.1.0
diff --git a/ceph-mon/requirements.yaml b/ceph-mon/requirements.yaml
new file mode 100644
index 0000000000..53782e69b2
--- /dev/null
+++ b/ceph-mon/requirements.yaml
@@ -0,0 +1,18 @@
+# Copyright 2017 The Openstack-Helm Authors.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#     http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+dependencies:
+  - name: helm-toolkit
+    repository: http://localhost:8879/charts
+    version: 0.1.0
diff --git a/ceph-mon/templates/bin/_bootstrap.sh.tpl b/ceph-mon/templates/bin/_bootstrap.sh.tpl
new file mode 100644
index 0000000000..533c0a5a3f
--- /dev/null
+++ b/ceph-mon/templates/bin/_bootstrap.sh.tpl
@@ -0,0 +1,20 @@
+#!/bin/bash
+
+{{/*
+Copyright 2017 The Openstack-Helm Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+   http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/}}
+
+set -ex
+{{ .Values.bootstrap.script | default "echo 'Not Enabled'" }}
diff --git a/ceph-mon/templates/bin/_init-dirs.sh.tpl b/ceph-mon/templates/bin/_init-dirs.sh.tpl
new file mode 100644
index 0000000000..5128888bab
--- /dev/null
+++ b/ceph-mon/templates/bin/_init-dirs.sh.tpl
@@ -0,0 +1,46 @@
+#!/bin/bash
+
+{{/*
+Copyright 2017 The Openstack-Helm Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+   http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/}}
+
+set -ex
+export LC_ALL=C
+: "${HOSTNAME:=$(uname -n)}"
+: "${MGR_NAME:=${HOSTNAME}}"
+: "${MDS_NAME:=mds-${HOSTNAME}}"
+: "${MDS_BOOTSTRAP_KEYRING:=/var/lib/ceph/bootstrap-mds/${CLUSTER}.keyring}"
+: "${OSD_BOOTSTRAP_KEYRING:=/var/lib/ceph/bootstrap-osd/${CLUSTER}.keyring}"
+
+for keyring in ${OSD_BOOTSTRAP_KEYRING} ${MDS_BOOTSTRAP_KEYRING} ; do
+  mkdir -p "$(dirname "$keyring")"
+done
+
+# Let's create the ceph directories
+for DIRECTORY in mon osd mds radosgw tmp mgr; do
+  mkdir -p "/var/lib/ceph/${DIRECTORY}"
+done
+
+# Create socket directory
+mkdir -p /run/ceph
+
+# Create the MDS directory
+mkdir -p "/var/lib/ceph/mds/${CLUSTER}-${MDS_NAME}"
+
+# Create the MGR directory
+mkdir -p "/var/lib/ceph/mgr/${CLUSTER}-${MGR_NAME}"
+
+# Adjust the owner of all those directories
+chown -R ceph. /run/ceph/ /var/lib/ceph/*
diff --git a/ceph-mon/templates/bin/keys/_bootstrap-keyring-generator.py.tpl b/ceph-mon/templates/bin/keys/_bootstrap-keyring-generator.py.tpl
new file mode 100644
index 0000000000..a0a279c7b2
--- /dev/null
+++ b/ceph-mon/templates/bin/keys/_bootstrap-keyring-generator.py.tpl
@@ -0,0 +1,14 @@
+#!/usr/bin/env python
+import os
+import struct
+import time
+import base64
+key = os.urandom(16)
+header = struct.pack(
+    '<hiih',
+    1,                 # le16 type: CEPH_CRYPTO_AES
+    int(time.time()),  # le32 created: seconds
+    0,                 # le32 created: nanoseconds,
+    len(key),          # le16: len(key)
+)
+print(base64.b64encode(header + key).decode('ascii'))
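+
+# The printed value is a base64-encoded CephX secret: a 12-byte header (type
+# CEPH_CRYPTO_AES, creation time, key length) followed by the 16 random key
+# bytes. The keyring manager scripts substitute it into the keyring templates'
+# key placeholder when building the Kubernetes secrets.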
diff --git a/ceph-mon/templates/bin/keys/_bootstrap-keyring-manager.sh.tpl b/ceph-mon/templates/bin/keys/_bootstrap-keyring-manager.sh.tpl
new file mode 100644
index 0000000000..78d6cfdd5a
--- /dev/null
+++ b/ceph-mon/templates/bin/keys/_bootstrap-keyring-manager.sh.tpl
@@ -0,0 +1,61 @@
+#!/bin/bash
+
+{{/*
+Copyright 2017 The Openstack-Helm Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+   http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/}}
+
+set -ex
+
+{{ if .Release.IsInstall }}
+
+function ceph_gen_key () {
+  python ${CEPH_GEN_DIR}/keys-bootstrap-keyring-generator.py
+}
+
+function kube_ceph_keyring_gen () {
+  CEPH_KEY=$1
+  CEPH_KEY_TEMPLATE=$2
+  sed "s|{{"{{"}} key {{"}}"}}|${CEPH_KEY}|" ${CEPH_TEMPLATES_DIR}/${CEPH_KEY_TEMPLATE} | base64 -w0 | tr -d '\n'
+}
+
+function create_kube_key () {
+  CEPH_KEYRING=$1
+  CEPH_KEYRING_NAME=$2
+  CEPH_KEYRING_TEMPLATE=$3
+  KUBE_SECRET_NAME=$4
+  if ! kubectl get --namespace ${DEPLOYMENT_NAMESPACE} secrets ${KUBE_SECRET_NAME}; then
+    {
+      cat <<EOF
+---
+apiVersion: v1
+kind: Secret
+metadata:
+  name: ${KUBE_SECRET_NAME}
+type: Opaque
+data:
+  ${CEPH_KEYRING_NAME}: $( kube_ceph_keyring_gen ${CEPH_KEYRING} ${CEPH_KEYRING_TEMPLATE} )
+EOF
+    } | kubectl apply --namespace ${DEPLOYMENT_NAMESPACE} -f -
+  fi
+}
+
+# Usage: create_kube_key <ceph_key> <ceph_keyring_name> <ceph_keyring_template> <kube_secret_name>
+create_kube_key $(ceph_gen_key) ${CEPH_KEYRING_NAME} ${CEPH_KEYRING_TEMPLATE} ${KUBE_SECRET_NAME}
+
+{{ else }}
+
+echo "Not touching ${KUBE_SECRET_NAME} as this is not the initial deployment"
+
+{{- end -}}
diff --git a/ceph-mon/templates/bin/keys/_storage-keyring-manager.sh.tpl b/ceph-mon/templates/bin/keys/_storage-keyring-manager.sh.tpl
new file mode 100644
index 0000000000..9521b36837
--- /dev/null
+++ b/ceph-mon/templates/bin/keys/_storage-keyring-manager.sh.tpl
@@ -0,0 +1,84 @@
+#!/bin/bash
+
+{{/*
+Copyright 2017 The Openstack-Helm Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+   http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/}}
+
+set -ex
+{{ if .Release.IsInstall }}
+
+function ceph_gen_key () {
+  python ${CEPH_GEN_DIR}/keys-bootstrap-keyring-generator.py
+}
+
+function kube_ceph_keyring_gen () {
+  CEPH_KEY=$1
+  CEPH_KEY_TEMPLATE=$2
+  sed "s|{{"{{"}} key {{"}}"}}|${CEPH_KEY}|" ${CEPH_TEMPLATES_DIR}/${CEPH_KEY_TEMPLATE} | base64 -w0 | tr -d '\n'
+}
+
+CEPH_CLIENT_KEY=$(ceph_gen_key)
+
+function create_kube_key () {
+  CEPH_KEYRING=$1
+  CEPH_KEYRING_NAME=$2
+  CEPH_KEYRING_TEMPLATE=$3
+  KUBE_SECRET_NAME=$4
+
+  if ! kubectl get --namespace ${DEPLOYMENT_NAMESPACE} secrets ${KUBE_SECRET_NAME}; then
+    {
+      cat <<EOF
+---
+apiVersion: v1
+kind: Secret
+metadata:
+  name: ${KUBE_SECRET_NAME}
+type: Opaque
+data:
+  ${CEPH_KEYRING_NAME}: $( kube_ceph_keyring_gen ${CEPH_KEYRING} ${CEPH_KEYRING_TEMPLATE} )
+EOF
+    } | kubectl apply --namespace ${DEPLOYMENT_NAMESPACE} -f -
+  fi
+}
+# Usage: create_kube_key <ceph_key> <ceph_keyring_name> <ceph_keyring_template> <kube_secret_name>
+create_kube_key ${CEPH_CLIENT_KEY} ${CEPH_KEYRING_NAME} ${CEPH_KEYRING_TEMPLATE} ${CEPH_KEYRING_ADMIN_NAME}
+
+function create_kube_storage_key () {
+  CEPH_KEYRING=$1
+  KUBE_SECRET_NAME=$2
+
+  if ! kubectl get --namespace ${DEPLOYMENT_NAMESPACE} secrets ${KUBE_SECRET_NAME}; then
+    {
+      cat <<EOF
+---
+apiVersion: v1
+kind: Secret
+metadata:
+  name: ${KUBE_SECRET_NAME}
+type: kubernetes.io/rbd
+data:
+  key: $( echo ${CEPH_KEYRING} | base64 | tr -d '\n' )
+EOF
+    } | kubectl apply --namespace ${DEPLOYMENT_NAMESPACE} -f -
+  fi
+}
+# Usage: create_kube_storage_key <ceph_key> <kube_secret_name>
+create_kube_storage_key ${CEPH_CLIENT_KEY} ${CEPH_STORAGECLASS_ADMIN_SECRET_NAME}
+
+{{ else }}
+
+echo "Not touching ${KUBE_SECRET_NAME} as this is not the initial deployment"
+
+{{ end }}
diff --git a/ceph-mon/templates/bin/mon/_check.sh.tpl b/ceph-mon/templates/bin/mon/_check.sh.tpl
new file mode 100644
index 0000000000..e494540d9b
--- /dev/null
+++ b/ceph-mon/templates/bin/mon/_check.sh.tpl
@@ -0,0 +1,63 @@
+#!/bin/bash
+
+{{/*
+Copyright 2017 The Openstack-Helm Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+   http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/}}
+
+set -ex
+COMMAND="${@:-liveness}"
+: ${K8S_HOST_NETWORK:=0}
+
+function health_check () {
+  SOCKDIR=${CEPH_SOCKET_DIR:-/run/ceph}
+  SBASE=${CEPH_OSD_SOCKET_BASE:-ceph-mon}
+  SSUFFIX=${CEPH_SOCKET_SUFFIX:-asok}
+
+  # Derive the mon name from the running ceph-mon process (-i <name>),
+  # falling back to the pod or node name if no daemon has started yet.
+  MON_ID=$(ps auwwx | grep ceph-mon | grep -v grep | sed 's/.*-i\ //;s/\ .*//' | awk '{print $1}')
+
+  if [ -n "${MON_ID}" ]; then
+    MON_NAME=${MON_ID}
+  else
+    if [[ ${K8S_HOST_NETWORK} -eq 0 ]]; then
+        MON_NAME=${POD_NAME}
+    else
+        MON_NAME=${NODE_NAME}
+    fi
+  fi
+
+  MON_SOCK="${SOCKDIR}/${SBASE}.${MON_NAME}.${SSUFFIX}"
+  if [ -S "${MON_SOCK}" ]; then
+   MON_STATE=$(ceph -f json-pretty --connect-timeout 1 --admin-daemon "${MON_SOCK}" mon_status | grep state | sed 's/.*://;s/[^a-z]//g')
+   echo "MON ${MON_NAME} ${MON_STATE}";
+   # This may be a stricter check than we actually want; other values of the
+   # mon "state" field might also indicate a healthy daemon.
+   for S in ${MON_LIVE_STATE}; do
+    if [ "x${MON_STATE}x" = "x${S}x" ]; then
+     exit 0
+    fi
+   done
+  fi
+  # if we made it this far, things are not running
+  exit 1
+}
+
+function liveness () {
+  MON_LIVE_STATE="probing electing synchronizing leader peon"
+  health_check
+}
+
+function readiness () {
+  MON_LIVE_STATE="leader peon"
+  health_check
+}
+
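+# Dispatch to the requested check; the liveness and readiness probes defined in
+# daemonset-mon.yaml invoke this script with "liveness" or "readiness".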
+$COMMAND
diff --git a/ceph-mon/templates/bin/mon/_fluentbit-sidecar.sh.tpl b/ceph-mon/templates/bin/mon/_fluentbit-sidecar.sh.tpl
new file mode 100644
index 0000000000..f72e41de16
--- /dev/null
+++ b/ceph-mon/templates/bin/mon/_fluentbit-sidecar.sh.tpl
@@ -0,0 +1,19 @@
+#!/bin/sh
+{{/*
+Copyright 2017 The Openstack-Helm Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+   http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/}}
+set -ex
+
+exec /fluent-bit/bin/fluent-bit -c /fluent-bit/etc/fluent-bit.conf
diff --git a/ceph-mon/templates/bin/mon/_start.sh.tpl b/ceph-mon/templates/bin/mon/_start.sh.tpl
new file mode 100644
index 0000000000..ad2acc2c4c
--- /dev/null
+++ b/ceph-mon/templates/bin/mon/_start.sh.tpl
@@ -0,0 +1,106 @@
+#!/bin/bash
+set -ex
+export LC_ALL=C
+: "${K8S_HOST_NETWORK:=0}"
+: "${MON_KEYRING:=/etc/ceph/${CLUSTER}.mon.keyring}"
+: "${ADMIN_KEYRING:=/etc/ceph/${CLUSTER}.client.admin.keyring}"
+: "${MDS_BOOTSTRAP_KEYRING:=/var/lib/ceph/bootstrap-mds/${CLUSTER}.keyring}"
+: "${OSD_BOOTSTRAP_KEYRING:=/var/lib/ceph/bootstrap-osd/${CLUSTER}.keyring}"
+
+if [[ -z "$CEPH_PUBLIC_NETWORK" ]]; then
+  echo "ERROR- CEPH_PUBLIC_NETWORK must be defined as the name of the network for the OSDs"
+  exit 1
+fi
+
+if [[ -z "$MON_IP" ]]; then
+  echo "ERROR- MON_IP must be defined as the IP address of the monitor"
+  exit 1
+fi
+
+if [[ ${K8S_HOST_NETWORK} -eq 0 ]]; then
+    MON_NAME=${POD_NAME}
+else
+    MON_NAME=${NODE_NAME}
+fi
+MON_DATA_DIR="/var/lib/ceph/mon/${CLUSTER}-${MON_NAME}"
+MONMAP="/etc/ceph/monmap-${CLUSTER}"
+
+# Make the monitor directory
+su -s /bin/sh -c "mkdir -p \"${MON_DATA_DIR}\"" ceph
+
+function get_mon_config {
+  # Get fsid from ceph.conf
+  local fsid=$(ceph-conf --lookup fsid -c /etc/ceph/${CLUSTER}.conf)
+
+  timeout=10
+  MONMAP_ADD=""
+
+  while [[ -z "${MONMAP_ADD// }" && "${timeout}" -gt 0 ]]; do
+    # Get the ceph mon pods (name and IP) from the Kubernetes API. Formatted as a set of monmap params
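+    # e.g. MONMAP_ADD="--add ceph-mon-abcde 10.0.0.1:6789 --add ceph-mon-fghij 10.0.0.2:6789"
+    # (the pod names and IPs above are illustrative only)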
+    if [[ ${K8S_HOST_NETWORK} -eq 0 ]]; then
+        MONMAP_ADD=$(kubectl get pods --namespace=${NAMESPACE} ${KUBECTL_PARAM} -o template --template="{{`{{range .items}}`}}{{`{{if .status.podIP}}`}}--add {{`{{.metadata.name}}`}} {{`{{.status.podIP}}`}}:${MON_PORT} {{`{{end}}`}} {{`{{end}}`}}")
+    else
+        MONMAP_ADD=$(kubectl get pods --namespace=${NAMESPACE} ${KUBECTL_PARAM} -o template --template="{{`{{range .items}}`}}{{`{{if .status.podIP}}`}}--add {{`{{.spec.nodeName}}`}} {{`{{.status.podIP}}`}}:${MON_PORT} {{`{{end}}`}} {{`{{end}}`}}")
+    fi
+    (( timeout-- ))
+    sleep 1
+  done
+
+  if [[ -z "${MONMAP_ADD// }" ]]; then
+      exit 1
+  fi
+
+  # if monmap exists and the mon is already there, don't overwrite monmap
+  if [ -f "${MONMAP}" ]; then
+      # Guard the grep inside the if so a miss does not trip "set -e"
+      if monmaptool --print "${MONMAP}" | grep -q "${MON_IP// }:${MON_PORT}"; then
+          echo "${MON_IP} already exists in monmap ${MONMAP}"
+          return
+      fi
+  fi
+
+  # Create a monmap with the Pod Names and IP
+  monmaptool --create ${MONMAP_ADD} --fsid ${fsid} ${MONMAP} --clobber
+}
+
+get_mon_config
+
+# If we don't have a monitor keyring, this is a new monitor
+if [ ! -e "${MON_DATA_DIR}/keyring" ]; then
+  if [ ! -e ${MON_KEYRING}.seed ]; then
+    echo "ERROR- ${MON_KEYRING}.seed must exist. You can extract it from your current monitor by running 'ceph auth get mon. -o ${MON_KEYRING}' or use a KV Store"
+    exit 1
+  else
+    cp -vf ${MON_KEYRING}.seed ${MON_KEYRING}
+  fi
+
+  if [ ! -e ${MONMAP} ]; then
+    echo "ERROR- ${MONMAP} must exist. You can extract it from your current monitor by running 'ceph mon getmap -o ${MONMAP}' or use a KV Store"
+    exit 1
+  fi
+
+  # Test whether this is not the first monitor; if one keyring doesn't exist we assume none of them exist
+  for KEYRING in ${OSD_BOOTSTRAP_KEYRING} ${MDS_BOOTSTRAP_KEYRING} ${ADMIN_KEYRING}; do
+    ceph-authtool ${MON_KEYRING} --import-keyring ${KEYRING}
+  done
+
+  # Prepare the monitor daemon's directory with the map and keyring
+  ceph-mon --setuser ceph --setgroup ceph --cluster "${CLUSTER}" --mkfs -i ${MON_NAME} --inject-monmap ${MONMAP} --keyring ${MON_KEYRING} --mon-data "${MON_DATA_DIR}"
+else
+  echo "Trying to get the most recent monmap..."
+  # Ignore a timeout here; in most cases it means the cluster has no quorum,
+  # or no mons are up and running yet
+  timeout 5 ceph --cluster "${CLUSTER}" mon getmap -o ${MONMAP} || true
+  ceph-mon --setuser ceph --setgroup ceph --cluster "${CLUSTER}" -i ${MON_NAME} --inject-monmap ${MONMAP} --keyring ${MON_KEYRING} --mon-data "${MON_DATA_DIR}"
+  timeout 7 ceph --cluster "${CLUSTER}" mon add "${MON_NAME}" "${MON_IP}:${MON_PORT}" || true
+fi
+
+# start MON
+exec /usr/bin/ceph-mon \
+  --cluster "${CLUSTER}" \
+  --setuser "ceph" \
+  --setgroup "ceph" \
+  -d \
+  -i ${MON_NAME} \
+  --mon-data "${MON_DATA_DIR}" \
+  --public-addr "${MON_IP}:${MON_PORT}"
diff --git a/ceph-mon/templates/bin/mon/_stop.sh.tpl b/ceph-mon/templates/bin/mon/_stop.sh.tpl
new file mode 100644
index 0000000000..8e4a3d59bb
--- /dev/null
+++ b/ceph-mon/templates/bin/mon/_stop.sh.tpl
@@ -0,0 +1,14 @@
+#!/bin/bash
+
+set -ex
+
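+# Invoked by the pod preStop hook: remove this mon from the cluster unless it
+# is the last remaining monitor.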
+NUMBER_OF_MONS=$(ceph mon stat | awk '$3 == "mons" {print $2}')
+if [ "${NUMBER_OF_MONS}" -gt "1" ]; then
+  if [[ ${K8S_HOST_NETWORK} -eq 0 ]]; then
+      ceph mon remove "${POD_NAME}"
+  else
+      ceph mon remove "${NODE_NAME}"
+  fi
+else
+  echo "we are the last mon, not removing"
+fi
diff --git a/ceph-mon/templates/bin/moncheck/_reap-zombies.py.tpl b/ceph-mon/templates/bin/moncheck/_reap-zombies.py.tpl
new file mode 100644
index 0000000000..546f20c1fd
--- /dev/null
+++ b/ceph-mon/templates/bin/moncheck/_reap-zombies.py.tpl
@@ -0,0 +1,50 @@
+#!/usr/bin/python2
+import re
+import os
+import subprocess
+import json
+
+MON_REGEX = r"^\d: ([0-9\.]*):\d+/\d* mon.([^ ]*)$"
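+# Example monmap line this regex is expected to match (illustrative values):
+#   0: 10.0.0.1:6789/0 mon.ceph-mon-abcde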
+if int(os.getenv('K8S_HOST_NETWORK', 0)) > 0:
+    kubectl_command = 'kubectl get pods --namespace=${NAMESPACE} -l component=mon,application=ceph -o template --template="{ {{"{{"}}range  \$i, \$v  := .items{{"}}"}} {{"{{"}} if \$i{{"}}"}} , {{"{{"}} end {{"}}"}} \\"{{"{{"}}\$v.spec.nodeName{{"}}"}}\\": \\"{{"{{"}}\$v.status.podIP{{"}}"}}\\" {{"{{"}}end{{"}}"}} }"'
+else:
+    kubectl_command = 'kubectl get pods --namespace=${NAMESPACE} -l component=mon,application=ceph -o template --template="{ {{"{{"}}range  \$i, \$v  := .items{{"}}"}} {{"{{"}} if \$i{{"}}"}} , {{"{{"}} end {{"}}"}} \\"{{"{{"}}\$v.metadata.name{{"}}"}}\\": \\"{{"{{"}}\$v.status.podIP{{"}}"}}\\" {{"{{"}}end{{"}}"}} }"'
+
+monmap_command = "ceph --cluster=${NAMESPACE} mon getmap > /tmp/monmap && monmaptool -f /tmp/monmap --print"
+
+
+def extract_mons_from_monmap():
+    monmap = subprocess.check_output(monmap_command, shell=True)
+    mons = {}
+    for line in monmap.split("\n"):
+        m = re.match(MON_REGEX, line)
+        if m is not None:
+            mons[m.group(2)] = m.group(1)
+    return mons
+
+def extract_mons_from_kubeapi():
+    kubemap = subprocess.check_output(kubectl_command, shell=True)
+    return json.loads(kubemap)
+
+current_mons = extract_mons_from_monmap()
+expected_mons = extract_mons_from_kubeapi()
+
+print "current mons:", current_mons
+print "expected mons:", expected_mons
+
+removed_mon = False
+for mon in current_mons:
+    if mon not in expected_mons:
+        print "removing zombie mon ", mon
+        subprocess.call(["ceph", "--cluster", os.environ["NAMESPACE"], "mon", "remove", mon])
+        removed_mon = True
+    elif current_mons[mon] != expected_mons[mon]:  # the mon's IP changed for some reason
+        print "ip change detected for pod ", mon
+        subprocess.call(["kubectl", "--namespace", os.environ["NAMESPACE"], "delete", "pod", mon])
+        removed_mon = True
+        print "deleted mon %s via the kubernetes api" % mon
+
+
+if not removed_mon:
+    print "no zombie mons found ..."
diff --git a/ceph-mon/templates/bin/moncheck/_start.sh.tpl b/ceph-mon/templates/bin/moncheck/_start.sh.tpl
new file mode 100644
index 0000000000..25d4159f73
--- /dev/null
+++ b/ceph-mon/templates/bin/moncheck/_start.sh.tpl
@@ -0,0 +1,14 @@
+#!/bin/bash
+set -ex
+export LC_ALL=C
+
+function watch_mon_health {
+  while true; do
+    echo "checking for zombie mons"
+    /tmp/moncheck-reap-zombies.py || true
+    echo "sleep 30 sec"
+    sleep 30
+  done
+}
+
+watch_mon_health
diff --git a/ceph-mon/templates/configmap-bin.yaml b/ceph-mon/templates/configmap-bin.yaml
new file mode 100644
index 0000000000..e9945bf580
--- /dev/null
+++ b/ceph-mon/templates/configmap-bin.yaml
@@ -0,0 +1,61 @@
+{{/*
+Copyright 2017 The Openstack-Helm Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+   http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/}}
+
+{{- if and .Values.manifests.configmap_bin .Values.deployment.ceph }}
+{{- $envAll := . }}
+---
+apiVersion: v1
+kind: ConfigMap
+metadata:
+  name: ceph-mon-bin
+data:
+{{- if .Values.images.local_registry.active }}
+  image-repo-sync.sh: |
+{{- include "helm-toolkit.scripts.image_repo_sync" . | indent 4 }}
+{{- end }}
+
+{{- if .Values.bootstrap.enabled }}
+  bootstrap.sh: |
+{{ tuple "bin/_bootstrap.sh.tpl" . | include "helm-toolkit.utils.template" | indent 4 }}
+{{- end }}
+
+  init-dirs.sh: |
+{{ tuple "bin/_init-dirs.sh.tpl" . | include "helm-toolkit.utils.template" | indent 4 }}
+
+  keys-bootstrap-keyring-generator.py: |
+{{ tuple "bin/keys/_bootstrap-keyring-generator.py.tpl" . | include "helm-toolkit.utils.template" | indent 4 }}
+  keys-bootstrap-keyring-manager.sh: |
+{{ tuple "bin/keys/_bootstrap-keyring-manager.sh.tpl" . | include "helm-toolkit.utils.template" | indent 4 }}
+  keys-storage-keyring-manager.sh: |
+{{ tuple "bin/keys/_storage-keyring-manager.sh.tpl" . | include "helm-toolkit.utils.template" | indent 4 }}
+
+  mon-start.sh: |
+{{ tuple "bin/mon/_start.sh.tpl" . | include "helm-toolkit.utils.template" | indent 4 }}
+  mon-stop.sh: |
+{{ tuple "bin/mon/_stop.sh.tpl" . | include "helm-toolkit.utils.template" | indent 4 }}
+  mon-check.sh: |
+{{ tuple "bin/mon/_check.sh.tpl" . | include "helm-toolkit.utils.template" | indent 4 }}
+
+  moncheck-start.sh: |
+{{ tuple "bin/moncheck/_start.sh.tpl" . | include "helm-toolkit.utils.template" | indent 4 }}
+  moncheck-reap-zombies.py: |
+{{ tuple "bin/moncheck/_reap-zombies.py.tpl" . | include "helm-toolkit.utils.template" | indent 4 }}
+
+{{ if .Values.logging.fluentd }}
+  fluentbit-sidecar.sh: |
+{{ tuple "bin/mon/_fluentbit-sidecar.sh.tpl" . | include "helm-toolkit.utils.template" | indent 4 }}
+{{ end }}
+{{- end }}
diff --git a/ceph-mon/templates/configmap-etc.yaml b/ceph-mon/templates/configmap-etc.yaml
new file mode 100644
index 0000000000..ac4b1e7b03
--- /dev/null
+++ b/ceph-mon/templates/configmap-etc.yaml
@@ -0,0 +1,73 @@
+{{/*
+Copyright 2017 The Openstack-Helm Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+   http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/}}
+
+{{- define "ceph.configmap.etc" }}
+{{- $configMapName := index . 0 }}
+{{- $envAll := index . 1 }}
+{{- with $envAll }}
+
+{{- if .Values.deployment.ceph }}
+
+{{- if empty .Values.conf.ceph.global.mon_host -}}
+{{- $monHost := tuple "ceph_mon" "discovery" "mon" . | include "helm-toolkit.endpoints.host_and_port_endpoint_uri_lookup" }}
+{{- $_ := $monHost | set .Values.conf.ceph.global "mon_host" -}}
+{{- end -}}
+
+{{- if empty .Values.conf.ceph.global.mon_addr -}}
+{{- $monPort := tuple "ceph_mon" "internal" "mon" . | include "helm-toolkit.endpoints.endpoint_port_lookup" }}
+{{- $_ := printf ":%s" $monPort | set .Values.conf.ceph.global "mon_addr" -}}
+{{- end -}}
+
+{{- if empty .Values.conf.ceph.global.fsid -}}
+{{- $_ := uuidv4 | set .Values.conf.ceph.global "fsid" -}}
+{{- end -}}
+
+{{- if empty .Values.conf.ceph.osd.cluster_network -}}
+{{- $_ := .Values.network.cluster | set .Values.conf.ceph.osd "cluster_network" -}}
+{{- end -}}
+
+{{- if empty .Values.conf.ceph.osd.public_network -}}
+{{- $_ := .Values.network.public | set .Values.conf.ceph.osd "public_network" -}}
+{{- end -}}
+
+{{- if not (has "fluentd_output" .Values.conf.fluentbit) -}}
+{{- $fluentd_host := tuple "fluentd" "internal" $envAll | include "helm-toolkit.endpoints.hostname_namespaced_endpoint_lookup" }}
+{{- $fluentd_port := tuple "fluentd" "internal" "service" $envAll | include "helm-toolkit.endpoints.endpoint_port_lookup" }}
+{{- $fluentd_output := dict "header" "output" "Name" "forward" "Match" "*" "Host" $fluentd_host "Port" $fluentd_port -}}
+{{- $_ := set .Values "__fluentbit_config" ( list $fluentd_output) -}}
+{{- $__fluentbit_config := append .Values.conf.fluentbit .Values.__fluentbit_config -}}
+{{- $_ := set .Values.conf "fluentbit" $__fluentbit_config -}}
+{{- end -}}
+---
+apiVersion: v1
+kind: ConfigMap
+metadata:
+  name: {{ $configMapName }}
+data:
+  ceph.conf: |
+{{ include "helm-toolkit.utils.to_ini" .Values.conf.ceph | indent 4 }}
+{{ if .Values.logging.fluentd }}
+  fluent-bit.conf: |
+{{ include "ceph-mon.utils.to_fluentbit_conf" .Values.conf.fluentbit | indent 4 }}
+  parsers.conf: |
+{{ include "ceph-mon.utils.to_fluentbit_conf" .Values.conf.parsers | indent 4 }}
+{{ end }}
+{{- end }}
+{{- end }}
+{{- end }}
+{{- if .Values.manifests.configmap_etc }}
+{{- list "ceph-mon-etc" . | include "ceph.configmap.etc" }}
+{{- end }}
diff --git a/ceph-mon/templates/configmap-templates.yaml b/ceph-mon/templates/configmap-templates.yaml
new file mode 100644
index 0000000000..43f4600537
--- /dev/null
+++ b/ceph-mon/templates/configmap-templates.yaml
@@ -0,0 +1,35 @@
+{{/*
+Copyright 2017 The Openstack-Helm Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+   http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/}}
+
+{{- if and .Values.manifests.configmap_templates .Values.deployment.storage_secrets }}
+{{- $envAll := . }}
+---
+apiVersion: v1
+kind: ConfigMap
+metadata:
+  name: ceph-templates
+data:
+  admin.keyring: |
+{{ .Values.conf.templates.keyring.admin | indent 4 }}
+  mon.keyring: |
+{{ .Values.conf.templates.keyring.mon | indent 4 }}
+  bootstrap.keyring.mds: |
+{{ .Values.conf.templates.keyring.bootstrap.mds | indent 4 }}
+  bootstrap.keyring.mgr: |
+{{ .Values.conf.templates.keyring.bootstrap.mgr | indent 4 }}
+  bootstrap.keyring.osd: |
+{{ .Values.conf.templates.keyring.bootstrap.osd | indent 4 }}
+{{- end }}
diff --git a/ceph-mon/templates/daemonset-mon.yaml b/ceph-mon/templates/daemonset-mon.yaml
new file mode 100644
index 0000000000..1b388172ae
--- /dev/null
+++ b/ceph-mon/templates/daemonset-mon.yaml
@@ -0,0 +1,238 @@
+{{/*
+Copyright 2017 The Openstack-Helm Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+   http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/}}
+
+{{- if and .Values.manifests.daemonset_mon .Values.deployment.ceph }}
+{{- $envAll := . }}
+
+{{- $serviceAccountName := "ceph-mon"}}
+{{ tuple $envAll "mon" $serviceAccountName | include "helm-toolkit.snippets.kubernetes_pod_rbac_serviceaccount" }}
+---
+apiVersion: rbac.authorization.k8s.io/v1beta1
+kind: Role
+metadata:
+  name: {{ $serviceAccountName }}
+rules:
+  - apiGroups:
+      - ""
+    resources:
+      - pods
+    verbs:
+      - get
+      - list
+---
+apiVersion: rbac.authorization.k8s.io/v1beta1
+kind: RoleBinding
+metadata:
+  name: {{ $serviceAccountName }}
+roleRef:
+  apiGroup: rbac.authorization.k8s.io
+  kind: Role
+  name: {{ $serviceAccountName }}
+subjects:
+  - kind: ServiceAccount
+    name: {{ $serviceAccountName }}
+    namespace: {{ $envAll.Release.Namespace }}
+---
+kind: DaemonSet
+apiVersion: apps/v1
+metadata:
+  name: ceph-mon
+  labels:
+{{ tuple $envAll "ceph" "mon" | include "helm-toolkit.snippets.kubernetes_metadata_labels" | indent 4 }}
+spec:
+  selector:
+    matchLabels:
+{{ tuple $envAll "ceph" "mon" | include "helm-toolkit.snippets.kubernetes_metadata_labels" | indent 6 }}
+  template:
+    metadata:
+      labels:
+{{ tuple $envAll "ceph" "mon" | include "helm-toolkit.snippets.kubernetes_metadata_labels" | indent 8 }}
+    spec:
+      serviceAccountName: {{ $serviceAccountName }}
+      nodeSelector:
+        {{ .Values.labels.mon.node_selector_key }}: {{ .Values.labels.mon.node_selector_value }}
+      hostNetwork: true
+      dnsPolicy: {{ .Values.pod.dns_policy }}
+      initContainers:
+{{ tuple $envAll "mon" list | include "helm-toolkit.snippets.kubernetes_entrypoint_init_container" | indent 8 }}
+        - name: ceph-init-dirs
+{{ tuple $envAll "ceph_mon" | include "helm-toolkit.snippets.image" | indent 10 }}
+          command:
+            - /tmp/init-dirs.sh
+          env:
+            - name: CLUSTER
+              value: "ceph"
+          volumeMounts:
+            - name: ceph-mon-bin
+              mountPath: /tmp/init-dirs.sh
+              subPath: init-dirs.sh
+              readOnly: true
+            - name: pod-var-lib-ceph
+              mountPath: /var/lib/ceph
+              readOnly: false
+            - name: pod-run
+              mountPath: /run
+              readOnly: false
+      containers:
+        - name: ceph-mon
+{{ tuple $envAll "ceph_mon" | include "helm-toolkit.snippets.image" | indent 10 }}
+{{ tuple $envAll $envAll.Values.pod.resources.mon | include "helm-toolkit.snippets.kubernetes_resources" | indent 10 }}
+          env:
+            - name: CLUSTER
+              value: "ceph"
+            - name: K8S_HOST_NETWORK
+              value: "1"
+            - name: MONMAP
+              value: /var/lib/ceph/mon/monmap
+            - name: NAMESPACE
+              valueFrom:
+                fieldRef:
+                  apiVersion: v1
+                  fieldPath: metadata.namespace
+            - name: CEPH_PUBLIC_NETWORK
+              value: {{ .Values.network.public | quote }}
+            - name: KUBECTL_PARAM
+              value: {{ tuple $envAll "ceph" "mon" | include "helm-toolkit.snippets.kubernetes_kubectl_params" | indent 10 }}
+            - name: MON_PORT
+              value: {{ tuple "ceph_mon" "internal" "mon" $envAll | include "helm-toolkit.endpoints.endpoint_port_lookup" | quote }}
+            - name: MON_IP
+              valueFrom:
+                fieldRef:
+                  fieldPath: status.podIP
+            - name: POD_NAME
+              valueFrom:
+                fieldRef:
+                  fieldPath: metadata.name
+            - name: NODE_NAME
+              valueFrom:
+                fieldRef:
+                  fieldPath: spec.nodeName
+          command:
+            - /tmp/mon-start.sh
+          lifecycle:
+            preStop:
+                exec:
+                  command:
+                    - /tmp/mon-stop.sh
+          ports:
+            - containerPort: {{ tuple "ceph_mon" "internal" "mon" $envAll | include "helm-toolkit.endpoints.endpoint_port_lookup" }}
+          livenessProbe:
+           exec:
+            command:
+             - /tmp/mon-check.sh
+             - liveness
+           initialDelaySeconds: 360
+           periodSeconds: 180
+          readinessProbe:
+           exec:
+            command:
+             - /tmp/mon-check.sh
+             - readiness
+           initialDelaySeconds: 60
+           periodSeconds: 60
+          volumeMounts:
+            - name: ceph-mon-bin
+              mountPath: /tmp/mon-start.sh
+              subPath: mon-start.sh
+              readOnly: true
+            - name: ceph-mon-bin
+              mountPath: /tmp/mon-stop.sh
+              subPath: mon-stop.sh
+              readOnly: true
+            - name: ceph-mon-bin
+              mountPath: /tmp/mon-check.sh
+              subPath: mon-check.sh
+              readOnly: true
+            - name: ceph-mon-etc
+              mountPath: /etc/ceph/ceph.conf
+              subPath: ceph.conf
+              readOnly: true
+            - name: ceph-client-admin-keyring
+              mountPath: /etc/ceph/ceph.client.admin.keyring
+              subPath: ceph.client.admin.keyring
+              readOnly: true
+            - name: ceph-mon-keyring
+              mountPath: /etc/ceph/ceph.mon.keyring.seed
+              subPath: ceph.mon.keyring
+              readOnly: true
+            - name: ceph-bootstrap-osd-keyring
+              mountPath: /var/lib/ceph/bootstrap-osd/ceph.keyring
+              subPath: ceph.keyring
+              readOnly: true
+            - name: ceph-bootstrap-mds-keyring
+              mountPath: /var/lib/ceph/bootstrap-mds/ceph.keyring
+              subPath: ceph.keyring
+              readOnly: true
+            - name: pod-var-lib-ceph
+              mountPath: /var/lib/ceph
+              readOnly: false
+            - name: pod-run
+              mountPath: /run
+              readOnly: false
+            - name: varlog
+              mountPath: /var/log/ceph
+        {{ if .Values.logging.fluentd }}
+        - name: fluentbit-sidecar
+{{ tuple $envAll "fluentbit" | include "helm-toolkit.snippets.image" | indent 10 }}
+{{ tuple $envAll $envAll.Values.pod.resources.fluentbit | include "helm-toolkit.snippets.kubernetes_resources" | indent 10 }}
+          command:
+            - /tmp/fluentbit-sidecar.sh
+          volumeMounts:
+            - name: ceph-mon-bin
+              mountPath: /tmp/fluentbit-sidecar.sh
+              subPath: fluentbit-sidecar.sh
+              readOnly: true
+            - name: varlog
+              mountPath: /var/log/ceph
+            - name: ceph-mon-etc
+              mountPath: /fluent-bit/etc/fluent-bit.conf
+              subPath: fluent-bit.conf
+              readOnly: true
+            - name: ceph-mon-etc
+              mountPath: /fluent-bit/etc/parsers.conf
+              subPath: parsers.conf
+              readOnly: true
+        {{ end }}
+      volumes:
+        - name: varlog
+          emptyDir: {}
+        - name: ceph-mon-bin
+          configMap:
+            name: ceph-mon-bin
+            defaultMode: 0555
+        - name: ceph-mon-etc
+          configMap:
+            name: ceph-mon-etc
+            defaultMode: 0444
+        - name: pod-var-lib-ceph
+          hostPath:
+            path: {{ .Values.conf.storage.mon.directory }}
+        - name: pod-run
+          emptyDir:
+            medium: "Memory"
+        - name: ceph-client-admin-keyring
+          secret:
+            secretName: {{ .Values.secrets.keyrings.admin }}
+        - name: ceph-mon-keyring
+          secret:
+            secretName: {{ .Values.secrets.keyrings.mon }}
+        - name: ceph-bootstrap-osd-keyring
+          secret:
+            secretName: {{ .Values.secrets.keyrings.osd }}
+        - name: ceph-bootstrap-mds-keyring
+          secret:
+            secretName: {{ .Values.secrets.keyrings.mds }}
+{{- end }}
diff --git a/ceph-mon/templates/deployment-moncheck.yaml b/ceph-mon/templates/deployment-moncheck.yaml
new file mode 100644
index 0000000000..70f8e109ae
--- /dev/null
+++ b/ceph-mon/templates/deployment-moncheck.yaml
@@ -0,0 +1,111 @@
+{{/*
+Copyright 2017 The Openstack-Helm Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+   http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/}}
+
+{{- if and .Values.manifests.deployment_moncheck .Values.deployment.ceph }}
+{{- $envAll := . }}
+
+{{- $serviceAccountName := "ceph-mon-check"}}
+{{ tuple $envAll "moncheck" $serviceAccountName | include "helm-toolkit.snippets.kubernetes_pod_rbac_serviceaccount" }}
+---
+kind: Deployment
+apiVersion: apps/v1
+metadata:
+  name: ceph-mon-check
+  labels:
+{{ tuple $envAll "ceph" "moncheck" | include "helm-toolkit.snippets.kubernetes_metadata_labels" | indent 4 }}
+spec:
+  replicas: {{ .Values.pod.replicas.mon_check }}
+  selector:
+    matchLabels:
+{{ tuple $envAll "ceph" "moncheck" | include "helm-toolkit.snippets.kubernetes_metadata_labels" | indent 6 }}
+  template:
+    metadata:
+      labels:
+{{ tuple $envAll "ceph" "moncheck" | include "helm-toolkit.snippets.kubernetes_metadata_labels" | indent 8 }}
+    spec:
+      serviceAccountName: {{ $serviceAccountName }}
+      affinity:
+{{ tuple $envAll "ceph" "moncheck" | include "helm-toolkit.snippets.kubernetes_pod_anti_affinity" | indent 8 }}
+      nodeSelector:
+        {{ .Values.labels.mon.node_selector_key }}: {{ .Values.labels.mon.node_selector_value }}
+      initContainers:
+{{ tuple $envAll "moncheck" list | include "helm-toolkit.snippets.kubernetes_entrypoint_init_container" | indent 8 }}
+      containers:
+        - name: ceph-mon
+{{ tuple $envAll "ceph_mon_check" | include "helm-toolkit.snippets.image" | indent 10 }}
+{{ tuple $envAll $envAll.Values.pod.resources.moncheck | include "helm-toolkit.snippets.kubernetes_resources" | indent 10 }}
+          env:
+            - name: CLUSTER
+              value: "ceph"
+            - name: K8S_HOST_NETWORK
+              value: "1"
+            - name: NAMESPACE
+              valueFrom:
+                fieldRef:
+                  apiVersion: v1
+                  fieldPath: metadata.namespace
+          command:
+            - /tmp/moncheck-start.sh
+          ports:
+            - containerPort: {{ tuple "ceph_mon" "internal" "mon" $envAll | include "helm-toolkit.endpoints.endpoint_port_lookup" }}
+          volumeMounts:
+            - name: ceph-mon-bin
+              mountPath: /tmp/moncheck-start.sh
+              subPath: moncheck-start.sh
+              readOnly: true
+            - name: ceph-mon-bin
+              mountPath: /tmp/moncheck-reap-zombies.py
+              subPath: moncheck-reap-zombies.py
+              readOnly: true
+            - name: ceph-mon-etc
+              mountPath: /etc/ceph/ceph.conf
+              subPath: ceph.conf
+              readOnly: true
+            - name: ceph-client-admin-keyring
+              mountPath: /etc/ceph/ceph.client.admin.keyring
+              subPath: ceph.client.admin.keyring
+              readOnly: true
+            - name: ceph-mon-keyring
+              mountPath: /etc/ceph/ceph.mon.keyring
+              subPath: ceph.mon.keyring
+              readOnly: true
+            - name: pod-var-lib-ceph
+              mountPath: /var/lib/ceph
+              readOnly: false
+            - name: pod-run
+              mountPath: /run
+              readOnly: false
+      volumes:
+        - name: ceph-mon-etc
+          configMap:
+            name: ceph-mon-etc
+            defaultMode: 0444
+        - name: ceph-mon-bin
+          configMap:
+            name: ceph-mon-bin
+            defaultMode: 0555
+        - name: pod-var-lib-ceph
+          emptyDir: {}
+        - name: pod-run
+          emptyDir:
+            medium: "Memory"
+        - name: ceph-client-admin-keyring
+          secret:
+            secretName: {{ .Values.secrets.keyrings.admin }}
+        - name: ceph-mon-keyring
+          secret:
+            secretName: {{ .Values.secrets.keyrings.mon }}
+{{- end }}
diff --git a/ceph-mon/templates/job-bootstrap.yaml b/ceph-mon/templates/job-bootstrap.yaml
new file mode 100644
index 0000000000..76665038fd
--- /dev/null
+++ b/ceph-mon/templates/job-bootstrap.yaml
@@ -0,0 +1,70 @@
+{{/*
+Copyright 2017 The Openstack-Helm Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+   http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/}}
+
+{{- if and .Values.manifests.job_bootstrap .Values.bootstrap.enabled }}
+{{- $envAll := . }}
+
+{{- $serviceAccountName := "ceph-bootstrap"}}
+{{ tuple $envAll "bootstrap" $serviceAccountName | include "helm-toolkit.snippets.kubernetes_pod_rbac_serviceaccount" }}
+---
+apiVersion: batch/v1
+kind: Job
+metadata:
+  name: ceph-bootstrap
+spec:
+  template:
+    metadata:
+      labels:
+{{ tuple $envAll "ceph" "bootstrap" | include "helm-toolkit.snippets.kubernetes_metadata_labels" | indent 8 }}
+    spec:
+      serviceAccountName: {{ $serviceAccountName }}
+      restartPolicy: OnFailure
+      nodeSelector:
+        {{ .Values.labels.job.node_selector_key }}: {{ .Values.labels.job.node_selector_value }}
+      initContainers:
+{{ tuple $envAll "bootstrap" list | include "helm-toolkit.snippets.kubernetes_entrypoint_init_container"  | indent 8 }}
+      containers:
+        - name: ceph-bootstrap
+{{ tuple $envAll "ceph_bootstrap" | include "helm-toolkit.snippets.image" | indent 10 }}
+{{ tuple $envAll $envAll.Values.pod.resources.jobs.bootstrap | include "helm-toolkit.snippets.kubernetes_resources" | indent 10 }}
+          command:
+            - /tmp/bootstrap.sh
+          volumeMounts:
+            - name: ceph-mon-bin
+              mountPath: /tmp/bootstrap.sh
+              subPath: bootstrap.sh
+              readOnly: true
+            - name: ceph-mon-etc
+              mountPath: /etc/ceph/ceph.conf
+              subPath: ceph.conf
+              readOnly: true
+            - name: ceph-client-admin-keyring
+              mountPath: /etc/ceph/ceph.client.admin.keyring
+              subPath: ceph.client.admin.keyring
+              readOnly: true
+      volumes:
+        - name: ceph-mon-bin
+          configMap:
+            name: ceph-mon-bin
+            defaultMode: 0555
+        - name: ceph-mon-etc
+          configMap:
+            name: ceph-mon-etc
+            defaultMode: 0444
+        - name: ceph-client-admin-keyring
+          secret:
+            secretName: {{ .Values.secrets.keyrings.admin }}
+{{- end }}
diff --git a/ceph-mon/templates/job-image-repo-sync.yaml b/ceph-mon/templates/job-image-repo-sync.yaml
new file mode 100644
index 0000000000..4a0b567a8f
--- /dev/null
+++ b/ceph-mon/templates/job-image-repo-sync.yaml
@@ -0,0 +1,20 @@
+{{/*
+Copyright 2017 The Openstack-Helm Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+   http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/}}
+
+{{- if and .Values.manifests.job_image_repo_sync .Values.images.local_registry.active }}
+{{- $imageRepoSyncJob := dict "envAll" . "serviceName" "ceph-mon" -}}
+{{ $imageRepoSyncJob | include "helm-toolkit.manifests.job_image_repo_sync" }}
+{{- end }}
diff --git a/ceph-mon/templates/job-keyring.yaml b/ceph-mon/templates/job-keyring.yaml
new file mode 100644
index 0000000000..30fb49e95b
--- /dev/null
+++ b/ceph-mon/templates/job-keyring.yaml
@@ -0,0 +1,118 @@
+{{/*
+Copyright 2017 The Openstack-Helm Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+   http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/}}
+
+{{- if and .Values.manifests.job_keyring .Values.deployment.storage_secrets }}
+{{- $envAll := . }}
+{{- range $key1, $cephBootstrapKey := tuple "mds" "osd" "mon" "mgr" }}
+{{- $jobName := print $cephBootstrapKey "-keyring-generator" }}
+
+{{- $serviceAccountName := print "ceph-" $jobName }}
+{{ tuple $envAll "job_keyring_generator" $serviceAccountName | include "helm-toolkit.snippets.kubernetes_pod_rbac_serviceaccount" }}
+---
+apiVersion: rbac.authorization.k8s.io/v1beta1
+kind: Role
+metadata:
+  name: {{ $serviceAccountName }}
+rules:
+  - apiGroups:
+      - ""
+    resources:
+      - secrets
+    verbs:
+      - get
+      - create
+      - patch
+---
+apiVersion: rbac.authorization.k8s.io/v1beta1
+kind: RoleBinding
+metadata:
+  name: {{ $serviceAccountName }}
+roleRef:
+  apiGroup: rbac.authorization.k8s.io
+  kind: Role
+  name: {{ $serviceAccountName }}
+subjects:
+  - kind: ServiceAccount
+    name: {{ $serviceAccountName }}
+    namespace: {{ $envAll.Release.Namespace }}
+---
+apiVersion: batch/v1
+kind: Job
+metadata:
+  name: ceph-{{ $jobName }}
+spec:
+  template:
+    metadata:
+      labels:
+{{ tuple $envAll "ceph" $jobName | include "helm-toolkit.snippets.kubernetes_metadata_labels" | indent 8 }}
+    spec:
+      serviceAccountName: {{ $serviceAccountName }}
+      restartPolicy: OnFailure
+      nodeSelector:
+        {{ $envAll.Values.labels.job.node_selector_key }}: {{ $envAll.Values.labels.job.node_selector_value }}
+      initContainers:
+{{ tuple $envAll "job_keyring_generator" list | include "helm-toolkit.snippets.kubernetes_entrypoint_init_container" | indent 8 }}
+      containers:
+        - name:  ceph-{{ $jobName }}
+{{ tuple $envAll "ceph_config_helper" | include "helm-toolkit.snippets.image" | indent 10 }}
+{{ tuple $envAll $envAll.Values.pod.resources.jobs.secret_provisioning | include "helm-toolkit.snippets.kubernetes_resources" | indent 10 }}
+          env:
+            - name: DEPLOYMENT_NAMESPACE
+              valueFrom:
+                fieldRef:
+                  fieldPath: metadata.namespace
+            - name: CEPH_GEN_DIR
+              value: /tmp
+            - name: CEPH_TEMPLATES_DIR
+              value: /tmp/templates
+            {{- if eq $cephBootstrapKey "mon"}}
+            - name: CEPH_KEYRING_NAME
+              value: ceph.mon.keyring
+            - name: CEPH_KEYRING_TEMPLATE
+              value: mon.keyring
+            {{- else }}
+            - name: CEPH_KEYRING_NAME
+              value: ceph.keyring
+            - name: CEPH_KEYRING_TEMPLATE
+              value: bootstrap.keyring.{{ $cephBootstrapKey }}
+            {{- end }}
+            - name: KUBE_SECRET_NAME
+              value: {{  index $envAll.Values.secrets.keyrings $cephBootstrapKey }}
+          command:
+            - /tmp/keys-bootstrap-keyring-manager.sh
+          volumeMounts:
+            - name: ceph-mon-bin
+              mountPath: /tmp/keys-bootstrap-keyring-manager.sh
+              subPath: keys-bootstrap-keyring-manager.sh
+              readOnly: true
+            - name: ceph-mon-bin
+              mountPath: /tmp/keys-bootstrap-keyring-generator.py
+              subPath: keys-bootstrap-keyring-generator.py
+              readOnly: true
+            - name: ceph-templates
+              mountPath: /tmp/templates
+              readOnly: true
+      volumes:
+        - name: ceph-mon-bin
+          configMap:
+            name: ceph-mon-bin
+            defaultMode: 0555
+        - name: ceph-templates
+          configMap:
+            name: ceph-templates
+            defaultMode: 0444
+{{- end }}
+{{- end }}
diff --git a/ceph-mon/templates/job-storage-admin-keys.yaml b/ceph-mon/templates/job-storage-admin-keys.yaml
new file mode 100644
index 0000000000..9f6f1e280b
--- /dev/null
+++ b/ceph-mon/templates/job-storage-admin-keys.yaml
@@ -0,0 +1,110 @@
+{{/*
+Copyright 2017 The Openstack-Helm Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+   http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/}}
+
+{{- if and .Values.manifests.job_storage_admin_keys .Values.deployment.storage_secrets }}
+{{- $envAll := . }}
+
+{{- $serviceAccountName := "ceph-storage-keys-generator" }}
+{{ tuple $envAll "storage_keys_generator" $serviceAccountName | include "helm-toolkit.snippets.kubernetes_pod_rbac_serviceaccount" }}
+---
+apiVersion: rbac.authorization.k8s.io/v1beta1
+kind: Role
+metadata:
+  name: {{ $serviceAccountName }}
+rules:
+  - apiGroups:
+      - ""
+    resources:
+      - secrets
+    verbs:
+      - get
+      - create
+      - patch
+---
+apiVersion: rbac.authorization.k8s.io/v1beta1
+kind: RoleBinding
+metadata:
+  name: {{ $serviceAccountName }}
+roleRef:
+  apiGroup: rbac.authorization.k8s.io
+  kind: Role
+  name: {{ $serviceAccountName }}
+subjects:
+  - kind: ServiceAccount
+    name: {{ $serviceAccountName }}
+    namespace: {{ $envAll.Release.Namespace }}
+---
+apiVersion: batch/v1
+kind: Job
+metadata:
+  name: ceph-storage-keys-generator
+spec:
+  template:
+    metadata:
+      labels:
+{{ tuple $envAll "ceph" "storage-keys-generator" | include "helm-toolkit.snippets.kubernetes_metadata_labels" | indent 8 }}
+    spec:
+      serviceAccountName: {{ $serviceAccountName }}
+      restartPolicy: OnFailure
+      nodeSelector:
+        {{ $envAll.Values.labels.job.node_selector_key }}: {{ $envAll.Values.labels.job.node_selector_value }}
+      initContainers:
+{{ tuple $envAll "storage_keys_generator" list | include "helm-toolkit.snippets.kubernetes_entrypoint_init_container" | indent 8 }}
+      containers:
+        - name:  ceph-storage-keys-generator
+{{ tuple $envAll "ceph_config_helper" | include "helm-toolkit.snippets.image" | indent 10 }}
+{{ tuple $envAll $envAll.Values.pod.resources.jobs.secret_provisioning | include "helm-toolkit.snippets.kubernetes_resources" | indent 10 }}
+          env:
+            - name: DEPLOYMENT_NAMESPACE
+              valueFrom:
+                fieldRef:
+                  fieldPath: metadata.namespace
+            - name: CEPH_GEN_DIR
+              value: /tmp
+            - name: CEPH_TEMPLATES_DIR
+              value: /tmp/templates
+            - name: CEPH_KEYRING_NAME
+              value: ceph.client.admin.keyring
+            - name: CEPH_KEYRING_TEMPLATE
+              value: admin.keyring
+            - name: CEPH_KEYRING_ADMIN_NAME
+              value: {{ .Values.secrets.keyrings.admin }}
+            - name: CEPH_STORAGECLASS_ADMIN_SECRET_NAME
+              value: {{ .Values.storageclass.rbd.admin_secret_name }}
+          command:
+            - /tmp/keys-storage-keyring-manager.sh
+          volumeMounts:
+            - name: ceph-mon-bin
+              mountPath: /tmp/keys-storage-keyring-manager.sh
+              subPath: keys-storage-keyring-manager.sh
+              readOnly: true
+            - name: ceph-mon-bin
+              mountPath: /tmp/keys-bootstrap-keyring-generator.py
+              subPath: keys-bootstrap-keyring-generator.py
+              readOnly: true
+            - name: ceph-templates
+              mountPath: /tmp/templates
+              readOnly: true
+      volumes:
+        - name: ceph-mon-bin
+          configMap:
+            name: ceph-mon-bin
+            defaultMode: 0555
+        - name: ceph-templates
+          configMap:
+            name: ceph-templates
+            defaultMode: 0444
+{{- end }}
diff --git a/ceph-mon/templates/service-mon-discovery.yaml b/ceph-mon/templates/service-mon-discovery.yaml
new file mode 100644
index 0000000000..ffe2eacd03
--- /dev/null
+++ b/ceph-mon/templates/service-mon-discovery.yaml
@@ -0,0 +1,40 @@
+{{/*
+Copyright 2017 The Openstack-Helm Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+   http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/}}
+
+{{- if and .Values.manifests.service_mon_discovery .Values.deployment.ceph }}
+{{- $envAll := . }}
+---
+kind: Service
+apiVersion: v1
+metadata:
+  name: {{ tuple "ceph_mon" "discovery" . | include "helm-toolkit.endpoints.hostname_short_endpoint_lookup" }}
+  annotations:
+    # In Kubernetes 1.6 and beyond, a change in behavior means we must
+    # tolerate unready endpoints for a quorum to form: without this, a newly
+    # provisioned ceph-mon may never see itself in the peer list, most likely
+    # because of a small timing change that keeps it from observing the
+    # other, now-ready mons. This annotation allows a quorum to form reliably
+    # every time.
+    service.alpha.kubernetes.io/tolerate-unready-endpoints: "true"
+spec:
+  ports:
+  - port: {{ tuple "ceph_mon" "discovery" "mon" $envAll | include "helm-toolkit.endpoints.endpoint_port_lookup" }}
+    protocol: TCP
+    targetPort: {{ tuple "ceph_mon" "discovery" "mon" $envAll | include "helm-toolkit.endpoints.endpoint_port_lookup" }}
+  selector:
+{{ tuple $envAll "ceph" "mon" | include "helm-toolkit.snippets.kubernetes_metadata_labels" | indent 4 }}
+  clusterIP: None
+{{- end }}
diff --git a/ceph-mon/templates/service-mon.yaml b/ceph-mon/templates/service-mon.yaml
new file mode 100644
index 0000000000..c69aa82c18
--- /dev/null
+++ b/ceph-mon/templates/service-mon.yaml
@@ -0,0 +1,32 @@
+{{/*
+Copyright 2017 The Openstack-Helm Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+   http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/}}
+
+{{- if and .Values.manifests.service_mon .Values.deployment.ceph }}
+{{- $envAll := . }}
+---
+kind: Service
+apiVersion: v1
+metadata:
+  name: {{ tuple "ceph_mon" "internal" . | include "helm-toolkit.endpoints.hostname_short_endpoint_lookup" }}
+spec:
+  ports:
+  - port: {{ tuple "ceph_mon" "internal" "mon" $envAll | include "helm-toolkit.endpoints.endpoint_port_lookup" }}
+    protocol: TCP
+    targetPort: {{ tuple "ceph_mon" "internal" "mon" $envAll | include "helm-toolkit.endpoints.endpoint_port_lookup" }}
+  selector:
+{{ tuple $envAll "ceph" "mon" | include "helm-toolkit.snippets.kubernetes_metadata_labels" | indent 4 }}
+  clusterIP: None
+{{- end }}
diff --git a/ceph-mon/templates/utils/_to_fluentbit_conf.tpl b/ceph-mon/templates/utils/_to_fluentbit_conf.tpl
new file mode 100644
index 0000000000..773120488b
--- /dev/null
+++ b/ceph-mon/templates/utils/_to_fluentbit_conf.tpl
@@ -0,0 +1,38 @@
+{{/*
+Copyright 2017 The Openstack-Helm Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+   http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/}}
+
+# This function generates fluentbit configuration files from entries in the
+# ceph-mon values.yaml. It renders one configuration section per list item,
+# in the following format (one line per key/value pair defined for that section):
+# [HEADER]
+#     key value
+#     key value
+#     key value
+# The configuration schema can be found here:
+# http://fluentbit.io/documentation/0.12/configuration/schema.html
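+# For example, the "service" entry under conf.fluentbit in values.yaml
+# (Flush: 30, Daemon: Off, ...) renders roughly as:
+# [SERVICE]
+#     Flush 30
+#     Daemon Off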
+
+{{- define "ceph-mon.utils.to_fluentbit_conf" -}}
+{{- range $values := . -}}
+{{- range $section := . -}}
+{{- $header := pick . "header" -}}
+{{- $config := omit . "header" }}
+[{{$header.header | upper }}]
+{{range $key, $value := $config -}}
+{{ $key | indent 4 }} {{ $value }}
+{{end -}}
+{{- end -}}
+{{- end -}}
+{{- end -}}
diff --git a/ceph-mon/values.yaml b/ceph-mon/values.yaml
new file mode 100644
index 0000000000..7578818649
--- /dev/null
+++ b/ceph-mon/values.yaml
@@ -0,0 +1,321 @@
+# Copyright 2017 The Openstack-Helm Authors.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#     http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+# Default values for ceph-mon.
+# This is a YAML-formatted file.
+# Declare name/value pairs to be passed into your templates.
+# name: value
+
+deployment:
+  ceph: true
+  storage_secrets: true
+
+images:
+  pull_policy: IfNotPresent
+  tags:
+    ceph_bootstrap: 'docker.io/ceph/daemon:tag-build-master-luminous-ubuntu-16.04'
+    ceph_config_helper: 'docker.io/port/ceph-config-helper:v1.10.3'
+    ceph_mon: 'docker.io/ceph/daemon:tag-build-master-luminous-ubuntu-16.04'
+    ceph_mon_check: 'docker.io/port/ceph-config-helper:v1.10.3'
+    fluentbit: docker.io/fluent/fluent-bit:0.12.14
+    dep_check: 'quay.io/stackanetes/kubernetes-entrypoint:v0.3.1'
+    image_repo_sync: docker.io/docker:17.07.0
+  local_registry:
+    active: false
+    exclude:
+      - dep_check
+      - image_repo_sync
+
+labels:
+  job:
+    node_selector_key: openstack-control-plane
+    node_selector_value: enabled
+  mon:
+    node_selector_key: ceph-mon
+    node_selector_value: enabled
+
+pod:
+  dns_policy: "ClusterFirstWithHostNet"
+  replicas:
+    mon_check: 1
+  affinity:
+    anti:
+      type:
+        default: preferredDuringSchedulingIgnoredDuringExecution
+      topologyKey:
+        default: kubernetes.io/hostname
+  resources:
+    enabled: false
+    mon:
+      requests:
+        memory: "50Mi"
+        cpu: "250m"
+      limits:
+        memory: "100Mi"
+        cpu: "500m"
+    mon_check:
+      requests:
+        memory: "5Mi"
+        cpu: "250m"
+      limits:
+        memory: "50Mi"
+        cpu: "500m"
+    fluentbit:
+      requests:
+        memory: "5Mi"
+        cpu: "250m"
+      limits:
+        memory: "50Mi"
+        cpu: "500m"
+    jobs:
+      bootstrap:
+        limits:
+          memory: "1024Mi"
+          cpu: "2000m"
+        requests:
+          memory: "128Mi"
+          cpu: "500m"
+      secret_provisioning:
+        limits:
+          memory: "1024Mi"
+          cpu: "2000m"
+        requests:
+          memory: "128Mi"
+          cpu: "500m"
+      image_repo_sync:
+        requests:
+          memory: "128Mi"
+          cpu: "100m"
+        limits:
+          memory: "1024Mi"
+          cpu: "2000m"
+
+secrets:
+  keyrings:
+    mon: ceph-mon-keyring
+    mds: ceph-bootstrap-mds-keyring
+    osd: ceph-bootstrap-osd-keyring
+    mgr: ceph-bootstrap-mgr-keyring
+    admin: ceph-client-admin-keyring
+
+network:
+  public: 192.168.0.0/16
+  cluster: 192.168.0.0/16
+
+conf:
+  templates:
+    keyring:
+      admin: |
+        [client.admin]
+          key = {{ key }}
+          auid = 0
+          caps mds = "allow"
+          caps mon = "allow *"
+          caps osd = "allow *"
+          caps mgr = "allow *"
+      mon: |
+        [mon.]
+          key = {{ key }}
+          caps mon = "allow *"
+      bootstrap:
+        mds: |
+          [client.bootstrap-mds]
+            key = {{ key }}
+            caps mon = "allow profile bootstrap-mds"
+        mgr: |
+          [client.bootstrap-mgr]
+            key = {{ key }}
+            caps mgr = "allow profile bootstrap-mgr"
+        osd: |
+          [client.bootstrap-osd]
+            key = {{ key }}
+            caps mon = "allow profile bootstrap-osd"
+  ceph:
+    global:
+      # auth
+      cephx: true
+      cephx_require_signatures: false
+      cephx_cluster_require_signatures: true
+      cephx_service_require_signatures: false
+    osd:
+      osd_mkfs_type: xfs
+      osd_mkfs_options_xfs: -f -i size=2048
+      osd_max_object_name_len: 256
+      ms_bind_port_min: 6800
+      ms_bind_port_max: 7100
+  storage:
+    mon:
+      directory: /var/lib/openstack-helm/ceph/mon
+  fluentbit:
+    - service:
+        header: service
+        Flush: 30
+        Daemon: Off
+        Log_Level: info
+        Parsers_File: parsers.conf
+    - ceph_tail:
+        # NOTE(srwilkers): Despite being exposed, these values should not be
+        # modified, as the ceph-mon logs are always placed here
+        header: input
+        Name: tail
+        Tag: ceph-mon.*
+        Path: /var/log/ceph/*.log
+        Parser: syslog
+        DB: /var/log/ceph/ceph.db
+        DB.Sync: Normal
+        Buffer_Chunk_Size: 1M
+        Buffer_Max_Size: 1M
+        Mem_Buf_Limit: 5MB
+        Refresh_Interval: 10s
+  parsers:
+    - syslog:
+        header: parser
+        Name: syslog
+        Format: regex
+        Regex: '^(?<time>.*[0-9]{2}:[0-9]{2}:[0-9]{2}) (?<host>[^ ]*) (?<app>[a-zA-Z0-9_\/\.\-]*)(?:\[(?<pid>[0-9]+)\])?(?:[^\:]*\:)? (?<log>.+)$'
+        Time_Key: time
+        Time_Format: "%Y-%m-%dT%H:%M:%S.%L"
+        Time_Keep: On
+        Types: "pid:integer"
+
+logging:
+  fluentd: false
+
+dependencies:
+  dynamic:
+    common:
+      local_image_registry:
+        jobs:
+          - ceph-mon-image-repo-sync
+        services:
+          - endpoint: node
+            service: local_image_registry
+  static:
+    bootstrap:
+      jobs: null
+      services:
+        - endpoint: internal
+          service: ceph_mon
+    job_keyring_generator:
+      jobs: null
+    mon:
+      jobs:
+        - ceph-storage-keys-generator
+        - ceph-mon-keyring-generator
+    moncheck:
+      jobs:
+        - ceph-storage-keys-generator
+        - ceph-mon-keyring-generator
+      services:
+        - endpoint: discovery
+          service: ceph_mon
+    storage_keys_generator:
+      jobs: null
+    image_repo_sync:
+      services:
+        - endpoint: internal
+          service: local_image_registry
+
+bootstrap:
+  enabled: false
+  script: |
+    ceph -s
+    function ensure_pool () {
+      ceph osd pool stats $1 || ceph osd pool create $1 $2
+      local test_luminous=$(ceph tell osd.* version | egrep -c "12.2|luminous" | xargs echo)
+      if [[ ${test_luminous} -gt 0 ]]; then
+        ceph osd pool application enable $1 $3
+      fi
+    }
+    #ensure_pool volumes 8 cinder
+
+# if you change provision_storage_class to false
+# it is presumed you manage your own storage
+# class definition externally
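+# For example (illustrative), provisioning could be disabled at deploy time
+# with something like:
+#   helm upgrade --install ceph-mon ./ceph-mon \
+#     --set storageclass.rbd.provision_storage_class=false \
+#     --set storageclass.cephfs.provision_storage_class=false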
+storageclass:
+  rbd:
+    provision_storage_class: true
+    provisioner: ceph.com/rbd
+    name: general
+    monitors: null
+    pool: rbd
+    admin_id: admin
+    admin_secret_name: pvc-ceph-conf-combined-storageclass
+    admin_secret_namespace: ceph
+    user_id: admin
+    user_secret_name: pvc-ceph-client-key
+    image_format: "2"
+    image_features: layering
+  cephfs:
+    provision_storage_class: true
+    provisioner: ceph.com/cephfs
+    name: cephfs
+    admin_id: admin
+    user_secret_name: pvc-ceph-cephfs-client-key
+    admin_secret_name: pvc-ceph-conf-combined-storageclass
+    admin_secret_namespace: ceph
+
+endpoints:
+  cluster_domain_suffix: cluster.local
+  local_image_registry:
+    name: docker-registry
+    namespace: docker-registry
+    hosts:
+      default: localhost
+      internal: docker-registry
+      node: localhost
+    host_fqdn_override:
+      default: null
+    port:
+      registry:
+        node: 5000
+  ceph_mon:
+    namespace: null
+    hosts:
+      default: ceph-mon
+      discovery: ceph-mon-discovery
+    host_fqdn_override:
+      default: null
+    port:
+      mon:
+        default: 6789
+  fluentd:
+    namespace: null
+    name: fluentd
+    hosts:
+      default: fluentd-logging
+    host_fqdn_override:
+      default: null
+    path:
+      default: null
+    scheme:
+      default: http
+    port:
+      service:
+        default: 24224
+      metrics:
+        default: 24220
+
+manifests:
+  configmap_bin: true
+  configmap_etc: true
+  configmap_templates: true
+  daemonset_mon: true
+  deployment_moncheck: true
+  job_image_repo_sync: true
+  job_bootstrap: true
+  job_keyring: true
+  service_mon: true
+  service_mon_discovery: true
+  job_storage_admin_keys: true
diff --git a/ceph-osd/Chart.yaml b/ceph-osd/Chart.yaml
new file mode 100644
index 0000000000..b2e5376c57
--- /dev/null
+++ b/ceph-osd/Chart.yaml
@@ -0,0 +1,18 @@
+# Copyright 2017 The Openstack-Helm Authors.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#     http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+apiVersion: v1
+description: OpenStack-Helm Ceph OSD
+name: ceph-osd
+version: 0.1.0
diff --git a/ceph-osd/requirements.yaml b/ceph-osd/requirements.yaml
new file mode 100644
index 0000000000..53782e69b2
--- /dev/null
+++ b/ceph-osd/requirements.yaml
@@ -0,0 +1,18 @@
+# Copyright 2017 The Openstack-Helm Authors.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#     http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+dependencies:
+  - name: helm-toolkit
+    repository: http://localhost:8879/charts
+    version: 0.1.0
diff --git a/ceph-osd/templates/bin/_init-dirs.sh.tpl b/ceph-osd/templates/bin/_init-dirs.sh.tpl
new file mode 100644
index 0000000000..b15731c4ae
--- /dev/null
+++ b/ceph-osd/templates/bin/_init-dirs.sh.tpl
@@ -0,0 +1,34 @@
+#!/bin/bash
+
+{{/*
+Copyright 2017 The Openstack-Helm Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+   http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/}}
+
+set -ex
+export LC_ALL=C
+: "${OSD_BOOTSTRAP_KEYRING:=/var/lib/ceph/bootstrap-osd/${CLUSTER}.keyring}"
+
+mkdir -p "$(dirname "${OSD_BOOTSTRAP_KEYRING}")"
+
+# Let's create the ceph directories
+for DIRECTORY in osd tmp; do
+  mkdir -p "/var/lib/ceph/${DIRECTORY}"
+done
+
+# Create socket directory
+mkdir -p /run/ceph
+
+# Adjust the owner of all those directories
+chown -R ceph. /run/ceph/ /var/lib/ceph/*
diff --git a/ceph-osd/templates/bin/osd/_block.sh.tpl b/ceph-osd/templates/bin/osd/_block.sh.tpl
new file mode 100644
index 0000000000..2abadb3e07
--- /dev/null
+++ b/ceph-osd/templates/bin/osd/_block.sh.tpl
@@ -0,0 +1,193 @@
+#!/bin/bash
+
+{{/*
+Copyright 2017 The Openstack-Helm Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+   http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/}}
+
+set -ex
+
+: "${OSD_BOOTSTRAP_KEYRING:=/var/lib/ceph/bootstrap-osd/${CLUSTER}.keyring}"
+: "${OSD_JOURNAL_UUID:=$(uuidgen)}"
+: "${CRUSH_LOCATION:=root=default host=${HOSTNAME}}"
+: "${OSD_PATH_BASE:=/var/lib/ceph/osd/${CLUSTER}}"
+: "${OSD_SOFT_FORCE_ZAP:=1}"
+: "${OSD_JOURNAL_PARTITION:=}"
+
+if [ "x${STORAGE_TYPE%-*}" == "xdirectory" ]; then
+  export OSD_DEVICE="/var/lib/ceph/osd"
+else
+  export OSD_DEVICE=$(readlink -f ${STORAGE_LOCATION})
+fi
+
+if [ "x$JOURNAL_TYPE" == "xdirectory" ]; then
+  export OSD_JOURNAL="/var/lib/ceph/journal"
+else
+  export OSD_JOURNAL=$(readlink -f ${JOURNAL_LOCATION})
+fi
+
+if [[ ! -e /etc/ceph/${CLUSTER}.conf ]]; then
+  echo "ERROR- /etc/ceph/${CLUSTER}.conf must exist; get it from your existing mon"
+  exit 1
+fi
+
+if [[ -z "${OSD_DEVICE}" ]];then
+  echo "ERROR- You must provide a device to build your OSD ie: /dev/sdb"
+  exit 1
+fi
+
+if [[ ! -b "${OSD_DEVICE}" ]]; then
+  echo "ERROR- The device pointed by OSD_DEVICE ($OSD_DEVICE) doesn't exist !"
+  exit 1
+fi
+
+# Calculate proper device names, given a device and partition number
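+# For example (illustrative): "dev_part /dev/sdb 1" echoes /dev/sdb1, while
+# "dev_part /dev/nvme0n1 1" echoes /dev/nvme0n1p1, as device names ending in a
+# digit get a "p" separator before the partition number.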
+function dev_part {
+  local osd_device=${1}
+  local osd_partition=${2}
+
+  if [[ -L ${osd_device} ]]; then
+    # This device is a symlink. Work out its actual device
+    local actual_device
+    actual_device=$(readlink -f "${osd_device}")
+    if [[ "${actual_device:0-1:1}" == [0-9] ]]; then
+      local desired_partition="${actual_device}p${osd_partition}"
+    else
+      local desired_partition="${actual_device}${osd_partition}"
+    fi
+    # Now search for a symlink in the directory of $osd_device
+    # that has the correct desired partition, and the longest
+    # shared prefix with the original symlink
+    local symdir
+    symdir=$(dirname "${osd_device}")
+    local link=""
+    local pfxlen=0
+    for option in ${symdir}/*; do
+      [[ -e $option ]] || break
+      if [[ $(readlink -f "$option") == "$desired_partition" ]]; then
+        local optprefixlen
+        optprefixlen=$(prefix_length "$option" "$osd_device")
+        if [[ $optprefixlen -gt $pfxlen ]]; then
+          link=$option
+          pfxlen=$optprefixlen
+        fi
+      fi
+    done
+    if [[ $pfxlen -eq 0 ]]; then
+      >&2 echo "Could not locate appropriate symlink for partition ${osd_partition} of ${osd_device}"
+      exit 1
+    fi
+    echo "$link"
+  elif [[ "${osd_device:0-1:1}" == [0-9] ]]; then
+    echo "${osd_device}p${osd_partition}"
+  else
+    echo "${osd_device}${osd_partition}"
+  fi
+}
+
+CEPH_DISK_OPTIONS=""
+CEPH_OSD_OPTIONS=""
+
+DATA_UUID=$(blkid -o value -s PARTUUID ${OSD_DEVICE}*1)
+LOCKBOX_UUID=$(blkid -o value -s PARTUUID ${OSD_DEVICE}3 || true)
+JOURNAL_PART=$(dev_part ${OSD_DEVICE} 2)
+
+# watch the udev event queue, and exit if all current events are handled
+udevadm settle --timeout=600
+
+# Wait for a file to exist, regardless of the type
+function wait_for_file {
+  timeout 10 bash -c "while [ ! -e ${1} ]; do echo 'Waiting for ${1} to show up' && sleep 1 ; done"
+}
+
+DATA_PART=$(dev_part ${OSD_DEVICE} 1)
+MOUNTED_PART=${DATA_PART}
+
+ceph-disk -v \
+  --setuser ceph \
+  --setgroup disk \
+  activate ${CEPH_DISK_OPTIONS} \
+    --no-start-daemon ${DATA_PART}
+
+OSD_ID=$(grep "${MOUNTED_PART}" /proc/mounts | awk '{print $2}' | grep -oh '[0-9]*')
+
+OSD_PATH="${OSD_PATH_BASE}-${OSD_ID}"
+OSD_KEYRING="${OSD_PATH}/keyring"
+OSD_WEIGHT=$(df -P -k "${OSD_PATH}" | tail -1 | awk '{ d= $2/1073741824 ; r = sprintf("%.2f", d); print r }')
+ceph \
+  --cluster "${CLUSTER}" \
+  --name="osd.${OSD_ID}" \
+  --keyring="${OSD_KEYRING}" \
+  osd \
+  crush \
+  create-or-move -- "${OSD_ID}" "${OSD_WEIGHT}" ${CRUSH_LOCATION}
+
+if [ "${OSD_BLUESTORE:-0}" -ne 1 ]; then
+  if [ -n "${OSD_JOURNAL}" ]; then
+    if [ -b "${OSD_JOURNAL}" ]; then
+      OSD_JOURNAL_PARTITION="$(echo "${OSD_JOURNAL_PARTITION}" | sed 's/[^0-9]//g')"
+      if [ -z "${OSD_JOURNAL_PARTITION}" ]; then
+        # maybe they specified the journal as a /dev path like '/dev/sdc12':
+        JDEV="$(echo "${OSD_JOURNAL}" | sed 's/\(.*[^0-9]\)[0-9]*$/\1/')"
+        if [ -d "/sys/block/$(basename "${JDEV}")/$(basename "${OSD_JOURNAL}")" ]; then
+          OSD_JOURNAL="$(dev_part "${JDEV}" "$(echo "${OSD_JOURNAL}" | sed 's/.*[^0-9]\([0-9]*\)$/\1/')")"
+        else
+          # they likely supplied a bare device and prepare created partition 1.
+          OSD_JOURNAL="$(dev_part "${OSD_JOURNAL}" 1)"
+        fi
+      else
+        OSD_JOURNAL="$(dev_part "${OSD_JOURNAL}" "${OSD_JOURNAL_PARTITION}")"
+      fi
+    fi
+    if [ "x${JOURNAL_TYPE}" == "xdirectory" ]; then
+      OSD_JOURNAL="${OSD_JOURNAL}/journal.${OSD_ID}"
+    else
+      if [ ! -b "${OSD_JOURNAL}" ]; then
+        echo "ERROR: Unable to find journal device ${OSD_JOURNAL}"
+        exit 1
+      else
+        wait_for_file "${OSD_JOURNAL}"
+        chown ceph. "${OSD_JOURNAL}"
+      fi
+    fi
+  else
+    wait_for_file "${JOURNAL_PART}"
+    chown ceph. "${JOURNAL_PART}"
+    OSD_JOURNAL="${JOURNAL_PART}"
+  fi
+  CEPH_OSD_OPTIONS="${CEPH_OSD_OPTIONS} --osd-journal ${OSD_JOURNAL}"
+fi
+
+if [ "x${JOURNAL_TYPE}" == "xdirectory" ]; then
+  touch ${OSD_JOURNAL}
+  chown -R ceph. /var/lib/ceph/journal
+  ceph-osd \
+    --cluster ceph \
+    --osd-data ${OSD_PATH} \
+    --osd-journal ${OSD_JOURNAL} \
+    -f \
+    -i 0 \
+    --setuser ceph \
+    --setgroup disk \
+    --mkjournal
+fi
+
+exec /usr/bin/ceph-osd \
+    --cluster ${CLUSTER} \
+    ${CEPH_OSD_OPTIONS} \
+    -f \
+    -i ${OSD_ID} \
+    --setuser ceph \
+    --setgroup disk & echo $! > /run/ceph-osd.pid
+wait
diff --git a/ceph-osd/templates/bin/osd/_check.sh.tpl b/ceph-osd/templates/bin/osd/_check.sh.tpl
new file mode 100644
index 0000000000..e09847e614
--- /dev/null
+++ b/ceph-osd/templates/bin/osd/_check.sh.tpl
@@ -0,0 +1,44 @@
+#!/bin/sh
+
+# Copyright 2017 The Openstack-Helm Authors.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#   http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+# A liveness check for ceph OSDs: exit 0 if
+# all OSDs on this host are in the "active" state
+# per their admin sockets.
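+# Admin sockets are expected to look like (illustrative) /run/ceph/ceph-osd.3.asok;
+# the OSD id is taken from the second dot-separated field of the socket name.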
+
+SOCKDIR=${CEPH_SOCKET_DIR:-/run/ceph}
+SBASE=${CEPH_OSD_SOCKET_BASE:-ceph-osd}
+SSUFFIX=${CEPH_SOCKET_SUFFIX:-asok}
+
+# default: no sockets, not live
+cond=1
+for sock in $SOCKDIR/$SBASE.*.$SSUFFIX; do
+ if [ -S $sock ]; then
+  OSD_ID=$(echo $sock | awk -F. '{print $2}')
+  OSD_STATE=$(ceph -f json-pretty --connect-timeout 1 --admin-daemon "${sock}" status|grep state|sed 's/.*://;s/[^a-z]//g')
+  echo "OSD ${OSD_ID} ${OSD_STATE}";
+  # this might be a stricter check than we actually want.  what are the
+  # other values for the "state" field?
+  if [ "x${OSD_STATE}x" = 'xactivex' ]; then
+   cond=0
+  else
+   # one's not ready, so the whole pod's not ready.
+   exit 1
+  fi
+ else
+  echo "No daemon sockets found in $SOCKDIR"
+ fi
+done
+exit $cond
diff --git a/ceph-osd/templates/bin/osd/_directory.sh.tpl b/ceph-osd/templates/bin/osd/_directory.sh.tpl
new file mode 100644
index 0000000000..115ec6cf35
--- /dev/null
+++ b/ceph-osd/templates/bin/osd/_directory.sh.tpl
@@ -0,0 +1,102 @@
+#!/bin/bash
+set -ex
+export LC_ALL=C
+: "${HOSTNAME:=$(uname -n)}"
+: "${CRUSH_LOCATION:=root=default host=${HOSTNAME}}"
+: "${OSD_PATH_BASE:=/var/lib/ceph/osd/${CLUSTER}}"
+: "${JOURNAL_DIR:=/var/lib/ceph/journal}"
+: "${OSD_BOOTSTRAP_KEYRING:=/var/lib/ceph/bootstrap-osd/${CLUSTER}.keyring}"
+
+function is_available {
+  command -v $@ &>/dev/null
+}
+if is_available rpm; then
+  OS_VENDOR=redhat
+  source /etc/sysconfig/ceph
+elif is_available dpkg; then
+  OS_VENDOR=ubuntu
+  source /etc/default/ceph
+fi
+
+if [[ $(ceph -v | egrep -q "12.2|luminous"; echo $?) -ne 0 ]]; then
+    echo "ERROR- need Luminous release"
+    exit 1
+fi
+
+if [[ ! -d /var/lib/ceph/osd ]]; then
+  echo "ERROR- could not find the osd directory, did you bind mount the OSD data directory?"
+  echo "ERROR- use -v <host_osd_data_dir>:/var/lib/ceph/osd"
+  exit 1
+fi
+
+if [ -z "${HOSTNAME}" ]; then
+  echo "HOSTNAME not set; This will prevent to add an OSD into the CRUSH map"
+  exit 1
+fi
+
+# check if anything is present, if not, create an osd and its directory
+if [[ -n "$(find /var/lib/ceph/osd -prune -empty)" ]]; then
+  echo "Creating osd"
+  UUID=$(uuidgen)
+  OSD_SECRET=$(ceph-authtool --gen-print-key)
+  OSD_ID=$(echo "{\"cephx_secret\": \"${OSD_SECRET}\"}" | ceph osd new ${UUID} -i - -n client.bootstrap-osd -k "$OSD_BOOTSTRAP_KEYRING")
+
+  # test that the OSD_ID is an integer
+  if [[ "$OSD_ID" =~ ^-?[0-9]+$ ]]; then
+    echo "OSD created with ID: ${OSD_ID}"
+  else
+    echo "OSD creation failed: ${OSD_ID}"
+    exit 1
+  fi
+
+  OSD_PATH="$OSD_PATH_BASE-$OSD_ID/"
+  if [ -n "${JOURNAL_DIR}" ]; then
+     OSD_J="${JOURNAL_DIR}/journal.${OSD_ID}"
+     chown -R ceph. ${JOURNAL_DIR}
+  else
+     if [ -n "${JOURNAL}" ]; then
+        OSD_J=${JOURNAL}
+        chown -R ceph. $(dirname ${JOURNAL_DIR})
+     else
+        OSD_J=${OSD_PATH%/}/journal
+     fi
+  fi
+  # create the folder and own it
+  mkdir -p "${OSD_PATH}"
+  chown "${CHOWN_OPT[@]}" ceph. "${OSD_PATH}"
+  echo "created folder ${OSD_PATH}"
+  # write the secret to the osd keyring file
+  ceph-authtool --create-keyring ${OSD_PATH%/}/keyring --name osd.${OSD_ID} --add-key ${OSD_SECRET}
+  OSD_KEYRING="${OSD_PATH%/}/keyring"
+  # init data directory
+  ceph-osd -i ${OSD_ID} --mkfs --osd-uuid ${UUID} --mkjournal --osd-journal ${OSD_J} --setuser ceph --setgroup ceph
+  # add the osd to the crush map
+  OSD_WEIGHT=$(df -P -k ${OSD_PATH} | tail -1 | awk '{ d= $2/1073741824 ; r = sprintf("%.2f", d); print r }')
+  ceph --name=osd.${OSD_ID} --keyring=${OSD_KEYRING} osd crush create-or-move -- ${OSD_ID} ${OSD_WEIGHT} ${CRUSH_LOCATION}
+fi
+
+# create the directory and an empty Procfile
+mkdir -p /etc/forego/"${CLUSTER}"
+echo "" > /etc/forego/"${CLUSTER}"/Procfile
+
+for OSD_ID in $(ls /var/lib/ceph/osd | sed 's/.*-//'); do
+  OSD_PATH="$OSD_PATH_BASE-$OSD_ID/"
+  OSD_KEYRING="${OSD_PATH%/}/keyring"
+  if [ -n "${JOURNAL_DIR}" ]; then
+     OSD_J="${JOURNAL_DIR}/journal.${OSD_ID}"
+     chown -R ceph. ${JOURNAL_DIR}
+  else
+     if [ -n "${JOURNAL}" ]; then
+        OSD_J=${JOURNAL}
+        chown -R ceph. $(dirname ${JOURNAL_DIR})
+     else
+        OSD_J=${OSD_PATH%/}/journal
+     fi
+  fi
+  # log osd filesystem type
+  FS_TYPE=`stat --file-system -c "%T" ${OSD_PATH}`
+  echo "OSD $OSD_PATH filesystem type: $FS_TYPE"
+  echo "${CLUSTER}-${OSD_ID}: /usr/bin/ceph-osd --cluster ${CLUSTER} -f -i ${OSD_ID} --osd-journal ${OSD_J} -k $OSD_KEYRING" | tee -a /etc/forego/"${CLUSTER}"/Procfile
+done
+
+exec /usr/local/bin/forego start -f /etc/forego/"${CLUSTER}"/Procfile
diff --git a/ceph-osd/templates/bin/osd/_fluentbit-sidecar.sh.tpl b/ceph-osd/templates/bin/osd/_fluentbit-sidecar.sh.tpl
new file mode 100644
index 0000000000..1435938277
--- /dev/null
+++ b/ceph-osd/templates/bin/osd/_fluentbit-sidecar.sh.tpl
@@ -0,0 +1,20 @@
+#!/bin/sh
+{{/*
+Copyright 2017 The Openstack-Helm Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+   http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/}}
+
+set -ex
+
+exec /fluent-bit/bin/fluent-bit -c /fluent-bit/etc/fluent-bit.conf
diff --git a/ceph-osd/templates/bin/osd/_init.sh.tpl b/ceph-osd/templates/bin/osd/_init.sh.tpl
new file mode 100644
index 0000000000..3442270713
--- /dev/null
+++ b/ceph-osd/templates/bin/osd/_init.sh.tpl
@@ -0,0 +1,218 @@
+#!/bin/bash
+
+{{/*
+Copyright 2017 The Openstack-Helm Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+   http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/}}
+
+set -ex
+
+: "${OSD_BOOTSTRAP_KEYRING:=/var/lib/ceph/bootstrap-osd/${CLUSTER}.keyring}"
+: "${OSD_JOURNAL_UUID:=$(uuidgen)}"
+: "${OSD_FORCE_ZAP:=1}"
+
+if [ "x${STORAGE_TYPE%-*}" == "xdirectory" ]; then
+  export OSD_DEVICE="/var/lib/ceph/osd"
+else
+  export OSD_DEVICE=$(readlink -f ${STORAGE_LOCATION})
+fi
+
+if [ "x$JOURNAL_TYPE" == "xdirectory" ]; then
+  export OSD_JOURNAL="/var/lib/ceph/journal"
+else
+  export OSD_JOURNAL=$(readlink -f ${JOURNAL_LOCATION})
+fi
+
+# Calculate proper device names, given a device and partition number
+function dev_part {
+  local OSD_DEVICE=${1}
+  local OSD_PARTITION=${2}
+
+  if [[ -L ${OSD_DEVICE} ]]; then
+    # This device is a symlink. Work out its actual device
+    local ACTUAL_DEVICE=$(readlink -f ${OSD_DEVICE})
+    local BN=$(basename ${OSD_DEVICE})
+    if [[ "${ACTUAL_DEVICE:0-1:1}" == [0-9] ]]; then
+      local DESIRED_PARTITION="${ACTUAL_DEVICE}p${OSD_PARTITION}"
+    else
+      local DESIRED_PARTITION="${ACTUAL_DEVICE}${OSD_PARTITION}"
+    fi
+    # Now search for a symlink in the directory of $OSD_DEVICE
+    # that has the correct desired partition, and the longest
+    # shared prefix with the original symlink
+    local SYMDIR=$(dirname ${OSD_DEVICE})
+    local LINK=""
+    local PFXLEN=0
+    for OPTION in $(ls $SYMDIR); do
+    if [[ $(readlink -f $SYMDIR/$OPTION) == $DESIRED_PARTITION ]]; then
+      local OPT_PREFIX_LEN=$(prefix_length $OPTION $BN)
+      if [[ $OPT_PREFIX_LEN -gt $PFXLEN ]]; then
+        LINK=$SYMDIR/$OPTION
+        PFXLEN=$OPT_PREFIX_LEN
+      fi
+    fi
+    done
+    if [[ $PFXLEN -eq 0 ]]; then
+      >&2 log "Could not locate appropriate symlink for partition ${OSD_PARTITION} of ${OSD_DEVICE}"
+      exit 1
+    fi
+    echo "$LINK"
+  elif [[ "${OSD_DEVICE:0-1:1}" == [0-9] ]]; then
+    echo "${OSD_DEVICE}p${OSD_PARTITION}"
+  else
+    echo "${OSD_DEVICE}${OSD_PARTITION}"
+  fi
+}
+
+function osd_disk_prepare {
+  if [[ -z "${OSD_DEVICE}" ]];then
+    echo "ERROR- You must provide a device to build your OSD ie: /dev/sdb"
+    exit 1
+  fi
+
+  if [[ ! -b "${OSD_DEVICE}" ]]; then
+    echo "ERROR- The device pointed by OSD_DEVICE ($OSD_DEVICE) doesn't exist !"
+    exit 1
+  fi
+
+  if [ ! -e $OSD_BOOTSTRAP_KEYRING ]; then
+    echo "ERROR- $OSD_BOOTSTRAP_KEYRING must exist. You can extract it from your current monitor by running 'ceph auth get client.bootstrap-osd -o $OSD_BOOTSTRAP_KEYRING'"
+    exit 1
+  fi
+  timeout 10 ceph ${CLI_OPTS} --name client.bootstrap-osd --keyring $OSD_BOOTSTRAP_KEYRING health || exit 1
+
+  # check device status first
+  if ! parted --script ${OSD_DEVICE} print > /dev/null 2>&1; then
+    if [[ ${OSD_FORCE_ZAP} -eq 1 ]]; then
+      echo "It looks like ${OSD_DEVICE} isn't consistent, however OSD_FORCE_ZAP is enabled so we are zapping the device anyway"
+      ceph-disk -v zap ${OSD_DEVICE}
+    else
+      echo "Regarding parted, device ${OSD_DEVICE} is inconsistent/broken/weird."
+      echo "It would be too dangerous to destroy it without any notification."
+      echo "Please set OSD_FORCE_ZAP to '1' if you really want to zap this disk."
+      exit 1
+    fi
+  fi
+
+  # then search for some ceph metadata on the disk
+  if [[ "$(parted --script ${OSD_DEVICE} print | egrep '^ 1.*ceph data')" ]]; then
+    if [[ ${OSD_FORCE_ZAP} -eq 1 ]]; then
+      if [ -b "${OSD_DEVICE}1" ]; then
+        local fs=`lsblk -fn ${OSD_DEVICE}1`
+        if [ ! -z "${fs}" ]; then
+          local cephFSID=`ceph-conf --lookup fsid`
+          if [ ! -z "${cephFSID}" ]; then
+            local tmpmnt=`mktemp -d`
+            mount ${OSD_DEVICE}1 ${tmpmnt}
+            if [ -f "${tmpmnt}/ceph_fsid" ]; then
+              osdFSID=`cat "${tmpmnt}/ceph_fsid"`
+              umount ${tmpmnt}
+              if [ ${osdFSID} != ${cephFSID} ]; then
+                echo "It looks like ${OSD_DEVICE} is an OSD belonging to a different (or old) ceph cluster."
+                echo "The OSD FSID is ${osdFSID} while this cluster is ${cephFSID}"
+                echo "Because OSD_FORCE_ZAP was set, we will zap this device."
+                ceph-disk -v zap ${OSD_DEVICE}
+              else
+                echo "It looks like ${OSD_DEVICE} is an OSD belonging to a this ceph cluster."
+                echo "OSD_FORCE_ZAP is set, but will be ignored and the device will not be zapped."
+                echo "Moving on, trying to activate the OSD now."
+                return
+              fi
+            else
+              umount ${tmpmnt}
+              echo "It looks like ${OSD_DEVICE} has a ceph data partition but no FSID."
+              echo "Because OSD_FORCE_ZAP was set, we will zap this device."
+              ceph-disk -v zap ${OSD_DEVICE}
+            fi
+          else
+            echo "Unable to determine the FSID of the current cluster."
+            echo "OSD_FORCE_ZAP is set, but this OSD will not be zapped."
+            echo "Moving on, trying to activate the OSD now."
+            return
+          fi
+        else
+          echo "It looks like ${OSD_DEVICE} has a ceph data partition but no filesystem."
+          echo "Because OSD_FORCE_ZAP was set, we will zap this device."
+          ceph-disk -v zap ${OSD_DEVICE}
+        fi
+      else
+        echo "parted says ${OSD_DEVICE}1 should exist, but we do not see it."
+        echo "We will ignore OSD_FORCE_ZAP and try to use the device as-is"
+        echo "Moving on, trying to activate the OSD now."
+        return
+      fi
+    else
+      echo "INFO- It looks like ${OSD_DEVICE} is an OSD, set OSD_FORCE_ZAP=1 to use this device anyway and zap its content"
+      echo "You can also use the zap_device scenario on the appropriate device to zap it"
+      echo "Moving on, trying to activate the OSD now."
+      return
+    fi
+  fi
+
+  if [ "${OSD_BLUESTORE:-0}" -ne 1 ]; then
+    # we only care about journals for filestore.
+    if [ -n "${OSD_JOURNAL}" ]; then
+      if [ -b $OSD_JOURNAL ]; then
+        OSD_JOURNAL=`readlink -f ${OSD_JOURNAL}`
+        OSD_JOURNAL_PARTITION=`echo $OSD_JOURNAL_PARTITION | sed 's/[^0-9]//g'`
+        if [ -z "${OSD_JOURNAL_PARTITION}" ]; then
+          # maybe they specified the journal as a /dev path like '/dev/sdc12':
+          local JDEV=`echo ${OSD_JOURNAL} | sed 's/\(.*[^0-9]\)[0-9]*$/\1/'`
+          if [ -d /sys/block/`basename $JDEV`/`basename $OSD_JOURNAL` ]; then
+            OSD_JOURNAL=$(dev_part ${JDEV} `echo ${OSD_JOURNAL} |\
+              sed 's/.*[^0-9]\([0-9]*\)$/\1/'`)
+            OSD_JOURNAL_PARTITION=${JDEV}
+          fi
+        else
+          OSD_JOURNAL=$(dev_part ${OSD_JOURNAL} ${OSD_JOURNAL_PARTITION})
+        fi
+      fi
+      chown ceph. ${OSD_JOURNAL}
+    else
+      echo "No journal device specified.  OSD and journal will share ${OSD_DEVICE}"
+      echo "For better performance, consider moving your journal to a separate device"
+    fi
+    CLI_OPTS="${CLI_OPTS} --filestore"
+  else
+    OSD_JOURNAL=''
+    CLI_OPTS="${CLI_OPTS} --bluestore"
+  fi
+
+  if [ -b "${OSD_JOURNAL}" -a "${OSD_FORCE_ZAP:-0}" -eq 1 ]; then
+    # if we got here and zap is set, it's ok to wipe the journal.
+    echo "OSD_FORCE_ZAP is set, so we will erase the journal device ${OSD_JOURNAL}"
+    if [ -z "${OSD_JOURNAL_PARTITION}" ]; then
+      # it's a raw block device.  nuke any existing partition table.
+      parted -s ${OSD_JOURNAL} mklabel msdos
+    else
+      # we are likely working on a partition. Just make a filesystem on
+      # the device, as other partitions may be in use so nuking the whole
+      # disk isn't safe.
+      mkfs -t xfs -f ${OSD_JOURNAL}
+    fi
+  fi
+
+  if [ "x$JOURNAL_TYPE" == "xdirectory" ]; then
+    export OSD_JOURNAL="--journal-file"
+  fi
+
+  ceph-disk -v prepare ${CLI_OPTS} --journal-uuid ${OSD_JOURNAL_UUID} ${OSD_DEVICE} ${OSD_JOURNAL}
+
+  # watch the udev event queue, and exit if all current events are handled
+  udevadm settle --timeout=600
+}
+
+if ! [ "x${STORAGE_TYPE%-*}" == "xdirectory" ]; then
+  osd_disk_prepare
+fi
diff --git a/ceph-osd/templates/bin/osd/_start.sh.tpl b/ceph-osd/templates/bin/osd/_start.sh.tpl
new file mode 100644
index 0000000000..5c3b2f7489
--- /dev/null
+++ b/ceph-osd/templates/bin/osd/_start.sh.tpl
@@ -0,0 +1,23 @@
+#!/bin/bash
+
+{{/*
+Copyright 2017 The Openstack-Helm Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+   http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/}}
+
+set -ex
+
+echo "LAUNCHING OSD: in ${STORAGE_TYPE%-*}:${STORAGE_TYPE#*-} mode"
+
+exec "/tmp/osd-${STORAGE_TYPE%-*}.sh"
diff --git a/ceph-osd/templates/bin/osd/_stop.sh.tpl b/ceph-osd/templates/bin/osd/_stop.sh.tpl
new file mode 100644
index 0000000000..7e5c906885
--- /dev/null
+++ b/ceph-osd/templates/bin/osd/_stop.sh.tpl
@@ -0,0 +1,32 @@
+#!/bin/bash
+
+{{/*
+Copyright 2017 The Openstack-Helm Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+   http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/}}
+
+set -ex
+
+if [ "x${STORAGE_TYPE%-*}" == "xblock" ]; then
+  OSD_DEVICE=$(readlink -f ${STORAGE_LOCATION})
+  OSD_JOURNAL=$(readlink -f ${JOURNAL_LOCATION})
+  if [ "x${STORAGE_TYPE#*-}" == "xlogical" ]; then
+    CEPH_OSD_PID="$(cat /run/ceph-osd.pid)"
+    while kill -0 ${CEPH_OSD_PID} >/dev/null 2>&1; do
+        kill -SIGTERM ${CEPH_OSD_PID}
+        sleep 1
+    done
+    umount "$(findmnt -S "${OSD_DEVICE}1" | tail -n +2 | awk '{ print $1 }')"
+  fi
+fi
diff --git a/ceph-osd/templates/configmap-bin.yaml b/ceph-osd/templates/configmap-bin.yaml
new file mode 100644
index 0000000000..d17a6e872a
--- /dev/null
+++ b/ceph-osd/templates/configmap-bin.yaml
@@ -0,0 +1,47 @@
+{{/*
+Copyright 2017 The Openstack-Helm Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+   http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/}}
+
+{{- if .Values.manifests.configmap_bin }}
+{{- $envAll := . }}
+---
+apiVersion: v1
+kind: ConfigMap
+metadata:
+  name: ceph-osd-bin
+data:
+{{- if .Values.images.local_registry.active }}
+  image-repo-sync.sh: |
+{{- include "helm-toolkit.scripts.image_repo_sync" . | indent 4 }}
+{{- end }}
+  osd-start.sh: |
+{{ tuple "bin/osd/_start.sh.tpl" . | include  "helm-toolkit.utils.template" | indent 4 }}
+  osd-directory.sh: |
+{{ tuple "bin/osd/_directory.sh.tpl" . | include  "helm-toolkit.utils.template" | indent 4 }}
+  osd-block.sh: |
+{{ tuple "bin/osd/_block.sh.tpl" . | include  "helm-toolkit.utils.template" | indent 4 }}
+  osd-init.sh: |
+{{ tuple "bin/osd/_init.sh.tpl" . | include  "helm-toolkit.utils.template" | indent 4 }}
+  osd-check.sh: |
+{{ tuple "bin/osd/_check.sh.tpl" . | include "helm-toolkit.utils.template" | indent 4 }}
+  osd-stop.sh: |
+{{ tuple "bin/osd/_stop.sh.tpl" . | include  "helm-toolkit.utils.template" | indent 4 }}
+  init-dirs.sh: |
+{{ tuple "bin/_init-dirs.sh.tpl" . | include "helm-toolkit.utils.template" | indent 4 }}
+{{ if .Values.logging.fluentd }}
+  fluentbit-sidecar.sh: |
+{{ tuple "bin/osd/_fluentbit-sidecar.sh.tpl" . | include "helm-toolkit.utils.template" | indent 4 }}
+{{ end }}
+{{- end }}
diff --git a/ceph-osd/templates/configmap-etc.yaml b/ceph-osd/templates/configmap-etc.yaml
new file mode 100644
index 0000000000..85ab873b9e
--- /dev/null
+++ b/ceph-osd/templates/configmap-etc.yaml
@@ -0,0 +1,70 @@
+{{/*
+Copyright 2017 The Openstack-Helm Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+   http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/}}
+
+{{- define "ceph.osd.configmap.etc" }}
+{{- $configMapName := index . 0 }}
+{{- $envAll := index . 1 }}
+{{- with $envAll }}
+
+{{- if empty .Values.conf.ceph.global.mon_host -}}
+{{- $monHost := tuple "ceph_mon" "internal" "mon" . | include "helm-toolkit.endpoints.host_and_port_endpoint_uri_lookup" }}
+{{- $_ := $monHost | set .Values.conf.ceph.global "mon_host" -}}
+{{- end -}}
+
+{{- if empty .Values.conf.ceph.global.mon_addr -}}
+{{- $monPort := tuple "ceph_mon" "internal" "mon" . | include "helm-toolkit.endpoints.endpoint_port_lookup" }}
+{{- $_ := printf ":%s" $monPort | set .Values.conf.ceph.global "mon_addr" -}}
+{{- end -}}
+
+{{- if empty .Values.conf.ceph.global.fsid -}}
+{{- $_ := uuidv4 | set .Values.conf.ceph.global "fsid" -}}
+{{- end -}}
+
+{{- if empty .Values.conf.ceph.osd.cluster_network -}}
+{{- $_ := .Values.network.cluster | set .Values.conf.ceph.osd "cluster_network" -}}
+{{- end -}}
+
+{{- if empty .Values.conf.ceph.osd.public_network -}}
+{{- $_ := .Values.network.public | set .Values.conf.ceph.osd "public_network" -}}
+{{- end -}}
+
+{{- if not (has "fluentd_output" .Values.conf.fluentbit) -}}
+{{- $fluentd_host := tuple "fluentd" "internal" $envAll | include "helm-toolkit.endpoints.hostname_namespaced_endpoint_lookup" }}
+{{- $fluentd_port := tuple "fluentd" "internal" "service" $envAll | include "helm-toolkit.endpoints.endpoint_port_lookup" }}
+{{- $fluentd_output := dict "header" "output" "Name" "forward" "Match" "*" "Host" $fluentd_host "Port" $fluentd_port -}}
+{{- $_ := set .Values "__fluentbit_config" ( list $fluentd_output) -}}
+{{- $__fluentbit_config := append .Values.conf.fluentbit .Values.__fluentbit_config -}}
+{{- $_ := set .Values.conf "fluentbit" $__fluentbit_config -}}
+{{- end -}}
+---
+apiVersion: v1
+kind: ConfigMap
+metadata:
+  name: {{ $configMapName }}
+data:
+  ceph.conf: |
+{{ include "helm-toolkit.utils.to_ini" .Values.conf.ceph | indent 4 }}
+{{ if .Values.logging.fluentd }}
+  fluent-bit.conf: |
+{{ include "ceph-osd.utils.to_fluentbit_conf" .Values.conf.fluentbit | indent 4 }}
+  parsers.conf: |
+{{ include "ceph-osd.utils.to_fluentbit_conf" .Values.conf.parsers | indent 4 }}
+{{ end }}
+{{- end }}
+{{- end }}
+{{- if .Values.manifests.configmap_etc }}
+{{- list "ceph-osd-etc" . | include "ceph.osd.configmap.etc" }}
+{{- end }}
diff --git a/ceph-osd/templates/daemonset-osd.yaml b/ceph-osd/templates/daemonset-osd.yaml
new file mode 100644
index 0000000000..2d4b6db5da
--- /dev/null
+++ b/ceph-osd/templates/daemonset-osd.yaml
@@ -0,0 +1,288 @@
+{{/*
+Copyright 2017 The Openstack-Helm Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+   http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/}}
+
+
+{{- define "ceph.osd.daemonset" }}
+{{- $daemonset := index . 0 }}
+{{- $configMapName := index . 1 }}
+{{- $serviceAccountName := index . 2 }}
+{{- $envAll := index . 3 }}
+{{- with $envAll }}
+---
+kind: DaemonSet
+apiVersion: apps/v1
+metadata:
+  name: ceph-osd
+  labels:
+{{ tuple $envAll "ceph" "osd" | include "helm-toolkit.snippets.kubernetes_metadata_labels" | indent 4 }}
+spec:
+  selector:
+    matchLabels:
+{{ tuple $envAll "ceph" "osd" | include "helm-toolkit.snippets.kubernetes_metadata_labels" | indent 6 }}
+  template:
+    metadata:
+      labels:
+{{ tuple $envAll "ceph" "osd" | include "helm-toolkit.snippets.kubernetes_metadata_labels" | indent 8 }}
+    spec:
+      serviceAccountName: {{ $serviceAccountName }}
+      nodeSelector:
+        {{ .Values.labels.osd.node_selector_key }}: {{ .Values.labels.osd.node_selector_value }}
+      hostNetwork: true
+      hostPID: true
+      dnsPolicy: {{ .Values.pod.dns_policy }}
+      initContainers:
+{{ tuple $envAll "osd" list | include "helm-toolkit.snippets.kubernetes_entrypoint_init_container" | indent 8 }}
+        - name: ceph-init-dirs
+{{ tuple $envAll "ceph_osd" | include "helm-toolkit.snippets.image" | indent 10 }}
+          command:
+            - /tmp/init-dirs.sh
+          env:
+          # NOTE(portdirect): These environment variables will be populated
+          # dynamically at the point of render.
+          # - name: JOURNAL_LOCATION
+          #   value: /var/lib/openstack-helm/ceph/osd/journal-one
+          # - name: STORAGE_LOCATION
+          #   value: /var/lib/openstack-helm/ceph/osd/data-one
+          # - name: JOURNAL_TYPE
+          #   value: directory
+          # - name: STORAGE_TYPE
+          #   value: directory
+            - name: CLUSTER
+              value: "ceph"
+          volumeMounts:
+            - name: ceph-osd-bin
+              mountPath: /tmp/init-dirs.sh
+              subPath: init-dirs.sh
+              readOnly: true
+            - name: pod-var-lib-ceph
+              mountPath: /var/lib/ceph
+              readOnly: false
+            - name: pod-run
+              mountPath: /run
+              readOnly: false
+        - name: osd-init
+{{ tuple $envAll "ceph_osd" | include "helm-toolkit.snippets.image" | indent 10 }}
+{{ tuple $envAll $envAll.Values.pod.resources.osd | include "helm-toolkit.snippets.kubernetes_resources" | indent 10 }}
+          securityContext:
+            privileged: true
+          env:
+          # NOTE(portdirect): These environment variables will be populated
+          # dynamically at the point of render and added to all containers in the
+          # pod
+          # - name: JOURNAL_LOCATION
+          #   value: /var/lib/openstack-helm/ceph/osd/journal-one
+          # - name: STORAGE_LOCATION
+          #   value: /var/lib/openstack-helm/ceph/osd/data-one
+          # - name: JOURNAL_TYPE
+          #   value: directory
+          # - name: STORAGE_TYPE
+          #   value: directory
+            - name: CLUSTER
+              value: "ceph"
+            - name: CEPH_GET_ADMIN_KEY
+              value: "1"
+          command:
+            - /tmp/osd-init.sh
+          volumeMounts:
+            - name: ceph-osd-bin
+              mountPath: /tmp/osd-init.sh
+              subPath: osd-init.sh
+              readOnly: true
+            - name: ceph-osd-etc
+              mountPath: /etc/ceph/ceph.conf
+              subPath: ceph.conf
+              readOnly: true
+            - name: ceph-bootstrap-osd-keyring
+              mountPath: /var/lib/ceph/bootstrap-osd/ceph.keyring
+              subPath: ceph.keyring
+              readOnly: false
+            - name: devices
+              mountPath: /dev
+              readOnly: false
+            - name: pod-var-lib-ceph
+              mountPath: /var/lib/ceph
+              readOnly: false
+            - name: pod-run
+              mountPath: /run
+              readOnly: false
+            - name: run-lvm
+              mountPath: /run/lvm
+              readOnly: false
+            - name: data
+              mountPath: /var/lib/ceph/osd
+              readOnly: false
+            - name: journal
+              mountPath: /var/lib/ceph/journal
+              readOnly: false
+      containers:
+        - name: osd-pod
+{{ tuple $envAll "ceph_osd" | include "helm-toolkit.snippets.image" | indent 10 }}
+{{ tuple $envAll $envAll.Values.pod.resources.osd | include "helm-toolkit.snippets.kubernetes_resources" | indent 10 }}
+          securityContext:
+            privileged: true
+          env:
+          # NOTE(portdirect): These environment variables will be populated
+          # dynamically at the point of render.
+          # - name: JOURNAL_LOCATION
+          #   value: /var/lib/openstack-helm/ceph/osd/journal-one
+          # - name: STORAGE_LOCATION
+          #   value: /var/lib/openstack-helm/ceph/osd/data-one
+          # - name: JOURNAL_TYPE
+          #   value: directory
+          # - name: STORAGE_TYPE
+          #   value: directory
+            - name: CLUSTER
+              value: "ceph"
+            - name: CEPH_GET_ADMIN_KEY
+              value: "1"
+          command:
+            - /tmp/osd-start.sh
+          lifecycle:
+            preStop:
+              exec:
+                command:
+                  - /tmp/osd-stop.sh
+          livenessProbe:
+           exec:
+            command:
+             - /tmp/osd-check.sh
+             - liveness
+           initialDelaySeconds: 120
+           periodSeconds: 60
+          readinessProbe:
+           exec:
+            command:
+             - /tmp/osd-check.sh
+             - readiness
+           initialDelaySeconds: 60
+           periodSeconds: 60
+          volumeMounts:
+            - name: ceph-osd-bin
+              mountPath: /tmp/osd-start.sh
+              subPath: osd-start.sh
+              readOnly: true
+            - name: ceph-osd-bin
+              mountPath: /tmp/osd-directory.sh
+              subPath: osd-directory.sh
+              readOnly: true
+            - name: ceph-osd-bin
+              mountPath: /tmp/osd-block.sh
+              subPath: osd-block.sh
+              readOnly: true
+            - name: ceph-osd-bin
+              mountPath: /tmp/osd-check.sh
+              subPath: osd-check.sh
+              readOnly: true
+            - name: ceph-osd-bin
+              mountPath: /tmp/osd-stop.sh
+              subPath: osd-stop.sh
+              readOnly: true
+            - name: ceph-osd-etc
+              mountPath: /etc/ceph/ceph.conf
+              subPath: ceph.conf
+              readOnly: true
+            - name: ceph-bootstrap-osd-keyring
+              mountPath: /var/lib/ceph/bootstrap-osd/ceph.keyring
+              subPath: ceph.keyring
+              readOnly: false
+            - name: devices
+              mountPath: /dev
+              readOnly: false
+            - name: pod-var-lib-ceph
+              mountPath: /var/lib/ceph
+              readOnly: false
+            - name: pod-run
+              mountPath: /run
+              readOnly: false
+            - name: run-lvm
+              mountPath: /run/lvm
+              readOnly: false
+            - name: data
+              mountPath: /var/lib/ceph/osd
+              readOnly: false
+            - name: journal
+              mountPath: /var/lib/ceph/journal
+              readOnly: false
+            - name: varlog
+              mountPath: /var/log/ceph
+        {{ if .Values.logging.fluentd }}
+        - name: fluentbit-sidecar
+{{ tuple $envAll "fluentbit" | include "helm-toolkit.snippets.image" | indent 10 }}
+{{ tuple $envAll $envAll.Values.pod.resources.fluentbit | include "helm-toolkit.snippets.kubernetes_resources" | indent 10 }}
+          command:
+            - /tmp/fluentbit-sidecar.sh
+          volumeMounts:
+            - name: ceph-osd-bin
+              mountPath: /tmp/fluentbit-sidecar.sh
+              subPath: fluentbit-sidecar.sh
+              readOnly: true
+            - name: varlog
+              mountPath: /var/log/ceph
+            - name: ceph-osd-etc
+              mountPath: /fluent-bit/etc/fluent-bit.conf
+              subPath: fluent-bit.conf
+              readOnly: true
+            - name: ceph-osd-etc
+              mountPath: /fluent-bit/etc/parsers.conf
+              subPath: parsers.conf
+              readOnly: true
+        {{ end }}
+      volumes:
+        - name: devices
+          hostPath:
+            path: /dev
+        - name: run-lvm
+          hostPath:
+            path: /run/lvm
+        - name: pod-var-lib-ceph
+          emptyDir: {}
+        - name: varlog
+          emptyDir: {}
+        - name: pod-run
+          emptyDir:
+            medium: "Memory"
+        - name: ceph-osd-bin
+          configMap:
+            name: ceph-osd-bin
+            defaultMode: 0555
+        - name: ceph-osd-etc
+          configMap:
+            name: {{ $configMapName }}
+            defaultMode: 0444
+        - name: ceph-bootstrap-osd-keyring
+          secret:
+            secretName: {{ .Values.secrets.keyrings.osd }}
+      # NOTE(portdirect): If directory mounts are to be used for OSDs,
+      # they will automatically be inserted here, with the format:
+      # - name: data
+      #   hostPath:
+      #     path: /var/lib/foo
+      # - name: journal
+      #   hostPath:
+      #     path: /var/lib/bar
+
+{{- end }}
+{{- end }}
+
+{{- if .Values.manifests.daemonset_osd }}
+{{- $daemonset := "osd" }}
+{{- $configMapName := "ceph-osd-etc" }}
+{{- $serviceAccountName := "ceph-osd"}}
+{{ tuple . "osd" $serviceAccountName | include "helm-toolkit.snippets.kubernetes_pod_rbac_serviceaccount" }}
+{{- $daemonset_yaml := list $daemonset $configMapName $serviceAccountName . | include "ceph.osd.daemonset" | toString | fromYaml }}
+{{- $configmap_yaml := "ceph.osd.configmap.etc" }}
+{{- list $daemonset $daemonset_yaml $configmap_yaml $configMapName . | include "ceph.utils.osd_daemonset_overrides" }}
+{{- end }}
diff --git a/ceph-osd/templates/job-image-repo-sync.yaml b/ceph-osd/templates/job-image-repo-sync.yaml
new file mode 100644
index 0000000000..8212dafb0e
--- /dev/null
+++ b/ceph-osd/templates/job-image-repo-sync.yaml
@@ -0,0 +1,20 @@
+{{/*
+Copyright 2017 The Openstack-Helm Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+   http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/}}
+
+{{- if and .Values.manifests.job_image_repo_sync .Values.images.local_registry.active }}
+{{- $imageRepoSyncJob := dict "envAll" . "serviceName" "ceph-osd" -}}
+{{ $imageRepoSyncJob | include "helm-toolkit.manifests.job_image_repo_sync" }}
+{{- end }}
diff --git a/ceph-osd/templates/utils/_osd_daemonset_overrides.tpl b/ceph-osd/templates/utils/_osd_daemonset_overrides.tpl
new file mode 100644
index 0000000000..eb9c709862
--- /dev/null
+++ b/ceph-osd/templates/utils/_osd_daemonset_overrides.tpl
@@ -0,0 +1,359 @@
+{{/*
+Copyright 2017 The Openstack-Helm Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+   http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/}}
+
+{{- define "ceph.utils.match_exprs_hash" }}
+  {{- $match_exprs := index . 0 }}
+  {{- $context := index . 1 }}
+  {{- $_ := set $context.Values "__match_exprs_hash_content" "" }}
+  {{- range $match_expr := $match_exprs }}
+    {{- $_ := set $context.Values "__match_exprs_hash_content" (print $context.Values.__match_exprs_hash_content $match_expr.key $match_expr.operator ($match_expr.values | quote)) }}
+  {{- end }}
+  {{- $context.Values.__match_exprs_hash_content | sha256sum | trunc 8 }}
+  {{- $_ := unset $context.Values "__match_exprs_hash_content" }}
+{{- end }}
+
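+{{/*
+Illustrative shape of the override values consumed below (assuming the
+daemonset name "osd"); each per-host or per-label "conf" block is merged over
+the chart-wide .Values.conf, and the inner conf keys shown are placeholders:
+
+  conf:
+    overrides:
+      ceph_osd:
+        hosts:
+          - name: some-hostname
+            conf:
+              ceph:
+                osd:
+                  osd_mkfs_type: xfs
+        labels:
+          - label:
+              key: some-node-label
+              values:
+                - enabled
+            conf: {}
+*/}}
+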
+{{- define "ceph.utils.osd_daemonset_overrides" }}
+  {{- $daemonset := index . 0 }}
+  {{- $daemonset_yaml := index . 1 }}
+  {{- $configmap_include := index . 2 }}
+  {{- $configmap_name := index . 3 }}
+  {{- $context := index . 4 }}
+  {{- $_ := unset $context ".Files" }}
+  {{- $_ := set $context.Values "__daemonset_yaml" $daemonset_yaml }}
+  {{- $daemonset_root_name := printf "ceph_%s" $daemonset }}
+  {{- $_ := set $context.Values "__daemonset_list" list }}
+  {{- $_ := set $context.Values "__default" dict }}
+  {{- if hasKey $context.Values.conf "overrides" }}
+    {{- range $key, $val := $context.Values.conf.overrides }}
+
+      {{- if eq $key $daemonset_root_name }}
+        {{- range $type, $type_data := . }}
+
+          {{- if eq $type "hosts" }}
+            {{- range $host_data := . }}
+              {{/* dictionary that will contain all info needed to generate this
+              iteration of the daemonset */}}
+              {{- $current_dict := dict }}
+
+              {{/* set daemonset name */}}
+              {{- $_ := set $current_dict "name" $host_data.name }}
+
+              {{/* apply overrides */}}
+              {{- $override_conf_copy := $host_data.conf }}
+              {{- $root_conf_copy := omit $context.Values.conf "overrides" }}
+              {{- $merged_dict := merge $override_conf_copy $root_conf_copy }}
+              {{- $root_conf_copy2 := dict "conf" $merged_dict }}
+              {{- $context_values := omit $context.Values "conf" }}
+              {{- $root_conf_copy3 := merge $context_values $root_conf_copy2 }}
+              {{- $root_conf_copy4 := dict "Values" $root_conf_copy3 }}
+              {{- $_ := set $current_dict "nodeData" $root_conf_copy4 }}
+
+              {{/* Schedule to this host explicitly. */}}
+              {{- $nodeSelector_dict := dict }}
+
+              {{- $_ := set $nodeSelector_dict "key" "kubernetes.io/hostname" }}
+              {{- $_ := set $nodeSelector_dict "operator" "In" }}
+
+              {{- $values_list := list $host_data.name }}
+              {{- $_ := set $nodeSelector_dict "values" $values_list }}
+
+              {{- $list_aggregate := list $nodeSelector_dict }}
+              {{- $_ := set $current_dict "matchExpressions" $list_aggregate }}
+
+              {{/* store completed daemonset entry/info into global list */}}
+              {{- $list_aggregate := append $context.Values.__daemonset_list $current_dict }}
+              {{- $_ := set $context.Values "__daemonset_list" $list_aggregate }}
+
+            {{- end }}
+          {{- end }}
+
+          {{- if eq $type "labels" }}
+            {{- $_ := set $context.Values "__label_list" . }}
+            {{- range $label_data := . }}
+              {{/* dictionary that will contain all info needed to generate this
+              iteration of the daemonset. */}}
+              {{- $_ := set $context.Values "__current_label" dict }}
+
+              {{/* set daemonset name */}}
+              {{- $_ := set $context.Values.__current_label "name" $label_data.label.key }}
+
+              {{/* apply overrides */}}
+              {{- $override_conf_copy := $label_data.conf }}
+              {{- $root_conf_copy := omit $context.Values.conf "overrides" }}
+              {{- $merged_dict := merge $override_conf_copy $root_conf_copy }}
+              {{- $root_conf_copy2 := dict "conf" $merged_dict }}
+              {{- $context_values := omit $context.Values "conf" }}
+              {{- $root_conf_copy3 := merge $context_values $root_conf_copy2 }}
+              {{- $root_conf_copy4 := dict "Values" $root_conf_copy3 }}
+              {{- $_ := set $context.Values.__current_label "nodeData" $root_conf_copy4 }}
+
+              {{/* Schedule to the provided label value(s) */}}
+              {{- $label_dict := omit $label_data.label "NULL" }}
+              {{- $_ := set $label_dict "operator" "In" }}
+              {{- $list_aggregate := list $label_dict }}
+              {{- $_ := set $context.Values.__current_label "matchExpressions" $list_aggregate }}
+
+              {{/* Do not schedule to other specified labels, with higher
+              precedence as the list position increases. Last defined label
+              is highest priority. */}}
+              {{- $other_labels := without $context.Values.__label_list $label_data }}
+              {{- range $label_data2 := $other_labels }}
+                {{- $label_dict := omit $label_data2.label "NULL" }}
+
+                {{- $_ := set $label_dict "operator" "NotIn" }}
+
+                {{- $list_aggregate := append $context.Values.__current_label.matchExpressions $label_dict }}
+                {{- $_ := set $context.Values.__current_label "matchExpressions" $list_aggregate }}
+              {{- end }}
+              {{- $_ := set $context.Values "__label_list" $other_labels }}
+
+              {{/* Do not schedule to any other specified hosts */}}
+              {{- range $type, $type_data := $val }}
+                {{- if eq $type "hosts" }}
+                  {{- range $host_data := . }}
+                    {{- $label_dict := dict }}
+
+                    {{- $_ := set $label_dict "key" "kubernetes.io/hostname" }}
+                    {{- $_ := set $label_dict "operator" "NotIn" }}
+
+                    {{- $values_list := list $host_data.name }}
+                    {{- $_ := set $label_dict "values" $values_list }}
+
+                    {{- $list_aggregate := append $context.Values.__current_label.matchExpressions $label_dict }}
+                    {{- $_ := set $context.Values.__current_label "matchExpressions" $list_aggregate }}
+                  {{- end }}
+                {{- end }}
+              {{- end }}
+
+              {{/* store completed daemonset entry/info into global list */}}
+              {{- $list_aggregate := append $context.Values.__daemonset_list $context.Values.__current_label }}
+              {{- $_ := set $context.Values "__daemonset_list" $list_aggregate }}
+              {{- $_ := unset $context.Values "__current_label" }}
+
+            {{- end }}
+          {{- end }}
+        {{- end }}
+
+        {{/* scheduler exceptions for the default daemonset */}}
+        {{- $_ := set $context.Values.__default "matchExpressions" list }}
+
+        {{- range $type, $type_data := . }}
+          {{/* Do not schedule to other specified labels */}}
+          {{- if eq $type "labels" }}
+            {{- range $label_data := . }}
+              {{- $default_dict := omit $label_data.label "NULL" }}
+
+              {{- $_ := set $default_dict "operator" "NotIn" }}
+
+              {{- $list_aggregate := append $context.Values.__default.matchExpressions $default_dict }}
+              {{- $_ := set $context.Values.__default "matchExpressions" $list_aggregate }}
+            {{- end }}
+          {{- end }}
+          {{/* Do not schedule to other specified hosts */}}
+          {{- if eq $type "hosts" }}
+            {{- range $host_data := . }}
+              {{- $default_dict := dict }}
+
+              {{- $_ := set $default_dict "key" "kubernetes.io/hostname" }}
+              {{- $_ := set $default_dict "operator" "NotIn" }}
+
+              {{- $values_list := list $host_data.name }}
+              {{- $_ := set $default_dict "values" $values_list }}
+
+              {{- $list_aggregate := append $context.Values.__default.matchExpressions $default_dict }}
+              {{- $_ := set $context.Values.__default "matchExpressions" $list_aggregate }}
+            {{- end }}
+          {{- end }}
+        {{- end }}
+      {{- end }}
+    {{- end }}
+  {{- end }}
+
+  {{/* generate the default daemonset */}}
+
+  {{/* set name */}}
+  {{- $_ := set $context.Values.__default "name" "default" }}
+
+  {{/* no overrides apply, so copy as-is */}}
+  {{- $root_conf_copy1 := omit $context.Values.conf "overrides" }}
+  {{- $root_conf_copy2 := dict "conf" $root_conf_copy1 }}
+  {{- $context_values := omit $context.Values "conf" }}
+  {{- $root_conf_copy3 := merge $context_values $root_conf_copy2 }}
+  {{- $root_conf_copy4 := dict "Values" $root_conf_copy3 }}
+  {{- $_ := set $context.Values.__default "nodeData" $root_conf_copy4 }}
+
+  {{/* add to global list */}}
+  {{- $list_aggregate := append $context.Values.__daemonset_list $context.Values.__default }}
+  {{- $_ := set $context.Values "__daemonset_list" $list_aggregate }}
+
+  {{- $_ := set $context.Values "__last_configmap_name" $configmap_name }}
+  {{- range $current_dict := $context.Values.__daemonset_list }}
+
+    {{- $context_novalues := omit $context "Values" }}
+    {{- $merged_dict := merge $current_dict.nodeData $context_novalues }}
+    {{- $_ := set $current_dict "nodeData" $merged_dict }}
+
+    {{/* name needs to be a DNS-1123 compliant name. Ensure lower case */}}
+    {{- $name_format1 := printf (print $daemonset_root_name "-" $current_dict.name) | lower }}
+    {{/* labels may contain underscores, which would be invalid here, so we replace them with dashes.
+    There may be other valid label names that would produce an invalid DNS-1123 name,
+    but these will be easier to handle in future with sprig regex* functions
+    (not available in helm 2.5.1) */}}
+    {{- $name_format2 := $name_format1 | replace "_" "-" | replace "." "-" }}
+    {{/* To account for the case where the same label is defined multiple times in overrides
+    (but with different label values), we add a sha of the scheduling data to ensure
+    name uniqueness */}}
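+    {{/* As a hypothetical illustration: with a daemonset root name of "ceph-osd"
+    and an override label key of "ceph_osd.special", the formatting above yields
+    "ceph-osd-ceph-osd-special", and the matchExpressions hash below is then
+    appended to keep repeated label keys unique. */}}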
+    {{- $_ := set $current_dict "dns_1123_name" dict }}
+    {{- if hasKey $current_dict "matchExpressions" }}
+      {{- $_ := set $current_dict "dns_1123_name" (printf (print $name_format2 "-" (list $current_dict.matchExpressions $context | include "ceph.utils.match_exprs_hash"))) }}
+    {{- else }}
+      {{- $_ := set $current_dict "dns_1123_name" $name_format2 }}
+    {{- end }}
+
+    {{/* set daemonset metadata name */}}
+    {{- if not $context.Values.__daemonset_yaml.metadata }}{{- $_ := set $context.Values.__daemonset_yaml "metadata" dict }}{{- end }}
+    {{- if not $context.Values.__daemonset_yaml.metadata.name }}{{- $_ := set $context.Values.__daemonset_yaml.metadata "name" dict }}{{- end }}
+    {{- $_ := set $context.Values.__daemonset_yaml.metadata "name" $current_dict.dns_1123_name }}
+
+    {{/* set container names and add to the list of containers for the pod */}}
+    {{- $_ := set $context.Values "__containers_list" ( list ) }}
+    {{- range $container := $context.Values.__daemonset_yaml.spec.template.spec.containers }}
+    {{- if eq $container.name "osd-pod" }}
+    {{- $_ := set $container "name" $current_dict.dns_1123_name }}
+    {{- end }}
+    {{- $__containers_list := append $context.Values.__containers_list $container }}
+    {{- $_ := set $context.Values "__containers_list" $__containers_list }}
+    {{- end }}
+    {{- $_ := set $context.Values.__daemonset_yaml.spec.template.spec "containers" $context.Values.__containers_list }}
+
+    {{/* cross-reference configmap name to container volume definitions */}}
+    {{- $_ := set $context.Values "__volume_list" list }}
+    {{- range $current_volume := $context.Values.__daemonset_yaml.spec.template.spec.volumes }}
+      {{- $_ := set $context.Values "__volume" $current_volume }}
+      {{- if hasKey $context.Values.__volume "configMap" }}
+        {{- if eq $context.Values.__volume.configMap.name $context.Values.__last_configmap_name }}
+          {{- $_ := set $context.Values.__volume.configMap "name" $current_dict.dns_1123_name }}
+        {{- end }}
+      {{- end }}
+      {{- $updated_list := append $context.Values.__volume_list $context.Values.__volume }}
+      {{- $_ := set $context.Values "__volume_list" $updated_list }}
+    {{- end }}
+    {{- $_ := set $context.Values.__daemonset_yaml.spec.template.spec "volumes" $context.Values.__volume_list }}
+
+    {{/* populate scheduling restrictions */}}
+    {{- if hasKey $current_dict "matchExpressions" }}
+      {{- if not $context.Values.__daemonset_yaml.spec.template.spec }}{{- $_ := set $context.Values.__daemonset_yaml.spec.template "spec" dict }}{{- end }}
+      {{- if not $context.Values.__daemonset_yaml.spec.template.spec.affinity }}{{- $_ := set $context.Values.__daemonset_yaml.spec.template.spec "affinity" dict }}{{- end }}
+      {{- if not $context.Values.__daemonset_yaml.spec.template.spec.affinity.nodeAffinity }}{{- $_ := set $context.Values.__daemonset_yaml.spec.template.spec.affinity "nodeAffinity" dict }}{{- end }}
+      {{- if not $context.Values.__daemonset_yaml.spec.template.spec.affinity.nodeAffinity.requiredDuringSchedulingIgnoredDuringExecution }}{{- $_ := set $context.Values.__daemonset_yaml.spec.template.spec.affinity.nodeAffinity "requiredDuringSchedulingIgnoredDuringExecution" dict }}{{- end }}
+      {{- $match_exprs := dict }}
+      {{- $_ := set $match_exprs "matchExpressions" $current_dict.matchExpressions }}
+      {{- $appended_match_expr := list $match_exprs }}
+      {{- $_ := set $context.Values.__daemonset_yaml.spec.template.spec.affinity.nodeAffinity.requiredDuringSchedulingIgnoredDuringExecution "nodeSelectorTerms" $appended_match_expr }}
+    {{- end }}
+
+    {{/* input value hash for current set of values overrides */}}
+    {{- if not $context.Values.__daemonset_yaml.spec }}{{- $_ := set $context.Values.__daemonset_yaml "spec" dict }}{{- end }}
+    {{- if not $context.Values.__daemonset_yaml.spec.template }}{{- $_ := set $context.Values.__daemonset_yaml.spec "template" dict }}{{- end }}
+    {{- if not $context.Values.__daemonset_yaml.spec.template.metadata }}{{- $_ := set $context.Values.__daemonset_yaml.spec.template "metadata" dict }}{{- end }}
+    {{- if not $context.Values.__daemonset_yaml.spec.template.metadata.annotations }}{{- $_ := set $context.Values.__daemonset_yaml.spec.template.metadata "annotations" dict }}{{- end }}
+    {{- $cmap := list $current_dict.dns_1123_name $current_dict.nodeData | include $configmap_include }}
+    {{- $values_hash := $cmap | quote | sha256sum }}
+    {{- $_ := set $context.Values.__daemonset_yaml.spec.template.metadata.annotations "configmap-etc-hash" $values_hash }}
+
+    {{/* generate configmap */}}
+---
+{{ $cmap }}
+
+    {{/* generate daemonset yaml */}}
+{{ range $k, $v := index $current_dict.nodeData.Values.conf.storage "osd" }}
+---
+{{- $_ := set $context.Values "__tmpYAML" dict }}
+
+{{ $dsNodeName := index $context.Values.__daemonset_yaml.metadata "name" }}
+{{ $localDsNodeName := print (trunc 54 $current_dict.dns_1123_name) "-" (print $dsNodeName $k | quote | sha256sum | trunc 8)}}
+{{- if not $context.Values.__tmpYAML.metadata }}{{- $_ := set $context.Values.__tmpYAML "metadata" dict }}{{- end }}
+{{- $_ := set $context.Values.__tmpYAML.metadata "name" $localDsNodeName }}
+
+{{ $podDataVols := index $context.Values.__daemonset_yaml.spec.template.spec "volumes" }}
+{{- $_ := set $context.Values "__tmpPodVols" $podDataVols }}
+
+  {{ if eq $v.data.type "directory" }}
+    {{ $dataDirVolume := dict "hostPath" (dict "path" $v.data.location) "name" "data" }}
+    {{ $newPodDataVols := append $context.Values.__tmpPodVols $dataDirVolume }}
+    {{- $_ := set $context.Values "__tmpPodVols" $newPodDataVols }}
+  {{ else }}
+    {{ $dataDirVolume := dict "emptyDir" dict "name" "data" }}
+    {{ $newPodDataVols := append $context.Values.__tmpPodVols $dataDirVolume }}
+    {{- $_ := set $context.Values "__tmpPodVols" $newPodDataVols }}
+  {{ end }}
+
+  {{ if eq $v.journal.type "directory" }}
+    {{ $journalDirVolume := dict "hostPath" (dict "path" $v.journal.location) "name" "journal" }}
+    {{ $newPodDataVols := append $context.Values.__tmpPodVols $journalDirVolume }}
+    {{- $_ := set $context.Values "__tmpPodVols" $newPodDataVols }}
+  {{ else }}
+    {{ $dataDirVolume := dict "emptyDir" dict "name" "journal" }}
+    {{ $newPodDataVols := append $context.Values.__tmpPodVols $dataDirVolume }}
+    {{- $_ := set $context.Values "__tmpPodVols" $newPodDataVols }}
+  {{ end }}
+
+  {{- if not $context.Values.__tmpYAML.spec }}{{- $_ := set $context.Values.__tmpYAML "spec" dict }}{{- end }}
+  {{- if not $context.Values.__tmpYAML.spec.template }}{{- $_ := set $context.Values.__tmpYAML.spec "template" dict }}{{- end }}
+  {{- if not $context.Values.__tmpYAML.spec.template.spec }}{{- $_ := set $context.Values.__tmpYAML.spec.template "spec" dict }}{{- end }}
+  {{- $_ := set $context.Values.__tmpYAML.spec.template.spec "volumes" $context.Values.__tmpPodVols }}
+
+  {{- if not $context.Values.__tmpYAML.spec }}{{- $_ := set $context.Values.__tmpYAML "spec" dict }}{{- end }}
+  {{- if not $context.Values.__tmpYAML.spec.template }}{{- $_ := set $context.Values.__tmpYAML.spec "template" dict }}{{- end }}
+  {{- if not $context.Values.__tmpYAML.spec.template.spec }}{{- $_ := set $context.Values.__tmpYAML.spec.template "spec" dict }}{{- end }}
+  {{- if not $context.Values.__tmpYAML.spec.template.spec.containers }}{{- $_ := set $context.Values.__tmpYAML.spec.template.spec "containers" list }}{{- end }}
+  {{- if not $context.Values.__tmpYAML.spec.template.spec.initContainers }}{{- $_ := set $context.Values.__tmpYAML.spec.template.spec "initContainers" list }}{{- end }}
+
+  {{- $_ := set $context.Values "__tmpYAMLcontainers" list }}
+  {{- range $podContainer := $context.Values.__daemonset_yaml.spec.template.spec.containers }}
+    {{- $_ := set $context.Values "_tmpYAMLcontainer" $podContainer }}
+    {{- if empty $context.Values._tmpYAMLcontainer.env }}
+    {{- $_ := set $context.Values._tmpYAMLcontainer "env" ( list ) }}
+    {{- end }}
+    {{ $containerEnv := prepend (prepend (prepend ( prepend (index $context.Values._tmpYAMLcontainer "env") (dict "name" "STORAGE_TYPE" "value" $v.data.type)) (dict "name" "JOURNAL_TYPE" "value" $v.journal.type)) (dict "name" "STORAGE_LOCATION" "value" $v.data.location)) (dict "name" "JOURNAL_LOCATION" "value" $v.journal.location) }}
+    {{- $localInitContainerEnv := omit $context.Values._tmpYAMLcontainer "env" }}
+    {{- $_ := set $localInitContainerEnv "env" $containerEnv }}
+    {{ $containerList := append $context.Values.__tmpYAMLcontainers $localInitContainerEnv }}
+    {{ $_ := set $context.Values "__tmpYAMLcontainers" $containerList }}
+  {{ end }}
+  {{- $_ := set $context.Values.__tmpYAML.spec.template.spec "containers" $context.Values.__tmpYAMLcontainers }}
+
+  {{- $_ := set $context.Values "__tmpYAMLinitContainers" list }}
+  {{- range $podContainer := $context.Values.__daemonset_yaml.spec.template.spec.initContainers }}
+    {{- $_ := set $context.Values "_tmpYAMLinitContainer" $podContainer }}
+    {{ $initContainerEnv := prepend (prepend (prepend ( prepend (index $context.Values._tmpYAMLinitContainer "env") (dict "name" "STORAGE_TYPE" "value" $v.data.type)) (dict "name" "JOURNAL_TYPE" "value" $v.journal.type)) (dict "name" "STORAGE_LOCATION" "value" $v.data.location)) (dict "name" "JOURNAL_LOCATION" "value" $v.journal.location) }}
+    {{- $localInitContainerEnv := omit $context.Values._tmpYAMLinitContainer "env" }}
+    {{- $_ := set $localInitContainerEnv "env" $initContainerEnv }}
+    {{ $initContainerList := append $context.Values.__tmpYAMLinitContainers $localInitContainerEnv }}
+    {{ $_ := set $context.Values "__tmpYAMLinitContainers" $initContainerList }}
+  {{ end }}
+  {{- $_ := set $context.Values.__tmpYAML.spec.template.spec "initContainers" $context.Values.__tmpYAMLinitContainers }}
+
+  {{- $_ := set $context.Values.__tmpYAML.spec.template.spec "volumes" $context.Values.__tmpPodVols }}
+
+{{ merge $context.Values.__tmpYAML $context.Values.__daemonset_yaml | toYaml }}
+
+{{ end }}
+
+---
+    {{- $_ := set $context.Values "__last_configmap_name" $current_dict.dns_1123_name }}
+  {{- end }}
+{{- end }}
diff --git a/ceph-osd/templates/utils/_to_fluentbit_conf.tpl b/ceph-osd/templates/utils/_to_fluentbit_conf.tpl
new file mode 100644
index 0000000000..a8769be5d3
--- /dev/null
+++ b/ceph-osd/templates/utils/_to_fluentbit_conf.tpl
@@ -0,0 +1,38 @@
+{{/*
+Copyright 2017 The Openstack-Helm Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+   http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/}}
+
+# This function generates fluentbit configuration files from the entries in the
+# ceph-osd values.yaml. It results in a configuration section with the
+# following format (one line per key/value pair defined for a section):
+# [HEADER]
+#     key value
+#     key value
+#     key value
+# The configuration schema can be found here:
+# http://fluentbit.io/documentation/0.12/configuration/schema.html
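+#
+# As a rough illustration, a `service` entry like the one in the ceph-osd
+# values.yaml in this change (some keys omitted):
+#   - service:
+#       header: service
+#       Flush: 30
+#       Log_Level: info
+# would be rendered approximately as:
+#   [SERVICE]
+#       Flush 30
+#       Log_Level info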
+
+{{- define "ceph-osd.utils.to_fluentbit_conf" -}}
+{{- range $values := . -}}
+{{- range $section := . -}}
+{{- $header := pick . "header" -}}
+{{- $config := omit . "header" }}
+[{{$header.header | upper }}]
+{{range $key, $value := $config -}}
+{{ $key | indent 4 }} {{ $value }}
+{{end -}}
+{{- end -}}
+{{- end -}}
+{{- end -}}
diff --git a/ceph-osd/values.yaml b/ceph-osd/values.yaml
new file mode 100644
index 0000000000..c40dd43dc1
--- /dev/null
+++ b/ceph-osd/values.yaml
@@ -0,0 +1,249 @@
+# Copyright 2017 The Openstack-Helm Authors.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#     http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+# Default values for ceph-osd.
+# This is a YAML-formatted file.
+# Declare name/value pairs to be passed into your templates.
+# name: value
+
+images:
+  pull_policy: IfNotPresent
+  tags:
+    ceph_osd: 'docker.io/ceph/daemon:tag-build-master-luminous-ubuntu-16.04'
+    ceph_bootstrap: 'docker.io/ceph/daemon:tag-build-master-luminous-ubuntu-16.04'
+    fluentbit: docker.io/fluent/fluent-bit:0.12.14
+    dep_check: 'quay.io/stackanetes/kubernetes-entrypoint:v0.3.1'
+    image_repo_sync: docker.io/docker:17.07.0
+  local_registry:
+    active: false
+    exclude:
+      - dep_check
+      - image_repo_sync
+
+labels:
+  job:
+    node_selector_key: openstack-control-plane
+    node_selector_value: enabled
+  osd:
+    node_selector_key: ceph-osd
+    node_selector_value: enabled
+
+pod:
+  dns_policy: "ClusterFirstWithHostNet"
+  affinity:
+    anti:
+      type:
+        default: preferredDuringSchedulingIgnoredDuringExecution
+      topologyKey:
+        default: kubernetes.io/hostname
+  resources:
+    enabled: false
+    osd:
+      requests:
+        memory: "512Mi"
+        cpu: "500m"
+      limits:
+        memory: "1024Mi"
+        cpu: "1000m"
+    fluentbit:
+      requests:
+        memory: "5Mi"
+        cpu: "250m"
+      limits:
+        memory: "50Mi"
+        cpu: "500m"
+    jobs:
+      image_repo_sync:
+        requests:
+          memory: "128Mi"
+          cpu: "100m"
+        limits:
+          memory: "1024Mi"
+          cpu: "2000m"
+
+secrets:
+  keyrings:
+    osd: ceph-bootstrap-osd-keyring
+
+network:
+  public: 192.168.0.0/16
+  cluster: 192.168.0.0/16
+
+conf:
+  ceph:
+    global:
+      # auth
+      cephx: true
+      cephx_require_signatures: false
+      cephx_cluster_require_signatures: true
+      cephx_service_require_signatures: false
+    osd:
+      osd_mkfs_type: xfs
+      osd_mkfs_options_xfs: -f -i size=2048
+      osd_max_object_name_len: 256
+      ms_bind_port_min: 6800
+      ms_bind_port_max: 7100
+  storage:
+    # NOTE(portdirect): for homogeneous clusters the `osd` key can be used to
+    # define OSD pods that will be deployed across the cluster.
+    osd:
+      - data:
+          type: directory
+          location: /var/lib/openstack-helm/ceph/osd/osd-one
+        journal:
+          type: directory
+          location: /var/lib/openstack-helm/ceph/osd/journal-one
+      # - data:
+      #     type: block-logical
+      #     location: /dev/sde
+      #   journal:
+      #     type: block-logical
+      #     location: /dev/sdf
+      # - data:
+      #     type: block-logical
+      #     location: /dev/sdg
+      #   journal:
+      #     type: directory
+      #     location: /var/lib/openstack-helm/ceph/osd/journal-sdg
+# NOTE(portdirect): for heterogeneous clusters the overrides section can be used to define
+# OSD pods that will be deployed on specific nodes.
+# overrides:
+#   ceph_osd:
+#     hosts:
+#       - name: host1.fqdn
+#         conf:
+#           storage:
+#             osd:
+#               - data:
+#                   type: directory
+#                   location: /var/lib/openstack-helm/ceph/osd/data-three
+#                 journal:
+#                   type: directory
+#                   location: /var/lib/openstack-helm/ceph/osd/journal-three
+  fluentbit:
+    - service:
+        header: service
+        Flush: 30
+        Daemon: Off
+        Log_Level: info
+        Parsers_File: parsers.conf
+    - ceph_tail:
+        # NOTE(srwilkers): Despite being exposed, these values should not be
+        # modified, as the ceph-osd logs are always placed here
+        header: input
+        Name: tail
+        Tag: ceph-osd.*
+        Path: /var/log/ceph/ceph-osd.**.log
+        Parser: syslog
+        DB: /var/log/ceph/ceph-osd.db
+        DB.Sync: Normal
+        Buffer_Chunk_Size: 1M
+        Buffer_Max_Size: 1M
+        Mem_Buf_Limit: 5MB
+        Refresh_Interval: 10s
+  parsers:
+    - syslog:
+        header: parser
+        Name: syslog
+        Format: regex
+        Regex: '^(?<time>.*[0-9]{2}:[0-9]{2}:[0-9]{2}) (?<host>[^ ]*) (?<app>[a-zA-Z0-9_\/\.\-]*)(?:\[(?<pid>[0-9]+)\])?(?:[^\:]*\:)? (?<log>.+)$'
+        Time_Key: time
+        Time_Format: "%Y-%m-%dT%H:%M:%S.%L"
+        Time_Keep: On
+        Types: "pid:integer"
+
+logging:
+  fluentd: false
+
+dependencies:
+  dynamic:
+    common:
+      local_image_registry:
+        jobs:
+          - ceph-osd-image-repo-sync
+        services:
+          - endpoint: node
+            service: local_image_registry
+  static:
+    osd:
+      jobs:
+        - ceph-storage-keys-generator
+        - ceph-osd-keyring-generator
+      services:
+        - endpoint: internal
+          service: ceph_mon
+    image_repo_sync:
+      services:
+        - endpoint: internal
+          service: local_image_registry
+
+bootstrap:
+  enabled: false
+  script: |
+    ceph -s
+    function ensure_pool () {
+      ceph osd pool stats $1 || ceph osd pool create $1 $2
+      local test_luminous=$(ceph tell osd.* version | egrep -c "12.2|luminous" | xargs echo)
+      if [[ ${test_luminous} -gt 0 ]]; then
+        ceph osd pool application enable $1 $3
+      fi
+    }
+    #ensure_pool volumes 8 cinder
+
+endpoints:
+  cluster_domain_suffix: cluster.local
+  local_image_registry:
+    name: docker-registry
+    namespace: docker-registry
+    hosts:
+      default: localhost
+      internal: docker-registry
+      node: localhost
+    host_fqdn_override:
+      default: null
+    port:
+      registry:
+        node: 5000
+  ceph_mon:
+    namespace: null
+    hosts:
+      default: ceph-mon
+      discovery: ceph-mon-discovery
+    host_fqdn_override:
+      default: null
+    port:
+      mon:
+        default: 6789
+  fluentd:
+    namespace: null
+    name: fluentd
+    hosts:
+      default: fluentd-logging
+    host_fqdn_override:
+      default: null
+    path:
+      default: null
+    scheme:
+      default: http
+    port:
+      service:
+        default: 24224
+      metrics:
+        default: 24220
+
+manifests:
+  configmap_bin: true
+  configmap_etc: true
+  daemonset_osd: true
+  job_image_repo_sync: true
diff --git a/ceph-provisioners/Chart.yaml b/ceph-provisioners/Chart.yaml
new file mode 100644
index 0000000000..a0d25ad40f
--- /dev/null
+++ b/ceph-provisioners/Chart.yaml
@@ -0,0 +1,18 @@
+# Copyright 2017 The Openstack-Helm Authors.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#     http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+apiVersion: v1
+description: OpenStack-Helm Ceph Provisioners
+name: ceph-provisioners
+version: 0.1.0
diff --git a/ceph-provisioners/requirements.yaml b/ceph-provisioners/requirements.yaml
new file mode 100644
index 0000000000..53782e69b2
--- /dev/null
+++ b/ceph-provisioners/requirements.yaml
@@ -0,0 +1,18 @@
+# Copyright 2017 The Openstack-Helm Authors.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#     http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+dependencies:
+  - name: helm-toolkit
+    repository: http://localhost:8879/charts
+    version: 0.1.0
diff --git a/ceph-provisioners/templates/bin/_bootstrap.sh.tpl b/ceph-provisioners/templates/bin/_bootstrap.sh.tpl
new file mode 100644
index 0000000000..533c0a5a3f
--- /dev/null
+++ b/ceph-provisioners/templates/bin/_bootstrap.sh.tpl
@@ -0,0 +1,20 @@
+#!/bin/bash
+
+{{/*
+Copyright 2017 The Openstack-Helm Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+   http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/}}
+
+set -ex
+{{ .Values.bootstrap.script | default "echo 'Not Enabled'" }}
diff --git a/ceph-provisioners/templates/bin/provisioner/cephfs/_client-key-manager.sh.tpl b/ceph-provisioners/templates/bin/provisioner/cephfs/_client-key-manager.sh.tpl
new file mode 100644
index 0000000000..fe06a08733
--- /dev/null
+++ b/ceph-provisioners/templates/bin/provisioner/cephfs/_client-key-manager.sh.tpl
@@ -0,0 +1,49 @@
+#!/bin/bash
+
+{{/*
+Copyright 2017 The Openstack-Helm Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+   http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/}}
+
+set -ex
+
+CEPH_CEPHFS_KEY=$(kubectl get secret ${PVC_CEPH_CEPHFS_STORAGECLASS_ADMIN_SECRET_NAME} \
+    --namespace=${PVC_CEPH_CEPHFS_STORAGECLASS_DEPLOYED_NAMESPACE} \
+    -o json )
+
+ceph_activate_namespace() {
+  kube_namespace=$1
+  secret_type=$2
+  secret_name=$3
+  ceph_key=$4
+  {
+  cat <<EOF
+apiVersion: v1
+kind: Secret
+metadata:
+  name: "${secret_name}"
+type: "${secret_type}"
+data:
+  key: $( echo ${ceph_key} )
+EOF
+  } | kubectl apply --namespace ${kube_namespace} -f -
+}
+
+if ! kubectl get --namespace ${DEPLOYMENT_NAMESPACE} secrets ${PVC_CEPH_CEPHFS_STORAGECLASS_USER_SECRET_NAME}; then
+  ceph_activate_namespace \
+    ${DEPLOYMENT_NAMESPACE} \
+    "kubernetes.io/cephfs" \
+    ${PVC_CEPH_CEPHFS_STORAGECLASS_USER_SECRET_NAME} \
+    "$(echo ${CEPH_CEPHFS_KEY} | jq -r '.data | .[]')"
+fi
diff --git a/ceph-provisioners/templates/bin/provisioner/cephfs/_start.sh.tpl b/ceph-provisioners/templates/bin/provisioner/cephfs/_start.sh.tpl
new file mode 100644
index 0000000000..da9c041f6e
--- /dev/null
+++ b/ceph-provisioners/templates/bin/provisioner/cephfs/_start.sh.tpl
@@ -0,0 +1,21 @@
+#!/bin/bash
+
+{{/*
+Copyright 2017 The Openstack-Helm Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+   http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/}}
+
+set -ex
+
+exec /usr/local/bin/cephfs-provisioner -id "${POD_NAME}"
diff --git a/ceph-provisioners/templates/bin/provisioner/rbd/_namespace-client-key-cleaner.sh.tpl b/ceph-provisioners/templates/bin/provisioner/rbd/_namespace-client-key-cleaner.sh.tpl
new file mode 100644
index 0000000000..85e52082e8
--- /dev/null
+++ b/ceph-provisioners/templates/bin/provisioner/rbd/_namespace-client-key-cleaner.sh.tpl
@@ -0,0 +1,24 @@
+#!/bin/bash
+
+{{/*
+Copyright 2017 The Openstack-Helm Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+   http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/}}
+
+set -ex
+
+kubectl delete secret \
+  --namespace ${DEPLOYMENT_NAMESPACE} \
+  --ignore-not-found=true \
+  ${PVC_CEPH_RBD_STORAGECLASS_USER_SECRET_NAME}
diff --git a/ceph-provisioners/templates/bin/provisioner/rbd/_namespace-client-key-manager.sh.tpl b/ceph-provisioners/templates/bin/provisioner/rbd/_namespace-client-key-manager.sh.tpl
new file mode 100644
index 0000000000..5711ae39aa
--- /dev/null
+++ b/ceph-provisioners/templates/bin/provisioner/rbd/_namespace-client-key-manager.sh.tpl
@@ -0,0 +1,43 @@
+#!/bin/bash
+
+{{/*
+Copyright 2017 The Openstack-Helm Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+   http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/}}
+
+set -ex
+
+CEPH_RBD_KEY=$(kubectl get secret ${PVC_CEPH_RBD_STORAGECLASS_ADMIN_SECRET_NAME} \
+    --namespace=${PVC_CEPH_RBD_STORAGECLASS_DEPLOYED_NAMESPACE} \
+    -o json )
+
+ceph_activate_namespace() {
+  kube_namespace=$1
+  secret_type=$2
+  secret_name=$3
+  ceph_key=$4
+  {
+  cat <<EOF
+apiVersion: v1
+kind: Secret
+metadata:
+  name: "${secret_name}"
+type: "${secret_type}"
+data:
+  key: $( echo ${ceph_key} )
+EOF
+  } | kubectl apply --namespace ${kube_namespace} -f -
+}
+
+ceph_activate_namespace ${DEPLOYMENT_NAMESPACE} "kubernetes.io/rbd" ${PVC_CEPH_RBD_STORAGECLASS_USER_SECRET_NAME} "$(echo ${CEPH_RBD_KEY} | jq -r '.data | .[]')"
diff --git a/ceph-provisioners/templates/bin/provisioner/rbd/_start.sh.tpl b/ceph-provisioners/templates/bin/provisioner/rbd/_start.sh.tpl
new file mode 100644
index 0000000000..496d3038b9
--- /dev/null
+++ b/ceph-provisioners/templates/bin/provisioner/rbd/_start.sh.tpl
@@ -0,0 +1,21 @@
+#!/bin/bash
+
+{{/*
+Copyright 2017 The Openstack-Helm Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+   http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/}}
+
+set -ex
+
+exec /usr/local/bin/rbd-provisioner -id "${POD_NAME}"
diff --git a/ceph-provisioners/templates/configmap-bin-provisioner.yaml b/ceph-provisioners/templates/configmap-bin-provisioner.yaml
new file mode 100644
index 0000000000..d34870fba3
--- /dev/null
+++ b/ceph-provisioners/templates/configmap-bin-provisioner.yaml
@@ -0,0 +1,29 @@
+{{/*
+Copyright 2017 The Openstack-Helm Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+   http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/}}
+
+{{- if and .Values.manifests.configmap_bin .Values.deployment.client_secrets }}
+{{- $envAll := . }}
+---
+apiVersion: v1
+kind: ConfigMap
+metadata:
+  name: ceph-provisioners-bin-clients
+data:
+  provisioner-rbd-namespace-client-key-manager.sh: |
+{{ tuple "bin/provisioner/rbd/_namespace-client-key-manager.sh.tpl" . | include "helm-toolkit.utils.template" | indent 4 }}
+  provisioner-rbd-namespace-client-key-cleaner.sh: |
+{{ tuple "bin/provisioner/rbd/_namespace-client-key-cleaner.sh.tpl" . | include "helm-toolkit.utils.template" | indent 4 }}
+{{- end }}
diff --git a/ceph-provisioners/templates/configmap-bin.yaml b/ceph-provisioners/templates/configmap-bin.yaml
new file mode 100644
index 0000000000..ed735d790d
--- /dev/null
+++ b/ceph-provisioners/templates/configmap-bin.yaml
@@ -0,0 +1,43 @@
+{{/*
+Copyright 2017 The Openstack-Helm Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+   http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/}}
+
+{{- if and .Values.manifests.configmap_bin_common .Values.deployment.ceph }}
+{{- $envAll := . }}
+---
+apiVersion: v1
+kind: ConfigMap
+metadata:
+  name: ceph-provisioners-bin
+data:
+{{- if .Values.images.local_registry.active }}
+  image-repo-sync.sh: |
+{{- include "helm-toolkit.scripts.image_repo_sync" . | indent 4 }}
+{{- end }}
+
+{{- if .Values.bootstrap.enabled }}
+  bootstrap.sh: |
+{{ tuple "bin/_bootstrap.sh.tpl" . | include "helm-toolkit.utils.template" | indent 4 }}
+{{- end }}
+
+  provisioner-cephfs-start.sh: |
+{{ tuple "bin/provisioner/cephfs/_start.sh.tpl" . | include  "helm-toolkit.utils.template" | indent 4 }}
+  provisioner-cephfs-client-key-manager.sh: |
+{{ tuple "bin/provisioner/cephfs/_client-key-manager.sh.tpl" . | include  "helm-toolkit.utils.template" | indent 4 }}
+
+  provisioner-rbd-start.sh: |
+{{ tuple "bin/provisioner/rbd/_start.sh.tpl" . | include  "helm-toolkit.utils.template" | indent 4 }}
+
+{{- end }}
diff --git a/ceph-provisioners/templates/configmap-etc-client.yaml b/ceph-provisioners/templates/configmap-etc-client.yaml
new file mode 100644
index 0000000000..e3001bd686
--- /dev/null
+++ b/ceph-provisioners/templates/configmap-etc-client.yaml
@@ -0,0 +1,56 @@
+{{/*
+Copyright 2017 The Openstack-Helm Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+   http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/}}
+
+{{- define "ceph.configmap.etc" }}
+{{- $configMapName := index . 0 }}
+{{- $envAll := index . 1 }}
+{{- with $envAll }}
+
+{{- if or (.Values.deployment.ceph) (.Values.deployment.client_secrets) }}
+
+{{- if empty .Values.conf.ceph.global.mon_host -}}
+{{- $monHost := tuple "ceph_mon" "internal" "mon" . | include "helm-toolkit.endpoints.host_and_port_endpoint_uri_lookup" }}
+{{- $_ := $monHost | set .Values.conf.ceph.global "mon_host" -}}
+{{- end -}}
+
+{{- if empty .Values.conf.ceph.global.mon_addr -}}
+{{- $monPort := tuple "ceph_mon" "internal" "mon" . | include "helm-toolkit.endpoints.endpoint_port_lookup" }}
+{{- $_ := printf ":%s" $monPort | set .Values.conf.ceph.global "mon_addr" -}}
+{{- end -}}
+
+{{- if empty .Values.conf.ceph.osd.cluster_network -}}
+{{- $_ := .Values.network.cluster | set .Values.conf.ceph.osd "cluster_network" -}}
+{{- end -}}
+
+{{- if empty .Values.conf.ceph.osd.public_network -}}
+{{- $_ := .Values.network.public | set .Values.conf.ceph.osd "public_network" -}}
+{{- end -}}
+
+---
+apiVersion: v1
+kind: ConfigMap
+metadata:
+  name: {{ $configMapName }}
+data:
+  ceph.conf: |
+{{ include "helm-toolkit.utils.to_ini" .Values.conf.ceph | indent 4 }}
+
+{{- end }}
+{{- end }}
+{{- end }}
+{{- if .Values.manifests.configmap_etc }}
+{{- list "ceph-etc" . | include "ceph.configmap.etc" }}
+{{- end }}
diff --git a/ceph-provisioners/templates/deployment-cephfs-provisioner.yaml b/ceph-provisioners/templates/deployment-cephfs-provisioner.yaml
new file mode 100644
index 0000000000..4830fba4e9
--- /dev/null
+++ b/ceph-provisioners/templates/deployment-cephfs-provisioner.yaml
@@ -0,0 +1,178 @@
+{{/*
+Copyright 2017 The Openstack-Helm Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+   http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/}}
+
+{{- if and .Values.manifests.deployment_cephfs_provisioner .Values.deployment.cephfs_provisioner  }}
+{{- $envAll := . }}
+
+{{- $serviceAccountName := printf "%s-%s" .Release.Name "ceph-cephfs-provisioner" }}
+{{ tuple $envAll "cephfs_provisioner" $serviceAccountName | include "helm-toolkit.snippets.kubernetes_pod_rbac_serviceaccount" }}
+---
+kind: ClusterRole
+apiVersion: rbac.authorization.k8s.io/v1
+metadata:
+  name: {{ $serviceAccountName }}
+rules:
+  - apiGroups:
+      - ''
+    resources:
+      - secrets
+    verbs:
+      - get
+      - list
+      - watch
+      - create
+      - delete
+  - apiGroups:
+      - ''
+    resources:
+      - persistentvolumes
+    verbs:
+      - get
+      - list
+      - watch
+      - create
+      - delete
+  - apiGroups:
+      - ''
+    resources:
+      - persistentvolumeclaims
+    verbs:
+      - get
+      - list
+      - watch
+      - update
+  - apiGroups:
+      - storage.k8s.io
+    resources:
+      - storageclasses
+    verbs:
+      - get
+      - list
+      - watch
+  - apiGroups:
+      - ''
+    resources:
+      - events
+    verbs:
+      - list
+      - watch
+      - create
+      - update
+      - patch
+  - apiGroups:
+      - ''
+    resources:
+      - services
+      - endpoints
+    verbs:
+      - get
+  - apiGroups:
+      - extensions
+    resources:
+      - podsecuritypolicies
+    resourceNames:
+      - cephfs-provisioner
+    verbs:
+      - use
+---
+apiVersion: rbac.authorization.k8s.io/v1beta1
+kind: ClusterRoleBinding
+metadata:
+  name: {{ $serviceAccountName }}-run-cephfs-provisioner
+subjects:
+  - kind: ServiceAccount
+    name: {{ $serviceAccountName }}
+    namespace: {{ $envAll.Release.Namespace }}
+roleRef:
+  kind: ClusterRole
+  name: {{ $serviceAccountName }}
+  apiGroup: rbac.authorization.k8s.io
+---
+apiVersion: rbac.authorization.k8s.io/v1beta1
+kind: Role
+metadata:
+  name: {{ $serviceAccountName }}
+rules:
+  - apiGroups:
+      - ""
+    resources:
+      - secrets
+    verbs:
+      - get
+      - list
+---
+apiVersion: rbac.authorization.k8s.io/v1beta1
+kind: RoleBinding
+metadata:
+  name: {{ $serviceAccountName }}
+roleRef:
+  apiGroup: rbac.authorization.k8s.io
+  kind: Role
+  name: {{ $serviceAccountName }}
+subjects:
+  - kind: ServiceAccount
+    name: {{ $serviceAccountName }}
+    namespace: {{ $envAll.Release.Namespace }}
+---
+kind: Deployment
+apiVersion: apps/v1
+metadata:
+  name: ceph-cephfs-provisioner
+  labels:
+{{ tuple $envAll "cephfs" "provisioner" | include "helm-toolkit.snippets.kubernetes_metadata_labels" | indent 4 }}
+spec:
+  replicas: {{ .Values.pod.replicas.cephfs_provisioner }}
+  selector:
+    matchLabels:
+{{ tuple $envAll "cephfs" "provisioner" | include "helm-toolkit.snippets.kubernetes_metadata_labels" | indent 6 }}
+  strategy:
+    type: Recreate
+  template:
+    metadata:
+      labels:
+{{ tuple $envAll "cephfs" "provisioner" | include "helm-toolkit.snippets.kubernetes_metadata_labels" | indent 8 }}
+    spec:
+      serviceAccountName: {{ $serviceAccountName }}
+      affinity:
+{{ tuple $envAll "cephfs" "provisioner" | include "helm-toolkit.snippets.kubernetes_pod_anti_affinity" | indent 8 }}
+      nodeSelector:
+        {{ .Values.labels.provisioner.node_selector_key }}: {{ .Values.labels.provisioner.node_selector_value }}
+      initContainers:
+{{ tuple $envAll "cephfs_provisioner" list | include "helm-toolkit.snippets.kubernetes_entrypoint_init_container" | indent 8 }}
+      containers:
+        - name: ceph-cephfs-provisioner
+{{ tuple $envAll "ceph_cephfs_provisioner" | include "helm-toolkit.snippets.image" | indent 10 }}
+{{ tuple $envAll $envAll.Values.pod.resources.cephfs_provisioner | include "helm-toolkit.snippets.kubernetes_resources" | indent 10 }}
+          env:
+            - name: PROVISIONER_NAME
+              value: {{ .Values.storageclass.cephfs.provisioner }}
+            - name: POD_NAME
+              valueFrom:
+                fieldRef:
+                  fieldPath: metadata.name
+          command:
+            - /tmp/provisioner-cephfs-start.sh
+          volumeMounts:
+            - name: ceph-provisioners-bin
+              mountPath: /tmp/provisioner-cephfs-start.sh
+              subPath: provisioner-cephfs-start.sh
+              readOnly: true
+      volumes:
+        - name: ceph-provisioners-bin
+          configMap:
+            name: ceph-provisioners-bin
+            defaultMode: 0555
+{{- end }}
diff --git a/ceph-provisioners/templates/deployment-rbd-provisioner.yaml b/ceph-provisioners/templates/deployment-rbd-provisioner.yaml
new file mode 100644
index 0000000000..99c7e3016b
--- /dev/null
+++ b/ceph-provisioners/templates/deployment-rbd-provisioner.yaml
@@ -0,0 +1,168 @@
+{{/*
+Copyright 2017 The Openstack-Helm Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+   http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/}}
+
+{{- if and .Values.manifests.deployment_rbd_provisioner .Values.deployment.rbd_provisioner }}
+{{- $envAll := . }}
+
+{{- $serviceAccountName := printf "%s-%s" .Release.Name "ceph-rbd-provisioner" }}
+{{ tuple $envAll "rbd_provisioner" $serviceAccountName | include "helm-toolkit.snippets.kubernetes_pod_rbac_serviceaccount" }}
+---
+kind: ClusterRole
+apiVersion: rbac.authorization.k8s.io/v1
+metadata:
+  name: {{ $serviceAccountName }}
+rules:
+  - apiGroups:
+      - ''
+    resources:
+      - persistentvolumes
+    verbs:
+      - get
+      - list
+      - watch
+      - create
+      - delete
+  - apiGroups:
+      - ''
+    resources:
+      - persistentvolumeclaims
+    verbs:
+      - get
+      - list
+      - watch
+      - update
+  - apiGroups:
+      - storage.k8s.io
+    resources:
+      - storageclasses
+    verbs:
+      - get
+      - list
+      - watch
+  - apiGroups:
+      - ''
+    resources:
+      - events
+    verbs:
+      - list
+      - watch
+      - create
+      - update
+      - patch
+  - apiGroups:
+      - ''
+    resources:
+      - services
+      - endpoints
+    verbs:
+      - get
+  - apiGroups:
+      - extensions
+    resources:
+      - podsecuritypolicies
+    resourceNames:
+      - rbd-provisioner
+    verbs:
+      - use
+---
+apiVersion: rbac.authorization.k8s.io/v1beta1
+kind: ClusterRoleBinding
+metadata:
+  name: {{ $serviceAccountName }}-run-rbd-provisioner
+subjects:
+  - kind: ServiceAccount
+    name: {{ $serviceAccountName }}
+    namespace: {{ $envAll.Release.Namespace }}
+roleRef:
+  kind: ClusterRole
+  name: {{ $serviceAccountName }}
+  apiGroup: rbac.authorization.k8s.io
+---
+apiVersion: rbac.authorization.k8s.io/v1beta1
+kind: Role
+metadata:
+  name: {{ $serviceAccountName }}
+rules:
+  - apiGroups:
+      - ""
+    resources:
+      - secrets
+    verbs:
+      - get
+      - list
+---
+apiVersion: rbac.authorization.k8s.io/v1beta1
+kind: RoleBinding
+metadata:
+  name: {{ $serviceAccountName }}
+roleRef:
+  apiGroup: rbac.authorization.k8s.io
+  kind: Role
+  name: {{ $serviceAccountName }}
+subjects:
+  - kind: ServiceAccount
+    name: {{ $serviceAccountName }}
+    namespace: {{ $envAll.Release.Namespace }}
+---
+kind: Deployment
+apiVersion: apps/v1
+metadata:
+  name: ceph-rbd-provisioner
+  labels:
+{{ tuple $envAll "rbd" "provisioner" | include "helm-toolkit.snippets.kubernetes_metadata_labels" | indent 4 }}
+spec:
+  replicas: {{ .Values.pod.replicas.rbd_provisioner }}
+  selector:
+    matchLabels:
+{{ tuple $envAll "rbd" "provisioner" | include "helm-toolkit.snippets.kubernetes_metadata_labels" | indent 6 }}
+  strategy:
+    type: Recreate
+  template:
+    metadata:
+      labels:
+{{ tuple $envAll "rbd" "provisioner" | include "helm-toolkit.snippets.kubernetes_metadata_labels" | indent 8 }}
+    spec:
+      serviceAccountName: {{ $serviceAccountName }}
+      affinity:
+{{ tuple $envAll "rbd" "provisioner" | include "helm-toolkit.snippets.kubernetes_pod_anti_affinity" | indent 8 }}
+      nodeSelector:
+        {{ .Values.labels.provisioner.node_selector_key }}: {{ .Values.labels.provisioner.node_selector_value }}
+      initContainers:
+{{ tuple $envAll "rbd_provisioner" list | include "helm-toolkit.snippets.kubernetes_entrypoint_init_container" | indent 8 }}
+      containers:
+        - name: ceph-rbd-provisioner
+{{ tuple $envAll "ceph_rbd_provisioner" | include "helm-toolkit.snippets.image" | indent 10 }}
+{{ tuple $envAll $envAll.Values.pod.resources.rbd_provisioner | include "helm-toolkit.snippets.kubernetes_resources" | indent 10 }}
+          env:
+            - name: PROVISIONER_NAME
+              value: {{ .Values.storageclass.rbd.provisioner }}
+            - name: POD_NAME
+              valueFrom:
+                fieldRef:
+                  fieldPath: metadata.name
+          command:
+            - /tmp/provisioner-rbd-start.sh
+          volumeMounts:
+            - name: ceph-provisioners-bin
+              mountPath: /tmp/provisioner-rbd-start.sh
+              subPath: provisioner-rbd-start.sh
+              readOnly: true
+      volumes:
+        - name: ceph-provisioners-bin
+          configMap:
+            name: ceph-provisioners-bin
+            defaultMode: 0555
+{{- end }}
diff --git a/ceph-provisioners/templates/job-bootstrap.yaml b/ceph-provisioners/templates/job-bootstrap.yaml
new file mode 100644
index 0000000000..00c4737368
--- /dev/null
+++ b/ceph-provisioners/templates/job-bootstrap.yaml
@@ -0,0 +1,70 @@
+{{/*
+Copyright 2017 The Openstack-Helm Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+   http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/}}
+
+{{- if and .Values.manifests.job_bootstrap .Values.bootstrap.enabled }}
+{{- $envAll := . }}
+
+{{- $serviceAccountName := "ceph-client-bootstrap"}}
+{{ tuple $envAll "bootstrap" $serviceAccountName | include "helm-toolkit.snippets.kubernetes_pod_rbac_serviceaccount" }}
+---
+apiVersion: batch/v1
+kind: Job
+metadata:
+  name: ceph-client-bootstrap
+spec:
+  template:
+    metadata:
+      labels:
+{{ tuple $envAll "ceph" "bootstrap" | include "helm-toolkit.snippets.kubernetes_metadata_labels" | indent 8 }}
+    spec:
+      serviceAccountName: {{ $serviceAccountName }}
+      restartPolicy: OnFailure
+      nodeSelector:
+        {{ .Values.labels.job.node_selector_key }}: {{ .Values.labels.job.node_selector_value }}
+      initContainers:
+{{ tuple $envAll "bootstrap" list | include "helm-toolkit.snippets.kubernetes_entrypoint_init_container"  | indent 8 }}
+      containers:
+        - name: ceph-client-bootstrap
+{{ tuple $envAll "ceph_bootstrap" | include "helm-toolkit.snippets.image" | indent 10 }}
+{{ tuple $envAll $envAll.Values.pod.resources.jobs.bootstrap | include "helm-toolkit.snippets.kubernetes_resources" | indent 10 }}
+          command:
+            - /tmp/bootstrap.sh
+          volumeMounts:
+            - name: ceph-provisioners-bin
+              mountPath: /tmp/bootstrap.sh
+              subPath: bootstrap.sh
+              readOnly: true
+            - name: ceph-etc
+              mountPath: /etc/ceph/ceph.conf
+              subPath: ceph.conf
+              readOnly: true
+            - name: ceph-client-admin-keyring
+              mountPath: /etc/ceph/ceph.client.admin.keyring
+              subPath: ceph.client.admin.keyring
+              readOnly: true
+      volumes:
+        - name: ceph-provisioners-bin
+          configMap:
+            name: ceph-provisioners-bin
+            defaultMode: 0555
+        - name: ceph-etc
+          configMap:
+            name: ceph-etc
+            defaultMode: 0444
+        - name: ceph-client-admin-keyring
+          secret:
+            secretName: {{ .Values.secrets.keyrings.admin }}
+{{- end }}
diff --git a/ceph-provisioners/templates/job-cephfs-client-key.yaml b/ceph-provisioners/templates/job-cephfs-client-key.yaml
new file mode 100644
index 0000000000..2b0bee5a5b
--- /dev/null
+++ b/ceph-provisioners/templates/job-cephfs-client-key.yaml
@@ -0,0 +1,124 @@
+{{/*
+Copyright 2017 The Openstack-Helm Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+   http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/}}
+
+{{- if and .Values.manifests.job_cephfs_client_key .Values.deployment.cephfs_provisioner }}
+{{- $envAll := . }}
+
+{{- $randStringSuffix := randAlphaNum 5 | lower }}
+
+{{- $serviceAccountName := "ceph-cephfs-client-key-generator" }}
+{{ tuple $envAll "cephfs_client_key_generator" $serviceAccountName | include "helm-toolkit.snippets.kubernetes_pod_rbac_serviceaccount" }}
+---
+apiVersion: rbac.authorization.k8s.io/v1beta1
+kind: Role
+metadata:
+  name: {{ $serviceAccountName }}
+rules:
+  - apiGroups:
+      - ""
+    resources:
+      - secrets
+    verbs:
+      - get
+      - create
+      - update
+      - patch
+---
+apiVersion: rbac.authorization.k8s.io/v1beta1
+kind: RoleBinding
+metadata:
+  name: {{ $serviceAccountName }}
+roleRef:
+  apiGroup: rbac.authorization.k8s.io
+  kind: Role
+  name: {{ $serviceAccountName }}
+subjects:
+  - kind: ServiceAccount
+    name: {{ $serviceAccountName }}
+    namespace: {{ $envAll.Release.Namespace }}
+---
+apiVersion: rbac.authorization.k8s.io/v1beta1
+kind: Role
+metadata:
+  name: {{ printf "%s-%s" $serviceAccountName $randStringSuffix }}
+  namespace: {{ .Values.storageclass.cephfs.admin_secret_namespace }}
+rules:
+  - apiGroups:
+      - ""
+    resources:
+      - secrets
+    verbs:
+      - get
+      - list
+---
+apiVersion: rbac.authorization.k8s.io/v1beta1
+kind: RoleBinding
+metadata:
+  name: {{ printf "%s-%s" $serviceAccountName $randStringSuffix }}
+  namespace: {{ .Values.storageclass.cephfs.admin_secret_namespace }}
+roleRef:
+  apiGroup: rbac.authorization.k8s.io
+  kind: Role
+  name: {{ printf "%s-%s" $serviceAccountName $randStringSuffix }}
+subjects:
+  - kind: ServiceAccount
+    name: {{ $serviceAccountName }}
+    namespace: {{ $envAll.Release.Namespace }}
+---
+apiVersion: batch/v1
+kind: Job
+metadata:
+  name: ceph-cephfs-client-key-generator
+spec:
+  template:
+    metadata:
+      labels:
+{{ tuple $envAll "ceph" "cephfs-client-key-generator" | include "helm-toolkit.snippets.kubernetes_metadata_labels" | indent 8 }}
+    spec:
+      serviceAccountName: {{ $serviceAccountName }}
+      restartPolicy: OnFailure
+      nodeSelector:
+        {{ $envAll.Values.labels.job.node_selector_key }}: {{ $envAll.Values.labels.job.node_selector_value }}
+      initContainers:
+{{ tuple $envAll "cephfs_client_key_generator" list | include "helm-toolkit.snippets.kubernetes_entrypoint_init_container" | indent 8 }}
+      containers:
+        - name: ceph-cephfs-client-key-generator
+{{ tuple $envAll "ceph_config_helper" | include "helm-toolkit.snippets.image" | indent 10 }}
+{{ tuple $envAll $envAll.Values.pod.resources.jobs.secret_provisioning | include "helm-toolkit.snippets.kubernetes_resources" | indent 10 }}
+          env:
+            - name: DEPLOYMENT_NAMESPACE
+              valueFrom:
+                fieldRef:
+                  fieldPath: metadata.namespace
+            - name: PVC_CEPH_CEPHFS_STORAGECLASS_USER_SECRET_NAME
+              value: {{ .Values.storageclass.cephfs.user_secret_name }}
+            - name: PVC_CEPH_CEPHFS_STORAGECLASS_ADMIN_SECRET_NAME
+              value: {{ .Values.storageclass.cephfs.admin_secret_name }}
+            - name: PVC_CEPH_CEPHFS_STORAGECLASS_DEPLOYED_NAMESPACE
+              value: {{ .Values.storageclass.cephfs.admin_secret_namespace }}
+          command:
+            - /tmp/provisioner-cephfs-client-key-manager.sh
+          volumeMounts:
+            - name: ceph-provisioners-bin
+              mountPath: /tmp/provisioner-cephfs-client-key-manager.sh
+              subPath: provisioner-cephfs-client-key-manager.sh
+              readOnly: true
+      volumes:
+        - name: ceph-provisioners-bin
+          configMap:
+            name: ceph-provisioners-bin
+            defaultMode: 0555
+{{- end }}
diff --git a/ceph-provisioners/templates/job-namespace-client-key-cleaner.yaml b/ceph-provisioners/templates/job-namespace-client-key-cleaner.yaml
new file mode 100644
index 0000000000..f0691fc5c4
--- /dev/null
+++ b/ceph-provisioners/templates/job-namespace-client-key-cleaner.yaml
@@ -0,0 +1,93 @@
+{{/*
+Copyright 2017 The Openstack-Helm Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+   http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/}}
+
+{{- if and .Values.manifests.job_namespace_client_key_cleaner .Values.deployment.client_secrets }}
+{{- $envAll := . }}
+
+{{- $randStringSuffix := randAlphaNum 5 | lower }}
+
+{{- $serviceAccountName := print "ceph-namespace-client-key-cleaner-" $randStringSuffix }}
+{{ tuple $envAll "namespace_client_key_cleaner" $serviceAccountName | include "helm-toolkit.snippets.kubernetes_pod_rbac_serviceaccount" }}
+---
+apiVersion: rbac.authorization.k8s.io/v1beta1
+kind: Role
+metadata:
+  name: {{ $serviceAccountName }}
+rules:
+  - apiGroups:
+      - ""
+    resources:
+      - secrets
+    verbs:
+      - get
+      - list
+      - delete
+---
+apiVersion: rbac.authorization.k8s.io/v1beta1
+kind: RoleBinding
+metadata:
+  name: {{ $serviceAccountName }}
+roleRef:
+  apiGroup: rbac.authorization.k8s.io
+  kind: Role
+  name: {{ $serviceAccountName }}
+subjects:
+  - kind: ServiceAccount
+    name: {{ $serviceAccountName }}
+    namespace: {{ $envAll.Release.Namespace }}
+---
+apiVersion: batch/v1
+kind: Job
+metadata:
+  name: ceph-namespace-client-key-cleaner-{{ $randStringSuffix }}
+  annotations:
+    "helm.sh/hook": pre-delete
+spec:
+  template:
+    metadata:
+      labels:
+{{ tuple $envAll "ceph" "client-key-cleaner" | include "helm-toolkit.snippets.kubernetes_metadata_labels" | indent 8 }}
+    spec:
+      serviceAccountName: {{ $serviceAccountName }}
+      restartPolicy: OnFailure
+      nodeSelector:
+        {{ $envAll.Values.labels.job.node_selector_key }}: {{ $envAll.Values.labels.job.node_selector_value }}
+      initContainers:
+{{ tuple $envAll "namespace_client_key_cleaner" list | include "helm-toolkit.snippets.kubernetes_entrypoint_init_container" | indent 8 }}
+      containers:
+        - name: ceph-namespace-client-keys-cleaner
+{{ tuple $envAll "ceph_config_helper" | include "helm-toolkit.snippets.image" | indent 10 }}
+{{ tuple $envAll $envAll.Values.pod.resources.jobs.secret_provisioning | include "helm-toolkit.snippets.kubernetes_resources" | indent 10 }}
+          env:
+            - name: DEPLOYMENT_NAMESPACE
+              valueFrom:
+                fieldRef:
+                  fieldPath: metadata.namespace
+            - name: PVC_CEPH_RBD_STORAGECLASS_USER_SECRET_NAME
+              value: {{ .Values.storageclass.rbd.user_secret_name }}
+          command:
+            - /tmp/provisioner-rbd-namespace-client-key-cleaner.sh
+          volumeMounts:
+            - name: ceph-provisioners-bin-clients
+              mountPath: /tmp/provisioner-rbd-namespace-client-key-cleaner.sh
+              subPath: provisioner-rbd-namespace-client-key-cleaner.sh
+              readOnly: true
+      volumes:
+        - name: ceph-provisioners-bin-clients
+          configMap:
+            name: ceph-provisioners-bin-clients
+            defaultMode: 0555
+{{- end }}
diff --git a/ceph-provisioners/templates/job-namespace-client-key.yaml b/ceph-provisioners/templates/job-namespace-client-key.yaml
new file mode 100644
index 0000000000..75fd06872e
--- /dev/null
+++ b/ceph-provisioners/templates/job-namespace-client-key.yaml
@@ -0,0 +1,124 @@
+{{/*
+Copyright 2017 The Openstack-Helm Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+   http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/}}
+
+{{- if and .Values.manifests.job_namespace_client_key .Values.deployment.client_secrets }}
+{{- $envAll := . }}
+
+{{- $randStringSuffix := randAlphaNum 5 | lower }}
+
+{{- $serviceAccountName := "ceph-namespace-client-key-generator" }}
+{{ tuple $envAll "namespace_client_key_generator" $serviceAccountName | include "helm-toolkit.snippets.kubernetes_pod_rbac_serviceaccount" }}
+---
+apiVersion: rbac.authorization.k8s.io/v1beta1
+kind: Role
+metadata:
+  name: {{ $serviceAccountName }}
+rules:
+  - apiGroups:
+      - ""
+    resources:
+      - secrets
+    verbs:
+      - get
+      - create
+      - update
+      - patch
+---
+apiVersion: rbac.authorization.k8s.io/v1beta1
+kind: RoleBinding
+metadata:
+  name: {{ $serviceAccountName }}
+roleRef:
+  apiGroup: rbac.authorization.k8s.io
+  kind: Role
+  name: {{ $serviceAccountName }}
+subjects:
+  - kind: ServiceAccount
+    name: {{ $serviceAccountName }}
+    namespace: {{ $envAll.Release.Namespace }}
+---
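+# Additional Role/RoleBinding scoped to the namespace holding the Ceph admin
+# secret, granting the job's service account read access to secrets there
+# (referenced via the PVC_CEPH_RBD_STORAGECLASS_* variables below).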
+apiVersion: rbac.authorization.k8s.io/v1beta1
+kind: Role
+metadata:
+  name: {{ printf "%s-%s" $serviceAccountName $randStringSuffix }}
+  namespace: {{ .Values.storageclass.rbd.admin_secret_namespace }}
+rules:
+  - apiGroups:
+      - ""
+    resources:
+      - secrets
+    verbs:
+      - get
+      - list
+---
+apiVersion: rbac.authorization.k8s.io/v1beta1
+kind: RoleBinding
+metadata:
+  name: {{ printf "%s-%s" $serviceAccountName $randStringSuffix }}
+  namespace: {{ .Values.storageclass.rbd.admin_secret_namespace }}
+roleRef:
+  apiGroup: rbac.authorization.k8s.io
+  kind: Role
+  name: {{ printf "%s-%s" $serviceAccountName $randStringSuffix }}
+subjects:
+  - kind: ServiceAccount
+    name: {{ $serviceAccountName }}
+    namespace: {{ $envAll.Release.Namespace }}
+---
+apiVersion: batch/v1
+kind: Job
+metadata:
+  name: ceph-namespace-client-key-generator
+spec:
+  template:
+    metadata:
+      labels:
+{{ tuple $envAll "ceph" "client-key-generator" | include "helm-toolkit.snippets.kubernetes_metadata_labels" | indent 8 }}
+    spec:
+      serviceAccountName: {{ $serviceAccountName }}
+      restartPolicy: OnFailure
+      nodeSelector:
+        {{ $envAll.Values.labels.job.node_selector_key }}: {{ $envAll.Values.labels.job.node_selector_value }}
+      initContainers:
+{{ tuple $envAll "namespace_client_key_generator" list | include "helm-toolkit.snippets.kubernetes_entrypoint_init_container" | indent 8 }}
+      containers:
+        - name: ceph-storage-keys-generator
+{{ tuple $envAll "ceph_config_helper" | include "helm-toolkit.snippets.image" | indent 10 }}
+{{ tuple $envAll $envAll.Values.pod.resources.jobs.secret_provisioning | include "helm-toolkit.snippets.kubernetes_resources" | indent 10 }}
+          env:
+            - name: DEPLOYMENT_NAMESPACE
+              valueFrom:
+                fieldRef:
+                  fieldPath: metadata.namespace
+            - name: PVC_CEPH_RBD_STORAGECLASS_USER_SECRET_NAME
+              value: {{ .Values.storageclass.rbd.user_secret_name }}
+            - name: PVC_CEPH_RBD_STORAGECLASS_ADMIN_SECRET_NAME
+              value: {{ .Values.storageclass.rbd.admin_secret_name }}
+            - name: PVC_CEPH_RBD_STORAGECLASS_DEPLOYED_NAMESPACE
+              value: {{ .Values.storageclass.rbd.admin_secret_namespace }}
+          command:
+            - /tmp/provisioner-rbd-namespace-client-key-manager.sh
+          volumeMounts:
+            - name: ceph-provisioners-bin-clients
+              mountPath: /tmp/provisioner-rbd-namespace-client-key-manager.sh
+              subPath: provisioner-rbd-namespace-client-key-manager.sh
+              readOnly: true
+      volumes:
+        - name: ceph-provisioners-bin-clients
+          configMap:
+            name: ceph-provisioners-bin-clients
+            defaultMode: 0555
+{{- end }}
diff --git a/ceph-provisioners/templates/storageclass-cephfs.yaml b/ceph-provisioners/templates/storageclass-cephfs.yaml
new file mode 100644
index 0000000000..4144db8518
--- /dev/null
+++ b/ceph-provisioners/templates/storageclass-cephfs.yaml
@@ -0,0 +1,30 @@
+{{/*
+Copyright 2017 The Openstack-Helm Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+   http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/}}
+
+{{- if and .Values.manifests.storageclass_cephfs ( and .Values.deployment.ceph .Values.storageclass.cephfs.provision_storage_class ) }}
+{{- $envAll := . }}
+---
+kind: StorageClass
+apiVersion: storage.k8s.io/v1
+metadata:
+  name: {{ .Values.storageclass.cephfs.name }}
+provisioner: {{ .Values.storageclass.cephfs.provisioner }}
+parameters:
+    monitors: {{ tuple "ceph_mon" "internal" "mon" . | include "helm-toolkit.endpoints.host_and_port_endpoint_uri_lookup" }}
+    adminId: {{ .Values.storageclass.cephfs.admin_id | quote }}
+    adminSecretName: {{ .Values.storageclass.cephfs.user_secret_name | quote }}
+    adminSecretNamespace: {{ .Release.Namespace | quote }}
+{{- end }}
diff --git a/ceph-provisioners/templates/storageclass-rbd.yaml b/ceph-provisioners/templates/storageclass-rbd.yaml
new file mode 100644
index 0000000000..0dfd104355
--- /dev/null
+++ b/ceph-provisioners/templates/storageclass-rbd.yaml
@@ -0,0 +1,35 @@
+{{/*
+Copyright 2017 The Openstack-Helm Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+   http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/}}
+
+{{- if and .Values.manifests.storageclass_rbd (and .Values.deployment.ceph .Values.storageclass.rbd.provision_storage_class) }}
+{{- $envAll := . }}
+---
+apiVersion: storage.k8s.io/v1
+kind: StorageClass
+metadata:
+    name: {{ .Values.storageclass.rbd.name }}
+provisioner: {{ .Values.storageclass.rbd.provisioner }}
+parameters:
+    monitors: {{ tuple "ceph_mon" "internal" "mon" . | include "helm-toolkit.endpoints.host_and_port_endpoint_uri_lookup" }}
+    adminId: {{ .Values.storageclass.rbd.admin_id }}
+    adminSecretName: {{ .Values.storageclass.rbd.admin_secret_name }}
+    adminSecretNamespace: {{ .Values.storageclass.rbd.admin_secret_namespace }}
+    pool: {{ .Values.storageclass.rbd.pool }}
+    userId: {{ .Values.storageclass.rbd.user_id }}
+    userSecretName: {{ .Values.storageclass.rbd.user_secret_name }}
+    imageFormat: {{ .Values.storageclass.rbd.image_format | quote }}
+    imageFeatures: {{ .Values.storageclass.rbd.image_features }}
+{{- end }}
diff --git a/ceph-provisioners/values.yaml b/ceph-provisioners/values.yaml
new file mode 100644
index 0000000000..feb817e7d0
--- /dev/null
+++ b/ceph-provisioners/values.yaml
@@ -0,0 +1,231 @@
+# Copyright 2017 The Openstack-Helm Authors.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#     http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+# Default values for ceph-provisioners.
+# This is a YAML-formatted file.
+# Declare name/value pairs to be passed into your templates.
+# name: value
+
+deployment:
+  ceph: true
+  client_secrets: false
+  rbd_provisioner: true
+  cephfs_provisioner: true
+
+release_group: null
+
+images:
+  pull_policy: IfNotPresent
+  tags:
+    ceph_bootstrap: 'docker.io/ceph/daemon:tag-build-master-luminous-ubuntu-16.04'
+    ceph_cephfs_provisioner: 'quay.io/external_storage/cephfs-provisioner:v0.1.1'
+    ceph_config_helper: 'docker.io/port/ceph-config-helper:v1.10.3'
+    ceph_rbd_provisioner: 'quay.io/external_storage/rbd-provisioner:v0.1.1'
+    dep_check: 'quay.io/stackanetes/kubernetes-entrypoint:v0.3.1'
+    image_repo_sync: docker.io/docker:17.07.0
+  local_registry:
+    active: false
+    exclude:
+      - dep_check
+      - image_repo_sync
+
+labels:
+  job:
+    node_selector_key: openstack-control-plane
+    node_selector_value: enabled
+  provisioner:
+    node_selector_key: openstack-control-plane
+    node_selector_value: enabled
+
+pod:
+  dns_policy: "ClusterFirstWithHostNet"
+  replicas:
+    cephfs_provisioner: 2
+    rbd_provisioner: 2
+  affinity:
+    anti:
+      type:
+        default: preferredDuringSchedulingIgnoredDuringExecution
+      topologyKey:
+        default: kubernetes.io/hostname
+  resources:
+    enabled: false
+    rbd_provisioner:
+      requests:
+        memory: "5Mi"
+        cpu: "250m"
+      limits:
+        memory: "50Mi"
+        cpu: "500m"
+    cephfs_provisioner:
+      requests:
+        memory: "5Mi"
+        cpu: "250m"
+      limits:
+        memory: "50Mi"
+        cpu: "500m"
+    jobs:
+      bootstrap:
+        limits:
+          memory: "1024Mi"
+          cpu: "2000m"
+        requests:
+          memory: "128Mi"
+          cpu: "500m"
+      image_repo_sync:
+        requests:
+          memory: "128Mi"
+          cpu: "100m"
+        limits:
+          memory: "1024Mi"
+          cpu: "2000m"
+
+secrets:
+  keyrings:
+    admin: ceph-client-admin-keyring
+
+network:
+  public: 192.168.0.0/16
+  cluster: 192.168.0.0/16
+
+conf:
+  ceph:
+    global:
+      # auth
+      cephx: true
+      cephx_require_signatures: false
+      cephx_cluster_require_signatures: true
+      cephx_service_require_signatures: false
+    osd:
+      osd_mkfs_type: xfs
+      osd_mkfs_options_xfs: -f -i size=2048
+      osd_max_object_name_len: 256
+      ms_bind_port_min: 6800
+      ms_bind_port_max: 7100
+
+dependencies:
+  dynamic:
+    common:
+      local_image_registry:
+        jobs:
+          - ceph-client-image-repo-sync
+        services:
+          - endpoint: node
+            service: local_image_registry
+  static:
+    bootstrap:
+      jobs: null
+      services:
+        - endpoint: internal
+          service: ceph_mon
+    cephfs_client_key_generator:
+      jobs: null
+    cephfs_provisioner:
+      jobs:
+        - ceph-rbd-pool
+      services:
+        - endpoint: internal
+          service: ceph_mon
+    namespace_client_key_cleaner:
+      jobs: null
+    namespace_client_key_generator:
+      jobs: null
+    rbd_provisioner:
+      jobs:
+        - ceph-rbd-pool
+      services:
+        - endpoint: internal
+          service: ceph_mon
+    image_repo_sync:
+      services:
+        - endpoint: internal
+          service: local_image_registry
+
+bootstrap:
+  enabled: false
+  script: |
+    ceph -s
+    function ensure_pool () {
+      ceph osd pool stats $1 || ceph osd pool create $1 $2
+      local test_luminous=$(ceph tell osd.* version | egrep -c "12.2|luminous" | xargs echo)
+      if [[ ${test_luminous} -gt 0 ]]; then
+        ceph osd pool application enable $1 $3
+      fi
+    }
+    #ensure_pool volumes 8 cinder
+
+# If provision_storage_class is set to false, it is assumed that the
+# StorageClass definition is managed externally (see the commented example
+# override at the end of this section).
+storageclass:
+  rbd:
+    provision_storage_class: true
+    provisioner: ceph.com/rbd
+    name: general
+    monitors: null
+    pool: rbd
+    admin_id: admin
+    admin_secret_name: pvc-ceph-conf-combined-storageclass
+    admin_secret_namespace: ceph
+    user_id: admin
+    user_secret_name: pvc-ceph-client-key
+    image_format: "2"
+    image_features: layering
+  cephfs:
+    provision_storage_class: true
+    provisioner: ceph.com/cephfs
+    name: cephfs
+    admin_id: admin
+    user_secret_name: pvc-ceph-cephfs-client-key
+    admin_secret_name: pvc-ceph-conf-combined-storageclass
+    admin_secret_namespace: ceph
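+#
+# Example (illustrative only): a minimal values override disabling dynamic
+# StorageClass creation for both backends, for use when the StorageClasses
+# are managed outside of this chart:
+#
+# storageclass:
+#   rbd:
+#     provision_storage_class: false
+#   cephfs:
+#     provision_storage_class: false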
+
+endpoints:
+  cluster_domain_suffix: cluster.local
+  local_image_registry:
+    name: docker-registry
+    namespace: docker-registry
+    hosts:
+      default: localhost
+      internal: docker-registry
+      node: localhost
+    host_fqdn_override:
+      default: null
+    port:
+      registry:
+        node: 5000
+  ceph_mon:
+    namespace: null
+    hosts:
+      default: ceph-mon
+      discovery: ceph-mon-discovery
+    host_fqdn_override:
+      default: null
+    port:
+      mon:
+        default: 6789
+
+manifests:
+  configmap_bin: true
+  configmap_bin_common: true
+  configmap_etc: true
+  deployment_rbd_provisioner: true
+  deployment_cephfs_provisioner: true
+  job_bootstrap: false
+  job_cephfs_client_key: true
+  job_image_repo_sync: true
+  job_namespace_client_key_cleaner: true
+  job_namespace_client_key: true
+  storageclass_cephfs: true
+  storageclass_rbd: true
diff --git a/ceph-rgw/Chart.yaml b/ceph-rgw/Chart.yaml
new file mode 100644
index 0000000000..7e9c7d809c
--- /dev/null
+++ b/ceph-rgw/Chart.yaml
@@ -0,0 +1,18 @@
+# Copyright 2017 The Openstack-Helm Authors.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#     http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+apiVersion: v1
+description: OpenStack-Helm Ceph RadosGW
+name: ceph-rgw
+version: 0.1.0
diff --git a/ceph-rgw/requirements.yaml b/ceph-rgw/requirements.yaml
new file mode 100644
index 0000000000..53782e69b2
--- /dev/null
+++ b/ceph-rgw/requirements.yaml
@@ -0,0 +1,18 @@
+# Copyright 2017 The Openstack-Helm Authors.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#     http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+dependencies:
+  - name: helm-toolkit
+    repository: http://localhost:8879/charts
+    version: 0.1.0
diff --git a/ceph-rgw/templates/bin/_ceph-admin-keyring.sh.tpl b/ceph-rgw/templates/bin/_ceph-admin-keyring.sh.tpl
new file mode 100644
index 0000000000..8384abf4e9
--- /dev/null
+++ b/ceph-rgw/templates/bin/_ceph-admin-keyring.sh.tpl
@@ -0,0 +1,27 @@
+#!/bin/bash
+
+{{/*
+Copyright 2017 The Openstack-Helm Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+   http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/}}
+
+set -ex
+export HOME=/tmp
+
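+# Build the admin keyring from the client key mounted at /tmp/client-keyring.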
+cat <<EOF > /etc/ceph/ceph.client.admin.keyring
+[client.admin]
+    key = $(cat /tmp/client-keyring)
+EOF
+
+exit 0
diff --git a/ceph-rgw/templates/bin/_ceph-rgw-storage-init.sh.tpl b/ceph-rgw/templates/bin/_ceph-rgw-storage-init.sh.tpl
new file mode 100644
index 0000000000..81236a115c
--- /dev/null
+++ b/ceph-rgw/templates/bin/_ceph-rgw-storage-init.sh.tpl
@@ -0,0 +1,59 @@
+#!/bin/bash
+
+{{/*
+Copyright 2017 The Openstack-Helm Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+   http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/}}
+
+set -x
+if [ "x$STORAGE_BACKEND" == "xceph-rgw" ]; then
+  SECRET=$(mktemp --suffix .yaml)
+  KEYRING=$(mktemp --suffix .keyring)
+  function cleanup {
+      rm -f ${SECRET} ${KEYRING}
+  }
+  trap cleanup EXIT
+fi
+
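+# Render the named keyring template from /tmp/ceph-templates, substituting in
+# the supplied key, and base64-encode the result for embedding in a
+# Kubernetes Secret.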
+function kube_ceph_keyring_gen () {
+  CEPH_KEY=$1
+  CEPH_KEY_TEMPLATE=$2
+  sed "s|{{"{{"}} key {{"}}"}}|${CEPH_KEY}|" /tmp/ceph-templates/${CEPH_KEY_TEMPLATE} | base64 -w0 | tr -d '\n'
+}
+
+set -ex
+if [ "x$STORAGE_BACKEND" == "xceph-rgw" ]; then
+  ceph -s
+  if USERINFO=$(ceph auth get client.bootstrap-rgw); then
+    KEYSTR=$(echo $USERINFO | sed 's/.*\( key = .*\) caps mon.*/\1/')
+    echo $KEYSTR  > ${KEYRING}
+  else
+    #NOTE(Portdirect): Determine proper privs to assign keyring
+    ceph auth get-or-create client.bootstrap-rgw \
+      mon "allow profile bootstrap-rgw" \
+      -o ${KEYRING}
+  fi
+  FINAL_KEYRING=$(sed -n 's/^[[:blank:]]*key[[:blank:]]\+=[[:blank:]]\(.*\)/\1/p' ${KEYRING})
+  cat > ${SECRET} <<EOF
+apiVersion: v1
+kind: Secret
+metadata:
+  name: "os-ceph-bootstrap-rgw-keyring"
+type: Opaque
+data:
+  ceph.keyring: $(kube_ceph_keyring_gen ${FINAL_KEYRING} "bootstrap.keyring.rgw")
+EOF
+  kubectl apply --namespace ${NAMESPACE} -f ${SECRET}
+
+fi
diff --git a/ceph-rgw/templates/bin/_init-dirs.sh.tpl b/ceph-rgw/templates/bin/_init-dirs.sh.tpl
new file mode 100644
index 0000000000..fc82bdb848
--- /dev/null
+++ b/ceph-rgw/templates/bin/_init-dirs.sh.tpl
@@ -0,0 +1,41 @@
+#!/bin/bash
+
+{{/*
+Copyright 2017 The Openstack-Helm Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+   http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/}}
+
+set -ex
+export LC_ALL=C
+: "${HOSTNAME:=$(uname -n)}"
+: "${RGW_NAME:=${HOSTNAME}}"
+: "${RGW_BOOTSTRAP_KEYRING:=/var/lib/ceph/bootstrap-rgw/${CLUSTER}.keyring}"
+
+for keyring in ${RGW_BOOTSTRAP_KEYRING}; do
+  mkdir -p "$(dirname "$keyring")"
+done
+
+# Create the Ceph directories
+for DIRECTORY in radosgw tmp; do
+  mkdir -p "/var/lib/ceph/${DIRECTORY}"
+done
+
+# Create socket directory
+mkdir -p /run/ceph
+
+# Create the RadosGW data directory
+mkdir -p "/var/lib/ceph/radosgw/${RGW_NAME}"
+
+# Adjust the owner of all those directories
+chown -R ceph. /run/ceph/ /var/lib/ceph/*
diff --git a/ceph-rgw/templates/bin/rgw/_init_keystone.sh.tpl b/ceph-rgw/templates/bin/rgw/_init_keystone.sh.tpl
new file mode 100644
index 0000000000..c0d4e0e452
--- /dev/null
+++ b/ceph-rgw/templates/bin/rgw/_init_keystone.sh.tpl
@@ -0,0 +1,39 @@
+#!/bin/bash
+
+{{/*
+Copyright 2018 The Openstack-Helm Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+   http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/}}
+
+set -ex
+
+cp -va /tmp/ceph.conf /etc/ceph/ceph.conf
+
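+# Append a per-pod RadosGW section containing the Keystone integration
+# settings to the rendered ceph.conf.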
+cat >> /etc/ceph/ceph.conf <<EOF
+
+[client.rgw.${POD_NAME}]
+rgw_frontends = "civetweb port=${RGW_CIVETWEB_PORT}"
+rgw_keystone_url = "${KEYSTONE_URL}"
+rgw_keystone_admin_user = "${OS_USERNAME}"
+rgw_keystone_admin_password = "${OS_PASSWORD}"
+rgw_keystone_admin_project = "${OS_PROJECT_NAME}"
+rgw_keystone_admin_domain = "${OS_USER_DOMAIN_NAME}"
+{{ range $key, $value := .Values.conf.rgw_ks.config -}}
+{{- if kindIs "slice" $value -}}
+{{ $key }} = {{ include "helm-toolkit.joinListWithComma" $value | quote }}
+{{ else -}}
+{{ $key }} = {{ $value | quote  }}
+{{ end -}}
+{{- end -}}
+EOF
diff --git a/ceph-rgw/templates/bin/rgw/_rgw-s3-admin.sh.tpl b/ceph-rgw/templates/bin/rgw/_rgw-s3-admin.sh.tpl
new file mode 100644
index 0000000000..c17c0fc745
--- /dev/null
+++ b/ceph-rgw/templates/bin/rgw/_rgw-s3-admin.sh.tpl
@@ -0,0 +1,38 @@
+#!/bin/bash
+
+{{/*
+Copyright 2018 The Openstack-Helm Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+   http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/}}
+
+set -ex
+
+function create_admin_user () {
+  radosgw-admin user create \
+    --uid=${S3_ADMIN_USERNAME} \
+    --display-name=${S3_ADMIN_USERNAME}
+
+  radosgw-admin caps add \
+      --uid=${S3_ADMIN_USERNAME} \
+      --caps={{ .Values.conf.rgw_s3.admin_caps | quote }}
+
+  radosgw-admin key create \
+    --uid=${S3_ADMIN_USERNAME} \
+    --key-type=s3 \
+    --access-key ${S3_ADMIN_ACCESS_KEY} \
+    --secret-key ${S3_ADMIN_SECRET_KEY}
+}
+
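+# Only create the S3 admin user if it does not already exist.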
+radosgw-admin user stats --uid=${S3_ADMIN_USERNAME} || \
+  create_admin_user
diff --git a/ceph-rgw/templates/bin/rgw/_start.sh.tpl b/ceph-rgw/templates/bin/rgw/_start.sh.tpl
new file mode 100644
index 0000000000..6e119eef5a
--- /dev/null
+++ b/ceph-rgw/templates/bin/rgw/_start.sh.tpl
@@ -0,0 +1,75 @@
+#!/bin/bash
+
+{{/*
+Copyright 2018 The Openstack-Helm Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+   http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/}}
+
+set -ex
+export LC_ALL=C
+: "${CEPH_GET_ADMIN_KEY:=0}"
+: "${RGW_NAME:=$(uname -n)}"
+: "${RGW_ZONEGROUP:=}"
+: "${RGW_ZONE:=}"
+: "${RGW_REMOTE_CGI:=0}"
+: "${RGW_REMOTE_CGI_PORT:=9000}"
+: "${RGW_REMOTE_CGI_HOST:=0.0.0.0}"
+: "${ADMIN_KEYRING:=/etc/ceph/${CLUSTER}.client.admin.keyring}"
+: "${RGW_KEYRING:=/var/lib/ceph/radosgw/${RGW_NAME}/keyring}"
+: "${RGW_BOOTSTRAP_KEYRING:=/var/lib/ceph/bootstrap-rgw/${CLUSTER}.keyring}"
+
+if [[ ! -e "/etc/ceph/${CLUSTER}.conf" ]]; then
+  echo "ERROR- /etc/ceph/${CLUSTER}.conf must exist; get it from your existing mon"
+  exit 1
+fi
+
+if [ "${CEPH_GET_ADMIN_KEY}" -eq 1 ]; then
+  if [[ ! -e "${ADMIN_KEYRING}" ]]; then
+      echo "ERROR- ${ADMIN_KEYRING} must exist; get it from your existing mon"
+      exit 1
+  fi
+fi
+
+# Check to see if our RGW has been initialized
+if [ ! -e "${RGW_KEYRING}" ]; then
+
+  if [ ! -e "${RGW_BOOTSTRAP_KEYRING}" ]; then
+    echo "ERROR- ${RGW_BOOTSTRAP_KEYRING} must exist. You can extract it from your current monitor by running 'ceph auth get client.bootstrap-rgw -o ${RGW_BOOTSTRAP_KEYRING}'"
+    exit 1
+  fi
+
+  timeout 10 ceph --cluster "${CLUSTER}" --name "client.bootstrap-rgw" --keyring "${RGW_BOOTSTRAP_KEYRING}" health || exit 1
+
+  # Generate the RGW key
+  ceph --cluster "${CLUSTER}" --name "client.bootstrap-rgw" --keyring "${RGW_BOOTSTRAP_KEYRING}" auth get-or-create "client.rgw.${RGW_NAME}" osd 'allow rwx' mon 'allow rw' -o "${RGW_KEYRING}"
+  chown ceph. "${RGW_KEYRING}"
+  chmod 0600 "${RGW_KEYRING}"
+fi
+
+RGW_FRONTENDS="civetweb port=$RGW_CIVETWEB_PORT"
+if [ "$RGW_REMOTE_CGI" -eq 1 ]; then
+  RGW_FRONTENDS="fastcgi socket_port=$RGW_REMOTE_CGI_PORT socket_host=$RGW_REMOTE_CGI_HOST"
+fi
+
+/usr/bin/radosgw \
+  --cluster "${CLUSTER}" \
+  --setuser "ceph" \
+  --setgroup "ceph" \
+  -d \
+  -n "client.rgw.${RGW_NAME}" \
+  -k "${RGW_KEYRING}" \
+  --rgw-socket-path="" \
+  --rgw-zonegroup="${RGW_ZONEGROUP}" \
+  --rgw-zone="${RGW_ZONE}" \
+  --rgw-frontends="${RGW_FRONTENDS}"
diff --git a/ceph-rgw/templates/configmap-bin-ks.yaml b/ceph-rgw/templates/configmap-bin-ks.yaml
new file mode 100644
index 0000000000..276c58477d
--- /dev/null
+++ b/ceph-rgw/templates/configmap-bin-ks.yaml
@@ -0,0 +1,31 @@
+{{/*
+Copyright 2017 The Openstack-Helm Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+   http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/}}
+
+{{- if and .Values.manifests.configmap_bin_ks .Values.deployment.rgw_keystone_user_and_endpoints }}
+{{- $envAll := . }}
+---
+apiVersion: v1
+kind: ConfigMap
+metadata:
+  name: ceph-rgw-bin-ks
+data:
+  ks-service.sh: |
+{{- include "helm-toolkit.scripts.keystone_service" . | indent 4 }}
+  ks-endpoints.sh: |
+{{- include "helm-toolkit.scripts.keystone_endpoints" . | indent 4 }}
+  ks-user.sh: |
+{{- include "helm-toolkit.scripts.keystone_user" . | indent 4 }}
+{{- end }}
diff --git a/ceph-rgw/templates/configmap-bin.yaml b/ceph-rgw/templates/configmap-bin.yaml
new file mode 100644
index 0000000000..a9c96f9bc9
--- /dev/null
+++ b/ceph-rgw/templates/configmap-bin.yaml
@@ -0,0 +1,44 @@
+{{/*
+Copyright 2017 The Openstack-Helm Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+   http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/}}
+
+{{- if and .Values.manifests.configmap_bin .Values.deployment.ceph }}
+{{- $envAll := . }}
+---
+apiVersion: v1
+kind: ConfigMap
+metadata:
+  name: ceph-rgw-bin
+data:
+{{- if .Values.images.local_registry.active }}
+  image-repo-sync.sh: |
+{{- include "helm-toolkit.scripts.image_repo_sync" . | indent 4 }}
+{{- end }}
+
+  init-dirs.sh: |
+{{ tuple "bin/_init-dirs.sh.tpl" . | include "helm-toolkit.utils.template" | indent 4 }}
+
+  rgw-start.sh: |
+{{ tuple "bin/rgw/_start.sh.tpl" . | include "helm-toolkit.utils.template" | indent 4 }}
+  rgw-init-keystone.sh: |
+{{ tuple "bin/rgw/_init_keystone.sh.tpl" . | include "helm-toolkit.utils.template" | indent 4 }}
+  storage-init.sh: |
+{{ tuple "bin/_ceph-rgw-storage-init.sh.tpl" . | include "helm-toolkit.utils.template" | indent 4 }}
+  ceph-admin-keyring.sh: |
+{{ tuple "bin/_ceph-admin-keyring.sh.tpl" . | include "helm-toolkit.utils.template" | indent 4 }}
+  rgw-s3-admin.sh: |
+{{ tuple "bin/rgw/_rgw-s3-admin.sh.tpl" . | include "helm-toolkit.utils.template" | indent 4 }}
+
+{{- end }}
diff --git a/ceph-rgw/templates/configmap-ceph-rgw-templates.yaml b/ceph-rgw/templates/configmap-ceph-rgw-templates.yaml
new file mode 100644
index 0000000000..e446e4461d
--- /dev/null
+++ b/ceph-rgw/templates/configmap-ceph-rgw-templates.yaml
@@ -0,0 +1,27 @@
+{{/*
+Copyright 2017 The Openstack-Helm Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+   http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/}}
+
+{{- if and .Values.manifests.configmap_ceph_templates .Values.manifests.job_ceph_rgw_storage_init }}
+{{- $envAll := . }}
+---
+apiVersion: v1
+kind: ConfigMap
+metadata:
+  name: ceph-templates
+data:
+  bootstrap.keyring.rgw: |
+{{ .Values.conf.templates.keyring.bootstrap.rgw | indent 4 }}
+{{- end }}
diff --git a/ceph-rgw/templates/configmap-etc-client.yaml b/ceph-rgw/templates/configmap-etc-client.yaml
new file mode 100644
index 0000000000..25d7e1cfa2
--- /dev/null
+++ b/ceph-rgw/templates/configmap-etc-client.yaml
@@ -0,0 +1,55 @@
+{{/*
+Copyright 2017 The Openstack-Helm Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+   http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/}}
+
+{{- define "ceph.configmap.etc" }}
+{{- $configMapName := index . 0 }}
+{{- $envAll := index . 1 }}
+{{- with $envAll }}
+
+{{- if or (.Values.deployment.ceph) (.Values.deployment.client_secrets) }}
+
+{{- if empty .Values.conf.ceph.global.mon_host -}}
+{{- $monHost := tuple "ceph_mon" "internal" . | include "helm-toolkit.endpoints.hostname_fqdn_endpoint_lookup" }}
+{{- $_ := $monHost | set .Values.conf.ceph.global "mon_host" -}}
+{{- end -}}
+
+{{- if empty .Values.conf.ceph.osd.cluster_network -}}
+{{- $_ := .Values.network.cluster | set .Values.conf.ceph.osd "cluster_network" -}}
+{{- end -}}
+
+{{- if empty .Values.conf.ceph.osd.public_network -}}
+{{- $_ := .Values.network.public | set .Values.conf.ceph.osd "public_network" -}}
+{{- end -}}
+
+{{- if empty .Values.conf.rgw_ks.config.rgw_swift_url -}}
+{{- $_ := tuple "object_store" "public" "api" . | include "helm-toolkit.endpoints.keystone_endpoint_uri_lookup" | trimSuffix .Values.endpoints.object_store.path.default | set .Values.conf.rgw_ks.config "rgw_swift_url" -}}
+{{- end -}}
+
+---
+apiVersion: v1
+kind: ConfigMap
+metadata:
+  name: {{ $configMapName }}
+data:
+  ceph.conf: |
+{{ include "helm-toolkit.utils.to_ini" .Values.conf.ceph | indent 4 }}
+
+{{- end }}
+{{- end }}
+{{- end }}
+{{- if .Values.manifests.configmap_etc }}
+{{- list "ceph-rgw-etc" . | include "ceph.configmap.etc" }}
+{{- end }}
diff --git a/ceph-rgw/templates/deployment-rgw.yaml b/ceph-rgw/templates/deployment-rgw.yaml
new file mode 100644
index 0000000000..47fcd19d88
--- /dev/null
+++ b/ceph-rgw/templates/deployment-rgw.yaml
@@ -0,0 +1,163 @@
+{{/*
+Copyright 2017 The Openstack-Helm Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+   http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/}}
+
+{{- if and .Values.manifests.deployment_rgw ( and .Values.deployment.ceph .Values.conf.features.rgw ) }}
+{{- $envAll := . }}
+
+{{- $serviceAccountName := "ceph-rgw"}}
+{{ tuple $envAll "rgw" $serviceAccountName | include "helm-toolkit.snippets.kubernetes_pod_rbac_serviceaccount" }}
+---
+kind: Deployment
+apiVersion: apps/v1
+metadata:
+  name: ceph-rgw
+  labels:
+{{ tuple $envAll "ceph" "rgw" | include "helm-toolkit.snippets.kubernetes_metadata_labels" | indent 4 }}
+spec:
+  replicas: {{ .Values.pod.replicas.rgw }}
+  selector:
+    matchLabels:
+{{ tuple $envAll "ceph" "rgw" | include "helm-toolkit.snippets.kubernetes_metadata_labels" | indent 6 }}
+  template:
+    metadata:
+      labels:
+{{ tuple $envAll "ceph" "rgw" | include "helm-toolkit.snippets.kubernetes_metadata_labels" | indent 8 }}
+    spec:
+      serviceAccountName: {{ $serviceAccountName }}
+      affinity:
+{{ tuple $envAll "ceph" "rgw" | include "helm-toolkit.snippets.kubernetes_pod_anti_affinity" | indent 8 }}
+      nodeSelector:
+        {{ .Values.labels.rgw.node_selector_key }}: {{ .Values.labels.rgw.node_selector_value }}
+      initContainers:
+{{ tuple $envAll "rgw" list | include "helm-toolkit.snippets.kubernetes_entrypoint_init_container" | indent 8 }}
+        - name: ceph-init-dirs
+{{ tuple $envAll "ceph_rgw" | include "helm-toolkit.snippets.image" | indent 10 }}
+          command:
+            - /tmp/init-dirs.sh
+          env:
+            - name: CLUSTER
+              value: "ceph"
+          volumeMounts:
+            - name: ceph-rgw-bin
+              mountPath: /tmp/init-dirs.sh
+              subPath: init-dirs.sh
+              readOnly: true
+            - name: pod-var-lib-ceph
+              mountPath: /var/lib/ceph
+              readOnly: false
+            - name: pod-run
+              mountPath: /run
+              readOnly: false
+{{ if .Values.conf.rgw_ks.enabled }}
+        - name: ceph-rgw-ks-init
+{{ tuple $envAll "ceph_rgw" | include "helm-toolkit.snippets.image" | indent 10 }}
+{{ tuple $envAll $envAll.Values.pod.resources.rgw | include "helm-toolkit.snippets.kubernetes_resources" | indent 10 }}
+          env:
+            - name: CLUSTER
+              value: "ceph"
+            - name: POD_NAME
+              valueFrom:
+                fieldRef:
+                  apiVersion: v1
+                  fieldPath: metadata.name
+{{- with $env := dict "ksUserSecret" .Values.secrets.identity.user_rgw }}
+{{- include "helm-toolkit.snippets.keystone_openrc_env_vars" $env | indent 12 }}
+{{- end }}
+            - name: KEYSTONE_URL
+              value: {{ tuple "identity" "internal" "api" . | include "helm-toolkit.endpoints.keystone_endpoint_uri_lookup" | trimSuffix .Values.endpoints.identity.path.default | quote }}
+            - name: RGW_CIVETWEB_PORT
+              value: "{{ tuple "object_store" "internal" "api" $envAll | include "helm-toolkit.endpoints.endpoint_port_lookup" }}"
+          command:
+            - /tmp/rgw-init-keystone.sh
+          volumeMounts:
+            - name: pod-etc-ceph
+              mountPath: /etc/ceph
+            - name: ceph-rgw-bin
+              mountPath: /tmp/rgw-init-keystone.sh
+              subPath: rgw-init-keystone.sh
+              readOnly: true
+            - name: ceph-rgw-etc
+              mountPath: /tmp/ceph.conf
+              subPath: ceph.conf
+              readOnly: true
+{{ end }}
+      containers:
+        - name: ceph-rgw
+{{ tuple $envAll "ceph_rgw" | include "helm-toolkit.snippets.image" | indent 10 }}
+{{ tuple $envAll $envAll.Values.pod.resources.rgw | include "helm-toolkit.snippets.kubernetes_resources" | indent 10 }}
+          env:
+            - name: CLUSTER
+              value: "ceph"
+            - name: RGW_CIVETWEB_PORT
+              value: "{{ tuple "object_store" "internal" "api" $envAll | include "helm-toolkit.endpoints.endpoint_port_lookup" }}"
+          command:
+            - /tmp/rgw-start.sh
+          ports:
+            - containerPort: {{ tuple "object_store" "internal" "api" $envAll | include "helm-toolkit.endpoints.endpoint_port_lookup" }}
+          livenessProbe:
+              httpGet:
+                path: /
+                port: {{ tuple "object_store" "internal" "api" $envAll | include "helm-toolkit.endpoints.endpoint_port_lookup" }}
+              initialDelaySeconds: 120
+              timeoutSeconds: 5
+          readinessProbe:
+              httpGet:
+                path: /
+                port: {{ tuple "object_store" "internal" "api" $envAll | include "helm-toolkit.endpoints.endpoint_port_lookup" }}
+              timeoutSeconds: 5
+          volumeMounts:
+            - name: ceph-rgw-bin
+              mountPath: /tmp/rgw-start.sh
+              subPath: rgw-start.sh
+              readOnly: true
+            - name: pod-etc-ceph
+              mountPath: /etc/ceph
+{{- if not .Values.conf.rgw_ks.enabled }}
+            - name: ceph-rgw-etc
+              mountPath: /etc/ceph/ceph.conf
+              subPath: ceph.conf
+              readOnly: true
+{{- end }}
+            - name: ceph-bootstrap-rgw-keyring
+              mountPath: /var/lib/ceph/bootstrap-rgw/ceph.keyring
+              subPath: ceph.keyring
+              readOnly: false
+            - name: pod-var-lib-ceph
+              mountPath: /var/lib/ceph
+              readOnly: false
+            - name: pod-run
+              mountPath: /run
+              readOnly: false
+      volumes:
+        - name: pod-etc-ceph
+          emptyDir: {}
+        - name: ceph-rgw-bin
+          configMap:
+            name: ceph-rgw-bin
+            defaultMode: 0555
+        - name: ceph-rgw-etc
+          configMap:
+            name: ceph-rgw-etc
+            defaultMode: 0444
+        - name: pod-var-lib-ceph
+          emptyDir: {}
+        - name: pod-run
+          emptyDir:
+            medium: "Memory"
+        - name: ceph-bootstrap-rgw-keyring
+          secret:
+            secretName: {{ .Values.secrets.keyrings.rgw }}
+{{- end }}
diff --git a/ceph-rgw/templates/ingress-rgw.yaml b/ceph-rgw/templates/ingress-rgw.yaml
new file mode 100644
index 0000000000..aa6ff278c9
--- /dev/null
+++ b/ceph-rgw/templates/ingress-rgw.yaml
@@ -0,0 +1,20 @@
+{{/*
+Copyright 2017 The Openstack-Helm Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+   http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/}}
+
+{{- if and .Values.manifests.ingress_rgw ( and .Values.deployment.ceph (and .Values.network.api.ingress.public .Values.conf.features.rgw ) ) }}
+{{- $ingressOpts := dict "envAll" . "backendServiceType" "object_store" "backendPort" "ceph-rgw" -}}
+{{ $ingressOpts | include "helm-toolkit.manifests.ingress" }}
+{{- end }}
diff --git a/ceph-rgw/templates/job-ks-endpoints.yaml b/ceph-rgw/templates/job-ks-endpoints.yaml
new file mode 100644
index 0000000000..4062c07719
--- /dev/null
+++ b/ceph-rgw/templates/job-ks-endpoints.yaml
@@ -0,0 +1,20 @@
+{{/*
+Copyright 2017 The Openstack-Helm Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+   http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/}}
+
+{{- if and .Values.manifests.job_ks_endpoints .Values.deployment.rgw_keystone_user_and_endpoints }}
+{{- $ksServiceJob := dict "envAll" . "configMapBin" "ceph-rgw-bin-ks" "serviceName" "ceph" "serviceTypes" ( tuple "object-store" ) -}}
+{{ $ksServiceJob | include "helm-toolkit.manifests.job_ks_endpoints" }}
+{{- end }}
diff --git a/ceph-rgw/templates/job-ks-service.yaml b/ceph-rgw/templates/job-ks-service.yaml
new file mode 100644
index 0000000000..4b18dee875
--- /dev/null
+++ b/ceph-rgw/templates/job-ks-service.yaml
@@ -0,0 +1,20 @@
+{{/*
+Copyright 2017 The Openstack-Helm Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+   http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/}}
+
+{{- if and .Values.manifests.job_ks_service .Values.deployment.rgw_keystone_user_and_endpoints }}
+{{- $ksServiceJob := dict "envAll" . "configMapBin" "ceph-rgw-bin-ks" "serviceName" "ceph" "serviceTypes" ( tuple "object-store" ) -}}
+{{ $ksServiceJob | include "helm-toolkit.manifests.job_ks_service" }}
+{{- end }}
diff --git a/ceph-rgw/templates/job-ks-user.yaml b/ceph-rgw/templates/job-ks-user.yaml
new file mode 100644
index 0000000000..7243199f29
--- /dev/null
+++ b/ceph-rgw/templates/job-ks-user.yaml
@@ -0,0 +1,20 @@
+{{/*
+Copyright 2017 The Openstack-Helm Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+   http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/}}
+
+{{- if and .Values.manifests.job_ks_user .Values.deployment.rgw_keystone_user_and_endpoints }}
+{{- $ksUserJob := dict "envAll" . "configMapBin" "ceph-rgw-bin-ks" "serviceName" "ceph" "serviceUser" "swift" -}}
+{{ $ksUserJob | include "helm-toolkit.manifests.job_ks_user" }}
+{{- end }}
diff --git a/ceph-rgw/templates/job-rgw-storage-init.yaml b/ceph-rgw/templates/job-rgw-storage-init.yaml
new file mode 100644
index 0000000000..92ffce04ba
--- /dev/null
+++ b/ceph-rgw/templates/job-rgw-storage-init.yaml
@@ -0,0 +1,133 @@
+{{/*
+Copyright 2017 The Openstack-Helm Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+   http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/}}
+
+{{- if .Values.manifests.job_ceph_rgw_storage_init }}
+{{- $envAll := . }}
+
+{{- $serviceAccountName := "ceph-rgw-storage-init" }}
+{{ tuple $envAll "rgw_storage_init" $serviceAccountName | include "helm-toolkit.snippets.kubernetes_pod_rbac_serviceaccount" }}
+---
+apiVersion: rbac.authorization.k8s.io/v1beta1
+kind: Role
+metadata:
+  name: {{ $serviceAccountName }}
+rules:
+  - apiGroups:
+      - ""
+    resources:
+      - secrets
+    verbs:
+      - get
+      - create
+      - update
+      - patch
+---
+apiVersion: rbac.authorization.k8s.io/v1beta1
+kind: RoleBinding
+metadata:
+  name: {{ $serviceAccountName }}
+roleRef:
+  apiGroup: rbac.authorization.k8s.io
+  kind: Role
+  name: {{ $serviceAccountName }}
+subjects:
+  - kind: ServiceAccount
+    name: {{ $serviceAccountName }}
+    namespace: {{ $envAll.Release.Namespace }}
+---
+apiVersion: batch/v1
+kind: Job
+metadata:
+  name: ceph-rgw-storage-init
+spec:
+  template:
+    metadata:
+      labels:
+{{ tuple $envAll "ceph-rgw" "rgw-storage-init" | include "helm-toolkit.snippets.kubernetes_metadata_labels" | indent 8 }}
+    spec:
+      serviceAccountName: {{ $serviceAccountName }}
+      restartPolicy: OnFailure
+      nodeSelector:
+        {{ .Values.labels.job.node_selector_key }}: {{ .Values.labels.job.node_selector_value }}
+      initContainers:
+{{ tuple $envAll "rgw_storage_init" list | include "helm-toolkit.snippets.kubernetes_entrypoint_init_container" | indent 8 }}
+        - name: ceph-keyring-placement
+{{ tuple $envAll "ceph_config_helper" | include "helm-toolkit.snippets.image" | indent 10 }}
+          securityContext:
+            runAsUser: 0
+          command:
+            - /tmp/ceph-admin-keyring.sh
+          volumeMounts:
+            - name: etcceph
+              mountPath: /etc/ceph
+            - name: ceph-rgw-bin
+              mountPath: /tmp/ceph-admin-keyring.sh
+              subPath: ceph-admin-keyring.sh
+              readOnly: true
+            - name: ceph-keyring
+              mountPath: /tmp/client-keyring
+              subPath: key
+              readOnly: true
+      containers:
+        - name: ceph-rgw-storage-init
+{{ tuple $envAll "ceph_config_helper" | include "helm-toolkit.snippets.image" | indent 10 }}
+{{ tuple $envAll $envAll.Values.pod.resources.jobs.rgw_storage_init | include "helm-toolkit.snippets.kubernetes_resources" | indent 10 }}
+          env:
+            - name: NAMESPACE
+              valueFrom:
+                fieldRef:
+                  fieldPath: metadata.namespace
+            - name: STORAGE_BACKEND
+              value: "ceph-rgw"
+          command:
+            - /tmp/storage-init.sh
+          volumeMounts:
+            - name: ceph-rgw-bin
+              mountPath: /tmp/storage-init.sh
+              subPath: storage-init.sh
+              readOnly: true
+            - name: ceph-templates
+              mountPath: /tmp/ceph-templates
+              readOnly: true
+            - name: etcceph
+              mountPath: /etc/ceph
+            - name: ceph-etc
+              mountPath: /etc/ceph/ceph.conf
+              subPath: ceph.conf
+              readOnly: true
+            - name: ceph-keyring
+              mountPath: /tmp/client-keyring
+              subPath: key
+              readOnly: true
+      volumes:
+        - name: ceph-rgw-bin
+          configMap:
+            name: ceph-rgw-bin
+            defaultMode: 0555
+        - name: etcceph
+          emptyDir: {}
+        - name: ceph-etc
+          configMap:
+            name: ceph-etc
+            defaultMode: 0444
+        - name: ceph-templates
+          configMap:
+            name: ceph-templates
+            defaultMode: 0444
+        - name: ceph-keyring
+          secret:
+            secretName: {{ .Values.secrets.keyrings.admin | quote }}
+{{- end }}
diff --git a/ceph-rgw/templates/job-s3-admin.yaml b/ceph-rgw/templates/job-s3-admin.yaml
new file mode 100644
index 0000000000..b3cdd35c94
--- /dev/null
+++ b/ceph-rgw/templates/job-s3-admin.yaml
@@ -0,0 +1,139 @@
+{{/*
+Copyright 2017 The Openstack-Helm Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+   http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/}}
+
+{{- if and .Values.manifests.job_s3_admin ( and .Values.conf.features.rgw .Values.conf.rgw_s3.enabled ) }}
+{{- $envAll := . }}
+
+{{- $serviceAccountName := "rgw-s3-admin" }}
+{{ tuple $envAll "rgw_s3_admin" $serviceAccountName | include "helm-toolkit.snippets.kubernetes_pod_rbac_serviceaccount" }}
+
+{{- $s3AdminSecret := .Values.secrets.rgw_s3.admin }}
+---
+apiVersion: rbac.authorization.k8s.io/v1beta1
+kind: Role
+metadata:
+  name: {{ $serviceAccountName }}
+rules:
+  - apiGroups:
+      - ""
+    resources:
+      - secrets
+    verbs:
+      - get
+      - create
+      - update
+      - patch
+---
+apiVersion: rbac.authorization.k8s.io/v1beta1
+kind: RoleBinding
+metadata:
+  name: {{ $serviceAccountName }}
+roleRef:
+  apiGroup: rbac.authorization.k8s.io
+  kind: Role
+  name: {{ $serviceAccountName }}
+subjects:
+  - kind: ServiceAccount
+    name: {{ $serviceAccountName }}
+    namespace: {{ $envAll.Release.Namespace }}
+---
+apiVersion: batch/v1
+kind: Job
+metadata:
+  name: ceph-rgw-s3-admin
+spec:
+  template:
+    metadata:
+      labels:
+{{ tuple $envAll "ceph" "rgw-s3-admin" | include "helm-toolkit.snippets.kubernetes_metadata_labels" | indent 8 }}
+    spec:
+      serviceAccountName: {{ $serviceAccountName }}
+      restartPolicy: OnFailure
+      nodeSelector:
+        {{ .Values.labels.job.node_selector_key }}: {{ .Values.labels.job.node_selector_value }}
+      initContainers:
+{{ tuple $envAll "rgw_s3_admin" list | include "helm-toolkit.snippets.kubernetes_entrypoint_init_container" | indent 8 }}
+        - name: ceph-keyring-placement
+{{ tuple $envAll "ceph_config_helper" | include "helm-toolkit.snippets.image" | indent 10 }}
+          securityContext:
+            runAsUser: 0
+          command:
+            - /tmp/ceph-admin-keyring.sh
+          volumeMounts:
+            - name: etcceph
+              mountPath: /etc/ceph
+            - name: ceph-rgw-bin
+              mountPath: /tmp/ceph-admin-keyring.sh
+              subPath: ceph-admin-keyring.sh
+              readOnly: true
+            - name: ceph-keyring
+              mountPath: /tmp/client-keyring
+              subPath: key
+              readOnly: true
+      containers:
+        - name: create-s3-admin
+          image: {{ .Values.images.tags.rgw_s3_admin }}
+          imagePullPolicy: {{ .Values.images.pull_policy }}
+{{ tuple $envAll $envAll.Values.pod.resources.jobs.rgw_s3_admin | include "helm-toolkit.snippets.kubernetes_resources" | indent 10 }}
+          env:
+            - name: S3_ADMIN_USERNAME
+              valueFrom:
+                secretKeyRef:
+                  name: {{ $s3AdminSecret }}
+                  key: S3_ADMIN_USERNAME
+            - name: S3_ADMIN_ACCESS_KEY
+              valueFrom:
+                secretKeyRef:
+                  name: {{ $s3AdminSecret }}
+                  key: S3_ADMIN_ACCESS_KEY
+            - name: S3_ADMIN_SECRET_KEY
+              valueFrom:
+                secretKeyRef:
+                  name: {{ $s3AdminSecret }}
+                  key: S3_ADMIN_SECRET_KEY
+          command:
+            - /tmp/rgw-s3-admin.sh
+          volumeMounts:
+            - name: etcceph
+              mountPath: /etc/ceph
+            - name: ceph-rgw-bin
+              mountPath: /tmp/rgw-s3-admin.sh
+              subPath: rgw-s3-admin.sh
+              readOnly: true
+            - name: ceph-rgw-etc
+              mountPath: /etc/ceph/ceph.conf
+              subPath: ceph.conf
+              readOnly: true
+            - name: ceph-keyring
+              mountPath: /tmp/client-keyring
+              subPath: key
+              readOnly: true
+      volumes:
+        - name: etcceph
+          emptyDir: {}
+        - name: ceph-rgw-bin
+          configMap:
+            name: ceph-rgw-bin
+            defaultMode: 0555
+        - name: ceph-rgw-etc
+          configMap:
+            name: ceph-rgw-etc
+            defaultMode: 0444
+        - name: ceph-keyring
+          secret:
+            secretName: {{ .Values.secrets.keyrings.admin | quote }}
+{{- end }}
diff --git a/ceph-rgw/templates/secret-ingress-tls.yaml b/ceph-rgw/templates/secret-ingress-tls.yaml
new file mode 100644
index 0000000000..dee370f370
--- /dev/null
+++ b/ceph-rgw/templates/secret-ingress-tls.yaml
@@ -0,0 +1,19 @@
+{{/*
+Copyright 2017-2018 The Openstack-Helm Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+   http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/}}
+
+{{- if and .Values.manifests.secret_ingress_tls ( and .Values.deployment.ceph .Values.conf.features.rgw ) }}
+{{- include "helm-toolkit.manifests.secret_ingress_tls" ( dict "envAll" . "backendServiceType" "object_store" ) }}
+{{- end }}
diff --git a/ceph-rgw/templates/secret-keystone-rgw.yaml b/ceph-rgw/templates/secret-keystone-rgw.yaml
new file mode 100644
index 0000000000..c1d8e0c7a8
--- /dev/null
+++ b/ceph-rgw/templates/secret-keystone-rgw.yaml
@@ -0,0 +1,30 @@
+{{/*
+Copyright 2017 The Openstack-Helm Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+   http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/}}
+
+{{- if and .Values.manifests.secret_keystone_rgw .Values.deployment.ceph }}
+{{- $envAll := . }}
+{{- range $key1, $userClass := tuple "swift" }}
+{{- $secretName := index $envAll.Values.secrets.identity "user_rgw" }}
+---
+apiVersion: v1
+kind: Secret
+metadata:
+  name: {{ $secretName }}
+type: Opaque
+data:
+{{- tuple $userClass "internal" $envAll | include "helm-toolkit.snippets.keystone_secret_openrc" | indent 2 -}}
+{{- end }}
+{{- end }}
diff --git a/ceph-rgw/templates/secret-keystone.yaml b/ceph-rgw/templates/secret-keystone.yaml
new file mode 100644
index 0000000000..bea479fa3b
--- /dev/null
+++ b/ceph-rgw/templates/secret-keystone.yaml
@@ -0,0 +1,30 @@
+{{/*
+Copyright 2017 The Openstack-Helm Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+   http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/}}
+
+{{- if and .Values.manifests.secret_keystone .Values.deployment.rgw_keystone_user_and_endpoints }}
+{{- $envAll := . }}
+{{- range $key1, $userClass := tuple "admin" "swift" }}
+{{- $secretName := index $envAll.Values.secrets.identity $userClass }}
+---
+apiVersion: v1
+kind: Secret
+metadata:
+  name: {{ $secretName }}
+type: Opaque
+data:
+{{- tuple $userClass "internal" $envAll | include "helm-toolkit.snippets.keystone_secret_openrc" | indent 2 -}}
+{{- end }}
+{{- end }}
diff --git a/ceph-rgw/templates/secret-s3-rgw.yaml b/ceph-rgw/templates/secret-s3-rgw.yaml
new file mode 100644
index 0000000000..8f9a19268c
--- /dev/null
+++ b/ceph-rgw/templates/secret-s3-rgw.yaml
@@ -0,0 +1,30 @@
+{{/*
+Copyright 2017 The Openstack-Helm Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+   http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/}}
+
+{{- if .Values.manifests.secret_s3_rgw }}
+{{- $envAll := . }}
+{{- $secretName := index $envAll.Values.secrets.rgw_s3.admin }}
+---
+apiVersion: v1
+kind: Secret
+metadata:
+  name: {{ $secretName }}
+type: Opaque
+data:
+  S3_ADMIN_USERNAME: {{ .Values.endpoints.ceph_object_store.auth.admin.username | b64enc }}
+  S3_ADMIN_ACCESS_KEY: {{ .Values.endpoints.ceph_object_store.auth.admin.access_key | b64enc }}
+  S3_ADMIN_SECRET_KEY: {{ .Values.endpoints.ceph_object_store.auth.admin.secret_key | b64enc }}
+{{- end }}
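For reference, with the default credentials defined under `endpoints.ceph_object_store.auth.admin` in values.yaml below, this template should render a Secret roughly like the sketch here; `czNfYWRtaW4=` is simply the base64 encoding of the default username `s3_admin`, and the two keys are encoded the same way (illustrative, not verbatim chart output):

    apiVersion: v1
    kind: Secret
    metadata:
      name: radosgw-s3-admin-creds
    type: Opaque
    data:
      S3_ADMIN_USERNAME: czNfYWRtaW4=              # base64 of "s3_admin"
      S3_ADMIN_ACCESS_KEY: <base64 of access_key>  # from endpoints.ceph_object_store.auth.admin
      S3_ADMIN_SECRET_KEY: <base64 of secret_key>  # from endpoints.ceph_object_store.auth.admin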
diff --git a/ceph-rgw/templates/service-ingress-rgw.yaml b/ceph-rgw/templates/service-ingress-rgw.yaml
new file mode 100644
index 0000000000..aec670168d
--- /dev/null
+++ b/ceph-rgw/templates/service-ingress-rgw.yaml
@@ -0,0 +1,20 @@
+{{/*
+Copyright 2017 The Openstack-Helm Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+   http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/}}
+
+{{- if and .Values.manifests.service_ingress_rgw ( and .Values.deployment.ceph (and .Values.network.api.ingress.public .Values.conf.features.rgw ) ) }}
+{{- $serviceIngressOpts := dict "envAll" . "backendServiceType" "object_store" -}}
+{{ $serviceIngressOpts | include "helm-toolkit.manifests.service_ingress" }}
+{{- end }}
diff --git a/ceph-rgw/templates/service-rgw.yaml b/ceph-rgw/templates/service-rgw.yaml
new file mode 100644
index 0000000000..f986a0b14d
--- /dev/null
+++ b/ceph-rgw/templates/service-rgw.yaml
@@ -0,0 +1,41 @@
+{{/*
+Copyright 2017 The Openstack-Helm Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+   http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/}}
+
+{{- if and .Values.manifests.service_rgw ( and .Values.deployment.ceph .Values.conf.features.rgw ) }}
+{{- $envAll := . }}
+---
+apiVersion: v1
+kind: Service
+metadata:
+  name: ceph-rgw
+spec:
+  ports:
+  - name: ceph-rgw
+    port: {{ tuple "object_store" "internal" "api" $envAll | include "helm-toolkit.endpoints.endpoint_port_lookup" }}
+    protocol: TCP
+    targetPort: {{ tuple "object_store" "internal" "api" $envAll | include "helm-toolkit.endpoints.endpoint_port_lookup" }}
+  {{ if .Values.network.api.node_port.enabled }}
+    nodePort: {{ .Values.network.api.node_port.port }}
+  {{ end }}
+  selector:
+{{ tuple $envAll "ceph" "rgw" | include "helm-toolkit.snippets.kubernetes_metadata_labels" | indent 4 }}
+  {{ if .Values.network.api.node_port.enabled }}
+  type: NodePort
+  {{ if .Values.network.api.external_policy_local }}
+  externalTrafficPolicy: Local
+  {{ end }}
+  {{ end }}
+{{- end }}
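With the defaults in values.yaml below (node_port disabled, object_store API port 8088), this template should render a Service roughly like the sketch here; the selector labels are the ones helm-toolkit's kubernetes_metadata_labels snippet emits, and the release_group placeholder follows the Helm release name (illustrative only):

    apiVersion: v1
    kind: Service
    metadata:
      name: ceph-rgw
    spec:
      ports:
      - name: ceph-rgw
        port: 8088
        protocol: TCP
        targetPort: 8088
      selector:
        release_group: <release name>
        application: ceph
        component: rgw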
diff --git a/ceph-rgw/values.yaml b/ceph-rgw/values.yaml
new file mode 100644
index 0000000000..03c54462cc
--- /dev/null
+++ b/ceph-rgw/values.yaml
@@ -0,0 +1,476 @@
+# Copyright 2017 The Openstack-Helm Authors.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#     http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+# Default values for ceph-rgw.
+# This is a YAML-formatted file.
+# Declare name/value pairs to be passed into your templates.
+# name: value
+
+deployment:
+  ceph: false
+  rgw_keystone_user_and_endpoints: false
+
+release_group: null
+
+images:
+  pull_policy: IfNotPresent
+  tags:
+    ceph_config_helper: 'docker.io/port/ceph-config-helper:v1.10.3'
+    ceph_rgw: 'docker.io/ceph/daemon:tag-build-master-luminous-ubuntu-16.04'
+    dep_check: 'quay.io/stackanetes/kubernetes-entrypoint:v0.3.1'
+    image_repo_sync: docker.io/docker:17.07.0
+    rgw_s3_admin: 'docker.io/port/ceph-config-helper:v1.10.3'
+    ks_endpoints: 'docker.io/openstackhelm/heat:newton'
+    ks_service: 'docker.io/openstackhelm/heat:newton'
+    ks_user: 'docker.io/openstackhelm/heat:newton'
+  local_registry:
+    active: false
+    exclude:
+      - dep_check
+      - image_repo_sync
+
+labels:
+  job:
+    node_selector_key: openstack-control-plane
+    node_selector_value: enabled
+  rgw:
+    node_selector_key: ceph-rgw
+    node_selector_value: enabled
+
+pod:
+  dns_policy: "ClusterFirstWithHostNet"
+  replicas:
+    rgw: 2
+  affinity:
+    anti:
+      type:
+        default: preferredDuringSchedulingIgnoredDuringExecution
+      topologyKey:
+        default: kubernetes.io/hostname
+  resources:
+    enabled: false
+    rgw:
+      requests:
+        memory: "5Mi"
+        cpu: "250m"
+      limits:
+        memory: "50Mi"
+        cpu: "500m"
+    jobs:
+      ceph-rgw-storage-init:
+        requests:
+          memory: "128Mi"
+          cpu: "100m"
+        limits:
+          memory: "1024Mi"
+          cpu: "2000m"
+      image_repo_sync:
+        requests:
+          memory: "128Mi"
+          cpu: "100m"
+        limits:
+          memory: "1024Mi"
+          cpu: "2000m"
+      ks_endpoints:
+        requests:
+          memory: "128Mi"
+          cpu: "100m"
+        limits:
+          memory: "1024Mi"
+          cpu: "2000m"
+      ks_service:
+        requests:
+          memory: "128Mi"
+          cpu: "100m"
+        limits:
+          memory: "1024Mi"
+          cpu: "2000m"
+      ks_user:
+        requests:
+          memory: "128Mi"
+          cpu: "100m"
+        limits:
+          memory: "1024Mi"
+          cpu: "2000m"
+      rgw_s3_admin:
+        requests:
+          memory: "128Mi"
+          cpu: "100m"
+        limits:
+          memory: "1024Mi"
+          cpu: "2000m"
+
+secrets:
+  keyrings:
+    mon: ceph-mon-keyring
+    mds: ceph-bootstrap-mds-keyring
+    osd: ceph-bootstrap-osd-keyring
+    rgw: ceph-bootstrap-rgw-keyring
+    mgr: ceph-bootstrap-mgr-keyring
+    admin: pvc-ceph-client-key
+  identity:
+    admin: ceph-keystone-admin
+    swift: ceph-keystone-user
+    user_rgw: ceph-keystone-user-rgw
+  rgw_s3:
+    admin: radosgw-s3-admin-creds
+  tls:
+    object_store:
+      api:
+        public: ceph-tls-public
+
+network:
+  api:
+    ingress:
+      public: true
+      classes:
+        namespace: "nginx"
+        cluster: "nginx-cluster"
+      annotations:
+        nginx.ingress.kubernetes.io/rewrite-target: /
+        nginx.ingress.kubernetes.io/proxy-body-size: "0"
+        nginx.org/proxy-max-temp-file-size: "0"
+    external_policy_local: false
+    node_port:
+      enabled: false
+      port: 30004
+  public: 192.168.0.0/16
+  cluster: 192.168.0.0/16
+
+conf:
+  templates:
+    keyring:
+      admin: |
+        [client.admin]
+          key = {{ key }}
+          auid = 0
+          caps mds = "allow"
+          caps mon = "allow *"
+          caps osd = "allow *"
+          caps mgr = "allow *"
+      bootstrap:
+        rgw: |
+          [client.bootstrap-rgw]
+            key = {{ key }}
+            caps mgr = "allow profile bootstrap-rgw"
+  features:
+    rgw: true
+  pool:
+  #NOTE(portdirect): this drives a simple approximation of
+  # https://ceph.com/pgcalc/. The `target.osd` key should be set to match the
+  # expected number of OSDs in the cluster, and `target.pg_per_osd` should be
+  # set to the desired number of placement groups per OSD.
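+  # As a rough illustration of that approximation: with the defaults below
+  # (target.osd: 5, target.pg_per_osd: 100), the pgcalc-style estimate for the
+  # `rbd` pool (40% of data, replication 3) is (100 * 5 * 0.40) / 3 ~= 67,
+  # which lands on 64 placement groups once rounded to a power of two; the
+  # pool management job's exact rounding may differ.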
+    crush:
+      #NOTE(portdirect): to use RBD devices with Ubuntu 16.04's 4.4.x series
+      # kernel this should be set to `hammer`
+      tunables: null
+    target:
+      #NOTE(portdirect): we arbitrarily set the default number of expected OSDs
+      # to 5 to match the number of nodes in the OSH gate.
+      osd: 5
+      pg_per_osd: 100
+    default:
+      #NOTE(portdirect): this should be 'same_host' for a single node
+      # cluster to be in a healthy state
+      crush_rule: replicated_rule
+    #NOTE(portdirect): this section describes the pools that will be managed by
+    # the Ceph pool management job, which tunes each pool's placement groups and
+    # CRUSH rule based on the settings above.
+    spec:
+      # RBD pool
+      - name: rbd
+        application: rbd
+        replication: 3
+        percent_total_data: 40
+      # CephFS pools
+      - name: cephfs_metadata
+        application: cephfs
+        replication: 3
+        percent_total_data: 5
+      - name: cephfs_data
+        application: cephfs
+        replication: 3
+        percent_total_data: 10
+      # RadosGW pools
+      - name: .rgw.root
+        application: rgw
+        replication: 3
+        percent_total_data: 0.1
+      - name: default.rgw.control
+        application: rgw
+        replication: 3
+        percent_total_data: 0.1
+      - name: default.rgw.data.root
+        application: rgw
+        replication: 3
+        percent_total_data: 0.1
+      - name: default.rgw.gc
+        application: rgw
+        replication: 3
+        percent_total_data: 0.1
+      - name: default.rgw.log
+        application: rgw
+        replication: 3
+        percent_total_data: 0.1
+      - name: default.rgw.intent-log
+        application: rgw
+        replication: 3
+        percent_total_data: 0.1
+      - name: default.rgw.meta
+        application: rgw
+        replication: 3
+        percent_total_data: 0.1
+      - name: default.rgw.usage
+        application: rgw
+        replication: 3
+        percent_total_data: 0.1
+      - name: default.rgw.users.keys
+        application: rgw
+        replication: 3
+        percent_total_data: 0.1
+      - name: default.rgw.users.email
+        application: rgw
+        replication: 3
+        percent_total_data: 0.1
+      - name: default.rgw.users.swift
+        application: rgw
+        replication: 3
+        percent_total_data: 0.1
+      - name: default.rgw.users.uid
+        application: rgw
+        replication: 3
+        percent_total_data: 0.1
+      - name: default.rgw.buckets.extra
+        application: rgw
+        replication: 3
+        percent_total_data: 0.1
+      - name: default.rgw.buckets.index
+        application: rgw
+        replication: 3
+        percent_total_data: 3
+      - name: default.rgw.buckets.data
+        application: rgw
+        replication: 3
+        percent_total_data: 34.8
+  rgw_ks:
+    enabled: false
+    config:
+      rgw_keystone_api_version: 3
+      rgw_keystone_accepted_roles: "admin, member"
+      rgw_keystone_implicit_tenants: true
+      rgw_keystone_make_new_tenants: true
+      rgw_s3_auth_use_keystone: true
+      rgw_swift_account_in_url: true
+      rgw_swift_url: null
+      #NOTE (portdirect): See http://tracker.ceph.com/issues/21226
+      rgw_keystone_token_cache_size: 0
+  rgw_s3:
+    enabled: false
+    admin_caps: "users=*;buckets=*;zone=*"
+  ceph:
+    global:
+      # auth
+      cephx: true
+      cephx_require_signatures: false
+      cephx_cluster_require_signatures: true
+      cephx_service_require_signatures: false
+    osd:
+      osd_mkfs_type: xfs
+      osd_mkfs_options_xfs: -f -i size=2048
+      osd_max_object_name_len: 256
+      ms_bind_port_min: 6800
+      ms_bind_port_max: 7100
+
+dependencies:
+  dynamic:
+    common:
+      local_image_registry:
+        jobs:
+          - ceph-client-image-repo-sync
+        services:
+          - endpoint: node
+            service: local_image_registry
+  static:
+    bootstrap:
+      jobs: null
+      services:
+        - endpoint: internal
+          service: ceph_mon
+    rgw:
+      jobs:
+        - ceph-rgw-storage-init
+      services:
+        - endpoint: internal
+          service: keystone-api
+    image_repo_sync:
+      services:
+        - endpoint: internal
+          service: local_image_registry
+    ks_endpoints:
+      jobs:
+        - ceph-ks-service
+      services:
+        - endpoint: internal
+          service: identity
+    ks_service:
+      services:
+        - endpoint: internal
+          service: identity
+    ks_user:
+      services:
+        - endpoint: internal
+          service: identity
+    rgw_s3_admin:
+      services:
+        - endpoint: internal
+          service: ceph_object_store
+
+bootstrap:
+  enabled: false
+  script: |
+    ceph -s
+    function ensure_pool () {
+      ceph osd pool stats $1 || ceph osd pool create $1 $2
+      local test_luminous=$(ceph tell osd.* version | egrep -c "12.2|luminous")
+      if [[ ${test_luminous} -gt 0 ]]; then
+        ceph osd pool application enable $1 $3
+      fi
+    }
+    #ensure_pool volumes 8 cinder
+
+endpoints:
+  cluster_domain_suffix: cluster.local
+  local_image_registry:
+    name: docker-registry
+    namespace: docker-registry
+    hosts:
+      default: localhost
+      internal: docker-registry
+      node: localhost
+    host_fqdn_override:
+      default: null
+    port:
+      registry:
+        node: 5000
+  identity:
+    name: keystone
+    namespace: null
+    auth:
+      admin:
+        region_name: RegionOne
+        username: admin
+        password: password
+        project_name: admin
+        user_domain_name: default
+        project_domain_name: default
+      swift:
+        role: admin
+        region_name: RegionOne
+        username: swift
+        password: password
+        project_name: service
+        user_domain_name: service
+        project_domain_name: service
+    hosts:
+      default: keystone
+      internal: keystone-api
+    host_fqdn_override:
+      default: null
+    path:
+      default: /v3
+    scheme:
+      default: http
+    port:
+      api:
+        default: 80
+        internal: 5000
+  object_store:
+    name: swift
+    namespace: null
+    hosts:
+      default: ceph-rgw
+      public: radosgw
+    host_fqdn_override:
+      default: null
+      # NOTE(portdirect): this chart supports TLS for FQDN-overridden public
+      # endpoints using the following format:
+      # public:
+      #   host: null
+      #   tls:
+      #     crt: null
+      #     key: null
+    path:
+      default: /swift/v1/KEY_$(tenant_id)s
+    scheme:
+      default: http
+    port:
+      api:
+        default: 8088
+        public: 80
+  ceph_object_store:
+    name: radosgw
+    namespace: null
+    auth:
+      admin:
+        # NOTE(srwilkers): These defaults should be used for testing only, and
+        # should be changed before deploying to production
+        username: s3_admin
+        access_key: "32AGKHCIG3FZ62IY1MEC"
+        secret_key: "22S9iCLHcHId9AzAQD32O8jrq7DpFX9RHIOOC4NL"
+    hosts:
+      default: ceph-rgw
+      public: radosgw
+    host_fqdn_override:
+      default: null
+    path:
+      default: null
+    scheme:
+      default: http
+    port:
+      api:
+        default: 8088
+        public: 80
+  ceph_mon:
+    namespace: null
+    hosts:
+      default: ceph-mon
+      discovery: ceph-mon-discovery
+    host_fqdn_override:
+      default: null
+    port:
+      mon:
+        default: 6789
+
+manifests:
+  configmap_ceph_templates: true
+  configmap_bin: true
+  configmap_bin_ks: true
+  configmap_etc: true
+  deployment_rgw: true
+  ingress_rgw: true
+  job_ceph_rgw_storage_init: true
+  job_image_repo_sync: true
+  job_ks_endpoints: true
+  job_ks_service: true
+  job_ks_user: true
+  job_s3_admin: true
+  secret_s3_rgw: true
+  secret_keystone_rgw: true
+  secret_ingress_tls: true
+  secret_keystone: true
+  service_ingress_rgw: true
+  service_rgw: true
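Most of the templates above are gated behind `deployment.ceph` (and the Keystone secret behind `deployment.rgw_keystone_user_and_endpoints`), both of which default to false, so little is rendered out of the box. A minimal override sketch for running RGW against an in-cluster Ceph deployment might look like the following; the key names all come from the values above, while which toggles are actually needed depends on whether Keystone and/or native S3 access is wanted (illustrative only):

    deployment:
      ceph: true
    conf:
      rgw_ks:
        enabled: true   # Keystone-backed Swift/S3 auth
      rgw_s3:
        enabled: true   # native S3 admin user and keys
    network:
      api:
        node_port:
          enabled: true # also expose RGW on node port 30004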