---

# The name of the Cluster API cluster
# if not given, the release name is used
clusterName:

# Settings for the CNI addon
cni:
  # Indicates if a CNI should be deployed
  enabled: true
  # The CNI to deploy - supported values are calico or cilium
  type: calico
  # Settings for the Calico CNI
  # See https://projectcalico.docs.tigera.io/getting-started/kubernetes/helm
  calico:
    chart:
      repo: https://projectcalico.docs.tigera.io/charts
      name: tigera-operator
      version: v3.27.3
    release:
      namespace: tigera-operator
      values: {}
    # Global network policy denying access to the Nova metadata service
    # See https://docs.openstack.org/nova/latest/user/metadata.html#the-metadata-service
    globalNetworkPolicy:
      denyNamespaceSelector: kubernetes.io/metadata.name != 'openstack-system'
      allowPriority: 20
      denyPriority: 10
      allowEgressCidrs:
        - "0.0.0.0/0"
      denyEgressCidrs:
        - "169.254.169.254/32"
      allowv6EgressCidrs:
        - "::/0"
      denyv6EgressCidrs:
        - "fe80::a9fe:a9fe/128"
  # Settings for the Cilium CNI
  # See https://docs.cilium.io/en/stable/gettingstarted/k8s-install-helm/ for details
  cilium:
    chart:
      repo: https://helm.cilium.io/
      name: cilium
      version: 1.15.5
    release:
      namespace: kube-system
      values: {}

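# Example: switching the CNI to Cilium and passing Helm values through to the
# cilium chart. This is an illustrative override only - the Hubble settings
# shown should be checked against the Cilium chart documentation for the
# version pinned above.
# cni:
#   type: cilium
#   cilium:
#     release:
#       values:
#         hubble:
#           relay:
#             enabled: true
#           ui:
#             enabled: true
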
# Settings for the OpenStack integrations
openstack:
  # Indicates if the OpenStack integrations should be enabled
  enabled: false
  # The target namespace for the OpenStack integrations
  targetNamespace: openstack-system
  # cloud-config options for the OpenStack integrations
  # The [Global] section is configured to use the target cloud
  # See https://github.com/kubernetes/cloud-provider-openstack/blob/master/docs/openstack-cloud-controller-manager/using-openstack-cloud-controller-manager.md#config-openstack-cloud-controller-manager
  # and https://github.com/kubernetes/cloud-provider-openstack/blob/master/docs/cinder-csi-plugin/using-cinder-csi-plugin.md#block-storage
  cloudConfig:
    # By default, ignore volume AZs for Cinder as most clouds have a single globally-attachable Cinder AZ
    BlockStorage:
      ignore-volume-az: true
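  # Example: other cloud-config sections can be added alongside BlockStorage as
  # maps of key-value pairs. create-monitor and floating-network-id are options
  # documented for the OpenStack cloud controller manager (see the links above);
  # the network ID below is a placeholder for a Neutron network in your cloud.
  # cloudConfig:
  #   LoadBalancer:
  #     create-monitor: true
  #     floating-network-id: <neutron-network-id>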
  # Settings for the Cloud Controller Manager (CCM)
  ccm:
    # Indicates if the OpenStack CCM should be enabled
    # By default, the CCM is enabled if the OpenStack integrations are enabled
    # See https://github.com/kubernetes/cloud-provider-openstack/blob/master/charts/openstack-cloud-controller-manager/values.yaml
    enabled: true
    chart:
      repo: https://kubernetes.github.io/cloud-provider-openstack
      name: openstack-cloud-controller-manager
      version: 2.29.2
    values: {}
  # Settings for the Cinder CSI plugin
  csiCinder:
    # Indicates if the Cinder CSI should be enabled
    # By default, it is enabled if the OpenStack integrations are enabled
    # See https://github.com/kubernetes/cloud-provider-openstack/blob/master/charts/cinder-csi-plugin/values.yaml
    enabled: true
    chart:
      repo: https://kubernetes.github.io/cloud-provider-openstack
      name: openstack-cinder-csi
      version: 2.30.0
    values: {}
    # Variables affecting the definition of the storage class
    defaultStorageClass:
      # Indicates if the storage class should be enabled
      enabled: true
      # The name of the storage class
      name: csi-cinder
      # The reclaim policy for the storage class
      reclaimPolicy: Delete
      # Indicates if volume expansion is allowed
      allowVolumeExpansion: true
      # The Cinder availability zone to use for volumes provisioned by the storage class
      availabilityZone: nova
      # The Cinder volume type to use for volumes provisioned by the storage class
      # If not given, the default volume type will be used
      volumeType:
      # The volumeBindingMode field controls when volume binding and dynamic provisioning should occur.
      volumeBindingMode: WaitForFirstConsumer
      # The allowed topologies for the storage class
      allowedTopologies:
      # Filesystem type supported by Kubernetes. Defaults to "ext4"
      # fstype:
additionalStorageClasses: []
|
|
# - name: additional-storage-classname
|
|
# The reclaim policy for the storage class
|
|
# reclaimPolicy: Delete
|
|
# The reclaim policy for the storage class
|
|
# allowVolumeExpansion: true
|
|
# The Cinder availability zone to use for volumes provisioned by the storage class
|
|
# availabilityZone: nova
|
|
# The Cinder volume type to use for volumes provisioned by the storage class
|
|
# If not given, the default volume type will be used
|
|
# volumeType:
|
|
# The volumeBindingMode field controls when volume binding and dynamic provisioning should occur.
|
|
# Required for additionalStorageClasses.
|
|
# volumeBindingMode: WaitForFirstConsumer
|
|
# The allowed topologies for the storage class
|
|
# allowedTopologies:
|
|
# Filesystem type supported by Kubernetes. Defaults to "ext4"
|
|
# fstype:
|
|
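    # Example: a second storage class backed by a faster Cinder volume type.
    # The volume type name "ssd" is a placeholder - use a type that exists in
    # your cloud (e.g. from `openstack volume type list`).
    # additionalStorageClasses:
    #   - name: csi-cinder-ssd
    #     reclaimPolicy: Delete
    #     allowVolumeExpansion: true
    #     availabilityZone: nova
    #     volumeType: ssd
    #     volumeBindingMode: WaitForFirstConsumer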
  # Settings for Keystone authentication (k8s-keystone-auth)
  k8sKeystoneAuth:
    enabled: false
    targetNamespace: kube-system
    chart:
      repo: https://catalyst-cloud.github.io/capi-plugin-helm-charts
      name: k8s-keystone-auth
      version: 1.3.0

# Settings for etcd defragmentation jobs
etcdDefrag:
  # Indicates if the etcd defragmentation job should be enabled
  enabled: true
  chart:
    repo: https://stackhpc.github.io/capi-helm-charts
    name: etcd-defrag
    version: # Defaults to the same version as this chart
  release:
    # This should be the namespace in which the etcd pods are deployed
    namespace: kube-system
    values: {}

# Settings for the metrics server
# https://github.com/kubernetes-sigs/metrics-server#helm-chart
metricsServer:
  # Indicates if the metrics server should be deployed
  enabled: true
  chart:
    repo: https://kubernetes-sigs.github.io/metrics-server
    name: metrics-server
    version: 3.12.1
  release:
    namespace: kube-system
    values: {}

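# Example: extra arguments can be passed to metrics-server via the chart values.
# --kubelet-insecure-tls is a metrics-server flag that is sometimes needed when
# kubelet serving certificates are not signed by the cluster CA; whether it is
# required depends on your node images, so treat this as an illustration only.
# metricsServer:
#   release:
#     values:
#       args:
#         - --kubelet-insecure-tls
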
# Settings for the Kubernetes dashboard
# https://github.com/kubernetes/dashboard/tree/master/charts/helm-chart/kubernetes-dashboard
kubernetesDashboard:
  # Indicates if the Kubernetes dashboard should be enabled
  enabled: false
  chart:
    repo: https://kubernetes.github.io/dashboard
    name: kubernetes-dashboard
    version: 6.0.8
  release:
    namespace: kubernetes-dashboard
    values: {}

# Settings for ingress controllers
ingress:
  # Indicates if ingress controllers should be enabled
  enabled: false
  # Settings for the Nginx ingress controller
  # https://github.com/kubernetes/ingress-nginx/tree/main/charts/ingress-nginx#configuration
  nginx:
    # Indicates if the Nginx ingress controller should be enabled
    # The Nginx ingress controller is enabled by default if ingress controllers are enabled
    enabled: true
    chart:
      repo: https://kubernetes.github.io/ingress-nginx
      name: ingress-nginx
      version: 4.10.1
    release:
      namespace: ingress-nginx
      values: {}

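# Example: enabling ingress and customising the Nginx controller service.
# controller.service.* are standard ingress-nginx chart values; the annotation
# key below is a placeholder and depends entirely on your load balancer provider.
# ingress:
#   enabled: true
#   nginx:
#     release:
#       values:
#         controller:
#           service:
#             type: LoadBalancer
#             annotations:
#               my.provider.example/option: "value"
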
# Settings for cluster monitoring
monitoring:
  # Indicates if cluster monitoring should be enabled
  enabled: false
  # Config for the kube-prometheus-stack helm chart
  # https://github.com/prometheus-community/helm-charts/blob/main/charts/kube-prometheus-stack
  kubePrometheusStack:
    chart:
      repo: https://prometheus-community.github.io/helm-charts
      name: kube-prometheus-stack
      version: 59.1.0
    release:
      namespace: monitoring-system
      values:
        # Enable persistence by default for Prometheus and Alertmanager
        alertmanager:
          alertmanagerSpec:
            # By default, retain 7 days of data
            retention: 168h
            storage:
              volumeClaimTemplate:
                spec:
                  accessModes: ["ReadWriteOnce"]
                  resources:
                    requests:
                      storage: 10Gi
        prometheus:
          prometheusSpec:
            # The amount of data that is retained will be 90 days or 95% of the size of the
            # persistent volume, whichever is reached first
            retention: 90d
            storageSpec:
              volumeClaimTemplate:
                spec:
                  accessModes: ["ReadWriteOnce"]
                  resources:
                    requests:
                      storage: 10Gi
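  # Example: additional kube-prometheus-stack values can be supplied under
  # release.values in the same way. Setting the Grafana admin password is shown
  # purely as an illustration - adminPassword is a standard grafana chart value,
  # but real credentials are better managed via a secret than plain values.
  # kubePrometheusStack:
  #   release:
  #     values:
  #       grafana:
  #         adminPassword: "change-me"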
  lokiStack:
    enabled: true
    chart:
      repo: https://grafana.github.io/helm-charts
      name: loki-stack
      version: 2.10.2
    release:
      namespace: monitoring-system
      values:
        loki:
          # Enable retention and configure a default retention period of 72 hours
          config:
            compactor:
              retention_enabled: true
            limits_config:
              retention_period: 72h
          # Enable persistence by default
          persistence:
            enabled: true
            size: 10Gi
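  # Example: the Loki retention period can be raised by overriding the value
  # above, e.g. roughly 31 days (744h). This only illustrates the override path;
  # pick a period that matches the persistence size you allocate.
  # lokiStack:
  #   release:
  #     values:
  #       loki:
  #         config:
  #           limits_config:
  #             retention_period: 744h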
  # Configuration for the blackbox exporter
  blackboxExporter:
    enabled: true
    chart:
      repo: https://prometheus-community.github.io/helm-charts
      name: prometheus-blackbox-exporter
      version: 8.16.0
    release:
      namespace: monitoring-system
      values: {}
        # Example of adding additional scrape targets
        # serviceMonitor:
        #   targets:
        #     - name: example
        #       url: http://example.com/healthz

# Settings for node feature discovery
# https://github.com/kubernetes-sigs/node-feature-discovery/tree/master/deployment/helm/node-feature-discovery
nodeFeatureDiscovery:
  # Indicates if node feature discovery should be enabled
  enabled: true
  chart:
    repo: https://kubernetes-sigs.github.io/node-feature-discovery/charts
    name: node-feature-discovery
    version: 0.16.0
  release:
    namespace: node-feature-discovery
    values: {}

# Settings for the NVIDIA GPU operator
nvidiaGPUOperator:
  # Indicates if the NVIDIA GPU operator should be enabled
  # Note that because it uses node feature discovery to run only on nodes
  # with an NVIDIA GPU available, the overhead of enabling this on clusters
  # that do not need it now but may need it in the future is low
  enabled: true
  chart:
    repo: https://helm.ngc.nvidia.com/nvidia
    name: gpu-operator
    version: v24.3.0
  release:
    namespace: gpu-operator
    values:
      dcgmExporter:
        serviceMonitor:
          enabled: true

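# Example: if the GPU drivers are already baked into the node image, the
# operator's driver installation can be turned off. driver.enabled is a standard
# gpu-operator chart value; verify it against the chart version pinned above.
# nvidiaGPUOperator:
#   release:
#     values:
#       driver:
#         enabled: false
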
# Settings for the Mellanox network operator
mellanoxNetworkOperator:
  # Indicates if the network operator should be enabled
  # Note that because it uses node feature discovery to run only on nodes
  # with a Mellanox NIC available, the overhead of enabling this on clusters
  # that do not need it now but may need it in the future is low
  enabled: true
  chart:
    repo: https://helm.ngc.nvidia.com/nvidia
    name: network-operator
    version: 23.7.0
  release:
    namespace: network-operator
    values: {}

# Settings for any custom addons
custom: {}
  # # Indexed by the name of the release on the target cluster
  # my-custom-helm-release:
  #   # Indicates that this is a Helm addon
  #   kind: HelmRelease
  #   spec:
  #     # The namespace that the addon should be in
  #     namespace: my-namespace
  #     # Details of the Helm chart to use
  #     chart:
  #       # The chart repository that contains the chart to use
  #       repo: https://my-project/charts
  #       # The name of the chart to use
  #       name: my-chart
  #       # The version of the chart to use (must be an exact version)
  #       version: 1.5.0
  #     # The Helm values to use for the release
  #     values: {}
  # # Indexed by the name of the release on the target cluster
  # my-custom-manifests:
  #   # Indicates that this is a Manifests addon
  #   kind: Manifests
  #   spec:
  #     # The namespace that the addon should be in
  #     namespace: my-namespace
  #     # The manifests for the addon, indexed by filename
  #     manifests:
  #       secret.yaml: |-
  #         apiVersion: v1
  #         kind: Secret
  #         metadata:
  #           name: my-secret
  #         stringData:
  #           secret-file: "secret-data"