Add rook-ceph operator chart

Jonny Ervine 2024-02-11 09:39:20 +08:00
parent 0c052b86a6
commit ed087eae43
28 changed files with 16527 additions and 0 deletions

rook-ceph/Chart.yaml Normal file

@@ -0,0 +1,12 @@
apiVersion: v2
appVersion: v1.12.7
dependencies:
- name: library
repository: file://../library
version: 0.0.1
description: File, Block, and Object Storage Services for your Cloud-Native Environment
icon: https://rook.io/images/rook-logo.svg
name: rook-ceph
sources:
- https://github.com/rook/rook
version: v1.12.7
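
The `file://../library` repository means Helm vendors the library chart from the sibling directory when dependencies are built (`helm dependency update rook-ceph`); once vendored, the library's named templates become callable from this chart. A minimal sketch of a consuming template, assuming the `library.rook-ceph.labels` helper defined later in this commit (the file name and ConfigMap are hypothetical):

    {{/* hypothetical templates/example-consumer.yaml */}}
    apiVersion: v1
    kind: ConfigMap
    metadata:
      name: example
      labels:
        {{- include "library.rook-ceph.labels" . | nindent 4 }}
    data: {}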

rook-ceph/README.md Normal file

@@ -0,0 +1 @@
See the [Operator Helm Chart](/Documentation/Helm-Charts/operator-chart.md) documentation.

@@ -0,0 +1,5 @@
apiVersion: v2
description: A simple library chart to share content between Rook's charts
name: library
type: library
version: 0.0.1

@@ -0,0 +1,32 @@
{{/*
ClusterRoleBindings needed for running a Rook CephCluster
*/}}
{{- define "library.cluster.clusterrolebindings" }}
# Allow the ceph mgr to access cluster-wide resources necessary for the mgr modules
kind: ClusterRoleBinding
apiVersion: rbac.authorization.k8s.io/v1
metadata:
name: rook-ceph-mgr-cluster{{ template "library.suffix-cluster-namespace" . }}
roleRef:
apiGroup: rbac.authorization.k8s.io
kind: ClusterRole
name: rook-ceph-mgr-cluster
subjects:
- kind: ServiceAccount
name: rook-ceph-mgr
namespace: {{ .Release.Namespace }} # namespace:cluster
---
# Allow the ceph osd to access cluster-wide resources necessary for determining their topology location
kind: ClusterRoleBinding
apiVersion: rbac.authorization.k8s.io/v1
metadata:
name: rook-ceph-osd{{ template "library.suffix-cluster-namespace" . }}
roleRef:
apiGroup: rbac.authorization.k8s.io
kind: ClusterRole
name: rook-ceph-osd
subjects:
- kind: ServiceAccount
name: rook-ceph-osd
namespace: {{ .Release.Namespace }} # namespace:cluster
{{- end }}

@@ -0,0 +1,73 @@
{{/*
RBAC needed for enabling monitoring for a Rook CephCluster.
These should be scoped to the namespace where the CephCluster is located.
*/}}
{{- define "library.cluster.monitoring.roles" -}}
kind: Role
apiVersion: rbac.authorization.k8s.io/v1
metadata:
name: rook-ceph-monitoring
namespace: {{ .Release.Namespace }} # namespace:cluster
rules:
- apiGroups:
- "monitoring.coreos.com"
resources:
- servicemonitors
verbs:
- get
- list
- watch
- create
- update
- delete
---
# Allow management of monitoring resources in the mgr
kind: Role
apiVersion: rbac.authorization.k8s.io/v1
metadata:
name: rook-ceph-monitoring-mgr
namespace: {{ .Release.Namespace }} # namespace:cluster
rules:
- apiGroups:
- monitoring.coreos.com
resources:
- servicemonitors
verbs:
- get
- list
- create
- update
{{- end }}
{{- define "library.cluster.monitoring.rolebindings" }}
# Allow the operator to get ServiceMonitors in this cluster's namespace
kind: RoleBinding
apiVersion: rbac.authorization.k8s.io/v1
metadata:
name: rook-ceph-monitoring
namespace: {{ .Release.Namespace }} # namespace:cluster
roleRef:
apiGroup: rbac.authorization.k8s.io
kind: Role
name: rook-ceph-monitoring
subjects:
- kind: ServiceAccount
name: rook-ceph-system
namespace: {{ .Values.operatorNamespace | default .Release.Namespace }} # namespace:operator
---
# Allow creation of monitoring resources in the mgr
kind: RoleBinding
apiVersion: rbac.authorization.k8s.io/v1
metadata:
name: rook-ceph-monitoring-mgr
namespace: {{ .Release.Namespace }} # namespace:cluster
roleRef:
apiGroup: rbac.authorization.k8s.io
kind: Role
name: rook-ceph-monitoring-mgr
subjects:
- kind: ServiceAccount
name: rook-ceph-mgr
namespace: {{ .Release.Namespace }} # namespace:cluster
{{- end }}
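
These monitoring templates only render when the consuming chart opts in. A values sketch using the keys referenced above (`monitoring.enabled` gates the include in cluster-rbac.yaml below; `operatorNamespace` is only needed when the CephCluster lives outside the operator's namespace):

    # values.yaml sketch
    monitoring:
      enabled: true
    # operatorNamespace: rook-ceph   # set when cluster RBAC is installed in a different namespace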

@@ -0,0 +1,95 @@
{{/*
RoleBindings needed to enable Pod Security Policies for a CephCluster.
*/}}
{{- define "library.cluster.psp.rolebindings" }}
{{- if semverCompare "<1.25.0-0" .Capabilities.KubeVersion.GitVersion }}
---
apiVersion: rbac.authorization.k8s.io/v1
kind: RoleBinding
metadata:
name: rook-ceph-default-psp
namespace: {{ .Release.Namespace }} # namespace:cluster
labels:
operator: rook
storage-backend: ceph
{{- include "library.rook-ceph.labels" . | nindent 4 }}
roleRef:
apiGroup: rbac.authorization.k8s.io
kind: ClusterRole
name: psp:rook
subjects:
- kind: ServiceAccount
name: default
namespace: {{ .Release.Namespace }} # namespace:cluster
---
apiVersion: rbac.authorization.k8s.io/v1
kind: RoleBinding
metadata:
name: rook-ceph-osd-psp
namespace: {{ .Release.Namespace }} # namespace:cluster
roleRef:
apiGroup: rbac.authorization.k8s.io
kind: ClusterRole
name: psp:rook
subjects:
- kind: ServiceAccount
name: rook-ceph-osd
namespace: {{ .Release.Namespace }} # namespace:cluster
---
apiVersion: rbac.authorization.k8s.io/v1
kind: RoleBinding
metadata:
name: rook-ceph-rgw-psp
namespace: {{ .Release.Namespace }} # namespace:cluster
roleRef:
apiGroup: rbac.authorization.k8s.io
kind: ClusterRole
name: psp:rook
subjects:
- kind: ServiceAccount
name: rook-ceph-rgw
namespace: {{ .Release.Namespace }} # namespace:cluster
---
apiVersion: rbac.authorization.k8s.io/v1
kind: RoleBinding
metadata:
name: rook-ceph-mgr-psp
namespace: {{ .Release.Namespace }} # namespace:cluster
roleRef:
apiGroup: rbac.authorization.k8s.io
kind: ClusterRole
name: psp:rook
subjects:
- kind: ServiceAccount
name: rook-ceph-mgr
namespace: {{ .Release.Namespace }} # namespace:cluster
---
apiVersion: rbac.authorization.k8s.io/v1
kind: RoleBinding
metadata:
name: rook-ceph-cmd-reporter-psp
namespace: {{ .Release.Namespace }} # namespace:cluster
roleRef:
apiGroup: rbac.authorization.k8s.io
kind: ClusterRole
name: psp:rook
subjects:
- kind: ServiceAccount
name: rook-ceph-cmd-reporter
namespace: {{ .Release.Namespace }} # namespace:cluster
---
apiVersion: rbac.authorization.k8s.io/v1
kind: RoleBinding
metadata:
name: rook-ceph-purge-osd-psp
namespace: {{ .Release.Namespace }} # namespace:cluster
roleRef:
apiGroup: rbac.authorization.k8s.io
kind: ClusterRole
name: psp:rook
subjects:
- kind: ServiceAccount
name: rook-ceph-purge-osd
namespace: {{ .Release.Namespace }} # namespace:cluster
{{- end }}
{{- end }}
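
The `semverCompare "<1.25.0-0"` guard keeps these bindings off v1.25+ clusters, where PodSecurityPolicy was removed. The trailing `-0` is a pre-release floor so that distro-suffixed versions (e.g. `v1.24.9-eks-ba74326`) still satisfy the constraint; rendering can be previewed against an older API with `helm template --kube-version 1.24.0`. The gate in isolation, as a sketch:

    {{- if semverCompare "<1.25.0-0" .Capabilities.KubeVersion.GitVersion }}
    # PSP-era RoleBindings render here
    {{- end }}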

@@ -0,0 +1,151 @@
{{/*
Roles needed for running a Rook CephCluster
*/}}
{{- define "library.cluster.roles" }}
kind: Role
apiVersion: rbac.authorization.k8s.io/v1
metadata:
name: rook-ceph-osd
namespace: {{ .Release.Namespace }} # namespace:cluster
rules:
# this is needed for rook's "key-management" CLI to fetch the vault token from the secret when
# validating the connection details and for key rotation operations.
- apiGroups: [""]
resources: ["secrets"]
verbs: ["get","update"]
- apiGroups: [""]
resources: ["configmaps"]
verbs: ["get", "list", "watch", "create", "update", "delete"]
- apiGroups: ["ceph.rook.io"]
resources: ["cephclusters", "cephclusters/finalizers"]
verbs: ["get", "list", "create", "update", "delete"]
---
kind: Role
apiVersion: rbac.authorization.k8s.io/v1
metadata:
name: rook-ceph-rgw
namespace: {{ .Release.Namespace }} # namespace:cluster
rules:
# Placeholder role so the rgw service account will
# be generated in the CSV (ClusterServiceVersion). Remove this role and role binding
# when fixing https://github.com/rook/rook/issues/10141.
- apiGroups:
- ""
resources:
- configmaps
verbs:
- get
---
# Aspects of ceph-mgr that operate within the cluster's namespace
kind: Role
apiVersion: rbac.authorization.k8s.io/v1
metadata:
name: rook-ceph-mgr
namespace: {{ .Release.Namespace }} # namespace:cluster
rules:
- apiGroups:
- ""
resources:
- pods
- services
- pods/log
verbs:
- get
- list
- watch
- create
- update
- delete
- apiGroups:
- batch
resources:
- jobs
verbs:
- get
- list
- watch
- create
- update
- delete
- apiGroups:
- ceph.rook.io
resources:
- cephclients
- cephclusters
- cephblockpools
- cephfilesystems
- cephnfses
- cephobjectstores
- cephobjectstoreusers
- cephobjectrealms
- cephobjectzonegroups
- cephobjectzones
- cephbuckettopics
- cephbucketnotifications
- cephrbdmirrors
- cephfilesystemmirrors
- cephfilesystemsubvolumegroups
- cephblockpoolradosnamespaces
- cephcosidrivers
verbs:
- get
- list
- watch
- create
- update
- delete
- patch
- apiGroups:
- apps
resources:
- deployments/scale
- deployments
verbs:
- patch
- delete
- apiGroups:
- ''
resources:
- persistentvolumeclaims
verbs:
- delete
---
kind: Role
apiVersion: rbac.authorization.k8s.io/v1
metadata:
name: rook-ceph-cmd-reporter
namespace: {{ .Release.Namespace }} # namespace:cluster
rules:
- apiGroups:
- ""
resources:
- pods
- configmaps
verbs:
- get
- list
- watch
- create
- update
- delete
---
# Aspects of ceph osd purge job that require access to the cluster namespace
kind: Role
apiVersion: rbac.authorization.k8s.io/v1
metadata:
name: rook-ceph-purge-osd
namespace: {{ .Release.Namespace }} # namespace:cluster
rules:
- apiGroups: [""]
resources: ["configmaps"]
verbs: ["get"]
- apiGroups: ["apps"]
resources: ["deployments"]
verbs: ["get", "delete" ]
- apiGroups: ["batch"]
resources: ["jobs"]
verbs: ["get", "list", "delete" ]
- apiGroups: [""]
resources: ["persistentvolumeclaims"]
verbs: ["get", "update", "delete", "list"]
{{- end }}

@@ -0,0 +1,108 @@
{{/*
RoleBindings needed for running a Rook CephCluster
*/}}
{{- define "library.cluster.rolebindings" }}
# Allow the operator to create resources in this cluster's namespace
kind: RoleBinding
apiVersion: rbac.authorization.k8s.io/v1
metadata:
name: rook-ceph-cluster-mgmt
namespace: {{ .Release.Namespace }} # namespace:cluster
roleRef:
apiGroup: rbac.authorization.k8s.io
kind: ClusterRole
name: rook-ceph-cluster-mgmt
subjects:
- kind: ServiceAccount
name: rook-ceph-system
namespace: {{ .Values.operatorNamespace | default .Release.Namespace }} # namespace:operator
---
# Allow the osd pods in this namespace to work with configmaps
kind: RoleBinding
apiVersion: rbac.authorization.k8s.io/v1
metadata:
name: rook-ceph-osd
namespace: {{ .Release.Namespace }} # namespace:cluster
roleRef:
apiGroup: rbac.authorization.k8s.io
kind: Role
name: rook-ceph-osd
subjects:
- kind: ServiceAccount
name: rook-ceph-osd
namespace: {{ .Release.Namespace }} # namespace:cluster
---
# Allow the rgw pods in this namespace to work with configmaps
kind: RoleBinding
apiVersion: rbac.authorization.k8s.io/v1
metadata:
name: rook-ceph-rgw
namespace: {{ .Release.Namespace }} # namespace:cluster
roleRef:
apiGroup: rbac.authorization.k8s.io
kind: Role
name: rook-ceph-rgw
subjects:
- kind: ServiceAccount
name: rook-ceph-rgw
namespace: {{ .Release.Namespace }} # namespace:cluster
---
# Allow the ceph mgr to access resources scoped to the CephCluster namespace necessary for mgr modules
kind: RoleBinding
apiVersion: rbac.authorization.k8s.io/v1
metadata:
name: rook-ceph-mgr
namespace: {{ .Release.Namespace }} # namespace:cluster
roleRef:
apiGroup: rbac.authorization.k8s.io
kind: Role
name: rook-ceph-mgr
subjects:
- kind: ServiceAccount
name: rook-ceph-mgr
namespace: {{ .Release.Namespace }} # namespace:cluster
---
# Allow the ceph mgr to access resources in the Rook operator namespace necessary for mgr modules
kind: RoleBinding
apiVersion: rbac.authorization.k8s.io/v1
metadata:
name: rook-ceph-mgr-system{{ template "library.suffix-cluster-namespace" . }}
namespace: {{ .Values.operatorNamespace | default .Release.Namespace }} # namespace:operator
roleRef:
apiGroup: rbac.authorization.k8s.io
kind: ClusterRole
name: rook-ceph-mgr-system
subjects:
- kind: ServiceAccount
name: rook-ceph-mgr
namespace: {{ .Release.Namespace }} # namespace:cluster
---
kind: RoleBinding
apiVersion: rbac.authorization.k8s.io/v1
metadata:
name: rook-ceph-cmd-reporter
namespace: {{ .Release.Namespace }} # namespace:cluster
roleRef:
apiGroup: rbac.authorization.k8s.io
kind: Role
name: rook-ceph-cmd-reporter
subjects:
- kind: ServiceAccount
name: rook-ceph-cmd-reporter
namespace: {{ .Release.Namespace }} # namespace:cluster
---
# Allow the osd purge job to run in this namespace
kind: RoleBinding
apiVersion: rbac.authorization.k8s.io/v1
metadata:
name: rook-ceph-purge-osd
namespace: {{ .Release.Namespace }} # namespace:cluster
roleRef:
apiGroup: rbac.authorization.k8s.io
kind: Role
name: rook-ceph-purge-osd
subjects:
- kind: ServiceAccount
name: rook-ceph-purge-osd
namespace: {{ .Release.Namespace }} # namespace:cluster
{{- end }}

@@ -0,0 +1,60 @@
{{/*
ServiceAccounts needed for running a Rook CephCluster
*/}}
{{- define "library.cluster.serviceaccounts" }}
# Service account for Ceph OSDs
apiVersion: v1
kind: ServiceAccount
metadata:
name: rook-ceph-osd
namespace: {{ .Release.Namespace }} # namespace:cluster
labels:
operator: rook
storage-backend: ceph
{{- include "library.rook-ceph.labels" . | nindent 4 }}
{{ include "library.imagePullSecrets" . }}
---
# Service account for Ceph mgrs
apiVersion: v1
kind: ServiceAccount
metadata:
name: rook-ceph-mgr
namespace: {{ .Release.Namespace }} # namespace:cluster
labels:
operator: rook
storage-backend: ceph
{{- include "library.rook-ceph.labels" . | nindent 4 }}
{{ include "library.imagePullSecrets" . }}
---
# Service account for the job that reports the Ceph version in an image
apiVersion: v1
kind: ServiceAccount
metadata:
name: rook-ceph-cmd-reporter
namespace: {{ .Release.Namespace }} # namespace:cluster
labels:
operator: rook
storage-backend: ceph
{{- include "library.rook-ceph.labels" . | nindent 4 }}
{{ include "library.imagePullSecrets" . }}
---
# Service account for job that purges OSDs from a Rook-Ceph cluster
apiVersion: v1
kind: ServiceAccount
metadata:
name: rook-ceph-purge-osd
namespace: {{ .Release.Namespace }} # namespace:cluster
{{ include "library.imagePullSecrets" . }}
---
# Service account for RGW server
apiVersion: v1
kind: ServiceAccount
metadata:
name: rook-ceph-rgw
namespace: {{ .Release.Namespace }} # namespace:cluster
labels:
operator: rook
storage-backend: ceph
{{- include "library.rook-ceph.labels" . | nindent 4 }}
{{ include "library.imagePullSecrets" . }}
{{ end }}

@@ -0,0 +1,13 @@
{{/*
Define imagePullSecrets option to pass to all service accounts
*/}}
{{- define "library.imagePullSecrets" }}
{{- if .Values.imagePullSecrets }}
imagePullSecrets:
{{ toYaml .Values.imagePullSecrets }}
{{- else }}
{{/* if the secrets are not included, include a comment for generating common.yaml */}}
# imagePullSecrets:
# - name: my-registry-secret
{{- end }}
{{- end }}
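
A values sketch matching the helper above; when `imagePullSecrets` is set, every service account that includes this helper gets the secrets attached (the secret name is hypothetical, mirroring the generated comment):

    # values.yaml sketch
    imagePullSecrets:
      - name: my-registry-secret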

@@ -0,0 +1,9 @@
{{/*
Common labels
*/}}
{{- define "library.rook-ceph.labels" -}}
app.kubernetes.io/part-of: rook-ceph-operator
app.kubernetes.io/managed-by: {{ .Release.Service }}
app.kubernetes.io/created-by: helm
helm.sh/chart: "{{ .Chart.Name }}-{{ .Chart.Version | replace "+" "_" }}"
{{- end }}
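
As a worked example, for a release of this chart installed by Helm (chart name `rook-ceph`, version `v1.12.7` per Chart.yaml above, and `.Release.Service` rendering as `Helm`), the helper produces:

    app.kubernetes.io/part-of: rook-ceph-operator
    app.kubernetes.io/managed-by: Helm
    app.kubernetes.io/created-by: helm
    helm.sh/chart: "rook-ceph-v1.12.7"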

@@ -0,0 +1,18 @@
{{/*
Some ClusterRoles or Roles in the operator namespace need to be bound to service accounts in the
CephCluster namespace.
If the cluster namespace is the same as the operator namespace, we want a binding with a basic name.
This is the case for the rook-ceph (Rook-Ceph Operator) chart.
If the cluster namespace is different from the operator namespace, we want to name the binding
(in the operator namespace) with a suffix that includes the cluster namespace. This is the case for
some instances of the rook-ceph-cluster (CephCluster) chart.
*/}}
{{- define "library.suffix-cluster-namespace" -}}
{{/* the operator chart won't set .Values.operatorNamespace, so default to .Release.Namespace */}}
{{- $operatorNamespace := .Values.operatorNamespace | default .Release.Namespace -}}
{{- $clusterNamespace := .Release.Namespace -}}
{{- if ne $clusterNamespace $operatorNamespace -}}
{{ printf "-%s" $clusterNamespace }}
{{- end }}
{{- end }}
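
A worked rendering of the helper, using the binding-name pattern from the templates above (the `ceph-tenant-a` namespace is hypothetical):

    {{/* sketch: name as rendered by the pattern used above */}}
    name: rook-ceph-mgr-cluster{{ template "library.suffix-cluster-namespace" . }}
    # cluster ns == operator ns    -> rook-ceph-mgr-cluster
    # cluster ns "ceph-tenant-a"   -> rook-ceph-mgr-cluster-ceph-tenant-a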

@@ -0,0 +1,11 @@
The Rook Operator has been installed. Check its status by running:
  kubectl --namespace {{ .Release.Namespace }} get pods -l "app=rook-ceph-operator"
Visit https://rook.io/docs/rook/latest for instructions on how to create and configure Rook clusters.
Important Notes:
- You must customize the 'CephCluster' resource in the sample manifests for your cluster.
- Each CephCluster must be deployed to its own namespace; the samples use `rook-ceph` for the namespace.
- The sample manifests assume you also installed the rook-ceph operator in the `rook-ceph` namespace.
- The helm chart includes all the RBAC required to create a CephCluster CRD in the same namespace.
- Any disk devices you add to the cluster in the 'CephCluster' must be empty (no filesystem and no partitions).

@@ -0,0 +1,35 @@
{{- if .Values.rbacAggregate.enableOBCs }}
apiVersion: rbac.authorization.k8s.io/v1
kind: ClusterRole
metadata:
name: rook-ceph-obc-view
labels:
rbac.authorization.k8s.io/aggregate-to-view: "true"
rules:
- apiGroups:
- objectbucket.io
resources:
- objectbucketclaims
verbs:
- get
- list
- watch
---
apiVersion: rbac.authorization.k8s.io/v1
kind: ClusterRole
metadata:
name: rook-ceph-obc-edit
labels:
rbac.authorization.k8s.io/aggregate-to-edit: "true"
rules:
- apiGroups:
- objectbucket.io
resources:
- objectbucketclaims
verbs:
- create
- delete
- deletecollection
- patch
- update
{{- end }}
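
These two ClusterRoles carry Kubernetes aggregation labels, so their rules fold into the built-in `view` and `edit` roles automatically; nothing binds them directly. A values sketch for the toggle referenced above:

    # values.yaml sketch
    rbacAggregate:
      enableOBCs: true

With this enabled, a user bound to the stock `edit` role can create ObjectBucketClaims (verifiable with `kubectl auth can-i create objectbucketclaims`).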

@@ -0,0 +1,42 @@
{{/*
These are resources scoped to the CephCluster namespace. They will be created in the operator
namespace by default so that a user can easily install a CephCluster into the operator's namespace
using example manifests.
*/}}
{{/*
serviceaccounts
*/}}
{{ include "library.cluster.serviceaccounts" . }}
{{/*
clusterrolebindings
*/}}
---
{{ include "library.cluster.clusterrolebindings" . }}
{{- if .Values.pspEnable }}
{{ include "library.cluster.psp.rolebindings" . }}
{{- end }}
{{/*
roles
*/}}
---
{{ include "library.cluster.roles" . }}
{{- if .Values.monitoring.enabled }}
---
{{ include "library.cluster.monitoring.roles" . }}
{{- end }}
{{/*
rolebindings
*/}}
---
{{ include "library.cluster.rolebindings" . }}
{{- if .Values.monitoring.enabled }}
---
{{ include "library.cluster.monitoring.rolebindings" . }}
{{- end }}
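
Because every define above keys off `.Release.Namespace` (with `operatorNamespace` as the fallback for operator-side bindings), the same library serves a CephCluster installed in its own namespace. A hypothetical values sketch for that split layout, using only keys referenced in this file:

    # values.yaml sketch for cluster RBAC in a namespace separate from the operator
    operatorNamespace: rook-ceph
    pspEnable: false
    monitoring:
      enabled: true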

@@ -0,0 +1,641 @@
{{- if .Values.rbacEnable }}
kind: ClusterRole
apiVersion: rbac.authorization.k8s.io/v1
metadata:
name: rook-ceph-system
labels:
operator: rook
storage-backend: ceph
{{- include "library.rook-ceph.labels" . | nindent 4 }}
rules:
# Most resources are represented by a string representation of their name, such as "pods", just as it appears in the URL for the relevant API endpoint.
# However, some Kubernetes APIs involve a "subresource", such as the logs for a pod. [...]
# To represent this in an RBAC role, use a slash to delimit the resource and subresource.
# https://kubernetes.io/docs/reference/access-authn-authz/rbac/#referring-to-resources
- apiGroups: [""]
resources: ["pods", "pods/log"]
verbs: ["get", "list"]
- apiGroups: [""]
resources: ["pods/exec"]
verbs: ["create"]
- apiGroups: ["admissionregistration.k8s.io"]
resources: ["validatingwebhookconfigurations"]
verbs: ["create", "get", "delete", "update"]
- apiGroups: ["csiaddons.openshift.io"]
resources: ["networkfences"]
verbs: ["create", "get", "update", "delete", "watch", "list"]
- apiGroups: ["apiextensions.k8s.io"]
resources: ["customresourcedefinitions"]
verbs: ["get"]
---
# The cluster role for managing all the cluster-specific resources in a namespace
apiVersion: rbac.authorization.k8s.io/v1
kind: ClusterRole
metadata:
name: rook-ceph-cluster-mgmt
labels:
operator: rook
storage-backend: ceph
{{- include "library.rook-ceph.labels" . | nindent 4 }}
rules:
- apiGroups:
- ""
- apps
- extensions
resources:
- secrets
- pods
- pods/log
- services
- configmaps
- deployments
- daemonsets
verbs:
- get
- list
- watch
- patch
- create
- update
- delete
---
# The cluster role for managing the Rook CRDs
apiVersion: rbac.authorization.k8s.io/v1
# Rook watches for its CRDs in all namespaces, so this should be a cluster-scoped role unless the
# operator config `ROOK_CURRENT_NAMESPACE_ONLY=true`.
kind: ClusterRole
metadata:
name: rook-ceph-global
labels:
operator: rook
storage-backend: ceph
{{- include "library.rook-ceph.labels" . | nindent 4 }}
rules:
- apiGroups:
- ""
resources:
# Pod access is needed for fencing
- pods
# Node access is needed for determining nodes where mons should run
- nodes
- nodes/proxy
- services
# Rook watches secrets which it uses to configure access to external resources.
# e.g., external Ceph cluster; TLS certificates for the admission controller or object store
- secrets
# Rook watches for changes to the rook-operator-config configmap
- configmaps
verbs:
- get
- list
- watch
- apiGroups:
- ""
resources:
# Rook creates events for its custom resources
- events
# Rook creates PVs and PVCs for OSDs managed by the Rook provisioner
- persistentvolumes
- persistentvolumeclaims
# Rook creates endpoints for mgr and object store access
- endpoints
verbs:
- get
- list
- watch
- patch
- create
- update
- delete
- apiGroups:
- storage.k8s.io
resources:
- storageclasses
verbs:
- get
- list
- watch
- apiGroups:
- batch
resources:
- jobs
- cronjobs
verbs:
- get
- list
- watch
- create
- update
- delete
- deletecollection
# The Rook operator must be able to watch all ceph.rook.io resources to reconcile them.
- apiGroups: ["ceph.rook.io"]
resources:
- cephclients
- cephclusters
- cephblockpools
- cephfilesystems
- cephnfses
- cephobjectstores
- cephobjectstoreusers
- cephobjectrealms
- cephobjectzonegroups
- cephobjectzones
- cephbuckettopics
- cephbucketnotifications
- cephrbdmirrors
- cephfilesystemmirrors
- cephfilesystemsubvolumegroups
- cephblockpoolradosnamespaces
- cephcosidrivers
verbs:
- get
- list
- watch
# Ideally the update permission is not required, but Rook needs it to add finalizers to resources.
- update
# Rook must have update access to status subresources for its custom resources.
- apiGroups: ["ceph.rook.io"]
resources:
- cephclients/status
- cephclusters/status
- cephblockpools/status
- cephfilesystems/status
- cephnfses/status
- cephobjectstores/status
- cephobjectstoreusers/status
- cephobjectrealms/status
- cephobjectzonegroups/status
- cephobjectzones/status
- cephbuckettopics/status
- cephbucketnotifications/status
- cephrbdmirrors/status
- cephfilesystemmirrors/status
- cephfilesystemsubvolumegroups/status
- cephblockpoolradosnamespaces/status
verbs: ["update"]
# The "*/finalizers" permission may need to be granted explicitly for K8s clusters where
# OwnerReferencesPermissionEnforcement is enabled, so that Rook can set blockOwnerDeletion on
# resources owned by Rook CRs (e.g., a Secret owned by an OSD Deployment); a sketch follows this
# ClusterRole. See more:
# https://kubernetes.io/docs/reference/access-authn-authz/_print/#ownerreferencespermissionenforcement
- apiGroups: ["ceph.rook.io"]
resources:
- cephclients/finalizers
- cephclusters/finalizers
- cephblockpools/finalizers
- cephfilesystems/finalizers
- cephnfses/finalizers
- cephobjectstores/finalizers
- cephobjectstoreusers/finalizers
- cephobjectrealms/finalizers
- cephobjectzonegroups/finalizers
- cephobjectzones/finalizers
- cephbuckettopics/finalizers
- cephbucketnotifications/finalizers
- cephrbdmirrors/finalizers
- cephfilesystemmirrors/finalizers
- cephfilesystemsubvolumegroups/finalizers
- cephblockpoolradosnamespaces/finalizers
verbs: ["update"]
- apiGroups:
- policy
- apps
- extensions
resources:
# This is for the clusterdisruption controller
- poddisruptionbudgets
# This is for both clusterdisruption and nodedrain controllers
- deployments
- replicasets
verbs:
- get
- list
- watch
- create
- update
- delete
- deletecollection
- apiGroups:
- apps
resources:
# This is to add osd deployment owner ref on key rotation
# cron jobs.
- deployments/finalizers
verbs:
- update
- apiGroups:
- healthchecking.openshift.io
resources:
- machinedisruptionbudgets
verbs:
- get
- list
- watch
- create
- update
- delete
- apiGroups:
- machine.openshift.io
resources:
- machines
verbs:
- get
- list
- watch
- create
- update
- delete
- apiGroups:
- storage.k8s.io
resources:
- csidrivers
verbs:
- create
- delete
- get
- update
- apiGroups:
- k8s.cni.cncf.io
resources:
- network-attachment-definitions
verbs:
- get
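
A sketch of what the finalizers permission above enables (all names and the uid are hypothetical): an object owned by a CephCluster carries an ownerReference with `blockOwnerDeletion`, and writing that field is validated against update access on `cephclusters/finalizers` when OwnerReferencesPermissionEnforcement is on.

    # hypothetical Secret owned by a CephCluster
    apiVersion: v1
    kind: Secret
    metadata:
      name: example-owned-secret
      ownerReferences:
        - apiVersion: ceph.rook.io/v1
          kind: CephCluster
          name: my-cluster
          uid: 5c4f2a0e-1111-2222-3333-444444444444
          controller: true
          blockOwnerDeletion: true
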
---
# Aspects of ceph-mgr that require cluster-wide access
kind: ClusterRole
apiVersion: rbac.authorization.k8s.io/v1
metadata:
name: rook-ceph-mgr-cluster
labels:
operator: rook
storage-backend: ceph
{{- include "library.rook-ceph.labels" . | nindent 4 }}
rules:
- apiGroups:
- ""
resources:
- configmaps
- nodes
- nodes/proxy
- persistentvolumes
verbs:
- get
- list
- watch
- apiGroups:
- ""
resources:
- events
verbs:
- create
- patch
- list
- get
- watch
- apiGroups:
- storage.k8s.io
resources:
- storageclasses
verbs:
- get
- list
- watch
---
# Aspects of ceph-mgr that require access to the system namespace
kind: ClusterRole
apiVersion: rbac.authorization.k8s.io/v1
metadata:
name: rook-ceph-mgr-system
rules:
- apiGroups:
- ""
resources:
- configmaps
verbs:
- get
- list
- watch
---
# Used for provisioning ObjectBuckets (OBs) in response to ObjectBucketClaims (OBCs).
# Note: Rook runs a copy of the lib-bucket-provisioner's OBC controller.
# OBCs can be created in any Kubernetes namespace, so this must be a cluster-scoped role.
kind: ClusterRole
apiVersion: rbac.authorization.k8s.io/v1
metadata:
name: rook-ceph-object-bucket
labels:
operator: rook
storage-backend: ceph
{{- include "library.rook-ceph.labels" . | nindent 4 }}
rules:
- apiGroups: [""]
resources: ["secrets", "configmaps"]
verbs:
# OBC controller creates secrets and configmaps containing information for users about how to
# connect to object buckets. It deletes them when an OBC is deleted.
- get
- create
- update
- delete
- apiGroups: ["storage.k8s.io"]
resources: ["storageclasses"]
verbs:
# OBC controller gets parameters from the OBC's storageclass
# Rook gets additional parameters from the OBC's storageclass
- get
- apiGroups: ["objectbucket.io"]
resources: ["objectbucketclaims"]
verbs:
# OBC controller needs to list/watch OBCs and get latest version of a reconciled OBC
- list
- watch
- get
# Ideally, update should not be needed, but the OBC controller updates the OBC with bucket
# information outside of the status subresource
- update
# OBC controller does not delete OBCs; users do this
- apiGroups: ["objectbucket.io"]
resources: ["objectbuckets"]
verbs:
# OBC controller needs to list/watch OBs and get latest version of a reconciled OB
- list
- watch
- get
# OBC controller creates an OB when an OBC's bucket has been provisioned by Ceph, updates them
# when an OBC is updated, and deletes them when the OBC is de-provisioned.
- create
- update
- delete
- apiGroups: ["objectbucket.io"]
resources: ["objectbucketclaims/status", "objectbuckets/status"]
verbs:
# OBC controller updates OBC and OB statuses
- update
- apiGroups: ["objectbucket.io"]
# This does not strictly allow the OBC/OB controllers to update finalizers. That is handled by
# the direct "update" permissions above. Instead, this allows Rook's controller to create
# resources which are owned by OBs/OBCs and where blockOwnerDeletion is set.
resources: ["objectbucketclaims/finalizers", "objectbuckets/finalizers"]
verbs:
- update
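
For reference, the shape of the claims this role reconciles; a minimal hypothetical ObjectBucketClaim (the storage class would be one backed by a CephObjectStore):

    apiVersion: objectbucket.io/v1alpha1
    kind: ObjectBucketClaim
    metadata:
      name: my-bucket-claim              # hypothetical
    spec:
      generateBucketName: my-bucket
      storageClassName: rook-ceph-bucket # hypothetical object-store storage class
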
---
kind: ClusterRole
apiVersion: rbac.authorization.k8s.io/v1
metadata:
name: rook-ceph-osd
rules:
- apiGroups:
- ""
resources:
- nodes
verbs:
- get
- list
---
# TODO: remove this, once https://github.com/rook/rook/issues/10141
# is resolved.
kind: ClusterRole
apiVersion: rbac.authorization.k8s.io/v1
metadata:
name: cephfs-csi-nodeplugin
rules:
- apiGroups: [""]
resources: ["nodes"]
verbs: ["get"]
---
{{- if .Values.csi.nfs.enabled }}
kind: ClusterRole
apiVersion: rbac.authorization.k8s.io/v1
metadata:
name: ceph-nfs-external-provisioner-runner
rules:
- apiGroups: [""]
resources: ["persistentvolumes"]
verbs: ["get", "list", "watch", "create", "delete", "patch"]
- apiGroups: [""]
resources: ["persistentvolumeclaims"]
verbs: ["get", "list", "watch", "patch", "update"]
- apiGroups: ["storage.k8s.io"]
resources: ["storageclasses"]
verbs: ["get", "list", "watch"]
- apiGroups: [""]
resources: ["events"]
verbs: ["get", "list", "watch", "create", "update", "patch"]
- apiGroups: ["storage.k8s.io"]
resources: ["csinodes"]
verbs: ["get", "list", "watch"]
- apiGroups: [""]
resources: ["nodes"]
verbs: ["get", "list", "watch"]
- apiGroups: ["coordination.k8s.io"]
resources: ["leases"]
verbs: ["get", "list", "watch", "create", "update", "patch"]
- apiGroups: [""]
resources: ["secrets"]
verbs: ["get"]
- apiGroups: ["snapshot.storage.k8s.io"]
resources: ["volumesnapshotclasses"]
verbs: ["get", "list", "watch"]
- apiGroups: ["snapshot.storage.k8s.io"]
resources: ["volumesnapshotcontents"]
verbs: ["get", "list", "watch", "update", "patch"]
- apiGroups: ["snapshot.storage.k8s.io"]
resources: ["volumesnapshotcontents/status"]
verbs: ["update", "patch"]
- apiGroups: ["snapshot.storage.k8s.io"]
resources: ["volumesnapshots"]
verbs: ["get", "list"]
- apiGroups: [""]
resources: ["persistentvolumeclaims/status"]
verbs: ["patch"]
- apiGroups: ["storage.k8s.io"]
resources: ["volumeattachments"]
verbs: ["get", "list", "watch", "patch"]
- apiGroups: ["storage.k8s.io"]
resources: ["volumeattachments/status"]
verbs: ["patch"]
---
# TODO: remove this, once https://github.com/rook/rook/issues/10141
# is resolved.
kind: ClusterRole
apiVersion: rbac.authorization.k8s.io/v1
metadata:
name: ceph-nfs-csi-nodeplugin
labels:
operator: rook
storage-backend: ceph
{{- include "library.rook-ceph.labels" . | nindent 4 }}
rules:
- apiGroups: [""]
resources: ["nodes"]
verbs: ["get"]
---
{{ end }}
kind: ClusterRole
apiVersion: rbac.authorization.k8s.io/v1
metadata:
name: cephfs-external-provisioner-runner
rules:
- apiGroups: [""]
resources: ["secrets"]
verbs: ["get", "list"]
- apiGroups: [""]
resources: ["nodes"]
verbs: ["get", "list", "watch"]
- apiGroups: [""]
resources: ["persistentvolumes"]
verbs: ["get", "list", "watch", "create", "delete", "patch"]
- apiGroups: [""]
resources: ["persistentvolumeclaims"]
verbs: ["get", "list", "watch", "patch", "update"]
- apiGroups: ["storage.k8s.io"]
resources: ["storageclasses"]
verbs: ["get", "list", "watch"]
- apiGroups: [""]
resources: ["events"]
verbs: ["list", "watch", "create", "update", "patch"]
- apiGroups: ["storage.k8s.io"]
resources: ["volumeattachments"]
verbs: ["get", "list", "watch", "patch"]
- apiGroups: ["storage.k8s.io"]
resources: ["volumeattachments/status"]
verbs: ["patch"]
- apiGroups: [""]
resources: ["persistentvolumeclaims/status"]
verbs: ["patch"]
- apiGroups: ["snapshot.storage.k8s.io"]
resources: ["volumesnapshots"]
verbs: ["get", "list"]
- apiGroups: ["snapshot.storage.k8s.io"]
resources: ["volumesnapshotclasses"]
verbs: ["get", "list", "watch"]
- apiGroups: ["snapshot.storage.k8s.io"]
resources: ["volumesnapshotcontents"]
verbs: [ "get", "list", "watch", "patch", "update"]
- apiGroups: ["snapshot.storage.k8s.io"]
resources: ["volumesnapshotcontents/status"]
verbs: ["update", "patch"]
---
kind: ClusterRole
apiVersion: rbac.authorization.k8s.io/v1
metadata:
name: rbd-csi-nodeplugin
labels:
operator: rook
storage-backend: ceph
{{- include "library.rook-ceph.labels" . | nindent 4 }}
rules:
- apiGroups: [""]
resources: ["secrets"]
verbs: ["get", "list"]
- apiGroups: [""]
resources: ["persistentvolumes"]
verbs: ["get", "list"]
- apiGroups: ["storage.k8s.io"]
resources: ["volumeattachments"]
verbs: ["get", "list"]
- apiGroups: [""]
resources: ["configmaps"]
verbs: ["get"]
- apiGroups: [""]
resources: ["serviceaccounts"]
verbs: ["get"]
- apiGroups: [""]
resources: ["serviceaccounts/token"]
verbs: ["create"]
- apiGroups: [""]
resources: ["nodes"]
verbs: ["get"]
---
kind: ClusterRole
apiVersion: rbac.authorization.k8s.io/v1
metadata:
name: rbd-external-provisioner-runner
rules:
- apiGroups: [""]
resources: ["secrets"]
verbs: ["get", "list", "watch"]
- apiGroups: [""]
resources: ["persistentvolumes"]
verbs: ["get", "list", "watch", "create", "delete", "patch"]
- apiGroups: [""]
resources: ["persistentvolumeclaims"]
verbs: ["get", "list", "watch", "update"]
- apiGroups: ["storage.k8s.io"]
resources: ["storageclasses"]
verbs: ["get", "list", "watch"]
- apiGroups: [""]
resources: ["events"]
verbs: ["list", "watch", "create", "update", "patch"]
- apiGroups: ["storage.k8s.io"]
resources: ["volumeattachments"]
verbs: ["get", "list", "watch", "patch"]
- apiGroups: ["storage.k8s.io"]
resources: ["volumeattachments/status"]
verbs: ["patch"]
- apiGroups: [""]
resources: ["nodes"]
verbs: ["get", "list", "watch"]
- apiGroups: ["storage.k8s.io"]
resources: ["csinodes"]
verbs: ["get", "list", "watch"]
- apiGroups: [""]
resources: ["persistentvolumeclaims/status"]
verbs: ["patch"]
- apiGroups: ["snapshot.storage.k8s.io"]
resources: ["volumesnapshots"]
verbs: ["get", "list", "watch"]
- apiGroups: ["snapshot.storage.k8s.io"]
resources: ["volumesnapshotclasses"]
verbs: ["get", "list", "watch"]
- apiGroups: ["snapshot.storage.k8s.io"]
resources: ["volumesnapshotcontents"]
verbs: [ "get", "list", "watch", "patch", "update"]
- apiGroups: ["snapshot.storage.k8s.io"]
resources: ["volumesnapshotcontents/status"]
verbs: ["update", "patch"]
- apiGroups: [""]
resources: ["configmaps"]
verbs: ["get"]
- apiGroups: [""]
resources: ["serviceaccounts"]
verbs: ["get"]
- apiGroups: [""]
resources: ["serviceaccounts/token"]
verbs: ["create"]
- apiGroups: [""]
resources: ["nodes"]
verbs: ["get", "list", "watch"]
- apiGroups: ["storage.k8s.io"]
resources: ["csinodes"]
verbs: ["get", "list", "watch"]
---
kind: ClusterRole
apiVersion: rbac.authorization.k8s.io/v1
metadata:
name: objectstorage-provisioner-role
labels:
app.kubernetes.io/part-of: container-object-storage-interface
app.kubernetes.io/component: driver-ceph
app.kubernetes.io/name: cosi-driver-ceph
rules:
- apiGroups: ["objectstorage.k8s.io"]
resources:
[
"buckets",
"bucketaccesses",
"bucketclaims",
"bucketaccessclasses",
"buckets/status",
"bucketaccesses/status",
"bucketclaims/status",
"bucketaccessclasses/status",
]
verbs: ["get", "list", "watch", "update", "create", "delete"]
- apiGroups: ["coordination.k8s.io"]
resources: ["leases"]
verbs: ["get", "watch", "list", "delete", "update", "create"]
- apiGroups: [""]
resources: ["secrets", "events"]
verbs: ["get", "delete", "update", "create"]
{{- end }}

@@ -0,0 +1,152 @@
{{- if .Values.rbacEnable }}
kind: ClusterRoleBinding
apiVersion: rbac.authorization.k8s.io/v1
metadata:
name: rook-ceph-system
labels:
operator: rook
storage-backend: ceph
{{- include "library.rook-ceph.labels" . | nindent 4 }}
roleRef:
apiGroup: rbac.authorization.k8s.io
kind: ClusterRole
name: rook-ceph-system
subjects:
- kind: ServiceAccount
name: rook-ceph-system
namespace: {{ .Release.Namespace }} # namespace:operator
---
# Grant the rook system daemons cluster-wide access to manage the Rook CRDs, PVCs, and storage classes
kind: ClusterRoleBinding
apiVersion: rbac.authorization.k8s.io/v1
metadata:
name: rook-ceph-global
labels:
operator: rook
storage-backend: ceph
{{- include "library.rook-ceph.labels" . | nindent 4 }}
roleRef:
apiGroup: rbac.authorization.k8s.io
kind: ClusterRole
name: rook-ceph-global
subjects:
- kind: ServiceAccount
name: rook-ceph-system
namespace: {{ .Release.Namespace }} # namespace:operator
---
kind: ClusterRoleBinding
# Give Rook-Ceph Operator permissions to provision ObjectBuckets in response to ObjectBucketClaims.
apiVersion: rbac.authorization.k8s.io/v1
metadata:
name: rook-ceph-object-bucket
roleRef:
apiGroup: rbac.authorization.k8s.io
kind: ClusterRole
name: rook-ceph-object-bucket
subjects:
- kind: ServiceAccount
name: rook-ceph-system
namespace: {{ .Release.Namespace }} # namespace:operator
---
kind: ClusterRoleBinding
apiVersion: rbac.authorization.k8s.io/v1
metadata:
name: rbd-csi-nodeplugin
subjects:
- kind: ServiceAccount
name: rook-csi-rbd-plugin-sa
namespace: {{ .Release.Namespace }} # namespace:operator
roleRef:
kind: ClusterRole
name: rbd-csi-nodeplugin
apiGroup: rbac.authorization.k8s.io
---
kind: ClusterRoleBinding
apiVersion: rbac.authorization.k8s.io/v1
metadata:
name: cephfs-csi-provisioner-role
subjects:
- kind: ServiceAccount
name: rook-csi-cephfs-provisioner-sa
namespace: {{ .Release.Namespace }} # namespace:operator
roleRef:
kind: ClusterRole
name: cephfs-external-provisioner-runner
apiGroup: rbac.authorization.k8s.io
---
# This is required by operator-sdk to map the ClusterRole/ClusterRoleBindings to a ServiceAccount;
# otherwise operator-sdk will create an individual file for these.
kind: ClusterRoleBinding
apiVersion: rbac.authorization.k8s.io/v1
metadata:
name: cephfs-csi-nodeplugin-role
subjects:
- kind: ServiceAccount
name: rook-csi-cephfs-plugin-sa
namespace: {{ .Release.Namespace }} # namespace:operator
roleRef:
kind: ClusterRole
name: cephfs-csi-nodeplugin
apiGroup: rbac.authorization.k8s.io
---
{{- if .Values.csi.nfs.enabled }}
kind: ClusterRoleBinding
apiVersion: rbac.authorization.k8s.io/v1
metadata:
name: ceph-nfs-csi-provisioner-role
subjects:
- kind: ServiceAccount
name: rook-csi-nfs-provisioner-sa
namespace: {{ .Release.Namespace }} # namespace:operator
roleRef:
kind: ClusterRole
name: ceph-nfs-external-provisioner-runner
apiGroup: rbac.authorization.k8s.io
---
# TODO: remove this, once https://github.com/rook/rook/issues/10141
# is resolved.
kind: ClusterRoleBinding
apiVersion: rbac.authorization.k8s.io/v1
metadata:
name: ceph-nfs-csi-nodeplugin-role
subjects:
- kind: ServiceAccount
name: rook-csi-nfs-plugin-sa
namespace: {{ .Release.Namespace }} # namespace:operator
roleRef:
kind: ClusterRole
name: ceph-nfs-csi-nodeplugin
apiGroup: rbac.authorization.k8s.io
---
{{ end }}
kind: ClusterRoleBinding
apiVersion: rbac.authorization.k8s.io/v1
metadata:
name: rbd-csi-provisioner-role
subjects:
- kind: ServiceAccount
name: rook-csi-rbd-provisioner-sa
namespace: {{ .Release.Namespace }} # namespace:operator
roleRef:
kind: ClusterRole
name: rbd-external-provisioner-runner
apiGroup: rbac.authorization.k8s.io
---
# RBAC for ceph cosi driver service account
kind: ClusterRoleBinding
apiVersion: rbac.authorization.k8s.io/v1
metadata:
name: objectstorage-provisioner-role-binding
labels:
app.kubernetes.io/part-of: container-object-storage-interface
app.kubernetes.io/component: driver-ceph
app.kubernetes.io/name: cosi-driver-ceph
subjects:
- kind: ServiceAccount
name: objectstorage-provisioner
namespace: {{ .Release.Namespace }} # namespace:operator
roleRef:
kind: ClusterRole
name: objectstorage-provisioner-role
apiGroup: rbac.authorization.k8s.io
{{- end }}

@@ -0,0 +1,245 @@
# Operator settings that can be updated without an operator restart
# Operator settings that require an operator restart are found in the operator env vars
kind: ConfigMap
apiVersion: v1
metadata:
name: rook-ceph-operator-config
data:
ROOK_LOG_LEVEL: {{ .Values.logLevel | quote }}
ROOK_CEPH_COMMANDS_TIMEOUT_SECONDS: {{ .Values.cephCommandsTimeoutSeconds | quote }}
ROOK_OBC_WATCH_OPERATOR_NAMESPACE: {{ .Values.enableOBCWatchOperatorNamespace | quote }}
ROOK_CEPH_ALLOW_LOOP_DEVICES: {{ .Values.allowLoopDevices | quote }}
ROOK_DISABLE_ADMISSION_CONTROLLER: {{ .Values.disableAdmissionController | quote }}
ROOK_ENABLE_DISCOVERY_DAEMON: {{ .Values.enableDiscoveryDaemon | quote }}
{{- if .Values.discoverDaemonUdev }}
DISCOVER_DAEMON_UDEV_BLACKLIST: {{ .Values.discoverDaemonUdev | quote }}
{{- end }}
{{- if .Values.csi }}
ROOK_CSI_ENABLE_RBD: {{ .Values.csi.enableRbdDriver | quote }}
ROOK_CSI_ENABLE_CEPHFS: {{ .Values.csi.enableCephfsDriver | quote }}
CSI_ENABLE_CEPHFS_SNAPSHOTTER: {{ .Values.csi.enableCephfsSnapshotter | quote }}
CSI_ENABLE_NFS_SNAPSHOTTER: {{ .Values.csi.enableNFSSnapshotter | quote }}
CSI_ENABLE_RBD_SNAPSHOTTER: {{ .Values.csi.enableRBDSnapshotter | quote }}
CSI_PLUGIN_ENABLE_SELINUX_HOST_MOUNT: {{ .Values.csi.enablePluginSelinuxHostMount | quote }}
CSI_ENABLE_ENCRYPTION: {{ .Values.csi.enableCSIEncryption | quote }}
CSI_ENABLE_OMAP_GENERATOR: {{ .Values.csi.enableOMAPGenerator | quote }}
CSI_ENABLE_HOST_NETWORK: {{ .Values.csi.enableCSIHostNetwork | quote }}
CSI_ENABLE_METADATA: {{ .Values.csi.enableMetadata | quote }}
{{- if .Values.csi.pluginPriorityClassName }}
CSI_PLUGIN_PRIORITY_CLASSNAME: {{ .Values.csi.pluginPriorityClassName | quote }}
{{- end }}
{{- if .Values.csi.provisionerPriorityClassName }}
CSI_PROVISIONER_PRIORITY_CLASSNAME: {{ .Values.csi.provisionerPriorityClassName | quote }}
{{- end }}
{{- if .Values.csi.cephFSPluginUpdateStrategy }}
CSI_CEPHFS_PLUGIN_UPDATE_STRATEGY: {{ .Values.csi.cephFSPluginUpdateStrategy | quote }}
{{- end }}
{{- if .Values.csi.cephFSPluginUpdateStrategyMaxUnavailable }}
CSI_CEPHFS_PLUGIN_UPDATE_STRATEGY_MAX_UNAVAILABLE: {{ .Values.csi.cephFSPluginUpdateStrategyMaxUnavailable | quote }}
{{- end }}
{{- if .Values.csi.nfsPluginUpdateStrategy }}
CSI_NFS_PLUGIN_UPDATE_STRATEGY: {{ .Values.csi.nfsPluginUpdateStrategy | quote }}
{{- end }}
{{- if .Values.csi.rbdFSGroupPolicy }}
CSI_RBD_FSGROUPPOLICY: {{ .Values.csi.rbdFSGroupPolicy | quote }}
{{- end }}
{{- if .Values.csi.cephFSFSGroupPolicy }}
CSI_CEPHFS_FSGROUPPOLICY: {{ .Values.csi.cephFSFSGroupPolicy | quote }}
{{- end }}
{{- if .Values.csi.nfsFSGroupPolicy }}
CSI_NFS_FSGROUPPOLICY: {{ .Values.csi.nfsFSGroupPolicy | quote }}
{{- end }}
{{- if .Values.csi.rbdPluginUpdateStrategy }}
CSI_RBD_PLUGIN_UPDATE_STRATEGY: {{ .Values.csi.rbdPluginUpdateStrategy | quote }}
{{- end }}
{{- if .Values.csi.cephFSKernelMountOptions }}
CSI_CEPHFS_KERNEL_MOUNT_OPTIONS: {{ .Values.csi.cephFSKernelMountOptions | quote }}
{{- end }}
{{- if .Values.csi.rbdPluginUpdateStrategyMaxUnavailable }}
CSI_RBD_PLUGIN_UPDATE_STRATEGY_MAX_UNAVAILABLE: {{ .Values.csi.rbdPluginUpdateStrategyMaxUnavailable | quote }}
{{- end }}
{{- if .Values.csi.kubeletDirPath }}
ROOK_CSI_KUBELET_DIR_PATH: {{ .Values.csi.kubeletDirPath | quote }}
{{- end }}
ROOK_CSI_ENABLE_GRPC_METRICS: {{ .Values.csi.enableGrpcMetrics | quote }}
{{- if .Values.csi.cephcsi }}
{{- if .Values.csi.cephcsi.image }}
ROOK_CSI_CEPH_IMAGE: {{ .Values.csi.cephcsi.image | quote }}
{{- end }}
{{- end }}
{{- if .Values.csi.registrar }}
{{- if .Values.csi.registrar.image }}
ROOK_CSI_REGISTRAR_IMAGE: {{ .Values.csi.registrar.image | quote }}
{{- end }}
{{- end }}
{{- if .Values.csi.provisioner }}
{{- if .Values.csi.provisioner.image }}
ROOK_CSI_PROVISIONER_IMAGE: {{ .Values.csi.provisioner.image | quote }}
{{- end }}
{{- end }}
{{- if .Values.csi.snapshotter }}
{{- if .Values.csi.snapshotter.image }}
ROOK_CSI_SNAPSHOTTER_IMAGE: {{ .Values.csi.snapshotter.image | quote }}
{{- end }}
{{- end }}
{{- if .Values.csi.attacher }}
{{- if .Values.csi.attacher.image }}
ROOK_CSI_ATTACHER_IMAGE: {{ .Values.csi.attacher.image | quote }}
{{- end }}
{{- end }}
{{- if .Values.csi.resizer }}
{{- if .Values.csi.resizer.image }}
ROOK_CSI_RESIZER_IMAGE: {{ .Values.csi.resizer.image | quote }}
{{- end }}
{{- end }}
{{- if .Values.csi.imagePullPolicy }}
ROOK_CSI_IMAGE_PULL_POLICY: {{ .Values.csi.imagePullPolicy | quote }}
{{- end }}
{{- if .Values.csi.csiAddons }}
CSI_ENABLE_CSIADDONS: {{ .Values.csi.csiAddons.enabled | quote }}
{{- if .Values.csi.csiAddons.image }}
ROOK_CSIADDONS_IMAGE: {{ .Values.csi.csiAddons.image | quote }}
{{- end }}
{{- end }}
{{- if .Values.csi.topology }}
CSI_ENABLE_TOPOLOGY: {{ .Values.csi.topology.enabled | quote }}
{{- if .Values.csi.topology.domainLabels }}
CSI_TOPOLOGY_DOMAIN_LABELS: {{ .Values.csi.topology.domainLabels | join "," }}
{{- end }}
{{- end }}
{{- if .Values.csi.readAffinity }}
CSI_ENABLE_READ_AFFINITY: {{ .Values.csi.readAffinity.enabled | quote }}
{{- if .Values.csi.readAffinity.crushLocationLabels }}
CSI_CRUSH_LOCATION_LABELS: {{ .Values.csi.readAffinity.crushLocationLabels | join "," }}
{{- end }}
{{- end }}
{{- if .Values.csi.nfs }}
ROOK_CSI_ENABLE_NFS: {{ .Values.csi.nfs.enabled | quote }}
{{- end }}
{{- if .Values.csi.cephfsPodLabels }}
ROOK_CSI_CEPHFS_POD_LABELS: {{ .Values.csi.cephfsPodLabels | quote }}
{{- end }}
{{- if .Values.csi.nfsPodLabels }}
ROOK_CSI_NFS_POD_LABELS: {{ .Values.csi.nfsPodLabels | quote }}
{{- end }}
{{- if .Values.csi.rbdPodLabels }}
ROOK_CSI_RBD_POD_LABELS: {{ .Values.csi.rbdPodLabels | quote }}
{{- end }}
{{- if .Values.csi.provisionerTolerations }}
CSI_PROVISIONER_TOLERATIONS: {{ toYaml .Values.csi.provisionerTolerations | quote }}
{{- end }}
{{- if .Values.csi.provisionerNodeAffinity }}
CSI_PROVISIONER_NODE_AFFINITY: {{ .Values.csi.provisionerNodeAffinity }}
{{- end }}
{{- if .Values.csi.rbdProvisionerTolerations }}
CSI_RBD_PROVISIONER_TOLERATIONS: {{ toYaml .Values.csi.rbdProvisionerTolerations | quote }}
{{- end }}
{{- if .Values.csi.rbdProvisionerNodeAffinity }}
CSI_RBD_PROVISIONER_NODE_AFFINITY: {{ .Values.csi.rbdProvisionerNodeAffinity }}
{{- end }}
{{- if .Values.csi.cephFSProvisionerTolerations }}
CSI_CEPHFS_PROVISIONER_TOLERATIONS: {{ toYaml .Values.csi.cephFSProvisionerTolerations | quote }}
{{- end }}
{{- if .Values.csi.cephFSProvisionerNodeAffinity }}
CSI_CEPHFS_PROVISIONER_NODE_AFFINITY: {{ .Values.csi.cephFSProvisionerNodeAffinity }}
{{- end }}
{{- if .Values.csi.nfsProvisionerTolerations }}
CSI_NFS_PROVISIONER_TOLERATIONS: {{ toYaml .Values.csi.nfsProvisionerTolerations | quote }}
{{- end }}
{{- if .Values.csi.nfsProvisionerNodeAffinity }}
CSI_NFS_PROVISIONER_NODE_AFFINITY: {{ .Values.csi.nfsProvisionerNodeAffinity }}
{{- end }}
{{- if .Values.csi.allowUnsupportedVersion }}
ROOK_CSI_ALLOW_UNSUPPORTED_VERSION: {{ .Values.csi.allowUnsupportedVersion | quote }}
{{- end }}
{{- if .Values.csi.pluginTolerations }}
CSI_PLUGIN_TOLERATIONS: {{ toYaml .Values.csi.pluginTolerations | quote }}
{{- end }}
{{- if .Values.csi.pluginNodeAffinity }}
CSI_PLUGIN_NODE_AFFINITY: {{ .Values.csi.pluginNodeAffinity }}
{{- end }}
{{- if .Values.csi.rbdPluginTolerations }}
CSI_RBD_PLUGIN_TOLERATIONS: {{ toYaml .Values.csi.rbdPluginTolerations | quote }}
{{- end }}
{{- if .Values.csi.rbdPluginNodeAffinity }}
CSI_RBD_PLUGIN_NODE_AFFINITY: {{ .Values.csi.rbdPluginNodeAffinity }}
{{- end }}
{{- if .Values.csi.cephFSPluginTolerations }}
CSI_CEPHFS_PLUGIN_TOLERATIONS: {{ toYaml .Values.csi.cephFSPluginTolerations | quote }}
{{- end }}
{{- if .Values.csi.cephFSPluginNodeAffinity }}
CSI_CEPHFS_PLUGIN_NODE_AFFINITY: {{ .Values.csi.cephFSPluginNodeAffinity }}
{{- end }}
{{- if .Values.csi.nfsPluginTolerations }}
CSI_NFS_PLUGIN_TOLERATIONS: {{ toYaml .Values.csi.nfsPluginTolerations | quote }}
{{- end }}
{{- if .Values.csi.nfsPluginNodeAffinity }}
CSI_NFS_PLUGIN_NODE_AFFINITY: {{ .Values.csi.nfsPluginNodeAffinity }}
{{- end }}
{{- if .Values.csi.cephfsGrpcMetricsPort }}
CSI_CEPHFS_GRPC_METRICS_PORT: {{ .Values.csi.cephfsGrpcMetricsPort | quote }}
{{- end }}
{{- if .Values.csi.cephfsLivenessMetricsPort }}
CSI_CEPHFS_LIVENESS_METRICS_PORT: {{ .Values.csi.cephfsLivenessMetricsPort | quote }}
{{- end }}
{{- if .Values.csi.enableLiveness }}
CSI_ENABLE_LIVENESS: {{ .Values.csi.enableLiveness | quote }}
{{- end }}
{{- if .Values.csi.rbdGrpcMetricsPort }}
CSI_RBD_GRPC_METRICS_PORT: {{ .Values.csi.rbdGrpcMetricsPort | quote }}
{{- end }}
{{- if .Values.csi.rbdLivenessMetricsPort }}
CSI_RBD_LIVENESS_METRICS_PORT: {{ .Values.csi.rbdLivenessMetricsPort | quote }}
{{- end }}
{{- if .Values.csi.forceCephFSKernelClient }}
CSI_FORCE_CEPHFS_KERNEL_CLIENT: {{ .Values.csi.forceCephFSKernelClient | quote }}
{{- end }}
{{- if .Values.csi.logLevel }}
CSI_LOG_LEVEL: {{ .Values.csi.logLevel | quote }}
{{- end }}
{{- if .Values.csi.sidecarLogLevel }}
CSI_SIDECAR_LOG_LEVEL: {{ .Values.csi.sidecarLogLevel | quote }}
{{- end }}
{{- if .Values.csi.clusterName }}
CSI_CLUSTER_NAME: {{ .Values.csi.clusterName | quote }}
{{- end }}
{{- if .Values.csi.grpcTimeoutInSeconds }}
CSI_GRPC_TIMEOUT_SECONDS: {{ .Values.csi.grpcTimeoutInSeconds | quote }}
{{- end }}
{{- if .Values.csi.provisionerReplicas }}
CSI_PROVISIONER_REPLICAS: {{ .Values.csi.provisionerReplicas | quote }}
{{- end }}
{{- if .Values.csi.csiRBDProvisionerResource }}
CSI_RBD_PROVISIONER_RESOURCE: {{ .Values.csi.csiRBDProvisionerResource | quote }}
{{- end }}
{{- if .Values.csi.csiRBDPluginResource }}
CSI_RBD_PLUGIN_RESOURCE: {{ .Values.csi.csiRBDPluginResource | quote }}
{{- end }}
{{- if .Values.csi.csiCephFSProvisionerResource }}
CSI_CEPHFS_PROVISIONER_RESOURCE: {{ .Values.csi.csiCephFSProvisionerResource | quote }}
{{- end }}
{{- if .Values.csi.csiCephFSPluginResource }}
CSI_CEPHFS_PLUGIN_RESOURCE: {{ .Values.csi.csiCephFSPluginResource | quote }}
{{- end }}
{{- if .Values.csi.csiNFSProvisionerResource }}
CSI_NFS_PROVISIONER_RESOURCE: {{ .Values.csi.csiNFSProvisionerResource | quote }}
{{- end }}
{{- if .Values.csi.csiNFSPluginResource }}
CSI_NFS_PLUGIN_RESOURCE: {{ .Values.csi.csiNFSPluginResource | quote }}
{{- end }}
{{- if .Values.csi.csiRBDPluginVolume }}
CSI_RBD_PLUGIN_VOLUME: {{ toYaml .Values.csi.csiRBDPluginVolume | quote }}
{{- end }}
{{- if .Values.csi.csiRBDPluginVolumeMount }}
CSI_RBD_PLUGIN_VOLUME_MOUNT: {{ toYaml .Values.csi.csiRBDPluginVolumeMount | quote }}
{{- end }}
{{- if .Values.csi.csiCephFSPluginVolume }}
CSI_CEPHFS_PLUGIN_VOLUME: {{ toYaml .Values.csi.csiCephFSPluginVolume | quote }}
{{- end }}
{{- if .Values.csi.csiCephFSPluginVolumeMount }}
CSI_CEPHFS_PLUGIN_VOLUME_MOUNT: {{ toYaml .Values.csi.csiCephFSPluginVolumeMount | quote }}
{{- end }}
CSI_CEPHFS_ATTACH_REQUIRED: {{ .Values.csi.cephFSAttachRequired | quote }}
CSI_RBD_ATTACH_REQUIRED: {{ .Values.csi.rbdAttachRequired | quote }}
CSI_NFS_ATTACH_REQUIRED: {{ .Values.csi.nfsAttachRequired | quote }}
{{- end }}
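
Every key in this ConfigMap maps onto a Helm value, so CSI behavior can be tuned at install time without editing the ConfigMap afterwards. A small values sketch using keys referenced above (images and ports omitted so the operator falls back to its built-in defaults):

    # values.yaml sketch
    logLevel: INFO
    csi:
      enableRbdDriver: true
      enableCephfsDriver: true
      enableCSIHostNetwork: true
      pluginPriorityClassName: system-node-critical
      provisionerPriorityClassName: system-cluster-critical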

@@ -0,0 +1,129 @@
apiVersion: apps/v1
kind: Deployment
metadata:
name: rook-ceph-operator
labels:
operator: rook
storage-backend: ceph
{{- include "library.rook-ceph.labels" . | nindent 4 }}
spec:
replicas: {{ .Values.scaleDownOperator | ternary 0 1 }}
selector:
matchLabels:
app: rook-ceph-operator
strategy:
type: Recreate
template:
metadata:
labels:
app: rook-ceph-operator
helm.sh/chart: "{{ .Chart.Name }}-{{ .Chart.Version | replace "+" "_" }}"
{{- if .Values.annotations }}
annotations:
{{ toYaml .Values.annotations | indent 8 }}
{{- end }}
spec:
{{- if .Values.priorityClassName }}
priorityClassName: {{ .Values.priorityClassName }}
{{- end }}
containers:
- name: rook-ceph-operator
image: "{{ .Values.image.repository }}:{{ .Values.image.tag }}"
imagePullPolicy: {{ .Values.image.pullPolicy }}
args: ["ceph", "operator"]
securityContext:
runAsNonRoot: true
runAsUser: 2016
runAsGroup: 2016
volumeMounts:
- mountPath: /var/lib/rook
name: rook-config
- mountPath: /etc/ceph
name: default-config-dir
- mountPath: /etc/webhook
name: webhook-cert
ports:
- containerPort: 9443
name: https-webhook
protocol: TCP
env:
- name: ROOK_CURRENT_NAMESPACE_ONLY
value: {{ .Values.currentNamespaceOnly | quote }}
{{- if .Values.discover }}
{{- if .Values.discover.toleration }}
- name: DISCOVER_TOLERATION
value: {{ .Values.discover.toleration }}
{{- end }}
{{- if .Values.discover.tolerationKey }}
- name: DISCOVER_TOLERATION_KEY
value: {{ .Values.discover.tolerationKey }}
{{- end }}
{{- if .Values.discover.tolerations }}
- name: DISCOVER_TOLERATIONS
value: {{ toYaml .Values.discover.tolerations | quote }}
{{- end }}
{{- if .Values.discover.priorityClassName }}
- name: DISCOVER_PRIORITY_CLASS_NAME
value: {{ .Values.discover.priorityClassName }}
{{- end }}
{{- if .Values.discover.nodeAffinity }}
- name: DISCOVER_AGENT_NODE_AFFINITY
value: {{ .Values.discover.nodeAffinity }}
{{- end }}
{{- if .Values.discover.podLabels }}
- name: DISCOVER_AGENT_POD_LABELS
value: {{ .Values.discover.podLabels }}
{{- end }}
{{- if .Values.discover.resources }}
- name: DISCOVER_DAEMON_RESOURCES
value: {{ .Values.discover.resources }}
{{- end }}
{{- end }}
{{- if .Capabilities.APIVersions.Has "security.openshift.io/v1" }}
- name: ROOK_HOSTPATH_REQUIRES_PRIVILEGED
value: "true"
{{- else }}
- name: ROOK_HOSTPATH_REQUIRES_PRIVILEGED
value: "{{ .Values.hostpathRequiresPrivileged }}"
{{- end }}
- name: ROOK_DISABLE_DEVICE_HOTPLUG
value: "{{ .Values.disableDeviceHotplug }}"
- name: ROOK_DISCOVER_DEVICES_INTERVAL
value: "{{ .Values.discoveryDaemonInterval }}"
- name: NODE_NAME
valueFrom:
fieldRef:
fieldPath: spec.nodeName
- name: POD_NAME
valueFrom:
fieldRef:
fieldPath: metadata.name
- name: POD_NAMESPACE
valueFrom:
fieldRef:
fieldPath: metadata.namespace
{{- if .Values.resources }}
resources:
{{ toYaml .Values.resources | indent 10 }}
{{- end }}
{{- if .Values.useOperatorHostNetwork }}
hostNetwork: true
{{- end }}
{{- if .Values.nodeSelector }}
nodeSelector:
{{ toYaml .Values.nodeSelector | indent 8 }}
{{- end }}
{{- if .Values.tolerations }}
tolerations:
{{ toYaml .Values.tolerations | indent 8 }}
{{- end }}
{{- if .Values.rbacEnable }}
serviceAccountName: rook-ceph-system
{{- end }}
volumes:
- name: rook-config
emptyDir: {}
- name: default-config-dir
emptyDir: {}
- name: webhook-cert
emptyDir: {}
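
A values sketch for the knobs this Deployment consumes; `image.repository` and `image.tag` follow the template above (the tag matching the chart's appVersion), while the resource figures are hypothetical:

    # values.yaml sketch
    image:
      repository: rook/ceph
      tag: v1.12.7
      pullPolicy: IfNotPresent
    currentNamespaceOnly: false
    resources:
      requests:
        cpu: 100m
        memory: 128Mi
      limits:
        memory: 512Mi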

@@ -0,0 +1,173 @@
{{- if .Values.pspEnable }}
{{- if semverCompare "<1.25.0-0" .Capabilities.KubeVersion.GitVersion }}
# We expect most Kubernetes teams to follow the Kubernetes docs and have these PSPs.
# * privileged (for kube-system namespace)
# * restricted (for all logged in users)
#
# PSPs are applied based on the first match alphabetically. `rook-ceph-operator` comes after
# `restricted` alphabetically, so we name this PSP `00-rook-privileged` so that it sorts near the
# top and the Rook system pods get the intended PSP. This may need to be renamed in
# environments with other `00`-prefixed PSPs.
#
# More on PSP ordering: https://kubernetes.io/docs/concepts/policy/pod-security-policy/#policy-order
apiVersion: policy/v1beta1
kind: PodSecurityPolicy
metadata:
name: 00-rook-privileged
annotations:
seccomp.security.alpha.kubernetes.io/allowedProfileNames: 'runtime/default'
seccomp.security.alpha.kubernetes.io/defaultProfileName: 'runtime/default'
spec:
privileged: true
allowedCapabilities:
# required by CSI
- SYS_ADMIN
- MKNOD
fsGroup:
rule: RunAsAny
# runAsUser, supplementalGroups - Rook needs to run some pods as root
# Ceph pods could be run as the Ceph user, but that user isn't always known ahead of time
runAsUser:
rule: RunAsAny
supplementalGroups:
rule: RunAsAny
# seLinux - seLinux context is unknown ahead of time; set if this is well-known
seLinux:
rule: RunAsAny
volumes:
# recommended minimum set
- configMap
- downwardAPI
- emptyDir
- persistentVolumeClaim
- secret
- projected
# required for Rook
- hostPath
# allowedHostPaths can be set to Rook's known host volume mount points when they are fully-known
# allowedHostPaths:
# - pathPrefix: "/run/udev" # for OSD prep
# readOnly: false
# - pathPrefix: "/dev" # for OSD prep
# readOnly: false
# - pathPrefix: "/var/lib/rook" # or whatever the dataDirHostPath value is set to
# readOnly: false
# Ceph requires host IPC for setting up encrypted devices
hostIPC: true
# Ceph OSDs need to share the same PID namespace
hostPID: true
# hostNetwork can be set to 'false' if host networking isn't used
hostNetwork: true
hostPorts:
# Ceph messenger protocol v1
- min: 6789
max: 6790 # <- support old default port
# Ceph messenger protocol v2
- min: 3300
max: 3300
# Ceph RADOS ports for OSDs, MDSes
- min: 6800
max: 7300
# # Ceph dashboard port HTTP (not recommended)
# - min: 7000
# max: 7000
# Ceph dashboard port HTTPS
- min: 8443
max: 8443
# Ceph mgr Prometheus Metrics
- min: 9283
max: 9283
# port for CSIAddons
- min: 9070
max: 9070
{{- if .Values.rbacEnable }}
---
apiVersion: rbac.authorization.k8s.io/v1
kind: ClusterRole
metadata:
name: 'psp:rook'
labels:
operator: rook
storage-backend: ceph
{{- include "library.rook-ceph.labels" . | nindent 4 }}
rules:
- apiGroups:
- policy
resources:
- podsecuritypolicies
resourceNames:
- 00-rook-privileged
verbs:
- use
---
apiVersion: rbac.authorization.k8s.io/v1
kind: ClusterRoleBinding
metadata:
name: rook-ceph-system-psp
labels:
operator: rook
storage-backend: ceph
{{- include "library.rook-ceph.labels" . | nindent 4 }}
roleRef:
apiGroup: rbac.authorization.k8s.io
kind: ClusterRole
name: 'psp:rook'
subjects:
- kind: ServiceAccount
name: rook-ceph-system
namespace: {{ .Release.Namespace }} # namespace:operator
---
apiVersion: rbac.authorization.k8s.io/v1
kind: ClusterRoleBinding
metadata:
name: rook-csi-cephfs-provisioner-sa-psp
roleRef:
apiGroup: rbac.authorization.k8s.io
kind: ClusterRole
name: 'psp:rook'
subjects:
- kind: ServiceAccount
name: rook-csi-cephfs-provisioner-sa
namespace: {{ .Release.Namespace }} # namespace:operator
---
apiVersion: rbac.authorization.k8s.io/v1
kind: ClusterRoleBinding
metadata:
name: rook-csi-cephfs-plugin-sa-psp
roleRef:
apiGroup: rbac.authorization.k8s.io
kind: ClusterRole
name: 'psp:rook'
subjects:
- kind: ServiceAccount
name: rook-csi-cephfs-plugin-sa
namespace: {{ .Release.Namespace }} # namespace:operator
---
apiVersion: rbac.authorization.k8s.io/v1
kind: ClusterRoleBinding
metadata:
name: rook-csi-rbd-plugin-sa-psp
roleRef:
apiGroup: rbac.authorization.k8s.io
kind: ClusterRole
name: 'psp:rook'
subjects:
- kind: ServiceAccount
name: rook-csi-rbd-plugin-sa
namespace: {{ .Release.Namespace }} # namespace:operator
---
apiVersion: rbac.authorization.k8s.io/v1
kind: ClusterRoleBinding
metadata:
name: rook-csi-rbd-provisioner-sa-psp
roleRef:
apiGroup: rbac.authorization.k8s.io
kind: ClusterRole
name: 'psp:rook'
subjects:
- kind: ServiceAccount
name: rook-csi-rbd-provisioner-sa
namespace: {{ .Release.Namespace }} # namespace:operator
{{- end }}
{{- end }}
{{- end }}

File diff suppressed because it is too large

View File

@ -0,0 +1,101 @@
{{- if .Values.rbacEnable }}
# Allow the operator to manage resources in its own namespace
apiVersion: rbac.authorization.k8s.io/v1
kind: Role
metadata:
name: rook-ceph-system
namespace: {{ .Release.Namespace }} # namespace:operator
labels:
operator: rook
storage-backend: ceph
{{- include "library.rook-ceph.labels" . | nindent 4 }}
rules:
- apiGroups:
- ""
resources:
- pods
- configmaps
- services
verbs:
- get
- list
- watch
- patch
- create
- update
- delete
- apiGroups:
- apps
- extensions
resources:
- daemonsets
- statefulsets
- deployments
verbs:
- get
- list
- watch
- create
- update
- delete
- deletecollection
- apiGroups:
- batch
resources:
- cronjobs
verbs:
- delete
- apiGroups:
- cert-manager.io
resources:
- certificates
- issuers
verbs:
- get
- create
- delete
- apiGroups:
- multicluster.x-k8s.io
resources:
- serviceexports
verbs:
- get
- create
---
kind: Role
apiVersion: rbac.authorization.k8s.io/v1
metadata:
name: cephfs-external-provisioner-cfg
namespace: {{ .Release.Namespace }} # namespace:operator
rules:
- apiGroups: ["coordination.k8s.io"]
resources: ["leases"]
verbs: ["get", "watch", "list", "delete", "update", "create"]
---
{{- if and .Values.csi.csiAddons .Values.csi.csiAddons.enabled }}
kind: Role
apiVersion: rbac.authorization.k8s.io/v1
metadata:
name: rbd-csi-nodeplugin
namespace: {{ .Release.Namespace }} # namespace:operator
rules:
- apiGroups: ["csiaddons.openshift.io"]
resources: ["csiaddonsnodes"]
verbs: ["create"]
---
{{- end }}
kind: Role
apiVersion: rbac.authorization.k8s.io/v1
metadata:
name: rbd-external-provisioner-cfg
namespace: {{ .Release.Namespace }} # namespace:operator
rules:
- apiGroups: ["coordination.k8s.io"]
resources: ["leases"]
verbs: ["get", "watch", "list", "delete", "update", "create"]
{{- if and .Values.csi.csiAddons .Values.csi.csiAddons.enabled }}
- apiGroups: ["csiaddons.openshift.io"]
resources: ["csiaddonsnodes"]
verbs: ["create"]
{{- end }}
{{- end }}

View File

@ -0,0 +1,64 @@
{{- if .Values.rbacEnable }}
# Grant the operator, agent, and discovery agents access to resources in the rook-ceph-system namespace
kind: RoleBinding
apiVersion: rbac.authorization.k8s.io/v1
metadata:
name: rook-ceph-system
namespace: {{ .Release.Namespace }} # namespace:operator
labels:
operator: rook
storage-backend: ceph
{{- include "library.rook-ceph.labels" . | nindent 4 }}
roleRef:
apiGroup: rbac.authorization.k8s.io
kind: Role
name: rook-ceph-system
subjects:
- kind: ServiceAccount
name: rook-ceph-system
namespace: {{ .Release.Namespace }} # namespace:operator
---
kind: RoleBinding
apiVersion: rbac.authorization.k8s.io/v1
metadata:
name: cephfs-csi-provisioner-role-cfg
namespace: {{ .Release.Namespace }} # namespace:operator
subjects:
- kind: ServiceAccount
name: rook-csi-cephfs-provisioner-sa
namespace: {{ .Release.Namespace }} # namespace:operator
roleRef:
kind: Role
name: cephfs-external-provisioner-cfg
apiGroup: rbac.authorization.k8s.io
---
{{- if and .Values.csi.csiAddons .Values.csi.csiAddons.enabled }}
kind: RoleBinding
apiVersion: rbac.authorization.k8s.io/v1
metadata:
name: rbd-csi-nodeplugin-role-cfg
namespace: {{ .Release.Namespace }} # namespace:operator
subjects:
- kind: ServiceAccount
name: rook-csi-rbd-plugin-sa
namespace: {{ .Release.Namespace }} # namespace:operator
roleRef:
kind: Role
name: rbd-csi-nodeplugin
apiGroup: rbac.authorization.k8s.io
---
{{- end }}
kind: RoleBinding
apiVersion: rbac.authorization.k8s.io/v1
metadata:
name: rbd-csi-provisioner-role-cfg
namespace: {{ .Release.Namespace }} # namespace:operator
subjects:
- kind: ServiceAccount
name: rook-csi-rbd-provisioner-sa
namespace: {{ .Release.Namespace }} # namespace:operator
roleRef:
kind: Role
name: rbd-external-provisioner-cfg
apiGroup: rbac.authorization.k8s.io
{{- end }}

View File

@ -0,0 +1,87 @@
# scc for the Rook and Ceph daemons
# for creating a cluster in OpenShift
{{- if .Capabilities.APIVersions.Has "security.openshift.io/v1" }}
kind: SecurityContextConstraints
apiVersion: security.openshift.io/v1
metadata:
name: rook-ceph
allowPrivilegedContainer: true
allowHostDirVolumePlugin: true
allowHostPID: false
# set to true if running rook with host networking enabled
allowHostNetwork: false
# set to true if running rook with the provider as host
allowHostPorts: false
priority:
allowedCapabilities: ["MKNOD"]
allowHostIPC: true
readOnlyRootFilesystem: false
# drop all default privileges
requiredDropCapabilities: ["All"]
defaultAddCapabilities: []
runAsUser:
type: RunAsAny
seLinuxContext:
type: MustRunAs
fsGroup:
type: MustRunAs
supplementalGroups:
type: RunAsAny
volumes:
- configMap
- downwardAPI
- emptyDir
- hostPath
- persistentVolumeClaim
- projected
- secret
users:
# A user needs to be added for each rook service account.
- system:serviceaccount:{{ .Release.Namespace }}:rook-ceph-system
---
# scc for the CSI driver
kind: SecurityContextConstraints
apiVersion: security.openshift.io/v1
metadata:
name: rook-ceph-csi
# To allow running privileged containers
allowPrivilegedContainer: true
# CSI daemonset pods need host networking
allowHostNetwork: true
# This needs to be set to true as we use HostPath
allowHostDirVolumePlugin: true
priority:
# SYS_ADMIN is needed for rbd to execute the rbd map command
allowedCapabilities: ["SYS_ADMIN"]
# Needed as we run a liveness container on daemonset pods
allowHostPorts: true
# Needed as we set this in the RBD plugin pod
allowHostPID: true
# Required for encryption
allowHostIPC: true
# Set to false as we write to the root filesystem inside csi containers
readOnlyRootFilesystem: false
runAsUser:
type: RunAsAny
seLinuxContext:
type: RunAsAny
fsGroup:
type: RunAsAny
supplementalGroups:
type: RunAsAny
# The type of volumes which are mounted to csi pods
volumes:
- configMap
- projected
- emptyDir
- hostPath
users:
# A user needs to be added for each rook service account.
- system:serviceaccount:{{ .Release.Namespace }}:rook-csi-rbd-plugin-sa
- system:serviceaccount:{{ .Release.Namespace }}:rook-csi-rbd-provisioner-sa
- system:serviceaccount:{{ .Release.Namespace }}:rook-csi-cephfs-plugin-sa
- system:serviceaccount:{{ .Release.Namespace }}:rook-csi-cephfs-provisioner-sa
- system:serviceaccount:{{ .Release.Namespace }}:rook-csi-nfs-plugin-sa
- system:serviceaccount:{{ .Release.Namespace }}:rook-csi-nfs-provisioner-sa
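# A hypothetical additional service account would be listed the same way, e.g.:
# - system:serviceaccount:{{ .Release.Namespace }}:my-extra-csi-sa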
---
{{- end }}

View File

@ -0,0 +1,73 @@
# Service account for the Rook-Ceph operator
apiVersion: v1
kind: ServiceAccount
metadata:
name: rook-ceph-system
namespace: {{ .Release.Namespace }} # namespace:operator
labels:
operator: rook
storage-backend: ceph
{{- include "library.rook-ceph.labels" . | nindent 4 }}
{{ template "library.imagePullSecrets" . }}
---
# Service account for the CephFS CSI driver
apiVersion: v1
kind: ServiceAccount
metadata:
name: rook-csi-cephfs-plugin-sa
namespace: {{ .Release.Namespace }} # namespace:operator
{{ template "library.imagePullSecrets" . }}
---
{{- if .Values.csi.nfs.enabled }}
# Service account for the NFS CSI driver
apiVersion: v1
kind: ServiceAccount
metadata:
name: rook-csi-nfs-plugin-sa
namespace: {{ .Release.Namespace }} # namespace:operator
{{ template "library.imagePullSecrets" . }}
---
# Service account for the NFS CSI provisioner
apiVersion: v1
kind: ServiceAccount
metadata:
name: rook-csi-nfs-provisioner-sa
namespace: {{ .Release.Namespace }} # namespace:operator
{{ template "library.imagePullSecrets" . }}
---
{{- end }}
# Service account for the CephFS CSI provisioner
apiVersion: v1
kind: ServiceAccount
metadata:
name: rook-csi-cephfs-provisioner-sa
namespace: {{ .Release.Namespace }} # namespace:operator
{{ template "library.imagePullSecrets" . }}
---
# Service account for the RBD CSI driver
apiVersion: v1
kind: ServiceAccount
metadata:
name: rook-csi-rbd-plugin-sa
namespace: {{ .Release.Namespace }} # namespace:operator
{{ template "library.imagePullSecrets" . }}
---
# Service account for the RBD CSI provisioner
apiVersion: v1
kind: ServiceAccount
metadata:
name: rook-csi-rbd-provisioner-sa
namespace: {{ .Release.Namespace }} # namespace:operator
{{ template "library.imagePullSecrets" . }}
---
# Service account for Ceph COSI driver
apiVersion: v1
kind: ServiceAccount
metadata:
name: objectstorage-provisioner
namespace: {{ .Release.Namespace }} # namespace:operator
labels:
app.kubernetes.io/part-of: container-object-storage-interface
app.kubernetes.io/component: driver-ceph
app.kubernetes.io/name: cosi-driver-ceph
{{ template "library.imagePullSecrets" . }}

View File

@ -0,0 +1,29 @@
{{- if and .Values.monitoring.enabled .Values.csi.serviceMonitor.enabled (or .Values.csi.enableLiveness .Values.csi.enableGrpcMetrics) }}
apiVersion: monitoring.coreos.com/v1
kind: ServiceMonitor
metadata:
name: csi-metrics
namespace: {{ .Release.Namespace }}
labels: {{- include "library.rook-ceph.labels" . | nindent 4 }}
{{- with .Values.csi.serviceMonitor.labels }}
{{- toYaml . | nindent 4 }}
{{- end }}
spec:
namespaceSelector:
matchNames:
- {{ .Release.Namespace }}
selector:
matchLabels:
app: csi-metrics
endpoints:
{{- if .Values.csi.enableLiveness }}
- port: csi-http-metrics
path: /metrics
interval: {{ .Values.csi.serviceMonitor.interval }}
{{- end }}
{{- if .Values.csi.enableGrpcMetrics }}
- port: csi-grpc-metrics
path: /metrics
interval: {{ .Values.csi.serviceMonitor.interval }}
{{- end }}
{{- end }}

631
rook-ceph/values.home.yaml Normal file
View File

@ -0,0 +1,631 @@
# Default values for rook-ceph-operator
# This is a YAML-formatted file.
# Declare variables to be passed into your templates.
image:
# -- Image
repository: rook/ceph
# -- Image tag
# @default -- `v1.12.7`
tag: v1.12.7
# -- Image pull policy
pullPolicy: IfNotPresent
crds:
# -- Whether the helm chart should create and update the CRDs. If false, the CRDs must be
# managed independently with deploy/examples/crds.yaml.
# **WARNING** Only set during first deployment. If later disabled the cluster may be DESTROYED.
# If the CRDs are deleted in this case, see
# [the disaster recovery guide](https://rook.io/docs/rook/latest/Troubleshooting/disaster-recovery/#restoring-crds-after-deletion)
# to restore them.
enabled: true
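# A sketch of managing the CRDs independently (this assumes the upstream rook repo
# layout at this chart's tag; verify the path before relying on it):
# kubectl apply -f https://raw.githubusercontent.com/rook/rook/v1.12.7/deploy/examples/crds.yaml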
# -- Pod resource requests & limits
resources:
limits:
memory: 512Mi
requests:
cpu: 100m
memory: 128Mi
# -- Kubernetes [`nodeSelector`](https://kubernetes.io/docs/concepts/configuration/assign-pod-node/#nodeselector) to add to the Deployment.
nodeSelector: {}
# Constrain the rook-ceph-operator Deployment to nodes with label `disktype: ssd`.
# For more info, see https://kubernetes.io/docs/concepts/configuration/assign-pod-node/#nodeselector
# disktype: ssd
# -- List of Kubernetes [`tolerations`](https://kubernetes.io/docs/concepts/scheduling-eviction/taint-and-toleration/) to add to the Deployment.
tolerations: []
# -- Delay to use for the `node.kubernetes.io/unreachable` pod failure toleration to override
# the Kubernetes default of 5 minutes
unreachableNodeTolerationSeconds: 5
# -- Whether the operator should watch cluster CRD in its own namespace or not
currentNamespaceOnly: false
# -- Pod annotations
annotations: {}
# -- Global log level for the operator.
# Options: `ERROR`, `WARNING`, `INFO`, `DEBUG`
logLevel: INFO
# -- If true, create & use RBAC resources
rbacEnable: true
rbacAggregate:
# -- If true, create a ClusterRole aggregated to [user facing roles](https://kubernetes.io/docs/reference/access-authn-authz/rbac/#user-facing-roles) for objectbucketclaims
enableOBCs: false
# -- If true, create & use PSP resources
pspEnable: false
# -- Set the priority class for the rook operator deployment if desired
priorityClassName:
# -- If true, loop devices are allowed to be used for osds in test clusters
allowLoopDevices: false
# Settings for whether to disable the drivers or other daemons if they are not
# needed
csi:
# -- Enable Ceph CSI RBD driver
enableRbdDriver: true
# -- Enable Ceph CSI CephFS driver
enableCephfsDriver: true
# -- Enable Ceph CSI GRPC Metrics
enableGrpcMetrics: false
# -- Enable host networking for CSI CephFS and RBD nodeplugins. This may be necessary
# in some network configurations where the SDN does not provide access to an external cluster or
# there is a significant drop in read/write performance
enableCSIHostNetwork: true
# -- Enable Snapshotter in CephFS provisioner pod
enableCephfsSnapshotter: true
# -- Enable Snapshotter in NFS provisioner pod
enableNFSSnapshotter: true
# -- Enable Snapshotter in RBD provisioner pod
enableRBDSnapshotter: true
# -- Enable Host mount for `/etc/selinux` directory for Ceph CSI nodeplugins
enablePluginSelinuxHostMount: false
# -- Enable Ceph CSI PVC encryption support
enableCSIEncryption: false
# -- PriorityClassName to be set on csi driver plugin pods
pluginPriorityClassName: system-node-critical
# -- PriorityClassName to be set on csi driver provisioner pods
provisionerPriorityClassName: system-cluster-critical
# -- Policy for modifying a volume's ownership or permissions when the RBD PVC is being mounted.
# supported values are documented at https://kubernetes-csi.github.io/docs/support-fsgroup.html
rbdFSGroupPolicy: "File"
# -- Policy for modifying a volume's ownership or permissions when the CephFS PVC is being mounted.
# supported values are documented at https://kubernetes-csi.github.io/docs/support-fsgroup.html
cephFSFSGroupPolicy: "File"
# -- Policy for modifying a volume's ownership or permissions when the NFS PVC is being mounted.
# supported values are documented at https://kubernetes-csi.github.io/docs/support-fsgroup.html
nfsFSGroupPolicy: "File"
# -- The OMAP generator generates the omap mapping between the PV name and the RBD image,
# which helps CSI identify the RBD images for CSI operations.
# `CSI_ENABLE_OMAP_GENERATOR` needs to be enabled when using the RBD mirroring feature.
# The OMAP generator is disabled by default; when enabled (set to true), it is deployed
# as a sidecar with the CSI provisioner pod.
enableOMAPGenerator: false
# -- Set CephFS Kernel mount options to use https://docs.ceph.com/en/latest/man/8/mount.ceph/#options.
# Set to "ms_mode=secure" when connections.encrypted is enabled in CephCluster CR
cephFSKernelMountOptions:
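# For example, when connections.encrypted is enabled in the CephCluster CR:
# cephFSKernelMountOptions: ms_mode=secure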
# -- Enable adding volume metadata on the CephFS subvolumes and RBD images.
# Not all users might be interested in getting volume/snapshot details as metadata on CephFS subvolumes and RBD images.
# Hence this is disabled by default
enableMetadata: false
# -- Set replicas for csi provisioner deployment
provisionerReplicas: 2
# -- Cluster name identifier to set as metadata on the CephFS subvolume and RBD images. This is useful,
# for example, when two container orchestrator clusters (Kubernetes/OCP) share a single Ceph cluster
clusterName: home-ceph
# -- Set logging level for cephCSI containers maintained by the cephCSI.
# Supported values from 0 to 5. 0 for general useful logs, 5 for trace level verbosity.
logLevel: 0
# -- Set logging level for Kubernetes-csi sidecar containers.
# Supported values from 0 to 5. 0 for general useful logs (the default), 5 for trace level verbosity.
# @default -- `0`
sidecarLogLevel:
# -- CSI RBD plugin daemonset update strategy, supported values are OnDelete and RollingUpdate
# @default -- `RollingUpdate`
rbdPluginUpdateStrategy:
# -- A maxUnavailable parameter of CSI RBD plugin daemonset update strategy.
# @default -- `1`
rbdPluginUpdateStrategyMaxUnavailable:
# -- CSI CephFS plugin daemonset update strategy, supported values are OnDelete and RollingUpdate
# @default -- `RollingUpdate`
cephFSPluginUpdateStrategy:
# -- A maxUnavailable parameter of CSI cephFS plugin daemonset update strategy.
# @default -- `1`
cephFSPluginUpdateStrategyMaxUnavailable:
# -- CSI NFS plugin daemonset update strategy, supported values are OnDelete and RollingUpdate
# @default -- `RollingUpdate`
nfsPluginUpdateStrategy:
# -- Set GRPC timeout for csi containers (in seconds). It should be >= 120. If this value is not set or is invalid, it defaults to 150
grpcTimeoutInSeconds: 150
# -- Allow starting an unsupported ceph-csi image
allowUnsupportedVersion: false
# -- The volume of the CephCSI RBD plugin DaemonSet
csiRBDPluginVolume:
# - name: lib-modules
# hostPath:
# path: /run/booted-system/kernel-modules/lib/modules/
# - name: host-nix
# hostPath:
# path: /nix
# -- The volume mounts of the CephCSI RBD plugin DaemonSet
csiRBDPluginVolumeMount:
# - name: host-nix
# mountPath: /nix
# readOnly: true
# -- The volume of the CephCSI CephFS plugin DaemonSet
csiCephFSPluginVolume:
# - name: lib-modules
# hostPath:
# path: /run/booted-system/kernel-modules/lib/modules/
# - name: host-nix
# hostPath:
# path: /nix
# -- The volume mounts of the CephCSI CephFS plugin DaemonSet
csiCephFSPluginVolumeMount:
# - name: host-nix
# mountPath: /nix
# readOnly: true
# -- CEPH CSI RBD provisioner resource requirement list
# csi-omap-generator resources will be applied only if `enableOMAPGenerator` is set to `true`
# @default -- see values.yaml
csiRBDProvisionerResource: |
- name : csi-provisioner
resource:
requests:
memory: 128Mi
cpu: 100m
limits:
memory: 256Mi
- name : csi-resizer
resource:
requests:
memory: 128Mi
cpu: 100m
limits:
memory: 256Mi
- name : csi-attacher
resource:
requests:
memory: 128Mi
cpu: 100m
limits:
memory: 256Mi
- name : csi-snapshotter
resource:
requests:
memory: 128Mi
cpu: 100m
limits:
memory: 256Mi
- name : csi-rbdplugin
resource:
requests:
memory: 512Mi
cpu: 250m
limits:
memory: 1Gi
- name : csi-omap-generator
resource:
requests:
memory: 512Mi
cpu: 250m
limits:
memory: 1Gi
- name : liveness-prometheus
resource:
requests:
memory: 128Mi
cpu: 50m
limits:
memory: 256Mi
# -- CEPH CSI RBD plugin resource requirement list
# @default -- see values.yaml
csiRBDPluginResource: |
- name : driver-registrar
resource:
requests:
memory: 128Mi
cpu: 50m
limits:
memory: 256Mi
- name : csi-rbdplugin
resource:
requests:
memory: 512Mi
cpu: 250m
limits:
memory: 1Gi
- name : liveness-prometheus
resource:
requests:
memory: 128Mi
cpu: 50m
limits:
memory: 256Mi
# -- CEPH CSI CephFS provisioner resource requirement list
# @default -- see values.yaml
csiCephFSProvisionerResource: |
- name : csi-provisioner
resource:
requests:
memory: 128Mi
cpu: 100m
limits:
memory: 256Mi
- name : csi-resizer
resource:
requests:
memory: 128Mi
cpu: 100m
limits:
memory: 256Mi
- name : csi-attacher
resource:
requests:
memory: 128Mi
cpu: 100m
limits:
memory: 256Mi
- name : csi-snapshotter
resource:
requests:
memory: 128Mi
cpu: 100m
limits:
memory: 256Mi
- name : csi-cephfsplugin
resource:
requests:
memory: 512Mi
cpu: 250m
limits:
memory: 1Gi
- name : liveness-prometheus
resource:
requests:
memory: 128Mi
cpu: 50m
limits:
memory: 256Mi
# -- CEPH CSI CephFS plugin resource requirement list
# @default -- see values.yaml
csiCephFSPluginResource: |
- name : driver-registrar
resource:
requests:
memory: 128Mi
cpu: 50m
limits:
memory: 256Mi
- name : csi-cephfsplugin
resource:
requests:
memory: 512Mi
cpu: 250m
limits:
memory: 1Gi
- name : liveness-prometheus
resource:
requests:
memory: 128Mi
cpu: 50m
limits:
memory: 256Mi
# -- CEPH CSI NFS provisioner resource requirement list
# @default -- see values.yaml
csiNFSProvisionerResource: |
- name : csi-provisioner
resource:
requests:
memory: 128Mi
cpu: 100m
limits:
memory: 256Mi
- name : csi-nfsplugin
resource:
requests:
memory: 512Mi
cpu: 250m
limits:
memory: 1Gi
- name : csi-attacher
resource:
requests:
memory: 512Mi
cpu: 250m
limits:
memory: 1Gi
# -- CEPH CSI NFS plugin resource requirement list
# @default -- see values.yaml
csiNFSPluginResource: |
- name : driver-registrar
resource:
requests:
memory: 128Mi
cpu: 50m
limits:
memory: 256Mi
- name : csi-nfsplugin
resource:
requests:
memory: 512Mi
cpu: 250m
limits:
memory: 1Gi
# Set provisionerTolerations and provisionerNodeAffinity for provisioner pod.
# The CSI provisioner is best started on the same nodes as other ceph daemons.
# -- Array of tolerations in YAML format which will be added to CSI provisioner deployment
provisionerTolerations:
# - key: key
# operator: Exists
# effect: NoSchedule
# -- The node labels for affinity of the CSI provisioner deployment [^1]
provisionerNodeAffinity: #key1=value1,value2; key2=value3
# Set pluginTolerations and pluginNodeAffinity for plugin daemonset pods.
# The CSI plugins need to be started on all the nodes where the clients need to mount the storage.
# -- Array of tolerations in YAML format which will be added to CephCSI plugin DaemonSet
pluginTolerations:
# - key: key
# operator: Exists
# effect: NoSchedule
# -- The node labels for affinity of the CephCSI RBD plugin DaemonSet [^1]
pluginNodeAffinity: # key1=value1,value2; key2=value3
# -- Enable Ceph CSI Liveness sidecar deployment
enableLiveness: false
# -- CSI CephFS driver GRPC metrics port
# @default -- `9091`
cephfsGrpcMetricsPort:
# -- CSI CephFS driver metrics port
# @default -- `9081`
cephfsLivenessMetricsPort:
# -- Ceph CSI RBD driver GRPC metrics port
# @default -- `9090`
rbdGrpcMetricsPort:
# -- CSI Addons server port
# @default -- `9070`
csiAddonsPort:
# -- Enable Ceph Kernel clients on kernel < 4.17. If your kernel does not support quotas for CephFS
# you may want to disable this setting. However, this will cause an issue during upgrades
# with the FUSE client. See the [upgrade guide](https://rook.io/docs/rook/v1.2/ceph-upgrade.html)
forceCephFSKernelClient: true
# -- Ceph CSI RBD driver metrics port
# @default -- `8080`
rbdLivenessMetricsPort:
serviceMonitor:
# -- Enable ServiceMonitor for Ceph CSI drivers
enabled: true
# -- Service monitor scrape interval
interval: 5s
# -- ServiceMonitor additional labels
labels: {}
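# For example, to match a label selector on your Prometheus instance
# (the label name below is an assumption; adjust to your setup):
# labels:
#   release: prometheus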
# -- Kubelet root directory path (if the Kubelet uses a different path for the `--root-dir` flag)
# @default -- `/var/lib/kubelet`
kubeletDirPath:
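# e.g., if the Kubelet was started with --root-dir=/data/kubelet (hypothetical path):
# kubeletDirPath: /data/kubelet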
cephcsi:
# -- Ceph CSI image
# @default -- `quay.io/cephcsi/cephcsi:v3.9.0`
image:
registrar:
# -- Kubernetes CSI registrar image
# @default -- `registry.k8s.io/sig-storage/csi-node-driver-registrar:v2.8.0`
image:
provisioner:
# -- Kubernetes CSI provisioner image
# @default -- `registry.k8s.io/sig-storage/csi-provisioner:v3.5.0`
image:
snapshotter:
# -- Kubernetes CSI snapshotter image
# @default -- `registry.k8s.io/sig-storage/csi-snapshotter:v6.2.2`
image:
attacher:
# -- Kubernetes CSI Attacher image
# @default -- `registry.k8s.io/sig-storage/csi-attacher:v4.3.0`
image:
resizer:
# -- Kubernetes CSI resizer image
# @default -- `registry.k8s.io/sig-storage/csi-resizer:v1.8.0`
image:
# -- Image pull policy
imagePullPolicy: IfNotPresent
# -- Labels to add to the CSI CephFS Deployments and DaemonSets Pods
cephfsPodLabels: #"key1=value1,key2=value2"
# -- Labels to add to the CSI NFS Deployments and DaemonSets Pods
nfsPodLabels: #"key1=value1,key2=value2"
# -- Labels to add to the CSI RBD Deployments and DaemonSets Pods
rbdPodLabels: #"key1=value1,key2=value2"
csiAddons:
# -- Enable CSIAddons
enabled: true
# -- CSIAddons Sidecar image
image: "quay.io/csiaddons/k8s-sidecar:v0.7.0"
nfs:
# -- Enable the nfs csi driver
enabled: false
topology:
# -- Enable topology based provisioning
enabled: false
# NOTE: the value here serves as an example and needs to be
# updated with node labels that define domains of interest
# -- domainLabels define which node labels to use as domains
# for CSI nodeplugins to advertise their domains
domainLabels:
# - kubernetes.io/hostname
# - topology.kubernetes.io/zone
# - topology.rook.io/rack
readAffinity:
# -- Enable read affinity for RBD volumes. Recommended to
# set to true if running kernel 5.8 or newer.
# @default -- `false`
enabled: false
# -- Define which node labels to use
# as CRUSH location. This should correspond to the values set
# in the CRUSH map.
# @default -- labels listed [here](../CRDs/Cluster/ceph-cluster-crd.md#osd-topology)
crushLocationLabels:
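# A sketch (the labels must match the values used in your CRUSH map):
# crushLocationLabels:
#   - topology.kubernetes.io/zone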
# -- Whether to skip any attach operation altogether for CephFS PVCs. See more details
# [here](https://kubernetes-csi.github.io/docs/skip-attach.html#skip-attach-with-csi-driver-object).
# If cephFSAttachRequired is set to false it skips the volume attachments and makes the creation
# of pods using the CephFS PVC fast. **WARNING** It's highly discouraged to use this for
# CephFS RWO volumes. Refer to this [issue](https://github.com/kubernetes/kubernetes/issues/103305) for more details.
cephFSAttachRequired: true
# -- Whether to skip any attach operation altogether for RBD PVCs. See more details
# [here](https://kubernetes-csi.github.io/docs/skip-attach.html#skip-attach-with-csi-driver-object).
# If set to false it skips the volume attachments and makes the creation of pods using the RBD PVC fast.
# **WARNING** It's highly discouraged to use this for RWO volumes as it can cause data corruption.
# csi-addons operations like Reclaimspace and PVC Keyrotation will also not be supported if set
# to false since we'll have no VolumeAttachments to determine which node the PVC is mounted on.
# Refer to this [issue](https://github.com/kubernetes/kubernetes/issues/103305) for more details.
rbdAttachRequired: true
# -- Whether to skip any attach operation altogether for NFS PVCs. See more details
# [here](https://kubernetes-csi.github.io/docs/skip-attach.html#skip-attach-with-csi-driver-object).
# If nfsAttachRequired is set to false it skips the volume attachments and makes the creation
# of pods using the NFS PVC fast. **WARNING** It's highly discouraged to use this for
# NFS RWO volumes. Refer to this [issue](https://github.com/kubernetes/kubernetes/issues/103305) for more details.
nfsAttachRequired: true
# -- Enable discovery daemon
enableDiscoveryDaemon: false
# -- Set the discovery daemon device discovery interval (defaults to 60m)
discoveryDaemonInterval: 60m
# -- The timeout for ceph commands in seconds
cephCommandsTimeoutSeconds: "15"
# -- If true, run rook operator on the host network
useOperatorHostNetwork:
# -- If true, scale down the rook operator.
# This is useful for administrative actions where the rook operator must be scaled down, while using gitops style tooling
# to deploy your helm charts.
scaleDownOperator: false
## Rook Discover configuration
## toleration: NoSchedule, PreferNoSchedule or NoExecute
## tolerationKey: Set this to the specific key of the taint to tolerate
## tolerations: Array of tolerations in YAML format which will be added to agent deployment
## nodeAffinity: Set to labels of the node to match
discover:
# -- Toleration for the discover pods.
# Options: `NoSchedule`, `PreferNoSchedule` or `NoExecute`
toleration:
# -- The specific key of the taint to tolerate
tolerationKey:
# -- Array of tolerations in YAML format which will be added to discover deployment
tolerations:
# - key: key
# operator: Exists
# effect: NoSchedule
# -- The node labels for affinity of `discover-agent` [^1]
nodeAffinity: # key1=value1,value2; key2=value3
# -- Labels to add to the discover pods
podLabels: # "key1=value1,key2=value2"
# -- Add resources to discover daemon pods
resources:
# limits:
#   cpu: 500m
#   memory: 512Mi
# requests:
#   cpu: 100m
#   memory: 128Mi
# -- Whether to disable the admission controller
disableAdmissionController: true
# -- Runs Ceph Pods as privileged to be able to write to `hostPaths` in OpenShift with SELinux restrictions.
hostpathRequiresPrivileged: false
# -- Disable automatic orchestration when new devices are discovered.
disableDeviceHotplug: false
# -- Blacklist certain disks according to the regex provided.
discoverDaemonUdev:
# -- The imagePullSecrets option allows pulling docker images from a private docker registry. The option will be passed to all service accounts.
imagePullSecrets:
# - name: my-registry-secret
# -- Whether the OBC provisioner should watch the operator namespace or not; if not, the namespace of the cluster will be used
enableOBCWatchOperatorNamespace: true
# -- Set tolerations and nodeAffinity [^1] for admission controller pod.
# The admission controller is best started on the same nodes as other ceph daemons.
admissionController:
# tolerations:
# - key: key
# operator: Exists
# effect: NoSchedule
# nodeAffinity: key1=value1,value2; key2=value3
# [^1]: `nodeAffinity` and `*NodeAffinity` options should have the format `"role=storage,rook; storage=ceph"` or `storage=;role=rook-example` or `storage=;` (_checks only for presence of key_)
monitoring:
# -- Enable monitoring. Requires Prometheus to be pre-installed.
# Enabling will also create RBAC rules to allow the Operator to create ServiceMonitors
enabled: true

656
rook-ceph/values.yaml Normal file
View File

@ -0,0 +1,656 @@
# Default values for rook-ceph-operator
# This is a YAML-formatted file.
# Declare variables to be passed into your templates.
image:
# -- Image
repository: rook/ceph
# -- Image tag
# @default -- `v1.12.7`
tag: v1.12.7
# -- Image pull policy
pullPolicy: IfNotPresent
crds:
# -- Whether the helm chart should create and update the CRDs. If false, the CRDs must be
# managed independently with deploy/examples/crds.yaml.
# **WARNING** Only set during first deployment. If later disabled the cluster may be DESTROYED.
# If the CRDs are deleted in this case, see
# [the disaster recovery guide](https://rook.io/docs/rook/latest/Troubleshooting/disaster-recovery/#restoring-crds-after-deletion)
# to restore them.
enabled: true
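# A sketch of managing the CRDs independently (this assumes the upstream rook repo
# layout at this chart's tag; verify the path before relying on it):
# kubectl apply -f https://raw.githubusercontent.com/rook/rook/v1.12.7/deploy/examples/crds.yaml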
# -- Pod resource requests & limits
resources:
limits:
cpu: 500m
memory: 512Mi
requests:
cpu: 100m
memory: 128Mi
# -- Kubernetes [`nodeSelector`](https://kubernetes.io/docs/concepts/configuration/assign-pod-node/#nodeselector) to add to the Deployment.
nodeSelector: {}
# Constrain the rook-ceph-operator Deployment to nodes with label `disktype: ssd`.
# For more info, see https://kubernetes.io/docs/concepts/configuration/assign-pod-node/#nodeselector
# disktype: ssd
# -- List of Kubernetes [`tolerations`](https://kubernetes.io/docs/concepts/scheduling-eviction/taint-and-toleration/) to add to the Deployment.
tolerations: []
# -- Delay to use for the `node.kubernetes.io/unreachable` pod failure toleration to override
# the Kubernetes default of 5 minutes
unreachableNodeTolerationSeconds: 5
# -- Whether the operator should watch cluster CRD in its own namespace or not
currentNamespaceOnly: false
# -- Pod annotations
annotations: {}
# -- Global log level for the operator.
# Options: `ERROR`, `WARNING`, `INFO`, `DEBUG`
logLevel: INFO
# -- If true, create & use RBAC resources
rbacEnable: true
rbacAggregate:
# -- If true, create a ClusterRole aggregated to [user facing roles](https://kubernetes.io/docs/reference/access-authn-authz/rbac/#user-facing-roles) for objectbucketclaims
enableOBCs: false
# -- If true, create & use PSP resources
pspEnable: false
# -- Set the priority class for the rook operator deployment if desired
priorityClassName:
# -- If true, loop devices are allowed to be used for osds in test clusters
allowLoopDevices: false
# Settings for whether to disable the drivers or other daemons if they are not
# needed
csi:
# -- Enable Ceph CSI RBD driver
enableRbdDriver: true
# -- Enable Ceph CSI CephFS driver
enableCephfsDriver: true
# -- Enable Ceph CSI GRPC Metrics
enableGrpcMetrics: false
# -- Enable host networking for CSI CephFS and RBD nodeplugins. This may be necessary
# in some network configurations where the SDN does not provide access to an external cluster or
# there is a significant drop in read/write performance
enableCSIHostNetwork: true
# -- Enable Snapshotter in CephFS provisioner pod
enableCephfsSnapshotter: true
# -- Enable Snapshotter in NFS provisioner pod
enableNFSSnapshotter: true
# -- Enable Snapshotter in RBD provisioner pod
enableRBDSnapshotter: true
# -- Enable Host mount for `/etc/selinux` directory for Ceph CSI nodeplugins
enablePluginSelinuxHostMount: false
# -- Enable Ceph CSI PVC encryption support
enableCSIEncryption: false
# -- PriorityClassName to be set on csi driver plugin pods
pluginPriorityClassName: system-node-critical
# -- PriorityClassName to be set on csi driver provisioner pods
provisionerPriorityClassName: system-cluster-critical
# -- Policy for modifying a volume's ownership or permissions when the RBD PVC is being mounted.
# supported values are documented at https://kubernetes-csi.github.io/docs/support-fsgroup.html
rbdFSGroupPolicy: "File"
# -- Policy for modifying a volume's ownership or permissions when the CephFS PVC is being mounted.
# supported values are documented at https://kubernetes-csi.github.io/docs/support-fsgroup.html
cephFSFSGroupPolicy: "File"
# -- Policy for modifying a volume's ownership or permissions when the NFS PVC is being mounted.
# supported values are documented at https://kubernetes-csi.github.io/docs/support-fsgroup.html
nfsFSGroupPolicy: "File"
# -- The OMAP generator generates the omap mapping between the PV name and the RBD image,
# which helps CSI identify the RBD images for CSI operations.
# `CSI_ENABLE_OMAP_GENERATOR` needs to be enabled when using the RBD mirroring feature.
# The OMAP generator is disabled by default; when enabled (set to true), it is deployed
# as a sidecar with the CSI provisioner pod.
enableOMAPGenerator: false
# -- Set CephFS Kernel mount options to use https://docs.ceph.com/en/latest/man/8/mount.ceph/#options.
# Set to "ms_mode=secure" when connections.encrypted is enabled in CephCluster CR
cephFSKernelMountOptions:
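# For example, when connections.encrypted is enabled in the CephCluster CR:
# cephFSKernelMountOptions: ms_mode=secure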
# -- Enable adding volume metadata on the CephFS subvolumes and RBD images.
# Not all users might be interested in getting volume/snapshot details as metadata on CephFS subvolumes and RBD images.
# Hence this is disabled by default
enableMetadata: false
# -- Set replicas for csi provisioner deployment
provisionerReplicas: 2
# -- Cluster name identifier to set as metadata on the CephFS subvolume and RBD images. This is useful,
# for example, when two container orchestrator clusters (Kubernetes/OCP) share a single Ceph cluster
clusterName:
# -- Set logging level for cephCSI containers maintained by the cephCSI.
# Supported values from 0 to 5. 0 for general useful logs, 5 for trace level verbosity.
logLevel: 0
# -- Set logging level for Kubernetes-csi sidecar containers.
# Supported values from 0 to 5. 0 for general useful logs (the default), 5 for trace level verbosity.
# @default -- `0`
sidecarLogLevel:
# -- CSI RBD plugin daemonset update strategy, supported values are OnDelete and RollingUpdate
# @default -- `RollingUpdate`
rbdPluginUpdateStrategy:
# -- A maxUnavailable parameter of CSI RBD plugin daemonset update strategy.
# @default -- `1`
rbdPluginUpdateStrategyMaxUnavailable:
# -- CSI CephFS plugin daemonset update strategy, supported values are OnDelete and RollingUpdate
# @default -- `RollingUpdate`
cephFSPluginUpdateStrategy:
# -- A maxUnavailable parameter of CSI cephFS plugin daemonset update strategy.
# @default -- `1`
cephFSPluginUpdateStrategyMaxUnavailable:
# -- CSI NFS plugin daemonset update strategy, supported values are OnDelete and RollingUpdate
# @default -- `RollingUpdate`
nfsPluginUpdateStrategy:
# -- Set GRPC timeout for csi containers (in seconds). It should be >= 120. If this value is not set or is invalid, it defaults to 150
grpcTimeoutInSeconds: 150
# -- Allow starting an unsupported ceph-csi image
allowUnsupportedVersion: false
# -- The volume of the CephCSI RBD plugin DaemonSet
csiRBDPluginVolume:
# - name: lib-modules
# hostPath:
# path: /run/booted-system/kernel-modules/lib/modules/
# - name: host-nix
# hostPath:
# path: /nix
# -- The volume mounts of the CephCSI RBD plugin DaemonSet
csiRBDPluginVolumeMount:
# - name: host-nix
# mountPath: /nix
# readOnly: true
# -- The volume of the CephCSI CephFS plugin DaemonSet
csiCephFSPluginVolume:
# - name: lib-modules
# hostPath:
# path: /run/booted-system/kernel-modules/lib/modules/
# - name: host-nix
# hostPath:
# path: /nix
# -- The volume mounts of the CephCSI CephFS plugin DaemonSet
csiCephFSPluginVolumeMount:
# - name: host-nix
# mountPath: /nix
# readOnly: true
# -- CEPH CSI RBD provisioner resource requirement list
# csi-omap-generator resources will be applied only if `enableOMAPGenerator` is set to `true`
# @default -- see values.yaml
csiRBDProvisionerResource: |
- name : csi-provisioner
resource:
requests:
memory: 128Mi
cpu: 100m
limits:
memory: 256Mi
cpu: 200m
- name : csi-resizer
resource:
requests:
memory: 128Mi
cpu: 100m
limits:
memory: 256Mi
cpu: 200m
- name : csi-attacher
resource:
requests:
memory: 128Mi
cpu: 100m
limits:
memory: 256Mi
cpu: 200m
- name : csi-snapshotter
resource:
requests:
memory: 128Mi
cpu: 100m
limits:
memory: 256Mi
cpu: 200m
- name : csi-rbdplugin
resource:
requests:
memory: 512Mi
cpu: 250m
limits:
memory: 1Gi
cpu: 500m
- name : csi-omap-generator
resource:
requests:
memory: 512Mi
cpu: 250m
limits:
memory: 1Gi
cpu: 500m
- name : liveness-prometheus
resource:
requests:
memory: 128Mi
cpu: 50m
limits:
memory: 256Mi
cpu: 100m
# -- CEPH CSI RBD plugin resource requirement list
# @default -- see values.yaml
csiRBDPluginResource: |
- name : driver-registrar
resource:
requests:
memory: 128Mi
cpu: 50m
limits:
memory: 256Mi
cpu: 100m
- name : csi-rbdplugin
resource:
requests:
memory: 512Mi
cpu: 250m
limits:
memory: 1Gi
cpu: 500m
- name : liveness-prometheus
resource:
requests:
memory: 128Mi
cpu: 50m
limits:
memory: 256Mi
cpu: 100m
# -- CEPH CSI CephFS provisioner resource requirement list
# @default -- see values.yaml
csiCephFSProvisionerResource: |
- name : csi-provisioner
resource:
requests:
memory: 128Mi
cpu: 100m
limits:
memory: 256Mi
cpu: 200m
- name : csi-resizer
resource:
requests:
memory: 128Mi
cpu: 100m
limits:
memory: 256Mi
cpu: 200m
- name : csi-attacher
resource:
requests:
memory: 128Mi
cpu: 100m
limits:
memory: 256Mi
cpu: 200m
- name : csi-snapshotter
resource:
requests:
memory: 128Mi
cpu: 100m
limits:
memory: 256Mi
cpu: 200m
- name : csi-cephfsplugin
resource:
requests:
memory: 512Mi
cpu: 250m
limits:
memory: 1Gi
cpu: 500m
- name : liveness-prometheus
resource:
requests:
memory: 128Mi
cpu: 50m
limits:
memory: 256Mi
cpu: 100m
# -- CEPH CSI CephFS plugin resource requirement list
# @default -- see values.yaml
csiCephFSPluginResource: |
- name : driver-registrar
resource:
requests:
memory: 128Mi
cpu: 50m
limits:
memory: 256Mi
cpu: 100m
- name : csi-cephfsplugin
resource:
requests:
memory: 512Mi
cpu: 250m
limits:
memory: 1Gi
cpu: 500m
- name : liveness-prometheus
resource:
requests:
memory: 128Mi
cpu: 50m
limits:
memory: 256Mi
cpu: 100m
# -- CEPH CSI NFS provisioner resource requirement list
# @default -- see values.yaml
csiNFSProvisionerResource: |
- name : csi-provisioner
resource:
requests:
memory: 128Mi
cpu: 100m
limits:
memory: 256Mi
cpu: 200m
- name : csi-nfsplugin
resource:
requests:
memory: 512Mi
cpu: 250m
limits:
memory: 1Gi
cpu: 500m
- name : csi-attacher
resource:
requests:
memory: 512Mi
cpu: 250m
limits:
memory: 1Gi
cpu: 500m
# -- CEPH CSI NFS plugin resource requirement list
# @default -- see values.yaml
csiNFSPluginResource: |
- name : driver-registrar
resource:
requests:
memory: 128Mi
cpu: 50m
limits:
memory: 256Mi
cpu: 100m
- name : csi-nfsplugin
resource:
requests:
memory: 512Mi
cpu: 250m
limits:
memory: 1Gi
cpu: 500m
# Set provisionerTolerations and provisionerNodeAffinity for provisioner pod.
# The CSI provisioner is best started on the same nodes as other ceph daemons.
# -- Array of tolerations in YAML format which will be added to CSI provisioner deployment
provisionerTolerations:
# - key: key
# operator: Exists
# effect: NoSchedule
# -- The node labels for affinity of the CSI provisioner deployment [^1]
provisionerNodeAffinity: #key1=value1,value2; key2=value3
# Set pluginTolerations and pluginNodeAffinity for plugin daemonset pods.
# The CSI plugins need to be started on all the nodes where the clients need to mount the storage.
# -- Array of tolerations in YAML format which will be added to CephCSI plugin DaemonSet
pluginTolerations:
# - key: key
# operator: Exists
# effect: NoSchedule
# -- The node labels for affinity of the CephCSI RBD plugin DaemonSet [^1]
pluginNodeAffinity: # key1=value1,value2; key2=value3
# -- Enable Ceph CSI Liveness sidecar deployment
enableLiveness: false
# -- CSI CephFS driver GRPC metrics port
# @default -- `9091`
cephfsGrpcMetricsPort:
# -- CSI CephFS driver metrics port
# @default -- `9081`
cephfsLivenessMetricsPort:
# -- Ceph CSI RBD driver GRPC metrics port
# @default -- `9090`
rbdGrpcMetricsPort:
# -- CSI Addons server port
# @default -- `9070`
csiAddonsPort:
# -- Enable Ceph Kernel clients on kernel < 4.17. If your kernel does not support quotas for CephFS
# you may want to disable this setting. However, this will cause an issue during upgrades
# with the FUSE client. See the [upgrade guide](https://rook.io/docs/rook/v1.2/ceph-upgrade.html)
forceCephFSKernelClient: true
# -- Ceph CSI RBD driver metrics port
# @default -- `8080`
rbdLivenessMetricsPort:
serviceMonitor:
# -- Enable ServiceMonitor for Ceph CSI drivers
enabled: false
# -- Service monitor scrape interval
interval: 5s
# -- ServiceMonitor additional labels
labels: {}
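# For example, to match a label selector on your Prometheus instance
# (the label name below is an assumption; adjust to your setup):
# labels:
#   release: prometheus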
# -- Kubelet root directory path (if the Kubelet uses a different path for the `--root-dir` flag)
# @default -- `/var/lib/kubelet`
kubeletDirPath:
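# e.g., if the Kubelet was started with --root-dir=/data/kubelet (hypothetical path):
# kubeletDirPath: /data/kubelet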
cephcsi:
# -- Ceph CSI image
# @default -- `quay.io/cephcsi/cephcsi:v3.9.0`
image:
registrar:
# -- Kubernetes CSI registrar image
# @default -- `registry.k8s.io/sig-storage/csi-node-driver-registrar:v2.8.0`
image:
provisioner:
# -- Kubernetes CSI provisioner image
# @default -- `registry.k8s.io/sig-storage/csi-provisioner:v3.5.0`
image:
snapshotter:
# -- Kubernetes CSI snapshotter image
# @default -- `registry.k8s.io/sig-storage/csi-snapshotter:v6.2.2`
image:
attacher:
# -- Kubernetes CSI Attacher image
# @default -- `registry.k8s.io/sig-storage/csi-attacher:v4.3.0`
image:
resizer:
# -- Kubernetes CSI resizer image
# @default -- `registry.k8s.io/sig-storage/csi-resizer:v1.8.0`
image:
# -- Image pull policy
imagePullPolicy: IfNotPresent
# -- Labels to add to the CSI CephFS Deployments and DaemonSets Pods
cephfsPodLabels: #"key1=value1,key2=value2"
# -- Labels to add to the CSI NFS Deployments and DaemonSets Pods
nfsPodLabels: #"key1=value1,key2=value2"
# -- Labels to add to the CSI RBD Deployments and DaemonSets Pods
rbdPodLabels: #"key1=value1,key2=value2"
csiAddons:
# -- Enable CSIAddons
enabled: false
# -- CSIAddons Sidecar image
image: "quay.io/csiaddons/k8s-sidecar:v0.7.0"
nfs:
# -- Enable the nfs csi driver
enabled: false
topology:
# -- Enable topology based provisioning
enabled: false
# NOTE: the value here serves as an example and needs to be
# updated with node labels that define domains of interest
# -- domainLabels define which node labels to use as domains
# for CSI nodeplugins to advertise their domains
domainLabels:
# - kubernetes.io/hostname
# - topology.kubernetes.io/zone
# - topology.rook.io/rack
readAffinity:
# -- Enable read affinity for RBD volumes. Recommended to
# set to true if running kernel 5.8 or newer.
# @default -- `false`
enabled: false
# -- Define which node labels to use
# as CRUSH location. This should correspond to the values set
# in the CRUSH map.
# @default -- labels listed [here](../CRDs/Cluster/ceph-cluster-crd.md#osd-topology)
crushLocationLabels:
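# A sketch (the labels must match the values used in your CRUSH map):
# crushLocationLabels:
#   - topology.kubernetes.io/zone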
# -- Whether to skip any attach operation altogether for CephFS PVCs. See more details
# [here](https://kubernetes-csi.github.io/docs/skip-attach.html#skip-attach-with-csi-driver-object).
# If cephFSAttachRequired is set to false it skips the volume attachments and makes the creation
# of pods using the CephFS PVC fast. **WARNING** It's highly discouraged to use this for
# CephFS RWO volumes. Refer to this [issue](https://github.com/kubernetes/kubernetes/issues/103305) for more details.
cephFSAttachRequired: true
# -- Whether to skip any attach operation altogether for RBD PVCs. See more details
# [here](https://kubernetes-csi.github.io/docs/skip-attach.html#skip-attach-with-csi-driver-object).
# If set to false it skips the volume attachments and makes the creation of pods using the RBD PVC fast.
# **WARNING** It's highly discouraged to use this for RWO volumes as it can cause data corruption.
# csi-addons operations like Reclaimspace and PVC Keyrotation will also not be supported if set
# to false since we'll have no VolumeAttachments to determine which node the PVC is mounted on.
# Refer to this [issue](https://github.com/kubernetes/kubernetes/issues/103305) for more details.
rbdAttachRequired: true
# -- Whether to skip any attach operation altogether for NFS PVCs. See more details
# [here](https://kubernetes-csi.github.io/docs/skip-attach.html#skip-attach-with-csi-driver-object).
# If nfsAttachRequired is set to false it skips the volume attachments and makes the creation
# of pods using the NFS PVC fast. **WARNING** It's highly discouraged to use this for
# NFS RWO volumes. Refer to this [issue](https://github.com/kubernetes/kubernetes/issues/103305) for more details.
nfsAttachRequired: true
# -- Enable discovery daemon
enableDiscoveryDaemon: false
# -- Set the discovery daemon device discovery interval (defaults to 60m)
discoveryDaemonInterval: 60m
# -- The timeout for ceph commands in seconds
cephCommandsTimeoutSeconds: "15"
# -- If true, run rook operator on the host network
useOperatorHostNetwork:
# -- If true, scale down the rook operator.
# This is useful for administrative actions where the rook operator must be scaled down, while using gitops style tooling
# to deploy your helm charts.
scaleDownOperator: false
## Rook Discover configuration
## toleration: NoSchedule, PreferNoSchedule or NoExecute
## tolerationKey: Set this to the specific key of the taint to tolerate
## tolerations: Array of tolerations in YAML format which will be added to agent deployment
## nodeAffinity: Set to labels of the node to match
discover:
# -- Toleration for the discover pods.
# Options: `NoSchedule`, `PreferNoSchedule` or `NoExecute`
toleration:
# -- The specific key of the taint to tolerate
tolerationKey:
# -- Array of tolerations in YAML format which will be added to discover deployment
tolerations:
# - key: key
# operator: Exists
# effect: NoSchedule
# -- The node labels for affinity of `discover-agent` [^1]
nodeAffinity: # key1=value1,value2; key2=value3
# -- Labels to add to the discover pods
podLabels: # "key1=value1,key2=value2"
# -- Add resources to discover daemon pods
resources:
# limits:
#   cpu: 500m
#   memory: 512Mi
# requests:
#   cpu: 100m
#   memory: 128Mi
# -- Whether to disable the admission controller
disableAdmissionController: true
# -- Runs Ceph Pods as privileged to be able to write to `hostPaths` in OpenShift with SELinux restrictions.
hostpathRequiresPrivileged: false
# -- Disable automatic orchestration when new devices are discovered.
disableDeviceHotplug: false
# -- Blacklist certain disks according to the regex provided.
discoverDaemonUdev:
# -- The imagePullSecrets option allows pulling docker images from a private docker registry. The option will be passed to all service accounts.
imagePullSecrets:
# - name: my-registry-secret
# -- Whether the OBC provisioner should watch the operator namespace or not; if not, the namespace of the cluster will be used
enableOBCWatchOperatorNamespace: true
# -- Set tolerations and nodeAffinity [^1] for admission controller pod.
# The admission controller is best started on the same nodes as other ceph daemons.
admissionController:
# tolerations:
# - key: key
# operator: Exists
# effect: NoSchedule
# nodeAffinity: key1=value1,value2; key2=value3
# [^1]: `nodeAffinity` and `*NodeAffinity` options should have the format `"role=storage,rook; storage=ceph"` or `storage=;role=rook-example` or `storage=;` (_checks only for presence of key_)
monitoring:
# -- Enable monitoring. Requires Prometheus to be pre-installed.
# Enabling will also create RBAC rules to allow the Operator to create ServiceMonitors
enabled: false