rook-ceph-cluster:
  # copied from https://github.com/rook/rook/commits/master/deploy/examples/cluster-test.yaml
  configOverride: |
    [global]
    osd_pool_default_size = 1
    mon_warn_on_pool_no_redundancy = false
    bdev_flock_retry = 20
    bluefs_buffered_io = false
    mon_data_avail_warn = 10
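  # The block above is injected verbatim into the Ceph config on all daemons,
  # so any [global] option can be added the same way. A hedged sketch of one
  # more line a small single-node lab might append (mon_clock_drift_allowed is
  # a standard Ceph option; the 0.5s value is illustrative, not from this file):
  #   [global]
  #   mon_clock_drift_allowed = 0.5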
  toolbox:
    enabled: true
    resources:
      limits:
        cpu: 20000m
        memory: 20000Mi
      requests:
        cpu: "50m"
        memory: "128Mi"
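  # With the toolbox enabled, cluster state can be inspected from inside the
  # tools pod. A minimal sketch, assuming the chart installs into the rook-ceph
  # namespace (as the secret namespaces further down suggest):
  #   kubectl -n rook-ceph exec -it deploy/rook-ceph-tools -- ceph status
  #   kubectl -n rook-ceph exec -it deploy/rook-ceph-tools -- ceph osd pool ls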
  cephClusterSpec:
    mon:
      # Set the number of mons to be started. Generally recommended to be 3.
      # For highest availability, an odd number of mons should be specified.
      count: 1
      #count: 2
      #count: 3
      # The mons should be on unique nodes. For production, at least 3 nodes are recommended for this reason.
      # Mons should only be allowed on the same node for test environments where data loss is acceptable.
      allowMultiplePerNode: false
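      # A production-shaped variant of this block, following the guidance in the
      # comments above (three mons, still one per node; sketch only):
      #   count: 3
      #   allowMultiplePerNode: false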
    mgr:
      # When higher availability of the mgr is needed, increase the count to 2.
      # In that case, one mgr will be active and one on standby. When Ceph updates which
      # mgr is active, Rook will update the mgr services to match the active mgr.
      count: 1
      #count: 2
      allowMultiplePerNode: false
      modules:
        # Several modules should not need to be included in this list. The "dashboard" and "monitoring" modules
        # are already enabled by other settings in the cluster CR.
        - name: pg_autoscaler
          enabled: true
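        # Additional ceph-mgr modules can be toggled with further list entries.
        # A hedged sketch ("rook" is the standard ceph-mgr orchestrator module;
        # enabling it here is illustrative, not part of this config):
        #   - name: rook
        #     enabled: true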
    # enable the ceph dashboard for viewing cluster status
    dashboard:
      enabled: true
      # serve the dashboard under a subpath (useful when you are accessing the dashboard via a reverse proxy)
      # urlPrefix: /ceph-dashboard
      # serve the dashboard at the given port.
      # port: 8443
      #port: 8080
      # Serve the dashboard using SSL (if using ingress to expose the dashboard and `ssl: true` you need to set
      # the corresponding "backend protocol" annotation(s) for your ingress controller of choice)
      ssl: true
      #ssl: false
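      # When a reverse proxy terminates TLS itself, a plain-HTTP dashboard under
      # a subpath is a common alternative. A sketch using only the urlPrefix/port
      # knobs commented above (values illustrative):
      #   enabled: true
      #   urlPrefix: /ceph-dashboard
      #   port: 8080
      #   ssl: false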
    # host networking exposes the Ceph daemons directly on the node IPs,
    # allowing external access by consumer k8s clusters
    network:
      provider: host
      # provider: multus
      # selectors:
      #   public: rook-ceph/rook-public-nw
      #   cluster: rook-ceph/rook-cluster-nw
    removeOSDsIfOutAndSafeToRemove: true
    resources:
      mgr:
        limits:
          cpu: 20000m
          memory: 20000Mi
        requests:
          cpu: "50m"
          # memory: "512Mi"
      mon:
        limits:
          cpu: 20000m
          memory: 20000Mi
        requests:
          cpu: "50m"
          # memory: "1Gi"
      osd:
        limits:
          cpu: 20000m
          memory: 20000Mi
        requests:
          cpu: "50m"
          # memory: "4Gi"
      # prepareosd:
      #   # limits: It is not recommended to set limits on the OSD prepare job
      #   # since it's a one-time burst for memory that must be allowed to
      #   # complete without an OOM kill
      #   requests:
      #     cpu: "250m"
      #     memory: "50Mi"
      mgr-sidecar:
        limits:
          cpu: 20000m
          memory: 20000Mi
        requests:
          cpu: "50m"
          # memory: "40Mi"
      crashcollector:
        limits:
          cpu: 20000m
          memory: 20000Mi
        requests:
          cpu: "50m"
          # memory: "60Mi"
      logcollector:
        limits:
          cpu: 20000m
          memory: 20000Mi
        requests:
          cpu: "50m"
          # memory: "100Mi"
      cleanup:
        limits:
          cpu: 20000m
          memory: 20000Mi
        requests:
          cpu: "50m"
          # memory: "100Mi"
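    # The 20-CPU / ~20Gi limits above are far beyond what these daemons use, so
    # they appear to leave the pods effectively uncapped while keeping the limit
    # fields populated. A hedged sketch of tighter, production-leaning values
    # for one daemon (numbers illustrative, not from this file):
    #   mon:
    #     limits:
    #       cpu: "1000m"
    #       memory: "2Gi"
    #     requests:
    #       cpu: "100m"
    #       memory: "1Gi"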
  cephFileSystems:
    - name: cephfs
      # see https://github.com/rook/rook/blob/master/Documentation/CRDs/Shared-Filesystem/ceph-filesystem-crd.md#filesystem-settings for available configuration
      spec:
        metadataPool:
          replicated:
            size: 1
            #size: 2
            #size: 3
        dataPools:
          - failureDomain: osd
            replicated:
              size: 1
              #size: 2
              #size: 3
            # Optional and highly recommended, 'data0' by default, see https://github.com/rook/rook/blob/master/Documentation/CRDs/Shared-Filesystem/ceph-filesystem-crd.md#pools
            name: data0
        metadataServer:
          activeCount: 1
          activeStandby: true
          resources:
            limits:
              memory: 20000Mi
              cpu: 20000m
            requests:
              cpu: "500m"
              memory: "4Gi"
          priorityClassName: system-cluster-critical
      storageClass:
        enabled: true
        isDefault: true
        name: cephfs
        # (Optional) specify a data pool to use, must be the name of one of the data pools above, 'data0' by default
        pool: data0
        reclaimPolicy: Delete
        allowVolumeExpansion: true
        mountOptions: []
        # see https://github.com/rook/rook/blob/master/Documentation/ceph-filesystem.md#provision-storage for available configuration
        parameters:
          # The secrets contain Ceph admin credentials.
          csi.storage.k8s.io/provisioner-secret-name: rook-csi-cephfs-provisioner
          csi.storage.k8s.io/provisioner-secret-namespace: rook-ceph
          csi.storage.k8s.io/controller-expand-secret-name: rook-csi-cephfs-provisioner
          csi.storage.k8s.io/controller-expand-secret-namespace: rook-ceph
          csi.storage.k8s.io/node-stage-secret-name: rook-csi-cephfs-node
          csi.storage.k8s.io/node-stage-secret-namespace: rook-ceph
          # Specify the filesystem type of the volume. If not specified, csi-provisioner
          # will set default as `ext4`. Note that `xfs` is not recommended due to potential deadlock
          # in hyperconverged settings where the volume is mounted on the same node as the osds.
          csi.storage.k8s.io/fstype: ext4
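  # A hedged sketch of a PVC consuming the cephfs StorageClass above (name and
  # size are illustrative). CephFS supports ReadWriteMany, which is the usual
  # reason to pick it over RBD:
  #   apiVersion: v1
  #   kind: PersistentVolumeClaim
  #   metadata:
  #     name: shared-data
  #   spec:
  #     storageClassName: cephfs
  #     accessModes: ["ReadWriteMany"]
  #     resources:
  #       requests:
  #         storage: 10Gi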
  cephBlockPools:
    - name: ceph-rbd
      # see https://github.com/rook/rook/blob/master/Documentation/CRDs/Block-Storage/ceph-block-pool-crd.md#spec for available configuration
      spec:
        failureDomain: osd
        replicated:
          size: 1
          #size: 2
          #size: 3
      storageClass:
        enabled: true
        name: ceph-rbd
        isDefault: false
        reclaimPolicy: Delete
        allowVolumeExpansion: true
        mountOptions: []
        # see https://github.com/rook/rook/blob/master/Documentation/ceph-block.md#provision-storage for available configuration
        parameters:
          # (optional) mapOptions is a comma-separated list of map options.
          # For krbd options refer
          # https://docs.ceph.com/docs/master/man/8/rbd/#kernel-rbd-krbd-options
          # For nbd options refer
          # https://docs.ceph.com/docs/master/man/8/rbd-nbd/#options
          # mapOptions: lock_on_read,queue_depth=1024
          # (optional) unmapOptions is a comma-separated list of unmap options.
          # For krbd options refer
          # https://docs.ceph.com/docs/master/man/8/rbd/#kernel-rbd-krbd-options
          # For nbd options refer
          # https://docs.ceph.com/docs/master/man/8/rbd-nbd/#options
          # unmapOptions: force
          # RBD image format. Defaults to "2".
          imageFormat: "2"
          # RBD image features, equivalent to OR'd bitfield value: 63
          # Available for imageFormat: "2". Older releases of CSI RBD
          # support only the `layering` feature. The Linux kernel (KRBD) supports the
          # full feature complement as of 5.4.
          imageFeatures: layering
          # These secrets contain Ceph admin credentials.
          csi.storage.k8s.io/provisioner-secret-name: rook-csi-rbd-provisioner
          csi.storage.k8s.io/provisioner-secret-namespace: rook-ceph
          csi.storage.k8s.io/controller-expand-secret-name: rook-csi-rbd-provisioner
          csi.storage.k8s.io/controller-expand-secret-namespace: rook-ceph
          csi.storage.k8s.io/node-stage-secret-name: rook-csi-rbd-node
          csi.storage.k8s.io/node-stage-secret-namespace: rook-ceph
          # Specify the filesystem type of the volume. If not specified, csi-provisioner
          # will set default as `ext4`. Note that `xfs` is not recommended due to potential deadlock
          # in hyperconverged settings where the volume is mounted on the same node as the osds.
          csi.storage.k8s.io/fstype: ext4
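  # The matching sketch for the ceph-rbd StorageClass (name and size again
  # illustrative). RBD volumes are block devices, so ReadWriteOnce is the
  # typical access mode:
  #   apiVersion: v1
  #   kind: PersistentVolumeClaim
  #   metadata:
  #     name: app-data
  #   spec:
  #     storageClassName: ceph-rbd
  #     accessModes: ["ReadWriteOnce"]
  #     resources:
  #       requests:
  #         storage: 10Gi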
  cephObjectStores:
    - name: ceph-objectstore
      # see https://github.com/rook/rook/blob/master/Documentation/CRDs/Object-Storage/ceph-object-store-crd.md#object-store-settings for available configuration
      spec:
        metadataPool:
          failureDomain: osd
          replicated:
            size: 1
            #size: 2
            #size: 3
        dataPool:
          failureDomain: osd
          replicated:
            size: 1
            #size: 2
            #size: 3
          # erasureCoded:
          #   dataChunks: 2
          #   codingChunks: 1
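          # Sizing note for the commented erasure-coded alternative: replicated
          # size 3 stores every object 3x, while EC with 2 data + 1 coding
          # chunks stores (2+1)/2 = 1.5x the raw data and still survives the
          # loss of any one chunk. With failureDomain: osd this needs at least
          # 3 OSDs.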
        preservePoolsOnDelete: true
        gateway:
          port: 80
          resources:
            limits:
              memory: 20000Mi
              cpu: 20000m
            # requests:
            #   cpu: "50m"
            #   memory: "1Gi"
            # resources:
            #   limits:
            #     cpu: "1000m"
            #     memory: "2Gi"
            requests:
              cpu: "500m"
              memory: "1Gi"
          # securePort: 443
          # sslCertificateRef:
          instances: 1
          priorityClassName: system-cluster-critical
        healthCheck:
          bucket:
            interval: 60s
      storageClass:
        enabled: false
        name: ceph-bucket
        reclaimPolicy: Delete
        # see https://github.com/rook/rook/blob/master/Documentation/ceph-object-bucket-claim.md#storageclass for available configuration
        parameters:
          # note: objectStoreNamespace and objectStoreName are configured by the chart
          region: us-east-1
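  # Buckets on this object store are requested with an ObjectBucketClaim, usable
  # once the ceph-bucket StorageClass above is enabled. A hedged sketch (claim
  # and bucket names are illustrative):
  #   apiVersion: objectbucket.io/v1alpha1
  #   kind: ObjectBucketClaim
  #   metadata:
  #     name: my-bucket
  #   spec:
  #     generateBucketName: my-bucket
  #     storageClassName: ceph-bucket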
  ingress:
    dashboard:
      annotations:
        cert-manager.io/issuer: "cluster-adcs-issuer" # use the specific name of your issuer
        cert-manager.io/issuer-kind: "ClusterAdcsIssuer" # AdcsIssuer or ClusterAdcsIssuer
        cert-manager.io/issuer-group: "adcs.certmanager.csf.nokia.com"
        # If the dashboard has ssl: true, the following makes sure the NGINX Ingress controller can expose the dashboard correctly
        nginx.ingress.kubernetes.io/backend-protocol: "HTTPS"
        nginx.ingress.kubernetes.io/server-snippet: |
          proxy_ssl_verify off;
      tls:
        - hosts:
            - dashboard.storage.k.home.net
          secretName: dashboard.storage.k.home.net-tls
      host:
        name: dashboard.storage.k.home.net
      ingressClassName: nginx
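  # Once the ingress is up, the dashboard login user is `admin` and the password
  # is generated by Rook at install time. A hedged sketch for retrieving it
  # (secret name per the Rook docs, assuming the rook-ceph namespace):
  #   kubectl -n rook-ceph get secret rook-ceph-dashboard-password \
  #     -o jsonpath="{['data']['password']}" | base64 --decode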