apiVersion: v1
kind: Namespace
metadata:
  name: rook-ceph
---
apiVersion: v1
kind: ServiceAccount
metadata:
  name: rook-ceph-cluster
  namespace: rook-ceph
---
kind: Role
apiVersion: rbac.authorization.k8s.io/v1beta1
metadata:
  name: rook-ceph-cluster
  namespace: rook-ceph
rules:
- apiGroups: [""]
  resources: ["configmaps"]
  verbs: [ "get", "list", "watch", "create", "update", "delete" ]
---
# Allow the operator to create resources in this cluster's namespace
kind: RoleBinding
apiVersion: rbac.authorization.k8s.io/v1beta1
metadata:
  name: rook-ceph-cluster-mgmt
  namespace: rook-ceph
roleRef:
  apiGroup: rbac.authorization.k8s.io
  kind: ClusterRole
  name: rook-ceph-cluster-mgmt
subjects:
- kind: ServiceAccount
  name: rook-ceph-system
  namespace: rook-ceph-system
---
# Allow the pods in this namespace to work with configmaps
kind: RoleBinding
apiVersion: rbac.authorization.k8s.io/v1beta1
metadata:
  name: rook-ceph-cluster
  namespace: rook-ceph
roleRef:
  apiGroup: rbac.authorization.k8s.io
  kind: Role
  name: rook-ceph-cluster
subjects:
- kind: ServiceAccount
  name: rook-ceph-cluster
  namespace: rook-ceph
---
apiVersion: ceph.rook.io/v1beta1
kind: Cluster
metadata:
  name: rook-ceph
  namespace: rook-ceph
spec:
  cephVersion:
    # The container image used to launch the Ceph daemon pods (mon, mgr, osd, mds, rgw).
    # v12 is luminous, v13 is mimic, and v14 is nautilus.
    # RECOMMENDATION: In production, use a specific version tag instead of the general v13 flag, which pulls the latest
    # release and could result in different versions running within the cluster.
    # See tags available at https://hub.docker.com/r/ceph/ceph/tags/.
    image: ceph/ceph:v13
    # Whether to allow unsupported versions of Ceph. Currently only luminous and mimic are supported.
    # After nautilus is released, Rook will be updated to support nautilus.
    # Do not set to true in production.
    allowUnsupported: false
  # The path on the host where configuration files will be persisted. If not specified, a kubernetes emptyDir will be created (not recommended).
  # Important: if you reinstall the cluster, make sure you delete this directory from each host or else the mons will fail to start on the new cluster.
  # In Minikube, the '/data' directory is configured to persist across reboots. Use "/data/rook" in a Minikube environment.
  dataDirHostPath: /var/lib/rook
  # The service account under which to run the daemon pods in this cluster if the default account is not sufficient (OSDs)
  serviceAccount: rook-ceph-cluster
  # set the number of mons to be started
  # count defines how many ceph-mon daemons run; the default of three is fine here
  mon:
    count: 3
    allowMultiplePerNode: true
  # enable the ceph dashboard for viewing cluster status
  dashboard:
    enabled: true
    # serve the dashboard under a subpath (useful when you are accessing the dashboard via a reverse proxy)
    # urlPrefix: /ceph-dashboard
  network:
    # toggle to use hostNetwork
    # Using the host network might allow hosts outside the cluster to mount Ceph,
    # but I have not tried it; if you are interested, try setting this to true.
    # This cluster is only used from inside Kubernetes, so I leave it at false.
    hostNetwork: false
  # To control where various services will be scheduled by kubernetes, use the placement configuration sections below.
  # The example under 'all' would have all services scheduled on kubernetes nodes labeled with 'role=storage-node' and
  # tolerate taints with a key of 'storage-node'.
  placement:
  #  all:
  #    nodeAffinity:
  #      requiredDuringSchedulingIgnoredDuringExecution:
  #        nodeSelectorTerms:
  #        - matchExpressions:
  #          - key: role
  #            operator: In
  #            values:
  #            - storage-node
  #    podAffinity:
  #    podAntiAffinity:
  #    tolerations:
  #    - key: storage-node
  #      operator: Exists
  # The above placement information can also be specified for mon, osd, and mgr components
  #  mon:
  #  osd:
  #  mgr:
    # nodeAffinity: by matching node labels you can restrict a pod to be scheduled onto specific nodes.
    # Restricting placement is recommended so these pods do not wander around the cluster
    # (see the node-labeling example after this manifest).
    mon:
      nodeAffinity:
        requiredDuringSchedulingIgnoredDuringExecution:
          nodeSelectorTerms:
          - matchExpressions:
            - key: ceph-mon
              operator: In
              values:
              - enabled
    osd:
      nodeAffinity:
        requiredDuringSchedulingIgnoredDuringExecution:
          nodeSelectorTerms:
          - matchExpressions:
            - key: ceph-osd
              operator: In
              values:
              - enabled
    mgr:
      nodeAffinity:
        requiredDuringSchedulingIgnoredDuringExecution:
          nodeSelectorTerms:
          - matchExpressions:
            - key: ceph-mgr
              operator: In
              values:
              - enabled
  resources:
  # The requests and limits set here allow the mgr pod to use half of one CPU core and 1 gigabyte of memory
  #  mgr:
  #    limits:
  #      cpu: "500m"
  #      memory: "1024Mi"
  #    requests:
  #      cpu: "500m"
  #      memory: "1024Mi"
  # The above example requests/limits can also be added to the mon and osd components
  #  mon:
  #  osd:
  storage: # cluster level storage configuration and selection
    useAllNodes: false
    useAllDevices: false
    deviceFilter:
    location:
    config:
      # The default and recommended storeType is dynamically set to bluestore for devices and filestore for directories.
      # Set the storeType explicitly only if it is required not to use the default.
      # storeType: bluestore
      # databaseSizeMB: "1024" # this value can be removed for environments with normal sized disks (100 GB or larger)
      # journalSizeMB: "1024" # this value can be removed for environments with normal sized disks (20 GB or larger)
# Cluster level list of directories to use for storage. These values will be set for all nodes that have no `directories` set.
# directories:
# - path: /rook/storage-dir
# Individual nodes and their config can be specified as well, but 'useAllNodes' above must be set to false. Then, only the named
# nodes below will be used as storage resources. Each node's 'name' field should match their 'kubernetes.io/hostname' label.
    # The recommended disk configuration is as follows:
    # name: pick a node; the node name is the value of its kubernetes.io/hostname label, i.e. the name shown by kubectl get nodes
    # devices: the disks on that node to be set up as OSDs
    # - name: "sdb" means /dev/sdb will be set up as an OSD
    nodes:
    - name: "kube-node1"
      devices:
      - name: "sdb"
    - name: "kube-node2"
      devices:
      - name: "sdb"
    - name: "kube-node3"
      devices:
      - name: "sdb"
    # directories: # specific directories to use for storage can be specified for each node
    #   - path: "/rook/storage-dir"
    #   resources:
    #     limits:
    #       cpu: "500m"
    #       memory: "1024Mi"
    #     requests:
    #       cpu: "500m"
    #       memory: "1024Mi"
    # - name: "172.17.4.201"
    #   devices: # specific devices to use for storage can be specified for each node
    #   - name: "sdb"
    #   - name: "sdc"
    #   config: # configuration can be specified at the node level which overrides the cluster level config
    #     storeType: filestore
    # - name: "172.17.4.301"
    #   deviceFilter: "^sd."
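The nodeAffinity rules in the manifest above only take effect if the nodes carry the matching labels. A minimal sketch of labeling the three nodes used in this cluster; putting all three label keys on every node is an assumption made for simplicity, and you can pin mon/osd/mgr to different hosts instead:
# Label the nodes so the mon, osd, and mgr pods are scheduled where we expect them
kubectl label node kube-node1 ceph-mon=enabled ceph-osd=enabled ceph-mgr=enabled
kubectl label node kube-node2 ceph-mon=enabled ceph-osd=enabled ceph-mgr=enabled
kubectl label node kube-node3 ceph-mon=enabled ceph-osd=enabled ceph-mgr=enabled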
Start deploying Ceph
Deploy Ceph
kubectl apply -f cluster.yaml
# The cluster's resources are created in the rook-ceph namespace.
# Keep an eye on the pods in this namespace and you will see them being created in order.
kubectl -n rook-ceph get pod -o wide -w
# Once all of the pods are Running, you are done.
# Note which hosts the pods land on: they match the nodes we labeled.
kubectl -n rook-ceph get pod -o wide
Switch to the other hosts and check the disks
On kube-node1:
lsblk
On kube-node3:
lsblk
Configure the Ceph dashboard
Check which service the dashboard sits behind
kubectl -n rook-ceph get service
# You can see that the dashboard listens on port 8443.
Create a NodePort service so the dashboard can be reached from outside the cluster
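If you do not have dashboard-external-https.yaml at hand, it looks roughly like the sketch below, modeled on the Rook example manifest; the selector labels are an assumption and should match the mgr pods created by this cluster:
apiVersion: v1
kind: Service
metadata:
  name: rook-ceph-mgr-dashboard-external-https
  namespace: rook-ceph
  labels:
    app: rook-ceph-mgr
    rook_cluster: rook-ceph
spec:
  ports:
  - name: dashboard
    port: 8443
    protocol: TCP
    targetPort: 8443
  selector:
    app: rook-ceph-mgr
    rook_cluster: rook-ceph
  sessionAffinity: None
  type: NodePort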
kubectl apply -f dashboard-external-https.yaml
# Check which port the NodePort service was assigned
ss -tanl
kubectl -n rook-ceph get service
Find the dashboard login account and password
MGR_POD=`kubectl get pod -n rook-ceph | grep mgr | awk '{print $1}'`
kubectl -n rook-ceph logs $MGR_POD | grep password
apiVersion: ceph.rook.io/v1beta1
kind: Pool
metadata:
  # this name becomes the name of the Ceph pool once it is created
  name: replicapool
  namespace: rook-ceph
spec:
  replicated:
    # size is the number of copies of the data kept in the pool; 1 means no extra replicas are stored
    size: 1
  # failureDomain: the failure domain for data placement.
  # With host, each replica is placed on a different host;
  # with osd, each replica is placed on a different OSD.
  failureDomain: osd
---
apiVersion: storage.k8s.io/v1
kind: StorageClass
metadata:
  # the name of the StorageClass; this is the name a PVC references
  name: ceph
provisioner: ceph.rook.io/block
parameters:
  pool: replicapool
  # Specify the namespace of the rook cluster from which to create volumes.
  # If not specified, it will use `rook` as the default namespace of the cluster.
  # This is also the namespace where the cluster will be
  clusterNamespace: rook-ceph
  # Specify the filesystem type of the volume. If not specified, it will use `ext4`.
  fstype: xfs
# set the reclaim policy (here: Retain)
reclaimPolicy: Retain
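To use this StorageClass, a PVC simply references it by name. A minimal sketch; the claim name and requested size are placeholders:
apiVersion: v1
kind: PersistentVolumeClaim
metadata:
  name: ceph-demo-pvc            # hypothetical name, choose your own
spec:
  storageClassName: ceph         # must match the StorageClass name above
  accessModes:
  - ReadWriteOnce
  resources:
    requests:
      storage: 1Gi               # placeholder size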