Implemented with reference to the original blog post.
Local environment versions:
- kernel: 5.10.2-1.el7.elrepo.x86_64
- docker: 20.10.1
- system: CentOS Linux release 7.9.2009 (Core)
- ceph: 14.2.16
My steps in practice were as follows, for reference:
1. On a Ceph node, create the pool and set its quota:
ceph osd pool create kubernetes 512 512
ceph osd pool set-quota kubernetes max_objects 1000000
ceph osd pool set-quota kubernetes max_bytes 2T
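Optionally, verify the quota and initialize the pool for RBD use (the Ceph docs recommend rbd pool init on a freshly created pool):
ceph osd pool get-quota kubernetes
rbd pool init kubernetes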
2. Create a new user for Kubernetes and ceph-csi:
[root@node3 kubernetes]# ceph auth get-or-create client.kubernetes mon 'profile rbd' osd 'profile rbd pool=kubernetes' mgr 'profile rbd pool=kubernetes'
[client.kubernetes]
key = AQAi4PNfwri8IxAAvtIQgCNGIMQAjMytoTXeSw==
Generate /etc/ceph/ceph.client.kubernetes.keyring:
ceph auth get client.kubernetes >> /etc/ceph/ceph.client.kubernetes.keyring
Use scp to copy /etc/ceph/ceph.client.kubernetes.keyring to /etc/ceph/ on every k8s node.
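For example (k8s-node1 is a placeholder hostname):
scp /etc/ceph/ceph.client.kubernetes.keyring root@k8s-node1:/etc/ceph/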
3. Dump the monitor map to get the cluster fsid and monitor addresses:
ceph mon dump
Output:
dumped monmap epoch 3
epoch 3
fsid 1fc9f495-498c-4fe2-b3d5-80a041bc5c49
last_changed 2020-12-21 18:53:05.535581
created 2020-12-21 18:40:09.332030
min_mon_release 14 (nautilus)
0: [v2:192.168.1.1:3300/0,v1:192.168.1.1:6789/0] mon.node5
1: [v2:192.168.1.2:3300/0,v1:192.168.1.2:6789/0] mon.node4
2: [v2:192.168.1.3:3300/0,v1:192.168.1.3:6789/0] mon.node3
The fsid is the clusterID used below: 1fc9f495-498c-4fe2-b3d5-80a041bc5c49.
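The fsid can also be printed directly on any Ceph node:
ceph fsid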
4. The following images are required; push them to the local private registry in advance (the cephcsi image is used by several containers):
k8s.gcr.io/sig-storage/csi-provisioner:v2.0.4
k8s.gcr.io/sig-storage/csi-snapshotter:v3.0.2
k8s.gcr.io/sig-storage/csi-attacher:v3.0.2
k8s.gcr.io/sig-storage/csi-resizer:v1.0.1
k8s.gcr.io/sig-storage/csi-node-driver-registrar:v2.0.1
quay.io/cephcsi/cephcsi:v3.2.0
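A sketch of mirroring one of these images into the local registry; registry.local:5000 is a hypothetical registry address, and the same pull/tag/push sequence applies to each image in the list:
# registry.local:5000 is a placeholder for your private registry
docker pull quay.io/cephcsi/cephcsi:v3.2.0
docker tag quay.io/cephcsi/cephcsi:v3.2.0 registry.local:5000/cephcsi/cephcsi:v3.2.0
docker push registry.local:5000/cephcsi/cephcsi:v3.2.0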
5. Upload ceph-csi-3.2.0.tar.gz to the k8s master node and extract it:
tar zxvf ceph-csi-3.2.0.tar.gz
cd ceph-csi-3.2.0/deploy/rbd/kubernetes
6. Create a dedicated namespace for the ceph-csi deployment:
kubectl create ns ceph-csi
7. Create the ceph-csi ConfigMap. The clusterID and monitors values come from the step 3 output:
cat <<EOF > csi-config-map.yaml
---
apiVersion: v1
kind: ConfigMap
data:
  config.json: |-
    [
      {
        "clusterID": "1fc9f495-498c-4fe2-b3d5-80a041bc5c49",
        "monitors": [
          "192.168.1.1:6789",
          "192.168.1.2:6789",
          "192.168.1.3:6789"
        ]
      }
    ]
metadata:
  name: ceph-csi-config
EOF
Apply it:
kubectl -n ceph-csi apply -f csi-config-map.yaml
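To confirm it landed:
kubectl -n ceph-csi get configmap ceph-csi-config -o yaml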
8. Create the ceph-csi cephx Secret. The userID and userKey come from the step 2 output:
cat <<EOF > csi-rbd-secret.yaml
---
apiVersion: v1
kind: Secret
metadata:
  name: kubernetes-csi-rbd-secret
  namespace: ceph-csi
stringData:
  userID: kubernetes
  userKey: AQAi4PNfwri8IxAAvtIQgCNGIMQAjMytoTXeSw==
EOF
Apply it:
kubectl -n ceph-csi apply -f csi-rbd-secret.yaml
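To confirm (describe lists the keys without printing their values):
kubectl -n ceph-csi describe secret kubernetes-csi-rbd-secret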
9. Configure RBAC. The manifests default to the default namespace; switch them to ceph-csi:
sed -i "s/namespace: default/namespace: ceph-csi/g" $(grep -rl "namespace: default" ./)
sed -i -e "/^kind: ServiceAccount/{N;N;a\  namespace: ceph-csi
}" $(egrep -rl "^kind: ServiceAccount" ./)
kubectl apply -f csi-provisioner-rbac.yaml
kubectl apply -f csi-nodeplugin-rbac.yaml
kubectl create -f csi-provisioner-psp.yaml
kubectl create -f csi-nodeplugin-psp.yaml
10. Edit csi-rbdplugin-provisioner.yaml and csi-rbdplugin.yaml, replacing each image below with its copy in the local private registry. The images currently referenced:
[root@node3 kubernetes]# cat csi-rbdplugin.yaml |grep "image:"
image: k8s.gcr.io/sig-storage/csi-node-driver-registrar:v2.0.1
image: quay.io/cephcsi/cephcsi:v3.2.0
image: quay.io/cephcsi/cephcsi:v3.2.0
[root@node3 kubernetes]# cat csi-rbdplugin-provisioner.yaml |grep "image:"
image: k8s.gcr.io/sig-storage/csi-provisioner:v2.0.4
image: k8s.gcr.io/sig-storage/csi-snapshotter:v3.0.2
image: k8s.gcr.io/sig-storage/csi-attacher:v3.0.2
image: k8s.gcr.io/sig-storage/csi-resizer:v1.0.1
image: quay.io/cephcsi/cephcsi:v3.2.0
image: quay.io/cephcsi/cephcsi:v3.2.0
image: quay.io/cephcsi/cephcsi:v3.2.0
Apply them:
kubectl -n ceph-csi apply -f csi-rbdplugin-provisioner.yaml
kubectl -n ceph-csi apply -f csi-rbdplugin.yaml
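Check that the provisioner and nodeplugin pods come up:
kubectl -n ceph-csi get pods -o wide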
11. Generate the StorageClass manifest. The clusterID comes from the step 3 output:
cat <<EOF > csi-rbd-sc.yaml
---
apiVersion: storage.k8s.io/v1
kind: StorageClass
metadata:
  name: kubernetes-csi-rbd-sc
provisioner: rbd.csi.ceph.com
parameters:
  clusterID: 1fc9f495-498c-4fe2-b3d5-80a041bc5c49
  pool: kubernetes
  imageFeatures: layering
  csi.storage.k8s.io/provisioner-secret-name: kubernetes-csi-rbd-secret
  csi.storage.k8s.io/provisioner-secret-namespace: ceph-csi
  csi.storage.k8s.io/controller-expand-secret-name: kubernetes-csi-rbd-secret
  csi.storage.k8s.io/controller-expand-secret-namespace: ceph-csi
  csi.storage.k8s.io/node-stage-secret-name: kubernetes-csi-rbd-secret
  csi.storage.k8s.io/node-stage-secret-namespace: ceph-csi
  csi.storage.k8s.io/fstype: ext4
reclaimPolicy: Delete
allowVolumeExpansion: true
mountOptions:
  - discard
EOF
Create the StorageClass:
kubectl -n ceph-csi apply -f csi-rbd-sc.yaml
[root@node3 kubernetes]# kubectl get sc
NAME                    PROVISIONER        RECLAIMPOLICY   VOLUMEBINDINGMODE      ALLOWVOLUMEEXPANSION   AGE
hsa-ceph-class          ceph.com/rbd       Delete          Immediate              false                  7d23h
hsa-csi-rbd-sc          rbd.csi.ceph.com   Delete          Immediate              true                   5d20h
kubernetes-csi-rbd-sc   rbd.csi.ceph.com   Delete          Immediate              true                   18m
local (default)         openebs.io/local   Delete          WaitForFirstConsumer   false                  15d
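As a smoke test, a minimal PVC against the new StorageClass (the name rbd-test-pvc is illustrative):
cat <<EOF > test-pvc.yaml
---
apiVersion: v1
kind: PersistentVolumeClaim
metadata:
  name: rbd-test-pvc
spec:
  accessModes:
    - ReadWriteOnce
  resources:
    requests:
      storage: 1Gi
  storageClassName: kubernetes-csi-rbd-sc
EOF
kubectl apply -f test-pvc.yaml
kubectl get pvc rbd-test-pvc
If provisioning works, the PVC becomes Bound within a few seconds and a new image appears under rbd ls kubernetes.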



