Using Ceph storage with Kubernetes
Ceph provides the underlying storage. CephFS supports all three Kubernetes PV access modes (ReadWriteOnce, ReadOnlyMany, ReadWriteMany), while RBD supports only ReadWriteOnce and ReadOnlyMany.
Dynamic provisioning automatically creates PVs for you: however much space a claim requests, a PV of that size is created. Kubernetes creates the PV on demand; when a PVC is created, the API goes through the referenced storage class to find or create a matching PV.
With static provisioning, by contrast, we have to create PVs by hand, and if there are not enough resources and no suitable PV can be found, the Pod stays in Pending. Dynamic provisioning is implemented by the StorageClass object: it declares which storage backend to use, connects to it, and creates PVs automatically, as sketched below.
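For illustration only, this is roughly what a PVC that triggers dynamic provisioning looks like; the names demo-claim and demo-sc are placeholders, and concrete RBD and CephFS versions follow later in this article:
apiVersion: v1
kind: PersistentVolumeClaim
metadata:
  name: demo-claim              # hypothetical name, for illustration only
spec:
  accessModes:
    - ReadWriteOnce             # RBD supports RWO/ROX; CephFS additionally supports RWX
  storageClassName: demo-sc     # the StorageClass that will create the PV automatically
  resources:
    requests:
      storage: 1Gi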
Using Ceph RBD as a persistent data volume
Configure rbd-provisioner
1. Write the YAML file
[root@k8s-master ~]# cat >external-storage-rbd-provisioner.yaml<<EOF
apiVersion: v1
kind: ServiceAccount
metadata:
  name: rbd-provisioner
  namespace: kube-system
---
kind: ClusterRole
apiVersion: rbac.authorization.k8s.io/v1
metadata:
  name: rbd-provisioner
rules:
  - apiGroups: [""]
    resources: ["persistentvolumes"]
    verbs: ["get", "list", "watch", "create", "delete"]
  - apiGroups: [""]
    resources: ["persistentvolumeclaims"]
    verbs: ["get", "list", "watch", "update"]
  - apiGroups: ["storage.k8s.io"]
    resources: ["storageclasses"]
    verbs: ["get", "list", "watch"]
  - apiGroups: [""]
    resources: ["events"]
    verbs: ["create", "update", "patch"]
  - apiGroups: [""]
    resources: ["endpoints"]
    verbs: ["get", "list", "watch", "create", "update", "patch"]
  - apiGroups: [""]
    resources: ["services"]
    resourceNames: ["kube-dns"]
    verbs: ["list", "get"]
---
kind: ClusterRoleBinding
apiVersion: rbac.authorization.k8s.io/v1
metadata:
  name: rbd-provisioner
subjects:
  - kind: ServiceAccount
    name: rbd-provisioner
    namespace: kube-system
roleRef:
  kind: ClusterRole
  name: rbd-provisioner
  apiGroup: rbac.authorization.k8s.io
---
apiVersion: rbac.authorization.k8s.io/v1
kind: Role
metadata:
  name: rbd-provisioner
  namespace: kube-system
rules:
  - apiGroups: [""]
    resources: ["secrets"]
    verbs: ["get"]
---
apiVersion: rbac.authorization.k8s.io/v1
kind: RoleBinding
metadata:
  name: rbd-provisioner
  namespace: kube-system
roleRef:
  apiGroup: rbac.authorization.k8s.io
  kind: Role
  name: rbd-provisioner
subjects:
  - kind: ServiceAccount
    name: rbd-provisioner
    namespace: kube-system
---
apiVersion: apps/v1
kind: Deployment
metadata:
  name: rbd-provisioner
  namespace: kube-system
spec:
  selector:
    matchLabels:
      app: rbd-provisioner
  replicas: 1
  strategy:
    type: Recreate
  template:
    metadata:
      labels:
        app: rbd-provisioner
    spec:
      containers:
        - name: rbd-provisioner
          image: "registry.cn-chengdu.aliyuncs.com/ives/rbd-provisioner:v2.0.0-k8s1.11"
          env:
            - name: PROVISIONER_NAME
              value: ceph.com/rbd
      serviceAccount: rbd-provisioner
EOF
2. Create the resources
[root@k8s-master ~]# kubectl apply -f external-storage-rbd-provisioner.yaml
[root@k8s-master ~]# kubectl get pods -n kube-system |grep rbd
rbd-provisioner-7c77dcfd67-9xv2m 1/1 Running 0 59s
Configure the StorageClass
When a Pod is created, kubelet needs the rbd command to map and mount the Ceph image backing the PV, so the Ceph client package ceph-common must be installed on all Kubernetes worker nodes. Also copy Ceph's ceph.client.admin.keyring and ceph.conf files to /etc/ceph on the master (a sketch of that copy step is shown below).
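A minimal sketch of the copy step, assuming the files are fetched over SSH from ceph_node1 (host name and destination directory are assumptions; adjust to your environment):
[root@k8s-master ~]# mkdir -p /etc/ceph
[root@k8s-master ~]# scp root@ceph_node1:/etc/ceph/ceph.conf /etc/ceph/
[root@k8s-master ~]# scp root@ceph_node1:/etc/ceph/ceph.client.admin.keyring /etc/ceph/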
1. Install ceph-common (on all Kubernetes worker nodes)
# yum -y install ceph-common
2. Create an OSD pool (on a Ceph mon or admin node)
[root@ceph_node1 ~]# ceph osd pool create kube 8
pool 'kube' created
[root@ceph_node1 ~]# ceph osd pool ls
kube
3. Create a Ceph user for Kubernetes access (on a Ceph mon or admin node)
[root@ceph_node1 ~]# ceph auth get-or-create client.kube mon 'allow r' osd 'allow class-read object_prefix rbd_children, allow rwx pool=kube' -o ceph.client.kube.keyring
4. Retrieve the keys (on a Ceph mon or admin node)
[root@ceph_node1 ~]# ceph auth get-key client.admin
AQCzcPFeYnOoABAATaM1Wt8tMgvYTQjj6YEuVg==
[root@ceph_node1 ~]# ceph auth get-key client.kube
AQC5+vJehk7XIRAAr9mtGFHlUSfT7yQMANeWaw==
5. Create the admin secret (on the Kubernetes master)
# Replace CEPH_ADMIN_SECRET with the key obtained for client.admin
[root@k8s-master ~]# export CEPH_ADMIN_SECRET='AQCzcPFeYnOoABAATaM1Wt8tMgvYTQjj6YEuVg=='
[root@k8s-master ~]# kubectl create secret generic ceph-secret --type="kubernetes.io/rbd" \
--from-literal=key=$CEPH_ADMIN_SECRET \
--namespace=kube-system
6. In the default namespace, create the secret that PVCs use to access Ceph (on the Kubernetes master)
# Replace CEPH_KUBE_SECRET with the key obtained for client.kube
[root@k8s-master ~]# export CEPH_KUBE_SECRET='AQC5+vJehk7XIRAAr9mtGFHlUSfT7yQMANeWaw=='
[root@k8s-master ~]# kubectl create secret generic ceph-user-secret --type="kubernetes.io/rbd" \
--from-literal=key=$CEPH_KUBE_SECRET \
--namespace=default
7. View the secrets
[root@k8s-master ~]# kubectl get secret ceph-user-secret -o yaml
[root@k8s-master ~]# kubectl get secret ceph-secret -n kube-system -o yaml
8. Configure the StorageClass
[root@k8s-master ~]# cat >storageclass-ceph-rdb.yaml<<EOF
kind: StorageClass
apiVersion: storage.k8s.io/v1
metadata:
  name: dynamic-ceph-rdb
provisioner: ceph.com/rbd
parameters:
  monitors: 192.168.3.27:6789,192.168.3.60:6789,192.168.3.95:6789
  adminId: admin
  adminSecretName: ceph-secret
  adminSecretNamespace: kube-system
  pool: kube
  userId: kube
  userSecretName: ceph-user-secret
  fsType: ext4
  imageFormat: "2"
  imageFeatures: "layering"
EOF
9. Create the StorageClass
[root@k8s-master ~]# kubectl apply -f storageclass-ceph-rdb.yaml
10. Verify
[root@k8s-master ~]# kubectl get sc
Test it out
1. Create a test PVC
[root@k8s-master ~]# cat >ceph-rdb-pvc-test.yaml<<EOF
kind: PersistentVolumeClaim
apiVersion: v1
metadata:
  name: ceph-rdb-claim
spec:
  accessModes:
    - ReadWriteOnce
  storageClassName: dynamic-ceph-rdb
  resources:
    requests:
      storage: 2Gi
EOF
[root@k8s-master ~]# kubectl apply -f ceph-rdb-pvc-test.yaml
persistentvolumeclaim/ceph-rdb-claim created
2. View the PVC and PV
[root@k8s-master ~]# kubectl get pvc
NAME STATUS VOLUME CAPACITY ACCESS MODES STORAGECLASS AGE
ceph-rdb-claim Bound pvc-bd2363f1-a841-46d0-ad54-99267173bc04 2Gi RWO dynamic-ceph-rdb 16s
[root@k8s-master ~]# kubectl get pv
NAME CAPACITY ACCESS MODES RECLAIM POLICY STATUS CLAIM STORAGECLASS REASON AGE
pvc-bd2363f1-a841-46d0-ad54-99267173bc04 2Gi RWO Delete Bound default/ceph-rdb-claim dynamic-ceph-rdb 29s
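Optionally, you can also confirm on the Ceph side that the provisioner created an RBD image in the kube pool; this check is an extra step not shown in the original procedure:
[root@ceph_node1 ~]# rbd ls -p kube
You should see an image named following the kubernetes-dynamic-pvc-<uuid> pattern.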
3. Write an nginx Pod manifest for testing
[root@k8s-master ~]# cat >nginx-pod.yaml<<EOF
apiVersion: v1
kind: Pod
metadata:
  name: nginx-pod1
  labels:
    name: nginx-pod1
spec:
  containers:
    - name: nginx-pod1
      image: nginx:alpine
      ports:
        - name: web
          containerPort: 80
      volumeMounts:
        - name: ceph-rdb
          mountPath: /usr/share/nginx/html
  volumes:
    - name: ceph-rdb
      persistentVolumeClaim:
        claimName: ceph-rdb-claim
EOF
4. Create the Pod and check it
[root@k8s-master ~]# kubectl apply -f nginx-pod.yaml
pod/nginx-pod1 created
[root@k8s-master ~]# kubectl get pods
NAME READY STATUS RESTARTS AGE
nginx-pod1 1/1 Running 0 2m25s
[root@k8s-master ~]# kubectl get pods -o wide
NAME READY STATUS RESTARTS AGE IP NODE NOMINATED NODE READINESS GATES
nginx-pod1 1/1 Running 0 2m34s 10.244.1.5 k8s-node1 <none> <none>
5. Modify the page content
[root@k8s-master ~]# kubectl exec -it nginx-pod1 -- /bin/sh -c 'echo Hello World from Ceph RBD!!! > /usr/share/nginx/html/index.html'
6. Access test
[root@k8s-master ~]# POD_IP=$(kubectl get pods -o wide |grep nginx-pod1 |awk '{print $(NF-3)}')
[root@k8s-master ~]# curl $POD_IP
Hello World from Ceph RBD!!!
7. Clean up
[root@k8s-master ~]# kubectl delete -f nginx-pod.yaml
[root@k8s-master ~]# kubectl delete -f ceph-rdb-pvc-test.yaml
Using CephFS as a persistent data volume
Create the CephFS pools on the Ceph side
1. Create two pools, one for data and one for metadata, on a Ceph mon or admin node (this is a test setup, so only 8 PGs are used per pool)
[root@ceph_node1 ~]# ceph osd pool create fs_data 8
pool 'fs_data' created
[root@ceph_node1 ~]# ceph osd pool create fs_metadata 8
pool 'fs_metadata' created
2. Create a CephFS file system (on a Ceph mon or admin node)
[root@ceph_node1 ~]# ceph fs new cephfs fs_metadata fs_data
new fs with metadata pool 8 and data pool 7
3. Verify
[root@ceph_node1 ~]# ceph fs ls
name: cephfs, metadata pool: fs_metadata, data pools: [fs_data ]
Configure cephfs-provisioner
Kubernetes does not ship an official dynamic provisioner for CephFS, so the community-provided cephfs-provisioner is used.
1. Write the YAML file
[root@k8s-master ~]# cat >external-storage-cephfs-provisioner.yaml<<EOF
apiVersion: v1
kind: ServiceAccount
metadata:
  name: cephfs-provisioner
  namespace: kube-system
---
kind: ClusterRole
apiVersion: rbac.authorization.k8s.io/v1
metadata:
  name: cephfs-provisioner
rules:
  - apiGroups: [""]
    resources: ["persistentvolumes"]
    verbs: ["get", "list", "watch", "create", "delete"]
  - apiGroups: [""]
    resources: ["persistentvolumeclaims"]
    verbs: ["get", "list", "watch", "update"]
  - apiGroups: ["storage.k8s.io"]
    resources: ["storageclasses"]
    verbs: ["get", "list", "watch"]
  - apiGroups: [""]
    resources: ["events"]
    verbs: ["create", "update", "patch"]
  - apiGroups: [""]
    resources: ["endpoints"]
    verbs: ["get", "list", "watch", "create", "update", "patch"]
  - apiGroups: [""]
    resources: ["secrets"]
    verbs: ["create", "get", "delete"]
---
kind: ClusterRoleBinding
apiVersion: rbac.authorization.k8s.io/v1
metadata:
  name: cephfs-provisioner
subjects:
  - kind: ServiceAccount
    name: cephfs-provisioner
    namespace: kube-system
roleRef:
  kind: ClusterRole
  name: cephfs-provisioner
  apiGroup: rbac.authorization.k8s.io
---
apiVersion: rbac.authorization.k8s.io/v1
kind: Role
metadata:
  name: cephfs-provisioner
  namespace: kube-system
rules:
  - apiGroups: [""]
    resources: ["secrets"]
    verbs: ["create", "get", "delete"]
---
apiVersion: rbac.authorization.k8s.io/v1
kind: RoleBinding
metadata:
  name: cephfs-provisioner
  namespace: kube-system
roleRef:
  apiGroup: rbac.authorization.k8s.io
  kind: Role
  name: cephfs-provisioner
subjects:
  - kind: ServiceAccount
    name: cephfs-provisioner
    namespace: kube-system
---
apiVersion: apps/v1
kind: Deployment
metadata:
  name: cephfs-provisioner
  namespace: kube-system
spec:
  selector:
    matchLabels:
      app: cephfs-provisioner
  replicas: 1
  strategy:
    type: Recreate
  template:
    metadata:
      labels:
        app: cephfs-provisioner
    spec:
      containers:
        - name: cephfs-provisioner
          image: "registry.cn-chengdu.aliyuncs.com/ives/cephfs-provisioner:latest"
          env:
            - name: PROVISIONER_NAME
              value: ceph.com/cephfs
          command:
            - "/usr/local/bin/cephfs-provisioner"
          args:
            - "-id=cephfs-provisioner-1"
      serviceAccount: cephfs-provisioner
EOF
2. Create the resources
[root@k8s-master ~]# kubectl apply -f external-storage-cephfs-provisioner.yaml
[root@k8s-master ~]# kubectl get pods -n kube-system |grep cephfs
cephfs-provisioner-6d76ff6bd5-zzlmt 1/1 Running 0 28s
Configure the StorageClass
1. Retrieve the key (on a Ceph mon or admin node)
[root@ceph_node1 ~]# ceph auth get-key client.admin
AQCzcPFeYnOoABAATaM1Wt8tMgvYTQjj6YEuVg==
2. Create the admin secret (on the Kubernetes master)
# Replace CEPH_ADMIN_SECRET with the key obtained for client.admin
[root@k8s-master ~]# export CEPH_ADMIN_SECRET='AQCzcPFeYnOoABAATaM1Wt8tMgvYTQjj6YEuVg=='
[root@k8s-master ~]# kubectl create secret generic ceph-secret --type="kubernetes.io/rbd" \
--from-literal=key=$CEPH_ADMIN_SECRET \
--namespace=kube-system
3. View the secret
[root@k8s-master ~]# kubectl get secret ceph-secret -n kube-system -o yaml
4. Configure the StorageClass
[root@k8s-master ~]# cat >storageclass-cephfs.yaml<<EOF
kind: StorageClass
apiVersion: storage.k8s.io/v1
metadata:
  name: dynamic-cephfs
provisioner: ceph.com/cephfs
parameters:
  monitors: 192.168.3.27:6789,192.168.3.60:6789,192.168.3.95:6789
  adminId: admin
  adminSecretName: ceph-secret
  adminSecretNamespace: "kube-system"
  claimRoot: /volumes/kubernetes
EOF
5. Create the StorageClass
[root@k8s-master ~]# kubectl apply -f storageclass-cephfs.yaml
storageclass.storage.k8s.io/dynamic-cephfs created
6. Verify
[root@k8s-master ~]# kubectl get sc
NAME PROVISIONER RECLAIMPOLICY VOLUMEBINDINGMODE ALLOWVOLUMEEXPANSION AGE
dynamic-cephfs ceph.com/cephfs Delete Immediate false 17s
Test it out
1. Create a test PVC
[root@k8s-master ~]# cat >cephfs-pvc-test.yaml<<EOF
kind: PersistentVolumeClaim
apiVersion: v1
metadata:
  name: cephfs-claim
spec:
  accessModes:
    - ReadWriteMany
  storageClassName: dynamic-cephfs
  resources:
    requests:
      storage: 2Gi
EOF
[root@k8s-master ~]# kubectl apply -f cephfs-pvc-test.yaml
persistentvolumeclaim/cephfs-claim created
2. View the PVC and PV
[root@k8s-master ~]# kubectl get pvc
NAME STATUS VOLUME CAPACITY ACCESS MODES STORAGECLASS AGE
cephfs-claim Bound pvc-b8194840-2664-418c-bad1-df1a4b028f30 2Gi RWX dynamic-cephfs 3s
[root@k8s-master ~]# kubectl get pv |grep pvc
pvc-b8194840-2664-418c-bad1-df1a4b028f30 2Gi RWX Delete Bound default/cephfs-claim dynamic-cephfs 33s
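Optionally, you can verify on the Ceph side that the provisioner created a directory under the claimRoot. A minimal sketch, assuming the kernel CephFS client (mount.ceph) is available on the master and reusing the CEPH_ADMIN_SECRET variable exported earlier; the mount point /mnt is arbitrary:
[root@k8s-master ~]# mount -t ceph 192.168.3.27:6789:/ /mnt -o name=admin,secret=$CEPH_ADMIN_SECRET
[root@k8s-master ~]# ls /mnt/volumes/kubernetes
[root@k8s-master ~]# umount /mnt
A directory named after the dynamically provisioned volume (kubernetes-dynamic-pvc-<uuid>) should be listed.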
3. Write an nginx Pod manifest for testing
[root@k8s-master ~]# cat >nginx-pod.yaml<<EOF
apiVersion: v1
kind: Pod
metadata:
  name: nginx-pod2
  labels:
    name: nginx-pod2
spec:
  containers:
    - name: nginx-pod2
      image: nginx
      ports:
        - name: web
          containerPort: 80
      volumeMounts:
        - name: cephfs
          mountPath: /usr/share/nginx/html
  volumes:
    - name: cephfs
      persistentVolumeClaim:
        claimName: cephfs-claim
EOF
4. Create the Pod and check it
[root@k8s-master ~]# kubectl apply -f nginx-pod.yaml
pod/nginx-pod2 created
[root@k8s-master ~]# kubectl get pods
NAME READY STATUS RESTARTS AGE
nginx-pod2 1/1 Running 0 16s
[root@k8s-master ~]# kubectl get pods -o wide
NAME READY STATUS RESTARTS AGE IP NODE NOMINATED NODE READINESS GATES
nginx-pod2 1/1 Running 0 88s 10.244.1.7 k8s-node1 <none> <none>
5. Modify the page content
[root@k8s-master ~]# kubectl exec -it nginx-pod2 -- /bin/sh -c 'echo Hello World from CephFS!!! > /usr/share/nginx/html/index.html'
6. Access test
[root@k8s-master ~]# POD_IP=$(kubectl get pods -o wide |grep nginx-pod2 |awk '{print $(NF-3)}')
[root@k8s-master ~]# curl $POD_IP
Hello World from CephFS!!!
7. Clean up
[root@k8s-master ~]# kubectl delete -f nginx-pod.yaml
[root@k8s-master ~]# kubectl delete -f cephfs-pvc-test.yaml