This article covers:
- GlusterFS volume types in detail: what they are, how to create them, and how to use them
- Building GlusterFS storage for Kubernetes with gluster-kubernetes
Preface
In traditional operations, an administrator has to carve out space on the storage cluster by hand before it can be mounted into an application. Kubernetes dynamic provisioning, now a stable feature, supports on-demand provisioning for many storage backends, so capacity in the storage environment can be used more efficiently and consumed only as needed. This article introduces dynamic provisioning and uses GlusterFS as the example to show how a storage service is wired into Kubernetes.
Introduction
⚠️ If you already know the basics, feel free to skip ahead.
Building GlusterFS storage with gluster-kubernetes
The heketi documentation recommends deploying through gluster-kubernetes, and in production you can use the scripts it ships with directly, which keeps the complexity down. That is a personal opinion; your mileage may vary.
Environment
- k8s 1.14.1
- 4 nodes with volume: /dev/vdb
- 1 master
Notes ⚠️
1. You need at least three Kubernetes worker (slave) nodes to host the GlusterFS cluster, and each of those nodes needs at least one spare, unused disk.
2. Check whether the thin-provisioning kernel module is loaded with lsmod | grep thin, and run modprobe dm_thin_pool on every node in the cluster to load it if it is missing (see the sketch right after this list).
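A minimal sketch of that check, to be run on every node; the /etc/modules-load.d entry is an assumption for systemd-based distributions and is only needed if you want the module to survive reboots:
lsmod | grep thin                 # is the thin-provisioning module already loaded?
modprobe dm_thin_pool             # load it if the line above prints nothing
echo dm_thin_pool > /etc/modules-load.d/dm_thin_pool.conf   # optional: reload automatically after reboot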
Download the script
git clone https://github.com/gluster/gluster-kubernetes.git
cd xxx/gluster-kubernetes/deploy
Edit topology.json
cp topology.json.sample topology.json
Fill in your own hostnames (nodes), IPs, and data devices:
{
  "clusters": [
    {
      "nodes": [
        {
          "node": {
            "hostnames": {
              "manage": [
                "10.8.4.92"
              ],
              "storage": [
                "10.8.4.92"
              ]
            },
            "zone": 1
          },
          "devices": [
            "/dev/vdb"
          ]
        },
        {
          "node": {
            "hostnames": {
              "manage": [
                "10.8.4.93"
              ],
              "storage": [
                "10.8.4.93"
              ]
            },
            "zone": 1
          },
          "devices": [
            "/dev/vdb"
          ]
        },
        {
          "node": {
            "hostnames": {
              "manage": [
                "10.8.4.131"
              ],
              "storage": [
                "10.8.4.131"
              ]
            },
            "zone": 1
          },
          "devices": [
            "/dev/vdb"
          ]
        },
        {
          "node": {
            "hostnames": {
              "manage": [
                "10.8.4.132"
              ],
              "storage": [
                "10.8.4.132"
              ]
            },
            "zone": 1
          },
          "devices": [
            "/dev/vdb"
          ]
        }
      ]
    }
  ]
}
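Before moving on, it is worth a quick sanity check that the edited file is still valid JSON; a minimal sketch, assuming python3 is available on the machine (jq . topology.json works just as well if jq is installed):
python3 -m json.tool topology.json > /dev/null && echo "topology.json is valid JSON"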
Edit heketi.json.template
{
  "_port_comment": "Heketi Server Port Number",
  "port" : "8080",
  "_use_auth": "Enable JWT authorization. Please enable for deployment",
  "use_auth" : true,                      # enable user authentication
  "_jwt" : "Private keys for access",
  "jwt" : {
    "_admin" : "Admin has access to all APIs",
    "admin" : {
      "key" : "adminkey"                  # admin password
    },
    "_user" : "User only has access to /volumes endpoint",
    "user" : {
      "key" : "userkey"                   # user password
    }
  },
  "_glusterfs_comment": "GlusterFS Configuration",
  "glusterfs" : {
    "_executor_comment": "Execute plugin. Possible choices: mock, kubernetes, ssh",
    "executor" : "${HEKETI_EXECUTOR}",    # this walkthrough uses the kubernetes executor
    "_db_comment": "Database file name",
    "db" : "/var/lib/heketi/heketi.db",   # where heketi stores its database
    "kubeexec" : {
      "rebalance_on_expansion": true
    },
    "sshexec" : {
      "rebalance_on_expansion": true,
      "keyfile" : "/etc/heketi/private_key",
      "port" : "${SSH_PORT}",
      "user" : "${SSH_USER}",
      "sudo" : ${SSH_SUDO}
    }
  },
  "backup_db_to_kube_secret": false
}
Overview of the gk-deploy script
./gk-deploy -h
Key options:
-g, --deploy-gluster    # deploy GlusterFS itself as pods
-s, --ssh-keyfile       # manage gluster over SSH; path to the SSH key, e.g. /root/.ssh/id_rsa.pub
--admin-key ADMIN_KEY   # set the admin secret
--user-key USER_KEY     # set the user secret
--abort                 # tear down the heketi resources
vi gk-deploy
What the script mainly does:
- creates the Kubernetes resources
- loads the GlusterFS nodes and devices into heketi
- provisions and mounts the storage heketi itself needs
⚠️ To understand in depth what the script does, see https://www.kubernetes.org.cn/3893.html
# load the GlusterFS nodes and devices into heketi
heketi_cli="${CLI} exec -i ${heketi_pod} -- heketi-cli -s http://localhost:8080 --user admin --secret '${ADMIN_KEY}'"
load_temp=$(mktemp)
eval_output "${heketi_cli} topology load --json=/etc/heketi/topology.json 2>&1" | tee "${load_temp}"
Run the script
⚠️ The "Adding device" steps are fairly slow; be patient.
kubectl create ns glusterfs
./gk-deploy -y -n glusterfs -g --user-key=userkey --admin-key=adminkey
Using namespace "glusterfs".
Checking that heketi pod is not running ... OK
serviceaccount "heketi-service-account" created
clusterrolebinding "heketi-sa-view" created
node "10.8.4.92" labeled
node "10.8.4.93" labeled
node "10.8.4.131" labeled
node "10.8.4.132" labeled
daemonset "glusterfs" created
Waiting for GlusterFS pods to start ... OK
service "deploy-heketi" created
deployment "deploy-heketi" created
Waiting for deploy-heketi pod to start ... OK
Creating cluster ... ID: 4cfe35ce3cdc64b8afb8dbc46cad0e09
Creating node 10.8.4.92 ... ID: 1d323ddf243fd4d8c7f0ed58eb0e2c0ab
Adding device /dev/vdb ... OK
Creating node 10.8.4.93 ... ID: 12df23f339dj4jf8jdk3oodd31ba9e12c52
Adding device /dev/vdb ... OK
Creating node 10.8.4.131 ... ID: 1c529sd3ewewed1286e29e260668a1
Adding device /dev/vdb ... OK
Creating node 10.8.4.132 ... ID: 12ff323cd1121232323fddf9e260668a1
Adding device /dev/vdb ... OK
heketi topology loaded.
Saving heketi-storage.json
secret "heketi-storage-secret" created
endpoints "heketi-storage-endpoints" created
service "heketi-storage-endpoints" created
job "heketi-storage-copy-job" created
service "deploy-heketi" deleted
job "heketi-storage-copy-job" deleted
deployment "deploy-heketi" deleted
secret "heketi-storage-secret" deleted
service "heketi" created
deployment "heketi" created
Waiting for heketi pod to start ... OK
heketi is now running and accessible via http://10.10.23.148:8080/
Ready to create and provide GlusterFS volumes.
kubectl get po -o wide -n glusterfs
[root@k8s1-master1 deploy]# export HEKETI_CLI_SERVER=$(kubectl get svc/heketi -n glusterfs --template 'http://{{.spec.clusterIP}}:{{(index .spec.ports 0).port}}')
[root@k8s1-master1 deploy]# echo $HEKETI_CLI_SERVER
http://10.0.0.131:8080
[root@k8s1-master1 deploy]# curl $HEKETI_CLI_SERVER/hello
Hello from Heketi
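To confirm that authentication and the topology really landed, you can reuse heketi-cli inside the heketi pod, exactly as the deploy script does; <heketi-pod-name> below is a placeholder for the pod name reported by kubectl:
kubectl get po -n glusterfs | grep heketi
kubectl exec -n glusterfs <heketi-pod-name> -- heketi-cli -s http://localhost:8080 \
  --user admin --secret adminkey topology info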
Retrying after a failure
kubectl delete -f kube-templates/deploy-heketi-deployment.yaml
kubectl delete -f kube-templates/heketi-deployment.yaml
kubectl delete -f kube-templates/heketi-service-account.yaml
kubectl delete -f kube-templates/glusterfs-daemonset.yaml
# run on every node
rm -rf /var/lib/heketi
rm -rf /var/lib/glusterd
Problem: "Unable to add device". Try wiping /dev/vdb:
# run on every affected node
dd if=/dev/zero of=/dev/vdb bs=1k count=1
blockdev --rereadpt /dev/vdb
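If the disk was already initialized by an earlier heketi run, leftover LVM metadata can cause the same error; a hedged cleanup sketch for the affected node (<vg_name> stands for whatever vg_xxxx group vgs reports, if any):
pvs && vgs                        # look for a stray heketi volume group on /dev/vdb
vgremove -y <vg_name>             # remove it if present
pvremove /dev/vdb
wipefs -a /dev/vdb                # clear any remaining filesystem/LVM signatures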
Other troubleshooting
Peers should all be in the Connected state:
[root@k8s1-master2 ~]# kubectl exec -ti glusterfs-sb7l9 -n glusterfs bash
[root@k8s1-master2 /]# gluster peer status
Number of Peers: 3
Hostname: 10.8.4.93
Uuid: 52824c41-2fce-468a-b9c9-7c3827ed7a34
State: Peer in Cluster (Connected)
Hostname: 10.8.4.131
Uuid: 6a27b31f-dbd9-4de5-aefd-73c1ac9b81c5
State: Peer in Cluster (Connected)
Hostname: 10.8.4.132
Uuid: 7b7b53ff-af7f-49aa-b371-29dd1e784ad1
State: Peer in Cluster (Connected)
Heketi's own storage volume has been created and mounted:
[root@k8s1-master2 ~]# kubectl exec -ti glusterfs-sb7l9 -n glusterfs bash
[root@k8s1-master2 /]# gluster volume info
Volume Name: heketidbstorage
Type: Replicate
Volume ID: 02fd891f-dd43-4c1b-a2ba-87e1be7c706f
Status: Started
Snapshot Count: 0
Number of Bricks: 1 x 3 = 3
Transport-type: tcp
Bricks:
Brick1: 10.8.4.132:/var/lib/heketi/mounts/vg_5634269dc08edd964032871801920f1e/brick_b980d3f5ce7b1b4314c4b57c8aaf35fa/brick
Brick2: 10.8.4.93:/var/lib/heketi/mounts/vg_1d2cf75ab474dd63edb917a78096e429/brick_b375443687051038234e50fe3cd5fe12/brick
Brick3: 10.8.4.92:/var/lib/heketi/mounts/vg_a5d145795d59c51d2335153880049760/brick_e8f9ec722a235448fbf6730c25d7441a/brick
Options Reconfigured:
user.heketi.id: dfed68e6dca82c7cd5911c8ddda7746b
transport.address-family: inet
nfs.disable: on
performance.client-io-threads: off
Creating the StorageClass
vi storageclass-dev-glusterfs.yaml
---
apiVersion: v1
kind: Secret
metadata:
  name: heketi-secret
  namespace: glusterfs
data:
  # base64 encoded password. E.g.: echo -n "adminkey" | base64
  key: YWRtaW5rZXk=
type: kubernetes.io/glusterfs
---
apiVersion: storage.k8s.io/v1beta1
kind: StorageClass
metadata:
  name: glusterfs
provisioner: kubernetes.io/glusterfs
parameters:
  resturl: "http://10.8.4.91:42951"
  clusterid: "364a0a72b3343c537c20db5576ffd46c"
  restauthenabled: "true"
  restuser: "admin"
  secretNamespace: "glusterfs"
  secretName: "heketi-secret"
  #restuserkey: "adminkey"
  gidMin: "40000"
  gidMax: "50000"
  volumetype: "none"
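Apply the manifest and confirm both objects exist (the individual parameters are explained below):
kubectl apply -f storageclass-dev-glusterfs.yaml
kubectl get secret heketi-secret -n glusterfs
kubectl get storageclass glusterfs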
Parameter overview
- resturl: the heketi server address
- clusterid: obtained by exec-ing into the heketi pod (heketi-549c999b6f-5l8sp here) and running
  heketi-cli --user admin --secret adminkey cluster list
- restauthenabled: whether REST authentication is enabled
- restuser: the user to authenticate as
- secretName: the secret that holds that user's key
The volumetype parameter deserves a closer look.
- volumetype: disperse:4:2 requests a dispersed (erasure-coded) volume with 4 data bricks and 2 redundancy bricks. A hand-made equivalent on this 4-disk cluster (which only fits a 3 + 1 layout, i.e. redundancy 1) looks like:
gluster volume create gv2 disperse 4 redundancy 1 10.8.4.92:/var/lib/heketi/mounts/gv2 10.8.4.93:/var/lib/heketi/mounts/gv2 10.8.4.131:/var/lib/heketi/mounts/gv2 10.8.4.132:/var/lib/heketi/mounts/gv2
gluster volume start gv2
gluster volume info
The output:
Volume Name: gv2
Type: Disperse
Volume ID: e072f9fa-6139-4471-a163-0e0dde0265ef
Status: Started
Snapshot Count: 0
Number of Bricks: 1 x (3 + 1) = 4
Transport-type: tcp
Bricks:
Brick1: 10.8.4.92:/var/lib/heketi/mounts/gv2
Brick2: 10.8.4.93:/var/lib/heketi/mounts/gv2
Brick3: 10.8.4.131:/var/lib/heketi/mounts/gv2
Brick4: 10.8.4.132:/var/lib/heketi/mounts/gv2
Options Reconfigured:
transport.address-family: inet
nfs.disable: on
- volumetype: replicate:3 produces a 3-way replicated volume:
Volume Name: vol_d78f449dbeab2286267c7e1842086a8f
Type: Replicate
Volume ID: 02fd891f-dd43-4c1b-a2ba-87e1be7c706f
Status: Started
Snapshot Count: 0
Number of Bricks: 1 x 3 = 3
Transport-type: tcp
Bricks:
Brick1: 10.8.4.132:/var/lib/heketi/mounts/vg_5634269dc08edd964032871801920f1e/brick_b980d3f5ce7b1b4314c4b57c8aaf35fa/brick
Brick2: 10.8.4.93:/var/lib/heketi/mounts/vg_1d2cf75ab474dd63edb917a78096e429/brick_b375443687051038234e50fe3cd5fe12/brick
Brick3: 10.8.4.92:/var/lib/heketi/mounts/vg_a5d145795d59c51d2335153880049760/brick_e8f9ec722a235448fbf6730c25d7441a/brick
Options Reconfigured:
user.heketi.id: dfed68e6dca82c7cd5911c8ddda7746b
transport.address-family: inet
nfs.disable: on
performance.client-io-threads: off
- volumetype: none produces a plain distributed volume with no redundancy:
Volume Name: vol_e1b27d580cbe18a96b0fdf7cbfe69cc2
Type: Distribute
Volume ID: cb4a7e4f-3850-4809-b159-fc8000527d71
Status: Started
Snapshot Count: 0
Number of Bricks: 1
Transport-type: tcp
Bricks:
Brick1: 10.8.4.93:/var/lib/heketi/mounts/vg_1d2cf75ab474dd63edb917a78096e429/brick_8f62218753db589204b753295a318795/brick
Options Reconfigured:
user.heketi.id: e1b27d580cbe18a96b0fdf7cbfe69cc2
transport.address-family: inet
nfs.disable: on
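The same three durability types can be requested directly through heketi-cli, which is roughly what the provisioner asks for when it reads volumetype. A sketch, assuming heketi-cli is installed wherever HEKETI_CLI_SERVER was exported (otherwise exec into the heketi pod as shown earlier); the 1 GiB size is arbitrary:
# volumetype "replicate:3" -> a 3-way replicated volume
heketi-cli -s $HEKETI_CLI_SERVER --user admin --secret adminkey \
  volume create --size=1 --durability=replicate --replica=3
# volumetype "disperse:4:2" -> --disperse-data=4 --redundancy=2; this 4-disk cluster only fits 3+1
heketi-cli -s $HEKETI_CLI_SERVER --user admin --secret adminkey \
  volume create --size=1 --durability=disperse --disperse-data=3 --redundancy=1
# volumetype "none" -> a plain distributed volume
heketi-cli -s $HEKETI_CLI_SERVER --user admin --secret adminkey \
  volume create --size=1 --durability=none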
Creating a PVC
vi glusterfs-pv.yaml
---
kind: PersistentVolumeClaim
apiVersion: v1
metadata:
  name: glusterfs
  annotations:
    volume.beta.kubernetes.io/storage-class: "glusterfs"
spec:
  accessModes:
  - ReadWriteMany
  resources:
    requests:
      storage: 1Gi
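Apply the claim, wait for it to bind, then mount it from a pod to verify the whole chain; a minimal sketch in which the test pod name and the busybox image are only illustrative:
kubectl apply -f glusterfs-pv.yaml
kubectl get pvc glusterfs         # STATUS should become Bound once heketi provisions the volume
kubectl apply -f - <<'EOF'
apiVersion: v1
kind: Pod
metadata:
  name: glusterfs-test
spec:
  containers:
  - name: app
    image: busybox
    command: ["sh", "-c", "echo hello > /data/hello && sleep 3600"]
    volumeMounts:
    - name: data
      mountPath: /data
  volumes:
  - name: data
    persistentVolumeClaim:
      claimName: glusterfs
EOF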
Dear reader, choose whichever volume type fits your own situation. To dig further into the volume modes and how to use them, see 《GlusterFs卷类型分析及创建、使用(结合kubernetes集群分析)》.
Everything above was typed and tested by hand; if you run into problems, feel free to reach out.