k8s 安装 mongodb 分片(Sharding)+ 副本集(Replica Set)
- 1.安装环境
- 2.配置nfs并重启 (nfs 已经安装)
- 3.创建 PV 持久卷
- 4.部署 config server(配置服务器)
- 5.创建config 集群
- 6.部署 shard server(分片服务器)
- 7.部署 route server(mongos 路由服务器)
- 8.分片测试
1.安装环境
k8s 1.23. docker 20.10.12 centos 7
2.配置nfs并重启 (nfs 已经安装)
mkdir -p /data/k8s/mongodb/shard/pv1 &&
mkdir -p /data/k8s/mongodb/shard/pv2 &&
mkdir -p /data/k8s/mongodb/shard/pv3 &&
mkdir -p /data/k8s/mongodb/shard/pv4 &&
mkdir -p /data/k8s/mongodb/shard/pv5 &&
mkdir -p /data/k8s/mongodb/shard/pv6 &&
mkdir -p /data/k8s/mongodb/config/pv1 &&
mkdir -p /data/k8s/mongodb/config/pv2 &&
mkdir -p /data/k8s/mongodb/config/pv3
vi /etc/exports
#加入如下内容
/data/k8s/mongodb/shard/pv1 *(rw,sync,no_root_squash)
/data/k8s/mongodb/shard/pv2 *(rw,sync,no_root_squash)
/data/k8s/mongodb/shard/pv3 *(rw,sync,no_root_squash)
/data/k8s/mongodb/shard/pv4 *(rw,sync,no_root_squash)
/data/k8s/mongodb/shard/pv5 *(rw,sync,no_root_squash)
/data/k8s/mongodb/shard/pv6 *(rw,sync,no_root_squash)
/data/k8s/mongodb/config/pv1 *(rw,sync,no_root_squash)
/data/k8s/mongodb/config/pv2 *(rw,sync,no_root_squash)
/data/k8s/mongodb/config/pv3 *(rw,sync,no_root_squash)
#重启
systemctl restart rpcbind
systemctl restart nfs
验证
[root@k8smaster mongodb-shard]# exportfs -v
/data/k8s <world>(sync,wdelay,hide,no_subtree_check,sec=sys,rw,secure,no_root_squash,no_all_squash)
/opt/nfs/maven <world>(sync,wdelay,hide,no_subtree_check,sec=sys,rw,secure,no_root_squash,no_all_squash)
/data/k8s/www-data
<world>(sync,wdelay,hide,no_subtree_check,sec=sys,rw,secure,no_root_squash,no_all_squash)
/data/k8s/mongodb/shard/pv1
<world>(sync,wdelay,hide,no_subtree_check,sec=sys,rw,secure,no_root_squash,no_all_squash)
/data/k8s/mongodb/shard/pv2
<world>(sync,wdelay,hide,no_subtree_check,sec=sys,rw,secure,no_root_squash,no_all_squash)
/data/k8s/mongodb/shard/pv3
<world>(sync,wdelay,hide,no_subtree_check,sec=sys,rw,secure,no_root_squash,no_all_squash)
/data/k8s/mongodb/shard/pv4
<world>(sync,wdelay,hide,no_subtree_check,sec=sys,rw,secure,no_root_squash,no_all_squash)
/data/k8s/mongodb/shard/pv5
<world>(sync,wdelay,hide,no_subtree_check,sec=sys,rw,secure,no_root_squash,no_all_squash)
/data/k8s/mongodb/shard/pv6
<world>(sync,wdelay,hide,no_subtree_check,sec=sys,rw,secure,no_root_squash,no_all_squash)
/data/k8s/mongodb/config/pv1
<world>(sync,wdelay,hide,no_subtree_check,sec=sys,rw,secure,no_root_squash,no_all_squash)
/data/k8s/mongodb/config/pv2
<world>(sync,wdelay,hide,no_subtree_check,sec=sys,rw,secure,no_root_squash,no_all_squash)
/data/k8s/mongodb/config/pv3
<world>(sync,wdelay,hide,no_subtree_check,sec=sys,rw,secure,no_root_squash,no_all_squash)
[root@k8smaster mongodb-shard]#
3.创建 PV 持久卷
#创建shard的第1个PV
apiVersion: v1
kind: PersistentVolume
metadata:
name: mongodb-shard-pv1
labels:
app: mongodb
spec:
capacity:
storage: 10Gi
accessModes:
- ReadWriteOnce
storageClassName: course-nfs-storage
nfs:
server: 10.10.220.101
path: "/data/k8s/mongodb/shard/pv1"
persistentVolumeReclaimPolicy: Retain
---
#创建shard的第2个PV
apiVersion: v1
kind: PersistentVolume
metadata:
name: mongodb-shard-pv2
labels:
app: mongodb
spec:
capacity:
storage: 10Gi
accessModes:
- ReadWriteOnce
storageClassName: course-nfs-storage
nfs:
server: 10.10.220.101
path: "/data/k8s/mongodb/shard/pv2"
persistentVolumeReclaimPolicy: Retain
---
#创建shard的第3个PV
apiVersion: v1
kind: PersistentVolume
metadata:
name: mongodb-shard-pv3
labels:
app: mongodb
spec:
capacity:
storage: 10Gi
accessModes:
- ReadWriteOnce
storageClassName: course-nfs-storage
nfs:
server: 10.10.220.101
path: "/data/k8s/mongodb/shard/pv3"
persistentVolumeReclaimPolicy: Retain
---
#创建shard的第4个PV
apiVersion: v1
kind: PersistentVolume
metadata:
name: mongodb-shard-pv4
labels:
app: mongodb
spec:
capacity:
storage: 10Gi
accessModes:
- ReadWriteOnce
storageClassName: course-nfs-storage
nfs:
server: 10.10.220.101
path: "/data/k8s/mongodb/shard/pv4"
persistentVolumeReclaimPolicy: Retain
---
#创建shard的第5个PV
apiVersion: v1
kind: PersistentVolume
metadata:
name: mongodb-shard-pv5
labels:
app: mongodb
spec:
capacity:
storage: 10Gi
accessModes:
- ReadWriteOnce
storageClassName: course-nfs-storage
nfs:
server: 10.10.220.101
path: "/data/k8s/mongodb/shard/pv5"
persistentVolumeReclaimPolicy: Retain
---
#创建shard的第6个PV
apiVersion: v1
kind: PersistentVolume
metadata:
name: mongodb-shard-pv6
labels:
app: mongodb
spec:
capacity:
storage: 10Gi
accessModes:
- ReadWriteOnce
storageClassName: course-nfs-storage
nfs:
server: 10.10.220.101
path: "/data/k8s/mongodb/shard/pv6"
persistentVolumeReclaimPolicy: Retain
---
#创建config的第1个PV
apiVersion: v1
kind: PersistentVolume
metadata:
name: mongodb-config-pv1
labels:
app: mongodb
spec:
capacity:
storage: 1Gi
accessModes:
- ReadWriteOnce
storageClassName: mongodb-config-storage
nfs:
server: 10.10.220.101
path: "/data/k8s/mongodb/config/pv1"
persistentVolumeReclaimPolicy: Retain
---
#创建config的第2个PV
apiVersion: v1
kind: PersistentVolume
metadata:
name: mongodb-config-pv2
labels:
app: mongodb
spec:
capacity:
storage: 1Gi
accessModes:
- ReadWriteOnce
storageClassName: mongodb-config-storage
nfs:
server: 10.10.220.101
path: "/data/k8s/mongodb/config/pv2"
persistentVolumeReclaimPolicy: Retain
---
#创建config的第3个PV
apiVersion: v1
kind: PersistentVolume
metadata:
name: mongodb-config-pv3
labels:
app: mongodb
spec:
capacity:
storage: 1Gi
accessModes:
- ReadWriteOnce
storageClassName: mongodb-config-storage
nfs:
server: 10.10.220.101
path: "/data/k8s/mongodb/config/pv3"
persistentVolumeReclaimPolicy: Retain
查看创建结果
[root@k8smaster mongodb-shard]# kubectl get pv|grep mongo
mongodb-config-pv1 1Gi RWO Retain Available mongodb-config-storage 4d11h
mongodb-config-pv2 1Gi RWO Retain Available mongodb-config-storage 4d11h
mongodb-config-pv3 1Gi RWO Retain Available mongodb-config-storage 4d11h
mongodb-shard-pv1 10Gi RWO Retain Bound default/mongdb-config-persistent-storage-mongodb-config-2 course-nfs-storage 4d11h
mongodb-shard-pv2 10Gi RWO Retain Bound default/mongdb-config-persistent-storage-mongodb-config-1 course-nfs-storage 4d11h
mongodb-shard-pv3 10Gi RWO Retain Bound default/mongdb-config-persistent-storage-mongodb-config-0 course-nfs-storage 4d11h
mongodb-shard-pv4 10Gi RWO Retain Bound default/mongdb-shard-persistent-storage-mongodb-shard0-0 course-nfs-storage 4d11h
mongodb-shard-pv5 10Gi RWO Retain Bound default/mongdb-shard-persistent-storage-mongodb-shard1-0 course-nfs-storage 4d11h
mongodb-shard-pv6 10Gi RWO Retain Bound default/mongdb-shard-persistent-storage-mongodb-shard1-1 course-nfs-storage 4d11h
4.部署 config server(配置服务器)
apiVersion: apps/v1
kind: StatefulSet
metadata:
name: mongodb-config
spec:
selector:
matchLabels:
app: mongodb-config
serviceName: "mongodb-config-hs"
replicas: 3
template:
metadata:
labels:
app: mongodb-config
spec:
terminationGracePeriodSeconds: 10
containers:
- name: mongodb-config
image: mongo:4.4
command:
- mongod
- "--port"
- "27017"
- "--configsvr"
- "--replSet"
- "configs"
- "--bind_ip"
- 0.0.0.0
ports:
- containerPort: 27017
volumeMounts:
- name: mongdb-config-persistent-storage
mountPath: /data/configdb
volumeClaimTemplates:
- metadata:
name: mongdb-config-persistent-storage
spec:
storageClassName: course-nfs-storage
accessModes:
- ReadWriteOnce
resources:
requests:
storage: 1Gi
---
# headless 无头服务(提供域名供StatefulSet内部pod访问使用)
apiVersion: v1
kind: Service
metadata:
name: mongodb-config-hs
labels:
name: mongodb-config
spec:
ports:
- port: 27017
targetPort: 27017
clusterIP: None
selector:
app: mongodb-config
验证是否创建成功
[root@k8smaster ~]# kubectl get pods
NAME READY STATUS RESTARTS AGE
mongodb-config-0 1/1 Running 0 4d11h
mongodb-config-1 1/1 Running 0 4d11h
mongodb-config-2 1/1 Running 0 4d11h
[root@k8smaster ~]#
[root@k8smaster ~]# kubectl get services
NAME TYPE CLUSTER-IP EXTERNAL-IP PORT(S) AGE
kubernetes ClusterIP 10.96.0.1 <none> 443/TCP 112d
mongodb-config-hs ClusterIP None <none> 27017/TCP 4d11h
注意:存储卷没有绑定成功?原因是上面 config StatefulSet 的 volumeClaimTemplates 中 storageClassName 写成了 course-nfs-storage,与 config PV 的 mongodb-config-storage 不一致,导致 config 的 PVC 绑定到了 shard 的 PV(见上面 kubectl get pv 的输出),应改为 mongodb-config-storage。
# 第1个分片(每个分片有3个副本)
apiVersion: apps/v1
kind: StatefulSet
metadata:
name: mongodb-shard0
spec:
selector:
matchLabels:
app: mongodb-shard
serviceName: "mongodb-shard-hs"
replicas: 3
template:
metadata:
labels:
app: mongodb-shard
spec:
terminationGracePeriodSeconds: 10
containers:
- name: mongodb-shard
image: mongo:4.4
command:
- mongod
- "--port"
- "27017"
- "--shardsvr"
- "--replSet"
- shard0
- "--bind_ip"
- 0.0.0.0
ports:
- containerPort: 27017
volumeMounts:
- name: mongdb-shard-persistent-storage
mountPath: /data/db
volumeClaimTemplates:
- metadata:
name: mongdb-shard-persistent-storage
spec:
storageClassName: course-nfs-storage
accessModes:
- ReadWriteOnce
resources:
requests:
storage: 10Gi
---
# 第2个分片(每个分片有3个副本)
apiVersion: apps/v1
kind: StatefulSet
metadata:
name: mongodb-shard1
spec:
selector:
matchLabels:
app: mongodb-shard
serviceName: "mongodb-shard-hs"
replicas: 3
template:
metadata:
labels:
app: mongodb-shard
spec:
terminationGracePeriodSeconds: 10
containers:
- name: mongodb-shard
image: mongo:4.4
command:
- mongod
- "--port"
- "27017"
- "--shardsvr"
- "--replSet"
- shard1
- "--bind_ip"
- 0.0.0.0
ports:
- containerPort: 27017
volumeMounts:
- name: mongdb-shard-persistent-storage
mountPath: /data/db
volumeClaimTemplates:
- metadata:
name: mongdb-shard-persistent-storage
spec:
storageClassName: course-nfs-storage
accessModes:
- ReadWriteOnce
resources:
requests:
storage: 10Gi
---
# headless 无头服务(提供域名供StatefulSet内部pod访问使用)
apiVersion: v1
kind: Service
metadata:
name: mongodb-shard-hs
labels:
name: mongodb-shard
spec:
ports:
- port: 27017
targetPort: 27017
clusterIP: None
selector:
app: mongodb-shard
5.创建config 集群
5.1 进入任意一个pod
kubectl exec -it mongodb-config-0 -- /bin/bash
5.2 连接上mongodb
bin/mongo mongodb-config-0.mongodb-config-hs.default.svc.cluster.local:27017
5.3 配置副本集
cfg = {
_id : "configs",
members : [
{_id : 0, host : "mongodb-config-0.mongodb-config-hs.default.svc.cluster.local:27017" },
{_id : 1, host : "mongodb-config-1.mongodb-config-hs.default.svc.cluster.local:27017" },
{_id : 2, host : "mongodb-config-2.mongodb-config-hs.default.svc.cluster.local:27017" }
]
}
rs.initiate(cfg)
configs:PRIMARY> rs.status()
{
"set" : "configs",
"date" : ISODate("2022-04-07T02:44:27.717Z"),
"myState" : 1,
"term" : NumberLong(1),
"syncSourceHost" : "",
"syncSourceId" : -1,
"configsvr" : true,
"heartbeatIntervalMillis" : NumberLong(2000),
"majorityVoteCount" : 2,
"writeMajorityCount" : 2,
"votingMembersCount" : 3,
"writableVotingMembersCount" : 3,
"optimes" : {
"lastCommittedOpTime" : {
"ts" : Timestamp(1649299466, 1),
"t" : NumberLong(1)
},
"lastCommittedWallTime" : ISODate("2022-04-07T02:44:26.741Z"),
"readConcernMajorityOpTime" : {
"ts" : Timestamp(1649299466, 1),
"t" : NumberLong(1)
},
"readConcernMajorityWallTime" : ISODate("2022-04-07T02:44:26.741Z"),
"appliedOpTime" : {
"ts" : Timestamp(1649299466, 1),
"t" : NumberLong(1)
},
"durableOpTime" : {
"ts" : Timestamp(1649299466, 1),
"t" : NumberLong(1)
},
"lastAppliedWallTime" : ISODate("2022-04-07T02:44:26.741Z"),
"lastDurableWallTime" : ISODate("2022-04-07T02:44:26.741Z")
},
"lastStableRecoveryTimestamp" : Timestamp(1649299451, 1),
"electionCandidateMetrics" : {
"lastElectionReason" : "electionTimeout",
"lastElectionDate" : ISODate("2022-04-02T15:02:26.647Z"),
"electionTerm" : NumberLong(1),
"lastCommittedOpTimeAtElection" : {
"ts" : Timestamp(0, 0),
"t" : NumberLong(-1)
},
"lastSeenOpTimeAtElection" : {
"ts" : Timestamp(1648911736, 1),
"t" : NumberLong(-1)
},
"numVotesNeeded" : 2,
"priorityAtElection" : 1,
"electionTimeoutMillis" : NumberLong(10000),
"numCatchUpOps" : NumberLong(0),
"newTermStartDate" : ISODate("2022-04-02T15:02:26.750Z"),
"wMajorityWriteAvailabilityDate" : ISODate("2022-04-02T15:02:27.552Z")
},
"members" : [
{
"_id" : 0,
"name" : "mongodb-config-0.mongodb-config-hs.default.svc.cluster.local:27017",
"health" : 1,
"state" : 1,
"stateStr" : "PRIMARY",
"uptime" : 388015,
"optime" : {
"ts" : Timestamp(1649299466, 1),
"t" : NumberLong(1)
},
"optimeDate" : ISODate("2022-04-07T02:44:26Z"),
"lastAppliedWallTime" : ISODate("2022-04-07T02:44:26.741Z"),
"lastDurableWallTime" : ISODate("2022-04-07T02:44:26.741Z"),
"syncSourceHost" : "",
"syncSourceId" : -1,
"infoMessage" : "",
"electionTime" : Timestamp(1648911746, 1),
"electionDate" : ISODate("2022-04-02T15:02:26Z"),
"configVersion" : 1,
"configTerm" : 1,
"self" : true,
"lastHeartbeatMessage" : ""
},
{
"_id" : 1,
"name" : "mongodb-config-1.mongodb-config-hs.default.svc.cluster.local:27017",
"health" : 1,
"state" : 2,
"stateStr" : "SECONDARY",
"uptime" : 387731,
"optime" : {
"ts" : Timestamp(1649299465, 1),
"t" : NumberLong(1)
},
"optimeDurable" : {
"ts" : Timestamp(1649299465, 1),
"t" : NumberLong(1)
},
"optimeDate" : ISODate("2022-04-07T02:44:25Z"),
"optimeDurableDate" : ISODate("2022-04-07T02:44:25Z"),
"lastAppliedWallTime" : ISODate("2022-04-07T02:44:26.741Z"),
"lastDurableWallTime" : ISODate("2022-04-07T02:44:26.741Z"),
"lastHeartbeat" : ISODate("2022-04-07T02:44:26.196Z"),
"lastHeartbeatRecv" : ISODate("2022-04-07T02:44:26.199Z"),
"pingMs" : NumberLong(0),
"lastHeartbeatMessage" : "",
"syncSourceHost" : "mongodb-config-0.mongodb-config-hs.default.svc.cluster.local:27017",
"syncSourceId" : 0,
"infoMessage" : "",
"configVersion" : 1,
"configTerm" : 1
},
{
"_id" : 2,
"name" : "mongodb-config-2.mongodb-config-hs.default.svc.cluster.local:27017",
"health" : 1,
"state" : 2,
"stateStr" : "SECONDARY",
"uptime" : 387731,
"optime" : {
"ts" : Timestamp(1649299466, 1),
"t" : NumberLong(1)
},
"optimeDurable" : {
"ts" : Timestamp(1649299466, 1),
"t" : NumberLong(1)
},
"optimeDate" : ISODate("2022-04-07T02:44:26Z"),
"optimeDurableDate" : ISODate("2022-04-07T02:44:26Z"),
"lastAppliedWallTime" : ISODate("2022-04-07T02:44:26.741Z"),
"lastDurableWallTime" : ISODate("2022-04-07T02:44:26.741Z"),
"lastHeartbeat" : ISODate("2022-04-07T02:44:27.554Z"),
"lastHeartbeatRecv" : ISODate("2022-04-07T02:44:27.554Z"),
"pingMs" : NumberLong(0),
"lastHeartbeatMessage" : "",
"syncSourceHost" : "mongodb-config-0.mongodb-config-hs.default.svc.cluster.local:27017",
"syncSourceId" : 0,
"infoMessage" : "",
"configVersion" : 1,
"configTerm" : 1
}
],
"ok" : 1,
"$gleStats" : {
"lastOpTime" : Timestamp(0, 0),
"electionId" : ObjectId("7fffffff0000000000000001")
},
"lastCommittedOpTime" : Timestamp(1649299466, 1),
"$clusterTime" : {
"clusterTime" : Timestamp(1649299466, 1),
"signature" : {
"hash" : BinData(0,"AAAAAAAAAAAAAAAAAAAAAAAAAAA="),
"keyId" : NumberLong(0)
}
},
"operationTime" : Timestamp(1649299466, 1)
}
configs:PRIMARY>
可以看到 configs 副本集中已经有一个 Primary 节点和两个 Secondary 节点,说明副本集的启动配置已完成。
6.部署 shard server(分片服务器)
# 第1个分片(每个分片有3个副本)
apiVersion: apps/v1
kind: StatefulSet
metadata:
name: mongodb-shard0
spec:
selector:
matchLabels:
app: mongodb-shard
serviceName: "mongodb-shard-hs"
replicas: 3
template:
metadata:
labels:
app: mongodb-shard
spec:
terminationGracePeriodSeconds: 10
containers:
- name: mongodb-shard
image: mongo:4.4
command:
- mongod
- "--port"
- "27017"
- "--shardsvr"
- "--replSet"
- shard0
- "--bind_ip"
- 0.0.0.0
ports:
- containerPort: 27017
volumeMounts:
- name: mongdb-shard-persistent-storage
mountPath: /data/db
volumeClaimTemplates:
- metadata:
name: mongdb-shard-persistent-storage
spec:
storageClassName: course-nfs-storage
accessModes:
- ReadWriteOnce
resources:
requests:
storage: 10Gi
---
# 第2个分片(每个分片有3个副本)
apiVersion: apps/v1
kind: StatefulSet
metadata:
name: mongodb-shard1
spec:
selector:
matchLabels:
app: mongodb-shard
serviceName: "mongodb-shard-hs"
replicas: 3
template:
metadata:
labels:
app: mongodb-shard
spec:
terminationGracePeriodSeconds: 10
containers:
- name: mongodb-shard
image: mongo:4.4
command:
- mongod
- "--port"
- "27017"
- "--shardsvr"
- "--replSet"
- shard1
- "--bind_ip"
- 0.0.0.0
ports:
- containerPort: 27017
volumeMounts:
- name: mongdb-shard-persistent-storage
mountPath: /data/db
volumeClaimTemplates:
- metadata:
name: mongdb-shard-persistent-storage
spec:
storageClassName: course-nfs-storage
accessModes:
- ReadWriteOnce
resources:
requests:
storage: 10Gi
---
# headless 无头服务(提供域名供StatefulSet内部pod访问使用)
apiVersion: v1
kind: Service
metadata:
name: mongodb-shard-hs
labels:
name: mongodb-shard
spec:
ports:
- port: 27017
targetPort: 27017
clusterIP: None
selector:
app: mongodb-shard
6.1验证是否创建成功
[root@k8smaster mongodb-shard]# kubectl get pods
NAME READY STATUS RESTARTS AGE
mongodb-config-0 1/1 Running 0 4d12h
mongodb-config-1 1/1 Running 0 4d12h
mongodb-config-2 1/1 Running 0 4d12h
mongodb-shard0-0 1/1 Running 0 4d11h
mongodb-shard0-1 1/1 Running 0 4d11h
mongodb-shard0-2 1/1 Running 0 4d11h
mongodb-shard1-0 1/1 Running 0 4d11h
mongodb-shard1-1 1/1 Running 0 4d11h
mongodb-shard1-2 1/1 Running 0 4d11h
6.2 配置第一分片集群
#进入任意一pods
kubectl exec -it mongodb-shard0-0 -- /bin/bash
#连接上任意一个实例
bin/mongo mongodb-shard0-0.mongodb-shard-hs.default.svc.cluster.local:27017
#切换数据库
use admin
#配置副本集
cfg = {_id: "shard0",members:[{_id: 0,host: 'mongodb-shard0-0.mongodb-shard-hs.default.svc.cluster.local:27017',priority: 3},{_id: 1,host: 'mongodb-shard0-1.mongodb-shard-hs.default.svc.cluster.local:27017',priority: 2},{_id: 2,host: 'mongodb-shard0-2.mongodb-shard-hs.default.svc.cluster.local:27017',priority: 1}]};
#初始化
rs.initiate(cfg)
#验证 可以看到第一个分片中已经有一个 Primary 节点和两个 Secondary 节点
shard0:PRIMARY> rs.status()
{
"set" : "shard0",
"date" : ISODate("2022-04-07T03:08:08.686Z"),
"myState" : 1,
"term" : NumberLong(1),
"syncSourceHost" : "",
"syncSourceId" : -1,
"heartbeatIntervalMillis" : NumberLong(2000),
"majorityVoteCount" : 2,
"writeMajorityCount" : 2,
"votingMembersCount" : 3,
"writableVotingMembersCount" : 3,
"optimes" : {
"lastCommittedOpTime" : {
"ts" : Timestamp(1649300885, 1),
"t" : NumberLong(1)
},
"lastCommittedWallTime" : ISODate("2022-04-07T03:08:05.048Z"),
"readConcernMajorityOpTime" : {
"ts" : Timestamp(1649300885, 1),
"t" : NumberLong(1)
},
"readConcernMajorityWallTime" : ISODate("2022-04-07T03:08:05.048Z"),
"appliedOpTime" : {
"ts" : Timestamp(1649300885, 1),
"t" : NumberLong(1)
},
"durableOpTime" : {
"ts" : Timestamp(1649300885, 1),
"t" : NumberLong(1)
},
"lastAppliedWallTime" : ISODate("2022-04-07T03:08:05.048Z"),
"lastDurableWallTime" : ISODate("2022-04-07T03:08:05.048Z")
},
"lastStableRecoveryTimestamp" : Timestamp(1649300855, 1),
"electionCandidateMetrics" : {
"lastElectionReason" : "electionTimeout",
"lastElectionDate" : ISODate("2022-04-02T15:09:19.879Z"),
"electionTerm" : NumberLong(1),
"lastCommittedOpTimeAtElection" : {
"ts" : Timestamp(0, 0),
"t" : NumberLong(-1)
},
"lastSeenOpTimeAtElection" : {
"ts" : Timestamp(1648912149, 1),
"t" : NumberLong(-1)
},
"numVotesNeeded" : 2,
"priorityAtElection" : 3,
"electionTimeoutMillis" : NumberLong(10000),
"numCatchUpOps" : NumberLong(0),
"newTermStartDate" : ISODate("2022-04-02T15:09:20.095Z"),
"wMajorityWriteAvailabilityDate" : ISODate("2022-04-02T15:09:20.581Z")
},
"members" : [
{
"_id" : 0,
"name" : "mongodb-shard0-0.mongodb-shard-hs.default.svc.cluster.local:27017",
"health" : 1,
"state" : 1,
"stateStr" : "PRIMARY",
"uptime" : 388910,
"optime" : {
"ts" : Timestamp(1649300885, 1),
"t" : NumberLong(1)
},
"optimeDate" : ISODate("2022-04-07T03:08:05Z"),
"lastAppliedWallTime" : ISODate("2022-04-07T03:08:05.048Z"),
"lastDurableWallTime" : ISODate("2022-04-07T03:08:05.048Z"),
"syncSourceHost" : "",
"syncSourceId" : -1,
"infoMessage" : "",
"electionTime" : Timestamp(1648912159, 1),
"electionDate" : ISODate("2022-04-02T15:09:19Z"),
"configVersion" : 2,
"configTerm" : 1,
"self" : true,
"lastHeartbeatMessage" : ""
},
{
"_id" : 1,
"name" : "mongodb-shard0-1.mongodb-shard-hs.default.svc.cluster.local:27017",
"health" : 1,
"state" : 2,
"stateStr" : "SECONDARY",
"uptime" : 388739,
"optime" : {
"ts" : Timestamp(1649300885, 1),
"t" : NumberLong(1)
},
"optimeDurable" : {
"ts" : Timestamp(1649300885, 1),
"t" : NumberLong(1)
},
"optimeDate" : ISODate("2022-04-07T03:08:05Z"),
"optimeDurableDate" : ISODate("2022-04-07T03:08:05Z"),
"lastAppliedWallTime" : ISODate("2022-04-07T03:08:05.048Z"),
"lastDurableWallTime" : ISODate("2022-04-07T03:08:05.048Z"),
"lastHeartbeat" : ISODate("2022-04-07T03:08:08.191Z"),
"lastHeartbeatRecv" : ISODate("2022-04-07T03:08:08.660Z"),
"pingMs" : NumberLong(0),
"lastHeartbeatMessage" : "",
"syncSourceHost" : "mongodb-shard0-0.mongodb-shard-hs.default.svc.cluster.local:27017",
"syncSourceId" : 0,
"infoMessage" : "",
"configVersion" : 2,
"configTerm" : 1
},
{
"_id" : 2,
"name" : "mongodb-shard0-2.mongodb-shard-hs.default.svc.cluster.local:27017",
"health" : 1,
"state" : 2,
"stateStr" : "SECONDARY",
"uptime" : 388739,
"optime" : {
"ts" : Timestamp(1649300885, 1),
"t" : NumberLong(1)
},
"optimeDurable" : {
"ts" : Timestamp(1649300885, 1),
"t" : NumberLong(1)
},
"optimeDate" : ISODate("2022-04-07T03:08:05Z"),
"optimeDurableDate" : ISODate("2022-04-07T03:08:05Z"),
"lastAppliedWallTime" : ISODate("2022-04-07T03:08:05.048Z"),
"lastDurableWallTime" : ISODate("2022-04-07T03:08:05.048Z"),
"lastHeartbeat" : ISODate("2022-04-07T03:08:08.258Z"),
"lastHeartbeatRecv" : ISODate("2022-04-07T03:08:06.702Z"),
"pingMs" : NumberLong(0),
"lastHeartbeatMessage" : "",
"syncSourceHost" : "mongodb-shard0-0.mongodb-shard-hs.default.svc.cluster.local:27017",
"syncSourceId" : 0,
"infoMessage" : "",
"configVersion" : 2,
"configTerm" : 1
}
],
"ok" : 1,
"$gleStats" : {
"lastOpTime" : Timestamp(0, 0),
"electionId" : ObjectId("7fffffff0000000000000001")
},
"lastCommittedOpTime" : Timestamp(1649300885, 1),
"$configServerState" : {
"opTime" : {
"ts" : Timestamp(1649300886, 5),
"t" : NumberLong(1)
}
},
"$clusterTime" : {
"clusterTime" : Timestamp(1649300886, 5),
"signature" : {
"hash" : BinData(0,"AAAAAAAAAAAAAAAAAAAAAAAAAAA="),
"keyId" : NumberLong(0)
}
},
"operationTime" : Timestamp(1649300885, 1)
}
shard0:PRIMARY>
6.3 配置第二分片集群
# 进入第二分片任意一pod
kubectl exec -it mongodb-shard1-0 -- /bin/bash
#连接上mongodb
bin/mongo mongodb-shard1-0.mongodb-shard-hs.default.svc.cluster.local:27017
#切换数据库
use admin
#配置副本集
cfg = {_id: "shard1",members:[{_id: 0,host: 'mongodb-shard1-0.mongodb-shard-hs.default.svc.cluster.local:27017',priority: 3},{_id: 1,host: 'mongodb-shard1-1.mongodb-shard-hs.default.svc.cluster.local:27017',priority: 2},{_id: 2,host: 'mongodb-shard1-2.mongodb-shard-hs.default.svc.cluster.local:27017',priority: 1}]};
#初始化
rs.initiate(cfg)
shard1:PRIMARY> rs.status()
{
"set" : "shard1",
"date" : ISODate("2022-04-07T03:14:28.124Z"),
"myState" : 1,
"term" : NumberLong(1),
"syncSourceHost" : "",
"syncSourceId" : -1,
"heartbeatIntervalMillis" : NumberLong(2000),
"majorityVoteCount" : 2,
"writeMajorityCount" : 2,
"votingMembersCount" : 3,
"writableVotingMembersCount" : 3,
"optimes" : {
"lastCommittedOpTime" : {
"ts" : Timestamp(1649301263, 1),
"t" : NumberLong(1)
},
"lastCommittedWallTime" : ISODate("2022-04-07T03:14:23.314Z"),
"readConcernMajorityOpTime" : {
"ts" : Timestamp(1649301263, 1),
"t" : NumberLong(1)
},
"readConcernMajorityWallTime" : ISODate("2022-04-07T03:14:23.314Z"),
"appliedOpTime" : {
"ts" : Timestamp(1649301263, 1),
"t" : NumberLong(1)
},
"durableOpTime" : {
"ts" : Timestamp(1649301263, 1),
"t" : NumberLong(1)
},
"lastAppliedWallTime" : ISODate("2022-04-07T03:14:23.314Z"),
"lastDurableWallTime" : ISODate("2022-04-07T03:14:23.314Z")
},
"lastStableRecoveryTimestamp" : Timestamp(1649301253, 1),
"electionCandidateMetrics" : {
"lastElectionReason" : "electionTimeout",
"lastElectionDate" : ISODate("2022-04-02T15:15:11.059Z"),
"electionTerm" : NumberLong(1),
"lastCommittedOpTimeAtElection" : {
"ts" : Timestamp(0, 0),
"t" : NumberLong(-1)
},
"lastSeenOpTimeAtElection" : {
"ts" : Timestamp(1648912500, 1),
"t" : NumberLong(-1)
},
"numVotesNeeded" : 2,
"priorityAtElection" : 3,
"electionTimeoutMillis" : NumberLong(10000),
"numCatchUpOps" : NumberLong(0),
"newTermStartDate" : ISODate("2022-04-02T15:15:11.151Z"),
"wMajorityWriteAvailabilityDate" : ISODate("2022-04-02T15:15:11.299Z")
},
"members" : [
{
"_id" : 0,
"name" : "mongodb-shard1-0.mongodb-shard-hs.default.svc.cluster.local:27017",
"health" : 1,
"state" : 1,
"stateStr" : "PRIMARY",
"uptime" : 389290,
"optime" : {
"ts" : Timestamp(1649301263, 1),
"t" : NumberLong(1)
},
"optimeDate" : ISODate("2022-04-07T03:14:23Z"),
"lastAppliedWallTime" : ISODate("2022-04-07T03:14:23.314Z"),
"lastDurableWallTime" : ISODate("2022-04-07T03:14:23.314Z"),
"syncSourceHost" : "",
"syncSourceId" : -1,
"infoMessage" : "",
"electionTime" : Timestamp(1648912511, 1),
"electionDate" : ISODate("2022-04-02T15:15:11Z"),
"configVersion" : 2,
"configTerm" : 1,
"self" : true,
"lastHeartbeatMessage" : ""
},
{
"_id" : 1,
"name" : "mongodb-shard1-1.mongodb-shard-hs.default.svc.cluster.local:27017",
"health" : 1,
"state" : 2,
"stateStr" : "SECONDARY",
"uptime" : 388767,
"optime" : {
"ts" : Timestamp(1649301263, 1),
"t" : NumberLong(1)
},
"optimeDurable" : {
"ts" : Timestamp(1649301263, 1),
"t" : NumberLong(1)
},
"optimeDate" : ISODate("2022-04-07T03:14:23Z"),
"optimeDurableDate" : ISODate("2022-04-07T03:14:23Z"),
"lastAppliedWallTime" : ISODate("2022-04-07T03:14:23.314Z"),
"lastDurableWallTime" : ISODate("2022-04-07T03:14:23.314Z"),
"lastHeartbeat" : ISODate("2022-04-07T03:14:26.145Z"),
"lastHeartbeatRecv" : ISODate("2022-04-07T03:14:28.091Z"),
"pingMs" : NumberLong(0),
"lastHeartbeatMessage" : "",
"syncSourceHost" : "mongodb-shard1-0.mongodb-shard-hs.default.svc.cluster.local:27017",
"syncSourceId" : 0,
"infoMessage" : "",
"configVersion" : 2,
"configTerm" : 1
},
{
"_id" : 2,
"name" : "mongodb-shard1-2.mongodb-shard-hs.default.svc.cluster.local:27017",
"health" : 1,
"state" : 2,
"stateStr" : "SECONDARY",
"uptime" : 388767,
"optime" : {
"ts" : Timestamp(1649301263, 1),
"t" : NumberLong(1)
},
"optimeDurable" : {
"ts" : Timestamp(1649301263, 1),
"t" : NumberLong(1)
},
"optimeDate" : ISODate("2022-04-07T03:14:23Z"),
"optimeDurableDate" : ISODate("2022-04-07T03:14:23Z"),
"lastAppliedWallTime" : ISODate("2022-04-07T03:14:23.314Z"),
"lastDurableWallTime" : ISODate("2022-04-07T03:14:23.314Z"),
"lastHeartbeat" : ISODate("2022-04-07T03:14:26.144Z"),
"lastHeartbeatRecv" : ISODate("2022-04-07T03:14:28.091Z"),
"pingMs" : NumberLong(0),
"lastHeartbeatMessage" : "",
"syncSourceHost" : "mongodb-shard1-1.mongodb-shard-hs.default.svc.cluster.local:27017",
"syncSourceId" : 1,
"infoMessage" : "",
"configVersion" : 2,
"configTerm" : 1
}
],
"ok" : 1,
"$gleStats" : {
"lastOpTime" : Timestamp(0, 0),
"electionId" : ObjectId("7fffffff0000000000000001")
},
"lastCommittedOpTime" : Timestamp(1649301263, 1),
"$configServerState" : {
"opTime" : {
"ts" : Timestamp(1649301267, 3),
"t" : NumberLong(1)
}
},
"$clusterTime" : {
"clusterTime" : Timestamp(1649301267, 3),
"signature" : {
"hash" : BinData(0,"AAAAAAAAAAAAAAAAAAAAAAAAAAA="),
"keyId" : NumberLong(0)
}
},
"operationTime" : Timestamp(1649301263, 1)
}
shard1:PRIMARY>
7.部署 route server(mongos 路由服务器)
apiVersion: apps/v1
kind: Deployment
metadata:
name: mongodb-mongos
spec:
selector:
matchLabels:
app: mongodb-mongos
replicas: 1
template:
metadata:
labels:
app: mongodb-mongos
spec:
terminationGracePeriodSeconds: 10
containers:
- name: mongodb-mongos
image: mongo:4.4
command:
- mongos
- "--port"
- "27017"
- "--configdb"
- "configs/mongodb-config-0.mongodb-config-hs.default.svc.cluster.local:27017,mongodb-config-1.mongodb-config-hs.default.svc.cluster.local:27017,mongodb-config-2.mongodb-config-hs.default.svc.cluster.local:27017"
- "--bind_ip"
- 0.0.0.0
ports:
- containerPort: 27017
---
# 标准服务(供外部访问)
apiVersion: v1
kind: Service
metadata:
name: mongodb-mongos-svc
labels:
name: mongodb-mongos
spec:
type: NodePort
ports:
- port: 27017
targetPort: 27017
nodePort: 30717
selector:
app: mongodb-mongos
7.1验证
[root@k8smaster mongodb-shard]# kubectl get pods
NAME READY STATUS RESTARTS AGE
mongodb-config-0 1/1 Running 0 4d12h
mongodb-config-1 1/1 Running 0 4d12h
mongodb-config-2 1/1 Running 0 4d12h
mongodb-mongos-5bf5f8956d-ckpzv 1/1 Running 0 4d11h
mongodb-shard0-0 1/1 Running 0 4d12h
mongodb-shard0-1 1/1 Running 0 4d12h
mongodb-shard0-2 1/1 Running 0 4d12h
mongodb-shard1-0 1/1 Running 0 4d12h
mongodb-shard1-1 1/1 Running 0 4d12h
mongodb-shard1-2 1/1 Running 0 4d12h
[root@k8smaster mongodb-shard]#
7.2 将路由服务器和分片副本集串联起来
#进入pod
kubectl exec -it mongodb-mongos-5bf5f8956d-ckpzv -- /bin/bash
#连接mongodb
bin/mongo --port 27017
#切换库
use admin
#将副本集添加到分片集
sh.addShard("shard0/mongodb-shard0-0.mongodb-shard-hs.default.svc.cluster.local:27017,mongodb-shard0-1.mongodb-shard-hs.default.svc.cluster.local:27017,mongodb-shard0-2.mongodb-shard-hs.default.svc.cluster.local:27017");
sh.addShard("shard1/mongodb-shard1-0.mongodb-shard-hs.default.svc.cluster.local:27017,mongodb-shard1-1.mongodb-shard-hs.default.svc.cluster.local:27017,mongodb-shard1-2.mongodb-shard-hs.default.svc.cluster.local:27017");
mongos> sh.status()
--- Sharding Status ---
sharding version: {
"_id" : 1,
"minCompatibleVersion" : 5,
"currentVersion" : 6,
"clusterId" : ObjectId("624865833544ee7f865b86d7")
}
shards:
{ "_id" : "shard0", "host" : "shard0/mongodb-shard0-0.mongodb-shard-hs.default.svc.cluster.local:27017,mongodb-shard0-1.mongodb-shard-hs.default.svc.cluster.local:27017,mongodb-shard0-2.mongodb-shard-hs.default.svc.cluster.local:27017", "state" : 1 }
{ "_id" : "shard1", "host" : "shard1/mongodb-shard1-0.mongodb-shard-hs.default.svc.cluster.local:27017,mongodb-shard1-1.mongodb-shard-hs.default.svc.cluster.local:27017,mongodb-shard1-2.mongodb-shard-hs.default.svc.cluster.local:27017", "state" : 1 }
active mongoses:
"4.4.11" : 1
autosplit:
Currently enabled: yes
balancer:
Currently enabled: yes
Currently running: no
Failed balancer rounds in last 5 attempts: 0
Migration Results for the last 24 hours:
No recent migrations
databases:
{ "_id" : "config", "primary" : "config", "partitioned" : true }
config.system.sessions
shard key: { "_id" : 1 }
unique: false
balancing: true
chunks:
shard0 512
shard1 512
too many chunks to print, use verbose if you want to force print
{ "_id" : "mom", "primary" : "shard0", "partitioned" : true, "version" : { "uuid" : UUID("5bd831f4-b5bb-44ee-9885-c736aa0a7e18"), "lastMod" : 1 } }
8.分片测试
#连接 route server
kubectl exec -it mongodb-mongos-5bf5f8956d-ckpzv -- /bin/bash
bin/mongo --port 27017
use admin
#假设我们需要让mom这个数据库启用分片,只需要执行如下命令,便可以对该数据库内的集合进行分片了
sh.enableSharding("mom")
#对集合分片时,我们要选择一个分片键(shard key)。分片键是集合的一个键,MongoDB 根据这个键拆分数据。执行下面命令让 ApiRecord 集合依据 traceId 对集合进行分片:
sh.shardCollection("mom.ApiRecord", {"traceId" : "hashed"})
#切换到mom库
use mom
#接着执行如下命令往 ApiRecord 集合中插入 5500 条数据:
for(var i=100000;i<105500;i++){
db.ApiRecord.insert({"traceId" : "user"+i , "created_at" : new Date()});
}
db.ApiRecord.getShardDistribution()
mongos> db.mom.getShardDistribution()
Collection mom.mom is not sharded.
mongos> use admin
switched to db admin
mongos> db.mom.getShardDistribution()
Collection admin.mom is not sharded.
mongos> db.ApiRecord.getShardDistribution()
Collection admin.ApiRecord is not sharded.
mongos> use mom
switched to db mom
mongos> db.ApiRecord.getShardDistribution()
Shard shard0 at shard0/mongodb-shard0-0.mongodb-shard-hs.default.svc.cluster.local:27017,mongodb-shard0-1.mongodb-shard-hs.default.svc.cluster.local:27017,mongodb-shard0-2.mongodb-shard-hs.default.svc.cluster.local:27017
data : 178KiB docs : 2775 chunks : 2
estimated data per chunk : 89KiB
estimated docs per chunk : 1387
Shard shard1 at shard1/mongodb-shard1-0.mongodb-shard-hs.default.svc.cluster.local:27017,mongodb-shard1-1.mongodb-shard-hs.default.svc.cluster.local:27017,mongodb-shard1-2.mongodb-shard-hs.default.svc.cluster.local:27017
data : 175KiB docs : 2725 chunks : 2
estimated data per chunk : 87KiB
estimated docs per chunk : 1362
Totals
data : 354KiB docs : 5500 chunks : 4
Shard shard0 contains 50.45% data, 50.45% docs in cluster, avg obj size on shard : 66B
Shard shard1 contains 49.54% data, 49.54% docs in cluster, avg obj size on shard : 66B
参考:K8s - 安装部署MongoDB数据库教程3(分片集群)