Installing a Kubernetes Cluster with kubeadm, Minikube, Binaries, Rancher, and Other Methods
Installing Kubernetes (K8s)
Official site: https://kubernetes.io/zh/
Chinese community: https://www.kubernetes.org.cn/
1. The kubeadm Method
Preparation
Disable the firewall
# Temporary
systemctl stop firewalld
# Permanent
systemctl disable firewalld
Disable SELinux
# Permanent
sed -i 's/enforcing/disabled/' /etc/selinux/config
# Temporary
setenforce 0
Disable swap
# Temporary
swapoff -a
# Permanent: comment out every line containing swap in the file
vim /etc/fstab
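A non-interactive alternative, as a sketch (assuming a standard /etc/fstab where the swap entry contains the word "swap"):
# Comment out every fstab line that mentions swap (review the file afterwards)
sed -i 's/.*swap.*/#&/' /etc/fstab
# Verify: the Swap row should show 0 after swapoff -a or a reboot
free -h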
Set the hostname on each node (run the command on the host with the corresponding IP) and map hostnames to IPs
hostnamectl set-hostname <hostname>
172.29.234.1 hostnamectl set-hostname node001
172.29.234.2 hostnamectl set-hostname node002
172.29.234.3 hostnamectl set-hostname node003
Add hosts entries on the master
cat >> /etc/hosts << EOF
172.29.234.1 node001
172.29.234.2 node002
172.29.234.3 node003
EOF
Configure kernel parameters so that bridged IPv4 traffic is passed to the iptables chains
[root@node001 ~]# cat > /etc/sysctl.d/k8s.conf <<EOF
net.bridge.bridge-nf-call-ip6tables = 1
net.bridge.bridge-nf-call-iptables = 1
EOF
sysctl --system
Depending on your needs, configure SSH login between the nodes (password or key-based) so that files can be copied between them later.
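For example, a minimal sketch of key-based, password-free SSH from the master to the other nodes (convenient for the scp steps later; ssh-copy-id still asks for each node's password once):
# On node001: generate a key pair (accept the defaults)
ssh-keygen -t rsa -b 4096
# Copy the public key to the other nodes
ssh-copy-id root@node002
ssh-copy-id root@node003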
Install Docker and kubeadm
Install Docker and kubeadm on all nodes.
Install Docker
# Add the Aliyun Docker mirror repo
wget https://mirrors.aliyun.com/docker-ce/linux/centos/docker-ce.repo -O /etc/yum.repos.d/docker-ce.repo
# List all available Docker versions
yum list docker-ce --showduplicates | sort -r
# Install a specific version
yum install <FQPN>
# Or install the latest stable version (default)
yum install docker-ce
# Enable Docker on boot and start it now
systemctl enable docker && systemctl start docker
# Check the Docker version
docker --version
Configure the Yum repositories
Set up the Kubernetes repo: vim /etc/yum.repos.d/kubernetes.repo
# Add the Aliyun Kubernetes yum mirror repo
[kubernetes]
name=Kubernetes
baseurl=https://mirrors.aliyun.com/kubernetes/yum/repos/kubernetes-el7-x86_64
enabled=1
gpgcheck=0
repo_gpgcheck=0
gpgkey=https://mirrors.aliyun.com/kubernetes/yum/doc/yum-key.gpg
https://mirrors.aliyun.com/kubernetes/yum/doc/rpm-package-key.gpg
Configure the Docker registry mirror: vim /etc/docker/daemon.json
[root@master ~]# cat /etc/docker/daemon.json
{
"registry-mirrors": ["https://a5amagpr.mirror.aliyuncs.com"]
}
Distribute the config files to the other nodes
[root@node001 ~]# scp /etc/yum.repos.d/kubernetes.repo node002:/etc/yum.repos.d/
kubernetes.repo 100% 275 1.1MB/s 00:00
[root@node001 ~]# scp /etc/yum.repos.d/kubernetes.repo node003:/etc/yum.repos.d/
kubernetes.repo
# Note: run mkdir /etc/docker on every node first
[root@node001 ~]# scp /etc/docker/daemon.json node002:/etc/docker/
daemon.json 100% 68 346.0KB/s 00:00
[root@node001 ~]# scp /etc/docker/daemon.json node003:/etc/docker/
daemon.json 100% 68 331.5KB/s 00:00
Restart Docker on all nodes
[root@node001 ~]# systemctl daemon-reload
[root@node001 ~]# systemctl restart docker
Install kubeadm, kubelet, and kubectl
Note: the versions must be identical on all nodes.
# Install the latest version by default
yum install -y kubelet kubeadm kubectl
# Or install a specific version
yum install -y kubelet-1.18.0 kubeadm-1.18.0 kubectl-1.18.0
# Enable kubelet on boot
systemctl enable kubelet
Deploy the Kubernetes Master
Deploy the Kubernetes control plane on the master node (node001).
--apiserver-advertise-address: the API server address; set it to this machine's IP
--image-repository: images are pulled from foreign registries by default; point this at the Aliyun mirror instead
--kubernetes-version: the Kubernetes version to install
--service-cidr: the Service IP range; any range that does not conflict is fine
--pod-network-cidr: the Pod network range; any range that does not conflict is fine
kubeadm init \
--apiserver-advertise-address=172.29.234.1 \
--image-repository registry.aliyuncs.com/google_containers \
--kubernetes-version v1.18.0 \
--service-cidr=10.10.0.0/16 \
--pod-network-cidr=10.20.0.0/16
If the following warnings appear, run systemctl enable kubelet and swapoff -a to disable swap:
[WARNING Swap]: swap is enabled; production deployments should disable swap unless testing the NodeSwap feature gate of the kubelet
[WARNING Service-Kubelet]: kubelet service is not enabled, please run 'systemctl enable kubelet.service'
If this error appears:
[kubelet-check] The HTTP call equal to 'curl -sSL http://localhost:10248/healthz' failed with error: Get "http://localhost:10248/healthz": dial tcp 127.0.0.1:10248: connect: connection refused.
add "exec-opts": ["native.cgroupdriver=systemd"] to the /etc/docker/daemon.json config file and restart Docker:
[root@master ~]# cat /etc/docker/daemon.json
{
"registry-mirrors": ["https://a5amagpr.mirror.aliyuncs.com"],
"exec-opts": ["native.cgroupdriver=systemd"]
}
[root@master ~]# systemctl restart docker
Clear the kubeadm state and run the initialization again:
kubeadm reset -f
When "Your Kubernetes control-plane has initialized successfully!" appears, the installation has succeeded:
[addons] Applied essential addon: CoreDNS
[addons] Applied essential addon: kube-proxy
Your Kubernetes control-plane has initialized successfully!
To start using your cluster, you need to run the following as a regular user:
mkdir -p $HOME/.kube
sudo cp -i /etc/kubernetes/admin.conf $HOME/.kube/config
sudo chown $(id -u):$(id -g) $HOME/.kube/config
You should now deploy a pod network to the cluster.
Run "kubectl apply -f [podnetwork].yaml" with one of the options listed at:
https://kubernetes.io/docs/concepts/cluster-administration/addons/
Then you can join any number of worker nodes by running the following on each as root:
kubeadm join 172.29.234.1:6443 --token j3yoqc.n0puscwoaw024cer \
--discovery-token-ca-cert-hash sha256:a319ee2c305bcf30661cc70ae4a1c8790e450ef77fb908e5473f813019c03c19
Check the containers started from the pulled images:
[root@node001 ~]# docker ps
CONTAINER ID IMAGE COMMAND CREATED STATUS PORTS NAMES
cd4ae6cff8e1 43940c34f24f "/usr/local/bin/kube…" 43 seconds ago Up 43 seconds k8s_kube-proxy_kube-proxy-8nhxg_kube-system_68da55c6-8cc3-456d-9ef0-40c08c79476c_0
07baba228a19 registry.aliyuncs.com/google_containers/pause:3.2 "/pause" 44 seconds ago Up 43 seconds k8s_POD_kube-proxy-8nhxg_kube-system_68da55c6-8cc3-456d-9ef0-40c08c79476c_0
177181973b54 a31f78c7c8ce "kube-scheduler --au…" About a minute ago Up About a minute k8s_kube-scheduler_kube-scheduler-node001_kube-system_ca2aa1b3224c37fa1791ef6c7d883bbe_0
ad07f05aaafe d3e55153f52f "kube-controller-man…" About a minute ago Up About a minute k8s_kube-controller-manager_kube-controller-manager-node001_kube-system_c3e36b1ca7f02f5d08f86ab49a445523_0
f63eb32e060c 303ce5db0e90 "etcd --advertise-cl…" About a minute ago Up About a minute k8s_etcd_etcd-node001_kube-system_1650fba102ae7fd48af91ae7227a8918_0
4ff5afd36074 74060cea7f70 "kube-apiserver --ad…" About a minute ago Up About a minute k8s_kube-apiserver_kube-apiserver-node001_kube-system_c7f4ce820e11c35aea50e314696db0c5_0
66de0ba4c586 registry.aliyuncs.com/google_containers/pause:3.2 "/pause" About a minute ago Up About a minute k8s_POD_kube-scheduler-node001_kube-system_ca2aa1b3224c37fa1791ef6c7d883bbe_0
5795be27b3d6 registry.aliyuncs.com/google_containers/pause:3.2 "/pause" About a minute ago Up About a minute k8s_POD_kube-controller-manager-node001_kube-system_c3e36b1ca7f02f5d08f86ab49a445523_0
402958c279a1 registry.aliyuncs.com/google_containers/pause:3.2 "/pause" About a minute ago Up About a minute k8s_POD_kube-apiserver-node001_kube-system_c7f4ce820e11c35aea50e314696db0c5_0
0fe24c7bf783 registry.aliyuncs.com/google_containers/pause:3.2 "/pause" About a minute ago Up About a minute k8s_POD_etcd-node001_kube-system_1650fba102ae7fd48af91ae7227a8918_0
Run the commands from the init output:
mkdir -p $HOME/.kube
sudo cp -i /etc/kubernetes/admin.conf $HOME/.kube/config
sudo chown $(id -u):$(id -g) $HOME/.kube/config
Run kubectl get nodes:
[root@node001 ~]# kubectl get nodes
NAME STATUS ROLES AGE VERSION
node001 NotReady master 2m35s v1.18.0
Join the worker nodes to the cluster
Take the kubeadm join command from the kubeadm init output on node001:
kubeadm join 172.29.234.1:6443 --token j3yoqc.n0puscwoaw024cer \
--discovery-token-ca-cert-hash sha256:a319ee2c305bcf30661cc70ae4a1c8790e450ef77fb908e5473f813019c03c19
Run this command on every worker node to add it to the cluster; do not run it on the master node.
[root@node002 ~]# kubeadm join 172.29.234.1:6443 --token j3yoqc.n0puscwoaw024cer \
> --discovery-token-ca-cert-hash sha256:a319ee2c305bcf30661cc70ae4a1c8790e450ef77fb908e5473f813019c03c19
W0319 13:23:34.581734 14132 join.go:346] [preflight] WARNING: JoinControlPane.controlPlane settings will be ignored when control-plane flag is not set.
[preflight] Running pre-flight checks
[WARNING IsDockerSystemdCheck]: detected "cgroupfs" as the Docker cgroup driver. The recommended driver is "systemd". Please follow the guide at https://kubernetes.io/docs/setup/cri/
[WARNING SystemVerification]: this Docker version is not on the list of validated versions: 20.10.13. Latest validated version: 19.03
[preflight] Reading configuration from the cluster...
[preflight] FYI: You can look at this config file with 'kubectl -n kube-system get cm kubeadm-config -oyaml'
[kubelet-start] Downloading configuration for the kubelet from the "kubelet-config-1.18" ConfigMap in the kube-system namespace
[kubelet-start] Writing kubelet configuration to file "/var/lib/kubelet/config.yaml"
[kubelet-start] Writing kubelet environment file with flags to file "/var/lib/kubelet/kubeadm-flags.env"
[kubelet-start] Starting the kubelet
[kubelet-start] Waiting for the kubelet to perform the TLS Bootstrap...
This node has joined the cluster:
* Certificate signing request was sent to apiserver and a response was received.
* The Kubelet was informed of the new secure connection details.
Run 'kubectl get nodes' on the control-plane to see this node join the cluster.
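The join token printed by kubeadm init expires after 24 hours by default. If more nodes need to join later, a fresh join command can be generated on the master:
kubeadm token create --print-join-command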
On the master, check the cluster nodes: kubectl get nodes
[root@node001 ~]# kubectl get nodes
NAME STATUS ROLES AGE VERSION
node001 NotReady master 7m18s v1.18.0
node002 NotReady <none> 3m9s v1.18.0
node003 NotReady <none> 84s v1.18.0
Install a Pod network add-on
The nodes are NotReady because the CoreDNS Pods cannot start without a Pod network; install a network add-on (flannel here).
# Download kube-flannel.yml
wget https://raw.githubusercontent.com/coreos/flannel/master/Documentation/kube-flannel.yml
# Apply the configuration in kube-flannel.yml to deploy flannel
kubectl apply -f kube-flannel.yml
Check which Pods were created: kubectl get pods -n kube-system
[root@node001 ~]# kubectl get pods -n kube-system
NAME READY STATUS RESTARTS AGE
coredns-7ff77c879f-nh4sw 0/1 Pending 0 9m38s
coredns-7ff77c879f-rmm7q 0/1 Pending 0 9m38s
etcd-node001 1/1 Running 0 9m47s
kube-apiserver-node001 1/1 Running 0 9m47s
kube-controller-manager-node001 1/1 Running 0 9m47s
kube-flannel-ds-sskm9 0/1 Init:0/2 0 45s
kube-flannel-ds-t7jkg 0/1 Init:0/2 0 45s
kube-flannel-ds-zmxsv 0/1 Init:0/2 0 45s
kube-proxy-4dtsb 1/1 Running 0 4m3s
kube-proxy-8nhxg 1/1 Running 0 9m38s
kube-proxy-z2b58 1/1 Running 0 5m48s
kube-scheduler-node001 1/1 Running 0 9m47s
Wait patiently until all Pods are up and running:
[root@node001 ~]# kubectl get pods -n kube-system
NAME READY STATUS RESTARTS AGE
coredns-7ff77c879f-nh4sw 0/1 Running 0 13m
coredns-7ff77c879f-rmm7q 0/1 Running 0 13m
etcd-node001 1/1 Running 0 13m
kube-apiserver-node001 1/1 Running 0 13m
kube-controller-manager-node001 1/1 Running 0 13m
kube-flannel-ds-sskm9 1/1 Running 0 4m39s
kube-flannel-ds-t7jkg 1/1 Running 0 4m39s
kube-flannel-ds-zmxsv 1/1 Running 0 4m39s
kube-proxy-4dtsb 1/1 Running 0 7m57s
kube-proxy-8nhxg 1/1 Running 0 13m
kube-proxy-z2b58 1/1 Running 0 9m42s
kube-scheduler-node001 1/1 Running 0 13m
On the master, check the cluster nodes again: kubectl get nodes
[root@node001 ~]# kubectl get nodes
NAME STATUS ROLES AGE VERSION
node001 Ready master 14m v1.18.0
node002 Ready <none> 10m v1.18.0
node003 Ready <none> 8m42s v1.18.0
Test the Kubernetes cluster
Create a Deployment resource named nginx that pulls the nginx image:
kubectl create deployment nginx --image=nginx
Expose the port:
kubectl expose deployment nginx --port=80 --type=NodePort
Check which node nginx is running on and which port it is exposed on:
[root@node001 ~]# kubectl get pod,svc -o wide
NAME READY STATUS RESTARTS AGE IP NODE NOMINATED NODE READINESS GATES
pod/nginx-f89759699-6bbsk 1/1 Running 0 2m8s 10.20.1.2 node002 <none> <none>
NAME TYPE CLUSTER-IP EXTERNAL-IP PORT(S) AGE SELECTOR
service/kubernetes ClusterIP 10.10.0.1 <none> 443/TCP 20m <none>
service/nginx NodePort 10.10.181.43 <none> 80:30102/TCP 119s app=nginx
Access nginx on the node where it is running (node002:30102).
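A quick check from any machine that can reach the node (assuming node002 resolves via the /etc/hosts entries added earlier; otherwise use its IP):
curl http://node002:30102
# or
curl http://172.29.234.2:30102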
2. The Minikube Method
Official site: https://minikube.sigs.k8s.io/docs/start/
Download Minikube
Download the Minikube binary and install it:
curl -LO https://storage.googleapis.com/minikube/releases/latest/minikube-linux-amd64
sudo install minikube-linux-amd64 /usr/local/bin/minikube
Start Minikube
The first start fails with an error:
[root@administrator program]# minikube start
* minikube v1.25.2 on Centos 7.9.2009 (amd64)
* Automatically selected the docker driver. Other choices: ssh, none
* The "docker" driver should not be used with root privileges.
* If you are running minikube within a VM, consider using --driver=none:
* https://minikube.sigs.k8s.io/docs/reference/drivers/none/
X Exiting due to DRV_AS_ROOT: The "docker" driver should not be used with root privileges.
Note: Minikube needs a terminal with administrator access, but must not be run as root.
Create a k8s user that belongs to the docker group and switch to it:
# Create the user
useradd -g docker k8s
# Set the user's password
passwd k8s
# Switch to the user
su k8s
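Before starting Minikube again, it may be worth confirming that the new user can reach the Docker daemon (a quick sanity check, not part of the original steps):
# Should list docker among the user's groups
id k8s
# Run as the k8s user; should succeed without "permission denied"
docker ps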
Start Minikube again:
[k8s@administrator program]$ minikube start
* minikube v1.25.2 on Centos 7.9.2009 (amd64)
* Automatically selected the docker driver
* Starting control plane node minikube in cluster minikube
* Pulling base image ...
* Downloading Kubernetes v1.23.3 preload ...
> preloaded-images-k8s-v17-v1...: 505.68 MiB / 505.68 MiB 100.00% 11.89 Mi
> index.docker.io/kicbase/sta...: 379.06 MiB / 379.06 MiB 100.00% 2.56 MiB
! minikube was unable to download gcr.io/k8s-minikube/kicbase:v0.0.30, but successfully downloaded docker.io/kicbase/stable:v0.0.30 as a fallback image
* Creating docker container (CPUs=2, Memory=2200MB) ...
! This container is having trouble accessing https://k8s.gcr.io
* To pull new external images, you may need to configure a proxy: https://minikube.sigs.k8s.io/docs/reference/networking/proxy/
* Preparing Kubernetes v1.23.3 on Docker 20.10.12 ...
- kubelet.housekeeping-interval=5m
- Generating certificates and keys ...
- Booting up control plane ...
- Configuring RBAC rules ...
* Verifying Kubernetes components...
- Using image gcr.io/k8s-minikube/storage-provisioner:v5
* Enabled addons: default-storageclass, storage-provisioner
* kubectl not found. If you need it, try: 'minikube kubectl -- get pods -A'
* Done! kubectl is now configured to use "minikube" cluster and "default" namespace by default
Verify Minikube
Check the Minikube version:
[k8s@administrator program]$ minikube version
minikube version: v1.25.2
commit: 362d5fdc0a3dbee389b3d3f1034e8023e72bd3a7
Check the kubectl version; on first use Minikube downloads kubectl automatically:
[k8s@administrator program]$ minikube kubectl version
> kubectl.sha256: 64 B / 64 B [--------------------------] 100.00% ? p/s 0s
> kubectl: 44.43 MiB / 44.43 MiB [-------------] 100.00% 14.91 MiB p/s 3.2s
Client Version: version.Info{Major:"1", Minor:"23", GitVersion:"v1.23.3", GitCommit:"816c97ab8cff8a1c72eccca1026f7820e93e0d25", GitTreeState:"clean", BuildDate:"2022-01-25T21:25:17Z", GoVersion:"go1.17.6", Compiler:"gc", Platform:"linux/amd64"}
Server Version: version.Info{Major:"1", Minor:"23", GitVersion:"v1.23.3", GitCommit:"816c97ab8cff8a1c72eccca1026f7820e93e0d25", GitTreeState:"clean", BuildDate:"2022-01-25T21:19:12Z", GoVersion:"go1.17.6", Compiler:"gc", Platform:"linux/amd64"}
Minikube wraps kubectl, so commands take the form minikube kubectl -- <command> (for example minikube kubectl cluster-info). To use the kubectl command directly, copy the binary into /bin.
Note: locating the kubectl binary requires root privileges.
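An alternative to copying the binary, suggested in Minikube's own documentation, is a shell alias; the steps below copy the binary instead:
alias kubectl="minikube kubectl --"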
# Find the kubectl binary
[root@administrator program]# find / -name kubectl
/home/k8s/.minikube/cache/linux/amd64/v1.23.3/kubectl
/www/server/docker/volumes/minikube/_data/lib/minikube/binaries/v1.23.3/kubectl
# Copy it into /bin
[root@administrator program]# cp /www/server/docker/volumes/minikube/_data/lib/minikube/binaries/v1.23.3/kubectl /bin/
[root@administrator program]# su k8s
[k8s@administrator program]$ kubectl version
Client Version: version.Info{Major:"1", Minor:"23", GitVersion:"v1.23.3", GitCommit:"816c97ab8cff8a1c72eccca1026f7820e93e0d25", GitTreeState:"clean", BuildDate:"2022-01-25T21:25:17Z", GoVersion:"go1.17.6", Compiler:"gc", Platform:"linux/amd64"}
Server Version: version.Info{Major:"1", Minor:"23", GitVersion:"v1.23.3", GitCommit:"816c97ab8cff8a1c72eccca1026f7820e93e0d25", GitTreeState:"clean", BuildDate:"2022-01-25T21:19:12Z", GoVersion:"go1.17.6", Compiler:"gc", Platform:"linux/amd64"}
View the cluster information:
[k8s@administrator root]$ kubectl cluster-info
Kubernetes control plane is running at https://192.168.49.2:8443
CoreDNS is running at https://192.168.49.2:8443/api/v1/namespaces/kube-system/services/kube-dns:dns/proxy
To further debug and diagnose cluster problems, use 'kubectl cluster-info dump'.
View the nodes in the cluster (Minikube creates a simple single-node cluster):
[k8s@administrator root]$ kubectl get nodes
NAME STATUS ROLES AGE VERSION
minikube Ready control-plane,master 4h v1.23.3
Visual management with the Dashboard
List Minikube's built-in addons; the Dashboard addon is disabled by default: minikube addons list
|-----------------------------|----------|--------------|--------------------------------|
| ADDON NAME | PROFILE | STATUS | MAINTAINER |
|-----------------------------|----------|--------------|--------------------------------|
| ambassador | minikube | disabled | third-party (ambassador) |
| auto-pause | minikube | disabled | google |
| csi-hostpath-driver | minikube | disabled | kubernetes |
| dashboard | minikube | disabled | kubernetes |
| default-storageclass | minikube | enabled ✅ | kubernetes |
| efk | minikube | disabled | third-party (elastic) |
| freshpod | minikube | disabled | google |
| gcp-auth | minikube | disabled | google |
| gvisor | minikube | disabled | google |
| helm-tiller | minikube | disabled | third-party (helm) |
| ingress | minikube | disabled | unknown (third-party) |
| ingress-dns | minikube | disabled | google |
| istio | minikube | disabled | third-party (istio) |
| istio-provisioner | minikube | disabled | third-party (istio) |
| kong | minikube | disabled | third-party (Kong HQ) |
| kubevirt | minikube | disabled | third-party (kubevirt) |
| logviewer | minikube | disabled | unknown (third-party) |
| metallb | minikube | disabled | third-party (metallb) |
| metrics-server | minikube | disabled | kubernetes |
| nvidia-driver-installer | minikube | disabled | google |
| nvidia-gpu-device-plugin | minikube | disabled | third-party (nvidia) |
| olm | minikube | disabled | third-party (operator |
| | | | framework) |
| pod-security-policy | minikube | disabled | unknown (third-party) |
| portainer | minikube | disabled | portainer.io |
| registry | minikube | disabled | google |
| registry-aliases | minikube | disabled | unknown (third-party) |
| registry-creds | minikube | disabled | third-party (upmc enterprises) |
| storage-provisioner | minikube | enabled ✅ | google |
| storage-provisioner-gluster | minikube | disabled | unknown (third-party) |
| volumesnapshots | minikube | disabled | kubernetes |
|-----------------------------|----------|--------------|--------------------------------|
Enable the Dashboard addon: minikube addons enable dashboard
- Using image kubernetesui/dashboard:v2.3.1
- Using image kubernetesui/metrics-scraper:v1.0.7
* Some dashboard features require the metrics-server addon. To enable all features please run:
minikube addons enable metrics-server
* The 'dashboard' addon is enabled
The --url flag prints the access URL to the console without opening the management page: minikube dashboard --url
* Verifying dashboard health ...
* Launching proxy ...
* Verifying proxy health ...
http://127.0.0.1:33211/api/v1/namespaces/kubernetes-dashboard/services/http:kubernetes-dashboard:/proxy/
Use kubectl to set up a proxy, with --address set to the server's internal IP (a public IP will not work); only then can the Dashboard be reached from outside:
kubectl proxy --port=8100 --address=172.22.4.21 --accept-hosts='^.*' &
Replace the IP and port in the access URL, then open it:
http://ip:8100/api/v1/namespaces/kubernetes-dashboard/services/http:kubernetes-dashboard:/proxy/
Create resources from a YAML manifest
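No manifest was included here; as a minimal sketch, a Deployment like the nginx-test example in section 5 could be written to a file and applied (file name and labels are illustrative):
# nginx-test.yaml
apiVersion: apps/v1
kind: Deployment
metadata:
  name: nginx-test
spec:
  replicas: 1
  selector:
    matchLabels:
      app: nginx-test
  template:
    metadata:
      labels:
        app: nginx-test
    spec:
      containers:
        - name: nginx
          image: nginx
          ports:
            - containerPort: 80
Apply it with: kubectl apply -f nginx-test.yaml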
Manage the cluster
Stop the cluster: minikube stop
Start the cluster: minikube start
Delete the cluster: minikube delete
Pause Kubernetes without affecting deployed applications: minikube pause
Unpause the instance: minikube unpause
Increase the default memory limit (requires a restart): minikube config set memory 16384
List the built-in addons: minikube addons list
3. The Binary Method
TODO
4. The Rancher Method
Installing Rancher
Pull the image
docker pull rancher/rancher:v2.5.12
Start the container
docker run -p 80:80 -p 443:443 --name rancher --restart=unless-stopped --privileged -d rancher/rancher:v2.5.12
Note: Rancher 2.5.x and later must be started with the --privileged flag to enable privileged mode.
Rancher initial configuration and overview
Open the Rancher home page; on first access, set the password for the administrator (admin) account.
Set the Rancher Server URL to an address that every other node can reach.
The Rancher home page shows a K3s cluster installed by default.
Click the cluster name to view the cluster's status information.
Click the dashboard button to open the cluster's various dashboards.
Deploying applications with Rancher
Method 1
Deploy by filling in the form, or
deploy by importing a YAML file.
Method 2
1. Create a Deployment object:
go to Deployments -> Create -> Edit as YAML
and fill in the deployment.yaml content.
2. Create a Service to expose the ports:
fill in the service.yaml content.
Deploying MySQL with Rancher
Create the Deployment
# API version
apiVersion: apps/v1
# API object type
kind: Deployment
metadata:
  # Name of the Deployment
  name: mysql-deployment
  # Namespace of the Deployment; without it, creation fails
  namespace: default
  # Labels of the Deployment
  labels:
    app: mysql
spec:
  # Number of Pod replicas to create
  replicas: 1
  # Defines how to find the Pods to manage
  selector:
    # Manage Pods whose app label is mysql
    matchLabels:
      app: mysql
  # Template used to create the Pods
  template:
    metadata:
      # Label the Pods with app: mysql
      labels:
        app: mysql
    # Spec of the Pod template
    spec:
      containers:
        - name: mysql
          # Container image
          image: mysql:5.7
          # Ports to expose
          ports:
            - containerPort: 3306
          # Environment variables
          env:
            - name: MYSQL_ROOT_PASSWORD
              value: root123456
          # Use volumes
          volumeMounts:
            # Mount the volumes to paths inside the container
            - mountPath: /var/log/mysql
              name: log-volume
            - mountPath: /var/lib/mysql
              name: data-volume
            - mountPath: /etc/mysql
              name: conf-volume
      # Define the volumes
      volumes:
        - name: log-volume
          # Path of the hostPath volume on the host
          hostPath:
            path: /usr/local/program/mysql/log
            # Create the directory if it does not exist
            type: DirectoryOrCreate
        - name: data-volume
          hostPath:
            path: /usr/local/program/mysql/data
            type: DirectoryOrCreate
        - name: conf-volume
          hostPath:
            path: /usr/local/program/mysql/conf
            type: DirectoryOrCreate
Create the Service
apiVersion: v1
kind: Service
metadata:
  # Namespace
  namespace: default
  # Service name; other Pods can use this name as a DNS hostname to reach the service
  name: mysql-service
spec:
  # Service type: expose the service on a static port on each Node
  type: NodePort
  # Select Pods whose app label is mysql
  selector:
    app: mysql
  ports:
    - name: http
      protocol: TCP
      port: 3307
      targetPort: 3306
      # Static port on the Node
      nodePort: 30303
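If kubectl access to the underlying cluster is available (for example through Rancher's built-in kubectl shell), the rollout can be checked with something like:
kubectl get pods -l app=mysql
kubectl get service mysql-service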
Access test
Get the Rancher container's IP address:
[root@administrator ~]# docker inspect rancher |grep IPAddress
"SecondaryIPAddresses": null,
"IPAddress": "172.17.0.2",
"IPAddress": "172.17.0.2",
Connect to the database:
[root@administrator ~]# mysql -h 172.17.0.2 -P 30303 -uroot -proot123456
mysql: [Warning] Using a password on the command line interface can be insecure.
Welcome to the MySQL monitor. Commands end with ; or \g.
Your MySQL connection id is 3
Server version: 5.7.37 MySQL Community Server (GPL)
Copyright (c) 2000, 2021, Oracle and/or its affiliates.
Oracle is a registered trademark of Oracle Corporation and/or its
affiliates. Other names may be trademarks of their respective
owners.
Type 'help;' or '\h' for help. Type '\c' to clear the current input statement.
mysql>
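The Spring Boot example in the next section points its datasource at a database named demo; if it does not exist yet, it can be created through the same NodePort (a one-liner sketch using the credentials above):
mysql -h 172.17.0.2 -P 30303 -uroot -proot123456 -e "CREATE DATABASE IF NOT EXISTS demo DEFAULT CHARACTER SET utf8mb4;"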
Deploying a Spring Boot application with Rancher
Create the Deployment
apiVersion: apps/v1
kind: Deployment
metadata:
  namespace: default
  name: jar-deployment
  labels:
    app: jar-test
spec:
  replicas: 1
  selector:
    matchLabels:
      app: jar-test
  template:
    metadata:
      labels:
        app: jar-test
    spec:
      containers:
        - name: jar-name
          # Image address in the registry (Docker Hub or a private registry)
          image: IP/jar-test:0.0.1-SNAPSHOT
          ports:
            - containerPort: 8080
          env:
            # Database connection URL
            - name: spring.datasource.url
              value: jdbc:mysql://mysql-service:3307/demo?useUnicode=true&characterEncoding=utf-8&serverTimezone=Asia/Shanghai
            - name: logging.path
              value: /var/logs
          volumeMounts:
            - mountPath: /var/logs
              name: log-volume
      volumes:
        - name: log-volume
          hostPath:
            path: /usr/local/program/app/logs
            type: DirectoryOrCreate
Create the Service
apiVersion: v1
kind: Service
metadata:
  namespace: default
  name: jar-service
spec:
  type: NodePort
  selector:
    app: jar-test
  ports:
    - name: http
      protocol: TCP
      port: 8080
      targetPort: 8080
      # Static port on the Node
      nodePort: 30001
Access test
curl http://172.17.0.2:30001/index.html
Adding a cluster
Run the command provided by Rancher on the master node to import the cluster into Rancher.
5. Basic Kubernetes Operations
Deploying an application
Create a Deployment named nginx-test and specify the application image:
kubectl create deployment nginx-test --image=nginx
List all Deployments: kubectl get deployments
NAME READY UP-TO-DATE AVAILABLE AGE
nginx-test 1/1 1 1 18s
Delete the deployed application:
kubectl delete deployment nginx-test
Viewing application information
Check the status of all Pods: kubectl get pods
NAME READY STATUS RESTARTS AGE
nginx-test-84b478f9c5-vz7bw 1/1 Running 0 29s
Show a Pod's detailed status: kubectl describe pods
Name: nginx-test-84b478f9c5-vz7bw
Namespace: default
Priority: 0
Node: minikube/192.168.49.2
Start Time: Tue, 22 Mar 2022 09:03:09 +0800
Labels: app=nginx-test
pod-template-hash=84b478f9c5
Annotations: <none>
Status: Running
IP: 172.17.0.3
IPs:
IP: 172.17.0.3
Controlled By: ReplicaSet/nginx-test-84b478f9c5
Containers:
nginx:
Container ID: docker://8f20af263a8c7dce564fa6d49943fbef4fe151aaaef24e3564e57e13787c7213
Image: nginx
Image ID: docker-pullable://nginx@sha256:e1211ac17b29b585ed1aee166a17fad63d344bc973bc63849d74c6452d549b3e
Port: <none>
Host Port: <none>
State: Running
Started: Tue, 22 Mar 2022 09:03:13 +0800
Ready: True
Restart Count: 0
Environment: <none>
Mounts:
/var/run/secrets/kubernetes.io/serviceaccount from kube-api-access-2hldl (ro)
Conditions:
Type Status
Initialized True
Ready True
ContainersReady True
PodScheduled True
Volumes:
kube-api-access-2hldl:
Type: Projected (a volume that contains injected data from multiple sources)
TokenExpirationSeconds: 3607
ConfigMapName: kube-root-ca.crt
ConfigMapOptional: <nil>
DownwardAPI: true
QoS Class: BestEffort
Node-Selectors: <none>
Tolerations: node.kubernetes.io/not-ready:NoExecute op=Exists for 300s
node.kubernetes.io/unreachable:NoExecute op=Exists for 300s
Events:
Type Reason Age From Message
---- ------ ---- ---- -------
Normal Scheduled 39s default-scheduler Successfully assigned default/nginx-test-84b478f9c5-vz7bw to minikube
Normal Pulling 39s kubelet Pulling image "nginx"
Normal Pulled 36s kubelet Successfully pulled image "nginx" in 2.952573727s
Normal Created 35s kubelet Created container nginx
Normal Started 35s kubelet Started container nginx
Store the Pod name in an environment variable so it can be referenced as $NGINX_POD:
export NGINX_POD=nginx-test-84b478f9c5-vz7bw
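The hash suffix changes whenever the Pod is recreated, so the name can also be captured by label instead of typing it by hand (a sketch using kubectl's jsonpath output):
export NGINX_POD=$(kubectl get pods -l app=nginx-test -o jsonpath='{.items[0].metadata.name}')
echo $NGINX_POD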
View the Pod's logs: kubectl logs $NGINX_POD
/docker-entrypoint.sh: /docker-entrypoint.d/ is not empty, will attempt to perform configuration
/docker-entrypoint.sh: Looking for shell scripts in /docker-entrypoint.d/
/docker-entrypoint.sh: Launching /docker-entrypoint.d/10-listen-on-ipv6-by-default.sh
10-listen-on-ipv6-by-default.sh: info: Getting the checksum of /etc/nginx/conf.d/default.conf
10-listen-on-ipv6-by-default.sh: info: Enabled listen on IPv6 in /etc/nginx/conf.d/default.conf
/docker-entrypoint.sh: Launching /docker-entrypoint.d/20-envsubst-on-templates.sh
/docker-entrypoint.sh: Launching /docker-entrypoint.d/30-tune-worker-processes.sh
/docker-entrypoint.sh: Configuration complete; ready for start up
2022/03/22 01:03:13 [notice] 1#1: using the "epoll" event method
2022/03/22 01:03:13 [notice] 1#1: nginx/1.21.6
2022/03/22 01:03:13 [notice] 1#1: built by gcc 10.2.1 20210110 (Debian 10.2.1-6)
2022/03/22 01:03:13 [notice] 1#1: OS: Linux 3.10.0-1160.59.1.el7.x86_64
2022/03/22 01:03:13 [notice] 1#1: getrlimit(RLIMIT_NOFILE): 1048576:1048576
2022/03/22 01:03:13 [notice] 1#1: start worker processes
2022/03/22 01:03:13 [notice] 1#1: start worker process 32
2022/03/22 01:03:13 [notice] 1#1: start worker process 33
Use exec to run a command inside the Pod's container:
kubectl exec nginx-test-84b478f9c5-vz7bw -- echo hello world
Enter the container with a bash shell; use the exit command to leave:
kubectl exec -it nginx-test-84b478f9c5-vz7bw -- bash
Exposing the application
Create a Service to expose the nginx-test Deployment; the NodePort type assigns an externally reachable port:
kubectl expose deployment nginx-test --type=NodePort --port 80
Check the status of all Services: kubectl get services
NAME TYPE CLUSTER-IP EXTERNAL-IP PORT(S) AGE
kubernetes ClusterIP 10.96.0.1 <none> 443/TCP 16h
nginx-test NodePort 10.101.176.18 <none> 80:32299/TCP 77s
View the Service details:
kubectl describe services nginx-test
Access the service at IP:32299.
Name: nginx-test
Namespace: default
Labels: app=nginx-test
Annotations: <none>
Selector: app=nginx-test
Type: NodePort
IP Family Policy: SingleStack
IP Families: IPv4
IP: 10.101.176.18
IPs: 10.101.176.18
Port: <unset> 80/TCP
TargetPort: 80/TCP
NodePort: <unset> 32299/TCP
Endpoints: 172.17.0.3:80
Session Affinity: None
External Traffic Policy: Cluster
Events: <none>
Working with labels
View the labels on the Deployment: kubectl describe deployment
Name: nginx-test
Namespace: default
CreationTimestamp: Tue, 22 Mar 2022 09:03:09 +0800
Labels: app=nginx-test
Annotations: deployment.kubernetes.io/revision: 1
Selector: app=nginx-test
Replicas: 1 desired | 1 updated | 1 total | 1 available | 0 unavailable
StrategyType: RollingUpdate
MinReadySeconds: 0
RollingUpdateStrategy: 25% max unavailable, 25% max surge
Pod Template:
Labels: app=nginx-test
Containers:
nginx:
Image: nginx
Port: <none>
Host Port: <none>
Environment: <none>
Mounts: <none>
Volumes: <none>
Conditions:
Type Status Reason
---- ------ ------
Available True MinimumReplicasAvailable
Progressing True NewReplicaSetAvailable
OldReplicaSets: <none>
NewReplicaSet: nginx-test-84b478f9c5 (1/1 replicas created)
Events:
Type Reason Age From Message
---- ------ ---- ---- -------
Normal ScalingReplicaSet 32m deployment-controller Scaled up replica set nginx-test-84b478f9c5 to 1
Query Pods by label: kubectl get pods -l app=nginx-test
NAME READY STATUS RESTARTS AGE
nginx-test-84b478f9c5-vz7bw 1/1 Running 0 34m
Query Services by label: kubectl get services -l app=nginx-test
NAME TYPE CLUSTER-IP EXTERNAL-IP PORT(S) AGE
nginx-test NodePort 10.101.176.18 <none> 80:32299/TCP 16m
Add a label to the Pod: kubectl label pod nginx-test-84b478f9c5-vz7bw env_role=dev
View the Pod's details: kubectl describe pods nginx-test-84b478f9c5-vz7bw
Name: nginx-test-84b478f9c5-vz7bw
Namespace: default
Priority: 0
Node: minikube/192.168.49.2
Start Time: Tue, 22 Mar 2022 09:03:09 +0800
Labels: app=nginx-test
env_role=dev
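The new label works as a selector just like app does, for example:
kubectl get pods -l env_role=dev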
Delete the Service by label: kubectl delete service -l app=nginx-test
service "nginx-test" deleted
kubectl get service
NAME TYPE CLUSTER-IP EXTERNAL-IP PORT(S) AGE
kubernetes ClusterIP 10.96.0.1 <none> 443/TCP 16h