Lab environment
OS: Ubuntu 24.04
cat /etc/hosts
192.168.0.71 jichao71
192.168.0.72 jichao72
192.168.0.73 jichao73
192.168.0.74 jichao74
192.168.0.75 jichao75

Install ansible on the deploy (primary) node
apt-get -y install ansible
Download the latest kubeasz release
export release=3.6.4
wget https://github.com/easzlab/kubeasz/releases/download/${release}/ezdown
chmod +x ./ezdown

First ezdown run
At the same time, you need to add the following to the docker.service unit file (the one kubeasz installs itself), under its [Service] section:
vim /etc/systemd/system/docker.service
Environment=HTTP_PROXY=http://127.0.0.1:7890
Environment=HTTPS_PROXY=http://127.0.0.1:7890
Environment=NO_PROXY=localhost,127.0.0.1,docker
vim /etc/docker/daemon.json
---
{
  "exec-opts": ["native.cgroupdriver=systemd"],
  "registry-mirrors": [
    "https://docker.1panel.live",
    "https://kuamavit.mirror.aliyuncs.com"
  ],
  "insecure-registries": ["http://easzlab.io.local:5000"],
  "max-concurrent-downloads": 10,
  "log-driver": "json-file",
  "log-level": "warn",
  "log-opts": {
    "max-size": "10m",
    "max-file": "3"
  },
  "data-root": "/var/lib/docker"
}
---
Once all of the above is done:
systemctl daemon-reload
systemctl restart docker
./ezdown -D    # download the images locally
The output will show some "retry" messages during the pulls; ignore them for now.

docker images | wc -l
Normally there should be 23 images in total, but those retries were unhealthy pulls, so some of the 23 images listed were never actually downloaded.

Second ezdown run
unset http_proxy
unset https_proxy
Turn off your proxy tool, then take the three lines you just added to /etc/systemd/system/docker.service:
Environment=HTTP_PROXY=http://127.0.0.1:7890
Environment=HTTPS_PROXY=http://127.0.0.1:7890
Environment=NO_PROXY=localhost,127.0.0.1,docker
and comment them out.
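After commenting those out, reload systemd and restart Docker so the proxy settings are actually dropped (same commands as before):

systemctl daemon-reload
systemctl restart docker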
Then run ezdown again locally:
./ezdown -D
Only now is your image set complete; before this, docker images listed 23 entries, but some of them were incomplete images.
You'll see that everything that hit retry before now pulls successfully.
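To eyeball what actually landed locally, list the repo:tag pairs:

docker images --format '{{.Repository}}:{{.Tag}}' | sort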

Create a cluster

cd /etc/kubeasz/
./ezctl new k8s1.30.1-cluster

Edit the hosts file
cd /etc/kubeasz/clusters/k8s1.30.1-cluster
vim hosts
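ezctl new generates this hosts file from the kubeasz template. A minimal sketch of the key groups, assuming 192.168.0.71-73 serve as etcd and masters while 74-75 are workers (an assumption based on the lab environment above; adjust to your topology, and leave template variables such as CLUSTER_NETWORK and SERVICE_CIDR at their defaults unless you have a reason to change them):

---
[etcd]
192.168.0.71
192.168.0.72
192.168.0.73

[kube_master]
192.168.0.71
192.168.0.72
192.168.0.73

[kube_node]
192.168.0.74
192.168.0.75
---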

Edit the config.yml file in the same directory (component versions and which add-ons get auto-installed live here)

Bring up the cluster
01 prepare
cd /etc/kubeasz
./ezctl setup k8s1.30.1-cluster 01    # step 01: system environment initialization



02 etcd
./ezctl setup k8s1.30.1-cluster 02
Verify the etcd cluster
vim /etc/profile
----
PATH=$PATH:$HOME/bin:/opt/kube/bin
----
source /etc/profile
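Confirm the binaries are now on PATH (kubeasz installs them under /opt/kube/bin):

command -v etcdctl kubectl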
Verify etcd:
export NODE_IPS="192.168.0.71 192.168.0.72 192.168.0.73"
vim etcd-healthy.sh
---
for ip in ${NODE_IPS}; do
  ETCDCTL_API=3 etcdctl \
    --endpoints=https://${ip}:2379 \
    --cacert=/etc/kubernetes/ssl/ca.pem \
    --cert=/etc/kubernetes/ssl/etcd.pem \
    --key=/etc/kubernetes/ssl/etcd-key.pem \
    endpoint health
done
---
. ./etcd-healthy.sh

03 container runtime
./ezctl setup k8s1.30.1-cluster 03
ps -ef |grep container
kubectl get node

04 master
./ezctl setup k8s1.30.1-cluster 04
kubectl get node


05 node
./ezctl setup k8s1.30.1-cluster 05
kubectl get node

06 network plugin: calico
./ezctl setup k8s1.30.1-cluster 06
# because the config here doesn't define the network interface, our calico pods can't come up
kubectl get pod -n kube-system
Some of them will fail to start.


kubectl get deploy -n kube-system
kubectl delete deploy xx -n kube-system    # replace xx with the deploy name shown above

# Then apply calico's yaml file to update and overwrite the deployment.
# NOTE: before the apply, change the ens NIC number and set the 10.244.0.0/16 pool; see the sketch below.
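In the stock calico manifest those two edits correspond to env vars on the calico-node DaemonSet; a sketch, with assumed values for this lab (ens160 for the NIC, 10.244.0.0/16 as the pod CIDR):

---
# env entries in the calico-node container:
- name: IP_AUTODETECTION_METHOD
  value: "interface=ens160"    # ens160 is an example; match your actual NIC name
- name: CALICO_IPV4POOL_CIDR
  value: "10.244.0.0/16"       # must match this cluster's pod CIDR
---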
kubectl apply -f calico.yaml

kubectl get pods -n kube-system
# nodes only reach the READY state once the network plugin is up
Check the cluster status
kubectl get node

07 other add-on plugins
./ezctl setup k8s1.30.1-cluster 07
kubectl get pod -n kube-system
# we'll skip installing these here
Test the cluster
Install the dashboard and create a token
kubectl apply -f recommended.yaml
kubectl apply -f dashboard-user.yaml
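dashboard-user.yaml isn't reproduced in these notes; per the upstream Dashboard docs it is a ServiceAccount bound to cluster-admin. A sketch (the name admin-user has to match the token command below):

---
apiVersion: v1
kind: ServiceAccount
metadata:
  name: admin-user
  namespace: kubernetes-dashboard
---
apiVersion: rbac.authorization.k8s.io/v1
kind: ClusterRoleBinding
metadata:
  name: admin-user
roleRef:
  apiGroup: rbac.authorization.k8s.io
  kind: ClusterRole
  name: cluster-admin
subjects:
- kind: ServiceAccount
  name: admin-user
  namespace: kubernetes-dashboard
---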
kubectl get ns
kubectl create token admin-user -n kubernetes-dashboard
eyJhbGciOiJSUzI1NiIsImtpZCI6InhLMjhfLXEwMS1VMGhRdzR3YTlqUEpBWDJvZGRxSEZmX3JIUWoyUW5OelEifQ.eyJhdWQiOlsiYXBpIiwiaXN0aW8tY2EiXSwiZXhwIjoxNzI2OTE0OTM2LCJpYXQiOjE3MjY5MTEzMzYsImlzcyI6Imh0dHBzOi8va3ViZXJuZXRlcy5kZWZhdWx0LnN2YyIsImp0aSI6IjlhMTk2ODdiLTFiZTMtNDFhZC1hNzgyLWQ1YTQ4NzczY2I3OSIsImt1YmVybmV0ZXMuaW8iOnsibmFtZXNwYWNlIjoia3ViZXJuZXRlcy1kYXNoYm9hcmQiLCJzZXJ2aWNlYWNjb3VudCI6eyJuYW1lIjoiYWRtaW4tdXNlciIsInVpZCI6ImFiNWYzOWQ0LTczNGYtNDVjMi05MTc2LWI2MzgxZGUyYmU3YyJ9fSwibmJmIjoxNzI2OTExMzM2LCJzdWIiOiJzeXN0ZW06c2VydmljZWFjY291bnQ6a3ViZXJuZXRlcy1kYXNoYm9hcmQ6YWRtaW4tdXNlciJ9.gvNmBVBMHXdwyrKHzZ6ATgJLHAsRdx7B2mlWz_cNuIZTsit1X63JpBfz61YpP1W0qsBVLXCvQ-QstimcQI7Ajuq-xwHkYPlEx1-MuxqmkeKx4-Osp3TdPJFhLPObVjQunOJob5ga7TQzhCdcFLGoUqtqOklSBd_ft-jc9D9q4DO_wxZrXnPn_bxhVCVF4WmvMtfAxSiY45YzPjKbDHMiVIvOCNyN8Tk8OhROwQSxHYC2q6l7JTm1-o9xvDtv-MOIFiFC9ecCN7hcLjhgP93HtKi-U3y6EnQX40ghQm6yL13k4tQdStSKvBxiICKtxUBgw8SlHlkVIMI225NeFsikJA

Log in to the dashboard
kubectl get svc -A
Browse to https://192.168.0.71:30001 (the dashboard's NodePort from the output above) and sign in with the token.


Deploy Nginx as a test
kubectl apply -f nginx-web.yaml
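nginx-web.yaml isn't shown in these notes either; a minimal sketch of what it might contain, assuming a plain Deployment plus a NodePort Service (all names, the image tag, and the port are assumptions):

---
apiVersion: apps/v1
kind: Deployment
metadata:
  name: nginx-web
spec:
  replicas: 1
  selector:
    matchLabels:
      app: nginx-web
  template:
    metadata:
      labels:
        app: nginx-web
    spec:
      containers:
      - name: nginx
        image: nginx:stable
        ports:
        - containerPort: 80
---
apiVersion: v1
kind: Service
metadata:
  name: nginx-web
spec:
  type: NodePort
  selector:
    app: nginx-web
  ports:
  - port: 80
    targetPort: 80
---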


Run a scale test from the dashboard
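The same scale test can be run from the CLI (deployment name per the hypothetical sketch above):

kubectl scale deployment nginx-web --replicas=3
kubectl get pods -o wide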



