Deploying a Kubernetes Cluster

Part 1: System Initialization

Base environment

CentOS7.9_x64

Configure the yum repositories

# Install the Aliyun base repo
curl -o /etc/yum.repos.d/CentOS-Base.repo https://mirrors.aliyun.com/repo/Centos-7.repo

# Install the EPEL repo
yum -y install epel-release

# Rebuild the yum cache
yum makecache

# Update the system (kernel packages excluded)
yum -y update --exclude=kernel*

Problem: "There are unfinished transactions remaining. You might consider running yum-complete-transaction"
# Clear the yum cache
yum -y install yum-utils
yum clean all

# Clean up unfinished transactions
yum-complete-transaction --cleanup-only

Install tools

yum -y install gcc gcc-c++ lrzsz tree unzip openssl-devel pcre-devel rsync wget lsof telnet zip net-tools bind-utils vim git nc psmisc jq

Add an admin user (optional)

useradd mece
echo '123456'|passwd --stdin mece
cat >>/etc/sudoers <<EOF
mece ALL=(ALL) ALL,!/usr/bin/passwd,/usr/bin/passwd [A-Za-z]*,!/usr/bin/passwd root
EOF
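Appending to /etc/sudoers directly can lock you out of sudo if the syntax is wrong; a quick check afterwards does no harm:

# Verify the sudoers syntax after editing
visudo -c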

Disable SELinux

sed -i 's/SELINUX=enforcing/SELINUX=disabled/' /etc/selinux/config

Disable swap

swapoff -a && sed -i '/ swap / s/^\(.*\)$/#\1/g' /etc/fstab

Disable unneeded services

systemctl disable --now firewalld
systemctl disable --now postfix
systemctl disable --now NetworkManager

Configure resource limits

echo -e 'ulimit -c unlimited'  >> /etc/profile
echo -e 'ulimit -s unlimited' >> /etc/profile
echo -e 'ulimit -SHn 65535' >> /etc/profile
echo -e 'export HISTTIMEFORMAT="%F %T `whoami` "' >>/etc/profile
# echo -e 'export TMOUT=300' >>/etc/profile
# echo -e "HISTFILESIZE=100" >>/etc/profile
source /etc/profile

cat >>/etc/security/limits.conf <<EOF
# add parameters
* soft nofile 655350
* hard nofile 655350
* soft nproc 655350
* hard nproc 655350
* soft memlock unlimited
* hard memlock unlimited
EOF

Kernel parameter tuning

cat >> /etc/sysctl.conf <<EOF
############add#################
net.core.somaxconn = 262144
net.core.netdev_max_backlog = 262144
net.core.wmem_default = 8388608
net.core.rmem_default = 8388608
net.core.rmem_max = 16777216
net.core.wmem_max = 16777216
net.ipv4.ip_forward = 1
net.ipv4.route.gc_timeout = 20
net.ipv4.ip_local_port_range = 1024 65535
net.ipv4.tcp_retries2 = 5
net.ipv4.tcp_fin_timeout = 30
# Note: tcp_tw_recycle was removed in kernel 4.12+; drop this line after the kernel upgrade in Part 2
net.ipv4.tcp_tw_recycle = 1
net.ipv4.tcp_tw_reuse = 1
net.ipv4.tcp_syn_retries = 1
net.ipv4.tcp_synack_retries = 1
net.ipv4.tcp_syncookies = 1
net.ipv4.tcp_keepalive_time = 120
net.ipv4.tcp_keepalive_probes = 3
net.ipv4.tcp_keepalive_intvl = 15
net.ipv4.tcp_max_tw_buckets = 36000
net.ipv4.tcp_max_orphans = 3276800
net.ipv4.tcp_max_syn_backlog = 262144
net.ipv4.tcp_wmem = 8192 131072 16777216
net.ipv4.tcp_rmem = 32768 131072 16777216
net.ipv4.tcp_mem = 94500000 915000000 927000000
vm.swappiness = 0
fs.file-max = 6553560
EOF

/sbin/sysctl -p

Configure time synchronization

yum -y install chrony
systemctl enable --now chronyd && chronyc sources
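The default CentOS pool servers usually work; if the nodes cannot reach them, you can point chrony at a reachable NTP source instead (ntp.aliyun.com below is only an example, not part of the original setup):

# Optional: comment out the default pool and use a reachable NTP server
sed -i 's/^server /#server /' /etc/chrony.conf
echo 'server ntp.aliyun.com iburst' >> /etc/chrony.conf
systemctl restart chronyd && chronyc sources -v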

Reboot the server

reboot

Part 2: Kubernetes Initialization

Base environment

Applies to: Kubernetes 1.17+
VMs: do not use cloned VMs, or be sure to fix the NIC/MAC information after cloning
Install method: binary
OS: CentOS 7.9 (a minor release of 7.5 or later is recommended)
Hosts:
192.168.94.200 k8s-master-lb
192.168.94.138 k8s-master-01
192.168.94.139 k8s-node-01
K8s Service CIDR: 10.96.0.0/12
K8s Pod CIDR: 172.168.0.0/12
Docker version: 19.03
Kubernetes version: v1.20.0

Set hostnames

# Set on each host respectively
hostnamectl set-hostname k8s-master-01
hostnamectl set-hostname k8s-node-01

Configure /etc/hosts

# Run on every host
cat >>/etc/hosts <<EOF
192.168.94.200 k8s-master-lb
192.168.94.138 k8s-master-01
192.168.94.139 k8s-node-01
EOF

Create directories

mkdir -pv /data/kubernetes
# Note: used to store Kubernetes-related YAML manifests; needed on master nodes only

Configure the Kubernetes yum repo (only needed for kubeadm installs)

cat >/etc/yum.repos.d/kubernetes.repo <<EOF
[kubernetes]
name=Kubernetes
baseurl=http://mirrors.aliyun.com/kubernetes/yum/repos/kubernetes-el7-x86_64
enabled=1
gpgcheck=0
repo_gpgcheck=0
gpgkey=http://mirrors.aliyun.com/kubernetes/yum/doc/yum-key.gpg http://mirrors.aliyun.com/kubernetes/yum/doc/rpm-package-key.gpg
EOF

Configure passwordless SSH login (optional)

ssh-keygen -t rsa
ssh-copy-id k8s-master-01
ssh-copy-id k8s-node-01

Upgrade the kernel

CentOS 7.9 stock kernel: 3.10.0-1160.53.1.el7.x86_64
Upgrade target: the latest mainline kernel (kernel-ml)

# Upgrade the kernel
rpm --import https://www.elrepo.org/RPM-GPG-KEY-elrepo.org
rpm -Uvh http://www.elrepo.org/elrepo-release-7.0-3.el7.elrepo.noarch.rpm
yum --disablerepo=\* --enablerepo=elrepo-kernel repolist
yum --disablerepo=\* --enablerepo=elrepo-kernel list kernel*
yum --disablerepo=\* --enablerepo=elrepo-kernel install -y kernel-ml.x86_64
grub2-set-default 0 && grub2-mkconfig -o /etc/grub2.cfg
grubby --args="user_namespace.enable=1" --update-kernel="$(grubby --default-kernel)"

# Upgrade the kernel tool packages
yum -y remove kernel-tools-libs.x86_64 kernel-tools.x86_64
yum --disablerepo=\* --enablerepo=elrepo-kernel install -y kernel-ml-tools.x86_64

# Check the default kernel
grubby --default-kernel

# Reboot
reboot

Install dependency packages

yum -y install conntrack ipvsadm ipset libseccomp sysstat

Load the IPVS modules

# On kernel 4.19+ nf_conntrack_ipv4 has been renamed to nf_conntrack; on kernels below 4.19 use nf_conntrack_ipv4
modprobe -- ip_vs
modprobe -- ip_vs_rr
modprobe -- ip_vs_wrr
modprobe -- ip_vs_sh
modprobe -- nf_conntrack
cat <<EOF |tee /etc/modules-load.d/ipvs.conf
ip_vs
ip_vs_lc
ip_vs_wlc
ip_vs_rr
ip_vs_wrr
ip_vs_lblc
ip_vs_lblcr
ip_vs_dh
ip_vs_sh
ip_vs_fo
ip_vs_nq
ip_vs_sed
ip_vs_ftp
ip_tables
ip_set
xt_set
ipt_set
ipip
ipt_rpfilter
ipt_REJECT
br_netfilter
nf_conntrack
EOF

systemctl enable --now systemd-modules-load.service
lsmod |grep -e ip_vs -e nf_conntrack

Configure kernel parameters

cat <<EOF |tee /etc/sysctl.d/kubernetes.conf
net.bridge.bridge-nf-call-iptables=1
net.bridge.bridge-nf-call-ip6tables=1
net.ipv6.conf.all.disable_ipv6=1
EOF
sysctl --system

Reboot

reboot

Part 3: Docker Installation

Remove old Docker versions

yum remove docker \
docker-client \
docker-client-latest \
docker-common \
docker-latest \
docker-latest-logrotate \
docker-logrotate \
docker-engine

Set up the Docker yum repo

yum -y install yum-utils

yum-config-manager \
--add-repo \
http://mirrors.aliyun.com/docker-ce/linux/centos/docker-ce.repo

Install Docker

yum list docker-ce --showduplicates |sort -r
yum -y install docker-ce-19.03.*

Configure Docker

mkdir -pv /etc/docker /data/docker
cat <<EOF |tee /etc/docker/daemon.json
{
  "exec-opts": ["native.cgroupdriver=systemd"],
  "log-driver": "json-file",
  "log-opts": {
    "max-size": "100m"
  },
  "storage-driver": "overlay2",
  "data-root": "/data/docker",
  "registry-mirrors": ["https://b9pmyelo.mirror.aliyuncs.com"]
}
EOF

Start Docker

systemctl daemon-reload && systemctl enable --now docker
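A quick way to confirm the daemon picked up daemon.json (cgroup driver and data root):

# Should report "Cgroup Driver: systemd" and "Docker Root Dir: /data/docker"
docker info | grep -E 'Cgroup Driver|Docker Root Dir'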

Verify Docker is working

docker run hello-world
# Query the Docker API over the unix socket (redis-slave1 is just an example container name)
echo -e "GET /containers/redis-slave1/stats HTTP/1.0\r\n" | nc -U /var/run/docker.sock

Part 4: Binary Installation

Install etcd

cd /data/kubernetes
wget https://github.com/etcd-io/etcd/releases/download/v3.4.13/etcd-v3.4.13-linux-amd64.tar.gz

tar xf etcd-v3.4.13-linux-amd64.tar.gz --strip-components=1 -C /usr/local/bin/ etcd-v3.4.13-linux-amd64/etcd{,ctl}
etcdctl version

Install Kubernetes

cd /data/kubernetes
wget https://storage.googleapis.com/kubernetes-release/release/v1.20.0/kubernetes-server-linux-amd64.tar.gz

tar xf kubernetes-server-linux-amd64.tar.gz --strip-components=3 -C /usr/local/bin/ kubernetes/server/bin/kube{let,ctl,-apiserver,-controller-manager,-scheduler,-proxy}
kubelet --version

Push the binaries to the other nodes

# Master nodes (multi-master setups only; the binaries are already in place on master01)
for node in k8s-master-02 k8s-master-03;do scp -r /usr/local/bin/kube{ctl,-apiserver,-controller-manager,-scheduler} $node:/usr/local/bin/;done
# Worker nodes
for node in k8s-node-01;do scp -r /usr/local/bin/kube{let,-proxy} $node:/usr/local/bin/;done

Configure Calico

# Create the CNI binary directory
mkdir -pv /opt/cni/bin

Generate certificates

wget "https://pkg.cfssl.org/R1.2/cfssl_linux-amd64" -O /usr/local/bin/cfssl
wget "https://pkg.cfssl.org/R1.2/cfssljson_linux-amd64" -O /usr/local/bin/cfssljson
chmod +x /usr/local/bin/cfssl*

# Create the etcd certificate directory on all master nodes
mkdir -pv /etc/etcd/ssl

# Create the Kubernetes certificate directory on all nodes
mkdir -pv /etc/kubernetes/pki


# Generate the etcd certificates on the master01 node
# First prepare the CSR file (certificate signing request) for the etcd CA:
etcd-ca-csr.json
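The original does not show the CSR contents; a minimal etcd-ca-csr.json that works with the command below might look like this (the name fields and expiry are assumptions, adjust as needed):

cat > /data/kubernetes/etcd-ca-csr.json <<EOF
{
  "CN": "etcd",
  "key": {
    "algo": "rsa",
    "size": 2048
  },
  "names": [
    {
      "C": "CN",
      "ST": "Beijing",
      "L": "Beijing",
      "O": "etcd",
      "OU": "Etcd Security"
    }
  ],
  "ca": {
    "expiry": "876000h"
  }
}
EOF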

# Generate the etcd CA certificate and its key
cd /data/kubernetes
cfssl gencert -initca etcd-ca-csr.json |cfssljson -bare /etc/etcd/ssl/etcd-ca

# Issue the etcd server/client certificate and key
cfssl gencert -ca=/etc/etcd/ssl/etcd-ca.pem -ca-key=/etc/etcd/ssl/etcd-ca-key.pem -config=ca-config.json -hostname=127.0.0.1,k8s-master-01,k8s-master-02,k8s-master-03,192.168.94.138 -profile=kubernetes etcd-csr.json |cfssljson -bare /etc/etcd/ssl/etcd
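The command above also assumes a ca-config.json (with a "kubernetes" signing profile) and an etcd-csr.json in /data/kubernetes; a minimal sketch of both, with assumed values:

cat > /data/kubernetes/ca-config.json <<EOF
{
  "signing": {
    "default": {
      "expiry": "876000h"
    },
    "profiles": {
      "kubernetes": {
        "usages": ["signing", "key encipherment", "server auth", "client auth"],
        "expiry": "876000h"
      }
    }
  }
}
EOF

cat > /data/kubernetes/etcd-csr.json <<EOF
{
  "CN": "etcd",
  "key": {
    "algo": "rsa",
    "size": 2048
  },
  "names": [
    {
      "C": "CN",
      "ST": "Beijing",
      "L": "Beijing",
      "O": "etcd",
      "OU": "Etcd Security"
    }
  ]
}
EOF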

# Copy the certificates to the other master nodes (multi-master setups only), e.g.:
for node in k8s-master-02 k8s-master-03;do scp -r /etc/etcd/ssl/etcd-ca* /etc/etcd/ssl/etcd{-key.pem,.pem} $node:/etc/etcd/ssl/;done

# Generate the Kubernetes certificates on the master01 node
# First prepare the CSR file (certificate signing request) for the Kubernetes CA:
ca-csr.json
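As with the etcd CA, ca-csr.json is not shown in the original; a minimal sketch with assumed values follows. The other *-csr.json files referenced below (apiserver-csr.json, front-proxy-ca-csr.json, front-proxy-client-csr.json, manager-csr.json, scheduler-csr.json, admin-csr.json) use the same structure with a different CN/O; for the component client certificates the CN is what the API server uses for RBAC (e.g. system:kube-controller-manager).

cat > /data/kubernetes/ca-csr.json <<EOF
{
  "CN": "kubernetes",
  "key": {
    "algo": "rsa",
    "size": 2048
  },
  "names": [
    {
      "C": "CN",
      "ST": "Beijing",
      "L": "Beijing",
      "O": "Kubernetes",
      "OU": "Kubernetes-manual"
    }
  ],
  "ca": {
    "expiry": "876000h"
  }
}
EOF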

# Generate the Kubernetes CA certificate and its key
cd /data/kubernetes
cfssl gencert -initca ca-csr.json |cfssljson -bare /etc/kubernetes/pki/ca

# Issue the apiserver certificate and key
cfssl gencert -ca=/etc/kubernetes/pki/ca.pem -ca-key=/etc/kubernetes/pki/ca-key.pem -config=ca-config.json -hostname=10.96.0.1,192.168.94.138,127.0.0.1,kubernetes,kubernetes.default,kubernetes.default.svc,kubernetes.default.svc.cluster,kubernetes.default.svc.cluster.local,192.168.94.200,k8s-master-01,k8s-master-02,k8s-master-03 -profile=kubernetes apiserver-csr.json |cfssljson -bare /etc/kubernetes/pki/apiserver

# Generate the apiserver aggregation-layer (front-proxy) certificates
cfssl gencert -initca front-proxy-ca-csr.json |cfssljson -bare /etc/kubernetes/pki/front-proxy-ca
cfssl gencert -ca=/etc/kubernetes/pki/front-proxy-ca.pem -ca-key=/etc/kubernetes/pki/front-proxy-ca-key.pem -config=ca-config.json -profile=kubernetes front-proxy-client-csr.json |cfssljson -bare /etc/kubernetes/pki/front-proxy-client

# Generate the controller-manager certificate
cfssl gencert -ca=/etc/kubernetes/pki/ca.pem -ca-key=/etc/kubernetes/pki/ca-key.pem -config=ca-config.json -profile=kubernetes manager-csr.json |cfssljson -bare /etc/kubernetes/pki/controller-manager

# Build the controller-manager kubeconfig
# set-cluster: define a cluster entry
kubectl config set-cluster kubernetes --certificate-authority=/etc/kubernetes/pki/ca.pem --embed-certs=true --server=https://192.168.94.200:8443 --kubeconfig=/etc/kubernetes/controller-manager.kubeconfig
# set-credentials: define a user entry
kubectl config set-credentials system:kube-controller-manager --client-certificate=/etc/kubernetes/pki/controller-manager.pem --client-key=/etc/kubernetes/pki/controller-manager-key.pem --embed-certs=true --kubeconfig=/etc/kubernetes/controller-manager.kubeconfig
# set-context: define a context
kubectl config set-context system:kube-controller-manager@kubernetes --cluster=kubernetes --user=system:kube-controller-manager --kubeconfig=/etc/kubernetes/controller-manager.kubeconfig
# use-context: make it the default context
kubectl config use-context system:kube-controller-manager@kubernetes --kubeconfig=/etc/kubernetes/controller-manager.kubeconfig

# Generate the kube-scheduler certificate
cfssl gencert -ca=/etc/kubernetes/pki/ca.pem -ca-key=/etc/kubernetes/pki/ca-key.pem -config=ca-config.json -profile=kubernetes scheduler-csr.json |cfssljson -bare /etc/kubernetes/pki/scheduler
# set-cluster: define a cluster entry
kubectl config set-cluster kubernetes --certificate-authority=/etc/kubernetes/pki/ca.pem --embed-certs=true --server=https://192.168.94.200:8443 --kubeconfig=/etc/kubernetes/scheduler.kubeconfig
# set-credentials: define a user entry
kubectl config set-credentials system:kube-scheduler --client-certificate=/etc/kubernetes/pki/scheduler.pem --client-key=/etc/kubernetes/pki/scheduler-key.pem --embed-certs=true --kubeconfig=/etc/kubernetes/scheduler.kubeconfig
# set-context: define a context
kubectl config set-context system:kube-scheduler@kubernetes --cluster=kubernetes --user=system:kube-scheduler --kubeconfig=/etc/kubernetes/scheduler.kubeconfig
# use-context: make it the default context
kubectl config use-context system:kube-scheduler@kubernetes --kubeconfig=/etc/kubernetes/scheduler.kubeconfig

# Generate the admin certificate
cfssl gencert -ca=/etc/kubernetes/pki/ca.pem -ca-key=/etc/kubernetes/pki/ca-key.pem -config=ca-config.json -profile=kubernetes admin-csr.json |cfssljson -bare /etc/kubernetes/pki/admin
# set-cluster: define a cluster entry
kubectl config set-cluster kubernetes --certificate-authority=/etc/kubernetes/pki/ca.pem --embed-certs=true --server=https://192.168.94.200:8443 --kubeconfig=/etc/kubernetes/admin.kubeconfig
# set-credentials: define a user entry
kubectl config set-credentials system:kube-admin --client-certificate=/etc/kubernetes/pki/admin.pem --client-key=/etc/kubernetes/pki/admin-key.pem --embed-certs=true --kubeconfig=/etc/kubernetes/admin.kubeconfig
# set-context: define a context
kubectl config set-context system:kube-admin@kubernetes --cluster=kubernetes --user=system:kube-admin --kubeconfig=/etc/kubernetes/admin.kubeconfig
# use-context: make it the default context
kubectl config use-context system:kube-admin@kubernetes --kubeconfig=/etc/kubernetes/admin.kubeconfig
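To use kubectl on the master without passing --kubeconfig every time, you can copy the admin kubeconfig to the default location (a common convenience step, not shown in the original):

mkdir -p ~/.kube
cp /etc/kubernetes/admin.kubeconfig ~/.kube/config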

# Generate the ServiceAccount key pair
openssl genrsa -out /etc/kubernetes/pki/sa.key 2048
openssl rsa -in /etc/kubernetes/pki/sa.key -pubout -out /etc/kubernetes/pki/sa.pub

# Kubelet certificates
# Issued automatically; no manual step here

# Copy the certificates to the other master nodes (multi-master setups only)
cd /etc/kubernetes/pki
for node in k8s-master-02 k8s-master-03;do scp -r /etc/kubernetes/pki/* root@$node:/etc/kubernetes/pki/;done

cd /etc/kubernetes
for node in k8s-master-02 k8s-master-03;do scp -r admin.kubeconfig controller-manager.kubeconfig scheduler.kubeconfig root@$node:/etc/kubernetes/;done

System component configuration

Configure etcd

vi /etc/etcd/etcd.config.yml

name: 'k8s-master-01'
data-dir: /var/lib/etcd
wal-dir: /var/lib/etcd/wal
snapshot-count: 5000
heartbeat-interval: 100
election-timeout: 1000
quota-backend-bytes: 0
listen-peer-urls: 'https://192.168.94.138:2380'
listen-client-urls: 'https://192.168.94.138:2379,http://127.0.0.1:2379'
max-snapshots: 3
max-wals: 5
cors:
initial-advertise-peer-urls: 'https://192.168.94.138:2380'
advertise-client-urls: 'https://192.168.94.138:2379'
discovery:
discovery-fallback: 'proxy'
discovery-proxy:
discovery-srv:
# initial-cluster: 'k8s-master-01=https://192.168.94.138:2380,k8s-master02=https://192.168.1.18:2380,k8s-master03=https://192.168.1.20:2380'
initial-cluster: 'k8s-master-01=https://192.168.94.138:2380'
initial-cluster-token: 'etcd-k8s-cluster'
initial-cluster-state: 'new'
strict-reconfig-check: false
enable-v2: true
enable-pprof: true
proxy: 'off'
proxy-failure-wait: 5000
proxy-refresh-interval: 30000
proxy-dial-timeout: 1000
proxy-write-timeout: 5000
proxy-read-timeout: 0
client-transport-security:
  ca-file: '/etc/kubernetes/pki/etcd/etcd-ca.pem'
  cert-file: '/etc/kubernetes/pki/etcd/etcd.pem'
  key-file: '/etc/kubernetes/pki/etcd/etcd-key.pem'
  client-cert-auth: true
  trusted-ca-file: '/etc/kubernetes/pki/etcd/etcd-ca.pem'
  auto-tls: true
peer-transport-security:
  ca-file: '/etc/kubernetes/pki/etcd/etcd-ca.pem'
  cert-file: '/etc/kubernetes/pki/etcd/etcd.pem'
  key-file: '/etc/kubernetes/pki/etcd/etcd-key.pem'
  peer-client-cert-auth: true
  trusted-ca-file: '/etc/kubernetes/pki/etcd/etcd-ca.pem'
  auto-tls: true
debug: false
log-package-levels:
log-output: default
force-new-cluster: false

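Note that the config above reads the etcd certificates from /etc/kubernetes/pki/etcd/, while they were generated under /etc/etcd/ssl/. One way to reconcile the two paths (an assumption, not shown in the original) is to link them:

# Run on every etcd (master) node
mkdir -p /etc/kubernetes/pki/etcd
ln -s /etc/etcd/ssl/* /etc/kubernetes/pki/etcd/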

# Start etcd via a systemd unit (a sketch of the unit file follows below)
/usr/lib/systemd/system/etcd.service
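The unit file itself is not shown in the original; a minimal sketch that starts etcd with the config above (paths assumed to match this guide), followed by a basic health check:

cat > /usr/lib/systemd/system/etcd.service <<EOF
[Unit]
Description=Etcd Service
Documentation=https://coreos.com/etcd/docs/latest/
After=network.target

[Service]
Type=notify
ExecStart=/usr/local/bin/etcd --config-file=/etc/etcd/etcd.config.yml
Restart=on-failure
RestartSec=10
LimitNOFILE=65536

[Install]
WantedBy=multi-user.target
Alias=etcd3.service
EOF

systemctl daemon-reload
systemctl enable --now etcd

# Check cluster health
etcdctl --endpoints="https://192.168.94.138:2379" --cacert=/etc/kubernetes/pki/etcd/etcd-ca.pem --cert=/etc/kubernetes/pki/etcd/etcd.pem --key=/etc/kubernetes/pki/etcd/etcd-key.pem endpoint status --write-out=table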