KubeSphere offline deployment

This must be done after the Kubernetes (k8s) environment has been installed. Supported k8s versions:

  • 1.19.+
  • 1.20.+
  • 1.21.+
  • 1.22.+
  • 1.23.+


StorageClass: preparing default dynamic storage

Deploying KubeSphere requires the cluster to have some default StorageClass, because KubeSphere's built-in Pods request storage resources while they are starting up. Here NFS is used to provide it.

NFS installation

Install this on any host and create the data directory wherever you like; I used a single-node k8s here, so adapt the deployment to your own needs.

mkdir -p /data/file
yum -y install nfs-utils rpcbind

vim /etc/exports
/data/file *(rw,no_root_squash,sync)

systemctl start nfs && systemctl enable nfs
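
Before wiring NFS into Kubernetes, it is worth confirming the export is actually visible. A quick check from any cluster node (showmount ships with nfs-utils; 172.22.254.147 is the NFS host used in the manifests below):

showmount -e 172.22.254.147

The output should list /data/file. If nothing shows up, re-check /etc/exports and run exportfs -r to re-export.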

nfs-provisioner deployment

  • deployment.yaml
  • rbac.yaml
  • class.yaml

deployment.yaml

apiVersion: apps/v1
kind: Deployment
metadata:
  name: nfs-client-provisioner
  labels:
    app: nfs-client-provisioner
  # replace with namespace where provisioner is deployed
  namespace: default
spec:
  replicas: 1
  strategy:
    type: Recreate
  selector:
    matchLabels:
      app: nfs-client-provisioner
  template:
    metadata:
      labels:
        app: nfs-client-provisioner
    spec:
      serviceAccountName: nfs-client-provisioner
      containers:
        - name: nfs-client-provisioner
          image: lizhenliang/nfs-subdir-external-provisioner:v4.0.1
          volumeMounts:
            - name: nfs-client-root
              mountPath: /persistentvolumes
          env:
            - name: PROVISIONER_NAME
              value: k8s-sigs.io/nfs-subdir-external-provisioner
            - name: NFS_SERVER
              value: 172.22.254.147
            - name: NFS_PATH
              value: /data/file/
      volumes:
        - name: nfs-client-root
          nfs:
            server: 172.22.254.147
            path: /data/file/

rbac.yaml

apiVersion: v1
kind: ServiceAccount
metadata:
  name: nfs-client-provisioner
  # replace with namespace where provisioner is deployed
  namespace: default
---
kind: ClusterRole
apiVersion: rbac.authorization.k8s.io/v1
metadata:
  name: nfs-client-provisioner-runner
rules:
  - apiGroups: [""]
    resources: ["persistentvolumes"]
    verbs: ["get", "list", "watch", "create", "delete"]
  - apiGroups: [""]
    resources: ["persistentvolumeclaims"]
    verbs: ["get", "list", "watch", "update"]
  - apiGroups: ["storage.k8s.io"]
    resources: ["storageclasses"]
    verbs: ["get", "list", "watch"]
  - apiGroups: [""]
    resources: ["events"]
    verbs: ["create", "update", "patch"]
---
kind: ClusterRoleBinding
apiVersion: rbac.authorization.k8s.io/v1
metadata:
  name: run-nfs-client-provisioner
subjects:
  - kind: ServiceAccount
    name: nfs-client-provisioner
    # replace with namespace where provisioner is deployed
    namespace: default
roleRef:
  kind: ClusterRole
  name: nfs-client-provisioner-runner
  apiGroup: rbac.authorization.k8s.io
---
kind: Role
apiVersion: rbac.authorization.k8s.io/v1
metadata:
  name: leader-locking-nfs-client-provisioner
  # replace with namespace where provisioner is deployed
  namespace: default
rules:
  - apiGroups: [""]
    resources: ["endpoints"]
    verbs: ["get", "list", "watch", "create", "update", "patch"]
---
kind: RoleBinding
apiVersion: rbac.authorization.k8s.io/v1
metadata:
  name: leader-locking-nfs-client-provisioner
  # replace with namespace where provisioner is deployed
  namespace: default
subjects:
  - kind: ServiceAccount
    name: nfs-client-provisioner
    # replace with namespace where provisioner is deployed
    namespace: default
roleRef:
  kind: Role
  name: leader-locking-nfs-client-provisioner
  apiGroup: rbac.authorization.k8s.io

class.yaml

apiVersion: storage.k8s.io/v1
kind: StorageClass
metadata:
  name: nfs-storage
provisioner: k8s-sigs.io/nfs-subdir-external-provisioner # or choose another name, but it must match the deployment's env PROVISIONER_NAME
parameters:
  archiveOnDelete: "false"

Deploy the StorageClass and set it as the default

kubectl apply -f .

kubectl get sc

kubectl patch storageclass nfs-storage -p '{"metadata": {"annotations":{"storageclass.kubernetes.io/is-default-class":"true"}}}'
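
As a sanity check, create a throwaway PVC and confirm the provisioner binds it (a minimal sketch; the PVC name is arbitrary):

kubectl apply -f - <<EOF
apiVersion: v1
kind: PersistentVolumeClaim
metadata:
  name: nfs-test-pvc
spec:
  accessModes: ["ReadWriteMany"]
  storageClassName: nfs-storage
  resources:
    requests:
      storage: 1Mi
EOF
kubectl get pvc nfs-test-pvc   # STATUS should become Bound within a few seconds
kubectl delete pvc nfs-test-pvc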

Image preparation

Docker prerequisites

The registry must be added to the insecure-registries whitelist in daemon.json so that images can be pulled from and pushed to it over plain HTTP:

{
"registry-mirrors": ["https://registry.docker-cn.com"],
"insecure-registries": ["172.22.254.86"]
}
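
After editing daemon.json, restart Docker so the change takes effect, and confirm the registry appears in the daemon's view (the address listed here must match the Harbor host you push to; later steps in this article use 172.22.254.147):

systemctl daemon-reload && systemctl restart docker
docker info | grep -A 3 "Insecure Registries"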

Pull images with the official offline script

mkdir /kubesphere-offline-install
chmod +x offline-installation-tool.sh
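
Once the filtered list from the next section is saved as image-list.txt, the tool's -s (save) mode pulls and packs every listed image. A sketch following the official air-gapped guide for v3.3.0, run on a machine with internet access (the download URL is the v3.3.0 release asset):

cd /kubesphere-offline-install
curl -L -O https://github.com/kubesphere/ks-installer/releases/download/v3.3.0/offline-installation-tool.sh
./offline-installation-tool.sh -s -l image-list.txt -d ./kubesphere-images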

Filtering the image list

I have already filtered the image list as follows (entries prefixed with ## are commented out and skipped):

##k8s-images
##kubesphere/kube-apiserver:v1.23.7
##kubesphere/kube-controller-manager:v1.23.7
##kubesphere/kube-proxy:v1.23.7
##kubesphere/kube-scheduler:v1.23.7
##kubesphere/kube-apiserver:v1.24.1
##kubesphere/kube-controller-manager:v1.24.1
##kubesphere/kube-proxy:v1.24.1
##kubesphere/kube-scheduler:v1.24.1
##kubesphere/kube-apiserver:v1.22.10
##kubesphere/kube-controller-manager:v1.22.10
##kubesphere/kube-proxy:v1.22.10
##kubesphere/kube-scheduler:v1.22.10
##kubesphere/kube-apiserver:v1.21.13
##kubesphere/kube-controller-manager:v1.21.13
##kubesphere/kube-proxy:v1.21.13
##kubesphere/kube-scheduler:v1.21.13
##kubesphere/pause:3.7
##kubesphere/pause:3.6
##kubesphere/pause:3.5
##kubesphere/pause:3.4.1
##coredns/coredns:1.8.0
##coredns/coredns:1.8.6
##calico/cni:v3.20.0
##calico/kube-controllers:v3.20.0
##calico/node:v3.20.0
##calico/pod2daemon-flexvol:v3.20.0
##calico/typha:v3.20.0
##kubesphere/flannel:v0.12.0
##openebs/provisioner-localpv:2.10.1
##openebs/linux-utils:2.10.0
##library/haproxy:2.3
##kubesphere/nfs-subdir-external-provisioner:v4.0.2
##kubesphere/k8s-dns-node-cache:1.15.12
##kubesphere-images
kubesphere/ks-installer:v3.3.0
kubesphere/ks-apiserver:v3.3.0
kubesphere/ks-console:v3.3.0
kubesphere/ks-controller-manager:v3.3.0
kubesphere/kubectl:v1.22.0
kubesphere/kubectl:v1.21.0
kubesphere/kubectl:v1.20.0
kubesphere/kubefed:v0.8.1
kubesphere/tower:v0.2.0
minio/minio:RELEASE.2019-08-07T01-59-21Z
minio/mc:RELEASE.2019-08-07T23-14-43Z
csiplugin/snapshot-controller:v4.0.0
##kubesphere/nginx-ingress-controller:v1.1.0
##mirrorgooglecontainers/defaultbackend-amd64:1.4
##kubesphere/metrics-server:v0.4.2
##redis:5.0.14-alpine
##haproxy:2.0.25-alpine
##alpine:3.14
##osixia/openldap:1.3.0
##kubesphere/netshoot:v1.0
##kubeedge-images
##kubeedge/cloudcore:v1.9.2
##kubeedge/iptables-manager:v1.9.2
##kubesphere/edgeservice:v0.2.0
##gatekeeper-images
##openpolicyagent/gatekeeper:v3.5.2
##openpitrix-images
##kubesphere/openpitrix-jobs:v3.2.1
##kubesphere-devops-images
##kubesphere/devops-apiserver:v3.3.0
##kubesphere/devops-controller:v3.3.0
##kubesphere/devops-tools:v3.3.0
##kubesphere/ks-jenkins:v3.3.0-2.319.1
##jenkins/inbound-agent:4.10-2
##kubesphere/builder-base:v3.2.2
##kubesphere/builder-nodejs:v3.2.0
##kubesphere/builder-maven:v3.2.0
##kubesphere/builder-maven:v3.2.1-jdk11
##kubesphere/builder-python:v3.2.0
##kubesphere/builder-go:v3.2.0
##kubesphere/builder-go:v3.2.2-1.16
##kubesphere/builder-go:v3.2.2-1.17
##kubesphere/builder-go:v3.2.2-1.18
##kubesphere/builder-base:v3.2.2-podman
##kubesphere/builder-nodejs:v3.2.0-podman
##kubesphere/builder-maven:v3.2.0-podman
##kubesphere/builder-maven:v3.2.1-jdk11-podman
##kubesphere/builder-python:v3.2.0-podman
##kubesphere/builder-go:v3.2.0-podman
##kubesphere/builder-go:v3.2.2-1.16-podman
##kubesphere/builder-go:v3.2.2-1.17-podman
##kubesphere/builder-go:v3.2.2-1.18-podman
##kubesphere/s2ioperator:v3.2.1
##kubesphere/s2irun:v3.2.0
##kubesphere/s2i-binary:v3.2.0
##kubesphere/tomcat85-java11-centos7:v3.2.0
##kubesphere/tomcat85-java11-runtime:v3.2.0
##kubesphere/tomcat85-java8-centos7:v3.2.0
##kubesphere/tomcat85-java8-runtime:v3.2.0
##kubesphere/java-11-centos7:v3.2.0
##kubesphere/java-8-centos7:v3.2.0
##kubesphere/java-8-runtime:v3.2.0
##kubesphere/java-11-runtime:v3.2.0
##kubesphere/nodejs-8-centos7:v3.2.0
##kubesphere/nodejs-6-centos7:v3.2.0
##kubesphere/nodejs-4-centos7:v3.2.0
##kubesphere/python-36-centos7:v3.2.0
##kubesphere/python-35-centos7:v3.2.0
##kubesphere/python-34-centos7:v3.2.0
##kubesphere/python-27-centos7:v3.2.0
##quay.io/argoproj/argocd:v2.3.3
##quay.io/argoproj/argocd-applicationset:v0.4.1
##ghcr.io/dexidp/dex:v2.30.2
##redis:6.2.6-alpine
##kubesphere-monitoring-images
jimmidyson/configmap-reload:v0.5.0
prom/prometheus:v2.34.0
kubesphere/prometheus-config-reloader:v0.55.1
kubesphere/prometheus-operator:v0.55.1
kubesphere/kube-rbac-proxy:v0.11.0
kubesphere/kube-state-metrics:v2.3.0
prom/node-exporter:v1.3.1
prom/alertmanager:v0.23.0
thanosio/thanos:v0.25.2
grafana/grafana:8.3.3
kubesphere/kube-rbac-proxy:v0.8.0
kubesphere/notification-manager-operator:v1.4.0
kubesphere/notification-manager:v1.4.0
kubesphere/notification-tenant-sidecar:v3.2.0
##kubesphere-logging-images
##kubesphere/elasticsearch-curator:v5.7.6
##kubesphere/elasticsearch-oss:6.8.22
##kubesphere/fluentbit-operator:v0.13.0
##docker:19.03
##kubesphere/fluent-bit:v1.8.11
##kubesphere/log-sidecar-injector:1.1
##elastic/filebeat:6.7.0
##kubesphere/kube-events-operator:v0.4.0
##kubesphere/kube-events-exporter:v0.4.0
##kubesphere/kube-events-ruler:v0.4.0
##kubesphere/kube-auditing-operator:v0.2.0
##kubesphere/kube-auditing-webhook:v0.2.0
##istio-images
##istio/pilot:1.11.1
##istio/proxyv2:1.11.1
##jaegertracing/jaeger-operator:1.27
##jaegertracing/jaeger-agent:1.27
##jaegertracing/jaeger-collector:1.27
##jaegertracing/jaeger-query:1.27
##jaegertracing/jaeger-es-index-cleaner:1.27
##kubesphere/kiali-operator:v1.38.1
##kubesphere/kiali:v1.38
##example-images
##busybox:1.31.1
##nginx:1.14-alpine
##joosthofman/wget:1.0
##nginxdemos/hello:plain-text
##wordpress:4.8-apache
##mirrorgooglecontainers/hpa-example:latest
##java:openjdk-8-jre-alpine
##fluent/fluentd:v1.4.2-2.0
##perl:latest
##kubesphere/examples-bookinfo-productpage-v1:1.16.2
##kubesphere/examples-bookinfo-reviews-v1:1.16.2
##kubesphere/examples-bookinfo-reviews-v2:1.16.2
##kubesphere/examples-bookinfo-details-v1:1.16.2
##kubesphere/examples-bookinfo-ratings-v1:1.16.3
##weave-scope-images
##weaveworks/scope:1.13.0

Create the following projects in Harbor; they are needed as push targets for the images:

csiplugin
grafana
jimmidyson
kubesphere
minio
mirrorgooglecontainers
prom
thanosio
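
If you would rather script this than click through the UI, Harbor's v2.0 REST API can create the projects. A sketch, assuming default admin credentials and that Harbor listens on plain HTTP at 172.22.254.147:

for p in csiplugin grafana jimmidyson kubesphere minio mirrorgooglecontainers prom thanosio; do
  curl -u "admin:Harbor12345" -H "Content-Type: application/json" \
       -X POST "http://172.22.254.147/api/v2.0/projects" \
       -d "{\"project_name\": \"$p\", \"metadata\": {\"public\": \"true\"}}"
done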

![Harbor projects](https://img-blog.csdnimg.cn/img_convert/746dd54f7f2beffe5ada41eabe34806c.png)

Push to the Harbor registry

cd /kubesphere-offline-install
tar -zxvf kubesphere-images.tar.gz
./offline-installation-tool.sh -l image-list.txt -d ./kubesphere-images -r 172.22.254.147   # -r pushes every image in the list to the registry

A few images have pull problems and must be handled manually:

docker load < defaultbackend-amd64.tar 
docker tag mirrorgooglecontainers/defaultbackend-amd64:1.4 172.22.254.147/mirrorgooglecontainers/defaultbackend-amd64:1.4
docker push 172.22.254.147/mirrorgooglecontainers/defaultbackend-amd64:1.4

At this point, image preparation is complete.

Installing KubeSphere with ks-installer

Two places need to be modified:

  • local_registry in the cluster configuration file

local_registry: "172.22.254.147"

  • the ks-installer image in the installer file

image: 172.22.254.147/kubesphere/ks-installer:v3.3.0
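
If you prefer to patch both files non-interactively, a sed sketch (the same substitutions the OfflineAutoKS.sh script at the end of this article performs; HARBOR stands for your registry address):

HARBOR=172.22.254.147
sed -ri "s#^\s*image: kubesphere.*/ks-installer:.*#          image: $HARBOR/kubesphere/ks-installer:v3.3.0#" kubesphere-installer.yaml
sed -ri "s@local_registry: .*@local_registry: \"$HARBOR\"@g" cluster-configuration.yaml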

cluster-configuration.yaml

---
apiVersion: installer.kubesphere.io/v1alpha1
kind: ClusterConfiguration
metadata:
  name: ks-installer
  namespace: kubesphere-system
  labels:
    version: v3.3.0
spec:
  persistence:
    storageClass: ""        # If there is no default StorageClass in your cluster, you need to specify an existing StorageClass here.
  authentication:
    jwtSecret: ""           # Keep the jwtSecret consistent with the Host Cluster. Retrieve the jwtSecret by executing "kubectl -n kubesphere-system get cm kubesphere-config -o yaml | grep -v "apiVersion" | grep jwtSecret" on the Host Cluster.
  local_registry: "172.22.254.147"  # Add your private registry address if it is needed.
  # dev_tag: ""             # Add your kubesphere image tag you want to install, by default it's same as ks-installer release version.
  etcd:
    monitoring: false       # Enable or disable etcd monitoring dashboard installation. You have to create a Secret for etcd before you enable it.
    endpointIps: localhost  # etcd cluster EndpointIps. It can be a bunch of IPs here.
    port: 2379              # etcd port.
    tlsEnable: true
  common:
    core:
      console:
        enableMultiLogin: true  # Enable or disable simultaneous logins. It allows different users to log in with the same account at the same time.
        port: 30880
        type: NodePort
    # apiserver:            # Enlarge the apiserver and controller manager's resource requests and limits for the large cluster
    #   resources: {}
    # controllerManager:
    #   resources: {}
    redis:
      enabled: false
      enableHA: false
      volumeSize: 2Gi  # Redis PVC size.
    openldap:
      enabled: false
      volumeSize: 2Gi  # openldap PVC size.
    minio:
      volumeSize: 20Gi  # Minio PVC size.
    monitoring:
      # type: external  # Whether to specify the external prometheus stack, and need to modify the endpoint at the next line.
      endpoint: http://prometheus-operated.kubesphere-monitoring-system.svc:9090  # Prometheus endpoint to get metrics data.
      GPUMonitoring:  # Enable or disable the GPU-related metrics. If you enable this switch but have no GPU resources, Kubesphere will set it to zero.
        enabled: false
    gpu:  # Install GPUKinds. The default GPU kind is nvidia.com/gpu. Other GPU kinds can be added here according to your needs.
      kinds:
        - resourceName: "nvidia.com/gpu"
          resourceType: "GPU"
          default: true
    es:  # Storage backend for logging, events and auditing.
      # master:
      #   volumeSize: 4Gi   # The volume size of Elasticsearch master nodes.
      #   replicas: 1       # The total number of master nodes. Even numbers are not allowed.
      #   resources: {}
      # data:
      #   volumeSize: 20Gi  # The volume size of Elasticsearch data nodes.
      #   replicas: 1       # The total number of data nodes.
      #   resources: {}
      logMaxAge: 7          # Log retention time in built-in Elasticsearch. It is 7 days by default.
      elkPrefix: logstash   # The string making up index names. The index name will be formatted as ks-<elk_prefix>-log.
      basicAuth:
        enabled: false
        username: ""
        password: ""
      externalElasticsearchHost: ""
      externalElasticsearchPort: ""
  alerting:          # (CPU: 0.1 Core, Memory: 100 MiB) It enables users to customize alerting policies to send messages to receivers in time with different time intervals and alerting levels to choose from.
    enabled: false   # Enable or disable the KubeSphere Alerting System.
    # thanosruler:
    #   replicas: 1
    #   resources: {}
  auditing:          # Provide a security-relevant chronological set of records, recording the sequence of activities happening on the platform, initiated by different tenants.
    enabled: false   # Enable or disable the KubeSphere Auditing Log System.
    # operator:
    #   resources: {}
    # webhook:
    #   resources: {}
  devops:            # (CPU: 0.47 Core, Memory: 8.6 G) Provide an out-of-the-box CI/CD system based on Jenkins, and automated workflow tools including Source-to-Image & Binary-to-Image.
    enabled: false   # Enable or disable the KubeSphere DevOps System.
    # resources: {}
    jenkinsMemoryLim: 2Gi       # Jenkins memory limit.
    jenkinsMemoryReq: 1500Mi    # Jenkins memory request.
    jenkinsVolumeSize: 8Gi      # Jenkins volume size.
    jenkinsJavaOpts_Xms: 1200m  # The following three fields are JVM parameters.
    jenkinsJavaOpts_Xmx: 1600m
    jenkinsJavaOpts_MaxRAM: 2g
  events:            # Provide a graphical web console for Kubernetes Events exporting, filtering and alerting in multi-tenant Kubernetes clusters.
    enabled: false   # Enable or disable the KubeSphere Events System.
    # operator:
    #   resources: {}
    # exporter:
    #   resources: {}
    # ruler:
    #   enabled: true
    #   replicas: 2
    #   resources: {}
  logging:           # (CPU: 57 m, Memory: 2.76 G) Flexible logging functions are provided for log query, collection and management in a unified console. Additional log collectors can be added, such as Elasticsearch, Kafka and Fluentd.
    enabled: false   # Enable or disable the KubeSphere Logging System.
    logsidecar:
      enabled: true
      replicas: 2
      # resources: {}
  metrics_server:    # (CPU: 56 m, Memory: 44.35 MiB) It enables HPA (Horizontal Pod Autoscaler).
    enabled: false   # Enable or disable metrics-server.
  monitoring:
    storageClass: ""  # If there is an independent StorageClass you need for Prometheus, you can specify it here. The default StorageClass is used by default.
    node_exporter:
      port: 9100
      # resources: {}
    # kube_rbac_proxy:
    #   resources: {}
    # kube_state_metrics:
    #   resources: {}
    # prometheus:
    #   replicas: 1  # Prometheus replicas are responsible for monitoring different segments of data source and providing high availability.
    #   volumeSize: 20Gi  # Prometheus PVC size.
    #   resources: {}
    #   operator:
    #     resources: {}
    # alertmanager:
    #   replicas: 1  # AlertManager Replicas.
    #   resources: {}
    # notification_manager:
    #   resources: {}
    #   operator:
    #     resources: {}
    #   proxy:
    #     resources: {}
    gpu:  # GPU monitoring-related plug-in installation.
      nvidia_dcgm_exporter:  # Ensure that gpu resources on your hosts can be used normally, otherwise this plug-in will not work properly.
        enabled: false       # Check whether the labels on the GPU hosts contain "nvidia.com/gpu.present=true" to ensure that the DCGM pod is scheduled to these nodes.
        # resources: {}
  multicluster:
    clusterRole: none  # host | member | none  # You can install a solo cluster, or specify it as the Host or Member Cluster.
  network:
    networkpolicy:  # Network policies allow network isolation within the same cluster, which means firewalls can be set up between certain instances (Pods).
      # Make sure that the CNI network plugin used by the cluster supports NetworkPolicy. There are a number of CNI network plugins that support NetworkPolicy, including Calico, Cilium, Kube-router, Romana and Weave Net.
      enabled: false  # Enable or disable network policies.
    ippool:  # Use Pod IP Pools to manage the Pod network address space. Pods to be created can be assigned IP addresses from a Pod IP Pool.
      type: none  # Specify "calico" for this field if Calico is used as your CNI plugin. "none" means that Pod IP Pools are disabled.
    topology:  # Use Service Topology to view Service-to-Service communication based on Weave Scope.
      type: none  # Specify "weave-scope" for this field to enable Service Topology. "none" means that Service Topology is disabled.
  openpitrix:  # An App Store that is accessible to all platform tenants. You can use it to manage apps across their entire lifecycle.
    store:
      enabled: false  # Enable or disable the KubeSphere App Store.
  servicemesh:      # (0.3 Core, 300 MiB) Provide fine-grained traffic management, observability and tracing, and visualized traffic topology.
    enabled: false  # Base component (pilot). Enable or disable KubeSphere Service Mesh (Istio-based).
    istio:  # Customizing the istio installation configuration, refer to https://istio.io/latest/docs/setup/additional-setup/customize-installation/
      components:
        ingressGateways:
          - name: istio-ingressgateway
            enabled: false
        cni:
          enabled: false
  edgeruntime:  # Add edge nodes to your cluster and deploy workloads on edge nodes.
    enabled: false
    kubeedge:  # kubeedge configurations
      enabled: false
      cloudCore:
        cloudHub:
          advertiseAddress:  # At least a public IP address or an IP address which can be accessed by edge nodes must be provided.
            - ""             # Note that once KubeEdge is enabled, CloudCore will malfunction if the address is not provided.
        service:
          cloudhubNodePort: "30000"
          cloudhubQuicNodePort: "30001"
          cloudhubHttpsNodePort: "30002"
          cloudstreamNodePort: "30003"
          tunnelNodePort: "30004"
        # resources: {}
        # hostNetWork: false
      iptables-manager:
        enabled: true
        mode: "external"
        # resources: {}
      # edgeService:
      #   resources: {}
  gatekeeper:       # Provide admission policy and rule management, A validating (mutating TBA) webhook that enforces CRD-based policies executed by Open Policy Agent.
    enabled: false  # Enable or disable Gatekeeper.
    # controller_manager:
    #   resources: {}
    # audit:
    #   resources: {}
  terminal:
    # image: 'alpine:3.15'  # There must be an nsenter program in the image
    timeout: 600  # Container timeout, if set to 0, no timeout will be used. The unit is seconds

kubesphere-installer.yaml

---
apiVersion: apiextensions.k8s.io/v1
kind: CustomResourceDefinition
metadata:
  name: clusterconfigurations.installer.kubesphere.io
spec:
  group: installer.kubesphere.io
  versions:
    - name: v1alpha1
      served: true
      storage: true
      schema:
        openAPIV3Schema:
          type: object
          properties:
            spec:
              type: object
              x-kubernetes-preserve-unknown-fields: true
            status:
              type: object
              x-kubernetes-preserve-unknown-fields: true
  scope: Namespaced
  names:
    plural: clusterconfigurations
    singular: clusterconfiguration
    kind: ClusterConfiguration
    shortNames:
      - cc

---
apiVersion: v1
kind: Namespace
metadata:
  name: kubesphere-system

---
apiVersion: v1
kind: ServiceAccount
metadata:
  name: ks-installer
  namespace: kubesphere-system

---
apiVersion: rbac.authorization.k8s.io/v1
kind: ClusterRole
metadata:
  name: ks-installer
rules:
  # Full access to every API group the installer manages.
  - apiGroups:
      - ""
      - apps
      - extensions
      - batch
      - rbac.authorization.k8s.io
      - apiregistration.k8s.io
      - apiextensions.k8s.io
      - tenant.kubesphere.io
      - certificates.k8s.io
      - devops.kubesphere.io
      - monitoring.coreos.com
      - logging.kubesphere.io
      - jaegertracing.io
      - storage.k8s.io
      - admissionregistration.k8s.io
      - policy
      - autoscaling
      - networking.istio.io
      - config.istio.io
      - iam.kubesphere.io
      - notification.kubesphere.io
      - auditing.kubesphere.io
      - events.kubesphere.io
      - core.kubefed.io
      - installer.kubesphere.io
      - storage.kubesphere.io
      - security.istio.io
      - monitoring.kiali.io
      - kiali.io
      - networking.k8s.io
      - edgeruntime.kubesphere.io
      - types.kubefed.io
      - monitoring.kubesphere.io
      - application.kubesphere.io
    resources:
      - '*'
    verbs:
      - '*'

---
kind: ClusterRoleBinding
apiVersion: rbac.authorization.k8s.io/v1
metadata:
  name: ks-installer
subjects:
  - kind: ServiceAccount
    name: ks-installer
    namespace: kubesphere-system
roleRef:
  kind: ClusterRole
  name: ks-installer
  apiGroup: rbac.authorization.k8s.io

---
apiVersion: apps/v1
kind: Deployment
metadata:
  name: ks-installer
  namespace: kubesphere-system
  labels:
    app: ks-installer
spec:
  replicas: 1
  selector:
    matchLabels:
      app: ks-installer
  template:
    metadata:
      labels:
        app: ks-installer
    spec:
      serviceAccountName: ks-installer
      containers:
        - name: installer
          image: 172.22.254.147/kubesphere/ks-installer:v3.3.0
          imagePullPolicy: "Always"
          resources:
            limits:
              cpu: "1"
              memory: 1Gi
            requests:
              cpu: 20m
              memory: 100Mi
          volumeMounts:
            - mountPath: /etc/localtime
              name: host-time
              readOnly: true
      volumes:
        - hostPath:
            path: /etc/localtime
            type: ""
          name: host-time

Run the installation commands

kubectl apply -f kubesphere-installer.yaml
kubectl apply -f cluster-configuration.yaml

Watch the installation logs and result

kubectl logs -n kubesphere-system $(kubectl get pod -n kubesphere-system -l 'app in (ks-install, ks-installer)' -o jsonpath='{.items[0].metadata.name}') -f

Sample output on success

Collecting installation results ...
#####################################################
### Welcome to KubeSphere! ###
#####################################################

Console: http://172.22.254.85:30880
Account: admin
Password: P@88w0rd

NOTES:
1. After you log into the console, please check the
monitoring status of service components in
"Cluster Management". If any service is not
ready, please wait patiently until all components
are up and running.
2. Please change the default password after login.

#####################################################
https://kubesphere.io 2022-09-29 10:52:13
#####################################################
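
Once the welcome banner appears, it is worth confirming that everything actually came up before logging in:

kubectl get pod -A | grep -v Running              # anything still Pending or crash-looping shows up here
kubectl get svc -n kubesphere-system ks-console   # the console Service, exposed on NodePort 30880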

Scripted automatic offline installation

Package directory layout

![Package directory layout](https://img-blog.csdnimg.cn/img_convert/a29a79a0416a8eef67129ef1c513b078.png)

Extract the offline deployment package

Note: the package must be extracted under /opt.

tar -zxvf offline-kubesphere-autoinstall.tar.gz -C /opt

Manually edit the NFS dynamic-storage manifest

spec:
  serviceAccountName: nfs-client-provisioner
  containers:
    - name: nfs-client-provisioner
      image: lizhenliang/nfs-subdir-external-provisioner:v4.0.1
      volumeMounts:
        - name: nfs-client-root
          mountPath: /persistentvolumes
      env:
        - name: PROVISIONER_NAME
          value: k8s-sigs.io/nfs-subdir-external-provisioner
        - name: NFS_SERVER
          value: 172.22.254.23 ## replace with your own host IP
        - name: NFS_PATH
          value: /data/file/
  volumes:
    - name: nfs-client-root
      nfs:
        server: 172.22.254.23 ## replace with your own host IP
        path: /data/file/
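
Rather than editing by hand, a one-liner can swap in the local address. A sketch, assuming the manifest lives in the extracted package's strongeClass directory under a file named deployment.yaml (adjust the path to the actual file name):

sed -i "s/172.22.254.23/$(hostname -I | awk '{print $1}')/g" /opt/offline-kubesphere-autoinstall/strongeClass/deployment.yaml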

Create projects in the Harbor registry

Create the following projects, one by one, before running the script:

  • csiplugin
  • grafana
  • jimmidyson
  • kubesphere
  • minio
  • mirrorgooglecontainers
  • prom
  • thanosio

![Harbor projects](https://img-blog.csdnimg.cn/img_convert/805530564595099dcb37cfad1597a039.png)

Run the OfflineAutoKS.sh script

Make the script executable, then follow the prompts and the installation runs automatically.

chmod +x /opt/offline-kubesphere-autoinstall/OfflineAutoKS.sh
/opt/offline-kubesphere-autoinstall/OfflineAutoKS.sh

Sample output on completion

Start installing monitoring
Start installing multicluster
Start installing openpitrix
Start installing network
**************************************************
Waiting for all tasks to be completed ...
task network status is successful (1/4)
task multicluster status is successful (2/4)
task openpitrix status is successful (3/4)
task monitoring status is successful (4/4)
**************************************************
Collecting installation results ...
#####################################################
### Welcome to KubeSphere! ###
#####################################################

Console: http://172.22.254.23:30880
Account: admin
Password: P@88w0rd

NOTES:
1. After you log into the console, please check the
monitoring status of service components in
"Cluster Management". If any service is not
ready, please wait patiently until all components
are up and running.
2. Please change the default password after login.

#####################################################
https://kubesphere.io 2022-10-08 16:27:49
#####################################################

OfflineAutoKS.sh

Note: the script is intended to be used together with the offline package; do not mix it with the manual approach documented above.

#!/bin/bash

echo -e "
KubeSphere offline installation script
Supported k8s versions: v1.19.+ v1.20.+ v1.21.+ v1.22.+ v1.23.+
author: alexclownfish
"
echo -e "Images will be pushed to Harbor. Before pushing, make sure the following projects already exist in Harbor:
csiplugin
grafana
jimmidyson
kubesphere
minio
mirrorgooglecontainers
prom
thanosio"

function StrongClassDeploy() {
    cd /opt/offline-kubesphere-autoinstall/strongeClass/nfs
    echo -e "Deploying the nfs-provisioner dynamic storage"
    tar -zxvf nfs-utils.tar.gz
    cd nfs-utils
    rpm -Uvh *.rpm --nodeps --force
    cd ..
    tar -zxvf rpcbind.tar.gz
    cd rpcbind
    rpm -Uvh *.rpm --nodeps --force
    mkdir -p /data/file
    echo "/data/file *(rw,no_root_squash,sync)" > /etc/exports
    systemctl start nfs && systemctl enable nfs
    if [ $? -eq 0 ]; then
        echo -e "NFS dynamic storage installed successfully"
    else
        echo -e "NFS dynamic storage installation failed"
        return
    fi
    echo -e "Deploying nfs-provisioner to k8s"
    cd /opt/offline-kubesphere-autoinstall/strongeClass
    kubectl apply -f .
}

function SetStrongeClassDefault() {
    echo -e "Setting nfs-provisioner as the default StorageClass"
    ScName=$(kubectl get sc | awk '{print $1}' | sed -n "2p")
    kubectl patch storageclass $ScName -p '{"metadata": {"annotations":{"storageclass.kubernetes.io/is-default-class":"true"}}}'
    if [ $? -eq 0 ]; then
        echo -e "nfs-provisioner is now the default StorageClass"
    else
        echo -e "Failed to set nfs-provisioner as the default StorageClass"
        return
    fi
}

function SetHttpHabor() {
    echo -e "Whitelisting the Harbor registry for HTTP in Docker"
    sed -i '11a"insecure-registries": ["172.22.254.23"],' /etc/docker/daemon.json
    systemctl daemon-reload && systemctl restart docker
}

function PushImageToHarbor() {
    cd /opt/offline-kubesphere-autoinstall/kubesphere-offline-install
    chmod +x offline-installation-tool.sh
    tar -zxvf kubesphere-images.tar.gz
    ./offline-installation-tool.sh -l image-list.txt -d ./kubesphere-images -r $HarborIP
    docker load < defaultbackend-amd64.tar
    docker tag mirrorgooglecontainers/defaultbackend-amd64:1.4 $HarborIP/mirrorgooglecontainers/defaultbackend-amd64:1.4
    docker push $HarborIP/mirrorgooglecontainers/defaultbackend-amd64:1.4
}

function ChangeKSConfig() {
    cd /opt/offline-kubesphere-autoinstall/kubesphere-offline-install
    sed -ri "s#^\s*image: kubesphere.*/ks-installer:.*#          image: $HarborIP/kubesphere/ks-installer:v3.3.0#" ./kubesphere-installer.yaml
    sed -ri "s@local_registry: .*@local_registry: \"$HarborIP\"@g" ./cluster-configuration.yaml
}

function DeployKS() {
    cd /opt/offline-kubesphere-autoinstall/kubesphere-offline-install
    kubectl apply -f kubesphere-installer.yaml
    kubectl apply -f cluster-configuration.yaml
    echo -e "Creating ks-installer......"
    sleep 15s
    echo -e "Tailing the ks-installer logs......"
    kubectl logs -n kubesphere-system $(kubectl get pod -n kubesphere-system -l 'app in (ks-install, ks-installer)' -o jsonpath='{.items[0].metadata.name}') -f
}

read -p "Enter the Harbor registry address: " HarborIP

read -p "Deploy nfs-provisioner as the default StorageClass? (yes/no): " IsDeploySC
if [ "$IsDeploySC" == "yes" ]; then
    StrongClassDeploy
    SetStrongeClassDefault
elif [ "$IsDeploySC" == "no" ]; then
    echo -e "Skipping StorageClass deployment"
else
    echo -e "Please answer yes/no"
fi

read -p "Whitelist Harbor as an insecure registry for Docker? (yes/no): " IsDockerWriteList
if [ "$IsDockerWriteList" == "yes" ]; then
    SetHttpHabor
elif [ "$IsDockerWriteList" == "no" ]; then
    echo -e "Skipping the Docker whitelist configuration"
else
    echo -e "Please answer yes/no"
fi

PushImageToHarbor
ChangeKSConfig
DeployKS

