创建部署问题时,请参考下面模板,你提供的信息越多,越容易及时获得解答。如果未按模板创建问题,管理员有权关闭问题。
确保帖子格式清晰易读,用 markdown code block 语法格式化代码块。
你只花一分钟创建的问题,不能指望别人花上半个小时给你解答。
操作系统信息
例如:虚拟机/物理机,Centos7.5/Ubuntu18.04,4C/8G
Kubernetes版本信息
将 kubectl version 命令执行结果贴在下方
容器运行时
将 docker version / crictl version / nerdctl version 结果贴在下方
KubeSphere版本信息
例如:v2.1.1/v3.0.0。离线安装还是在线安装。在已有K8s上安装还是使用kk安装。
问题是什么
v3.4.1/v1.26.15 离线安装,使用 `./kk create cluster -f ksp-v341-v1226-offline.yaml -a ksp-v3.4.1-v1.26-artifact.tar.gz --with-packages --skip-push-images` 命令进行离线安装部署。镜像在我的本地harbor中已经准备好,测试过可以在本地用 ctr -n k8s.io images pull 拉取,为何集群部署时还是拉取了阿里云镜像。
以下是离线部署配置文件:
---
# KubeKey cluster definition for an offline (air-gapped) HA deployment:
# 3 combined control-plane/etcd/worker nodes + 2 dedicated workers,
# pulling all images from the private Harbor registry below.
apiVersion: kubekey.kubesphere.io/v1alpha2
kind: Cluster
metadata:
  name: sample
spec:
  hosts:
    # NOTE(review): plaintext SSH passwords are committed here — prefer
    # `privateKeyPath` or injecting credentials at deploy time.
    - {name: ksp-master-1, address: 172.18.2.114, internalAddress: 172.18.2.114, user: root, password: "ThsKPCNm0W3yYTFE"}
    - {name: ksp-master-2, address: 172.18.2.115, internalAddress: 172.18.2.115, user: root, password: "ThsKPCNm0W3yYTFE"}
    - {name: ksp-master-3, address: 172.18.2.116, internalAddress: 172.18.2.116, user: root, password: "ThsKPCNm0W3yYTFE"}
    # - {name: ksp-registry, address: 172.18.2.112, internalAddress: 172.18.2.112, user: root, password: "ThsKPCNm0W3yYTFE"}
    - {name: ksp-worker-1, address: 172.18.2.111, internalAddress: 172.18.2.111, user: root, password: "ThsKPCNm0W3yYTFE"}
    - {name: ksp-worker-2, address: 172.18.2.113, internalAddress: 172.18.2.113, user: root, password: "ThsKPCNm0W3yYTFE"}
  roleGroups:
    etcd:
      - ksp-master-1
      - ksp-master-2
      - ksp-master-3
    control-plane:
      - ksp-master-1
      - ksp-master-2
      - ksp-master-3
    worker:
      - ksp-master-1
      - ksp-master-2
      - ksp-master-3
      # - ksp-registry
      - ksp-worker-1
      - ksp-worker-2
    # registry:
    #   - ksp-registry
  controlPlaneEndpoint:
    # Internal loadbalancer for apiservers
    internalLoadbalancer: haproxy
    domain: lb.dtsw.local
    address: ""
    port: 6443
  kubernetes:
    version: v1.26.15
    clusterName: dtsw.cluster.local
    autoRenewCerts: true
    containerManager: containerd
  etcd:
    type: kubekey
  network:
    plugin: calico
    kubePodsCIDR: 10.233.64.0/18
    kubeServiceCIDR: 10.233.0.0/18
    calico:
      ipipMode: CrossSubnet  # IPIP encapsulation only across subnets
      vxlanMode: Never       # VXLAN disabled
    # multus support. https://github.com/k8snetworkplumbingwg/multus-cni
    multusCNI:
      enabled: false
  registry:
    type: harbor
    auths:
      "registry.dtsw.cn":
        username: admin
        password: Harbor12345
        certsPath: "/etc/harbor/certs.d/registry.dtsw.cn"
    privateRegistry: "registry.dtsw.cn"
    namespaceOverride: "kubesphereio"
    registryMirrors: []
    insecureRegistries: []
  addons:
    - name: longhorn
      namespace: longhorn-system
      sources:
        chart:
          name: longhorn
          repo: http://172.18.2.112:8080
          version: 1.10.1
          # NOTE(review): confirm your KubeKey version accepts inline chart
          # values as a block scalar here; some releases expect a
          # `valuesFile` path or a list of set-style strings instead.
          values: |
            persistence:
              defaultClass: true
              defaultClassReplicaCount: 3
            defaultSettings:
              defaultDataPath: /data/longhorn/
              defaultDataLocality: disabled
              defaultReplicaCount: 3
              guaranteedEngineCPU: 0.25
            ingress:
              enabled: false
            metrics:
              enabled: true
---
# ks-installer ClusterConfiguration for KubeSphere v3.4.1, tuned for an
# offline install: all images are expected to come from the private
# registry via `local_registry` / `namespace_override`.
apiVersion: installer.kubesphere.io/v1alpha1
kind: ClusterConfiguration
metadata:
  name: ks-installer
  namespace: kubesphere-system
  labels:
    version: v3.4.1
spec:
  persistence:
    storageClass: longhorn
  authentication:
    jwtSecret: ""
  # Private registry used by ks-installer for component images.
  # NOTE(review): in the stock v3.4 template these two keys live at `spec`
  # level, NOT under `authentication`. If they were indented under
  # `authentication` in your file, ks-installer would ignore them and fall
  # back to its default (aliyun) registry — which matches the reported
  # symptom. Verify the indentation in the deployed copy.
  local_registry: "registry.dtsw.cn"
  namespace_override: "kubesphereio"
  # NOTE(review): a `components` stanza with per-image overrides is not part
  # of the stock ClusterConfiguration v1alpha1 schema; unknown keys are
  # typically ignored, so these custom tags may have no effect — confirm
  # your ks-installer build actually reads them.
  components:
    ks-installer:
      image: registry.dtsw.cn/kubesphereio/ks-installer
      tag: v3.4.1-custom
    ks-apiserver:
      image: registry.dtsw.cn/kubesphereio/ks-apiserver
      tag: v3.4.1
    ks-controller-manager:
      image: registry.dtsw.cn/kubesphereio/ks-controller-manager
      tag: v3.4.1-custom
  etcd:
    monitoring: true
    endpointIps: localhost
    port: 2379
    tlsEnable: true
  common:
    core:
      console:
        enableMultiLogin: true
        port: 30880
        type: NodePort
    # apiserver:
    #   resources: {}
    # controllerManager:
    #   resources: {}
    redis:
      enabled: false
      enableHA: false
      volumeSize: 2Gi
    openldap:
      enabled: false
      volumeSize: 2Gi
    minio:
      volumeSize: 10Gi
    monitoring:
      # type: external
      endpoint: http://prometheus-operated.kubesphere-monitoring-system.svc:9090
      GPUMonitoring:
        enabled: false
    gpu:
      kinds:
        - resourceName: "nvidia.com/gpu"
          resourceType: "GPU"
          default: true
    es:
      # master:
      #   volumeSize: 4Gi
      #   replicas: 1
      #   resources: {}
      # data:
      #   volumeSize: 20Gi
      #   replicas: 1
      #   resources: {}
      enabled: false
      logMaxAge: 7
      elkPrefix: logstash
      basicAuth:
        enabled: false
        username: ""
        password: ""
      externalElasticsearchHost: ""
      externalElasticsearchPort: ""
    opensearch:
      # master:
      #   volumeSize: 4Gi
      #   replicas: 1
      #   resources: {}
      # data:
      #   volumeSize: 20Gi
      #   replicas: 1
      #   resources: {}
      enabled: false
      logMaxAge: 7
      opensearchPrefix: whizard
      basicAuth:
        enabled: true
        username: "admin"
        password: "admin"
      externalOpensearchHost: ""
      externalOpensearchPort: ""
      dashboard:
        enabled: false
  alerting:
    enabled: false
    # thanosruler:
    #   replicas: 1
    #   resources: {}
  auditing:
    enabled: false
    # operator:
    #   resources: {}
    # webhook:
    #   resources: {}
  devops:
    enabled: false
    jenkinsCpuReq: 0.5
    jenkinsCpuLim: 1
    jenkinsMemoryReq: 4Gi
    jenkinsMemoryLim: 4Gi
    jenkinsVolumeSize: 16Gi
  events:
    enabled: false
    # operator:
    #   resources: {}
    # exporter:
    #   resources: {}
    ruler:
      enabled: false
      replicas: 2
      # resources: {}
  logging:
    enabled: false
    logsidecar:
      enabled: false
      replicas: 2
      # resources: {}
  metrics_server:
    enabled: true
  monitoring:
    storageClass: longhorn
    node_exporter:
      port: 9100
      # resources: {}
    # kube_rbac_proxy:
    #   resources: {}
    # kube_state_metrics:
    #   resources: {}
    # prometheus:
    #   replicas: 1
    #   volumeSize: 20Gi
    #   resources: {}
    #   operator:
    #     resources: {}
    # alertmanager:
    #   replicas: 1
    #   resources: {}
    # notification_manager:
    #   resources: {}
    #   operator:
    #     resources: {}
    #   proxy:
    #     resources: {}
    alertmanager:
      replicas: 1
    thanosruler:  # added explicitly to disable thanos ruler
      enabled: false
      replicas: 1
    gpu:
      nvidia_dcgm_exporter:
        enabled: false
        # resources: {}
  multicluster:
    clusterRole: none
  network:
    networkpolicy:
      enabled: true
    ippool:
      type: calico
    topology:
      type: weave-scope
  openpitrix:
    store:
      enabled: true
  servicemesh:
    enabled: false
    istio:
      components:
        ingressGateways:
          - name: istio-ingressgateway
            enabled: false
        cni:
          enabled: false
  edgeruntime:
    enabled: false
    kubeedge:
      enabled: false
      cloudCore:
        cloudHub:
          advertiseAddress:
            - ""
        service:
          cloudhubNodePort: "30000"
          cloudhubQuicNodePort: "30001"
          cloudhubHttpsNodePort: "30002"
          cloudstreamNodePort: "30003"
          tunnelNodePort: "30004"
        # resources: {}
        # hostNetWork: false
      iptables-manager:
        enabled: true
        mode: "external"
        # resources: {}
      # edgeService:
      #   resources: {}
  gatekeeper:
    enabled: false
    # controller_manager:
    #   resources: {}
    # audit:
    #   resources: {}
  terminal:
    timeout: 600
  zone: ""