I'm only scaling out by adding one node (node2), yet all of this has to be re-downloaded, and the kernel parameters get re-applied on the existing masters and all the other nodes as well.
My YAML file, with node2 as the newly added node:
apiVersion: kubekey.kubesphere.io/v1alpha1
kind: Cluster
metadata:
  name: sample
spec:
  hosts:
  ## You should complete the ssh information of the hosts
  - {name: master1, address: 192.168.4.230, internalAddress: 192.168.4.230, user: root, password: t}
  - {name: master2, address: 192.168.4.231, internalAddress: 192.168.4.231, user: root, password: t}
  - {name: master3, address: 192.168.4.232, internalAddress: 192.168.4.232, user: root, password: t}
  - {name: node1, address: 192.168.4.233, internalAddress: 192.168.4.233, user: root, password: t}
  - {name: node2, address: 192.168.4.234, internalAddress: 192.168.4.234, user: root, password: t}
  roleGroups:
    etcd:
    - master1
    - master2
    - master3
    master:
    - master1
    - master2
    - master3
    worker:
    - node1
    - node2
  controlPlaneEndpoint:
    ## Internal loadbalancer for apiservers
    # internalLoadbalancer: haproxy
    ## If an external loadbalancer is used, 'address' should be set to the loadbalancer's IP.
    domain: lb.kubesphere.local
    address: "192.168.4.230"
    port: 6443
  kubernetes:
    version: v1.21.5
    clusterName: cluster.local
    proxyMode: ipvs
    masqueradeAll: false
    maxPods: 110
    nodeCidrMaskSize: 24
  network:
    plugin: calico
    kubePodsCIDR: 10.233.64.0/18
    kubeServiceCIDR: 10.233.0.0/18
  registry:
    privateRegistry: ""
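
For reference, a scale-out with this file would normally be triggered by KubeKey's add-nodes command (a minimal sketch, assuming the config above is saved as sample.yaml next to the kk binary):

# Add the new node(s) defined in the config to the existing cluster
./kk add nodes -f sample.yaml

# Afterwards, confirm that node2 has joined and is Ready
kubectl get nodes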