yunkunrao pod/calico-node-xljh2 and pod/calico-node-6947s

[root@node1 ~]# kubectl get events
LAST SEEN   TYPE     REASON                    OBJECT       MESSAGE
3h5m        Normal   Starting                  node/node1   Starting kubelet.
3h5m        Normal   NodeHasSufficientMemory   node/node1   Node node1 status is now: NodeHasSufficientMemory
3h5m        Normal   NodeHasNoDiskPressure     node/node1   Node node1 status is now: NodeHasNoDiskPressure
3h5m        Normal   NodeHasSufficientPID      node/node1   Node node1 status is now: NodeHasSufficientPID
3h5m        Normal   NodeAllocatableEnforced   node/node1   Updated Node Allocatable limit across pods
3h4m        Normal   Starting                  node/node1   Starting kube-proxy.
3h4m        Normal   RegisteredNode            node/node1   Node node1 event: Registered Node node1 in Controller
3h4m        Normal   Starting                  node/node1   Starting kube-proxy.
7m17s       Normal   Starting                  node/node1   Starting kubelet.
7m16s       Normal   NodeHasSufficientMemory   node/node1   Node node1 status is now: NodeHasSufficientMemory
7m16s       Normal   NodeHasNoDiskPressure     node/node1   Node node1 status is now: NodeHasNoDiskPressure
7m16s       Normal   NodeHasSufficientPID      node/node1   Node node1 status is now: NodeHasSufficientPID
7m17s       Normal   NodeAllocatableEnforced   node/node1   Updated Node Allocatable limit across pods
5m53s       Normal   Starting                  node/node1   Starting kube-proxy.
5m38s       Normal   Starting                  node/node1   Starting kube-proxy.
5m32s       Normal   RegisteredNode            node/node1   Node node1 event: Registered Node node1 in Controller
3h4m        Normal   RegisteredNode            node/node2   Node node2 event: Registered Node node2 in Controller
3h3m        Normal   NodeNotReady              node/node2   Node node2 status is now: NodeNotReady
6m49s       Normal   Starting                  node/node2   Starting kubelet.
6m35s       Normal   NodeHasSufficientMemory   node/node2   Node node2 status is now: NodeHasSufficientMemory
6m42s       Normal   NodeHasNoDiskPressure     node/node2   Node node2 status is now: NodeHasNoDiskPressure
6m35s       Normal   NodeHasSufficientPID      node/node2   Node node2 status is now: NodeHasSufficientPID
6m49s       Normal   NodeAllocatableEnforced   node/node2   Updated Node Allocatable limit across pods
5m51s       Normal   Starting                  node/node2   Starting kube-proxy.
5m38s       Normal   Starting                  node/node2   Starting kube-proxy.
5m32s       Normal   RegisteredNode            node/node2   Node node2 event: Registered Node node2 in Controller
3h4m        Normal   RegisteredNode            node/node3   Node node3 event: Registered Node node3 in Controller
3h3m        Normal   NodeNotReady              node/node3   Node node3 status is now: NodeNotReady
6m43s       Normal   Starting                  node/node3   Starting kubelet.
6m29s       Normal   NodeHasSufficientMemory   node/node3   Node node3 status is now: NodeHasSufficientMemory
6m36s       Normal   NodeHasNoDiskPressure     node/node3   Node node3 status is now: NodeHasNoDiskPressure
6m29s       Normal   NodeHasSufficientPID      node/node3   Node node3 status is now: NodeHasSufficientPID
6m42s       Normal   NodeAllocatableEnforced   node/node3   Updated Node Allocatable limit across pods
5m51s       Normal   Starting                  node/node3   Starting kube-proxy.
5m39s       Normal   Starting                  node/node3   Starting kube-proxy.
5m32s       Normal   RegisteredNode            node/node3   Node node3 event: Registered Node node3 in Controller
[root@node1 ~]# 

[root@node1 ~]# kubectl get events -n kube-system |grep  calico-node-6947s
4h1m        Warning   Unhealthy                pod/calico-node-6947s                          Readiness probe failed: calico/node is not ready: BIRD is not ready: Failed to stat() nodename file: stat /var/lib/calico/nodename: no such file or directory
3h52m       Warning   BackOff                  pod/calico-node-6947s                          Back-off restarting failed container
15m         Warning   FailedMount              pod/calico-node-6947s                          MountVolume.SetUp failed for volume "calico-node-token-qtlkr" : failed to sync secret cache: timed out waiting for the condition
15m         Normal    SandboxChanged           pod/calico-node-6947s                          Pod sandbox changed, it will be killed and re-created.
15m         Normal    Pulled                   pod/calico-node-6947s                          Container image "dockerhub.kubekey.local/calico/cni:v3.15.1" already present on machine
15m         Normal    Created                  pod/calico-node-6947s                          Created container upgrade-ipam
15m         Normal    Started                  pod/calico-node-6947s                          Started container upgrade-ipam
15m         Normal    Pulled                   pod/calico-node-6947s                          Container image "dockerhub.kubekey.local/calico/cni:v3.15.1" already present on machine
15m         Normal    Created                  pod/calico-node-6947s                          Created container install-cni
15m         Normal    Started                  pod/calico-node-6947s                          Started container install-cni
15m         Normal    Pulled                   pod/calico-node-6947s                          Container image "dockerhub.kubekey.local/calico/pod2daemon-flexvol:v3.15.1" already present on machine
15m         Normal    Created                  pod/calico-node-6947s                          Created container flexvol-driver
15m         Normal    Started                  pod/calico-node-6947s                          Started container flexvol-driver
5m55s       Normal    Pulled                   pod/calico-node-6947s                          Container image "dockerhub.kubekey.local/calico/node:v3.15.1" already present on machine
15m         Normal    Created                  pod/calico-node-6947s                          Created container calico-node
15m         Normal    Started                  pod/calico-node-6947s                          Started container calico-node
14m         Warning   Unhealthy                pod/calico-node-6947s                          Readiness probe failed: calico/node is not ready: BIRD is not ready: Failed to stat() nodename file: stat /var/lib/calico/nodename: no such file or directory
14m         Warning   Unhealthy                pod/calico-node-6947s                          Liveness probe failed: calico/node is not ready: bird/confd is not live: exit status 1
55s         Warning   BackOff                  pod/calico-node-6947s                          Back-off restarting failed container
[root@node1 ~]# 

[root@node1 ~]# kubectl get events -n kube-system |grep  calico-node-xljh2 
4h8m        Normal    Pulled                   pod/calico-node-xljh2                          Container image "dockerhub.kubekey.local/calico/node:v3.15.1" already present on machine
3h53m       Warning   Unhealthy                pod/calico-node-xljh2                          Readiness probe failed: calico/node is not ready: BIRD is not ready: Failed to stat() nodename file: stat /var/lib/calico/nodename: no such file or directory
3h58m       Warning   BackOff                  pod/calico-node-xljh2                          Back-off restarting failed container
17m         Normal    SandboxChanged           pod/calico-node-xljh2                          Pod sandbox changed, it will be killed and re-created.
17m         Normal    Pulled                   pod/calico-node-xljh2                          Container image "dockerhub.kubekey.local/calico/cni:v3.15.1" already present on machine
17m         Normal    Created                  pod/calico-node-xljh2                          Created container upgrade-ipam
17m         Normal    Started                  pod/calico-node-xljh2                          Started container upgrade-ipam
16m         Normal    Pulled                   pod/calico-node-xljh2                          Container image "dockerhub.kubekey.local/calico/cni:v3.15.1" already present on machine
16m         Normal    Created                  pod/calico-node-xljh2                          Created container install-cni
16m         Normal    Started                  pod/calico-node-xljh2                          Started container install-cni
16m         Normal    Pulled                   pod/calico-node-xljh2                          Container image "dockerhub.kubekey.local/calico/pod2daemon-flexvol:v3.15.1" already present on machine
16m         Normal    Created                  pod/calico-node-xljh2                          Created container flexvol-driver
16m         Normal    Started                  pod/calico-node-xljh2                          Started container flexvol-driver
16m         Normal    Pulled                   pod/calico-node-xljh2                          Container image "dockerhub.kubekey.local/calico/node:v3.15.1" already present on machine
16m         Normal    Created                  pod/calico-node-xljh2                          Created container calico-node
16m         Normal    Started                  pod/calico-node-xljh2                          Started container calico-node
6m53s       Warning   Unhealthy                pod/calico-node-xljh2                          Readiness probe failed: calico/node is not ready: BIRD is not ready: Failed to stat() nodename file: stat /var/lib/calico/nodename: no such file or directory
15m         Warning   Unhealthy                pod/calico-node-xljh2                          Liveness probe failed: calico/node is not ready: bird/confd is not live: exit status 1
15m         Normal    Killing                  pod/calico-node-xljh2                          Container calico-node failed liveness probe, will be restarted
2m1s        Warning   BackOff                  pod/calico-node-xljh2                          Back-off restarting failed container
[root@node1 ~]# 
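For reference, a quick way to dig further into the readiness-probe failure shown above is to check whether Calico has actually written its nodename file on the affected node, and to read the logs of the previous (crashed) calico-node container. This is only a sketch; substitute the pod name and node from your own output:

# on the node hosting the failing pod, check for the file the probe complains about
ls -l /var/lib/calico/

# inspect the logs of the previous calico-node container to see why it keeps restarting
kubectl logs -n kube-system calico-node-6947s -c calico-node --previous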

When installing offline with kk, can the Kubernetes version be chosen freely instead of using the default 1.17.9?

    I checked the offline package: the Kubernetes-related images in it are all 1.17.9, so the offline package can probably only install version 1.17.9.
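
    For comparison, in an online install KubeKey lets you pick the Kubernetes version when generating the config. A sketch; which versions are actually supported depends on the kk release:

    ./kk create config --with-kubernetes v1.17.9 --with-kubesphere v3.0.0
    # or edit spec.kubernetes.version in the generated config-sample.yaml, then:
    ./kk create cluster -f config-sample.yaml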

    7 days later
    Cauchy changed the title to "Deploying KubeSphere v3.0.0 with KubeKey in an offline environment"
    2 months later

    Hello, my first installation failed. For the second attempt I changed kubeServiceCIDR and kubePodsCIDR, and kube-apiserver failed to start.

      dongweibh

      Kubernetes does not support changing the Pod CIDR and Service CIDR of a running cluster directly. If you need to change them, it is recommended to delete the cluster and redeploy. There are ways to change the CIDRs while keeping the cluster, but they are quite involved.

      After changing the CIDRs in the config file, also delete kubekey/network-plugin.yaml, then redeploy.
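
      A minimal sketch of that flow, assuming the cluster config file is config-sample.yaml (adjust names and paths to your setup):

      # tear down the old cluster
      ./kk delete cluster -f config-sample.yaml
      # edit kubeServiceCIDR / kubePodsCIDR in config-sample.yaml, drop the cached network manifest, then redeploy
      rm -f kubekey/network-plugin.yaml
      ./kk create cluster -f config-sample.yaml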

        Cauchy Found the problem: the VIP port I used during installation did not match the one in the installation config file. After making them consistent, the installation completed normally. Thanks a lot.

        willqy Will, you're welcome to write a new blog post on installing KubeSphere offline on an existing K8s cluster; it will probably rack up 1000+ views again 😀


        What is this error? I'm installing all-in-one.

          jcm
          This looks like a copy-paste formatting problem; try typing that command by hand.


            Cauchy Thanks a lot, I reinstalled and it works now. One more question: for an all-in-one install on a local VM, does KubeSphere start by itself when the VM boots up?

            Offline deployment: the elasticsearch-logging-curator-elasticsearch-curator and jaeger-es-index-cleaner containers are stuck pulling images. How can I fix this?

              hkstudio The image jaegertracing/jaeger-es-index-cleaner:1.17.1 is included, and the image for the elasticsearch-logging-curator-elasticsearch-curator job is kubesphere/elasticsearch-curator:v5.7.6, which is also included. Did you download the package from the link above? Check the md5 checksum against it.

              # md5: 65e9a1158a682412faa1166c0cf06772
              curl -Ok https://kubesphere-installer.pek3b.qingstor.com/offline/v3.0.0/kubesphere-all-v3.0.0-offline-linux-amd64.tar.gz
              
              [root@ks-allinone kubesphere-images-v3.0.0]# docker load -i ks_logger_images.tar
              Loaded image: kubesphere/elasticsearch-curator:v5.7.6
              ace0eda3e3be: Loading layer [==================================================>]  5.843MB/5.843MB
              911954e2fe49: Loading layer [==================================================>]   5.54MB/5.54MB
              31b11ec6ff38: Loading layer [==================================================>]   2.56kB/2.56kB
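
              To compare the checksum mentioned above, something like this should do (run where the tarball was downloaded):

              # expected: 65e9a1158a682412faa1166c0cf06772
              md5sum kubesphere-all-v3.0.0-offline-linux-amd64.tar.gz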

              Feynman Can a ready-made offline image package be provided directly, so there is no need to pull the images locally? In theory the kubekey offline package can be reused:

              curl -Ok https://kubesphere-installer.pek3b.qingstor.com/offline/v3.0.0/kubesphere-all-v3.0.0-offline-linux-amd64.tar.gz
              cd xx
              sh push-image.sh <local_registry>

              Or, alternatively, build a package that does not include the kubekey and kubernetes images.

              This is how I deploy at the moment; it is still a bit cumbersome:
              1. create_project_harbor.sh is not included in the offline package
              2. create_project_harbor.sh has to be modified; registries are all on the registry v2 API nowadays, right?
              3. The kubesphere-all-v3.0.0-offline-linux-amd64.tar.gz package is somewhat large, though not by much

              Deploying KubeSphere offline on an existing K8s cluster

              1. Prepare a Harbor image registry. Registry address: http://192.168.93.9
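
              Since the registry here is plain HTTP, each node's Docker daemon normally needs it listed as an insecure registry. A sketch (merge this into any existing /etc/docker/daemon.json rather than overwriting it):

              cat <<'EOF' > /etc/docker/daemon.json
              {
                "insecure-registries": ["192.168.93.9"]
              }
              EOF
              systemctl restart docker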

              2. Download and extract the KubeSphere offline package

              curl -Ok https://kubesphere-installer.pek3b.qingstor.com/offline/v3.0.0/kubesphere-all-v3.0.0-offline-linux-amd64.tar.gz
              tar -zxvf kubesphere-all-v3.0.0-offline-linux-amd64.tar.gz

              3. Push the images to the Harbor registry

              Download the script:

              wget https://raw.githubusercontent.com/kubesphere/ks-installer/master/scripts/create_project_harbor.sh

              Edit the create_project_harbor.sh script to set the registry address and login credentials:

              url="http://192.168.93.9"
              user="admin"
              passwd="Harbor12345"

              If you are using Harbor 2.x, change the last line to use the v2.0 API path:

              ${url}/api/v2.0/projects
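
              For context, the project-creation request the script sends then looks roughly like this against the Harbor 2.x API (the ${project} variable is illustrative; use whatever loop variable the script actually defines):

              curl -u "${user}:${passwd}" -X POST -H "Content-Type: application/json" \
                "${url}/api/v2.0/projects" \
                -d "{\"project_name\": \"${project}\", \"metadata\": {\"public\": \"true\"}}"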

              Create the projects:

              sh create_project_harbor.sh

              Push the images to the private registry:

              cd kubesphere-all-v3.0.0-offline-linux-amd64/kubesphere-images-v3.0.0
              sh push-images.sh 192.168.93.9
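
              As a sanity check after the push, pulling one image back from Harbor should work (the ks-installer image name matches the helm values used below):

              docker pull 192.168.93.9/kubesphere/ks-installer:v3.0.0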

              4. Deploy the KubeSphere container platform

              helm repo add test https://charts.kubesphere.io/test
              helm pull test/ks-installer
              tar -zxvf ks-installer-0.2.1.tgz
              
              helm install kubesphere \
                --namespace=kubesphere-system \
                --create-namespace \
                --set image.repository=192.168.93.9/kubesphere/ks-installer \
                --set image.tag=v3.0.0 \
                --set persistence.storageClass=longhorn \
                --set registry=192.168.93.9 \
                ./ks-installer
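
              After the chart is installed, installation progress can be followed in the ks-installer pod logs (the app=ks-install label is what the upstream ks-installer manifests use; check that the chart applies the same label):

              kubectl logs -n kubesphere-system \
                $(kubectl get pod -n kubesphere-system -l app=ks-install -o jsonpath='{.items[0].metadata.name}') -f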