• 安装部署
  • 开启multicluster,Error: UPGRADE FAILED: \"kubefed\" has no deployed releases

Kubernetes版本信息

v3.2.1

./kk create cluster --with-kubernetes v1.21.5 --with-kubesphere v3.2.1
kubectl edit cc ks-installer -n kubesphere-system
Waiting for all tasks to be completed ...
task network status is successful  (1/4)
task openpitrix status is successful  (2/4)
task monitoring status is successful  (3/4)
task multicluster status is failed  (4/4)
**************************************************
Collecting installation results ...


Task 'multicluster' failed:
******************************************************************************************************************************************************
{
  "counter": 65,
  "created": "2022-02-24T06:00:22.491148",
  "end_line": 67,
  "event": "runner_on_failed",
  "event_data": {
    "duration": 657.194901,
    "end": "2022-02-24T06:00:22.490862",
    "event_loop": null,
    "host": "localhost",
    "ignore_errors": null,
    "play": "localhost",
    "play_pattern": "localhost",
    "play_uuid": "6a09ff7e-f966-1409-755e-000000000005",
    "playbook": "/kubesphere/playbooks/multicluster.yaml",
    "playbook_uuid": "5b1b3191-2fd2-4eeb-aeef-b71f7e7bfc4c",
    "remote_addr": "127.0.0.1",
    "res": {
      "_ansible_no_log": false,
      "attempts": 10,
      "changed": true,
      "cmd": "/usr/local/bin/helm upgrade --install kubefed /kubesphere/kubesphere/kubefed/kubefed -f /kubesphere/kubesphere/kubefed/custom-values-kubefed.yaml --namespace kube-federation-system --wait --timeout 1800s\n",
      "delta": "0:00:04.783402",
      "end": "2022-02-24 14:00:22.403295",
      "invocation": {
        "module_args": {
          "_raw_params": "/usr/local/bin/helm upgrade --install kubefed /kubesphere/kubesphere/kubefed/kubefed -f /kubesphere/kubesphere/kubefed/custom-values-kubefed.yaml --namespace kube-federation-system --wait --timeout 1800s\n",
          "_uses_shell": true,
          "argv": null,
          "chdir": null,
          "creates": null,
          "executable": null,
          "removes": null,
          "stdin": null,
          "stdin_add_newline": true,
          "strip_empty_ends": true,
          "warn": true
        }
      },
      "msg": "non-zero return code",
      "rc": 1,
      "start": "2022-02-24 14:00:17.619893",
      "stderr": "Error: UPGRADE FAILED: \"kubefed\" has no deployed releases",
      "stderr_lines": [
        "Error: UPGRADE FAILED: \"kubefed\" has no deployed releases"
      ],
      "stdout": "",
      "stdout_lines": []
    },
    "role": "ks-multicluster",
    "start": "2022-02-24T05:49:25.295961",
    "task": "Kubefed | Initing kube-federation-system",
    "task_action": "command",
    "task_args": "",
    "task_path": "/kubesphere/installer/roles/ks-multicluster/tasks/main.yml:51",
    "task_uuid": "6a09ff7e-f966-1409-755e-00000000001f",
    "uuid": "f7cb25f1-3761-42c7-a575-d525d6c07654"
  },
  "parent_uuid": "6a09ff7e-f966-1409-755e-00000000001f",
  "pid": 8819,
  "runner_ident": "multicluster",
  "start_line": 66,
  "stdout": "fatal: [localhost]: FAILED! => {\"attempts\": 10, \"changed\": true, \"cmd\": \"/usr/local/bin/helm upgrade --install kubefed /kubesphere/kubesphere/kubefed/kubefed -f /kubesphere/kubesphere/kubefed/custom-values-kubefed.yaml --namespace kube-federation-system --wait --timeout 1800s\\n\", \"delta\": \"0:00:04.783402\", \"end\": \"2022-02-24 14:00:22.403295\", \"msg\": \"non-zero return code\", \"rc\": 1, \"start\": \"2022-02-24 14:00:17.619893\", \"stderr\": \"Error: UPGRADE FAILED: \\\"kubefed\\\" has no deployed releases\", \"stderr_lines\": [\"Error: UPGRADE FAILED: \\\"kubefed\\\" has no deployed releases\"], \"stdout\": \"\", \"stdout_lines\": []}",
  "uuid": "f7cb25f1-3761-42c7-a575-d525d6c07654"
}

pod都正常

[root@hk-public-zone-gitlab-01-01 kubesphere]# kubectl get pod -A
NAMESPACE                      NAME                                                  READY   STATUS    RESTARTS   AGE
kube-federation-system         kubefed-admission-webhook-6f9f5dcbbf-c9zwf            1/1     Running   1          3h46m
kube-federation-system         kubefed-controller-manager-57c9786bd5-pj25h           1/1     Running   1          3h46m
kube-system                    calico-kube-controllers-846b5f484d-vbmg9              1/1     Running   1          4h3m
kube-system                    calico-node-7sjq7                                     1/1     Running   1          4h3m
kube-system                    coredns-b5648d655-bzggc                               1/1     Running   1          4h3m
kube-system                    coredns-b5648d655-flw54                               1/1     Running   1          4h3m
kube-system                    kube-apiserver-hk-public-zone-gitlab-01-01            1/1     Running   1          4h4m
kube-system                    kube-controller-manager-hk-public-zone-gitlab-01-01   1/1     Running   1          4h4m
kube-system                    kube-proxy-2r4sb                                      1/1     Running   1          4h3m
kube-system                    kube-scheduler-hk-public-zone-gitlab-01-01            1/1     Running   1          4h4m
kube-system                    nodelocaldns-wvx5w                                    1/1     Running   1          4h3m
kube-system                    openebs-localpv-provisioner-5bfb67894-ghr5z           1/1     Running   1          4h3m
kube-system                    snapshot-controller-0                                 1/1     Running   1          70m
kubesphere-controls-system     default-http-backend-5bf68ff9b8-zwp49                 1/1     Running   1          68m
kubesphere-controls-system     kubectl-admin-6667774bb-drj46                         1/1     Running   1          62m
kubesphere-monitoring-system   alertmanager-main-0                                   2/2     Running   2          64m
kubesphere-monitoring-system   kube-state-metrics-5547ddd4cc-vv5gm                   3/3     Running   0          64m
kubesphere-monitoring-system   node-exporter-7qlg7                                   2/2     Running   0          64m
kubesphere-monitoring-system   notification-manager-deployment-78664576cb-p2k9t      2/2     Running   2          63m
kubesphere-monitoring-system   notification-manager-operator-7d44854f54-fv7sj        2/2     Running   2          63m
kubesphere-monitoring-system   prometheus-k8s-0                                      2/2     Running   1          64m
kubesphere-monitoring-system   prometheus-operator-5c5db79546-knqnj                  2/2     Running   2          64m
kubesphere-system              ks-apiserver-668c56545c-6m449                         1/1     Running   0          18m
kubesphere-system              ks-console-65f4d44d88-znksf                           1/1     Running   1          68m
kubesphere-system              ks-controller-manager-77d8ff65d4-gkkfm                1/1     Running   0          17m
kubesphere-system              ks-installer-85dcfff87d-9bpqw                         1/1     Running   1          72m
kubesphere-system              tower-786bb99f5d-8smj9                                1/1     Running   0          29m

    hjfeng1988

    1. 首先执行helm list -n kube-federation-system查看下是否有kubefed的release,有的话使用 helm del kubefed -n kube-federation-system 删除该release,重启ks-installer尝试重新安装kubefed。
    2. 如果没有kubefed的release,执行 kubectl get secret -n kube-federation-system查看该namespace下的secret,删除helm相关的secret,重启ks-installer尝试重新安装kubefed。

      我用kk重装了kubernetes,莫名其妙又变好了,不知如何复现

      Cauchy 谢谢