• Installation & Deployment
  • monitoring installation fails when deploying KubeSphere on a local k8s cluster

k8s 1.7x, using KubeSphere 3.x. With the prerequisite environment ready, I ran the commands from the official docs directly:

kubectl apply -f https://github.com/kubesphere/ks-installer/releases/download/v3.1.1/kubesphere-installer.yaml

kubectl apply -f https://github.com/kubesphere/ks-installer/releases/download/v3.1.1/cluster-configuration.yaml

to perform the installation. The web console was reachable, but the installer logs reported a monitoring failure, and the per-node monitoring information shown on the console was wrong as well.
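The output below can be reproduced by tailing the ks-installer pod logs, e.g. with the command from the docs (the kubesphere-system namespace and the app=ks-install label are what the 3.x installer uses):

kubectl logs -n kubesphere-system $(kubectl get pod -n kubesphere-system -l app=ks-install -o jsonpath='{.items[0].metadata.name}') -f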

Start installing monitoring
Start installing multicluster
Start installing openpitrix
Start installing network
**************************************************
Waiting for all tasks to be completed ...
task network status is successful  (1/4)
task multicluster status is successful  (2/4)
task openpitrix status is successful  (3/4)
task monitoring status is failed  (4/4)
**************************************************
Collecting installation results ...


Task 'monitoring' failed:
******************************************************************************************************************************************************
{
  "counter": 75,
  "created": "2021-08-11T16:11:47.545022",
  "end_line": 74,
  "event": "runner_on_failed",
  "event_data": {
    "duration": 1.19733,
    "end": "2021-08-11T16:11:47.544801",
    "event_loop": null,
    "host": "localhost",
    "ignore_errors": null,
    "play": "localhost",
    "play_pattern": "localhost",
    "play_uuid": "160ed5e4-f324-d4eb-9fae-000000000005",
    "playbook": "/kubesphere/playbooks/monitoring.yaml",
    "playbook_uuid": "8d8958a9-4240-431f-939d-9fdde3d19919",
    "remote_addr": "127.0.0.1",
    "res": {
      "_ansible_no_log": false,
      "changed": true,
      "cmd": "/usr/local/bin/kubectl apply -f /kubesphere/kubesphere/prometheus/node-exporter --force",
      "delta": "0:00:00.593342",
      "end": "2021-08-11 16:11:47.465865",
      "invocation": {
        "module_args": {
          "_raw_params": "/usr/local/bin/kubectl apply -f /kubesphere/kubesphere/prometheus/node-exporter --force",
          "_uses_shell": true,
          "argv": null,
          "chdir": null,
          "creates": null,
          "executable": null,
          "removes": null,
          "stdin": null,
          "stdin_add_newline": true,
          "strip_empty_ends": true,
          "warn": true
        }
      },
      "msg": "non-zero return code",
      "rc": 1,
      "start": "2021-08-11 16:11:46.872523",
      "stderr": "error: unable to recognize \"/kubesphere/kubesphere/prometheus/node-exporter/node-exporter-serviceMonitor.yaml\": no matches for kind \"ServiceMonitor\" in version \"monitoring.coreos.com/v1\"",
      "stderr_lines": [
        "error: unable to recognize \"/kubesphere/kubesphere/prometheus/node-exporter/node-exporter-serviceMonitor.yaml\": no matches for kind \"ServiceMonitor\" in version \"monitoring.coreos.com/v1\""
      ],
      "stdout": "clusterrole.rbac.authorization.k8s.io/kubesphere-node-exporter created\nclusterrolebinding.rbac.authorization.k8s.io/kubesphere-node-exporter created\ndaemonset.apps/node-exporter created\nservice/node-exporter created\nserviceaccount/node-exporter created",
      "stdout_lines": [
        "clusterrole.rbac.authorization.k8s.io/kubesphere-node-exporter created",
        "clusterrolebinding.rbac.authorization.k8s.io/kubesphere-node-exporter created",
        "daemonset.apps/node-exporter created",
        "service/node-exporter created",
        "serviceaccount/node-exporter created"
      ]
    },
    "role": "ks-monitor",
    "start": "2021-08-11T16:11:46.347471",
    "task": "Monitoring | Installing node-exporter",
    "task_action": "command",
    "task_args": "",
    "task_path": "/kubesphere/installer/roles/ks-monitor/tasks/node-exporter.yaml:2",
    "task_uuid": "160ed5e4-f324-d4eb-9fae-000000000036",
    "uuid": "b1d95f0a-0ada-49b7-8a0c-2491e001cd06"
  },
  "parent_uuid": "160ed5e4-f324-d4eb-9fae-000000000036",
  "pid": 3904,
  "runner_ident": "monitoring",
  "start_line": 73,
  "stdout": "fatal: [localhost]: FAILED! => {\"changed\": true, \"cmd\": \"/usr/local/bin/kubectl apply -f /kubesphere/kubesphere/prometheus/node-exporter --force\", \"delta\": \"0:00:00.593342\", \"end\": \"2021-08-11 16:11:47.465865\", \"msg\": \"non-zero return code\", \"rc\": 1, \"start\": \"2021-08-11 16:11:46.872523\", \"stderr\": \"error: unable to recognize \\\"/kubesphere/kubesphere/prometheus/node-exporter/node-exporter-serviceMonitor.yaml\\\": no matches for kind \\\"ServiceMonitor\\\" in version \\\"monitoring.coreos.com/v1\\\"\", \"stderr_lines\": [\"error: unable to recognize \\\"/kubesphere/kubesphere/prometheus/node-exporter/node-exporter-serviceMonitor.yaml\\\": no matches for kind \\\"ServiceMonitor\\\" in version \\\"monitoring.coreos.com/v1\\\"\"], \"stdout\": \"clusterrole.rbac.authorization.k8s.io/kubesphere-node-exporter created\\nclusterrolebinding.rbac.authorization.k8s.io/kubesphere-node-exporter created\\ndaemonset.apps/node-exporter created\\nservice/node-exporter created\\nserviceaccount/node-exporter created\", \"stdout_lines\": [\"clusterrole.rbac.authorization.k8s.io/kubesphere-node-exporter created\", \"clusterrolebinding.rbac.authorization.k8s.io/kubesphere-node-exporter created\", \"daemonset.apps/node-exporter created\", \"service/node-exporter created\", \"serviceaccount/node-exporter created\"]}",
  "uuid": "b1d95f0a-0ada-49b7-8a0c-2491e001cd06"

Following the KubeSphere 2.x prerequisites, I installed Helm and Tiller first and then reinstalled KubeSphere 3.x, and everything worked. I'm not sure whether that was the actual cause or whether the earlier failure was just a network issue... Also, the minimal-installation YAML for version 2.x now returns a 404. Was that deliberate, to push everyone to move to version 3.x?
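For anyone trying the same workaround, the Tiller setup from the 2.x prerequisites is roughly the following (Helm v2 only; the tiller service account and cluster-admin binding are what the old docs used, so treat this as a sketch rather than a hardened setup):

# Helm v2: give Tiller a service account and deploy it into kube-system
kubectl create serviceaccount tiller -n kube-system
kubectl create clusterrolebinding tiller --clusterrole=cluster-admin --serviceaccount=kube-system:tiller
helm init --service-account tiller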
