`Start installing monitoring
Start installing multicluster


task monitoring status is running
task multicluster status is successful
total: 2 completed:1


task monitoring status is running
task multicluster status is successful
total: 2 completed:1


task monitoring status is running
task multicluster status is successful
total: 2 completed:1


task monitoring status is running
task multicluster status is successful
total: 2 completed:1


task monitoring status is successful
task multicluster status is successful
total: 2 completed:2


Failed to ansible-playbook result-info.yaml
error: unexpected EOF

[root@master1 ]# kubectl logs -n kubesphere-system $(kubectl get pod -n kubesphere-system -l app=ks-install -o jsonpath='{.items[0].metadata.name}') |grep FAILED -A 1 -B 5

TASK [common : Setting PersistentVolumeSize (etcd)] ****************************
skipping: [localhost]

TASK [common : Kubesphere | Check mysql PersistentVolumeClaim] *****************
fatal: [localhost]: FAILED! => {"changed": true, "cmd": "/usr/local/bin/kubectl get pvc -n kubesphere-system mysql-pvc -o jsonpath='{.status.capacity.storage}'\n", "delta": "0:00:00.088369", "end": "2020-09-09 14:07:50.742056", "msg": "non-zero return code", "rc": 1, "start": "2020-09-09 14:07:50.653687", "stderr": "Error from server (NotFound): persistentvolumeclaims \"mysql-pvc\" not found", "stderr_lines": ["Error from server (NotFound): persistentvolumeclaims \"mysql-pvc\" not found"], "stdout": "", "stdout_lines": []}
...ignoring

TASK [common : Kubesphere | Setting mysql db pv size] **************************
skipping: [localhost]

TASK [common : Kubesphere | Check redis PersistentVolumeClaim] *****************
fatal: [localhost]: FAILED! => {"changed": true, "cmd": "/usr/local/bin/kubectl get pvc -n kubesphere-system redis-pvc -o jsonpath='{.status.capacity.storage}'\n", "delta": "0:00:00.094291", "end": "2020-09-09 14:07:51.099680", "msg": "non-zero return code", "rc": 1, "start": "2020-09-09 14:07:51.005389", "stderr": "Error from server (NotFound): persistentvolumeclaims \"redis-pvc\" not found", "stderr_lines": ["Error from server (NotFound): persistentvolumeclaims \"redis-pvc\" not found"], "stdout": "", "stdout_lines": []}
...ignoring

TASK [common : Kubesphere | Setting redis db pv size] **************************
skipping: [localhost]

TASK [common : Kubesphere | Check minio PersistentVolumeClaim] *****************
fatal: [localhost]: FAILED! => {"changed": true, "cmd": "/usr/local/bin/kubectl get pvc -n kubesphere-system minio -o jsonpath='{.status.capacity.storage}'\n", "delta": "0:00:00.092779", "end": "2020-09-09 14:07:51.459988", "msg": "non-zero return code", "rc": 1, "start": "2020-09-09 14:07:51.367209", "stderr": "Error from server (NotFound): persistentvolumeclaims \"minio\" not found", "stderr_lines": ["Error from server (NotFound): persistentvolumeclaims \"minio\" not found"], "stdout": "", "stdout_lines": []}
...ignoring

TASK [common : Kubesphere | Setting minio pv size] *****************************
skipping: [localhost]

TASK [common : Kubesphere | Check openldap PersistentVolumeClaim] **************
fatal: [localhost]: FAILED! => {"changed": true, "cmd": "/usr/local/bin/kubectl get pvc -n kubesphere-system openldap-pvc-openldap-0 -o jsonpath='{.status.capacity.storage}'\n", "delta": "0:00:00.081902", "end": "2020-09-09 14:07:51.809697", "msg": "non-zero return code", "rc": 1, "start": "2020-09-09 14:07:51.727795", "stderr": "Error from server (NotFound): persistentvolumeclaims \"openldap-pvc-openldap-0\" not found", "stderr_lines": ["Error from server (NotFound): persistentvolumeclaims \"openldap-pvc-openldap-0\" not found"], "stdout": "", "stdout_lines": []}
...ignoring

TASK [common : Kubesphere | Setting openldap pv size] **************************
skipping: [localhost]

TASK [common : Kubesphere | Check etcd db PersistentVolumeClaim] ***************
fatal: [localhost]: FAILED! => {"changed": true, "cmd": "/usr/local/bin/kubectl get pvc -n kubesphere-system etcd-pvc -o jsonpath='{.status.capacity.storage}'\n", "delta": "0:00:00.085201", "end": "2020-09-09 14:07:52.159817", "msg": "non-zero return code", "rc": 1, "start": "2020-09-09 14:07:52.074616", "stderr": "Error from server (NotFound): persistentvolumeclaims \"etcd-pvc\" not found", "stderr_lines": ["Error from server (NotFound): persistentvolumeclaims \"etcd-pvc\" not found"], "stdout": "", "stdout_lines": []}
...ignoring

TASK [common : Kubesphere | Setting etcd pv size] ******************************
skipping: [localhost]

TASK [common : Kubesphere | Check redis ha PersistentVolumeClaim] **************
fatal: [localhost]: FAILED! => {"changed": true, "cmd": "/usr/local/bin/kubectl get pvc -n kubesphere-system data-redis-ha-server-0 -o jsonpath='{.status.capacity.storage}'\n", "delta": "0:00:00.092925", "end": "2020-09-09 14:07:52.521257", "msg": "non-zero return code", "rc": 1, "start": "2020-09-09 14:07:52.428332", "stderr": "Error from server (NotFound): persistentvolumeclaims \"data-redis-ha-server-0\" not found", "stderr_lines": ["Error from server (NotFound): persistentvolumeclaims \"data-redis-ha-server-0\" not found"], "stdout": "", "stdout_lines": []}
...ignoring

TASK [common : Kubesphere | Setting redis ha pv size] **************************
skipping: [localhost]

TASK [common : Kubesphere | Check es-master PersistentVolumeClaim] *************
fatal: [localhost]: FAILED! => {"changed": true, "cmd": "/usr/local/bin/kubectl get pvc -n kubesphere-logging-system data-elasticsearch-logging-discovery-0 -o jsonpath='{.status.capacity.storage}'\n", "delta": "0:00:00.079415", "end": "2020-09-09 14:07:52.864793", "msg": "non-zero return code", "rc": 1, "start": "2020-09-09 14:07:52.785378", "stderr": "Error from server (NotFound): namespaces \"kubesphere-logging-system\" not found", "stderr_lines": ["Error from server (NotFound): namespaces \"kubesphere-logging-system\" not found"], "stdout": "", "stdout_lines": []}
...ignoring

TASK [common : Kubesphere | Setting es master pv size] *************************
skipping: [localhost]

TASK [common : Kubesphere | Check es data PersistentVolumeClaim] ***************
fatal: [localhost]: FAILED! => {"changed": true, "cmd": "/usr/local/bin/kubectl get pvc -n kubesphere-logging-system data-elasticsearch-logging-data-0 -o jsonpath='{.status.capacity.storage}'\n", "delta": "0:00:00.079943", "end": "2020-09-09 14:07:53.208772", "msg": "non-zero return code", "rc": 1, "start": "2020-09-09 14:07:53.128829", "stderr": "Error from server (NotFound): namespaces \"kubesphere-logging-system\" not found", "stderr_lines": ["Error from server (NotFound): namespaces \"kubesphere-logging-system\" not found"], "stdout": "", "stdout_lines": []}
...ignoring
`

kubesphere/ks-installer:v3.0.0 b25132e64358 42 hours ago

Failed to ansible-playbook result-info.yaml

执行下边命令看下这个playbook失败的原因
kubectl exec -it -n kubesphere-system $(kubectl get pod -n kubesphere-system -l app=ks-install -o jsonpath='{.items[0].metadata.name}') cat results/result-info/result/stdout

    Cauchy
    `TASK [ks-core/config : ks-upgrade | restart ks-apiserver] **********************
    fatal: [localhost]: FAILED! => {"changed": true, "cmd": "/usr/local/bin/kubectl -n kubesphere-system rollout restart deployment ks-apiserver", "delta": "0:00:00.091473", "end": "2020-09-10 14:21:02.400140", "msg": "non-zero return code", "rc": 1, "start": "2020-09-10 14:21:02.308667", "stderr": "Error from server (NotFound): deployments.apps \"ks-apiserver\" not found", "stderr_lines": ["Error from server (NotFound): deployments.apps \"ks-apiserver\" not found"], "stdout": "", "stdout_lines": []}
    ...ignoring

    TASK [ks-core/config : ks-upgrade | restart ks-controller-manager] *************
    fatal: [localhost]: FAILED! => {"changed": true, "cmd": "/usr/local/bin/kubectl -n kubesphere-system rollout restart deployment ks-controller-manager", "delta": "0:00:00.091220", "end": "2020-09-10 14:21:02.722397", "msg": "non-zero return code", "rc": 1, "start": "2020-09-10 14:21:02.631177", "stderr": "Error from server (NotFound): deployments.apps \"ks-controller-manager\" not found", "stderr_lines": ["Error from server (NotFound): deployments.apps \"ks-controller-manager\" not found"], "stdout": "", "stdout_lines": []}
    ...ignoring

    TASK [check-result : ks-devops | Getting ks-sonarqube NodeIp] ******************
    changed: [localhost]

    TASK [check-result : KubeSphere | Waiting for ks-console] **********************
    FAILED - RETRYING: KubeSphere | Waiting for ks-console (30 retries left).
    FAILED - RETRYING: KubeSphere | Waiting for ks-console (29 retries left).
    FAILED - RETRYING: KubeSphere | Waiting for ks-console (28 retries left).
    FAILED - RETRYING: KubeSphere | Waiting for ks-console (27 retries left).
    FAILED - RETRYING: KubeSphere | Waiting for ks-console (26 retries left).
    FAILED - RETRYING: KubeSphere | Waiting for ks-console (25 retries left).
    FAILED - RETRYING: KubeSphere | Waiting for ks-console (24 retries left).
    FAILED - RETRYING: KubeSphere | Waiting for ks-console (23 retries left).
    FAILED - RETRYING: KubeSphere | Waiting for ks-console (22 retries left).
    FAILED - RETRYING: KubeSphere | Waiting for ks-console (21 retries left).
    FAILED - RETRYING: KubeSphere | Waiting for ks-console (20 retries left).
    FAILED - RETRYING: KubeSphere | Waiting for ks-console (19 retries left).
    FAILED - RETRYING: KubeSphere | Waiting for ks-console (18 retries left).
    FAILED - RETRYING: KubeSphere | Waiting for ks-console (17 retries left).
    FAILED - RETRYING: KubeSphere | Waiting for ks-console (16 retries left).
    FAILED - RETRYING: KubeSphere | Waiting for ks-console (15 retries left).
    FAILED - RETRYING: KubeSphere | Waiting for ks-console (14 retries left).
    FAILED - RETRYING: KubeSphere | Waiting for ks-console (13 retries left).
    FAILED - RETRYING: KubeSphere | Waiting for ks-console (12 retries left).
    FAILED - RETRYING: KubeSphere | Waiting for ks-console (11 retries left).
    FAILED - RETRYING: KubeSphere | Waiting for ks-console (10 retries left).
    FAILED - RETRYING: KubeSphere | Waiting for ks-console (9 retries left).
    FAILED - RETRYING: KubeSphere | Waiting for ks-console (8 retries left).
    FAILED - RETRYING: KubeSphere | Waiting for ks-console (7 retries left).
    FAILED - RETRYING: KubeSphere | Waiting for ks-console (6 retries left).
    FAILED - RETRYING: KubeSphere | Waiting for ks-console (5 retries left).
    FAILED - RETRYING: KubeSphere | Waiting for ks-console (4 retries left).
    FAILED - RETRYING: KubeSphere | Waiting for ks-console (3 retries left).
    FAILED - RETRYING: KubeSphere | Waiting for ks-console (2 retries left).
    FAILED - RETRYING: KubeSphere | Waiting for ks-console (1 retries left).
    fatal: [localhost]: FAILED! => {"attempts": 30, "changed": true, "cmd": "/usr/local/bin/kubectl get pod -n kubesphere-system -o wide | grep ks-console | awk '{print $3}'", "delta": "0:00:00.087094", "end": "2020-09-10 14:36:13.191538", "rc": 0, "start": "2020-09-10 14:36:13.104444", "stderr": "", "stderr_lines": [], "stdout": "", "stdout_lines": []}

    PLAY RECAP *********************************************************************
    localhost : ok=12 changed=10 unreachable=0 failed=1 skipped=6 rescued=0 ignored=2

    只有一个ks-installer,没其他的了[root@master1 ]# kubectl get pod -n kubesphere-system
    NAME READY STATUS RESTARTS AGE
    ks-installer-7cb866bd-hljrm 1/1 Running 1 18h`

    • Jeff 回复了此帖

      errorcode7 你看下deployment是不是创建了,再describe replicaset看下是不是资源不足了

        我是今天才遇到这个相同的异常,是不是官方更新了什么

        **************************************************
        task monitoring status is successful
        task multicluster status is successful
        task alerting status is successful
        task auditing status is successful
        task devops status is successful
        task events status is successful
        task logging status is successful
        task notification status is successful
        task openpitrix status is successful
        task servicemesh status is successful
        total: 10     completed:10
        **************************************************
        Failed to ansible-playbook result-info.yaml
        
        
        TASK [check-result : KubeSphere | Waiting for ks-apiserver] ********************
        FAILED - RETRYING: KubeSphere | Waiting for ks-apiserver (30 retries left).
        FAILED - RETRYING: KubeSphere | Waiting for ks-apiserver (29 retries left).
        FAILED - RETRYING: KubeSphere | Waiting for ks-apiserver (28 retries left).
        FAILED - RETRYING: KubeSphere | Waiting for ks-apiserver (27 retries left).
        FAILED - RETRYING: KubeSphere | Waiting for ks-apiserver (26 retries left).
        FAILED - RETRYING: KubeSphere | Waiting for ks-apiserver (25 retries left).
        FAILED - RETRYING: KubeSphere | Waiting for ks-apiserver (24 retries left).
        FAILED - RETRYING: KubeSphere | Waiting for ks-apiserver (23 retries left).
        FAILED - RETRYING: KubeSphere | Waiting for ks-apiserver (22 retries left).
        FAILED - RETRYING: KubeSphere | Waiting for ks-apiserver (21 retries left).
        FAILED - RETRYING: KubeSphere | Waiting for ks-apiserver (20 retries left).
        FAILED - RETRYING: KubeSphere | Waiting for ks-apiserver (19 retries left).
        FAILED - RETRYING: KubeSphere | Waiting for ks-apiserver (18 retries left).
        FAILED - RETRYING: KubeSphere | Waiting for ks-apiserver (17 retries left).
        FAILED - RETRYING: KubeSphere | Waiting for ks-apiserver (16 retries left).
        FAILED - RETRYING: KubeSphere | Waiting for ks-apiserver (15 retries left).
        FAILED - RETRYING: KubeSphere | Waiting for ks-apiserver (14 retries left).
        FAILED - RETRYING: KubeSphere | Waiting for ks-apiserver (13 retries left).
        FAILED - RETRYING: KubeSphere | Waiting for ks-apiserver (12 retries left).
        FAILED - RETRYING: KubeSphere | Waiting for ks-apiserver (11 retries left).
        FAILED - RETRYING: KubeSphere | Waiting for ks-apiserver (10 retries left).
        FAILED - RETRYING: KubeSphere | Waiting for ks-apiserver (9 retries left).
        FAILED - RETRYING: KubeSphere | Waiting for ks-apiserver (8 retries left).
        FAILED - RETRYING: KubeSphere | Waiting for ks-apiserver (7 retries left).
        FAILED - RETRYING: KubeSphere | Waiting for ks-apiserver (6 retries left).
        FAILED - RETRYING: KubeSphere | Waiting for ks-apiserver (5 retries left).
        FAILED - RETRYING: KubeSphere | Waiting for ks-apiserver (4 retries left).
        FAILED - RETRYING: KubeSphere | Waiting for ks-apiserver (3 retries left).
        FAILED - RETRYING: KubeSphere | Waiting for ks-apiserver (2 retries left).
        FAILED - RETRYING: KubeSphere | Waiting for ks-apiserver (1 retries left).
        fatal: [localhost]: FAILED! => {"attempts": 30, "changed": true, "cmd": "/usr/local/bin/kubectl get pod -n kubesphere-system -o wide | grep ks-apiserver | awk '{print $3}'", "delta": "0:00:00.102618", "end": "2020-09-11 14:43:50.416738", "rc": 0, "start": "2020-09-11 14:43:50.314120", "stderr": "", "stderr_lines": [], "stdout": "CrashLoopBackOff\nCrashLoopBackOff\nCrashLoopBackOff\nCrashLoopBackOff\nCrashLoopBackOff", "stdout_lines": ["CrashLoopBackOff", "CrashLoopBackOff", "CrashLoopBackOff", "CrashLoopBackOff", "CrashLoopBackOff"]}
        
        PLAY RECAP *********************************************************************
        localhost                  : ok=14   changed=11   unreachable=0    failed=1    skipped=5    rescued=0    ignored=0   

        selfLink: /apis/installer.kubesphere.io/v1alpha1/namespaces/kubesphere-system/clusterconfigurations/ks-installer
        uid: cabbd642-14cc-4388-b2e0-dae6d6f41531
        spec:
        alerting:
        enabled: true
        auditing:
        enabled: true
        authentication:
        jwtSecret: ""
        common:
        es:
        elasticsearchDataVolumeSize: 200Gi
        elasticsearchMasterVolumeSize: 8Gi
        elkPrefix: logstash
        logMaxAge: 7
        etcdVolumeSize: 60Gi
        minioVolumeSize: 100Gi
        mysqlVolumeSize: 100Gi
        openldapVolumeSize: 20Gi
        redisVolumSize: 20Gi
        console:
        enableMultiLogin: true
        port: 30880
        devops:
        enabled: true
        jenkinsJavaOpts_MaxRAM: 8g
        jenkinsJavaOpts_Xms: 8192m
        jenkinsJavaOpts_Xmx: 8192m
        jenkinsMemoryLim: 16Gi
        jenkinsMemoryReq: 8Gi
        jenkinsVolumeSize: 100Gi
        etcd:
        endpointIps: 192.168.66.1,192.168.66.2,192.168.66.3,192.168.66.4,192.168.66.5
        monitoring: true
        port: 2379
        tlsEnable: true

        Jeff
        `[root@kube-master-02 ]# kubectl describe deployments -n kubesphere-system ks-installer
        Name: ks-installer
        Namespace: kubesphere-system
        CreationTimestamp: Fri, 11 Sep 2020 13:56:07 +0800
        Labels: app=ks-install
        Annotations: deployment.kubernetes.io/revision: 1
        Selector: app=ks-install
        Replicas: 1 desired | 1 updated | 1 total | 1 available | 0 unavailable
        StrategyType: RollingUpdate
        MinReadySeconds: 0
        RollingUpdateStrategy: 25% max unavailable, 25% max surge
        Pod Template:
        Labels: app=ks-install
        Service Account: ks-installer
        Containers:
        installer:
        Image: kubesphere/ks-installer:v3.0.0
        Port: <none>
        Host Port: <none>
        Environment: <none>
        Mounts:
        /etc/localtime from host-time (rw)
        Volumes:
        host-time:
        Type: HostPath (bare host directory volume)
        Path: /etc/localtime
        HostPathType:

        Conditions:
        Type Status Reason


        Progressing True NewReplicaSetAvailable
        Available True MinimumReplicasAvailable
        OldReplicaSets: <none>
        NewReplicaSet: ks-installer-7cb866bd (1/1 replicas created)
        Events: <none>
        `

        • Jeff 回复了此帖

          erinyeo 你看下 kubectl -n kubesphere-system get po ,只要按照文档把那些前置条件准备好了,安装一点也不复杂,也要怕看文档,遇到相同问题你在论坛搜下

          初步排查出现
          driver name rbd.csi.ceph.com not found in the list of registered CSI dri vers

          初步排查出现
          driver name rbd.csi.ceph.com not found in the list of registered CSI drivers

          yunkunrao 可我是二进制安装ceph cluster,不清楚这里的端口配置,不适合我这类型吧