OS env:
OS version: CentOS Linux release 7.8.2003 (Core)
kk version: v1.0.0
kubernetes version: v1.18.6
kubesphere version: v3.0.0
ceph version: ceph version 15.2.4 (7447c15c6ff58d7fce91843b705a268a1917325c) octopus (stable)
etcd/kube-master count: 3
worker/kube-node count: 5

The relevant configuration files are as follows:

```yaml
# config-sample.yaml
  addons: 
  - name: rbd-provisioner
    namespace: kube-system
    sources:
      chart:
        name: rbd-provisioner
        repo: https://charts.kubesphere.io/test
        values:
        - ceph.mon=192.168.55.1:6789
        - ceph.pool=kubeceph
        - ceph.adminId=admin
        - ceph.adminKey=QVFDUy9rQmY4RWczT0JBQXhwdTlzWStWUU43K1VGYldudWEyY0E9PQ==
        - ceph.userId=admin
        - ceph.userKey=QVFDUy9rQmY4RWczT0JBQXhwdTlzWStWUU43K1VGYldudWEyY0E9PQ==
        - sc.isDefault=true  

---
apiVersion: installer.kubesphere.io/v1alpha1
kind: ClusterConfiguration
metadata:
  name: ks-installer
  namespace: kubesphere-system
  labels:
    version: v3.0.0
spec:
  local_registry: ""
  persistence:
    storageClass: ""
  authentication:
    jwtSecret: "


```
# ceph status
[root@ceph-node-01 ~]# ceph -s
  cluster:
    id:     f86f2702-e7d3-4d24-830a-9a49a581b186
    health: HEALTH_OK
 
  services:
    mon: 3 daemons, quorum ceph-node-01,ceph-node-02,ceph-node-03 (age 7d)
    mgr: ceph-node-02(active, since 7d), standbys: ceph-node-03, ceph-node-01
    mds:  3 up:standby
    osd: 3 osds: 3 up (since 14m), 3 in (since 2w)
 
  data:
    pools:   2 pools, 97 pgs
    objects: 4.93k objects, 8.4 GiB
    usage:   29 GiB used, 1.4 TiB / 1.5 TiB avail
    pgs:     97 active+clean
 
[root@ceph-node-01 ~]# ceph osd pool ls
device_health_metrics
kubeceph
```

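For context, the pool and the base64 key used in the addon values above were prepared on the Ceph side roughly like this (a minimal sketch; the PG counts are an assumption, only the pool name kubeceph and the use of the admin key come from the setup above):

```bash
# Create and tag the RBD pool referenced as ceph.pool=kubeceph (PG count is an assumption)
ceph osd pool create kubeceph 64 64
ceph osd pool application enable kubeceph rbd

# The rbd-provisioner chart values above use base64-encoded keys, so encode the admin key
ceph auth get-key client.admin | base64
```
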
**The kubekey installation error log is as follows:**

```
2020-09-08T15:13:01+08:00 INFO     : shell-operator latest
2020-09-08T15:13:01+08:00 INFO     : HTTP SERVER Listening on 0.0.0.0:9115
2020-09-08T15:13:01+08:00 INFO     : Use temporary dir: /tmp/shell-operator
2020-09-08T15:13:01+08:00 INFO     : Initialize hooks manager ...
2020-09-08T15:13:01+08:00 INFO     : Search and load hooks ...
2020-09-08T15:13:01+08:00 INFO     : Load hook config from '/hooks/kubesphere/installRunner.py'
2020-09-08T15:13:03+08:00 INFO     : Load hook config from '/hooks/kubesphere/schedule.sh'
2020-09-08T15:13:03+08:00 INFO     : Initializing schedule manager ...
2020-09-08T15:13:03+08:00 INFO     : KUBE Init Kubernetes client
2020-09-08T15:13:03+08:00 INFO     : KUBE-INIT Kubernetes client is configured successfully
2020-09-08T15:13:03+08:00 INFO     : MAIN: run main loop
2020-09-08T15:13:03+08:00 INFO     : MAIN: add onStartup tasks
2020-09-08T15:13:03+08:00 INFO     : Running schedule manager ...
2020-09-08T15:13:03+08:00 INFO     : QUEUE add all HookRun@OnStartup
2020-09-08T15:13:03+08:00 INFO     : MSTOR Create new metric shell_operator_live_ticks
2020-09-08T15:13:03+08:00 INFO     : MSTOR Create new metric shell_operator_tasks_queue_length
2020-09-08T15:13:03+08:00 INFO     : GVR for kind 'ClusterConfiguration' is installer.kubesphere.io/v1alpha1, Resource=clusterconfigurations
2020-09-08T15:13:03+08:00 INFO     : EVENT Kube event '4c95b047-e6ed-4fe6-94df-e93332d2b9b9'
2020-09-08T15:13:03+08:00 INFO     : QUEUE add TASK_HOOK_RUN@KUBE_EVENTS kubesphere/installRunner.py
2020-09-08T15:13:06+08:00 INFO     : TASK_RUN HookRun@KUBE_EVENTS kubesphere/installRunner.py
2020-09-08T15:13:06+08:00 INFO     : Running hook 'kubesphere/installRunner.py' binding 'KUBE_EVENTS' ...
[WARNING]: No inventory was parsed, only implicit localhost is available
[WARNING]: provided hosts list is empty, only localhost is available. Note that
the implicit localhost does not match 'all'

PLAY [localhost] ***************************************************************

TASK [download : include_tasks] ************************************************
skipping: [localhost]

TASK [download : Download items] ***********************************************
skipping: [localhost]

TASK [download : Sync container] ***********************************************
skipping: [localhost]

TASK [kubesphere-defaults : Configure defaults] ********************************
ok: [localhost] => {
    "msg": "Check roles/kubesphere-defaults/defaults/main.yml"
}

TASK [preinstall : check k8s version] ******************************************
changed: [localhost]

TASK [preinstall : init k8s version] *******************************************
ok: [localhost]

TASK [preinstall : Stop if kubernetes version is nonsupport] *******************
ok: [localhost] => {
    "changed": false,
    "msg": "All assertions passed"
}

TASK [preinstall : check storage class] ****************************************
changed: [localhost]

TASK [preinstall : Stop if StorageClass was not found] *************************
skipping: [localhost]

TASK [preinstall : check default storage class] ********************************
changed: [localhost]

TASK [preinstall : Stop if defaultStorageClass was not found] ******************
ok: [localhost] => {
    "changed": false,
    "msg": "All assertions passed"
}

TASK [preinstall : Kubesphere | Checking kubesphere component] *****************
changed: [localhost]

TASK [preinstall : Kubesphere | Get kubesphere component version] **************
skipping: [localhost]

TASK [preinstall : Kubesphere | Get kubesphere component version] **************
skipping: [localhost] => (item=ks-openldap) 
skipping: [localhost] => (item=ks-redis) 
skipping: [localhost] => (item=ks-minio) 
skipping: [localhost] => (item=ks-openpitrix) 
skipping: [localhost] => (item=elasticsearch-logging) 
skipping: [localhost] => (item=elasticsearch-logging-curator) 
skipping: [localhost] => (item=istio) 
skipping: [localhost] => (item=istio-init) 
skipping: [localhost] => (item=jaeger-operator) 
skipping: [localhost] => (item=ks-jenkins) 
skipping: [localhost] => (item=ks-sonarqube) 
skipping: [localhost] => (item=logging-fluentbit-operator) 
skipping: [localhost] => (item=uc) 
skipping: [localhost] => (item=metrics-server) 

PLAY RECAP *********************************************************************
localhost                  : ok=8    changed=4    unreachable=0    failed=0    skipped=6    rescued=0    ignored=0   

[WARNING]: No inventory was parsed, only implicit localhost is available
[WARNING]: provided hosts list is empty, only localhost is available. Note that
the implicit localhost does not match 'all'

PLAY [localhost] ***************************************************************

TASK [download : include_tasks] ************************************************
skipping: [localhost]

TASK [download : Download items] ***********************************************
skipping: [localhost]

TASK [download : Sync container] ***********************************************
skipping: [localhost]

TASK [kubesphere-defaults : Configure defaults] ********************************
ok: [localhost] => {
    "msg": "Check roles/kubesphere-defaults/defaults/main.yml"
}

TASK [metrics-server : Metrics-Server | Checking old installation files] *******
ok: [localhost]

TASK [metrics-server : Metrics-Server | deleting old metrics-server] ***********
skipping: [localhost]

TASK [metrics-server : Metrics-Server | deleting old metrics-server files] *****
[DEPRECATION WARNING]: evaluating {'changed': False, 'stat': {'exists': False},
 'failed': False} as a bare variable, this behaviour will go away and you might
 need to add |bool to the expression in the future. Also see 
CONDITIONAL_BARE_VARS configuration toggle.. This feature will be removed in 
version 2.12. Deprecation warnings can be disabled by setting 
deprecation_warnings=False in ansible.cfg.
ok: [localhost] => (item=metrics-server)

TASK [metrics-server : Metrics-Server | Getting metrics-server installation files] ***
changed: [localhost]

TASK [metrics-server : Metrics-Server | Creating manifests] ********************
changed: [localhost] => (item={'name': 'values', 'file': 'values.yaml', 'type': 'config'})

TASK [metrics-server : Metrics-Server | Check Metrics-Server] ******************
fatal: [localhost]: FAILED! => {"changed": true, "cmd": "/usr/local/bin/helm list metrics-server -n kube-system\n", "delta": "0:00:00.134151", "end": "2020-09-08 15:13:30.162528", "msg": "non-zero return code", "rc": 1, "start": "2020-09-08 15:13:30.028377", "stderr": "Error: \"helm list\" accepts no arguments\n\nUsage:  helm list [flags]", "stderr_lines": ["Error: \"helm list\" accepts no arguments", "", "Usage:  helm list [flags]"], "stdout": "", "stdout_lines": []}
...ignoring

TASK [metrics-server : Metrics-Server | Installing metrics-server] *************
changed: [localhost]

TASK [metrics-server : Metrics-Server | Installing metrics-server retry] *******
skipping: [localhost]

TASK [metrics-server : Metrics-Server | Waitting for v1beta1.metrics.k8s.io ready] ***
FAILED - RETRYING: Metrics-Server | Waitting for v1beta1.metrics.k8s.io ready (60 retries left).
FAILED - RETRYING: Metrics-Server | Waitting for v1beta1.metrics.k8s.io ready (59 retries left).
FAILED - RETRYING: Metrics-Server | Waitting for v1beta1.metrics.k8s.io ready (58 retries left).
changed: [localhost]

TASK [metrics-server : Metrics-Server | import metrics-server status] **********
changed: [localhost]

PLAY RECAP *********************************************************************
localhost                  : ok=9    changed=6    unreachable=0    failed=0    skipped=5    rescued=0    ignored=1   

[WARNING]: No inventory was parsed, only implicit localhost is available
[WARNING]: provided hosts list is empty, only localhost is available. Note that
the implicit localhost does not match 'all'

PLAY [localhost] ***************************************************************

TASK [download : include_tasks] ************************************************
skipping: [localhost]

TASK [download : Download items] ***********************************************
skipping: [localhost]

TASK [download : Sync container] ***********************************************
skipping: [localhost]

TASK [kubesphere-defaults : Configure defaults] ********************************
ok: [localhost] => {
    "msg": "Check roles/kubesphere-defaults/defaults/main.yml"
}

TASK [common : Kubesphere | Check kube-node-lease namespace] *******************
changed: [localhost]

TASK [common : KubeSphere | Get system namespaces] *****************************
ok: [localhost]

TASK [common : set_fact] *******************************************************
ok: [localhost]

TASK [common : debug] **********************************************************
ok: [localhost] => {
    "msg": [
        "kubesphere-system",
        "kubesphere-controls-system",
        "kubesphere-monitoring-system",
        "kube-node-lease",
        "kubesphere-logging-system",
        "openpitrix-system",
        "kubesphere-devops-system",
        "istio-system",
        "kubesphere-alerting-system",
        "istio-system"
    ]
}

TASK [common : KubeSphere | Create kubesphere namespace] ***********************
changed: [localhost] => (item=kubesphere-system)
changed: [localhost] => (item=kubesphere-controls-system)
changed: [localhost] => (item=kubesphere-monitoring-system)
changed: [localhost] => (item=kube-node-lease)
changed: [localhost] => (item=kubesphere-logging-system)
changed: [localhost] => (item=openpitrix-system)
changed: [localhost] => (item=kubesphere-devops-system)
changed: [localhost] => (item=istio-system)
changed: [localhost] => (item=kubesphere-alerting-system)
changed: [localhost] => (item=istio-system)

TASK [common : KubeSphere | Labeling system-workspace] *************************
changed: [localhost] => (item=default)
changed: [localhost] => (item=kube-public)
changed: [localhost] => (item=kube-system)
changed: [localhost] => (item=kubesphere-system)
changed: [localhost] => (item=kubesphere-controls-system)
changed: [localhost] => (item=kubesphere-monitoring-system)
changed: [localhost] => (item=kube-node-lease)
changed: [localhost] => (item=kubesphere-logging-system)
changed: [localhost] => (item=openpitrix-system)
changed: [localhost] => (item=kubesphere-devops-system)
changed: [localhost] => (item=istio-system)
changed: [localhost] => (item=kubesphere-alerting-system)
changed: [localhost] => (item=istio-system)

TASK [common : KubeSphere | Create ImagePullSecrets] ***************************
changed: [localhost] => (item=default)
changed: [localhost] => (item=kube-public)
changed: [localhost] => (item=kube-system)
changed: [localhost] => (item=kubesphere-system)
changed: [localhost] => (item=kubesphere-controls-system)
changed: [localhost] => (item=kubesphere-monitoring-system)
changed: [localhost] => (item=kube-node-lease)
changed: [localhost] => (item=kubesphere-logging-system)
changed: [localhost] => (item=openpitrix-system)
changed: [localhost] => (item=kubesphere-devops-system)
changed: [localhost] => (item=istio-system)
changed: [localhost] => (item=kubesphere-alerting-system)
changed: [localhost] => (item=istio-system)

TASK [common : Kubesphere | Label namespace for network policy] ****************
changed: [localhost]

TASK [common : KubeSphere | Getting kubernetes master num] *********************
changed: [localhost]

TASK [common : KubeSphere | Setting master num] ********************************
ok: [localhost]

TASK [common : Kubesphere | Getting common component installation files] *******
changed: [localhost] => (item=common)
changed: [localhost] => (item=ks-crds)

TASK [common : KubeSphere | Create KubeSphere crds] ****************************
changed: [localhost]

TASK [common : KubeSphere | Recreate KubeSphere crds] **************************
changed: [localhost]

TASK [common : KubeSphere | check k8s version] *********************************
changed: [localhost]

TASK [common : Kubesphere | Getting common component installation files] *******
changed: [localhost] => (item=snapshot-controller)

TASK [common : Kubesphere | Creating snapshot controller values] ***************
changed: [localhost] => (item={'name': 'custom-values-snapshot-controller', 'file': 'custom-values-snapshot-controller.yaml'})

TASK [common : Kubesphere | Remove old snapshot crd] ***************************
changed: [localhost]

TASK [common : Kubesphere | Deploy snapshot controller] ************************
changed: [localhost]

TASK [common : Kubesphere | Checking openpitrix common component] **************
changed: [localhost]

TASK [common : include_tasks] **************************************************
skipping: [localhost] => (item={'op': 'openpitrix-db', 'ks': 'mysql-pvc'}) 
skipping: [localhost] => (item={'op': 'openpitrix-etcd', 'ks': 'etcd-pvc'}) 

TASK [common : Getting PersistentVolumeName (mysql)] ***************************
skipping: [localhost]

TASK [common : Getting PersistentVolumeSize (mysql)] ***************************
skipping: [localhost]

TASK [common : Setting PersistentVolumeName (mysql)] ***************************
skipping: [localhost]

TASK [common : Setting PersistentVolumeSize (mysql)] ***************************
skipping: [localhost]

TASK [common : Getting PersistentVolumeName (etcd)] ****************************
skipping: [localhost]

TASK [common : Getting PersistentVolumeSize (etcd)] ****************************
skipping: [localhost]

TASK [common : Setting PersistentVolumeName (etcd)] ****************************
skipping: [localhost]

TASK [common : Setting PersistentVolumeSize (etcd)] ****************************
skipping: [localhost]

TASK [common : Kubesphere | Check mysql PersistentVolumeClaim] *****************
fatal: [localhost]: FAILED! => {"changed": true, "cmd": "/usr/local/bin/kubectl get pvc -n kubesphere-system mysql-pvc -o jsonpath='{.status.capacity.storage}'\n", "delta": "0:00:00.078671", "end": "2020-09-08 15:14:51.808948", "msg": "non-zero return code", "rc": 1, "start": "2020-09-08 15:14:51.730277", "stderr": "Error from server (NotFound): persistentvolumeclaims \"mysql-pvc\" not found", "stderr_lines": ["Error from server (NotFound): persistentvolumeclaims \"mysql-pvc\" not found"], "stdout": "", "stdout_lines": []}
...ignoring

TASK [common : Kubesphere | Setting mysql db pv size] **************************
skipping: [localhost]

TASK [common : Kubesphere | Check redis PersistentVolumeClaim] *****************
fatal: [localhost]: FAILED! => {"changed": true, "cmd": "/usr/local/bin/kubectl get pvc -n kubesphere-system redis-pvc -o jsonpath='{.status.capacity.storage}'\n", "delta": "0:00:00.076767", "end": "2020-09-08 15:14:52.157802", "msg": "non-zero return code", "rc": 1, "start": "2020-09-08 15:14:52.081035", "stderr": "Error from server (NotFound): persistentvolumeclaims \"redis-pvc\" not found", "stderr_lines": ["Error from server (NotFound): persistentvolumeclaims \"redis-pvc\" not found"], "stdout": "", "stdout_lines": []}
...ignoring

TASK [common : Kubesphere | Setting redis db pv size] **************************
skipping: [localhost]

TASK [common : Kubesphere | Check minio PersistentVolumeClaim] *****************
fatal: [localhost]: FAILED! => {"changed": true, "cmd": "/usr/local/bin/kubectl get pvc -n kubesphere-system minio -o jsonpath='{.status.capacity.storage}'\n", "delta": "0:00:00.121202", "end": "2020-09-08 15:14:52.548301", "msg": "non-zero return code", "rc": 1, "start": "2020-09-08 15:14:52.427099", "stderr": "Error from server (NotFound): persistentvolumeclaims \"minio\" not found", "stderr_lines": ["Error from server (NotFound): persistentvolumeclaims \"minio\" not found"], "stdout": "", "stdout_lines": []}
...ignoring

TASK [common : Kubesphere | Setting minio pv size] *****************************
skipping: [localhost]

TASK [common : Kubesphere | Check openldap PersistentVolumeClaim] **************
fatal: [localhost]: FAILED! => {"changed": true, "cmd": "/usr/local/bin/kubectl get pvc -n kubesphere-system openldap-pvc-openldap-0 -o jsonpath='{.status.capacity.storage}'\n", "delta": "0:00:00.075441", "end": "2020-09-08 15:14:52.896884", "msg": "non-zero return code", "rc": 1, "start": "2020-09-08 15:14:52.821443", "stderr": "Error from server (NotFound): persistentvolumeclaims \"openldap-pvc-openldap-0\" not found", "stderr_lines": ["Error from server (NotFound): persistentvolumeclaims \"openldap-pvc-openldap-0\" not found"], "stdout": "", "stdout_lines": []}
...ignoring

TASK [common : Kubesphere | Setting openldap pv size] **************************
skipping: [localhost]

TASK [common : Kubesphere | Check etcd db PersistentVolumeClaim] ***************
fatal: [localhost]: FAILED! => {"changed": true, "cmd": "/usr/local/bin/kubectl get pvc -n kubesphere-system etcd-pvc -o jsonpath='{.status.capacity.storage}'\n", "delta": "0:00:00.078169", "end": "2020-09-08 15:14:53.240773", "msg": "non-zero return code", "rc": 1, "start": "2020-09-08 15:14:53.162604", "stderr": "Error from server (NotFound): persistentvolumeclaims \"etcd-pvc\" not found", "stderr_lines": ["Error from server (NotFound): persistentvolumeclaims \"etcd-pvc\" not found"], "stdout": "", "stdout_lines": []}
...ignoring

TASK [common : Kubesphere | Setting etcd pv size] ******************************
skipping: [localhost]

TASK [common : Kubesphere | Check redis ha PersistentVolumeClaim] **************
fatal: [localhost]: FAILED! => {"changed": true, "cmd": "/usr/local/bin/kubectl get pvc -n kubesphere-system data-redis-ha-server-0 -o jsonpath='{.status.capacity.storage}'\n", "delta": "0:00:00.082143", "end": "2020-09-08 15:14:53.587320", "msg": "non-zero return code", "rc": 1, "start": "2020-09-08 15:14:53.505177", "stderr": "Error from server (NotFound): persistentvolumeclaims \"data-redis-ha-server-0\" not found", "stderr_lines": ["Error from server (NotFound): persistentvolumeclaims \"data-redis-ha-server-0\" not found"], "stdout": "", "stdout_lines": []}
...ignoring

TASK [common : Kubesphere | Setting redis ha pv size] **************************
skipping: [localhost]

TASK [common : Kubesphere | Check es-master PersistentVolumeClaim] *************
fatal: [localhost]: FAILED! => {"changed": true, "cmd": "/usr/local/bin/kubectl get pvc -n kubesphere-logging-system data-elasticsearch-logging-discovery-0 -o jsonpath='{.status.capacity.storage}'\n", "delta": "0:00:00.076549", "end": "2020-09-08 15:14:53.948648", "msg": "non-zero return code", "rc": 1, "start": "2020-09-08 15:14:53.872099", "stderr": "Error from server (NotFound): persistentvolumeclaims \"data-elasticsearch-logging-discovery-0\" not found", "stderr_lines": ["Error from server (NotFound): persistentvolumeclaims \"data-elasticsearch-logging-discovery-0\" not found"], "stdout": "", "stdout_lines": []}
...ignoring

TASK [common : Kubesphere | Setting es master pv size] *************************
skipping: [localhost]

TASK [common : Kubesphere | Check es data PersistentVolumeClaim] ***************
fatal: [localhost]: FAILED! => {"changed": true, "cmd": "/usr/local/bin/kubectl get pvc -n kubesphere-logging-system data-elasticsearch-logging-data-0 -o jsonpath='{.status.capacity.storage}'\n", "delta": "0:00:00.079480", "end": "2020-09-08 15:14:54.308542", "msg": "non-zero return code", "rc": 1, "start": "2020-09-08 15:14:54.229062", "stderr": "Error from server (NotFound): persistentvolumeclaims \"data-elasticsearch-logging-data-0\" not found", "stderr_lines": ["Error from server (NotFound): persistentvolumeclaims \"data-elasticsearch-logging-data-0\" not found"], "stdout": "", "stdout_lines": []}
...ignoring

TASK [common : Kubesphere | Setting es data pv size] ***************************
skipping: [localhost]

TASK [common : Kubesphere | Creating common component manifests] ***************
changed: [localhost] => (item={'path': 'etcd', 'file': 'etcd.yaml'})
changed: [localhost] => (item={'name': 'mysql', 'file': 'mysql.yaml'})
changed: [localhost] => (item={'path': 'redis', 'file': 'redis.yaml'})

TASK [common : Kubesphere | Creating mysql sercet] *****************************
changed: [localhost]

TASK [common : Kubesphere | Deploying etcd and mysql] **************************
skipping: [localhost] => (item=etcd.yaml) 
skipping: [localhost] => (item=mysql.yaml) 

TASK [common : Kubesphere | Getting minio installation files] ******************
skipping: [localhost] => (item=minio-ha) 

TASK [common : Kubesphere | Creating manifests] ********************************
skipping: [localhost] => (item={'name': 'custom-values-minio', 'file': 'custom-values-minio.yaml'}) 

TASK [common : Kubesphere | Check minio] ***************************************
skipping: [localhost]

TASK [common : Kubesphere | Deploy minio] **************************************
skipping: [localhost]

TASK [common : debug] **********************************************************
skipping: [localhost]

TASK [common : fail] ***********************************************************
skipping: [localhost]

TASK [common : Kubesphere | create minio config directory] *********************
skipping: [localhost]

TASK [common : Kubesphere | Creating common component manifests] ***************
skipping: [localhost] => (item={'path': '/root/.config/rclone', 'file': 'rclone.conf'}) 

TASK [common : include_tasks] **************************************************
skipping: [localhost] => (item=helm) 
skipping: [localhost] => (item=vmbased) 

TASK [common : Kubesphere | import minio status] *******************************
skipping: [localhost]

TASK [common : Kubesphere | Check ha-redis] ************************************
skipping: [localhost]

TASK [common : Kubesphere | Getting redis installation files] ******************
skipping: [localhost] => (item=redis-ha) 

TASK [common : Kubesphere | Creating manifests] ********************************
skipping: [localhost] => (item={'name': 'custom-values-redis', 'file': 'custom-values-redis.yaml'}) 

TASK [common : Kubesphere | Check old redis status] ****************************
skipping: [localhost]

TASK [common : Kubesphere | Delete and backup old redis svc] *******************
skipping: [localhost]

TASK [common : Kubesphere | Deploying redis] ***********************************
skipping: [localhost]

TASK [common : Kubesphere | Getting redis PodIp] *******************************
skipping: [localhost]

TASK [common : Kubesphere | Creating redis migration script] *******************
skipping: [localhost] => (item={'path': '/etc/kubesphere', 'file': 'redisMigrate.py'}) 

TASK [common : Kubesphere | Check redis-ha status] *****************************
skipping: [localhost]

TASK [common : ks-logging | Migrating redis data] ******************************
skipping: [localhost]

TASK [common : Kubesphere | Disable old redis] *********************************
skipping: [localhost]

TASK [common : Kubesphere | Deploying redis] ***********************************
skipping: [localhost] => (item=redis.yaml) 

TASK [common : Kubesphere | import redis status] *******************************
skipping: [localhost]

TASK [common : Kubesphere | Getting openldap installation files] ***************
skipping: [localhost] => (item=openldap-ha) 

TASK [common : Kubesphere | Creating manifests] ********************************
skipping: [localhost] => (item={'name': 'custom-values-openldap', 'file': 'custom-values-openldap.yaml'}) 

TASK [common : Kubesphere | Check old openldap status] *************************
skipping: [localhost]

TASK [common : KubeSphere | Shutdown ks-account] *******************************
skipping: [localhost]

TASK [common : Kubesphere | Delete and backup old openldap svc] ****************
skipping: [localhost]

TASK [common : Kubesphere | Check openldap] ************************************
skipping: [localhost]

TASK [common : Kubesphere | Deploy openldap] ***********************************
skipping: [localhost]

TASK [common : Kubesphere | Load old openldap data] ****************************
skipping: [localhost]

TASK [common : Kubesphere | Check openldap-ha status] **************************
skipping: [localhost]

TASK [common : Kubesphere | Get openldap-ha pod list] **************************
skipping: [localhost]

TASK [common : Kubesphere | Get old openldap data] *****************************
skipping: [localhost]

TASK [common : Kubesphere | Migrating openldap data] ***************************
skipping: [localhost]

TASK [common : Kubesphere | Disable old openldap] ******************************
skipping: [localhost]

TASK [common : Kubesphere | Restart openldap] **********************************
skipping: [localhost]

TASK [common : KubeSphere | Restarting ks-account] *****************************
skipping: [localhost]

TASK [common : Kubesphere | import openldap status] ****************************
skipping: [localhost]

TASK [common : Kubesphere | Check ha-redis] ************************************
fatal: [localhost]: FAILED! => {"changed": true, "cmd": "/usr/local/bin/helm list -n kubesphere-system | grep \"ks-redis\"\n", "delta": "0:00:00.062274", "end": "2020-09-08 15:14:57.775187", "msg": "non-zero return code", "rc": 1, "start": "2020-09-08 15:14:57.712913", "stderr": "", "stderr_lines": [], "stdout": "", "stdout_lines": []}
...ignoring

TASK [common : Kubesphere | Getting redis installation files] ******************
changed: [localhost] => (item=redis-ha)

TASK [common : Kubesphere | Creating manifests] ********************************
changed: [localhost] => (item={'name': 'custom-values-redis', 'file': 'custom-values-redis.yaml'})

TASK [common : Kubesphere | Check old redis status] ****************************
changed: [localhost]

TASK [common : Kubesphere | Delete and backup old redis svc] *******************
skipping: [localhost]

TASK [common : Kubesphere | Deploying redis] ***********************************
changed: [localhost]

TASK [common : Kubesphere | Getting redis PodIp] *******************************
skipping: [localhost]

TASK [common : Kubesphere | Creating redis migration script] *******************
skipping: [localhost] => (item={'path': '/etc/kubesphere', 'file': 'redisMigrate.py'}) 

TASK [common : Kubesphere | Check redis-ha status] *****************************
skipping: [localhost]

TASK [common : ks-logging | Migrating redis data] ******************************
skipping: [localhost]

TASK [common : Kubesphere | Disable old redis] *********************************
skipping: [localhost]

TASK [common : Kubesphere | Deploying redis] ***********************************
skipping: [localhost] => (item=redis.yaml) 

TASK [common : Kubesphere | import redis status] *******************************
changed: [localhost]

TASK [common : Kubesphere | Getting openldap installation files] ***************
changed: [localhost] => (item=openldap-ha)

TASK [common : Kubesphere | Creating manifests] ********************************
changed: [localhost] => (item={'name': 'custom-values-openldap', 'file': 'custom-values-openldap.yaml'})

TASK [common : Kubesphere | Check old openldap status] *************************
changed: [localhost]

TASK [common : KubeSphere | Shutdown ks-account] *******************************
skipping: [localhost]

TASK [common : Kubesphere | Delete and backup old openldap svc] ****************
skipping: [localhost]

TASK [common : Kubesphere | Check openldap] ************************************
fatal: [localhost]: FAILED! => {"changed": true, "cmd": "/usr/local/bin/helm list -n kubesphere-system | grep \"ks-openldap\"\n", "delta": "0:00:00.063882", "end": "2020-09-08 15:15:15.454746", "msg": "non-zero return code", "rc": 1, "start": "2020-09-08 15:15:15.390864", "stderr": "", "stderr_lines": [], "stdout": "", "stdout_lines": []}
...ignoring

TASK [common : Kubesphere | Deploy openldap] ***********************************
changed: [localhost]

TASK [common : Kubesphere | Load old openldap data] ****************************
skipping: [localhost]

TASK [common : Kubesphere | Check openldap-ha status] **************************
skipping: [localhost]

TASK [common : Kubesphere | Get openldap-ha pod list] **************************
skipping: [localhost]

TASK [common : Kubesphere | Get old openldap data] *****************************
skipping: [localhost]

TASK [common : Kubesphere | Migrating openldap data] ***************************
skipping: [localhost]

TASK [common : Kubesphere | Disable old openldap] ******************************
skipping: [localhost]

TASK [common : Kubesphere | Restart openldap] **********************************
skipping: [localhost]

TASK [common : KubeSphere | Restarting ks-account] *****************************
skipping: [localhost]

TASK [common : Kubesphere | import openldap status] ****************************
changed: [localhost]

TASK [common : Kubesphere | Getting minio installation files] ******************
changed: [localhost] => (item=minio-ha)

TASK [common : Kubesphere | Creating manifests] ********************************
changed: [localhost] => (item={'name': 'custom-values-minio', 'file': 'custom-values-minio.yaml'})

TASK [common : Kubesphere | Check minio] ***************************************
fatal: [localhost]: FAILED! => {"changed": true, "cmd": "/usr/local/bin/helm list -n kubesphere-system | grep \"ks-minio\"\n", "delta": "0:00:00.092635", "end": "2020-09-08 15:15:26.009366", "msg": "non-zero return code", "rc": 1, "start": "2020-09-08 15:15:25.916731", "stderr": "", "stderr_lines": [], "stdout": "", "stdout_lines": []}
...ignoring

TASK [common : Kubesphere | Deploy minio] **************************************
fatal: [localhost]: FAILED! => {"changed": true, "cmd": "/usr/local/bin/helm upgrade --install ks-minio /kubesphere/kubesphere/minio-ha -f /kubesphere/kubesphere/custom-values-minio.yaml --set fullnameOverride=minio --namespace kubesphere-system --wait --timeout 1800s\n", "delta": "0:30:00.753289", "end": "2020-09-08 15:45:27.101339", "msg": "non-zero return code", "rc": 1, "start": "2020-09-08 15:15:26.348050", "stderr": "Error: timed out waiting for the condition", "stderr_lines": ["Error: timed out waiting for the condition"], "stdout": "Release \"ks-minio\" does not exist. Installing it now.", "stdout_lines": ["Release \"ks-minio\" does not exist. Installing it now."]}
...ignoring

TASK [common : debug] **********************************************************
ok: [localhost] => {
    "msg": [
        "1. check the storage configuration and storage server",
        "2. make sure the DNS address in /etc/resolv.conf is available",
        "3. execute 'kubectl logs -n kubesphere-system -l job-name=minio-make-bucket-job' to watch logs",
        "4. execute 'helm -n kubesphere-system uninstall ks-minio && kubectl -n kubesphere-system delete job minio-make-bucket-job'",
        "5. Restart the installer pod in kubesphere-system namespace"
    ]
}

TASK [common : fail] ***********************************************************
fatal: [localhost]: FAILED! => {"changed": false, "msg": "It is suggested to refer to the above methods for troubleshooting problems ."}

PLAY RECAP *********************************************************************
localhost                  : ok=47   changed=41   unreachable=0    failed=1    skipped=77   rescued=0    ignored=12  
```
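The install ultimately fails because the ks-minio release never becomes ready within the 1800s helm timeout; the usual cause is that its PersistentVolumeClaim never binds. Before the reply below, a quick way to see why is to inspect the pending claim and the provisioner logs (a sketch; the app=rbd-provisioner label is an assumption based on the chart name):

```bash
# Check whether the minio claim bound and what the provisioner reports
kubectl -n kubesphere-system get pvc
kubectl -n kubesphere-system describe pvc minio
kubectl -n kube-system logs -l app=rbd-provisioner --tail=50          # label is an assumption
kubectl -n kubesphere-system logs -l job-name=minio-make-bucket-job   # from the hint in the log above
```
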
Forest-L replied:
> Details to keep in mind when resolving this problem:
> 1. Run kk on a master node;
> 2. A multi-node Ceph cluster behind a VIP (keepalived + haproxy) is not supported yet;
> 3. Keep the YAML files well-formed;
> 4. Note that the ceph-rbd key must be base64-encoded, while the ceph-csi key must not be;
> 5. When installing ceph-rbd, it is recommended to run `helm repo add test https://charts.kubesphere.io/test` first (see the commands after this list);
    # config-sample.yaml
      addons: 
      - name: rbd-provisioner
        namespace: kube-system
        sources:
          chart:
            name: rbd-provisioner
            repo: https://charts.kubesphere.io/test
            values: /root/rbd-provisioner-value.yaml
      - name: ceph-csi-rbd
        namespace: kube-system
        sources:
          chart:
            name: ceph-csi-rbd
            repo: https://ceph.github.io/csi-charts
            values: /root/ceph-csi-config.yaml
      - name: ceph-csi-rbd-sc
        sources:
          yaml:
            path:
            - /root/ceph-csi-rbd-sc.yaml
    # rbd-provisioner-value.yaml 
    ceph:
      adminId: admin
      adminKey: QVFDUy9rQmY4RWczT0JBQXhwdTlzWStWUU43K1VGYldudWEyY0E9PQ==  # base64-encoded
      userId: admin
      userKey: QVFDUy9rQmY4RWczT0JBQXhwdTlzWStWUU43K1VGYldudWEyY0E9PQ==
      mon: 192.168.99.11:6789,192.168.99.12:6789,192.168.99.13:6789
      pool: kubeceph
    sc:
      isDefault: true
    # ceph-csi-config.yaml 
    csiConfig:
      - clusterID: "cluster1"
        monitors:
          - 192.168.99.11:6789 
          - 192.168.99.12:6789 
          - 192.168.99.13:6789 
      
    
    > If the tolerations below are not added, the master nodes must also be joined as worker nodes; otherwise the following error appears (a verification sketch follows the tolerations block):
    > driver name rbd.csi.ceph.com not found in the list of registered CSI drivers
    
    nodeplugin:
      tolerations: 
        - key: node-role.kubernetes.io/master
          effect: NoSchedule 
        - key: CriticalAddonsOnly 
          operator: Exists 
        - effect: NoExecute 
          key: node.kubernetes.io/not-ready 
          operator: Exists 
          tolerationSeconds: 60 
        - effect: NoExecute 
          key: node.kubernetes.io/unreachable 
          operator: Exists 
          tolerationSeconds: 60
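
Once the chart is installed with these tolerations, the registration can be verified from any master (a quick check; the grep pattern assumes the default release name ceph-csi-rbd):

```bash
# rbd.csi.ceph.com should be listed, and nodeplugin pods should run on every node, masters included
kubectl get csidrivers
kubectl -n kube-system get pods -o wide | grep ceph-csi-rbd
```
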
    # ceph-csi-rbd-sc.yaml 
    apiVersion: v1
    kind: Secret
    metadata:
      name: csi-rbd-secret
      namespace: kube-system
    stringData:
      userID: admin
      userKey: AQCS/kBf8Eg3OBAAxpu9sY+VQN7+UFbWnua2cA==   # raw key, not base64-encoded
      encryptionPassphrase: test_passphrase
    ---
    apiVersion: storage.k8s.io/v1
    kind: StorageClass
    metadata:
       name: csi-rbd-sc
       annotations:
         storageclass.beta.kubernetes.io/is-default-class: "false" # false because the rbd StorageClass above is already the default (sc.isDefault=true)
         storageclass.kubesphere.io/supported-access-modes: '["ReadWriteOnce","ReadOnlyMany","ReadWriteMany"]'
    provisioner: rbd.csi.ceph.com
    parameters:
       clusterID: "cluster1"
       pool: kubeceph
       imageFeatures: layering
       csi.storage.k8s.io/provisioner-secret-name: csi-rbd-secret
       csi.storage.k8s.io/provisioner-secret-namespace: kube-system
       csi.storage.k8s.io/controller-expand-secret-name: csi-rbd-secret
       csi.storage.k8s.io/controller-expand-secret-namespace: kube-system
       csi.storage.k8s.io/node-stage-secret-name: csi-rbd-secret
       csi.storage.k8s.io/node-stage-secret-namespace: kube-system
       csi.storage.k8s.io/fstype: ext4
    reclaimPolicy: Delete
    allowVolumeExpansion: true
    mountOptions:
       - discard
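
A quick way to confirm the whole chain works is to create a throwaway PVC against csi-rbd-sc and watch it bind (the claim name and size below are arbitrary):

```bash
# Create a 1Gi test claim on csi-rbd-sc and wait for it to reach Bound
cat <<EOF | kubectl apply -f -
apiVersion: v1
kind: PersistentVolumeClaim
metadata:
  name: rbd-test-pvc
  namespace: default
spec:
  accessModes: ["ReadWriteOnce"]
  storageClassName: csi-rbd-sc
  resources:
    requests:
      storage: 1Gi
EOF
kubectl get pvc -n default rbd-test-pvc -w
```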

Forest-L:

    [root@kube-master-02 ~]# kubectl get sc
    NAME            PROVISIONER    RECLAIMPOLICY   VOLUMEBINDINGMODE   ALLOWVOLUMEEXPANSION   AGE
    rbd (default)   ceph.com/rbd   Delete          Immediate           false                  114m
    [root@kube-master-02 ~]#
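
The output above shows rbd is currently the default class. If csi-rbd-sc should become the default later, the same is-default-class annotation used in the StorageClass above can be flipped with kubectl patch (only one class should carry "true"):

```bash
# Demote rbd and promote csi-rbd-sc as the default StorageClass
kubectl patch storageclass rbd -p '{"metadata":{"annotations":{"storageclass.beta.kubernetes.io/is-default-class":"false"}}}'
kubectl patch storageclass csi-rbd-sc -p '{"metadata":{"annotations":{"storageclass.beta.kubernetes.io/is-default-class":"true"}}}'
```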

@Forest-L When you have time, could you please help with this again? This problem has been dragging on for two days; the Sunflower remote-access details have been sent to your email.
