• Installation & Deployment v2.1.1
  • During KubeSphere 2.1.1 installation, 3 PVCs (mysql-pvc, etcd-pvc, and data-redis-ha-server) are not created

While installing KubeSphere 2.1.1 online, the ks-installer run log shows that 3 PVCs were never created. Checking with kubectl gives the following result:
[root@k8sphere01 ]# kubectl get pvc -n kubesphere-system
NAME                      STATUS        VOLUME                                     CAPACITY   ACCESS MODES   STORAGECLASS   AGE
openldap-pvc-openldap-0   Pending                                                                            glusterfs      40h
redis-pvc                 Terminating   pvc-55214db7-abe8-461a-a122-c37674784d4b   2Gi        RWO            glusterfs      40h
[root@k8sphere01 ]#

The relevant installer log is as follows:
TASK [common : KubeSphere | Labeling system-workspace] *************************
changed: [localhost] => (item=default)
changed: [localhost] => (item=kube-public)
changed: [localhost] => (item=kube-system)
changed: [localhost] => (item=kubesphere-system)
changed: [localhost] => (item=kubesphere-controls-system)
changed: [localhost] => (item=kubesphere-monitoring-system)
changed: [localhost] => (item=kube-node-lease)

TASK [common : KubeSphere | Create ImagePullSecrets] ***************************
changed: [localhost] => (item=default)
changed: [localhost] => (item=kube-public)
changed: [localhost] => (item=kube-system)
changed: [localhost] => (item=kubesphere-system)
changed: [localhost] => (item=kubesphere-controls-system)
changed: [localhost] => (item=kubesphere-monitoring-system)
changed: [localhost] => (item=kube-node-lease)

TASK [common : KubeSphere | Getting kubernetes master num] *********************
changed: [localhost]

TASK [common : KubeSphere | Setting master num] ********************************
ok: [localhost]

TASK [common : Kubesphere | Getting common component installation files] *******
changed: [localhost] => (item=common)
changed: [localhost] => (item=ks-crds)

TASK [common : KubeSphere | Create KubeSphere crds] ****************************
changed: [localhost]

TASK [common : Kubesphere | Checking openpitrix common component] **************
changed: [localhost]

TASK [common : include_tasks] **************************************************
skipping: [localhost] => (item={u'ks': u'mysql-pvc', u'op': u'openpitrix-db'})
skipping: [localhost] => (item={u'ks': u'etcd-pvc', u'op': u'openpitrix-etcd'})

TASK [common : Getting PersistentVolumeName (mysql)] ***************************
skipping: [localhost]

TASK [common : Getting PersistentVolumeSize (mysql)] ***************************
skipping: [localhost]

TASK [common : Setting PersistentVolumeName (mysql)] ***************************
skipping: [localhost]

TASK [common : Setting PersistentVolumeSize (mysql)] ***************************
skipping: [localhost]

TASK [common : Getting PersistentVolumeName (etcd)] ****************************
skipping: [localhost]

TASK [common : Getting PersistentVolumeSize (etcd)] ****************************
skipping: [localhost]

TASK [common : Setting PersistentVolumeName (etcd)] ****************************
skipping: [localhost]

TASK [common : Setting PersistentVolumeSize (etcd)] ****************************
skipping: [localhost]

TASK [common : Kubesphere | Check mysql PersistentVolumeClaim] *****************
fatal: [localhost]: FAILED! => {"changed": true, "cmd": "/usr/local/bin/kubectl get pvc -n kubesphere-system mysql-pvc -o jsonpath='{.status.capacity.storage}'\n", "delta": "0:00:00.582094", "end": "2020-05-11 11:35:15.207716", "msg": "non-zero return code", "rc": 1, "start": "2020-05-11 11:35:14.625622", "stderr": "Error from server (NotFound): persistentvolumeclaims \"mysql-pvc\" not found", "stderr_lines": ["Error from server (NotFound): persistentvolumeclaims \"mysql-pvc\" not found"], "stdout": "", "stdout_lines": []}
…ignoring

TASK [common : Kubesphere | Setting mysql db pv size] **************************
skipping: [localhost]

TASK [common : Kubesphere | Check redis PersistentVolumeClaim] *****************
changed: [localhost]

TASK [common : Kubesphere | Setting redis db pv size] **************************
ok: [localhost]

TASK [common : Kubesphere | Check minio PersistentVolumeClaim] *****************
changed: [localhost]

TASK [common : Kubesphere | Setting minio pv size] *****************************
skipping: [localhost]

TASK [common : Kubesphere | Check openldap PersistentVolumeClaim] **************
changed: [localhost]

TASK [common : Kubesphere | Setting openldap pv size] **************************
skipping: [localhost]

TASK [common : Kubesphere | Check etcd db PersistentVolumeClaim] ***************
fatal: [localhost]: FAILED! => {"changed": true, "cmd": "/usr/local/bin/kubectl get pvc -n kubesphere-system etcd-pvc -o jsonpath='{.status.capacity.storage}'\n", "delta": "0:00:00.688133", "end": "2020-05-11 11:35:18.777267", "msg": "non-zero return code", "rc": 1, "start": "2020-05-11 11:35:18.089134", "stderr": "Error from server (NotFound): persistentvolumeclaims \"etcd-pvc\" not found", "stderr_lines": ["Error from server (NotFound): persistentvolumeclaims \"etcd-pvc\" not found"], "stdout": "", "stdout_lines": []}
…ignoring

TASK [common : Kubesphere | Setting etcd pv size] ******************************
skipping: [localhost]

TASK [common : Kubesphere | Check redis ha PersistentVolumeClaim] **************
fatal: [localhost]: FAILED! => {"changed": true, "cmd": "/usr/local/bin/kubectl get pvc -n kubesphere-system data-redis-ha-server-0 -o jsonpath='{.status.capacity.storage}'\n", "delta": "0:00:00.584829", "end": "2020-05-11 11:35:19.632633", "msg": "non-zero return code", "rc": 1, "start": "2020-05-11 11:35:19.047804", "stderr": "Error from server (NotFound): persistentvolumeclaims \"data-redis-ha-server-0\" not found", "stderr_lines": ["Error from server (NotFound): persistentvolumeclaims \"data-redis-ha-server-0\" not found"], "stdout": "", "stdout_lines": []}
…ignoring

TASK [common : Kubesphere | Setting redis ha pv size] **************************
skipping: [localhost]

TASK [common : Kubesphere | Creating common component manifests] ***************
changed: [localhost] => (item={u'path': u'etcd', u'file': u'etcd.yaml'})
changed: [localhost] => (item={u'name': u'mysql', u'file': u'mysql.yaml'})
changed: [localhost] => (item={u'path': u'redis', u'file': u'redis.yaml'})

TASK [common : Kubesphere | Creating mysql sercet] *****************************
changed: [localhost]

TASK [common : Kubesphere | Deploying etcd and mysql] **************************
skipping: [localhost] => (item=etcd.yaml)
skipping: [localhost] => (item=mysql.yaml)

TASK [common : Kubesphere | Getting minio installation files] ******************
skipping: [localhost] => (item=minio-ha)

TASK [common : Kubesphere | Creating manifests] ********************************
skipping: [localhost] => (item={u'name': u'custom-values-minio', u'file': u'custom-values-minio.yaml'})

TASK [common : Kubesphere | Check minio] ***************************************
skipping: [localhost]

TASK [common : Kubesphere | Deploy minio] **************************************
skipping: [localhost]

TASK [common : debug] **********************************************************
skipping: [localhost]

TASK [common : fail] ***********************************************************
skipping: [localhost]

TASK [common : Kubesphere | create minio config directory] *********************
skipping: [localhost]

TASK [common : Kubesphere | Creating common component manifests] ***************
skipping: [localhost] => (item={u'path': u'/root/.config/rclone', u'file': u'rclone.conf'})

TASK [common : include_tasks] **************************************************
skipping: [localhost] => (item=helm)
skipping: [localhost] => (item=vmbased)

TASK [common : Kubesphere | Check ha-redis] ************************************
skipping: [localhost]

TASK [common : Kubesphere | Getting redis installation files] ******************
skipping: [localhost] => (item=redis-ha)

TASK [common : Kubesphere | Creating manifests] ********************************
skipping: [localhost] => (item={u'name': u'custom-values-redis', u'file': u'custom-values-redis.yaml'})

TASK [common : Kubesphere | Check old redis status] ****************************
skipping: [localhost]

TASK [common : Kubesphere | Delete and backup old redis svc] *******************
skipping: [localhost]

TASK [common : Kubesphere | Deploying redis] ***********************************
skipping: [localhost]

TASK [common : Kubesphere | Getting redis PodIp] *******************************
skipping: [localhost]

TASK [common : Kubesphere | Creating redis migration script] *******************
skipping: [localhost] => (item={u'path': u'/etc/kubesphere', u'file': u'redisMigrate.py'})

TASK [common : Kubesphere | Check redis-ha status] *****************************
skipping: [localhost]

TASK [common : ks-logging | Migrating redis data] ******************************
skipping: [localhost]

TASK [common : Kubesphere | Disable old redis] *********************************
skipping: [localhost]

TASK [common : Kubesphere | Deploying redis] ***********************************
skipping: [localhost] => (item=redis.yaml)

TASK [common : Kubesphere | Getting openldap installation files] ***************
skipping: [localhost] => (item=openldap-ha)

TASK [common : Kubesphere | Creating manifests] ********************************
skipping: [localhost] => (item={u'name': u'custom-values-openldap', u'file': u'custom-values-openldap.yaml'})

TASK [common : Kubesphere | Check old openldap status] *************************
skipping: [localhost]

TASK [common : KubeSphere | Shutdown ks-account] *******************************
skipping: [localhost]

TASK [common : Kubesphere | Delete and backup old openldap svc] ****************
skipping: [localhost]

TASK [common : Kubesphere | Check openldap] ************************************
skipping: [localhost]

TASK [common : Kubesphere | Deploy openldap] ***********************************
skipping: [localhost]

TASK [common : Kubesphere | Load old openldap data] ****************************
skipping: [localhost]

TASK [common : Kubesphere | Check openldap-ha status] **************************
skipping: [localhost]

TASK [common : Kubesphere | Get openldap-ha pod list] **************************
skipping: [localhost]

TASK [common : Kubesphere | Get old openldap data] *****************************
skipping: [localhost]

TASK [common : Kubesphere | Migrating openldap data] ***************************
skipping: [localhost]

TASK [common : Kubesphere | Disable old openldap] ******************************
skipping: [localhost]

TASK [common : Kubesphere | Restart openldap] **********************************
skipping: [localhost]

TASK [common : KubeSphere | Restarting ks-account] *****************************
skipping: [localhost]

TASK [common : Kubesphere | Check ha-redis] ************************************
changed: [localhost]

TASK [common : Kubesphere | Getting redis installation files] ******************
skipping: [localhost] => (item=redis-ha)

TASK [common : Kubesphere | Creating manifests] ********************************
skipping: [localhost] => (item={u'name': u'custom-values-redis', u'file': u'custom-values-redis.yaml'})

TASK [common : Kubesphere | Check old redis status] ****************************
skipping: [localhost]

TASK [common : Kubesphere | Delete and backup old redis svc] *******************
skipping: [localhost]

TASK [common : Kubesphere | Deploying redis] ***********************************
skipping: [localhost]

TASK [common : Kubesphere | Getting redis PodIp] *******************************
skipping: [localhost]

TASK [common : Kubesphere | Creating redis migration script] *******************
skipping: [localhost] => (item={u'path': u'/etc/kubesphere', u'file': u'redisMigrate.py'})

TASK [common : Kubesphere | Check redis-ha status] *****************************
skipping: [localhost]

TASK [common : ks-logging | Migrating redis data] ******************************
skipping: [localhost]

TASK [common : Kubesphere | Disable old redis] *********************************
skipping: [localhost]

TASK [common : Kubesphere | Deploying redis] ***********************************

    qcloud
    Run the following commands and post the output:

    kubectl describe pvc -n kubesphere-system openldap-pvc-openldap-0
    kubectl describe pvc -n kubesphere-system redis-pvc

    [root@k8sphere01 ]# kubectl describe pvc -n kubesphere-system openldap-pvc-openldap-0
    Name: openldap-pvc-openldap-0
    Namespace: kubesphere-system
    StorageClass: glusterfs
    Status: Pending
    Volume:

    Labels: app.kubernetes.io/instance=ks-openldap
    app.kubernetes.io/name=openldap-ha
    Annotations: volume.beta.kubernetes.io/storage-provisioner: kubernetes.io/glusterfs
    Finalizers: [kubernetes.io/pvc-protection]
    Capacity:

    Access Modes:

    VolumeMode: Filesystem
    Mounted By: openldap-0
    Events:
      Type     Reason              Age                 From                         Message
      ----     ------              ----                ----                         -------
      Warning  ProvisioningFailed  67s (x46 over 88m)  persistentvolume-controller  Failed to provision volume with StorageClass "glusterfs": failed to create volume: failed to create volume: Failed to allocate new volume: No space
    [root@k8sphere01 ]#

    [root@k8sphere01 ]# kubectl describe pvc -n kubesphere-system redis-pvc
    Name: redis-pvc
    Namespace: kubesphere-system
    StorageClass: glusterfs
    Status: Terminating (lasts 39h)
    Volume: pvc-55214db7-abe8-461a-a122-c37674784d4b
    Labels: <none>
    Annotations: kubectl.kubernetes.io/last-applied-configuration:
    {"apiVersion":"v1","kind":"PersistentVolumeClaim","metadata":{"annotations":{},"name":"redis-pvc","namespace":"kubesphere-system"},"spec":…
    pv.kubernetes.io/bind-completed: yes
    pv.kubernetes.io/bound-by-controller: yes
    volume.beta.kubernetes.io/storage-provisioner: kubernetes.io/glusterfs
    Finalizers: [kubernetes.io/pvc-protection]
    Capacity: 2Gi
    Access Modes: RWO
    VolumeMode: Filesystem
    Mounted By: redis-6fd6c6d6f9-lfmwp
    Events: <none>
    [root@k8sphere01 ]#

    huanggzeK零SK壹 Is this the cause? "Failed to allocate new volume: No space"
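
    If it is, the remaining GlusterFS capacity can be checked on the storage side before retrying; a rough sketch, assuming heketi sits behind the glusterfs StorageClass (adjust to your environment):

    # Free space per device as seen by the provisioner
    heketi-cli topology info

    # Or inspect the pool and volumes directly on a GlusterFS node
    gluster pool list
    gluster volume list
    gluster volume info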

      rayzhou2017 I expanded the storage capacity, and once the out-of-space problem was gone I ran install.sh again. It now gets into the ks-installer phase and even prints the success banner below:
      Start installing monitoring


      task monitoring status is successful
      total: 1 completed:1


      #####################################################

      Welcome to KubeSphere!

      #####################################################

      Console: http://192.168.108.52:30880
      Account: admin
      Password: P@88w0rd

      NOTES:

      1. After logging into the console, please check the
        monitoring status of service components in
        the “Cluster Status”. If the service is not
        ready, please wait patiently. You can start
        to use when all components are ready.
      2. Please modify the default password after login.

      #####################################################

      But there are still several PVC errors, as follows:
      TASK [common : Kubesphere | Check mysql PersistentVolumeClaim] *****************
      fatal: [localhost]: FAILED! => {"changed": true, "cmd": "/usr/local/bin/kubectl get pvc -n kubesphere-system mysql-pvc -o jsonpath='{.status.capacity.storage}'\n", "delta": "0:00:00.575182", "end": "2020-05-13 09:31:09.169599", "msg": "non-zero return code", "rc": 1, "start": "2020-05-13 09:31:08.594417", "stderr": "Error from server (NotFound): persistentvolumeclaims \"mysql-pvc\" not found", "stderr_lines": ["Error from server (NotFound): persistentvolumeclaims \"mysql-pvc\" not found"], "stdout": "", "stdout_lines": []}
      …ignoring

      TASK [common : Kubesphere | Setting mysql db pv size] **************************
      skipping: [localhost]

      TASK [common : Kubesphere | Check redis PersistentVolumeClaim] *****************
      changed: [localhost]

      TASK [common : Kubesphere | Setting redis db pv size] **************************
      ok: [localhost]

      TASK [common : Kubesphere | Check minio PersistentVolumeClaim] *****************
      fatal: [localhost]: FAILED! => {"changed": true, "cmd": "/usr/local/bin/kubectl get pvc -n kubesphere-system minio -o jsonpath='{.status.capacity.storage}'\n", "delta": "0:00:00.589378", "end": "2020-05-13 09:31:10.802323", "msg": "non-zero return code", "rc": 1, "start": "2020-05-13 09:31:10.212945", "stderr": "Error from server (NotFound): persistentvolumeclaims \"minio\" not found", "stderr_lines": ["Error from server (NotFound): persistentvolumeclaims \"minio\" not found"], "stdout": "", "stdout_lines": []}
      …ignoring

      TASK [common : Kubesphere | Setting minio pv size] *****************************
      skipping: [localhost]

      TASK [common : Kubesphere | Check openldap PersistentVolumeClaim] **************
      changed: [localhost]

      TASK [common : Kubesphere | Setting openldap pv size] **************************
      ok: [localhost]

      TASK [common : Kubesphere | Check etcd db PersistentVolumeClaim] ***************
      fatal: [localhost]: FAILED! => {"changed": true, "cmd": "/usr/local/bin/kubectl get pvc -n kubesphere-system etcd-pvc -o jsonpath='{.status.capacity.storage}'\n", "delta": "0:00:00.565141", "end": "2020-05-13 09:31:12.423824", "msg": "non-zero return code", "rc": 1, "start": "2020-05-13 09:31:11.858683", "stderr": "Error from server (NotFound): persistentvolumeclaims \"etcd-pvc\" not found", "stderr_lines": ["Error from server (NotFound): persistentvolumeclaims \"etcd-pvc\" not found"], "stdout": "", "stdout_lines": []}
      …ignoring

      TASK [common : Kubesphere | Setting etcd pv size] ******************************
      skipping: [localhost]

      TASK [common : Kubesphere | Check redis ha PersistentVolumeClaim] **************
      fatal: [localhost]: FAILED! => {"changed": true, "cmd": "/usr/local/bin/kubectl get pvc -n kubesphere-system data-redis-ha-server-0 -o jsonpath='{.status.capacity.storage}'\n", "delta": "0:00:00.573883", "end": "2020-05-13 09:31:13.234416", "msg": "non-zero return code", "rc": 1, "start": "2020-05-13 09:31:12.660533", "stderr": "Error from server (NotFound): persistentvolumeclaims \"data-redis-ha-server-0\" not found", "stderr_lines": ["Error from server (NotFound): persistentvolumeclaims \"data-redis-ha-server-0\" not found"], "stdout": "", "stdout_lines": []}
      …ignoring

      Can these PVCs be created manually? Or is there some other way to fix this?

      Because of these errors, the console at http://192.168.108.52:30880/ cannot be opened.
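
      If manual creation is an option, a claim could in principle be applied by hand; a minimal sketch for mysql-pvc (the same shape would apply to etcd-pvc), assuming the glusterfs StorageClass and an illustrative size rather than the installer's defaults:

      cat <<EOF | kubectl apply -f -
      apiVersion: v1
      kind: PersistentVolumeClaim
      metadata:
        name: mysql-pvc                # analogous for etcd-pvc
        namespace: kubesphere-system
      spec:
        storageClassName: glusterfs
        accessModes:
          - ReadWriteOnce
        resources:
          requests:
            storage: 20Gi              # illustrative size, not the installer default
      EOF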

      The …ignoring tasks can be ignored.
      Check the status of the pods in kubesphere-system.

        Cauchy
        [root@k8sphere01 conf]# kubectl get pod -n kubesphere-system
        NAME                                     READY   STATUS              RESTARTS   AGE
        ks-account-596657f8c6-292zp              0/1     Init:0/2            0          5h35m
        ks-account-6ddfd5bfcb-cnczz              0/1     Init:0/2            0          129m
        ks-apigateway-9d446b8b8-wx9xw            0/1     CrashLoopBackOff    29         129m
        ks-apiserver-7c4c4b49bc-m8tjv            1/1     Running             0          129m
        ks-console-58fd4c4b9c-psbmr              1/1     Running             0          129m
        ks-controller-manager-66f5d7d546-vbp6b   1/1     Running             0          129m
        ks-installer-7d9fb945c7-v25pm            1/1     Running             165        2d
        openldap-0                               0/1     ContainerCreating   0          3d2h
        redis-6fd6c6d6f9-lfmwp                   0/1     ContainerCreating   1          3d2h
        [root@k8sphere01 conf]#

        It is still the PVC problem: your PVCs failed to provision, so the console-related pods never started. This looks like a GlusterFS issue; check the GlusterFS logs.
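
        For example, roughly along these lines (a sketch; paths assume a typical glusterd setup on the storage nodes, and heketi as the provisioner):

        # On a GlusterFS node: daemon status and logs
        systemctl status glusterd
        journalctl -u glusterd --since "2 hours ago"
        tail -n 100 /var/log/glusterfs/glusterd.log

        # If heketi provisions the volumes, its log usually shows the allocation failure
        kubectl get pods -A | grep -i heketi
        kubectl logs <heketi-pod> -n <heketi-namespace>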

          qcloud Is this a freshly installed environment, or are applications already running on it? If the problem showed up on a fresh install, you can delete the corresponding PVs and recreate the ks-installer pod.
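
          For example, using the names from the output earlier in this thread (a sketch; double-check before deleting anything, and only on a fresh install):

          # Delete the stuck claims; a claim stuck in Terminating is usually still mounted,
          # so the pod using it (e.g. redis-6fd6c6d6f9-lfmwp) may need to be deleted first
          kubectl delete pvc redis-pvc openldap-pvc-openldap-0 -n kubesphere-system

          # Remove the matching PV, then recreate the ks-installer pod so it re-runs the playbook
          kubectl delete pv pvc-55214db7-abe8-461a-a122-c37674784d4b
          kubectl delete pod -n kubesphere-system ks-installer-7d9fb945c7-v25pm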

            Forest-L It is a fresh installation. Can ks-installer be run on its own, or does install.sh have to be re-run?

            I built my own NFS cluster. I don't know why the mysql, etcd, and redis PVCs cannot be created, while the two monitoring PVCs are created fine.

            [root@online-aliyun-cn-shenzhen-c-vm-10041 cert]# ll /nfs/k8s/data/
            total 32
            drwxrwxrwx 5 root root 4096 May 15 13:51 ./
            drwxr-xr-x 3 root root 4096 May 13 18:32 ../
            drwxrwxrwx 3 root root 4096 May 15 13:51 kubesphere-monitoring-system-prometheus-k8s-db-prometheus-k8s-0-pvc-c277b5aa-6098-4d67-b22f-e9d0a2e8b355/
            drwxrwxrwx 3 root root 4096 May 15 13:51 kubesphere-monitoring-system-prometheus-k8s-system-db-prometheus-k8s-system-0-pvc-11c1bbba-cdc1-4a9b-91c3-373df8c9646e/
            drwx------ 2 root root 16384 May 13 18:14 lost+found/

            These just never get created, which causes the installation to fail.
            `TASK [common : include_tasks] **************************************************
            skipping: [localhost] => (item={u'ks': u'mysql-pvc', u'op': u'openpitrix-db'})
            skipping: [localhost] => (item={u'ks': u'etcd-pvc', u'op': u'openpitrix-etcd'})

            TASK [common : Getting PersistentVolumeName (mysql)] ***************************
            skipping: [localhost]

            TASK [common : Getting PersistentVolumeSize (mysql)] ***************************
            skipping: [localhost]

            TASK [common : Setting PersistentVolumeName (mysql)] ***************************
            skipping: [localhost]

            TASK [common : Setting PersistentVolumeSize (mysql)] ***************************
            skipping: [localhost]

            TASK [common : Getting PersistentVolumeName (etcd)] ****************************
            skipping: [localhost]

            TASK [common : Getting PersistentVolumeSize (etcd)] ****************************
            skipping: [localhost]

            TASK [common : Setting PersistentVolumeName (etcd)] ****************************
            skipping: [localhost]

            TASK [common : Setting PersistentVolumeSize (etcd)] ****************************
            skipping: [localhost]

            TASK [common : Kubesphere | Check mysql PersistentVolumeClaim] *****************
            fatal: [localhost]: FAILED! => {"changed": true, "cmd": "/usr/local/bin/kubectl get pvc -n kubesphere-system mysql-pvc -o jsonpath='{.status.capacity.storage}'\n", "delta": "0:00:00.409147", "end": "2020-05-15 05:50:27.114121", "msg": "non-zero return code", "rc": 1, "start": "2020-05-15 05:50:26.704974", "stderr": "Error from server (NotFound): persistentvolumeclaims \"mysql-pvc\" not found", "stderr_lines": ["Error from server (NotFound): persistentvolumeclaims \"mysql-pvc\" not found"], "stdout": "", "stdout_lines": []}
            …ignoring

            TASK [common : Kubesphere | Setting mysql db pv size] **************************
            skipping: [localhost]

            TASK [common : Kubesphere | Check redis PersistentVolumeClaim] *****************
            fatal: [localhost]: FAILED! => {"changed": true, "cmd": "/usr/local/bin/kubectl get pvc -n kubesphere-system redis-pvc -o jsonpath='{.status.capacity.storage}'\n", "delta": "0:00:00.420811", "end": "2020-05-15 05:50:27.734230", "msg": "non-zero return code", "rc": 1, "start": "2020-05-15 05:50:27.313419", "stderr": "Error from server (NotFound): persistentvolumeclaims \"redis-pvc\" not found", "stderr_lines": ["Error from server (NotFound): persistentvolumeclaims \"redis-pvc\" not found"], "stdout": "", "stdout_lines": []}
            …ignoring

            TASK [common : Kubesphere | Setting redis db pv size] **************************
            skipping: [localhost]

            TASK [common : Kubesphere | Check minio PersistentVolumeClaim] *****************
            fatal: [localhost]: FAILED! => {"changed": true, "cmd": "/usr/local/bin/kubectl get pvc -n kubesphere-system minio -o jsonpath='{.status.capacity.storage}'\n", "delta": "0:00:00.456821", "end": "2020-05-15 05:50:28.398121", "msg": "non-zero return code", "rc": 1, "start": "2020-05-15 05:50:27.941300", "stderr": "Error from server (NotFound): persistentvolumeclaims \"minio\" not found", "stderr_lines": ["Error from server (NotFound): persistentvolumeclaims \"minio\" not found"], "stdout": "", "stdout_lines": []}
            …ignoring

            TASK [common : Kubesphere | Setting minio pv size] *****************************
            skipping: [localhost]`

            This is my default storage class:
            [root@online-aliyun-cn-shenzhen-c-vm-10041 ]# kubectl get sc
            NAME                             PROVISIONER            AGE
            nfs-storage-master01 (default)   nfs-storage-master01   37h
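
            When a claim never shows up as a directory on the NFS share, the claim's events and the provisioner's own log are usually the quickest things to check; a sketch (the provisioner pod name and namespace are assumptions, adjust to whatever deployment backs nfs-storage-master01):

            # State and events of the claims the installer is waiting for
            kubectl get pvc -n kubesphere-system
            kubectl describe pvc redis-pvc -n kubesphere-system

            # Log of the external NFS provisioner behind the default StorageClass
            kubectl get pods -A | grep -i nfs
            kubectl logs <nfs-provisioner-pod> -n <its-namespace>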

              1 month later

              Same problem here: the redis-ha / redis-pvc is not created.
              Is there a redis-ha.yaml anywhere? Creating it manually would work too.
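
              One caveat if creating it by hand: data-redis-ha-server-0 is the kind of claim a StatefulSet's volumeClaimTemplates normally creates, so the name has to follow the <template-name>-<statefulset-name>-<ordinal> pattern. A minimal sketch (size is illustrative; storageClassName is omitted so the default class is used):

              cat <<EOF | kubectl apply -f -
              apiVersion: v1
              kind: PersistentVolumeClaim
              metadata:
                name: data-redis-ha-server-0    # <claimTemplate>-<statefulset>-<ordinal>
                namespace: kubesphere-system
              spec:
                accessModes:
                  - ReadWriteOnce
                resources:
                  requests:
                    storage: 2Gi                # illustrative size
              EOF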