k8s version: v1.16.3
helm: v2.16.2
我的redis已经创建成功了,为什么连接不了?

[root@localhost kubesphere]# kubectl get pvc -A 
NAMESPACE                      NAME                                               STATUS   VOLUME                                     CAPACITY   ACCESS MODES   STORAGECLASS       AGE
kubesphere-monitoring-system   prometheus-k8s-db-prometheus-k8s-0                 Bound    pvc-7b8bb610-63bc-4f29-93c9-cd2b5d38e008   20Gi       RWO            openebs-hostpath   33m
kubesphere-monitoring-system   prometheus-k8s-system-db-prometheus-k8s-system-0   Bound    pvc-7ede725f-a52b-448a-a984-43be40d67ba0   20Gi       RWO            openebs-hostpath   33m
kubesphere-system              redis-pvc                                          Bound    pvc-e01ae137-2635-4143-ada4-859b64921541   2Gi        RWO            openebs-hostpath   34m
[root@localhost kubesphere]# kubectl get pv -A 
NAME                                       CAPACITY   ACCESS MODES   RECLAIM POLICY   STATUS   CLAIM                                                                           STORAGECLASS       REASON   AGE
pvc-7b8bb610-63bc-4f29-93c9-cd2b5d38e008   20Gi       RWO            Delete           Bound    kubesphere-monitoring-system/prometheus-k8s-db-prometheus-k8s-0                 openebs-hostpath            34m
pvc-7ede725f-a52b-448a-a984-43be40d67ba0   20Gi       RWO            Delete           Bound    kubesphere-monitoring-system/prometheus-k8s-system-db-prometheus-k8s-system-0   openebs-hostpath            34m
pvc-e01ae137-2635-4143-ada4-859b64921541   2Gi        RWO            Delete           Bound    kubesphere-system/redis-pvc                                                     openebs-hostpath            35m
[root@localhost test]# docker logs 5a016038d546 | grep fatal
2020-08-18T06:28:26Z INFO     : shell-operator v1.0.0-beta.5
2020-08-18T06:28:26Z INFO     : Use temporary dir: /tmp/shell-operator
2020-08-18T06:28:26Z INFO     : Initialize hooks manager ...
2020-08-18T06:28:26Z INFO     : Search and load hooks ...
2020-08-18T06:28:26Z INFO     : HTTP SERVER Listening on 0.0.0.0:9115
2020-08-18T06:28:26Z INFO     : Load hook config from '/hooks/kubesphere/installRunner.py'
2020-08-18T06:28:27Z INFO     : Initializing schedule manager ...
2020-08-18T06:28:27Z INFO     : KUBE Init Kubernetes client
2020-08-18T06:28:27Z INFO     : KUBE-INIT Kubernetes client is configured successfully
2020-08-18T06:28:27Z INFO     : MAIN: run main loop
2020-08-18T06:28:27Z INFO     : MAIN: add onStartup tasks
2020-08-18T06:28:27Z INFO     : Running schedule manager ...
2020-08-18T06:28:27Z INFO     : QUEUE add all HookRun@OnStartup
2020-08-18T06:28:27Z INFO     : MSTOR Create new metric shell_operator_live_ticks
2020-08-18T06:28:27Z INFO     : MSTOR Create new metric shell_operator_tasks_queue_length
2020-08-18T06:28:27Z INFO     : GVR for kind 'ConfigMap' is /v1, Resource=configmaps
2020-08-18T06:28:27Z INFO     : EVENT Kube event '3bf9649b-bf05-47c9-bf31-9d5f751782cf'
2020-08-18T06:28:27Z INFO     : QUEUE add TASK_HOOK_RUN@KUBE_EVENTS kubesphere/installRunner.py
2020-08-18T06:28:30Z INFO     : TASK_RUN HookRun@KUBE_EVENTS kubesphere/installRunner.py
2020-08-18T06:28:30Z INFO     : Running hook 'kubesphere/installRunner.py' binding 'KUBE_EVENTS' ...
fatal: [localhost]: FAILED! => {"changed": true, "cmd": "/usr/local/bin/kubectl get pvc -n kubesphere-system mysql-pvc -o jsonpath='{.status.capacity.storage}'\n", "delta": "0:00:00.714864", "end": "2020-08-18 06:29:11.658119", "msg": "non-zero return code", "rc": 1, "start": "2020-08-18 06:29:10.943255", "stderr": "Error from server (NotFound): persistentvolumeclaims \"mysql-pvc\" not found", "stderr_lines": ["Error from server (NotFound): persistentvolumeclaims \"mysql-pvc\" not found"], "stdout": "", "stdout_lines": []}
fatal: [localhost]: FAILED! => {"changed": true, "cmd": "/usr/local/bin/kubectl get pvc -n kubesphere-system redis-pvc -o jsonpath='{.status.capacity.storage}'\n", "delta": "0:00:00.716028", "end": "2020-08-18 06:29:12.660974", "msg": "non-zero return code", "rc": 1, "start": "2020-08-18 06:29:11.944946", "stderr": "Error from server (NotFound): persistentvolumeclaims \"redis-pvc\" not found", "stderr_lines": ["Error from server (NotFound): persistentvolumeclaims \"redis-pvc\" not found"], "stdout": "", "stdout_lines": []}
fatal: [localhost]: FAILED! => {"changed": true, "cmd": "/usr/local/bin/kubectl get pvc -n kubesphere-system minio -o jsonpath='{.status.capacity.storage}'\n", "delta": "0:00:00.705411", "end": "2020-08-18 06:29:13.612514", "msg": "non-zero return code", "rc": 1, "start": "2020-08-18 06:29:12.907103", "stderr": "Error from server (NotFound): persistentvolumeclaims \"minio\" not found", "stderr_lines": ["Error from server (NotFound): persistentvolumeclaims \"minio\" not found"], "stdout": "", "stdout_lines": []}
fatal: [localhost]: FAILED! => {"changed": true, "cmd": "/usr/local/bin/kubectl get pvc -n kubesphere-system openldap-pvc-openldap-0 -o jsonpath='{.status.capacity.storage}'\n", "delta": "0:00:00.708329", "end": "2020-08-18 06:29:14.555406", "msg": "non-zero return code", "rc": 1, "start": "2020-08-18 06:29:13.847077", "stderr": "Error from server (NotFound): persistentvolumeclaims \"openldap-pvc-openldap-0\" not found", "stderr_lines": ["Error from server (NotFound): persistentvolumeclaims \"openldap-pvc-openldap-0\" not found"], "stdout": "", "stdout_lines": []}
fatal: [localhost]: FAILED! => {"changed": true, "cmd": "/usr/local/bin/kubectl get pvc -n kubesphere-system etcd-pvc -o jsonpath='{.status.capacity.storage}'\n", "delta": "0:00:00.717584", "end": "2020-08-18 06:29:15.511885", "msg": "non-zero return code", "rc": 1, "start": "2020-08-18 06:29:14.794301", "stderr": "Error from server (NotFound): persistentvolumeclaims \"etcd-pvc\" not found", "stderr_lines": ["Error from server (NotFound): persistentvolumeclaims \"etcd-pvc\" not found"], "stdout": "", "stdout_lines": []}
fatal: [localhost]: FAILED! => {"changed": true, "cmd": "/usr/local/bin/kubectl get pvc -n kubesphere-system data-redis-ha-server-0 -o jsonpath='{.status.capacity.storage}'\n", "delta": "0:00:00.703361", "end": "2020-08-18 06:29:16.451343", "msg": "non-zero return code", "rc": 1, "start": "2020-08-18 06:29:15.747982", "stderr": "Error from server (NotFound): persistentvolumeclaims \"data-redis-ha-server-0\" not found", "stderr_lines": ["Error from server (NotFound): persistentvolumeclaims \"data-redis-ha-server-0\" not found"], "stdout": "", "stdout_lines": []}
fatal: [localhost]: FAILED! => {"msg": "The conditional check 'console_version.stdout and console_version.stdout != ks_version' failed. The error was: error while evaluating conditional (console_version.stdout and console_version.stdout != ks_version): 'dict object' has no attribute 'stdout'\n\nThe error appears to be in '/kubesphere/installer/roles/ks-core/prepare/tasks/ks-stop.yaml': line 1, column 3, but may\nbe elsewhere in the file depending on the exact syntax problem.\n\nThe offending line appears to be:\n\n\n- name: ks-upgrade | disable ks-apiserver\n  ^ here\n"}
fatal: [localhost]: FAILED! => {"msg": "The conditional check 'console_version.stdout and console_version.stdout != ks_version' failed. The error was: error while evaluating conditional (console_version.stdout and console_version.stdout != ks_version): 'dict object' has no attribute 'stdout'\n\nThe error appears to be in '/kubesphere/installer/roles/ks-core/prepare/tasks/ks-stop.yaml': line 6, column 3, but may\nbe elsewhere in the file depending on the exact syntax problem.\n\nThe offending line appears to be:\n\n\n- name: ks-upgrade | disable ks-apigateway\n  ^ here\n"}
fatal: [localhost]: FAILED! => {"msg": "The conditional check 'console_version.stdout and console_version.stdout != ks_version' failed. The error was: error while evaluating conditional (console_version.stdout and console_version.stdout != ks_version): 'dict object' has no attribute 'stdout'\n\nThe error appears to be in '/kubesphere/installer/roles/ks-core/prepare/tasks/ks-stop.yaml': line 11, column 3, but may\nbe elsewhere in the file depending on the exact syntax problem.\n\nThe offending line appears to be:\n\n\n- name: ks-upgrade | disable ks-account\n  ^ here\n"}
fatal: [localhost]: FAILED! => {"msg": "The conditional check 'console_version.stdout and console_version.stdout != ks_version' failed. The error was: error while evaluating conditional (console_version.stdout and console_version.stdout != ks_version): 'dict object' has no attribute 'stdout'\n\nThe error appears to be in '/kubesphere/installer/roles/ks-core/prepare/tasks/ks-stop.yaml': line 16, column 3, but may\nbe elsewhere in the file depending on the exact syntax problem.\n\nThe offending line appears to be:\n\n\n- name: ks-upgrade | disable ks-console\n  ^ here\n"}
fatal: [localhost]: FAILED! => {"msg": "The conditional check 'console_version.stdout and console_version.stdout != ks_version' failed. The error was: error while evaluating conditional (console_version.stdout and console_version.stdout != ks_version): 'dict object' has no attribute 'stdout'\n\nThe error appears to be in '/kubesphere/installer/roles/ks-core/prepare/tasks/ks-stop.yaml': line 21, column 3, but may\nbe elsewhere in the file depending on the exact syntax problem.\n\nThe offending line appears to be:\n\n\n- name: ks-upgrade | disable ks-controller-manager\n  ^ here\n"}
fatal: [localhost]: FAILED! => {"msg": "The conditional check 'console_version.stdout and console_version.stdout == ks_version' failed. The error was: error while evaluating conditional (console_version.stdout and console_version.stdout == ks_version): 'dict object' has no attribute 'stdout'\n\nThe error appears to be in '/kubesphere/installer/roles/ks-core/prepare/tasks/ks-restart.yaml': line 1, column 3, but may\nbe elsewhere in the file depending on the exact syntax problem.\n\nThe offending line appears to be:\n\n\n- name: ks-upgrade | restart ks-apiserver\n  ^ here\n"}
fatal: [localhost]: FAILED! => {"msg": "The conditional check 'console_version.stdout and console_version.stdout == ks_version' failed. The error was: error while evaluating conditional (console_version.stdout and console_version.stdout == ks_version): 'dict object' has no attribute 'stdout'\n\nThe error appears to be in '/kubesphere/installer/roles/ks-core/prepare/tasks/ks-restart.yaml': line 6, column 3, but may\nbe elsewhere in the file depending on the exact syntax problem.\n\nThe offending line appears to be:\n\n\n- name: ks-upgrade | restart ks-apigateway\n  ^ here\n"}
fatal: [localhost]: FAILED! => {"msg": "The conditional check 'console_version.stdout and console_version.stdout == ks_version' failed. The error was: error while evaluating conditional (console_version.stdout and console_version.stdout == ks_version): 'dict object' has no attribute 'stdout'\n\nThe error appears to be in '/kubesphere/installer/roles/ks-core/prepare/tasks/ks-restart.yaml': line 11, column 3, but may\nbe elsewhere in the file depending on the exact syntax problem.\n\nThe offending line appears to be:\n\n\n- name: ks-upgrade | restart ks-account\n  ^ here\n"}
fatal: [localhost]: FAILED! => {"msg": "The conditional check 'console_version.stdout and console_version.stdout == ks_version' failed. The error was: error while evaluating conditional (console_version.stdout and console_version.stdout == ks_version): 'dict object' has no attribute 'stdout'\n\nThe error appears to be in '/kubesphere/installer/roles/ks-core/prepare/tasks/ks-restart.yaml': line 16, column 3, but may\nbe elsewhere in the file depending on the exact syntax problem.\n\nThe offending line appears to be:\n\n\n- name: ks-upgrade | restart ks-console\n  ^ here\n"}
fatal: [localhost]: FAILED! => {"msg": "The conditional check 'console_version.stdout and console_version.stdout == ks_version' failed. The error was: error while evaluating conditional (console_version.stdout and console_version.stdout == ks_version): 'dict object' has no attribute 'stdout'\n\nThe error appears to be in '/kubesphere/installer/roles/ks-core/prepare/tasks/ks-restart.yaml': line 21, column 3, but may\nbe elsewhere in the file depending on the exact syntax problem.\n\nThe offending line appears to be:\n\n\n- name: ks-upgrade | restart ks-controller-manager\n  ^ here\n"}
fatal: [localhost]: FAILED! => {"changed": true, "cmd": "/usr/local/bin/kubectl delete cm -n kubesphere-system ks-router-config\n", "delta": "0:00:00.732516", "end": "2020-08-18 06:29:56.354221", "msg": "non-zero return code", "rc": 1, "start": "2020-08-18 06:29:55.621705", "stderr": "Error from server (NotFound): configmaps \"ks-router-config\" not found", "stderr_lines": ["Error from server (NotFound): configmaps \"ks-router-config\" not found"], "stdout": "", "stdout_lines": []}

ks-apigateway连接不上redis

[root@localhost kubesphere]# kubectl -n kubesphere-system logs -l app=ks-apigateway
[DEV NOTICE] Registered directive 'authenticate' before 'jwt'
[DEV NOTICE] Registered directive 'authentication' before 'jwt'
[DEV NOTICE] Registered directive 'swagger' before 'jwt'
2020/08/18 07:07:13 [INFO][cache:0xc00052b180] Started certificate maintenance routine
Activating privacy features... done.
E0818 07:07:18.399814       1 redis.go:51] unable to reach redis hostdial tcp: i/o timeout
2020/08/18 07:07:18 dial tcp: i/o timeout
    luckyship 更改标题为「unable to reach redis hostdial tcp: i/o timeout 无法连接到redis」

    yunkunrao 最小化安装,在已有k8s的环境上

    [root@localhost kubesphere]# kubectl get nodes
    NAME            STATUS   ROLES    AGE   VERSION
    193.160.57.29   Ready    master   39d   v1.16.3
    193.160.57.30   Ready    <none>   35d   v1.16.3
    193.160.57.31   Ready    <none>   35d   v1.16.3

    有大佬帮忙看一下吗。redis的服务是正常的,就是连不上

    [root@localhost test]# kubectl -n kubesphere-system logs -l app=redis
    1:C 20 Aug 2020 05:12:28.698 # oO0OoO0OoO0Oo Redis is starting oO0OoO0OoO0Oo
    1:C 20 Aug 2020 05:12:28.698 # Redis version=5.0.5, bits=64, commit=00000000, modified=0, pid=1, just started
    1:C 20 Aug 2020 05:12:28.698 # Warning: no config file specified, using the default config. In order to specify a config file use redis-server /path/to/redis.conf
    1:M 20 Aug 2020 05:12:28.700 * Running mode=standalone, port=6379.
    1:M 20 Aug 2020 05:12:28.700 # WARNING: The TCP backlog setting of 511 cannot be enforced because /proc/sys/net/core/somaxconn is set to the lower value of 128.
    1:M 20 Aug 2020 05:12:28.700 # Server initialized
    1:M 20 Aug 2020 05:12:28.700 # WARNING you have Transparent Huge Pages (THP) support enabled in your kernel. This will create latency and memory usage issues with Redis. To fix this issue run the command 'echo never > /sys/kernel/mm/transparent_hugepage/enabled' as root, and add it to your /etc/rc.local in order to retain the setting after a reboot. Redis must be restarted after THP is disabled.
    1:M 20 Aug 2020 05:12:28.700 * Ready to accept connections
    [root@localhost test]# kubectl -n kubesphere-system logs -l app=ks-apigateway
    [DEV NOTICE] Registered directive 'authenticate' before 'jwt'
    [DEV NOTICE] Registered directive 'authentication' before 'jwt'
    [DEV NOTICE] Registered directive 'swagger' before 'jwt'
    Activating privacy features... done.
    2020/08/20 08:47:03 [INFO][cache:0xc0001c50e0] Started certificate maintenance routine
    E0820 08:47:08.326876       1 redis.go:51] unable to reach redis hostdial tcp: i/o timeout
    2020/08/20 08:47:08 dial tcp: i/o timeout

    另外出现ks-account卡住不动的情况,参考https://kubesphere.com.cn/forum/d/1600-ks-account-init-1-2-helm-2-16-3-k8s-1-17-3
    已经解决,目前环境已成功起来

    3 个月 后
    1 年 后