root@master:/# kubectl describe pod openldap-0 -n kubesphere-system
Name:           openldap-0
Namespace:      kubesphere-system
Priority:       0
Node:           master/192.168.137.231
Start Time:     Fri, 14 Aug 2020 09:55:34 +0800
Labels:         app.kubernetes.io/instance=ks-openldap
                app.kubernetes.io/name=openldap-ha
                controller-revision-hash=openldap-76c8cfbbc7
                statefulset.kubernetes.io/pod-name=openldap-0
Annotations:    <none>
Status:         Running
IP:             10.233.70.12
IPs:
  IP:           10.233.70.12
Controlled By:  StatefulSet/openldap
Containers:
  openldap-ha:
    Container ID:   docker://53144ea168d8028af1a86e18ebd0c511050ac758fcb2114bde5c27ced4828310
    Image:          192.168.137.231:5000/osixia/openldap:1.3.0
    Image ID:       docker-pullable://192.168.137.231:5000/osixia/openldap@sha256:901c6c8f3a79aac020458184f00db60d15d54a002c9f643965466e3a4e2953c8
    Port:           389/TCP
    Host Port:      0/TCP
    Args:
      --copy-service
      --loglevel=warning
    State:          Waiting
      Reason:       CrashLoopBackOff
    Last State:     Terminated
      Reason:       Error
      Exit Code:    1
      Started:      Fri, 14 Aug 2020 11:21:33 +0800
      Finished:     Fri, 14 Aug 2020 11:21:35 +0800
    Ready:          False
    Restart Count:  21
    Liveness:       tcp-socket :389 delay=30s timeout=1s period=15s #success=1 #failure=3
    Readiness:      tcp-socket :389 delay=30s timeout=1s period=15s #success=1 #failure=3
    Environment:
      LDAP_ORGANISATION:               kubesphere
      LDAP_DOMAIN:                     kubesphere.io
      LDAP_CONFIG_PASSWORD:            admin
      LDAP_ADMIN_PASSWORD:             admin
      LDAP_REPLICATION:                false
      LDAP_TLS:                        false
      LDAP_REMOVE_CONFIG_AFTER_SETUP:  true
      MY_POD_NAME:                     openldap-0 (v1:metadata.name)
      HOSTNAME:                        $(MY_POD_NAME).openldap
    Mounts:
      /etc/ldap/slapd.d from openldap-pvc (rw,path="ldap-config")
      /var/lib/ldap from openldap-pvc (rw,path="ldap-data")
      /var/run/secrets/kubernetes.io/serviceaccount from default-token-n4k6m (ro)
Conditions:
  Type              Status
  Initialized       True
  Ready             False
  ContainersReady   False
  PodScheduled      True
Volumes:
  openldap-pvc:
    Type:       PersistentVolumeClaim (a reference to a PersistentVolumeClaim in the same namespace)
    ClaimName:  openldap-pvc-openldap-0
    ReadOnly:   false
  default-token-n4k6m:
    Type:        Secret (a volume populated by a Secret)
    SecretName:  default-token-n4k6m
    Optional:    false
QoS Class:       BestEffort
Node-Selectors:  <none>
Tolerations:     CriticalAddonsOnly
                 node-role.kubernetes.io/master:NoSchedule
                 node.kubernetes.io/not-ready:NoExecute for 300s
                 node.kubernetes.io/unreachable:NoExecute for 300s
Events:
  Type     Reason   Age                   From             Message
  ----     ------   ----                  ----             -------
  Normal   Created  42m (x14 over 87m)    kubelet, master  Created container openldap-ha
  Normal   Pulled   17m (x19 over 87m)    kubelet, master  Container image "192.168.137.231:5000/osixia/openldap:1.3.0" already present on machine
  Warning  BackOff  2m6s (x361 over 86m)  kubelet, master  Back-off restarting failed container
root@master:/#
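
The describe output only shows that the openldap-ha container exits with code 1 a couple of seconds after starting, so the kubelet keeps restarting it (CrashLoopBackOff). A reasonable next step (not captured in the transcript above; same pod and namespace are assumed) is to read the logs of the previous, crashed container instance, which usually contain the slapd error that explains the exit:

kubectl logs openldap-0 -n kubesphere-system --previous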