Could someone please take a look? A pod in kubeedge keeps going into CrashLoopBackOff.
The logs are as follows:
[root@localhost kk]# kubectl logs cloudcore-54b8c9cf95-6mf47 -n kubeedge
W0430 09:40:49.542959 1 validation.go:165] TLSTunnelPrivateKeyFile does not exist in /etc/kubeedge/certs/server.key, will load from secret
W0430 09:40:49.543086 1 validation.go:168] TLSTunnelCertFile does not exist in /etc/kubeedge/certs/server.crt, will load from secret
W0430 09:40:49.543106 1 validation.go:171] TLSTunnelCAFile does not exist in /etc/kubeedge/ca/rootCA.crt, will load from secret
I0430 09:40:49.543144 1 server.go:64] Version: v1.6.1
W0430 09:40:49.543194 1 client_config.go:608] Neither --kubeconfig nor --master was specified. Using the inClusterConfig. This might not work.
F0430 09:40:49.545945 1 config.go:34] AdvertiseAddress must be specified!
goroutine 1 [running]:
github.com/kubeedge/kubeedge/vendor/k8s.io/klog/v2.stacks(0xc00000e001, 0xc0001381c0, 0x50, 0xdf)
	/go/src/github.com/kubeedge/kubeedge/vendor/k8s.io/klog/v2/klog.go:996 +0xb8
github.com/kubeedge/kubeedge/vendor/k8s.io/klog/v2.(*loggingT).output(0x2938e20, 0xc000000003, 0x0, 0x0, 0xc00016a230, 0x28679ad, 0x9, 0x22, 0x1761600)
	/go/src/github.com/kubeedge/kubeedge/vendor/k8s.io/klog/v2/klog.go:945 +0x19d
github.com/kubeedge/kubeedge/vendor/k8s.io/klog/v2.(*loggingT).printDepth(0x2938e20, 0x3, 0x0, 0x0, 0x1, 0xc000161a08, 0x1, 0x1)
	/go/src/github.com/kubeedge/kubeedge/vendor/k8s.io/klog/v2/klog.go:718 +0x15e
github.com/kubeedge/kubeedge/vendor/k8s.io/klog/v2.(*loggingT).print(...)
	/go/src/github.com/kubeedge/kubeedge/vendor/k8s.io/klog/v2/klog.go:703
github.com/kubeedge/kubeedge/vendor/k8s.io/klog/v2.Fatal(...)
	/go/src/github.com/kubeedge/kubeedge/vendor/k8s.io/klog/v2/klog.go:1436
github.com/kubeedge/kubeedge/cloud/pkg/cloudhub/config.InitConfigure.func1()
	/go/src/github.com/kubeedge/kubeedge/cloud/pkg/cloudhub/config/config.go:34 +0x80b
sync.(*Once).doSlow(0x29648c0, 0xc000161ba8)
	/usr/local/go/src/sync/once.go:66 +0xec
sync.(*Once).Do(...)
	/usr/local/go/src/sync/once.go:57
github.com/kubeedge/kubeedge/cloud/pkg/cloudhub/config.InitConfigure(0xc000130790)
	/go/src/github.com/kubeedge/kubeedge/cloud/pkg/cloudhub/config/config.go:32 +0x69
github.com/kubeedge/kubeedge/cloud/pkg/cloudhub.Register(0xc000130790)
	/go/src/github.com/kubeedge/kubeedge/cloud/pkg/cloudhub/cloudhub.go:45 +0x2b
github.com/kubeedge/kubeedge/cloud/cmd/cloudcore/app.registerModules(0xc00051e7c0)
	/go/src/github.com/kubeedge/kubeedge/cloud/cmd/cloudcore/app/server.go:109 +0x36
github.com/kubeedge/kubeedge/cloud/cmd/cloudcore/app.NewCloudCoreCommand.func1(0xc00047e580, 0x2964820, 0x0, 0x0)
	/go/src/github.com/kubeedge/kubeedge/cloud/cmd/cloudcore/app/server.go:67 +0x2a3
github.com/kubeedge/kubeedge/vendor/github.com/spf13/cobra.(*Command).execute(0xc00047e580, 0xc00003a1b0, 0x0, 0x0, 0xc00047e580, 0xc00003a1b0)
	/go/src/github.com/kubeedge/kubeedge/vendor/github.com/spf13/cobra/command.go:846 +0x29d
github.com/kubeedge/kubeedge/vendor/github.com/spf13/cobra.(*Command).ExecuteC(0xc00047e580, 0xc0000440b8, 0x0, 0x0)
	/go/src/github.com/kubeedge/kubeedge/vendor/github.com/spf13/cobra/command.go:950 +0x349
github.com/kubeedge/kubeedge/vendor/github.com/spf13/cobra.(*Command).Execute(...)
	/go/src/github.com/kubeedge/kubeedge/vendor/github.com/spf13/cobra/command.go:887
main.main()
	/go/src/github.com/kubeedge/kubeedge/cloud/cmd/cloudcore/cloudcore.go:16 +0x64

goroutine 6 [chan receive]:
github.com/kubeedge/kubeedge/vendor/k8s.io/klog/v2.(*loggingT).flushDaemon(0x2938e20)
	/go/src/github.com/kubeedge/kubeedge/vendor/k8s.io/klog/v2/klog.go:1131 +0x8b
created by github.com/kubeedge/kubeedge/vendor/k8s.io/klog/v2.init.0
	/go/src/github.com/kubeedge/kubeedge/vendor/k8s.io/klog/v2/klog.go:416 +0xd6

goroutine 7 [select]:
github.com/kubeedge/kubeedge/cloud/pkg/router/rule.do(0xc000044d80)
	/go/src/github.com/kubeedge/kubeedge/cloud/pkg/router/rule/statushandler.go:37 +0x1ee
created by github.com/kubeedge/kubeedge/cloud/pkg/router/rule.init.1
	/go/src/github.com/kubeedge/kubeedge/cloud/pkg/router/rule/statushandler.go:26 +0x63

goroutine 8 [select]:
github.com/kubeedge/kubeedge/vendor/k8s.io/apimachinery/pkg/util/wait.BackoffUntil(0x199ab68, 0x1b61d20, 0xc00050c000, 0x1, 0xc000044120)
	/go/src/github.com/kubeedge/kubeedge/vendor/k8s.io/apimachinery/pkg/util/wait/wait.go:167 +0x13f
github.com/kubeedge/kubeedge/vendor/k8s.io/apimachinery/pkg/util/wait.JitterUntil(0x199ab68, 0x12a05f200, 0x0, 0x1, 0xc000044120)
	/go/src/github.com/kubeedge/kubeedge/vendor/k8s.io/apimachinery/pkg/util/wait/wait.go:133 +0x98
github.com/kubeedge/kubeedge/vendor/k8s.io/apimachinery/pkg/util/wait.Until(...)
	/go/src/github.com/kubeedge/kubeedge/vendor/k8s.io/apimachinery/pkg/util/wait/wait.go:90
github.com/kubeedge/kubeedge/vendor/k8s.io/apimachinery/pkg/util/wait.Forever(0x199ab68, 0x12a05f200)
	/go/src/github.com/kubeedge/kubeedge/vendor/k8s.io/apimachinery/pkg/util/wait/wait.go:81 +0x4f
created by github.com/kubeedge/kubeedge/vendor/k8s.io/component-base/logs.InitLogs
	/go/src/github.com/kubeedge/kubeedge/vendor/k8s.io/component-base/logs/logs.go:58 +0x8a
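The warnings about the TLS files and --kubeconfig look harmless (the certs fall back to the Secret, and in-cluster config is expected for a pod). The real problem seems to be the fatal line from config.go:34: "AdvertiseAddress must be specified!". If I read it right, cloudcore exits immediately on startup because modules.cloudHub.advertiseAddress is empty in its configuration, and kubelet then restarts it in a loop. To check, I would inspect the cloudcore ConfigMap that is mounted at /etc/kubeedge/config (name taken from the describe output below):

[root@localhost kk]# kubectl -n kubeedge get configmap cloudcore -o yaml | grep -n -A 2 advertiseAddress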
[root@localhost kk]# kubectl describe pod cloudcore-54b8c9cf95-6mf47 -n kubeedge
Name:         cloudcore-54b8c9cf95-6mf47
Namespace:    kubeedge
Priority:     0
Node:         k8s-node2/192.168.149.177
Start Time:   Thu, 29 Apr 2021 17:51:38 +0800
Labels:       k8s-app=kubeedge
              kubeedge=cloudcore
              pod-template-hash=54b8c9cf95
Annotations:  cni.projectcalico.org/podIP: 10.233.76.12/32
              cni.projectcalico.org/podIPs: 10.233.76.12/32
Status:       Running
IP:           10.233.76.12
IPs:
  IP:  10.233.76.12
Controlled By:  ReplicaSet/cloudcore-54b8c9cf95
Containers:
  cloudcore:
    Container ID:   docker://c04b095bc010965c3cfdbf25881e9da3fecda9da57ac1ddf4e18fbef75397eb9
    Image:          registry.cn-beijing.aliyuncs.com/kubesphereio/cloudcore:v1.6.1
    Image ID:       docker-pullable://registry.cn-beijing.aliyuncs.com/kubesphereio/cloudcore@sha256:6d744e1e1d17323a3e3d17d68c37b969ab6d4c6d3665d7a3051db53b2d277fc1
    Ports:          10000/TCP, 10001/TCP, 10002/TCP, 10003/TCP, 10004/TCP
    Host Ports:     0/TCP, 0/TCP, 0/TCP, 0/TCP, 0/TCP
    State:          Waiting
      Reason:       CrashLoopBackOff
    Last State:     Terminated
      Reason:       Error
      Exit Code:    255
      Started:      Fri, 30 Apr 2021 09:45:53 +0800
      Finished:     Fri, 30 Apr 2021 09:45:53 +0800
    Ready:          False
    Restart Count:  191
    Limits:
      cpu:     200m
      memory:  1Gi
    Requests:
      cpu:     100m
      memory:  512Mi
    Environment:  <none>
    Mounts:
      /etc/kubeedge from certs (rw)
      /etc/kubeedge/config from conf (rw)
      /etc/localtime from host-time (rw)
      /var/lib/kubeedge from sock (rw)
      /var/run/secrets/kubernetes.io/serviceaccount from cloudcore-token-wv4x4 (ro)
Conditions:
  Type              Status
  Initialized       True
  Ready             False
  ContainersReady   False
  PodScheduled      True
Volumes:
  conf:
    Type:      ConfigMap (a volume populated by a ConfigMap)
    Name:      cloudcore
    Optional:  false
  certs:
    Type:        Secret (a volume populated by a Secret)
    SecretName:  cloudcore
    Optional:    false
  sock:
    Type:          HostPath (bare host directory volume)
    Path:          /var/lib/kubeedge
    HostPathType:  DirectoryOrCreate
  host-time:
    Type:          HostPath (bare host directory volume)
    Path:          /etc/localtime
    HostPathType:
  cloudcore-token-wv4x4:
    Type:        Secret (a volume populated by a Secret)
    SecretName:  cloudcore-token-wv4x4
    Optional:    false
QoS Class:       Burstable
Node-Selectors:  <none>
Tolerations:     node.kubernetes.io/not-ready:NoExecute for 300s
                 node.kubernetes.io/unreachable:NoExecute for 300s
Events:
  Type     Reason   Age                   From                Message
  ----     ------   ----                  ----                -------
  Warning  BackOff  41s (x4422 over 15h)  kubelet, k8s-node2  Back-off restarting failed container
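If advertiseAddress is indeed missing, my plan is to add it to the cloudcore ConfigMap and restart the pod. A minimal sketch of the relevant section (assuming the stock KubeEdge v1.6 cloudcore config layout; 192.168.149.177 is just the node IP from the describe output above, and the right value should be whatever address the edge nodes use to reach cloudcore):

modules:
  cloudHub:
    advertiseAddress:
    - 192.168.149.177

[root@localhost kk]# kubectl -n kubeedge edit configmap cloudcore
[root@localhost kk]# kubectl -n kubeedge delete pod cloudcore-54b8c9cf95-6mf47

Deleting the pod should make the ReplicaSet recreate it with the updated ConfigMap. Does this look like the right fix, or am I missing something?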