kubectl describe pod jaeger-collector-84477ffd9c-6kn8k -n istio-system
OS information: private cloud hosts, centos-release-7-7.1908.0.el7.centos.x86_64, 8C/16G.
Kubernetes version: v1.20.4, multi-node (1 master, 3 worker nodes).
KubeSphere version: v3.1.1, online installation via KubeKey, with all components enabled.
The problem:
jaeger-collector-84477ffd9c-8qgg4 never comes up and keeps going into CrashLoopBackOff:
NAMESPACE NAME READY STATUS RESTARTS AGE
istio-system istio-ingressgateway-5f47b55746-rzh25 0/1 Running 0 33m
istio-system jaeger-collector-84477ffd9c-8qgg4 0/1 CrashLoopBackOff 6 12m
istio-system jaeger-operator-5866fb6775-trqzv 1/1 Running 0 13m
istio-system jaeger-query-7c5d6b455d-9cd2r 2/2 Running 0 12m
istio-system kiali-78b7c6b6d7-mqnfp 1/1 Running 0 12m
istio-system kiali-operator-7cc57dff58-7qdmm 1/1 Running 0 13m
kube-system calico-kube-controllers-8545b68dd4-prcnv 1/1 Running 0 40m
kube-system calico-node-2jq47 1/1 Running 0 40m
kube-system calico-node-9mvkr 1/1 Running 0 40m
kube-system calico-node-b7gpm 1/1 Running 0 40m
kube-system calico-node-rfx6f 1/1 Running 0 40m
kube-system coredns-7f87749d6c-55lsw 1/1 Running 0 40m
kube-system coredns-7f87749d6c-pdtfn 1/1 Running 0 40m
kube-system kube-apiserver-k8s-master 1/1 Running 0 40m
kube-system kube-controller-manager-k8s-master 1/1 Running 0 40m
kube-system kube-proxy-867s6 1/1 Running 0 40m
kube-system kube-proxy-9s4wn 1/1 Running 0 40m
kube-system kube-proxy-9wxw6 1/1 Running 0 40m
kube-system kube-proxy-w59xp 1/1 Running 0 40m
kube-system kube-scheduler-k8s-master 1/1 Running 0 40m
kube-system metrics-server-6958566fd5-b4jd2 1/1 Running 0 39m
kube-system nodelocaldns-6p2wg 1/1 Running 0 40m
kube-system nodelocaldns-j2vlb 1/1 Running 0 40m
kube-system nodelocaldns-kfr92 1/1 Running 0 40m
kube-system nodelocaldns-zgmvb 1/1 Running 0 40m
kube-system openebs-localpv-provisioner-5cddd6cbfc-8x8m4 1/1 Running 0 40m
kube-system snapshot-controller-0 1/1 Running 0 37m
kubesphere-controls-system default-http-backend-5d7c68c698-hv9fv 1/1 Running 0 35m
kubesphere-controls-system kubectl-admin-68dc989bf8-jcgsk 1/1 Running 0 12m
kubesphere-devops-system ks-jenkins-8957dc768-mkcbr 1/1 Running 0 33m
kubesphere-devops-system s2ioperator-0 1/1 Running 0 34m
kubesphere-logging-system elasticsearch-logging-data-0 1/1 Running 0 35m
kubesphere-logging-system elasticsearch-logging-data-1 1/1 Running 0 35m
kubesphere-logging-system elasticsearch-logging-discovery-0 1/1 Running 0 35m
kubesphere-logging-system fluent-bit-dsg47 1/1 Running 0 8m47s
kubesphere-logging-system fluent-bit-fnzjj 1/1 Running 0 8m47s
kubesphere-logging-system fluent-bit-gdsv4 1/1 Running 0 8m47s
kubesphere-logging-system fluent-bit-vqbnh 1/1 Running 0 8m47s
kubesphere-logging-system fluentbit-operator-5fff5f4584-cl5w8 1/1 Running 0 35m
kubesphere-logging-system ks-events-exporter-54859ff5fd-phjzh 2/2 Running 0 7m44s
kubesphere-logging-system ks-events-operator-976c46847-n42mz 1/1 Running 0 34m
kubesphere-logging-system ks-events-ruler-7c6bcc5dc6-nnlwq 2/2 Running 0 7m38s
kubesphere-logging-system ks-events-ruler-7c6bcc5dc6-rw2gx 2/2 Running 0 7m38s
kubesphere-logging-system kube-auditing-operator-55cfdb8dfd-nq8rv 1/1 Running 0 34m
kubesphere-logging-system kube-auditing-webhook-deploy-f4db5bf65-qtvdd 1/1 Running 0 8m38s
kubesphere-logging-system kube-auditing-webhook-deploy-f4db5bf65-vqvfx 1/1 Running 0 8m38s
kubesphere-logging-system logsidecar-injector-deploy-5d846c6697-mvk98 2/2 Running 0 34m
kubesphere-logging-system logsidecar-injector-deploy-5d846c6697-wlnfp 2/2 Running 0 34m
kubesphere-monitoring-system alertmanager-main-0 2/2 Running 0 8m29s
kubesphere-monitoring-system alertmanager-main-1 2/2 Running 0 8m29s
kubesphere-monitoring-system alertmanager-main-2 2/2 Running 0 8m29s
kubesphere-monitoring-system kube-state-metrics-577b8b4cf-n8kwt 3/3 Running 0 8m22s
kubesphere-monitoring-system node-exporter-4gf9z 2/2 Running 0 8m48s
kubesphere-monitoring-system node-exporter-cm9bd 2/2 Running 0 8m48s
kubesphere-monitoring-system node-exporter-kq5rt 2/2 Running 0 8m48s
kubesphere-monitoring-system node-exporter-vlh54 2/2 Running 0 8m48s
kubesphere-monitoring-system notification-manager-deployment-97dfccc89-7kz6r 1/1 Running 0 7m17s
kubesphere-monitoring-system notification-manager-deployment-97dfccc89-j4jg4 1/1 Running 0 7m17s
kubesphere-monitoring-system notification-manager-operator-59cbfc566b-bnccx 2/2 Running 0 7m32s
kubesphere-monitoring-system prometheus-k8s-0 3/3 Running 1 8m27s
kubesphere-monitoring-system prometheus-k8s-1 3/3 Running 1 8m27s
kubesphere-monitoring-system prometheus-operator-8f97cb8c6-ktjlb 2/2 Running 0 8m42s
kubesphere-monitoring-system thanos-ruler-kubesphere-0 2/2 Running 0 8m29s
kubesphere-monitoring-system thanos-ruler-kubesphere-1 2/2 Running 0 8m29s
kubesphere-system ks-apiserver-558b8f9bff-bx2nd 1/1 Running 0 12m
kubesphere-system ks-console-58b965dbf5-2hdhm 1/1 Running 0 34m
kubesphere-system ks-controller-manager-5487fc4784-dnkg2 1/1 Running 0 12m
kubesphere-system ks-installer-769994b6ff-dmhmk 1/1 Running 0 40m
kubesphere-system minio-d98b7c954-vlnqz 1/1 Running 0 36m
kubesphere-system openldap-0 1/1 Running 1 36m
kubesphere-system openpitrix-import-job-rzfld 0/1 Completed 0 34m
kubesphere-system redis-644c849c9d-xf5q7 1/1 Running 0 36m
[root@k8s-node2 ~]# kubectl describe jaeger-collector-84477ffd9c-8qgg4 -n istio-system
error: the server doesn't have a resource type "jaeger-collector-84477ffd9c-8qgg4"
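(That command fails because the resource type was omitted, so kubectl parses the pod name as a resource type. With pod spelled out it works:)

[root@k8s-node2 ~]# kubectl describe pod jaeger-collector-84477ffd9c-8qgg4 -n istio-system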
[root@k8s-master ~]# kubectl logs jaeger-collector-84477ffd9c-8qgg4 -n istio-system
2021/11/04 06:46:07 maxprocs: Leaving GOMAXPROCS=8: CPU quota undefined
{"level":"info","ts":1636008367.6498055,"caller":"flags/service.go:115","msg":"Mounting metrics handler on admin server","route":"/metrics"}
{"level":"info","ts":1636008367.6508489,"caller":"flags/admin.go:115","msg":"Mounting health check on admin server","route":"/"}
{"level":"info","ts":1636008367.6510231,"caller":"flags/admin.go:121","msg":"Starting admin HTTP server","http-port":14269}
{"level":"info","ts":1636008367.651074,"caller":"flags/admin.go:107","msg":"Admin server started","http-port":14269,"health-status":"unavailable"}
{"level":"info","ts":1636008367.6901472,"caller":"config/config.go:163","msg":"Elasticsearch detected","version":6}
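The log cuts off right after "Elasticsearch detected", before the health status ever turns healthy, which suggests the collector dies while initializing its Elasticsearch storage backend. A few commands that usually narrow down a CrashLoopBackOff like this (pod name taken from the listing above):

# Logs of the previous, crashed container instance, in case the error
# only shows up at the very end of a run
kubectl logs jaeger-collector-84477ffd9c-8qgg4 -n istio-system --previous

# Exit code, last state, and recent events (OOMKilled, failed probes, image issues)
kubectl describe pod jaeger-collector-84477ffd9c-8qgg4 -n istio-system

# The Jaeger custom resource that jaeger-operator renders the collector from
# (assuming the jaeger CRD is installed, which the running jaeger-operator implies)
kubectl get jaeger -n istio-system -o yaml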
Contents of config-sample.yaml:
apiVersion: kubekey.kubesphere.io/v1alpha1
kind: Cluster
metadata:
  name: sample
spec:
  hosts:
  - {name: k8s-master, address: 192.168.8.151, internalAddress: 192.168.8.151, user: root, password: gzys2021@}
  - {name: k8s-node1, address: 192.168.8.234, internalAddress: 192.168.8.234, user: root, password: gzys2021@}
  - {name: k8s-node2, address: 192.168.8.197, internalAddress: 192.168.8.197, user: root, password: gzys2021@}
  - {name: k8s-node3, address: 192.168.8.248, internalAddress: 192.168.8.248, user: root, password: gzys2021@}
  roleGroups:
    etcd:
    - k8s-master
    master:
    - k8s-master
    worker:
    - k8s-node1
    - k8s-node2
    - k8s-node3
  controlPlaneEndpoint:
    domain: lb.kubesphere.local
    address: ""
    port: 6443
  kubernetes:
    version: v1.20.4
    imageRepo: kubesphere
    clusterName: cluster.local
  network:
    plugin: calico
    kubePodsCIDR: 10.233.64.0/18
    kubeServiceCIDR: 10.233.0.0/18
  registry:
    registryMirrors: []
    insecureRegistries: []
  addons: []

---
apiVersion: installer.kubesphere.io/v1alpha1
kind: ClusterConfiguration
metadata:
  name: ks-installer
  namespace: kubesphere-system
  labels:
    version: v3.1.1
spec:
  persistence:
    storageClass: ""
  authentication:
    jwtSecret: ""
  zone: ""
  local_registry: ""
  etcd:
    monitoring: true
    endpointIps: localhost
    port: 2379
    tlsEnable: true
  common:
    redis:
      enabled: true
    redisVolumSize: 2Gi
    openldap:
      enabled: true
    openldapVolumeSize: 2Gi
    minioVolumeSize: 20Gi
    monitoring:
      endpoint: http://prometheus-operated.kubesphere-monitoring-system.svc:9090
    es:
      elasticsearchMasterVolumeSize: 4Gi
      elasticsearchDataVolumeSize: 20Gi
      logMaxAge: 7
      elkPrefix: logstash
      basicAuth:
        enabled: false
        username: ""
        password: ""
      externalElasticsearchUrl: ""
      externalElasticsearchPort: ""
  console:
    enableMultiLogin: true
    port: 30880
  alerting:
    enabled: true
    thanosruler:
      replicas: 1
      resources: {}
  auditing:
    enabled: true
  devops:
    enabled: true
    jenkinsMemoryLim: 2Gi
    jenkinsMemoryReq: 1500Mi
    jenkinsVolumeSize: 8Gi
    jenkinsJavaOpts_Xms: 512m
    jenkinsJavaOpts_Xmx: 512m
    jenkinsJavaOpts_MaxRAM: 2g
  events:
    enabled: true
    ruler:
      enabled: true
      replicas: 2
  logging:
    enabled: true
    logsidecar:
      enabled: true
      replicas: 2
  metrics_server:
    enabled: true
  monitoring:
    storageClass: ""
    prometheusMemoryRequest: 400Mi
    prometheusVolumeSize: 20Gi
  multicluster:
    clusterRole: none
  network:
    networkpolicy:
      enabled: true
    ippool:
      type: none
    topology:
      type: none
  openpitrix:
    store:
      enabled: true
  servicemesh:
    enabled: true
  kubeedge:
    enabled: false
    cloudCore:
      nodeSelector: {"node-role.kubernetes.io/worker": ""}
      tolerations: []
      cloudhubPort: "10000"
      cloudhubQuicPort: "10001"
      cloudhubHttpsPort: "10002"
      cloudstreamPort: "10003"
      tunnelPort: "10004"
      cloudHub:
        advertiseAddress:
        - ""
        nodeLimit: "100"
      service:
        cloudhubNodePort: "30000"
        cloudhubQuicNodePort: "30001"
        cloudhubHttpsNodePort: "30002"
        cloudstreamNodePort: "30003"
        tunnelNodePort: "30004"
    edgeWatcher:
      nodeSelector: {"node-role.kubernetes.io/worker": ""}
      tolerations: []
      edgeWatcherAgent:
        nodeSelector: {"node-role.kubernetes.io/worker": ""}
        tolerations: []
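With servicemesh and logging both enabled, the Jaeger collector in a KubeSphere v3.1.x install is normally backed by the Elasticsearch cluster in kubesphere-logging-system, which would line up with the "Elasticsearch detected, version 6" log line above. A quick sanity check of that backend (the service name elasticsearch-logging-data is an assumption based on the pod names in the listing; adjust if your cluster names it differently):

# Forward the Elasticsearch HTTP port locally (runs in the background)
kubectl -n kubesphere-logging-system port-forward svc/elasticsearch-logging-data 9200:9200 &

# Cluster health should come back green or yellow; red or no response
# would explain the collector crash-looping during storage initialization
curl -s "http://127.0.0.1:9200/_cluster/health?pretty"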