Kubernetes version information

Installed with KK (KubeKey); Kubernetes version v1.23.17.
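
For reference, the same version information can be read back from the cluster itself; a minimal sketch using standard kubectl commands (output omitted):

```bash
# Client and API server versions
kubectl version

# Kubernetes version, OS image, kernel and container runtime of each node
kubectl get nodes -o wide
```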

What is the problem

The nodelocaldns pod on node5 (nodelocaldns-4qntd in kube-system) keeps crash-looping: the node-cache container exits with code 1 immediately after starting and is now in CrashLoopBackOff after 94 restarts. Full pod YAML:

apiVersion: v1
kind: Pod
metadata:
  annotations:
    prometheus.io/port: "9253"
    prometheus.io/scrape: "true"
  creationTimestamp: "2024-09-24T16:44:03Z"
  generateName: nodelocaldns-
  labels:
    controller-revision-hash: 7b967f7c4d
    k8s-app: nodelocaldns
    pod-template-generation: "1"
  name: nodelocaldns-4qntd
  namespace: kube-system
  ownerReferences:
  - apiVersion: apps/v1
    blockOwnerDeletion: true
    controller: true
    kind: DaemonSet
    name: nodelocaldns
    uid: 9900711d-51bd-498b-9a76-be803c99efc9
  resourceVersion: "55191"
  uid: 113b848f-e422-45de-833e-ad69aa68a111
spec:
  affinity:
    nodeAffinity:
      requiredDuringSchedulingIgnoredDuringExecution:
        nodeSelectorTerms:
        - matchFields:
          - key: metadata.name
            operator: In
            values:
            - node5
  containers:
  - args:
    - -localip
    - 169.254.25.10
    - -conf
    - /etc/coredns/Corefile
    - -upstreamsvc
    - coredns
    image: harbor.samic.local/kubesphereio/k8s-dns-node-cache:1.22.20
    imagePullPolicy: IfNotPresent
    livenessProbe:
      failureThreshold: 10
      httpGet:
        host: 169.254.25.10
        path: /health
        port: 9254
        scheme: HTTP
      periodSeconds: 10
      successThreshold: 1
      timeoutSeconds: 5
    name: node-cache
    ports:
    - containerPort: 53
      hostPort: 53
      name: dns
      protocol: UDP
    - containerPort: 53
      hostPort: 53
      name: dns-tcp
      protocol: TCP
    - containerPort: 9253
      hostPort: 9253
      name: metrics
      protocol: TCP
    readinessProbe:
      failureThreshold: 10
      httpGet:
        host: 169.254.25.10
        path: /health
        port: 9254
        scheme: HTTP
      periodSeconds: 10
      successThreshold: 1
      timeoutSeconds: 5
    resources:
      limits:
        memory: 200Mi
      requests:
        cpu: 100m
        memory: 70Mi
    securityContext:
      privileged: true
    terminationMessagePath: /dev/termination-log
    terminationMessagePolicy: File
    volumeMounts:
    - mountPath: /etc/coredns
      name: config-volume
    - mountPath: /run/xtables.lock
      name: xtables-lock
    - mountPath: /var/run/secrets/kubernetes.io/serviceaccount
      name: kube-api-access-47t8g
      readOnly: true
  dnsPolicy: Default
  enableServiceLinks: true
  hostNetwork: true
  nodeName: node5
  nodeSelector:
    kubernetes.io/os: linux
  preemptionPolicy: PreemptLowerPriority
  priority: 2000000000
  priorityClassName: system-cluster-critical
  restartPolicy: Always
  schedulerName: default-scheduler
  securityContext: {}
  serviceAccount: nodelocaldns
  serviceAccountName: nodelocaldns
  terminationGracePeriodSeconds: 0
  tolerations:
  - effect: NoSchedule
    operator: Exists
  - effect: NoExecute
    operator: Exists
  - key: CriticalAddonsOnly
    operator: Exists
  - effect: NoExecute
    key: node.kubernetes.io/not-ready
    operator: Exists
  - effect: NoExecute
    key: node.kubernetes.io/unreachable
    operator: Exists
  - effect: NoSchedule
    key: node.kubernetes.io/disk-pressure
    operator: Exists
  - effect: NoSchedule
    key: node.kubernetes.io/memory-pressure
    operator: Exists
  - effect: NoSchedule
    key: node.kubernetes.io/pid-pressure
    operator: Exists
  - effect: NoSchedule
    key: node.kubernetes.io/unschedulable
    operator: Exists
  - effect: NoSchedule
    key: node.kubernetes.io/network-unavailable
    operator: Exists
  volumes:
  - configMap:
      defaultMode: 420
      items:
      - key: Corefile
        path: Corefile
      name: nodelocaldns
    name: config-volume
  - hostPath:
      path: /run/xtables.lock
      type: FileOrCreate
    name: xtables-lock
  - name: kube-api-access-47t8g
    projected:
      defaultMode: 420
      sources:
      - serviceAccountToken:
          expirationSeconds: 3607
          path: token
      - configMap:
          items:
          - key: ca.crt
            path: ca.crt
          name: kube-root-ca.crt
      - downwardAPI:
          items:
          - fieldRef:
              apiVersion: v1
              fieldPath: metadata.namespace
            path: namespace
status:
  conditions:
  - lastProbeTime: null
    lastTransitionTime: "2024-09-24T16:44:05Z"
    status: "True"
    type: Initialized
  - lastProbeTime: null
    lastTransitionTime: "2024-09-24T16:44:05Z"
    message: 'containers with unready status: [node-cache]'
    reason: ContainersNotReady
    status: "False"
    type: Ready
  - lastProbeTime: null
    lastTransitionTime: "2024-09-24T16:44:05Z"
    message: 'containers with unready status: [node-cache]'
    reason: ContainersNotReady
    status: "False"
    type: ContainersReady
  - lastProbeTime: null
    lastTransitionTime: "2024-09-24T16:44:03Z"
    status: "True"
    type: PodScheduled
  containerStatuses:
  - containerID: docker://533a4798007876b6ee6836eeebab3350906dd90f37d8067d459521114a8bebf8
    image: harbor.samic.local/kubesphereio/k8s-dns-node-cache:1.22.20
    imageID: docker-pullable://harbor.samic.local/kubesphereio/k8s-dns-node-cache@sha256:c48a69743a5a7e7d5f8ecd66d47ea565195859edebde21e8a61529bd9efe9c22
    lastState:
      terminated:
        containerID: docker://533a4798007876b6ee6836eeebab3350906dd90f37d8067d459521114a8bebf8
        exitCode: 1
        finishedAt: "2024-09-25T00:21:04Z"
        reason: Error
        startedAt: "2024-09-25T00:21:04Z"
    name: node-cache
    ready: false
    restartCount: 94
    started: false
    state:
      waiting:
        message: back-off 5m0s restarting failed container=node-cache pod=nodelocaldns-4qntd_kube-system(113b848f-e422-45de-833e-ad69aa68a111)
        reason: CrashLoopBackOff
  hostIP: 172.29.48.40
  phase: Running
  podIP: 172.29.48.40
  podIPs:
  - ip: 172.29.48.40
  qosClass: Burstable
  startTime: "2024-09-24T16:44:05Z"
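
Since node-cache exits with code 1 right after starting, the actual error should be in the container logs rather than in the probe results. A minimal sketch of the usual commands for collecting more detail (pod, container and ConfigMap names are taken from the YAML above; output omitted here):

```bash
# Logs of the previous, crashed node-cache container
kubectl -n kube-system logs nodelocaldns-4qntd -c node-cache --previous

# Events recorded for the pod (probe failures, restart back-off, ...)
kubectl -n kube-system describe pod nodelocaldns-4qntd

# The Corefile the container is started with (-conf /etc/coredns/Corefile)
kubectl -n kube-system get configmap nodelocaldns -o yaml
```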