监控数据丢失，求大神帮忙排查一下。


kubesphere-monitoring-system 命名空间Pod状态

# kubectl get po -n kubesphere-monitoring-system
NAME                                   READY   STATUS    RESTARTS   AGE
kube-state-metrics-566cdbcb48-dpjfh    4/4     Running   0          49m
node-exporter-4654q                    2/2     Running   0          127m
node-exporter-4z87m                    2/2     Running   0          127m
node-exporter-7bldz                    2/2     Running   0          127m
node-exporter-8r925                    2/2     Running   0          127m
node-exporter-95rp4                    2/2     Running   0          126m
node-exporter-g5mkr                    2/2     Running   0          127m
node-exporter-qfr9s                    2/2     Running   0          127m
node-exporter-rrbt6                    2/2     Running   0          126m
node-exporter-stjhp                    2/2     Running   0          127m
node-exporter-x6cjw                    2/2     Running   0          127m
node-exporter-z6gmf                    2/2     Running   0          126m
node-exporter-zqs9r                    2/2     Running   0          127m
prometheus-k8s-0                       3/3     Running   0          126m
prometheus-k8s-1                       3/3     Running   0          127m
prometheus-k8s-system-0                3/3     Running   0          127m
prometheus-k8s-system-1                3/3     Running   0          126m
prometheus-operator-6b97679cfd-tdl9x   1/1     Running   0          102m
权限
kube-state-metrics
   # kubectl get clusterrole kube-state-metrics -oyaml
apiVersion: rbac.authorization.k8s.io/v1
kind: ClusterRole
metadata:
  annotations:
    kubectl.kubernetes.io/last-applied-configuration: |
      {"apiVersion":"rbac.authorization.k8s.io/v1","kind":"ClusterRole","metadata":{"annotations":{},"name":"kube-state-metrics"},"rules":[{"apiGroups":[""],"resources":["nodes","pods","services","resourcequotas","replicationcontrollers","limitranges","persistentvolumeclaims","persistentvolumes","namespaces","endpoints"],"verbs":["list","watch"]},{"apiGroups":["extensions"],"resources":["daemonsets","deployments","replicasets"],"verbs":["list","watch"]},{"apiGroups":["apps"],"resources":["statefulsets"],"verbs":["list","watch"]},{"apiGroups":["batch"],"resources":["cronjobs","jobs"],"verbs":["list","watch"]},{"apiGroups":["autoscaling"],"resources":["horizontalpodautoscalers"],"verbs":["list","watch"]}]}
  creationTimestamp: "2021-03-02T09:25:56Z"
  name: kube-state-metrics
  resourceVersion: "13431231"
  selfLink: /apis/rbac.authorization.k8s.io/v1/clusterroles/kube-state-metrics
  uid: 798320ab-5258-4675-af35-d2b9b71cfe8b
rules:
- apiGroups:
  - ""
  resources:
  - nodes
  - pods
  - services
  - resourcequotas
  - replicationcontrollers
  - limitranges
  - persistentvolumeclaims
  - persistentvolumes
  - namespaces
  - endpoints
  verbs:
  - list
  - watch
- apiGroups:
  - extensions
  resources:
  - daemonsets
  - deployments
  - replicasets
  verbs:
  - list
  - watch
- apiGroups:
  - apps
  resources:
  - statefulsets
  verbs:
  - list
  - watch
- apiGroups:
  - batch
  resources:
  - cronjobs
  - jobs
  verbs:
  - list
  - watch
- apiGroups:
  - autoscaling
  resources:
  - horizontalpodautoscalers
  verbs:
  - list
  - watch
  # kubectl get sa -n kubesphere-monitoring-system kube-state-metrics -oyaml
apiVersion: v1
kind: ServiceAccount
metadata:
  annotations:
    kubectl.kubernetes.io/last-applied-configuration: |
      {"apiVersion":"v1","kind":"ServiceAccount","metadata":{"annotations":{},"name":"kube-state-metrics","namespace":"kubesphere-monitoring-system"}}
  creationTimestamp: "2021-02-02T05:57:36Z"
  name: kube-state-metrics
  namespace: kubesphere-monitoring-system
  resourceVersion: "3009"
  selfLink: /api/v1/namespaces/kubesphere-monitoring-system/serviceaccounts/kube-state-metrics
  uid: 9410579e-e3b9-424e-9421-6b05276fca4a
secrets:
- name: kube-state-metrics-token-xdpt6
kubesphere-kube-state-metrics
# kubectl get clusterrole kubesphere-kube-state-metrics -oyaml
apiVersion: rbac.authorization.k8s.io/v1
kind: ClusterRole
metadata:
  annotations:
    kubectl.kubernetes.io/last-applied-configuration: |
      {"apiVersion":"rbac.authorization.k8s.io/v1","kind":"ClusterRole","metadata":{"annotations":{},"creationTimestamp":"2021-02-05T08:00:52Z","name":"kubesphere-kube-state-metrics","resourceVersion":"1334116","selfLink":"/apis/rbac.authorization.k8s.io/v1/clusterroles/kubesphere-kube-state-metrics","uid":"028c6a0f-a360-49fc-87e0-7645d06e8013"},"rules":[{"apiGroups":[""],"resources":["configmaps","secrets","nodes","pods","services","resourcequotas","replicationcontrollers","limitranges","persistentvolumeclaims","persistentvolumes","namespaces","endpoints"],"verbs":["list","watch"]},{"apiGroups":["extensions"],"resources":["daemonsets","deployments","replicasets","ingresses"],"verbs":["list","watch"]},{"apiGroups":["apps"],"resources":["statefulsets","daemonsets","deployments","replicasets"],"verbs":["list","watch"]},{"apiGroups":["batch"],"resources":["cronjobs","jobs"],"verbs":["list","watch"]},{"apiGroups":["autoscaling"],"resources":["horizontalpodautoscalers"],"verbs":["list","watch"]},{"apiGroups":["authentication.k8s.io"],"resources":["tokenreviews"],"verbs":["create"]},{"apiGroups":["authorization.k8s.io"],"resources":["subjectaccessreviews"],"verbs":["create"]},{"apiGroups":["policy"],"resources":["poddisruptionbudgets"],"verbs":["list","watch"]}]}
  creationTimestamp: "2021-03-02T08:37:56Z"
  name: kubesphere-kube-state-metrics
  resourceVersion: "13413426"
  selfLink: /apis/rbac.authorization.k8s.io/v1/clusterroles/kubesphere-kube-state-metrics
  uid: 61c4799c-77e4-40d6-992d-393b559d6d28
rules:
- apiGroups:
  - ""
  resources:
  - configmaps
  - secrets
  - nodes
  - pods
  - services
  - resourcequotas
  - replicationcontrollers
  - limitranges
  - persistentvolumeclaims
  - persistentvolumes
  - namespaces
  - endpoints
  verbs:
  - list
  - watch
- apiGroups:
  - extensions
  resources:
  - daemonsets
  - deployments
  - replicasets
  - ingresses
  verbs:
  - list
  - watch
- apiGroups:
  - apps
  resources:
  - statefulsets
  - daemonsets
  - deployments
  - replicasets
  verbs:
  - list
  - watch
- apiGroups:
  - batch
  resources:
  - cronjobs
  - jobs
  verbs:
  - list
  - watch
- apiGroups:
  - autoscaling
  resources:
  - horizontalpodautoscalers
  verbs:
  - list
  - watch
- apiGroups:
  - authentication.k8s.io
  resources:
  - tokenreviews
  verbs:
  - create
- apiGroups:
  - authorization.k8s.io
  resources:
  - subjectaccessreviews
  verbs:
  - create
- apiGroups:
  - policy
  resources:
  - poddisruptionbudgets
  verbs:
  - list
  - watch
kube-state-metrics Pod日志
kube-state-metrics 容器
（首行日志被截断，完整报错与下方相同）certificatesigningrequests.certificates.k8s.io is forbidden: User "system:serviceaccount:kubesphere-monitoring-system:kube-state-metrics" cannot list resource "certificatesigningrequests" in API group "certificates.k8s.io" at the cluster scope

E0302 09:52:09.922367 1 reflector.go:125] k8s.io/kube-state-metrics/internal/store/builder.go:295: Failed to list *v1.StorageClass: storageclasses.storage.k8s.io is forbidden: User "system:serviceaccount:kubesphere-monitoring-system:kube-state-metrics" cannot list resource "storageclasses" in API group "storage.k8s.io" at the cluster scope

E0302 09:52:09.923528 1 reflector.go:125] k8s.io/kube-state-metrics/internal/store/builder.go:295: Failed to list *v1beta1.CertificateSigningRequest: certificatesigningrequests.certificates.k8s.io is forbidden: User "system:serviceaccount:kubesphere-monitoring-system:kube-state-metrics" cannot list resource "certificatesigningrequests" in API group "certificates.k8s.io" at the cluster scope

E0302 09:52:10.924590 1 reflector.go:125] k8s.io/kube-state-metrics/internal/store/builder.go:295: Failed to list *v1.StorageClass: storageclasses.storage.k8s.io is forbidden: User "system:serviceaccount:kubesphere-monitoring-system:kube-state-metrics" cannot list resource "storageclasses" in API group "storage.k8s.io" at the cluster scope

E0302 09:52:10.925354 1 reflector.go:125] k8s.io/kube-state-metrics/internal/store/builder.go:295: Failed to list *v1beta1.CertificateSigningRequest: certificatesigningrequests.certificates.k8s.io is forbidden: User "system:serviceaccount:kubesphere-monitoring-system:kube-state-metrics" cannot list resource "certificatesigningrequests" in API group "certificates.k8s.io" at the cluster scope

E0302 09:52:11.926679 1 reflector.go:125] k8s.io/kube-state-metrics/internal/store/builder.go:295: Failed to list *v1.StorageClass: storageclasses.storage.k8s.io is forbidden: User "system:serviceaccount:kubesphere-monitoring-system:kube-state-metrics" cannot list resource "storageclasses" in API group "storage.k8s.io" at the cluster scope

E0302 09:52:11.927484 1 reflector.go:125] k8s.io/kube-state-metrics/internal/store/builder.go:295: Failed to list *v1beta1.CertificateSigningRequest: certificatesigningrequests.certificates.k8s.io is forbidden: User "system:serviceaccount:kubesphere-monitoring-system:kube-state-metrics" cannot list resource "certificatesigningrequests" in API group "certificates.k8s.io" at the cluster scope

E0302 09:52:12.928718 1 reflector.go:125] k8s.io/kube-state-metrics/internal/store/builder.go:295: Failed to list *v1.StorageClass: storageclasses.storage.k8s.io is forbidden: User "system:serviceaccount:kubesphere-monitoring-system:kube-state-metrics" cannot list resource "storageclasses" in API group "storage.k8s.io" at the cluster scope

E0302 09:52:12.928964 1 reflector.go:125] k8s.io/kube-state-metrics/internal/store/builder.go:295: Failed to list *v1beta1.CertificateSigningRequest: certificatesigningrequests.certificates.k8s.io is forbidden: User "system:serviceaccount:kubesphere-monitoring-system:kube-state-metrics" cannot list resource "certificatesigningrequests" in API group "certificates.k8s.io" at the cluster scope

E0302 09:52:13.930564 1 reflector.go:125] k8s.io/kube-state-metrics/internal/store/builder.go:295: Failed to list *v1.StorageClass: storageclasses.storage.k8s.io is forbidden: User "system:serviceaccount:kubesphere-monitoring-system:kube-state-metrics" cannot list resource "storageclasses" in API group "storage.k8s.io" at the cluster scope

E0302 09:52:13.931462 1 reflector.go:125] k8s.io/kube-state-metrics/internal/store/builder.go:295: Failed to list *v1beta1.CertificateSigningRequest: certificatesigningrequests.certificates.k8s.io is forbidden: User "system:serviceaccount:kubesphere-monitoring-system:kube-state-metrics" cannot list resource "certificatesigningrequests" in API group "certificates.k8s.io" at the cluster scope

E0302 09:52:14.932361 1 reflector.go:125] k8s.io/kube-state-metrics/internal/store/builder.go:295: Failed to list *v1.StorageClass: storageclasses.storage.k8s.io is forbidden: User "system:serviceaccount:kubesphere-monitoring-system:kube-state-metrics" cannot list resource "storageclasses" in API group "storage.k8s.io" at the cluster scope

E0302 09:52:14.933034 1 reflector.go:125] k8s.io/kube-state-metrics/internal/store/builder.go:295: Failed to list *v1beta1.CertificateSigningRequest: certificatesigningrequests.certificates.k8s.io is forbidden: User "system:serviceaccount:kubesphere-monitoring-system:kube-state-metrics" cannot list resource "certificatesigningrequests" in API group "certificates.k8s.io" at the cluster scope

E0302 09:52:15.934590 1 reflector.go:125] k8s.io/kube-state-metrics/internal/store/builder.go:295: Failed to list *v1.StorageClass: storageclasses.storage.k8s.io is forbidden: User "system:serviceaccount:kubesphere-monitoring-system:kube-state-metrics" cannot list resource "storageclasses" in API group "storage.k8s.io" at the cluster scope

E0302 09:52:15.935161 1 reflector.go:125] k8s.io/kube-state-metrics/internal/store/builder.go:295: Failed to list *v1beta1.CertificateSigningRequest: certificatesigningrequests.certificates.k8s.io is forbidden: User "system:serviceaccount:kubesphere-monitoring-system:kube-state-metrics" cannot list resource "certificatesigningrequests" in API group "certificates.k8s.io" at the cluster scope

E0302 09:52:16.936960 1 reflector.go:125] k8s.io/kube-state-metrics/internal/store/builder.go:295: Failed to list *v1.StorageClass: storageclasses.storage.k8s.io is forbidden: User "system:serviceaccount:kubesphere-monitoring-system:kube-state-metrics" cannot list resource "storageclasses" in API group "storage.k8s.io" at the cluster scope

E0302 09:52:16.937503 1 reflector.go:125] k8s.io/kube-state-metrics/internal/store/builder.go:295: Failed to list *v1beta1.CertificateSigningRequest: certificatesigningrequests.certificates.k8s.io is forbidden: User "system:serviceaccount:kubesphere-monitoring-system:kube-state-metrics" cannot list resource "certificatesigningrequests" in API group "certificates.k8s.io" at the cluster scope

E0302 09:52:17.939474 1 reflector.go:125] k8s.io/kube-state-metrics/internal/store/builder.go:295: Failed to list *v1.StorageClass: storageclasses.storage.k8s.io is forbidden: User "system:serviceaccount:kubesphere-monitoring-system:kube-state-metrics" cannot list resource "storageclasses" in API group "storage.k8s.io" at the cluster scope

E0302 09:52:17.940176 1 reflector.go:125] k8s.io/kube-state-metrics/internal/store/builder.go:295: Failed to list *v1beta1.CertificateSigningRequest: certificatesigningrequests.certificates.k8s.io is forbidden: User "system:serviceaccount:kubesphere-monitoring-system:kube-state-metrics" cannot list resource "certificatesigningrequests" in API group "certificates.k8s.io" at the cluster scope

E0302 09:52:18.942081 1 reflector.go:125] k8s.io/kube-state-metrics/internal/store/builder.go:295: Failed to list *v1.StorageClass: storageclasses.storage.k8s.io is forbidden: User "system:serviceaccount:kubesphere-monitoring-system:kube-state-metrics" cannot list resource "storageclasses" in API group "storage.k8s.io" at the cluster scope

E0302 09:52:18.942212 1 reflector.go:125] k8s.io/kube-state-metrics/internal/store/builder.go:295: Failed to list *v1beta1.CertificateSigningRequest: certificatesigningrequests.certificates.k8s.io is forbidden: User "system:serviceaccount:kubesphere-monitoring-system:kube-state-metrics" cannot list resource "certificatesigningrequests" in API group "certificates.k8s.io" at the cluster scope

E0302 09:52:19.944160 1 reflector.go:125] k8s.io/kube-state-metrics/internal/store/builder.go:295: Failed to list *v1.StorageClass: storageclasses.storage.k8s.io is forbidden: User "system:serviceaccount:kubesphere-monitoring-system:kube-state-metrics" cannot list resource "storageclasses" in API group "storage.k8s.io" at the cluster scope

E0302 09:52:19.945120 1 reflector.go:125] k8s.io/kube-state-metrics/internal/store/builder.go:295: Failed to list *v1beta1.CertificateSigningRequest: certificatesigningrequests.certificates.k8s.io is forbidden: User "system:serviceaccount:kubesphere-monitoring-system:kube-state-metrics" cannot list resource "certificatesigningrequests" in API group "certificates.k8s.io" at the cluster scope

E0302 09:52:20.945936 1 reflector.go:125] k8s.io/kube-state-metrics/internal/store/builder.go:295: Failed to list *v1.StorageClass: storageclasses.storage.k8s.io is forbidden: User "system:serviceaccount:kubesphere-monitoring-system:kube-state-metrics" cannot list resource "storageclasses" in API group "storage.k8s.io" at the cluster scope

E0302 09:52:20.946927 1 reflector.go:125] k8s.io/kube-state-metrics/internal/store/builder.go:295: Failed to list *v1beta1.CertificateSigningRequest: certificatesigningrequests.certificates.k8s.io is forbidden: User "system:serviceaccount:kubesphere-monitoring-system:kube-state-metrics" cannot list resource "certificatesigningrequests" in API group "certificates.k8s.io" at the cluster scope
addon-resizer 容器
ERROR: logging before flag.Parse: E0302 09:50:16.778782 1 nanny_lib.go:110] the server could not find the requested resource

ERROR: logging before flag.Parse: I0302 09:50:26.787220 1 nanny_lib.go:108] Resources are not within the expected limits, updating the deployment. Actual: {Limits:map[] Requests:map[cpu:{i:{value:10 scale:-3} d:{Dec:} s:10m Format:DecimalSI} memory:{i:{value:157286400 scale:0} d:{Dec:} s:150Mi Format:BinarySI}]} Expected: {Limits:map[memory:{i:{value:0 scale:0} d:{Dec:0xc42040c420} s: Format:BinarySI} cpu:{i:{value:54000000 scale:-9} d:{Dec:} s: Format:DecimalSI}] Requests:map[memory:{i:{value:0 scale:0} d:{Dec:0xc42040c420} s: Format:BinarySI} cpu:{i:{value:54000000 scale:-9} d:{Dec:} s: Format:DecimalSI}]}

ERROR: logging before flag.Parse: E0302 09:50:26.788609 1 nanny_lib.go:110] the server could not find the requested resource

ERROR: logging before flag.Parse: I0302 09:50:36.797324 1 nanny_lib.go:108] Resources are not within the expected limits, updating the deployment. Actual: {Limits:map[] Requests:map[memory:{i:{value:157286400 scale:0} d:{Dec:} s:150Mi Format:BinarySI} cpu:{i:{value:10 scale:-3} d:{Dec:} s:10m Format:DecimalSI}]} Expected: {Limits:map[memory:{i:{value:0 scale:0} d:{Dec:0xc420311d70} s: Format:BinarySI} cpu:{i:{value:54000000 scale:-9} d:{Dec:} s: Format:DecimalSI}] Requests:map[cpu:{i:{value:54000000 scale:-9} d:{Dec:} s: Format:DecimalSI} memory:{i:{value:0 scale:0} d:{Dec:0xc420311d70} s: Format:BinarySI}]}

ERROR: logging before flag.Parse: E0302 09:50:36.798610 1 nanny_lib.go:110] the server could not find the requested resource

ERROR: logging before flag.Parse: I0302 09:50:46.810534 1 nanny_lib.go:108] Resources are not within the expected limits, updating the deployment. Actual: {Limits:map[] Requests:map[cpu:{i:{value:10 scale:-3} d:{Dec:} s:10m Format:DecimalSI} memory:{i:{value:157286400 scale:0} d:{Dec:} s:150Mi Format:BinarySI}]} Expected: {Limits:map[cpu:{i:{value:54000000 scale:-9} d:{Dec:} s: Format:DecimalSI} memory:{i:{value:0 scale:0} d:{Dec:0xc42041d470} s: Format:BinarySI}] Requests:map[cpu:{i:{value:54000000 scale:-9} d:{Dec:} s: Format:DecimalSI} memory:{i:{value:0 scale:0} d:{Dec:0xc42041d470} s: Format:BinarySI}]}

ERROR: logging before flag.Parse: E0302 09:50:46.811833 1 nanny_lib.go:110] the server could not find the requested resource

ERROR: logging before flag.Parse: I0302 09:50:56.821149 1 nanny_lib.go:108] Resources are not within the expected limits, updating the deployment. Actual: {Limits:map[] Requests:map[cpu:{i:{value:10 scale:-3} d:{Dec:} s:10m Format:DecimalSI} memory:{i:{value:157286400 scale:0} d:{Dec:} s:150Mi Format:BinarySI}]} Expected: {Limits:map[cpu:{i:{value:54000000 scale:-9} d:{Dec:} s: Format:DecimalSI} memory:{i:{value:0 scale:0} d:{Dec:0xc42040d320} s: Format:BinarySI}] Requests:map[cpu:{i:{value:54000000 scale:-9} d:{Dec:} s: Format:DecimalSI} memory:{i:{value:0 scale:0} d:{Dec:0xc42040d320} s: Format:BinarySI}]}

ERROR: logging before flag.Parse: E0302 09:50:56.823834 1 nanny_lib.go:110] the server could not find the requested resource

ERROR: logging before flag.Parse: I0302 09:51:06.832668 1 nanny_lib.go:108] Resources are not within the expected limits, updating the deployment. Actual: {Limits:map[] Requests:map[cpu:{i:{value:10 scale:-3} d:{Dec:} s:10m Format:DecimalSI} memory:{i:{value:157286400 scale:0} d:{Dec:} s:150Mi Format:BinarySI}]} Expected: {Limits:map[cpu:{i:{value:54000000 scale:-9} d:{Dec:} s: Format:DecimalSI} memory:{i:{value:0 scale:0} d:{Dec:0xc42040de00} s: Format:BinarySI}] Requests:map[cpu:{i:{value:54000000 scale:-9} d:{Dec:} s: Format:DecimalSI} memory:{i:{value:0 scale:0} d:{Dec:0xc42040de00} s: Format:BinarySI}]}

ERROR: logging before flag.Parse: E0302 09:51:06.834010 1 nanny_lib.go:110] the server could not find the requested resource

ERROR: logging before flag.Parse: I0302 09:51:16.847878 1 nanny_lib.go:108] Resources are not within the expected limits, updating the deployment. Actual: {Limits:map[] Requests:map[cpu:{i:{value:10 scale:-3} d:{Dec:} s:10m Format:DecimalSI} memory:{i:{value:157286400 scale:0} d:{Dec:} s:150Mi Format:BinarySI}]} Expected: {Limits:map[cpu:{i:{value:54000000 scale:-9} d:{Dec:} s: Format:DecimalSI} memory:{i:{value:0 scale:0} d:{Dec:0xc420425740} s: Format:BinarySI}] Requests:map[cpu:{i:{value:54000000 scale:-9} d:{Dec:} s: Format:DecimalSI} memory:{i:{value:0 scale:0} d:{Dec:0xc420425740} s: Format:BinarySI}]}

ERROR: logging before flag.Parse: E0302 09:51:16.849780 1 nanny_lib.go:110] the server could not find the requested resource

ERROR: logging before flag.Parse: I0302 09:51:26.857000 1 nanny_lib.go:108] Resources are not within the expected limits, updating the deployment. Actual: {Limits:map[] Requests:map[memory:{i:{value:157286400 scale:0} d:{Dec:} s:150Mi Format:BinarySI} cpu:{i:{value:10 scale:-3} d:{Dec:} s:10m Format:DecimalSI}]} Expected: {Limits:map[cpu:{i:{value:54000000 scale:-9} d:{Dec:} s: Format:DecimalSI} memory:{i:{value:0 scale:0} d:{Dec:0xc420442ea0} s: Format:BinarySI}] Requests:map[cpu:{i:{value:54000000 scale:-9} d:{Dec:} s: Format:DecimalSI} memory:{i:{value:0 scale:0} d:{Dec:0xc420442ea0} s: Format:BinarySI}]}

ERROR: logging before flag.Parse: E0302 09:51:26.857901 1 nanny_lib.go:110] the server could not find the requested resource

ERROR: logging before flag.Parse: I0302 09:51:36.864350 1 nanny_lib.go:108] Resources are not within the expected limits, updating the deployment. Actual: {Limits:map[] Requests:map[memory:{i:{value:157286400 scale:0} d:{Dec:} s:150Mi Format:BinarySI} cpu:{i:{value:10 scale:-3} d:{Dec:} s:10m Format:DecimalSI}]} Expected: {Limits:map[cpu:{i:{value:54000000 scale:-9} d:{Dec:} s: Format:DecimalSI} memory:{i:{value:0 scale:0} d:{Dec:0xc420443890} s: Format:BinarySI}] Requests:map[cpu:{i:{value:54000000 scale:-9} d:{Dec:} s: Format:DecimalSI} memory:{i:{value:0 scale:0} d:{Dec:0xc420443890} s: Format:BinarySI}]}

ERROR: logging before flag.Parse: E0302 09:51:36.865568 1 nanny_lib.go:110] the server could not find the requested resource

ERROR: logging before flag.Parse: I0302 09:51:46.872466 1 nanny_lib.go:108] Resources are not within the expected limits, updating the deployment. Actual: {Limits:map[] Requests:map[cpu:{i:{value:10 scale:-3} d:{Dec:} s:10m Format:DecimalSI} memory:{i:{value:157286400 scale:0} d:{Dec:} s:150Mi Format:BinarySI}]} Expected: {Limits:map[cpu:{i:{value:54000000 scale:-9} d:{Dec:} s: Format:DecimalSI} memory:{i:{value:0 scale:0} d:{Dec:0xc42043b8c0} s: Format:BinarySI}] Requests:map[cpu:{i:{value:54000000 scale:-9} d:{Dec:} s: Format:DecimalSI} memory:{i:{value:0 scale:0} d:{Dec:0xc42043b8c0} s: Format:BinarySI}]}

ERROR: logging before flag.Parse: E0302 09:51:46.873755 1 nanny_lib.go:110] the server could not find the requested resource

ERROR: logging before flag.Parse: I0302 09:51:56.882493 1 nanny_lib.go:108] Resources are not within the expected limits, updating the deployment. Actual: {Limits:map[] Requests:map[cpu:{i:{value:10 scale:-3} d:{Dec:} s:10m Format:DecimalSI} memory:{i:{value:157286400 scale:0} d:{Dec:} s:150Mi Format:BinarySI}]} Expected: {Limits:map[cpu:{i:{value:54000000 scale:-9} d:{Dec:} s: Format:DecimalSI} memory:{i:{value:0 scale:0} d:{Dec:0xc42042f260} s: Format:BinarySI}] Requests:map[cpu:{i:{value:54000000 scale:-9} d:{Dec:} s: Format:DecimalSI} memory:{i:{value:0 scale:0} d:{Dec:0xc42042f260} s: Format:BinarySI}]}

ERROR: logging before flag.Parse: E0302 09:51:56.884217 1 nanny_lib.go:110] the server could not find the requested resource

ERROR: logging before flag.Parse: I0302 09:52:06.891710 1 nanny_lib.go:108] Resources are not within the expected limits, updating the deployment. Actual: {Limits:map[] Requests:map[cpu:{i:{value:10 scale:-3} d:{Dec:} s:10m Format:DecimalSI} memory:{i:{value:157286400 scale:0} d:{Dec:} s:150Mi Format:BinarySI}]} Expected: {Limits:map[cpu:{i:{value:54000000 scale:-9} d:{Dec:} s: Format:DecimalSI} memory:{i:{value:0 scale:0} d:{Dec:0xc42042ed20} s: Format:BinarySI}] Requests:map[cpu:{i:{value:54000000 scale:-9} d:{Dec:} s: Format:DecimalSI} memory:{i:{value:0 scale:0} d:{Dec:0xc42042ed20} s: Format:BinarySI}]}

ERROR: logging before flag.Parse: E0302 09:52:06.893029 1 nanny_lib.go:110] the server could not find the requested resource

ERROR: logging before flag.Parse: I0302 09:52:16.901015 1 nanny_lib.go:108] Resources are not within the expected limits, updating the deployment. Actual: {Limits:map[] Requests:map[cpu:{i:{value:10 scale:-3} d:{Dec:} s:10m Format:DecimalSI} memory:{i:{value:157286400 scale:0} d:{Dec:} s:150Mi Format:BinarySI}]} Expected: {Limits:map[cpu:{i:{value:54000000 scale:-9} d:{Dec:} s: Format:DecimalSI} memory:{i:{value:0 scale:0} d:{Dec:0xc4204427b0} s: Format:BinarySI}] Requests:map[memory:{i:{value:0 scale:0} d:{Dec:0xc4204427b0} s: Format:BinarySI} cpu:{i:{value:54000000 scale:-9} d:{Dec:} s: Format:DecimalSI}]}

ERROR: logging before flag.Parse: E0302 09:52:16.901910 1 nanny_lib.go:110] the server could not find the requested resource

ERROR: logging before flag.Parse: I0302 09:52:26.911316 1 nanny_lib.go:108] Resources are not within the expected limits, updating the deployment. Actual: {Limits:map[] Requests:map[cpu:{i:{value:10 scale:-3} d:{Dec:} s:10m Format:DecimalSI} memory:{i:{value:157286400 scale:0} d:{Dec:} s:150Mi Format:BinarySI}]} Expected: {Limits:map[cpu:{i:{value:54000000 scale:-9} d:{Dec:} s: Format:DecimalSI} memory:{i:{value:0 scale:0} d:{Dec:0xc420443140} s: Format:BinarySI}] Requests:map[cpu:{i:{value:54000000 scale:-9} d:{Dec:} s: Format:DecimalSI} memory:{i:{value:0 scale:0} d:{Dec:0xc420443140} s: Format:BinarySI}]}

ERROR: logging before flag.Parse: E0302 09:52:26.912629 1 nanny_lib.go:110] the server could not find the requested resource

ERROR: logging before flag.Parse: I0302 09:52:36.920974 1 nanny_lib.go:108] Resources are not within the expected limits, updating the deployment. Actual: {Limits:map[] Requests:map[cpu:{i:{value:10 scale:-3} d:{Dec:} s:10m Format:DecimalSI} memory:{i:{value:157286400 scale:0} d:{Dec:} s:150Mi Format:BinarySI}]} Expected: {Limits:map[cpu:{i:{value:54000000 scale:-9} d:{Dec:} s: Format:DecimalSI} memory:{i:{value:0 scale:0} d:{Dec:0xc42041c6f0} s: Format:BinarySI}] Requests:map[cpu:{i:{value:54000000 scale:-9} d:{Dec:} s: Format:DecimalSI} memory:{i:{value:0 scale:0} d:{Dec:0xc42041c6f0} s: Format:BinarySI}]}

ERROR: logging before flag.Parse: E0302 09:52:36.922223 1 nanny_lib.go:110] the server could not find the requested resource

ERROR: logging before flag.Parse: I0302 09:52:46.931350 1 nanny_lib.go:108] Resources are not within the expected limits, updating the deployment. Actual: {Limits:map[] Requests:map[cpu:{i:{value:10 scale:-3} d:{Dec:} s:10m Format:DecimalSI} memory:{i:{value:157286400 scale:0} d:{Dec:} s:150Mi Format:BinarySI}]} Expected: {Limits:map[memory:{i:{value:0 scale:0} d:{Dec:0xc42043adb0} s: Format:BinarySI} cpu:{i:{value:54000000 scale:-9} d:{Dec:} s: Format:DecimalSI}] Requests:map[cpu:{i:{value:54000000 scale:-9} d:{Dec:} s: Format:DecimalSI} memory:{i:{value:0 scale:0} d:{Dec:0xc42043adb0} s: Format:BinarySI}]}

ERROR: logging before flag.Parse: E0302 09:52:46.932937 1 nanny_lib.go:110] the server could not find the requested resource

ERROR: logging before flag.Parse: I0302 09:52:56.942233 1 nanny_lib.go:108] Resources are not within the expected limits, updating the deployment. Actual: {Limits:map[] Requests:map[memory:{i:{value:157286400 scale:0} d:{Dec:} s:150Mi Format:BinarySI} cpu:{i:{value:10 scale:-3} d:{Dec:} s:10m Format:DecimalSI}]} Expected: {Limits:map[cpu:{i:{value:54000000 scale:-9} d:{Dec:} s: Format:DecimalSI} memory:{i:{value:0 scale:0} d:{Dec:0xc42040c660} s: Format:BinarySI}] Requests:map[cpu:{i:{value:54000000 scale:-9} d:{Dec:} s: Format:DecimalSI} memory:{i:{value:0 scale:0} d:{Dec:0xc42040c660} s: Format:BinarySI}]}

ERROR: logging before flag.Parse: E0302 09:52:56.943857 1 nanny_lib.go:110] the server could not find the requested resource

ERROR: logging before flag.Parse: I0302 09:53:06.952069 1 nanny_lib.go:108] Resources are not within the expected limits, updating the deployment. Actual: {Limits:map[] Requests:map[cpu:{i:{value:10 scale:-3} d:{Dec:} s:10m Format:DecimalSI} memory:{i:{value:157286400 scale:0} d:{Dec:} s:150Mi Format:BinarySI}]} Expected: {Limits:map[cpu:{i:{value:54000000 scale:-9} d:{Dec:} s: Format:DecimalSI} memory:{i:{value:0 scale:0} d:{Dec:0xc420402a20} s: Format:BinarySI}] Requests:map[cpu:{i:{value:54000000 scale:-9} d:{Dec:} s: Format:DecimalSI} memory:{i:{value:0 scale:0} d:{Dec:0xc420402a20} s: Format:BinarySI}]}

ERROR: logging before flag.Parse: E0302 09:53:06.953379 1 nanny_lib.go:110] the server could not find the requested resource

ERROR: logging before flag.Parse: I0302 09:53:16.962063 1 nanny_lib.go:108] Resources are not within the expected limits, updating the deployment. Actual: {Limits:map[] Requests:map[cpu:{i:{value:10 scale:-3} d:{Dec:} s:10m Format:DecimalSI} memory:{i:{value:157286400 scale:0} d:{Dec:} s:150Mi Format:BinarySI}]} Expected: {Limits:map[cpu:{i:{value:54000000 scale:-9} d:{Dec:} s: Format:DecimalSI} memory:{i:{value:0 scale:0} d:{Dec:0xc420310420} s: Format:BinarySI}] Requests:map[cpu:{i:{value:54000000 scale:-9} d:{Dec:} s: Format:DecimalSI} memory:{i:{value:0 scale:0} d:{Dec:0xc420310420} s: Format:BinarySI}]}

ERROR: logging before flag.Parse: E0302 09:53:16.963254 1 nanny_lib.go:110] the server could not find the requested resource

ERROR: logging before flag.Parse: I0302 09:53:26.978389 1 nanny_lib.go:108] Resources are not within the expected limits, updating the deployment. Actual: {Limits:map[] Requests:map[cpu:{i:{value:10 scale:-3} d:{Dec:} s:10m Format:DecimalSI} memory:{i:{value:157286400 scale:0} d:{Dec:} s:150Mi Format:BinarySI}]} Expected: {Limits:map[cpu:{i:{value:54000000 scale:-9} d:{Dec:} s: Format:DecimalSI} memory:{i:{value:0 scale:0} d:{Dec:0xc4202ec240} s: Format:BinarySI}] Requests:map[cpu:{i:{value:54000000 scale:-9} d:{Dec:} s: Format:DecimalSI} memory:{i:{value:0 scale:0} d:{Dec:0xc4202ec240} s: Format:BinarySI}]}

ERROR: logging before flag.Parse: E0302 09:53:26.980198 1 nanny_lib.go:110] the server could not find the requested resource

ERROR: logging before flag.Parse: I0302 09:53:36.988986 1 nanny_lib.go:108] Resources are not within the expected limits, updating the deployment. Actual: {Limits:map[] Requests:map[cpu:{i:{value:10 scale:-3} d:{Dec:} s:10m Format:DecimalSI} memory:{i:{value:157286400 scale:0} d:{Dec:} s:150Mi Format:BinarySI}]} Expected: {Limits:map[cpu:{i:{value:54000000 scale:-9} d:{Dec:} s: Format:DecimalSI} memory:{i:{value:0 scale:0} d:{Dec:0xc420311b90} s: Format:BinarySI}] Requests:map[cpu:{i:{value:54000000 scale:-9} d:{Dec:} s: Format:DecimalSI} memory:{i:{value:0 scale:0} d:{Dec:0xc420311b90} s: Format:BinarySI}]}

ERROR: logging before flag.Parse: E0302 09:53:36.990503 1 nanny_lib.go:110] the server could not find the requested resource

ERROR: logging before flag.Parse: I0302 09:53:46.999218 1 nanny_lib.go:108] Resources are not within the expected limits, updating the deployment. Actual: {Limits:map[] Requests:map[cpu:{i:{value:10 scale:-3} d:{Dec:} s:10m Format:DecimalSI} memory:{i:{value:157286400 scale:0} d:{Dec:} s:150Mi Format:BinarySI}]} Expected: {Limits:map[cpu:{i:{value:54000000 scale:-9} d:{Dec:} s: Format:DecimalSI} memory:{i:{value:0 scale:0} d:{Dec:0xc420425f80} s: Format:BinarySI}] Requests:map[memory:{i:{value:0 scale:0} d:{Dec:0xc420425f80} s: Format:BinarySI} cpu:{i:{value:54000000 scale:-9} d:{Dec:} s: Format:DecimalSI}]}

ERROR: logging before flag.Parse: E0302 09:53:47.000453 1 nanny_lib.go:110] the server could not find the requested resource

ERROR: logging before flag.Parse: I0302 09:53:57.008125 1 nanny_lib.go:108] Resources are not within the expected limits, updating the deployment. Actual: {Limits:map[] Requests:map[cpu:{i:{value:10 scale:-3} d:{Dec:} s:10m Format:DecimalSI} memory:{i:{value:157286400 scale:0} d:{Dec:} s:150Mi Format:BinarySI}]} Expected: {Limits:map[memory:{i:{value:0 scale:0} d:{Dec:0xc42042e5a0} s: Format:BinarySI} cpu:{i:{value:54000000 scale:-9} d:{Dec:} s: Format:DecimalSI}] Requests:map[cpu:{i:{value:54000000 scale:-9} d:{Dec:} s: Format:DecimalSI} memory:{i:{value:0 scale:0} d:{Dec:0xc42042e5a0} s: Format:BinarySI}]}

ERROR: logging before flag.Parse: E0302 09:53:57.009283 1 nanny_lib.go:110] the server could not find the requested resource

ERROR: logging before flag.Parse: I0302 09:54:07.018731 1 nanny_lib.go:108] Resources are not within the expected limits, updating the deployment. Actual: {Limits:map[] Requests:map[cpu:{i:{value:10 scale:-3} d:{Dec:} s:10m Format:DecimalSI} memory:{i:{value:157286400 scale:0} d:{Dec:} s:150Mi Format:BinarySI}]} Expected: {Limits:map[memory:{i:{value:0 scale:0} d:{Dec:0xc4203113b0} s: Format:BinarySI} cpu:{i:{value:54000000 scale:-9} d:{Dec:} s: Format:DecimalSI}] Requests:map[cpu:{i:{value:54000000 scale:-9} d:{Dec:} s: Format:DecimalSI} memory:{i:{value:0 scale:0} d:{Dec:0xc4203113b0} s: Format:BinarySI}]}

ERROR: logging before flag.Parse: E0302 09:54:07.020123 1 nanny_lib.go:110] the server could not find the requested resource
Prometheus Pod日志
prometheus-k8s容器
level=warn ts=2021-03-02T09:54:39.642017488Z caller=scrape.go:851 component="scrape manager" scrape_pool=kubesphere-monitoring-system/node-exporter/0 target=https://10.3.9.66:9100/metrics msg="appending scrape report failed" err="out of bounds"

level=warn ts=2021-03-02T09:54:43.363302049Z caller=scrape.go:1094 component="scrape manager" scrape_pool=kubesphere-monitoring-system/kubelet/1 target=https://10.3.9.62:10250/metrics/cadvisor msg="Error on ingesting samples that are too old or are too far into the future" num_dropped=420

level=warn ts=2021-03-02T09:54:43.363522739Z caller=scrape.go:851 component="scrape manager" scrape_pool=kubesphere-monitoring-system/kubelet/1 target=https://10.3.9.62:10250/metrics/cadvisor msg="appending scrape report failed" err="out of bounds"

level=warn ts=2021-03-02T09:54:46.395324322Z caller=scrape.go:1094 component="scrape manager" scrape_pool=kubesphere-monitoring-system/node-exporter/0 target=https://10.3.9.69:9100/metrics msg="Error on ingesting samples that are too old or are too far into the future" num_dropped=197

level=warn ts=2021-03-02T09:54:46.395499905Z caller=scrape.go:851 component="scrape manager" scrape_pool=kubesphere-monitoring-system/node-exporter/0 target=https://10.3.9.69:9100/metrics msg="appending scrape report failed" err="out of bounds"

level=warn ts=2021-03-02T09:54:46.835711768Z caller=scrape.go:1094 component="scrape manager" scrape_pool=kubesphere-monitoring-system/kubelet/0 target=https://10.3.9.64:10250/metrics msg="Error on ingesting samples that are too old or are too far into the future" num_dropped=4

level=warn ts=2021-03-02T09:54:46.835914232Z caller=scrape.go:851 component="scrape manager" scrape_pool=kubesphere-monitoring-system/kubelet/0 target=https://10.3.9.64:10250/metrics msg="appending scrape report failed" err="out of bounds"

level=warn ts=2021-03-02T09:54:49.364873376Z caller=scrape.go:1094 component="scrape manager" scrape_pool=kubesphere-monitoring-system/kubelet/0 target=https://10.3.9.65:10250/metrics msg="Error on ingesting samples that are too old or are too far into the future" num_dropped=10

level=warn ts=2021-03-02T09:54:49.36501781Z caller=scrape.go:851 component="scrape manager" scrape_pool=kubesphere-monitoring-system/kubelet/0 target=https://10.3.9.65:10250/metrics msg="appending scrape report failed" err="out of bounds"

level=warn ts=2021-03-02T09:54:49.384517999Z caller=scrape.go:1094 component="scrape manager" scrape_pool=kubesphere-monitoring-system/kubelet/1 target=https://10.3.9.64:10250/metrics/cadvisor msg="Error on ingesting samples that are too old or are too far into the future" num_dropped=284

level=warn ts=2021-03-02T09:54:49.384680459Z caller=scrape.go:851 component="scrape manager" scrape_pool=kubesphere-monitoring-system/kubelet/1 target=https://10.3.9.64:10250/metrics/cadvisor msg="appending scrape report failed" err="out of bounds"

level=warn ts=2021-03-02T09:54:49.717454849Z caller=scrape.go:1094 component="scrape manager" scrape_pool=kubesphere-monitoring-system/kubelet/1 target=https://10.3.9.68:10250/metrics/cadvisor msg="Error on ingesting samples that are too old or are too far into the future" num_dropped=336

level=warn ts=2021-03-02T09:54:49.717869819Z caller=scrape.go:851 component="scrape manager" scrape_pool=kubesphere-monitoring-system/kubelet/1 target=https://10.3.9.68:10250/metrics/cadvisor msg="appending scrape report failed" err="out of bounds"

level=warn ts=2021-03-02T09:54:50.869771069Z caller=scrape.go:1094 component="scrape manager" scrape_pool=kubesphere-monitoring-system/prometheus-system/0 target=http://10.233.73.44:9090/metrics msg="Error on ingesting samples that are too old or are too far into the future" num_dropped=544

level=warn ts=2021-03-02T09:54:50.869845334Z caller=scrape.go:851 component="scrape manager" scrape_pool=kubesphere-monitoring-system/prometheus-system/0 target=http://10.233.73.44:9090/metrics msg="appending scrape report failed" err="out of bounds"

level=warn ts=2021-03-02T09:54:51.278782967Z caller=scrape.go:1094 component="scrape manager" scrape_pool=kubesphere-monitoring-system/node-exporter/0 target=https://10.3.9.91:9100/metrics msg="Error on ingesting samples that are too old or are too far into the future" num_dropped=207

level=warn ts=2021-03-02T09:54:51.278920193Z caller=scrape.go:851 component="scrape manager" scrape_pool=kubesphere-monitoring-system/node-exporter/0 target=https://10.3.9.91:9100/metrics msg="appending scrape report failed" err="out of bounds"

level=warn ts=2021-03-02T09:54:52.589281277Z caller=scrape.go:1094 component="scrape manager" scrape_pool=kubesphere-monitoring-system/kubelet/1 target=https://10.3.9.92:10250/metrics/cadvisor msg="Error on ingesting samples that are too old or are too far into the future" num_dropped=356

level=warn ts=2021-03-02T09:54:52.589453306Z caller=scrape.go:851 component="scrape manager" scrape_pool=kubesphere-monitoring-system/kubelet/1 target=https://10.3.9.92:10250/metrics/cadvisor msg="appending scrape report failed" err="out of bounds"

level=warn ts=2021-03-02T09:54:52.594199595Z caller=scrape.go:1094 component="scrape manager" scrape_pool=kubesphere-monitoring-system/prometheus-system/0 target=http://10.233.83.36:9090/metrics msg="Error on ingesting samples that are too old or are too far into the future" num_dropped=544

level=warn ts=2021-03-02T09:54:52.594267987Z caller=scrape.go:851 component="scrape manager" scrape_pool=kubesphere-monitoring-system/prometheus-system/0 target=http://10.233.83.36:9090/metrics msg="appending scrape report failed" err="out of bounds"

level=warn ts=2021-03-02T09:54:56.715762432Z caller=scrape.go:1094 component="scrape manager" scrape_pool=kubesphere-monitoring-system/kubelet/1 target=https://10.3.9.93:10250/metrics/cadvisor msg="Error on ingesting samples that are too old or are too far into the future" num_dropped=356

level=warn ts=2021-03-02T09:54:56.715941362Z caller=scrape.go:851 component="scrape manager" scrape_pool=kubesphere-monitoring-system/kubelet/1 target=https://10.3.9.93:10250/metrics/cadvisor msg="appending scrape report failed" err="out of bounds"

level=warn ts=2021-03-02T09:54:57.025168739Z caller=scrape.go:1094 component="scrape manager" scrape_pool=kubesphere-monitoring-system/kubelet/1 target=https://10.3.9.66:10250/metrics/cadvisor msg="Error on ingesting samples that are too old or are too far into the future" num_dropped=308

level=warn ts=2021-03-02T09:54:57.025327343Z caller=scrape.go:851 component="scrape manager" scrape_pool=kubesphere-monitoring-system/kubelet/1 target=https://10.3.9.66:10250/metrics/cadvisor msg="appending scrape report failed" err="out of bounds"

level=warn ts=2021-03-02T09:54:59.256428456Z caller=scrape.go:1094 component="scrape manager" scrape_pool=kubesphere-monitoring-system/kubelet/0 target=https://10.3.9.61:10250/metrics msg="Error on ingesting samples that are too old or are too far into the future" num_dropped=16

level=warn ts=2021-03-02T09:54:59.256597627Z caller=scrape.go:851 component="scrape manager" scrape_pool=kubesphere-monitoring-system/kubelet/0 target=https://10.3.9.61:10250/metrics msg="appending scrape report failed" err="out of bounds"

level=warn ts=2021-03-02T09:55:04.110286143Z caller=scrape.go:1094 component="scrape manager" scrape_pool=kubesphere-monitoring-system/kubelet/1 target=https://10.3.9.61:10250/metrics/cadvisor msg="Error on ingesting samples that are too old or are too far into the future" num_dropped=380

level=warn ts=2021-03-02T09:55:04.110489635Z caller=scrape.go:851 component="scrape manager" scrape_pool=kubesphere-monitoring-system/kubelet/1 target=https://10.3.9.61:10250/metrics/cadvisor msg="appending scrape report failed" err="out of bounds"

level=warn ts=2021-03-02T09:55:09.21061972Z caller=scrape.go:1094 component="scrape manager" scrape_pool=kubesphere-monitoring-system/kubelet/0 target=https://10.3.9.91:10250/metrics msg="Error on ingesting samples that are too old or are too far into the future" num_dropped=4

level=warn ts=2021-03-02T09:55:09.210807135Z caller=scrape.go:851 component="scrape manager" scrape_pool=kubesphere-monitoring-system/kubelet/0 target=https://10.3.9.91:10250/metrics msg="appending scrape report failed" err="out of bounds"

level=warn ts=2021-03-02T09:55:09.608824362Z caller=scrape.go:1094 component="scrape manager" scrape_pool=kubesphere-monitoring-system/kubelet/1 target=https://10.3.9.63:10250/metrics/cadvisor msg="Error on ingesting samples that are too old or are too far into the future" num_dropped=448

level=warn ts=2021-03-02T09:55:09.609090028Z caller=scrape.go:851 component="scrape manager" scrape_pool=kubesphere-monitoring-system/kubelet/1 target=https://10.3.9.63:10250/metrics/cadvisor msg="appending scrape report failed" err="out of bounds"

level=warn ts=2021-03-02T09:55:10.058868905Z caller=scrape.go:1094 component="scrape manager" scrape_pool=kubesphere-monitoring-system/node-exporter/0 target=https://10.3.9.65:9100/metrics msg="Error on ingesting samples that are too old or are too far into the future" num_dropped=206

level=warn ts=2021-03-02T09:55:10.059013968Z caller=scrape.go:851 component="scrape manager" scrape_pool=kubesphere-monitoring-system/node-exporter/0 target=https://10.3.9.65:9100/metrics msg="appending scrape report failed" err="out of bounds"

level=warn ts=2021-03-02T09:55:10.833050374Z caller=scrape.go:1094 component="scrape manager" scrape_pool=kubesphere-monitoring-system/kubelet/1 target=https://10.3.9.91:10250/metrics/cadvisor msg="Error on ingesting samples that are too old or are too far into the future" num_dropped=368

level=warn ts=2021-03-02T09:55:10.836492213Z caller=scrape.go:851 component="scrape manager" scrape_pool=kub
prometheus-operator Pod
ts=2021-03-02T08:28:28.175596136Z caller=main.go:199 msg="Starting Prometheus Operator version '0.34.0'."

ts=2021-03-02T08:28:28.179175415Z caller=main.go:96 msg="Staring insecure server on :8080"

level=info ts=2021-03-02T08:28:28.185288951Z caller=operator.go:219 component=alertmanageroperator msg="connection established" cluster-version=v1.17.3

level=info ts=2021-03-02T08:28:28.356052187Z caller=operator.go:441 component=prometheusoperator msg="connection established" cluster-version=v1.17.3

level=info ts=2021-03-02T08:28:28.774141022Z caller=operator.go:641 component=alertmanageroperator msg="CRD updated" crd=Alertmanager

level=info ts=2021-03-02T08:28:29.076213551Z caller=operator.go:1870 component=prometheusoperator msg="CRD updated" crd=Prometheus

level=info ts=2021-03-02T08:28:29.182589389Z caller=operator.go:1870 component=prometheusoperator msg="CRD updated" crd=ServiceMonitor

level=info ts=2021-03-02T08:28:29.203427635Z caller=operator.go:1870 component=prometheusoperator msg="CRD updated" crd=PodMonitor

level=info ts=2021-03-02T08:28:29.223849977Z caller=operator.go:1870 component=prometheusoperator msg="CRD updated" crd=PrometheusRule

level=info ts=2021-03-02T08:28:31.790980422Z caller=operator.go:235 component=alertmanageroperator msg="CRD API endpoints ready"

level=info ts=2021-03-02T08:28:32.00110592Z caller=operator.go:190 component=alertmanageroperator msg="successfully synced all caches"

level=info ts=2021-03-02T08:28:41.312697948Z caller=operator.go:457 component=prometheusoperator msg="CRD API endpoints ready"

level=info ts=2021-03-02T08:28:42.172929235Z caller=operator.go:387 component=prometheusoperator msg="successfully synced all caches"

level=info ts=2021-03-02T08:28:42.174097868Z caller=operator.go:1072 component=prometheusoperator msg="sync prometheus" key=kubesphere-monitoring-system/k8s

level=info ts=2021-03-02T08:28:42.286613896Z caller=operator.go:1072 component=prometheusoperator msg="sync prometheus" key=kubesphere-monitoring-system/k8s-system
  • Forest-L 回复了此帖
  • 你的 kube-state-metrics 无权访问一些资源,你是做了什么操作了?

    你可能需要重新apply下面的 yaml, 然后把 kube-state-metrics 的 pod 删掉

    https://raw.githubusercontent.com/kubesphere/prometheus-operator/ks-v2.1.1/contrib/kube-prometheus/manifests/kube-state-metrics-clusterRole.yaml
    https://raw.githubusercontent.com/kubesphere/prometheus-operator/ks-v2.1.1/contrib/kube-prometheus/manifests/kube-state-metrics-clusterRoleBinding.yaml
    https://raw.githubusercontent.com/kubesphere/prometheus-operator/ks-v2.1.1/contrib/kube-prometheus/manifests/kube-state-metrics-role.yaml
    https://raw.githubusercontent.com/kubesphere/prometheus-operator/ks-v2.1.1/contrib/kube-prometheus/manifests/kube-state-metrics-roleBinding.yaml
    https://raw.githubusercontent.com/kubesphere/prometheus-operator/ks-v2.1.1/contrib/kube-prometheus/manifests/kube-state-metrics-serviceAccount.yaml


      Forest-L 已经解决,之前因日志有报错:"k8s.io/kube-state-metrics/internal/store/builder.go:295: Failed to list *v1.StorageClass: storageclasses.storage.k8s.io is forbidden: User “system:serviceaccount:kubesphere-monitoring-system:kube-state-metrics” cannot list resource “storageclasses” in API group “storage.k8s.io” at the cluster scope"

      修改过 kube-state-metrics、kubesphere-kube-state-metrics 这两个 ClusterRole,第二天发现没有监控数据,恢复回去之后就正常了;

      但是根据社区方案,使用 keepalived+haproxy 部署高可用集群后,KubeSphere 监控页面里 apiserver 没有监控数据,不知是何原因

      benjaminhuo 感谢,已经解决。还有一个初始就存在的问题:apiserver 监控没有数据,推测可能与 keepalived+haproxy 高可用部署方式有关,不知是否有解决办法;