This error occurs because you have enabled both OpenSearch and ES at the same time.

DehaoCheng
If OpenSearch is enabled, logs are shipped to OpenSearch.

If ES is enabled, logs are shipped to ES.

Only one of the two can be enabled at a time.

And once the installation succeeds, audit logs, container logs, and so on can be queried in KubeSphere either way — is that what you mean?

    sword913 Yes. Logs can of course be shipped to either ES or OpenSearch, but they can only be queried from a single backend (OpenSearch or ES), which is why you must choose one. Whichever of the two you install, you can query the log data in it.
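    For reference, a minimal sketch of the relevant ClusterConfiguration toggles when an external Elasticsearch is used (the host value is an illustrative placeholder; the field names mirror the configuration shown further down this thread):

    spec:
      common:
        es:
          enabled: true                          # ship and query logs via Elasticsearch
          elkPrefix: dev
          externalElasticsearchHost: <es-host>   # placeholder for your external ES address
          externalElasticsearchPort: "9200"
        opensearch:
          enabled: false                         # keep the other backend disabled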

      DehaoCheng

      I reset the servers and reinstalled.

      Only ES is enabled this time.

      I created a MySQL service, but I still cannot find any logs, as shown below.

      How should I troubleshoot this?

      As for the fluent-bit warning, you can increase the output buffer a bit.
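      A minimal sketch of one way to do that, assuming the Elasticsearch output is managed by the Output custom resource named es in the kubesphere-logging-system namespace and that its spec exposes a bufferSize field (mapped to Fluent Bit's Buffer_Size option); the 64KB value is only illustrative:

      kubectl -n kubesphere-logging-system edit output es

      spec:
        es:
          host: 10.203.98.104
          port: 9200
          bufferSize: 64KB   # raise this if Fluent Bit keeps warning that the buffer is too small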

      DehaoCheng

      The ClusterConfiguration is as follows:

      apiVersion: installer.kubesphere.io/v1alpha1
      kind: ClusterConfiguration
      metadata:
        labels:
          version: v3.4.1
        name: ks-installer
        namespace: kubesphere-system
      spec:
        alerting:
          enabled: true
        auditing:
          enabled: true
        authentication:
          jwtSecret: ''
        common:
          core:
            console:
              enableMultiLogin: true
              port: 30880
              type: NodePort
          es:
            basicAuth:
              enabled: false
              password: ''
              username: ''
            elkPrefix: dev
            enabled: true
            externalElasticsearchHost: 10.203.98.104
            externalElasticsearchPort: '9200'
            logMaxAge: 7
          gpu:
            kinds:
            - default: true
              resourceName: nvidia.com/gpu
              resourceType: GPU
          minio:
            volumeSize: 20Gi
          monitoring:
            GPUMonitoring:
              enabled: false
            endpoint: 'http://prometheus-operated.kubesphere-monitoring-system.svc:9090'
          openldap:
            enabled: false
            volumeSize: 2Gi
          opensearch:
            basicAuth:
              enabled: true
              password: admin
              username: admin
            dashboard:
              enabled: false
            enabled: false
            externalOpensearchHost: ''
            externalOpensearchPort: ''
            logMaxAge: 7
            opensearchPrefix: whizard
          redis:
            enableHA: false
            enabled: false
            volumeSize: 2Gi
        devops:
          enabled: true
          jenkinsCpuLim: 1
          jenkinsCpuReq: 0.5
          jenkinsMemoryLim: 4Gi
          jenkinsMemoryReq: 4Gi
          jenkinsVolumeSize: 16Gi
        edgeruntime:
          enabled: false
          kubeedge:
            cloudCore:
              cloudHub:
                advertiseAddress:
                - ''
              service:
                cloudhubHttpsNodePort: '30002'
                cloudhubNodePort: '30000'
                cloudhubQuicNodePort: '30001'
                cloudstreamNodePort: '30003'
                tunnelNodePort: '30004'
            enabled: false
            iptables-manager:
              enabled: true
              mode: external
        etcd:
          endpointIps: '10.203.99.101,10.203.99.102,10.203.99.103'
          monitoring: true
          port: 2379
          tlsEnable: true
        events:
          enabled: true
          ruler:
            enabled: true
            replicas: 2
        gatekeeper:
          enabled: false
        logging:
          containerruntime: containerd
          enabled: true
          logsidecar:
            enabled: true
            replicas: 2
        metrics_server:
          enabled: true
        monitoring:
          gpu:
            nvidia_dcgm_exporter:
              enabled: false
          node_exporter:
            port: 9100
          storageClass: ''
        multicluster:
          clusterRole: none
        network:
          ippool:
            type: calico
          networkpolicy:
            enabled: true
          topology:
            type: weave-scope
        openpitrix:
          store:
            enabled: false
        persistence:
          storageClass: ''
        servicemesh:
          enabled: true
          istio:
            components:
              cni:
                enabled: false
              ingressGateways:
              - enabled: false
                name: istio-ingressgateway
        terminal:
          timeout: 600
      status:
        alerting:
          enabledTime: '2023-12-26T14:55:05CST'
          status: enabled
        auditing:
          enabledTime: '2023-12-26T14:42:15CST'
          status: enabled
        clusterId: 9563212c-01b3-48a0-8aec-74b230735b61-1703573825
        core:
          enabledTime: '2023-12-26T14:41:44CST'
          status: enabled
          version: v3.4.1
        events:
          enabledTime: '2023-12-26T14:42:19CST'
          ruler:
            enabledTime: '2023-12-26T14:42:20CST'
            status: enabled
          status: enabled
        fluentbit:
          enabledTime: '2023-12-26T14:40:31CST'
          status: enabled
        logging:
          enabledTime: '2023-12-26T14:42:20CST'
          status: enabled
        metricsServer:
          enabledTime: '2023-12-26T14:38:28CST'
          status: enabled
        minio:
          enabledTime: '2023-12-26T14:40:18CST'
          status: enabled
        monitoring:
          enabledTime: '2023-12-26T14:54:59CST'
          status: enabled
        network:
          topology:
            enabledTime: '2023-12-26T14:41:58CST'
            status: enabled
        openldap:
          enabledTime: '2023-12-26T14:38:59CST'
          status: enabled
        servicemesh:
          enabledTime: '2023-12-26T14:43:12CST'
          status: enabled

      DehaoCheng

      The kubesphere-v3.4.1.yaml configuration is as follows:

      apiVersion: kubekey.kubesphere.io/v1alpha2
      kind: Cluster
      metadata:
        name: sample
      spec:
        hosts:
        - {name: k8s-master01, address: 10.203.99.101, internalAddress: 10.203.99.101, user: root, password: "yoening@123"}
        - {name: k8s-master02, address: 10.203.99.102, internalAddress: 10.203.99.102, user: root, password: "yoening@123"}
        - {name: k8s-master03, address: 10.203.99.103, internalAddress: 10.203.99.103, user: root, password: "yoening@123"}
        - {name: k8s-node01, address: 10.203.99.104, internalAddress: 10.203.99.104, user: root, password: "yoening@123"}
        - {name: k8s-node02, address: 10.203.99.105, internalAddress: 10.203.99.105, user: root, password: "yoening@123"}
        - {name: k8s-node03, address: 10.203.99.106, internalAddress: 10.203.99.106, user: root, password: "yoening@123"}
        roleGroups:
          etcd:
          - k8s-master01
          - k8s-master02
          - k8s-master03
          control-plane:
          - k8s-master01
          - k8s-master02
          - k8s-master03
          worker:
          - k8s-node01
          - k8s-node02
          - k8s-node03
        controlPlaneEndpoint:
          ## Internal loadbalancer for apiservers
          ## internalLoadbalancer: haproxy
          domain: lb.kubesphere.local
          address: 10.203.99.107
          port: 16443
        kubernetes:
          version: v1.26.5
          clusterName: cluster.local
          autoRenewCerts: true
          containerManager: containerd
        etcd:
          type: kubekey
        network:
          plugin: calico
          kubePodsCIDR: 172.17.64.0/18
          kubeServiceCIDR: 172.17.0.0/18
          ## multus support. https://github.com/k8snetworkplumbingwg/multus-cni
          multusCNI:
            enabled: false
        registry:
          privateRegistry: ""
          namespaceOverride: ""
          registryMirrors: []
          insecureRegistries: []
        addons: []

      ---

      apiVersion: installer.kubesphere.io/v1alpha1
      kind: ClusterConfiguration
      metadata:
        name: ks-installer
        namespace: kubesphere-system
        labels:
          version: v3.4.1
      spec:
        persistence:
          storageClass: ""
        authentication:
          jwtSecret: ""
        local_registry: ""
        # dev_tag: ""
        etcd:
          monitoring: true
          endpointIps: localhost
          port: 2379
          tlsEnable: true
        common:
          core:
            console:
              enableMultiLogin: true
              port: 30880
              type: NodePort
          # apiserver:
          #   resources: {}
          # controllerManager:
          #   resources: {}
          redis:
            enabled: false
            enableHA: false
            volumeSize: 2Gi
          openldap:
            enabled: false
            volumeSize: 2Gi
          minio:
            volumeSize: 20Gi
          monitoring:
            # type: external
            endpoint: http://prometheus-operated.kubesphere-monitoring-system.svc:9090
            GPUMonitoring:
              enabled: false
          gpu:
            kinds:
            - resourceName: "nvidia.com/gpu"
              resourceType: "GPU"
              default: true
          es:
            # master:
            #   volumeSize: 4Gi
            #   replicas: 1
            #   resources: {}
            # data:
            #   volumeSize: 20Gi
            #   replicas: 1
            #   resources: {}
            enabled: true
            logMaxAge: 7
            elkPrefix: dev
            basicAuth:
              enabled: false
              username: ""
              password: ""
            externalElasticsearchHost: "10.203.98.104"
            externalElasticsearchPort: "9200"
          opensearch:
            # master:
            #   volumeSize: 4Gi
            #   replicas: 1
            #   resources: {}
            # data:
            #   volumeSize: 20Gi
            #   replicas: 1
            #   resources: {}
            enabled: false
            logMaxAge: 7
            opensearchPrefix: whizard
            basicAuth:
              enabled: true
              username: "admin"
              password: "admin"
            externalOpensearchHost: ""
            externalOpensearchPort: ""
            dashboard:
              enabled: false
        alerting:
          enabled: true
          # thanosruler:
          #   replicas: 1
          #   resources: {}
        auditing:
          enabled: true
          # operator:
          #   resources: {}
          # webhook:
          #   resources: {}
        devops:
          enabled: true
          jenkinsCpuReq: 0.5
          jenkinsCpuLim: 1
          jenkinsMemoryReq: 4Gi
          jenkinsMemoryLim: 4Gi
          jenkinsVolumeSize: 16Gi
        events:
          enabled: true
          # operator:
          #   resources: {}
          # exporter:
          #   resources: {}
          ruler:
            enabled: true
            replicas: 2
            # resources: {}
        logging:
          enabled: true
          containerruntime: containerd
          logsidecar:
            enabled: true
            replicas: 2
            # resources: {}
        metrics_server:
          enabled: true
        monitoring:
          storageClass: ""
          node_exporter:
            port: 9100
            # resources: {}
          # kube_rbac_proxy:
          #   resources: {}
          # kube_state_metrics:
          #   resources: {}
          # prometheus:
          #   replicas: 1
          #   volumeSize: 20Gi
          #   resources: {}
          #   operator:
          #     resources: {}
          # alertmanager:
          #   replicas: 1
          #   resources: {}
          # notification_manager:
          #   resources: {}
          #   operator:
          #     resources: {}
          #   proxy:
          #     resources: {}
          gpu:
            nvidia_dcgm_exporter:
              enabled: false
              # resources: {}
        multicluster:
          clusterRole: none
        network:
          networkpolicy:
            enabled: true
          ippool:
            type: calico
          topology:
            type: weave-scope
        openpitrix:
          store:
            enabled: false
        servicemesh:
          enabled: true
          istio:
            components:
              ingressGateways:
              - name: istio-ingressgateway
                enabled: false
              cni:
                enabled: false
        edgeruntime:
          enabled: false
          kubeedge:
            enabled: false
            cloudCore:
              cloudHub:
                advertiseAddress:
                - ""
              service:
                cloudhubNodePort: "30000"
                cloudhubQuicNodePort: "30001"
                cloudhubHttpsNodePort: "30002"
                cloudstreamNodePort: "30003"
                tunnelNodePort: "30004"
              # resources: {}
              # hostNetWork: false
            iptables-manager:
              enabled: true
              mode: "external"
              # resources: {}
            # edgeService:
            #   resources: {}
        gatekeeper:
          enabled: false
          # controller_manager:
          #   resources: {}
          # audit:
          #   resources: {}
        terminal:
          timeout: 600

      DehaoCheng

      The kubesphere-config ConfigMap is as follows:

      [root@k8s-master01 kubekey]# kubectl get configmap kubesphere-config -n kubesphere-system -o yaml
      apiVersion: v1
      data:
        kubesphere.yaml: |
          authentication:
            authenticateRateLimiterMaxTries: 10
            authenticateRateLimiterDuration: 10m0s
            loginHistoryRetentionPeriod: 168h
            maximumClockSkew: 10s
            multipleLogin: True
            kubectlImage: kubesphere/kubectl:v1.22.0
            jwtSecret: "1W9EADsHAmqMJ8szthOeXPiTQ51KfHfh"
            oauthOptions:
              clients:
              - name: kubesphere
                secret: kubesphere
                redirectURIs:
                - '*'
          ldap:
            host: openldap.kubesphere-system.svc:389
            managerDN: cn=admin,dc=kubesphere,dc=io
            managerPassword: admin
            userSearchBase: ou=Users,dc=kubesphere,dc=io
            groupSearchBase: ou=Groups,dc=kubesphere,dc=io
          s3:
            endpoint: http://minio.kubesphere-system.svc:9000
            region: us-east-1
            disableSSL: True
            forcePathStyle: True
            accessKeyID: openpitrixminioaccesskey
            secretAccessKey: openpitrixminiosecretkey
            bucket: s2i-binaries
          network:
            enableNetworkPolicy: true
            ippoolType: calico
            weaveScopeHost: weave-scope-app.weave
          devops:
            host: http://devops-jenkins.kubesphere-devops-system.svc/
            username: admin
            password: eyJhbGciOiJIUzI1NiIsInR5cCI6IkpXVCJ9.eyJlbWFpbCI6ImFkbWluQGt1YmVzcGhlcmUuaW8iLCJ1c2VybmFtZSI6ImFkbWluIiwidG9rZW5fdHlwZSI6InN0YXRpY190b2tlbiJ9.AVHKZgFyYk_DQAV2rgPybZqnXJBs5EyVgQuNrg5SxWI
            maxConnections: 100
            endpoint: http://devops-apiserver.kubesphere-devops-system:9090
          servicemesh:
            istioPilotHost: http://istiod.istio-system.svc:8080/version
            jaegerQueryHost: http://jaeger-query.istio-system.svc:16686
            servicemeshPrometheusHost: http://prometheus-k8s.kubesphere-monitoring-system.svc:9090
            kialiQueryHost: http://kiali.istio-system:20001
          multicluster:
            clusterRole: none
          monitoring:
            endpoint: http://prometheus-operated.kubesphere-monitoring-system.svc:9090
            enableGPUMonitoring: false
          gpu:
            kinds:
            - resourceName: nvidia.com/gpu
              resourceType: GPU
              default: True
          notification:
            endpoint: http://notification-manager-svc.kubesphere-monitoring-system.svc:19093
          logging:
            host: http://10.203.98.104:9200
            basicAuth: True
            username: "admin"
            password: "admin"
            indexPrefix: ks-whizard-logging
          events:
            host: http://10.203.98.104:9200
            basicAuth: True
            username: "admin"
            password: "admin"
            indexPrefix: ks-whizard-events
          auditing:
            enable: true
            webhookURL: https://kube-auditing-webhook-svc.kubesphere-logging-system.svc:6443/audit/webhook/event
            host: http://10.203.98.104:9200
            basicAuth: True
            username: "admin"
            password: "admin"
            indexPrefix: ks-whizard-auditing
          alerting:
            prometheusEndpoint: http://prometheus-operated.kubesphere-monitoring-system.svc:9090
            thanosRulerEndpoint: http://thanos-ruler-operated.kubesphere-monitoring-system.svc:10902
            thanosRuleResourceLabels: thanos-ruler=kubesphere,role=alert-rules
          terminal:
            image: alpine:3.14
            timeout: 600
          gateway:
            watchesPath: /var/helm-charts/watches.yaml
            repository: kubesphere/nginx-ingress-controller
            tag: v1.3.1
            namespace: kubesphere-controls-system
      kind: ConfigMap
      metadata:
        creationTimestamp: "2023-12-26T06:55:17Z"
        name: kubesphere-config
        namespace: kubesphere-system
        resourceVersion: "7586"
        uid: 0c183d51-ae2e-4e2c-bc71-75878a08b2e9

      DehaoCheng

      Also, my external ES has no username or password configured; port 9200 can be accessed without authentication.

      logging:
        host: http://10.203.98.104:9200
        basicAuth: True
        username: "admin"
        password: "admin"
        indexPrefix: ks-whizard-logging
      events:
        host: http://10.203.98.104:9200
        basicAuth: True
        username: "admin"
        password: "admin"
        indexPrefix: ks-whizard-events
      auditing:
        enable: true
        webhookURL: https://kube-auditing-webhook-svc.kubesphere-logging-system.svc:6443/audit/webhook/event
        host: http://10.203.98.104:9200
        basicAuth: True
        username: "admin"
        password: "admin"
        indexPrefix: ks-whizard-auditing

      Please delete these entries from kubesphere-config. Right now they still carry the default OpenSearch settings, and the ES configuration was never loaded. Change them to something like:

      
      events:
        host: http://xxx:9200
        indexPrefix: ks-dev-events

      That will do; adjust the other sections the same way, then restart ks-apiserver.
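      For instance (a sketch of the workflow; adjust to your setup), and since the external ES has no authentication, the basicAuth/username/password lines can simply be dropped from those sections:

      kubectl -n kubesphere-system edit cm kubesphere-config
      kubectl -n kubesphere-system rollout restart deploy ks-apiserver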

        Then check the Output under the custom resource definitions (CRDs) and review its configuration; if the corresponding fields are set there, they need to be removed as well. If you are not sure, post it here and we can take a look.
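        A sketch of how to pull up those resources, assuming they are the Fluent Bit Output objects (group logging.kubesphere.io) in the kubesphere-logging-system namespace and that the Elasticsearch one is named es:

        kubectl -n kubesphere-logging-system get outputs.logging.kubesphere.io
        kubectl -n kubesphere-logging-system get output es -o yaml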

        DehaoCheng

        I also ran kubectl -n kubesphere-system rollout restart deploy ks-apiserver

        After the restart there is an additional es pod, as shown in the image below:

        However, no new index has been created in ES (I changed the indexPrefix in the config), and there are still no logs in KubeSphere.

        Looking forward to your reply.
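        One quick way to verify whether Fluent Bit is writing to the external ES at all is to list its indices and check which prefixes actually exist (a sketch using the host from the configuration above):

        curl http://10.203.98.104:9200/_cat/indices?v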