• Installation & Deployment
  • KubeSphere 3.4: "unable to sign certificate: must specify a CommonName"

Installed by following the installation and deployment manual:

Deployment manual

config-sample.yaml

apiVersion: kubekey.kubesphere.io/v1alpha2
kind: Cluster
metadata:
  name: sample
spec:
  hosts:
  - {name: rs-master-174-01, address: 192.168.50.174, internalAddress: 192.168.50.174, user: root, password: "123456"}
  - {name: rs-master-175-02, address: 192.168.50.175, internalAddress: 192.168.50.175, user: root, password: "123456"}
  - {name: rs-master-176-03, address: 192.168.50.176, internalAddress: 192.168.50.176, user: root, password: "123456"}
  - {name: rs-node-177-01, address: 192.168.50.177, internalAddress: 192.168.50.177, user: root, password: "123456"}
  - {name: rs-node-178-02, address: 192.168.50.178, internalAddress: 192.168.50.178, user: root, password: "123456"}
  - {name: rs-node-179-03, address: 192.168.50.179, internalAddress: 192.168.50.179, user: root, password: "123456"}
  - {name: devops-180, address: 192.168.50.180, internalAddress: 192.168.50.180, user: root, password: "123456"}
  roleGroups:
    etcd:
    - rs-master-174-01
    - rs-master-175-02
    - rs-master-176-03
    control-plane: 
    - rs-master-174-01
    - rs-master-175-02
    - rs-master-176-03
    worker:
    - rs-node-177-01
    - rs-node-178-02
    - rs-node-179-03
    registry:
    - devops-180
  controlPlaneEndpoint:
    ## Internal loadbalancer for apiservers 
    internalLoadbalancer: haproxy


    domain: lb.kubesphere.local
    address: ""
    port: 6443
  kubernetes:
    version: v1.23.15
    clusterName: cluster.local
    autoRenewCerts: true
    containerManager: docker
  etcd:
    type: kubekey
  network:
    plugin: calico
    kubePodsCIDR: 10.233.64.0/18
    kubeServiceCIDR: 10.233.0.0/18
    ## multus support. https://github.com/k8snetworkplumbingwg/multus-cni
    multusCNI:
      enabled: false
  registry:
    type: "harbor"
    #auths:
    #  "dockerhub.kubekey.local":
    #    username: admin
    #    password: Harbor12345
    #privateRegistry: "dockerhub.kubekey.local"
    #namespaceOverride: "kubesphereio"
    privateRegistry: ""
    namespaceOverride: ""
    registryMirrors: []
    insecureRegistries: []
  addons: []



---
apiVersion: installer.kubesphere.io/v1alpha1
kind: ClusterConfiguration
metadata:
  name: ks-installer
  namespace: kubesphere-system
  labels:
    version: v3.4.0
spec:
  persistence:
    storageClass: ""
  authentication:
    jwtSecret: ""
  zone: ""
  local_registry: ""
  namespace_override: ""
  # dev_tag: ""
  etcd:
    monitoring: false
    endpointIps: localhost
    port: 2379
    tlsEnable: true
  common:
    core:
      console:
        enableMultiLogin: true
        port: 30880
        type: NodePort
    # apiserver:
    #  resources: {}
    # controllerManager:
    #  resources: {}
    redis:
      enabled: false
      enableHA: false
      volumeSize: 2Gi
    openldap:
      enabled: false
      volumeSize: 2Gi
    minio:
      volumeSize: 20Gi
    monitoring:
      # type: external
      endpoint: http://prometheus-operated.kubesphere-monitoring-system.svc:9090
      GPUMonitoring:
        enabled: false
    gpu:
      kinds:
      - resourceName: "nvidia.com/gpu"
        resourceType: "GPU"
        default: true
    es:
      # master:
      #   volumeSize: 4Gi
      #   replicas: 1
      #   resources: {}
      # data:
      #   volumeSize: 20Gi
      #   replicas: 1
      #   resources: {}
      logMaxAge: 7
      elkPrefix: logstash
      basicAuth:
        enabled: false
        username: ""
        password: ""
      externalElasticsearchHost: ""
      externalElasticsearchPort: ""
    opensearch:
      # master:
      #   volumeSize: 4Gi
      #   replicas: 1
      #   resources: {}
      # data:
      #   volumeSize: 20Gi
      #   replicas: 1
      #   resources: {}
      enabled: true
      logMaxAge: 7
      opensearchPrefix: whizard
      basicAuth:
        enabled: true
        username: "admin"
        password: "admin"
      externalOpensearchHost: ""
      externalOpensearchPort: ""
      dashboard:
        enabled: false
  alerting:
    enabled: false
    # thanosruler:
    #   replicas: 1
    #   resources: {}
  auditing:
    enabled: false
    # operator:
    #   resources: {}
    # webhook:
    #   resources: {}
  devops:
    enabled: false
    jenkinsCpuReq: 0.5
    jenkinsCpuLim: 1
    jenkinsMemoryReq: 4Gi
    jenkinsMemoryLim: 4Gi
    jenkinsVolumeSize: 16Gi
  events:
    enabled: false
    # operator:
    #   resources: {}
    # exporter:
    #   resources: {}
    # ruler:
    #   enabled: true
    #   replicas: 2
    #   resources: {}
  logging:
    enabled: false
    logsidecar:
      enabled: true
      replicas: 2
      # resources: {}
  metrics_server:
    enabled: false
  monitoring:
    storageClass: ""
    node_exporter:
      port: 9100
      # resources: {}
    # kube_rbac_proxy:
    #   resources: {}
    # kube_state_metrics:
    #   resources: {}
    # prometheus:
    #   replicas: 1
    #   volumeSize: 20Gi
    #   resources: {}
    #   operator:
    #     resources: {}
    # alertmanager:
    #   replicas: 1
    #   resources: {}
    # notification_manager:
    #   resources: {}
    #   operator:
    #     resources: {}
    #   proxy:
    #     resources: {}
    gpu:
      nvidia_dcgm_exporter:
        enabled: false
        # resources: {}
  multicluster:
    clusterRole: none
  network:
    networkpolicy:
      enabled: false
    ippool:
      type: none
    topology:
      type: none
  openpitrix:
    store:
      enabled: false
  servicemesh:
    enabled: false
    istio:
      components:
        ingressGateways:
        - name: istio-ingressgateway
          enabled: false
        cni:
          enabled: false
  edgeruntime:
    enabled: false
    kubeedge:
      enabled: false
      cloudCore:
        cloudHub:
          advertiseAddress:
            - ""
        service:
          cloudhubNodePort: "30000"
          cloudhubQuicNodePort: "30001"
          cloudhubHttpsNodePort: "30002"
          cloudstreamNodePort: "30003"
          tunnelNodePort: "30004"
        # resources: {}
        # hostNetWork: false
      iptables-manager:
        enabled: true
        mode: "external"
        # resources: {}
      # edgeService:
      #   resources: {}
  gatekeeper:
    enabled: false
    # controller_manager:
    #   resources: {}
    # audit:
    #   resources: {}
  terminal:
    timeout: 600

Installation error log

11:16:46 UTC success: [rs-node-178-02]
11:16:46 UTC success: [rs-node-177-01]
11:16:46 UTC success: [rs-master-174-01]
11:16:46 UTC success: [rs-node-179-03]
11:16:46 UTC success: [rs-master-175-02]
11:16:46 UTC success: [rs-master-176-03]
11:16:46 UTC success: [devops-180]
11:16:46 UTC [ConfigureOSModule] configure the ntp server for each node
11:16:46 UTC skipped: [rs-node-179-03]
11:16:46 UTC skipped: [rs-master-174-01]
11:16:46 UTC skipped: [rs-master-175-02]
11:16:46 UTC skipped: [rs-master-176-03]
11:16:46 UTC skipped: [devops-180]
11:16:46 UTC skipped: [rs-node-177-01]
11:16:46 UTC skipped: [rs-node-178-02]
11:16:46 UTC [InitRegistryModule] Fetch registry certs
11:16:46 UTC success: [devops-180]
11:16:46 UTC [InitRegistryModule] Generate registry Certs
[certs] Using existing ca certificate authority
11:16:46 UTC message: [LocalHost]
unable to sign certificate: must specify a CommonName
11:16:46 UTC failed: [LocalHost]
error: Pipeline[InitRegistryPipeline] execute failed: Module[InitRegistryModule] exec failed: 
failed: [LocalHost] [GenerateRegistryCerts] exec failed after 1 retries: unable to sign certificate: must specify a CommonName

At which step is the CommonName specified?

6 days later

What was the solution? I've hit the same problem.

Uncomment the commented-out lines in the registry section, set privateRegistry to dockerhub.kubekey.local, and then rerun the script. Just tested this and it works.
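
For reference, the registry section with those lines uncommented would look like this (values taken from the commented-out block in the config above). The registry certificate's CommonName appears to be derived from the privateRegistry domain, which would explain why leaving privateRegistry empty makes the signing step fail:

  registry:
    type: "harbor"
    auths:
      "dockerhub.kubekey.local":
        username: admin
        password: Harbor12345
    privateRegistry: "dockerhub.kubekey.local"
    namespaceOverride: "kubesphereio"
    registryMirrors: []
    insecureRegistries: []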

    Solved. In my case it turned out to be a DNS issue, though; once I fixed DNS it worked.
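
    If DNS turns out to be the problem, one common workaround is to map the registry domain to the registry node's IP in /etc/hosts on every node before rerunning kk. A minimal sketch, assuming devops-180 (192.168.50.180) is the registry node, as in the config above:

    # append to /etc/hosts on every node; the IP here is this thread's registry node
    192.168.50.180  dockerhub.kubekey.local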


      Modular-sys

      Could someone help me spot what's wrong here?
      My configuration:

      apiVersion: kubekey.kubesphere.io/v1alpha2
      kind: Cluster
      metadata:
        name: sample
      spec:
        hosts:
        - {name: etspst001-hp-compaq-8000-elite-cmt-pc, address: 192.168.1.3, internalAddress: 192.168.1.3, user: ets-pst-001, password: "pwd"}
        - {name: etspst002-bm6660-bm6360, address: 192.168.1.5, internalAddress: 192.168.1.5, user: ets-pst-002, password: "pwd"}
        - {name: harbor-poc, address: 172.20.192.55, internalAddress: 172.20.192.55, user: h00283, password: "pwd"}
        roleGroups:
          etcd:
          - etspst001-hp-compaq-8000-elite-cmt-pc
          control-plane:
          - etspst001-hp-compaq-8000-elite-cmt-pc
          worker:
          - etspst001-hp-compaq-8000-elite-cmt-pc
          - harbor-poc
          registry:
          - etspst002-bm6660-bm6360
        controlPlaneEndpoint:
          ## Internal loadbalancer for apiservers
          # internalLoadbalancer: haproxy
      
          domain: lb.kubesphere.local
          address: ""
          port: 6443
        kubernetes:
          version: v1.28.12
          clusterName: cluster.local
          autoRenewCerts: true
          containerManager: containerd
        etcd:
          type: kubekey
        network:
          plugin: calico
          kubePodsCIDR: 10.233.64.0/18
          kubeServiceCIDR: 10.233.0.0/18
          ## multus support. https://github.com/k8snetworkplumbingwg/multus-cni
          multusCNI:
            enabled: false
        registry:
          type: harbor
          auths:
            "dockerhub.kubekey.local":
              username: admin
              password: Harbor12345
          privateRegistry: "dockerhub.kubekey.local"
          namespaceOverride: "kubesphereio"
          registryMirrors: []
          insecureRegistries: []
        addons: []

      Error:

      ets-pst-001@etspst001-hp-compaq-8000-elite-cmt-pc:~/ks$ ./kk init registry -f config-sample.yaml -a kubesphere.tar.gz
      
      
       _   __      _          _   __
      | | / /     | |        | | / /
      | |/ / _   _| |__   ___| |/ /  ___ _   _
      |    \| | | | '_ \ / _ \    \ / _ \ | | |
      | |\  \ |_| | |_) |  __/ |\  \  __/ |_| |
      \_| \_/\__,_|_.__/ \___\_| \_/\___|\__, |
                                          __/ |
                                         |___/
      
      16:29:24 CST [GreetingsModule] Greetings
      16:29:24 CST message: [etspst002-bm6660-bm6360]
      Greetings, KubeKey!
      16:29:24 CST message: [etspst001-hp-compaq-8000-elite-cmt-pc]
      Greetings, KubeKey!
      16:29:29 CST message: [harbor-poc]
      Greetings, KubeKey!
      16:29:29 CST success: [etspst002-bm6660-bm6360]
      16:29:29 CST success: [etspst001-hp-compaq-8000-elite-cmt-pc]
      16:29:29 CST success: [harbor-poc]
      16:29:29 CST [UnArchiveArtifactModule] Check the KubeKey artifact md5 value
      16:30:00 CST success: [LocalHost]
      16:30:00 CST [UnArchiveArtifactModule] UnArchive the KubeKey artifact
      16:30:00 CST skipped: [LocalHost]
      16:30:00 CST [UnArchiveArtifactModule] Create the KubeKey artifact Md5 file
      16:30:00 CST skipped: [LocalHost]
      16:30:00 CST [RegistryPackageModule] Download registry package
      16:30:00 CST message: [localhost]
      downloading amd64 harbor v2.10.1  ...
      16:30:05 CST message: [localhost]
      downloading amd64 docker 24.0.9  ...
      16:30:05 CST message: [localhost]
      downloading amd64 compose v2.26.1  ...
      16:30:06 CST success: [LocalHost]
      16:30:06 CST [ConfigureOSModule] Get OS release
      16:30:06 CST success: [harbor-poc]
      16:30:06 CST success: [etspst002-bm6660-bm6360]
      16:30:06 CST success: [etspst001-hp-compaq-8000-elite-cmt-pc]
      16:30:06 CST [ConfigureOSModule] Prepare to init OS
      16:30:08 CST success: [etspst002-bm6660-bm6360]
      16:30:08 CST success: [etspst001-hp-compaq-8000-elite-cmt-pc]
      16:30:08 CST success: [harbor-poc]
      16:30:08 CST [ConfigureOSModule] Generate init os script
      16:30:08 CST success: [harbor-poc]
      16:30:08 CST success: [etspst002-bm6660-bm6360]
      16:30:08 CST success: [etspst001-hp-compaq-8000-elite-cmt-pc]
      16:30:08 CST [ConfigureOSModule] Exec init os script
      16:30:10 CST stdout: [etspst002-bm6660-bm6360]
      net.ipv4.conf.default.rp_filter = 0
      net.ipv4.conf.all.rp_filter = 0
      net.ipv4.ip_forward = 1
      net.bridge.bridge-nf-call-arptables = 1
      net.bridge.bridge-nf-call-ip6tables = 1
      net.bridge.bridge-nf-call-iptables = 1
      net.ipv4.ip_local_reserved_ports = 30000-32767
      net.core.netdev_max_backlog = 65535
      net.core.rmem_max = 33554432
      net.core.wmem_max = 33554432
      net.core.somaxconn = 32768
      net.ipv4.tcp_max_syn_backlog = 1048576
      net.ipv4.neigh.default.gc_thresh1 = 512
      net.ipv4.neigh.default.gc_thresh2 = 2048
      net.ipv4.neigh.default.gc_thresh3 = 4096
      net.ipv4.tcp_retries2 = 15
      net.ipv4.tcp_max_tw_buckets = 1048576
      net.ipv4.tcp_max_orphans = 65535
      net.ipv4.tcp_keepalive_time = 600
      net.ipv4.tcp_keepalive_intvl = 30
      net.ipv4.tcp_keepalive_probes = 10
      net.ipv4.udp_rmem_min = 131072
      net.ipv4.udp_wmem_min = 131072
      net.ipv4.conf.all.arp_accept = 1
      net.ipv4.conf.default.arp_accept = 1
      net.ipv4.conf.all.arp_ignore = 1
      net.ipv4.conf.default.arp_ignore = 1
      vm.max_map_count = 262144
      vm.swappiness = 0
      vm.overcommit_memory = 0
      fs.inotify.max_user_instances = 524288
      fs.inotify.max_user_watches = 524288
      fs.pipe-max-size = 4194304
      fs.aio-max-nr = 262144
      kernel.pid_max = 65535
      kernel.watchdog_thresh = 5
      kernel.hung_task_timeout_secs = 5
      net.ipv6.conf.all.disable_ipv6 = 0
      net.ipv6.conf.default.disable_ipv6 = 0
      net.ipv6.conf.lo.disable_ipv6 = 0
      net.ipv6.conf.all.forwarding = 1
      16:30:13 CST stdout: [etspst001-hp-compaq-8000-elite-cmt-pc]
      net.ipv4.conf.default.rp_filter = 0
      net.ipv4.conf.all.rp_filter = 0
      net.ipv4.ip_forward = 1
      net.bridge.bridge-nf-call-arptables = 1
      net.bridge.bridge-nf-call-ip6tables = 1
      net.bridge.bridge-nf-call-iptables = 1
      net.ipv4.ip_local_reserved_ports = 30000-32767
      net.core.netdev_max_backlog = 65535
      net.core.rmem_max = 33554432
      net.core.wmem_max = 33554432
      net.core.somaxconn = 32768
      net.ipv4.tcp_max_syn_backlog = 1048576
      net.ipv4.neigh.default.gc_thresh1 = 512
      net.ipv4.neigh.default.gc_thresh2 = 2048
      net.ipv4.neigh.default.gc_thresh3 = 4096
      net.ipv4.tcp_retries2 = 15
      net.ipv4.tcp_max_tw_buckets = 1048576
      net.ipv4.tcp_max_orphans = 65535
      net.ipv4.udp_rmem_min = 131072
      net.ipv4.udp_wmem_min = 131072
      net.ipv4.conf.all.arp_accept = 1
      net.ipv4.conf.default.arp_accept = 1
      net.ipv4.conf.all.arp_ignore = 1
      net.ipv4.conf.default.arp_ignore = 1
      vm.max_map_count = 262144
      vm.swappiness = 0
      vm.overcommit_memory = 0
      fs.inotify.max_user_instances = 524288
      fs.inotify.max_user_watches = 524288
      fs.pipe-max-size = 4194304
      fs.aio-max-nr = 262144
      kernel.pid_max = 65535
      kernel.watchdog_thresh = 5
      kernel.hung_task_timeout_secs = 5
      net.ipv4.tcp_keepalive_time = 600
      net.ipv4.tcp_keepalive_intvl = 30
      net.ipv4.tcp_keepalive_probes = 10
      net.ipv6.conf.all.disable_ipv6 = 0
      net.ipv6.conf.default.disable_ipv6 = 0
      net.ipv6.conf.lo.disable_ipv6 = 0
      net.ipv6.conf.all.forwarding = 1
      16:30:16 CST stdout: [harbor-poc]
      net.ipv4.conf.default.rp_filter = 0
      net.ipv4.conf.all.rp_filter = 0
      net.ipv4.ip_forward = 1
      net.ipv6.conf.all.disable_ipv6 = 1
      net.ipv6.conf.default.disable_ipv6 = 1
      vm.max_map_count = 262144
      net.bridge.bridge-nf-call-iptables = 1
      net.bridge.bridge-nf-call-ip6tables = 1
      net.bridge.bridge-nf-call-arptables = 1
      net.ipv4.ip_local_reserved_ports = 30000-32767
      net.core.netdev_max_backlog = 65535
      net.core.rmem_max = 33554432
      net.core.wmem_max = 33554432
      net.core.somaxconn = 32768
      net.ipv4.tcp_max_syn_backlog = 1048576
      net.ipv4.neigh.default.gc_thresh1 = 512
      net.ipv4.neigh.default.gc_thresh2 = 2048
      net.ipv4.neigh.default.gc_thresh3 = 4096
      net.ipv4.tcp_retries2 = 15
      net.ipv4.tcp_max_tw_buckets = 1048576
      net.ipv4.tcp_max_orphans = 65535
      net.ipv4.tcp_keepalive_time = 600
      net.ipv4.tcp_keepalive_intvl = 30
      net.ipv4.tcp_keepalive_probes = 10
      net.ipv4.udp_rmem_min = 131072
      net.ipv4.udp_wmem_min = 131072
      net.ipv4.conf.all.arp_accept = 1
      net.ipv4.conf.default.arp_accept = 1
      net.ipv4.conf.all.arp_ignore = 1
      net.ipv4.conf.default.arp_ignore = 1
      vm.swappiness = 0
      vm.overcommit_memory = 0
      fs.inotify.max_user_instances = 524288
      fs.inotify.max_user_watches = 524288
      fs.pipe-max-size = 4194304
      fs.aio-max-nr = 262144
      kernel.pid_max = 65535
      kernel.watchdog_thresh = 5
      kernel.hung_task_timeout_secs = 5
      net.ipv6.conf.all.disable_ipv6 = 0
      net.ipv6.conf.default.disable_ipv6 = 0
      net.ipv6.conf.lo.disable_ipv6 = 0
      net.ipv6.conf.all.forwarding = 1
      16:30:16 CST success: [etspst002-bm6660-bm6360]
      16:30:16 CST success: [etspst001-hp-compaq-8000-elite-cmt-pc]
      16:30:16 CST success: [harbor-poc]
      16:30:16 CST [ConfigureOSModule] configure the ntp server for each node
      16:30:16 CST skipped: [etspst002-bm6660-bm6360]
      16:30:16 CST skipped: [etspst001-hp-compaq-8000-elite-cmt-pc]
      16:30:16 CST skipped: [harbor-poc]
      16:30:16 CST [InitRegistryModule] Fetch registry certs
      16:30:17 CST success: [etspst002-bm6660-bm6360]
      16:30:17 CST [InitRegistryModule] Generate registry Certs
      [certs] Using existing ca certificate authority
      [certs] Using existing dockerhub.kubekey.local certificate and key on disk
      16:30:17 CST success: [LocalHost]
      16:30:17 CST [InitRegistryModule] Synchronize certs file
      16:30:18 CST success: [etspst002-bm6660-bm6360]
      16:30:18 CST [InitRegistryModule] Synchronize certs file to all nodes
      16:30:21 CST success: [etspst002-bm6660-bm6360]
      16:30:21 CST success: [etspst001-hp-compaq-8000-elite-cmt-pc]
      16:30:21 CST success: [harbor-poc]
      16:30:21 CST [InstallRegistryModule] Sync docker binaries
      16:30:21 CST skipped: [etspst002-bm6660-bm6360]
      16:30:21 CST [InstallRegistryModule] Generate containerd service
      16:30:21 CST skipped: [etspst002-bm6660-bm6360]
      16:30:21 CST [InstallRegistryModule] Generate docker service
      16:30:21 CST skipped: [etspst002-bm6660-bm6360]
      16:30:21 CST [InstallRegistryModule] Generate docker config
      16:30:21 CST skipped: [etspst002-bm6660-bm6360]
      16:30:21 CST [InstallRegistryModule] Enable containerd
      16:30:21 CST skipped: [etspst002-bm6660-bm6360]
      16:30:21 CST [InstallRegistryModule] Enable docker
      16:30:21 CST skipped: [etspst002-bm6660-bm6360]
      16:30:21 CST [InstallRegistryModule] Install docker compose
      16:30:26 CST success: [etspst002-bm6660-bm6360]
      16:30:26 CST [InstallRegistryModule] Sync harbor package
      16:31:13 CST success: [etspst002-bm6660-bm6360]
      16:31:13 CST [InstallRegistryModule] Generate harbor service
      16:31:13 CST success: [etspst002-bm6660-bm6360]
      16:31:13 CST [InstallRegistryModule] Generate harbor config
      16:31:13 CST success: [etspst002-bm6660-bm6360]
      16:31:13 CST [InstallRegistryModule] start harbor
      16:31:23 CST message: [etspst002-bm6660-bm6360]
      start harbor failed: Failed to exec command: sudo -E /bin/bash -c "cd /opt/harbor && chmod +x install.sh && export PATH=$PATH:/usr/local/bin; ./install.sh --with-trivy && systemctl daemon-reload && systemctl enable harbor && systemctl restart harbor"
      [Step 0]: checking if docker is installed ...
      
      Note: docker version: 24.0.7
      
      [Step 1]: checking docker-compose is installed ...
      
      Note: docker-compose version: 2.26.1
      
      [Step 2]: loading Harbor images ...
      Loaded image: goharbor/harbor-portal:v2.10.1
      Loaded image: goharbor/harbor-db:v2.10.1
      Loaded image: goharbor/redis-photon:v2.10.1
      Loaded image: goharbor/nginx-photon:v2.10.1
      Loaded image: goharbor/prepare:v2.10.1
      Loaded image: goharbor/harbor-core:v2.10.1
      Loaded image: goharbor/harbor-log:v2.10.1
      Loaded image: goharbor/harbor-jobservice:v2.10.1
      Loaded image: goharbor/harbor-registryctl:v2.10.1
      Loaded image: goharbor/harbor-exporter:v2.10.1
      Loaded image: goharbor/registry-photon:v2.10.1
      Loaded image: goharbor/trivy-adapter-photon:v2.10.1
      
      
      [Step 3]: preparing environment ...
      
      [Step 4]: preparing harbor configs ...
      prepare base dir is set to /opt/harbor
      docker: Error response from daemon: error while creating mount source path '/opt/harbor/input': mkdir /opt/harbor: read-only file system.: Process exited with status 125
      16:31:23 CST retry: [etspst002-bm6660-bm6360]
      16:31:36 CST message: [etspst002-bm6660-bm6360]
      start harbor failed: Failed to exec command: sudo -E /bin/bash -c "cd /opt/harbor && chmod +x install.sh && export PATH=$PATH:/usr/local/bin; ./install.sh --with-trivy && systemctl daemon-reload && systemctl enable harbor && systemctl restart harbor"
      [... same Step 0-4 output as above, ending with the same read-only file system error ...]
      16:31:36 CST failed: [etspst002-bm6660-bm6360]
      error: Pipeline[InitRegistryPipeline] execute failed: Module[InstallRegistryModule] exec failed:
      failed: [etspst002-bm6660-bm6360] [StartHarbor] exec failed after 2 retries: start harbor failed: Failed to exec command: sudo -E /bin/bash -c "cd /opt/harbor && chmod +x install.sh && export PATH=$PATH:/usr/local/bin; ./install.sh --with-trivy && systemctl daemon-reload && systemctl enable harbor && systemctl restart harbor"
      [... same Step 0-4 output as above, ending with the same read-only file system error ...]
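
      The failing step is Harbor's prepare container trying to create the bind-mount source /opt/harbor/input, and the Docker daemon reports /opt as read-only. Note that KubeKey skipped its own docker sync steps above because Docker 24.0.7 was already installed on the registry node; if that pre-existing Docker is the snap build, its confined daemon sees host paths like /opt as read-only, which would produce exactly this error. A diagnostic sketch, assuming an Ubuntu node:

      which docker                  # /snap/bin/docker would indicate the snap build
      snap list docker 2>/dev/null  # present only if Docker came from snap
      findmnt -T /opt               # check whether /opt itself is mounted read-only
      # if the snap build is the culprit, remove it and rerun kk so KubeKey
      # installs its own Docker engine:
      #   sudo snap remove docker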
        4 months later

        Modular-sys

        Following the configuration you provided, I've run into a new problem:

        13:54:18 UTC [InitRegistryModule] Synchronize certs file to all nodes
        13:54:18 UTC message: [node1]
        scp registry certs file to /etc/docker/certs.d/ failed: Failed to exec command: sudo -E /bin/bash -c "mv -f /tmp/kubekey/etc/docker/certs.d/dockerhub.kubekey.local/ca.crt /etc/docker/certs.d/dockerhub.kubekey.local/ca.crt"
        mv: cannot stat '/tmp/kubekey/etc/docker/certs.d/dockerhub.kubekey.local/ca.crt': No such file or directory: Process exited with status 1
        13:54:19 UTC failed: [node1]
        13:54:19 UTC success: [master]
        13:54:19 UTC success: [node2]
        error: Pipeline[InitRegistryPipeline] execute failed: Module[InitRegistryModule] exec failed:
        failed: [node1] [SyncCertsFileToAllNodes] exec failed after 1 retries: scp registry certs file to /etc/docker/certs.d/ failed: Failed to exec command: sudo -E /bin/bash -c "mv -f /tmp/kubekey/etc/docker/certs.d/dockerhub.kubekey.local/ca.crt /etc/docker/certs.d/dockerhub.kubekey.local/ca.crt"
        mv: cannot stat '/tmp/kubekey/etc/docker/certs.d/dockerhub.kubekey.local/ca.crt': No such file or directory: Process exited with status 1
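
        The staged copy under /tmp/kubekey evidently never reached node1, so the mv has nothing to move; it is worth checking whether /tmp on node1 is restricted or being cleaned, and whether scp from the KubeKey host to node1 works at all. A manual workaround sketch (hostnames and paths taken from the log above; the cert did land on master and node2):

        # copy the registry CA from a node where the sync succeeded over to node1
        ssh master 'sudo cat /etc/docker/certs.d/dockerhub.kubekey.local/ca.crt' |
          ssh node1 'sudo mkdir -p /etc/docker/certs.d/dockerhub.kubekey.local &&
            sudo tee /etc/docker/certs.d/dockerhub.kubekey.local/ca.crt >/dev/null'
        # then rerun the kk command that failed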