This post describes how to load-balance kube-apiserver with keepalived + haproxy to build a highly available Kubernetes cluster.

1. Deployment architecture

  1. Cluster plan (the example cluster runs EulerOS v2.5)

IP               Role
192.168.0.100    VIP
192.168.0.61     LB (keepalived + haproxy)
192.168.0.62     LB (keepalived + haproxy)
192.168.0.81     master
192.168.0.82     master
192.168.0.83     master
192.168.0.84     worker
192.168.0.85     worker
192.168.0.86     worker
  2. Install keepalived + haproxy
yum install keepalived haproxy psmisc -y
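psmisc is on the list because it provides the killall binary that the keepalived health check below relies on. A quick sanity check (a minimal sketch using only the packages from the command above):

# Confirm all three packages installed and that killall is on the PATH,
# since keepalived's vrrp_script will run "killall -0 haproxy"
rpm -q keepalived haproxy psmisc
command -v killall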
  3. Configure haproxy (the same file works on both LB machines; be sure to replace the backend server addresses with your own)
global
    log /dev/log local0 warning
    chroot      /var/lib/haproxy
    pidfile     /var/run/haproxy.pid
    maxconn     4000
    user        haproxy
    group       haproxy
    daemon
    stats socket /var/lib/haproxy/stats

defaults
    log     global
    option  httplog
    option  dontlognull
    timeout connect 5000
    timeout client  50000
    timeout server  50000

frontend kube-apiserver
    bind *:6443
    mode tcp
    option tcplog
    default_backend kube-apiserver

backend kube-apiserver
    mode tcp
    option tcplog
    option tcp-check
    balance roundrobin
    default-server inter 10s downinter 5s rise 2 fall 2 slowstart 60s maxconn 250 maxqueue 256 weight 100
    server kube-apiserver-1 192.168.0.81:6443 check
    server kube-apiserver-2 192.168.0.82:6443 check
    server kube-apiserver-3 192.168.0.83:6443 check
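Before (re)starting the service, it can be worth validating the file; haproxy's check mode parses the configuration without touching a running process (assuming the file sits at the default path /etc/haproxy/haproxy.cfg):

# Prints "Configuration file is valid" on success, otherwise the parse errors
haproxy -c -f /etc/haproxy/haproxy.cfg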
  4. Start haproxy and enable it at boot
systemctl restart haproxy
systemctl enable haproxy
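To confirm haproxy is actually bound to the frontend port (a quick check; ss is part of iproute):

# The *:6443 listener should show up owned by a haproxy process
ss -lntp | grep 6443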
  5. Configure keepalived (fill in the commented fields to match your environment; the values differ on each machine)
global_defs {
  notification_email {
  }
  router_id LVS_DEVEL
  vrrp_skip_check_adv_addr
  vrrp_garp_interval 0
  vrrp_gna_interval 0
}

vrrp_script chk_haproxy {
  script "killall -0 haproxy"
  interval 2
  weight 2
}

vrrp_instance haproxy-vip {
  state BACKUP
  priority 100
  interface eth0                        # NIC this VRRP instance binds to
  virtual_router_id 60
  advert_int 1
  authentication {
    auth_type PASS
    auth_pass 1111
  }
  unicast_src_ip 192.168.0.61           # address of this machine
  unicast_peer {
    192.168.0.62                        # addresses of the other peer machines
  }

  virtual_ipaddress {
    192.168.0.100/24                    # the VIP
  }

  track_script {
    chk_haproxy
  }
}
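The liveness probe works because killall -0 sends signal 0, which delivers nothing but exits non-zero when no process by that name exists; while the check succeeds, keepalived raises this node's priority by weight (2), so the node actually running haproxy outbids its peer for the VIP. The probe can be tried by hand:

# Exit status 0 while haproxy runs, non-zero once it is stopped
killall -0 haproxy; echo $?
systemctl stop haproxy
killall -0 haproxy; echo $?
systemctl start haproxy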
  6. Start keepalived and enable it at boot
systemctl restart keepalived
systemctl enable keepalived
  7. Verify the keepalived + haproxy setup (a scripted version of these steps follows the list)

    (1) Run ip a s on each LB node to see which one currently holds the VIP.
    (2) Stop haproxy on the node that holds the VIP: systemctl stop haproxy
    (3) Run ip a s on each LB node again and check whether the VIP has failed over to the other node.
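Put together, the failover test looks roughly like this (a sketch; eth0 and the addresses match the configs above, run each command on the node indicated):

# On each LB node: only the holder lists the VIP on eth0
ip a s eth0 | grep 192.168.0.100

# On the node currently holding the VIP
systemctl stop haproxy

# On the other LB node: after a few advert intervals the VIP should appear here
ip a s eth0 | grep 192.168.0.100

# Bring the stopped node back when done
systemctl start haproxy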

2. Deploy the Kubernetes cluster with kubekey

  1. Create the config file
./kk create config --with-kubesphere   # generates a config file that includes KubeSphere
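Depending on the kubekey release, the Kubernetes and KubeSphere versions can also be pinned when generating the config; the flags below follow kubekey's usage of that era and should be treated as an assumption to verify against your kk build:

# Hypothetical invocation pinning the versions used later in this post
./kk create config --with-kubernetes v1.17.6 --with-kubesphere v3.0.0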
  2. Fill in the config file (set the host entries to match your machines, and set controlPlaneEndpoint.address to the VIP)
# config-example.yaml
apiVersion: kubekey.kubesphere.io/v1alpha1
kind: Cluster
metadata:
  name: config-sample
spec:
  hosts:
  - {name: node1, address: 192.168.0.81, internalAddress: 192.168.0.81, password: EulerOS@2.5}
  - {name: node2, address: 192.168.0.82, internalAddress: 192.168.0.82, password: EulerOS@2.5}
  - {name: node3, address: 192.168.0.83, internalAddress: 192.168.0.83, password: EulerOS@2.5}
  - {name: node4, address: 192.168.0.84, internalAddress: 192.168.0.84, password: EulerOS@2.5}
  - {name: node5, address: 192.168.0.85, internalAddress: 192.168.0.85, password: EulerOS@2.5}
  - {name: node6, address: 192.168.0.86, internalAddress: 192.168.0.86, password: EulerOS@2.5}
  roleGroups:
    etcd:
    - node[1:3]
    master: 
    - node[1:3]
    worker:
    - node[4:6]
  controlPlaneEndpoint:
    domain: lb.kubesphere.local
    address: 192.168.0.100                     # vip
    port: 6443
  kubernetes:
    version: v1.17.6
    imageRepo: kubesphere
    clusterName: cluster.local
  network:
    plugin: calico
    kube_pods_cidr: 10.233.64.0/18
    kube_service_cidr: 10.233.0.0/18
  registry:
    registryMirrors: []
    insecureRegistries: []
  storage:
    defaultStorageClass: localVolume
    localVolume:
      storageClassName: local


---
apiVersion: v1
data:
  ks-config.yaml: |
    ---
    local_registry: ""
    persistence:
      storageClass: ""
    etcd:
      monitoring: true
      endpointIps: 192.168.0.81,192.168.0.82,192.168.0.83
      port: 2379
      tlsEnable: true
    common:
      mysqlVolumeSize: 20Gi
      minioVolumeSize: 20Gi
      etcdVolumeSize: 20Gi
      openldapVolumeSize: 2Gi
      redisVolumSize: 2Gi
    console:
      enableMultiLogin: False  # enable/disable multi login
      port: 30880
    monitoring:
      prometheusReplicas: 1
      prometheusMemoryRequest: 400Mi
      prometheusVolumeSize: 20Gi
      grafana:
        enabled: false
      notification:
        enabled: false
    logging:
      enabled: false
      elasticsearchMasterReplicas: 1
      elasticsearchDataReplicas: 1
      logsidecarReplicas: 2
      elasticsearchMasterVolumeSize: 4Gi
      elasticsearchDataVolumeSize: 20Gi
      logMaxAge: 7
      elkPrefix: logstash
      containersLogMountedPath: ""
      kibana:
        enabled: false
    events:
      enabled: false
    auditing:
      enabled: false
    openpitrix:
      enabled: false
    devops:
      enabled: false
      jenkinsMemoryLim: 2Gi
      jenkinsMemoryReq: 1500Mi
      jenkinsVolumeSize: 8Gi
      jenkinsJavaOpts_Xms: 512m
      jenkinsJavaOpts_Xmx: 512m
      jenkinsJavaOpts_MaxRAM: 2g
      sonarqube:
        enabled: false
        postgresqlVolumeSize: 8Gi
    servicemesh:
      enabled: false
    notification:
      enabled: false
    alerting:
      enabled: false
    metrics_server:
      enabled: false
    weave_scope:
      enabled: false
kind: ConfigMap
metadata:
  name: ks-installer
  namespace: kubesphere-system
  labels:
    version: v3.0.0
  3. Run the deployment
./kk create cluster -f config-example.yaml
  4. Wait for the deployment to finish, then log in to the KubeSphere console
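Once kk reports success, it is worth confirming that kubectl really reaches the control plane through the VIP rather than a single master, and watching the KubeSphere installer finish (a sketch; the ks-install label selector follows KubeSphere's docs of that era and is an assumption to verify):

# The API server URL should resolve to the VIP (lb.kubesphere.local -> 192.168.0.100)
kubectl cluster-info

# Tail the ks-installer log until the welcome banner appears
kubectl logs -n kubesphere-system $(kubectl get pod -n kubesphere-system -l app=ks-install -o jsonpath='{.items[0].metadata.name}') -f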
  • Optionally, try this haproxy feature: on-marked-down shutdown-sessions closes established sessions the moment a server is marked down, so clients with long-lived connections notice a dead backend faster instead of hanging on a stale session.

      server kube-apiserver-1 192.168.0.81:6443 check on-marked-down shutdown-sessions
      server kube-apiserver-2 192.168.0.82:6443 check on-marked-down shutdown-sessions
      server kube-apiserver-3 192.168.0.83:6443 check on-marked-down shutdown-sessions


We used wise2c's Breeze to install the k8s cluster visually; it ships with keepalived + haproxy built in :)

    2 months later

    Advice appreciated; this is my first attempt and there are pitfalls I haven't hit yet, thanks.
    I set up keepalived + haproxy following this tutorial and verified it, so I believe it works, but the deployment always fails with:
    unable to fetch the kubeadm-config ConfigMap from cluster: failed to get config map: Get https://lb.kubesphere.local:6443/api/v1/namespaces/kube-system/configmaps/kubeadm-config?timeout=10s: dial tcp 10.10.71.67:6443: connect: connection refused

    My haproxy.cfg and keepalived.conf are shown in the screenshots below.



      shenhonglei You could first run kubeadm reset on the machine reporting the error, then retry the command that failed.

        Cauchy I made just three changes: set the BACKUP priority to 100, added /24 to the virtual_ipaddress (10.10.71.67/24), and ran kubeadm reset.

        After that the installation moved forward.

        Letting it finish now.


        #####################################################

        Welcome to KubeSphere!

        #####################################################
        Console: http://10.10.71.214:30880
        Account: admin
        Password: P@88w0rd
        NOTES:

        1. After logging into the console, please check the
           monitoring status of service components in
           the "Cluster Management". If any service is not
           ready, please wait patiently until all components
           are ready.
        2. Please modify the default password after login.

        #####################################################
        https://kubesphere.io 2020-08-15 23:32:12
    6 months later

        I configured HA mode with the virtual VIP, but after installation kubectl cluster-info
        still shows the local master rather than the VIP. What could be going on?

    7 months later

        Hello! Is it true that the VIP fails over after systemctl stop haproxy? It doesn't for me.
        In my installation the VIP only moves when I run systemctl stop keepalived;
        stopping haproxy just shuts down the local listening port, and the VIP stays where it was.

          ds2d
          If keepalived itself stops, the VIP will of course fail over. But in the keepalived + haproxy model, the point is to keep haproxy highly available: when haproxy stops running, the expected behavior is for the VIP to float to another node and keep the listener serving.

          These two parts of the keepalived configuration are what detect whether haproxy is alive:

          vrrp_script chk_haproxy {
            script "killall -0 haproxy"
            interval 2
            weight 2
          }

          track_script {
            chk_haproxy
          }

            kevendeng The configuration here is correct. I retested it:
            the VIP does fail over when haproxy stops, but keepalived must be configured so that these two parameters match on both SLB nodes:

            vrrp_instance haproxy-vip {
              state BACKUP
              priority 100
            }

            state must be the same on both nodes (both MASTER or both BACKUP),
            and priority must be equal as well; with one set high and one low, the VIP will not fail over.
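            The arithmetic behind this: with a positive vrrp_script weight, keepalived adds the weight to a node's base priority while the check passes. A rough illustration for the configuration above (both nodes BACKUP, priority 100, chk_haproxy weight 2):

            # haproxy running -> effective priority 100 + 2 = 102 (wins the election, holds the VIP)
            # haproxy stopped -> effective priority 100           (loses, VIP moves to the peer)
            # With unequal base priorities such as 150 vs 100, losing the 2-point bonus
            # (148 vs 100) never changes the outcome, so the VIP stays put.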

            shenhonglei Are you sure this works? When I configured keepalived with MASTER and BACKUP and different priorities, why didn't the VIP fail over after I stopped haproxy on the MASTER?

    1 year later

            Is the control plane served only by the master node on which the deployment command below was executed?

            ./kk create cluster -f config-example.yaml

            If I shut down the master that ran the command and then shut down another master at random, can the last remaining master still serve the control plane?