[High-Concurrency Load Balancing] IV. High Availability for LVS Based on keepalived

2021-02-17  每皮1024

Introduction to keepalived

keepalived is a third-party, application-layer tool whose main purpose is HA (high availability). It provides the following capabilities:

  1. Monitors its own service (the LVS it manages)
  2. The Master advertises that it is still alive; the Backups listen for the Master's state, and when the Master dies the Backups elect a new Master (so keepalived is deployed on the Master and on every Backup)
  3. Configuration: it sets up the VIP and adds the ipvs rules from its own configuration file, so keepalived can replace ipvsadm (see the ipvsadm sketch after this list)
  4. Health-checks the back-end servers; a server judged dead is removed promptly so the scheduler does not dispatch requests to it
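For point 3, a rough sketch of the ipvsadm commands that the keepalived virtual_server / real_server blocks later in this article take the place of (the VIP 172.16.6.100 and the real servers 172.16.6.3 / 172.16.6.4 are taken from the actual configuration shown below):

ipvsadm -A -t 172.16.6.100:80 -s rr                     # one virtual_server block: VIP, port, lb_algo rr
ipvsadm -a -t 172.16.6.100:80 -r 172.16.6.3:80 -g -w 1  # one real_server block: weight 1, -g = DR (lb_kind DR)
ipvsadm -a -t 172.16.6.100:80 -r 172.16.6.4:80 -g -w 1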

Nginx can also serve as a company's load balancer, and then Nginx itself becomes a single point of failure; keepalived can solve that too (the same applies to Tomcat).

I. Prepare the machine nodes

node01 and node04 act as the LVS (director) servers; node02 and node03 act as the underlying real servers.

II. Set up the network layer

Same as in the earlier "LVS DR model setup" article, except that node01's VIP can be left unconfigured for now (keepalived will configure it).
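As a reminder, a minimal sketch of the real-server side (node02, node03) from that article, assuming the VIP 172.16.6.100 and the eth0 / lo interfaces used throughout this series:

# suppress ARP answers for the VIP, then bind the VIP to the loopback with a /32 mask
echo 1 > /proc/sys/net/ipv4/conf/eth0/arp_ignore
echo 2 > /proc/sys/net/ipv4/conf/eth0/arp_announce
echo 1 > /proc/sys/net/ipv4/conf/all/arp_ignore
echo 2 > /proc/sys/net/ipv4/conf/all/arp_announce
ifconfig lo:2 172.16.6.100 netmask 255.255.255.255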

Preprocessing: if the machines are still in the state left by the "LVS DR model setup":

  1. Clear node01's scheduling rules
ipvsadm -C
ipvsadm -ln # verify that the scheduling rules are gone
  2. Remove node01's VIP
ifconfig eth0:2 down

III. Install, configure, and start keepalived on node01 and node04

yum install keepalived ipvsadm -y

The configuration file is /etc/keepalived/keepalived.conf; by default it contains:

! Configuration File for keepalived

global_defs {   # global settings
   notification_email {
     acassen@firewall.loc   # keepalived can send email notifications
     failover@firewall.loc
     sysadmin@firewall.loc
   }
   notification_email_from Alexandre.Cassen@firewall.loc
   smtp_server 192.168.200.1
   smtp_connect_timeout 30
   router_id LVS_DEVEL
   vrrp_skip_check_adv_addr
   vrrp_strict
   vrrp_garp_interval 0
   vrrp_gna_interval 0
}

vrrp_instance VI_1 {   # VRRP: Virtual Router Redundancy Protocol
    state MASTER   # this node is the master load balancer; in practice the backup machine usually has slightly lower specs
    interface eth0   # NIC that carries the VRRP advertisements; servers usually have several NICs, so heartbeat traffic need not compete with data traffic and more than one network is available
    virtual_router_id 51
    priority 100
    advert_int 1
    authentication {
        auth_type PASS
        auth_pass 1111
    }
    virtual_ipaddress {
        192.168.200.16   # the VIPs are simply enumerated here
        192.168.200.17
        192.168.200.18
    }
}

virtual_server 192.168.200.100 443 {   # virtual server (VIP + port)
    delay_loop 6
    lb_algo rr
    lb_kind NAT
    persistence_timeout 50   # session persistence: once a real server has answered a client, requests from that client keep going to the same real server until this timeout expires
    protocol TCP

    real_server 192.168.201.100 443 {
        weight 1
        SSL_GET {
            url {
              path /
              digest ff20ad2481f97b1754ef3e12ecd3a9cc
            }
            url {
              path /mrtg/
              digest 9b3a0c85a887a256d6939da88aabd8cd
            }
            connect_timeout 3
            nb_get_retry 3
            delay_before_retry 3
        }
    }
}

virtual_server 10.10.10.2 1358 {
    delay_loop 6
    lb_algo rr 
    lb_kind NAT
    persistence_timeout 50
    protocol TCP

    sorry_server 192.168.200.200 1358

    real_server 192.168.200.2 1358 {
        weight 1
        HTTP_GET {
            url { 
              path /testurl/test.jsp
              digest 640205b7b0fc66c1ea91c463fac6334d
            }
            url { 
              path /testurl2/test.jsp
              digest 640205b7b0fc66c1ea91c463fac6334d
            }
            url { 
              path /testurl3/test.jsp
              digest 640205b7b0fc66c1ea91c463fac6334d
            }
            connect_timeout 3
            nb_get_retry 3
            delay_before_retry 3
        }
    }

    real_server 192.168.200.3 1358 {
        weight 1
        HTTP_GET {
            url { 
              path /testurl/test.jsp
              digest 640205b7b0fc66c1ea91c463fac6334c
            }
            url { 
              path /testurl2/test.jsp
              digest 640205b7b0fc66c1ea91c463fac6334c
            }
            connect_timeout 3
            nb_get_retry 3
            delay_before_retry 3
        }
    }
}

virtual_server 10.10.10.3 1358 {
    delay_loop 3
    lb_algo rr 
    lb_kind NAT
    persistence_timeout 50
    protocol TCP

    real_server 192.168.200.4 1358 {
        weight 1
        HTTP_GET {
            url { 
              path /testurl/test.jsp
              digest 640205b7b0fc66c1ea91c463fac6334d
            }
            url { 
              path /testurl2/test.jsp
              digest 640205b7b0fc66c1ea91c463fac6334d
            }
            url { 
              path /testurl3/test.jsp
              digest 640205b7b0fc66c1ea91c463fac6334d
            }
            connect_timeout 3
            nb_get_retry 3
            delay_before_retry 3
        }
    }

    real_server 192.168.200.5 1358 {
        weight 1
        HTTP_GET {
            url { 
              path /testurl/test.jsp
              digest 640205b7b0fc66c1ea91c463fac6334d
            }
            url { 
              path /testurl2/test.jsp
              digest 640205b7b0fc66c1ea91c463fac6334d
            }
            url { 
              path /testurl3/test.jsp
              digest 640205b7b0fc66c1ea91c463fac6334d
            }
            connect_timeout 3
            nb_get_retry 3
            delay_before_retry 3
        }
    }
}

If you are not sure how to write a particular option, use the Linux man pages:

yum install man
man 5 keepalived.conf # then search for keywords with /

The modifications actually made (on node01):

vrrp_instance VI_1 {
    state MASTER   # on node04: BACKUP
    interface eth0
    virtual_router_id 51
    priority 100   # on node04: 50
    advert_int 1
    authentication {
        auth_type PASS
        auth_pass 1111
    }
    virtual_ipaddress {
        172.16.6.100/24 dev eth0 label eth0:2
    }
}
# corresponds to ipvsadm -A: defines the incoming (virtual) service
virtual_server 172.16.6.100 80 {
    delay_loop 6
    lb_algo rr
    lb_kind DR
    nat_mask 255.255.255.0
    persistence_timeout 0
    protocol TCP
    # corresponds to ipvsadm -a: adds a real server that traffic is forwarded out to
    real_server 172.16.6.3 80 {
        weight 1
        HTTP_GET {   # health-check configuration
            url {
              path /
              status_code 200
            }
            connect_timeout 3
            nb_get_retry 3
            delay_before_retry 3
        }
    }
    real_server 172.16.6.4 80 {
        weight 1
        HTTP_GET {
            url {
              path /
              status_code 200
            }
            connect_timeout 3
            nb_get_retry 3
            delay_before_retry 3
        }
    }
}
# Copy the file to node04 and change only state and priority there
scp ./keepalived.conf root@172.16.6.5:`pwd`
service keepalived start # run on both node01 and node04
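Once keepalived is running, you can confirm that it did the work previously done by hand with ifconfig and ipvsadm (output will vary with your environment):

ifconfig      # on node01 (MASTER) the VIP 172.16.6.100 now shows up as eth0:2
ipvsadm -ln   # the rr rules for 172.16.6.3 and 172.16.6.4 were created by keepalived itself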

Problems

Problems caused by the keepalived process on the master exiting abnormally:

keepalived is just an ordinary process, so abnormal exits are always possible. First, look at the processes on the master LVS node: the one whose parent PID is 1 is the keepalived parent process, and the remaining two are its children, one handling VRRP and the other performing the health checks against the real servers.

[root@localhost keepalived]# ps -ef | grep keep
root       1516      1  0 09:50 ?        00:00:01 /usr/sbin/keepalived -D
root       1517   1516  0 09:50 ?        00:00:02 /usr/sbin/keepalived -D
root       1518   1516  0 09:50 ?        00:00:02 /usr/sbin/keepalived -D
root       1682   1345  0 10:29 pts/0    00:00:00 grep --color=auto keep
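A sketch of how to reproduce the problem (PIDs as in the output above): kill keepalived on node01 so that it gets no chance to clean up after itself.

pkill -9 keepalived   # simulate an abnormal exit on node01 (the master)
ifconfig              # node01: eth0:2 still holds the VIP -- nothing removed it
# on node04, keepalived stops receiving VRRP advertisements, promotes itself and adds the VIP,
# so both directors now answer for 172.16.6.100 and clients see intermittent failures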

Going further: ZooKeeper uses a cluster (quorum) approach to solve the kind of problem caused by keepalived itself being a single point.
