author:JevonWei
版权声明:原创作品
实验目的:构建LVS-DR架构。为了达到LVS高可用的目的,故在LVS-DR的Director端作Keepalive集群:在Director-A上作keepalive-A,在Director-B上作keepalive-B;LVS-RS1和LVS-RS2为后端的两台web服务器,通过在Director上作keepalive集群实现高可用的目的
网络拓扑图
web
实验环境(keepalive节点同时作为LVS的director节点)
keepalive-A(Director-A) 172.16.253.108 keepalive-B(Director-A) 172.16.253.105 LVS-RS1 172.16.250.127 LVS-RS2 172.16.253.193 VIP 172.16.253.150 client 172.16.253.177
为了更好地观察实验结果,故在此将RS1和RS2的web页面内容设置为不一致,以便可以更清晰地区分RS1服务端和RS2服务端
LVS-RS1服务器
[root@LVS-RS1 ~]# systemctl restart chronyd \\多台服务器时间同步 [root@LVS-RS1 ~]# iptables -F [root@LVS-RS1 ~]# setenforce 0 [root@LVS-RS1 ~]# yum -y install nginx [root@LVS-RS1 ~]# vim /usr/share/nginx/html/index.html <h1> Web RS1 </h1> [root@LVS-RS1 ~]# systemctl start nginx 修改内核参数并添加VIP地址 [root@LVS-RS1 ~]# vim lvs_dr.sh #!/bin/bash # vip=172.16.253.150 mask=255.255.255.255 iface="lo:0" case $1 in start) ifconfig $iface $vip netmask $mask broadcast $vip up route add -host $vip dev $iface echo 1 > /proc/sys/net/ipv4/conf/all/arp_ignore echo 1 > /proc/sys/net/ipv4/conf/lo/arp_ignore echo 2 > /proc/sys/net/ipv4/conf/all/arp_announce echo 2 > /proc/sys/net/ipv4/conf/lo/arp_announce ;; stop) echo 0 > /proc/sys/net/ipv4/conf/all/arp_ignore echo 0 > /proc/sys/net/ipv4/conf/lo/arp_ignore echo 0 > /proc/sys/net/ipv4/conf/all/arp_announce echo 0 > /proc/sys/net/ipv4/conf/lo/arp_announce ifconfig $iface down ;; *) echo "Usage:$(basename $0) start|stop" exit 1 ;; esac [root@LVS-RS1 ~]# bash lvs_dr.sh start [root@LVS-RS1 ~]# ifconfig lo:0: flags=73<UP,LOOPBACK,RUNNING> mtu 65536 inet 172.16.253.150 netmask 255.255.255.255 loop txqueuelen 1 (Local Loopback)
LVS-RS2服务器
[root@LVS-RS2 ~]# systemctl restart chronyd \\多台服务器时间同步 [root@LVS-RS2 ~]# iptables -F [root@LVS-RS2 ~]# setenforce 0 [root@LVS-RS2 ~]# yum -y install nginx [root@LVS-RS2 ~]# vim /usr/share/nginx/html/index.html <h1> Web RS2 </h1> [root@LVS-RS2 ~]# systemctl start nginx 修改内核参数并添加VIP地址 [root@LVS-RS2 ~]# vim lvs_dr.sh #!/bin/bash # vip=172.16.253.150 mask=255.255.255.255 iface="lo:0" case $1 in start) ifconfig $iface $vip netmask $mask broadcast $vip up route add -host $vip dev $iface echo 1 > /proc/sys/net/ipv4/conf/all/arp_ignore echo 1 > /proc/sys/net/ipv4/conf/lo/arp_ignore echo 2 > /proc/sys/net/ipv4/conf/all/arp_announce echo 2 > /proc/sys/net/ipv4/conf/lo/arp_announce ;; stop) echo 0 > /proc/sys/net/ipv4/conf/all/arp_ignore echo 0 > /proc/sys/net/ipv4/conf/lo/arp_ignore echo 0 > /proc/sys/net/ipv4/conf/all/arp_announce echo 0 > /proc/sys/net/ipv4/conf/lo/arp_announce ifconfig $iface down ;; *) echo "Usage:$(basename $0) start|stop" exit 1 ;; esac [root@LVS-RS1 ~]# bash lvs_dr.sh start [root@LVS-RS1 ~]# ifconfig lo:0: flags=73<UP,LOOPBACK,RUNNING> mtu 65536 inet 172.16.253.150 netmask 255.255.255.255 loop txqueuelen 1 (Local Loopback)
keepalive-A
[root@keepaliveA ~]# systemctl restart chronyd \\多台服务器时间同步 [root@keepaliveA ~]# yum -y install ipvsadm
keepalive-B
[root@keepaliveB ~]# systemctl restart chronyd \\多台服务器时间同步 [root@keepaliveB ~]# yum -y install ipvsadm
keepalive-A
[root@keepaliveA ~]# yum -y install nginx [root@keepaliveA ~]# vim /usr/share/nginx/html/index.html </h1> sorry from Director-A(keepalive-A) </h1> [root@keepaliveA ~]# systemctl start nginx
keepalive-B
[root@keepalive-B ~]# yum -y install nginx [root@keepalive-B ~]# vim /usr/share/nginx/html/index.html </h1> sorry from Director-B(keepalive-B) </h1> [root@keepaliveB ~]# systemctl start nginx
keepalive-A
[root@keepalive-A ~]# iptables -F [root@keepalive-A ~]# yum -y install keepalived [root@keepaliveA ~]# vim /etc/keepalived/keepalived.conf global_defs { notification_email { \\定义邮件通知设置 jevon@danran.com \\定义邮件接收地址 } notification_email_from ka_admin@danran.com \\邮件发送者 smtp_server 127.0.0.1 \\邮件server服务器 smtp_connect_timeout 30 \\连接超时 router_id keepaliveA \\route的ID信息,自定义 vrrp_mcast_group4 224.103.5.5 \\多播地址段,默认为224.0.0.18 } vrrp_instance VI_A { state MASTER interface ens33 virtual_router_id 51 priority 100 advert_int 1 authentication { auth_type PASS auth_pass qr8hQHuL } virtual_ipaddress { 172.16.253.150/32 dev ens33 } } virtual_server 172.16.253.150 80 { delay_loop 6 \\服务轮询的时间间隔 lb_algo rr \\定义调度方法; lb_kind DR \\集群的类型; protocol TCP \\服务协议,仅支持TCP; sorry_server 127.0.0.1 80 \\指定sorry server,且为本机的web服务提供的web页面 real_server 172.16.250.127 80 { weight 1 \\权重 SSL_GET { \\应用层检测 url { path / \\定义要监控的URL #digest ff20ad2481f97b1754ef3e12ecd3a9cc \\判断上述检测机制为健康状态的响应的内容的校验码; status_code 200 \\判断上述检测机制为健康状态的响应码 } connect_timeout 3 \\连接请求的超时时长; nb_get_retry 3 \\重试次数 delay_before_retry 1 \\重试之前的延迟时长 } } real_server 172.16.253.193 80 { weight 1 SSL_GET { url { path / #digest ff20ad2481f97b1754ef3e12ecd3a9cc status_code 200 } connect_timeout 3 nb_get_retry 3 delay_before_retry 1 } } } [root@keepaliveA ~]# systemctl start keepalived [root@keepaliveA ~]# ip a l 2: ens33: <BROADCAST,MULTICAST,UP,LOWER_UP> mtu 1500 qdisc pfifo_fast state UP qlen 1000 link/ether 00:0c:29:75:dc:3c brd ff:ff:ff:ff:ff:ff inet 172.16.253.150/32 scope global ens33 valid_lft forever preferred_lft forever [root@keepaliveA ~]# ipvsadm -Ln IP Virtual Server version 1.2.1 (size=4096) Prot LocalAddress:Port Scheduler Flags -> RemoteAddress:Port Forward Weight ActiveConn InActConn TCP 172.16.253.150:80 rr -> 172.16.250.127:80 Route 1 0 0 -> 172.16.253.193:80 Route 1 0 0
keepalive-B
[root@keepalive-B ~]# iptables -F [root@keepalive-B ~]# yum -y install keepalived [root@keepaliveB ~]# vim /etc/keepalived/keepalived.conf global_defs { notification_email { \\定义邮件通知设置 jevon@danran.com \\定义邮件接收地址 } notification_email_from ka_admin@danran.com \\邮件发送者 smtp_server 127.0.0.1 \\邮件server服务器 smtp_connect_timeout 30 \\连接超时 router_id keepaliveB \\route的ID信息,自定义 vrrp_mcast_group4 224.103.5.5 \\多播地址段,默认为224.0.0.18 } vrrp_instance VI_A { state BACKUP interface ens33 virtual_router_id 51 priority 95 advert_int 1 authentication { auth_type PASS auth_pass qr8hQHuL } virtual_ipaddress { 172.16.253.150/32 dev ens33 } } virtual_server 172.16.253.150 80 { delay_loop 6 \\服务轮询的时间间隔 lb_algo rr \\定义调度方法; lb_kind DR \\集群的类型; protocol TCP \\服务协议,仅支持TCP; sorry_server 127.0.0.1 80 \\指定sorry server,且为本机的web服务提供的web页面 real_server 172.16.250.127 80 { weight 1 \\权重 SSL_GET { \\应用层检测 url { path / \\定义要监控的URL #digest ff20ad2481f97b1754ef3e12ecd3a9cc \\判断上述检测机制为健康状态的响应的内容的校验码; status_code 200 \\判断上述检测机制为健康状态的响应码 } connect_timeout 3 \\连接请求的超时时长; nb_get_retry 3 \\重试次数 delay_before_retry 1 \\重试之前的延迟时长 } } real_server 172.16.253.193 80 { weight 1 SSL_GET { url { path / #digest ff20ad2481f97b1754ef3e12ecd3a9cc status_code 200 } connect_timeout 3 nb_get_retry 3 delay_before_retry 1 } } } [root@keepaliveB ~]# systemctl start keepalived [root@keepalive-B ~]# ipvsadm IP Virtual Server version 1.2.1 (size=4096) Prot LocalAddress:Port Scheduler Flags -> RemoteAddress:Port Forward Weight ActiveConn InActConn TCP 172.16.253.150:http rr -> 172.16.250.127:http Route 1 0 0 -> 172.16.253.193:http Route 1 0 0
client测试
[root@client ~]# for i in {1..10};do curl http://172.16.253.150;done <h1> Web RS1 </h1> <h1> Web RS2 </h1> <h1> Web RS1 </h1> <h1> Web RS2 </h1> <h1> Web RS1 </h1>
当keepalive-A故障时
[root@keepaliveA ~]# systemctl stop keepalived
keepalive-B自动成为MASTER主节点,则LVS的director调度服务器切换为keepalive-B上,LVS-RS1和LVS-RS2的web服务正常使用
client访问测试
[root@client ~]# for i in {1..10};do curl http://172.16.253.150;done <h1> Web RS2 </h1> <h1> Web RS1 </h1> <h1> Web RS2 </h1> <h1> Web RS1 </h1> <h1> Web RS2 </h1>
当keepalive-A恢复正常时,keepalive-A再次成为MASTER主节点
[root@keepaliveA ~]# systemctl start keepalived [root@keepaliveA ~]# ip a l : ens33: <BROADCAST,MULTICAST,UP,LOWER_UP> mtu 1500 qdisc pfifo_fast state UP qlen 1000 link/ether 00:0c:29:75:dc:3c brd ff:ff:ff:ff:ff:ff inet 172.16.253.150/32 scope global ens33 valid_lft forever preferred_lft forever
当LVS-RS1的web服务故障时
[root@LVS-RS1 ~]# iptables -A INPUT -p tcp --dport 80 -j REJECT
client访问
[root@client ~]# for i in {1..10};do curl http://172.16.253.150;done <h1> Web RS2 </h1> <h1> Web RS2 </h1> <h1> Web RS2 </h1> <h1> Web RS2 </h1>
当LVS-RS1和LVS-RS2的web服务所有故障时
[root@LVS-RS1 ~]# iptables -A INPUT -p tcp --dport 80 -j REJECT [root@LVS-RS2 ~]# iptables -A INPUT -p tcp --dport 80 -j REJECT
client访问到的是sorry server服务器,且sorry server服务器为keepalive-A
[root@client ~]# for i in {1..10};do curl http://172.16.253.150;done </h1> sorry from Director-A(keepalive-A) </h1> </h1> sorry from Director-A(keepalive-A) </h1> </h1> sorry from Director-A(keepalive-A) </h1> </h1> sorry from Director-A(keepalive-A) </h1> </h1> sorry from Director-A(keepalive-A) </h1>
当keepalive-A故障时
[root@keepaliveA ~]# systemctl stop keepalived.service
client访问sorry server服务页面,且sorry server服务器为keepalive-B
[root@client ~]# for i in {1..10};do curl http://172.16.253.150;done </h1> sorry from Director-B(keepalive-B) </h1> </h1> sorry from Director-B(keepalive-B) </h1> </h1> sorry from Director-B(keepalive-B) </h1> </h1> sorry from Director-B(keepalive-B) </h1> </h1> sorry from Director-B(keepalive-B) </h1>
LVS-RS1的web服务恢复正常后
[root@LVS-RS1 ~]# iptables -F
client访问测试
[root@client ~]# for i in {1..10};do curl http://172.16.253.150;done <h1> Web RS1 </h1> <h1> Web RS1 </h1> <h1> Web RS1 </h1> <h1> Web RS1 </h1> <h1> Web RS1 </h1>
LVS-RS1和LVS-RS2的web服务所有恢复正常后
[root@LVS-RS1 ~]# iptables -F [root@LVS-RS2 ~]# iptables -F
client访问测试
[root@client ~]# for i in {1..10};do curl http://172.16.253.150;done <h1> Web RS2 </h1> <h1> Web RS1 </h1> <h1> Web RS2 </h1> <h1> Web RS1 </h1> <h1> Web RS2 </h1>
保存:建议保存至/etc/sysconfig/ipvsadm
ipvsadm-save > /PATH/TO/IPVSADM_FILE ipvsadm -S > /PATH/TO/IPVSADM_FILE systemctl stop ipvsadm.service
重载:
ipvsadm-restore < /PATH/FROM/IPVSADM_FILE ipvsadm -R < /PATH/FROM/IPVSADM_FILE systemctl restart ipvsadm.service
[root@keepaliveA ~]# genhash -s 172.16.250.127 -p 80 -u /