调度器dir:172.16.111.100
真实服务器rs1:172.16.111.110
真实服务器rs2:172.16.111.120
VIP:172.16.111.200
[root@garytao-01 ~]# vim /usr/local/sbin/lvs_dr.sh 增长以下脚本内容: #! /bin/bash echo 1 > /proc/sys/net/ipv4/ip_forward ipv=/usr/sbin/ipvsadm vip=172.16.111.200 rs1=172.16.111.110 rs2=172.16.111.120 #注意这里的网卡名字 ifdown ens33 #为了避免让网卡设置屡次 ifup ens33 ifconfig ens33:2 $vip broadcast $vip netmask 255.255.255.255 up route add -host $vip dev ens33:2 $ipv -C $ipv -A -t $vip:80 -s rr $ipv -a -t $vip:80 -r $rs1:80 -g -w 1 $ipv -a -t $vip:80 -r $rs2:80 -g -w 1 #启动脚本 [root@garytao-01 ~]# sh /usr/local/sbin/lvs_dr.sh 成功断开设备 'ens33'。 成功激活的链接(D-Bus 激活路径:/org/freedesktop/NetworkManager/ActiveConnection/2)
[root@garytao-02 ~]# vim /usr/local/sbin/lvs_rs.sh 增长以下脚本内容: #/bin/bash vip=172.16.111.200 #把vip绑定在lo上,是为了实现rs直接把结果返回给客户端 ifconfig lo:0 $vip broadcast $vip netmask 255.255.255.255 up route add -host $vip lo:0 #如下操做为更改arp内核参数,目的是为了让rs顺利发送mac地址给客户端 #参考文档www.cnblogs.com/lgfeng/archive/2012/10/16/2726308.html echo "1" >/proc/sys/net/ipv4/conf/lo/arp_ignore echo "2" >/proc/sys/net/ipv4/conf/lo/arp_announce echo "1" >/proc/sys/net/ipv4/conf/all/arp_ignore echo "2" >/proc/sys/net/ipv4/conf/all/arp_announce #启动脚本 [root@garytao-02 ~]# sh /usr/local/sbin/lvs_rs.sh #在查看机器上查看路由及IP [root@garytao-02 ~]# route -n Kernel IP routing table Destination Gateway Genmask Flags Metric Ref Use Iface 0.0.0.0 172.16.111.2 0.0.0.0 UG 100 0 0 ens33 172.16.0.0 0.0.0.0 255.255.0.0 U 100 0 0 ens33 172.16.111.200 0.0.0.0 255.255.255.255 UH 0 0 0 lo [root@garytao-02 ~]# ip add 1: lo: <LOOPBACK,UP,LOWER_UP> mtu 65536 qdisc noqueue state UNKNOWN qlen 1 link/loopback 00:00:00:00:00:00 brd 00:00:00:00:00:00 inet 127.0.0.1/8 scope host lo valid_lft forever preferred_lft forever inet 172.16.111.200/32 brd 172.16.111.200 scope global lo:0 valid_lft forever preferred_lft forever inet6 ::1/128 scope host valid_lft forever preferred_lft forever 2: ens33: <BROADCAST,MULTICAST,UP,LOWER_UP> mtu 1500 qdisc pfifo_fast state UP qlen 1000 link/ether 00:0c:29:2c:5f:75 brd ff:ff:ff:ff:ff:ff inet 172.16.111.110/16 brd 172.16.255.255 scope global ens33 valid_lft forever preferred_lft forever inet6 fe80::b6dc:6aed:f1d0:2f43/64 scope link valid_lft forever preferred_lft forever [root@garytao-03 ~]# route -n Kernel IP routing table Destination Gateway Genmask Flags Metric Ref Use Iface 0.0.0.0 172.16.111.2 0.0.0.0 UG 100 0 0 ens33 172.16.111.0 0.0.0.0 255.255.255.0 U 100 0 0 ens33 172.16.111.200 0.0.0.0 255.255.255.255 UH 0 0 0 lo [root@garytao-03 ~]# ip add 1: lo: <LOOPBACK,UP,LOWER_UP> mtu 65536 qdisc noqueue state UNKNOWN qlen 1 link/loopback 00:00:00:00:00:00 brd 00:00:00:00:00:00 inet 127.0.0.1/8 scope host lo 
valid_lft forever preferred_lft forever inet 172.16.111.200/32 brd 172.16.111.200 scope global lo:0 valid_lft forever preferred_lft forever inet6 ::1/128 scope host valid_lft forever preferred_lft forever 2: ens33: <BROADCAST,MULTICAST,UP,LOWER_UP> mtu 1500 qdisc pfifo_fast state UP qlen 1000 link/ether 00:0c:29:84:9e:21 brd ff:ff:ff:ff:ff:ff inet 172.16.111.120/24 brd 172.16.111.255 scope global ens33 valid_lft forever preferred_lft forever inet6 fe80::5a44:6d44:2313:b46f/64 scope link valid_lft forever preferred_lft forever
LVS架构中,无论是NAT模式还是DR模式,当后端的RS宕掉时,调度器依然会把请求转发到宕掉的RS上,这样的结果并不是我们想要的。keepalived就可以解决该问题,它不单单有高可用的功能,还有负载均衡的功能,在调度器上只要安装了keepalived,就不用再安装ipvsadm了,也不用去编写LVS相关的脚本了,也就是说keepalived已经嵌入了LVS功能,完整的keepalived+LVS架构需要有两台调度器实现高可用,提供调度服务的只需要一台,另外一台作为备用。
完整架构需要两台服务器(角色为dir)分别安装keepalived软件,目的是实现高可用,但keepalived本身也有负载均衡的功能,因此本次实验可以只安装一台keepalived。
dir(安装keepalived)172.16.111.100
rs1:172.16.111.110
rs2:172.16.111.120
vip:172.16.111.200
[root@garytao-01 ~]# vim /etc/keepalived/keepalived.conf 删除之前的配置,增加如下配置内容: vrrp_instance VI_1 { #备用服务器上为 BACKUP state MASTER #绑定vip的网卡为ens33,你的网卡和阿铭的可能不同,这里需要你改一下 interface ens33 virtual_router_id 51 #备用服务器上为90 priority 100 advert_int 1 authentication { auth_type PASS auth_pass aminglinux } virtual_ipaddress { 172.16.111.200 } } virtual_server 172.16.111.200 80 { #(每隔10秒查询realserver状态) delay_loop 10 #(lvs 算法) lb_algo wlc #(DR模式) lb_kind DR #(同一IP的连接60秒内被分配到同一台realserver) persistence_timeout 60 #(用TCP协议检查realserver状态) protocol TCP real_server 172.16.111.110 80 { #(权重) weight 100 TCP_CHECK { #(10秒无响应超时) connect_timeout 10 nb_get_retry 3 delay_before_retry 3 connect_port 80 } } real_server 172.16.111.120 80 { weight 100 TCP_CHECK { connect_timeout 10 nb_get_retry 3 delay_before_retry 3 connect_port 80 } } } #由于之前执行过LVS的脚本,可能需要做以下操作: #把之前的ipvsadm规则清空 [root@garytao-01 ~]# ipvsadm -C #把之前设置的VIP删除掉 [root@garytao-01 ~]# systemctl restart network #在keepalived的配置文件中定义的LVS模式为DR模式,因此需要在两台rs上执行上一节设置的lvs_rs.sh脚本。 [root@garytao-01 ~]# sh /usr/local/sbin/lvs_rs.sh #启动keepalived [root@garytao-01 ~]# systemctl start keepalived [root@garytao-01 ~]# ps aux |grep keep root 5753 0.0 0.1 120720 1396 ? Ss 19:05 0:00 /usr/sbin/keepalived -D root 5754 0.0 0.3 122916 3124 ? S 19:05 0:00 /usr/sbin/keepalived -D root 5755 0.0 0.2 127116 2660 ?
S 19:05 0:00 /usr/sbin/keepalived -D root 5760 0.0 0.0 112680 972 pts/0 R+ 19:06 0:00 grep --color=auto keep [root@garytao-01 ~]# ip add 1: lo: <LOOPBACK,UP,LOWER_UP> mtu 65536 qdisc noqueue state UNKNOWN qlen 1 link/loopback 00:00:00:00:00:00 brd 00:00:00:00:00:00 inet 127.0.0.1/8 scope host lo valid_lft forever preferred_lft forever inet6 ::1/128 scope host valid_lft forever preferred_lft forever 2: ens33: <BROADCAST,MULTICAST,UP,LOWER_UP> mtu 1500 qdisc pfifo_fast state UP qlen 1000 link/ether 00:0c:29:09:e5:58 brd ff:ff:ff:ff:ff:ff inet 172.16.111.100/16 brd 172.16.255.255 scope global ens33 valid_lft forever preferred_lft forever inet 172.16.111.200/32 brd 172.16.111.200 scope global ens33:2 valid_lft forever preferred_lft forever inet6 fe80::1ffb:cde1:5f3e:5778/64 scope link valid_lft forever preferred_lft forever 3: ens38: <BROADCAST,MULTICAST,UP,LOWER_UP> mtu 1500 qdisc pfifo_fast state UP qlen 1000 link/ether 00:0c:29:09:e5:6c brd ff:ff:ff:ff:ff:ff inet 172.16.13.129/24 brd 172.16.13.255 scope global dynamic ens38 valid_lft 1486sec preferred_lft 1486sec inet6 fe80::1392:1882:3d3d:ad8c/64 scope link valid_lft forever preferred_lft forever #使用执行命令ipvsadm -ln测试查看链接数 #停掉120上的nginx后,使用ipvsadm -ln查看以下 [root@garytao-03 ~]# systemctl stop nginx [root@garytao-03 ~]# ps aux |grep nginx root 7020 0.0 0.0 112680 976 pts/0 R+ 19:29 0:00 grep --color=auto nginx [root@garytao-01 ~]# ipvsadm -ln IP Virtual Server version 1.2.1 (size=4096) Prot LocalAddress:Port Scheduler Flags -> RemoteAddress:Port Forward Weight ActiveConn InActConn TCP 172.16.111.200:80 wlc persistent 60 -> 172.16.111.110:80 Route 100 0 0 #关掉keepalived, [root@garytao-01 ~]# systemctl stop keepalived [root@garytao-01 ~]# ipvsadm -ln IP Virtual Server version 1.2.1 (size=4096) Prot LocalAddress:Port Scheduler Flags -> RemoteAddress:Port Forward Weight ActiveConn InActConn #开启120上的nginx及keepalived后 [root@garytao-03 ~]# systemctl start nginx [root@garytao-03 ~]# ps aux |grep nginx root 7032 0.0 0.0 20500 
624 ? Ss 19:32 0:00 nginx: master process /usr/local/nginx/sbin/nginx -c /usr/local/nginx/conf/nginx.conf nobody 7033 0.0 0.3 22944 3208 ? S 19:32 0:00 nginx: worker process nobody 7034 0.0 0.3 22944 3208 ? S 19:32 0:00 nginx: worker process root 7036 0.0 0.0 112680 972 pts/0 S+ 19:32 0:00 grep --color=auto nginx [root@garytao-01 ~]# systemctl start keepalived #dir机器 [root@garytao-01 ~]# ipvsadm -ln IP Virtual Server version 1.2.1 (size=4096) Prot LocalAddress:Port Scheduler Flags -> RemoteAddress:Port Forward Weight ActiveConn InActConn TCP 172.16.111.200:80 wlc persistent 60 -> 172.16.111.110:80 Route 100 0 0 -> 172.16.111.120:80 Route 100 0 0