In our earlier Keepalived + LVS single-master setup, only one LVS director handles traffic, which wastes resources. With a dual-master structure both directors work at the same time: clients resolve the domain through DNS round-robin and are spread across the two LVS nodes. A dual-master setup therefore needs 2 VIPs, and both VIPs must be published under the domain name.
As before, keepalived is installed on each LVS node. When keepalived detects that one director has gone down, that director's VIP floats over to the surviving node; once the failed director recovers, the VIP floats back.
Topology diagrams (my own drawings):
- Initial state
- One of the directors goes down
- The director recovers
Environment:
- vip1: 192.168.12.101
- vip2: 192.168.12.102
- lvs_master1: 192.168.12.12
- lvs_master2: 192.168.12.13
- nginx1: 192.168.12.2
- nginx2: 192.168.12.3
- tomcat1: 192.168.12.6
- tomcat2: 192.168.12.7
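Since there is no single VIP to publish, DNS round-robin is what actually spreads clients across the two directors. A minimal sketch of what the records could look like, assuming the domain used later in this post (cmazxiaoma.mayday.com) is served from a zone you control; the zone layout and TTL here are assumptions, not from the original notes:

```
; two A records for the same name -> answers rotate between the two VIPs
cmazxiaoma.mayday.com.    60    IN    A    192.168.12.101
cmazxiaoma.mayday.com.    60    IN    A    192.168.12.102
```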
### What did you do today
Compared with the master-backup setup, the dual-master mode differs in four ways:
1. DNS round-robin is used in front of the directors.
2. The LVS load-balancing layer needs 2 VIPs, i.e. 192.168.12.101 and 192.168.12.102.
3. Both VIPs have to be bound to the lo loopback device on the backend real servers; the two scripts below (installed on the real servers as /etc/init.d/double_master_lvsdr0 and /etc/init.d/double_master_lvsdr1) take care of this.
4. keepalived.conf is configured differently from the master-backup mode above.
#!/bin/sh
# Real-server script: bind vip1 (192.168.12.101) to the loopback alias lo:0 and
# suppress ARP replies for the VIP, so only the LVS directors answer ARP for it (LVS-DR).
VIP=192.168.12.101
. /etc/rc.d/init.d/functions
case "$1" in
start)
    /sbin/ifconfig lo down
    /sbin/ifconfig lo up
    # Ignore ARP requests for addresses on lo and announce only primary source addresses
    echo "1" >/proc/sys/net/ipv4/conf/lo/arp_ignore
    echo "2" >/proc/sys/net/ipv4/conf/lo/arp_announce
    echo "1" >/proc/sys/net/ipv4/conf/all/arp_ignore
    echo "2" >/proc/sys/net/ipv4/conf/all/arp_announce
    /sbin/sysctl -p >/dev/null 2>&1
    # Bind the VIP to lo:0 with a /32 mask and add a host route for it
    /sbin/ifconfig lo:0 $VIP netmask 255.255.255.255 up
    /sbin/route add -host $VIP dev lo:0
    echo "LVS-DR real server starts successfully."
    ;;
stop)
    /sbin/ifconfig lo:0 down
    /sbin/route del $VIP >/dev/null 2>&1
    # Restore the default ARP behaviour
    echo "0" >/proc/sys/net/ipv4/conf/lo/arp_ignore
    echo "0" >/proc/sys/net/ipv4/conf/lo/arp_announce
    echo "0" >/proc/sys/net/ipv4/conf/all/arp_ignore
    echo "0" >/proc/sys/net/ipv4/conf/all/arp_announce
    echo "LVS-DR real server stopped."
    ;;
status)
    isLoOn=`/sbin/ifconfig lo:0 | grep "$VIP"`
    isRoOn=`/bin/netstat -rn | grep "$VIP"`
    if [ "$isLoOn" == "" -a "$isRoOn" == "" ]; then
        echo "LVS-DR real server is stopped."
        exit 3
    else
        echo "LVS-DR real server is running."
    fi
    ;;
*)
    echo "Usage: $0 {start|stop|status}"
    exit 1
esac
exit 0
#!/bin/sh
# Real-server script: bind vip2 (192.168.12.102) to the loopback alias lo:1,
# with the same ARP suppression as the vip1 script above.
VIP=192.168.12.102
. /etc/rc.d/init.d/functions
case "$1" in
start)
    /sbin/ifconfig lo down
    /sbin/ifconfig lo up
    echo "1" >/proc/sys/net/ipv4/conf/lo/arp_ignore
    echo "2" >/proc/sys/net/ipv4/conf/lo/arp_announce
    echo "1" >/proc/sys/net/ipv4/conf/all/arp_ignore
    echo "2" >/proc/sys/net/ipv4/conf/all/arp_announce
    /sbin/sysctl -p >/dev/null 2>&1
    # Bind the VIP to lo:1 with a /32 mask and add a host route for it
    /sbin/ifconfig lo:1 $VIP netmask 255.255.255.255 up
    /sbin/route add -host $VIP dev lo:1
    echo "LVS-DR real server starts successfully."
    ;;
stop)
    /sbin/ifconfig lo:1 down
    /sbin/route del $VIP >/dev/null 2>&1
    # Restore the default ARP behaviour
    echo "0" >/proc/sys/net/ipv4/conf/lo/arp_ignore
    echo "0" >/proc/sys/net/ipv4/conf/lo/arp_announce
    echo "0" >/proc/sys/net/ipv4/conf/all/arp_ignore
    echo "0" >/proc/sys/net/ipv4/conf/all/arp_announce
    echo "LVS-DR real server stopped."
    ;;
status)
    isLoOn=`/sbin/ifconfig lo:1 | grep "$VIP"`
    isRoOn=`/bin/netstat -rn | grep "$VIP"`
    if [ "$isLoOn" == "" -a "$isRoOn" == "" ]; then
        echo "LVS-DR real server is stopped."
        exit 3
    else
        echo "LVS-DR real server is running."
    fi
    ;;
*)
    echo "Usage: $0 {start|stop|status}"
    exit 1
esac
exit 0
[root@localhost init.d]# chmod +x double_master_lvsdr0
[root@localhost init.d]# chmod +x double_master_lvsdr1
[root@localhost init.d]# echo "/etc/init.d/double_master_lvsdr0" >> /etc/rc.d/rc.local
[root@localhost init.d]# echo "/etc/init.d/double_master_lvsdr1" >> /etc/rc.d/rc.local
Start the double_master_lvsdr0 and double_master_lvsdr1 scripts (sketched below).
Checking 192.168.12.2 and 192.168.12.3 shows that the VIPs are now successfully bound to the local loopback interface lo.
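A sketch of the start and check commands on the two nginx real servers, assuming double_master_lvsdr0 is the vip1/lo:0 script and double_master_lvsdr1 the vip2/lo:1 script (output not shown):

```bash
# on 192.168.12.2 and 192.168.12.3
/etc/init.d/double_master_lvsdr0 start   # binds 192.168.12.101 to lo:0
/etc/init.d/double_master_lvsdr1 start   # binds 192.168.12.102 to lo:1
ifconfig lo:0                            # should show 192.168.12.101
ifconfig lo:1                            # should show 192.168.12.102
```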
Enable ip_forward (IP forwarding) on lvs_master1 and lvs_master2:
[root@localhost ~]# echo "1" > /proc/sys/net/ipv4/ip_forward
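Writing to /proc only lasts until the next reboot; to make forwarding persistent you would typically also set it in /etc/sysctl.conf (a sketch, not part of the original steps):

```bash
echo "net.ipv4.ip_forward = 1" >> /etc/sysctl.conf
sysctl -p    # reload kernel parameters
```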
! Configuration File for keepalived
! keepalived.conf on lvs_master1 (192.168.12.12): MASTER for vip1 192.168.12.101, BACKUP for vip2 192.168.12.102
global_defs {
    router_id LVS_MASTER
}

vrrp_script check_lvs {
    script "/etc/keepalived/lvs_check.sh"
    interval 2
    weight -20
}

vrrp_instance VI_1 {
    state MASTER
    interface eth0
    virtual_router_id 51
    priority 100
    advert_int 1
    authentication {
        auth_type PASS
        auth_pass 1111
    }
    track_script {
        check_lvs
    }
    virtual_ipaddress {
        192.168.12.101
    }
}

vrrp_instance VI_2 {
    state BACKUP
    interface eth0
    virtual_router_id 52
    priority 90
    advert_int 1
    authentication {
        auth_type PASS
        auth_pass 1111
    }
    track_script {
        check_lvs
    }
    virtual_ipaddress {
        192.168.12.102
    }
}

virtual_server 192.168.12.101 80 {
    delay_loop 6
    lb_algo wrr
    lb_kind DR
    #nat_mask 255.255.255.0
    persistence_timeout 50
    protocol TCP

    real_server 192.168.12.2 80 {
        weight 3
        TCP_CHECK {
            connect_timeout 3
            nb_get_retry 3
            delay_before_retry 3
            connect_port 80
        }
    }
    real_server 192.168.12.3 80 {
        weight 3
        TCP_CHECK {
            connect_timeout 3
            nb_get_retry 3
            delay_before_retry 3
            connect_port 80
        }
    }
}

virtual_server 192.168.12.102 80 {
    delay_loop 6
    lb_algo wrr
    lb_kind DR
    #nat_mask 255.255.255.0
    persistence_timeout 50
    protocol TCP

    real_server 192.168.12.2 80 {
        weight 3
        TCP_CHECK {
            connect_timeout 3
            nb_get_retry 3
            delay_before_retry 3
            connect_port 80
        }
    }
    real_server 192.168.12.3 80 {
        weight 3
        TCP_CHECK {
            connect_timeout 3
            nb_get_retry 3
            delay_before_retry 3
            connect_port 80
        }
    }
}
! Configuration File for keepalived
! keepalived.conf on lvs_master2 (192.168.12.13): BACKUP for vip1 192.168.12.101, MASTER for vip2 192.168.12.102
global_defs {
    router_id LVS_BACKUP
}

vrrp_script check_lvs {
    script "/etc/keepalived/lvs_check.sh"
    interval 2
    weight -20
}

vrrp_instance VI_1 {
    state BACKUP
    interface eth0
    virtual_router_id 51
    priority 90
    advert_int 1
    authentication {
        auth_type PASS
        auth_pass 1111
    }
    track_script {
        check_lvs
    }
    virtual_ipaddress {
        192.168.12.101
    }
}

vrrp_instance VI_2 {
    state MASTER
    interface eth0
    virtual_router_id 52
    priority 100
    advert_int 1
    authentication {
        auth_type PASS
        auth_pass 1111
    }
    track_script {
        check_lvs
    }
    virtual_ipaddress {
        192.168.12.102
    }
}

virtual_server 192.168.12.101 80 {
    delay_loop 6
    lb_algo wrr
    lb_kind DR
    #nat_mask 255.255.255.0
    persistence_timeout 50
    protocol TCP

    real_server 192.168.12.2 80 {
        weight 3
        TCP_CHECK {
            connect_timeout 3
            nb_get_retry 3
            delay_before_retry 3
            connect_port 80
        }
    }
    real_server 192.168.12.3 80 {
        weight 3
        TCP_CHECK {
            connect_timeout 3
            nb_get_retry 3
            delay_before_retry 3
            connect_port 80
        }
    }
}

virtual_server 192.168.12.102 80 {
    delay_loop 6
    lb_algo wrr
    lb_kind DR
    #nat_mask 255.255.255.0
    persistence_timeout 50
    protocol TCP

    real_server 192.168.12.2 80 {
        weight 3
        TCP_CHECK {
            connect_timeout 3
            nb_get_retry 3
            delay_before_retry 3
            connect_port 80
        }
    }
    real_server 192.168.12.3 80 {
        weight 3
        TCP_CHECK {
            connect_timeout 3
            nb_get_retry 3
            delay_before_retry 3
            connect_port 80
        }
    }
}
#!/bin/bash
# /etc/keepalived/lvs_check.sh: referenced by the vrrp_script block in both configs.
# If ipvsadm shows no "Route" (DR) entries, wait 3 seconds and check again;
# if there is still nothing, stop keepalived so the VIPs fail over to the other director.
aa=`ipvsadm -ln`
str="Route"
bb=`echo $aa|grep $str|wc -l`
if [ $bb = 0 ];then
    sleep 3
    aa=`ipvsadm -ln`
    bb=`echo $aa|grep $str|wc -l`
    if [ $bb = 0 ];then
        killall keepalived
    fi
fi
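Keepalived runs this health check as an external program, so the file referenced by vrrp_script should exist on both directors and be executable; something along these lines (path taken from the config above):

```bash
chmod +x /etc/keepalived/lvs_check.sh
```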
Start nginx, double_master_lvsdr0, and double_master_lvsdr1 on 192.168.12.2 and 192.168.12.3, and start Tomcat on 192.168.12.6 and 192.168.12.7.
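Keepalived itself also has to be running on both directors. The original notes don't show this step; assuming it was installed with its service/init script, it would be something like:

```bash
# on lvs_master1 (192.168.12.12) and lvs_master2 (192.168.12.13)
service keepalived start
```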
Looking at eth0 on lvs_master1, we can see that vip1 (192.168.12.101) is bound to it.
Looking at eth0 on lvs_master2, we can see that vip2 (192.168.12.102) is bound to it.
Check the LVS and real-server information on lvs_master1.
Check the LVS and real-server information on lvs_master2.
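A sketch of the checks described above (commands only, output omitted):

```bash
ip addr show eth0   # vip1 should appear on lvs_master1's eth0, vip2 on lvs_master2's
ipvsadm -ln         # lists both virtual servers and their real servers; "Route" means DR forwarding
```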
Edit the hosts file (C:\Windows\System32\drivers\etc\hosts) and map cmazxiaoma.mayday.com to the 2 VIPs. (Configured this way in the hosts file you don't get real load balancing; 192.168.12.101 will always be preferred.)
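The hosts entries would look roughly like this; since the client resolves the first matching entry, only 192.168.12.101 is effectively used, which is why real round-robin needs DNS rather than a hosts file:

```
192.168.12.101  cmazxiaoma.mayday.com
192.168.12.102  cmazxiaoma.mayday.com
```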
Visit cmazxiaoma.mayday.com.
Now we stop lvs_master1; vip1 floats over to lvs_master2, which then holds both VIPs.
When we bring lvs_master1 back, vip1 returns to lvs_master1 and lvs_master2 gives it up again.
Check the eth0, LVS, and real-server information on lvs_master2 during the test.
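A minimal way to reproduce this failover test from the command line ("stopping lvs_master1" is simulated here by stopping its keepalived; the service command is an assumption, as above):

```bash
# on lvs_master1: simulate the failure
service keepalived stop
# on lvs_master2: both VIPs should now be present on eth0
ip addr show eth0
# on lvs_master1: recover; vip1 moves back because of its higher VRRP priority (100 vs 90)
service keepalived start
```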
### Summary
That's it for today's overtime; time to go home and look after my girlfriend!