keepalived高可用lvs

    LVS只做负载均衡,客户端通过访问VIP来访问后端的网站程序,一旦LVS宕机,整个网站就无法访问,这就出现了单点故障。因此要结合keepalived这种高可用软件来保证整个网站的高可用。本文将介绍如何利用keepalived来实现LVS的高可用(以LVS的DR模式为例;生产环境中后端real server的网站内容是一致的,为了看到实验效果,这里使用两个不一样的页面)。


实验拓扑:

wKiom1glxlyiQ9sBAACuFpbqspk088.png


1.在两台Real Server上配置web服务

wKioL1glx-OxhsrcAAAvTiq1kEU269.png


2.配置两台Real Server

配置脚本如下:

#!/bin/bash
#
# LVS-DR real-server VIP helper.
#
# Usage: $0 {start|stop}
#
# start: suppress ARP visibility of the VIP, then bind the VIP to a
#        loopback alias so this host accepts packets forwarded by the
#        DR-mode director without answering ARP for the VIP itself.
# stop:  restore default ARP behaviour and remove the VIP alias.
#
# Must be run as root (writes /proc sysctls and configures interfaces).

VIP='10.1.88.88'
NETMASK='255.255.255.255'   # /32 host mask: the VIP must not cover a subnet
IFACE='lo:0'

case "$1" in
start)
	# arp_ignore=1: only answer ARP requests for addresses configured on
	# the interface the request arrived on — hides the loopback-bound VIP.
	echo 1 > /proc/sys/net/ipv4/conf/all/arp_ignore
	echo 1 > /proc/sys/net/ipv4/conf/lo/arp_ignore
	# arp_announce=2: always pick the best local source address for ARP
	# announcements, so the VIP is never advertised on the LAN.
	echo 2 > /proc/sys/net/ipv4/conf/all/arp_announce
	echo 2 > /proc/sys/net/ipv4/conf/lo/arp_announce

	ifconfig "$IFACE" "$VIP" broadcast "$VIP" netmask "$NETMASK" up
	route add -host "$VIP" dev "$IFACE"
	;;
stop)
	echo 0 > /proc/sys/net/ipv4/conf/all/arp_ignore
	echo 0 > /proc/sys/net/ipv4/conf/lo/arp_ignore
	echo 0 > /proc/sys/net/ipv4/conf/all/arp_announce
	echo 0 > /proc/sys/net/ipv4/conf/lo/arp_announce

	ifconfig "$IFACE" down
	;;
*)
	echo "Usage: $(basename "$0") {start|stop}"
	exit 1
	;;
esac


3.配置keepalived服务器

keepalived master的配置:

! Configuration File for keepalived — MASTER node

global_defs {
   notification_email {
       root@localhost
   }
   notification_email_from keepalived@localhost
   smtp_server 127.0.0.1
   smtp_connect_timeout 30
   router_id node1
}

vrrp_script chk_down {                # tracker script definition
	script "[[ -f /etc/keepalived/down ]] && exit 1 || exit 0"  # fail when the marker file exists (manual failover switch)
	interval 2     # run the check every 2 seconds
	weight -6      # on failure, lower this node's priority by 6
}

vrrp_instance VI_1 {               # VRRP instance
    state MASTER                       # initial role of this node
    interface eno16777736              # interface VRRP advertisements use
    virtual_router_id 88              # VRID — must match on master and backup
    priority 100                     # higher than the backup's 98
    advert_int 1                    
    authentication {
        auth_type PASS
        auth_pass 1111
    }

    virtual_ipaddress {
		10.1.88.88/16 dev eno16777736 label eno16777736:0    # the VIP, bound as an interface alias
    }

	track_script {
		chk_down
	}
	
	# state-transition hooks — NOTE(review): /etc/keepalived/notify.sh is not shown here; confirm it exists
	notify_master "/etc/keepalived/notify.sh master"
	notify_backup "/etc/keepalived/notify.sh backup"
	notify_fault  "/etc/keepalived/notify.sh fault"
}

virtual_server 10.1.88.88 80 {    # LVS virtual service on the VIP
	delay_loop 3        
	lb_algo rr
	lb_kind DR
	protocol TCP

	sorry_server 127.0.0.1 80        # fallback server used when every real server is down

	real_server 10.1.68.5 80 {            # first real server
		weight 1
		HTTP_GET {        # HTTP health check for this real server
			url {
				path /
				status_code 200
			}

			connect_timeout 1
			nb_get_retry 3
			delay_before_retry 1
		}
	}

	real_server 10.1.68.6 80 {
		weight 1
		HTTP_GET {
			url {
				path /
				status_code 200
			}

			connect_timeout 1
			nb_get_retry 3
			delay_before_retry 1
		}
	}
}

keepalived backup的配置:

! Configuration File for keepalived — BACKUP node

global_defs {
   notification_email {
       root@localhost
   }
   notification_email_from keepalived@localhost
   smtp_server 127.0.0.1
   smtp_connect_timeout 30
   router_id node1
   ! NOTE(review): router_id is identical to the master's — typically the backup uses a distinct id (e.g. node2); confirm
}

vrrp_script chk_down {
	script "[[ -f /etc/keepalived/down ]] && exit 1 || exit 0"
	interval 2
	weight -6
}

vrrp_instance VI_1 {
    state BACKUP             # this node starts as BACKUP
    interface eno16777736
    virtual_router_id 88
    priority 98              # lower than the master's 100
    advert_int 1
    authentication {
        auth_type PASS
        auth_pass 1111
    }

    virtual_ipaddress {
		10.1.88.88/16 dev eno16777736 label eno16777736:0
    }

	track_script {
		chk_down
	}

	notify_master "/etc/keepalived/notify.sh master"
	notify_backup "/etc/keepalived/notify.sh backup"
	notify_fault  "/etc/keepalived/notify.sh fault"
}

virtual_server 10.1.88.88 80 {
	delay_loop 3
	lb_algo rr
	lb_kind DR
	protocol TCP

	sorry_server 127.0.0.1 80    

	real_server 10.1.68.5 80 {
		weight 1
		HTTP_GET {
			url {
				path /
				status_code 200
			}

			connect_timeout 1
			nb_get_retry 3
			delay_before_retry 1
		}
	}

	real_server 10.1.68.6 80 {
		weight 1
		HTTP_GET {
			url {
				path /
				status_code 200
			}

			connect_timeout 1
			nb_get_retry 3
			delay_before_retry 1
		}
	}
}

在两台调度器上面配置httpd,用于sorry_server

wKioL1gl0u-C9BdsAAAoGErl_jY849.png

在两台前端调度器上同时启动keepalived测试

service keepalived start    #master
service keepalived start    #backup

此时两台调度器已经同时生成了ipvs规则

wKioL1gl0Q7xUm9UAAAoG6tg0z8579.png


此时VIP已经在master的网卡接口上

wKiom1gl04Og5stUAAAmaoBXypU425.png



4.测试

wKiom1gl0XjD0hSCAABEViFPD6Y590.png

模拟其中一台调度器故障,VIP立即漂移至backup调度器,不影响调度

wKioL1gl1DOiTY4CAAAwxpqxSB0418.png

wKiom1gl1F_ws3yqAABAFvq-QXo942.png

其中有一台故障可正常调度至另外一台realserver

wKioL1gl0daje7T8AAA_2y1Wp9c573.png

后端realserver所有故障,此时sorry_server服务器响应

wKiom1gl0mOjg5jEAABBk0IXeC8328.png

相关文章
相关标签/搜索