keepalived+nginx

环境说明:

| 角色 | IP | 应用和系统版本 | 备注 |
| --- | --- | --- | --- |
| LB1 | 192.168.100.240、192.168.100.250(vip) | rockylinux9.3、nginx1.24.0、Keepalived v2.2.8 | 主负载均衡、高可用 |
| LB2 | 192.168.100.230、192.168.100.250(vip) | rockylinux9.3、nginx1.24.0、Keepalived v2.2.8 | 副负载均衡、高可用 |
| rs1 | 192.168.100.220 | rockylinux9.3、Apache/2.4.57 | 后端服务器 |
| rs2 | 192.168.100.200 | rockylinux9.3、Apache/2.4.57 | 后端服务器 |

前期准备

所有主机都配置阿里云源(下载快一点)、关闭防火墙和selinux

为了方便 nginx用yum安装,apache也是yum安装

nginx配置文件在/etc/nginx/nginx.conf

apache网页 /var/www/html/index.html

要先在主机LB1和LB2上都部署好nginx,rs1和rs2上部署好可以访问的http页面。然后在主机LB1和LB2上部署负载均衡

主机LB1负载均衡

刷新

主机LB2负载均衡

刷新

keepalived

安装keepalived

LB1和LB2均安装keepalived

LB1

[root@LB1 ~]# yum -y install keepalived

LB2

[root@LB2 ~]# yum -y install keepalived

让keepalived监控nginx负载均衡机

通过脚本实现

LB1

[root@LB1 ~]# mkdir /scripts

[root@LB1 ~]# cd /scripts/

[root@LB1 scripts]# vim check_nginx.sh

[root@LB1 scripts]# cat check_nginx.sh

#!/bin/bash

nginx_status=$(ps -ef|grep -Ev "grep|$0"|grep '\bnginx\b'|wc -l)

if [ $nginx_status -lt 1 ];then

systemctl stop keepalived

fi

[root@LB1 scripts]# chmod +x check_nginx.sh

[root@LB1 scripts]# ll

total 4

-rwxr-xr-x. 1 root root 142 Mar 2 15:45 check_nginx.sh

LB2

[root@LB2 ~]# mkdir /scripts

[root@LB2 ~]# cd /scripts/

[root@LB2 scripts]# vim notify.sh

[root@LB2 scripts]# cat notify.sh

#!/bin/bash

VIP=$2

sendmail (){

subject="${VIP}'s server keepalived state is translate"

content="`date +'%F %T'`: `hostname`'s state change to master"

echo $content | mail -s "$subject" aabbcc@qq.com

}

case "$1" in

master)

nginx_status=$(ps -ef|grep -Ev "grep|$0"|grep '\bnginx\b'|wc -l)

if [ $nginx_status -lt 1 ];then

systemctl start nginx

fi

sendmail

;;

backup)

nginx_status=$(ps -ef|grep -Ev "grep|$0"|grep '\bnginx\b'|wc -l)

if [ $nginx_status -gt 0 ];then

systemctl stop nginx

fi

;;

*)

echo "Usage:$0 master|backup VIP"

;;

esac

[root@LB2 scripts]# chomd +x notify.sh

[root@LB2 scripts]# chmod +x notify.sh

[root@LB2 scripts]#

配置keepalived加入监控脚本的配置

LB1

先将原配置文件(移走)备份一下

[root@LB1 ~]# mv /etc/keepalived/keepalived.conf /opt/

[root@LB1 ~]# vim /etc/keepalived/keepalived.conf

[root@LB1 ~]# cat /etc/keepalived/keepalived.conf

! Configuration File for keepalived

global_defs {

router_id lb01 //id 两台主机要不一样

}

vrrp_script nginx_check {

script "/scripts/check_nginx.sh" //脚本路径

interval 1

weight -20

}

vrrp_instance VI_1 {

state MASTER //初始状态为MASTER或BACKYUP

interface ens33 //vrrp示例绑定的网卡接口,和真实网卡一致

virtual_router_id 51 //虚拟路由器id,两台主机要一样

priority 100 //优先级,优先级越大就是主服务器

advert_int 1

authentication {

auth_type PASS

auth_pass ysy //密码自定义

}

virtual_ipaddress {

192.168.100.250

}

track_script { //追踪脚本

nginx_check

}

}

virtual_server 192.168.100.250 80 { //虚拟服务器

delay_loop 6

lb_algo rr

lb_kind DR

persistence_timeout 50

protocol TCP

real_server 192.168.100.240 80 { //指向主负载均衡

weight 1

TCP_CHECK {

connect_port 80

connect_timeout 3

nb_get_retry 3

delay_before_retry 3

}

}

real_server 192.168.100.230 80 { //指向副负载均衡

weight 1

TCP_CHECK {

connect_port 80

connect_timeout 3

nb_get_retry 3

delay_before_retry 3

}

}

}

[root@LB1 ~]# systemctl enable --now keepalived.service

Created symlink /etc/systemd/system/multi-user.target.wants/keepalived.service → /usr/lib/systemd/system/keepalived.service.

[root@LB1 ~]#

LB2

先将原配置文件(移走)备份一下

[root@LB2 ~]# mv /etc/keepalived/keepalived.conf /opt/

[root@LB2 ~]# vim /etc/keepalived/keepalived.conf

[root@LB2 ~]# cat /etc/keepalived/keepalived.conf

! Configuration File for keepalived

global_defs {

router_id lb02

}

vrrp_instance VI_1 {

state BACKUP

interface ens33

virtual_router_id 51

priority 90

nopreempt

advert_int 1

authentication {

auth_type PASS

auth_pass ysy

}

virtual_ipaddress {

192.168.100.250

}

notify_master "/scripts/notify.sh master 192.168.100.250"

notify_backup "/scripts/notify.sh backup 192.168.100.250"

}

virtual_server 192.168.100.250 80 {

delay_loop 6

lb_algo rr

lb_kind DR

persistence_timeout 50

protocol TCP

real_server 192.168.100.240 80 {

weight 1

TCP_CHECK {

connect_port 80

connect_timeout 3

nb_get_retry 3

delay_before_retry 3

}

}

real_server 192.168.100.230 80 {

weight 1

TCP_CHECK {

connect_port 80

connect_timeout 3

nb_get_retry 3

delay_before_retry 3

}

}

}

[root@LB2 ~]#

[root@LB2 ~]# systemctl enable --now keepalived.service

Created symlink /etc/systemd/system/multi-user.target.wants/keepalived.service → /usr/lib/systemd/system/keepalived.service.

[root@LB2 ~]#

验证

LB1

初始正常情况下的LB1,keepalived服务是启动的,nginx也是启动的, 所以有vip 192.168.100.250 [root@LB1 ~]# ip a 1: lo: mtu 65536 qdisc noqueue state UNKNOWN group default qlen 1000 link/loopback 00:00:00:00:00:00 brd 00:00:00:00:00:00 inet 127.0.0.1/8 scope host lo valid_lft forever preferred_lft forever inet6 ::1/128 scope host valid_lft forever preferred_lft forever 2: ens33: mtu 1500 qdisc fq_codel state UP group default qlen 1000 link/ether 00:0c:29:55:ab:78 brd ff:ff:ff:ff:ff:ff altname enp2s1 inet 192.168.100.240/24 brd 192.168.100.255 scope global noprefixroute ens33 valid_lft forever preferred_lft forever inet 192.168.100.250/32 scope global ens33 valid_lft forever preferred_lft forever inet6 fe80::20c:29ff:fe55:ab78/64 scope link noprefixroute valid_lft forever preferred_lft forever [root@LB1 ~]#

LB2

初始正常情况下的LB2,keepalived服务是启动的,但nginx是关闭的, 所以没有vip 192.168.100.250 [root@LB2 ~]# ip a 1: lo: mtu 65536 qdisc noqueue state UNKNOWN group default qlen 1000 link/loopback 00:00:00:00:00:00 brd 00:00:00:00:00:00 inet 127.0.0.1/8 scope host lo valid_lft forever preferred_lft forever inet6 ::1/128 scope host valid_lft forever preferred_lft forever 2: ens33: mtu 1500 qdisc fq_codel state UP group default qlen 1000 link/ether 00:0c:29:47:01:b3 brd ff:ff:ff:ff:ff:ff altname enp2s1 inet 192.168.100.230/24 brd 192.168.100.255 scope global noprefixroute ens33 valid_lft forever preferred_lft forever inet6 fe80::20c:29ff:fe47:1b3/64 scope link noprefixroute valid_lft forever preferred_lft forever [root@LB2 ~]#

访问

刷新

模拟故障

关闭LB1的nginx

[root@LB1 ~]# systemctl stop nginx.service

现在主负载均衡器LB1的nginx被关闭了,check_nginx.sh脚本检测到后会自动关闭LB1的keepalived服务;LB2随即通过VRRP接管vip,并由notify脚本启动自己的nginx服务,完成主备切换

[root@LB1 ~]# ip a

1: lo: mtu 65536 qdisc noqueue state UNKNOWN group default qlen 1000

link/loopback 00:00:00:00:00:00 brd 00:00:00:00:00:00

inet 127.0.0.1/8 scope host lo

valid_lft forever preferred_lft forever

inet6 ::1/128 scope host

valid_lft forever preferred_lft forever

2: ens33: mtu 1500 qdisc fq_codel state UP group default qlen 1000

link/ether 00:0c:29:55:ab:78 brd ff:ff:ff:ff:ff:ff

altname enp2s1

inet 192.168.100.240/24 brd 192.168.100.255 scope global noprefixroute ens33

valid_lft forever preferred_lft forever

inet6 fe80::20c:29ff:fe55:ab78/64 scope link noprefixroute

valid_lft forever preferred_lft forever

[root@LB1 ~]#

[root@LB2 ~]# ip a

1: lo: mtu 65536 qdisc noqueue state UNKNOWN group default qlen 1000

link/loopback 00:00:00:00:00:00 brd 00:00:00:00:00:00

inet 127.0.0.1/8 scope host lo

valid_lft forever preferred_lft forever

inet6 ::1/128 scope host

valid_lft forever preferred_lft forever

2: ens33: mtu 1500 qdisc fq_codel state UP group default qlen 1000

link/ether 00:0c:29:47:01:b3 brd ff:ff:ff:ff:ff:ff

altname enp2s1

inet 192.168.100.230/24 brd 192.168.100.255 scope global noprefixroute ens33

valid_lft forever preferred_lft forever

inet 192.168.100.250/32 scope global ens33

valid_lft forever preferred_lft forever

inet6 fe80::20c:29ff:fe47:1b3/64 scope link noprefixroute

valid_lft forever preferred_lft forever

[root@LB2 ~]#

参考阅读

评论可见,请评论后查看内容,谢谢!!!
 您阅读本篇文章共花了: