I have used Piranha to set up failover for httpd. I configured both servers the same, but one shows the VIPs and the other doesn't. Is this normal?
Server A) [Primary]
[root@titania ~]# ipvsadm -L
IP Virtual Server version 1.2.1 (size=4096)
Prot LocalAddress:Port Scheduler Flags
-> RemoteAddress:Port Forward Weight ActiveConn InActConn
TCP 172.16.28.231:http lblc
-> 172.16.28.229:http Route 1 0 0
-> 172.16.28.230:http Route 1 0 0
[root@titania ~]
[root@titania ~]# ifconfig
eth0 Link encap:Ethernet HWaddr 00:15:5D:1C:D4:04
inet addr:172.16.28.233 Bcast:172.16.28.255 Mask:255.255.255.0
inet6 addr: fe80::215:5dff:fe1c:d404/64 Scope:Link
UP BROADCAST RUNNING MULTICAST MTU:1500 Metric:1
RX packets:1115300 errors:0 dropped:0 overruns:0 frame:0
TX packets:686246 errors:0 dropped:0 overruns:0 carrier:0
collisions:0 txqueuelen:1000
RX bytes:115178238 (109.8 MiB) TX bytes:50225507 (47.8 MiB)
eth0:1 Link encap:Ethernet HWaddr 00:15:5D:1C:D4:04
inet addr:172.16.28.231 Bcast:172.16.28.255 Mask:255.255.255.0
UP BROADCAST RUNNING MULTICAST MTU:1500 Metric:1
lo Link encap:Local Loopback
inet addr:127.0.0.1 Mask:255.0.0.0
inet6 addr: ::1/128 Scope:Host
UP LOOPBACK RUNNING MTU:16436 Metric:1
RX packets:8 errors:0 dropped:0 overruns:0 frame:0
TX packets:8 errors:0 dropped:0 overruns:0 carrier:0
collisions:0 txqueuelen:0
RX bytes:838 (838.0 b) TX bytes:838 (838.0 b)
[root@titania ~]#
[root@titania ~]# cat /etc/sysconfig/ha/lvs.cf
serial_no = 53
primary = 172.16.28.233
service = lvs
backup_active = 1
backup = 172.16.28.232
heartbeat = 1
heartbeat_port = 539
keepalive = 6
deadtime = 18
network = direct
debug_level = NONE
monitor_links = 1
syncdaemon = 1
syncd_iface = eth0
syncd_id = 0
virtual http {
active = 1
address = 172.16.28.231 eth0:1
vip_nmask = 255.255.255.0
port = 80
send = "GET / HTTP/1.0\r\n\r\n"
expect = "HTTP"
use_regex = 0
load_monitor = none
scheduler = lblc
protocol = tcp
timeout = 6
reentry = 15
quiesce_server = 1
server deimos {
address = 172.16.28.229
active = 1
weight = 1
}
server phobos {
address = 172.16.28.230
active = 1
weight = 1
}
}
[root@titania ~]#
Server B) [Secondary]
IP Virtual Server version 1.2.1 (size=4096)
Prot LocalAddress:Port Scheduler Flags
-> RemoteAddress:Port Forward Weight ActiveConn InActConn
[root@titan ~]#
[root@titan ~]# ifconfig
eth0 Link encap:Ethernet HWaddr 00:15:5D:1C:34:01
inet addr:172.16.28.232 Bcast:172.16.28.255 Mask:255.255.255.0
inet6 addr: fe80::215:5dff:fe1c:3401/64 Scope:Link
UP BROADCAST RUNNING MULTICAST MTU:1500 Metric:1
RX packets:529515 errors:0 dropped:0 overruns:0 frame:0
TX packets:81242 errors:0 dropped:0 overruns:0 carrier:0
collisions:0 txqueuelen:1000
RX bytes:31911311 (30.4 MiB) TX bytes:4919228 (4.6 MiB)
Interrupt:9 Base address:0x2000
lo Link encap:Local Loopback
inet addr:127.0.0.1 Mask:255.0.0.0
inet6 addr: ::1/128 Scope:Host
UP LOOPBACK RUNNING MTU:16436 Metric:1
RX packets:236 errors:0 dropped:0 overruns:0 frame:0
TX packets:236 errors:0 dropped:0 overruns:0 carrier:0
collisions:0 txqueuelen:0
RX bytes:22532 (22.0 KiB) TX bytes:22532 (22.0 KiB)
[root@titan ~]#
[root@titan ~]# cat /etc/sysconfig/ha/lvs.cf
serial_no = 51
primary = 172.16.28.233
service = lvs
backup_active = 1
backup = 172.16.28.232
heartbeat = 1
heartbeat_port = 539
keepalive = 6
deadtime = 18
network = direct
debug_level = NONE
monitor_links = 1
syncdaemon = 1
syncd_iface = eth0
syncd_id = 0
virtual http {
active = 1
address = 172.16.28.231 eth0:1
vip_nmask = 255.255.255.0
port = 80
send = "GET / HTTP/1.0\r\n\r\n"
expect = "HTTP"
use_regex = 0
load_monitor = none
scheduler = lblc
protocol = tcp
timeout = 6
reentry = 15
quiesce_server = 1
server deimos {
address = 172.16.28.229
active = 1
weight = 1
}
server phobos {
address = 172.16.28.230
active = 1
weight = 1
}
}
[root@titan ~]#
Yes, this is normal and expected behavior for LVS.
Since this is a high availability setup, the load-balanced IP (172.16.28.231 in this example) will only reside on the active node. If you were to stop the pulse daemon on Server A, a failover of the load-balanced IP to Server B would automatically occur (assuming the pulse daemon is running and in a healthy state on Server B). The failover should be nearly instantaneous, and once it has occurred, restart the pulse daemon on Server A.
The commands for this workflow and failover verification are included below.
On Server A (the currently active node), stop the pulse daemon to trigger a failover:
# service pulse stop
On Server B, verify that the VIP and the virtual server table have moved over:
# ipvsadm -L
# ifconfig
Once the failover is confirmed, restart the pulse daemon on Server A so it can resume its role as backup:
# service pulse start
You will be able to fail over back and forth between nodes using this method.