NFS-Ganesha with CTDB: an NFS HA experiment
For background reading, see the following posts:
https://xrcd2.blogspot.com/2022/04/glusterfs-samba.html
https://xrcd2.blogspot.com/2024/05/glusterfs-servernfs-ganesha.html
Architecture:
(1) nfs1        192.168.100.11
(2) nfs2        192.168.100.12
(3) nfs-vip     192.168.100.10
(4) NFS client  192.168.100.30
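Every machine needs to resolve the node hostnames, since the gluster bricks below are addressed as nfs1/nfs2. A minimal /etc/hosts sketch for all machines (the nfs-vip name is just a convenience label, not something CTDB requires):

192.168.100.11  nfs1
192.168.100.12  nfs2
192.168.100.10  nfs-vip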
OS and software versions:
Oracle Linux 9.6 + glusterfs 11.1 + NFS-Ganesha 4.4 + ctdb 4.23.0
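A rough install sketch for both nodes. The package names are assumptions based on common Gluster/Ganesha packaging and may differ depending on which repositories are enabled:

# on nfs1 and nfs2
dnf -y install glusterfs-server nfs-ganesha nfs-ganesha-gluster ctdb
systemctl enable --now glusterd
# nfs-ganesha and ctdb are configured first and started later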
[root@nfs1 /]# gluster --version
glusterfs 11.1
Repository revision: git://git.gluster.org/glusterfs.git
Copyright (c) 2006-2016 Red Hat, Inc. <https://www.gluster.org/>
GlusterFS comes with ABSOLUTELY NO WARRANTY.
It is licensed to you under your choice of the GNU Lesser
General Public License, version 3 or any later version (LGPLv3
or later), or the GNU General Public License, version 2 (GPLv2),
in all cases as published by the Free Software Foundation.
[root@nfs1 /]# gluster volume info
Volume Name: nfsvolume
Type: Replicate
Volume ID: 5979bdc3-20bf-4600-bac6-ad1514459efd
Status: Started
Snapshot Count: 0
Number of Bricks: 1 x 2 = 2
Transport-type: tcp
Bricks:
Brick1: nfs1:/syncdisk
Brick2: nfs2:/syncdisk
Options Reconfigured:
cluster.granular-entry-heal: on
storage.fips-mode-rchecksum: on
transport.address-family: inet
nfs.disable: on
performance.client-io-threads: off
[root@nfs1 /]#
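The volume above was presumably created along these lines (a sketch reconstructed from the `gluster volume info` output; run once from nfs1 after both glusterd daemons are up):

gluster peer probe nfs2
gluster volume create nfsvolume replica 2 nfs1:/syncdisk nfs2:/syncdisk
gluster volume start nfsvolume

With plain replica 2 the CLI warns about split-brain risk and asks for confirmation; an arbiter brick would avoid that, but it is out of scope here.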
[root@nfs1 /]# /usr/bin/ganesha.nfsd -v
NFS-Ganesha Release = V4.4
[root@nfs1 /]#
[root@nfs1 /]# /usr/bin/ctdb version
4.23.0
[root@nfs1 /]#
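The post does not show /etc/ganesha/ganesha.conf; a hedged sketch of an export block matching the client mount used below (Pseudo path /nfsha, backed by the gluster volume through the GLUSTER FSAL) might look like this, with every value an assumption to adjust:

EXPORT {
    Export_Id = 1;
    Path = "/";                  # path inside the gluster volume
    Pseudo = "/nfsha";           # what clients mount over NFSv4
    Access_Type = RW;
    Protocols = 4;
    FSAL {
        Name = GLUSTER;
        Hostname = "localhost";  # each node talks to its local glusterd
        Volume = "nfsvolume";
    }
}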
The focus of this post is how to configure CTDB to provide an HA VIP for the NFS service.
[root@nfs1 /]# cat /etc/ctdb/nodes
192.168.100.11
192.168.100.12
[root@nfs1 /]# cat /etc/ctdb/public_addresses
192.168.100.10/24 ens160
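Besides nodes and public_addresses, CTDB normally wants a cluster (recovery) lock on storage visible to both nodes to guard against split brain. A hedged sketch of /etc/ctdb/ctdb.conf, assuming the gluster volume is mounted at /gluster/shared on both nodes (the path is an assumption; any coherent shared filesystem works):

[cluster]
    recovery lock = /gluster/shared/.ctdb/reclock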
[root@nfs1 /]# ctdb status
Number of nodes:2
pnn:0 192.168.100.11 OK (THIS NODE)
pnn:1 192.168.100.12 OK
Generation:1901971548
Size:2
hash:0 lmaster:0
hash:1 lmaster:1
Recovery mode:NORMAL (0)
Leader:1
[root@nfs1 /]# ctdb ip
Public IPs on node 0
192.168.100.10 0
[root@nfs1 /]# ip add
1: lo: <LOOPBACK,UP,LOWER_UP> mtu 65536 qdisc noqueue state UNKNOWN group default qlen 1000
link/loopback 00:00:00:00:00:00 brd 00:00:00:00:00:00
inet 127.0.0.1/8 scope host lo
valid_lft forever preferred_lft forever
inet6 ::1/128 scope host
valid_lft forever preferred_lft forever
2: ens160: <BROADCAST,MULTICAST,UP,LOWER_UP> mtu 1500 qdisc mq state UP group default qlen 1000
link/ether 00:0c:29:21:77:33 brd ff:ff:ff:ff:ff:ff
altname enp3s0
inet 192.168.100.11/24 brd 192.168.100.255 scope global noprefixroute ens160
valid_lft forever preferred_lft forever
inet 192.168.100.10/24 brd 192.168.100.255 scope global secondary ens160
valid_lft forever preferred_lft forever
[root@nfs2 /]# ctdb status
Number of nodes:2
pnn:0 192.168.100.11 OK
pnn:1 192.168.100.12 OK (THIS NODE)
Generation:1901971548
Size:2
hash:0 lmaster:0
hash:1 lmaster:1
Recovery mode:NORMAL (0)
Leader:1
[root@nfs2 /]# ctdb ip
Public IPs on node 1
192.168.100.10 0
[root@nfs2 /]#
The scripts under /usr/share/ctdb/events/legacy/ can be used as templates; copy and modify them to suit your needs.
[root@nfs1 events]# cat /usr/share/ctdb/events/legacy/99.nfs-ganesha.script
#!/bin/sh
# event script to manage nfs-ganesha in a cluster environment

[ -n "$CTDB_BASE" ] || \
    CTDB_BASE=$(d="$(dirname "$0")" && cd -P "$d/../../" && pwd)

. "${CTDB_BASE}/functions"

service_name="nfs-ganesha"

case "$1" in
startup)
    # cluster coming up: start the NFS server
    systemctl start "$service_name"
    ;;
shutdown)
    systemctl stop "$service_name"
    ;;
takeip)
    # this node is taking over the public IP: (re)start ganesha
    systemctl try-restart "$service_name"
    ;;
releaseip)
    # this node is giving up the public IP: stop serving
    systemctl stop "$service_name"
    ;;
monitor)
    # mark the node unhealthy if ganesha is not running
    systemctl is-active --quiet "$service_name" || exit 1
    ;;
esac

exit 0
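CTDB only runs event scripts that are executable, so make sure of that on both nodes before enabling (a small step the transcript does not show):

chmod +x /usr/share/ctdb/events/legacy/99.nfs-ganesha.script

The `ctdb event script enable legacy ...` commands below then symlink the scripts into /etc/ctdb/events/legacy/.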
[root@nfs1 /]# ctdb event script enable legacy 10.interface
Enabling 10.interface fixed this error in /var/log/log.ctdb:
nfs1 ctdb-recoverd[4893]: Assigned IP 192.168.100.10 not on an interface
That error appears to be what prevented the HA VIP from being brought up.
[root@nfs1 /]# ctdb event script enable legacy 99.nfs-ganesha
[root@nfs1 /]# ctdb scriptstatus
10.interface OK 0.027 Mon Sep 29 14:29:19 2025
99.nfs-ganesha OK 0.006 Mon Sep 29 14:29:19 2025
====================================
NFS client test
[root@OL10 ~]# dnf -y install nfs-utils
[root@OL10 ~]# mount -t nfs4 192.168.100.10:/nfsha /mnt/nfs_share
[root@OL10 ~]# df
Filesystem 1K-blocks Used Available Use% Mounted on
/dev/mapper/ol-root 46067712 6818824 39248888 15% /
devtmpfs 4096 0 4096 0% /dev
tmpfs 3833264 0 3833264 0% /dev/shm
tmpfs 1533308 9368 1523940 1% /run
tmpfs 1024 0 1024 0% /run/credentials/systemd-journald.service
/dev/nvme0n1p2 983040 510348 472692 52% /boot
tmpfs 1024 0 1024 0% /run/credentials/getty@tty1.service
tmpfs 766652 56 766596 1% /run/user/0
tmpfs 766652 56 766596 1% /run/user/1000
192.168.100.10:/nfsha 20904960 388096 20516864 2% /mnt/nfs_share
[root@OL10 nfsshare]# cd /mnt/nfs_share/nfsshare
[root@OL10 nfsshare]# pwd
/mnt/nfs_share/nfsshare
[root@OL10 nfsshare]# ll
total 0
-rw-r--r-- 1 4294967294 4294967294 0 Sep 29 11:00 123.txt
[root@OL10 nfsshare]# touch abc.txt
[root@OL10 nfsshare]# ll
total 0
-rw-r--r-- 1 4294967294 4294967294 0 Sep 29 11:00 123.txt
-rw-r--r-- 1 4294967294 4294967294 0 Sep 29 14:34 abc.txt
[root@OL10 nfsshare]#
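For the failover below to be transparent, the client should use a hard mount so in-flight I/O blocks and retries while the VIP moves; `hard` is the default on modern Linux, so the plain mount above already behaves that way. An explicit variant (the timeo/retrans values are just illustrative):

mount -t nfs4 -o hard,timeo=100,retrans=3 192.168.100.10:/nfsha /mnt/nfs_share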
========================================
HA failover test: with nfs1 down, the VIP moves to nfs2
[root@nfs2 /]# ctdb status
Number of nodes:2
pnn:0 192.168.100.11 DISCONNECTED|UNHEALTHY|INACTIVE
pnn:1 192.168.100.12 OK (THIS NODE)
Generation:839690097
Size:1
hash:0 lmaster:1
Recovery mode:NORMAL (0)
Leader:1
[root@nfs2 /]# ctdb ip
Public IPs on node 1
192.168.100.10 1
[root@nfs2 /]# ip add
1: lo: <LOOPBACK,UP,LOWER_UP> mtu 65536 qdisc noqueue state UNKNOWN group default qlen 1000
link/loopback 00:00:00:00:00:00 brd 00:00:00:00:00:00
inet 127.0.0.1/8 scope host lo
valid_lft forever preferred_lft forever
inet6 ::1/128 scope host
valid_lft forever preferred_lft forever
2: ens160: <BROADCAST,MULTICAST,UP,LOWER_UP> mtu 1500 qdisc mq state UP group default qlen 1000
link/ether 00:0c:29:0d:0f:f6 brd ff:ff:ff:ff:ff:ff
altname enp3s0
inet 192.168.100.12/24 brd 192.168.100.255 scope global noprefixroute ens160
valid_lft forever preferred_lft forever
inet 192.168.100.10/24 brd 192.168.100.255 scope global secondary ens160
valid_lft forever preferred_lft forever
[root@nfs2 /]#
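A simple way to watch the failover from the client side is a timestamped write loop; writes pause while the VIP moves and NFSv4 grace runs, then resume against nfs2 (a hypothetical probe, not part of the original test):

# run on the NFS client while powering off nfs1
while true; do
    date >> /mnt/nfs_share/nfsshare/probe.txt && echo "write ok"
    sleep 1
done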