Sunday, May 12, 2024

GlusterFS server + NFS-Ganesha: a big messy test run

Background reading and references

https://xrcd2.blogspot.com/2022/04/glusterfs-samba.html

https://www.server-world.info/en/note?os=CentOS_Stream_9&p=glusterfs11&f=1

https://core.vmware.com/resource/nfs-iscsi-multipathing-vsphere

Test architecture

192.168.100.201 Rocky Linux   nfs1     glusterfs-server + nfs-ganesha
192.168.100.202 Rocky Linux   nfs2     glusterfs-server + nfs-ganesha
192.168.100.x   Oracle Linux  Oracle9  GlusterFS / NFS client tests
192.168.100.111 ESXi                   NFS 4.1 client
192.168.100.222 PVE                    GlusterFS client + NFS client

===========================

Quick notes on the commands and configuration

fdisk /dev/nvme0n2        # partition the second NVMe disk for the GlusterFS brick

mkfs.xfs /dev/nvme0n2p1   # format the brick partition as XFS

mkdir /syncdisk           # mount point for the brick filesystem

blkid                     # look up the UUID of the new partition

vi /etc/fstab


UUID=33548bbf-a5be-4d08-ab25-28e68dc438f5 /syncdisk   xfs   defaults   0 0
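
The same disk preparation is done on both nfs1 and nfs2 (the UUID differs per node, as the two fstab files further down show). The new entry can be activated without a reboot, for example:

systemctl daemon-reload
mount /syncdisk
df -h /syncdisk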



vi /etc/hosts

192.168.100.201 nfs1
192.168.100.202 nfs2

[root@nfs1 ~]# df -h
Filesystem      Size  Used Avail Use% Mounted on
devtmpfs        4.0M     0  4.0M   0% /dev
tmpfs           3.8G     0  3.8G   0% /dev/shm
tmpfs           1.5G  9.4M  1.5G   1% /run
/dev/nvme0n1p2   44G  6.8G   38G  16% /
/dev/nvme0n1p1  960M  469M  492M  49% /boot
/dev/nvme0n2p1   20G  175M   20G   1% /syncdisk
tmpfs           766M   36K  766M   1% /run/user/1000
[root@nfs1 ~]# 


dnf install centos-release-gluster11.noarch      # Gluster 11 SIG repository
dnf install centos-release-nfs-ganesha5.noarch   # NFS-Ganesha 5 SIG repository
dnf --enablerepo=crb install python3-pyxattr
dnf install glusterfs-server
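
glusterd has to be running on both nodes before the peer probe below will succeed, so the same packages are installed on nfs2 and the service is started there as well:

systemctl enable --now glusterd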


[root@nfs1 yum.repos.d]# systemctl enable --now glusterd
[root@nfs1 yum.repos.d]# gluster --version
glusterfs 11.1
Repository revision: git://git.gluster.org/glusterfs.git
Copyright (c) 2006-2016 Red Hat, Inc. <https://www.gluster.org/>
GlusterFS comes with ABSOLUTELY NO WARRANTY.
It is licensed to you under your choice of the GNU Lesser
General Public License, version 3 or any later version (LGPLv3
or later), or the GNU General Public License, version 2 (GPLv2),
in all cases as published by the Free Software Foundation.
[root@nfs1 yum.repos.d]# gluster peer probe nfs2
peer probe: success
[root@nfs1 yum.repos.d]# gluster peer status 
Number of Peers: 1

Hostname: nfs2
Uuid: bc6daa49-a8ca-438c-871c-45a978ea4251
State: Peer in Cluster (Connected)

[root@nfs1 yum.repos.d]# gluster volume create nfsvolume replica 2 transport tcp nfs1:/syncdisk/nfs nfs2:/syncdisk/nfs
Replica 2 volumes are prone to split-brain. Use Arbiter or Replica 3 to avoid this. See: http://docs.gluster.org/en/latest/Administrator-Guide/Split-brain-and-ways-to-deal-with-it/.
Do you still want to continue?
 (y/n) y
volume create: nfsvolume: success: please start the volume to access data
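
The warning above is worth noting: a plain replica-2 volume can end up split-brained if the two bricks diverge. With a third machine available, an arbiter volume avoids this at little extra cost, since the arbiter brick stores only metadata. A hypothetical nfs3 node (not part of this test) would be added like this:

gluster volume create nfsvolume replica 3 arbiter 1 transport tcp \
    nfs1:/syncdisk/nfs nfs2:/syncdisk/nfs nfs3:/syncdisk/arbiter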

[root@nfs1 yum.repos.d]# gluster volume info 
 
Volume Name: nfsvolume
Type: Replicate
Volume ID: 776a642f-53fe-47ee-924a-20a45a911304
Status: Created
Snapshot Count: 0
Number of Bricks: 1 x 2 = 2
Transport-type: tcp
Bricks:
Brick1: nfs1:/syncdisk/nfs
Brick2: nfs2:/syncdisk/nfs
Options Reconfigured:
cluster.granular-entry-heal: on
storage.fips-mode-rchecksum: on
transport.address-family: inet
nfs.disable: on
performance.client-io-threads: off
[root@nfs1 yum.repos.d]# 


[root@nfs1 yum.repos.d]# gluster volume start nfsvolume
volume start: nfsvolume: success
[root@nfs1 yum.repos.d]# 


[root@nfs2 ~]#  mount -t glusterfs nfs1:/nfsvolume /mnt
[root@nfs2 ~]# df 
Filesystem      1K-blocks    Used Available Use% Mounted on
devtmpfs             4096       0      4096   0% /dev
tmpfs             3918632       0   3918632   0% /dev/shm
tmpfs             1567456    9600   1557856   1% /run
/dev/nvme0n1p2   46064640 7125096  38939544  16% /
/dev/nvme0n2p1   20904960  179044  20725916   1% /syncdisk
/dev/nvme0n1p1     983040  479448    503592  49% /boot
tmpfs              783724      36    783688   1% /run/user/1000
nfs1:/nfsvolume  20904960  388092  20516868   2% /mnt

[root@nfs2 ~]#  echo "GlusterFS write test from nfs2" > /mnt/test.txt
[root@nfs2 ~]# ls -la /syncdisk/nfs/*
-rw-r--r-- 2 root root 21 May 11 12:57 /syncdisk/nfs/test.txt
[root@nfs2 ~]# 


[root@nfs1 yum.repos.d]# ls -la /syncdisk/nfs/*
-rw-r--r-- 2 root root 21 May 11 12:57 /syncdisk/nfs/test.txt
[root@nfs1 yum.repos.d]# 


mkdir /syncgfs 

vi /etc/fstab 



[root@nfs1 nfs]# cat /etc/fstab 

#
# /etc/fstab
# Created by anaconda on Tue Feb 20 23:34:32 2024
#
# Accessible filesystems, by reference, are maintained under '/dev/disk/'.
# See man pages fstab(5), findfs(8), mount(8) and/or blkid(8) for more info.
#
# After editing this file, run 'systemctl daemon-reload' to update systemd
# units generated from this file.
#
UUID=c5d392e3-edbd-44e3-bd92-0053aec8bc10 /                       xfs     defaults        0 0
UUID=6fe1dba6-5999-491c-9679-c32ee4c84490 /boot                   xfs     defaults        0 0
UUID=d631d923-aa38-475f-9609-8283d3a598dc none                    swap    defaults        0 0
UUID=33548bbf-a5be-4d08-ab25-28e68dc438f5 /syncdisk   xfs   defaults   0 0
nfs1:/nfsvolume /syncgfs glusterfs   defaults,_netdev   0 0
[root@nfs1 nfs]# 



[root@nfs2 syncgfs]# cat /etc/fstab 

#
# /etc/fstab
# Created by anaconda on Tue Feb 20 23:34:32 2024
#
# Accessible filesystems, by reference, are maintained under '/dev/disk/'.
# See man pages fstab(5), findfs(8), mount(8) and/or blkid(8) for more info.
#
# After editing this file, run 'systemctl daemon-reload' to update systemd
# units generated from this file.
#
UUID=c5d392e3-edbd-44e3-bd92-0053aec8bc10 /                       xfs     defaults        0 0
UUID=6fe1dba6-5999-491c-9679-c32ee4c84490 /boot                   xfs     defaults        0 0
UUID=d631d923-aa38-475f-9609-8283d3a598dc none                    swap    defaults        0 0
UUID=575c8913-d8df-4d79-857e-4b586c439b52 /syncdisk   xfs   defaults   0 0 
nfs2:/nfsvolume         /syncgfs        glusterfs                 defaults,_netdev        0 0
[root@nfs2 syncgfs]# 


dnf install nfs-ganesha-vfs nfs-ganesha nfs-ganesha-gluster


[root@nfs1 nfs]# cp /etc/ganesha/ganesha.conf /etc/ganesha/ganesha.conf.org
[root@nfs1 nfs]# vi /etc/ganesha/ganesha.conf




###################################################
#
# Ganesha Config Example
#
# This is a commented example configuration file for Ganesha.  It is not
# complete, but only has some common configuration options.  See the man pages
# for complete documentation.
#
###################################################

## These are core parameters that affect Ganesha as a whole.
NFS_CORE_PARAM {
    ## Allow NFSv3 to mount paths with the Pseudo path, the same as NFSv4,
    ## instead of using the physical paths.
    mount_path_pseudo = true;

    ## Configure the protocols that Ganesha will listen for.  This is a hard
    ## limit, as this list determines which sockets are opened.  This list
    ## can be restricted per export, but cannot be expanded.
    Protocols = 3,4;
}

## These are defaults for exports.  They can be overridden per-export.
EXPORT_DEFAULTS {
    ## Access type for clients.  Default is None, so some access must be
    ## given either here or in the export itself.
    Access_Type = RW;
}

## Configure settings for the object handle cache
#MDCACHE {
    ## The point at which object cache entries will start being reused.
    #Entries_HWMark = 100000;
#}

## Configure an export for some file tree
EXPORT
{
    ## Export Id (mandatory, each EXPORT must have a unique Export_Id)
    Export_Id = 12345;

    ## Exported path (mandatory)
    Path = /nfsha;

    ## Pseudo Path (required for NFSv4 or if mount_path_pseudo = true)
    Pseudo = /nfsha;

    ## Restrict the protocols that may use this export.  This cannot allow
    ## access that is denied in NFS_CORE_PARAM.
    Protocols = 3,4;

    ## Access type for clients.  Default is None, so some access must be
    ## given. It can be here, in the EXPORT_DEFAULTS, or in a CLIENT block
    Access_Type = RW;

    ## Whether to squash various users.
    Squash = No_root_squash;

    ## Allowed security types for this export
    Sectype = sys;

    ## Exporting FSAL
    FSAL {
        Name = GLUSTER;
        hostname="192.168.100.201";
        volume="nfsvolume";
    }
}

## Configure logging.  Default is to log to Syslog.  Basic logging can also be
## configured from the command line
LOG {
    ## Default log level for all components
    Default_Log_Level = WARN;

    ## Configure per-component log levels.
    #Components {
        #FSAL = INFO;
        #NFS4 = EVENT;
    #}

    ## Where to log
    #Facility {
        #name = FILE;
        #destination = "/var/log/ganesha.log";
        #enable = active;
    #}
}
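
nfs2's ganesha.conf is not shown in these notes; presumably it is the same file with the FSAL pointed at the local node:

FSAL {
    Name = GLUSTER;
    hostname="192.168.100.202";   # assumption: each Ganesha instance talks to its local glusterd
    volume="nfsvolume";
}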



[root@nfs1 nfs]#  systemctl disable --now nfs-server
[root@nfs1 nfs]#  systemctl enable --now nfs-ganesha
Created symlink /etc/systemd/system/multi-user.target.wants/nfs-ganesha.service → /usr/lib/systemd/system/nfs-ganesha.service.
Created symlink /etc/systemd/system/nfs-ganesha.service.wants/nfs-ganesha-lock.service → /usr/lib/systemd/system/nfs-ganesha-lock.service.

[root@nfs1 nfs]# showmount -e localhost
Export list for localhost:
/nfsha (everyone)
[root@nfs1 nfs]# 





[root@Oracle9 ~]# yum install nfsv4-client-utils.x86_64 nfs-utils.x86_64



vi /etc/hosts

192.168.100.201 nfs1
192.168.100.202 nfs2


mkdir /mnt/HA-NFS


[root@Oracle9 mnt]# mount -t nfs4 nfs1:/nfsha /mnt/HA-NFS/


[root@Oracle9 mnt]# df -h
Filesystem      Size  Used Avail Use% Mounted on
devtmpfs        4.0M     0  4.0M   0% /dev
tmpfs           3.7G     0  3.7G   0% /dev/shm
tmpfs           1.5G  9.2M  1.5G   1% /run
/dev/nvme0n1p3   45G   11G   35G  23% /
/dev/nvme0n1p1  960M  707M  254M  74% /boot
tmpfs           749M   36K  749M   1% /run/user/0
tmpfs           749M   36K  749M   1% /run/user/1001
nfs1:/nfsha      20G  379M   20G   2% /mnt/HA-NFS
[root@Oracle9 mnt]# 



[root@Oracle9 mnt]# cat  /mnt/HA-NFS/test.txt 
GlusterFS write test from nfs 1 
GlusterFS write test from nfs 2 

[root@Oracle9 mnt]# echo "GlusterFS write test from Oracle-Client  " >> /mnt/HA-NFS/test.txt 
[root@Oracle9 mnt]# 


[root@Oracle9 mnt]# cat /mnt/HA-NFS/test.txt 
GlusterFS write test from nfs 1 
GlusterFS write test from nfs 2 
GlusterFS write test from Oracle-Client  
[root@Oracle9 mnt]# 

[root@nfs1 nfs]# cat /syncdisk/nfs/test.txt 
GlusterFS write test from nfs 1 
GlusterFS write test from nfs 2 
GlusterFS write test from Oracle-Client  
[root@nfs1 nfs]# 


[root@nfs2 syncgfs]# cat /syncdisk/nfs/test.txt 
GlusterFS write test from nfs 1 
GlusterFS write test from nfs 2 
GlusterFS write test from Oracle-Client  
[root@nfs2 syncgfs]# 


[root@Oracle9 mnt]# umount /mnt/HA-NFS 
[root@Oracle9 mnt]# mount -t nfs4 nfs2:/nfsha /mnt/HA-NFS/

[root@Oracle9 mnt]# echo "GlusterFS write test2 from Oracle-Client  " >> /mnt/HA-NFS/test.txt 
[root@Oracle9 mnt]# 

[root@nfs1 nfs]# cat /syncdisk/nfs/test.txt 
GlusterFS write test from nfs 1 
GlusterFS write test from nfs 2 
GlusterFS write test from Oracle-Client  
GlusterFS write test2 from Oracle-Client  
[root@nfs1 nfs]# 


[root@nfs2 syncgfs]# cat /syncdisk/nfs/test.txt 
GlusterFS write test from nfs 1 
GlusterFS write test from nfs 2 
GlusterFS write test from Oracle-Client  
GlusterFS write test2 from Oracle-Client  
[root@nfs2 syncgfs]# 





ESXi
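
The command used to add the datastore is not in the notes; an NFS 4.1 datastore that lists both Ganesha nodes as servers can be created along these lines (the datastore name matches the output below):

esxcli storage nfs41 add -H 192.168.100.201,192.168.100.202 -s /nfsha -v gfs-storage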

[root@localhost:~] esxcli storage nfs41 list
Volume Name  Host(s)                          Share   Accessible  Mounted  Read-Only  Security   isPE  Hardware Acceleration
-----------  -------------------------------  ------  ----------  -------  ---------  --------  -----  ---------------------
gfs-storage  192.168.100.201,192.168.100.202  /nfsha        true     true      false  AUTH_SYS  false  Not Supported
[root@localhost:~] 




[root@localhost:~] vim-cmd hostsvc/datastore/info gfs-storage
(vim.host.NasDatastoreInfo) {
   name = "gfs-storage", 
   url = "/vmfs/volumes/d3b21ae2-01cd6b50-0000-000000000000", 
   freeSpace = 14566449152, 
   maxFileSize = 70368744177664, 
   maxVirtualDiskCapacity = 68169720922112, 
   maxMemoryFileSize = 70368744177664, 
   timestamp = "2024-05-11T09:42:04.121385Z", 
   containerId = <unset>, 
   aliasOf = <unset>, 
   datastoreFormat = <unset>, 
   logicalSectorSize = <unset>, 
   physicalSectorSize = <unset>, 
   nas = (vim.host.NasVolume) {
      type = "NFS41", 
      name = "gfs-storage", 
      capacity = 21406679040, 
      remoteHost = "192.168.100.201", 
      remotePath = "/nfsha", 
      userName = <unset>, 
      remoteHostNames = (string) [
         "192.168.100.201", 
         "192.168.100.202"
      ], 
      securityType = "AUTH_SYS", 
      protocolEndpoint = false
   }
}
(vim.Datastore.HostMount) [
   (vim.Datastore.HostMount) {
      key = 'vim.HostSystem:ha-host', 
      mountInfo = (vim.host.MountInfo) {
         path = "/vmfs/volumes/d3b21ae2-01cd6b50-0000-000000000000", 
         accessMode = "readWrite", 
         mounted = true, 
         accessible = true, 
         inaccessibleReason = <unset>, 
         vmknicName = "None", 
         vmknicActive = false, 
         mountFailedReason = <unset>, 
         numTcpConnections = 1
      }
   }
]
[root@localhost:~]


[root@localhost:~] esxcli storage filesystem list
Mount Point                                        Volume Name                                 UUID                                 Mounted  Type            Size          Free
-------------------------------------------------  ------------------------------------------  -----------------------------------  -------  ------  ------------  ------------
/vmfs/volumes/d3b21ae2-01cd6b50-0000-000000000000  gfs-storage                                 d3b21ae2-01cd6b50-0000-000000000000     true  NFS41    21406679040   14566449152
/vmfs/volumes/654e469c-44d19cd7-8715-000c2921ee83  datastore1                                  654e469c-44d19cd7-8715-000c2921ee83     true  VMFS-6  137170518016   99963895808
/vmfs/volumes/654e469c-378e9fe6-73ec-000c2921ee83  OSDATA-654e469c-378e9fe6-73ec-000c2921ee83  654e469c-378e9fe6-73ec-000c2921ee83     true  VMFSOS  128580583424  125292249088
/vmfs/volumes/507161ab-d84dd9c3-c4a7-a5422027a503  BOOTBANK1                                   507161ab-d84dd9c3-c4a7-a5422027a503     true  vfat      4293591040    4000317440
/vmfs/volumes/d9628348-1545f094-5e85-a3d31d093e2e  BOOTBANK2                                   d9628348-1545f094-5e85-a3d31d093e2e     true  vfat      4293591040    4293525504
[root@localhost:~] 

PVE
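
On PVE, mounting the same volume as GlusterFS storage presumably comes down to an entry in /etc/pve/storage.cfg along these lines (the storage ID and content types are assumptions; this part was not captured in the test log):

glusterfs: gfs-storage
        server 192.168.100.201
        server2 192.168.100.202
        volume nfsvolume
        content images,iso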






ESXi NFS

ESXi needs root access to an NFS export. Different storage vendors have different ways of enabling this, but a NAS server typically uses the no_root_squash option. If the NAS server does not grant root access, the NFS datastore can still be mounted on the host; however, no virtual machines can be created on that datastore.
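
In this setup the EXPORT block above already uses Squash = No_root_squash. A quick way to sanity-check that from any NFS client before pointing ESXi at the export (reusing the Oracle9 mount point; a minimal sketch):

mount -t nfs4 nfs1:/nfsha /mnt/HA-NFS
touch /mnt/HA-NFS/root-write-test
ls -l /mnt/HA-NFS/root-write-test    # owner should stay root, not get squashed to nobody
umount /mnt/HA-NFS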