Bug#763192: closed by Salvatore Bonaccorso <carnil@debian.org> (Re: Bug#763192: [LXC] [nfsd] kernel crash when running nfs-kernel-server in one LXC Container)
On 23/04/2021 at 20:27, the Debian Bug Tracking System wrote:
This is an automatic notification regarding your Bug report
which was filed against the src:linux package:
#763192: NFSv4 server recovery not supported in container
It has been closed by Salvatore Bonaccorso <carnil@debian.org>.
Their explanation is attached below along with your original report.
If this explanation is unsatisfactory and you have not received a
better one in a separate message then please contact Salvatore Bonaccorso <carnil@debian.org> by
replying to this email.
Hi,
Please find below some good news about this issue. It is now possible to
run an NFS server inside an LXC container.
One of my current configurations, which has been running for two years on
Debian Buster on the armhf and amd64 architectures, is the following.
Step 1: hypervisor configuration (target = hc1-260)
----------------------------------------------------
This is an armhf octocore ODROID-HC1 board:
ansible@hc1-260:~$ uname -a
Linux hc1-260 5.10.0-0.bpo.5-armmp-lpae #1 SMP Debian 5.10.24-1~bpo10+1
(2021-03-29) armv7l GNU/Linux
ansible@hc1-260:~$ cat /etc/debian_version
10.9
ansible@hc1-260:~$ cat /proc/interrupts
           CPU0       CPU1  CPU2  CPU3  CPU4  CPU5  CPU6  CPU7
 57:          0          0     0     0     0     0     0     0  COMBINER 187 Edge   mct_comp_irq
 58:   48423896          0     0     0     0     0     0     0  GICv2 152 Level     mct_tick0
 59:          0   42386303     0     0     0     0     0     0  GICv2 153 Level     mct_tick1
........
Because in my configuration the LXC container cannot insmod the dedicated
NFS modules, it is mandatory to insert them on the hypervisor.
This is done in the /etc/modules file:
ansible@hc1-260:~$ sudo cat /etc/modules |grep -v "#" |grep -v ^$
iptable_filter
autofs4
8021q
tun
nfsv4
nfsd
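For reference, the modules can also be loaded immediately, without waiting
for a reboot (a minimal sketch using standard modprobe/lsmod, not taken
from the session above):

sudo modprobe nfsv4
sudo modprobe nfsd
lsmod | grep nfs    # confirm nfsv4 and nfsd are loaded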
On the hypervisor, the LXC container running the NFS server is up:
ansible@hc1-260:~$ sudo lxc-ls -f |grep nfs
vm-nfs-260 RUNNING 1 grp_lxc_start_on_boot 192.168.22.136, 192.168.24.136, 192.168.25.136
Here is the configuration of the LXC container:
ansible@hc1-260:~$ sudo cat /etc/lxc/auto/vm-nfs-260 |grep -v '#' |grep -v ^$
lxc.arch = armv7l
lxc.uts.name = vm-nfs-260
lxc.start.auto = 1
lxc.start.order = 80
lxc.start.delay = 0
lxc.group = grp_lxc_start_on_boot
lxc.init.cmd = /sbin/init
lxc.init.uid = 0
lxc.init.gid = 0
lxc.ephemeral = 0
lxc.console.buffer.size = 102400
lxc.console.size = 102400
lxc.log.level = DEBUG
lxc.log.file = /var/log/lxc/vm-nfs-260.log
lxc.tty.max = 4
lxc.pty.max = 10
lxc.signal.halt = SIGPWR
lxc.signal.reboot = SIGINT
lxc.signal.stop = SIGKILL
lxc.cgroup.memory.limit_in_bytes = 313M
lxc.cgroup.cpuset.cpus = 4
lxc.cgroup.cpu.shares = 1024
lxc.cgroup.devices.deny = a
lxc.autodev = 1
lxc.cgroup.devices.allow = c 1:3 rwm
lxc.cgroup.devices.allow = c 1:5 rwm
lxc.cgroup.devices.allow = c 1:8 rwm
lxc.cgroup.devices.allow = c 1:9 rwm
lxc.cgroup.devices.allow = c 5:1 rwm
lxc.cgroup.devices.allow = c 5:2 rwm
lxc.cgroup.devices.allow = c 136:0 rwm
lxc.cgroup.devices.allow = c 136:1 rwm
lxc.cgroup.devices.allow = c 136:2 rwm
lxc.cgroup.devices.allow = c 136:3 rwm
lxc.cgroup.devices.allow = c 136:4 rwm
lxc.cgroup.devices.allow = c 136:5 rwm
lxc.cgroup.devices.allow = c 136:6 rwm
lxc.cgroup.devices.allow = c 136:7 rwm
lxc.cgroup.devices.allow = c 136:8 rwm
lxc.cgroup.devices.allow = c 136:9 rwm
lxc.cgroup.devices.allow = c 5:0 rwm
lxc.cgroup.devices.allow = c 4:64 rwm
lxc.cgroup.devices.allow = c 4:65 rwm
lxc.cgroup.devices.allow = c 4:0 rwm
lxc.cgroup.devices.allow = c 4:1 rwm
lxc.cgroup.devices.allow = c 4:2 rwm
lxc.cgroup.devices.allow = c 4:3 rwm
lxc.cgroup.devices.allow = c 4:4 rwm
lxc.cgroup.devices.allow = c 4:5 rwm
lxc.cgroup.devices.allow = c 4:6 rwm
lxc.rootfs.mount = /var/lib/lxc/vm-nfs-260/rootfs
lxc.rootfs.path = /dev/mapper/vg_vm_nfs_260-lv_rootfs
lxc.rootfs.options = defaults,noatime,nodiratime
lxc.mount.entry = proc /var/lib/lxc/vm-nfs-260/rootfs/proc proc nodev,noexec,nosuid 0 0
lxc.mount.entry = devpts /var/lib/lxc/vm-nfs-260/rootfs/dev/pts devpts defaults 0 0
lxc.mount.entry = sysfs /var/lib/lxc/vm-nfs-260/rootfs/sys sysfs d
lxc.mount.entry = /dev/mapper/vg_vm_nfs_260-lv_usr /var/lib/lxc/vm-nfs-ime,nodiratime
lxc.mount.entry = /dev/mapper/vg_vm_nfs_260-lv_var /var/lib/lxc/vm-nfs-ime,nodiratime
lxc.mount.entry = /dev/mapper/vg_vm_nfs_260-lv_tmp /var/lib/lxc/vm-nfs-ime,nodiratime
lxc.mount.entry = /dev/mapper/vg_vm_nfs_260-lv_home /var/lib/lxc/vm-nfsatime,nodiratime
lxc.mount.entry = /dev/mapper/vg_vm_nfs_260-lv_var_log /var/lib/lxc/vm-lts,noatime,nodiratime
lxc.mount.entry = /dev/mapper/vg_vm_nfs_260-lv_var_lib /var/lib/lxc/vm-lts,noatime,nodiratime
lxc.mount.entry = /dev/mapper/vg_vm_nfs_260-lv_var_cache /var/lib/lxc/vefaults,noatime,nodiratime
lxc.mount.entry = /dev/mapper/vg_vm_nfs_260-lv_var_lib_apt /var/lib/lxc4 defaults,noatime,nodiratime
lxc.mount.entry = /dev/mapper/vg_vm_nfs_260-lv_nfs_home /var/lib/lxc/vm,noatime,nodiratime
lxc.net.0.type = veth
lxc.net.0.flags = up
lxc.net.0.link = br-admi
lxc.net.0.name = et-admi
lxc.net.0.hwaddr = 02:00:10:80:08:25
lxc.net.0.veth.pair = e-nfs-adm
lxc.net.1.type = veth
lxc.net.1.flags = up
lxc.net.1.link = br-user
lxc.net.1.name = et-user
lxc.net.1.hwaddr = 02:00:10:80:08:24
lxc.net.1.veth.pair = e-nfs-usr
lxc.net.2.type = veth
lxc.net.2.flags = up
lxc.net.2.link = br-wifi
lxc.net.2.name = et-wifi
lxc.net.2.hwaddr = 02:00:10:80:08:27
lxc.net.2.veth.pair = e-nfs-wifi
lxc.net.3.type = veth
lxc.net.3.flags = up
lxc.net.3.link = br-serv
lxc.net.3.name = et-serv
lxc.net.3.hwaddr = 02:00:10:80:08:22
lxc.net.3.veth.pair = e-nfs-srv
lxc.net.4.type = veth
lxc.net.4.flags = up
lxc.net.4.link = br-fact
lxc.net.4.name = et-fact
lxc.net.4.hwaddr = 02:00:10:80:08:31
lxc.net.4.veth.pair = e-nfs-fact
lxc.apparmor.allow_incomplete = 1
lxc.apparmor.profile = unconfined
ansible@hc1-260:~$
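As a quick sanity check (a sketch, not part of the original session), one
can confirm from the hypervisor that the container sees the NFS modules,
since the kernel module list is shared with the host:

sudo lxc-attach -n vm-nfs-260 -- lsmod | grep nfs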
Of course, on the hypervisor no NFS daemon is running, because no NFS
package is installed (the command below prints nothing):
ansible@hc1-260:~$ dpkg -l |grep nfs
and there is no special NFS mount point:
ansible@hc1-260:~$ df
Filesystem 1K-blocks Used Available Use% Mounted on
udev 961608 0 961608 0% /dev
tmpfs 196624 1456 195168 1% /run
/dev/mmcblk0p1 3028752 1217568 1637616 43% /
tmpfs 5120 0 5120 0% /run/lock
tmpfs 393240 40 393200 1% /dev/shm
cgroup 983112 0 983112 0% /sys/fs/cgroup
On the hypervisor, one dedicated disk is available, but only for the NFS
server container; everything is done through LVM on an SSD:
ansible@hc1-260:~$ sudo lvs |grep nfs
lv_home vg_vm_nfs_260 -wi-ao---- 124.00m
lv_nfs_home vg_vm_nfs_260 -wi-ao---- 58.00g
lv_rootfs vg_vm_nfs_260 -wi-ao---- 124.00m
lv_tmp vg_vm_nfs_260 -wi-ao---- 144.00m
lv_usr vg_vm_nfs_260 -wi-ao---- 688.00m
lv_var vg_vm_nfs_260 -wi-ao---- 112.00m
lv_var_cache vg_vm_nfs_260 -wi-ao---- 384.00m
lv_var_lib vg_vm_nfs_260 -wi-ao---- 132.00m
lv_var_lib_apt vg_vm_nfs_260 -wi-ao---- 732.00m
lv_var_log vg_vm_nfs_260 -wi-ao---- 200.00m
The LVM partitions above are of course mounted into the LXC NFS container
according to the configuration shown earlier.
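For reference, a volume such as lv_nfs_home can be created and formatted
as follows (a sketch; the ext4 filesystem type is an assumption, while
the name and size match the listing above):

sudo lvcreate -L 58G -n lv_nfs_home vg_vm_nfs_260
sudo mkfs.ext4 /dev/mapper/vg_vm_nfs_260-lv_nfs_home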
Step 2: nfs server configuration (target = vm-nfs-260)
------------------------------------------------------
ansible@vm-nfs-260:~$ df
Filesystem                                1K-blocks     Used Available Use% Mounted on
/dev/mapper/vg_vm_nfs_260-lv_rootfs          118867     4906    105074   5% /
none                                            492        0       492   0% /dev
/dev/mapper/vg_vm_nfs_260-lv_usr             677032   371040    256680  60% /usr
/dev/mapper/vg_vm_nfs_260-lv_var             106967     2335     96605   3% /var
/dev/mapper/vg_vm_nfs_260-lv_tmp             138697     1550    126826   2% /tmp
/dev/mapper/vg_vm_nfs_260-lv_home            118867     1769    108211   2% /home
/dev/mapper/vg_vm_nfs_260-lv_var_log         194235    79682    100217  45% /var/log
/dev/mapper/vg_vm_nfs_260-lv_var_lib         126786    11643    105682  10% /var/lib
/dev/mapper/vg_vm_nfs_260-lv_var_cache       372607    68377    280474  20% /var/cache
/dev/mapper/vg_vm_nfs_260-lv_var_lib_apt     721392   155928    513000  24% /var/lib/apt
/dev/mapper/vg_vm_nfs_260-lv_nfs_home      59600812 18687568  37855992  34% /srv
tmpfs                                        196624       56    196568   1% /run
tmpfs                                          5120        0      5120   0% /run/lock
tmpfs                                        393240       40    393200   1% /dev/shm
ansible@vm-nfs-260:~$
ansible@vm-nfs-260:~$ cat /etc/debian_version
10.9
ansible@vm-nfs-260:~$ pstree -anp
init,1
 |-rpcbind,662 -w -h vm-nfs-260-service
 |-rpc.statd,671 --state-directory-path /var/lib/nfs --port 32766 --outgoing-port 32765 --name vm-nfs-260-service
 |-rpc.idmapd,680
 |-rpc.mountd,742 --state-directory-path /var/lib/nfs --manage-gids --port 32767 --num-threads=6
 |   |-rpc.mountd,745 --state-directory-path /var/lib/nfs --manage-gids --port 32767 --num-threads=6
 |   |-rpc.mountd,746 --state-directory-path /var/lib/nfs --manage-gids --port 32767 --num-threads=6
 |   |-rpc.mountd,747 --state-directory-path /var/lib/nfs --manage-gids --port 32767 --num-threads=6
 |   |-rpc.mountd,748 --state-directory-path /var/lib/nfs --manage-gids --port 32767 --num-threads=6
 |   |-rpc.mountd,749 --state-directory-path /var/lib/nfs --manage-gids --port 32767 --num-threads=6
 |   `-rpc.mountd,750 --state-directory-path /var/lib/nfs --manage-gids --port 32767 --num-threads=6
 |-syslog-ng,764
 |   `-syslog-ng,765 -p /var/run/syslog-ng.pid --no-caps
 |-cron,788
 |-monit,804 -c /etc/monit/monitrc
 |   |-{monit},9259
 |   |-{monit},9260
 |   `-(verify_rpc_stat,10086)
 |-getty,808 115200 console
 `-sshd,32294
     `-sshd,10079
         `-sshd,10081
             `-bash,10082
                 `-pstree,10107 -anp
ansible@vm-nfs-260:~$
Only ssh, monit and syslog are running, under a sysvinit init
(no systemd!).
ansible@vm-nfs-260:~$ ip route ls
default via 192.168.24.254 dev et-user
192.168.22.0/24 dev et-serv proto kernel scope link src 192.168.22.136
192.168.24.0/24 dev et-user proto kernel scope link src 192.168.24.136
192.168.25.0/24 dev et-admi proto kernel scope link src 192.168.25.136
ansible@vm-nfs-260:~$
Of course, all configuration files are 100% compatible with Debian Buster.
ansible@vm-nfs-260:~$ cat /etc/default/nfs-kernel-server |grep -v "#" |grep -v ^$
RPCNFSDCOUNT=8
RPCNFSDPRIORITY=0
RPCMOUNTDOPTS="--state-directory-path /var/lib/nfs --manage-gids --port 32767 --num-threads=6"
NEED_SVCGSSD="no"
ansible@vm-nfs-260:~$
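After changing these defaults, the server can be restarted and the
registered RPC services inspected (a sketch using standard tools, not
taken from the original session):

sudo service nfs-kernel-server restart
rpcinfo -p localhost    # portmapper, status, mountd on 32767 and nfs should be listed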
ansible@vm-nfs-260:~$ cat /etc/exports |grep -v "#" |grep -v ^$
/srv/nfs/home localhost(rw,secure_locks,insecure,no_subtree_check,no_all_squash,async,no_root_squash)
/srv/nfs/home 192.168.22.0/24(rw,secure_locks,insecure,no_subtree_check,no_all_squash,async,root_squash)
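After editing /etc/exports, the export table can be reloaded and verified
with the standard tools (a sketch, not part of the original session):

sudo exportfs -ra        # re-export everything declared in /etc/exports
sudo exportfs -v         # show active exports together with their options
showmount -e localhost   # list the exports as a client would see them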
Step 3: nfs client side
-----------------------
On the client side, I am running the NFS client on the amd64, armhf and
arm64 architectures, either on a real physical target or in an LXC
container.
For example, the following configuration is in place on one LXC arm64
Bullseye target:
jean-marc@vm-bullseye-arm64-280:~$ df
Filesystem                                            1K-blocks     Used Available Use% Mounted on
/dev/mapper/vg_vm_bullseye_arm64_280-lv_rootfs           118867    10847     99133  10% /
none                                                        492        0       492   0% /dev
udev                                                    1806524        0   1806524   0% /dev/dri
/dev/mapper/vg_vm_bullseye_arm64_280-lv_usr             5981956  4439736   1218636  79% /usr
/dev/mapper/vg_vm_bullseye_arm64_280-lv_var              206112     5011    186151   3% /var
/dev/mapper/vg_vm_bullseye_arm64_280-lv_tmp              138697     1554    126822   2% /tmp
/dev/mapper/vg_vm_bullseye_arm64_280-lv_home             118867     3256    106724   3% /home
/dev/mapper/vg_vm_bullseye_arm64_280-lv_var_log          194235    66707    113192  38% /var/log
/dev/mapper/vg_vm_bullseye_arm64_280-lv_var_lib          126786    48031     69294  41% /var/lib
/dev/mapper/vg_vm_bullseye_arm64_280-lv_var_cache        991512   196272    727656  22% /var/cache
/dev/mapper/vg_vm_bullseye_arm64_280-lv_var_lib_apt      721392   279148    389780  42% /var/lib/apt
tmpfs                                                    390880      736    390144   1% /run
tmpfs                                                      5120        0      5120   0% /run/lock
tmpfs                                                    781760       16    781744   1% /dev/shm
tmpfs                                                    390880        0    390880   0% /run/user/10000
vm-nfs-260-service.sub-dns-lapiteau.TLD.jml:/srv/nfs/home/jean-marc
                                                       59600896 18687616  37856000  34% /nfs-home/jean-marc
jean-marc@vm-bullseye-arm64-280:~$ uname -a
Linux vm-bullseye-arm64-280 5.10.0-0.bpo.5-arm64 #1 SMP Debian
5.10.24-1~bpo10+1 (2021-03-29) aarch64 GNU/Linux
jean-marc@vm-bullseye-arm64-280:~$ cat /etc/debian_version
bullseye/sid
jean-marc@vm-bullseye-arm64-280:~$
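For completeness, the NFS mount visible in the df output above corresponds
to a mount along these lines (a sketch; the nfs4 type and the options are
assumptions, not taken from the original client configuration):

sudo mkdir -p /nfs-home/jean-marc
sudo mount -t nfs4 vm-nfs-260-service.sub-dns-lapiteau.TLD.jml:/srv/nfs/home/jean-marc /nfs-home/jean-marc

or the equivalent /etc/fstab line:

vm-nfs-260-service.sub-dns-lapiteau.TLD.jml:/srv/nfs/home/jean-marc /nfs-home/jean-marc nfs4 defaults 0 0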
Best regards,
----------------------------------------
-- Jean-Marc LACROIX (06 82 29 98 66) --
-- mailto : jeanmarc.lacroix@free.fr --
-----------------------------------------