
[Nbd] 10 TByte partition



Hi!

The attached patch makes it possible to run a 16TB nbd partition on a
32-bit system.
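
For reference, a minimal sketch of the arithmetic behind the old and new
limits, assuming a 32-bit unsigned long (the case the patch targets) and
the default 1024-byte blocksize; the block count handed to
NBD_SET_SIZE_BLOCKS is an unsigned long:

/* Rough size-limit arithmetic, assuming unsigned long is 32 bits wide. */
#include <stdio.h>
#include <stdint.h>

int main(void)
{
	uint64_t old_max_blocks = 0x7FFFFFFFULL;  /* ~0UL >> 1 on 32 bit */
	uint64_t new_max_blocks = 0xFFFFFFFFULL;  /* full unsigned long  */

	/* old code: 1024-byte blocks -> roughly a 2TB ceiling */
	printf("old limit: %llu bytes\n",
	       (unsigned long long)(old_max_blocks << 10));
	/* patched code: 4096-byte blocks -> roughly a 16TB ceiling */
	printf("new limit: %llu bytes\n",
	       (unsigned long long)(new_max_blocks << 12));
	return 0;
}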

Regards, Stephan


--- nbd-2.9.18/nbd-client.c-	2010-08-09 16:57:20.000000000 +0200
+++ nbd-2.9.18/nbd-client.c	2010-12-10 23:11:27.000000000 +0100
@@ -175,11 +175,11 @@
 	size64 = ntohll(size64);
 
 #ifdef NBD_SET_SIZE_BLOCKS
-	if ((size64>>10) > (~0UL >> 1)) {
+	if ((size64>>12) > (uint64_t)~0UL) {
 		printf("size = %luMB", (unsigned long)(size64>>20));
 		err("Exported device is too big for me. Get 64-bit machine :-(\n");
 	} else
-		printf("size = %luKB", (unsigned long)(size64>>10));
+		printf("size = %luMB", (unsigned long)(size64>>20));
 #else
 	if (size64 > (~0UL >> 1)) {
 		printf("size = %luKB", (unsigned long)(size64>>10));
@@ -210,16 +210,18 @@
 	int read_only = (flags & NBD_FLAG_READ_ONLY) ? 1 : 0;
 
 #ifdef NBD_SET_SIZE_BLOCKS
-	if (size64/blocksize > (~0UL >> 1))
+	if (size64>>12 > (uint64_t)~0UL)
 		err("Device too large.\n");
 	else {
 		int er;
-		if (ioctl(nbd, NBD_SET_BLKSIZE, (unsigned long)blocksize) < 0)
+		if (ioctl(nbd, NBD_SET_BLKSIZE, 4096UL) < 0)
 			err("Ioctl/1.1a failed: %m\n");
-		size = (unsigned long)(size64/blocksize);
+		size = (unsigned long)(size64>>12);
 		if ((er = ioctl(nbd, NBD_SET_SIZE_BLOCKS, size)) < 0)
 			err("Ioctl/1.1b failed: %m\n");
-		fprintf(stderr, "bs=%d, sz=%lu\n", blocksize, size);
+		if (ioctl(nbd, NBD_SET_BLKSIZE, (unsigned long)blocksize) < 0)
+			err("Ioctl/1.1c failed: %m\n");
+		fprintf(stderr, "bs=%d, sz=%llu bytes\n", blocksize, 4096ULL*size);
 	}
 #else
 	if (size64 > (~0UL >> 1)) {

My setup: three old, identical dual-Xeon servers with six new 2TB SATA
disks each.  Each server provides a 10TB RAID5.  Via nbd, those three
10TB devices are to form a RAID5² over the network, i.e. a RAID5 built
on top of RAID5s.  Now I am waiting for e2fsprogs to support >16TB
filesystems on ext4, and for the third server to join.
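
As a rough cross-check of the capacities against the /proc/partitions
output below (a sketch only; the real figures differ slightly because of
chunk rounding and md metadata):

/* Approximate capacity arithmetic for the nested RAID5, using the sdX2
 * partition size from /proc/partitions below. */
#include <stdio.h>
#include <stdint.h>

int main(void)
{
	uint64_t part_kb = 1932531142ULL;  /* one sdX2 partition, in KB       */
	uint64_t md0_kb  = 5 * part_kb;    /* 6-disk RAID5 keeps 5 disks net  */
	uint64_t md2_kb  = 2 * md0_kb;     /* 3-device RAID5 keeps 2 devices  */

	printf("per-server RAID5 (md0): ~%llu KB\n", (unsigned long long)md0_kb);
	printf("nested RAID5 (md2):     ~%llu KB\n", (unsigned long long)md2_kb);
	return 0;
}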

# cat /proc/partitions 
major minor  #blocks  name

  43        0 9662653440 nbd0
   8        0 1953514584 sda
   8        1   20980858 sda1
   8        2 1932531142 sda2
   8       16 1953514584 sdb
   8       17   20980858 sdb1
   8       18 1932531142 sdb2
   8       32 1953514584 sdc
   8       33   20980858 sdc1
   8       34 1932531142 sdc2
   8       48 1953514584 sdd
   8       49   20980858 sdd1
   8       50 1932531142 sdd2
   8       64 1953514584 sde
   8       65   20980858 sde1
   8       66 1932531142 sde2
   8       80 1953514584 sdf
   8       81   20980858 sdf1
   8       82 1932531142 sdf2
   9        0 9662653440 md0
   9        1   20980736 md1
   9        2 19325303808 md2
 259        0 15624998912 md2p1
 259        1 3700302848 md2p2
# cat /proc/mdstat 
Personalities : [linear] [raid0] [raid1] [raid10] [raid6] [raid5] [raid4] 
md2 : active raid5 md0[0] nbd0[1]
      19325303808 blocks super 1.2 level 5, 512k chunk, algorithm 2 [3/2] [UU_]
      
md1 : active raid1 sde1[2] sdc1[1] sda1[0]
      20980736 blocks [3/3] [UUU]
      
md0 : active raid5 sdf2[5] sde2[4] sdd2[3] sdc2[2] sdb2[1] sda2[0]
      9662653440 blocks level 5, 512k chunk, algorithm 2 [6/6] [UUUUUU]
      
unused devices: <none>
# cat /proc/swaps 
Filename                                Type            Size    Used    Priority
/dev/sdb1                               partition       20980852        0       1
/dev/sdd1                               partition       20980852        0       1
/dev/sdf1                               partition       20980852        0       1

-- 
Stephan
