# pvscan -v
Wiping cache of LVM-capable devices
Wiping internal VG cache
Walking through all physical volumes
PV /dev/sdb VG data lvm2 [465.76 GiB / 465.76 GiB free]
PV /dev/sdd VG data lvm2 [465.76 GiB / 465.76 GiB free]
PV /dev/sde VG data lvm2 [465.76 GiB / 465.76 GiB free]
PV /dev/sdf VG data lvm2 [465.76 GiB / 465.76 GiB free]
PV /dev/sdc VG data lvm2 [465.76 GiB / 465.76 GiB free]
Total: 5 [2.27 TiB] / in use: 5 [2.27 TiB] / in no VG: 0 [0 ]
# vgscan -v
Wiping cache of LVM-capable devices
Wiping internal VG cache
Reading all physical volumes. This may take a while...
Finding all volume groups
Finding volume group "data"
Found volume group "data" using metadata type lvm2
LVM is no longer finding the logical volumes. I have a file in the archive folder /etc/lvm/archive from that morning, created just before I ran "vgreduce --removemissing --force data".
Is there any way to restore the metadata? Below are the contents of the archive file, followed by what I'm thinking of trying:
# Generated by LVM2 version 2.02.95(2) (2012-03-06): Thu Oct 22 09:44:12 2015
contents = "Text Format Volume Group"
version = 1
description = "Created *before* executing 'vgreduce --removemissing --force data'"
creation_host = "" # Linux 3.2.0-4-amd64 #1 SMP Debian 3.2.68-1+deb7u5 x86_64
creation_time = 1445521452 # Thu Oct 22 09:44:12 2015
data {
    id = "0UcXlh-1lXG-udrp-T6QJ-fqMt-G5mz-mgpLmN"
    seqno = 6
    format = "lvm2" # informational
    status = ["RESIZEABLE", "READ", "WRITE"]
    flags = []
    extent_size = 8192 # 4 Megabytes
    max_lv = 0
    max_pv = 0
    metadata_copies = 0

    physical_volumes {

        pv0 {
            id = "mKQGfL-Fs0C-bRIv-rcKT-jbWs-7olM-R2VCCw"
            device = "/dev/sdb" # Hint only
            status = ["ALLOCATABLE"]
            flags = []
            dev_size = 976773168 # 465.762 Gigabytes
            pe_start = 384
            pe_count = 119234 # 465.758 Gigabytes
        }

        pv1 {
            id = "KLL51K-SwV9-6VU6-9ulL-DOJy-L9uJ-T5k43a"
            device = "unknown device" # Hint only
            status = ["ALLOCATABLE"]
            flags = ["MISSING"]
            dev_size = 976773168 # 465.762 Gigabytes
            pe_start = 384
            pe_count = 119234 # 465.758 Gigabytes
        }

        pv2 {
            id = "98S7nn-R4sG-OMBc-HR9Y-kHPM-Du8q-ZBNJ3c"
            device = "/dev/sdd" # Hint only
            dev_size = 976773168 # 465.762 Gigabytes
            pe_start = 384
            pe_count = 119234 # 465.758 Gigabytes
        }

        pv3 {
            id = "d1CHzd-91uQ-7c0X-XA02-0Tlh-bBtv-rusTsj"
            device = "/dev/sde" # Hint only
            status = ["ALLOCATABLE"]
            flags = []
            dev_size = 976773168 # 465.762 Gigabytes
            pe_start = 384
            pe_count = 119234 # 465.758 Gigabytes
        }

        pv4 {
            id = "vK0cGK-zz71-rpGC-IcP3-gsKU-EHjx-fY70SB"
            device = "/dev/sdf" # Hint only
            status = ["ALLOCATABLE"]
            flags = []
            dev_size = 976773168 # 465.762 Gigabytes
            pe_start = 384
            pe_count = 119234 # 465.758 Gigabytes
        }
    }

    logical_volumes {

        home {
            id = "DM6HzG-mgeC-Hj4M-MyKk-k8UT-ZjHU-t74qTe"
            status = ["READ", "WRITE", "VISIBLE"]
            flags = []
            segment_count = 5

            segment1 {
                start_extent = 0
                extent_count = 119234 # 465.758 Gigabytes
                type = "striped"
                stripe_count = 1 # linear
                stripes = [
                    "pv0", 0
                ]
            }
            segment2 {
                start_extent = 119234
                extent_count = 119234 # 465.758 Gigabytes
                type = "striped"
                stripe_count = 1 # linear
                stripes = [
                    "pv1", 0
                ]
            }
            segment3 {
                start_extent = 238468
                extent_count = 119234 # 465.758 Gigabytes
                type = "striped"
                stripe_count = 1 # linear
                stripes = [
                    "pv2", 0
                ]
            }
            segment4 {
                start_extent = 357702
                extent_count = 119234 # 465.758 Gigabytes
                type = "striped"
                stripe_count = 1 # linear
                stripes = [
                    "pv3", 0
                ]
            }
            segment5 {
                start_extent = 476936
                extent_count = 118131 # 461.449 Gigabytes
                type = "striped"
                stripe_count = 1 # linear
                stripes = [
                    "pv4", 0
                ]
            }
        }
    }
}
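The UUID of the missing pv1 is right there in the archive, so before touching anything I plan to confirm that the drive now showing up as /dev/sdc really carries that UUID. I assume pvs or blkid can show it:

# list PV UUIDs and compare against pv1's UUID from the archive
pvs -o pv_name,pv_uuid
# or inspect the drive directly (for an LVM2_member, the UUID shown is the PV UUID)
blkid /dev/sdc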
I want to see if it's possible to recover the data, because all the drives are actually still in the exact same slots. The failing drive on /dev/sdc magically started working again when I put it back in the server.
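Assuming /dev/sdc does carry pv1's UUID, here is what I'm thinking of trying, based on my reading of the standard vgcfgrestore recovery procedure. I haven't run any of it yet; "data_XXXXX.vg" is a placeholder for the actual filename in /etc/lvm/archive, and the backup path is just an example:

# back up the current (post-vgreduce) metadata first, just in case
vgcfgbackup -f /root/data_after_vgreduce.vg data

# recreate the PV label on the returned drive with its old UUID
# (pv1's UUID, taken from the archive above; I understand pvcreate
# may need -ff to overwrite the label already on /dev/sdc)
pvcreate --uuid "KLL51K-SwV9-6VU6-9ulL-DOJy-L9uJ-T5k43a" \
         --restorefile /etc/lvm/archive/data_XXXXX.vg /dev/sdc

# restore the VG metadata from the archive, then reactivate
vgcfgrestore -f /etc/lvm/archive/data_XXXXX.vg data
vgchange -ay data
lvs data

# read-only filesystem check before mounting anything
fsck -n /dev/data/home

Does this look right, or is there something I should do differently before writing anything to these disks?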