Mike Burns
2011-Aug-31 23:22 UTC
[Ovirt-devel] [PATCH node] fix install when VG exists on disk
This only affects non-HostVG volume groups.

rhbz#733578

Signed-off-by: Mike Burns <mburns at redhat.com>
---
 scripts/storage.py |   14 +++++++++++---
 1 files changed, 11 insertions(+), 3 deletions(-)

diff --git a/scripts/storage.py b/scripts/storage.py
index 5f7f0e3..047bcd1 100644
--- a/scripts/storage.py
+++ b/scripts/storage.py
@@ -90,13 +90,19 @@ class Storage:
         vg = subprocess.Popen(vg_cmd, shell=True, stdout=PIPE, stderr=STDOUT)
         vg_output = vg.stdout.read()
         for vg in vg_output:
-            pvs = system("pvscan -o pv_name,vg_uuid --noheadings | grep \"%s\" | egrep -v -q \"%s%s[0-9]+|%s \"") % (vg, dev, part_delim, dev)
-            if pvs > 0:
+            pvs_cmd = "pvs -o pv_name,vg_uuid --noheadings | grep \"%s\" | egrep -v -q \"%s%s[0-9]+|%s \"" % (vg, dev, part_delim, dev)
+            pvs = subprocess.Popen(pvs_cmd, shell=True, stdout=PIPE, stderr=STDOUT)
+            pvs_output = pvs.stdout.read()
+            if pvs_output > 0:
                 log("The volume group \"%s\" spans multiple disks.") % vg
                 log("This operation cannot complete. Please manually")
                 log("cleanup the storage using standard disk tools.")
                 sys.exit(1)
-            wipe_volume_group(vg)
+            vg_name_cmd = "vgs -o vg_name,vg_uuid --noheadings 2>/dev/null | grep -w \"" + vg + "\" | awk '{print $1}'"
+            vg_name = subprocess.Popen(vg_name_cmd, shell=True, stdout=PIPE, stderr=STDOUT)
+            vg_name_output = vg_name.stdout.read()
+            system("vgchange -an " + vg_name_output)
+            wipe_volume_group(vg_name_output)
         return
@@ -372,7 +378,9 @@ class Storage:
         log("Removing old LVM partitions")
         wipe_volume_group("HostVG")
+        log("Wiping LVM on HOSTVGDRIVE")
         self.wipe_lvm_on_disk(self.HOSTVGDRIVE)
+        log("Wiping LVM on ROOTDRIVE")
         self.wipe_lvm_on_disk(self.ROOTDRIVE)
         self.boot_size_si = self.BOOT_SIZE * (1024 * 1024) / (1000 * 1000)
         if OVIRT_VARS.has_key("OVIRT_ISCSI_ENABLED") and OVIRT_VARS["OVIRT_ISCSI_ENABLED"] == "y":
--
1.7.4.4
Alan Pevec
2011-Sep-01 16:27 UTC
[Ovirt-devel] [PATCH node] fix install when VG exists on disk
On Thu, Sep 1, 2011 at 1:22 AM, Mike Burns <mburns at redhat.com> wrote:

> -            pvs = system("pvscan -o pv_name,vg_uuid --noheadings | grep \"%s\" | egrep -v -q \"%s%s[0-9]+|%s \"") % (vg, dev, part_delim, dev)
> -            if pvs > 0:
> +            pvs_cmd = "pvs -o pv_name,vg_uuid --noheadings | grep \"%s\" | egrep -v -q \"%s%s[0-9]+|%s \"" % (vg, dev, part_delim, dev)
> +            pvs = subprocess.Popen(pvs_cmd, shell=True, stdout=PIPE, stderr=STDOUT)
> +            pvs_output = pvs.stdout.read()
> +            if pvs_output > 0:

pvs_output is the command's output; for checking the return code use:

    if pvs.wait() > 0:

> -            wipe_volume_group(vg)
> +            vg_name_cmd = "vgs -o vg_name,vg_uuid --noheadings 2>/dev/null | grep -w \"" + vg + "\" | awk '{print $1}'"
> +            vg_name = subprocess.Popen(vg_name_cmd, shell=True, stdout=PIPE, stderr=STDOUT)
> +            vg_name_output = vg_name.stdout.read()
> +            system("vgchange -an " + vg_name_output)

This should be moved to wipe_volume_group.

> +            wipe_volume_group(vg_name_output)

This whole trouble of converting vg_uuid to vg_name could be masked in wipe_volume_group. But can someone remind me why we don't just take vg_name instead of vg_uuid in the first place?

Alan
Possibly Parallel Threads
- [PATCH node] First draft of replacing some of the ovirt-config-* scripts with python equivalents.
- [PATCH node] Use vg uuid when detecting whether vg spans multiple disks
- [PATCH] RFC: Advanced Storage Configuration
- [PATCH node] Handle space in storage wwid
- New Storage related patches