Harshavardhana
2009-Jul-09 11:39 UTC
[Ovirt-devel] [PATCH 1/5 ovirt-server] Add glusterfs to task-omatic API for {task_storage,utils}
---
 src/task-omatic/task_storage.rb |   50 +++++++++++++++++++++++++++++++++++++++
 src/task-omatic/utils.rb        |   40 +++++++++++++++++++++++++++++++
 2 files changed, 90 insertions(+), 0 deletions(-)

diff --git a/src/task-omatic/task_storage.rb b/src/task-omatic/task_storage.rb
index 77363ac..97ae4fc 100644
--- a/src/task-omatic/task_storage.rb
+++ b/src/task-omatic/task_storage.rb
@@ -202,6 +202,8 @@ class LibvirtPool
       return IscsiLibvirtPool.new(pool.ip_addr, pool[:target], pool[:port], logger)
     elsif pool[:type] == "NfsStoragePool"
       return NFSLibvirtPool.new(pool.ip_addr, pool.export_path, logger)
+    elsif pool[:type] == "GlusterfsStoragePool"
+      return GLUSTERFSLibvirtPool.new(pool.ip_addr, pool.export_path, logger)
     elsif pool[:type] == "LvmStoragePool"
       # OK, if this is LVM storage, there are two cases we need to care about:
       # 1) this is a LUN with LVM already on it.  In this case, all we need to
@@ -293,6 +295,54 @@ class NFSLibvirtPool < LibvirtPool
   end
 end
 
+class GLUSTERFSLibvirtPool < LibvirtPool
+  def initialize(ip_addr, export_path, logger)
+    target = "#{ip_addr}-#{export_path.tr('/', '_')}"
+    super('netfs', target, logger)
+
+    @type = 'netfs'
+    @host = ip_addr
+    @remote_vol = export_path
+
+    @xml.root.elements["source"].add_element("host", {"name" => @host})
+    @xml.root.elements["source"].add_element("dir", {"path" => @remote_vol})
+    @xml.root.elements["source"].add_element("format", {"type" => "glusterfs"})
+
+    @xml.root.elements["target"].elements["path"].text = "/mnt/" + @name
+  end
+
+  def create_vol(name, size, owner, group, mode)
+    # FIXME: this can actually take some time to complete (since we aren't
+    # doing sparse allocations at the moment).  During that time, whichever
+    # libvirtd we chose to use is completely hung up.  The solution is 3-fold:
+    # 1.  Allow sparse allocations in the WUI front-end
+    # 2.  Make libvirtd multi-threaded
+    # 3.  Make taskomatic multi-threaded
+    super("netfs", name, size, owner, group, mode)
+
+    # FIXME: we have to add a format here because of a bug in libvirt;
+    # if you specify a volume with no format, it will crash libvirtd
+    @vol_xml.root.elements["target"].add_element("format", {"type" => "qcow2"})
+
+    # FIXME: add an allocation of 0 so that we create a sparse file.
+    # This was done because qmf was timing out waiting for the create
+    # operation to complete.  This needs to be fixed in a better way,
+    # however; we want to have non-sparse files for performance reasons.
+    @vol_xml.root.add_element("allocation").add_text('0')
+
+    @logger.debug("Creating new volume on pool #{@remote_pool.name} - XML: #{@vol_xml.to_s}")
+    result = @remote_pool.createVolumeXML(@vol_xml.to_s)
+    raise "Error creating remote volume: #{result.text}" unless result.status == 0
+    return result.volume
+  end
+
+  def xmlequal?(docroot)
+    return (docroot.attributes['type'] == @type and
+            docroot.elements['source'].elements['host'].attributes['name'] == @host and
+            docroot.elements['source'].elements['dir'].attributes['path'] == @remote_vol)
+  end
+end
+
 class LVMLibvirtPool < LibvirtPool
   def initialize(vg_name, device, build_on_start, logger)
     super('logical', vg_name, logger)
diff --git a/src/task-omatic/utils.rb b/src/task-omatic/utils.rb
index e3005ed..cf68cae 100644
--- a/src/task-omatic/utils.rb
+++ b/src/task-omatic/utils.rb
@@ -114,6 +114,8 @@ class LibvirtPool
       return IscsiLibvirtPool.new(pool.ip_addr, pool[:target])
     elsif pool[:type] == "NfsStoragePool"
       return NFSLibvirtPool.new(pool.ip_addr, pool.export_path)
+    elsif pool[:type] == "GlusterfsStoragePool"
+      return GLUSTERFSLibvirtPool.new(pool.ip_addr, pool.export_path)
     elsif pool[:type] == "LvmStoragePool"
       # OK, if this is LVM storage, there are two cases we need to care about:
       # 1) this is a LUN with LVM already on it.  In this case, all we need to
@@ -195,6 +197,44 @@ class NFSLibvirtPool < LibvirtPool
   end
 end
 
+class GLUSTERFSLibvirtPool < LibvirtPool
+  def initialize(ip_addr, export_path)
+    super('netfs')
+
+    @type = 'netfs'
+    @host = ip_addr
+    @remote_vol = export_path
+    @name = String.random_alphanumeric
+
+    @xml.root.elements["source"].add_element("host", {"name" => @host})
+    @xml.root.elements["source"].add_element("dir", {"path" => @remote_vol})
+    @xml.root.elements["source"].add_element("format", {"type" => "glusterfs"})
+
+    @xml.root.elements["target"].elements["path"].text = "/mnt/" + @name
+  end
+
+  def create_vol(name, size, owner, group, mode)
+    # FIXME: this can actually take some time to complete (since we aren't
+    # doing sparse allocations at the moment).  During that time, whichever
+    # libvirtd we chose to use is completely hung up.  The solution is 3-fold:
+    # 1.  Allow sparse allocations in the WUI front-end
+    # 2.  Make libvirtd multi-threaded
+    # 3.  Make taskomatic multi-threaded
+    super("netfs", name, size, owner, group, mode)
+
+    # FIXME: we have to add the format as raw here because of a bug in libvirt;
+    # if you specify a volume with no format, it will crash libvirtd
+    @vol_xml.root.elements["target"].add_element("format", {"type" => "raw"})
+    @remote_pool.create_vol_xml(@vol_xml.to_s)
+  end
+
+  def xmlequal?(docroot)
+    return (docroot.attributes['type'] == @type and
+            docroot.elements['source'].elements['host'].attributes['name'] == @host and
+            docroot.elements['source'].elements['dir'].attributes['path'] == @remote_vol)
+  end
+end
+
 class LVMLibvirtPool < LibvirtPool
   def initialize(vg_name, device, build_on_start)
     super('logical', vg_name)
-- 
1.6.0.6
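[Editorial note: the pool definition that the GLUSTERFSLibvirtPool constructor assembles is standard libvirt netfs pool XML with a glusterfs source format. A minimal sketch of the resulting document, assuming a hypothetical host 192.168.1.10 exporting /volumes/gluster (the real values come from the storage pool record at runtime):

  <pool type='netfs'>
    <name>192.168.1.10-_volumes_gluster</name>
    <source>
      <host name='192.168.1.10'/>
      <dir path='/volumes/gluster'/>
      <format type='glusterfs'/>
    </source>
    <target>
      <path>/mnt/192.168.1.10-_volumes_gluster</path>
    </target>
  </pool>

When libvirtd starts a netfs pool with format type glusterfs, it mounts the export at the target path via the GlusterFS FUSE client, so the rest of taskomatic can treat the pool exactly like the existing NFS case.]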
Harshavardhana
2009-Jul-28 13:50 UTC
[Ovirt-devel] [PATCH 1/5 ovirt-server] Add glusterfs to task-omatic API for {task_storage,utils}
---
 src/task-omatic/task_storage.rb |   50 +++++++++++++++++++++++++++++++++++++++
 src/task-omatic/utils.rb        |   40 +++++++++++++++++++++++++++++++
 2 files changed, 90 insertions(+), 0 deletions(-)

diff --git a/src/task-omatic/task_storage.rb b/src/task-omatic/task_storage.rb
index 8165818..0272fbb 100644
--- a/src/task-omatic/task_storage.rb
+++ b/src/task-omatic/task_storage.rb
@@ -202,6 +202,8 @@ class LibvirtPool
       return IscsiLibvirtPool.new(pool.ip_addr, pool[:target], pool[:port], logger)
     elsif pool[:type] == "NfsStoragePool"
       return NFSLibvirtPool.new(pool.ip_addr, pool.export_path, logger)
+    elsif pool[:type] == "GlusterfsStoragePool"
+      return GLUSTERFSLibvirtPool.new(pool.ip_addr, pool.export_path, logger)
     elsif pool[:type] == "LvmStoragePool"
       # OK, if this is LVM storage, there are two cases we need to care about:
       # 1) this is a LUN with LVM already on it.  In this case, all we need to
@@ -293,6 +295,54 @@ class NFSLibvirtPool < LibvirtPool
   end
 end
 
+class GLUSTERFSLibvirtPool < LibvirtPool
+  def initialize(ip_addr, export_path, logger)
+    target = "#{ip_addr}-#{export_path.tr('/', '_')}"
+    super('netfs', target, logger)
+
+    @type = 'netfs'
+    @host = ip_addr
+    @remote_vol = export_path
+
+    @xml.root.elements["source"].add_element("host", {"name" => @host})
+    @xml.root.elements["source"].add_element("dir", {"path" => @remote_vol})
+    @xml.root.elements["source"].add_element("format", {"type" => "glusterfs"})
+
+    @xml.root.elements["target"].elements["path"].text = "/mnt/" + @name
+  end
+
+  def create_vol(name, size, owner, group, mode)
+    # FIXME: this can actually take some time to complete (since we aren't
+    # doing sparse allocations at the moment).  During that time, whichever
+    # libvirtd we chose to use is completely hung up.  The solution is 3-fold:
+    # 1.  Allow sparse allocations in the WUI front-end
+    # 2.  Make libvirtd multi-threaded
+    # 3.  Make taskomatic multi-threaded
+    super("netfs", name, size, owner, group, mode)
+
+    # FIXME: we have to add a format here because of a bug in libvirt;
+    # if you specify a volume with no format, it will crash libvirtd
+    @vol_xml.root.elements["target"].add_element("format", {"type" => "qcow2"})
+
+    # FIXME: add an allocation of 0 so that we create a sparse file.
+    # This was done because qmf was timing out waiting for the create
+    # operation to complete.  This needs to be fixed in a better way,
+    # however; we want to have non-sparse files for performance reasons.
+    @vol_xml.root.add_element("allocation").add_text('0')
+
+    @logger.debug("Creating new volume on pool #{@remote_pool.name} - XML: #{@vol_xml.to_s}")
+    result = @remote_pool.createVolumeXML(@vol_xml.to_s)
+    raise "Error creating remote volume: #{result.text}" unless result.status == 0
+    return result.volume
+  end
+
+  def xmlequal?(docroot)
+    return (docroot.attributes['type'] == @type and
+            docroot.elements['source'].elements['host'].attributes['name'] == @host and
+            docroot.elements['source'].elements['dir'].attributes['path'] == @remote_vol)
+  end
+end
+
 class LVMLibvirtPool < LibvirtPool
   def initialize(vg_name, device, build_on_start, logger)
     super('logical', vg_name, logger)
diff --git a/src/task-omatic/utils.rb b/src/task-omatic/utils.rb
index e3005ed..cf68cae 100644
--- a/src/task-omatic/utils.rb
+++ b/src/task-omatic/utils.rb
@@ -114,6 +114,8 @@ class LibvirtPool
       return IscsiLibvirtPool.new(pool.ip_addr, pool[:target])
     elsif pool[:type] == "NfsStoragePool"
       return NFSLibvirtPool.new(pool.ip_addr, pool.export_path)
+    elsif pool[:type] == "GlusterfsStoragePool"
+      return GLUSTERFSLibvirtPool.new(pool.ip_addr, pool.export_path)
     elsif pool[:type] == "LvmStoragePool"
       # OK, if this is LVM storage, there are two cases we need to care about:
       # 1) this is a LUN with LVM already on it.  In this case, all we need to
@@ -195,6 +197,44 @@ class NFSLibvirtPool < LibvirtPool
   end
 end
 
+class GLUSTERFSLibvirtPool < LibvirtPool
+  def initialize(ip_addr, export_path)
+    super('netfs')
+
+    @type = 'netfs'
+    @host = ip_addr
+    @remote_vol = export_path
+    @name = String.random_alphanumeric
+
+    @xml.root.elements["source"].add_element("host", {"name" => @host})
+    @xml.root.elements["source"].add_element("dir", {"path" => @remote_vol})
+    @xml.root.elements["source"].add_element("format", {"type" => "glusterfs"})
+
+    @xml.root.elements["target"].elements["path"].text = "/mnt/" + @name
+  end
+
+  def create_vol(name, size, owner, group, mode)
+    # FIXME: this can actually take some time to complete (since we aren't
+    # doing sparse allocations at the moment).  During that time, whichever
+    # libvirtd we chose to use is completely hung up.  The solution is 3-fold:
+    # 1.  Allow sparse allocations in the WUI front-end
+    # 2.  Make libvirtd multi-threaded
+    # 3.  Make taskomatic multi-threaded
+    super("netfs", name, size, owner, group, mode)
+
+    # FIXME: we have to add the format as raw here because of a bug in libvirt;
+    # if you specify a volume with no format, it will crash libvirtd
+    @vol_xml.root.elements["target"].add_element("format", {"type" => "raw"})
+    @remote_pool.create_vol_xml(@vol_xml.to_s)
+  end
+
+  def xmlequal?(docroot)
+    return (docroot.attributes['type'] == @type and
+            docroot.elements['source'].elements['host'].attributes['name'] == @host and
+            docroot.elements['source'].elements['dir'].attributes['path'] == @remote_vol)
+  end
+end
+
 class LVMLibvirtPool < LibvirtPool
   def initialize(vg_name, device, build_on_start)
     super('logical', vg_name)
-- 
1.6.0.6
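[Editorial note: the volume side of the sparse-allocation FIXME is easiest to see in the XML that the create_vol override submits via createVolumeXML. A sketch under stated assumptions: the parent class is assumed to fill in name, capacity and permissions from its arguments, and the volume name and sizes below are made up:

  <volume>
    <name>guest-disk-1</name>
    <capacity>10737418240</capacity>
    <allocation>0</allocation>
    <target>
      <format type='qcow2'/>
      <permissions>
        <owner>0</owner>
        <group>0</group>
        <mode>0744</mode>
      </permissions>
    </target>
  </volume>

The <allocation>0</allocation> element asks libvirt to preallocate nothing, i.e. create a fully sparse file, so the create call returns before QMF times out; going back to full allocation for performance is exactly what the FIXME defers.]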