Version 1 was here:
https://listman.redhat.com/archives/libguestfs/2023-February/thread.html#30694

I made a few changes in v2, but overall decided to keep the now unused
gcaps_arch_min_version capability.  This doesn't preclude removing it in
future if we think it's never going to be useful.

I changed patch 1 to remove the long comment about how the field is used,
anticipating the later patches.

I changed patch 2 as Laszlo suggested.

I changed patch 3 as Laszlo suggested, but this also allows us to use
Option.default so it becomes a nice one-liner.

Rich.
Richard W.M. Jones
2023-Feb-17 11:44 UTC
[Libguestfs] [PATCH v2v v2 1/3] v2v: Rename gcaps_default_cpu to gcaps_arch_min_version
Some guests require not just a specific architecture, but cannot run on
qemu's default CPU model, eg. requiring x86_64-v2.  Since we anticipate
future guests requiring higher versions, let's encode the minimum
architecture version instead of a simple boolean.

This patch essentially just remaps:

  gcaps_default_cpu = true   =>  gcaps_arch_min_version = 0
  gcaps_default_cpu = false  =>  gcaps_arch_min_version = 2

I removed a long comment about how this capability is used because we
intend to completely remove use of the capability in a coming commit.

Updates: commit a50b975024ac5e46e107882e27fea498bf425685
---
 lib/types.mli                | 22 +++++++---------------
 convert/convert_linux.ml     |  9 +++++----
 convert/convert_windows.ml   |  2 +-
 lib/types.ml                 |  6 +++---
 output/create_libvirt_xml.ml |  4 ++--
 output/output_qemu.ml        |  6 +++---
 6 files changed, 21 insertions(+), 28 deletions(-)

diff --git a/lib/types.mli b/lib/types.mli
index 24a93760cf..743daa8a14 100644
--- a/lib/types.mli
+++ b/lib/types.mli
@@ -263,24 +263,16 @@ type guestcaps = {
   gcaps_machine : guestcaps_machine;  (** Machine model. *)
   gcaps_arch : string;      (** Architecture that KVM must emulate. *)

+  gcaps_arch_min_version : int;
+  (** Some guest OSes require not just a specific architecture, but a
+      minimum version.  Notably RHEL >= 9 requires at least x86_64-v2.
+
+      If the guest is capable of running on QEMU's default VCPU model
+      for the architecture ([-cpu qemu64]) then this is set to [0]. *)
+
   gcaps_virtio_1_0 : bool;
   (** The guest supports the virtio devices that it does at the virtio-1.0
       protocol level. *)
-
-  gcaps_default_cpu : bool;
-  (** True iff the guest OS is capable of running on QEMU's default VCPU model
-      (eg. "-cpu qemu64" with the Q35 and I440FX machine types).
-
-      This capability only matters for the QEMU and libvirt output modules,
-      where, in case the capability is false *and* the source hypervisor does
-      not specify a VCPU model, we must manually present the guest OS with a
-      VCPU that looks as close as possible to a physical CPU.  (In that case,
-      we specify host-passthrough.)
-
-      The management applications targeted by the RHV and OpenStack output
-      modules have their own explicit VCPU defaults, overriding the QEMU
-      default model even in case the source hypervisor does not specify a
-      VCPU model; those modules ignore this capability therefore.  Refer to
-      RHBZ#2076013. *)
 }
 (** Guest capabilities after conversion.  eg. Was virtio found or installed? *)

diff --git a/convert/convert_linux.ml b/convert/convert_linux.ml
index 460cbff0ed..d5c0f24dbb 100644
--- a/convert/convert_linux.ml
+++ b/convert/convert_linux.ml
@@ -203,9 +203,10 @@ let convert (g : G.guestfs) source inspect i_firmware keep_serial_console _
    * microarchitecture level, which the default QEMU VCPU model does not
    * satisfy.  Refer to RHBZ#2076013 RHBZ#2166619.
    *)
-  let default_cpu_suffices = family <> `RHEL_family ||
-                             inspect.i_arch <> "x86_64" ||
-                             inspect.i_major_version < 9 in
+  let arch_min_version =
+    if family <> `RHEL_family || inspect.i_arch <> "x86_64" ||
+       inspect.i_major_version < 9
+    then 0 else 2 in

   (* Return guest capabilities from the convert () function. *)
   let guestcaps = {
@@ -217,8 +218,8 @@ let convert (g : G.guestfs) source inspect i_firmware keep_serial_console _
     gcaps_virtio_socket = kernel.ki_supports_virtio_socket;
     gcaps_machine = machine;
     gcaps_arch = Utils.kvm_arch inspect.i_arch;
+    gcaps_arch_min_version = arch_min_version;
     gcaps_virtio_1_0 = virtio_1_0;
-    gcaps_default_cpu = default_cpu_suffices;
   } in

   guestcaps

diff --git a/convert/convert_windows.ml b/convert/convert_windows.ml
index 8d3737995f..9d8d271d05 100644
--- a/convert/convert_windows.ml
+++ b/convert/convert_windows.ml
@@ -265,8 +265,8 @@ let convert (g : G.guestfs) _ inspect i_firmware _ static_ips
     gcaps_machine = of_virtio_win_machine_type
                       virtio_win_installed.Inject_virtio_win.machine;
     gcaps_arch = Utils.kvm_arch inspect.i_arch;
+    gcaps_arch_min_version = 0;
     gcaps_virtio_1_0 = virtio_win_installed.Inject_virtio_win.virtio_1_0;
-    gcaps_default_cpu = true;
   } in

   guestcaps

diff --git a/lib/types.ml b/lib/types.ml
index 98f8bc6fa5..6c019ae130 100644
--- a/lib/types.ml
+++ b/lib/types.ml
@@ -397,8 +397,8 @@ type guestcaps = {
   gcaps_virtio_socket : bool;
   gcaps_machine : guestcaps_machine;
   gcaps_arch : string;
+  gcaps_arch_min_version : int;
   gcaps_virtio_1_0 : bool;
-  gcaps_default_cpu : bool;
 }
 and guestcaps_block_type = Virtio_blk | IDE
 and guestcaps_net_type = Virtio_net | E1000 | RTL8139
@@ -426,8 +426,8 @@ let string_of_guestcaps gcaps
            gcaps_virtio_socket = %b\n\
            gcaps_machine = %s\n\
            gcaps_arch = %s\n\
+           gcaps_arch_min_version = %d\n\
            gcaps_virtio_1_0 = %b\n\
-           gcaps_default_cpu = %b\n\
            "
     (string_of_block_type gcaps.gcaps_block_bus)
     (string_of_net_type gcaps.gcaps_net_bus)
@@ -437,8 +437,8 @@ let string_of_guestcaps gcaps
     gcaps.gcaps_virtio_socket
     (string_of_machine gcaps.gcaps_machine)
     gcaps.gcaps_arch
+    gcaps.gcaps_arch_min_version
     gcaps.gcaps_virtio_1_0
-    gcaps.gcaps_default_cpu

 type target_buses = {
   target_virtio_blk_bus : target_bus_slot array;

diff --git a/output/create_libvirt_xml.ml b/output/create_libvirt_xml.ml
index 60977cf5bb..e9c6c8c150 100644
--- a/output/create_libvirt_xml.ml
+++ b/output/create_libvirt_xml.ml
@@ -185,14 +185,14 @@ let create_libvirt_xml ?pool source inspect
   ];

   if source.s_cpu_model <> None ||
-     not guestcaps.gcaps_default_cpu ||
+     guestcaps.gcaps_arch_min_version >= 1 ||
      source.s_cpu_topology <> None then (
     let cpu_attrs = ref []
     and cpu = ref [] in

     (match source.s_cpu_model with
      | None ->
-        if not guestcaps.gcaps_default_cpu then
+        if guestcaps.gcaps_arch_min_version >= 1 then
           List.push_back cpu_attrs ("mode", "host-model");
      | Some model ->
         List.push_back cpu_attrs ("match", "minimum");

diff --git a/output/output_qemu.ml b/output/output_qemu.ml
index b667e782ed..491906ebf9 100644
--- a/output/output_qemu.ml
+++ b/output/output_qemu.ml
@@ -175,9 +175,9 @@ module QEMU = struct
     arg "-m" (Int64.to_string (source.s_memory /^ 1024L /^ 1024L));

-    (match source.s_cpu_model, guestcaps.gcaps_default_cpu with
-     | None, true -> ()
-     | None, false -> arg "-cpu" "host"
+    (match source.s_cpu_model, guestcaps.gcaps_arch_min_version with
+     | None, 0 -> ()
+     | None, _ -> arg "-cpu" "host"
      | Some model, _ -> arg "-cpu" model
     );
-- 
2.39.0
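[For illustration only, not part of the patch: a minimal, self-contained OCaml sketch of the remapping the commit message describes; the helper name is invented here.]

  (* The old boolean capability maps onto exactly two values of the new
     integer field. *)
  let arch_min_version_of_default_cpu default_cpu =
    if default_cpu then 0 else 2

  let () =
    assert (arch_min_version_of_default_cpu true = 0);   (* gcaps_default_cpu = true  *)
    assert (arch_min_version_of_default_cpu false = 2)   (* gcaps_default_cpu = false *)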
Richard W.M. Jones
2023-Feb-17 11:44 UTC
[Libguestfs] [PATCH v2v v2 2/3] -o libvirt: Always use host-model unless overridden by source hypervisor
In the case where the source hypervisor doesn't specify a CPU model,
previously we chose qemu64 (qemu's most basic model), except for a few
guests that we know won't work on qemu64, eg. RHEL 9 which requires
x86_64-v2; for those we used <cpu mode='host-model'/>.

However we recently encountered an obscure KVM bug related to this
(https://bugzilla.redhat.com/show_bug.cgi?id=2168082).  Windows 11, when
booted on real AMD Milan hardware, thinks the qemu64 CPU model is an AMD
Phenom and tries to apply an ancient erratum to it.  Since KVM doesn't
emulate the correct MSRs for this, the guest failed to boot.

After discussion upstream we can't see any reason not to give all guests
host-model.  This should fix the bug above and also generally improve
performance by allowing the guest to exploit all host features.

Related: https://bugzilla.redhat.com/show_bug.cgi?id=2168082#c19
Related: https://listman.redhat.com/archives/libguestfs/2023-February/030624.html
Thanks: Laszlo Ersek, Dr. David Alan Gilbert, Daniel Berrangé
---
 output/create_libvirt_xml.ml | 59 +++++++++++++++++-------------------
 tests/test-v2v-i-ova.xml     |  1 +
 2 files changed, 28 insertions(+), 32 deletions(-)

diff --git a/output/create_libvirt_xml.ml b/output/create_libvirt_xml.ml
index e9c6c8c150..1d77139b45 100644
--- a/output/create_libvirt_xml.ml
+++ b/output/create_libvirt_xml.ml
@@ -184,41 +184,36 @@ let create_libvirt_xml ?pool source inspect
     e "vcpu" [] [PCData (string_of_int source.s_vcpu)]
   ];

-  if source.s_cpu_model <> None ||
-     guestcaps.gcaps_arch_min_version >= 1 ||
-     source.s_cpu_topology <> None then (
-    let cpu_attrs = ref []
-    and cpu = ref [] in
+  let cpu_attrs = ref []
+  and cpu = ref [] in

-    (match source.s_cpu_model with
-     | None ->
-        if guestcaps.gcaps_arch_min_version >= 1 then
-          List.push_back cpu_attrs ("mode", "host-model");
-     | Some model ->
-        List.push_back cpu_attrs ("match", "minimum");
-        if model = "qemu64" then
-          List.push_back cpu_attrs ("check", "none");
-        (match source.s_cpu_vendor with
-         | None -> ()
-         | Some vendor ->
-            List.push_back cpu (e "vendor" [] [PCData vendor])
-        );
-        List.push_back cpu (e "model" ["fallback", "allow"] [PCData model])
-    );
-    (match source.s_cpu_topology with
-     | None -> ()
-     | Some { s_cpu_sockets; s_cpu_cores; s_cpu_threads } ->
-        let topology_attrs = [
-          "sockets", string_of_int s_cpu_sockets;
-          "cores", string_of_int s_cpu_cores;
-          "threads", string_of_int s_cpu_threads;
-        ] in
-        List.push_back cpu (e "topology" topology_attrs [])
-    );
-
-    List.push_back_list body [ e "cpu" !cpu_attrs !cpu ]
+  (match source.s_cpu_model with
+   | None ->
+      List.push_back cpu_attrs ("mode", "host-model");
+   | Some model ->
+      List.push_back cpu_attrs ("match", "minimum");
+      if model = "qemu64" then
+        List.push_back cpu_attrs ("check", "none");
+      (match source.s_cpu_vendor with
+       | None -> ()
+       | Some vendor ->
+          List.push_back cpu (e "vendor" [] [PCData vendor])
+      );
+      List.push_back cpu (e "model" ["fallback", "allow"] [PCData model])
+  );
+  (match source.s_cpu_topology with
+   | None -> ()
+   | Some { s_cpu_sockets; s_cpu_cores; s_cpu_threads } ->
+      let topology_attrs = [
+        "sockets", string_of_int s_cpu_sockets;
+        "cores", string_of_int s_cpu_cores;
+        "threads", string_of_int s_cpu_threads;
+      ] in
+      List.push_back cpu (e "topology" topology_attrs [])
   );

+  List.push_back_list body [ e "cpu" !cpu_attrs !cpu ];
+
   let uefi_firmware =
     match target_firmware with
     | TargetBIOS -> None

diff --git a/tests/test-v2v-i-ova.xml b/tests/test-v2v-i-ova.xml
index da1db473e5..e5907ea1cc 100644
--- a/tests/test-v2v-i-ova.xml
+++ b/tests/test-v2v-i-ova.xml
@@ -10,6 +10,7 @@
   <memory unit='KiB'>2097152</memory>
   <currentMemory unit='KiB'>2097152</currentMemory>
   <vcpu>1</vcpu>
+  <cpu mode='host-model'/>
   <features>
     <acpi/>
     <apic/>
-- 
2.39.0
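[For illustration only, not part of the patch: a small OCaml sketch of the <cpu> element the new code produces for the two common cases.  It deliberately omits the check='none', vendor and topology handling, and the printing helper is invented here.]

  let cpu_element = function
    | None ->          (* source hypervisor did not specify a CPU model *)
        "<cpu mode='host-model'/>"
    | Some model ->    (* pass the source model through, as before *)
        Printf.sprintf
          "<cpu match='minimum'><model fallback='allow'>%s</model></cpu>" model

  let () =
    print_endline (cpu_element None);
    print_endline (cpu_element (Some "Westmere"))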
Richard W.M. Jones
2023-Feb-17 11:44 UTC
[Libguestfs] [PATCH v2v v2 3/3] -o qemu: Always use -cpu host unless overridden by source hypervisor
As with the prior commit, prefer -cpu host for all guests (except when
we have more information from the source hypervisor).

Although there is the disadvantage that -cpu host is non-migratable, in
practice it would be very difficult to live migrate a guest launched
using direct qemu commands.

Note that after this change, gcaps_arch_min_version is basically an
informational field.  No output uses it, but it will appear in debug
output and there's the possibility we might use it for a future output
mode.

Thanks: Laszlo Ersek
---
 lib/types.mli         | 6 +++++-
 output/output_qemu.ml | 6 +-----
 2 files changed, 6 insertions(+), 6 deletions(-)

diff --git a/lib/types.mli b/lib/types.mli
index 743daa8a14..4a183dd3ae 100644
--- a/lib/types.mli
+++ b/lib/types.mli
@@ -268,7 +268,11 @@ type guestcaps = {
       minimum version.  Notably RHEL >= 9 requires at least x86_64-v2.

       If the guest is capable of running on QEMU's default VCPU model
-      for the architecture ([-cpu qemu64]) then this is set to [0]. *)
+      for the architecture ([-cpu qemu64]) then this is set to [0].
+
+      Note this capability is not actually used by any current output
+      mode.  It is retained in case we might use it in future, but we
+      might remove it if it is not used. *)

   gcaps_virtio_1_0 : bool;
   (** The guest supports the virtio devices that it does at the virtio-1.0

diff --git a/output/output_qemu.ml b/output/output_qemu.ml
index 491906ebf9..2bbacb6eda 100644
--- a/output/output_qemu.ml
+++ b/output/output_qemu.ml
@@ -175,11 +175,7 @@ module QEMU = struct
     arg "-m" (Int64.to_string (source.s_memory /^ 1024L /^ 1024L));

-    (match source.s_cpu_model, guestcaps.gcaps_arch_min_version with
-     | None, 0 -> ()
-     | None, _ -> arg "-cpu" "host"
-     | Some model, _ -> arg "-cpu" model
-    );
+    arg "-cpu" (Option.default "host" source.s_cpu_model);

     if source.s_vcpu > 1 then (
       (match source.s_cpu_topology with
-- 
2.39.0
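[For illustration only, not part of the patch: a self-contained sketch of the behaviour the Option.default one-liner relies on.  The helper is defined locally here so the snippet stands alone; virt-v2v gets an equivalent from its mlstdutils library.]

  (* Equivalent of Option.default: return the first argument when the
     option is None, otherwise the wrapped value. *)
  let default d = function None -> d | Some v -> v

  let () =
    (* No CPU model from the source hypervisor: fall back to -cpu host. *)
    assert (default "host" None = "host");
    (* The source specified a model: pass it straight to -cpu. *)
    assert (default "host" (Some "Westmere") = "Westmere")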