Hi all,
this patch series allows tools/ to compile on ARM, mostly by providing
empty implementations of the arch-specific functions that are needed.
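Most of the stubs follow the same pattern: they either succeed as a no-op or
fail cleanly with errno set, so the common code keeps building and callers see
an ordinary libxc error. A minimal sketch of that pattern (the function name
below is invented for illustration and is not one of the actual hunks):

    #include <errno.h>
    #include <xenctrl.h>

    /* illustrative only: "xc_some_arch_op" is an invented name; the real
     * stubs in xc_dom_arm.c, xc_hvm_build_arm.c and xc_nomigrate.c follow
     * the same shape */
    int xc_some_arch_op(xc_interface *xch, uint32_t domid)
    {
        errno = ENOSYS;   /* not implemented on ARM yet */
        return -1;        /* callers get a normal libxc failure */
    }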
Changes in v5:
- libxc: return -1 and set errno on error;
- add a few missing emacs runes in new files.
Changes in v4:
- rebased on 55a36564fb4f85722c67f16fe508f3ecbd204549;
- minor compile fixes.
Changes in v3:
- move libxl_cpuid_policy_list_gen_json to libxl_(no)cpuid.c;
- rename xc_hvm_build.c to xc_hvm_build_x86.c;
- remove xc_nohvm, introduce xc_hvm_build_arm.c instead;
- remove "libxl: do not allocate e820 for non x86 guests.";
- introduce libxl__arch_domain_create.
Changes in v2:
- rebased on a22587ae517170a7755d3a88611ae0e2d5bb555e;
- dropped "arm: arch_dump_shared_mem_info as a no-op" that is already
in
xen-unstable;
- define xen_callback_t as uint64_t;
- define guest_word_t as uint64_t.
Ian Campbell (1):
arm: add stub hvm/save.h
Stefano Stabellini (6):
arm: compile libxc
arm: compile libxenguest
arm: compile memshr
arm: compile xentrace
arm: compile libxl
libxl: Introduce libxl__arch_domain_create
tools/libxc/Makefile | 13 +-
tools/libxc/xc_core.h | 2 +
tools/libxc/xc_core_arm.c | 107 +++++++
tools/libxc/xc_core_arm.h | 60 ++++
tools/libxc/xc_dom_arm.c | 50 +++
tools/libxc/xc_hvm_build.c | 511 --------------------------------
tools/libxc/xc_hvm_build_arm.c | 61 ++++
tools/libxc/xc_hvm_build_x86.c | 511 ++++++++++++++++++++++++++++++++
tools/libxc/xc_nomigrate.c | 53 ++++
tools/libxc/xenctrl.h | 4 +
tools/libxl/Makefile | 5 +-
tools/libxl/libxl_arch.h | 22 ++
tools/libxl/libxl_cpuid.c | 60 ++++
tools/libxl/libxl_create.c | 12 +-
tools/libxl/libxl_internal.h | 2 -
tools/libxl/libxl_json.c | 60 ----
tools/libxl/libxl_noarch.c | 8 +
tools/libxl/libxl_nocpuid.c | 8 +-
tools/libxl/libxl_pci.c | 242 ---------------
tools/libxl/libxl_x86.c | 259 ++++++++++++++++
tools/memshr/bidir-hash.c | 31 ++
tools/xentrace/xenctx.c | 12 +
xen/include/public/arch-arm/hvm/save.h | 39 +++
xen/include/public/hvm/save.h | 2 +
24 files changed, 1303 insertions(+), 831 deletions(-)
A git tree based on 6c69c04ed2b1d5fd0cbebb61e649c03d9d2e8d9a is available here:
git://xenbits.xen.org/people/sstabellini/xen-unstable.git arm-tools-5
Cheers,
Stefano
From: Ian Campbell <ian.campbell@citrix.com>

Signed-off-by: Ian Campbell <ian.campbell@citrix.com>
Acked-by: Stefano Stabellini <stefano.stabellini@eu.citrix.com>
---
xen/include/public/arch-arm/hvm/save.h | 39 ++++++++++++++++++++++++++++++++
xen/include/public/hvm/save.h | 2 +
2 files changed, 41 insertions(+), 0 deletions(-)
create mode 100644 xen/include/public/arch-arm/hvm/save.h
diff --git a/xen/include/public/arch-arm/hvm/save.h b/xen/include/public/arch-arm/hvm/save.h
new file mode 100644
index 0000000..ec61298
--- /dev/null
+++ b/xen/include/public/arch-arm/hvm/save.h
@@ -0,0 +1,39 @@
+/*
+ * Structure definitions for HVM state that is held by Xen and must
+ * be saved along with the domain's memory and device-model state.
+ *
+ * Copyright (c) 2012 Citrix Systems Ltd.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a copy
+ * of this software and associated documentation files (the "Software"), to
+ * deal in the Software without restriction, including without limitation the
+ * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
+ * sell copies of the Software, and to permit persons to whom the Software is
+ * furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+ * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
+ * DEALINGS IN THE SOFTWARE.
+ */
+
+#ifndef __XEN_PUBLIC_HVM_SAVE_ARM_H__
+#define __XEN_PUBLIC_HVM_SAVE_ARM_H__
+
+#endif
+
+/*
+ * Local variables:
+ * mode: C
+ * c-set-style: "BSD"
+ * c-basic-offset: 4
+ * tab-width: 4
+ * indent-tabs-mode: nil
+ * End:
+ */
diff --git a/xen/include/public/hvm/save.h b/xen/include/public/hvm/save.h
index d0f2661..58f8433 100644
--- a/xen/include/public/hvm/save.h
+++ b/xen/include/public/hvm/save.h
@@ -104,6 +104,8 @@ DECLARE_HVM_SAVE_TYPE(END, 0, struct hvm_save_end);
 #include "../arch-x86/hvm/save.h"
 #elif defined(__ia64__)
 #include "../arch-ia64/hvm/save.h"
+#elif defined(__arm__)
+#include "../arch-arm/hvm/save.h"
 #else
 #error "unsupported architecture"
 #endif
--
1.7.2.5
Introduce an empty implementation of the arch-specific ARM functions in
xc_core_arm.c and xc_core_arm.h; define the xen_mb/xen_rmb/xen_wmb memory
barriers for ARM.
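For context: all three of xen_mb()/xen_rmb()/xen_wmb() are mapped to the ARMv7
"dmb" instruction, which is what keeps shared-memory updates ordered between
producer and consumer. A rough usage sketch (the ring structure and names below
are invented for illustration; they are not part of this patch):

    #include <stdint.h>

    /* invented single-producer ring, for illustration only */
    struct demo_ring {
        volatile uint32_t prod;
        uint32_t slot[16];
    };

    static void demo_publish(struct demo_ring *r, uint32_t val)
    {
        uint32_t idx = r->prod;
        r->slot[idx % 16] = val;  /* write the payload first...             */
        xen_wmb();                /* ...and order it before the index bump  */
        r->prod = idx + 1;        /* the consumer pairs this with xen_rmb() */
    }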
Signed-off-by: Stefano Stabellini <stefano.stabellini@eu.citrix.com>
Acked-by: Ian Campbell <ian.campbell@citrix.com>
Acked-by: Ian Jackson <ian.jackson@eu.citrix.com>
---
tools/libxc/Makefile | 1 +
tools/libxc/xc_core.h | 2 +
tools/libxc/xc_core_arm.c | 107 +++++++++++++++++++++++++++++++++++++++++++++
tools/libxc/xc_core_arm.h | 60 +++++++++++++++++++++++++
tools/libxc/xenctrl.h | 4 ++
5 files changed, 174 insertions(+), 0 deletions(-)
create mode 100644 tools/libxc/xc_core_arm.c
create mode 100644 tools/libxc/xc_core_arm.h
diff --git a/tools/libxc/Makefile b/tools/libxc/Makefile
index b5e7022..f2e1ba7 100644
--- a/tools/libxc/Makefile
+++ b/tools/libxc/Makefile
@@ -8,6 +8,7 @@ CTRL_SRCS-y :=
 CTRL_SRCS-y += xc_core.c
CTRL_SRCS-$(CONFIG_X86) += xc_core_x86.c
CTRL_SRCS-$(CONFIG_IA64) += xc_core_ia64.c
+CTRL_SRCS-$(CONFIG_ARM) += xc_core_arm.c
CTRL_SRCS-y += xc_cpupool.c
CTRL_SRCS-y += xc_domain.c
CTRL_SRCS-y += xc_evtchn.c
diff --git a/tools/libxc/xc_core.h b/tools/libxc/xc_core.h
index 1e88a75..358a8c1 100644
--- a/tools/libxc/xc_core.h
+++ b/tools/libxc/xc_core.h
@@ -155,6 +155,8 @@ int xc_core_arch_map_p2m_writable(xc_interface *xch,
unsigned int guest_width,
# include "xc_core_x86.h"
#elif defined (__ia64__)
# include "xc_core_ia64.h"
+#elif defined (__arm__)
+# include "xc_core_arm.h"
#else
# error "unsupported architecture"
#endif
diff --git a/tools/libxc/xc_core_arm.c b/tools/libxc/xc_core_arm.c
new file mode 100644
index 0000000..e6716e4
--- /dev/null
+++ b/tools/libxc/xc_core_arm.c
@@ -0,0 +1,107 @@
+/*
+ * This library is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU Lesser General Public
+ * License as published by the Free Software Foundation; either
+ * version 2.1 of the License, or (at your option) any later version.
+ *
+ * This library is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * Lesser General Public License for more details.
+ *
+ * You should have received a copy of the GNU Lesser General Public
+ * License along with this library; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
+ *
+ * Copyright (c) 2011 Citrix Systems
+ *
+ */
+
+#include "xg_private.h"
+#include "xc_core.h"
+
+int
+xc_core_arch_gpfn_may_present(struct xc_core_arch_context *arch_ctxt,
+ unsigned long pfn)
+{
+ /* TODO: memory from DT */
+ if (pfn >= 0x80000 && pfn < 0x88000)
+ return 1;
+ return 0;
+}
+
+
+static int nr_gpfns(xc_interface *xch, domid_t domid)
+{
+ return xc_domain_maximum_gpfn(xch, domid) + 1;
+}
+
+int
+xc_core_arch_auto_translated_physmap(const xc_dominfo_t *info)
+{
+ return 1;
+}
+
+int
+xc_core_arch_memory_map_get(xc_interface *xch, struct xc_core_arch_context *unused,
+ xc_dominfo_t *info, shared_info_any_t *live_shinfo,
+ xc_core_memory_map_t **mapp,
+ unsigned int *nr_entries)
+{
+ unsigned long p2m_size = nr_gpfns(xch, info->domid);
+ xc_core_memory_map_t *map;
+
+ map = malloc(sizeof(*map));
+ if ( map == NULL )
+ {
+ PERROR("Could not allocate memory");
+ return -1;
+ }
+
+ map->addr = 0;
+ map->size = ((uint64_t)p2m_size) << PAGE_SHIFT;
+
+ *mapp = map;
+ *nr_entries = 1;
+ return 0;
+}
+
+static int
+xc_core_arch_map_p2m_rw(xc_interface *xch, struct domain_info_context *dinfo, xc_dominfo_t *info,
+ shared_info_any_t *live_shinfo, xen_pfn_t **live_p2m,
+ unsigned long *pfnp, int rw)
+{
+ errno = ENOSYS;
+ return -1;
+}
+
+int
+xc_core_arch_map_p2m(xc_interface *xch, unsigned int guest_width, xc_dominfo_t *info,
+ shared_info_any_t *live_shinfo, xen_pfn_t **live_p2m,
+ unsigned long *pfnp)
+{
+ struct domain_info_context _dinfo = { .guest_width = guest_width };
+ struct domain_info_context *dinfo = &_dinfo;
+ return xc_core_arch_map_p2m_rw(xch, dinfo, info,
+ live_shinfo, live_p2m, pfnp, 0);
+}
+
+int
+xc_core_arch_map_p2m_writable(xc_interface *xch, unsigned int guest_width, xc_dominfo_t *info,
+                              shared_info_any_t *live_shinfo, xen_pfn_t **live_p2m,
+ unsigned long *pfnp)
+{
+ struct domain_info_context _dinfo = { .guest_width = guest_width };
+ struct domain_info_context *dinfo = &_dinfo;
+ return xc_core_arch_map_p2m_rw(xch, dinfo, info,
+ live_shinfo, live_p2m, pfnp, 1);
+}
+/*
+ * Local variables:
+ * mode: C
+ * c-set-style: "BSD"
+ * c-basic-offset: 4
+ * tab-width: 4
+ * indent-tabs-mode: nil
+ * End:
+ */
diff --git a/tools/libxc/xc_core_arm.h b/tools/libxc/xc_core_arm.h
new file mode 100644
index 0000000..3a6be2a
--- /dev/null
+++ b/tools/libxc/xc_core_arm.h
@@ -0,0 +1,60 @@
+/*
+ * This library is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU Lesser General Public
+ * License as published by the Free Software Foundation; either
+ * version 2.1 of the License, or (at your option) any later version.
+ *
+ * This library is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * Lesser General Public License for more details.
+ *
+ * You should have received a copy of the GNU Lesser General Public
+ * License along with this library; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
+ *
+ * Copyright (c) 2012 Citrix Systems
+ *
+ */
+
+#ifndef XC_CORE_ARM_H
+#define XC_CORE_ARM_H
+
+#define ELF_ARCH_DATA ELFDATA2LSB
+#define ELF_ARCH_MACHINE EM_ARM
+
+struct xc_core_arch_context {
+ /* nothing */
+};
+
+#define xc_core_arch_context_init(arch_ctxt) do {} while (0)
+#define xc_core_arch_context_free(arch_ctxt) do {} while (0)
+#define xc_core_arch_context_get(arch_ctxt, ctxt, xch, domid) \
+ (0)
+#define xc_core_arch_context_dump(xch, arch_ctxt, args, dump_rtn) (0)
+
+int
+xc_core_arch_gpfn_may_present(struct xc_core_arch_context *arch_ctxt,
+ unsigned long pfn);
+static inline int
+xc_core_arch_context_get_shdr(xc_interface *xch,
+ struct xc_core_arch_context *arch_ctxt,
+ struct xc_core_section_headers *sheaders,
+ struct xc_core_strtab *strtab,
+ uint64_t *filesz, uint64_t offset)
+{
+ *filesz = 0;
+ return 0;
+}
+
+#endif /* XC_CORE_ARM_H */
+
+/*
+ * Local variables:
+ * mode: C
+ * c-set-style: "BSD"
+ * c-basic-offset: 4
+ * tab-width: 4
+ * indent-tabs-mode: nil
+ * End:
+ */
diff --git a/tools/libxc/xenctrl.h b/tools/libxc/xenctrl.h
index 73d24e5..29c13a7 100644
--- a/tools/libxc/xenctrl.h
+++ b/tools/libxc/xenctrl.h
@@ -83,6 +83,10 @@
#define xen_mb() asm volatile ("mf" ::: "memory")
#define xen_rmb() asm volatile ("mf" ::: "memory")
#define xen_wmb() asm volatile ("mf" ::: "memory")
+#elif defined(__arm__)
+#define xen_mb() asm volatile ("dmb" : : : "memory")
+#define xen_rmb() asm volatile ("dmb" : : : "memory")
+#define xen_wmb() asm volatile ("dmb" : : : "memory")
#else
#error "Define barriers"
#endif
--
1.7.2.5
Introduce an empty implementation of the arch-specific ARM functions in
xc_dom_arm.c.
Provide empty implementations of xc_domain_save and xc_domain_restore
when CONFIG_MIGRATE is not set.
Move xc_hvm_build.c to xc_hvm_build_x86.c because the implementation is
x86-specific, and introduce xc_hvm_build_arm.c with empty stubs.
Changes in v3:
- rename xc_hvm_build.c to xc_hvm_build_x86.c;
- remove xc_nohvm, introduce xc_hvm_build_arm.c instead;
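On ARM the net effect for callers is an ordinary failure with errno set to
ENOSYS, along these lines (hypothetical caller fragment; xch, domid, memsize
and kernel are assumed to exist and are not part of this patch):

    /* hypothetical caller: the ARM stub fails cleanly instead of breaking
     * the build */
    if ( xc_hvm_build(xch, domid, memsize, kernel) < 0 )
        fprintf(stderr, "xc_hvm_build: %s\n", strerror(errno));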
Signed-off-by: Stefano Stabellini <stefano.stabellini@eu.citrix.com>
---
tools/libxc/Makefile | 12 +-
tools/libxc/xc_dom_arm.c | 50 ++++
tools/libxc/xc_hvm_build.c | 511 ----------------------------------------
tools/libxc/xc_hvm_build_arm.c | 61 +++++
tools/libxc/xc_hvm_build_x86.c | 511 ++++++++++++++++++++++++++++++++++++++++
tools/libxc/xc_nomigrate.c | 53 ++++
6 files changed, 684 insertions(+), 514 deletions(-)
create mode 100644 tools/libxc/xc_dom_arm.c
delete mode 100644 tools/libxc/xc_hvm_build.c
create mode 100644 tools/libxc/xc_hvm_build_arm.c
create mode 100644 tools/libxc/xc_hvm_build_x86.c
create mode 100644 tools/libxc/xc_nomigrate.c
diff --git a/tools/libxc/Makefile b/tools/libxc/Makefile
index f2e1ba7..02d39a3 100644
--- a/tools/libxc/Makefile
+++ b/tools/libxc/Makefile
@@ -42,9 +42,12 @@ CTRL_SRCS-$(CONFIG_MiniOS) += xc_minios.c
GUEST_SRCS-y :=
GUEST_SRCS-y += xg_private.c xc_suspend.c
-GUEST_SRCS-$(CONFIG_MIGRATE) += xc_domain_restore.c xc_domain_save.c
-GUEST_SRCS-$(CONFIG_MIGRATE) += xc_offline_page.c xc_compression.c
-GUEST_SRCS-$(CONFIG_HVM) += xc_hvm_build.c
+ifeq ($(CONFIG_MIGRATE),y)
+GUEST_SRCS-y += xc_domain_restore.c xc_domain_save.c
+GUEST_SRCS-y += xc_offline_page.c xc_compression.c
+else
+GUEST_SRCS-y += xc_nomigrate.c
+endif
vpath %.c ../../xen/common/libelf
CFLAGS += -I../../xen/common/libelf
@@ -61,7 +64,10 @@ GUEST_SRCS-y += xc_dom_compat_linux.c
GUEST_SRCS-$(CONFIG_X86) += xc_dom_x86.c
GUEST_SRCS-$(CONFIG_X86) += xc_cpuid_x86.c
+GUEST_SRCS-$(CONFIG_X86) += xc_hvm_build_x86.c
GUEST_SRCS-$(CONFIG_IA64) += xc_dom_ia64.c
+GUEST_SRCS-$(CONFIG_ARM) += xc_dom_arm.c
+GUEST_SRCS-$(CONFIG_ARM) += xc_hvm_build_arm.c
OSDEP_SRCS-y += xenctrl_osdep_ENOSYS.c
diff --git a/tools/libxc/xc_dom_arm.c b/tools/libxc/xc_dom_arm.c
new file mode 100644
index 0000000..122d0e8
--- /dev/null
+++ b/tools/libxc/xc_dom_arm.c
@@ -0,0 +1,50 @@
+/*
+ * Xen domain builder -- ARM
+ *
+ * This library is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU Lesser General Public
+ * License as published by the Free Software Foundation;
+ * version 2.1 of the License.
+ *
+ * This library is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * Lesser General Public License for more details.
+ *
+ * You should have received a copy of the GNU Lesser General Public
+ * License along with this library; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
+ *
+ * Copyright (c) 2011, Citrix Systems
+ */
+#include <inttypes.h>
+#include <xen/xen.h>
+#include "xg_private.h"
+#include "xc_dom.h"
+
+int arch_setup_meminit(struct xc_dom_image *dom)
+{
+ errno = ENOSYS;
+ return -1;
+}
+
+int arch_setup_bootearly(struct xc_dom_image *dom)
+{
+ DOMPRINTF("%s: doing nothing", __FUNCTION__);
+ return 0;
+}
+
+int arch_setup_bootlate(struct xc_dom_image *dom)
+{
+ DOMPRINTF("%s: doing nothing", __FUNCTION__);
+ return 0;
+}
+/*
+ * Local variables:
+ * mode: C
+ * c-set-style: "BSD"
+ * c-basic-offset: 4
+ * tab-width: 4
+ * indent-tabs-mode: nil
+ * End:
+ */
diff --git a/tools/libxc/xc_hvm_build.c b/tools/libxc/xc_hvm_build.c
deleted file mode 100644
index 1fa5658..0000000
--- a/tools/libxc/xc_hvm_build.c
+++ /dev/null
@@ -1,511 +0,0 @@
-/******************************************************************************
- * xc_hvm_build.c
- *
- * This library is free software; you can redistribute it and/or
- * modify it under the terms of the GNU Lesser General Public
- * License as published by the Free Software Foundation;
- * version 2.1 of the License.
- *
- * This library is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
- * Lesser General Public License for more details.
- *
- * You should have received a copy of the GNU Lesser General Public
- * License along with this library; if not, write to the Free Software
- * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
- */
-
-#include <stddef.h>
-#include <inttypes.h>
-#include <stdlib.h>
-#include <unistd.h>
-#include <zlib.h>
-
-#include "xg_private.h"
-#include "xc_private.h"
-
-#include <xen/foreign/x86_32.h>
-#include <xen/foreign/x86_64.h>
-#include <xen/hvm/hvm_info_table.h>
-#include <xen/hvm/params.h>
-#include <xen/hvm/e820.h>
-
-#include <xen/libelf/libelf.h>
-
-#define SUPERPAGE_2MB_SHIFT 9
-#define SUPERPAGE_2MB_NR_PFNS (1UL << SUPERPAGE_2MB_SHIFT)
-#define SUPERPAGE_1GB_SHIFT 18
-#define SUPERPAGE_1GB_NR_PFNS (1UL << SUPERPAGE_1GB_SHIFT)
-
-#define SPECIALPAGE_BUFIOREQ 0
-#define SPECIALPAGE_XENSTORE 1
-#define SPECIALPAGE_IOREQ 2
-#define SPECIALPAGE_IDENT_PT 3
-#define SPECIALPAGE_CONSOLE 4
-#define NR_SPECIAL_PAGES 5
-#define special_pfn(x) (0xff000u - NR_SPECIAL_PAGES + (x))
-
-static void build_hvm_info(void *hvm_info_page, uint64_t mem_size)
-{
- struct hvm_info_table *hvm_info = (struct hvm_info_table *)
- (((unsigned char *)hvm_info_page) + HVM_INFO_OFFSET);
- uint64_t lowmem_end = mem_size, highmem_end = 0;
- uint8_t sum;
- int i;
-
- if ( lowmem_end > HVM_BELOW_4G_RAM_END )
- {
- highmem_end = lowmem_end + (1ull<<32) - HVM_BELOW_4G_RAM_END;
- lowmem_end = HVM_BELOW_4G_RAM_END;
- }
-
- memset(hvm_info_page, 0, PAGE_SIZE);
-
- /* Fill in the header. */
- strncpy(hvm_info->signature, "HVM INFO", 8);
- hvm_info->length = sizeof(struct hvm_info_table);
-
- /* Sensible defaults: these can be overridden by the caller. */
- hvm_info->apic_mode = 1;
- hvm_info->nr_vcpus = 1;
- memset(hvm_info->vcpu_online, 0xff, sizeof(hvm_info->vcpu_online));
-
- /* Memory parameters. */
- hvm_info->low_mem_pgend = lowmem_end >> PAGE_SHIFT;
- hvm_info->high_mem_pgend = highmem_end >> PAGE_SHIFT;
- hvm_info->reserved_mem_pgstart = special_pfn(0);
-
- /* Finish with the checksum. */
- for ( i = 0, sum = 0; i < hvm_info->length; i++ )
- sum += ((uint8_t *)hvm_info)[i];
- hvm_info->checksum = -sum;
-}
-
-static int loadelfimage(
- xc_interface *xch,
- struct elf_binary *elf, uint32_t dom, unsigned long *parray)
-{
- privcmd_mmap_entry_t *entries = NULL;
- unsigned long pfn_start = elf->pstart >> PAGE_SHIFT;
- unsigned long pfn_end = (elf->pend + PAGE_SIZE - 1) >> PAGE_SHIFT;
- size_t pages = pfn_end - pfn_start;
- int i, rc = -1;
-
- /* Map address space for initial elf image. */
- entries = calloc(pages, sizeof(privcmd_mmap_entry_t));
- if ( entries == NULL )
- goto err;
-
- for ( i = 0; i < pages; i++ )
- entries[i].mfn = parray[(elf->pstart >> PAGE_SHIFT) + i];
-
- elf->dest = xc_map_foreign_ranges(
- xch, dom, pages << PAGE_SHIFT, PROT_READ | PROT_WRITE, 1 <<
PAGE_SHIFT,
- entries, pages);
- if ( elf->dest == NULL )
- goto err;
-
- elf->dest += elf->pstart & (PAGE_SIZE - 1);
-
- /* Load the initial elf image. */
- rc = elf_load_binary(elf);
- if ( rc < 0 )
- PERROR("Failed to load elf binary\n");
-
- munmap(elf->dest, pages << PAGE_SHIFT);
- elf->dest = NULL;
-
- err:
- free(entries);
-
- return rc;
-}
-
-/*
- * Check whether there exists mmio hole in the specified memory range.
- * Returns 1 if exists, else returns 0.
- */
-static int check_mmio_hole(uint64_t start, uint64_t memsize)
-{
- if ( start + memsize <= HVM_BELOW_4G_MMIO_START ||
- start >= HVM_BELOW_4G_MMIO_START + HVM_BELOW_4G_MMIO_LENGTH )
- return 0;
- else
- return 1;
-}
-
-static int setup_guest(xc_interface *xch,
- uint32_t dom, int memsize, int target,
- char *image, unsigned long image_size)
-{
- xen_pfn_t *page_array = NULL;
- unsigned long i, nr_pages = (unsigned long)memsize << (20 -
PAGE_SHIFT);
- unsigned long target_pages = (unsigned long)target << (20 -
PAGE_SHIFT);
- unsigned long entry_eip, cur_pages, cur_pfn;
- void *hvm_info_page;
- uint32_t *ident_pt;
- struct elf_binary elf;
- uint64_t v_start, v_end;
- int rc;
- xen_capabilities_info_t caps;
- unsigned long stat_normal_pages = 0, stat_2mb_pages = 0,
- stat_1gb_pages = 0;
- int pod_mode = 0;
-
- /* An HVM guest must be initialised with at least 2MB memory. */
- if ( memsize < 2 || target < 2 )
- goto error_out;
-
- if ( memsize > target )
- pod_mode = 1;
-
- memset(&elf, 0, sizeof(elf));
- if ( elf_init(&elf, image, image_size) != 0 )
- goto error_out;
-
- xc_elf_set_logfile(xch, &elf, 1);
-
- elf_parse_binary(&elf);
- v_start = 0;
- v_end = (unsigned long long)memsize << 20;
-
- if ( xc_version(xch, XENVER_capabilities, &caps) != 0 )
- {
- PERROR("Could not get Xen capabilities");
- goto error_out;
- }
-
- IPRINTF("VIRTUAL MEMORY ARRANGEMENT:\n"
- " Loader:
%016"PRIx64"->%016"PRIx64"\n"
- " TOTAL:
%016"PRIx64"->%016"PRIx64"\n"
- " ENTRY ADDRESS: %016"PRIx64"\n",
- elf.pstart, elf.pend,
- v_start, v_end,
- elf_uval(&elf, elf.ehdr, e_entry));
-
- if ( (page_array = malloc(nr_pages * sizeof(xen_pfn_t))) == NULL )
- {
- PERROR("Could not allocate memory.");
- goto error_out;
- }
-
- for ( i = 0; i < nr_pages; i++ )
- page_array[i] = i;
- for ( i = HVM_BELOW_4G_RAM_END >> PAGE_SHIFT; i < nr_pages; i++ )
- page_array[i] += HVM_BELOW_4G_MMIO_LENGTH >> PAGE_SHIFT;
-
- /*
- * Allocate memory for HVM guest, skipping VGA hole 0xA0000-0xC0000.
- *
- * We attempt to allocate 1GB pages if possible. It falls back on 2MB
- * pages if 1GB allocation fails. 4KB pages will be used eventually if
- * both fail.
- *
- * Under 2MB mode, we allocate pages in batches of no more than 8MB to
- * ensure that we can be preempted and hence dom0 remains responsive.
- */
- rc = xc_domain_populate_physmap_exact(
- xch, dom, 0xa0, 0, 0, &page_array[0x00]);
- cur_pages = 0xc0;
- stat_normal_pages = 0xc0;
- while ( (rc == 0) && (nr_pages > cur_pages) )
- {
- /* Clip count to maximum 1GB extent. */
- unsigned long count = nr_pages - cur_pages;
- unsigned long max_pages = SUPERPAGE_1GB_NR_PFNS;
-
- if ( count > max_pages )
- count = max_pages;
-
- cur_pfn = page_array[cur_pages];
-
- /* Take care the corner cases of super page tails */
- if ( ((cur_pfn & (SUPERPAGE_1GB_NR_PFNS-1)) != 0) &&
- (count > (-cur_pfn & (SUPERPAGE_1GB_NR_PFNS-1))) )
- count = -cur_pfn & (SUPERPAGE_1GB_NR_PFNS-1);
- else if ( ((count & (SUPERPAGE_1GB_NR_PFNS-1)) != 0) &&
- (count > SUPERPAGE_1GB_NR_PFNS) )
- count &= ~(SUPERPAGE_1GB_NR_PFNS - 1);
-
- /* Attemp to allocate 1GB super page. Because in each pass we only
-         * allocate at most 1GB, we don't have to clip super page boundaries.
-         */
-        if ( ((count | cur_pfn) & (SUPERPAGE_1GB_NR_PFNS - 1)) == 0 &&
- /* Check if there exists MMIO hole in the 1GB memory range */
- !check_mmio_hole(cur_pfn << PAGE_SHIFT,
- SUPERPAGE_1GB_NR_PFNS << PAGE_SHIFT) )
- {
- long done;
- unsigned long nr_extents = count >> SUPERPAGE_1GB_SHIFT;
- xen_pfn_t sp_extents[nr_extents];
-
- for ( i = 0; i < nr_extents; i++ )
- sp_extents[i] =
page_array[cur_pages+(i<<SUPERPAGE_1GB_SHIFT)];
-
- done = xc_domain_populate_physmap(xch, dom, nr_extents,
SUPERPAGE_1GB_SHIFT,
- pod_mode ?
XENMEMF_populate_on_demand : 0,
- sp_extents);
-
- if ( done > 0 )
- {
- stat_1gb_pages += done;
- done <<= SUPERPAGE_1GB_SHIFT;
- cur_pages += done;
- count -= done;
- }
- }
-
- if ( count != 0 )
- {
- /* Clip count to maximum 8MB extent. */
- max_pages = SUPERPAGE_2MB_NR_PFNS * 4;
- if ( count > max_pages )
- count = max_pages;
-
- /* Clip partial superpage extents to superpage boundaries. */
- if ( ((cur_pfn & (SUPERPAGE_2MB_NR_PFNS-1)) != 0) &&
- (count > (-cur_pfn & (SUPERPAGE_2MB_NR_PFNS-1))) )
- count = -cur_pfn & (SUPERPAGE_2MB_NR_PFNS-1);
- else if ( ((count & (SUPERPAGE_2MB_NR_PFNS-1)) != 0) &&
- (count > SUPERPAGE_2MB_NR_PFNS) )
- count &= ~(SUPERPAGE_2MB_NR_PFNS - 1); /* clip non-s.p.
tail */
-
- /* Attempt to allocate superpage extents. */
- if ( ((count | cur_pfn) & (SUPERPAGE_2MB_NR_PFNS - 1)) == 0 )
- {
- long done;
- unsigned long nr_extents = count >> SUPERPAGE_2MB_SHIFT;
- xen_pfn_t sp_extents[nr_extents];
-
- for ( i = 0; i < nr_extents; i++ )
- sp_extents[i] =
page_array[cur_pages+(i<<SUPERPAGE_2MB_SHIFT)];
-
- done = xc_domain_populate_physmap(xch, dom, nr_extents,
SUPERPAGE_2MB_SHIFT,
- pod_mode ?
XENMEMF_populate_on_demand : 0,
- sp_extents);
-
- if ( done > 0 )
- {
- stat_2mb_pages += done;
- done <<= SUPERPAGE_2MB_SHIFT;
- cur_pages += done;
- count -= done;
- }
- }
- }
-
- /* Fall back to 4kB extents. */
- if ( count != 0 )
- {
- rc = xc_domain_populate_physmap_exact(
- xch, dom, count, 0, 0, &page_array[cur_pages]);
- cur_pages += count;
- stat_normal_pages += count;
- }
- }
-
- /* Subtract 0x20 from target_pages for the VGA "hole". Xen will
- * adjust the PoD cache size so that domain tot_pages will be
- * target_pages - 0x20 after this call. */
- if ( pod_mode )
- rc = xc_domain_set_pod_target(xch, dom, target_pages - 0x20,
- NULL, NULL, NULL);
-
- if ( rc != 0 )
- {
- PERROR("Could not allocate memory for HVM guest.");
- goto error_out;
- }
-
- IPRINTF("PHYSICAL MEMORY ALLOCATION:\n"
- " 4KB PAGES: 0x%016lx\n"
- " 2MB PAGES: 0x%016lx\n"
- " 1GB PAGES: 0x%016lx\n",
- stat_normal_pages, stat_2mb_pages, stat_1gb_pages);
-
- if ( loadelfimage(xch, &elf, dom, page_array) != 0 )
- goto error_out;
-
- if ( (hvm_info_page = xc_map_foreign_range(
- xch, dom, PAGE_SIZE, PROT_READ | PROT_WRITE,
- HVM_INFO_PFN)) == NULL )
- goto error_out;
- build_hvm_info(hvm_info_page, v_end);
- munmap(hvm_info_page, PAGE_SIZE);
-
- /* Allocate and clear special pages. */
- for ( i = 0; i < NR_SPECIAL_PAGES; i++ )
- {
- xen_pfn_t pfn = special_pfn(i);
- rc = xc_domain_populate_physmap_exact(xch, dom, 1, 0, 0, &pfn);
- if ( rc != 0 )
- {
- PERROR("Could not allocate %d''th special page.",
i);
- goto error_out;
- }
- if ( xc_clear_domain_page(xch, dom, special_pfn(i)) )
- goto error_out;
- }
-
- xc_set_hvm_param(xch, dom, HVM_PARAM_STORE_PFN,
- special_pfn(SPECIALPAGE_XENSTORE));
- xc_set_hvm_param(xch, dom, HVM_PARAM_BUFIOREQ_PFN,
- special_pfn(SPECIALPAGE_BUFIOREQ));
- xc_set_hvm_param(xch, dom, HVM_PARAM_IOREQ_PFN,
- special_pfn(SPECIALPAGE_IOREQ));
- xc_set_hvm_param(xch, dom, HVM_PARAM_CONSOLE_PFN,
- special_pfn(SPECIALPAGE_CONSOLE));
-
- /*
- * Identity-map page table is required for running with CR0.PG=0 when
- * using Intel EPT. Create a 32-bit non-PAE page directory of superpages.
- */
- if ( (ident_pt = xc_map_foreign_range(
- xch, dom, PAGE_SIZE, PROT_READ | PROT_WRITE,
- special_pfn(SPECIALPAGE_IDENT_PT))) == NULL )
- goto error_out;
- for ( i = 0; i < PAGE_SIZE / sizeof(*ident_pt); i++ )
- ident_pt[i] = ((i << 22) | _PAGE_PRESENT | _PAGE_RW | _PAGE_USER
|
- _PAGE_ACCESSED | _PAGE_DIRTY | _PAGE_PSE);
- munmap(ident_pt, PAGE_SIZE);
- xc_set_hvm_param(xch, dom, HVM_PARAM_IDENT_PT,
- special_pfn(SPECIALPAGE_IDENT_PT) << PAGE_SHIFT);
-
- /* Insert JMP <rel32> instruction at address 0x0 to reach entry
point. */
- entry_eip = elf_uval(&elf, elf.ehdr, e_entry);
- if ( entry_eip != 0 )
- {
- char *page0 = xc_map_foreign_range(
- xch, dom, PAGE_SIZE, PROT_READ | PROT_WRITE, 0);
- if ( page0 == NULL )
- goto error_out;
- page0[0] = 0xe9;
- *(uint32_t *)&page0[1] = entry_eip - 5;
- munmap(page0, PAGE_SIZE);
- }
-
- free(page_array);
- return 0;
-
- error_out:
- free(page_array);
- return -1;
-}
-
-static int xc_hvm_build_internal(xc_interface *xch,
- uint32_t domid,
- int memsize,
- int target,
- char *image,
- unsigned long image_size)
-{
- if ( (image == NULL) || (image_size == 0) )
- {
- ERROR("Image required");
- return -1;
- }
-
- return setup_guest(xch, domid, memsize, target, image, image_size);
-}
-
-/* xc_hvm_build:
- * Create a domain for a virtualized Linux, using files/filenames.
- */
-int xc_hvm_build(xc_interface *xch,
- uint32_t domid,
- int memsize,
- const char *image_name)
-{
- char *image;
- int sts;
- unsigned long image_size;
-
- if ( (image_name == NULL) ||
- ((image = xc_read_image(xch, image_name, &image_size)) == NULL) )
- return -1;
-
- sts = xc_hvm_build_internal(xch, domid, memsize, memsize, image,
image_size);
-
- free(image);
-
- return sts;
-}
-
-/* xc_hvm_build_target_mem:
- * Create a domain for a pre-ballooned virtualized Linux, using
- * files/filenames. If target < memsize, domain is created with
- * memsize pages marked populate-on-demand,
- * calculating pod cache size based on target.
- * If target == memsize, pages are populated normally.
- */
-int xc_hvm_build_target_mem(xc_interface *xch,
- uint32_t domid,
- int memsize,
- int target,
- const char *image_name)
-{
- char *image;
- int sts;
- unsigned long image_size;
-
- if ( (image_name == NULL) ||
- ((image = xc_read_image(xch, image_name, &image_size)) == NULL) )
- return -1;
-
- sts = xc_hvm_build_internal(xch, domid, memsize, target, image,
image_size);
-
- free(image);
-
- return sts;
-}
-
-/* xc_hvm_build_mem:
- * Create a domain for a virtualized Linux, using memory buffers.
- */
-int xc_hvm_build_mem(xc_interface *xch,
- uint32_t domid,
- int memsize,
- const char *image_buffer,
- unsigned long image_size)
-{
- int sts;
- unsigned long img_len;
- char *img;
-
- /* Validate that there is a kernel buffer */
-
- if ( (image_buffer == NULL) || (image_size == 0) )
- {
- ERROR("kernel image buffer not present");
- return -1;
- }
-
- img = xc_inflate_buffer(xch, image_buffer, image_size, &img_len);
- if ( img == NULL )
- {
- ERROR("unable to inflate ram disk buffer");
- return -1;
- }
-
- sts = xc_hvm_build_internal(xch, domid, memsize, memsize,
- img, img_len);
-
- /* xc_inflate_buffer may return the original buffer pointer (for
- for already inflated buffers), so exercise some care in freeing */
-
- if ( (img != NULL) && (img != image_buffer) )
- free(img);
-
- return sts;
-}
-
-/*
- * Local variables:
- * mode: C
- * c-set-style: "BSD"
- * c-basic-offset: 4
- * tab-width: 4
- * indent-tabs-mode: nil
- * End:
- */
diff --git a/tools/libxc/xc_hvm_build_arm.c b/tools/libxc/xc_hvm_build_arm.c
new file mode 100644
index 0000000..010ebdb
--- /dev/null
+++ b/tools/libxc/xc_hvm_build_arm.c
@@ -0,0 +1,61 @@
+/******************************************************************************
+ * This library is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU Lesser General Public
+ * License as published by the Free Software Foundation;
+ * version 2.1 of the License.
+ *
+ * This library is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * Lesser General Public License for more details.
+ *
+ * You should have received a copy of the GNU Lesser General Public
+ * License along with this library; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
+ *
+ * Copyright (c) 2011, Citrix Systems
+ */
+
+#include <inttypes.h>
+#include <errno.h>
+#include <xenctrl.h>
+#include <xenguest.h>
+
+int xc_hvm_build(xc_interface *xch,
+ uint32_t domid,
+ int memsize,
+ const char *image_name)
+{
+ errno = ENOSYS;
+ return -1;
+}
+
+int xc_hvm_build_target_mem(xc_interface *xch,
+ uint32_t domid,
+ int memsize,
+ int target,
+ const char *image_name)
+{
+ errno = ENOSYS;
+ return -1;
+}
+
+int xc_hvm_build_mem(xc_interface *xch,
+ uint32_t domid,
+ int memsize,
+ const char *image_buffer,
+ unsigned long image_size)
+{
+ errno = ENOSYS;
+ return -1;
+}
+
+/*
+ * Local variables:
+ * mode: C
+ * c-set-style: "BSD"
+ * c-basic-offset: 4
+ * tab-width: 4
+ * indent-tabs-mode: nil
+ * End:
+ */
diff --git a/tools/libxc/xc_hvm_build_x86.c b/tools/libxc/xc_hvm_build_x86.c
new file mode 100644
index 0000000..1fa5658
--- /dev/null
+++ b/tools/libxc/xc_hvm_build_x86.c
@@ -0,0 +1,511 @@
+/******************************************************************************
+ * xc_hvm_build.c
+ *
+ * This library is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU Lesser General Public
+ * License as published by the Free Software Foundation;
+ * version 2.1 of the License.
+ *
+ * This library is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * Lesser General Public License for more details.
+ *
+ * You should have received a copy of the GNU Lesser General Public
+ * License along with this library; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
+ */
+
+#include <stddef.h>
+#include <inttypes.h>
+#include <stdlib.h>
+#include <unistd.h>
+#include <zlib.h>
+
+#include "xg_private.h"
+#include "xc_private.h"
+
+#include <xen/foreign/x86_32.h>
+#include <xen/foreign/x86_64.h>
+#include <xen/hvm/hvm_info_table.h>
+#include <xen/hvm/params.h>
+#include <xen/hvm/e820.h>
+
+#include <xen/libelf/libelf.h>
+
+#define SUPERPAGE_2MB_SHIFT 9
+#define SUPERPAGE_2MB_NR_PFNS (1UL << SUPERPAGE_2MB_SHIFT)
+#define SUPERPAGE_1GB_SHIFT 18
+#define SUPERPAGE_1GB_NR_PFNS (1UL << SUPERPAGE_1GB_SHIFT)
+
+#define SPECIALPAGE_BUFIOREQ 0
+#define SPECIALPAGE_XENSTORE 1
+#define SPECIALPAGE_IOREQ 2
+#define SPECIALPAGE_IDENT_PT 3
+#define SPECIALPAGE_CONSOLE 4
+#define NR_SPECIAL_PAGES 5
+#define special_pfn(x) (0xff000u - NR_SPECIAL_PAGES + (x))
+
+static void build_hvm_info(void *hvm_info_page, uint64_t mem_size)
+{
+ struct hvm_info_table *hvm_info = (struct hvm_info_table *)
+ (((unsigned char *)hvm_info_page) + HVM_INFO_OFFSET);
+ uint64_t lowmem_end = mem_size, highmem_end = 0;
+ uint8_t sum;
+ int i;
+
+ if ( lowmem_end > HVM_BELOW_4G_RAM_END )
+ {
+ highmem_end = lowmem_end + (1ull<<32) - HVM_BELOW_4G_RAM_END;
+ lowmem_end = HVM_BELOW_4G_RAM_END;
+ }
+
+ memset(hvm_info_page, 0, PAGE_SIZE);
+
+ /* Fill in the header. */
+ strncpy(hvm_info->signature, "HVM INFO", 8);
+ hvm_info->length = sizeof(struct hvm_info_table);
+
+ /* Sensible defaults: these can be overridden by the caller. */
+ hvm_info->apic_mode = 1;
+ hvm_info->nr_vcpus = 1;
+ memset(hvm_info->vcpu_online, 0xff, sizeof(hvm_info->vcpu_online));
+
+ /* Memory parameters. */
+ hvm_info->low_mem_pgend = lowmem_end >> PAGE_SHIFT;
+ hvm_info->high_mem_pgend = highmem_end >> PAGE_SHIFT;
+ hvm_info->reserved_mem_pgstart = special_pfn(0);
+
+ /* Finish with the checksum. */
+ for ( i = 0, sum = 0; i < hvm_info->length; i++ )
+ sum += ((uint8_t *)hvm_info)[i];
+ hvm_info->checksum = -sum;
+}
+
+static int loadelfimage(
+ xc_interface *xch,
+ struct elf_binary *elf, uint32_t dom, unsigned long *parray)
+{
+ privcmd_mmap_entry_t *entries = NULL;
+ unsigned long pfn_start = elf->pstart >> PAGE_SHIFT;
+ unsigned long pfn_end = (elf->pend + PAGE_SIZE - 1) >> PAGE_SHIFT;
+ size_t pages = pfn_end - pfn_start;
+ int i, rc = -1;
+
+ /* Map address space for initial elf image. */
+ entries = calloc(pages, sizeof(privcmd_mmap_entry_t));
+ if ( entries == NULL )
+ goto err;
+
+ for ( i = 0; i < pages; i++ )
+ entries[i].mfn = parray[(elf->pstart >> PAGE_SHIFT) + i];
+
+ elf->dest = xc_map_foreign_ranges(
+ xch, dom, pages << PAGE_SHIFT, PROT_READ | PROT_WRITE, 1 <<
PAGE_SHIFT,
+ entries, pages);
+ if ( elf->dest == NULL )
+ goto err;
+
+ elf->dest += elf->pstart & (PAGE_SIZE - 1);
+
+ /* Load the initial elf image. */
+ rc = elf_load_binary(elf);
+ if ( rc < 0 )
+ PERROR("Failed to load elf binary\n");
+
+ munmap(elf->dest, pages << PAGE_SHIFT);
+ elf->dest = NULL;
+
+ err:
+ free(entries);
+
+ return rc;
+}
+
+/*
+ * Check whether there exists mmio hole in the specified memory range.
+ * Returns 1 if exists, else returns 0.
+ */
+static int check_mmio_hole(uint64_t start, uint64_t memsize)
+{
+ if ( start + memsize <= HVM_BELOW_4G_MMIO_START ||
+ start >= HVM_BELOW_4G_MMIO_START + HVM_BELOW_4G_MMIO_LENGTH )
+ return 0;
+ else
+ return 1;
+}
+
+static int setup_guest(xc_interface *xch,
+ uint32_t dom, int memsize, int target,
+ char *image, unsigned long image_size)
+{
+ xen_pfn_t *page_array = NULL;
+ unsigned long i, nr_pages = (unsigned long)memsize << (20 -
PAGE_SHIFT);
+ unsigned long target_pages = (unsigned long)target << (20 -
PAGE_SHIFT);
+ unsigned long entry_eip, cur_pages, cur_pfn;
+ void *hvm_info_page;
+ uint32_t *ident_pt;
+ struct elf_binary elf;
+ uint64_t v_start, v_end;
+ int rc;
+ xen_capabilities_info_t caps;
+ unsigned long stat_normal_pages = 0, stat_2mb_pages = 0,
+ stat_1gb_pages = 0;
+ int pod_mode = 0;
+
+ /* An HVM guest must be initialised with at least 2MB memory. */
+ if ( memsize < 2 || target < 2 )
+ goto error_out;
+
+ if ( memsize > target )
+ pod_mode = 1;
+
+ memset(&elf, 0, sizeof(elf));
+ if ( elf_init(&elf, image, image_size) != 0 )
+ goto error_out;
+
+ xc_elf_set_logfile(xch, &elf, 1);
+
+ elf_parse_binary(&elf);
+ v_start = 0;
+ v_end = (unsigned long long)memsize << 20;
+
+ if ( xc_version(xch, XENVER_capabilities, &caps) != 0 )
+ {
+ PERROR("Could not get Xen capabilities");
+ goto error_out;
+ }
+
+ IPRINTF("VIRTUAL MEMORY ARRANGEMENT:\n"
+ " Loader:
%016"PRIx64"->%016"PRIx64"\n"
+ " TOTAL:
%016"PRIx64"->%016"PRIx64"\n"
+ " ENTRY ADDRESS: %016"PRIx64"\n",
+ elf.pstart, elf.pend,
+ v_start, v_end,
+ elf_uval(&elf, elf.ehdr, e_entry));
+
+ if ( (page_array = malloc(nr_pages * sizeof(xen_pfn_t))) == NULL )
+ {
+ PERROR("Could not allocate memory.");
+ goto error_out;
+ }
+
+ for ( i = 0; i < nr_pages; i++ )
+ page_array[i] = i;
+ for ( i = HVM_BELOW_4G_RAM_END >> PAGE_SHIFT; i < nr_pages; i++ )
+ page_array[i] += HVM_BELOW_4G_MMIO_LENGTH >> PAGE_SHIFT;
+
+ /*
+ * Allocate memory for HVM guest, skipping VGA hole 0xA0000-0xC0000.
+ *
+ * We attempt to allocate 1GB pages if possible. It falls back on 2MB
+ * pages if 1GB allocation fails. 4KB pages will be used eventually if
+ * both fail.
+ *
+ * Under 2MB mode, we allocate pages in batches of no more than 8MB to
+ * ensure that we can be preempted and hence dom0 remains responsive.
+ */
+ rc = xc_domain_populate_physmap_exact(
+ xch, dom, 0xa0, 0, 0, &page_array[0x00]);
+ cur_pages = 0xc0;
+ stat_normal_pages = 0xc0;
+ while ( (rc == 0) && (nr_pages > cur_pages) )
+ {
+ /* Clip count to maximum 1GB extent. */
+ unsigned long count = nr_pages - cur_pages;
+ unsigned long max_pages = SUPERPAGE_1GB_NR_PFNS;
+
+ if ( count > max_pages )
+ count = max_pages;
+
+ cur_pfn = page_array[cur_pages];
+
+ /* Take care the corner cases of super page tails */
+ if ( ((cur_pfn & (SUPERPAGE_1GB_NR_PFNS-1)) != 0) &&
+ (count > (-cur_pfn & (SUPERPAGE_1GB_NR_PFNS-1))) )
+ count = -cur_pfn & (SUPERPAGE_1GB_NR_PFNS-1);
+ else if ( ((count & (SUPERPAGE_1GB_NR_PFNS-1)) != 0) &&
+ (count > SUPERPAGE_1GB_NR_PFNS) )
+ count &= ~(SUPERPAGE_1GB_NR_PFNS - 1);
+
+ /* Attemp to allocate 1GB super page. Because in each pass we only
+         * allocate at most 1GB, we don't have to clip super page boundaries.
+         */
+        if ( ((count | cur_pfn) & (SUPERPAGE_1GB_NR_PFNS - 1)) == 0 &&
+ /* Check if there exists MMIO hole in the 1GB memory range */
+ !check_mmio_hole(cur_pfn << PAGE_SHIFT,
+ SUPERPAGE_1GB_NR_PFNS << PAGE_SHIFT) )
+ {
+ long done;
+ unsigned long nr_extents = count >> SUPERPAGE_1GB_SHIFT;
+ xen_pfn_t sp_extents[nr_extents];
+
+ for ( i = 0; i < nr_extents; i++ )
+ sp_extents[i] =
page_array[cur_pages+(i<<SUPERPAGE_1GB_SHIFT)];
+
+ done = xc_domain_populate_physmap(xch, dom, nr_extents,
SUPERPAGE_1GB_SHIFT,
+ pod_mode ?
XENMEMF_populate_on_demand : 0,
+ sp_extents);
+
+ if ( done > 0 )
+ {
+ stat_1gb_pages += done;
+ done <<= SUPERPAGE_1GB_SHIFT;
+ cur_pages += done;
+ count -= done;
+ }
+ }
+
+ if ( count != 0 )
+ {
+ /* Clip count to maximum 8MB extent. */
+ max_pages = SUPERPAGE_2MB_NR_PFNS * 4;
+ if ( count > max_pages )
+ count = max_pages;
+
+ /* Clip partial superpage extents to superpage boundaries. */
+ if ( ((cur_pfn & (SUPERPAGE_2MB_NR_PFNS-1)) != 0) &&
+ (count > (-cur_pfn & (SUPERPAGE_2MB_NR_PFNS-1))) )
+ count = -cur_pfn & (SUPERPAGE_2MB_NR_PFNS-1);
+ else if ( ((count & (SUPERPAGE_2MB_NR_PFNS-1)) != 0) &&
+ (count > SUPERPAGE_2MB_NR_PFNS) )
+ count &= ~(SUPERPAGE_2MB_NR_PFNS - 1); /* clip non-s.p.
tail */
+
+ /* Attempt to allocate superpage extents. */
+ if ( ((count | cur_pfn) & (SUPERPAGE_2MB_NR_PFNS - 1)) == 0 )
+ {
+ long done;
+ unsigned long nr_extents = count >> SUPERPAGE_2MB_SHIFT;
+ xen_pfn_t sp_extents[nr_extents];
+
+ for ( i = 0; i < nr_extents; i++ )
+ sp_extents[i] =
page_array[cur_pages+(i<<SUPERPAGE_2MB_SHIFT)];
+
+ done = xc_domain_populate_physmap(xch, dom, nr_extents,
SUPERPAGE_2MB_SHIFT,
+ pod_mode ?
XENMEMF_populate_on_demand : 0,
+ sp_extents);
+
+ if ( done > 0 )
+ {
+ stat_2mb_pages += done;
+ done <<= SUPERPAGE_2MB_SHIFT;
+ cur_pages += done;
+ count -= done;
+ }
+ }
+ }
+
+ /* Fall back to 4kB extents. */
+ if ( count != 0 )
+ {
+ rc = xc_domain_populate_physmap_exact(
+ xch, dom, count, 0, 0, &page_array[cur_pages]);
+ cur_pages += count;
+ stat_normal_pages += count;
+ }
+ }
+
+ /* Subtract 0x20 from target_pages for the VGA "hole". Xen will
+ * adjust the PoD cache size so that domain tot_pages will be
+ * target_pages - 0x20 after this call. */
+ if ( pod_mode )
+ rc = xc_domain_set_pod_target(xch, dom, target_pages - 0x20,
+ NULL, NULL, NULL);
+
+ if ( rc != 0 )
+ {
+ PERROR("Could not allocate memory for HVM guest.");
+ goto error_out;
+ }
+
+ IPRINTF("PHYSICAL MEMORY ALLOCATION:\n"
+ " 4KB PAGES: 0x%016lx\n"
+ " 2MB PAGES: 0x%016lx\n"
+ " 1GB PAGES: 0x%016lx\n",
+ stat_normal_pages, stat_2mb_pages, stat_1gb_pages);
+
+ if ( loadelfimage(xch, &elf, dom, page_array) != 0 )
+ goto error_out;
+
+ if ( (hvm_info_page = xc_map_foreign_range(
+ xch, dom, PAGE_SIZE, PROT_READ | PROT_WRITE,
+ HVM_INFO_PFN)) == NULL )
+ goto error_out;
+ build_hvm_info(hvm_info_page, v_end);
+ munmap(hvm_info_page, PAGE_SIZE);
+
+ /* Allocate and clear special pages. */
+ for ( i = 0; i < NR_SPECIAL_PAGES; i++ )
+ {
+ xen_pfn_t pfn = special_pfn(i);
+ rc = xc_domain_populate_physmap_exact(xch, dom, 1, 0, 0, &pfn);
+ if ( rc != 0 )
+ {
+ PERROR("Could not allocate %d''th special page.",
i);
+ goto error_out;
+ }
+ if ( xc_clear_domain_page(xch, dom, special_pfn(i)) )
+ goto error_out;
+ }
+
+ xc_set_hvm_param(xch, dom, HVM_PARAM_STORE_PFN,
+ special_pfn(SPECIALPAGE_XENSTORE));
+ xc_set_hvm_param(xch, dom, HVM_PARAM_BUFIOREQ_PFN,
+ special_pfn(SPECIALPAGE_BUFIOREQ));
+ xc_set_hvm_param(xch, dom, HVM_PARAM_IOREQ_PFN,
+ special_pfn(SPECIALPAGE_IOREQ));
+ xc_set_hvm_param(xch, dom, HVM_PARAM_CONSOLE_PFN,
+ special_pfn(SPECIALPAGE_CONSOLE));
+
+ /*
+ * Identity-map page table is required for running with CR0.PG=0 when
+ * using Intel EPT. Create a 32-bit non-PAE page directory of superpages.
+ */
+ if ( (ident_pt = xc_map_foreign_range(
+ xch, dom, PAGE_SIZE, PROT_READ | PROT_WRITE,
+ special_pfn(SPECIALPAGE_IDENT_PT))) == NULL )
+ goto error_out;
+ for ( i = 0; i < PAGE_SIZE / sizeof(*ident_pt); i++ )
+ ident_pt[i] = ((i << 22) | _PAGE_PRESENT | _PAGE_RW | _PAGE_USER
|
+ _PAGE_ACCESSED | _PAGE_DIRTY | _PAGE_PSE);
+ munmap(ident_pt, PAGE_SIZE);
+ xc_set_hvm_param(xch, dom, HVM_PARAM_IDENT_PT,
+ special_pfn(SPECIALPAGE_IDENT_PT) << PAGE_SHIFT);
+
+ /* Insert JMP <rel32> instruction at address 0x0 to reach entry
point. */
+ entry_eip = elf_uval(&elf, elf.ehdr, e_entry);
+ if ( entry_eip != 0 )
+ {
+ char *page0 = xc_map_foreign_range(
+ xch, dom, PAGE_SIZE, PROT_READ | PROT_WRITE, 0);
+ if ( page0 == NULL )
+ goto error_out;
+ page0[0] = 0xe9;
+ *(uint32_t *)&page0[1] = entry_eip - 5;
+ munmap(page0, PAGE_SIZE);
+ }
+
+ free(page_array);
+ return 0;
+
+ error_out:
+ free(page_array);
+ return -1;
+}
+
+static int xc_hvm_build_internal(xc_interface *xch,
+ uint32_t domid,
+ int memsize,
+ int target,
+ char *image,
+ unsigned long image_size)
+{
+ if ( (image == NULL) || (image_size == 0) )
+ {
+ ERROR("Image required");
+ return -1;
+ }
+
+ return setup_guest(xch, domid, memsize, target, image, image_size);
+}
+
+/* xc_hvm_build:
+ * Create a domain for a virtualized Linux, using files/filenames.
+ */
+int xc_hvm_build(xc_interface *xch,
+ uint32_t domid,
+ int memsize,
+ const char *image_name)
+{
+ char *image;
+ int sts;
+ unsigned long image_size;
+
+ if ( (image_name == NULL) ||
+ ((image = xc_read_image(xch, image_name, &image_size)) == NULL) )
+ return -1;
+
+ sts = xc_hvm_build_internal(xch, domid, memsize, memsize, image,
image_size);
+
+ free(image);
+
+ return sts;
+}
+
+/* xc_hvm_build_target_mem:
+ * Create a domain for a pre-ballooned virtualized Linux, using
+ * files/filenames. If target < memsize, domain is created with
+ * memsize pages marked populate-on-demand,
+ * calculating pod cache size based on target.
+ * If target == memsize, pages are populated normally.
+ */
+int xc_hvm_build_target_mem(xc_interface *xch,
+ uint32_t domid,
+ int memsize,
+ int target,
+ const char *image_name)
+{
+ char *image;
+ int sts;
+ unsigned long image_size;
+
+ if ( (image_name == NULL) ||
+ ((image = xc_read_image(xch, image_name, &image_size)) == NULL) )
+ return -1;
+
+ sts = xc_hvm_build_internal(xch, domid, memsize, target, image,
image_size);
+
+ free(image);
+
+ return sts;
+}
+
+/* xc_hvm_build_mem:
+ * Create a domain for a virtualized Linux, using memory buffers.
+ */
+int xc_hvm_build_mem(xc_interface *xch,
+ uint32_t domid,
+ int memsize,
+ const char *image_buffer,
+ unsigned long image_size)
+{
+ int sts;
+ unsigned long img_len;
+ char *img;
+
+ /* Validate that there is a kernel buffer */
+
+ if ( (image_buffer == NULL) || (image_size == 0) )
+ {
+ ERROR("kernel image buffer not present");
+ return -1;
+ }
+
+ img = xc_inflate_buffer(xch, image_buffer, image_size, &img_len);
+ if ( img == NULL )
+ {
+ ERROR("unable to inflate ram disk buffer");
+ return -1;
+ }
+
+ sts = xc_hvm_build_internal(xch, domid, memsize, memsize,
+ img, img_len);
+
+ /* xc_inflate_buffer may return the original buffer pointer (for
+ for already inflated buffers), so exercise some care in freeing */
+
+ if ( (img != NULL) && (img != image_buffer) )
+ free(img);
+
+ return sts;
+}
+
+/*
+ * Local variables:
+ * mode: C
+ * c-set-style: "BSD"
+ * c-basic-offset: 4
+ * tab-width: 4
+ * indent-tabs-mode: nil
+ * End:
+ */
diff --git a/tools/libxc/xc_nomigrate.c b/tools/libxc/xc_nomigrate.c
new file mode 100644
index 0000000..e734d73
--- /dev/null
+++ b/tools/libxc/xc_nomigrate.c
@@ -0,0 +1,53 @@
+/******************************************************************************
+ * This library is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU Lesser General Public
+ * License as published by the Free Software Foundation;
+ * version 2.1 of the License.
+ *
+ * This library is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * Lesser General Public License for more details.
+ *
+ * You should have received a copy of the GNU Lesser General Public
+ * License along with this library; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
+ *
+ * Copyright (c) 2011, Citrix Systems
+ */
+
+#include <inttypes.h>
+#include <errno.h>
+#include <xenctrl.h>
+#include <xenguest.h>
+
+int xc_domain_save(xc_interface *xch, int io_fd, uint32_t dom, uint32_t max_iters,
+ uint32_t max_factor, uint32_t flags,
+ struct save_callbacks* callbacks, int hvm,
+ unsigned long vm_generationid_addr)
+{
+ errno = ENOSYS;
+ return -1;
+}
+
+int xc_domain_restore(xc_interface *xch, int io_fd, uint32_t dom,
+ unsigned int store_evtchn, unsigned long *store_mfn,
+ domid_t store_domid, unsigned int console_evtchn,
+ unsigned long *console_mfn, domid_t console_domid,
+ unsigned int hvm, unsigned int pae, int superpages,
+ int no_incr_generationid,
+ unsigned long *vm_generationid_addr)
+{
+ errno = ENOSYS;
+ return -1;
+}
+
+/*
+ * Local variables:
+ * mode: C
+ * c-set-style: "BSD"
+ * c-basic-offset: 4
+ * tab-width: 4
+ * indent-tabs-mode: nil
+ * End:
+ */
--
1.7.2.5
Signed-off-by: Stefano Stabellini <stefano.stabellini@eu.citrix.com>
Acked-by: Ian Campbell <Ian.campbell@citrix.com>
---
tools/memshr/bidir-hash.c | 31 +++++++++++++++++++++++++++++++
1 files changed, 31 insertions(+), 0 deletions(-)
diff --git a/tools/memshr/bidir-hash.c b/tools/memshr/bidir-hash.c
index 6c0dc3d..45d473e 100644
--- a/tools/memshr/bidir-hash.c
+++ b/tools/memshr/bidir-hash.c
@@ -109,6 +109,37 @@ static void hash_resize(struct __hash *h);
} while (0)
static inline void atomic_inc(uint32_t *v) { ia64_fetchadd4_rel(v, 1); }
static inline void atomic_dec(uint32_t *v) { ia64_fetchadd4_rel(v, -1); }
+#elif defined(__arm__)
+static inline void atomic_inc(uint32_t *v)
+{
+ unsigned long tmp;
+ int result;
+
+ __asm__ __volatile__("@ atomic_add\n"
+"1: ldrex %0, [%3]\n"
+" add %0, %0, #1\n"
+" strex %1, %0, [%3]\n"
+" teq %1, #0\n"
+" bne 1b"
+ : "=&r" (result), "=&r" (tmp),
"+Qo" (*v)
+ : "r" (v)
+ : "cc");
+}
+static inline void atomic_dec(uint32_t *v)
+{
+ unsigned long tmp;
+ int result;
+
+ __asm__ __volatile__("@ atomic_sub\n"
+"1: ldrex %0, [%3]\n"
+" sub %0, %0, #1\n"
+" strex %1, %0, [%3]\n"
+" teq %1, #0\n"
+" bne 1b"
+ : "=&r" (result), "=&r" (tmp),
"+Qo" (*v)
+ : "r" (v)
+ : "cc");
+}
#else /* __x86__ */
static inline void atomic_inc(uint32_t *v)
{
--
1.7.2.5
Signed-off-by: Stefano Stabellini <stefano.stabellini@eu.citrix.com>
Acked-by: Ian Campbell <ian.campbell@citrix.com>
---
tools/xentrace/xenctx.c | 12 ++++++++++++
1 files changed, 12 insertions(+), 0 deletions(-)
diff --git a/tools/xentrace/xenctx.c b/tools/xentrace/xenctx.c
index a12cc21..530ef65 100644
--- a/tools/xentrace/xenctx.c
+++ b/tools/xentrace/xenctx.c
@@ -60,6 +60,12 @@ int disp_ar_regs;
int disp_br_regs;
int disp_bank_regs;
int disp_tlb;
+
+#elif defined(__arm__)
+#define NO_TRANSLATION
+typedef uint64_t guest_word_t;
+#define FMT_32B_WORD "%08llx"
+#define FMT_64B_WORD "%016llx"
#endif
struct symbol {
@@ -678,6 +684,12 @@ void print_ctx(vcpu_guest_context_any_t *ctx)
print_tr(i, &tr->dtrs[i]);
}
}
+#elif defined(__arm__)
+static void print_ctx(vcpu_guest_context_any_t *ctx)
+{
+ /* XXX: properly implement this */
+ print_symbol(0);
+}
#endif
#ifndef NO_TRANSLATION
--
1.7.2.5
libxl_cpuid_destroy has been renamed to libxl_cpuid_dispose; also, cpuid
functions are only available on x86, so move the new cpuid-related JSON
generation function out of libxl_json.c and into libxl_cpuid.c /
libxl_nocpuid.c.
Changes in v3:
- move libxl_cpuid_policy_list_gen_json to libxl_(no)cpuid.c.
Signed-off-by: Stefano Stabellini <stefano.stabellini@eu.citrix.com>
Acked-by: Ian Campbell <ian.campbell@citrix.com>
---
tools/libxl/Makefile | 1 +
tools/libxl/libxl_cpuid.c | 60 +++++++++++++++++++++++++++++++++++++++++++
tools/libxl/libxl_json.c | 60 -------------------------------------------
tools/libxl/libxl_nocpuid.c | 8 +++++-
4 files changed, 68 insertions(+), 61 deletions(-)
diff --git a/tools/libxl/Makefile b/tools/libxl/Makefile
index 06764f2..41b6ac4 100644
--- a/tools/libxl/Makefile
+++ b/tools/libxl/Makefile
@@ -36,6 +36,7 @@ LIBXL_OBJS-y += libxl_noblktap2.o
endif
LIBXL_OBJS-$(CONFIG_X86) += libxl_cpuid.o
LIBXL_OBJS-$(CONFIG_IA64) += libxl_nocpuid.o
+LIBXL_OBJS-$(CONFIG_ARM) += libxl_nocpuid.o
ifeq ($(CONFIG_NetBSD),y)
LIBXL_OBJS-y += libxl_netbsd.o
diff --git a/tools/libxl/libxl_cpuid.c b/tools/libxl/libxl_cpuid.c
index dcdb9d02..ff7531f 100644
--- a/tools/libxl/libxl_cpuid.c
+++ b/tools/libxl/libxl_cpuid.c
@@ -333,6 +333,66 @@ void libxl_cpuid_set(libxl_ctx *ctx, uint32_t domid,
(const char**)(cpuid[i].policy), cpuid_res);
}
+yajl_gen_status libxl_cpuid_policy_list_gen_json(yajl_gen hand,
+ libxl_cpuid_policy_list *pcpuid)
+{
+ libxl_cpuid_policy_list cpuid = *pcpuid;
+ yajl_gen_status s;
+ const char *input_names[2] = { "leaf", "subleaf" };
+    const char *policy_names[4] = { "eax", "ebx", "ecx", "edx" };
+ int i, j;
+
+ /*
+ * Aiming for:
+ * [
+     * { 'leaf': 'val-eax',
+     *   'subleaf': 'val-ecx',
+     *   'eax': 'filter',
+     *   'ebx': 'filter',
+     *   'ecx': 'filter',
+     *   'edx': 'filter' },
+     * { 'leaf': 'val-eax', ..., 'eax': 'filter', ... },
+ * ... etc ...
+ * ]
+ */
+
+ s = yajl_gen_array_open(hand);
+ if (s != yajl_gen_status_ok) goto out;
+
+ if (cpuid == NULL) goto empty;
+
+ for (i = 0; cpuid[i].input[0] != XEN_CPUID_INPUT_UNUSED; i++) {
+ s = yajl_gen_map_open(hand);
+ if (s != yajl_gen_status_ok) goto out;
+
+ for (j = 0; j < 2; j++) {
+ if (cpuid[i].input[j] != XEN_CPUID_INPUT_UNUSED) {
+ s = libxl__yajl_gen_asciiz(hand, input_names[j]);
+ if (s != yajl_gen_status_ok) goto out;
+ s = yajl_gen_integer(hand, cpuid[i].input[j]);
+ if (s != yajl_gen_status_ok) goto out;
+ }
+ }
+
+ for (j = 0; j < 4; j++) {
+ if (cpuid[i].policy[j] != NULL) {
+ s = libxl__yajl_gen_asciiz(hand, policy_names[j]);
+ if (s != yajl_gen_status_ok) goto out;
+ s = yajl_gen_string(hand,
+ (const unsigned char *)cpuid[i].policy[j], 32);
+ if (s != yajl_gen_status_ok) goto out;
+ }
+ }
+ s = yajl_gen_map_close(hand);
+ if (s != yajl_gen_status_ok) goto out;
+ }
+
+empty:
+ s = yajl_gen_array_close(hand);
+out:
+ return s;
+}
+
/*
* Local variables:
* mode: C
diff --git a/tools/libxl/libxl_json.c b/tools/libxl/libxl_json.c
index be6ad96..4cbcd57 100644
--- a/tools/libxl/libxl_json.c
+++ b/tools/libxl/libxl_json.c
@@ -140,66 +140,6 @@ out:
return s;
}
-yajl_gen_status libxl_cpuid_policy_list_gen_json(yajl_gen hand,
- libxl_cpuid_policy_list *pcpuid)
-{
- libxl_cpuid_policy_list cpuid = *pcpuid;
- yajl_gen_status s;
- const char *input_names[2] = { "leaf", "subleaf" };
-    const char *policy_names[4] = { "eax", "ebx", "ecx", "edx" };
- int i, j;
-
- /*
- * Aiming for:
- * [
-     * { 'leaf': 'val-eax',
-     *   'subleaf': 'val-ecx',
-     *   'eax': 'filter',
-     *   'ebx': 'filter',
-     *   'ecx': 'filter',
-     *   'edx': 'filter' },
-     * { 'leaf': 'val-eax', ..., 'eax': 'filter', ... },
- * ... etc ...
- * ]
- */
-
- s = yajl_gen_array_open(hand);
- if (s != yajl_gen_status_ok) goto out;
-
- if (cpuid == NULL) goto empty;
-
- for (i = 0; cpuid[i].input[0] != XEN_CPUID_INPUT_UNUSED; i++) {
- s = yajl_gen_map_open(hand);
- if (s != yajl_gen_status_ok) goto out;
-
- for (j = 0; j < 2; j++) {
- if (cpuid[i].input[j] != XEN_CPUID_INPUT_UNUSED) {
- s = libxl__yajl_gen_asciiz(hand, input_names[j]);
- if (s != yajl_gen_status_ok) goto out;
- s = yajl_gen_integer(hand, cpuid[i].input[j]);
- if (s != yajl_gen_status_ok) goto out;
- }
- }
-
- for (j = 0; j < 4; j++) {
- if (cpuid[i].policy[j] != NULL) {
- s = libxl__yajl_gen_asciiz(hand, policy_names[j]);
- if (s != yajl_gen_status_ok) goto out;
- s = yajl_gen_string(hand,
- (const unsigned char *)cpuid[i].policy[j], 32);
- if (s != yajl_gen_status_ok) goto out;
- }
- }
- s = yajl_gen_map_close(hand);
- if (s != yajl_gen_status_ok) goto out;
- }
-
-empty:
- s = yajl_gen_array_close(hand);
-out:
- return s;
-}
-
yajl_gen_status libxl_string_list_gen_json(yajl_gen hand, libxl_string_list *pl)
{
libxl_string_list l = *pl;
diff --git a/tools/libxl/libxl_nocpuid.c b/tools/libxl/libxl_nocpuid.c
index 9e52f8d..5f7cb6a 100644
--- a/tools/libxl/libxl_nocpuid.c
+++ b/tools/libxl/libxl_nocpuid.c
@@ -14,7 +14,7 @@
#include "libxl_internal.h"
-void libxl_cpuid_destroy(libxl_cpuid_policy_list *p_cpuid_list)
+void libxl_cpuid_dispose(libxl_cpuid_policy_list *p_cpuid_list)
{
}
@@ -38,6 +38,12 @@ void libxl_cpuid_set(libxl_ctx *ctx, uint32_t domid,
{
}
+yajl_gen_status libxl_cpuid_policy_list_gen_json(yajl_gen hand,
+ libxl_cpuid_policy_list *pcpuid)
+{
+ return 0;
+}
+
/*
* Local variables:
* mode: C
--
1.7.2.5
[PATCH v5 7/7] libxl: Introduce libxl__arch_domain_create
Introduce an arch-specific internal domain creation function. At the
moment only x86 provides an implementation.
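A port that needs no extra work at domain creation time can reuse the no-op
version; roughly (sketch only, mirroring libxl_noarch.c and the prototype in
libxl_arch.h below):

    /* sketch of what a new port would provide if it has no arch-specific
     * domain-creation work; identical in shape to libxl_noarch.c */
    int libxl__arch_domain_create(libxl__gc *gc, libxl_domain_config *d_config,
                                  uint32_t domid)
    {
        return 0;   /* nothing to do for this architecture */
    }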
Signed-off-by: Stefano Stabellini <stefano.stabellini@eu.citrix.com>
---
tools/libxl/Makefile | 6 +-
tools/libxl/libxl_arch.h | 22 ++++
tools/libxl/libxl_create.c | 12 +--
tools/libxl/libxl_internal.h | 2 -
tools/libxl/libxl_noarch.c | 8 ++
tools/libxl/libxl_pci.c | 242 ---------------------------------------
tools/libxl/libxl_x86.c | 259 ++++++++++++++++++++++++++++++++++++++++++
7 files changed, 294 insertions(+), 257 deletions(-)
create mode 100644 tools/libxl/libxl_arch.h
create mode 100644 tools/libxl/libxl_noarch.c
create mode 100644 tools/libxl/libxl_x86.c
diff --git a/tools/libxl/Makefile b/tools/libxl/Makefile
index 41b6ac4..ba5852b 100644
--- a/tools/libxl/Makefile
+++ b/tools/libxl/Makefile
@@ -34,9 +34,9 @@ LIBXL_OBJS-y += libxl_blktap2.o
else
LIBXL_OBJS-y += libxl_noblktap2.o
endif
-LIBXL_OBJS-$(CONFIG_X86) += libxl_cpuid.o
-LIBXL_OBJS-$(CONFIG_IA64) += libxl_nocpuid.o
-LIBXL_OBJS-$(CONFIG_ARM) += libxl_nocpuid.o
+LIBXL_OBJS-$(CONFIG_X86) += libxl_cpuid.o libxl_x86.o
+LIBXL_OBJS-$(CONFIG_IA64) += libxl_nocpuid.o libxl_noarch.o
+LIBXL_OBJS-$(CONFIG_ARM) += libxl_nocpuid.o libxl_noarch.o
ifeq ($(CONFIG_NetBSD),y)
LIBXL_OBJS-y += libxl_netbsd.o
diff --git a/tools/libxl/libxl_arch.h b/tools/libxl/libxl_arch.h
new file mode 100644
index 0000000..d1bbdf7
--- /dev/null
+++ b/tools/libxl/libxl_arch.h
@@ -0,0 +1,22 @@
+/*
+ * Copyright (C) 2012 Citrix Ltd.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU Lesser General Public License as published
+ * by the Free Software Foundation; version 2.1 only. with the special
+ * exception on linking described in file LICENSE.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU Lesser General Public License for more details.
+ */
+
+#ifndef LIBXL_ARCH_H
+#define LIBXL_ARCH_H
+
+/* arch specific internal domain creation function */
+int libxl__arch_domain_create(libxl__gc *gc, libxl_domain_config *d_config,
+ uint32_t domid);
+
+#endif
diff --git a/tools/libxl/libxl_create.c b/tools/libxl/libxl_create.c
index f28d814..ff589a8 100644
--- a/tools/libxl/libxl_create.c
+++ b/tools/libxl/libxl_create.c
@@ -18,6 +18,7 @@
#include "libxl_osdeps.h" /* must come before any other headers */
#include "libxl_internal.h"
+#include "libxl_arch.h"
#include <xc_dom.h>
#include <xenguest.h>
@@ -616,16 +617,7 @@ static int do_domain_create(libxl__gc *gc, libxl_domain_config *d_config,
goto error_out;
}
}
-
- if (d_config->c_info.type == LIBXL_DOMAIN_TYPE_PV &&
- d_config->b_info.u.pv.e820_host) {
- int rc;
- rc = libxl__e820_alloc(gc, domid, d_config);
- if (rc)
- LIBXL__LOG_ERRNO(ctx, LIBXL__LOG_ERROR,
- "Failed while collecting E820 with: %d (errno:%d)\n",
- rc, errno);
- }
+ libxl__arch_domain_create(gc, d_config, domid);
if ( cb && (d_config->c_info.type == LIBXL_DOMAIN_TYPE_HVM ||
(d_config->c_info.type == LIBXL_DOMAIN_TYPE_PV &&
d_config->b_info.u.pv.bootloader ))) {
diff --git a/tools/libxl/libxl_internal.h b/tools/libxl/libxl_internal.h
index 8846c68..bd384e2 100644
--- a/tools/libxl/libxl_internal.h
+++ b/tools/libxl/libxl_internal.h
@@ -975,8 +975,6 @@ _hidden int libxl__error_set(libxl__gc *gc, int code);
_hidden int libxl__file_reference_map(libxl_file_reference *f);
_hidden int libxl__file_reference_unmap(libxl_file_reference *f);
-_hidden int libxl__e820_alloc(libxl__gc *gc, uint32_t domid, libxl_domain_config *d_config);
-
/* parse the string @s as a sequence of 6 colon separated bytes in to @mac */
_hidden int libxl__parse_mac(const char *s, libxl_mac mac);
/* compare mac address @a and @b. 0 if the same, -ve if a<b and +ve if a>b */
diff --git a/tools/libxl/libxl_noarch.c b/tools/libxl/libxl_noarch.c
new file mode 100644
index 0000000..7893535
--- /dev/null
+++ b/tools/libxl/libxl_noarch.c
@@ -0,0 +1,8 @@
+#include "libxl_internal.h"
+#include "libxl_arch.h"
+
+int libxl__arch_domain_create(libxl__gc *gc, libxl_domain_config *d_config,
+ uint32_t domid)
+{
+ return 0;
+}
diff --git a/tools/libxl/libxl_pci.c b/tools/libxl/libxl_pci.c
index 33425f5..d960f4b 100644
--- a/tools/libxl/libxl_pci.c
+++ b/tools/libxl/libxl_pci.c
@@ -1147,248 +1147,6 @@ int libxl__device_pci_destroy_all(libxl__gc *gc, uint32_t domid)
return 0;
}
-static const char *e820_names(int type)
-{
- switch (type) {
- case E820_RAM: return "RAM";
- case E820_RESERVED: return "Reserved";
- case E820_ACPI: return "ACPI";
- case E820_NVS: return "ACPI NVS";
- case E820_UNUSABLE: return "Unusable";
- default: break;
- }
- return "Unknown";
-}
-
-static int e820_sanitize(libxl_ctx *ctx, struct e820entry src[],
- uint32_t *nr_entries,
- unsigned long map_limitkb,
- unsigned long balloon_kb)
-{
- uint64_t delta_kb = 0, start = 0, start_kb = 0, last = 0, ram_end;
- uint32_t i, idx = 0, nr;
- struct e820entry e820[E820MAX];
-
- if (!src || !map_limitkb || !balloon_kb || !nr_entries)
- return ERROR_INVAL;
-
- nr = *nr_entries;
- if (!nr)
- return ERROR_INVAL;
-
- if (nr > E820MAX)
- return ERROR_NOMEM;
-
- /* Weed out anything under 1MB */
- for (i = 0; i < nr; i++) {
- if (src[i].addr > 0x100000)
- continue;
-
- src[i].type = 0;
- src[i].size = 0;
- src[i].addr = -1ULL;
- }
-
- /* Find the lowest and highest entry in E820, skipping over
- * undesired entries. */
- start = -1ULL;
- last = 0;
- for (i = 0; i < nr; i++) {
- if ((src[i].type == E820_RAM) ||
- (src[i].type == E820_UNUSABLE) ||
- (src[i].type == 0))
- continue;
-
- start = src[i].addr < start ? src[i].addr : start;
- last = src[i].addr + src[i].size > last ?
- src[i].addr + src[i].size > last : last;
- }
- if (start > 1024)
- start_kb = start >> 10;
-
- /* Add the memory RAM region for the guest */
- e820[idx].addr = 0;
- e820[idx].size = (uint64_t)map_limitkb << 10;
- e820[idx].type = E820_RAM;
-
- /* .. and trim if neccessary */
- if (start_kb && map_limitkb > start_kb) {
- delta_kb = map_limitkb - start_kb;
- if (delta_kb)
- e820[idx].size -= (uint64_t)(delta_kb << 10);
- }
- /* Note: We don't touch balloon_kb here. Will add it at the end. */
- ram_end = e820[idx].addr + e820[idx].size;
- idx ++;
-
- LIBXL__LOG(ctx, LIBXL__LOG_DEBUG, "Memory: %"PRIu64"kB End
of RAM: " \
- "0x%"PRIx64" (PFN) Delta: %"PRIu64"kB,
PCI start: %"PRIu64"kB " \
- "(0x%"PRIx64" PFN), Balloon
%"PRIu64"kB\n", (uint64_t)map_limitkb,
- ram_end >> 12, delta_kb, start_kb ,start >> 12,
- (uint64_t)balloon_kb);
-
-
- /* This whole code below is to guard against if the Intel IGD is passed into
- * the guest. If we don't pass in IGD, this whole code can be ignored.
- *
- * The reason for this code is that Intel boxes fill their E820 with
- * E820_RAM amongst E820_RESERVED and we can't just ditch those E820_RAM.
- * That is b/c any "gaps" in the E820 is considered PCI I/O space by
- * Linux and it would be utilized by the Intel IGD as I/O space while
- * in reality it was an RAM region.
- *
- * What this means is that we have to walk the E820 and for any region
- * that is RAM and below 4GB and above ram_end, needs to change its type
- * to E820_UNUSED. We also need to move some of the E820_RAM regions if
- * the overlap with ram_end. */
- for (i = 0; i < nr; i++) {
- uint64_t end = src[i].addr + src[i].size;
-
- /* We don't care about E820_UNUSABLE, but we need to
- * change the type to zero b/c the loop after this
- * sticks E820_UNUSABLE on the guest's E820 but ignores
- * the ones with type zero. */
- if ((src[i].type == E820_UNUSABLE) ||
- /* Any region that is within the "RAM region" can
- * be safely ditched. */
- (end < ram_end)) {
- src[i].type = 0;
- continue;
- }
-
- /* Look only at RAM regions. */
- if (src[i].type != E820_RAM)
- continue;
-
- /* We only care about RAM regions below 4GB. */
- if (src[i].addr >= (1ULL<<32))
- continue;
-
- /* E820_RAM overlaps with our RAM region. Move it */
- if (src[i].addr < ram_end) {
- uint64_t delta;
-
- src[i].type = E820_UNUSABLE;
- delta = ram_end - src[i].addr;
- /* The end < ram_end should weed this out */
- if (src[i].size - delta < 0)
- src[i].type = 0;
- else {
- src[i].size -= delta;
- src[i].addr = ram_end;
- }
- if (src[i].addr + src[i].size != end) {
- /* We messed up somewhere */
- src[i].type = 0;
- LIBXL__LOG_ERRNO(ctx, LIBXL__LOG_ERROR, "Computed E820
wrongly. Continuing on.");
- }
- }
- /* Lastly, convert the RAM to UNSUABLE. Look in the Linux kernel
- at git commit 2f14ddc3a7146ea4cd5a3d1ecd993f85f2e4f948
- "xen/setup: Inhibit resource API from using System RAM E820
- gaps as PCI mem gaps" for full explanation. */
- if (end > ram_end)
- src[i].type = E820_UNUSABLE;
- }
-
- /* Check if there is a region between ram_end and start. */
- if (start > ram_end) {
- int add_unusable = 1;
- for (i = 0; i < nr && add_unusable; i++) {
- if (src[i].type != E820_UNUSABLE)
- continue;
- if (ram_end != src[i].addr)
- continue;
- if (start != src[i].addr + src[i].size) {
- /* there is one, adjust it */
- src[i].size = start - src[i].addr;
- }
- add_unusable = 0;
- }
- /* .. and if not present, add it in. This is to guard against
- the Linux guest assuming that the gap between the end of
- RAM region and the start of the E820_[ACPI,NVS,RESERVED]
- is PCI I/O space. Which it certainly is _not_. */
- if (add_unusable) {
- e820[idx].type = E820_UNUSABLE;
- e820[idx].addr = ram_end;
- e820[idx].size = start - ram_end;
- idx++;
- }
- }
- /* Almost done: copy them over, ignoring the undesireable ones */
- for (i = 0; i < nr; i++) {
- if ((src[i].type == E820_RAM) ||
- (src[i].type == 0))
- continue;
-
- e820[idx].type = src[i].type;
- e820[idx].addr = src[i].addr;
- e820[idx].size = src[i].size;
- idx++;
- }
- /* At this point we have the mapped RAM + E820 entries from src. */
- if (balloon_kb) {
- /* and if we truncated the RAM region, then add it to the end. */
- e820[idx].type = E820_RAM;
- e820[idx].addr = (uint64_t)(1ULL << 32) > last ?
- (uint64_t)(1ULL << 32) : last;
- /* also add the balloon memory to the end. */
- e820[idx].size = (uint64_t)(delta_kb << 10) +
- (uint64_t)(balloon_kb << 10);
- idx++;
-
- }
- nr = idx;
-
- for (i = 0; i < nr; i++) {
- LIBXL__LOG(ctx, LIBXL__LOG_DEBUG, ":\t[%"PRIx64" ->
%"PRIx64"] %s",
- e820[i].addr >> 12, (e820[i].addr + e820[i].size)
>> 12,
- e820_names(e820[i].type));
- }
-
- /* Done: copy the sanitized version. */
- *nr_entries = nr;
- memcpy(src, e820, nr * sizeof(struct e820entry));
- return 0;
-}
-
-int libxl__e820_alloc(libxl__gc *gc, uint32_t domid, libxl_domain_config *d_config)
-{
- libxl_ctx *ctx = libxl__gc_owner(gc);
- int rc;
- uint32_t nr;
- struct e820entry map[E820MAX];
- libxl_domain_build_info *b_info;
-
- if (d_config == NULL || d_config->c_info.type == LIBXL_DOMAIN_TYPE_HVM)
- return ERROR_INVAL;
-
- b_info = &d_config->b_info;
- if (!b_info->u.pv.e820_host)
- return ERROR_INVAL;
-
- rc = xc_get_machine_memory_map(ctx->xch, map, E820MAX);
- if (rc < 0) {
- errno = rc;
- return ERROR_FAIL;
- }
- nr = rc;
- rc = e820_sanitize(ctx, map, &nr, b_info->target_memkb,
- (b_info->max_memkb - b_info->target_memkb) +
- b_info->u.pv.slack_memkb);
- if (rc)
- return ERROR_FAIL;
-
- rc = xc_domain_set_memory_map(ctx->xch, domid, map, nr);
-
- if (rc < 0) {
- errno = rc;
- return ERROR_FAIL;
- }
- return 0;
-}
-
/*
* Local variables:
* mode: C
diff --git a/tools/libxl/libxl_x86.c b/tools/libxl/libxl_x86.c
new file mode 100644
index 0000000..7e11f2d
--- /dev/null
+++ b/tools/libxl/libxl_x86.c
@@ -0,0 +1,259 @@
+#include "libxl_internal.h"
+#include "libxl_arch.h"
+
+static const char *e820_names(int type)
+{
+ switch (type) {
+ case E820_RAM: return "RAM";
+ case E820_RESERVED: return "Reserved";
+ case E820_ACPI: return "ACPI";
+ case E820_NVS: return "ACPI NVS";
+ case E820_UNUSABLE: return "Unusable";
+ default: break;
+ }
+ return "Unknown";
+}
+
+static int e820_sanitize(libxl_ctx *ctx, struct e820entry src[],
+ uint32_t *nr_entries,
+ unsigned long map_limitkb,
+ unsigned long balloon_kb)
+{
+ uint64_t delta_kb = 0, start = 0, start_kb = 0, last = 0, ram_end;
+ uint32_t i, idx = 0, nr;
+ struct e820entry e820[E820MAX];
+
+ if (!src || !map_limitkb || !balloon_kb || !nr_entries)
+ return ERROR_INVAL;
+
+ nr = *nr_entries;
+ if (!nr)
+ return ERROR_INVAL;
+
+ if (nr > E820MAX)
+ return ERROR_NOMEM;
+
+ /* Weed out anything under 1MB */
+ for (i = 0; i < nr; i++) {
+ if (src[i].addr > 0x100000)
+ continue;
+
+ src[i].type = 0;
+ src[i].size = 0;
+ src[i].addr = -1ULL;
+ }
+
+ /* Find the lowest and highest entry in E820, skipping over
+ * undesired entries. */
+ start = -1ULL;
+ last = 0;
+ for (i = 0; i < nr; i++) {
+ if ((src[i].type == E820_RAM) ||
+ (src[i].type == E820_UNUSABLE) ||
+ (src[i].type == 0))
+ continue;
+
+ start = src[i].addr < start ? src[i].addr : start;
+ last = src[i].addr + src[i].size > last ?
+ src[i].addr + src[i].size > last : last;
+ }
+ if (start > 1024)
+ start_kb = start >> 10;
+
+ /* Add the memory RAM region for the guest */
+ e820[idx].addr = 0;
+ e820[idx].size = (uint64_t)map_limitkb << 10;
+ e820[idx].type = E820_RAM;
+
+ /* .. and trim if neccessary */
+ if (start_kb && map_limitkb > start_kb) {
+ delta_kb = map_limitkb - start_kb;
+ if (delta_kb)
+ e820[idx].size -= (uint64_t)(delta_kb << 10);
+ }
+ /* Note: We don't touch balloon_kb here. Will add it at the end. */
+ ram_end = e820[idx].addr + e820[idx].size;
+ idx ++;
+
+ LIBXL__LOG(ctx, LIBXL__LOG_DEBUG, "Memory: %"PRIu64"kB End
of RAM: " \
+ "0x%"PRIx64" (PFN) Delta: %"PRIu64"kB,
PCI start: %"PRIu64"kB " \
+ "(0x%"PRIx64" PFN), Balloon
%"PRIu64"kB\n", (uint64_t)map_limitkb,
+ ram_end >> 12, delta_kb, start_kb ,start >> 12,
+ (uint64_t)balloon_kb);
+
+
+ /* This whole code below is to guard against if the Intel IGD is passed into
+ * the guest. If we don't pass in IGD, this whole code can be ignored.
+ *
+ * The reason for this code is that Intel boxes fill their E820 with
+ * E820_RAM amongst E820_RESERVED and we can't just ditch those E820_RAM.
+ * That is b/c any "gaps" in the E820 is considered PCI I/O space by
+ * Linux and it would be utilized by the Intel IGD as I/O space while
+ * in reality it was an RAM region.
+ *
+ * What this means is that we have to walk the E820 and for any region
+ * that is RAM and below 4GB and above ram_end, needs to change its type
+ * to E820_UNUSED. We also need to move some of the E820_RAM regions if
+ * the overlap with ram_end. */
+ for (i = 0; i < nr; i++) {
+ uint64_t end = src[i].addr + src[i].size;
+
+ /* We don't care about E820_UNUSABLE, but we need to
+ * change the type to zero b/c the loop after this
+ * sticks E820_UNUSABLE on the guest's E820 but ignores
+ * the ones with type zero. */
+ if ((src[i].type == E820_UNUSABLE) ||
+ /* Any region that is within the "RAM region" can
+ * be safely ditched. */
+ (end < ram_end)) {
+ src[i].type = 0;
+ continue;
+ }
+
+ /* Look only at RAM regions. */
+ if (src[i].type != E820_RAM)
+ continue;
+
+ /* We only care about RAM regions below 4GB. */
+ if (src[i].addr >= (1ULL<<32))
+ continue;
+
+ /* E820_RAM overlaps with our RAM region. Move it */
+ if (src[i].addr < ram_end) {
+ uint64_t delta;
+
+ src[i].type = E820_UNUSABLE;
+ delta = ram_end - src[i].addr;
+ /* The end < ram_end should weed this out */
+ if (src[i].size - delta < 0)
+ src[i].type = 0;
+ else {
+ src[i].size -= delta;
+ src[i].addr = ram_end;
+ }
+ if (src[i].addr + src[i].size != end) {
+ /* We messed up somewhere */
+ src[i].type = 0;
+ LIBXL__LOG_ERRNO(ctx, LIBXL__LOG_ERROR, "Computed E820
wrongly. Continuing on.");
+ }
+ }
+ /* Lastly, convert the RAM to UNSUABLE. Look in the Linux kernel
+ at git commit 2f14ddc3a7146ea4cd5a3d1ecd993f85f2e4f948
+ "xen/setup: Inhibit resource API from using System RAM E820
+ gaps as PCI mem gaps" for full explanation. */
+ if (end > ram_end)
+ src[i].type = E820_UNUSABLE;
+ }
+
+ /* Check if there is a region between ram_end and start. */
+ if (start > ram_end) {
+ int add_unusable = 1;
+ for (i = 0; i < nr && add_unusable; i++) {
+ if (src[i].type != E820_UNUSABLE)
+ continue;
+ if (ram_end != src[i].addr)
+ continue;
+ if (start != src[i].addr + src[i].size) {
+ /* there is one, adjust it */
+ src[i].size = start - src[i].addr;
+ }
+ add_unusable = 0;
+ }
+ /* .. and if not present, add it in. This is to guard against
+ the Linux guest assuming that the gap between the end of
+ RAM region and the start of the E820_[ACPI,NVS,RESERVED]
+ is PCI I/O space. Which it certainly is _not_. */
+ if (add_unusable) {
+ e820[idx].type = E820_UNUSABLE;
+ e820[idx].addr = ram_end;
+ e820[idx].size = start - ram_end;
+ idx++;
+ }
+ }
+ /* Almost done: copy them over, ignoring the undesireable ones */
+ for (i = 0; i < nr; i++) {
+ if ((src[i].type == E820_RAM) ||
+ (src[i].type == 0))
+ continue;
+
+ e820[idx].type = src[i].type;
+ e820[idx].addr = src[i].addr;
+ e820[idx].size = src[i].size;
+ idx++;
+ }
+ /* At this point we have the mapped RAM + E820 entries from src. */
+ if (balloon_kb) {
+ /* and if we truncated the RAM region, then add it to the end. */
+ e820[idx].type = E820_RAM;
+ e820[idx].addr = (uint64_t)(1ULL << 32) > last ?
+ (uint64_t)(1ULL << 32) : last;
+ /* also add the balloon memory to the end. */
+ e820[idx].size = (uint64_t)(delta_kb << 10) +
+ (uint64_t)(balloon_kb << 10);
+ idx++;
+
+ }
+ nr = idx;
+
+ for (i = 0; i < nr; i++) {
+ LIBXL__LOG(ctx, LIBXL__LOG_DEBUG, ":\t[%"PRIx64" ->
%"PRIx64"] %s",
+ e820[i].addr >> 12, (e820[i].addr + e820[i].size)
>> 12,
+ e820_names(e820[i].type));
+ }
+
+ /* Done: copy the sanitized version. */
+ *nr_entries = nr;
+ memcpy(src, e820, nr * sizeof(struct e820entry));
+ return 0;
+}
+
+static int libxl__e820_alloc(libxl__gc *gc, uint32_t domid, libxl_domain_config *d_config)
+{
+ libxl_ctx *ctx = libxl__gc_owner(gc);
+ int rc;
+ uint32_t nr;
+ struct e820entry map[E820MAX];
+ libxl_domain_build_info *b_info;
+
+ if (d_config == NULL || d_config->c_info.type == LIBXL_DOMAIN_TYPE_HVM)
+ return ERROR_INVAL;
+
+ b_info = &d_config->b_info;
+ if (!b_info->u.pv.e820_host)
+ return ERROR_INVAL;
+
+ rc = xc_get_machine_memory_map(ctx->xch, map, E820MAX);
+ if (rc < 0) {
+ errno = rc;
+ return ERROR_FAIL;
+ }
+ nr = rc;
+ rc = e820_sanitize(ctx, map, &nr, b_info->target_memkb,
+ (b_info->max_memkb - b_info->target_memkb) +
+ b_info->u.pv.slack_memkb);
+ if (rc)
+ return ERROR_FAIL;
+
+ rc = xc_domain_set_memory_map(ctx->xch, domid, map, nr);
+
+ if (rc < 0) {
+ errno = rc;
+ return ERROR_FAIL;
+ }
+ return 0;
+}
+
+int libxl__arch_domain_create(libxl__gc *gc, libxl_domain_config *d_config,
+ uint32_t domid)
+{
+ int rc = 0;
+ if (d_config->c_info.type == LIBXL_DOMAIN_TYPE_PV &&
+ d_config->b_info.u.pv.e820_host) {
+ rc = libxl__e820_alloc(gc, domid, d_config);
+ if (rc)
+ LIBXL__LOG_ERRNO(libxl__gc_owner(gc), LIBXL__LOG_ERROR,
+ "Failed while collecting E820 with: %d (errno:%d)\n",
+ rc, errno);
+ }
+ return rc;
+}
--
1.7.2.5
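To make the e820_sanitize logic above concrete, here is a rough, illustrative
example (all addresses invented) for a PV guest with e820_host enabled, 1GB of
target memory and 256MB of balloon slack, on a host whose first reserved
region starts at 3GB:

    host E820 (input)              guest E820 (output)
    [0,     640K)  RAM             [0,     1G)      RAM       (guest target)
    [1M,    3G)    RAM             [1G,    3G)      Unusable  (gap up to reserved)
    [3G,    3G+1M) Reserved        [3G,    3G+1M)   Reserved  (copied through)
    [3G+1M, 4G)    ACPI            [3G+1M, 4G)      ACPI      (copied through)
                                   [4G,    4G+256M) RAM       (ballooned memory)

Host RAM entries are dropped in favour of a single RAM region covering the
guest's target memory, the hole between the end of that region and the first
reserved area is marked Unusable so Linux does not mistake it for PCI I/O
space, non-RAM entries are copied through, and the ballooned portion is
appended above 4GB.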
On Thu, 2012-02-23 at 14:51 +0000, Stefano Stabellini wrote:
> Introduce an empty implementation of the arch specific ARM functions in
> xc_dom_arm.c.
> Provide empty implementations of xc_domain_save and xc_domain_restore
> when CONFIG_MIGRATE is not set.
> Move xc_hvm_build.c to xc_hvm_build_x86.c because the implementation is
> x86 specific, introduce xc_hvm_build_arm.c with empty stubs.
>
> > Changes in v3:
> > - rename xc_hvm_build.c to xc_hvm_build_x86.c;
> > - remove xc_nohvm, introduce xc_hvm_build_arm.c instead;
>
> Signed-off-by: Stefano Stabellini <stefano.stabellini@eu.citrix.com>

Acked-by: Ian Campbell <ian.campbell@citrix.com>

If you happen to repost then updating the copyright lines to say 2012
instead of 2011 might be useful.

I'd also encourage the use of "format-patch -M" in the future -- it
makes renames much easier to review.

> [...]
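Since the stubs that cannot do anything useful (xc_hvm_build_arm.c,
xc_nomigrate.c, and arch_setup_meminit in xc_dom_arm.c) fail by setting errno
to ENOSYS and returning -1, a toolstack can tell "not supported in this build"
apart from a genuine failure. A minimal, illustrative sketch of a caller-side
check (not part of the series; the all-zero/NULL arguments are for brevity
only):

    #include <errno.h>
    #include <stdio.h>
    #include <string.h>
    #include <xenctrl.h>
    #include <xenguest.h>

    /* Try to save a domain; report cleanly if this libxc was built
     * without migration support (i.e. the xc_nomigrate.c stubs). */
    static int try_save(xc_interface *xch, int io_fd, uint32_t domid)
    {
        int ret = xc_domain_save(xch, io_fd, domid, 0 /* max_iters */,
                                 0 /* max_factor */, 0 /* flags */,
                                 NULL /* callbacks */, 0 /* hvm */,
                                 0 /* vm_generationid_addr */);
        if (ret < 0 && errno == ENOSYS)
            fprintf(stderr, "domain save not supported: %s\n",
                    strerror(errno));
        return ret;
    }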
Ian Campbell
2012-Feb-24 09:25 UTC
Re: [PATCH v5 7/7] libxl: Introduce libxl__arch_domain_create
On Thu, 2012-02-23 at 14:51 +0000, Stefano Stabellini wrote:> Introduce an arch specific internal domain creation function. At the > moment only x86 provides an implementation. > > Signed-off-by: Stefano Stabellini <stefano.stabellini@eu.citrix.com>Acked-by: Ian Campbell <ian.campbell@citrix.com> (I thought I acked this last time)> --- > tools/libxl/Makefile | 6 +- > tools/libxl/libxl_arch.h | 22 ++++ > tools/libxl/libxl_create.c | 12 +-- > tools/libxl/libxl_internal.h | 2 - > tools/libxl/libxl_noarch.c | 8 ++ > tools/libxl/libxl_pci.c | 242 --------------------------------------- > tools/libxl/libxl_x86.c | 259 ++++++++++++++++++++++++++++++++++++++++++ > 7 files changed, 294 insertions(+), 257 deletions(-) > create mode 100644 tools/libxl/libxl_arch.h > create mode 100644 tools/libxl/libxl_noarch.c > create mode 100644 tools/libxl/libxl_x86.c > > diff --git a/tools/libxl/Makefile b/tools/libxl/Makefile > index 41b6ac4..ba5852b 100644 > --- a/tools/libxl/Makefile > +++ b/tools/libxl/Makefile > @@ -34,9 +34,9 @@ LIBXL_OBJS-y += libxl_blktap2.o > else > LIBXL_OBJS-y += libxl_noblktap2.o > endif > -LIBXL_OBJS-$(CONFIG_X86) += libxl_cpuid.o > -LIBXL_OBJS-$(CONFIG_IA64) += libxl_nocpuid.o > -LIBXL_OBJS-$(CONFIG_ARM) += libxl_nocpuid.o > +LIBXL_OBJS-$(CONFIG_X86) += libxl_cpuid.o libxl_x86.o > +LIBXL_OBJS-$(CONFIG_IA64) += libxl_nocpuid.o libxl_noarch.o > +LIBXL_OBJS-$(CONFIG_ARM) += libxl_nocpuid.o libxl_noarch.o > > ifeq ($(CONFIG_NetBSD),y) > LIBXL_OBJS-y += libxl_netbsd.o > diff --git a/tools/libxl/libxl_arch.h b/tools/libxl/libxl_arch.h > new file mode 100644 > index 0000000..d1bbdf7 > --- /dev/null > +++ b/tools/libxl/libxl_arch.h > @@ -0,0 +1,22 @@ > +/* > + * Copyright (C) 2012 Citrix Ltd. > + * > + * This program is free software; you can redistribute it and/or modify > + * it under the terms of the GNU Lesser General Public License as published > + * by the Free Software Foundation; version 2.1 only. with the special > + * exception on linking described in file LICENSE. > + * > + * This program is distributed in the hope that it will be useful, > + * but WITHOUT ANY WARRANTY; without even the implied warranty of > + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the > + * GNU Lesser General Public License for more details. 
> + */ > + > +#ifndef LIBXL_ARCH_H > +#define LIBXL_ARCH_H > + > +/* arch specific internal domain creation function */ > +int libxl__arch_domain_create(libxl__gc *gc, libxl_domain_config *d_config, > + uint32_t domid); > + > +#endif > diff --git a/tools/libxl/libxl_create.c b/tools/libxl/libxl_create.c > index f28d814..ff589a8 100644 > --- a/tools/libxl/libxl_create.c > +++ b/tools/libxl/libxl_create.c > @@ -18,6 +18,7 @@ > #include "libxl_osdeps.h" /* must come before any other headers */ > > #include "libxl_internal.h" > +#include "libxl_arch.h" > > #include <xc_dom.h> > #include <xenguest.h> > @@ -616,16 +617,7 @@ static int do_domain_create(libxl__gc *gc, libxl_domain_config *d_config, > goto error_out; > } > } > - > - if (d_config->c_info.type == LIBXL_DOMAIN_TYPE_PV && > - d_config->b_info.u.pv.e820_host) { > - int rc; > - rc = libxl__e820_alloc(gc, domid, d_config); > - if (rc) > - LIBXL__LOG_ERRNO(ctx, LIBXL__LOG_ERROR, > - "Failed while collecting E820 with: %d (errno:%d)\n", > - rc, errno); > - } > + libxl__arch_domain_create(gc, d_config, domid); > if ( cb && (d_config->c_info.type == LIBXL_DOMAIN_TYPE_HVM || > (d_config->c_info.type == LIBXL_DOMAIN_TYPE_PV && > d_config->b_info.u.pv.bootloader ))) { > diff --git a/tools/libxl/libxl_internal.h b/tools/libxl/libxl_internal.h > index 8846c68..bd384e2 100644 > --- a/tools/libxl/libxl_internal.h > +++ b/tools/libxl/libxl_internal.h > @@ -975,8 +975,6 @@ _hidden int libxl__error_set(libxl__gc *gc, int code); > _hidden int libxl__file_reference_map(libxl_file_reference *f); > _hidden int libxl__file_reference_unmap(libxl_file_reference *f); > > -_hidden int libxl__e820_alloc(libxl__gc *gc, uint32_t domid, libxl_domain_config *d_config); > - > /* parse the string @s as a sequence of 6 colon separated bytes in to @mac */ > _hidden int libxl__parse_mac(const char *s, libxl_mac mac); > /* compare mac address @a and @b. 
0 if the same, -ve if a<b and +ve if a>b */ > diff --git a/tools/libxl/libxl_noarch.c b/tools/libxl/libxl_noarch.c > new file mode 100644 > index 0000000..7893535 > --- /dev/null > +++ b/tools/libxl/libxl_noarch.c > @@ -0,0 +1,8 @@ > +#include "libxl_internal.h" > +#include "libxl_arch.h" > + > +int libxl__arch_domain_create(libxl__gc *gc, libxl_domain_config *d_config, > + uint32_t domid) > +{ > + return 0; > +} > diff --git a/tools/libxl/libxl_pci.c b/tools/libxl/libxl_pci.c > index 33425f5..d960f4b 100644 > --- a/tools/libxl/libxl_pci.c > +++ b/tools/libxl/libxl_pci.c > @@ -1147,248 +1147,6 @@ int libxl__device_pci_destroy_all(libxl__gc *gc, uint32_t domid) > return 0; > } > > -static const char *e820_names(int type) > -{ > - switch (type) { > - case E820_RAM: return "RAM"; > - case E820_RESERVED: return "Reserved"; > - case E820_ACPI: return "ACPI"; > - case E820_NVS: return "ACPI NVS"; > - case E820_UNUSABLE: return "Unusable"; > - default: break; > - } > - return "Unknown"; > -} > - > -static int e820_sanitize(libxl_ctx *ctx, struct e820entry src[], > - uint32_t *nr_entries, > - unsigned long map_limitkb, > - unsigned long balloon_kb) > -{ > - uint64_t delta_kb = 0, start = 0, start_kb = 0, last = 0, ram_end; > - uint32_t i, idx = 0, nr; > - struct e820entry e820[E820MAX]; > - > - if (!src || !map_limitkb || !balloon_kb || !nr_entries) > - return ERROR_INVAL; > - > - nr = *nr_entries; > - if (!nr) > - return ERROR_INVAL; > - > - if (nr > E820MAX) > - return ERROR_NOMEM; > - > - /* Weed out anything under 1MB */ > - for (i = 0; i < nr; i++) { > - if (src[i].addr > 0x100000) > - continue; > - > - src[i].type = 0; > - src[i].size = 0; > - src[i].addr = -1ULL; > - } > - > - /* Find the lowest and highest entry in E820, skipping over > - * undesired entries. */ > - start = -1ULL; > - last = 0; > - for (i = 0; i < nr; i++) { > - if ((src[i].type == E820_RAM) || > - (src[i].type == E820_UNUSABLE) || > - (src[i].type == 0)) > - continue; > - > - start = src[i].addr < start ? src[i].addr : start; > - last = src[i].addr + src[i].size > last ? > - src[i].addr + src[i].size > last : last; > - } > - if (start > 1024) > - start_kb = start >> 10; > - > - /* Add the memory RAM region for the guest */ > - e820[idx].addr = 0; > - e820[idx].size = (uint64_t)map_limitkb << 10; > - e820[idx].type = E820_RAM; > - > - /* .. and trim if neccessary */ > - if (start_kb && map_limitkb > start_kb) { > - delta_kb = map_limitkb - start_kb; > - if (delta_kb) > - e820[idx].size -= (uint64_t)(delta_kb << 10); > - } > - /* Note: We don''t touch balloon_kb here. Will add it at the end. */ > - ram_end = e820[idx].addr + e820[idx].size; > - idx ++; > - > - LIBXL__LOG(ctx, LIBXL__LOG_DEBUG, "Memory: %"PRIu64"kB End of RAM: " \ > - "0x%"PRIx64" (PFN) Delta: %"PRIu64"kB, PCI start: %"PRIu64"kB " \ > - "(0x%"PRIx64" PFN), Balloon %"PRIu64"kB\n", (uint64_t)map_limitkb, > - ram_end >> 12, delta_kb, start_kb ,start >> 12, > - (uint64_t)balloon_kb); > - > - > - /* This whole code below is to guard against if the Intel IGD is passed into > - * the guest. If we don''t pass in IGD, this whole code can be ignored. > - * > - * The reason for this code is that Intel boxes fill their E820 with > - * E820_RAM amongst E820_RESERVED and we can''t just ditch those E820_RAM. > - * That is b/c any "gaps" in the E820 is considered PCI I/O space by > - * Linux and it would be utilized by the Intel IGD as I/O space while > - * in reality it was an RAM region. 
> - * > - * What this means is that we have to walk the E820 and for any region > - * that is RAM and below 4GB and above ram_end, needs to change its type > - * to E820_UNUSED. We also need to move some of the E820_RAM regions if > - * the overlap with ram_end. */ > - for (i = 0; i < nr; i++) { > - uint64_t end = src[i].addr + src[i].size; > - > - /* We don''t care about E820_UNUSABLE, but we need to > - * change the type to zero b/c the loop after this > - * sticks E820_UNUSABLE on the guest''s E820 but ignores > - * the ones with type zero. */ > - if ((src[i].type == E820_UNUSABLE) || > - /* Any region that is within the "RAM region" can > - * be safely ditched. */ > - (end < ram_end)) { > - src[i].type = 0; > - continue; > - } > - > - /* Look only at RAM regions. */ > - if (src[i].type != E820_RAM) > - continue; > - > - /* We only care about RAM regions below 4GB. */ > - if (src[i].addr >= (1ULL<<32)) > - continue; > - > - /* E820_RAM overlaps with our RAM region. Move it */ > - if (src[i].addr < ram_end) { > - uint64_t delta; > - > - src[i].type = E820_UNUSABLE; > - delta = ram_end - src[i].addr; > - /* The end < ram_end should weed this out */ > - if (src[i].size - delta < 0) > - src[i].type = 0; > - else { > - src[i].size -= delta; > - src[i].addr = ram_end; > - } > - if (src[i].addr + src[i].size != end) { > - /* We messed up somewhere */ > - src[i].type = 0; > - LIBXL__LOG_ERRNO(ctx, LIBXL__LOG_ERROR, "Computed E820 wrongly. Continuing on."); > - } > - } > - /* Lastly, convert the RAM to UNSUABLE. Look in the Linux kernel > - at git commit 2f14ddc3a7146ea4cd5a3d1ecd993f85f2e4f948 > - "xen/setup: Inhibit resource API from using System RAM E820 > - gaps as PCI mem gaps" for full explanation. */ > - if (end > ram_end) > - src[i].type = E820_UNUSABLE; > - } > - > - /* Check if there is a region between ram_end and start. */ > - if (start > ram_end) { > - int add_unusable = 1; > - for (i = 0; i < nr && add_unusable; i++) { > - if (src[i].type != E820_UNUSABLE) > - continue; > - if (ram_end != src[i].addr) > - continue; > - if (start != src[i].addr + src[i].size) { > - /* there is one, adjust it */ > - src[i].size = start - src[i].addr; > - } > - add_unusable = 0; > - } > - /* .. and if not present, add it in. This is to guard against > - the Linux guest assuming that the gap between the end of > - RAM region and the start of the E820_[ACPI,NVS,RESERVED] > - is PCI I/O space. Which it certainly is _not_. */ > - if (add_unusable) { > - e820[idx].type = E820_UNUSABLE; > - e820[idx].addr = ram_end; > - e820[idx].size = start - ram_end; > - idx++; > - } > - } > - /* Almost done: copy them over, ignoring the undesireable ones */ > - for (i = 0; i < nr; i++) { > - if ((src[i].type == E820_RAM) || > - (src[i].type == 0)) > - continue; > - > - e820[idx].type = src[i].type; > - e820[idx].addr = src[i].addr; > - e820[idx].size = src[i].size; > - idx++; > - } > - /* At this point we have the mapped RAM + E820 entries from src. */ > - if (balloon_kb) { > - /* and if we truncated the RAM region, then add it to the end. */ > - e820[idx].type = E820_RAM; > - e820[idx].addr = (uint64_t)(1ULL << 32) > last ? > - (uint64_t)(1ULL << 32) : last; > - /* also add the balloon memory to the end. 
*/ > - e820[idx].size = (uint64_t)(delta_kb << 10) + > - (uint64_t)(balloon_kb << 10); > - idx++; > - > - } > - nr = idx; > - > - for (i = 0; i < nr; i++) { > - LIBXL__LOG(ctx, LIBXL__LOG_DEBUG, ":\t[%"PRIx64" -> %"PRIx64"] %s", > - e820[i].addr >> 12, (e820[i].addr + e820[i].size) >> 12, > - e820_names(e820[i].type)); > - } > - > - /* Done: copy the sanitized version. */ > - *nr_entries = nr; > - memcpy(src, e820, nr * sizeof(struct e820entry)); > - return 0; > -} > - > -int libxl__e820_alloc(libxl__gc *gc, uint32_t domid, libxl_domain_config *d_config) > -{ > - libxl_ctx *ctx = libxl__gc_owner(gc); > - int rc; > - uint32_t nr; > - struct e820entry map[E820MAX]; > - libxl_domain_build_info *b_info; > - > - if (d_config == NULL || d_config->c_info.type == LIBXL_DOMAIN_TYPE_HVM) > - return ERROR_INVAL; > - > - b_info = &d_config->b_info; > - if (!b_info->u.pv.e820_host) > - return ERROR_INVAL; > - > - rc = xc_get_machine_memory_map(ctx->xch, map, E820MAX); > - if (rc < 0) { > - errno = rc; > - return ERROR_FAIL; > - } > - nr = rc; > - rc = e820_sanitize(ctx, map, &nr, b_info->target_memkb, > - (b_info->max_memkb - b_info->target_memkb) + > - b_info->u.pv.slack_memkb); > - if (rc) > - return ERROR_FAIL; > - > - rc = xc_domain_set_memory_map(ctx->xch, domid, map, nr); > - > - if (rc < 0) { > - errno = rc; > - return ERROR_FAIL; > - } > - return 0; > -} > - > /* > * Local variables: > * mode: C > diff --git a/tools/libxl/libxl_x86.c b/tools/libxl/libxl_x86.c > new file mode 100644 > index 0000000..7e11f2d > --- /dev/null > +++ b/tools/libxl/libxl_x86.c > @@ -0,0 +1,259 @@ > +#include "libxl_internal.h" > +#include "libxl_arch.h" > + > +static const char *e820_names(int type) > +{ > + switch (type) { > + case E820_RAM: return "RAM"; > + case E820_RESERVED: return "Reserved"; > + case E820_ACPI: return "ACPI"; > + case E820_NVS: return "ACPI NVS"; > + case E820_UNUSABLE: return "Unusable"; > + default: break; > + } > + return "Unknown"; > +} > + > +static int e820_sanitize(libxl_ctx *ctx, struct e820entry src[], > + uint32_t *nr_entries, > + unsigned long map_limitkb, > + unsigned long balloon_kb) > +{ > + uint64_t delta_kb = 0, start = 0, start_kb = 0, last = 0, ram_end; > + uint32_t i, idx = 0, nr; > + struct e820entry e820[E820MAX]; > + > + if (!src || !map_limitkb || !balloon_kb || !nr_entries) > + return ERROR_INVAL; > + > + nr = *nr_entries; > + if (!nr) > + return ERROR_INVAL; > + > + if (nr > E820MAX) > + return ERROR_NOMEM; > + > + /* Weed out anything under 1MB */ > + for (i = 0; i < nr; i++) { > + if (src[i].addr > 0x100000) > + continue; > + > + src[i].type = 0; > + src[i].size = 0; > + src[i].addr = -1ULL; > + } > + > + /* Find the lowest and highest entry in E820, skipping over > + * undesired entries. */ > + start = -1ULL; > + last = 0; > + for (i = 0; i < nr; i++) { > + if ((src[i].type == E820_RAM) || > + (src[i].type == E820_UNUSABLE) || > + (src[i].type == 0)) > + continue; > + > + start = src[i].addr < start ? src[i].addr : start; > + last = src[i].addr + src[i].size > last ? > + src[i].addr + src[i].size > last : last; > + } > + if (start > 1024) > + start_kb = start >> 10; > + > + /* Add the memory RAM region for the guest */ > + e820[idx].addr = 0; > + e820[idx].size = (uint64_t)map_limitkb << 10; > + e820[idx].type = E820_RAM; > + > + /* .. and trim if neccessary */ > + if (start_kb && map_limitkb > start_kb) { > + delta_kb = map_limitkb - start_kb; > + if (delta_kb) > + e820[idx].size -= (uint64_t)(delta_kb << 10); > + } > + /* Note: We don''t touch balloon_kb here. 
Will add it at the end. */ > + ram_end = e820[idx].addr + e820[idx].size; > + idx ++; > + > + LIBXL__LOG(ctx, LIBXL__LOG_DEBUG, "Memory: %"PRIu64"kB End of RAM: " \ > + "0x%"PRIx64" (PFN) Delta: %"PRIu64"kB, PCI start: %"PRIu64"kB " \ > + "(0x%"PRIx64" PFN), Balloon %"PRIu64"kB\n", (uint64_t)map_limitkb, > + ram_end >> 12, delta_kb, start_kb ,start >> 12, > + (uint64_t)balloon_kb); > + > + > + /* This whole code below is to guard against if the Intel IGD is passed into > + * the guest. If we don''t pass in IGD, this whole code can be ignored. > + * > + * The reason for this code is that Intel boxes fill their E820 with > + * E820_RAM amongst E820_RESERVED and we can''t just ditch those E820_RAM. > + * That is b/c any "gaps" in the E820 is considered PCI I/O space by > + * Linux and it would be utilized by the Intel IGD as I/O space while > + * in reality it was an RAM region. > + * > + * What this means is that we have to walk the E820 and for any region > + * that is RAM and below 4GB and above ram_end, needs to change its type > + * to E820_UNUSED. We also need to move some of the E820_RAM regions if > + * the overlap with ram_end. */ > + for (i = 0; i < nr; i++) { > + uint64_t end = src[i].addr + src[i].size; > + > + /* We don''t care about E820_UNUSABLE, but we need to > + * change the type to zero b/c the loop after this > + * sticks E820_UNUSABLE on the guest''s E820 but ignores > + * the ones with type zero. */ > + if ((src[i].type == E820_UNUSABLE) || > + /* Any region that is within the "RAM region" can > + * be safely ditched. */ > + (end < ram_end)) { > + src[i].type = 0; > + continue; > + } > + > + /* Look only at RAM regions. */ > + if (src[i].type != E820_RAM) > + continue; > + > + /* We only care about RAM regions below 4GB. */ > + if (src[i].addr >= (1ULL<<32)) > + continue; > + > + /* E820_RAM overlaps with our RAM region. Move it */ > + if (src[i].addr < ram_end) { > + uint64_t delta; > + > + src[i].type = E820_UNUSABLE; > + delta = ram_end - src[i].addr; > + /* The end < ram_end should weed this out */ > + if (src[i].size - delta < 0) > + src[i].type = 0; > + else { > + src[i].size -= delta; > + src[i].addr = ram_end; > + } > + if (src[i].addr + src[i].size != end) { > + /* We messed up somewhere */ > + src[i].type = 0; > + LIBXL__LOG_ERRNO(ctx, LIBXL__LOG_ERROR, "Computed E820 wrongly. Continuing on."); > + } > + } > + /* Lastly, convert the RAM to UNSUABLE. Look in the Linux kernel > + at git commit 2f14ddc3a7146ea4cd5a3d1ecd993f85f2e4f948 > + "xen/setup: Inhibit resource API from using System RAM E820 > + gaps as PCI mem gaps" for full explanation. */ > + if (end > ram_end) > + src[i].type = E820_UNUSABLE; > + } > + > + /* Check if there is a region between ram_end and start. */ > + if (start > ram_end) { > + int add_unusable = 1; > + for (i = 0; i < nr && add_unusable; i++) { > + if (src[i].type != E820_UNUSABLE) > + continue; > + if (ram_end != src[i].addr) > + continue; > + if (start != src[i].addr + src[i].size) { > + /* there is one, adjust it */ > + src[i].size = start - src[i].addr; > + } > + add_unusable = 0; > + } > + /* .. and if not present, add it in. This is to guard against > + the Linux guest assuming that the gap between the end of > + RAM region and the start of the E820_[ACPI,NVS,RESERVED] > + is PCI I/O space. Which it certainly is _not_. 
*/ > + if (add_unusable) { > + e820[idx].type = E820_UNUSABLE; > + e820[idx].addr = ram_end; > + e820[idx].size = start - ram_end; > + idx++; > + } > + } > + /* Almost done: copy them over, ignoring the undesireable ones */ > + for (i = 0; i < nr; i++) { > + if ((src[i].type == E820_RAM) || > + (src[i].type == 0)) > + continue; > + > + e820[idx].type = src[i].type; > + e820[idx].addr = src[i].addr; > + e820[idx].size = src[i].size; > + idx++; > + } > + /* At this point we have the mapped RAM + E820 entries from src. */ > + if (balloon_kb) { > + /* and if we truncated the RAM region, then add it to the end. */ > + e820[idx].type = E820_RAM; > + e820[idx].addr = (uint64_t)(1ULL << 32) > last ? > + (uint64_t)(1ULL << 32) : last; > + /* also add the balloon memory to the end. */ > + e820[idx].size = (uint64_t)(delta_kb << 10) + > + (uint64_t)(balloon_kb << 10); > + idx++; > + > + } > + nr = idx; > + > + for (i = 0; i < nr; i++) { > + LIBXL__LOG(ctx, LIBXL__LOG_DEBUG, ":\t[%"PRIx64" -> %"PRIx64"] %s", > + e820[i].addr >> 12, (e820[i].addr + e820[i].size) >> 12, > + e820_names(e820[i].type)); > + } > + > + /* Done: copy the sanitized version. */ > + *nr_entries = nr; > + memcpy(src, e820, nr * sizeof(struct e820entry)); > + return 0; > +} > + > +static int libxl__e820_alloc(libxl__gc *gc, uint32_t domid, libxl_domain_config *d_config) > +{ > + libxl_ctx *ctx = libxl__gc_owner(gc); > + int rc; > + uint32_t nr; > + struct e820entry map[E820MAX]; > + libxl_domain_build_info *b_info; > + > + if (d_config == NULL || d_config->c_info.type == LIBXL_DOMAIN_TYPE_HVM) > + return ERROR_INVAL; > + > + b_info = &d_config->b_info; > + if (!b_info->u.pv.e820_host) > + return ERROR_INVAL; > + > + rc = xc_get_machine_memory_map(ctx->xch, map, E820MAX); > + if (rc < 0) { > + errno = rc; > + return ERROR_FAIL; > + } > + nr = rc; > + rc = e820_sanitize(ctx, map, &nr, b_info->target_memkb, > + (b_info->max_memkb - b_info->target_memkb) + > + b_info->u.pv.slack_memkb); > + if (rc) > + return ERROR_FAIL; > + > + rc = xc_domain_set_memory_map(ctx->xch, domid, map, nr); > + > + if (rc < 0) { > + errno = rc; > + return ERROR_FAIL; > + } > + return 0; > +} > + > +int libxl__arch_domain_create(libxl__gc *gc, libxl_domain_config *d_config, > + uint32_t domid) > +{ > + int rc = 0; > + if (d_config->c_info.type == LIBXL_DOMAIN_TYPE_PV && > + d_config->b_info.u.pv.e820_host) { > + rc = libxl__e820_alloc(gc, domid, d_config); > + if (rc) > + LIBXL__LOG_ERRNO(libxl__gc_owner(gc), LIBXL__LOG_ERROR, > + "Failed while collecting E820 with: %d (errno:%d)\n", > + rc, errno); > + } > + return rc; > +} > -- > 1.7.2.5 >
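The effect of 7/7 is that the generic path in libxl_create.c stays architecture-neutral: x86 links libxl_x86.c and does its E820 work inside libxl__arch_domain_create, while IA64 and ARM link libxl_noarch.c, whose hook simply returns 0. As a sketch (a hypothetical libxl_arm.c, not anything in this series), a port wanting its own post-build setup would implement the same prototype from libxl_arch.h:

#include "libxl_internal.h"
#include "libxl_arch.h"

/* Sketch only: mirrors libxl_noarch.c; arch-specific post-build work
 * (the x86 version does its E820 setup here) would go in the body. */
int libxl__arch_domain_create(libxl__gc *gc, libxl_domain_config *d_config,
                              uint32_t domid)
{
    /* Nothing arch-specific to do yet. */
    return 0;
}

Note that do_domain_create ignores the hook's return value, which matches the previous behaviour where a libxl__e820_alloc failure was only logged.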
Stefano Stabellini writes ("[Xen-devel] [PATCH v5 0/7] arm: compile tools"):
> this patch series allows tools/ to compile on ARM, mostly providing an
> empty implementation for all the arch specific functions that are needed.
Thanks. I applied 1/7 and 2/7. 3/7 had conflicts due to David
Vrabel's change to the libxc hvm build API so I stopped there.
The others look plausible, though, and you can put my ack on them.
Ian.