Richard W.M. Jones
2013-Aug-09 16:58 UTC
[Libguestfs] [PATCH 0/4] Not quite working User-Mode Linux backend.
This is a User-Mode Linux backend for libguestfs. You can select it by doing: export LIBGUESTFS_BACKEND=uml export LIBGUESTFS_QEMU=/path/to/vmlinux Note we're reusing the 'qemu' variable in the handle for convenience. QEmu is not involved when using the UML backend. This almost works. UML itself crashes when the daemon tries to connect to the serial port. I suspect it's because I'm using the 'ssl3=fd:<N>' option. Rich.
Richard W.M. Jones
2013-Aug-09 16:58 UTC
[Libguestfs] [PATCH 1/4] appliance/init: Display uname -a in debug output.
From: "Richard W.M. Jones" <rjones at redhat.com> Useful for debugging kernel / architecture issues. Note this has no effect if debugging is disabled. --- appliance/init | 1 + 1 file changed, 1 insertion(+) diff --git a/appliance/init b/appliance/init index 5c9f04a..681e59b 100755 --- a/appliance/init +++ b/appliance/init @@ -116,6 +116,7 @@ ldmtool create all # These are useful when debugging. if grep -sq guestfs_verbose=1 /proc/cmdline; then + uname -a ls -lR /dev cat /proc/mounts lvm pvs -- 1.8.3.1
Richard W.M. Jones
2013-Aug-09 16:58 UTC
[Libguestfs] [PATCH 2/4] appliance/init: Allow guestfs_noreboot=1 flag to avoid reboot at end of /init script.
From: "Richard W.M. Jones" <rjones at redhat.com> --- appliance/init | 7 ++++++- 1 file changed, 6 insertions(+), 1 deletion(-) diff --git a/appliance/init b/appliance/init index 681e59b..5e1fac4 100755 --- a/appliance/init +++ b/appliance/init @@ -171,4 +171,9 @@ else fi sync -reboot -f + +if ! grep -sq guestfs_noreboot=1 /proc/cmdline; then + # qemu has the -no-reboot flag, so issuing a reboot here actually + # causes qemu to exit gracefully. + reboot -f +fi -- 1.8.3.1
Richard W.M. Jones
2013-Aug-09 16:58 UTC
[Libguestfs] [PATCH 3/4] conn: Make sure we display all log messages when qemu goes away.
From: "Richard W.M. Jones" <rjones at redhat.com> If qemu goes away it is (more in theory than in reality) possible for log messages to be lost in the kernel's socket buffer. This turns out to matter more for User-Mode Linux. --- src/conn-socket.c | 14 ++++++++++++-- 1 file changed, 12 insertions(+), 2 deletions(-) diff --git a/src/conn-socket.c b/src/conn-socket.c index 2b3f222..aa66e3d 100644 --- a/src/conn-socket.c +++ b/src/conn-socket.c @@ -169,12 +169,22 @@ read_data (guestfs_h *g, struct connection *connv, void *bufv, size_t len) if (errno == EINTR || errno == EAGAIN) continue; if (errno == ECONNRESET) /* essentially the same as EOF case */ - return 0; + goto closed; perrorf (g, "read_data: read"); return -1; } - if (n == 0) + if (n == 0) { + closed: + /* Even though qemu has gone away, there could be more log + * messages in the console socket buffer in the kernel. Read + * them out here. + */ + if (g->verbose && conn->console_sock >= 0) { + while (handle_log_message (g, conn) == 1) + ; + } return 0; + } buf += n; len -= n; -- 1.8.3.1
Richard W.M. Jones
2013-Aug-09 16:58 UTC
[Libguestfs] [PATCH 4/4] Add experimental User-Mode Linux backend.
From: "Richard W.M. Jones" <rjones at redhat.com> --- daemon/guestfsd.c | 24 +- po/POTFILES | 1 + src/Makefile.am | 1 + src/guestfs-internal.h | 10 + src/guestfs.pod | 52 ++++- src/handle.c | 11 + src/launch-uml.c | 609 +++++++++++++++++++++++++++++++++++++++++++++++++ src/launch.c | 1 + 8 files changed, 703 insertions(+), 6 deletions(-) create mode 100644 src/launch-uml.c diff --git a/daemon/guestfsd.c b/daemon/guestfsd.c index 580315a..5930279 100644 --- a/daemon/guestfsd.c +++ b/daemon/guestfsd.c @@ -210,8 +210,6 @@ main (int argc, char *argv[]) printf ("could not read linux command line\n"); } - free (cmdline); - #ifndef WIN32 /* Make sure SIGPIPE doesn't kill us. */ struct sigaction sa; @@ -254,7 +252,22 @@ main (int argc, char *argv[]) copy_lvm (); /* Connect to virtio-serial channel. */ - int sock = open (VIRTIO_SERIAL_CHANNEL, O_RDWR|O_CLOEXEC); + char *channel, *p; + if (cmdline && (p = strstr (cmdline, "guestfs_channel=")) != NULL) { + p += 16; + channel = strndup (p, strcspn (p, " \n")); + } + else + channel = strdup (VIRTIO_SERIAL_CHANNEL); + if (!channel) { + perror ("strdup"); + exit (EXIT_FAILURE); + } + + if (verbose) + printf ("trying to open virtio-serial channel '%s'\n", channel); + + int sock = open (channel, O_RDWR|O_CLOEXEC); if (sock == -1) { fprintf (stderr, "\n" @@ -269,10 +282,13 @@ main (int argc, char *argv[]) "output to the libguestfs developers, either in a bug report\n" "or on the libguestfs redhat com mailing list.\n" "\n"); - perror (VIRTIO_SERIAL_CHANNEL); + perror (channel); exit (EXIT_FAILURE); } + /* cmdline is not used after this point */ + free (cmdline); + /* Wait for udev devices to be created. If you start libguestfs, * especially with disks that contain complex (eg. 
mdadm) data * already, then it is possible for the 'mdadm' and LVM commands diff --git a/po/POTFILES b/po/POTFILES index 2e9a00b..e8f01fd 100644 --- a/po/POTFILES +++ b/po/POTFILES @@ -280,6 +280,7 @@ src/inspect.c src/journal.c src/launch-direct.c src/launch-libvirt.c +src/launch-uml.c src/launch-unix.c src/launch.c src/libvirt-auth.c diff --git a/src/Makefile.am b/src/Makefile.am index 9e13c78..b36f59d 100644 --- a/src/Makefile.am +++ b/src/Makefile.am @@ -108,6 +108,7 @@ libguestfs_la_SOURCES = \ launch.c \ launch-direct.c \ launch-libvirt.c \ + launch-uml.c \ launch-unix.c \ libvirt-auth.c \ libvirt-domain.c \ diff --git a/src/guestfs-internal.h b/src/guestfs-internal.h index 28e40a2..451d678 100644 --- a/src/guestfs-internal.h +++ b/src/guestfs-internal.h @@ -97,6 +97,7 @@ enum state { CONFIG = 0, LAUNCHING = 1, READY = 2, enum backend { BACKEND_DIRECT, BACKEND_LIBVIRT, + BACKEND_UML, BACKEND_UNIX, }; @@ -209,6 +210,7 @@ struct backend_ops { }; extern struct backend_ops backend_ops_direct; extern struct backend_ops backend_ops_libvirt; +extern struct backend_ops backend_ops_uml; extern struct backend_ops backend_ops_unix; /* Connection module. A 'connection' represents the appliance console @@ -432,6 +434,14 @@ struct guestfs_h char *virt_selinux_label; char *virt_selinux_imagelabel; bool virt_selinux_norelabel_disks; + + struct { /* Used only by src/launch-uml.c. */ + pid_t pid; /* vmlinux PID. */ + pid_t recoverypid; /* Recovery process PID. */ + + char **cmdline; /* Only used in child, does not need freeing. */ + size_t cmdline_size; + } uml; }; /* Per-filesystem data stored for inspect_os. */ diff --git a/src/guestfs.pod b/src/guestfs.pod index 93c7cd4..2bccdbe 100644 --- a/src/guestfs.pod +++ b/src/guestfs.pod @@ -1406,8 +1406,8 @@ options into the C<args> array. The backend (previously known as the "attach method") controls how libguestfs creates and/or connects to the backend daemon, eg. 
by -starting qemu directly, or by using libvirt to manage an appliance, or -connecting to an already running daemon. +starting qemu directly, or using libvirt to manage an appliance, +running User-Mode Linux, or connecting to an already running daemon. You can set the backend by calling L</guestfs_set_backend>, or by setting the environment variable C<LIBGUESTFS_BACKEND>. @@ -1440,6 +1440,17 @@ with a URI would be C<libvirt:qemu:///session> The libvirt backend supports more features, including hotplugging (see L</HOTPLUGGING>) and sVirt. +=item C<uml> + +Run the User-Mode Linux kernel. The location of the kernel is set +using C<$LIBGUESTFS_QEMU> or using the L</guestfs_set_qemu> API (note +that qemu is not involved, we just reuse the same variable in the +handle for convenience). + +User-Mode Linux can be much faster, simpler and more lightweight than +using a full-blown virtual machine, but it also has some shortcomings. +See L</USER-MODE LINUX BACKEND> below. + =item C<unix:I<path>> Connect to the Unix domain socket I<path>. @@ -1520,6 +1531,43 @@ The virtual machine needs to have been set up beforehand so that it has the virtio-serial channel and so that guestfsd is running inside it. +=head2 USER-MODE LINUX BACKEND + +B<This is an experimental feature> which you should use with care. + +Setting the following environment variables (or the equivalent in the +API) selects the User-Mode Linux backend: + + export LIBGUESTFS_BACKEND=uml + export LIBGUESTFS_QEMU=/path/to/vmlinux + +C<vmlinux> (or it may be called C<linux>) is the Linux binary, +compiled to run as a userspace process. Note that we reuse the qemu +variable in the handle for convenience; qemu is not involved. Your +Linux distro may provide C<vmlinux>, or you may need to compile it +yourself from the kernel source (which is dead easy, follow the +instructions here: +L<http://user-mode-linux.sourceforge.net/source.html>). 
+ +User-Mode Linux can be faster and more lightweight than running a +full-blown virtual machine as the backend (especially if you are +already running libguestfs in a virtual machine or cloud instance), +but it also has some important shortcomings. The main ones are: + +=over 4 + +=item UML only supports raw-format images + +Only plain raw-format images will work. No qcow2, no backing files. + +=item UML does not support any remote drives + +No NBD, etc. + +=item UML only works on ix86 and x86-64 + +=back + =head2 ABI GUARANTEE We guarantee the libguestfs ABI (binary interface), for public, diff --git a/src/handle.c b/src/handle.c index b4291e8..1aea423 100644 --- a/src/handle.c +++ b/src/handle.c @@ -611,6 +611,13 @@ parse_backend (guestfs_h *g, const char *method) return 0; } + if (STREQ (method, "uml")) { + g->backend = BACKEND_UML; + free (g->backend_arg); + g->backend_arg = NULL; + return 0; + } + if (STRPREFIX (method, "unix:") && strlen (method) > 5) { g->backend = BACKEND_UNIX; free (g->backend_arg); @@ -656,6 +663,10 @@ guestfs__get_backend (guestfs_h *g) ret = safe_asprintf (g, "libvirt:%s", g->backend_arg); break; + case BACKEND_UML: + ret = safe_strdup (g, "uml"); + break; + case BACKEND_UNIX: ret = safe_asprintf (g, "unix:%s", g->backend_arg); break; diff --git a/src/launch-uml.c b/src/launch-uml.c new file mode 100644 index 0000000..1099ec1 --- /dev/null +++ b/src/launch-uml.c @@ -0,0 +1,609 @@ +/* libguestfs + * Copyright (C) 2009-2013 Red Hat Inc. + * + * This library is free software; you can redistribute it and/or + * modify it under the terms of the GNU Lesser General Public + * License as published by the Free Software Foundation; either + * version 2 of the License, or (at your option) any later version. + * + * This library is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. 
See the GNU + * Lesser General Public License for more details. + * + * You should have received a copy of the GNU Lesser General Public + * License along with this library; if not, write to the Free Software + * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA + */ + +#include <config.h> + +#include <stdio.h> +#include <stdlib.h> +#include <string.h> +#include <stdbool.h> +#include <stdint.h> +#include <inttypes.h> +#include <unistd.h> +#include <errno.h> +#include <sys/types.h> +#include <sys/socket.h> +#include <sys/wait.h> + +#include "cloexec.h" + +#include "guestfs.h" +#include "guestfs-internal.h" +#include "guestfs-internal-actions.h" +#include "guestfs_protocol.h" + +static void print_vmlinux_command_line (guestfs_h *g, char **argv); +static char *make_cow_overlay (guestfs_h *g, const char *original); + +/* Functions to build up the vmlinux command line. These are only run + * in the child process so no clean-up is required. + */ +static void +alloc_cmdline (guestfs_h *g) +{ + g->uml.cmdline_size = 1; + g->uml.cmdline = safe_malloc (g, sizeof (char *)); + g->uml.cmdline[0] = g->qemu; +} + +static void +incr_cmdline_size (guestfs_h *g) +{ + g->uml.cmdline_size++; + g->uml.cmdline + safe_realloc (g, g->uml.cmdline, sizeof (char *) * g->uml.cmdline_size); +} + +static void +add_cmdline (guestfs_h *g, const char *str) +{ + incr_cmdline_size (g); + g->uml.cmdline[g->uml.cmdline_size-1] = safe_strdup (g, str); +} + +/* Test for features which are not supported by the UML backend. + * Possibly some of these should just be warnings, not errors. 
+ */ +static bool +uml_supported (guestfs_h *g) +{ + size_t i; + struct drive *drv; + + if (g->enable_network) { + error (g, _("uml backend does not support networking")); + return false; + } + if (g->smp > 1) { + error (g, _("uml backend does not support SMP")); + return false; + } + + ITER_DRIVES (g, i, drv) { + if (drv->src.protocol != drive_protocol_file) { + error (g, _("uml backend does not support remote drives")); + return false; + } + if (drv->format && STRNEQ (drv->format, "raw")) { + error (g, _("uml backend does not support non-raw-format drives")); + return false; + } + if (drv->iface) { + error (g, + _("uml backend does not support drives with 'iface' parameter")); + return false; + } + if (drv->disk_label) { + error (g, + _("uml backend does not support drives with 'label' parameter")); + return false; + } + } + + return true; +} + +static int +launch_uml (guestfs_h *g, const char *arg) +{ + int console_sock = -1, daemon_sock = -1; + int r; + int csv[2], dsv[2]; + CLEANUP_FREE char *kernel = NULL, *initrd = NULL, *appliance = NULL; + int has_appliance_drive; + CLEANUP_FREE char *appliance_cow = NULL; + uint32_t size; + CLEANUP_FREE void *buf = NULL; + struct drive *drv; + size_t i; + + if (!uml_supported (g)) + return -1; + + if (!g->nr_drives) { + error (g, _("you must call guestfs_add_drive before guestfs_launch")); + return -1; + } + + /* Locate and/or build the appliance. */ + if (guestfs___build_appliance (g, &kernel, &initrd, &appliance) == -1) + return -1; + has_appliance_drive = appliance != NULL; + + /* Create COW overlays for any readonly drives, and for the root. + * Note that the documented syntax ubd0=cow,orig does not work since + * kernel 3.3. 
See: + * http://thread.gmane.org/gmane.linux.uml.devel/13556 + */ + ITER_DRIVES (g, i, drv) { + if (drv->readonly) { + drv->priv = make_cow_overlay (g, drv->src.u.path); + if (!drv->priv) + goto cleanup0; + drv->free_priv = free; + } + } + + if (has_appliance_drive) { + appliance_cow = make_cow_overlay (g, appliance); + if (!appliance_cow) + goto cleanup0; + } + + /* The socket that the daemon will talk to us on. + */ + if (socketpair (AF_LOCAL, SOCK_STREAM|SOCK_CLOEXEC, 0, dsv) == -1) { + perrorf (g, "socketpair"); + goto cleanup0; + } + + /* The console socket. */ + if (!g->direct_mode) { + if (socketpair (AF_LOCAL, SOCK_STREAM|SOCK_CLOEXEC, 0, csv) == -1) { + perrorf (g, "socketpair"); + close (dsv[0]); + close (dsv[1]); + goto cleanup0; + } + } + + r = fork (); + if (r == -1) { + perrorf (g, "fork"); + if (!g->direct_mode) { + close (csv[0]); + close (csv[1]); + } + close (dsv[0]); + close (dsv[1]); + goto cleanup0; + } + + if (r == 0) { /* Child (vmlinux). */ + char *buf; + struct qemu_param *qp; + char *term = getenv ("TERM"); + + /* Set up the full command line. Do this in the subprocess so we + * don't need to worry about cleaning up. + */ + alloc_cmdline (g); + + /* Set memory size. */ + buf = safe_asprintf (g, "mem=%dM", g->memsize); + add_cmdline (g, buf); + free (buf); + + /* vmlinux appears to ignore this, but let's add it anyway. */ + buf = safe_asprintf (g, "initrd=%s", initrd); + add_cmdline (g, buf); + free (buf); + + /* Make sure our appliance init script runs first. */ + add_cmdline (g, "init=/init"); + + /* This tells the /init script not to reboot at the end. */ + add_cmdline (g, "guestfs_noreboot=1"); + + /* Root filesystem should be mounted read-write (default seems to + * be "ro"). + */ + add_cmdline (g, "rw"); + + /* See also guestfs___appliance_command_line. */ + if (g->verbose) + add_cmdline (g, "guestfs_verbose=1"); + + add_cmdline (g, "panic=1"); + + buf = safe_asprintf (g, "TERM=%s", term ? 
term : "linux"); + add_cmdline (g, buf); + free (buf); + + if (g->selinux) + add_cmdline (g, "selinux=1 enforcing=0"); + else + add_cmdline (g, "selinux=0"); + + /* XXX This isn't quite right. Multiple append args won't work. */ + if (g->append) + add_cmdline (g, g->append); + + /* Add the drives. */ + ITER_DRIVES (g, i, drv) { + if (!drv->readonly) + buf = safe_asprintf (g, "ubd%zu=%s", i, drv->src.u.path); + else + buf = safe_asprintf (g, "ubd%zu=%s", i, (char *) drv->priv); + add_cmdline (g, buf); + free (buf); + } + + /* Add the ext2 appliance drive (after all the drives). */ + if (has_appliance_drive) { + char drv_name[64] = "ubd"; + guestfs___drive_name (g->nr_drives, &drv_name[3]); + + buf = safe_asprintf (g, "ubd%zu=%s", g->nr_drives, appliance_cow); + add_cmdline (g, buf); + free (buf); + buf = safe_asprintf (g, "root=/dev/%s", drv_name); + add_cmdline (g, buf); + free (buf); + } + + /* Create the daemon socket. */ + close (dsv[0]); + buf = safe_asprintf (g, "ssl3=fd:%d", dsv[1]); + add_cmdline (g, buf); + free (buf); + add_cmdline (g, "guestfs_channel=/dev/ttyS3"); + +#if 0 /* XXX This could be made to work. */ +#ifdef VALGRIND_DAEMON + /* Set up virtio-serial channel for valgrind messages. */ + add_cmdline (g, "-chardev"); + snprintf (buf, sizeof buf, "file,path=%s/valgrind.log.%d,id=valgrind", + VALGRIND_LOG_PATH, getpid ()); + add_cmdline (g, buf); + add_cmdline (g, "-device"); + add_cmdline (g, "virtserialport,chardev=valgrind,name=org.libguestfs.valgrind"); +#endif +#endif + + /* Add any vmlinux parameters. */ + for (qp = g->qemu_params; qp; qp = qp->next) { + add_cmdline (g, qp->qemu_param); + if (qp->qemu_value) + add_cmdline (g, qp->qemu_value); + } + + /* Finish off the command line. */ + incr_cmdline_size (g); + g->uml.cmdline[g->uml.cmdline_size-1] = NULL; + + if (!g->direct_mode) { + /* Set up stdin, stdout, stderr. 
*/ + close (0); + close (1); + close (csv[0]); + + /* We set the FD_CLOEXEC flag on the socket above, but now (in + * the child) it's safe to unset this flag so vmlinux can use the + * socket. + */ + set_cloexec_flag (csv[1], 0); + + /* Stdin. */ + if (dup (csv[1]) == -1) { + dup_failed: + perror ("dup failed"); + _exit (EXIT_FAILURE); + } + /* Stdout. */ + if (dup (csv[1]) == -1) + goto dup_failed; + + /* Send stderr to the pipe as well. */ + close (2); + if (dup (csv[1]) == -1) + goto dup_failed; + + close (csv[1]); + } + + /* Dump the command line (after setting up stderr above). */ + if (g->verbose) + print_vmlinux_command_line (g, g->uml.cmdline); + + /* Put vmlinux in a new process group. */ + if (g->pgroup) + setpgid (0, 0); + + setenv ("LC_ALL", "C", 1); + + execv (g->qemu, g->uml.cmdline); /* Run vmlinux. */ + perror (g->qemu); + _exit (EXIT_FAILURE); + } + + /* Parent (library). */ + g->uml.pid = r; + + /* Fork the recovery process off which will kill vmlinux if the + * parent process fails to do so (eg. if the parent segfaults). + */ + g->uml.recoverypid = -1; + if (g->recovery_proc) { + r = fork (); + if (r == 0) { + int i, fd, max_fd; + struct sigaction sa; + pid_t vmlinux_pid = g->uml.pid; + pid_t parent_pid = getppid (); + + /* Remove all signal handlers. See the justification here: + * https://www.redhat.com/archives/libvir-list/2008-August/msg00303.html + * We don't mask signal handlers yet, so this isn't completely + * race-free, but better than not doing it at all. + */ + memset (&sa, 0, sizeof sa); + sa.sa_handler = SIG_DFL; + sa.sa_flags = 0; + sigemptyset (&sa.sa_mask); + for (i = 1; i < NSIG; ++i) + sigaction (i, &sa, NULL); + + /* Close all other file descriptors. This ensures that we don't + * hold open (eg) pipes from the parent process. 
+ */ + max_fd = sysconf (_SC_OPEN_MAX); + if (max_fd == -1) + max_fd = 1024; + if (max_fd > 65536) + max_fd = 65536; /* bound the amount of work we do here */ + for (fd = 0; fd < max_fd; ++fd) + close (fd); + + /* It would be nice to be able to put this in the same process + * group as vmlinux (ie. setpgid (0, vmlinux_pid)). However + * this is not possible because we don't have any guarantee here + * that the vmlinux process has started yet. + */ + if (g->pgroup) + setpgid (0, 0); + + /* Writing to argv is hideously complicated and error prone. See: + * http://git.postgresql.org/gitweb/?p=postgresql.git;a=blob;f=src/backend/utils/misc/ps_status.c;hb=HEAD + */ + + /* Loop around waiting for one or both of the other processes to + * disappear. It's fair to say this is very hairy. The PIDs that + * we are looking at might be reused by another process. We are + * effectively polling. Is the cure worse than the disease? + */ + for (;;) { + /* vmlinux's gone away, we aren't needed */ + if (kill (vmlinux_pid, 0) == -1) + _exit (EXIT_SUCCESS); + if (kill (parent_pid, 0) == -1) { + /* Parent's gone away, vmlinux still around, so kill vmlinux. */ + kill (vmlinux_pid, 9); + _exit (EXIT_SUCCESS); + } + sleep (2); + } + } + + /* Don't worry, if the fork failed, this will be -1. The recovery + * process isn't essential. + */ + g->uml.recoverypid = r; + } + + if (!g->direct_mode) { + /* Close the other end of the console socketpair. */ + close (csv[1]); + + console_sock = csv[0]; /* stdin of child */ + csv[0] = -1; + } + + daemon_sock = dsv[0]; + close (dsv[1]); + dsv[0] = -1; + + g->state = LAUNCHING; + + /* Wait for vmlinux to start and to connect back to us via + * virtio-serial and send the GUESTFS_LAUNCH_FLAG message. + */ + g->conn + guestfs___new_conn_socket_connected (g, daemon_sock, console_sock); + if (!g->conn) + goto cleanup1; + + /* g->conn now owns these sockets. 
*/ + daemon_sock = console_sock = -1; + + /* We now have to wait for vmlinux to start up, the daemon to start + * running, and for it to send the GUESTFS_LAUNCH_FLAG to us. + */ + r = guestfs___recv_from_daemon (g, &size, &buf); + + if (r == -1) { + guestfs___launch_failed_error (g); + goto cleanup1; + } + + if (size != GUESTFS_LAUNCH_FLAG) { + guestfs___launch_failed_error (g); + goto cleanup1; + } + + if (g->verbose) + guestfs___print_timestamped_message (g, "appliance is up"); + + /* This is possible in some really strange situations, such as + * guestfsd starts up OK but then vmlinux immediately exits. Check + * for it because the caller is probably expecting to be able to + * send commands after this function returns. + */ + if (g->state != READY) { + error (g, _("vmlinux launched and contacted daemon, but state != READY")); + goto cleanup1; + } + + if (has_appliance_drive) + guestfs___add_dummy_appliance_drive (g); + + return 0; + + cleanup1: + if (!g->direct_mode && csv[0] >= 0) + close (csv[0]); + if (dsv[0] >= 0) + close (dsv[0]); + if (g->uml.pid > 0) kill (g->uml.pid, 9); + if (g->uml.recoverypid > 0) kill (g->uml.recoverypid, 9); + if (g->uml.pid > 0) waitpid (g->uml.pid, NULL, 0); + if (g->uml.recoverypid > 0) waitpid (g->uml.recoverypid, NULL, 0); + g->uml.pid = 0; + g->uml.recoverypid = 0; + memset (&g->launch_t, 0, sizeof g->launch_t); + + cleanup0: + if (daemon_sock >= 0) + close (daemon_sock); + if (console_sock >= 0) + close (console_sock); + if (g->conn) { + g->conn->ops->free_connection (g, g->conn); + g->conn = NULL; + } + g->state = CONFIG; + return -1; +} + +/* Run uml_mkcow to create a COW overlay. This works around a kernel + * bug in UML option parsing. 
+ */ +static char * +make_cow_overlay (guestfs_h *g, const char *original) +{ + CLEANUP_CMD_CLOSE struct command *cmd = guestfs___new_command (g); + char *cow; + int r; + + cow = safe_asprintf (g, "%s/cow%d", g->tmpdir, g->unique++); + + guestfs___cmd_add_arg (cmd, "uml_mkcow"); + guestfs___cmd_add_arg (cmd, cow); + guestfs___cmd_add_arg (cmd, original); + r = guestfs___cmd_run (cmd); + if (r == -1) { + free (cow); + return NULL; + } + if (!WIFEXITED (r) || WEXITSTATUS (r) != 0) { + guestfs___external_command_failed (g, r, "uml_mkcow", original); + free (cow); + return NULL; + } + + return cow; /* caller must free */ +} + +/* This is called from the forked subprocess just before vmlinux runs, + * so it can just print the message straight to stderr, where it will + * be picked up and funnelled through the usual appliance event API. + */ +static void +print_vmlinux_command_line (guestfs_h *g, char **argv) +{ + size_t i = 0; + int needs_quote; + + struct timeval tv; + gettimeofday (&tv, NULL); + fprintf (stderr, "[%05" PRIi64 "ms] ", + guestfs___timeval_diff (&g->launch_t, &tv)); + + while (argv[i]) { + if (i > 0) fputc (' ', stderr); + + /* Does it need shell quoting? This only deals with simple cases. */ + needs_quote = strcspn (argv[i], " ") != strlen (argv[i]); + + if (needs_quote) fputc ('\'', stderr); + fprintf (stderr, "%s", argv[i]); + if (needs_quote) fputc ('\'', stderr); + i++; + } + + fputc ('\n', stderr); +} + +static int +shutdown_uml (guestfs_h *g, int check_for_errors) +{ + int ret = 0; + int status; + + /* Signal vmlinux to shutdown cleanly, and kill the recovery process. */ + if (g->uml.pid > 0) { + debug (g, "sending SIGTERM to process %d", g->uml.pid); + kill (g->uml.pid, SIGTERM); + } + if (g->uml.recoverypid > 0) kill (g->uml.recoverypid, 9); + + /* Wait for subprocess(es) to exit. 
*/ + if (g->uml.pid > 0) { + if (waitpid (g->uml.pid, &status, 0) == -1) { + perrorf (g, "waitpid (vmlinux)"); + ret = -1; + } + else if (!WIFEXITED (status) || WEXITSTATUS (status) != 0) { + guestfs___external_command_failed (g, status, g->qemu, NULL); + ret = -1; + } + } + if (g->uml.recoverypid > 0) waitpid (g->uml.recoverypid, NULL, 0); + + g->uml.pid = g->uml.recoverypid = 0; + + return ret; +} + +static int +get_pid_uml (guestfs_h *g) +{ + if (g->uml.pid > 0) + return g->uml.pid; + else { + error (g, "get_pid: no vmlinux subprocess"); + return -1; + } +} + +/* XXX This is a guess. UML appears to use a single major, and puts + * ubda at minor 0 with each partition at minors 1-15, ubdb at minor + * 16, etc, so my guess is that the maximum is 256/16. + */ +static int +max_disks_uml (guestfs_h *g) +{ + return 256/16; +} + +struct backend_ops backend_ops_uml = { + .launch = launch_uml, + .shutdown = shutdown_uml, + .get_pid = get_pid_uml, + .max_disks = max_disks_uml, +}; diff --git a/src/launch.c b/src/launch.c index 894ca15..9e2b3ae 100644 --- a/src/launch.c +++ b/src/launch.c @@ -44,6 +44,7 @@ get_backend_ops (guestfs_h *g) switch (g->backend) { case BACKEND_DIRECT: return &backend_ops_direct; case BACKEND_LIBVIRT: return &backend_ops_libvirt; + case BACKEND_UML: return &backend_ops_uml; case BACKEND_UNIX: return &backend_ops_unix; default: abort (); } -- 1.8.3.1
Daniel P. Berrange
2013-Aug-09 17:09 UTC
[Libguestfs] [PATCH 0/4] Not quite working User-Mode Linux backend.
On Fri, Aug 09, 2013 at 05:58:04PM +0100, Richard W.M. Jones wrote:
> This is a User-Mode Linux backend for libguestfs. You can select it > by doing: > > export LIBGUESTFS_BACKEND=uml > export LIBGUESTFS_QEMU=/path/to/vmlinux > > Note we're reusing the 'qemu' variable in the handle for convenience. > QEmu is not involved when using the UML backend. > > This almost works. UML itself crashes when the daemon tries to > connect to the serial port. I suspect it's because I'm using the > 'ssl3=fd:<N>' option.

Neat. We also have a (possibly not quite working) UML driver in libvirt which I added mostly for fun. I wonder what work libvirt UML would require in order to support libguestfs usage. There's plenty of scope for integrating the libvirt UML driver with libvirt's auditing, svirt, and cgroups infrastructure too, if anyone thinks UML is still actually useful in the real world. Daniel -- |: http://berrange.com -o- http://www.flickr.com/photos/dberrange/ :| |: http://libvirt.org -o- http://virt-manager.org :| |: http://autobuild.org -o- http://search.cpan.org/~danberr/ :| |: http://entangle-photo.org -o- http://live.gnome.org/gtk-vnc :|
Apparently Analogous Threads
- [PATCH v2 0/4] Experimental User-Mode Linux backend.
- [PATCH 0/4] Small refactorings of the protocol layer.
- [PATCH] Add safe wrapper around waitpid which deals with EINTR correctly.
- [PATCH 0/3] protocol: Abstract out socket operations.
- Libguestfs (1.22.6) driver/changes for mingw/win32