This is a complete rewrite of the Xen TPM frontend driver, taking advantage of a simplified frontend/backend interface and adding support for cancellation and timeouts. Signed-off-by: Daniel De Graaf <dgdegra@tycho.nsa.gov> --- This patch does not include the v1 interfaces from linux-2.6.18-xen.hg in include/xen/interface/io/tpmif.h, since they do not pass checkpatch due to their use of typedefs, are not 32/64-bit clean, and are not used in this patch. This patch relies on the mini-os patch to tpmback/front: [PATCH v5.1 01/12] mini-os/tpm{back,front}: Change shared page ABI which should be sent as a reply. drivers/char/tpm/Kconfig | 11 ++ drivers/char/tpm/Makefile | 1 + drivers/char/tpm/tpm.h | 10 + drivers/char/tpm/xen-tpmfront.c | 418 +++++++++++++++++++++++++++++++++++++++ include/xen/interface/io/tpmif.h | 50 +++++ 5 files changed, 490 insertions(+) create mode 100644 drivers/char/tpm/xen-tpmfront.c create mode 100644 include/xen/interface/io/tpmif.h diff --git a/drivers/char/tpm/Kconfig b/drivers/char/tpm/Kconfig index 915875e..23d272f 100644 --- a/drivers/char/tpm/Kconfig +++ b/drivers/char/tpm/Kconfig @@ -81,4 +81,15 @@ config TCG_IBMVTPM will be accessible from within Linux. To compile this driver as a module, choose M here; the module will be called tpm_ibmvtpm. +config TCG_XEN + tristate "XEN TPM Interface" + depends on TCG_TPM && XEN + ---help--- + If you want to make TPM support available to a Xen user domain, + say Yes and it will be accessible from within Linux. See + the manpages for xl, xl.conf, and docs/misc/vtpm.txt in + the Xen source repository for more details. + To compile this driver as a module, choose M here; the module + will be called xen-tpmfront. 
+ endif # TCG_TPM diff --git a/drivers/char/tpm/Makefile b/drivers/char/tpm/Makefile index 5b3fc8b..424ecaa 100644 --- a/drivers/char/tpm/Makefile +++ b/drivers/char/tpm/Makefile @@ -17,3 +17,4 @@ obj-$(CONFIG_TCG_NSC) += tpm_nsc.o obj-$(CONFIG_TCG_ATMEL) += tpm_atmel.o obj-$(CONFIG_TCG_INFINEON) += tpm_infineon.o obj-$(CONFIG_TCG_IBMVTPM) += tpm_ibmvtpm.o +obj-$(CONFIG_TCG_XEN) += xen-tpmfront.o diff --git a/drivers/char/tpm/tpm.h b/drivers/char/tpm/tpm.h index 8ef7649..b575892 100644 --- a/drivers/char/tpm/tpm.h +++ b/drivers/char/tpm/tpm.h @@ -328,6 +328,16 @@ extern int tpm_pm_resume(struct device *); extern int wait_for_tpm_stat(struct tpm_chip *, u8, unsigned long, wait_queue_head_t *); +static inline void *chip_get_private(const struct tpm_chip *chip) +{ + return chip->vendor.data; +} + +static inline void chip_set_private(struct tpm_chip *chip, void *priv) +{ + chip->vendor.data = priv; +} + #ifdef CONFIG_ACPI extern int tpm_add_ppi(struct kobject *); extern void tpm_remove_ppi(struct kobject *); diff --git a/drivers/char/tpm/xen-tpmfront.c b/drivers/char/tpm/xen-tpmfront.c new file mode 100644 index 0000000..c02f172 --- /dev/null +++ b/drivers/char/tpm/xen-tpmfront.c @@ -0,0 +1,418 @@ +#include <linux/errno.h> +#include <linux/err.h> +#include <linux/interrupt.h> +#include <linux/mutex.h> +#include <linux/uaccess.h> +#include <xen/events.h> +#include <xen/interface/grant_table.h> +#include <xen/interface/io/tpmif.h> +#include <xen/grant_table.h> +#include <xen/xenbus.h> +#include <xen/page.h> +#include "tpm.h" + +struct tpm_private { + struct tpm_chip *chip; + struct xenbus_device *dev; + + struct vtpm_shared_page *shr; + + unsigned int evtchn; + int ring_ref; + domid_t backend_id; +}; + +enum status_bits { + VTPM_STATUS_RUNNING = 0x1, + VTPM_STATUS_IDLE = 0x2, + VTPM_STATUS_RESULT = 0x4, + VTPM_STATUS_CANCELED = 0x8, +}; + +static u8 vtpm_status(struct tpm_chip *chip) +{ + struct tpm_private *priv = chip_get_private(chip); + switch (priv->shr->state) { + 
case VTPM_STATE_IDLE: + return VTPM_STATUS_IDLE | VTPM_STATUS_CANCELED; + case VTPM_STATE_FINISH: + return VTPM_STATUS_IDLE | VTPM_STATUS_RESULT; + case VTPM_STATE_SUBMIT: + case VTPM_STATE_CANCEL: + return VTPM_STATUS_RUNNING; + default: + return 0; + } +} + +static void vtpm_cancel(struct tpm_chip *chip) +{ + struct tpm_private *priv = chip_get_private(chip); + priv->shr->state = VTPM_STATE_CANCEL; + notify_remote_via_evtchn(priv->evtchn); +} + +static int vtpm_send(struct tpm_chip *chip, u8 *buf, size_t count) +{ + struct tpm_private *priv = chip_get_private(chip); + struct vtpm_shared_page *shr = priv->shr; + unsigned int offset = sizeof(*shr) + 4*shr->nr_extra_pages; + + u32 ordinal; + unsigned long duration; + + if (count < TPM_HEADER_SIZE) + return -EIO; + + if (offset > PAGE_SIZE) + return -EIO; + + if (offset + count > PAGE_SIZE) + return -EIO; + + /* Wait for completion of any existing command or cancellation */ + if (wait_for_tpm_stat(chip, VTPM_STATUS_IDLE, + chip->vendor.timeout_c, &chip->vendor.read_queue) < 0) { + vtpm_cancel(chip); + return -ETIME; + } + + memcpy(offset + (u8 *)shr, buf, count); + shr->length = count; + barrier(); + shr->state = VTPM_STATE_SUBMIT; + notify_remote_via_evtchn(priv->evtchn); + + ordinal = be32_to_cpu(*((__be32 *) (buf + 6))); + duration = tpm_calc_ordinal_duration(chip, ordinal); + + if (wait_for_tpm_stat(chip, VTPM_STATUS_IDLE, duration, + &chip->vendor.read_queue) < 0) { + /* got a signal or timeout, try to cancel */ + vtpm_cancel(chip); + return -ETIME; + } + + return count; +} + +static int vtpm_recv(struct tpm_chip *chip, u8 *buf, size_t count) +{ + struct tpm_private *priv = chip_get_private(chip); + struct vtpm_shared_page *shr = priv->shr; + unsigned int offset = sizeof(*shr) + 4*shr->nr_extra_pages; + size_t length = shr->length; + + if (shr->state == VTPM_STATE_IDLE) + return -ECANCELED; + + /* In theory the wait at the end of _send makes this one unnecessary */ + if (wait_for_tpm_stat(chip, 
VTPM_STATUS_RESULT, + chip->vendor.timeout_c, &chip->vendor.read_queue) < 0) { + vtpm_cancel(chip); + return -ETIME; + } + + if (offset > PAGE_SIZE) + return -EIO; + + if (offset + length > PAGE_SIZE) + length = PAGE_SIZE - offset; + + if (length > count) + length = count; + + memcpy(buf, offset + (u8 *)shr, count); + + return length; +} + +static const struct file_operations vtpm_ops = { + .owner = THIS_MODULE, + .llseek = no_llseek, + .open = tpm_open, + .read = tpm_read, + .write = tpm_write, + .release = tpm_release, +}; + +static DEVICE_ATTR(pubek, S_IRUGO, tpm_show_pubek, NULL); +static DEVICE_ATTR(pcrs, S_IRUGO, tpm_show_pcrs, NULL); +static DEVICE_ATTR(enabled, S_IRUGO, tpm_show_enabled, NULL); +static DEVICE_ATTR(active, S_IRUGO, tpm_show_active, NULL); +static DEVICE_ATTR(owned, S_IRUGO, tpm_show_owned, NULL); +static DEVICE_ATTR(temp_deactivated, S_IRUGO, tpm_show_temp_deactivated, + NULL); +static DEVICE_ATTR(caps, S_IRUGO, tpm_show_caps, NULL); +static DEVICE_ATTR(cancel, S_IWUSR | S_IWGRP, NULL, tpm_store_cancel); +static DEVICE_ATTR(durations, S_IRUGO, tpm_show_durations, NULL); +static DEVICE_ATTR(timeouts, S_IRUGO, tpm_show_timeouts, NULL); + +static struct attribute *vtpm_attrs[] = { + &dev_attr_pubek.attr, + &dev_attr_pcrs.attr, + &dev_attr_enabled.attr, + &dev_attr_active.attr, + &dev_attr_owned.attr, + &dev_attr_temp_deactivated.attr, + &dev_attr_caps.attr, + &dev_attr_cancel.attr, + &dev_attr_durations.attr, + &dev_attr_timeouts.attr, + NULL, +}; + +static struct attribute_group vtpm_attr_grp = { + .attrs = vtpm_attrs +}; + +#define TPM_LONG_TIMEOUT (10 * 60 * HZ) + +static const struct tpm_vendor_specific tpm_vtpm = { + .status = vtpm_status, + .recv = vtpm_recv, + .send = vtpm_send, + .cancel = vtpm_cancel, + .req_complete_mask = VTPM_STATUS_IDLE | VTPM_STATUS_RESULT, + .req_complete_val = VTPM_STATUS_IDLE | VTPM_STATUS_RESULT, + .req_canceled = VTPM_STATUS_IDLE | VTPM_STATUS_CANCELED, + .attr_group = &vtpm_attr_grp, + .miscdev = { + .fops = 
&vtpm_ops, + }, + .duration = { + TPM_LONG_TIMEOUT, + TPM_LONG_TIMEOUT, + TPM_LONG_TIMEOUT, + }, +}; + +static irqreturn_t tpmif_interrupt(int dummy, void *dev_id) +{ + struct tpm_private *priv = dev_id; + + switch (priv->shr->state) { + case VTPM_STATE_IDLE: + case VTPM_STATE_FINISH: + wake_up_interruptible(&priv->chip->vendor.read_queue); + break; + case VTPM_STATE_SUBMIT: + case VTPM_STATE_CANCEL: + default: + break; + } + return IRQ_HANDLED; +} + +static int setup_chip(struct device *dev, struct tpm_private *priv) +{ + struct tpm_chip *chip; + + chip = tpm_register_hardware(dev, &tpm_vtpm); + if (!chip) + return -ENODEV; + + init_waitqueue_head(&chip->vendor.read_queue); + + priv->chip = chip; + chip_set_private(chip, priv); + + return 0; +} + +static int setup_ring(struct xenbus_device *dev, struct tpm_private *priv) +{ + struct xenbus_transaction xbt; + const char *message = NULL; + int rv; + + priv->shr = (void *)__get_free_page(GFP_KERNEL|__GFP_ZERO); + if (!priv->shr) { + xenbus_dev_fatal(dev, -ENOMEM, "allocating shared ring"); + return -ENOMEM; + } + + rv = xenbus_grant_ring(dev, virt_to_mfn(priv->shr)); + if (rv < 0) + return rv; + + priv->ring_ref = rv; + + rv = xenbus_alloc_evtchn(dev, &priv->evtchn); + if (rv) + return rv; + + rv = bind_evtchn_to_irqhandler(priv->evtchn, tpmif_interrupt, 0, + "tpmif", priv); + if (rv <= 0) { + xenbus_dev_fatal(dev, rv, "allocating TPM irq"); + return rv; + } + priv->chip->vendor.irq = rv; + + again: + rv = xenbus_transaction_start(&xbt); + if (rv) { + xenbus_dev_fatal(dev, rv, "starting transaction"); + return rv; + } + + rv = xenbus_printf(xbt, dev->nodename, + "ring-ref", "%u", priv->ring_ref); + if (rv) { + message = "writing ring-ref"; + goto abort_transaction; + } + + rv = xenbus_printf(xbt, dev->nodename, "event-channel", "%u", + priv->evtchn); + if (rv) { + message = "writing event-channel"; + goto abort_transaction; + } + + rv = xenbus_printf(xbt, dev->nodename, "feature-protocol-v2", "1"); + if (rv) { + 
message = "writing feature-protocol-v2"; + goto abort_transaction; + } + + rv = xenbus_transaction_end(xbt, 0); + if (rv == -EAGAIN) + goto again; + if (rv) { + xenbus_dev_fatal(dev, rv, "completing transaction"); + return rv; + } + + xenbus_switch_state(dev, XenbusStateInitialised); + + return 0; + + abort_transaction: + xenbus_transaction_end(xbt, 1); + if (message) + xenbus_dev_error(dev, rv, "%s", message); + + return rv; +} + +static void ring_free(struct tpm_private *priv) +{ + if (priv->ring_ref) + gnttab_end_foreign_access(priv->ring_ref, 0, + (unsigned long)priv->shr); + + if (priv->chip && priv->chip->vendor.irq) + unbind_from_irqhandler(priv->chip->vendor.irq, priv); + + free_page((unsigned long)priv->shr); + kfree(priv); +} + +static int tpmfront_probe(struct xenbus_device *dev, + const struct xenbus_device_id *id) +{ + struct tpm_private *priv; + int rv; + + priv = kzalloc(sizeof(*priv), GFP_KERNEL); + if (!priv) { + xenbus_dev_fatal(dev, -ENOMEM, "allocating priv structure"); + return -ENOMEM; + } + + rv = setup_chip(&dev->dev, priv); + if (rv) { + kfree(priv); + return rv; + } + + rv = setup_ring(dev, priv); + if (rv) { + tpm_remove_hardware(&dev->dev); + ring_free(priv); + return rv; + } + + tpm_get_timeouts(priv->chip); + + dev_set_drvdata(&dev->dev, priv->chip); + + return rv; +} + +static int tpmfront_remove(struct xenbus_device *dev) +{ + struct tpm_chip *chip = dev_get_drvdata(&dev->dev); + struct tpm_private *priv = chip_get_private(chip); + tpm_remove_hardware(&dev->dev); + ring_free(priv); + return 0; +} + +static int tpmfront_resume(struct xenbus_device *dev) +{ + /* A suspend/resume/migrate will interrupt a vTPM anyway */ + tpmfront_remove(dev); + return tpmfront_probe(dev, NULL); +} + +static void backend_changed(struct xenbus_device *dev, + enum xenbus_state backend_state) +{ + int val; + + switch (backend_state) { + case XenbusStateInitialised: + case XenbusStateConnected: + if (xenbus_scanf(XBT_NIL, dev->otherend, + 
"feature-protocol-v2", "%d", &val) < 0) + val = 0; + if (!val) { + xenbus_dev_fatal(dev, -EINVAL, + "vTPM protocol 2 required"); + return; + } + xenbus_switch_state(dev, XenbusStateConnected); + break; + + case XenbusStateClosing: + case XenbusStateClosed: + device_unregister(&dev->dev); + xenbus_frontend_closed(dev); + break; + default: + break; + } +} + +static const struct xenbus_device_id tpmfront_ids[] = { + { "vtpm" }, + { "" } +}; +MODULE_ALIAS("xen:vtpm"); + +static DEFINE_XENBUS_DRIVER(tpmfront, , + .probe = tpmfront_probe, + .remove = tpmfront_remove, + .resume = tpmfront_resume, + .otherend_changed = backend_changed, + ); + +static int __init xen_tpmfront_init(void) +{ + if (!xen_domain()) + return -ENODEV; + + return xenbus_register_frontend(&tpmfront_driver); +} +module_init(xen_tpmfront_init); + +static void __exit xen_tpmfront_exit(void) +{ + xenbus_unregister_driver(&tpmfront_driver); +} +module_exit(xen_tpmfront_exit); + +MODULE_LICENSE("Dual BSD/GPL"); diff --git a/include/xen/interface/io/tpmif.h b/include/xen/interface/io/tpmif.h new file mode 100644 index 0000000..92522a4 --- /dev/null +++ b/include/xen/interface/io/tpmif.h @@ -0,0 +1,50 @@ +/****************************************************************************** + * tpmif.h + * + * TPM I/O interface for Xen guest OSes, v2 + * + * Permission is hereby granted, free of charge, to any person obtaining a copy + * of this software and associated documentation files (the "Software"), to + * deal in the Software without restriction, including without limitation the + * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or + * sell copies of the Software, and to permit persons to whom the Software is + * furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. 
+ * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE + * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING + * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER + * DEALINGS IN THE SOFTWARE. + * + */ + +#ifndef __XEN_PUBLIC_IO_TPMIF_H__ +#define __XEN_PUBLIC_IO_TPMIF_H__ + +enum vtpm_shared_page_state { + VTPM_STATE_IDLE, /* no contents / vTPM idle / cancel complete */ + VTPM_STATE_SUBMIT, /* request ready / vTPM working */ + VTPM_STATE_FINISH, /* response ready / vTPM idle */ + VTPM_STATE_CANCEL, /* cancel requested / vTPM working */ +}; +/* The backend should only change state to IDLE or FINISH, while the + * frontend should only change to SUBMIT or CANCEL. */ + + +struct vtpm_shared_page { + uint32_t length; /* request/response length in bytes */ + + uint8_t state; /* enum vtpm_shared_page_state */ + uint8_t locality; /* for the current request */ + uint8_t pad; + + uint8_t nr_extra_pages; /* extra pages for long packets; may be zero */ + uint32_t extra_pages[0]; /* grant IDs; length in nr_extra_pages */ +}; + +#endif -- 1.8.1.4
Daniel De Graaf
2013-Mar-22 22:30 UTC
[PATCH v5.1 01/12] mini-os/tpm{back,front}: Change shared page ABI
This changes the vTPM shared page ABI from a copy of the Xen network interface to a single-page interface that better reflects the expected behavior of a TPM: only a single request packet can be sent at any given time, and every packet sent generates a single response packet. This protocol change should also increase efficiency as it avoids mapping and unmapping grants when possible. The vtpm xenbus device now requires a feature-protocol-v2 node in xenstore to avoid conflicts with existing (xen-patched) kernels supporting the old interface. While the contents of the shared page have been defined to allow packets larger than a single page (actually 4088 bytes) by allowing the client to add extra grant references, the mapping of these extra references has not been implemented; a feature node in xenstore may be used in the future to indicate full support for the multi-page protocol. Most uses of the TPM should not require this feature. Signed-off-by: Daniel De Graaf <dgdegra@tycho.nsa.gov> Cc: Jan Beulich <JBeulich@suse.com> --- Changes from v5: handle xenbus states properly, and keep the old header contents intact. Changes from v4: use feature-protocol-v2 nodes instead of changing the device name; add command cancellation support to the protocol. 
extras/mini-os/include/tpmback.h | 1 + extras/mini-os/include/tpmfront.h | 7 +- extras/mini-os/tpmback.c | 163 +++++++++++++++++++------------------- extras/mini-os/tpmfront.c | 143 ++++++++++++++++++--------------- xen/include/public/io/tpmif.h | 28 +++++++ 5 files changed, 190 insertions(+), 152 deletions(-) diff --git a/extras/mini-os/include/tpmback.h b/extras/mini-os/include/tpmback.h index ff86732..ec9eda4 100644 --- a/extras/mini-os/include/tpmback.h +++ b/extras/mini-os/include/tpmback.h @@ -43,6 +43,7 @@ struct tpmcmd { domid_t domid; /* Domid of the frontend */ + uint8_t locality; /* Locality requested by the frontend */ unsigned int handle; /* Handle of the frontend */ unsigned char uuid[16]; /* uuid of the tpm interface */ diff --git a/extras/mini-os/include/tpmfront.h b/extras/mini-os/include/tpmfront.h index fd2cb17..a0c7c4d 100644 --- a/extras/mini-os/include/tpmfront.h +++ b/extras/mini-os/include/tpmfront.h @@ -37,9 +37,7 @@ struct tpmfront_dev { grant_ref_t ring_ref; evtchn_port_t evtchn; - tpmif_tx_interface_t* tx; - - void** pages; + vtpm_shared_page_t *page; domid_t bedomid; char* nodename; @@ -77,6 +75,9 @@ void shutdown_tpmfront(struct tpmfront_dev* dev); * */ int tpmfront_cmd(struct tpmfront_dev* dev, uint8_t* req, size_t reqlen, uint8_t** resp, size_t* resplen); +/* Set the locality used for communicating with a vTPM */ +int tpmfront_set_locality(struct tpmfront_dev* dev, int locality); + #ifdef HAVE_LIBC #include <sys/stat.h> /* POSIX IO functions: diff --git a/extras/mini-os/tpmback.c b/extras/mini-os/tpmback.c index 658fed1..d6a504e 100644 --- a/extras/mini-os/tpmback.c +++ b/extras/mini-os/tpmback.c @@ -86,10 +86,7 @@ struct tpmif { evtchn_port_t evtchn; /* Shared page */ - tpmif_tx_interface_t* tx; - - /* pointer to TPMIF_RX_RING_SIZE pages */ - void** pages; + vtpm_shared_page_t *page; enum xenbus_state state; enum { DISCONNECTED, DISCONNECTING, CONNECTED } status; @@ -266,6 +263,7 @@ int insert_tpmif(tpmif_t* tpmif) unsigned int i, 
j; tpmif_t* tmp; char* err; + char path[512]; local_irq_save(flags); @@ -303,6 +301,16 @@ int insert_tpmif(tpmif_t* tpmif) local_irq_restore(flags); + snprintf(path, 512, "backend/vtpm/%u/%u/feature-protocol-v2", (unsigned int) tpmif->domid, tpmif->handle); + if ((err = xenbus_write(XBT_NIL, path, "1"))) + { + /* if we got an error here we should carefully remove the interface and then return */ + TPMBACK_ERR("Unable to write feature-protocol-v2 node: %s\n", err); + free(err); + remove_tpmif(tpmif); + goto error_post_irq; + } + /*Listen for state changes on the new interface */ if((err = xenbus_watch_path_token(XBT_NIL, tpmif->fe_state_path, tpmif->fe_state_path, >pmdev.events))) { @@ -312,7 +320,6 @@ int insert_tpmif(tpmif_t* tpmif) remove_tpmif(tpmif); goto error_post_irq; } - return 0; error: local_irq_restore(flags); @@ -386,8 +393,7 @@ inline tpmif_t* __init_tpmif(domid_t domid, unsigned int handle) tpmif->fe_state_path = NULL; tpmif->state = XenbusStateInitialising; tpmif->status = DISCONNECTED; - tpmif->tx = NULL; - tpmif->pages = NULL; + tpmif->page = NULL; tpmif->flags = 0; memset(tpmif->uuid, 0, sizeof(tpmif->uuid)); return tpmif; @@ -395,9 +401,6 @@ inline tpmif_t* __init_tpmif(domid_t domid, unsigned int handle) void __free_tpmif(tpmif_t* tpmif) { - if(tpmif->pages) { - free(tpmif->pages); - } if(tpmif->fe_path) { free(tpmif->fe_path); } @@ -430,12 +433,6 @@ tpmif_t* new_tpmif(domid_t domid, unsigned int handle) goto error; } - /* allocate pages to be used for shared mapping */ - if((tpmif->pages = malloc(sizeof(void*) * TPMIF_TX_RING_SIZE)) == NULL) { - goto error; - } - memset(tpmif->pages, 0, sizeof(void*) * TPMIF_TX_RING_SIZE); - if(tpmif_change_state(tpmif, XenbusStateInitWait)) { goto error; } @@ -486,7 +483,7 @@ void free_tpmif(tpmif_t* tpmif) tpmif->status = DISCONNECTING; mask_evtchn(tpmif->evtchn); - if(gntmap_munmap(>pmdev.map, (unsigned long)tpmif->tx, 1)) { + if(gntmap_munmap(>pmdev.map, (unsigned long)tpmif->page, 1)) { TPMBACK_ERR("%u/%u 
Error occured while trying to unmap shared page\n", (unsigned int) tpmif->domid, tpmif->handle); } @@ -529,15 +526,27 @@ void free_tpmif(tpmif_t* tpmif) void tpmback_handler(evtchn_port_t port, struct pt_regs *regs, void *data) { tpmif_t* tpmif = (tpmif_t*) data; - tpmif_tx_request_t* tx = &tpmif->tx->ring[0].req; - /* Throw away 0 size events, these can trigger from event channel unmasking */ - if(tx->size == 0) - return; - - TPMBACK_DEBUG("EVENT CHANNEL FIRE %u/%u\n", (unsigned int) tpmif->domid, tpmif->handle); - tpmif_req_ready(tpmif); - wake_up(&waitq); + vtpm_shared_page_t* pg = tpmif->page; + switch (pg->state) + { + case VTPM_STATE_SUBMIT: + TPMBACK_DEBUG("EVENT CHANNEL FIRE %u/%u\n", (unsigned int) tpmif->domid, tpmif->handle); + tpmif_req_ready(tpmif); + wake_up(&waitq); + break; + case VTPM_STATE_CANCEL: + /* If we are busy with a request, do nothing */ + if (tpmif->flags & TPMIF_REQ_READY) + return; + /* Acknowledge the cancellation if we are idle */ + pg->state = VTPM_STATE_IDLE; + notify_remote_via_evtchn(tpmif->evtchn); + return; + default: + /* Spurious wakeup; do nothing */ + return; + } } /* Connect to frontend */ @@ -584,12 +593,25 @@ int connect_fe(tpmif_t* tpmif) } free(value); + /* Check that protocol v2 is being used */ + snprintf(path, 512, "%s/feature-protocol-v2", tpmif->fe_path); + if((err = xenbus_read(XBT_NIL, path, &value))) { + TPMBACK_ERR("Unable to read %s during tpmback initialization! 
error = %s\n", path, err); + free(err); + return -1; + } + if(strcmp(value, "1")) { + TPMBACK_ERR("%s has an invalid value (%s)\n", path, value); + free(value); + return -1; + } + free(value); + domid = tpmif->domid; - if((tpmif->tx = gntmap_map_grant_refs(>pmdev.map, 1, &domid, 0, &ringref, PROT_READ | PROT_WRITE)) == NULL) { + if((tpmif->page = gntmap_map_grant_refs(>pmdev.map, 1, &domid, 0, &ringref, PROT_READ | PROT_WRITE)) == NULL) { TPMBACK_ERR("Failed to map grant reference %u/%u\n", (unsigned int) tpmif->domid, tpmif->handle); return -1; } - memset(tpmif->tx, 0, PAGE_SIZE); /*Bind the event channel */ if((evtchn_bind_interdomain(tpmif->domid, evtchn, tpmback_handler, tpmif, &tpmif->evtchn))) @@ -618,7 +640,7 @@ error_post_evtchn: mask_evtchn(tpmif->evtchn); unbind_evtchn(tpmif->evtchn); error_post_map: - gntmap_munmap(>pmdev.map, (unsigned long)tpmif->tx, 1); + gntmap_munmap(>pmdev.map, (unsigned long)tpmif->page, 1); return -1; } @@ -633,9 +655,9 @@ static int frontend_changed(tpmif_t* tpmif) switch (state) { case XenbusStateInitialising: - case XenbusStateInitialised: break; + case XenbusStateInitialised: case XenbusStateConnected: if(connect_fe(tpmif)) { TPMBACK_ERR("Failed to connect to front end %u/%u\n", (unsigned int) tpmif->domid, tpmif->handle); @@ -874,6 +896,7 @@ void shutdown_tpmback(void) inline void init_tpmcmd(tpmcmd_t* tpmcmd, domid_t domid, unsigned int handle, unsigned char uuid[16]) { tpmcmd->domid = domid; + tpmcmd->locality = -1; tpmcmd->handle = handle; memcpy(tpmcmd->uuid, uuid, sizeof(tpmcmd->uuid)); tpmcmd->req = NULL; @@ -884,12 +907,12 @@ inline void init_tpmcmd(tpmcmd_t* tpmcmd, domid_t domid, unsigned int handle, un tpmcmd_t* get_request(tpmif_t* tpmif) { tpmcmd_t* cmd; - tpmif_tx_request_t* tx; - int offset; - int tocopy; - int i; - uint32_t domid; + vtpm_shared_page_t* shr; + unsigned int offset; int flags; +#ifdef TPMBACK_PRINT_DEBUG + int i; +#endif local_irq_save(flags); @@ -899,35 +922,22 @@ tpmcmd_t* get_request(tpmif_t* 
tpmif) { } init_tpmcmd(cmd, tpmif->domid, tpmif->handle, tpmif->uuid); - tx = &tpmif->tx->ring[0].req; - cmd->req_len = tx->size; + shr = tpmif->page; + cmd->req_len = shr->length; + cmd->locality = shr->locality; + offset = sizeof(*shr) + 4*shr->nr_extra_pages; + if (offset > PAGE_SIZE || offset + cmd->req_len > PAGE_SIZE) { + TPMBACK_ERR("%u/%u Command size too long for shared page!\n", (unsigned int) tpmif->domid, tpmif->handle); + goto error; + } /* Allocate the buffer */ if(cmd->req_len) { if((cmd->req = malloc(cmd->req_len)) == NULL) { goto error; } } - /* Copy the bits from the shared pages */ - offset = 0; - for(i = 0; i < TPMIF_TX_RING_SIZE && offset < cmd->req_len; ++i) { - tx = &tpmif->tx->ring[i].req; - - /* Map the page with the data */ - domid = (uint32_t)tpmif->domid; - if((tpmif->pages[i] = gntmap_map_grant_refs(>pmdev.map, 1, &domid, 0, &tx->ref, PROT_READ)) == NULL) { - TPMBACK_ERR("%u/%u Unable to map shared page during read!\n", (unsigned int) tpmif->domid, tpmif->handle); - goto error; - } - - /* do the copy now */ - tocopy = min(cmd->req_len - offset, PAGE_SIZE); - memcpy(&cmd->req[offset], tpmif->pages[i], tocopy); - offset += tocopy; - - /* release the page */ - gntmap_munmap(>pmdev.map, (unsigned long)tpmif->pages[i], 1); - - } + /* Copy the bits from the shared page(s) */ + memcpy(cmd->req, offset + (uint8_t*)shr, cmd->req_len); #ifdef TPMBACK_PRINT_DEBUG TPMBACK_DEBUG("Received Tpm Command from %u/%u of size %u", (unsigned int) tpmif->domid, tpmif->handle, cmd->req_len); @@ -958,38 +968,24 @@ error: void send_response(tpmcmd_t* cmd, tpmif_t* tpmif) { - tpmif_tx_request_t* tx; - int offset; - int i; - uint32_t domid; - int tocopy; + vtpm_shared_page_t* shr; + unsigned int offset; int flags; +#ifdef TPMBACK_PRINT_DEBUG +int i; +#endif local_irq_save(flags); - tx = &tpmif->tx->ring[0].req; - tx->size = cmd->resp_len; - - offset = 0; - for(i = 0; i < TPMIF_TX_RING_SIZE && offset < cmd->resp_len; ++i) { - tx = &tpmif->tx->ring[i].req; - - /* 
Map the page with the data */ - domid = (uint32_t)tpmif->domid; - if((tpmif->pages[i] = gntmap_map_grant_refs(>pmdev.map, 1, &domid, 0, &tx->ref, PROT_WRITE)) == NULL) { - TPMBACK_ERR("%u/%u Unable to map shared page during write!\n", (unsigned int) tpmif->domid, tpmif->handle); - goto error; - } - - /* do the copy now */ - tocopy = min(cmd->resp_len - offset, PAGE_SIZE); - memcpy(tpmif->pages[i], &cmd->resp[offset], tocopy); - offset += tocopy; - - /* release the page */ - gntmap_munmap(>pmdev.map, (unsigned long)tpmif->pages[i], 1); + shr = tpmif->page; + shr->length = cmd->resp_len; + offset = sizeof(*shr) + 4*shr->nr_extra_pages; + if (offset > PAGE_SIZE || offset + cmd->resp_len > PAGE_SIZE) { + TPMBACK_ERR("%u/%u Command size too long for shared page!\n", (unsigned int) tpmif->domid, tpmif->handle); + goto error; } + memcpy(offset + (uint8_t*)shr, cmd->resp, cmd->resp_len); #ifdef TPMBACK_PRINT_DEBUG TPMBACK_DEBUG("Sent response to %u/%u of size %u", (unsigned int) tpmif->domid, tpmif->handle, cmd->resp_len); @@ -1003,6 +999,7 @@ void send_response(tpmcmd_t* cmd, tpmif_t* tpmif) #endif /* clear the ready flag and send the event channel notice to the frontend */ tpmif_req_finished(tpmif); + shr->state = VTPM_STATE_FINISH; notify_remote_via_evtchn(tpmif->evtchn); error: local_irq_restore(flags); diff --git a/extras/mini-os/tpmfront.c b/extras/mini-os/tpmfront.c index 0218d7f..a15b5cf 100644 --- a/extras/mini-os/tpmfront.c +++ b/extras/mini-os/tpmfront.c @@ -47,11 +47,21 @@ void tpmfront_handler(evtchn_port_t port, struct pt_regs *regs, void *data) { struct tpmfront_dev* dev = (struct tpmfront_dev*) data; + vtpm_shared_page_t* shr = dev->page; /*If we get a response when we didnt make a request, just ignore it */ if(!dev->waiting) { return; } + switch (shr->state) { + case VTPM_STATE_FINISH: /* request was completed */ + case VTPM_STATE_IDLE: /* request was cancelled */ + break; + default: + /* Spurious wakeup; do nothing, request is still pending */ + return; + 
} + dev->waiting = 0; #ifdef HAVE_LIBC if(dev->fd >= 0) { @@ -176,7 +186,7 @@ static int wait_for_backend_state_changed(struct tpmfront_dev* dev, XenbusState ret = wait_for_backend_closed(&events, path); break; default: - break; + TPMFRONT_ERR("Bad wait state %d, ignoring\n", state); } if((err = xenbus_unwatch_path_token(XBT_NIL, path, path))) { @@ -190,13 +200,13 @@ static int tpmfront_connect(struct tpmfront_dev* dev) { char* err; /* Create shared page */ - dev->tx = (tpmif_tx_interface_t*) alloc_page(); - if(dev->tx == NULL) { + dev->page = (vtpm_shared_page_t*) alloc_page(); + if(dev->page == NULL) { TPMFRONT_ERR("Unable to allocate page for shared memory\n"); goto error; } - memset(dev->tx, 0, PAGE_SIZE); - dev->ring_ref = gnttab_grant_access(dev->bedomid, virt_to_mfn(dev->tx), 0); + memset(dev->page, 0, PAGE_SIZE); + dev->ring_ref = gnttab_grant_access(dev->bedomid, virt_to_mfn(dev->page), 0); TPMFRONT_DEBUG("grant ref is %lu\n", (unsigned long) dev->ring_ref); /*Create event channel */ @@ -228,7 +238,7 @@ error_postevtchn: unbind_evtchn(dev->evtchn); error_postmap: gnttab_end_access(dev->ring_ref); - free_page(dev->tx); + free_page(dev->page); error: return -1; } @@ -240,7 +250,6 @@ struct tpmfront_dev* init_tpmfront(const char* _nodename) char path[512]; char* value, *err; unsigned long long ival; - int i; printk("============= Init TPM Front ================\n"); @@ -279,6 +288,15 @@ struct tpmfront_dev* init_tpmfront(const char* _nodename) goto error; } + /* Publish protocol v2 feature */ + snprintf(path, 512, "%s/feature-protocol-v2", dev->nodename); + if ((err = xenbus_write(XBT_NIL, path, "1"))) + { + TPMFRONT_ERR("Unable to write feature-protocol-v2 node: %s\n", err); + free(err); + goto error; + } + /* Create and publish grant reference and event channel */ if (tpmfront_connect(dev)) { goto error; @@ -289,18 +307,19 @@ struct tpmfront_dev* init_tpmfront(const char* _nodename) goto error; } - /* Allocate pages that will contain the messages */ - 
dev->pages = malloc(sizeof(void*) * TPMIF_TX_RING_SIZE); - if(dev->pages == NULL) { + /* Ensure backend is also using protocol v2 */ + snprintf(path, 512, "%s/feature-protocol-v2", dev->bepath); + if((err = xenbus_read(XBT_NIL, path, &value))) { + TPMFRONT_ERR("Unable to read %s during tpmfront initialization! error = %s\n", path, err); + free(err); goto error; } - memset(dev->pages, 0, sizeof(void*) * TPMIF_TX_RING_SIZE); - for(i = 0; i < TPMIF_TX_RING_SIZE; ++i) { - dev->pages[i] = (void*)alloc_page(); - if(dev->pages[i] == NULL) { - goto error; - } + if(strcmp(value, "1")) { + TPMFRONT_ERR("%s has an invalid value (%s)\n", path, value); + free(value); + goto error; } + free(value); TPMFRONT_LOG("Initialization Completed successfully\n"); @@ -314,8 +333,6 @@ void shutdown_tpmfront(struct tpmfront_dev* dev) { char* err; char path[512]; - int i; - tpmif_tx_request_t* tx; if(dev == NULL) { return; } @@ -349,27 +366,12 @@ void shutdown_tpmfront(struct tpmfront_dev* dev) /* Wait for the backend to close and unmap shared pages, ignore any errors */ wait_for_backend_state_changed(dev, XenbusStateClosed); - /* Cleanup any shared pages */ - if(dev->pages) { - for(i = 0; i < TPMIF_TX_RING_SIZE; ++i) { - if(dev->pages[i]) { - tx = &dev->tx->ring[i].req; - if(tx->ref != 0) { - gnttab_end_access(tx->ref); - } - free_page(dev->pages[i]); - } - } - free(dev->pages); - } - /* Close event channel and unmap shared page */ mask_evtchn(dev->evtchn); unbind_evtchn(dev->evtchn); gnttab_end_access(dev->ring_ref); - free_page(dev->tx); - + free_page(dev->page); } /* Cleanup memory usage */ @@ -387,13 +389,17 @@ void shutdown_tpmfront(struct tpmfront_dev* dev) int tpmfront_send(struct tpmfront_dev* dev, const uint8_t* msg, size_t length) { + unsigned int offset; + vtpm_shared_page_t* shr = NULL; +#ifdef TPMFRONT_PRINT_DEBUG int i; - tpmif_tx_request_t* tx = NULL; +#endif /* Error Checking */ if(dev == NULL || dev->state != XenbusStateConnected) { TPMFRONT_ERR("Tried to send message 
through disconnected frontend\n"); return -1; } + shr = dev->page; #ifdef TPMFRONT_PRINT_DEBUG TPMFRONT_DEBUG("Sending Msg to backend size=%u", (unsigned int) length); @@ -407,19 +413,16 @@ int tpmfront_send(struct tpmfront_dev* dev, const uint8_t* msg, size_t length) #endif /* Copy to shared pages now */ - for(i = 0; length > 0 && i < TPMIF_TX_RING_SIZE; ++i) { - /* Share the page */ - tx = &dev->tx->ring[i].req; - tx->unused = 0; - tx->addr = virt_to_mach(dev->pages[i]); - tx->ref = gnttab_grant_access(dev->bedomid, virt_to_mfn(dev->pages[i]), 0); - /* Copy the bits to the page */ - tx->size = length > PAGE_SIZE ? PAGE_SIZE : length; - memcpy(dev->pages[i], &msg[i * PAGE_SIZE], tx->size); - - /* Update counters */ - length -= tx->size; + offset = sizeof(*shr); + if (length + offset > PAGE_SIZE) { + TPMFRONT_ERR("Message too long for shared page\n"); + return -1; } + memcpy(offset + (uint8_t*)shr, msg, length); + shr->length = length; + barrier(); + shr->state = VTPM_STATE_SUBMIT; + dev->waiting = 1; dev->resplen = 0; #ifdef HAVE_LIBC @@ -434,44 +437,44 @@ int tpmfront_send(struct tpmfront_dev* dev, const uint8_t* msg, size_t length) } int tpmfront_recv(struct tpmfront_dev* dev, uint8_t** msg, size_t *length) { - tpmif_tx_request_t* tx; - int i; + unsigned int offset; + vtpm_shared_page_t* shr = NULL; +#ifdef TPMFRONT_PRINT_DEBUG +int i; +#endif if(dev == NULL || dev->state != XenbusStateConnected) { TPMFRONT_ERR("Tried to receive message from disconnected frontend\n"); return -1; } /*Wait for the response */ wait_event(dev->waitq, (!dev->waiting)); + shr = dev->page; /* Initialize */ *msg = NULL; *length = 0; + offset = sizeof(*shr); - /* special case, just quit */ - tx = &dev->tx->ring[0].req; - if(tx->size == 0 ) { - goto quit; - } - /* Get the total size */ - tx = &dev->tx->ring[0].req; - for(i = 0; i < TPMIF_TX_RING_SIZE && tx->size > 0; ++i) { - tx = &dev->tx->ring[i].req; - *length += tx->size; + if (shr->state != VTPM_STATE_FINISH) + goto quit; + + *length 
= shr->length; + + if (*length + offset > PAGE_SIZE) { + TPMFRONT_ERR("Reply too long for shared page\n"); + return -1; } + /* Alloc the buffer */ if(dev->respbuf) { free(dev->respbuf); } *msg = dev->respbuf = malloc(*length); dev->resplen = *length; + /* Copy the bits */ - tx = &dev->tx->ring[0].req; - for(i = 0; i < TPMIF_TX_RING_SIZE && tx->size > 0; ++i) { - tx = &dev->tx->ring[i].req; - memcpy(&(*msg)[i * PAGE_SIZE], dev->pages[i], tx->size); - gnttab_end_access(tx->ref); - tx->ref = 0; - } + memcpy(*msg, offset + (uint8_t*)shr, *length); + #ifdef TPMFRONT_PRINT_DEBUG TPMFRONT_DEBUG("Received response from backend size=%u", (unsigned int) *length); for(i = 0; i < *length; ++i) { @@ -504,6 +507,14 @@ int tpmfront_cmd(struct tpmfront_dev* dev, uint8_t* req, size_t reqlen, uint8_t* return 0; } +int tpmfront_set_locality(struct tpmfront_dev* dev, int locality) +{ + if (!dev || !dev->page) + return -1; + dev->page->locality = locality; + return 0; +} + #ifdef HAVE_LIBC #include <errno.h> int tpmfront_open(struct tpmfront_dev* dev) diff --git a/xen/include/public/io/tpmif.h b/xen/include/public/io/tpmif.h index fca2c4e..afa4324 100644 --- a/xen/include/public/io/tpmif.h +++ b/xen/include/public/io/tpmif.h @@ -64,6 +64,34 @@ struct tpmif_tx_interface { }; typedef struct tpmif_tx_interface tpmif_tx_interface_t; +/****************************************************************************** + * TPM I/O interface for Xen guest OSes, v2 + * + * Author: Daniel De Graaf <dgdegra@tycho.nsa.gov> + * + */ + +enum vtpm_state { + VTPM_STATE_IDLE, /* no contents / vTPM idle / cancel complete */ + VTPM_STATE_SUBMIT, /* request ready / vTPM working */ + VTPM_STATE_FINISH, /* response ready / vTPM idle */ + VTPM_STATE_CANCEL, /* cancel requested / vTPM working */ +}; +/* The backend should only change state to IDLE or FINISH, while the + * frontend should only change to SUBMIT or CANCEL. 
*/ + +struct vtpm_shared_page { + uint32_t length; /* request/response length in bytes */ + + uint8_t state; /* enum vtpm_state */ + uint8_t locality; /* for the current request */ + uint8_t pad; /* should be zero */ + + uint8_t nr_extra_pages; /* extra pages for long packets; may be zero */ + uint32_t extra_pages[0]; /* grant IDs; length is actually nr_extra_pages */ +}; +typedef struct vtpm_shared_page vtpm_shared_page_t; + #endif /* -- 1.8.1.4
Ian Campbell
2013-Apr-11 14:00 UTC
Re: [PATCH v5.1 01/12] mini-os/tpm{back, front}: Change shared page ABI
Is this in the wrong thread and should be in "[PATCH v5 00/12] vTPM updates for 4.3" as an update to <1363896689-11086-2-git-send-email-dgdegra@tycho.nsa.gov> ? On Fri, 2013-03-22 at 22:30 +0000, Daniel De Graaf wrote:> This changes the vTPM shared page ABI from a copy of the Xen network > interface to a single-page interface that better reflects the expected > behavior of a TPM: only a single request packet can be sent at any given > time, and every packet sent generates a single response packet. This > protocol change should also increase efficiency as it avoids mapping and > unmapping grants when possible. The vtpm xenbus device now requires a > feature-protocol-v2 node in xenstore to avoid conflicts with existing > (xen-patched) kernels supporting the old interface. > > While the contents of the shared page have been defined to allow packets > larger than a single page (actually 4088 bytes) by allowing the client > to add extra grant references, the mapping of these extra references has > not been implemented; a feature node in xenstore may be used in the > future to indicate full support for the multi-page protocol. Most uses > of the TPM should not require this feature. > > Signed-off-by: Daniel De Graaf <dgdegra@tycho.nsa.gov> > Cc: Jan Beulich <JBeulich@suse.com> > > --- > > Changes from v5: handle xenbus states properly, and keep the old header > contents intact. > > Changes from v4: use feature-protocol-v2 nodes instead of changing the > device name; add command cancellation support to the protocol. 
> > extras/mini-os/include/tpmback.h | 1 + > extras/mini-os/include/tpmfront.h | 7 +- > extras/mini-os/tpmback.c | 163 +++++++++++++++++++------------------- > extras/mini-os/tpmfront.c | 143 ++++++++++++++++++--------------- > xen/include/public/io/tpmif.h | 28 +++++++ > 5 files changed, 190 insertions(+), 152 deletions(-) > > diff --git a/extras/mini-os/include/tpmback.h b/extras/mini-os/include/tpmback.h > index ff86732..ec9eda4 100644 > --- a/extras/mini-os/include/tpmback.h > +++ b/extras/mini-os/include/tpmback.h > @@ -43,6 +43,7 @@ > > struct tpmcmd { > domid_t domid; /* Domid of the frontend */ > + uint8_t locality; /* Locality requested by the frontend */ > unsigned int handle; /* Handle of the frontend */ > unsigned char uuid[16]; /* uuid of the tpm interface */ > > diff --git a/extras/mini-os/include/tpmfront.h b/extras/mini-os/include/tpmfront.h > index fd2cb17..a0c7c4d 100644 > --- a/extras/mini-os/include/tpmfront.h > +++ b/extras/mini-os/include/tpmfront.h > @@ -37,9 +37,7 @@ struct tpmfront_dev { > grant_ref_t ring_ref; > evtchn_port_t evtchn; > > - tpmif_tx_interface_t* tx; > - > - void** pages; > + vtpm_shared_page_t *page; > > domid_t bedomid; > char* nodename; > @@ -77,6 +75,9 @@ void shutdown_tpmfront(struct tpmfront_dev* dev); > * */ > int tpmfront_cmd(struct tpmfront_dev* dev, uint8_t* req, size_t reqlen, uint8_t** resp, size_t* resplen); > > +/* Set the locality used for communicating with a vTPM */ > +int tpmfront_set_locality(struct tpmfront_dev* dev, int locality); > + > #ifdef HAVE_LIBC > #include <sys/stat.h> > /* POSIX IO functions: > diff --git a/extras/mini-os/tpmback.c b/extras/mini-os/tpmback.c > index 658fed1..d6a504e 100644 > --- a/extras/mini-os/tpmback.c > +++ b/extras/mini-os/tpmback.c > @@ -86,10 +86,7 @@ struct tpmif { > evtchn_port_t evtchn; > > /* Shared page */ > - tpmif_tx_interface_t* tx; > - > - /* pointer to TPMIF_RX_RING_SIZE pages */ > - void** pages; > + vtpm_shared_page_t *page; > > enum xenbus_state state; > enum 
{ DISCONNECTED, DISCONNECTING, CONNECTED } status; > @@ -266,6 +263,7 @@ int insert_tpmif(tpmif_t* tpmif) > unsigned int i, j; > tpmif_t* tmp; > char* err; > + char path[512]; > > local_irq_save(flags); > > @@ -303,6 +301,16 @@ int insert_tpmif(tpmif_t* tpmif) > > local_irq_restore(flags); > > + snprintf(path, 512, "backend/vtpm/%u/%u/feature-protocol-v2", (unsigned int) tpmif->domid, tpmif->handle); > + if ((err = xenbus_write(XBT_NIL, path, "1"))) > + { > + /* if we got an error here we should carefully remove the interface and then return */ > + TPMBACK_ERR("Unable to write feature-protocol-v2 node: %s\n", err); > + free(err); > + remove_tpmif(tpmif); > + goto error_post_irq; > + } > + > /*Listen for state changes on the new interface */ > if((err = xenbus_watch_path_token(XBT_NIL, tpmif->fe_state_path, tpmif->fe_state_path, >pmdev.events))) > { > @@ -312,7 +320,6 @@ int insert_tpmif(tpmif_t* tpmif) > remove_tpmif(tpmif); > goto error_post_irq; > } > - > return 0; > error: > local_irq_restore(flags); > @@ -386,8 +393,7 @@ inline tpmif_t* __init_tpmif(domid_t domid, unsigned int handle) > tpmif->fe_state_path = NULL; > tpmif->state = XenbusStateInitialising; > tpmif->status = DISCONNECTED; > - tpmif->tx = NULL; > - tpmif->pages = NULL; > + tpmif->page = NULL; > tpmif->flags = 0; > memset(tpmif->uuid, 0, sizeof(tpmif->uuid)); > return tpmif; > @@ -395,9 +401,6 @@ inline tpmif_t* __init_tpmif(domid_t domid, unsigned int handle) > > void __free_tpmif(tpmif_t* tpmif) > { > - if(tpmif->pages) { > - free(tpmif->pages); > - } > if(tpmif->fe_path) { > free(tpmif->fe_path); > } > @@ -430,12 +433,6 @@ tpmif_t* new_tpmif(domid_t domid, unsigned int handle) > goto error; > } > > - /* allocate pages to be used for shared mapping */ > - if((tpmif->pages = malloc(sizeof(void*) * TPMIF_TX_RING_SIZE)) == NULL) { > - goto error; > - } > - memset(tpmif->pages, 0, sizeof(void*) * TPMIF_TX_RING_SIZE); > - > if(tpmif_change_state(tpmif, XenbusStateInitWait)) { > goto error; > } > @@ 
-486,7 +483,7 @@ void free_tpmif(tpmif_t* tpmif) > tpmif->status = DISCONNECTING; > mask_evtchn(tpmif->evtchn); > > - if(gntmap_munmap(>pmdev.map, (unsigned long)tpmif->tx, 1)) { > + if(gntmap_munmap(>pmdev.map, (unsigned long)tpmif->page, 1)) { > TPMBACK_ERR("%u/%u Error occured while trying to unmap shared page\n", (unsigned int) tpmif->domid, tpmif->handle); > } > > @@ -529,15 +526,27 @@ void free_tpmif(tpmif_t* tpmif) > void tpmback_handler(evtchn_port_t port, struct pt_regs *regs, void *data) > { > tpmif_t* tpmif = (tpmif_t*) data; > - tpmif_tx_request_t* tx = &tpmif->tx->ring[0].req; > - /* Throw away 0 size events, these can trigger from event channel unmasking */ > - if(tx->size == 0) > - return; > - > - TPMBACK_DEBUG("EVENT CHANNEL FIRE %u/%u\n", (unsigned int) tpmif->domid, tpmif->handle); > - tpmif_req_ready(tpmif); > - wake_up(&waitq); > + vtpm_shared_page_t* pg = tpmif->page; > > + switch (pg->state) > + { > + case VTPM_STATE_SUBMIT: > + TPMBACK_DEBUG("EVENT CHANNEL FIRE %u/%u\n", (unsigned int) tpmif->domid, tpmif->handle); > + tpmif_req_ready(tpmif); > + wake_up(&waitq); > + break; > + case VTPM_STATE_CANCEL: > + /* If we are busy with a request, do nothing */ > + if (tpmif->flags & TPMIF_REQ_READY) > + return; > + /* Acknowledge the cancellation if we are idle */ > + pg->state = VTPM_STATE_IDLE; > + notify_remote_via_evtchn(tpmif->evtchn); > + return; > + default: > + /* Spurious wakeup; do nothing */ > + return; > + } > } > > /* Connect to frontend */ > @@ -584,12 +593,25 @@ int connect_fe(tpmif_t* tpmif) > } > free(value); > > + /* Check that protocol v2 is being used */ > + snprintf(path, 512, "%s/feature-protocol-v2", tpmif->fe_path); > + if((err = xenbus_read(XBT_NIL, path, &value))) { > + TPMBACK_ERR("Unable to read %s during tpmback initialization! 
error = %s\n", path, err); > + free(err); > + return -1; > + } > + if(strcmp(value, "1")) { > + TPMBACK_ERR("%s has an invalid value (%s)\n", path, value); > + free(value); > + return -1; > + } > + free(value); > + > domid = tpmif->domid; > - if((tpmif->tx = gntmap_map_grant_refs(>pmdev.map, 1, &domid, 0, &ringref, PROT_READ | PROT_WRITE)) == NULL) { > + if((tpmif->page = gntmap_map_grant_refs(>pmdev.map, 1, &domid, 0, &ringref, PROT_READ | PROT_WRITE)) == NULL) { > TPMBACK_ERR("Failed to map grant reference %u/%u\n", (unsigned int) tpmif->domid, tpmif->handle); > return -1; > } > - memset(tpmif->tx, 0, PAGE_SIZE); > > /*Bind the event channel */ > if((evtchn_bind_interdomain(tpmif->domid, evtchn, tpmback_handler, tpmif, &tpmif->evtchn))) > @@ -618,7 +640,7 @@ error_post_evtchn: > mask_evtchn(tpmif->evtchn); > unbind_evtchn(tpmif->evtchn); > error_post_map: > - gntmap_munmap(>pmdev.map, (unsigned long)tpmif->tx, 1); > + gntmap_munmap(>pmdev.map, (unsigned long)tpmif->page, 1); > return -1; > } > > @@ -633,9 +655,9 @@ static int frontend_changed(tpmif_t* tpmif) > > switch (state) { > case XenbusStateInitialising: > - case XenbusStateInitialised: > break; > > + case XenbusStateInitialised: > case XenbusStateConnected: > if(connect_fe(tpmif)) { > TPMBACK_ERR("Failed to connect to front end %u/%u\n", (unsigned int) tpmif->domid, tpmif->handle); > @@ -874,6 +896,7 @@ void shutdown_tpmback(void) > inline void init_tpmcmd(tpmcmd_t* tpmcmd, domid_t domid, unsigned int handle, unsigned char uuid[16]) > { > tpmcmd->domid = domid; > + tpmcmd->locality = -1; > tpmcmd->handle = handle; > memcpy(tpmcmd->uuid, uuid, sizeof(tpmcmd->uuid)); > tpmcmd->req = NULL; > @@ -884,12 +907,12 @@ inline void init_tpmcmd(tpmcmd_t* tpmcmd, domid_t domid, unsigned int handle, un > > tpmcmd_t* get_request(tpmif_t* tpmif) { > tpmcmd_t* cmd; > - tpmif_tx_request_t* tx; > - int offset; > - int tocopy; > - int i; > - uint32_t domid; > + vtpm_shared_page_t* shr; > + unsigned int offset; > int flags; > 
+#ifdef TPMBACK_PRINT_DEBUG > + int i; > +#endif > > local_irq_save(flags); > > @@ -899,35 +922,22 @@ tpmcmd_t* get_request(tpmif_t* tpmif) { > } > init_tpmcmd(cmd, tpmif->domid, tpmif->handle, tpmif->uuid); > > - tx = &tpmif->tx->ring[0].req; > - cmd->req_len = tx->size; > + shr = tpmif->page; > + cmd->req_len = shr->length; > + cmd->locality = shr->locality; > + offset = sizeof(*shr) + 4*shr->nr_extra_pages; > + if (offset > PAGE_SIZE || offset + cmd->req_len > PAGE_SIZE) { > + TPMBACK_ERR("%u/%u Command size too long for shared page!\n", (unsigned int) tpmif->domid, tpmif->handle); > + goto error; > + } > /* Allocate the buffer */ > if(cmd->req_len) { > if((cmd->req = malloc(cmd->req_len)) == NULL) { > goto error; > } > } > - /* Copy the bits from the shared pages */ > - offset = 0; > - for(i = 0; i < TPMIF_TX_RING_SIZE && offset < cmd->req_len; ++i) { > - tx = &tpmif->tx->ring[i].req; > - > - /* Map the page with the data */ > - domid = (uint32_t)tpmif->domid; > - if((tpmif->pages[i] = gntmap_map_grant_refs(>pmdev.map, 1, &domid, 0, &tx->ref, PROT_READ)) == NULL) { > - TPMBACK_ERR("%u/%u Unable to map shared page during read!\n", (unsigned int) tpmif->domid, tpmif->handle); > - goto error; > - } > - > - /* do the copy now */ > - tocopy = min(cmd->req_len - offset, PAGE_SIZE); > - memcpy(&cmd->req[offset], tpmif->pages[i], tocopy); > - offset += tocopy; > - > - /* release the page */ > - gntmap_munmap(>pmdev.map, (unsigned long)tpmif->pages[i], 1); > - > - } > + /* Copy the bits from the shared page(s) */ > + memcpy(cmd->req, offset + (uint8_t*)shr, cmd->req_len); > > #ifdef TPMBACK_PRINT_DEBUG > TPMBACK_DEBUG("Received Tpm Command from %u/%u of size %u", (unsigned int) tpmif->domid, tpmif->handle, cmd->req_len); > @@ -958,38 +968,24 @@ error: > > void send_response(tpmcmd_t* cmd, tpmif_t* tpmif) > { > - tpmif_tx_request_t* tx; > - int offset; > - int i; > - uint32_t domid; > - int tocopy; > + vtpm_shared_page_t* shr; > + unsigned int offset; > int flags; > 
+#ifdef TPMBACK_PRINT_DEBUG > +int i; > +#endif > > local_irq_save(flags); > > - tx = &tpmif->tx->ring[0].req; > - tx->size = cmd->resp_len; > - > - offset = 0; > - for(i = 0; i < TPMIF_TX_RING_SIZE && offset < cmd->resp_len; ++i) { > - tx = &tpmif->tx->ring[i].req; > - > - /* Map the page with the data */ > - domid = (uint32_t)tpmif->domid; > - if((tpmif->pages[i] = gntmap_map_grant_refs(>pmdev.map, 1, &domid, 0, &tx->ref, PROT_WRITE)) == NULL) { > - TPMBACK_ERR("%u/%u Unable to map shared page during write!\n", (unsigned int) tpmif->domid, tpmif->handle); > - goto error; > - } > - > - /* do the copy now */ > - tocopy = min(cmd->resp_len - offset, PAGE_SIZE); > - memcpy(tpmif->pages[i], &cmd->resp[offset], tocopy); > - offset += tocopy; > - > - /* release the page */ > - gntmap_munmap(>pmdev.map, (unsigned long)tpmif->pages[i], 1); > + shr = tpmif->page; > + shr->length = cmd->resp_len; > > + offset = sizeof(*shr) + 4*shr->nr_extra_pages; > + if (offset > PAGE_SIZE || offset + cmd->resp_len > PAGE_SIZE) { > + TPMBACK_ERR("%u/%u Command size too long for shared page!\n", (unsigned int) tpmif->domid, tpmif->handle); > + goto error; > } > + memcpy(offset + (uint8_t*)shr, cmd->resp, cmd->resp_len); > > #ifdef TPMBACK_PRINT_DEBUG > TPMBACK_DEBUG("Sent response to %u/%u of size %u", (unsigned int) tpmif->domid, tpmif->handle, cmd->resp_len); > @@ -1003,6 +999,7 @@ void send_response(tpmcmd_t* cmd, tpmif_t* tpmif) > #endif > /* clear the ready flag and send the event channel notice to the frontend */ > tpmif_req_finished(tpmif); > + shr->state = VTPM_STATE_FINISH; > notify_remote_via_evtchn(tpmif->evtchn); > error: > local_irq_restore(flags); > diff --git a/extras/mini-os/tpmfront.c b/extras/mini-os/tpmfront.c > index 0218d7f..a15b5cf 100644 > --- a/extras/mini-os/tpmfront.c > +++ b/extras/mini-os/tpmfront.c > @@ -47,11 +47,21 @@ > > void tpmfront_handler(evtchn_port_t port, struct pt_regs *regs, void *data) { > struct tpmfront_dev* dev = (struct tpmfront_dev*) data; > + 
vtpm_shared_page_t* shr = dev->page; > /*If we get a response when we didnt make a request, just ignore it */ > if(!dev->waiting) { > return; > } > > + switch (shr->state) { > + case VTPM_STATE_FINISH: /* request was completed */ > + case VTPM_STATE_IDLE: /* request was cancelled */ > + break; > + default: > + /* Spurious wakeup; do nothing, request is still pending */ > + return; > + } > + > dev->waiting = 0; > #ifdef HAVE_LIBC > if(dev->fd >= 0) { > @@ -176,7 +186,7 @@ static int wait_for_backend_state_changed(struct tpmfront_dev* dev, XenbusState > ret = wait_for_backend_closed(&events, path); > break; > default: > - break; > + TPMFRONT_ERR("Bad wait state %d, ignoring\n", state); > } > > if((err = xenbus_unwatch_path_token(XBT_NIL, path, path))) { > @@ -190,13 +200,13 @@ static int tpmfront_connect(struct tpmfront_dev* dev) > { > char* err; > /* Create shared page */ > - dev->tx = (tpmif_tx_interface_t*) alloc_page(); > - if(dev->tx == NULL) { > + dev->page = (vtpm_shared_page_t*) alloc_page(); > + if(dev->page == NULL) { > TPMFRONT_ERR("Unable to allocate page for shared memory\n"); > goto error; > } > - memset(dev->tx, 0, PAGE_SIZE); > - dev->ring_ref = gnttab_grant_access(dev->bedomid, virt_to_mfn(dev->tx), 0); > + memset(dev->page, 0, PAGE_SIZE); > + dev->ring_ref = gnttab_grant_access(dev->bedomid, virt_to_mfn(dev->page), 0); > TPMFRONT_DEBUG("grant ref is %lu\n", (unsigned long) dev->ring_ref); > > /*Create event channel */ > @@ -228,7 +238,7 @@ error_postevtchn: > unbind_evtchn(dev->evtchn); > error_postmap: > gnttab_end_access(dev->ring_ref); > - free_page(dev->tx); > + free_page(dev->page); > error: > return -1; > } > @@ -240,7 +250,6 @@ struct tpmfront_dev* init_tpmfront(const char* _nodename) > char path[512]; > char* value, *err; > unsigned long long ival; > - int i; > > printk("============= Init TPM Front ================\n"); > > @@ -279,6 +288,15 @@ struct tpmfront_dev* init_tpmfront(const char* _nodename) > goto error; > } > > + /* Publish 
protocol v2 feature */ > + snprintf(path, 512, "%s/feature-protocol-v2", dev->nodename); > + if ((err = xenbus_write(XBT_NIL, path, "1"))) > + { > + TPMFRONT_ERR("Unable to write feature-protocol-v2 node: %s\n", err); > + free(err); > + goto error; > + } > + > /* Create and publish grant reference and event channel */ > if (tpmfront_connect(dev)) { > goto error; > @@ -289,18 +307,19 @@ struct tpmfront_dev* init_tpmfront(const char* _nodename) > goto error; > } > > - /* Allocate pages that will contain the messages */ > - dev->pages = malloc(sizeof(void*) * TPMIF_TX_RING_SIZE); > - if(dev->pages == NULL) { > + /* Ensure backend is also using protocol v2 */ > + snprintf(path, 512, "%s/feature-protocol-v2", dev->bepath); > + if((err = xenbus_read(XBT_NIL, path, &value))) { > + TPMFRONT_ERR("Unable to read %s during tpmfront initialization! error = %s\n", path, err); > + free(err); > goto error; > } > - memset(dev->pages, 0, sizeof(void*) * TPMIF_TX_RING_SIZE); > - for(i = 0; i < TPMIF_TX_RING_SIZE; ++i) { > - dev->pages[i] = (void*)alloc_page(); > - if(dev->pages[i] == NULL) { > - goto error; > - } > + if(strcmp(value, "1")) { > + TPMFRONT_ERR("%s has an invalid value (%s)\n", path, value); > + free(value); > + goto error; > } > + free(value); > > TPMFRONT_LOG("Initialization Completed successfully\n"); > > @@ -314,8 +333,6 @@ void shutdown_tpmfront(struct tpmfront_dev* dev) > { > char* err; > char path[512]; > - int i; > - tpmif_tx_request_t* tx; > if(dev == NULL) { > return; > } > @@ -349,27 +366,12 @@ void shutdown_tpmfront(struct tpmfront_dev* dev) > /* Wait for the backend to close and unmap shared pages, ignore any errors */ > wait_for_backend_state_changed(dev, XenbusStateClosed); > > - /* Cleanup any shared pages */ > - if(dev->pages) { > - for(i = 0; i < TPMIF_TX_RING_SIZE; ++i) { > - if(dev->pages[i]) { > - tx = &dev->tx->ring[i].req; > - if(tx->ref != 0) { > - gnttab_end_access(tx->ref); > - } > - free_page(dev->pages[i]); > - } > - } > - free(dev->pages); 
> - } > - > /* Close event channel and unmap shared page */ > mask_evtchn(dev->evtchn); > unbind_evtchn(dev->evtchn); > gnttab_end_access(dev->ring_ref); > > - free_page(dev->tx); > - > + free_page(dev->page); > } > > /* Cleanup memory usage */ > @@ -387,13 +389,17 @@ void shutdown_tpmfront(struct tpmfront_dev* dev) > > int tpmfront_send(struct tpmfront_dev* dev, const uint8_t* msg, size_t length) > { > + unsigned int offset; > + vtpm_shared_page_t* shr = NULL; > +#ifdef TPMFRONT_PRINT_DEBUG > int i; > - tpmif_tx_request_t* tx = NULL; > +#endif > /* Error Checking */ > if(dev == NULL || dev->state != XenbusStateConnected) { > TPMFRONT_ERR("Tried to send message through disconnected frontend\n"); > return -1; > } > + shr = dev->page; > > #ifdef TPMFRONT_PRINT_DEBUG > TPMFRONT_DEBUG("Sending Msg to backend size=%u", (unsigned int) length); > @@ -407,19 +413,16 @@ int tpmfront_send(struct tpmfront_dev* dev, const uint8_t* msg, size_t length) > #endif > > /* Copy to shared pages now */ > - for(i = 0; length > 0 && i < TPMIF_TX_RING_SIZE; ++i) { > - /* Share the page */ > - tx = &dev->tx->ring[i].req; > - tx->unused = 0; > - tx->addr = virt_to_mach(dev->pages[i]); > - tx->ref = gnttab_grant_access(dev->bedomid, virt_to_mfn(dev->pages[i]), 0); > - /* Copy the bits to the page */ > - tx->size = length > PAGE_SIZE ? 
PAGE_SIZE : length; > - memcpy(dev->pages[i], &msg[i * PAGE_SIZE], tx->size); > - > - /* Update counters */ > - length -= tx->size; > + offset = sizeof(*shr); > + if (length + offset > PAGE_SIZE) { > + TPMFRONT_ERR("Message too long for shared page\n"); > + return -1; > } > + memcpy(offset + (uint8_t*)shr, msg, length); > + shr->length = length; > + barrier(); > + shr->state = VTPM_STATE_SUBMIT; > + > dev->waiting = 1; > dev->resplen = 0; > #ifdef HAVE_LIBC > @@ -434,44 +437,44 @@ int tpmfront_send(struct tpmfront_dev* dev, const uint8_t* msg, size_t length) > } > int tpmfront_recv(struct tpmfront_dev* dev, uint8_t** msg, size_t *length) > { > - tpmif_tx_request_t* tx; > - int i; > + unsigned int offset; > + vtpm_shared_page_t* shr = NULL; > +#ifdef TPMFRONT_PRINT_DEBUG > +int i; > +#endif > if(dev == NULL || dev->state != XenbusStateConnected) { > TPMFRONT_ERR("Tried to receive message from disconnected frontend\n"); > return -1; > } > /*Wait for the response */ > wait_event(dev->waitq, (!dev->waiting)); > + shr = dev->page; > > /* Initialize */ > *msg = NULL; > *length = 0; > + offset = sizeof(*shr); > > - /* special case, just quit */ > - tx = &dev->tx->ring[0].req; > - if(tx->size == 0 ) { > - goto quit; > - } > - /* Get the total size */ > - tx = &dev->tx->ring[0].req; > - for(i = 0; i < TPMIF_TX_RING_SIZE && tx->size > 0; ++i) { > - tx = &dev->tx->ring[i].req; > - *length += tx->size; > + if (shr->state != VTPM_STATE_FINISH) > + goto quit; > + > + *length = shr->length; > + > + if (*length + offset > PAGE_SIZE) { > + TPMFRONT_ERR("Reply too long for shared page\n"); > + return -1; > } > + > /* Alloc the buffer */ > if(dev->respbuf) { > free(dev->respbuf); > } > *msg = dev->respbuf = malloc(*length); > dev->resplen = *length; > + > /* Copy the bits */ > - tx = &dev->tx->ring[0].req; > - for(i = 0; i < TPMIF_TX_RING_SIZE && tx->size > 0; ++i) { > - tx = &dev->tx->ring[i].req; > - memcpy(&(*msg)[i * PAGE_SIZE], dev->pages[i], tx->size); > - 
gnttab_end_access(tx->ref); > - tx->ref = 0; > - } > + memcpy(*msg, offset + (uint8_t*)shr, *length); > + > #ifdef TPMFRONT_PRINT_DEBUG > TPMFRONT_DEBUG("Received response from backend size=%u", (unsigned int) *length); > for(i = 0; i < *length; ++i) { > @@ -504,6 +507,14 @@ int tpmfront_cmd(struct tpmfront_dev* dev, uint8_t* req, size_t reqlen, uint8_t* > return 0; > } > > +int tpmfront_set_locality(struct tpmfront_dev* dev, int locality) > +{ > + if (!dev || !dev->page) > + return -1; > + dev->page->locality = locality; > + return 0; > +} > + > #ifdef HAVE_LIBC > #include <errno.h> > int tpmfront_open(struct tpmfront_dev* dev) > diff --git a/xen/include/public/io/tpmif.h b/xen/include/public/io/tpmif.h > index fca2c4e..afa4324 100644 > --- a/xen/include/public/io/tpmif.h > +++ b/xen/include/public/io/tpmif.h > @@ -64,6 +64,34 @@ struct tpmif_tx_interface { > }; > typedef struct tpmif_tx_interface tpmif_tx_interface_t; > > +/****************************************************************************** > + * TPM I/O interface for Xen guest OSes, v2 > + * > + * Author: Daniel De Graaf <dgdegra@tycho.nsa.gov> > + * > + */ > + > +enum vtpm_state { > + VTPM_STATE_IDLE, /* no contents / vTPM idle / cancel complete */ > + VTPM_STATE_SUBMIT, /* request ready / vTPM working */ > + VTPM_STATE_FINISH, /* response ready / vTPM idle */ > + VTPM_STATE_CANCEL, /* cancel requested / vTPM working */ > +}; > +/* The backend should only change state to IDLE or FINISH, while the > + * frontend should only change to SUBMIT or CANCEL. 
*/ > + > +struct vtpm_shared_page { > + uint32_t length; /* request/response length in bytes */ > + > + uint8_t state; /* enum vtpm_state */ > + uint8_t locality; /* for the current request */ > + uint8_t pad; /* should be zero */ > + > + uint8_t nr_extra_pages; /* extra pages for long packets; may be zero */ > + uint32_t extra_pages[0]; /* grant IDs; length is actually nr_extra_pages */ > +}; > +typedef struct vtpm_shared_page vtpm_shared_page_t; > + > #endif > > /* > -- > 1.8.1.4 >
Daniel De Graaf
2013-Apr-11 14:11 UTC
Re: [PATCH v5.1 01/12] mini-os/tpm{back, front}: Change shared page ABI
On 04/11/2013 10:00 AM, Ian Campbell wrote:> Is this in the wrong thread and should be in "[PATCH v5 00/12] vTPM > updates for 4.3" as an update to > <1363896689-11086-2-git-send-email-dgdegra@tycho.nsa.gov> ? >Yes, this is an update to that patch and should have also been sent in reply to that thread.> On Fri, 2013-03-22 at 22:30 +0000, Daniel De Graaf wrote: >> This changes the vTPM shared page ABI from a copy of the Xen network >> interface to a single-page interface that better reflects the expected >> behavior of a TPM: only a single request packet can be sent at any given >> time, and every packet sent generates a single response packet. This >> protocol change should also increase efficiency as it avoids mapping and >> unmapping grants when possible. The vtpm xenbus device now requires a >> feature-protocol-v2 node in xenstore to avoid conflicts with existing >> (xen-patched) kernels supporting the old interface. >> >> While the contents of the shared page have been defined to allow packets >> larger than a single page (actually 4088 bytes) by allowing the client >> to add extra grant references, the mapping of these extra references has >> not been implemented; a feature node in xenstore may be used in the >> future to indicate full support for the multi-page protocol. Most uses >> of the TPM should not require this feature. >> >> Signed-off-by: Daniel De Graaf <dgdegra@tycho.nsa.gov> >> Cc: Jan Beulich <JBeulich@suse.com> >> >> --- >> >> Changes from v5: handle xenbus states properly, and keep the old header >> contents intact. >> >> Changes from v4: use feature-protocol-v2 nodes instead of changing the >> device name; add command cancellation support to the protocol. >>
Ian Campbell
2013-Apr-11 14:14 UTC
Re: [PATCH v5.1 01/12] mini-os/tpm{back, front}: Change shared page ABI
On Fri, 2013-03-22 at 22:30 +0000, Daniel De Graaf wrote:> @@ -529,15 +526,27 @@ void free_tpmif(tpmif_t* tpmif) > void tpmback_handler(evtchn_port_t port, struct pt_regs *regs, void *data) > { > tpmif_t* tpmif = (tpmif_t*) data; > - tpmif_tx_request_t* tx = &tpmif->tx->ring[0].req; > - /* Throw away 0 size events, these can trigger from event channel unmasking */ > - if(tx->size == 0) > - return; > - > - TPMBACK_DEBUG("EVENT CHANNEL FIRE %u/%u\n", (unsigned int) tpmif->domid, tpmif->handle); > - tpmif_req_ready(tpmif); > - wake_up(&waitq); > + vtpm_shared_page_t* pg = tpmif->page; >Do we not need a barrier somewhere around here to ensure that the far end's write to pg->state is visible to this cpu? The writer does: write all fields apart from state barrier() write state. no need for a barrier at the end of that lot either?> + switch (pg->state) > + { > + case VTPM_STATE_SUBMIT: > + TPMBACK_DEBUG("EVENT CHANNEL FIRE %u/%u\n", (unsigned int) tpmif->domid, tpmif->handle); > + tpmif_req_ready(tpmif); > + wake_up(&waitq); > + break; > + case VTPM_STATE_CANCEL: > + /* If we are busy with a request, do nothing */ > + if (tpmif->flags & TPMIF_REQ_READY) > + return; > + /* Acknowledge the cancellation if we are idle */ > + pg->state = VTPM_STATE_IDLE; > + notify_remote_via_evtchn(tpmif->evtchn); > + return; > + default: > + /* Spurious wakeup; do nothing */ > + return; > + } > } >
Ian Campbell
2013-Apr-11 14:17 UTC
Re: [PATCH RFC] drivers/tpm: add xen tpmfront interface
On Fri, 2013-03-22 at 22:28 +0000, Daniel De Graaf wrote:> This is a complete rewrite of the Xen TPM frontend driver, taking > advantage of a simplified frontend/backend interface and adding support > for cancellation and timeouts. > > Signed-off-by: Daniel De Graaf <dgdegra@tycho.nsa.gov> > --- > > This patch does not include the v1 interfaces from linux-2.6.18-xen.hg > in include/xen/interface/io/tpmif.h, since they do not pass checkpatch > due to their use of typedefs, are not 32/64-bit clean, and are not used > in this patch. > > This patch relies on the mini-os patch to tpmback/front: > > [PATCH v5.1 01/12] mini-os/tpm{back,front}: Change shared page ABI > > which should be sent as a reply.I think the Xen side and the Linux side can be committed independently though, right? Did you want to CC LKML and the tpm subsystem maintainers there?
Ian Campbell
2013-Apr-11 14:27 UTC
Re: [PATCH v5.1 01/12] mini-os/tpm{back, front}: Change shared page ABI
On Fri, 2013-03-22 at 22:30 +0000, Daniel De Graaf wrote:> > +struct vtpm_shared_page { > + uint32_t length; /* request/response length in bytes */The data is inline immediately after this struct? How does it interact with the extra_pages stuff?> + > + uint8_t state; /* enum vtpm_state */ > + uint8_t locality; /* for the current request */I've had a look at the 7/12 and 10/12 and I'm still not sure how this byte is used -- it's looked up in the XSM label as a string but how does it become a uint8_t agreed by both the front and backend? Could we perhaps get a few more words on the protocol in general? Or have I missed some existing doc? What is the format of the payload, is it defined by some independent TPM standard?> + uint8_t pad; /* should be zero */ > + > + uint8_t nr_extra_pages; /* extra pages for long packets; may be zero */ > + uint32_t extra_pages[0]; /* grant IDs; length is actually nr_extra_pages */Not actually used AFAICT? Future expansion I presume?> +}; > +typedef struct vtpm_shared_page vtpm_shared_page_t;
Daniel De Graaf
2013-Apr-11 14:38 UTC
Re: [PATCH RFC] drivers/tpm: add xen tpmfront interface
On 04/11/2013 10:17 AM, Ian Campbell wrote:> On Fri, 2013-03-22 at 22:28 +0000, Daniel De Graaf wrote: >> This is a complete rewrite of the Xen TPM frontend driver, taking >> advantage of a simplified frontend/backend interface and adding support >> for cancellation and timeouts. >> >> Signed-off-by: Daniel De Graaf <dgdegra@tycho.nsa.gov> >> --- >> >> This patch does not include the v1 interfaces from linux-2.6.18-xen.hg >> in include/xen/interface/io/tpmif.h, since they do not pass checkpatch >> due to their use of typedefs, are not 32/64-bit clean, and are not used >> in this patch. >> >> This patch relies on the mini-os patch to tpmback/front: >> >> [PATCH v5.1 01/12] mini-os/tpm{back,front}: Change shared page ABI >> >> which should be sent as a reply. > > I think the Xen side and the Linux side can be committed independently > though, right? Did you want to CC LKML and the tpm subsystem maintainers > there? >I did CC tpmdd-devel on the non-RFC version of this patch (sent on the 26th) although I neglected to CC LKML. I agree that the Xen and Linux sides can be committed independently; I will send a v2 of this patch out soon with one additional change from the v1. -- Daniel De Graaf National Security Agency
Daniel De Graaf
2013-Apr-11 14:38 UTC
Re: [PATCH v5.1 01/12] mini-os/tpm{back, front}: Change shared page ABI
On 04/11/2013 10:14 AM, Ian Campbell wrote:> On Fri, 2013-03-22 at 22:30 +0000, Daniel De Graaf wrote: >> @@ -529,15 +526,27 @@ void free_tpmif(tpmif_t* tpmif) >> void tpmback_handler(evtchn_port_t port, struct pt_regs *regs, void *data) >> { >> tpmif_t* tpmif = (tpmif_t*) data; >> - tpmif_tx_request_t* tx = &tpmif->tx->ring[0].req; >> - /* Throw away 0 size events, these can trigger from event channel unmasking */ >> - if(tx->size == 0) >> - return; >> - >> - TPMBACK_DEBUG("EVENT CHANNEL FIRE %u/%u\n", (unsigned int) tpmif->domid, tpmif->handle); >> - tpmif_req_ready(tpmif); >> - wake_up(&waitq); >> + vtpm_shared_page_t* pg = tpmif->page; >> > > Do we not need a barrier somewhere around here to ensure that the far > end's write to pg->state is visible to this cpu?The frontend's write to pg->state is always done prior to the frontend sending its event channel notification, so an explicit barrier is not needed in this function. There is only one read and a clear dependency on the one write, so I'm not sure where the barrier here would need to go even if it was needed. We might need a barrier in send_response between the memcpy and setting the state to VTPM_STATE_FINISH. It so happens that the existing code includes a call to local_irq_save which includes barrier(), making the code technically safe - but an explicit barrier would clarify this and avoid potential bugs introduced by moving tpmif_req_finished() around.> The writer does: > write all fields apart from state > barrier() > write state. > > no need for a barrier at the end of that lot either? 
> >> + switch (pg->state) >> + { >> + case VTPM_STATE_SUBMIT: >> + TPMBACK_DEBUG("EVENT CHANNEL FIRE %u/%u\n", (unsigned int) tpmif->domid, tpmif->handle); >> + tpmif_req_ready(tpmif); >> + wake_up(&waitq); >> + break; >> + case VTPM_STATE_CANCEL: >> + /* If we are busy with a request, do nothing */ >> + if (tpmif->flags & TPMIF_REQ_READY) >> + return; >> + /* Acknowledge the cancellation if we are idle */ >> + pg->state = VTPM_STATE_IDLE; >> + notify_remote_via_evtchn(tpmif->evtchn); >> + return; >> + default: >> + /* Spurious wakeup; do nothing */ >> + return; >> + } >> } >> >-- Daniel De Graaf National Security Agency
Daniel De Graaf
2013-Apr-11 15:10 UTC
Re: [PATCH v5.1 01/12] mini-os/tpm{back,front}: Change shared page ABI
On 04/11/2013 10:27 AM, Ian Campbell wrote:> On Fri, 2013-03-22 at 22:30 +0000, Daniel De Graaf wrote: >> >> +struct vtpm_shared_page { >> + uint32_t length; /* request/response length in bytes */ > > The data is inline immediately after this struct? How does it interact > with the extra_pages stuff?The data follows immediately after the extra_pages array whose size is visible in nr_extra_pages. The current code only supports skipping the right number of bytes if this array is filled in, it doesn't actually read or write the grant IDs.>> + >> + uint8_t state; /* enum vtpm_state */ >> + uint8_t locality; /* for the current request */ > > I've had a look at the 7/12 and 10/12 and I'm still not sure how this > byte is used -- it's looked up in the XSM label as a string but how does > it become a uint8_t agreed by both the front and backend?The frontend can set this byte (the existing Linux patch does not do so, but the v2 includes a sysfs attribute that allows you to set the locality for a given request). In the hardware TPM 1.2 interface, the TPM exposes a distinct MMIO page for each locality and the chipset provides limits on when writes to locality 4 (and possibly 3) are allowed.> Could we perhaps get a few more words on the protocol in general? Or > have I missed some existing doc?This protocol emulates the request/response behavior of a TPM using a Xen shared memory interface. All interaction with the TPM is at the direction of the frontend, since a TPM (hardware or virtual) is a passive device - the backend only processes commands as requested by the frontend. The frontend sends a request to the TPM by populating the shared page with the request packet, changing the state to VTPM_STATE_SUBMIT, and sending an event channel notification. When the backend is finished, it will set the state to VTPM_STATE_FINISH and send an event channel notification. 
In order to allow long-running commands to be canceled, the frontend can at any time change the state to VTPM_STATE_CANCEL and send a notification. The TPM can either finish the command (changing state to VTPM_STATE_FINISH) or can cancel the command and change the state to VTPM_STATE_IDLE. The TPM can also change the state to VTPM_STATE_IDLE instead of VTPM_STATE_FINISH if another reason for cancellation is required - for example, a physical TPM may cancel a command if the interface is seized by another locality. If you would like this description in the Xen tree, where is the best place to locate it? The existing docs/misc/vtpm.txt is more focused on the use of the TPM domains, not the protocol, but an additional section could be added for documenting the Xen protocol.> What is the format of the payload, is it defined by some independent TPM > standard?The payload is a TPM packet as defined by the TPM specification: http://www.trustedcomputinggroup.org/resources/tpm_main_specification (part 3 defines the packet format).>> + uint8_t pad; /* should be zero */ >> + >> + uint8_t nr_extra_pages; /* extra pages for long packets; may be zero */ >> + uint32_t extra_pages[0]; /* grant IDs; length is actually nr_extra_pages */ > > Not actually used AFAICT? Future expansion I presume?Yes. While normally TPM packets are all under 4000 bytes, the specification allows larger packets (the TPM itself defines the maximum), and future versions of the TPM specification that do not limit the TPM to 2048-bit RSA keys may require using larger packets. Some commands (such as GetRandom) can produce packets of arbitrary size, although it is reasonable for an implementation to limit what it returns so this is not a problem for the vTPM. -- Daniel De Graaf National Security Agency
Ian Campbell
2013-Apr-11 15:22 UTC
Re: [PATCH v5.1 01/12] mini-os/tpm{back,front}: Change shared page ABI
On Thu, 2013-04-11 at 16:10 +0100, Daniel De Graaf wrote:> On 04/11/2013 10:27 AM, Ian Campbell wrote: > > On Fri, 2013-03-22 at 22:30 +0000, Daniel De Graaf wrote: > >> > >> +struct vtpm_shared_page { > >> + uint32_t length; /* request/response length in bytes */ > > > > The data is inline immediately after this struct? How does it interact > > with the extra_pages stuff? > > The data follows immediately after the extra_pages array whose size is > visible in nr_extra_pages. The current code only supports skipping the > right number of bytes if this array is filled in, it doesn''t actually > read or write the grant IDs.Please can we get a comment about this in the header? e.g. netif.h says: /* * This is the ''wire'' format for packets: * Request 1: netif_tx_request -- NETTXF_* (any flags) * [Request 2: netif_tx_extra] (only if request 1 has NETTXF_extra_info) * [Request 3: netif_tx_extra] (only if request 2 has XEN_NETIF_EXTRA_MORE) * Request 4: netif_tx_request -- NETTXF_more_data * Request 5: netif_tx_request -- NETTXF_more_data * ... * Request N: netif_tx_request -- 0 */ An equivalent diagram for tpm would be useful.> >> + > >> + uint8_t state; /* enum vtpm_state */ > >> + uint8_t locality; /* for the current request */ > > > > I''ve had a look at the 7/12 and 10/12 and I''m still not sure how this > > byte is used -- it''s looked up in the XSM label as a string but how does > > it become a uint8_t agreed by both the front and backend? > > The frontend can set this byte (the existing Linux patch does not do so, > but the v2 includes a sysfs attribute that allows you to set the locality > for a given request). In the hardware TPM 1.2 interface, the TPM exposes > a distinct MMIO page for each locality and the chipset provides limits on > when writes to locality 4 (and possibly 3) are allowed.These localities are defined by the TPM spec?> > Could we perhaps get a few more words on the protocol in general? Or > > have I missed some existing doc? 
> > This protocol emulates the request/response behavior of a TPM using a Xen > shared memory interface. All interaction with the TPM is at the direction > of the frontend, since a TPM (hardware or virtual) is a passive device - > the backend only processes commands as requested by the frontend. > > The frontend sends a request to the TPM by populating the shared page with > the request packet, changing the state to VTPM_STATE_SUBMIT, and sending > and event channel notification. When the backend is finished, it will set > the state to VTPM_STATE_FINISH and send an event channel notification. > > In order to allow long-running commands to be canceled, the frontend can > at any time change the state to VTPM_STATE_CANCEL and send a notification. > The TPM can either finish the command (changing state to VTPM_STATE_FINISH) > or can cancel the command and change the state to VTPM_STATE_IDLE. The TPM > can also change the state to VTPM_STATE_IDLE instead of VTPM_STATE_FINISH > if another reason for cancellation is required - for example, a physical > TPM may cancel a command if the interface is seized by another locality.Understood, thanks.> If you would like this description in the Xen tree, where is the best place > to locate it? The existing docs/misc/vtpm.txt is more focused on the use of > the TPM domains, not the protocol, but an additional section could be added > for documenting the Xen protocol.Having all the info you gave in this mail in the vtpm.h header would be good.> > What is the format of the payload, is it defined by some independent TPM > > standard? 
> > The payload is a TPM packet as defined by the TPM specification: > http://www.trustedcomputinggroup.org/resources/tpm_main_specification > (part 3 defines the packet format).Can you add this link to the header please?> > >> + uint8_t pad; /* should be zero */ > >> + > >> + uint8_t nr_extra_pages; /* extra pages for long packets; may be zero */ > >> + uint32_t extra_pages[0]; /* grant IDs; length is actually nr_extra_pages */ > > > > Not actually used AFAICT? Future expansion I presume? > > Yes. While normally TPM packets are all under 4000 bytes, the specification allows > larger packets (the TPM itself defines the maximum), and future versions of the > TPM specification that do not limit the TPM to 2048-bit RSA keys may require using > larger packets. Some commands (such as GetRandom) can produce packets of arbitrary > size, although it is reasonable for an implementation to limit what it returns so > this is not a problem for the vTPM.OK. Thanks, Ian.
Ian Campbell
2013-Apr-11 15:25 UTC
Re: [PATCH v5.1 01/12] mini-os/tpm{back,front}: Change shared page ABI
On Thu, 2013-04-11 at 15:38 +0100, Daniel De Graaf wrote:> On 04/11/2013 10:14 AM, Ian Campbell wrote: > > On Fri, 2013-03-22 at 22:30 +0000, Daniel De Graaf wrote: > >> @@ -529,15 +526,27 @@ void free_tpmif(tpmif_t* tpmif) > >> void tpmback_handler(evtchn_port_t port, struct pt_regs *regs, void *data) > >> { > >> tpmif_t* tpmif = (tpmif_t*) data; > >> - tpmif_tx_request_t* tx = &tpmif->tx->ring[0].req; > >> - /* Throw away 0 size events, these can trigger from event channel unmasking */ > >> - if(tx->size == 0) > >> - return; > >> - > >> - TPMBACK_DEBUG("EVENT CHANNEL FIRE %u/%u\n", (unsigned int) tpmif->domid, tpmif->handle); > >> - tpmif_req_ready(tpmif); > >> - wake_up(&waitq); > >> + vtpm_shared_page_t* pg = tpmif->page; > >> > > > > Do we not need a barrier somewhere around here to ensure that the far > > end''s write to pg->state is visible to this cpu? > > The frontend''s write to pg->state is always done prior to the frontend > sending its event channel notification, so an explicit barrier is not > needed in this function.An event channel notification might happen to include barriers as part of its implementation but does the interface make any guarantees?> Since there is only one read and a clear > dependency on the one write, so I''m not sure where the barrier here > would need to go even if it was needed.DOMU DOM_TPM PRE: pg->status == IDLE write data barrier() write pg->status = SUBMIT (A) notify evtchn receive evtchn read pg->status (B) do some stuff with the data write pg->status (C) Don''t we need some sort of memory barrier (as in rmb/wmb/mb not a compiler barrier()) between (A) and (B), to ensure that B sees the write at A and gets SUBMIT and not the previous value of IDLE? I wasn''t thinking about barriers between the read at (B) and the write at (C), although now that you mention it a barrier might be needed *after* (C) so that the domU sees the vTPM as IDLE next time it come to use it... Ian.
Daniel De Graaf
2013-Apr-11 15:46 UTC
Re: [PATCH v5.1 01/12] mini-os/tpm{back,front}: Change shared page ABI
On 04/11/2013 11:25 AM, Ian Campbell wrote:> On Thu, 2013-04-11 at 15:38 +0100, Daniel De Graaf wrote: >> On 04/11/2013 10:14 AM, Ian Campbell wrote: >>> On Fri, 2013-03-22 at 22:30 +0000, Daniel De Graaf wrote: >>>> @@ -529,15 +526,27 @@ void free_tpmif(tpmif_t* tpmif) >>>> void tpmback_handler(evtchn_port_t port, struct pt_regs *regs, void *data) >>>> { >>>> tpmif_t* tpmif = (tpmif_t*) data; >>>> - tpmif_tx_request_t* tx = &tpmif->tx->ring[0].req; >>>> - /* Throw away 0 size events, these can trigger from event channel unmasking */ >>>> - if(tx->size == 0) >>>> - return; >>>> - >>>> - TPMBACK_DEBUG("EVENT CHANNEL FIRE %u/%u\n", (unsigned int) tpmif->domid, tpmif->handle); >>>> - tpmif_req_ready(tpmif); >>>> - wake_up(&waitq); >>>> + vtpm_shared_page_t* pg = tpmif->page; >>>> >>> >>> Do we not need a barrier somewhere around here to ensure that the far >>> end''s write to pg->state is visible to this cpu? >> >> The frontend''s write to pg->state is always done prior to the frontend >> sending its event channel notification, so an explicit barrier is not >> needed in this function. > > An event channel notification might happen to include barriers as part > of its implementation but does the interface make any guarantees?Since it looks like other mini-os drivers use wmb() before their event channel notifications, I''ll add that to be consistent.>> Since there is only one read and a clear >> dependency on the one write, so I''m not sure where the barrier here >> would need to go even if it was needed. > > DOMU DOM_TPM > PRE: pg->status == IDLE > write data > barrier() > write pg->status = SUBMIT (A) > notify evtchn > > receive evtchn > read pg->status (B) > do some stuff with the data > write pg->status (C) > > Don''t we need some sort of memory barrier (as in rmb/wmb/mb not a > compiler barrier()) between (A) and (B), to ensure that B sees the write > at A and gets SUBMIT and not the previous value of IDLE? 
> > I wasn''t thinking about barriers between the read at (B) and the write > at (C), although now that you mention it a barrier might be needed > *after* (C) so that the domU sees the vTPM as IDLE next time it come to > use it... > > Ian.-- Daniel De Graaf National Security Agency
Daniel De Graaf
2013-Apr-11 16:18 UTC
Re: [PATCH v5.1 01/12] mini-os/tpm{back,front}: Change shared page ABI
On 04/11/2013 11:22 AM, Ian Campbell wrote:> On Thu, 2013-04-11 at 16:10 +0100, Daniel De Graaf wrote: >> On 04/11/2013 10:27 AM, Ian Campbell wrote: >>> On Fri, 2013-03-22 at 22:30 +0000, Daniel De Graaf wrote: >>>> >>>> +struct vtpm_shared_page { >>>> + uint32_t length; /* request/response length in bytes */ >>> >>> The data is inline immediately after this struct? How does it interact >>> with the extra_pages stuff? >> >> The data follows immediately after the extra_pages array whose size is >> visible in nr_extra_pages. The current code only supports skipping the >> right number of bytes if this array is filled in, it doesn''t actually >> read or write the grant IDs. > > Please can we get a comment about this in the header? e.g. netif.h says: > /* > * This is the ''wire'' format for packets: > * Request 1: netif_tx_request -- NETTXF_* (any flags) > * [Request 2: netif_tx_extra] (only if request 1 has NETTXF_extra_info) > * [Request 3: netif_tx_extra] (only if request 2 has XEN_NETIF_EXTRA_MORE) > * Request 4: netif_tx_request -- NETTXF_more_data > * Request 5: netif_tx_request -- NETTXF_more_data > * ... > * Request N: netif_tx_request -- 0 > */ > > An equivalent diagram for tpm would be useful.OK>>>> + >>>> + uint8_t state; /* enum vtpm_state */ >>>> + uint8_t locality; /* for the current request */ >>> >>> I''ve had a look at the 7/12 and 10/12 and I''m still not sure how this >>> byte is used -- it''s looked up in the XSM label as a string but how does >>> it become a uint8_t agreed by both the front and backend? >> >> The frontend can set this byte (the existing Linux patch does not do so, >> but the v2 includes a sysfs attribute that allows you to set the locality >> for a given request). In the hardware TPM 1.2 interface, the TPM exposes >> a distinct MMIO page for each locality and the chipset provides limits on >> when writes to locality 4 (and possibly 3) are allowed. 
> > These localities are defined by the TPM spec?Yes, although the fact that 3 and 4 are special is specific to the PC Client specification - http://www.trustedcomputinggroup.org/resources/pc_client_work_group_pc_client_specific_tpm_interface_specification_tis While localities are currently restricted to the values 0-4, a future spec release will allow virtual TPMs to define and use higher locality numbers, although the locality number will always be limited to a single byte due to existing data structure sizes.>>> Could we perhaps get a few more words on the protocol in general? Or >>> have I missed some existing doc? >> >> This protocol emulates the request/response behavior of a TPM using a Xen >> shared memory interface. All interaction with the TPM is at the direction >> of the frontend, since a TPM (hardware or virtual) is a passive device - >> the backend only processes commands as requested by the frontend. >> >> The frontend sends a request to the TPM by populating the shared page with >> the request packet, changing the state to VTPM_STATE_SUBMIT, and sending >> and event channel notification. When the backend is finished, it will set >> the state to VTPM_STATE_FINISH and send an event channel notification. >> >> In order to allow long-running commands to be canceled, the frontend can >> at any time change the state to VTPM_STATE_CANCEL and send a notification. >> The TPM can either finish the command (changing state to VTPM_STATE_FINISH) >> or can cancel the command and change the state to VTPM_STATE_IDLE. The TPM >> can also change the state to VTPM_STATE_IDLE instead of VTPM_STATE_FINISH >> if another reason for cancellation is required - for example, a physical >> TPM may cancel a command if the interface is seized by another locality. > > Understood, thanks. > >> If you would like this description in the Xen tree, where is the best place >> to locate it? 
The existing docs/misc/vtpm.txt is more focused >> on the use of >> the TPM domains, not the protocol, but an additional section could be added >> for documenting the Xen protocol. > > Having all the info you gave in this mail in the vtpm.h header would be > good. OK>>> What is the format of the payload, is it defined by some independent TPM >>> standard? >> >> The payload is a TPM packet as defined by the TPM specification: >> http://www.trustedcomputinggroup.org/resources/tpm_main_specification >> (part 3 defines the packet format). > > Can you add this link to the header please? Yep.>> >>>> + uint8_t pad; /* should be zero */ >>>> + >>>> + uint8_t nr_extra_pages; /* extra pages for long packets; may be zero */ >>>> + uint32_t extra_pages[0]; /* grant IDs; length is actually nr_extra_pages */ >>> >>> Not actually used AFAICT? Future expansion I presume? >> >> Yes. While normally TPM packets are all under 4000 bytes, the specification allows >> larger packets (the TPM itself defines the maximum), and future versions of the >> TPM specification that do not limit the TPM to 2048-bit RSA keys may require using >> larger packets. Some commands (such as GetRandom) can produce packets of arbitrary >> size, although it is reasonable for an implementation to limit what it returns so >> this is not a problem for the vTPM. > > OK. Thanks, > > Ian. > I'll send v5.2 soon. -- Daniel De Graaf National Security Agency
Daniel De Graaf
2013-Apr-11 16:20 UTC
[PATCH v5.2] mini-os/tpm{back,front}: Change shared page ABI
This changes the vTPM shared page ABI from a copy of the Xen network interface to a single-page interface that better reflects the expected behavior of a TPM: only a single request packet can be sent at any given time, and every packet sent generates a single response packet. This protocol change should also increase efficiency as it avoids mapping and unmapping grants when possible. The vtpm xenbus device now requires a feature-protocol-v2 node in xenstore to avoid conflicts with existing (xen-patched) kernels supporting the old interface. While the contents of the shared page have been defined to allow packets larger than a single page (actually 4088 bytes) by allowing the client to add extra grant references, the mapping of these extra references has not been implemented; a feature node in xenstore may be used in the future to indicate full support for the multi-page protocol. Most uses of the TPM should not require this feature. Signed-off-by: Daniel De Graaf <dgdegra@tycho.nsa.gov> Cc: Jan Beulich <JBeulich@suse.com> --- Changes from v5.1: add memory barriers and document the protocol Changes from v5: fix XenbusStateInitialised handling Changes from v4: use feature-protocol-v2 nodes instead of changing the device name; add command cancellation support to the protocol. 
extras/mini-os/include/tpmback.h | 1 + extras/mini-os/include/tpmfront.h | 7 +- extras/mini-os/tpmback.c | 166 +++++++++++++++++++------------------- extras/mini-os/tpmfront.c | 144 ++++++++++++++++++--------------- xen/include/public/io/tpmif.h | 66 +++++++++++++++ 5 files changed, 232 insertions(+), 152 deletions(-) diff --git a/extras/mini-os/include/tpmback.h b/extras/mini-os/include/tpmback.h index ff86732..ec9eda4 100644 --- a/extras/mini-os/include/tpmback.h +++ b/extras/mini-os/include/tpmback.h @@ -43,6 +43,7 @@ struct tpmcmd { domid_t domid; /* Domid of the frontend */ + uint8_t locality; /* Locality requested by the frontend */ unsigned int handle; /* Handle of the frontend */ unsigned char uuid[16]; /* uuid of the tpm interface */ diff --git a/extras/mini-os/include/tpmfront.h b/extras/mini-os/include/tpmfront.h index fd2cb17..a0c7c4d 100644 --- a/extras/mini-os/include/tpmfront.h +++ b/extras/mini-os/include/tpmfront.h @@ -37,9 +37,7 @@ struct tpmfront_dev { grant_ref_t ring_ref; evtchn_port_t evtchn; - tpmif_tx_interface_t* tx; - - void** pages; + vtpm_shared_page_t *page; domid_t bedomid; char* nodename; @@ -77,6 +75,9 @@ void shutdown_tpmfront(struct tpmfront_dev* dev); * */ int tpmfront_cmd(struct tpmfront_dev* dev, uint8_t* req, size_t reqlen, uint8_t** resp, size_t* resplen); +/* Set the locality used for communicating with a vTPM */ +int tpmfront_set_locality(struct tpmfront_dev* dev, int locality); + #ifdef HAVE_LIBC #include <sys/stat.h> /* POSIX IO functions: diff --git a/extras/mini-os/tpmback.c b/extras/mini-os/tpmback.c index 658fed1..d68ad2b 100644 --- a/extras/mini-os/tpmback.c +++ b/extras/mini-os/tpmback.c @@ -86,10 +86,7 @@ struct tpmif { evtchn_port_t evtchn; /* Shared page */ - tpmif_tx_interface_t* tx; - - /* pointer to TPMIF_RX_RING_SIZE pages */ - void** pages; + vtpm_shared_page_t *page; enum xenbus_state state; enum { DISCONNECTED, DISCONNECTING, CONNECTED } status; @@ -266,6 +263,7 @@ int insert_tpmif(tpmif_t* tpmif) unsigned 
int i, j; tpmif_t* tmp; char* err; + char path[512]; local_irq_save(flags); @@ -303,6 +301,16 @@ int insert_tpmif(tpmif_t* tpmif) local_irq_restore(flags); + snprintf(path, 512, "backend/vtpm/%u/%u/feature-protocol-v2", (unsigned int) tpmif->domid, tpmif->handle); + if ((err = xenbus_write(XBT_NIL, path, "1"))) + { + /* if we got an error here we should carefully remove the interface and then return */ + TPMBACK_ERR("Unable to write feature-protocol-v2 node: %s\n", err); + free(err); + remove_tpmif(tpmif); + goto error_post_irq; + } + /*Listen for state changes on the new interface */ if((err = xenbus_watch_path_token(XBT_NIL, tpmif->fe_state_path, tpmif->fe_state_path, >pmdev.events))) { @@ -312,7 +320,6 @@ int insert_tpmif(tpmif_t* tpmif) remove_tpmif(tpmif); goto error_post_irq; } - return 0; error: local_irq_restore(flags); @@ -386,8 +393,7 @@ inline tpmif_t* __init_tpmif(domid_t domid, unsigned int handle) tpmif->fe_state_path = NULL; tpmif->state = XenbusStateInitialising; tpmif->status = DISCONNECTED; - tpmif->tx = NULL; - tpmif->pages = NULL; + tpmif->page = NULL; tpmif->flags = 0; memset(tpmif->uuid, 0, sizeof(tpmif->uuid)); return tpmif; @@ -395,9 +401,6 @@ inline tpmif_t* __init_tpmif(domid_t domid, unsigned int handle) void __free_tpmif(tpmif_t* tpmif) { - if(tpmif->pages) { - free(tpmif->pages); - } if(tpmif->fe_path) { free(tpmif->fe_path); } @@ -430,12 +433,6 @@ tpmif_t* new_tpmif(domid_t domid, unsigned int handle) goto error; } - /* allocate pages to be used for shared mapping */ - if((tpmif->pages = malloc(sizeof(void*) * TPMIF_TX_RING_SIZE)) == NULL) { - goto error; - } - memset(tpmif->pages, 0, sizeof(void*) * TPMIF_TX_RING_SIZE); - if(tpmif_change_state(tpmif, XenbusStateInitWait)) { goto error; } @@ -486,7 +483,7 @@ void free_tpmif(tpmif_t* tpmif) tpmif->status = DISCONNECTING; mask_evtchn(tpmif->evtchn); - if(gntmap_munmap(>pmdev.map, (unsigned long)tpmif->tx, 1)) { + if(gntmap_munmap(>pmdev.map, (unsigned long)tpmif->page, 1)) { 
TPMBACK_ERR("%u/%u Error occured while trying to unmap shared page\n", (unsigned int) tpmif->domid, tpmif->handle); } @@ -529,15 +526,28 @@ void free_tpmif(tpmif_t* tpmif) void tpmback_handler(evtchn_port_t port, struct pt_regs *regs, void *data) { tpmif_t* tpmif = (tpmif_t*) data; - tpmif_tx_request_t* tx = &tpmif->tx->ring[0].req; - /* Throw away 0 size events, these can trigger from event channel unmasking */ - if(tx->size == 0) - return; - - TPMBACK_DEBUG("EVENT CHANNEL FIRE %u/%u\n", (unsigned int) tpmif->domid, tpmif->handle); - tpmif_req_ready(tpmif); - wake_up(&waitq); + vtpm_shared_page_t* pg = tpmif->page; + switch (pg->state) + { + case VTPM_STATE_SUBMIT: + TPMBACK_DEBUG("EVENT CHANNEL FIRE %u/%u\n", (unsigned int) tpmif->domid, tpmif->handle); + tpmif_req_ready(tpmif); + wake_up(&waitq); + break; + case VTPM_STATE_CANCEL: + /* If we are busy with a request, do nothing */ + if (tpmif->flags & TPMIF_REQ_READY) + return; + /* Acknowledge the cancellation if we are idle */ + pg->state = VTPM_STATE_IDLE; + wmb(); + notify_remote_via_evtchn(tpmif->evtchn); + return; + default: + /* Spurious wakeup; do nothing */ + return; + } } /* Connect to frontend */ @@ -584,12 +594,25 @@ int connect_fe(tpmif_t* tpmif) } free(value); + /* Check that protocol v2 is being used */ + snprintf(path, 512, "%s/feature-protocol-v2", tpmif->fe_path); + if((err = xenbus_read(XBT_NIL, path, &value))) { + TPMBACK_ERR("Unable to read %s during tpmback initialization! 
error = %s\n", path, err); + free(err); + return -1; + } + if(strcmp(value, "1")) { + TPMBACK_ERR("%s has an invalid value (%s)\n", path, value); + free(value); + return -1; + } + free(value); + domid = tpmif->domid; - if((tpmif->tx = gntmap_map_grant_refs(>pmdev.map, 1, &domid, 0, &ringref, PROT_READ | PROT_WRITE)) == NULL) { + if((tpmif->page = gntmap_map_grant_refs(>pmdev.map, 1, &domid, 0, &ringref, PROT_READ | PROT_WRITE)) == NULL) { TPMBACK_ERR("Failed to map grant reference %u/%u\n", (unsigned int) tpmif->domid, tpmif->handle); return -1; } - memset(tpmif->tx, 0, PAGE_SIZE); /*Bind the event channel */ if((evtchn_bind_interdomain(tpmif->domid, evtchn, tpmback_handler, tpmif, &tpmif->evtchn))) @@ -618,7 +641,7 @@ error_post_evtchn: mask_evtchn(tpmif->evtchn); unbind_evtchn(tpmif->evtchn); error_post_map: - gntmap_munmap(>pmdev.map, (unsigned long)tpmif->tx, 1); + gntmap_munmap(>pmdev.map, (unsigned long)tpmif->page, 1); return -1; } @@ -633,9 +656,9 @@ static int frontend_changed(tpmif_t* tpmif) switch (state) { case XenbusStateInitialising: - case XenbusStateInitialised: break; + case XenbusStateInitialised: case XenbusStateConnected: if(connect_fe(tpmif)) { TPMBACK_ERR("Failed to connect to front end %u/%u\n", (unsigned int) tpmif->domid, tpmif->handle); @@ -874,6 +897,7 @@ void shutdown_tpmback(void) inline void init_tpmcmd(tpmcmd_t* tpmcmd, domid_t domid, unsigned int handle, unsigned char uuid[16]) { tpmcmd->domid = domid; + tpmcmd->locality = -1; tpmcmd->handle = handle; memcpy(tpmcmd->uuid, uuid, sizeof(tpmcmd->uuid)); tpmcmd->req = NULL; @@ -884,12 +908,12 @@ inline void init_tpmcmd(tpmcmd_t* tpmcmd, domid_t domid, unsigned int handle, un tpmcmd_t* get_request(tpmif_t* tpmif) { tpmcmd_t* cmd; - tpmif_tx_request_t* tx; - int offset; - int tocopy; - int i; - uint32_t domid; + vtpm_shared_page_t* shr; + unsigned int offset; int flags; +#ifdef TPMBACK_PRINT_DEBUG + int i; +#endif local_irq_save(flags); @@ -899,35 +923,22 @@ tpmcmd_t* get_request(tpmif_t* 
tpmif) { } init_tpmcmd(cmd, tpmif->domid, tpmif->handle, tpmif->uuid); - tx = &tpmif->tx->ring[0].req; - cmd->req_len = tx->size; + shr = tpmif->page; + cmd->req_len = shr->length; + cmd->locality = shr->locality; + offset = sizeof(*shr) + 4*shr->nr_extra_pages; + if (offset > PAGE_SIZE || offset + cmd->req_len > PAGE_SIZE) { + TPMBACK_ERR("%u/%u Command size too long for shared page!\n", (unsigned int) tpmif->domid, tpmif->handle); + goto error; + } /* Allocate the buffer */ if(cmd->req_len) { if((cmd->req = malloc(cmd->req_len)) == NULL) { goto error; } } - /* Copy the bits from the shared pages */ - offset = 0; - for(i = 0; i < TPMIF_TX_RING_SIZE && offset < cmd->req_len; ++i) { - tx = &tpmif->tx->ring[i].req; - - /* Map the page with the data */ - domid = (uint32_t)tpmif->domid; - if((tpmif->pages[i] = gntmap_map_grant_refs(>pmdev.map, 1, &domid, 0, &tx->ref, PROT_READ)) == NULL) { - TPMBACK_ERR("%u/%u Unable to map shared page during read!\n", (unsigned int) tpmif->domid, tpmif->handle); - goto error; - } - - /* do the copy now */ - tocopy = min(cmd->req_len - offset, PAGE_SIZE); - memcpy(&cmd->req[offset], tpmif->pages[i], tocopy); - offset += tocopy; - - /* release the page */ - gntmap_munmap(>pmdev.map, (unsigned long)tpmif->pages[i], 1); - - } + /* Copy the bits from the shared page(s) */ + memcpy(cmd->req, offset + (uint8_t*)shr, cmd->req_len); #ifdef TPMBACK_PRINT_DEBUG TPMBACK_DEBUG("Received Tpm Command from %u/%u of size %u", (unsigned int) tpmif->domid, tpmif->handle, cmd->req_len); @@ -958,38 +969,24 @@ error: void send_response(tpmcmd_t* cmd, tpmif_t* tpmif) { - tpmif_tx_request_t* tx; - int offset; - int i; - uint32_t domid; - int tocopy; + vtpm_shared_page_t* shr; + unsigned int offset; int flags; +#ifdef TPMBACK_PRINT_DEBUG +int i; +#endif local_irq_save(flags); - tx = &tpmif->tx->ring[0].req; - tx->size = cmd->resp_len; - - offset = 0; - for(i = 0; i < TPMIF_TX_RING_SIZE && offset < cmd->resp_len; ++i) { - tx = &tpmif->tx->ring[i].req; - - /* 
Map the page with the data */ - domid = (uint32_t)tpmif->domid; - if((tpmif->pages[i] = gntmap_map_grant_refs(>pmdev.map, 1, &domid, 0, &tx->ref, PROT_WRITE)) == NULL) { - TPMBACK_ERR("%u/%u Unable to map shared page during write!\n", (unsigned int) tpmif->domid, tpmif->handle); - goto error; - } - - /* do the copy now */ - tocopy = min(cmd->resp_len - offset, PAGE_SIZE); - memcpy(tpmif->pages[i], &cmd->resp[offset], tocopy); - offset += tocopy; - - /* release the page */ - gntmap_munmap(>pmdev.map, (unsigned long)tpmif->pages[i], 1); + shr = tpmif->page; + shr->length = cmd->resp_len; + offset = sizeof(*shr) + 4*shr->nr_extra_pages; + if (offset > PAGE_SIZE || offset + cmd->resp_len > PAGE_SIZE) { + TPMBACK_ERR("%u/%u Command size too long for shared page!\n", (unsigned int) tpmif->domid, tpmif->handle); + goto error; } + memcpy(offset + (uint8_t*)shr, cmd->resp, cmd->resp_len); #ifdef TPMBACK_PRINT_DEBUG TPMBACK_DEBUG("Sent response to %u/%u of size %u", (unsigned int) tpmif->domid, tpmif->handle, cmd->resp_len); @@ -1003,6 +1000,9 @@ void send_response(tpmcmd_t* cmd, tpmif_t* tpmif) #endif /* clear the ready flag and send the event channel notice to the frontend */ tpmif_req_finished(tpmif); + barrier(); + shr->state = VTPM_STATE_FINISH; + wmb(); notify_remote_via_evtchn(tpmif->evtchn); error: local_irq_restore(flags); diff --git a/extras/mini-os/tpmfront.c b/extras/mini-os/tpmfront.c index 0218d7f..9f930b5 100644 --- a/extras/mini-os/tpmfront.c +++ b/extras/mini-os/tpmfront.c @@ -47,11 +47,21 @@ void tpmfront_handler(evtchn_port_t port, struct pt_regs *regs, void *data) { struct tpmfront_dev* dev = (struct tpmfront_dev*) data; + vtpm_shared_page_t* shr = dev->page; /*If we get a response when we didnt make a request, just ignore it */ if(!dev->waiting) { return; } + switch (shr->state) { + case VTPM_STATE_FINISH: /* request was completed */ + case VTPM_STATE_IDLE: /* request was cancelled */ + break; + default: + /* Spurious wakeup; do nothing, request is still 
pending */ + return; + } + dev->waiting = 0; #ifdef HAVE_LIBC if(dev->fd >= 0) { @@ -176,7 +186,7 @@ static int wait_for_backend_state_changed(struct tpmfront_dev* dev, XenbusState ret = wait_for_backend_closed(&events, path); break; default: - break; + TPMFRONT_ERR("Bad wait state %d, ignoring\n", state); } if((err = xenbus_unwatch_path_token(XBT_NIL, path, path))) { @@ -190,13 +200,13 @@ static int tpmfront_connect(struct tpmfront_dev* dev) { char* err; /* Create shared page */ - dev->tx = (tpmif_tx_interface_t*) alloc_page(); - if(dev->tx == NULL) { + dev->page = (vtpm_shared_page_t*) alloc_page(); + if(dev->page == NULL) { TPMFRONT_ERR("Unable to allocate page for shared memory\n"); goto error; } - memset(dev->tx, 0, PAGE_SIZE); - dev->ring_ref = gnttab_grant_access(dev->bedomid, virt_to_mfn(dev->tx), 0); + memset(dev->page, 0, PAGE_SIZE); + dev->ring_ref = gnttab_grant_access(dev->bedomid, virt_to_mfn(dev->page), 0); TPMFRONT_DEBUG("grant ref is %lu\n", (unsigned long) dev->ring_ref); /*Create event channel */ @@ -228,7 +238,7 @@ error_postevtchn: unbind_evtchn(dev->evtchn); error_postmap: gnttab_end_access(dev->ring_ref); - free_page(dev->tx); + free_page(dev->page); error: return -1; } @@ -240,7 +250,6 @@ struct tpmfront_dev* init_tpmfront(const char* _nodename) char path[512]; char* value, *err; unsigned long long ival; - int i; printk("============= Init TPM Front ================\n"); @@ -279,6 +288,15 @@ struct tpmfront_dev* init_tpmfront(const char* _nodename) goto error; } + /* Publish protocol v2 feature */ + snprintf(path, 512, "%s/feature-protocol-v2", dev->nodename); + if ((err = xenbus_write(XBT_NIL, path, "1"))) + { + TPMFRONT_ERR("Unable to write feature-protocol-v2 node: %s\n", err); + free(err); + goto error; + } + /* Create and publish grant reference and event channel */ if (tpmfront_connect(dev)) { goto error; @@ -289,18 +307,19 @@ struct tpmfront_dev* init_tpmfront(const char* _nodename) goto error; } - /* Allocate pages that will contain 
the messages */ - dev->pages = malloc(sizeof(void*) * TPMIF_TX_RING_SIZE); - if(dev->pages == NULL) { + /* Ensure backend is also using protocol v2 */ + snprintf(path, 512, "%s/feature-protocol-v2", dev->bepath); + if((err = xenbus_read(XBT_NIL, path, &value))) { + TPMFRONT_ERR("Unable to read %s during tpmfront initialization! error = %s\n", path, err); + free(err); goto error; } - memset(dev->pages, 0, sizeof(void*) * TPMIF_TX_RING_SIZE); - for(i = 0; i < TPMIF_TX_RING_SIZE; ++i) { - dev->pages[i] = (void*)alloc_page(); - if(dev->pages[i] == NULL) { - goto error; - } + if(strcmp(value, "1")) { + TPMFRONT_ERR("%s has an invalid value (%s)\n", path, value); + free(value); + goto error; } + free(value); TPMFRONT_LOG("Initialization Completed successfully\n"); @@ -314,8 +333,6 @@ void shutdown_tpmfront(struct tpmfront_dev* dev) { char* err; char path[512]; - int i; - tpmif_tx_request_t* tx; if(dev == NULL) { return; } @@ -349,27 +366,12 @@ void shutdown_tpmfront(struct tpmfront_dev* dev) /* Wait for the backend to close and unmap shared pages, ignore any errors */ wait_for_backend_state_changed(dev, XenbusStateClosed); - /* Cleanup any shared pages */ - if(dev->pages) { - for(i = 0; i < TPMIF_TX_RING_SIZE; ++i) { - if(dev->pages[i]) { - tx = &dev->tx->ring[i].req; - if(tx->ref != 0) { - gnttab_end_access(tx->ref); - } - free_page(dev->pages[i]); - } - } - free(dev->pages); - } - /* Close event channel and unmap shared page */ mask_evtchn(dev->evtchn); unbind_evtchn(dev->evtchn); gnttab_end_access(dev->ring_ref); - free_page(dev->tx); - + free_page(dev->page); } /* Cleanup memory usage */ @@ -387,13 +389,17 @@ void shutdown_tpmfront(struct tpmfront_dev* dev) int tpmfront_send(struct tpmfront_dev* dev, const uint8_t* msg, size_t length) { + unsigned int offset; + vtpm_shared_page_t* shr = NULL; +#ifdef TPMFRONT_PRINT_DEBUG int i; - tpmif_tx_request_t* tx = NULL; +#endif /* Error Checking */ if(dev == NULL || dev->state != XenbusStateConnected) { TPMFRONT_ERR("Tried to 
send message through disconnected frontend\n"); return -1; } + shr = dev->page; #ifdef TPMFRONT_PRINT_DEBUG TPMFRONT_DEBUG("Sending Msg to backend size=%u", (unsigned int) length); @@ -407,19 +413,16 @@ int tpmfront_send(struct tpmfront_dev* dev, const uint8_t* msg, size_t length) #endif /* Copy to shared pages now */ - for(i = 0; length > 0 && i < TPMIF_TX_RING_SIZE; ++i) { - /* Share the page */ - tx = &dev->tx->ring[i].req; - tx->unused = 0; - tx->addr = virt_to_mach(dev->pages[i]); - tx->ref = gnttab_grant_access(dev->bedomid, virt_to_mfn(dev->pages[i]), 0); - /* Copy the bits to the page */ - tx->size = length > PAGE_SIZE ? PAGE_SIZE : length; - memcpy(dev->pages[i], &msg[i * PAGE_SIZE], tx->size); - - /* Update counters */ - length -= tx->size; + offset = sizeof(*shr); + if (length + offset > PAGE_SIZE) { + TPMFRONT_ERR("Message too long for shared page\n"); + return -1; } + memcpy(offset + (uint8_t*)shr, msg, length); + shr->length = length; + barrier(); + shr->state = VTPM_STATE_SUBMIT; + dev->waiting = 1; dev->resplen = 0; #ifdef HAVE_LIBC @@ -429,49 +432,50 @@ int tpmfront_send(struct tpmfront_dev* dev, const uint8_t* msg, size_t length) files[dev->fd].tpmfront.offset = 0; } #endif + wmb(); notify_remote_via_evtchn(dev->evtchn); return 0; } int tpmfront_recv(struct tpmfront_dev* dev, uint8_t** msg, size_t *length) { - tpmif_tx_request_t* tx; - int i; + unsigned int offset; + vtpm_shared_page_t* shr = NULL; +#ifdef TPMFRONT_PRINT_DEBUG +int i; +#endif if(dev == NULL || dev->state != XenbusStateConnected) { TPMFRONT_ERR("Tried to receive message from disconnected frontend\n"); return -1; } /*Wait for the response */ wait_event(dev->waitq, (!dev->waiting)); + shr = dev->page; /* Initialize */ *msg = NULL; *length = 0; + offset = sizeof(*shr); - /* special case, just quit */ - tx = &dev->tx->ring[0].req; - if(tx->size == 0 ) { - goto quit; - } - /* Get the total size */ - tx = &dev->tx->ring[0].req; - for(i = 0; i < TPMIF_TX_RING_SIZE && tx->size > 0; ++i) { 
- tx = &dev->tx->ring[i].req; - *length += tx->size; + if (shr->state != VTPM_STATE_FINISH) + goto quit; + + *length = shr->length; + + if (*length + offset > PAGE_SIZE) { + TPMFRONT_ERR("Reply too long for shared page\n"); + return -1; } + /* Alloc the buffer */ if(dev->respbuf) { free(dev->respbuf); } *msg = dev->respbuf = malloc(*length); dev->resplen = *length; + /* Copy the bits */ - tx = &dev->tx->ring[0].req; - for(i = 0; i < TPMIF_TX_RING_SIZE && tx->size > 0; ++i) { - tx = &dev->tx->ring[i].req; - memcpy(&(*msg)[i * PAGE_SIZE], dev->pages[i], tx->size); - gnttab_end_access(tx->ref); - tx->ref = 0; - } + memcpy(*msg, offset + (uint8_t*)shr, *length); + #ifdef TPMFRONT_PRINT_DEBUG TPMFRONT_DEBUG("Received response from backend size=%u", (unsigned int) *length); for(i = 0; i < *length; ++i) { @@ -504,6 +508,14 @@ int tpmfront_cmd(struct tpmfront_dev* dev, uint8_t* req, size_t reqlen, uint8_t* return 0; } +int tpmfront_set_locality(struct tpmfront_dev* dev, int locality) +{ + if (!dev || !dev->page) + return -1; + dev->page->locality = locality; + return 0; +} + #ifdef HAVE_LIBC #include <errno.h> int tpmfront_open(struct tpmfront_dev* dev) diff --git a/xen/include/public/io/tpmif.h b/xen/include/public/io/tpmif.h index fca2c4e..dcc5e57 100644 --- a/xen/include/public/io/tpmif.h +++ b/xen/include/public/io/tpmif.h @@ -64,6 +64,72 @@ struct tpmif_tx_interface { }; typedef struct tpmif_tx_interface tpmif_tx_interface_t; +/****************************************************************************** + * TPM I/O interface for Xen guest OSes, v2 + * + * Author: Daniel De Graaf <dgdegra@tycho.nsa.gov> + * + * This protocol emulates the request/response behavior of a TPM using a Xen + * shared memory interface. All interaction with the TPM is at the direction + * of the frontend, since a TPM (hardware or virtual) is a passive device - + * the backend only processes commands as requested by the frontend. 
+ * + * The frontend sends a request to the TPM by populating the shared page with + * the request packet, changing the state to VTPM_STATE_SUBMIT, and sending + * an event channel notification. When the backend is finished, it will set + * the state to VTPM_STATE_FINISH and send an event channel notification. + * + * In order to allow long-running commands to be canceled, the frontend can + * at any time change the state to VTPM_STATE_CANCEL and send a notification. + * The TPM can either finish the command (changing state to VTPM_STATE_FINISH) + * or can cancel the command and change the state to VTPM_STATE_IDLE. The TPM + * can also change the state to VTPM_STATE_IDLE instead of VTPM_STATE_FINISH + * if another reason for cancellation is required - for example, a physical + * TPM may cancel a command if the interface is seized by another locality. + * + * The TPM command format is defined by the TCG, and is available at + * http://www.trustedcomputinggroup.org/resources/tpm_main_specification + */ + +enum vtpm_state { + VTPM_STATE_IDLE, /* no contents / vTPM idle / cancel complete */ + VTPM_STATE_SUBMIT, /* request ready / vTPM working */ + VTPM_STATE_FINISH, /* response ready / vTPM idle */ + VTPM_STATE_CANCEL, /* cancel requested / vTPM working */ +}; +/* Note: The backend should only change state to IDLE or FINISH, while the + * frontend should only change to SUBMIT or CANCEL. Status changes do not need + * to use atomic operations. + */ + + +/* The shared page for vTPM request/response packets looks like: + * + * Offset Contents + * ================================================ + * 0 struct vtpm_shared_page + * 16 [optional] List of grant IDs + * 16+4*nr_extra_pages TPM packet data + * + * If the TPM packet data extends beyond the end of a single page, the grant IDs + * defined in extra_pages are used as if they were mapped immediately following + * the primary shared page. The grants are allocated by the frontend and mapped + * by the backend. 
Before sending a request spanning multiple pages, the + * frontend should verify that the TPM supports such large requests by querying + * the TPM_CAP_PROP_INPUT_BUFFER property from the TPM. + */ +struct vtpm_shared_page { + uint32_t length; /* request/response length in bytes */ + + uint8_t state; /* enum vtpm_state */ + uint8_t locality; /* for the current request */ + uint8_t pad; /* should be zero */ + + uint8_t nr_extra_pages; /* extra pages for long packets; may be zero */ + uint32_t extra_pages[0]; /* grant IDs; length is actually nr_extra_pages */ +}; +typedef struct vtpm_shared_page vtpm_shared_page_t; + #endif /* -- 1.8.1.4