Wei Wang
2012-Feb-28 14:21 UTC
[PATCH] amd iommu: Introduce a new lock for event and ppr logging
# HG changeset patch
# User Wei Wang <wei.wang2@amd.com>
# Date 1330438725 -3600
# Node ID 928b131ba6558387e546725fa955996434171a95
# Parent a43eeaedf61ccaf269d0823ea80d3dfa8157cc63
amd iommu: Introduce a new lock for event and ppr logging

iommu->lock is used with irq disabled, so it cannot be used to protect ppr
log. Otherwise, after c/s 24770, get_gfn will trigger a BUG() if called by
parse_ppr_log_entry(). This patch adds an additional lock to protect ppr and
event pointers in iommu_read_log().

signed-off-by: Wei Wang <wei.wang2@amd.com>

diff -r a43eeaedf61c -r 928b131ba655 xen/drivers/passthrough/amd/iommu_init.c
--- a/xen/drivers/passthrough/amd/iommu_init.c	Tue Feb 28 10:17:27 2012 +0000
+++ b/xen/drivers/passthrough/amd/iommu_init.c	Tue Feb 28 15:18:45 2012 +0100
@@ -367,6 +367,8 @@ static int iommu_read_log(struct amd_iom
     u32 tail, head, *entry, tail_offest, head_offset;
 
     BUG_ON(!iommu || ((log != &iommu->event_log) && (log != &iommu->ppr_log)));
+
+    spin_lock(&log->lock);
 
     /* make sure there's an entry in the log */
     tail_offest = ( log == &iommu->event_log ) ?
@@ -396,6 +398,8 @@ static int iommu_read_log(struct amd_iom
         writel(head, iommu->mmio_base + head_offset);
     }
 
+    spin_unlock(&log->lock);
+
     return 0;
 }
 
@@ -618,11 +622,11 @@ static void iommu_check_event_log(struct
     u32 entry;
     unsigned long flags;
 
-    spin_lock_irqsave(&iommu->lock, flags);
-
     iommu_read_log(iommu, &iommu->event_log,
                    sizeof(event_entry_t), parse_event_log_entry);
 
+    spin_lock_irqsave(&iommu->lock, flags);
+
     /*check event overflow */
     entry = readl(iommu->mmio_base + IOMMU_STATUS_MMIO_OFFSET);
 
@@ -651,14 +655,10 @@ void parse_ppr_log_entry(struct amd_iomm
     bus = PCI_BUS(device_id);
     devfn = PCI_DEVFN2(device_id);
 
-    local_irq_enable();
-
     spin_lock(&pcidevs_lock);
     pdev = pci_get_pdev(iommu->seg, bus, devfn);
     spin_unlock(&pcidevs_lock);
 
-    local_irq_disable();
-
     if ( pdev == NULL )
         return;
 
@@ -672,10 +672,10 @@ static void iommu_check_ppr_log(struct a
     u32 entry;
     unsigned long flags;
 
-    spin_lock_irqsave(&iommu->lock, flags);
-
     iommu_read_log(iommu, &iommu->ppr_log,
                    sizeof(ppr_entry_t), parse_ppr_log_entry);
+
+    spin_lock_irqsave(&iommu->lock, flags);
 
     /*check event overflow */
     entry = readl(iommu->mmio_base + IOMMU_STATUS_MMIO_OFFSET);
@@ -852,6 +852,8 @@ static void * __init allocate_ring_buffe
     ring_buf->head = 0;
     ring_buf->tail = 0;
 
+    spin_lock_init(&ring_buf->lock);
+
     ring_buf->alloc_size = PAGE_SIZE << get_order_from_bytes(entries *
                                                              entry_size);
     ring_buf->entries = ring_buf->alloc_size / entry_size;
diff -r a43eeaedf61c -r 928b131ba655 xen/include/asm-x86/amd-iommu.h
--- a/xen/include/asm-x86/amd-iommu.h	Tue Feb 28 10:17:27 2012 +0000
+++ b/xen/include/asm-x86/amd-iommu.h	Tue Feb 28 15:18:45 2012 +0100
@@ -65,6 +65,7 @@ struct ring_buffer {
     unsigned long alloc_size;
     uint32_t tail;
    uint32_t head;
+    spinlock_t lock; /* protect buffer pointers */
 };
 
 typedef struct iommu_cap {
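
For reference, a minimal sketch (not part of the patch, function bodies abbreviated) of the locking order that results from the hunks above: iommu_read_log() now serialises on the new per-ring log->lock without disabling interrupts, and the callers take iommu->lock only afterwards for the overflow check, so parse_ppr_log_entry() can acquire pcidevs_lock and call get_gfn() with interrupts enabled.

/* Sketch only: simplified from the patched functions above. */
static int iommu_read_log(struct amd_iommu *iommu, struct ring_buffer *log,
                          unsigned int entry_size,
                          void (*parse_func)(struct amd_iommu *, u32 *))
{
    spin_lock(&log->lock);      /* new per-ring lock, IRQs stay enabled */
    /* ... read head/tail, call parse_func() on each new entry ... */
    spin_unlock(&log->lock);
    return 0;
}

static void iommu_check_ppr_log(struct amd_iommu *iommu)
{
    unsigned long flags;

    /* drain the log before taking iommu->lock, so the parse callback may
     * take pcidevs_lock and call get_gfn() with interrupts enabled */
    iommu_read_log(iommu, &iommu->ppr_log, sizeof(ppr_entry_t),
                   parse_ppr_log_entry);

    spin_lock_irqsave(&iommu->lock, flags);
    /* ... check and clear the overflow bit in the status register ... */
    spin_unlock_irqrestore(&iommu->lock, flags);
}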