Jan,

This patch set cleans up the AMD IOMMU code in several aspects without functional changes. Please apply it to unstable.

Thanks,
Wei
Wei Wang
2011-Nov-09 15:50 UTC
[Xen-devel] [PATCH 1 of 6] amd iommu: Use pci access function to detect msi capabilities
# HG changeset patch
# User Wei Wang <wei.wang2@amd.com>
# Date 1320841109 -3600
# Node ID 4769713326a876c25bdc0f9d1f90594f90fba9c5
# Parent  452d9143687f826a30d0e56b7ba3b9783fb6bf24
amd iommu: Use pci access function to detect msi capabilities.

Signed-off-by: Wei Wang <wei.wang2@amd.com>

diff -r 452d9143687f -r 4769713326a8 xen/drivers/passthrough/amd/iommu_detect.c
--- a/xen/drivers/passthrough/amd/iommu_detect.c  Tue Nov 08 11:26:53 2011 +0100
+++ b/xen/drivers/passthrough/amd/iommu_detect.c  Wed Nov 09 13:18:29 2011 +0100
@@ -30,38 +30,19 @@
 static int __init get_iommu_msi_capabilities(
     u16 seg, u8 bus, u8 dev, u8 func, struct amd_iommu *iommu)
 {
-    int cap_ptr, cap_id;
-    u32 cap_header;
+    int pos;
     u16 control;
-    int count = 0;
 
-    cap_ptr = pci_conf_read8(seg, bus, dev, func,
-                             PCI_CAPABILITY_LIST);
+    pos = pci_find_cap_offset(seg, bus, dev, func, PCI_CAP_ID_MSI);
 
-    while ( cap_ptr >= PCI_MIN_CAP_OFFSET &&
-            count < PCI_MAX_CAP_BLOCKS )
-    {
-        cap_ptr &= PCI_CAP_PTR_MASK;
-        cap_header = pci_conf_read32(seg, bus, dev, func, cap_ptr);
-        cap_id = get_field_from_reg_u32(cap_header,
-                                        PCI_CAP_ID_MASK, PCI_CAP_ID_SHIFT);
-
-        if ( cap_id == PCI_CAP_ID_MSI )
-        {
-            iommu->msi_cap = cap_ptr;
-            break;
-        }
-        cap_ptr = get_field_from_reg_u32(cap_header,
-                                         PCI_CAP_NEXT_PTR_MASK,
-                                         PCI_CAP_NEXT_PTR_SHIFT);
-        count++;
-    }
-
-    if ( !iommu->msi_cap )
+    if ( !pos )
         return -ENODEV;
 
-    AMD_IOMMU_DEBUG("Found MSI capability block \n");
+    AMD_IOMMU_DEBUG("Found MSI capability block at 0x%x\n", pos);
+
+    iommu->msi_cap = pos;
 
     control = pci_conf_read16(seg, bus, dev, func,
-                        iommu->msi_cap + PCI_MSI_FLAGS);
+                              iommu->msi_cap + PCI_MSI_FLAGS);
     iommu->maskbit = control & PCI_MSI_FLAGS_MASKBIT;
     return 0;
 }
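[For reference, the patch replaces an open-coded walk of the PCI capability list with pci_find_cap_offset(). The sketch below shows the kind of capability-list walk such a helper performs; it is illustrative only, the CFG_*/STATUS_* constants, cfg_read* accessors and the 0x40 lower bound are assumptions for the sketch and not Xen's implementation.]

#include <stdint.h>

#define CFG_STATUS       0x06   /* PCI status register */
#define CFG_CAP_PTR      0x34   /* first capability pointer */
#define STATUS_CAP_LIST  0x10   /* "capability list present" bit */
#define MAX_CAPS         48     /* guard against malformed/looping lists */

extern uint8_t  cfg_read8(uint16_t seg, uint8_t bus, uint8_t dev, uint8_t fn, uint16_t reg);
extern uint16_t cfg_read16(uint16_t seg, uint8_t bus, uint8_t dev, uint8_t fn, uint16_t reg);

static int find_cap_offset(uint16_t seg, uint8_t bus, uint8_t dev, uint8_t fn,
                           uint8_t cap_id)
{
    int ttl = MAX_CAPS;
    uint8_t pos;

    /* No capability list at all? */
    if ( !(cfg_read16(seg, bus, dev, fn, CFG_STATUS) & STATUS_CAP_LIST) )
        return 0;

    pos = cfg_read8(seg, bus, dev, fn, CFG_CAP_PTR) & ~3;
    while ( ttl-- && pos >= 0x40 )
    {
        if ( cfg_read8(seg, bus, dev, fn, pos) == cap_id )
            return pos;                      /* offset of the matching block */
        pos = cfg_read8(seg, bus, dev, fn, pos + 1) & ~3;  /* next pointer */
    }
    return 0;                                /* not found */
}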
Wei Wang
2011-Nov-09 15:50 UTC
[Xen-devel] [PATCH 2 of 6] amd iommu: Cleanup iommu pci capabilities detection
# HG changeset patch
# User Wei Wang <wei.wang2@amd.com>
# Date 1320851997 -3600
# Node ID 4b115815bc13e4f2a3a178f3de7477ecf46cb44b
# Parent  4769713326a876c25bdc0f9d1f90594f90fba9c5
amd iommu: Cleanup iommu pci capabilities detection.
* Define new structure to represent capability block.
* Remove unnecessary read for unused information.
* Add sanity check into get_iommu_capabilities.
* iommu capability offset is 16 bit, not 8 bit; fix that.

Signed-off-by: Wei Wang <wei.wang2@amd.com>

diff -r 4769713326a8 -r 4b115815bc13 xen/drivers/passthrough/amd/iommu_detect.c
--- a/xen/drivers/passthrough/amd/iommu_detect.c  Wed Nov 09 13:18:29 2011 +0100
+++ b/xen/drivers/passthrough/amd/iommu_detect.c  Wed Nov 09 16:19:57 2011 +0100
@@ -48,25 +48,16 @@ static int __init get_iommu_msi_capabili
 }
 
 static int __init get_iommu_capabilities(
-    u16 seg, u8 bus, u8 dev, u8 func, u8 cap_ptr, struct amd_iommu *iommu)
+    u16 seg, u8 bus, u8 dev, u8 func, u16 cap_ptr, struct amd_iommu *iommu)
 {
-    u32 cap_header, cap_range, misc_info;
+    u8 type;
 
-    cap_header = pci_conf_read32(seg, bus, dev, func, cap_ptr);
-    iommu->revision = get_field_from_reg_u32(
-        cap_header, PCI_CAP_REV_MASK, PCI_CAP_REV_SHIFT);
-    iommu->pte_not_present_cached = get_field_from_reg_u32(
-        cap_header, PCI_CAP_NP_CACHE_MASK, PCI_CAP_NP_CACHE_SHIFT);
+    iommu->cap.header = pci_conf_read32(seg, bus, dev, func, cap_ptr);
+    type = get_field_from_reg_u32(iommu->cap.header, PCI_CAP_TYPE_MASK,
+                                  PCI_CAP_TYPE_SHIFT);
 
-    cap_range = pci_conf_read32(seg, bus, dev, func,
-                                cap_ptr + PCI_CAP_RANGE_OFFSET);
-    iommu->unit_id = get_field_from_reg_u32(
-        cap_range, PCI_CAP_UNIT_ID_MASK, PCI_CAP_UNIT_ID_SHIFT);
-
-    misc_info = pci_conf_read32(seg, bus, dev, func,
-                                cap_ptr + PCI_MISC_INFO_OFFSET);
-    iommu->msi_number = get_field_from_reg_u32(
-        misc_info, PCI_CAP_MSI_NUMBER_MASK, PCI_CAP_MSI_NUMBER_SHIFT);
+    if ( type != PCI_CAP_TYPE_IOMMU )
+        return -ENODEV;
 
     return 0;
 }
@@ -76,6 +67,7 @@ int __init amd_iommu_detect_one_acpi(voi
     struct amd_iommu *iommu;
     u8 bus, dev, func;
     struct acpi_ivhd_block_header *ivhd_block;
+    int rt = 0;
 
     ivhd_block = (struct acpi_ivhd_block_header *)ivhd;
 
@@ -125,12 +117,19 @@ int __init amd_iommu_detect_one_acpi(voi
     iommu->ht_tunnel_enable = get_field_from_byte(ivhd_block->header.flags,
                                                   AMD_IOMMU_ACPI_HT_TUN_ENB_MASK,
                                                   AMD_IOMMU_ACPI_HT_TUN_ENB_SHIFT);
-    bus = iommu->bdf >> 8;
-    dev = PCI_SLOT(iommu->bdf & 0xFF);
-    func = PCI_FUNC(iommu->bdf & 0xFF);
-    get_iommu_capabilities(iommu->seg, bus, dev, func,
-                           iommu->cap_offset, iommu);
-    get_iommu_msi_capabilities(iommu->seg, bus, dev, func, iommu);
+
+    bus = PCI_BUS(iommu->bdf);
+    dev = PCI_SLOT(iommu->bdf);
+    func = PCI_FUNC(iommu->bdf);
+
+    rt = get_iommu_capabilities(iommu->seg, bus, dev, func,
+                                iommu->cap_offset, iommu);
+    if ( rt )
+        return -ENODEV;
+
+    rt = get_iommu_msi_capabilities(iommu->seg, bus, dev, func, iommu);
+    if ( rt )
+        return -ENODEV;
 
     list_add_tail(&iommu->list, &amd_iommu_head);
 
diff -r 4769713326a8 -r 4b115815bc13 xen/include/asm-x86/amd-iommu.h
--- a/xen/include/asm-x86/amd-iommu.h  Wed Nov 09 13:18:29 2011 +0100
+++ b/xen/include/asm-x86/amd-iommu.h  Wed Nov 09 16:19:57 2011 +0100
@@ -36,16 +36,22 @@ struct table_struct {
     unsigned long alloc_size;
 };
 
+typedef struct iommu_cap {
+    uint32_t header;                    /* offset 00h */
+    uint32_t base_low;                  /* offset 04h */
+    uint32_t base_hi;                   /* offset 08h */
+    uint32_t range;                     /* offset 0Ch */
+    uint32_t misc;                      /* offset 10h */
+} iommu_cap_t;
+
 struct amd_iommu {
     struct list_head list;
     spinlock_t lock; /* protect iommu */
 
     u16 seg;
     u16 bdf;
-    u8 cap_offset;
-    u8 revision;
-    u8 unit_id;
-    u8 msi_number;
+    u16 cap_offset;
+    iommu_cap_t cap;
 
     u8 pte_not_present_cached;
     u8 ht_tunnel_support;
 
diff -r 4769713326a8 -r 4b115815bc13 xen/include/asm-x86/hvm/svm/amd-iommu-defs.h
--- a/xen/include/asm-x86/hvm/svm/amd-iommu-defs.h  Wed Nov 09 13:18:29 2011 +0100
+++ b/xen/include/asm-x86/hvm/svm/amd-iommu-defs.h  Wed Nov 09 16:19:57 2011 +0100
@@ -74,7 +74,7 @@
 #define PCI_CAP_UNIT_ID_MASK    0x0000001F
 #define PCI_CAP_UNIT_ID_SHIFT   0
 
-#define PCI_MISC_INFO_OFFSET    0x10
+#define PCI_CAP_MISC_INFO_OFFSET    0x10
 #define PCI_CAP_MSI_NUMBER_MASK    0x0000001F
 #define PCI_CAP_MSI_NUMBER_SHIFT   0
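[The new iommu_cap_t mirrors the layout of the IOMMU capability block in PCI config space, as documented by the offset comments in the structure itself. The sketch below shows how the block could be filled with consecutive 32-bit reads; it is illustrative only (the patch actually reads just the header dword), and read32() is a placeholder for pci_conf_read32() with the device coordinates already bound.]

static void read_iommu_cap(iommu_cap_t *cap, uint16_t cap_ptr,
                           uint32_t (*read32)(uint16_t reg))
{
    cap->header   = read32(cap_ptr + 0x00);  /* cap id/ptr, type, revision */
    cap->base_low = read32(cap_ptr + 0x04);  /* MMIO base address, low */
    cap->base_hi  = read32(cap_ptr + 0x08);  /* MMIO base address, high */
    cap->range    = read32(cap_ptr + 0x0C);  /* unit id / device range */
    cap->misc     = read32(cap_ptr + 0x10);  /* MSI number, misc info */
}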
Wei Wang
2011-Nov-09 15:50 UTC
[Xen-devel] [PATCH 3 of 6] amd iommu: Simplify IVHD device flag handling
# HG changeset patch
# User Wei Wang <wei.wang2@amd.com>
# Date 1320852001 -3600
# Node ID 3de4c299af619a8c992d779e255e6060d79f9ff6
# Parent  4b115815bc13e4f2a3a178f3de7477ecf46cb44b
amd iommu: Simplify IVHD device flag handling.
These bits are aligned to the corresponding fields in the device table entry,
so they can be updated by a single device table entry write.

Signed-off-by: Wei Wang <wei.wang2@amd.com>

diff -r 4b115815bc13 -r 3de4c299af61 xen/drivers/passthrough/amd/iommu_acpi.c
--- a/xen/drivers/passthrough/amd/iommu_acpi.c  Wed Nov 09 16:19:57 2011 +0100
+++ b/xen/drivers/passthrough/amd/iommu_acpi.c  Wed Nov 09 16:20:01 2011 +0100
@@ -31,38 +31,14 @@ static void __init add_ivrs_mapping_entr
     u16 bdf, u16 alias_id, u8 flags, struct amd_iommu *iommu)
 {
     struct ivrs_mappings *ivrs_mappings = get_ivrs_mappings(iommu->seg);
-    u8 sys_mgt, lint1_pass, lint0_pass, nmi_pass, ext_int_pass, init_pass;
+    ASSERT( ivrs_mappings != NULL );
 
     /* setup requestor id */
     ivrs_mappings[bdf].dte_requestor_id = alias_id;
 
     /* override flags for range of devices */
-    sys_mgt = get_field_from_byte(flags,
-                                  AMD_IOMMU_ACPI_SYS_MGT_MASK,
-                                  AMD_IOMMU_ACPI_SYS_MGT_SHIFT);
-    lint1_pass = get_field_from_byte(flags,
-                                     AMD_IOMMU_ACPI_LINT1_PASS_MASK,
-                                     AMD_IOMMU_ACPI_LINT1_PASS_SHIFT);
-    lint0_pass = get_field_from_byte(flags,
-                                     AMD_IOMMU_ACPI_LINT0_PASS_MASK,
-                                     AMD_IOMMU_ACPI_LINT0_PASS_SHIFT);
-    nmi_pass = get_field_from_byte(flags,
-                                   AMD_IOMMU_ACPI_NMI_PASS_MASK,
-                                   AMD_IOMMU_ACPI_NMI_PASS_SHIFT);
-    ext_int_pass = get_field_from_byte(flags,
-                                       AMD_IOMMU_ACPI_EINT_PASS_MASK,
-                                       AMD_IOMMU_ACPI_EINT_PASS_SHIFT);
-    init_pass = get_field_from_byte(flags,
-                                    AMD_IOMMU_ACPI_INIT_PASS_MASK,
-                                    AMD_IOMMU_ACPI_INIT_PASS_SHIFT);
-
-    ivrs_mappings[bdf].dte_sys_mgt_enable = sys_mgt;
-    ivrs_mappings[bdf].dte_lint1_pass = lint1_pass;
-    ivrs_mappings[bdf].dte_lint0_pass = lint0_pass;
-    ivrs_mappings[bdf].dte_nmi_pass = nmi_pass;
-    ivrs_mappings[bdf].dte_ext_int_pass = ext_int_pass;
-    ivrs_mappings[bdf].dte_init_pass = init_pass;
+    ivrs_mappings[bdf].device_flags = flags;
 
     if (ivrs_mappings[alias_id].intremap_table == NULL )
     {
diff -r 4b115815bc13 -r 3de4c299af61 xen/drivers/passthrough/amd/iommu_init.c
--- a/xen/drivers/passthrough/amd/iommu_init.c  Wed Nov 09 16:19:57 2011 +0100
+++ b/xen/drivers/passthrough/amd/iommu_init.c  Wed Nov 09 16:20:01 2011 +0100
@@ -790,18 +790,12 @@ static int __init alloc_ivrs_mappings(u1
     for ( bdf = 0; bdf < ivrs_bdf_entries; bdf++ )
     {
         ivrs_mappings[bdf].dte_requestor_id = bdf;
-        ivrs_mappings[bdf].dte_sys_mgt_enable =
-            IOMMU_DEV_TABLE_SYS_MGT_MSG_FORWARDED;
         ivrs_mappings[bdf].dte_allow_exclusion = IOMMU_CONTROL_DISABLED;
         ivrs_mappings[bdf].unity_map_enable = IOMMU_CONTROL_DISABLED;
         ivrs_mappings[bdf].iommu = NULL;
 
         ivrs_mappings[bdf].intremap_table = NULL;
-        ivrs_mappings[bdf].dte_lint1_pass = IOMMU_CONTROL_DISABLED;
-        ivrs_mappings[bdf].dte_lint0_pass = IOMMU_CONTROL_DISABLED;
-        ivrs_mappings[bdf].dte_nmi_pass = IOMMU_CONTROL_DISABLED;
-        ivrs_mappings[bdf].dte_ext_int_pass = IOMMU_CONTROL_DISABLED;
-        ivrs_mappings[bdf].dte_init_pass = IOMMU_CONTROL_DISABLED;
+        ivrs_mappings[bdf].device_flags = 0;
 
         if ( amd_iommu_perdev_intremap )
             spin_lock_init(&ivrs_mappings[bdf].intremap_lock);
@@ -817,8 +811,6 @@ static int __init amd_iommu_setup_device
 {
     int bdf;
     void *intr_tb, *dte;
-    int sys_mgt, dev_ex, lint1_pass, lint0_pass,
-        nmi_pass, ext_int_pass, init_pass;
 
     BUG_ON( (ivrs_bdf_entries == 0) );
 
@@ -840,21 +832,9 @@ static int __init amd_iommu_setup_device
 
         if ( intr_tb )
         {
-            sys_mgt = ivrs_mappings[bdf].dte_sys_mgt_enable;
-            dev_ex = ivrs_mappings[bdf].dte_allow_exclusion;
-
-            /* get interrupt remapping settings */
-            lint1_pass = ivrs_mappings[bdf].dte_lint1_pass;
-            lint0_pass = ivrs_mappings[bdf].dte_lint0_pass;
-            nmi_pass = ivrs_mappings[bdf].dte_nmi_pass;
-            ext_int_pass = ivrs_mappings[bdf].dte_ext_int_pass;
-            init_pass = ivrs_mappings[bdf].dte_init_pass;
-
             /* add device table entry */
             dte = device_table.buffer + (bdf * IOMMU_DEV_TABLE_ENTRY_SIZE);
-            amd_iommu_add_dev_table_entry(
-                dte, sys_mgt, dev_ex, lint1_pass, lint0_pass,
-                nmi_pass, ext_int_pass, init_pass);
+            iommu_dte_add_device_entry(dte, &ivrs_mappings[bdf]);
 
             amd_iommu_set_intremap_table(
                 dte, (u64)virt_to_maddr(intr_tb), iommu_intremap);
diff -r 4b115815bc13 -r 3de4c299af61 xen/drivers/passthrough/amd/iommu_map.c
--- a/xen/drivers/passthrough/amd/iommu_map.c  Wed Nov 09 16:19:57 2011 +0100
+++ b/xen/drivers/passthrough/amd/iommu_map.c  Wed Nov 09 16:20:01 2011 +0100
@@ -25,6 +25,7 @@
 #include <asm/hvm/svm/amd-iommu-proto.h>
 #include "../ats.h"
 #include <xen/pci.h>
+#include <asm/hvm/svm/amd-iommu-acpi.h>
 
 static int queue_iommu_command(struct amd_iommu *iommu, u32 cmd[])
 {
@@ -419,35 +420,23 @@ void __init amd_iommu_set_intremap_table
     dte[4] = entry;
 }
 
-void __init amd_iommu_add_dev_table_entry(
-    u32 *dte, u8 sys_mgt, u8 dev_ex, u8 lint1_pass, u8 lint0_pass,
-    u8 nmi_pass, u8 ext_int_pass, u8 init_pass)
+void __init iommu_dte_add_device_entry(u32 *dte, struct ivrs_mappings* ivrs_dev)
 {
     u32 entry;
+    u8 sys_mgt, dev_ex, flags;
+    u8 mask = ~(0x7 << 3);
 
     dte[7] = dte[6] = dte[4] = dte[2] = dte[1] = dte[0] = 0;
 
+    flags = ivrs_dev->device_flags;
+    sys_mgt = get_field_from_byte(flags, AMD_IOMMU_ACPI_SYS_MGT_MASK,
+                                  AMD_IOMMU_ACPI_SYS_MGT_SHIFT);
+    dev_ex = ivrs_dev->dte_allow_exclusion;
 
-    set_field_in_reg_u32(init_pass ? IOMMU_CONTROL_ENABLED :
-                         IOMMU_CONTROL_DISABLED, 0,
-                         IOMMU_DEV_TABLE_INIT_PASSTHRU_MASK,
-                         IOMMU_DEV_TABLE_INIT_PASSTHRU_SHIFT, &entry);
-    set_field_in_reg_u32(ext_int_pass ? IOMMU_CONTROL_ENABLED :
-                         IOMMU_CONTROL_DISABLED, entry,
-                         IOMMU_DEV_TABLE_EINT_PASSTHRU_MASK,
-                         IOMMU_DEV_TABLE_EINT_PASSTHRU_SHIFT, &entry);
-    set_field_in_reg_u32(nmi_pass ? IOMMU_CONTROL_ENABLED :
-                         IOMMU_CONTROL_DISABLED, entry,
-                         IOMMU_DEV_TABLE_NMI_PASSTHRU_MASK,
-                         IOMMU_DEV_TABLE_NMI_PASSTHRU_SHIFT, &entry);
-    set_field_in_reg_u32(lint0_pass ? IOMMU_CONTROL_ENABLED :
-                         IOMMU_CONTROL_DISABLED, entry,
-                         IOMMU_DEV_TABLE_LINT0_ENABLE_MASK,
-                         IOMMU_DEV_TABLE_LINT0_ENABLE_SHIFT, &entry);
-    set_field_in_reg_u32(lint1_pass ? IOMMU_CONTROL_ENABLED :
-                         IOMMU_CONTROL_DISABLED, entry,
-                         IOMMU_DEV_TABLE_LINT1_ENABLE_MASK,
-                         IOMMU_DEV_TABLE_LINT1_ENABLE_SHIFT, &entry);
+    flags &= mask;
+    set_field_in_reg_u32(flags, 0,
+                         IOMMU_DEV_TABLE_IVHD_FLAGS_MASK,
+                         IOMMU_DEV_TABLE_IVHD_FLAGS_SHIFT, &entry);
     dte[5] = entry;
 
     set_field_in_reg_u32(sys_mgt, 0,
diff -r 4b115815bc13 -r 3de4c299af61 xen/include/asm-x86/amd-iommu.h
--- a/xen/include/asm-x86/amd-iommu.h  Wed Nov 09 16:19:57 2011 +0100
+++ b/xen/include/asm-x86/amd-iommu.h  Wed Nov 09 16:20:01 2011 +0100
@@ -86,7 +86,6 @@ struct amd_iommu {
 
 struct ivrs_mappings {
     u16 dte_requestor_id;
-    u8 dte_sys_mgt_enable;
     u8 dte_allow_exclusion;
     u8 unity_map_enable;
     u8 write_permission;
@@ -99,12 +98,8 @@ struct ivrs_mappings {
     void *intremap_table;
     spinlock_t intremap_lock;
 
-    /* interrupt remapping settings */
-    u8 dte_lint1_pass;
-    u8 dte_lint0_pass;
-    u8 dte_nmi_pass;
-    u8 dte_ext_int_pass;
-    u8 dte_init_pass;
+    /* ivhd device data settings */
+    u8 device_flags;
 };
 
 extern unsigned short ivrs_bdf_entries;
diff -r 4b115815bc13 -r 3de4c299af61 xen/include/asm-x86/hvm/svm/amd-iommu-defs.h
--- a/xen/include/asm-x86/hvm/svm/amd-iommu-defs.h  Wed Nov 09 16:19:57 2011 +0100
+++ b/xen/include/asm-x86/hvm/svm/amd-iommu-defs.h  Wed Nov 09 16:20:01 2011 +0100
@@ -158,18 +158,11 @@
 /* DeviceTable Entry[191:160] */
 #define IOMMU_DEV_TABLE_INT_TABLE_PTR_HIGH_MASK    0x000FFFFF
 #define IOMMU_DEV_TABLE_INT_TABLE_PTR_HIGH_SHIFT   0
-#define IOMMU_DEV_TABLE_INIT_PASSTHRU_MASK    0x01000000
-#define IOMMU_DEV_TABLE_INIT_PASSTHRU_SHIFT   24
-#define IOMMU_DEV_TABLE_EINT_PASSTHRU_MASK    0x02000000
-#define IOMMU_DEV_TABLE_EINT_PASSTHRU_SHIFT   25
-#define IOMMU_DEV_TABLE_NMI_PASSTHRU_MASK     0x04000000
-#define IOMMU_DEV_TABLE_NMI_PASSTHRU_SHIFT    26
+#define IOMMU_DEV_TABLE_IVHD_FLAGS_SHIFT      24
+#define IOMMU_DEV_TABLE_IVHD_FLAGS_MASK       0xC7000000
 #define IOMMU_DEV_TABLE_INT_CONTROL_MASK      0x30000000
 #define IOMMU_DEV_TABLE_INT_CONTROL_SHIFT     28
-#define IOMMU_DEV_TABLE_LINT0_ENABLE_MASK     0x40000000
-#define IOMMU_DEV_TABLE_LINT0_ENABLE_SHIFT    30
-#define IOMMU_DEV_TABLE_LINT1_ENABLE_MASK     0x80000000
-#define IOMMU_DEV_TABLE_LINT1_ENABLE_SHIFT    31
+
 
 /* Command Buffer */
 #define IOMMU_CMD_BUFFER_BASE_LOW_OFFSET    0x08
diff -r 4b115815bc13 -r 3de4c299af61 xen/include/asm-x86/hvm/svm/amd-iommu-proto.h
--- a/xen/include/asm-x86/hvm/svm/amd-iommu-proto.h  Wed Nov 09 16:19:57 2011 +0100
+++ b/xen/include/asm-x86/hvm/svm/amd-iommu-proto.h  Wed Nov 09 16:20:01 2011 +0100
@@ -68,14 +68,12 @@ void amd_iommu_share_p2m(struct domain *
 
 /* device table functions */
 int get_dma_requestor_id(u16 seg, u16 bdf);
-void amd_iommu_add_dev_table_entry(
-    u32 *dte, u8 sys_mgt, u8 dev_ex, u8 lint1_pass, u8 lint0_pass,
-    u8 nmi_pass, u8 ext_int_pass, u8 init_pass);
 void amd_iommu_set_intremap_table(
     u32 *dte, u64 intremap_ptr, u8 int_valid);
 void amd_iommu_set_root_page_table(
     u32 *dte, u64 root_ptr, u16 domain_id, u8 paging_mode, u8 valid);
 void iommu_dte_set_iotlb(u32 *dte, u8 i);
+void iommu_dte_add_device_entry(u32 *dte, struct ivrs_mappings* ivrs_dev);
 
 void invalidate_dev_table_entry(struct amd_iommu *iommu, u16 devic_id);
 
 /* send cmd to iommu */
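[The reason a raw byte copy works here, as I read the IVHD device-entry flag layout and the DTE[191:160] definitions used in this patch, is that the per-device pass-through bits line up with the device table entry bit for bit. This is an explanatory sketch in comment form, not new code.]

/*
 *   IVHD flags bit:    0        1        2      3..5           6         7
 *                   INITPass EIntPass NMIPass (rsvd/SysMgt) Lint0Pass Lint1Pass
 *   DTE[191:160] bit: 24       25       26    (not copied)    30        31
 *
 * ~(0x7 << 3) strips bits 3-5 (reserved + SysMgt) from the flags byte,
 * because SysMgt is programmed into its own DTE field separately; the
 * remaining bits are written in one operation through the 0xC7000000 mask
 * at shift 24 (IOMMU_DEV_TABLE_IVHD_FLAGS_MASK/_SHIFT).
 */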
Wei Wang
2011-Nov-09 15:50 UTC
[Xen-devel] [PATCH 4 of 6] amd iommu: Disable debug output for early DTE update
# HG changeset patch
# User Wei Wang <wei.wang2@amd.com>
# Date 1320852002 -3600
# Node ID 60d51bc20139471cf71e3d6854534d4176853de2
# Parent  3de4c299af619a8c992d779e255e6060d79f9ff6
amd iommu: Disable debug output for early DTE update.
Some systems have IVHD device entries that cover a large device id range;
displaying all of those entries makes booting take a very long time.

Signed-off-by: Wei Wang <wei.wang2@amd.com>

diff -r 3de4c299af61 -r 60d51bc20139 xen/drivers/passthrough/amd/iommu_init.c
--- a/xen/drivers/passthrough/amd/iommu_init.c  Wed Nov 09 16:20:01 2011 +0100
+++ b/xen/drivers/passthrough/amd/iommu_init.c  Wed Nov 09 16:20:02 2011 +0100
@@ -838,10 +838,6 @@ static int __init amd_iommu_setup_device
 
             amd_iommu_set_intremap_table(
                 dte, (u64)virt_to_maddr(intr_tb), iommu_intremap);
-
-            AMD_IOMMU_DEBUG("Add device table entry: device id = 0x%04x, "
-                            "interupt table = 0x%"PRIx64"\n", bdf,
-                            (u64)virt_to_maddr(intr_tb));
         }
     }
Wei Wang
2011-Nov-09 15:51 UTC
[Xen-devel] [PATCH 5 of 6] amd iommu: Compress hyper-transport flags into a single byte
# HG changeset patch
# User Wei Wang <wei.wang2@amd.com>
# Date 1320852003 -3600
# Node ID c75cf540a1fe57a439f9cb0503bf69f120066b92
# Parent  60d51bc20139471cf71e3d6854534d4176853de2
amd iommu: Compress hyper-transport flags into a single byte.
These flags are single bits and do not need to be stored as separate integers.
Add 3 inline helpers to make single-bit access easier, and introduce
iommu_has_ht_flag() and set_iommu_ht_flags().

Signed-off-by: Wei Wang <wei.wang2@amd.com>

diff -r 60d51bc20139 -r c75cf540a1fe xen/drivers/passthrough/amd/iommu_detect.c
--- a/xen/drivers/passthrough/amd/iommu_detect.c  Wed Nov 09 16:20:02 2011 +0100
+++ b/xen/drivers/passthrough/amd/iommu_detect.c  Wed Nov 09 16:20:03 2011 +0100
@@ -98,25 +98,8 @@ int __init amd_iommu_detect_one_acpi(voi
     iommu->cap_offset = ivhd_block->cap_offset;
     iommu->mmio_base_phys = ivhd_block->mmio_base;
 
-    /* override IOMMU support flags */
-    iommu->coherent = get_field_from_byte(ivhd_block->header.flags,
-                                          AMD_IOMMU_ACPI_COHERENT_MASK,
-                                          AMD_IOMMU_ACPI_COHERENT_SHIFT);
-    iommu->iotlb_support = get_field_from_byte(ivhd_block->header.flags,
-                                               AMD_IOMMU_ACPI_IOTLB_SUP_MASK,
-                                               AMD_IOMMU_ACPI_IOTLB_SUP_SHIFT);
-    iommu->isochronous = get_field_from_byte(ivhd_block->header.flags,
-                                             AMD_IOMMU_ACPI_ISOC_MASK,
-                                             AMD_IOMMU_ACPI_ISOC_SHIFT);
-    iommu->res_pass_pw = get_field_from_byte(ivhd_block->header.flags,
-                                             AMD_IOMMU_ACPI_RES_PASS_PW_MASK,
-                                             AMD_IOMMU_ACPI_RES_PASS_PW_SHIFT);
-    iommu->pass_pw = get_field_from_byte(ivhd_block->header.flags,
-                                         AMD_IOMMU_ACPI_PASS_PW_MASK,
-                                         AMD_IOMMU_ACPI_PASS_PW_SHIFT);
-    iommu->ht_tunnel_enable = get_field_from_byte(ivhd_block->header.flags,
-                                                  AMD_IOMMU_ACPI_HT_TUN_ENB_MASK,
-                                                  AMD_IOMMU_ACPI_HT_TUN_ENB_SHIFT);
+    /* override IOMMU HT flags */
+    iommu->ht_flags = ivhd_block->header.flags;
 
     bus = PCI_BUS(iommu->bdf);
     dev = PCI_SLOT(iommu->bdf);
diff -r 60d51bc20139 -r c75cf540a1fe xen/drivers/passthrough/amd/iommu_init.c
--- a/xen/drivers/passthrough/amd/iommu_init.c  Wed Nov 09 16:20:02 2011 +0100
+++ b/xen/drivers/passthrough/amd/iommu_init.c  Wed Nov 09 16:20:03 2011 +0100
@@ -28,6 +28,7 @@
 #include <asm/hvm/svm/amd-iommu-proto.h>
 #include <asm-x86/fixmap.h>
 #include <mach_apic.h>
+#include <asm/hvm/svm/amd-iommu-acpi.h>
 
 static int __initdata nr_amd_iommus;
 
@@ -36,6 +37,12 @@ static struct radix_tree_root ivrs_maps;
 struct list_head amd_iommu_head;
 struct table_struct device_table;
 
+static int iommu_has_ht_flag(struct amd_iommu *iommu, uint8_t bit)
+{
+    u8 mask = (1U << bit) & 0xff;
+    return iommu->ht_flags & mask;
+}
+
 static int __init map_iommu_mmio_region(struct amd_iommu *iommu)
 {
     unsigned long mfn;
@@ -66,6 +73,34 @@ static void __init unmap_iommu_mmio_regi
     }
 }
 
+static void set_iommu_ht_flags(struct amd_iommu *iommu)
+{
+    u32 entry;
+    entry = readl(iommu->mmio_base + IOMMU_CONTROL_MMIO_OFFSET);
+
+    /* Setup HT flags */
+    iommu_has_ht_flag(iommu, AMD_IOMMU_ACPI_HT_TUN_ENB_SHIFT) ?
+        iommu_set_bit(&entry, IOMMU_CONTROL_HT_TUNNEL_TRANSLATION_SHIFT):
+        iommu_clear_bit(&entry, IOMMU_CONTROL_HT_TUNNEL_TRANSLATION_SHIFT);
+
+    iommu_has_ht_flag(iommu, AMD_IOMMU_ACPI_RES_PASS_PW_SHIFT) ?
+        iommu_set_bit(&entry, IOMMU_CONTROL_RESP_PASS_POSTED_WRITE_SHIFT):
+        iommu_clear_bit(&entry, IOMMU_CONTROL_RESP_PASS_POSTED_WRITE_SHIFT);
+
+    iommu_has_ht_flag(iommu, AMD_IOMMU_ACPI_ISOC_SHIFT) ?
+        iommu_set_bit(&entry, IOMMU_CONTROL_ISOCHRONOUS_SHIFT):
+        iommu_clear_bit(&entry, IOMMU_CONTROL_ISOCHRONOUS_SHIFT);
+
+    iommu_has_ht_flag(iommu, AMD_IOMMU_ACPI_PASS_PW_SHIFT) ?
+        iommu_set_bit(&entry, IOMMU_CONTROL_PASS_POSTED_WRITE_SHIFT):
+        iommu_clear_bit(&entry, IOMMU_CONTROL_PASS_POSTED_WRITE_SHIFT);
+
+    /* Force coherent */
+    iommu_set_bit(&entry, IOMMU_CONTROL_COHERENT_SHIFT);
+
+    writel(entry, iommu->mmio_base+IOMMU_CONTROL_MMIO_OFFSET);
+}
+
 static void register_iommu_dev_table_in_mmio_space(struct amd_iommu *iommu)
 {
     u64 addr_64, addr_lo, addr_hi;
@@ -150,33 +185,10 @@ static void set_iommu_translation_contro
     entry = readl(iommu->mmio_base + IOMMU_CONTROL_MMIO_OFFSET);
 
-    if ( enable )
-    {
-        set_field_in_reg_u32(iommu->ht_tunnel_support ? IOMMU_CONTROL_ENABLED :
-                             IOMMU_CONTROL_DISABLED, entry,
-                             IOMMU_CONTROL_HT_TUNNEL_TRANSLATION_MASK,
-                             IOMMU_CONTROL_HT_TUNNEL_TRANSLATION_SHIFT, &entry);
-        set_field_in_reg_u32(iommu->isochronous ? IOMMU_CONTROL_ENABLED :
-                             IOMMU_CONTROL_DISABLED, entry,
-                             IOMMU_CONTROL_ISOCHRONOUS_MASK,
-                             IOMMU_CONTROL_ISOCHRONOUS_SHIFT, &entry);
-        set_field_in_reg_u32(iommu->coherent ? IOMMU_CONTROL_ENABLED :
-                             IOMMU_CONTROL_DISABLED, entry,
-                             IOMMU_CONTROL_COHERENT_MASK,
-                             IOMMU_CONTROL_COHERENT_SHIFT, &entry);
-        set_field_in_reg_u32(iommu->res_pass_pw ? IOMMU_CONTROL_ENABLED :
-                             IOMMU_CONTROL_DISABLED, entry,
-                             IOMMU_CONTROL_RESP_PASS_POSTED_WRITE_MASK,
-                             IOMMU_CONTROL_RESP_PASS_POSTED_WRITE_SHIFT, &entry);
-        /* do not set PassPW bit */
-        set_field_in_reg_u32(IOMMU_CONTROL_DISABLED, entry,
-                             IOMMU_CONTROL_PASS_POSTED_WRITE_MASK,
-                             IOMMU_CONTROL_PASS_POSTED_WRITE_SHIFT, &entry);
-    }
-    set_field_in_reg_u32(enable ? IOMMU_CONTROL_ENABLED :
-                         IOMMU_CONTROL_DISABLED, entry,
-                         IOMMU_CONTROL_TRANSLATION_ENABLE_MASK,
-                         IOMMU_CONTROL_TRANSLATION_ENABLE_SHIFT, &entry);
+    enable ?
+        iommu_set_bit(&entry, IOMMU_CONTROL_TRANSLATION_ENABLE_SHIFT):
+        iommu_clear_bit(&entry, IOMMU_CONTROL_TRANSLATION_ENABLE_SHIFT);
+
     writel(entry, iommu->mmio_base+IOMMU_CONTROL_MMIO_OFFSET);
 }
 
@@ -186,17 +198,17 @@ static void set_iommu_command_buffer_con
     u32 entry;
 
     entry = readl(iommu->mmio_base + IOMMU_CONTROL_MMIO_OFFSET);
-    set_field_in_reg_u32(enable ? IOMMU_CONTROL_ENABLED :
-                         IOMMU_CONTROL_DISABLED, entry,
-                         IOMMU_CONTROL_COMMAND_BUFFER_ENABLE_MASK,
-                         IOMMU_CONTROL_COMMAND_BUFFER_ENABLE_SHIFT, &entry);
 
     /*reset head and tail pointer manually before enablement */
-    if ( enable == IOMMU_CONTROL_ENABLED )
+    if ( enable )
     {
         writel(0x0, iommu->mmio_base + IOMMU_CMD_BUFFER_HEAD_OFFSET);
         writel(0x0, iommu->mmio_base + IOMMU_CMD_BUFFER_TAIL_OFFSET);
+
+        iommu_set_bit(&entry, IOMMU_CONTROL_COMMAND_BUFFER_ENABLE_SHIFT);
     }
+    else
+        iommu_clear_bit(&entry, IOMMU_CONTROL_COMMAND_BUFFER_ENABLE_SHIFT);
 
     writel(entry, iommu->mmio_base+IOMMU_CONTROL_MMIO_OFFSET);
 }
@@ -247,24 +259,24 @@ static void set_iommu_event_log_control(
     u32 entry;
 
     entry = readl(iommu->mmio_base + IOMMU_CONTROL_MMIO_OFFSET);
-    set_field_in_reg_u32(enable ? IOMMU_CONTROL_ENABLED :
-                         IOMMU_CONTROL_DISABLED, entry,
-                         IOMMU_CONTROL_EVENT_LOG_ENABLE_MASK,
-                         IOMMU_CONTROL_EVENT_LOG_ENABLE_SHIFT, &entry);
-    set_field_in_reg_u32(enable ? IOMMU_CONTROL_ENABLED :
-                         IOMMU_CONTROL_DISABLED, entry,
-                         IOMMU_CONTROL_EVENT_LOG_INT_MASK,
-                         IOMMU_CONTROL_EVENT_LOG_INT_SHIFT, &entry);
-    set_field_in_reg_u32(IOMMU_CONTROL_DISABLED, entry,
-                         IOMMU_CONTROL_COMP_WAIT_INT_MASK,
-                         IOMMU_CONTROL_COMP_WAIT_INT_SHIFT, &entry);
 
     /*reset head and tail pointer manually before enablement */
-    if ( enable == IOMMU_CONTROL_ENABLED )
+    if ( enable )
     {
         writel(0x0, iommu->mmio_base + IOMMU_EVENT_LOG_HEAD_OFFSET);
         writel(0x0, iommu->mmio_base + IOMMU_EVENT_LOG_TAIL_OFFSET);
+
+        iommu_set_bit(&entry, IOMMU_CONTROL_EVENT_LOG_INT_SHIFT);
+        iommu_set_bit(&entry, IOMMU_CONTROL_EVENT_LOG_ENABLE_SHIFT);
     }
+    else
+    {
+        iommu_clear_bit(&entry, IOMMU_CONTROL_EVENT_LOG_INT_SHIFT);
+        iommu_clear_bit(&entry, IOMMU_CONTROL_EVENT_LOG_ENABLE_SHIFT);
+    }
+
+    iommu_clear_bit(&entry, IOMMU_CONTROL_COMP_WAIT_INT_SHIFT);
+
     writel(entry, iommu->mmio_base + IOMMU_CONTROL_MMIO_OFFSET);
 }
 
@@ -313,9 +325,7 @@ static void amd_iommu_reset_event_log(st
     /* wait until EventLogRun bit = 0 */
     do {
         entry = readl(iommu->mmio_base + IOMMU_STATUS_MMIO_OFFSET);
-        log_run = get_field_from_reg_u32(entry,
-                                         IOMMU_STATUS_EVENT_LOG_RUN_MASK,
-                                         IOMMU_STATUS_EVENT_LOG_RUN_SHIFT);
+        log_run = iommu_get_bit(entry, IOMMU_STATUS_EVENT_LOG_RUN_SHIFT);
         loop_count--;
     } while ( log_run && loop_count );
 
@@ -330,11 +340,9 @@ static void amd_iommu_reset_event_log(st
 
     /* read event log for debugging */
     amd_iommu_read_event_log(iommu);
+    /*clear overflow bit */
+    iommu_clear_bit(&entry, IOMMU_STATUS_EVENT_OVERFLOW_SHIFT);
 
-    /*clear overflow bit */
-    set_field_in_reg_u32(IOMMU_CONTROL_DISABLED, entry,
-                         IOMMU_STATUS_EVENT_OVERFLOW_MASK,
-                         IOMMU_STATUS_EVENT_OVERFLOW_SHIFT, &entry);
     writel(entry, iommu->mmio_base+IOMMU_STATUS_MMIO_OFFSET);
 
     /*reset event log base address */
@@ -519,7 +527,6 @@ static void amd_iommu_page_fault(int irq
 {
     u32 entry;
     unsigned long flags;
-    int of;
     struct amd_iommu *iommu = dev_id;
 
     spin_lock_irqsave(&iommu->lock, flags);
@@ -527,19 +534,14 @@ static void amd_iommu_page_fault(int irq
     /*check event overflow */
     entry = readl(iommu->mmio_base + IOMMU_STATUS_MMIO_OFFSET);
 
-    of = get_field_from_reg_u32(entry,
-                                IOMMU_STATUS_EVENT_OVERFLOW_MASK,
-                                IOMMU_STATUS_EVENT_OVERFLOW_SHIFT);
-
     /* reset event log if event overflow */
-    if ( of )
+    if ( iommu_get_bit(entry, IOMMU_STATUS_EVENT_OVERFLOW_SHIFT) )
         amd_iommu_reset_event_log(iommu);
 
     /* reset interrupt status bit */
     entry = readl(iommu->mmio_base + IOMMU_STATUS_MMIO_OFFSET);
-    set_field_in_reg_u32(IOMMU_CONTROL_ENABLED, entry,
-                         IOMMU_STATUS_EVENT_LOG_INT_MASK,
-                         IOMMU_STATUS_EVENT_LOG_INT_SHIFT, &entry);
+    iommu_set_bit(&entry, IOMMU_STATUS_EVENT_LOG_INT_SHIFT);
+
     writel(entry, iommu->mmio_base+IOMMU_STATUS_MMIO_OFFSET);
     spin_unlock_irqrestore(&iommu->lock, flags);
 }
@@ -590,6 +592,7 @@ static void enable_iommu(struct amd_iomm
     iommu_msi_set_affinity(irq_to_desc(iommu->irq), &cpu_online_map);
     amd_iommu_msi_enable(iommu, IOMMU_CONTROL_ENABLED);
 
+    set_iommu_ht_flags(iommu);
     set_iommu_command_buffer_control(iommu, IOMMU_CONTROL_ENABLED);
     set_iommu_event_log_control(iommu, IOMMU_CONTROL_ENABLED);
     set_iommu_translation_control(iommu, IOMMU_CONTROL_ENABLED);
diff -r 60d51bc20139 -r c75cf540a1fe xen/include/asm-x86/amd-iommu.h
--- a/xen/include/asm-x86/amd-iommu.h  Wed Nov 09 16:20:02 2011 +0100
+++ b/xen/include/asm-x86/amd-iommu.h  Wed Nov 09 16:20:03 2011 +0100
@@ -57,11 +57,7 @@ struct amd_iommu {
     u8 ht_tunnel_support;
     u8 iotlb_support;
 
-    u8 isochronous;
-    u8 coherent;
-    u8 res_pass_pw;
-    u8 pass_pw;
-    u8 ht_tunnel_enable;
+    u8 ht_flags;
 
     void *mmio_base;
     unsigned long mmio_base_phys;
diff -r 60d51bc20139 -r c75cf540a1fe xen/include/asm-x86/hvm/svm/amd-iommu-proto.h
--- a/xen/include/asm-x86/hvm/svm/amd-iommu-proto.h  Wed Nov 09 16:20:02 2011 +0100
+++ b/xen/include/asm-x86/hvm/svm/amd-iommu-proto.h  Wed Nov 09 16:20:03 2011 +0100
@@ -166,4 +166,19 @@ static inline void __free_amd_iommu_tabl
     free_xenheap_pages(table, order);
 }
 
+static inline void iommu_set_bit(uint32_t *reg, uint32_t bit)
+{
+    set_field_in_reg_u32(IOMMU_CONTROL_ENABLED, *reg, 1U << bit, bit, reg);
+}
+
+static inline void iommu_clear_bit(uint32_t *reg, uint32_t bit)
+{
+    set_field_in_reg_u32(IOMMU_CONTROL_DISABLED, *reg, 1U << bit, bit, reg);
+}
+
+static inline uint32_t iommu_get_bit(uint32_t reg, uint32_t bit)
+{
+    return get_field_from_reg_u32(reg, 1U << bit, bit);
+}
+
 #endif /* _ASM_X86_64_AMD_IOMMU_PROTO_H */
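[A short usage note on the three helpers introduced above: they are thin wrappers around the existing mask/shift primitives, with the mask derived from the bit position (1U << bit). The example function and its variable names are illustrative only.]

static void bit_helper_example(void)
{
    uint32_t ctrl = 0;

    iommu_set_bit(&ctrl, 5);            /* equivalent to ctrl |= 1U << 5 */
    if ( iommu_get_bit(ctrl, 5) )       /* equivalent to (ctrl >> 5) & 1 */
        iommu_clear_bit(&ctrl, 5);      /* equivalent to ctrl &= ~(1U << 5) */
}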
Wei Wang
2011-Nov-09 15:51 UTC
[Xen-devel] [PATCH 6 of 6] amd iommu: Introduce iommu_has_cap() function
# HG changeset patch
# User Wei Wang <wei.wang2@amd.com>
# Date 1320852003 -3600
# Node ID fc109be12bf682299b47ea8ebb549afc6ac52952
# Parent  c75cf540a1fe57a439f9cb0503bf69f120066b92
amd iommu: Introduce iommu_has_cap() function.

Signed-off-by: Wei Wang <wei.wang2@amd.com>

diff -r c75cf540a1fe -r fc109be12bf6 xen/drivers/passthrough/amd/iommu_init.c
--- a/xen/drivers/passthrough/amd/iommu_init.c  Wed Nov 09 16:20:03 2011 +0100
+++ b/xen/drivers/passthrough/amd/iommu_init.c  Wed Nov 09 16:20:03 2011 +0100
@@ -79,9 +79,12 @@ static void set_iommu_ht_flags(struct am
     entry = readl(iommu->mmio_base + IOMMU_CONTROL_MMIO_OFFSET);
 
     /* Setup HT flags */
-    iommu_has_ht_flag(iommu, AMD_IOMMU_ACPI_HT_TUN_ENB_SHIFT) ?
+    if ( iommu_has_cap(iommu, PCI_CAP_HT_TUNNEL_SHIFT) )
+    {
+        iommu_has_ht_flag(iommu, AMD_IOMMU_ACPI_HT_TUN_ENB_SHIFT) ?
         iommu_set_bit(&entry, IOMMU_CONTROL_HT_TUNNEL_TRANSLATION_SHIFT):
         iommu_clear_bit(&entry, IOMMU_CONTROL_HT_TUNNEL_TRANSLATION_SHIFT);
+    }
 
     iommu_has_ht_flag(iommu, AMD_IOMMU_ACPI_RES_PASS_PW_SHIFT) ?
         iommu_set_bit(&entry, IOMMU_CONTROL_RESP_PASS_POSTED_WRITE_SHIFT):
diff -r c75cf540a1fe -r fc109be12bf6 xen/drivers/passthrough/amd/iommu_map.c
--- a/xen/drivers/passthrough/amd/iommu_map.c  Wed Nov 09 16:20:03 2011 +0100
+++ b/xen/drivers/passthrough/amd/iommu_map.c  Wed Nov 09 16:20:03 2011 +0100
@@ -996,7 +996,7 @@ void amd_iommu_flush_iotlb(struct pci_de
         return;
     }
 
-    if ( !iommu->iotlb_support )
+    if ( !iommu_has_cap(iommu, PCI_CAP_IOTLB_SHIFT) )
         return;
 
     req_id = get_dma_requestor_id(iommu->seg, bdf);
diff -r c75cf540a1fe -r fc109be12bf6 xen/drivers/passthrough/amd/pci_amd_iommu.c
--- a/xen/drivers/passthrough/amd/pci_amd_iommu.c  Wed Nov 09 16:20:03 2011 +0100
+++ b/xen/drivers/passthrough/amd/pci_amd_iommu.c  Wed Nov 09 16:20:03 2011 +0100
@@ -115,7 +115,7 @@ static void amd_iommu_setup_domain_devic
                                            hd->paging_mode, valid);
 
         if ( pci_ats_device(iommu->seg, bus, devfn) &&
-             iommu->iotlb_support )
+             iommu_has_cap(iommu, PCI_CAP_IOTLB_SHIFT) )
             iommu_dte_set_iotlb((u32 *)dte, dte_i);
 
         invalidate_dev_table_entry(iommu, req_id);
@@ -307,7 +307,7 @@ void amd_iommu_disable_domain_device(str
         disable_translation((u32 *)dte);
 
         if ( pci_ats_device(iommu->seg, bus, devfn) &&
-             iommu->iotlb_support )
+             iommu_has_cap(iommu, PCI_CAP_IOTLB_SHIFT) )
             iommu_dte_set_iotlb((u32 *)dte, 0);
 
         invalidate_dev_table_entry(iommu, req_id);
diff -r c75cf540a1fe -r fc109be12bf6 xen/include/asm-x86/amd-iommu.h
--- a/xen/include/asm-x86/amd-iommu.h  Wed Nov 09 16:20:03 2011 +0100
+++ b/xen/include/asm-x86/amd-iommu.h  Wed Nov 09 16:20:03 2011 +0100
@@ -53,10 +53,6 @@ struct amd_iommu {
     u16 cap_offset;
     iommu_cap_t cap;
 
-    u8 pte_not_present_cached;
-    u8 ht_tunnel_support;
-    u8 iotlb_support;
-
     u8 ht_flags;
 
     void *mmio_base;
diff -r c75cf540a1fe -r fc109be12bf6 xen/include/asm-x86/hvm/svm/amd-iommu-proto.h
--- a/xen/include/asm-x86/hvm/svm/amd-iommu-proto.h  Wed Nov 09 16:20:03 2011 +0100
+++ b/xen/include/asm-x86/hvm/svm/amd-iommu-proto.h  Wed Nov 09 16:20:03 2011 +0100
@@ -181,4 +181,10 @@ static inline uint32_t iommu_get_bit(uin
     return get_field_from_reg_u32(reg, 1U << bit, bit);
 }
 
+static inline int iommu_has_cap(struct amd_iommu *iommu, uint32_t bit)
+{
+    u32 mask = (1U << bit) & 0xffffffff;
+    return iommu->cap.header & mask;
+}
+
 #endif /* _ASM_X86_64_AMD_IOMMU_PROTO_H */
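[Usage note: iommu_has_cap() tests a feature bit in the capability header dword that patch 2 caches in iommu->cap.header during detection, so feature checks no longer need a per-call config-space read. The wrapper below is an illustrative sketch; only iommu_has_cap() and PCI_CAP_IOTLB_SHIFT come from the patch itself.]

static int iommu_supports_iotlb(struct amd_iommu *iommu)
{
    /* Non-zero iff the IOTLB-support bit is set in the cached cap header. */
    return iommu_has_cap(iommu, PCI_CAP_IOTLB_SHIFT) != 0;
}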
Jan Beulich
2011-Nov-11 09:33 UTC
[Xen-devel] Re: [PATCH 2 of 6] amd iommu: Cleanup iommu pci capabilities detection
>>> On 09.11.11 at 16:50, Wei Wang <wei.wang2@amd.com> wrote:
> # HG changeset patch
> # User Wei Wang <wei.wang2@amd.com>
> # Date 1320851997 -3600
> # Node ID 4b115815bc13e4f2a3a178f3de7477ecf46cb44b
> # Parent 4769713326a876c25bdc0f9d1f90594f90fba9c5
> amd iommu: Cleanup iommu pci capabilities detection.
> * Define new structure to represent capability block.
> * Remove unnecessary read for unused information.
> * Add sanity check into get_iommu_capabilities.
> * iommu capability offset is 16 bit not 8 bit, fix that.

Does this imply that the capability can reside in extended config space?
If so, all code paths using this will need revisiting with regard to
extended config space possibly being inaccessible until early Dom0
initialization.

Jan

> Signed-off-by: Wei Wang <wei.wang2@amd.com>
Wei Wang2
2011-Nov-11 10:16 UTC
[Xen-devel] Re: [PATCH 2 of 6] amd iommu: Cleanup iommu pci capabilities detection
On Friday 11 November 2011 10:33:07 Jan Beulich wrote:
> >>> On 09.11.11 at 16:50, Wei Wang <wei.wang2@amd.com> wrote:
> > # HG changeset patch
> > # User Wei Wang <wei.wang2@amd.com>
> > # Date 1320851997 -3600
> > # Node ID 4b115815bc13e4f2a3a178f3de7477ecf46cb44b
> > # Parent 4769713326a876c25bdc0f9d1f90594f90fba9c5
> > amd iommu: Cleanup iommu pci capabilities detection.
> > * Define new structure to represent capability block.
> > * Remove unnecessary read for unused information.
> > * Add sanity check into get_iommu_capabilities.
> > * iommu capability offset is 16 bit not 8 bit, fix that.
>
> Does this imply that the capability can reside in extended config space?
> If so, all code paths using this will need revisiting with regard to
> extended config space possibly being inaccessible until early Dom0
> initialization.

Jan,

This is just an inconsistency in the IOMMU specification: in the IVHD
header the capability offset is defined as a 16-bit value. In reality,
neither current nor next-generation AMD IOMMUs will have this capability
in extended PCI config space.

Thanks,
Wei

> Jan
>
> > Signed-off-by: Wei Wang <wei.wang2@amd.com>
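[For context on the 16-bit field being discussed: the IVHD block header in the ACPI IVRS table carries the IOMMU's capability offset. The layout below is a sketch following my reading of the AMD IOMMU specification; field names and exact widths may not match Xen's struct acpi_ivhd_block_header, but the capability offset is a 16-bit field even though, per the reply above, current hardware keeps the capability below 0x100 in conventional config space.]

struct ivhd_header_sketch {
    uint8_t  type;          /* 10h for an IVHD block */
    uint8_t  flags;         /* HtTunEn, PassPW, ResPassPW, Isoc, IotlbSup, Coherent */
    uint16_t length;        /* total block length, including device entries */
    uint16_t device_id;     /* BDF of the IOMMU itself */
    uint16_t cap_offset;    /* 16-bit capability offset -- the field at issue */
    uint64_t mmio_base;     /* IOMMU MMIO base address */
    uint16_t pci_segment;   /* PCI segment group */
    uint16_t iommu_info;    /* MSI number, unit id */
    uint32_t reserved;
};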