kbuild test robot
2020-Apr-29 00:31 UTC
[PATCH 1/5] swiotlb: Introduce concept of swiotlb_pool
Hi Srivatsa,
Thank you for the patch! Yet something to improve:
[auto build test ERROR on vhost/linux-next]
[also build test ERROR on xen-tip/linux-next linus/master v5.7-rc3 next-20200428]
[cannot apply to swiotlb/linux-next]
[if your patch is applied to the wrong git tree, please drop us a note to help improve the system. BTW, we also suggest using the '--base' option to specify the base tree in git format-patch; please see https://stackoverflow.com/a/37406982]
url: https://github.com/0day-ci/linux/commits/Srivatsa-Vaddagiri/virtio-on-Type-1-hypervisor/20200429-032334
base: https://git.kernel.org/pub/scm/linux/kernel/git/mst/vhost.git linux-next
config: x86_64-defconfig (attached as .config)
compiler: gcc-7 (Ubuntu 7.5.0-6ubuntu2) 7.5.0
reproduce:
        # save the attached .config to linux build tree
        make ARCH=x86_64
If you fix the issue, kindly add the following tag as appropriate
Reported-by: kbuild test robot <lkp@intel.com>
All errors (new ones prefixed by >>):
   drivers/iommu/intel-iommu.c: In function 'bounce_map_single':
>> drivers/iommu/intel-iommu.c:3990:24: error: 'io_tlb_start' undeclared (first use in this function); did you mean 'swiotlb_start'?
     __phys_to_dma(dev, io_tlb_start),
                        ^~~~~~~~~~~~
                        swiotlb_start
   drivers/iommu/intel-iommu.c:3990:24: note: each undeclared identifier is reported only once for each function it appears in
vim +3990 drivers/iommu/intel-iommu.c
cfb94a372f2d4e Lu Baolu 2019-09-06 3941  
cfb94a372f2d4e Lu Baolu 2019-09-06 3942  static dma_addr_t
cfb94a372f2d4e Lu Baolu 2019-09-06 3943  bounce_map_single(struct device *dev, phys_addr_t paddr, size_t size,
cfb94a372f2d4e Lu Baolu 2019-09-06 3944  			  enum dma_data_direction dir, unsigned long attrs,
cfb94a372f2d4e Lu Baolu 2019-09-06 3945  			  u64 dma_mask)
cfb94a372f2d4e Lu Baolu 2019-09-06 3946  {
cfb94a372f2d4e Lu Baolu 2019-09-06 3947  	size_t aligned_size = ALIGN(size, VTD_PAGE_SIZE);
cfb94a372f2d4e Lu Baolu 2019-09-06 3948  	struct dmar_domain *domain;
cfb94a372f2d4e Lu Baolu 2019-09-06 3949  	struct intel_iommu *iommu;
cfb94a372f2d4e Lu Baolu 2019-09-06 3950  	unsigned long iova_pfn;
cfb94a372f2d4e Lu Baolu 2019-09-06 3951  	unsigned long nrpages;
cfb94a372f2d4e Lu Baolu 2019-09-06 3952  	phys_addr_t tlb_addr;
cfb94a372f2d4e Lu Baolu 2019-09-06 3953  	int prot = 0;
cfb94a372f2d4e Lu Baolu 2019-09-06 3954  	int ret;
cfb94a372f2d4e Lu Baolu 2019-09-06 3955  
a11bfde9c77df1 Joerg Roedel 2020-02-17 3956  	if (unlikely(attach_deferred(dev)))
a11bfde9c77df1 Joerg Roedel 2020-02-17 3957  		do_deferred_attach(dev);
a11bfde9c77df1 Joerg Roedel 2020-02-17 3958  
96d170f3b1a607 Joerg Roedel 2020-02-17 3959  	domain = find_domain(dev);
a11bfde9c77df1 Joerg Roedel 2020-02-17 3960  
cfb94a372f2d4e Lu Baolu 2019-09-06 3961  	if (WARN_ON(dir == DMA_NONE || !domain))
cfb94a372f2d4e Lu Baolu 2019-09-06 3962  		return DMA_MAPPING_ERROR;
cfb94a372f2d4e Lu Baolu 2019-09-06 3963  
cfb94a372f2d4e Lu Baolu 2019-09-06 3964  	iommu = domain_get_iommu(domain);
cfb94a372f2d4e Lu Baolu 2019-09-06 3965  	if (WARN_ON(!iommu))
cfb94a372f2d4e Lu Baolu 2019-09-06 3966  		return DMA_MAPPING_ERROR;
cfb94a372f2d4e Lu Baolu 2019-09-06 3967  
cfb94a372f2d4e Lu Baolu 2019-09-06 3968  	nrpages = aligned_nrpages(0, size);
cfb94a372f2d4e Lu Baolu 2019-09-06 3969  	iova_pfn = intel_alloc_iova(dev, domain,
cfb94a372f2d4e Lu Baolu 2019-09-06 3970  			dma_to_mm_pfn(nrpages), dma_mask);
cfb94a372f2d4e Lu Baolu 2019-09-06 3971  	if (!iova_pfn)
cfb94a372f2d4e Lu Baolu 2019-09-06 3972  		return DMA_MAPPING_ERROR;
cfb94a372f2d4e Lu Baolu 2019-09-06 3973  
cfb94a372f2d4e Lu Baolu 2019-09-06 3974  	/*
cfb94a372f2d4e Lu Baolu 2019-09-06 3975  	 * Check if DMAR supports zero-length reads on write only
cfb94a372f2d4e Lu Baolu 2019-09-06 3976  	 * mappings..
cfb94a372f2d4e Lu Baolu 2019-09-06 3977  	 */
cfb94a372f2d4e Lu Baolu 2019-09-06 3978  	if (dir == DMA_TO_DEVICE || dir == DMA_BIDIRECTIONAL ||
cfb94a372f2d4e Lu Baolu 2019-09-06 3979  			!cap_zlr(iommu->cap))
cfb94a372f2d4e Lu Baolu 2019-09-06 3980  		prot |= DMA_PTE_READ;
cfb94a372f2d4e Lu Baolu 2019-09-06 3981  	if (dir == DMA_FROM_DEVICE || dir == DMA_BIDIRECTIONAL)
cfb94a372f2d4e Lu Baolu 2019-09-06 3982  		prot |= DMA_PTE_WRITE;
cfb94a372f2d4e Lu Baolu 2019-09-06 3983  
cfb94a372f2d4e Lu Baolu 2019-09-06 3984  	/*
cfb94a372f2d4e Lu Baolu 2019-09-06 3985  	 * If both the physical buffer start address and size are
cfb94a372f2d4e Lu Baolu 2019-09-06 3986  	 * page aligned, we don't need to use a bounce page.
cfb94a372f2d4e Lu Baolu 2019-09-06 3987  	 */
cfb94a372f2d4e Lu Baolu 2019-09-06 3988  	if (!IS_ALIGNED(paddr | size, VTD_PAGE_SIZE)) {
cfb94a372f2d4e Lu Baolu 2019-09-06 3989  		tlb_addr = swiotlb_tbl_map_single(dev,
cfb94a372f2d4e Lu Baolu 2019-09-06 @3990  				__phys_to_dma(dev, io_tlb_start),
cfb94a372f2d4e Lu Baolu 2019-09-06 3991  				paddr, size, aligned_size, dir, attrs);
cfb94a372f2d4e Lu Baolu 2019-09-06 3992  		if (tlb_addr == DMA_MAPPING_ERROR) {
cfb94a372f2d4e Lu Baolu 2019-09-06 3993  			goto swiotlb_error;
cfb94a372f2d4e Lu Baolu 2019-09-06 3994  		} else {
cfb94a372f2d4e Lu Baolu 2019-09-06 3995  			/* Cleanup the padding area. */
cfb94a372f2d4e Lu Baolu 2019-09-06 3996  			void *padding_start = phys_to_virt(tlb_addr);
cfb94a372f2d4e Lu Baolu 2019-09-06 3997  			size_t padding_size = aligned_size;
cfb94a372f2d4e Lu Baolu 2019-09-06 3998  
cfb94a372f2d4e Lu Baolu 2019-09-06 3999  			if (!(attrs & DMA_ATTR_SKIP_CPU_SYNC) &&
cfb94a372f2d4e Lu Baolu 2019-09-06 4000  			    (dir == DMA_TO_DEVICE ||
cfb94a372f2d4e Lu Baolu 2019-09-06 4001  			     dir == DMA_BIDIRECTIONAL)) {
cfb94a372f2d4e Lu Baolu 2019-09-06 4002  				padding_start += size;
cfb94a372f2d4e Lu Baolu 2019-09-06 4003  				padding_size -= size;
cfb94a372f2d4e Lu Baolu 2019-09-06 4004  			}
cfb94a372f2d4e Lu Baolu 2019-09-06 4005  
cfb94a372f2d4e Lu Baolu 2019-09-06 4006  			memset(padding_start, 0, padding_size);
cfb94a372f2d4e Lu Baolu 2019-09-06 4007  		}
cfb94a372f2d4e Lu Baolu 2019-09-06 4008  	} else {
cfb94a372f2d4e Lu Baolu 2019-09-06 4009  		tlb_addr = paddr;
cfb94a372f2d4e Lu Baolu 2019-09-06 4010  	}
cfb94a372f2d4e Lu Baolu 2019-09-06 4011  
cfb94a372f2d4e Lu Baolu 2019-09-06 4012  	ret = domain_pfn_mapping(domain, mm_to_dma_pfn(iova_pfn),
cfb94a372f2d4e Lu Baolu 2019-09-06 4013  			tlb_addr >> VTD_PAGE_SHIFT, nrpages, prot);
cfb94a372f2d4e Lu Baolu 2019-09-06 4014  	if (ret)
cfb94a372f2d4e Lu Baolu 2019-09-06 4015  		goto mapping_error;
cfb94a372f2d4e Lu Baolu 2019-09-06 4016  
cfb94a372f2d4e Lu Baolu 2019-09-06 4017  	trace_bounce_map_single(dev, iova_pfn << PAGE_SHIFT, paddr, size);
cfb94a372f2d4e Lu Baolu 2019-09-06 4018  
cfb94a372f2d4e Lu Baolu 2019-09-06 4019  	return (phys_addr_t)iova_pfn << PAGE_SHIFT;
cfb94a372f2d4e Lu Baolu 2019-09-06 4020  
cfb94a372f2d4e Lu Baolu 2019-09-06 4021  mapping_error:
cfb94a372f2d4e Lu Baolu 2019-09-06 4022  	if (is_swiotlb_buffer(tlb_addr))
cfb94a372f2d4e Lu Baolu 2019-09-06 4023  		swiotlb_tbl_unmap_single(dev, tlb_addr, size,
cfb94a372f2d4e Lu Baolu 2019-09-06 4024  				aligned_size, dir, attrs);
cfb94a372f2d4e Lu Baolu 2019-09-06 4025  swiotlb_error:
cfb94a372f2d4e Lu Baolu 2019-09-06 4026  	free_iova_fast(&domain->iovad, iova_pfn, dma_to_mm_pfn(nrpages));
cfb94a372f2d4e Lu Baolu 2019-09-06 4027  	dev_err(dev, "Device bounce map: %zx@%llx dir %d --- failed\n",
cfb94a372f2d4e Lu Baolu 2019-09-06 4028  		size, (unsigned long long)paddr, dir);
cfb94a372f2d4e Lu Baolu 2019-09-06 4029  
cfb94a372f2d4e Lu Baolu 2019-09-06 4030  	return DMA_MAPPING_ERROR;
cfb94a372f2d4e Lu Baolu 2019-09-06 4031  }
cfb94a372f2d4e Lu Baolu 2019-09-06 4032  
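The failure itself is straightforward fallout from the refactor: patch 1/5 evidently folds the io_tlb_start global into the new swiotlb_pool abstraction, while the intel-iommu bounce path above still references the old symbol at line 3990. Below is a minimal sketch of the kind of call-site update that would be needed; swiotlb_start() is used here only as an illustrative stand-in for whatever accessor the series actually provides for the default pool's start address (the compiler's "did you mean" suggestion indicates an identifier of that name is visible after the patch, but its exact form and signature are assumptions, not confirmed from the series):

	/*
	 * Hypothetical fix sketch for bounce_map_single().  swiotlb_start()
	 * is an assumed accessor returning the default pool's start
	 * physical address; it is not necessarily the real API of patch 1/5.
	 */
	if (!IS_ALIGNED(paddr | size, VTD_PAGE_SIZE)) {
		phys_addr_t tlb_base = swiotlb_start();

		tlb_addr = swiotlb_tbl_map_single(dev,
				__phys_to_dma(dev, tlb_base),
				paddr, size, aligned_size, dir, attrs);
		if (tlb_addr == DMA_MAPPING_ERROR)
			goto swiotlb_error;
	}

Whichever form the accessor finally takes, the larger point is that every in-tree user of the io_tlb_* globals (intel-iommu's bounce buffering being the one this x86_64-defconfig build trips over) has to be converted to the pool API in the same series so that all configurations keep building.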
:::::: The code at line 3990 was first introduced by commit
:::::: cfb94a372f2d4ee226247447c863f8709863d170 iommu/vt-d: Use bounce buffer for untrusted devices
:::::: TO: Lu Baolu <baolu.lu@linux.intel.com>
:::::: CC: Joerg Roedel <jroedel@suse.de>
---
0-DAY CI Kernel Test Service, Intel Corporation
https://lists.01.org/hyperkitty/list/kbuild-all@lists.01.org
