From baa45ab1bdffc10033921735aae02f673e276cf1 Mon Sep 17 00:00:00 2001 From: "Isaac J. Manjarres" Date: Wed, 16 Jun 2021 06:38:42 -0700 Subject: [PATCH 01/19] iommu/io-pgtable: Introduce unmap_pages() as a page table op The io-pgtable code expects to operate on a single block or granule of memory that is supported by the IOMMU hardware when unmapping memory. This means that when a large buffer that consists of multiple such blocks is unmapped, the io-pgtable code will walk the page tables to the correct level to unmap each block, even for blocks that are virtually contiguous and at the same level, which can incur an overhead in performance. Introduce the unmap_pages() page table op to express to the io-pgtable code that it should unmap a number of blocks of the same size, instead of a single block. Doing so allows multiple blocks to be unmapped in one call to the io-pgtable code, reducing the number of page table walks, and indirect calls. Signed-off-by: Isaac J. Manjarres Suggested-by: Will Deacon Signed-off-by: Will Deacon Signed-off-by: Georgi Djakov Link: https://lore.kernel.org/r/1623850736-389584-2-git-send-email-quic_c_gdjako@quicinc.com Signed-off-by: Joerg Roedel --- include/linux/io-pgtable.h | 4 ++++ 1 file changed, 4 insertions(+) diff --git a/include/linux/io-pgtable.h b/include/linux/io-pgtable.h index 4d40dfa75b55fe..9391c5fa71e6c4 100644 --- a/include/linux/io-pgtable.h +++ b/include/linux/io-pgtable.h @@ -144,6 +144,7 @@ struct io_pgtable_cfg { * * @map: Map a physically contiguous memory region. * @unmap: Unmap a physically contiguous memory region. + * @unmap_pages: Unmap a range of virtually contiguous pages of the same size. * @iova_to_phys: Translate iova to physical address. * * These functions map directly onto the iommu_ops member functions with @@ -154,6 +155,9 @@ struct io_pgtable_ops { phys_addr_t paddr, size_t size, int prot, gfp_t gfp); size_t (*unmap)(struct io_pgtable_ops *ops, unsigned long iova, size_t size, struct iommu_iotlb_gather *gather); + size_t (*unmap_pages)(struct io_pgtable_ops *ops, unsigned long iova, + size_t pgsize, size_t pgcount, + struct iommu_iotlb_gather *gather); phys_addr_t (*iova_to_phys)(struct io_pgtable_ops *ops, unsigned long iova); }; From e0ba04ddd5300ee0d10dbd7f9db671de38dd6e3f Mon Sep 17 00:00:00 2001 From: "Isaac J. Manjarres" Date: Wed, 16 Jun 2021 06:38:43 -0700 Subject: [PATCH 02/19] iommu: Add an unmap_pages() op for IOMMU drivers Add a callback for IOMMU drivers to provide a path for the IOMMU framework to call into an IOMMU driver, which can call into the io-pgtable code, to unmap a virtually contiguous range of pages of the same size. For IOMMU drivers that do not specify an unmap_pages() callback, the existing logic of unmapping memory one page block at a time will be used. Signed-off-by: Isaac J. 
Manjarres Suggested-by: Will Deacon Signed-off-by: Will Deacon Acked-by: Lu Baolu Signed-off-by: Georgi Djakov Link: https://lore.kernel.org/r/1623850736-389584-3-git-send-email-quic_c_gdjako@quicinc.com Signed-off-by: Joerg Roedel --- include/linux/iommu.h | 4 ++++ 1 file changed, 4 insertions(+) diff --git a/include/linux/iommu.h b/include/linux/iommu.h index 32d448050bf76e..25a844121be55d 100644 --- a/include/linux/iommu.h +++ b/include/linux/iommu.h @@ -181,6 +181,7 @@ struct iommu_iotlb_gather { * @detach_dev: detach device from an iommu domain * @map: map a physically contiguous memory region to an iommu domain * @unmap: unmap a physically contiguous memory region from an iommu domain + * @unmap_pages: unmap a number of pages of the same size from an iommu domain * @flush_iotlb_all: Synchronously flush all hardware TLBs for this domain * @iotlb_sync_map: Sync mappings created recently using @map to the hardware * @iotlb_sync: Flush all queued ranges from the hardware TLBs and empty flush @@ -231,6 +232,9 @@ struct iommu_ops { phys_addr_t paddr, size_t size, int prot, gfp_t gfp); size_t (*unmap)(struct iommu_domain *domain, unsigned long iova, size_t size, struct iommu_iotlb_gather *iotlb_gather); + size_t (*unmap_pages)(struct iommu_domain *domain, unsigned long iova, + size_t pgsize, size_t pgcount, + struct iommu_iotlb_gather *iotlb_gather); void (*flush_iotlb_all)(struct iommu_domain *domain); void (*iotlb_sync_map)(struct iommu_domain *domain, unsigned long iova, size_t size); From d2b972c1784ac9d0bdc0c6a6cf0b86fef9146256 Mon Sep 17 00:00:00 2001 From: "Isaac J. Manjarres" Date: Wed, 16 Jun 2021 06:38:44 -0700 Subject: [PATCH 03/19] iommu/io-pgtable: Introduce map_pages() as a page table op Mapping memory into io-pgtables follows the same semantics that unmapping memory used to follow (i.e. a buffer will be mapped one page block per call to the io-pgtable code). This means that it can be optimized in the same way that unmapping memory was, so add a map_pages() callback to the io-pgtable ops structure, so that a range of pages of the same size can be mapped within the same call. Signed-off-by: Isaac J. Manjarres Suggested-by: Will Deacon Signed-off-by: Georgi Djakov Link: https://lore.kernel.org/r/1623850736-389584-4-git-send-email-quic_c_gdjako@quicinc.com Signed-off-by: Joerg Roedel --- include/linux/io-pgtable.h | 4 ++++ 1 file changed, 4 insertions(+) diff --git a/include/linux/io-pgtable.h b/include/linux/io-pgtable.h index 9391c5fa71e6c4..c43f3b899d2a49 100644 --- a/include/linux/io-pgtable.h +++ b/include/linux/io-pgtable.h @@ -143,6 +143,7 @@ struct io_pgtable_cfg { * struct io_pgtable_ops - Page table manipulation API for IOMMU drivers. * * @map: Map a physically contiguous memory region. + * @map_pages: Map a physically contiguous range of pages of the same size. * @unmap: Unmap a physically contiguous memory region. * @unmap_pages: Unmap a range of virtually contiguous pages of the same size. * @iova_to_phys: Translate iova to physical address. 
@@ -153,6 +154,9 @@ struct io_pgtable_cfg { struct io_pgtable_ops { int (*map)(struct io_pgtable_ops *ops, unsigned long iova, phys_addr_t paddr, size_t size, int prot, gfp_t gfp); + int (*map_pages)(struct io_pgtable_ops *ops, unsigned long iova, + phys_addr_t paddr, size_t pgsize, size_t pgcount, + int prot, gfp_t gfp, size_t *mapped); size_t (*unmap)(struct io_pgtable_ops *ops, unsigned long iova, size_t size, struct iommu_iotlb_gather *gather); size_t (*unmap_pages)(struct io_pgtable_ops *ops, unsigned long iova, From 6ccb88e45050ba2d5a88d58349696e0836439570 Mon Sep 17 00:00:00 2001 From: "Isaac J. Manjarres" Date: Wed, 16 Jun 2021 06:38:45 -0700 Subject: [PATCH 04/19] iommu: Add a map_pages() op for IOMMU drivers Add a callback for IOMMU drivers to provide a path for the IOMMU framework to call into an IOMMU driver, which can call into the io-pgtable code, to map a physically contiguous range of pages of the same size. For IOMMU drivers that do not specify a map_pages() callback, the existing logic of mapping memory one page block at a time will be used. Signed-off-by: Isaac J. Manjarres Suggested-by: Will Deacon Acked-by: Lu Baolu Signed-off-by: Georgi Djakov Link: https://lore.kernel.org/r/1623850736-389584-5-git-send-email-quic_c_gdjako@quicinc.com Signed-off-by: Joerg Roedel --- include/linux/iommu.h | 5 +++++ 1 file changed, 5 insertions(+) diff --git a/include/linux/iommu.h b/include/linux/iommu.h index 25a844121be55d..d7989d4a740499 100644 --- a/include/linux/iommu.h +++ b/include/linux/iommu.h @@ -180,6 +180,8 @@ struct iommu_iotlb_gather { * @attach_dev: attach device to an iommu domain * @detach_dev: detach device from an iommu domain * @map: map a physically contiguous memory region to an iommu domain + * @map_pages: map a physically contiguous set of pages of the same size to + * an iommu domain. * @unmap: unmap a physically contiguous memory region from an iommu domain * @unmap_pages: unmap a number of pages of the same size from an iommu domain * @flush_iotlb_all: Synchronously flush all hardware TLBs for this domain @@ -230,6 +232,9 @@ struct iommu_ops { void (*detach_dev)(struct iommu_domain *domain, struct device *dev); int (*map)(struct iommu_domain *domain, unsigned long iova, phys_addr_t paddr, size_t size, int prot, gfp_t gfp); + int (*map_pages)(struct iommu_domain *domain, unsigned long iova, + phys_addr_t paddr, size_t pgsize, size_t pgcount, + int prot, gfp_t gfp, size_t *mapped); size_t (*unmap)(struct iommu_domain *domain, unsigned long iova, size_t size, struct iommu_iotlb_gather *iotlb_gather); size_t (*unmap_pages)(struct iommu_domain *domain, unsigned long iova, From 3510271b21e9e1bf6e365ab0280dd81bb0cc4956 Mon Sep 17 00:00:00 2001 From: Will Deacon Date: Wed, 16 Jun 2021 06:38:46 -0700 Subject: [PATCH 05/19] iommu: Use bitmap to calculate page size in iommu_pgsize() Avoid the potential for shifting values by amounts greater than the width of their type by using a bitmap to compute page size in iommu_pgsize(). Signed-off-by: Will Deacon Signed-off-by: Isaac J. 
Manjarres Signed-off-by: Georgi Djakov Reviewed-by: Lu Baolu Link: https://lore.kernel.org/r/1623850736-389584-6-git-send-email-quic_c_gdjako@quicinc.com Signed-off-by: Joerg Roedel --- drivers/iommu/iommu.c | 31 ++++++++++++------------------- 1 file changed, 12 insertions(+), 19 deletions(-) diff --git a/drivers/iommu/iommu.c b/drivers/iommu/iommu.c index 5419c4b9f27ada..80e471ada35816 100644 --- a/drivers/iommu/iommu.c +++ b/drivers/iommu/iommu.c @@ -8,6 +8,7 @@ #include #include +#include #include #include #include @@ -2378,30 +2379,22 @@ static size_t iommu_pgsize(struct iommu_domain *domain, unsigned long addr_merge, size_t size) { unsigned int pgsize_idx; + unsigned long pgsizes; size_t pgsize; - /* Max page size that still fits into 'size' */ - pgsize_idx = __fls(size); + /* Page sizes supported by the hardware and small enough for @size */ + pgsizes = domain->pgsize_bitmap & GENMASK(__fls(size), 0); - /* need to consider alignment requirements ? */ - if (likely(addr_merge)) { - /* Max page size allowed by address */ - unsigned int align_pgsize_idx = __ffs(addr_merge); - pgsize_idx = min(pgsize_idx, align_pgsize_idx); - } - - /* build a mask of acceptable page sizes */ - pgsize = (1UL << (pgsize_idx + 1)) - 1; - - /* throw away page sizes not supported by the hardware */ - pgsize &= domain->pgsize_bitmap; + /* Constrain the page sizes further based on the maximum alignment */ + if (likely(addr_merge)) + pgsizes &= GENMASK(__ffs(addr_merge), 0); - /* make sure we're still sane */ - BUG_ON(!pgsize); + /* Make sure we have at least one suitable page size */ + BUG_ON(!pgsizes); - /* pick the biggest page */ - pgsize_idx = __fls(pgsize); - pgsize = 1UL << pgsize_idx; + /* Pick the biggest page size remaining */ + pgsize_idx = __fls(pgsizes); + pgsize = BIT(pgsize_idx); return pgsize; } From f9f844ef19424104d441a252b481bec42e2621a0 Mon Sep 17 00:00:00 2001 From: Will Deacon Date: Wed, 16 Jun 2021 06:38:47 -0700 Subject: [PATCH 06/19] iommu: Split 'addr_merge' argument to iommu_pgsize() into separate parts The 'addr_merge' parameter to iommu_pgsize() is a fabricated address intended to describe the alignment requirements to consider when choosing an appropriate page size. On the iommu_map() path, this address is the logical OR of the virtual and physical addresses. Subsequent improvements to iommu_pgsize() will need to check the alignment of the virtual and physical components of 'addr_merge' independently, so pass them in as separate parameters and reconstruct 'addr_merge' locally. No functional change. Signed-off-by: Will Deacon Signed-off-by: Isaac J. 
Manjarres Signed-off-by: Georgi Djakov Reviewed-by: Lu Baolu Link: https://lore.kernel.org/r/1623850736-389584-7-git-send-email-quic_c_gdjako@quicinc.com Signed-off-by: Joerg Roedel --- drivers/iommu/iommu.c | 10 ++++++---- 1 file changed, 6 insertions(+), 4 deletions(-) diff --git a/drivers/iommu/iommu.c b/drivers/iommu/iommu.c index 80e471ada35816..80e14c139d40f5 100644 --- a/drivers/iommu/iommu.c +++ b/drivers/iommu/iommu.c @@ -2375,12 +2375,13 @@ phys_addr_t iommu_iova_to_phys(struct iommu_domain *domain, dma_addr_t iova) } EXPORT_SYMBOL_GPL(iommu_iova_to_phys); -static size_t iommu_pgsize(struct iommu_domain *domain, - unsigned long addr_merge, size_t size) +static size_t iommu_pgsize(struct iommu_domain *domain, unsigned long iova, + phys_addr_t paddr, size_t size) { unsigned int pgsize_idx; unsigned long pgsizes; size_t pgsize; + unsigned long addr_merge = paddr | iova; /* Page sizes supported by the hardware and small enough for @size */ pgsizes = domain->pgsize_bitmap & GENMASK(__fls(size), 0); @@ -2433,7 +2434,7 @@ static int __iommu_map(struct iommu_domain *domain, unsigned long iova, pr_debug("map: iova 0x%lx pa %pa size 0x%zx\n", iova, &paddr, size); while (size) { - size_t pgsize = iommu_pgsize(domain, iova | paddr, size); + size_t pgsize = iommu_pgsize(domain, iova, paddr, size); pr_debug("mapping: iova 0x%lx pa %pa pgsize 0x%zx\n", iova, &paddr, pgsize); @@ -2521,8 +2522,9 @@ static size_t __iommu_unmap(struct iommu_domain *domain, * or we hit an area that isn't mapped. */ while (unmapped < size) { - size_t pgsize = iommu_pgsize(domain, iova, size - unmapped); + size_t pgsize; + pgsize = iommu_pgsize(domain, iova, iova, size - unmapped); unmapped_page = ops->unmap(domain, iova, pgsize, iotlb_gather); if (!unmapped_page) break; From 14d0ba182f95ae40c62b75c12b894eecd3e9c8f2 Mon Sep 17 00:00:00 2001 From: Will Deacon Date: Wed, 16 Jun 2021 06:38:48 -0700 Subject: [PATCH 07/19] iommu: Hook up '->unmap_pages' driver callback Extend iommu_pgsize() to populate an optional 'count' parameter so that we can direct unmapping operation to the ->unmap_pages callback if it has been provided by the driver. Signed-off-by: Will Deacon Signed-off-by: Isaac J. 
Manjarres Signed-off-by: Georgi Djakov Reviewed-by: Lu Baolu Link: https://lore.kernel.org/r/1623850736-389584-8-git-send-email-quic_c_gdjako@quicinc.com Signed-off-by: Joerg Roedel --- drivers/iommu/iommu.c | 59 ++++++++++++++++++++++++++++++++++++------- 1 file changed, 50 insertions(+), 9 deletions(-) diff --git a/drivers/iommu/iommu.c b/drivers/iommu/iommu.c index 80e14c139d40f5..725622c7e60345 100644 --- a/drivers/iommu/iommu.c +++ b/drivers/iommu/iommu.c @@ -2376,11 +2376,11 @@ phys_addr_t iommu_iova_to_phys(struct iommu_domain *domain, dma_addr_t iova) EXPORT_SYMBOL_GPL(iommu_iova_to_phys); static size_t iommu_pgsize(struct iommu_domain *domain, unsigned long iova, - phys_addr_t paddr, size_t size) + phys_addr_t paddr, size_t size, size_t *count) { - unsigned int pgsize_idx; + unsigned int pgsize_idx, pgsize_idx_next; unsigned long pgsizes; - size_t pgsize; + size_t offset, pgsize, pgsize_next; unsigned long addr_merge = paddr | iova; /* Page sizes supported by the hardware and small enough for @size */ @@ -2396,7 +2396,36 @@ static size_t iommu_pgsize(struct iommu_domain *domain, unsigned long iova, /* Pick the biggest page size remaining */ pgsize_idx = __fls(pgsizes); pgsize = BIT(pgsize_idx); + if (!count) + return pgsize; + /* Find the next biggest support page size, if it exists */ + pgsizes = domain->pgsize_bitmap & ~GENMASK(pgsize_idx, 0); + if (!pgsizes) + goto out_set_count; + + pgsize_idx_next = __ffs(pgsizes); + pgsize_next = BIT(pgsize_idx_next); + + /* + * There's no point trying a bigger page size unless the virtual + * and physical addresses are similarly offset within the larger page. + */ + if ((iova ^ paddr) & (pgsize_next - 1)) + goto out_set_count; + + /* Calculate the offset to the next page size alignment boundary */ + offset = pgsize_next - (addr_merge & (pgsize_next - 1)); + + /* + * If size is big enough to accommodate the larger page, reduce + * the number of smaller pages. + */ + if (offset + pgsize_next <= size) + size = offset; + +out_set_count: + *count = size >> pgsize_idx; return pgsize; } @@ -2434,7 +2463,7 @@ static int __iommu_map(struct iommu_domain *domain, unsigned long iova, pr_debug("map: iova 0x%lx pa %pa size 0x%zx\n", iova, &paddr, size); while (size) { - size_t pgsize = iommu_pgsize(domain, iova, paddr, size); + size_t pgsize = iommu_pgsize(domain, iova, paddr, size, NULL); pr_debug("mapping: iova 0x%lx pa %pa pgsize 0x%zx\n", iova, &paddr, pgsize); @@ -2485,6 +2514,19 @@ int iommu_map_atomic(struct iommu_domain *domain, unsigned long iova, } EXPORT_SYMBOL_GPL(iommu_map_atomic); +static size_t __iommu_unmap_pages(struct iommu_domain *domain, + unsigned long iova, size_t size, + struct iommu_iotlb_gather *iotlb_gather) +{ + const struct iommu_ops *ops = domain->ops; + size_t pgsize, count; + + pgsize = iommu_pgsize(domain, iova, iova, size, &count); + return ops->unmap_pages ? + ops->unmap_pages(domain, iova, pgsize, count, iotlb_gather) : + ops->unmap(domain, iova, pgsize, iotlb_gather); +} + static size_t __iommu_unmap(struct iommu_domain *domain, unsigned long iova, size_t size, struct iommu_iotlb_gather *iotlb_gather) @@ -2494,7 +2536,7 @@ static size_t __iommu_unmap(struct iommu_domain *domain, unsigned long orig_iova = iova; unsigned int min_pagesz; - if (unlikely(ops->unmap == NULL || + if (unlikely(!(ops->unmap || ops->unmap_pages) || domain->pgsize_bitmap == 0UL)) return 0; @@ -2522,10 +2564,9 @@ static size_t __iommu_unmap(struct iommu_domain *domain, * or we hit an area that isn't mapped. 
*/ while (unmapped < size) { - size_t pgsize; - - pgsize = iommu_pgsize(domain, iova, iova, size - unmapped); - unmapped_page = ops->unmap(domain, iova, pgsize, iotlb_gather); + unmapped_page = __iommu_unmap_pages(domain, iova, + size - unmapped, + iotlb_gather); if (!unmapped_page) break; From 3ee5e6d7a8578daf24d5c52ac783bd4c71826c76 Mon Sep 17 00:00:00 2001 From: "Isaac J. Manjarres" Date: Wed, 16 Jun 2021 06:38:49 -0700 Subject: [PATCH 08/19] iommu: Add support for the map_pages() callback Since iommu_pgsize can calculate how many pages of the same size can be mapped/unmapped before the next largest page size boundary, add support for invoking an IOMMU driver's map_pages() callback, if it provides one. Signed-off-by: Isaac J. Manjarres Suggested-by: Will Deacon Signed-off-by: Georgi Djakov Reviewed-by: Lu Baolu Link: https://lore.kernel.org/r/1623850736-389584-9-git-send-email-quic_c_gdjako@quicinc.com Signed-off-by: Joerg Roedel --- drivers/iommu/iommu.c | 43 +++++++++++++++++++++++++++++++++++-------- 1 file changed, 35 insertions(+), 8 deletions(-) diff --git a/drivers/iommu/iommu.c b/drivers/iommu/iommu.c index 725622c7e60345..70a729ce88b1a7 100644 --- a/drivers/iommu/iommu.c +++ b/drivers/iommu/iommu.c @@ -2429,6 +2429,30 @@ static size_t iommu_pgsize(struct iommu_domain *domain, unsigned long iova, return pgsize; } +static int __iommu_map_pages(struct iommu_domain *domain, unsigned long iova, + phys_addr_t paddr, size_t size, int prot, + gfp_t gfp, size_t *mapped) +{ + const struct iommu_ops *ops = domain->ops; + size_t pgsize, count; + int ret; + + pgsize = iommu_pgsize(domain, iova, paddr, size, &count); + + pr_debug("mapping: iova 0x%lx pa %pa pgsize 0x%zx count %zu\n", + iova, &paddr, pgsize, count); + + if (ops->map_pages) { + ret = ops->map_pages(domain, iova, paddr, pgsize, count, prot, + gfp, mapped); + } else { + ret = ops->map(domain, iova, paddr, pgsize, prot, gfp); + *mapped = ret ? 0 : pgsize; + } + + return ret; +} + static int __iommu_map(struct iommu_domain *domain, unsigned long iova, phys_addr_t paddr, size_t size, int prot, gfp_t gfp) { @@ -2439,7 +2463,7 @@ static int __iommu_map(struct iommu_domain *domain, unsigned long iova, phys_addr_t orig_paddr = paddr; int ret = 0; - if (unlikely(ops->map == NULL || + if (unlikely(!(ops->map || ops->map_pages) || domain->pgsize_bitmap == 0UL)) return -ENODEV; @@ -2463,18 +2487,21 @@ static int __iommu_map(struct iommu_domain *domain, unsigned long iova, pr_debug("map: iova 0x%lx pa %pa size 0x%zx\n", iova, &paddr, size); while (size) { - size_t pgsize = iommu_pgsize(domain, iova, paddr, size, NULL); + size_t mapped = 0; - pr_debug("mapping: iova 0x%lx pa %pa pgsize 0x%zx\n", - iova, &paddr, pgsize); - ret = ops->map(domain, iova, paddr, pgsize, prot, gfp); + ret = __iommu_map_pages(domain, iova, paddr, size, prot, gfp, + &mapped); + /* + * Some pages may have been mapped, even if an error occurred, + * so we should account for those so they can be unmapped. + */ + size -= mapped; if (ret) break; - iova += pgsize; - paddr += pgsize; - size -= pgsize; + iova += mapped; + paddr += mapped; } /* unroll mapping in case something went wrong */ From 553e34763dcf5df57022308e33f3de7344b539df Mon Sep 17 00:00:00 2001 From: "Isaac J. Manjarres" Date: Wed, 16 Jun 2021 06:38:50 -0700 Subject: [PATCH 09/19] iommu/io-pgtable-arm: Prepare PTE methods for handling multiple entries The PTE methods currently operate on a single entry. In preparation for manipulating multiple PTEs in one map or unmap call, allow them to handle multiple PTEs. 
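Before moving into the io-pgtable changes, the page-size and count selection that patches 05-08 built up is worth seeing in isolation. The sketch below is a userspace model only, not the kernel code: GENMASK(), __fls() and __ffs() are stood in by compiler builtins, a 64-bit unsigned long is assumed, and the BUG_ON() sanity check is reduced to an assert.

    #include <assert.h>
    #include <stddef.h>
    #include <stdio.h>

    #define GENMASK(h, l)   (((~0UL) << (l)) & (~0UL >> (63 - (h))))

    static unsigned int fls64_(unsigned long x) { return 63 - __builtin_clzl(x); }
    static unsigned int ffs64_(unsigned long x) { return __builtin_ctzl(x); }

    /* Mirror of iommu_pgsize(): pick the biggest usable page size and how
     * many pages of that size fit before the next alignment boundary. */
    static size_t pick_pgsize(unsigned long pgsize_bitmap, unsigned long iova,
                              unsigned long paddr, size_t size, size_t *count)
    {
            unsigned long addr_merge = paddr | iova;
            unsigned long pgsizes;
            unsigned int pgsize_idx, pgsize_idx_next;
            size_t pgsize, pgsize_next, offset;

            /* Page sizes supported by the hardware and small enough for @size */
            pgsizes = pgsize_bitmap & GENMASK(fls64_(size), 0);

            /* Constrain the page sizes further based on the maximum alignment */
            if (addr_merge)
                    pgsizes &= GENMASK(ffs64_(addr_merge), 0);

            assert(pgsizes);        /* at least one suitable size must remain */
            pgsize_idx = fls64_(pgsizes);
            pgsize = 1UL << pgsize_idx;

            /* Find the next biggest supported page size, if there is one */
            pgsizes = pgsize_bitmap & ~GENMASK(pgsize_idx, 0);
            if (!pgsizes)
                    goto out;

            pgsize_idx_next = ffs64_(pgsizes);
            pgsize_next = 1UL << pgsize_idx_next;

            /* A bigger size only helps if iova and paddr are offset alike */
            if ((iova ^ paddr) & (pgsize_next - 1))
                    goto out;

            /* Stop the run of small pages at the next alignment boundary */
            offset = pgsize_next - (addr_merge & (pgsize_next - 1));
            if (offset + pgsize_next <= size)
                    size = offset;
    out:
            *count = size >> pgsize_idx;
            return pgsize;
    }

    int main(void)
    {
            /* 4K/2M/1G hardware; an 8M mapping starting 4K past a 2M boundary
             * yields 511 4K pages, after which 2M pages take over. */
            size_t count;
            size_t pgsize = pick_pgsize((1UL << 12) | (1UL << 21) | (1UL << 30),
                                        0x1000, 0x1000, 8UL << 20, &count);
            printf("pgsize=0x%zx count=%zu\n", pgsize, count);
            return 0;
    }

With these inputs the model prints pgsize=0x1000 count=511: one call covers the whole run of 4K pages up to the 2M boundary, exactly the batching the new map_pages()/unmap_pages() paths exploit.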
Signed-off-by: Isaac J. Manjarres Suggested-by: Robin Murphy Signed-off-by: Georgi Djakov Link: https://lore.kernel.org/r/1623850736-389584-10-git-send-email-quic_c_gdjako@quicinc.com Signed-off-by: Joerg Roedel --- drivers/iommu/io-pgtable-arm.c | 78 +++++++++++++++++++--------------- 1 file changed, 44 insertions(+), 34 deletions(-) diff --git a/drivers/iommu/io-pgtable-arm.c b/drivers/iommu/io-pgtable-arm.c index 87def58e79b5a5..ea66b10c04c426 100644 --- a/drivers/iommu/io-pgtable-arm.c +++ b/drivers/iommu/io-pgtable-arm.c @@ -232,20 +232,23 @@ static void __arm_lpae_free_pages(void *pages, size_t size, free_pages((unsigned long)pages, get_order(size)); } -static void __arm_lpae_sync_pte(arm_lpae_iopte *ptep, +static void __arm_lpae_sync_pte(arm_lpae_iopte *ptep, int num_entries, struct io_pgtable_cfg *cfg) { dma_sync_single_for_device(cfg->iommu_dev, __arm_lpae_dma_addr(ptep), - sizeof(*ptep), DMA_TO_DEVICE); + sizeof(*ptep) * num_entries, DMA_TO_DEVICE); } static void __arm_lpae_set_pte(arm_lpae_iopte *ptep, arm_lpae_iopte pte, - struct io_pgtable_cfg *cfg) + int num_entries, struct io_pgtable_cfg *cfg) { - *ptep = pte; + int i; + + for (i = 0; i < num_entries; i++) + ptep[i] = pte; if (!cfg->coherent_walk) - __arm_lpae_sync_pte(ptep, cfg); + __arm_lpae_sync_pte(ptep, num_entries, cfg); } static size_t __arm_lpae_unmap(struct arm_lpae_io_pgtable *data, @@ -255,47 +258,54 @@ static size_t __arm_lpae_unmap(struct arm_lpae_io_pgtable *data, static void __arm_lpae_init_pte(struct arm_lpae_io_pgtable *data, phys_addr_t paddr, arm_lpae_iopte prot, - int lvl, arm_lpae_iopte *ptep) + int lvl, int num_entries, arm_lpae_iopte *ptep) { arm_lpae_iopte pte = prot; + struct io_pgtable_cfg *cfg = &data->iop.cfg; + size_t sz = ARM_LPAE_BLOCK_SIZE(lvl, data); + int i; if (data->iop.fmt != ARM_MALI_LPAE && lvl == ARM_LPAE_MAX_LEVELS - 1) pte |= ARM_LPAE_PTE_TYPE_PAGE; else pte |= ARM_LPAE_PTE_TYPE_BLOCK; - pte |= paddr_to_iopte(paddr, data); + for (i = 0; i < num_entries; i++) + ptep[i] = pte | paddr_to_iopte(paddr + i * sz, data); - __arm_lpae_set_pte(ptep, pte, &data->iop.cfg); + if (!cfg->coherent_walk) + __arm_lpae_sync_pte(ptep, num_entries, cfg); } static int arm_lpae_init_pte(struct arm_lpae_io_pgtable *data, unsigned long iova, phys_addr_t paddr, - arm_lpae_iopte prot, int lvl, + arm_lpae_iopte prot, int lvl, int num_entries, arm_lpae_iopte *ptep) { - arm_lpae_iopte pte = *ptep; - - if (iopte_leaf(pte, lvl, data->iop.fmt)) { - /* We require an unmap first */ - WARN_ON(!selftest_running); - return -EEXIST; - } else if (iopte_type(pte) == ARM_LPAE_PTE_TYPE_TABLE) { - /* - * We need to unmap and free the old table before - * overwriting it with a block entry. - */ - arm_lpae_iopte *tblp; - size_t sz = ARM_LPAE_BLOCK_SIZE(lvl, data); - - tblp = ptep - ARM_LPAE_LVL_IDX(iova, lvl, data); - if (__arm_lpae_unmap(data, NULL, iova, sz, lvl, tblp) != sz) { - WARN_ON(1); - return -EINVAL; + int i; + + for (i = 0; i < num_entries; i++) + if (iopte_leaf(ptep[i], lvl, data->iop.fmt)) { + /* We require an unmap first */ + WARN_ON(!selftest_running); + return -EEXIST; + } else if (iopte_type(ptep[i]) == ARM_LPAE_PTE_TYPE_TABLE) { + /* + * We need to unmap and free the old table before + * overwriting it with a block entry. 
+ */ + arm_lpae_iopte *tblp; + size_t sz = ARM_LPAE_BLOCK_SIZE(lvl, data); + + tblp = ptep - ARM_LPAE_LVL_IDX(iova, lvl, data); + if (__arm_lpae_unmap(data, NULL, iova + i * sz, sz, + lvl, tblp) != sz) { + WARN_ON(1); + return -EINVAL; + } } - } - __arm_lpae_init_pte(data, paddr, prot, lvl, ptep); + __arm_lpae_init_pte(data, paddr, prot, lvl, num_entries, ptep); return 0; } @@ -323,7 +333,7 @@ static arm_lpae_iopte arm_lpae_install_table(arm_lpae_iopte *table, return old; /* Even if it's not ours, there's no point waiting; just kick it */ - __arm_lpae_sync_pte(ptep, cfg); + __arm_lpae_sync_pte(ptep, 1, cfg); if (old == curr) WRITE_ONCE(*ptep, new | ARM_LPAE_PTE_SW_SYNC); @@ -344,7 +354,7 @@ static int __arm_lpae_map(struct arm_lpae_io_pgtable *data, unsigned long iova, /* If we can install a leaf entry at this level, then do so */ if (size == block_size) - return arm_lpae_init_pte(data, iova, paddr, prot, lvl, ptep); + return arm_lpae_init_pte(data, iova, paddr, prot, lvl, 1, ptep); /* We can't allocate tables at the final level */ if (WARN_ON(lvl >= ARM_LPAE_MAX_LEVELS - 1)) @@ -361,7 +371,7 @@ static int __arm_lpae_map(struct arm_lpae_io_pgtable *data, unsigned long iova, if (pte) __arm_lpae_free_pages(cptep, tblsz, cfg); } else if (!cfg->coherent_walk && !(pte & ARM_LPAE_PTE_SW_SYNC)) { - __arm_lpae_sync_pte(ptep, cfg); + __arm_lpae_sync_pte(ptep, 1, cfg); } if (pte && !iopte_leaf(pte, lvl, data->iop.fmt)) { @@ -543,7 +553,7 @@ static size_t arm_lpae_split_blk_unmap(struct arm_lpae_io_pgtable *data, if (i == unmap_idx) continue; - __arm_lpae_init_pte(data, blk_paddr, pte, lvl, &tablep[i]); + __arm_lpae_init_pte(data, blk_paddr, pte, lvl, 1, &tablep[i]); } pte = arm_lpae_install_table(tablep, ptep, blk_pte, cfg); @@ -585,7 +595,7 @@ static size_t __arm_lpae_unmap(struct arm_lpae_io_pgtable *data, /* If the size matches this level, we're in the right place */ if (size == ARM_LPAE_BLOCK_SIZE(lvl, data)) { - __arm_lpae_set_pte(ptep, 0, &iop->cfg); + __arm_lpae_set_pte(ptep, 0, 1, &iop->cfg); if (!iopte_leaf(pte, lvl, iop->fmt)) { /* Also flush any partial walks */ From f835a4b79bad367c717dff53ee67082023ab2c83 Mon Sep 17 00:00:00 2001 From: "Isaac J. Manjarres" Date: Wed, 16 Jun 2021 06:38:51 -0700 Subject: [PATCH 10/19] iommu/io-pgtable-arm: Implement arm_lpae_unmap_pages() Implement the unmap_pages() callback for the ARM LPAE io-pgtable format. Signed-off-by: Isaac J. Manjarres Suggested-by: Will Deacon Signed-off-by: Georgi Djakov Link: https://lore.kernel.org/r/1623850736-389584-11-git-send-email-quic_c_gdjako@quicinc.com Signed-off-by: Joerg Roedel --- drivers/iommu/io-pgtable-arm.c | 120 ++++++++++++++++++++------------- 1 file changed, 74 insertions(+), 46 deletions(-) diff --git a/drivers/iommu/io-pgtable-arm.c b/drivers/iommu/io-pgtable-arm.c index ea66b10c04c426..fe8fa0ee9c98f9 100644 --- a/drivers/iommu/io-pgtable-arm.c +++ b/drivers/iommu/io-pgtable-arm.c @@ -46,6 +46,9 @@ #define ARM_LPAE_PGD_SIZE(d) \ (sizeof(arm_lpae_iopte) << (d)->pgd_bits) +#define ARM_LPAE_PTES_PER_TABLE(d) \ + (ARM_LPAE_GRANULE(d) >> ilog2(sizeof(arm_lpae_iopte))) + /* * Calculate the index at level l used to map virtual address a using the * pagetable in d. 
@@ -239,22 +242,19 @@ static void __arm_lpae_sync_pte(arm_lpae_iopte *ptep, int num_entries, sizeof(*ptep) * num_entries, DMA_TO_DEVICE); } -static void __arm_lpae_set_pte(arm_lpae_iopte *ptep, arm_lpae_iopte pte, - int num_entries, struct io_pgtable_cfg *cfg) +static void __arm_lpae_clear_pte(arm_lpae_iopte *ptep, struct io_pgtable_cfg *cfg) { - int i; - for (i = 0; i < num_entries; i++) - ptep[i] = pte; + *ptep = 0; if (!cfg->coherent_walk) - __arm_lpae_sync_pte(ptep, num_entries, cfg); + __arm_lpae_sync_pte(ptep, 1, cfg); } static size_t __arm_lpae_unmap(struct arm_lpae_io_pgtable *data, struct iommu_iotlb_gather *gather, - unsigned long iova, size_t size, int lvl, - arm_lpae_iopte *ptep); + unsigned long iova, size_t size, size_t pgcount, + int lvl, arm_lpae_iopte *ptep); static void __arm_lpae_init_pte(struct arm_lpae_io_pgtable *data, phys_addr_t paddr, arm_lpae_iopte prot, @@ -298,7 +298,7 @@ static int arm_lpae_init_pte(struct arm_lpae_io_pgtable *data, size_t sz = ARM_LPAE_BLOCK_SIZE(lvl, data); tblp = ptep - ARM_LPAE_LVL_IDX(iova, lvl, data); - if (__arm_lpae_unmap(data, NULL, iova + i * sz, sz, + if (__arm_lpae_unmap(data, NULL, iova + i * sz, sz, 1, lvl, tblp) != sz) { WARN_ON(1); return -EINVAL; @@ -526,14 +526,15 @@ static size_t arm_lpae_split_blk_unmap(struct arm_lpae_io_pgtable *data, struct iommu_iotlb_gather *gather, unsigned long iova, size_t size, arm_lpae_iopte blk_pte, int lvl, - arm_lpae_iopte *ptep) + arm_lpae_iopte *ptep, size_t pgcount) { struct io_pgtable_cfg *cfg = &data->iop.cfg; arm_lpae_iopte pte, *tablep; phys_addr_t blk_paddr; size_t tablesz = ARM_LPAE_GRANULE(data); size_t split_sz = ARM_LPAE_BLOCK_SIZE(lvl, data); - int i, unmap_idx = -1; + int ptes_per_table = ARM_LPAE_PTES_PER_TABLE(data); + int i, unmap_idx_start = -1, num_entries = 0, max_entries; if (WARN_ON(lvl == ARM_LPAE_MAX_LEVELS)) return 0; @@ -542,15 +543,18 @@ static size_t arm_lpae_split_blk_unmap(struct arm_lpae_io_pgtable *data, if (!tablep) return 0; /* Bytes unmapped */ - if (size == split_sz) - unmap_idx = ARM_LPAE_LVL_IDX(iova, lvl, data); + if (size == split_sz) { + unmap_idx_start = ARM_LPAE_LVL_IDX(iova, lvl, data); + max_entries = ptes_per_table - unmap_idx_start; + num_entries = min_t(int, pgcount, max_entries); + } blk_paddr = iopte_to_paddr(blk_pte, data); pte = iopte_prot(blk_pte); - for (i = 0; i < tablesz / sizeof(pte); i++, blk_paddr += split_sz) { + for (i = 0; i < ptes_per_table; i++, blk_paddr += split_sz) { /* Unmap! 
*/ - if (i == unmap_idx) + if (i >= unmap_idx_start && i < (unmap_idx_start + num_entries)) continue; __arm_lpae_init_pte(data, blk_paddr, pte, lvl, 1, &tablep[i]); @@ -568,76 +572,92 @@ static size_t arm_lpae_split_blk_unmap(struct arm_lpae_io_pgtable *data, return 0; tablep = iopte_deref(pte, data); - } else if (unmap_idx >= 0) { - io_pgtable_tlb_add_page(&data->iop, gather, iova, size); - return size; + } else if (unmap_idx_start >= 0) { + for (i = 0; i < num_entries; i++) + io_pgtable_tlb_add_page(&data->iop, gather, iova + i * size, size); + + return num_entries * size; } - return __arm_lpae_unmap(data, gather, iova, size, lvl, tablep); + return __arm_lpae_unmap(data, gather, iova, size, pgcount, lvl, tablep); } static size_t __arm_lpae_unmap(struct arm_lpae_io_pgtable *data, struct iommu_iotlb_gather *gather, - unsigned long iova, size_t size, int lvl, - arm_lpae_iopte *ptep) + unsigned long iova, size_t size, size_t pgcount, + int lvl, arm_lpae_iopte *ptep) { arm_lpae_iopte pte; struct io_pgtable *iop = &data->iop; + int i = 0, num_entries, max_entries, unmap_idx_start; /* Something went horribly wrong and we ran out of page table */ if (WARN_ON(lvl == ARM_LPAE_MAX_LEVELS)) return 0; - ptep += ARM_LPAE_LVL_IDX(iova, lvl, data); + unmap_idx_start = ARM_LPAE_LVL_IDX(iova, lvl, data); + ptep += unmap_idx_start; pte = READ_ONCE(*ptep); if (WARN_ON(!pte)) return 0; /* If the size matches this level, we're in the right place */ if (size == ARM_LPAE_BLOCK_SIZE(lvl, data)) { - __arm_lpae_set_pte(ptep, 0, 1, &iop->cfg); - - if (!iopte_leaf(pte, lvl, iop->fmt)) { - /* Also flush any partial walks */ - io_pgtable_tlb_flush_walk(iop, iova, size, - ARM_LPAE_GRANULE(data)); - ptep = iopte_deref(pte, data); - __arm_lpae_free_pgtable(data, lvl + 1, ptep); - } else if (iop->cfg.quirks & IO_PGTABLE_QUIRK_NON_STRICT) { - /* - * Order the PTE update against queueing the IOVA, to - * guarantee that a flush callback from a different CPU - * has observed it before the TLBIALL can be issued. - */ - smp_wmb(); - } else { - io_pgtable_tlb_add_page(iop, gather, iova, size); + max_entries = ARM_LPAE_PTES_PER_TABLE(data) - unmap_idx_start; + num_entries = min_t(int, pgcount, max_entries); + + while (i < num_entries) { + pte = READ_ONCE(*ptep); + if (WARN_ON(!pte)) + break; + + __arm_lpae_clear_pte(ptep, &iop->cfg); + + if (!iopte_leaf(pte, lvl, iop->fmt)) { + /* Also flush any partial walks */ + io_pgtable_tlb_flush_walk(iop, iova + i * size, size, + ARM_LPAE_GRANULE(data)); + __arm_lpae_free_pgtable(data, lvl + 1, iopte_deref(pte, data)); + } else if (iop->cfg.quirks & IO_PGTABLE_QUIRK_NON_STRICT) { + /* + * Order the PTE update against queueing the IOVA, to + * guarantee that a flush callback from a different CPU + * has observed it before the TLBIALL can be issued. 
+ */ + smp_wmb(); + } else { + io_pgtable_tlb_add_page(iop, gather, iova + i * size, size); + } + + ptep++; + i++; } - return size; + return i * size; } else if (iopte_leaf(pte, lvl, iop->fmt)) { /* * Insert a table at the next level to map the old region, * minus the part we want to unmap */ return arm_lpae_split_blk_unmap(data, gather, iova, size, pte, - lvl + 1, ptep); + lvl + 1, ptep, pgcount); } /* Keep on walkin' */ ptep = iopte_deref(pte, data); - return __arm_lpae_unmap(data, gather, iova, size, lvl + 1, ptep); + return __arm_lpae_unmap(data, gather, iova, size, pgcount, lvl + 1, ptep); } -static size_t arm_lpae_unmap(struct io_pgtable_ops *ops, unsigned long iova, - size_t size, struct iommu_iotlb_gather *gather) +static size_t arm_lpae_unmap_pages(struct io_pgtable_ops *ops, unsigned long iova, + size_t pgsize, size_t pgcount, + struct iommu_iotlb_gather *gather) { struct arm_lpae_io_pgtable *data = io_pgtable_ops_to_data(ops); struct io_pgtable_cfg *cfg = &data->iop.cfg; arm_lpae_iopte *ptep = data->pgd; long iaext = (s64)iova >> cfg->ias; - if (WARN_ON(!size || (size & cfg->pgsize_bitmap) != size)) + if (WARN_ON(!pgsize || (pgsize & cfg->pgsize_bitmap) != pgsize || !pgcount)) return 0; if (cfg->quirks & IO_PGTABLE_QUIRK_ARM_TTBR1) @@ -645,7 +665,14 @@ static size_t arm_lpae_unmap(struct io_pgtable_ops *ops, unsigned long iova, if (WARN_ON(iaext)) return 0; - return __arm_lpae_unmap(data, gather, iova, size, data->start_level, ptep); + return __arm_lpae_unmap(data, gather, iova, pgsize, pgcount, + data->start_level, ptep); +} + +static size_t arm_lpae_unmap(struct io_pgtable_ops *ops, unsigned long iova, + size_t size, struct iommu_iotlb_gather *gather) +{ + return arm_lpae_unmap_pages(ops, iova, size, 1, gather); } static phys_addr_t arm_lpae_iova_to_phys(struct io_pgtable_ops *ops, @@ -761,6 +788,7 @@ arm_lpae_alloc_pgtable(struct io_pgtable_cfg *cfg) data->iop.ops = (struct io_pgtable_ops) { .map = arm_lpae_map, .unmap = arm_lpae_unmap, + .unmap_pages = arm_lpae_unmap_pages, .iova_to_phys = arm_lpae_iova_to_phys, }; From 3a3f4e140f50922b3cb821677d57e9789c8569c9 Mon Sep 17 00:00:00 2001 From: "Isaac J. Manjarres" Date: Wed, 16 Jun 2021 06:38:52 -0700 Subject: [PATCH 11/19] iommu/io-pgtable-arm: Implement arm_lpae_map_pages() Implement the map_pages() callback for the ARM LPAE io-pgtable format. Signed-off-by: Isaac J. 
Manjarres Signed-off-by: Georgi Djakov Link: https://lore.kernel.org/r/1623850736-389584-12-git-send-email-quic_c_gdjako@quicinc.com Signed-off-by: Joerg Roedel --- drivers/iommu/io-pgtable-arm.c | 41 +++++++++++++++++++++++++--------- 1 file changed, 31 insertions(+), 10 deletions(-) diff --git a/drivers/iommu/io-pgtable-arm.c b/drivers/iommu/io-pgtable-arm.c index fe8fa0ee9c98f9..053df4048a2977 100644 --- a/drivers/iommu/io-pgtable-arm.c +++ b/drivers/iommu/io-pgtable-arm.c @@ -341,20 +341,30 @@ static arm_lpae_iopte arm_lpae_install_table(arm_lpae_iopte *table, } static int __arm_lpae_map(struct arm_lpae_io_pgtable *data, unsigned long iova, - phys_addr_t paddr, size_t size, arm_lpae_iopte prot, - int lvl, arm_lpae_iopte *ptep, gfp_t gfp) + phys_addr_t paddr, size_t size, size_t pgcount, + arm_lpae_iopte prot, int lvl, arm_lpae_iopte *ptep, + gfp_t gfp, size_t *mapped) { arm_lpae_iopte *cptep, pte; size_t block_size = ARM_LPAE_BLOCK_SIZE(lvl, data); size_t tblsz = ARM_LPAE_GRANULE(data); struct io_pgtable_cfg *cfg = &data->iop.cfg; + int ret = 0, num_entries, max_entries, map_idx_start; /* Find our entry at the current level */ - ptep += ARM_LPAE_LVL_IDX(iova, lvl, data); + map_idx_start = ARM_LPAE_LVL_IDX(iova, lvl, data); + ptep += map_idx_start; /* If we can install a leaf entry at this level, then do so */ - if (size == block_size) - return arm_lpae_init_pte(data, iova, paddr, prot, lvl, 1, ptep); + if (size == block_size) { + max_entries = ARM_LPAE_PTES_PER_TABLE(data) - map_idx_start; + num_entries = min_t(int, pgcount, max_entries); + ret = arm_lpae_init_pte(data, iova, paddr, prot, lvl, num_entries, ptep); + if (!ret && mapped) + *mapped += num_entries * size; + + return ret; + } /* We can't allocate tables at the final level */ if (WARN_ON(lvl >= ARM_LPAE_MAX_LEVELS - 1)) @@ -383,7 +393,8 @@ static int __arm_lpae_map(struct arm_lpae_io_pgtable *data, unsigned long iova, } /* Rinse, repeat */ - return __arm_lpae_map(data, iova, paddr, size, prot, lvl + 1, cptep, gfp); + return __arm_lpae_map(data, iova, paddr, size, pgcount, prot, lvl + 1, + cptep, gfp, mapped); } static arm_lpae_iopte arm_lpae_prot_to_pte(struct arm_lpae_io_pgtable *data, @@ -450,8 +461,9 @@ static arm_lpae_iopte arm_lpae_prot_to_pte(struct arm_lpae_io_pgtable *data, return pte; } -static int arm_lpae_map(struct io_pgtable_ops *ops, unsigned long iova, - phys_addr_t paddr, size_t size, int iommu_prot, gfp_t gfp) +static int arm_lpae_map_pages(struct io_pgtable_ops *ops, unsigned long iova, + phys_addr_t paddr, size_t pgsize, size_t pgcount, + int iommu_prot, gfp_t gfp, size_t *mapped) { struct arm_lpae_io_pgtable *data = io_pgtable_ops_to_data(ops); struct io_pgtable_cfg *cfg = &data->iop.cfg; @@ -460,7 +472,7 @@ static int arm_lpae_map(struct io_pgtable_ops *ops, unsigned long iova, arm_lpae_iopte prot; long iaext = (s64)iova >> cfg->ias; - if (WARN_ON(!size || (size & cfg->pgsize_bitmap) != size)) + if (WARN_ON(!pgsize || (pgsize & cfg->pgsize_bitmap) != pgsize)) return -EINVAL; if (cfg->quirks & IO_PGTABLE_QUIRK_ARM_TTBR1) @@ -473,7 +485,8 @@ static int arm_lpae_map(struct io_pgtable_ops *ops, unsigned long iova, return 0; prot = arm_lpae_prot_to_pte(data, iommu_prot); - ret = __arm_lpae_map(data, iova, paddr, size, prot, lvl, ptep, gfp); + ret = __arm_lpae_map(data, iova, paddr, pgsize, pgcount, prot, lvl, + ptep, gfp, mapped); /* * Synchronise all PTE updates for the new mapping before there's * a chance for anything to kick off a table walk for the new iova. 
@@ -483,6 +496,13 @@ static int arm_lpae_map(struct io_pgtable_ops *ops, unsigned long iova, return ret; } +static int arm_lpae_map(struct io_pgtable_ops *ops, unsigned long iova, + phys_addr_t paddr, size_t size, int iommu_prot, gfp_t gfp) +{ + return arm_lpae_map_pages(ops, iova, paddr, size, 1, iommu_prot, gfp, + NULL); +} + static void __arm_lpae_free_pgtable(struct arm_lpae_io_pgtable *data, int lvl, arm_lpae_iopte *ptep) { @@ -787,6 +807,7 @@ arm_lpae_alloc_pgtable(struct io_pgtable_cfg *cfg) data->iop.ops = (struct io_pgtable_ops) { .map = arm_lpae_map, + .map_pages = arm_lpae_map_pages, .unmap = arm_lpae_unmap, .unmap_pages = arm_lpae_unmap_pages, .iova_to_phys = arm_lpae_iova_to_phys, From 0cce8f937a2eba7843c9f02ae0cfc1c750107d6d Mon Sep 17 00:00:00 2001 From: "Isaac J. Manjarres" Date: Wed, 16 Jun 2021 06:38:53 -0700 Subject: [PATCH 12/19] iommu/io-pgtable-arm-v7s: Implement arm_v7s_unmap_pages() Implement the unmap_pages() callback for the ARM v7s io-pgtable format. Signed-off-by: Isaac J. Manjarres Signed-off-by: Georgi Djakov Link: https://lore.kernel.org/r/1623850736-389584-13-git-send-email-quic_c_gdjako@quicinc.com Signed-off-by: Joerg Roedel --- drivers/iommu/io-pgtable-arm-v7s.c | 24 +++++++++++++++++++++--- 1 file changed, 21 insertions(+), 3 deletions(-) diff --git a/drivers/iommu/io-pgtable-arm-v7s.c b/drivers/iommu/io-pgtable-arm-v7s.c index d4004bcf333a81..1af0606869857a 100644 --- a/drivers/iommu/io-pgtable-arm-v7s.c +++ b/drivers/iommu/io-pgtable-arm-v7s.c @@ -710,15 +710,32 @@ static size_t __arm_v7s_unmap(struct arm_v7s_io_pgtable *data, return __arm_v7s_unmap(data, gather, iova, size, lvl + 1, ptep); } -static size_t arm_v7s_unmap(struct io_pgtable_ops *ops, unsigned long iova, - size_t size, struct iommu_iotlb_gather *gather) +static size_t arm_v7s_unmap_pages(struct io_pgtable_ops *ops, unsigned long iova, + size_t pgsize, size_t pgcount, + struct iommu_iotlb_gather *gather) { struct arm_v7s_io_pgtable *data = io_pgtable_ops_to_data(ops); + size_t unmapped = 0, ret; if (WARN_ON(iova >= (1ULL << data->iop.cfg.ias))) return 0; - return __arm_v7s_unmap(data, gather, iova, size, 1, data->pgd); + while (pgcount--) { + ret = __arm_v7s_unmap(data, gather, iova, pgsize, 1, data->pgd); + if (!ret) + break; + + unmapped += pgsize; + iova += pgsize; + } + + return unmapped; +} + +static size_t arm_v7s_unmap(struct io_pgtable_ops *ops, unsigned long iova, + size_t size, struct iommu_iotlb_gather *gather) +{ + return arm_v7s_unmap_pages(ops, iova, size, 1, gather); } static phys_addr_t arm_v7s_iova_to_phys(struct io_pgtable_ops *ops, @@ -781,6 +798,7 @@ static struct io_pgtable *arm_v7s_alloc_pgtable(struct io_pgtable_cfg *cfg, data->iop.ops = (struct io_pgtable_ops) { .map = arm_v7s_map, .unmap = arm_v7s_unmap, + .unmap_pages = arm_v7s_unmap_pages, .iova_to_phys = arm_v7s_iova_to_phys, }; From 3b9cd8fa61b0418c343aa1e4d2cf6997f21f3db4 Mon Sep 17 00:00:00 2001 From: "Isaac J. Manjarres" Date: Wed, 16 Jun 2021 06:38:54 -0700 Subject: [PATCH 13/19] iommu/io-pgtable-arm-v7s: Implement arm_v7s_map_pages() Implement the map_pages() callback for the ARM v7s io-pgtable format. Signed-off-by: Isaac J. 
Manjarres Signed-off-by: Georgi Djakov Link: https://lore.kernel.org/r/1623850736-389584-14-git-send-email-quic_c_gdjako@quicinc.com Signed-off-by: Joerg Roedel --- drivers/iommu/io-pgtable-arm-v7s.c | 26 ++++++++++++++++++++++---- 1 file changed, 22 insertions(+), 4 deletions(-) diff --git a/drivers/iommu/io-pgtable-arm-v7s.c b/drivers/iommu/io-pgtable-arm-v7s.c index 1af0606869857a..5db90d7ce2eccf 100644 --- a/drivers/iommu/io-pgtable-arm-v7s.c +++ b/drivers/iommu/io-pgtable-arm-v7s.c @@ -519,11 +519,12 @@ static int __arm_v7s_map(struct arm_v7s_io_pgtable *data, unsigned long iova, return __arm_v7s_map(data, iova, paddr, size, prot, lvl + 1, cptep, gfp); } -static int arm_v7s_map(struct io_pgtable_ops *ops, unsigned long iova, - phys_addr_t paddr, size_t size, int prot, gfp_t gfp) +static int arm_v7s_map_pages(struct io_pgtable_ops *ops, unsigned long iova, + phys_addr_t paddr, size_t pgsize, size_t pgcount, + int prot, gfp_t gfp, size_t *mapped) { struct arm_v7s_io_pgtable *data = io_pgtable_ops_to_data(ops); - int ret; + int ret = -EINVAL; if (WARN_ON(iova >= (1ULL << data->iop.cfg.ias) || paddr >= (1ULL << data->iop.cfg.oas))) @@ -533,7 +534,17 @@ static int arm_v7s_map(struct io_pgtable_ops *ops, unsigned long iova, if (!(prot & (IOMMU_READ | IOMMU_WRITE))) return 0; - ret = __arm_v7s_map(data, iova, paddr, size, prot, 1, data->pgd, gfp); + while (pgcount--) { + ret = __arm_v7s_map(data, iova, paddr, pgsize, prot, 1, data->pgd, + gfp); + if (ret) + break; + + iova += pgsize; + paddr += pgsize; + if (mapped) + *mapped += pgsize; + } /* * Synchronise all PTE updates for the new mapping before there's * a chance for anything to kick off a table walk for the new iova. @@ -543,6 +554,12 @@ static int arm_v7s_map(struct io_pgtable_ops *ops, unsigned long iova, return ret; } +static int arm_v7s_map(struct io_pgtable_ops *ops, unsigned long iova, + phys_addr_t paddr, size_t size, int prot, gfp_t gfp) +{ + return arm_v7s_map_pages(ops, iova, paddr, size, 1, prot, gfp, NULL); +} + static void arm_v7s_free_pgtable(struct io_pgtable *iop) { struct arm_v7s_io_pgtable *data = io_pgtable_to_data(iop); @@ -797,6 +814,7 @@ static struct io_pgtable *arm_v7s_alloc_pgtable(struct io_pgtable_cfg *cfg, data->iop.ops = (struct io_pgtable_ops) { .map = arm_v7s_map, + .map_pages = arm_v7s_map_pages, .unmap = arm_v7s_unmap, .unmap_pages = arm_v7s_unmap_pages, .iova_to_phys = arm_v7s_iova_to_phys, From ea2fbf1aa9f750738a54145af9ac3d3ca7b3bb91 Mon Sep 17 00:00:00 2001 From: "Isaac J. Manjarres" Date: Wed, 16 Jun 2021 06:38:55 -0700 Subject: [PATCH 14/19] iommu/arm-smmu: Implement the unmap_pages() IOMMU driver callback Implement the unmap_pages() callback for the ARM SMMU driver to allow calls from iommu_unmap to unmap multiple pages of the same size in one call. Also, remove the unmap() callback for the SMMU driver, as it will no longer be used. Signed-off-by: Isaac J. 
Manjarres Suggested-by: Will Deacon Signed-off-by: Georgi Djakov Link: https://lore.kernel.org/r/1623850736-389584-15-git-send-email-quic_c_gdjako@quicinc.com Signed-off-by: Joerg Roedel --- drivers/iommu/arm/arm-smmu/arm-smmu.c | 9 +++++---- 1 file changed, 5 insertions(+), 4 deletions(-) diff --git a/drivers/iommu/arm/arm-smmu/arm-smmu.c b/drivers/iommu/arm/arm-smmu/arm-smmu.c index f22dbeb1e51058..9a0c49060bf3d3 100644 --- a/drivers/iommu/arm/arm-smmu/arm-smmu.c +++ b/drivers/iommu/arm/arm-smmu/arm-smmu.c @@ -1215,8 +1215,9 @@ static int arm_smmu_map(struct iommu_domain *domain, unsigned long iova, return ret; } -static size_t arm_smmu_unmap(struct iommu_domain *domain, unsigned long iova, - size_t size, struct iommu_iotlb_gather *gather) +static size_t arm_smmu_unmap_pages(struct iommu_domain *domain, unsigned long iova, + size_t pgsize, size_t pgcount, + struct iommu_iotlb_gather *iotlb_gather) { struct io_pgtable_ops *ops = to_smmu_domain(domain)->pgtbl_ops; struct arm_smmu_device *smmu = to_smmu_domain(domain)->smmu; @@ -1226,7 +1227,7 @@ static size_t arm_smmu_unmap(struct iommu_domain *domain, unsigned long iova, return 0; arm_smmu_rpm_get(smmu); - ret = ops->unmap(ops, iova, size, gather); + ret = ops->unmap_pages(ops, iova, pgsize, pgcount, iotlb_gather); arm_smmu_rpm_put(smmu); return ret; @@ -1583,7 +1584,7 @@ static struct iommu_ops arm_smmu_ops = { .domain_free = arm_smmu_domain_free, .attach_dev = arm_smmu_attach_dev, .map = arm_smmu_map, - .unmap = arm_smmu_unmap, + .unmap_pages = arm_smmu_unmap_pages, .flush_iotlb_all = arm_smmu_flush_iotlb_all, .iotlb_sync = arm_smmu_iotlb_sync, .iova_to_phys = arm_smmu_iova_to_phys, From 9a3c053ce05d9f7cff4176a90d154bf477d1bfb9 Mon Sep 17 00:00:00 2001 From: "Isaac J. Manjarres" Date: Wed, 16 Jun 2021 06:38:56 -0700 Subject: [PATCH 15/19] iommu/arm-smmu: Implement the map_pages() IOMMU driver callback Implement the map_pages() callback for the ARM SMMU driver to allow calls from iommu_map to map multiple pages of the same size in one call. Also, remove the map() callback for the ARM SMMU driver, as it will no longer be used. Signed-off-by: Isaac J. 
Manjarres Suggested-by: Will Deacon Signed-off-by: Georgi Djakov Link: https://lore.kernel.org/r/1623850736-389584-16-git-send-email-quic_c_gdjako@quicinc.com Signed-off-by: Joerg Roedel --- drivers/iommu/arm/arm-smmu/arm-smmu.c | 9 +++++---- 1 file changed, 5 insertions(+), 4 deletions(-) diff --git a/drivers/iommu/arm/arm-smmu/arm-smmu.c b/drivers/iommu/arm/arm-smmu/arm-smmu.c index 9a0c49060bf3d3..5ed4408d4b2890 100644 --- a/drivers/iommu/arm/arm-smmu/arm-smmu.c +++ b/drivers/iommu/arm/arm-smmu/arm-smmu.c @@ -1198,8 +1198,9 @@ static int arm_smmu_attach_dev(struct iommu_domain *domain, struct device *dev) return ret; } -static int arm_smmu_map(struct iommu_domain *domain, unsigned long iova, - phys_addr_t paddr, size_t size, int prot, gfp_t gfp) +static int arm_smmu_map_pages(struct iommu_domain *domain, unsigned long iova, + phys_addr_t paddr, size_t pgsize, size_t pgcount, + int prot, gfp_t gfp, size_t *mapped) { struct io_pgtable_ops *ops = to_smmu_domain(domain)->pgtbl_ops; struct arm_smmu_device *smmu = to_smmu_domain(domain)->smmu; @@ -1209,7 +1210,7 @@ static int arm_smmu_map(struct iommu_domain *domain, unsigned long iova, return -ENODEV; arm_smmu_rpm_get(smmu); - ret = ops->map(ops, iova, paddr, size, prot, gfp); + ret = ops->map_pages(ops, iova, paddr, pgsize, pgcount, prot, gfp, mapped); arm_smmu_rpm_put(smmu); return ret; @@ -1583,7 +1584,7 @@ static struct iommu_ops arm_smmu_ops = { .domain_alloc = arm_smmu_domain_alloc, .domain_free = arm_smmu_domain_free, .attach_dev = arm_smmu_attach_dev, - .map = arm_smmu_map, + .map_pages = arm_smmu_map_pages, .unmap_pages = arm_smmu_unmap_pages, .flush_iotlb_all = arm_smmu_flush_iotlb_all, .iotlb_sync = arm_smmu_iotlb_sync, From 84981c06c5fc6256e2b88349ef5df227a510933c Mon Sep 17 00:00:00 2001 From: Sven Peter Date: Fri, 19 Mar 2021 18:23:37 +0000 Subject: [PATCH 16/19] iommu/io-pgtable: Add DART pagetable format Apple's DART iommu uses a pagetable format that shares some similarities with the ones already implemented by io-pgtable.c. Add a new format variant to support the required differences so that we don't have to duplicate the pagetable handling code. 
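One structural difference handled below is that DART's MMIO TTBR registers act as an effective first level, with the VA space split across up to four tables selected by the top address bits. A rough standalone model of that decomposition follows; every constant here is an illustrative assumption (a 16K granule, hence 2048 eight-byte PTEs and 11 index bits per in-memory level, and four TTBRs), not a statement of the hardware's exact layout.

    #include <stdint.h>
    #include <stdio.h>

    #define GRANULE_SHIFT   14                      /* assumed 16K granule */
    #define BITS_PER_LEVEL  (GRANULE_SHIFT - 3)     /* 16K / 8-byte PTEs = 2048 */
    #define N_TTBRS         4                       /* four "level 1" registers */

    int main(void)
    {
            /* Decompose an arbitrary example address into
             * [ttbr select][level-2 index][level-3 index][page offset]. */
            uint64_t iova = 0x25c0f2a000ULL;

            unsigned int off  = iova & ((1ULL << GRANULE_SHIFT) - 1);
            unsigned int l3   = (iova >> GRANULE_SHIFT) & ((1 << BITS_PER_LEVEL) - 1);
            unsigned int l2   = (iova >> (GRANULE_SHIFT + BITS_PER_LEVEL)) &
                                ((1 << BITS_PER_LEVEL) - 1);
            unsigned int ttbr = (iova >> (GRANULE_SHIFT + 2 * BITS_PER_LEVEL)) &
                                (N_TTBRS - 1);

            printf("ttbr=%u l2=%u l3=%u offset=0x%x\n", ttbr, l2, l3, off);
            return 0;
    }

Under these assumptions the two in-memory levels cover 36 bits and the TTBR select adds two more, which is why treating the registers as LPAE-style concatenation at level 2 works without duplicating the walker.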
Reviewed-by: Alexander Graf Reviewed-by: Alyssa Rosenzweig Reviewed-by: Robin Murphy Signed-off-by: Sven Peter --- drivers/iommu/io-pgtable-arm.c | 63 ++++++++++++++++++++++++++++++++++ drivers/iommu/io-pgtable.c | 1 + include/linux/io-pgtable.h | 7 ++++ 3 files changed, 71 insertions(+) diff --git a/drivers/iommu/io-pgtable-arm.c b/drivers/iommu/io-pgtable-arm.c index 053df4048a2977..0779eb96bd2938 100644 --- a/drivers/iommu/io-pgtable-arm.c +++ b/drivers/iommu/io-pgtable-arm.c @@ -130,6 +130,9 @@ #define ARM_MALI_LPAE_MEMATTR_IMP_DEF 0x88ULL #define ARM_MALI_LPAE_MEMATTR_WRITE_ALLOC 0x8DULL +#define APPLE_DART_PTE_PROT_NO_WRITE (1<<7) +#define APPLE_DART_PTE_PROT_NO_READ (1<<8) + /* IOPTE accessors */ #define iopte_deref(pte,d) __va(iopte_to_paddr(pte, d)) @@ -402,6 +405,15 @@ static arm_lpae_iopte arm_lpae_prot_to_pte(struct arm_lpae_io_pgtable *data, { arm_lpae_iopte pte; + if (data->iop.fmt == APPLE_DART) { + pte = 0; + if (!(prot & IOMMU_WRITE)) + pte |= APPLE_DART_PTE_PROT_NO_WRITE; + if (!(prot & IOMMU_READ)) + pte |= APPLE_DART_PTE_PROT_NO_READ; + return pte; + } + if (data->iop.fmt == ARM_64_LPAE_S1 || data->iop.fmt == ARM_32_LPAE_S1) { pte = ARM_LPAE_PTE_nG; @@ -1102,6 +1114,52 @@ arm_mali_lpae_alloc_pgtable(struct io_pgtable_cfg *cfg, void *cookie) return NULL; } +static struct io_pgtable * +apple_dart_alloc_pgtable(struct io_pgtable_cfg *cfg, void *cookie) +{ + struct arm_lpae_io_pgtable *data; + int i; + + if (cfg->oas > 36) + return NULL; + + data = arm_lpae_alloc_pgtable(cfg); + if (!data) + return NULL; + + /* + * The table format itself always uses two levels, but the total VA + * space is mapped by four separate tables, making the MMIO registers + * an effective "level 1". For simplicity, though, we treat this + * equivalently to LPAE stage 2 concatenation at level 2, with the + * additional TTBRs each just pointing at consecutive pages. 
+ */ + if (data->start_level < 1) + goto out_free_data; + if (data->start_level == 1 && data->pgd_bits > 2) + goto out_free_data; + if (data->start_level > 1) + data->pgd_bits = 0; + data->start_level = 2; + cfg->apple_dart_cfg.n_ttbrs = 1 << data->pgd_bits; + data->pgd_bits += data->bits_per_level; + + data->pgd = __arm_lpae_alloc_pages(ARM_LPAE_PGD_SIZE(data), GFP_KERNEL, + cfg); + if (!data->pgd) + goto out_free_data; + + for (i = 0; i < cfg->apple_dart_cfg.n_ttbrs; ++i) + cfg->apple_dart_cfg.ttbr[i] = + virt_to_phys(data->pgd + i * ARM_LPAE_GRANULE(data)); + + return &data->iop; + +out_free_data: + kfree(data); + return NULL; +} + struct io_pgtable_init_fns io_pgtable_arm_64_lpae_s1_init_fns = { .alloc = arm_64_lpae_alloc_pgtable_s1, .free = arm_lpae_free_pgtable, @@ -1127,6 +1185,11 @@ struct io_pgtable_init_fns io_pgtable_arm_mali_lpae_init_fns = { .free = arm_lpae_free_pgtable, }; +struct io_pgtable_init_fns io_pgtable_apple_dart_init_fns = { + .alloc = apple_dart_alloc_pgtable, + .free = arm_lpae_free_pgtable, +}; + #ifdef CONFIG_IOMMU_IO_PGTABLE_LPAE_SELFTEST static struct io_pgtable_cfg *cfg_cookie __initdata; diff --git a/drivers/iommu/io-pgtable.c b/drivers/iommu/io-pgtable.c index 6e9917ce980ffc..f4bfcef98297f4 100644 --- a/drivers/iommu/io-pgtable.c +++ b/drivers/iommu/io-pgtable.c @@ -20,6 +20,7 @@ io_pgtable_init_table[IO_PGTABLE_NUM_FMTS] = { [ARM_64_LPAE_S1] = &io_pgtable_arm_64_lpae_s1_init_fns, [ARM_64_LPAE_S2] = &io_pgtable_arm_64_lpae_s2_init_fns, [ARM_MALI_LPAE] = &io_pgtable_arm_mali_lpae_init_fns, + [APPLE_DART] = &io_pgtable_apple_dart_init_fns, #endif #ifdef CONFIG_IOMMU_IO_PGTABLE_ARMV7S [ARM_V7S] = &io_pgtable_arm_v7s_init_fns, diff --git a/include/linux/io-pgtable.h b/include/linux/io-pgtable.h index c43f3b899d2a49..a738483fb4da3f 100644 --- a/include/linux/io-pgtable.h +++ b/include/linux/io-pgtable.h @@ -16,6 +16,7 @@ enum io_pgtable_fmt { ARM_V7S, ARM_MALI_LPAE, AMD_IOMMU_V1, + APPLE_DART, IO_PGTABLE_NUM_FMTS, }; @@ -136,6 +137,11 @@ struct io_pgtable_cfg { u64 transtab; u64 memattr; } arm_mali_lpae_cfg; + + struct { + u64 ttbr[4]; + u32 n_ttbrs; + } apple_dart_cfg; }; }; @@ -254,5 +260,6 @@ extern struct io_pgtable_init_fns io_pgtable_arm_64_lpae_s2_init_fns; extern struct io_pgtable_init_fns io_pgtable_arm_v7s_init_fns; extern struct io_pgtable_init_fns io_pgtable_arm_mali_lpae_init_fns; extern struct io_pgtable_init_fns io_pgtable_amd_iommu_v1_init_fns; +extern struct io_pgtable_init_fns io_pgtable_apple_dart_init_fns; #endif /* __IO_PGTABLE_H */ From c18b10ff6f6601e3cd5cbd36b805adfcdf1004ab Mon Sep 17 00:00:00 2001 From: Sven Peter Date: Fri, 19 Mar 2021 19:07:29 +0000 Subject: [PATCH 17/19] dt-bindings: iommu: add DART iommu bindings DART (Device Address Resolution Table) is the iommu found on Apple ARM SoCs such as the M1. 
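Since '#iommu-cells' is fixed at one by the binding below, each entry in a master's 'iommus' list carries exactly one stream ID. As a hypothetical, heavily simplified sketch of how a driver's of_xlate callback could consume that single cell (the real driver added in the next patch also resolves which DART instance the phandle points at from args->np and fills in the stream map's dart pointer):

    /* Hypothetical simplified sketch, not the driver's actual of_xlate */
    static int apple_dart_of_xlate_sketch(struct device *dev,
                                          struct of_phandle_args *args)
    {
            struct apple_dart_master_cfg *cfg = dev_iommu_priv_get(dev);
            u32 sid;

            if (args->args_count != 1)
                    return -EINVAL;
            sid = args->args[0];    /* the single #iommu-cells value */

            if (!cfg) {
                    cfg = kzalloc(sizeof(*cfg), GFP_KERNEL);
                    if (!cfg)
                            return -ENOMEM;
                    dev_iommu_priv_set(dev, cfg);
            }

            /* Record the stream in the per-master bitmap (single-DART case;
             * stream_maps[0].dart would be resolved from args->np here). */
            cfg->stream_maps[0].sidmap |= BIT(sid);
            return 0;
    }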
Reviewed-by: Rob Herring Reviewed-by: Alyssa Rosenzweig Signed-off-by: Sven Peter --- .../devicetree/bindings/iommu/apple,dart.yaml | 81 +++++++++++++++++++ MAINTAINERS | 6 ++ 2 files changed, 87 insertions(+) create mode 100644 Documentation/devicetree/bindings/iommu/apple,dart.yaml diff --git a/Documentation/devicetree/bindings/iommu/apple,dart.yaml b/Documentation/devicetree/bindings/iommu/apple,dart.yaml new file mode 100644 index 00000000000000..94aa9e9afa596d --- /dev/null +++ b/Documentation/devicetree/bindings/iommu/apple,dart.yaml @@ -0,0 +1,81 @@ +# SPDX-License-Identifier: (GPL-2.0 OR BSD-2-Clause) +%YAML 1.2 +--- +$id: http://devicetree.org/schemas/iommu/apple,dart.yaml# +$schema: http://devicetree.org/meta-schemas/core.yaml# + +title: Apple DART IOMMU + +maintainers: + - Sven Peter + +description: |+ + Apple SoCs may contain an implementation of their Device Address + Resolution Table which provides a mandatory layer of address + translations for various masters. + + Each DART instance is capable of handling up to 16 different streams + with individual pagetables and page-level read/write protection flags. + + This DART IOMMU also raises interrupts in response to various + fault conditions. + +properties: + compatible: + const: apple,t8103-dart + + reg: + maxItems: 1 + + interrupts: + maxItems: 1 + + clocks: + description: + Reference to the gate clock phandle if required for this IOMMU. + Optional since not all IOMMUs are attached to a clock gate. + + '#iommu-cells': + const: 1 + description: + Has to be one. The single cell describes the stream id emitted by + a master to the IOMMU. + +required: + - compatible + - reg + - '#iommu-cells' + - interrupts + +additionalProperties: false + +examples: + - |+ + dart1: iommu@82f80000 { + compatible = "apple,t8103-dart"; + reg = <0x82f80000 0x4000>; + interrupts = <1 781 4>; + #iommu-cells = <1>; + }; + + master1 { + iommus = <&dart1 0>; + }; + + - |+ + dart2a: iommu@82f00000 { + compatible = "apple,t8103-dart"; + reg = <0x82f00000 0x4000>; + interrupts = <1 781 4>; + #iommu-cells = <1>; + }; + dart2b: iommu@82f80000 { + compatible = "apple,t8103-dart"; + reg = <0x82f80000 0x4000>; + interrupts = <1 781 4>; + #iommu-cells = <1>; + }; + + master2 { + iommus = <&dart2a 0>, <&dart2b 1>; }; diff --git a/MAINTAINERS b/MAINTAINERS index c9467d2839f5e8..0f450f7d5336c4 100644 --- a/MAINTAINERS +++ b/MAINTAINERS @@ -1262,6 +1262,12 @@ L: linux-input@vger.kernel.org S: Odd fixes F: drivers/input/mouse/bcm5974.c +APPLE DART IOMMU DRIVER +M: Sven Peter +L: iommu@lists.linux-foundation.org +S: Maintained +F: Documentation/devicetree/bindings/iommu/apple,dart.yaml + APPLE SMC DRIVER M: Henrik Rydberg L: linux-hwmon@vger.kernel.org From d84c04f235aa2428eeb882b6b7932f8668239daa Mon Sep 17 00:00:00 2001 From: Sven Peter Date: Fri, 19 Mar 2021 19:06:46 +0000 Subject: [PATCH 18/19] iommu/dart: Add DART iommu driver Apple's new SoCs use iommus for almost all peripherals. These Device Address Resolution Tables must be set up before these peripherals can act as DMA masters. 
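The driver funnels all per-stream maintenance (such as TLB invalidation) through one MMIO sequence: select the affected streams, issue a command, and poll until the busy bit clears. The fragment below is a condensed sketch of that pattern, using the register definitions from the driver and readl_poll_timeout_atomic() from <linux/iopoll.h>; the driver's actual helper additionally serializes on the per-DART spinlock and reports failures.

    /* Condensed sketch of the select/command/poll-busy sequence */
    static int apple_dart_stream_command_sketch(struct apple_dart *dart,
                                                unsigned long sidmap,
                                                u32 command)
    {
            u32 command_reg;

            writel(sidmap, dart->regs + DART_STREAM_SELECT);
            writel(command, dart->regs + DART_STREAM_COMMAND);

            /* Poll DART_STREAM_COMMAND until BUSY clears, or time out */
            return readl_poll_timeout_atomic(dart->regs + DART_STREAM_COMMAND,
                                             command_reg,
                                             !(command_reg & DART_STREAM_COMMAND_BUSY),
                                             1, DART_STREAM_COMMAND_BUSY_TIMEOUT);
    }

Invalidating the TLBs for every stream behind a DART then amounts to issuing DART_STREAM_COMMAND_INVALIDATE with DART_STREAM_ALL selected.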
Tested-by: Alyssa Rosenzweig Signed-off-by: Sven Peter --- MAINTAINERS | 1 + drivers/iommu/Kconfig | 14 + drivers/iommu/Makefile | 1 + drivers/iommu/apple-dart.c | 923 +++++++++++++++++++++++++++++++++++++ 4 files changed, 939 insertions(+) create mode 100644 drivers/iommu/apple-dart.c diff --git a/MAINTAINERS b/MAINTAINERS index 0f450f7d5336c4..5f3ef429859456 100644 --- a/MAINTAINERS +++ b/MAINTAINERS @@ -1267,6 +1267,7 @@ M: Sven Peter L: iommu@lists.linux-foundation.org S: Maintained F: Documentation/devicetree/bindings/iommu/apple,dart.yaml +F: drivers/iommu/apple-dart.c APPLE SMC DRIVER M: Henrik Rydberg diff --git a/drivers/iommu/Kconfig b/drivers/iommu/Kconfig index 07b7c25cbed8a1..cbbf609d4bfb7e 100644 --- a/drivers/iommu/Kconfig +++ b/drivers/iommu/Kconfig @@ -249,6 +249,20 @@ config SPAPR_TCE_IOMMU Enables bits of IOMMU API required by VFIO. The iommu_ops is not implemented as it is not necessary for VFIO. +config APPLE_DART + tristate "Apple DART IOMMU Support" + depends on ARM64 || (COMPILE_TEST && !GENERIC_ATOMIC64) + select IOMMU_API + select IOMMU_IO_PGTABLE_LPAE + default ARCH_APPLE + help + Support for Apple DART (Device Address Resolution Table) IOMMUs + found in Apple ARM SoCs like the M1. + This IOMMU is required for most peripherals using DMA to access + the main memory. + + Say Y here if you are using an Apple SoC. + # ARM IOMMU support config ARM_SMMU tristate "ARM Ltd. System MMU (SMMU) Support" diff --git a/drivers/iommu/Makefile b/drivers/iommu/Makefile index c0fb0ba8814372..bc7f730edbb0be 100644 --- a/drivers/iommu/Makefile +++ b/drivers/iommu/Makefile @@ -29,3 +29,4 @@ obj-$(CONFIG_HYPERV_IOMMU) += hyperv-iommu.o obj-$(CONFIG_VIRTIO_IOMMU) += virtio-iommu.o obj-$(CONFIG_IOMMU_SVA_LIB) += iommu-sva-lib.o io-pgfault.o obj-$(CONFIG_SPRD_IOMMU) += sprd-iommu.o +obj-$(CONFIG_APPLE_DART) += apple-dart.o diff --git a/drivers/iommu/apple-dart.c b/drivers/iommu/apple-dart.c new file mode 100644 index 00000000000000..559db9259e65c7 --- /dev/null +++ b/drivers/iommu/apple-dart.c @@ -0,0 +1,923 @@ +// SPDX-License-Identifier: GPL-2.0-only +/* + * Apple DART (Device Address Resolution Table) IOMMU driver + * + * Copyright (C) 2021 The Asahi Linux Contributors + * + * Based on arm/arm-smmu/arm-ssmu.c and arm/arm-smmu-v3/arm-smmu-v3.c + * Copyright (C) 2013 ARM Limited + * Copyright (C) 2015 ARM Limited + * and on exynos-iommu.c + * Copyright (c) 2011,2016 Samsung Electronics Co., Ltd. 
+ +/* + * Private structure associated with each DART device. + * + * @dev: device struct + * @regs: mapped MMIO region + * @irq: interrupt number, can be shared with other DARTs + * @clks: clocks associated with this DART + * @num_clks: number of @clks + * @lock: lock for hardware operations involving this dart + * @pgsize: pagesize supported by this DART + * @supports_bypass: indicates if this DART supports bypass mode + * @force_bypass: force bypass mode due to pagesize mismatch? + * @sid2group: maps stream ids to iommu_groups + * @iommu: iommu core device + */ +struct apple_dart { + struct device *dev; + + void __iomem *regs; + + int irq; + struct clk_bulk_data *clks; + int num_clks; + + spinlock_t lock; + + u32 pgsize; + u32 supports_bypass : 1; + u32 force_bypass : 1; + + struct iommu_group *sid2group[DART_MAX_STREAMS]; + struct iommu_device iommu; +}; + +/* + * Convenience struct to identify streams. + * + * The normal variant is used inside apple_dart_master_cfg which isn't written + * to concurrently. + * The atomic variant is used inside apple_dart_domain where we have to guard + * against races from potential parallel calls to attach/detach_device. + * Note that even inside the atomic variant the apple_dart pointer is not + * protected: This pointer is initialized once under the domain init mutex + * and never changed again afterwards. Devices with different dart pointers + * cannot be attached to the same domain. + * + * @dart dart pointer + * @sidmap stream id bitmap + */ +struct apple_dart_stream_map { + struct apple_dart *dart; + unsigned long sidmap; +}; +struct apple_dart_atomic_stream_map { + struct apple_dart *dart; + atomic64_t sidmap; +}; + +/* + * This structure is attached to each iommu domain handled by a DART.
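+ * DMA and unmanaged domains are finalized lazily on the first attach_dev() call, once the backing DART and with it the supported page size are known (see apple_dart_finalize_domain()).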
+ * + * @pgtbl_ops: pagetable ops allocated by io-pgtable + * @finalized: true if the domain has been completely initialized + * @init_lock: protects domain initialization + * @stream_maps: streams attached to this domain (valid for DMA/UNMANAGED only) + * @domain: core iommu domain pointer + */ +struct apple_dart_domain { + struct io_pgtable_ops *pgtbl_ops; + + bool finalized; + struct mutex init_lock; + struct apple_dart_atomic_stream_map stream_maps[MAX_DARTS_PER_DEVICE]; + + struct iommu_domain domain; +}; + +/* + * This structure is attached to devices with dev_iommu_priv_set() in of_xlate + * and contains a list of streams bound to this device. + * So far the worst case seen is a single device with two streams + * from different darts, so that this simple static array is enough. + * + * @stream_maps: streams for this device + */ +struct apple_dart_master_cfg { + struct apple_dart_stream_map stream_maps[MAX_DARTS_PER_DEVICE]; +}; + +/* + * Helper macro to iterate over apple_dart_master_cfg.stream_maps and + * apple_dart_domain.stream_maps + * + * @i int used as loop variable + * @base pointer to base struct (apple_dart_master_cfg or apple_dart_domain) + * @stream_map pointer to the apple_dart_stream_map struct for each loop iteration + */ +#define for_each_stream_map(i, base, stream_map) \ + for (i = 0, stream_map = &(base)->stream_maps[0]; \ + i < MAX_DARTS_PER_DEVICE && stream_map->dart; \ + stream_map = &(base)->stream_maps[++i]) + +static struct platform_driver apple_dart_driver; +static const struct iommu_ops apple_dart_iommu_ops; +static const struct iommu_flush_ops apple_dart_tlb_ops; + +static struct apple_dart_domain *to_dart_domain(struct iommu_domain *dom) +{ + return container_of(dom, struct apple_dart_domain, domain); +} + +static void +apple_dart_hw_enable_translation(struct apple_dart_stream_map *stream_map) +{ + int sid; + + for_each_set_bit(sid, &stream_map->sidmap, DART_MAX_STREAMS) + writel(DART_TCR_TRANSLATE_ENABLE, + stream_map->dart->regs + DART_TCR(sid)); +} + +static void apple_dart_hw_disable_dma(struct apple_dart_stream_map *stream_map) +{ + int sid; + + for_each_set_bit(sid, &stream_map->sidmap, DART_MAX_STREAMS) + writel(0, stream_map->dart->regs + DART_TCR(sid)); +} + +static void +apple_dart_hw_enable_bypass(struct apple_dart_stream_map *stream_map) +{ + int sid; + + WARN_ON(!stream_map->dart->supports_bypass); + for_each_set_bit(sid, &stream_map->sidmap, DART_MAX_STREAMS) + writel(DART_TCR_BYPASS0_ENABLE | DART_TCR_BYPASS1_ENABLE, + stream_map->dart->regs + DART_TCR(sid)); +} + +static void apple_dart_hw_set_ttbr(struct apple_dart_stream_map *stream_map, + u8 idx, phys_addr_t paddr) +{ + int sid; + + WARN_ON(paddr & ((1 << DART_TTBR_SHIFT) - 1)); + for_each_set_bit(sid, &stream_map->sidmap, DART_MAX_STREAMS) + writel(DART_TTBR_VALID | (paddr >> DART_TTBR_SHIFT), + stream_map->dart->regs + DART_TTBR(sid, idx)); +} + +static void apple_dart_hw_clear_ttbr(struct apple_dart_stream_map *stream_map, + u8 idx) +{ + int sid; + + for_each_set_bit(sid, &stream_map->sidmap, DART_MAX_STREAMS) + writel(0, stream_map->dart->regs + DART_TTBR(sid, idx)); +} + +static void +apple_dart_hw_clear_all_ttbrs(struct apple_dart_stream_map *stream_map) +{ + int i; + + for (i = 0; i < DART_MAX_TTBR; ++i) + apple_dart_hw_clear_ttbr(stream_map, i); +} + +static int +apple_dart_hw_stream_command(struct apple_dart_stream_map *stream_map, + u32 command) +{ + unsigned long flags; + int ret; + u32 command_reg; + + spin_lock_irqsave(&stream_map->dart->lock, flags); + + writel(stream_map->sidmap,
stream_map->dart->regs + DART_STREAM_SELECT); + writel(command, stream_map->dart->regs + DART_STREAM_COMMAND); + + ret = readl_poll_timeout_atomic( + stream_map->dart->regs + DART_STREAM_COMMAND, command_reg, + !(command_reg & DART_STREAM_COMMAND_BUSY), 1, + DART_STREAM_COMMAND_BUSY_TIMEOUT); + + spin_unlock_irqrestore(&stream_map->dart->lock, flags); + + if (ret) { + dev_err(stream_map->dart->dev, + "busy bit did not clear after command %x for streams %lx\n", + command, stream_map->sidmap); + return ret; + } + + return 0; +} + +static int +apple_dart_hw_invalidate_tlb(struct apple_dart_stream_map *stream_map) +{ + return apple_dart_hw_stream_command(stream_map, + DART_STREAM_COMMAND_INVALIDATE); +} + +static int apple_dart_hw_reset(struct apple_dart *dart) +{ + u32 config; + struct apple_dart_stream_map stream_map; + + config = readl(dart->regs + DART_CONFIG); + if (config & DART_CONFIG_LOCK) { + dev_err(dart->dev, "DART is locked down until reboot: %08x\n", + config); + return -EINVAL; + } + + stream_map.dart = dart; + stream_map.sidmap = DART_STREAM_ALL; + apple_dart_hw_disable_dma(&stream_map); + apple_dart_hw_clear_all_ttbrs(&stream_map); + + /* clear any pending errors before the interrupt is unmasked */ + writel(readl(dart->regs + DART_ERROR), dart->regs + DART_ERROR); + + return apple_dart_hw_invalidate_tlb(&stream_map); +} + +static void apple_dart_domain_flush_tlb(struct apple_dart_domain *domain) +{ + int i; + struct apple_dart_atomic_stream_map *domain_stream_map; + struct apple_dart_stream_map stream_map; + + for_each_stream_map(i, domain, domain_stream_map) { + stream_map.dart = domain_stream_map->dart; + stream_map.sidmap = atomic64_read(&domain_stream_map->sidmap); + apple_dart_hw_invalidate_tlb(&stream_map); + } +} + +static void apple_dart_flush_iotlb_all(struct iommu_domain *domain) +{ + apple_dart_domain_flush_tlb(to_dart_domain(domain)); +} + +static void apple_dart_iotlb_sync(struct iommu_domain *domain, + struct iommu_iotlb_gather *gather) +{ + apple_dart_domain_flush_tlb(to_dart_domain(domain)); +} + +static void apple_dart_iotlb_sync_map(struct iommu_domain *domain, + unsigned long iova, size_t size) +{ + apple_dart_domain_flush_tlb(to_dart_domain(domain)); +} + +static void apple_dart_tlb_flush_all(void *cookie) +{ + apple_dart_domain_flush_tlb(cookie); +} + +static void apple_dart_tlb_flush_walk(unsigned long iova, size_t size, + size_t granule, void *cookie) +{ + apple_dart_domain_flush_tlb(cookie); +} + +static const struct iommu_flush_ops apple_dart_tlb_ops = { + .tlb_flush_all = apple_dart_tlb_flush_all, + .tlb_flush_walk = apple_dart_tlb_flush_walk, +}; + +static phys_addr_t apple_dart_iova_to_phys(struct iommu_domain *domain, + dma_addr_t iova) +{ + struct apple_dart_domain *dart_domain = to_dart_domain(domain); + struct io_pgtable_ops *ops = dart_domain->pgtbl_ops; + + if (!ops) + return 0; + + return ops->iova_to_phys(ops, iova); +} + +static int apple_dart_map_pages(struct iommu_domain *domain, unsigned long iova, + phys_addr_t paddr, size_t pgsize, + size_t pgcount, int prot, gfp_t gfp, + size_t *mapped) +{ + struct apple_dart_domain *dart_domain = to_dart_domain(domain); + struct io_pgtable_ops *ops = dart_domain->pgtbl_ops; + + if (!ops) + return -ENODEV; + + return ops->map_pages(ops, iova, paddr, pgsize, pgcount, prot, gfp, + mapped); +} + +static size_t apple_dart_unmap_pages(struct iommu_domain *domain, + unsigned long iova, size_t pgsize, + size_t pgcount, + struct iommu_iotlb_gather *gather) +{ + struct apple_dart_domain *dart_domain = 
to_dart_domain(domain); + struct io_pgtable_ops *ops = dart_domain->pgtbl_ops; + + return ops->unmap_pages(ops, iova, pgsize, pgcount, gather); +} + +static void +apple_dart_setup_translation(struct apple_dart_domain *domain, + struct apple_dart_stream_map *stream_map) +{ + int i; + struct io_pgtable_cfg *pgtbl_cfg = + &io_pgtable_ops_to_pgtable(domain->pgtbl_ops)->cfg; + + for (i = 0; i < pgtbl_cfg->apple_dart_cfg.n_ttbrs; ++i) + apple_dart_hw_set_ttbr(stream_map, i, + pgtbl_cfg->apple_dart_cfg.ttbr[i]); + for (; i < DART_MAX_TTBR; ++i) + apple_dart_hw_clear_ttbr(stream_map, i); + + apple_dart_hw_enable_translation(stream_map); + apple_dart_hw_invalidate_tlb(stream_map); +} + +static int apple_dart_finalize_domain(struct iommu_domain *domain, + struct apple_dart_master_cfg *cfg) +{ + struct apple_dart_domain *dart_domain = to_dart_domain(domain); + struct apple_dart *dart = cfg->stream_maps[0].dart; + struct io_pgtable_cfg pgtbl_cfg; + int ret = 0; + int i; + + mutex_lock(&dart_domain->init_lock); + + if (dart_domain->finalized) + goto done; + + for (i = 0; i < MAX_DARTS_PER_DEVICE; ++i) { + dart_domain->stream_maps[i].dart = cfg->stream_maps[i].dart; + atomic64_set(&dart_domain->stream_maps[i].sidmap, + cfg->stream_maps[i].sidmap); + } + + pgtbl_cfg = (struct io_pgtable_cfg){ + .pgsize_bitmap = dart->pgsize, + .ias = 32, + .oas = 36, + .coherent_walk = 1, + .tlb = &apple_dart_tlb_ops, + .iommu_dev = dart->dev, + }; + + dart_domain->pgtbl_ops = + alloc_io_pgtable_ops(APPLE_DART, &pgtbl_cfg, domain); + if (!dart_domain->pgtbl_ops) { + ret = -ENOMEM; + goto done; + } + + domain->pgsize_bitmap = pgtbl_cfg.pgsize_bitmap; + domain->geometry.aperture_start = 0; + domain->geometry.aperture_end = DMA_BIT_MASK(32); + domain->geometry.force_aperture = true; + + dart_domain->finalized = true; + +done: + mutex_unlock(&dart_domain->init_lock); + return ret; +} + +static int +apple_dart_mod_streams(struct apple_dart_atomic_stream_map *domain_maps, + struct apple_dart_stream_map *master_maps, + bool add_streams) +{ + int i; + + for (i = 0; i < MAX_DARTS_PER_DEVICE; ++i) { + if (domain_maps[i].dart != master_maps[i].dart) + return -EINVAL; + } + + for (i = 0; i < MAX_DARTS_PER_DEVICE; ++i) { + if (!domain_maps[i].dart) + break; + if (add_streams) + atomic64_or(master_maps[i].sidmap, + &domain_maps[i].sidmap); + else + atomic64_and(~master_maps[i].sidmap, + &domain_maps[i].sidmap); + } + + return 0; +} + +static int apple_dart_domain_add_streams(struct apple_dart_domain *domain, + struct apple_dart_master_cfg *cfg) +{ + return apple_dart_mod_streams(domain->stream_maps, cfg->stream_maps, + true); +} + +static int apple_dart_domain_remove_streams(struct apple_dart_domain *domain, + struct apple_dart_master_cfg *cfg) +{ + return apple_dart_mod_streams(domain->stream_maps, cfg->stream_maps, + false); +} + +static int apple_dart_attach_dev(struct iommu_domain *domain, + struct device *dev) +{ + int ret, i; + struct apple_dart_stream_map *stream_map; + struct apple_dart_master_cfg *cfg = dev_iommu_priv_get(dev); + struct apple_dart_domain *dart_domain = to_dart_domain(domain); + + if (cfg->stream_maps[0].dart->force_bypass && + domain->type != IOMMU_DOMAIN_IDENTITY) + return -EINVAL; + if (!cfg->stream_maps[0].dart->supports_bypass && + domain->type == IOMMU_DOMAIN_IDENTITY) + return -EINVAL; + + ret = apple_dart_finalize_domain(domain, cfg); + if (ret) + return ret; + + switch (domain->type) { + case IOMMU_DOMAIN_DMA: + case IOMMU_DOMAIN_UNMANAGED: + ret = apple_dart_domain_add_streams(dart_domain, cfg); + if 
(ret) + return ret; + + for_each_stream_map(i, cfg, stream_map) + apple_dart_setup_translation(dart_domain, stream_map); + break; + case IOMMU_DOMAIN_BLOCKED: + for_each_stream_map(i, cfg, stream_map) + apple_dart_hw_disable_dma(stream_map); + break; + case IOMMU_DOMAIN_IDENTITY: + for_each_stream_map(i, cfg, stream_map) + apple_dart_hw_enable_bypass(stream_map); + break; + } + + return ret; +} + +static void apple_dart_detach_dev(struct iommu_domain *domain, + struct device *dev) +{ + int i; + struct apple_dart_stream_map *stream_map; + struct apple_dart_master_cfg *cfg = dev_iommu_priv_get(dev); + struct apple_dart_domain *dart_domain = to_dart_domain(domain); + + for_each_stream_map(i, cfg, stream_map) + apple_dart_hw_disable_dma(stream_map); + + if (domain->type == IOMMU_DOMAIN_DMA || + domain->type == IOMMU_DOMAIN_UNMANAGED) + apple_dart_domain_remove_streams(dart_domain, cfg); +} + +static struct iommu_device *apple_dart_probe_device(struct device *dev) +{ + struct apple_dart_master_cfg *cfg = dev_iommu_priv_get(dev); + struct apple_dart_stream_map *stream_map; + int i; + + if (!cfg) + return ERR_PTR(-ENODEV); + + for_each_stream_map(i, cfg, stream_map) + device_link_add( + dev, stream_map->dart->dev, + DL_FLAG_PM_RUNTIME | DL_FLAG_AUTOREMOVE_SUPPLIER); + + return &cfg->stream_maps[0].dart->iommu; +} + +static void apple_dart_release_device(struct device *dev) +{ + struct apple_dart_master_cfg *cfg = dev_iommu_priv_get(dev); + + if (!cfg) + return; + + dev_iommu_priv_set(dev, NULL); + kfree(cfg); +} + +static struct iommu_domain *apple_dart_domain_alloc(unsigned int type) +{ + struct apple_dart_domain *dart_domain; + + if (type != IOMMU_DOMAIN_DMA && type != IOMMU_DOMAIN_UNMANAGED && + type != IOMMU_DOMAIN_IDENTITY && type != IOMMU_DOMAIN_BLOCKED) + return NULL; + + dart_domain = kzalloc(sizeof(*dart_domain), GFP_KERNEL); + if (!dart_domain) + return NULL; + + iommu_get_dma_cookie(&dart_domain->domain); + mutex_init(&dart_domain->init_lock); + + /* no need to allocate pgtbl_ops or do any other finalization steps */ + if (type == IOMMU_DOMAIN_IDENTITY || type == IOMMU_DOMAIN_BLOCKED) + dart_domain->finalized = true; + + return &dart_domain->domain; +} + +static void apple_dart_domain_free(struct iommu_domain *domain) +{ + struct apple_dart_domain *dart_domain = to_dart_domain(domain); + + if (dart_domain->pgtbl_ops) + free_io_pgtable_ops(dart_domain->pgtbl_ops); + + kfree(dart_domain); +} + +static int apple_dart_of_xlate(struct device *dev, struct of_phandle_args *args) +{ + struct apple_dart_master_cfg *cfg = dev_iommu_priv_get(dev); + struct platform_device *iommu_pdev = of_find_device_by_node(args->np); + struct apple_dart *dart = platform_get_drvdata(iommu_pdev); + struct apple_dart *cfg_dart; + int i, sid; + + if (args->args_count != 1) + return -EINVAL; + sid = args->args[0]; + + if (!cfg) + cfg = kzalloc(sizeof(*cfg), GFP_KERNEL); + if (!cfg) + return -ENOMEM; + dev_iommu_priv_set(dev, cfg); + + cfg_dart = cfg->stream_maps[0].dart; + if (cfg_dart) { + if (cfg_dart->supports_bypass != dart->supports_bypass) + return -EINVAL; + if (cfg_dart->force_bypass != dart->force_bypass) + return -EINVAL; + if (cfg_dart->pgsize != dart->pgsize) + return -EINVAL; + } + + for (i = 0; i < MAX_DARTS_PER_DEVICE; ++i) { + if (cfg->stream_maps[i].dart == dart) { + cfg->stream_maps[i].sidmap |= 1 << sid; + return 0; + } + } + for (i = 0; i < MAX_DARTS_PER_DEVICE; ++i) { + if (!cfg->stream_maps[i].dart) { + cfg->stream_maps[i].dart = dart; + cfg->stream_maps[i].sidmap = 1 << sid; + return 0; + } + 
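+ /* slot is occupied by a different dart; try the next one */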
} + + return -EINVAL; +} + +static struct iommu_group *apple_dart_device_group(struct device *dev) +{ + static DEFINE_MUTEX(lock); + int i, sid; + struct apple_dart_master_cfg *cfg = dev_iommu_priv_get(dev); + struct apple_dart_stream_map *stream_map; + struct iommu_group *group = NULL; + struct iommu_group *res = ERR_PTR(-EINVAL); + + mutex_lock(&lock); + + for_each_stream_map(i, cfg, stream_map) { + for_each_set_bit(sid, &stream_map->sidmap, DART_MAX_STREAMS) { + struct iommu_group *stream_group = + stream_map->dart->sid2group[sid]; + + if (group && group != stream_group) { + res = ERR_PTR(-EINVAL); + goto out; + } + + group = stream_group; + } + } + + if (group) { + res = iommu_group_ref_get(group); + goto out; + } + +#ifdef CONFIG_PCI + if (dev_is_pci(dev)) + group = pci_device_group(dev); + else +#endif + group = generic_device_group(dev); + + for_each_stream_map(i, cfg, stream_map) + for_each_set_bit(sid, &stream_map->sidmap, DART_MAX_STREAMS) + stream_map->dart->sid2group[sid] = group; + + res = group; + +out: + mutex_unlock(&lock); + return res; +} + +static int apple_dart_def_domain_type(struct device *dev) +{ + struct apple_dart_master_cfg *cfg = dev_iommu_priv_get(dev); + + if (cfg->stream_maps[0].dart->force_bypass) + return IOMMU_DOMAIN_IDENTITY; + if (!cfg->stream_maps[0].dart->supports_bypass) + return IOMMU_DOMAIN_DMA; + + return 0; +} + +static const struct iommu_ops apple_dart_iommu_ops = { + .domain_alloc = apple_dart_domain_alloc, + .domain_free = apple_dart_domain_free, + .attach_dev = apple_dart_attach_dev, + .detach_dev = apple_dart_detach_dev, + .map_pages = apple_dart_map_pages, + .unmap_pages = apple_dart_unmap_pages, + .flush_iotlb_all = apple_dart_flush_iotlb_all, + .iotlb_sync = apple_dart_iotlb_sync, + .iotlb_sync_map = apple_dart_iotlb_sync_map, + .iova_to_phys = apple_dart_iova_to_phys, + .probe_device = apple_dart_probe_device, + .release_device = apple_dart_release_device, + .device_group = apple_dart_device_group, + .of_xlate = apple_dart_of_xlate, + .def_domain_type = apple_dart_def_domain_type, + .pgsize_bitmap = -1UL, /* Restricted during dart probe */ +}; + +static irqreturn_t apple_dart_irq(int irq, void *dev) +{ + struct apple_dart *dart = dev; + const char *fault_name = NULL; + u32 error = readl(dart->regs + DART_ERROR); + u32 error_code = FIELD_GET(DART_ERROR_CODE, error); + u32 addr_lo = readl(dart->regs + DART_ERROR_ADDR_LO); + u32 addr_hi = readl(dart->regs + DART_ERROR_ADDR_HI); + u64 addr = addr_lo | (((u64)addr_hi) << 32); + u8 stream_idx = FIELD_GET(DART_ERROR_STREAM, error); + + if (!(error & DART_ERROR_FLAG)) + return IRQ_NONE; + + /* there should only be a single bit set but let's use == to be sure */ + if (error_code == DART_ERROR_READ_FAULT) + fault_name = "READ FAULT"; + else if (error_code == DART_ERROR_WRITE_FAULT) + fault_name = "WRITE FAULT"; + else if (error_code == DART_ERROR_NO_PTE) + fault_name = "NO PTE FOR IOVA"; + else if (error_code == DART_ERROR_NO_PMD) + fault_name = "NO PMD FOR IOVA"; + else if (error_code == DART_ERROR_NO_TTBR) + fault_name = "NO TTBR FOR IOVA"; + else + fault_name = "unknown"; + + dev_err_ratelimited( + dart->dev, + "translation fault: status:0x%x stream:%d code:0x%x (%s) at 0x%llx", + error, stream_idx, error_code, fault_name, addr); + + writel(error, dart->regs + DART_ERROR); + return IRQ_HANDLED; +} + +static int apple_dart_set_bus_ops(const struct iommu_ops *ops) +{ + int ret; + + if (!iommu_present(&platform_bus_type)) { + ret = bus_set_iommu(&platform_bus_type, ops); + if (ret) + return ret; + } 
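+ /* the PCI bus is registered separately; unwind the platform bus registration if it fails */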
+#ifdef CONFIG_PCI + if (!iommu_present(&pci_bus_type)) { + ret = bus_set_iommu(&pci_bus_type, ops); + if (ret) { + bus_set_iommu(&platform_bus_type, NULL); + return ret; + } + } +#endif + return 0; +} + +static int apple_dart_probe(struct platform_device *pdev) +{ + int ret; + u32 dart_params[2]; + struct resource *res; + struct apple_dart *dart; + struct device *dev = &pdev->dev; + + dart = devm_kzalloc(dev, sizeof(*dart), GFP_KERNEL); + if (!dart) + return -ENOMEM; + + dart->dev = dev; + spin_lock_init(&dart->lock); + + res = platform_get_resource(pdev, IORESOURCE_MEM, 0); + if (resource_size(res) < 0x4000) { + dev_err(dev, "MMIO region too small (%pr)\n", res); + return -EINVAL; + } + + dart->regs = devm_ioremap_resource(dev, res); + if (IS_ERR(dart->regs)) + return PTR_ERR(dart->regs); + + dart->irq = platform_get_irq(pdev, 0); + if (dart->irq < 0) + return -ENODEV; + + ret = devm_clk_bulk_get_all(dev, &dart->clks); + if (ret < 0) + return ret; + dart->num_clks = ret; + + ret = clk_bulk_prepare_enable(dart->num_clks, dart->clks); + if (ret) + return ret; + + ret = apple_dart_hw_reset(dart); + if (ret) + goto err_clk_disable; + + dart_params[0] = readl(dart->regs + DART_PARAMS1); + dart_params[1] = readl(dart->regs + DART_PARAMS2); + dart->pgsize = 1 << FIELD_GET(DART_PARAMS_PAGE_SHIFT, dart_params[0]); + dart->supports_bypass = dart_params[1] & DART_PARAMS_BYPASS_SUPPORT; + dart->force_bypass = dart->pgsize > PAGE_SIZE; + + ret = request_irq(dart->irq, apple_dart_irq, IRQF_SHARED, + "apple-dart fault handler", dart); + if (ret) + goto err_clk_disable; + + platform_set_drvdata(pdev, dart); + + ret = apple_dart_set_bus_ops(&apple_dart_iommu_ops); + if (ret) + goto err_free_irq; + + ret = iommu_device_sysfs_add(&dart->iommu, dev, NULL, "apple-dart.%s", + dev_name(&pdev->dev)); + if (ret) + goto err_remove_bus_ops; + + ret = iommu_device_register(&dart->iommu, &apple_dart_iommu_ops, dev); + if (ret) + goto err_sysfs_remove; + + dev_info( + &pdev->dev, + "DART [pagesize %x, bypass support: %d, bypass forced: %d] initialized\n", + dart->pgsize, dart->supports_bypass, dart->force_bypass); + return 0; + +err_sysfs_remove: + iommu_device_sysfs_remove(&dart->iommu); +err_remove_bus_ops: + apple_dart_set_bus_ops(NULL); +err_free_irq: + free_irq(dart->irq, dart); +err_clk_disable: + clk_bulk_disable_unprepare(dart->num_clks, dart->clks); + + return ret; +} + +static int apple_dart_remove(struct platform_device *pdev) +{ + struct apple_dart *dart = platform_get_drvdata(pdev); + + apple_dart_hw_reset(dart); + free_irq(dart->irq, dart); + apple_dart_set_bus_ops(NULL); + + iommu_device_unregister(&dart->iommu); + iommu_device_sysfs_remove(&dart->iommu); + + clk_bulk_disable_unprepare(dart->num_clks, dart->clks); + + return 0; +} + +static const struct of_device_id apple_dart_of_match[] = { + { .compatible = "apple,t8103-dart", .data = NULL }, + {}, +}; +MODULE_DEVICE_TABLE(of, apple_dart_of_match); + +static struct platform_driver apple_dart_driver = { + .driver = { + .name = "apple-dart", + .of_match_table = apple_dart_of_match, + .suppress_bind_attrs = true, + }, + .probe = apple_dart_probe, + .remove = apple_dart_remove, +}; + +module_platform_driver(apple_dart_driver); + +MODULE_DESCRIPTION("IOMMU API for Apple's DART"); +MODULE_AUTHOR("Sven Peter "); +MODULE_LICENSE("GPL v2"); From 5a8cd2be6359e684efea5e4b9b9fdc79b7ab8768 Mon Sep 17 00:00:00 2001 From: Sven Peter Date: Mon, 7 Jun 2021 15:55:14 +0200 Subject: [PATCH 19/19] arm64: apple: add dwc3 controller and DART nodes Signed-off-by: Sven Peter 
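In the driver above, apple_dart_mod_streams() merges or clears a master's stream bits in the domain's per-DART map with atomic64_or()/atomic64_and(). Stripped of the atomics, the bookkeeping reduces to the following plain-C sketch (editor's illustration only; names are made up and the atomicity of the real code is deliberately omitted for brevity):

/*
 * Illustrative sketch: attaching a master ORs its stream bits into the
 * domain's bitmap; detaching clears them again.
 */
#include <stdint.h>
#include <stdio.h>

static uint64_t domain_sidmap;	/* stands in for the atomic64_t sidmap */

static void mod_streams(uint64_t master_sidmap, int add)
{
	if (add)
		domain_sidmap |= master_sidmap;		/* atomic64_or() in the driver */
	else
		domain_sidmap &= ~master_sidmap;	/* atomic64_and(~...) in the driver */
}

int main(void)
{
	mod_streams(1 << 0, 1);	/* attach a master on stream 0 */
	mod_streams(1 << 1, 1);	/* attach a master on stream 1 */
	mod_streams(1 << 0, 0);	/* detach the first master again */

	/* prints: domain sidmap = 0x2 */
	printf("domain sidmap = 0x%llx\n", (unsigned long long)domain_sidmap);
	return 0;
}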
--- arch/arm64/boot/dts/apple/t8103.dtsi | 51 ++++++++++++++++++++++++++++ 1 file changed, 51 insertions(+) diff --git a/arch/arm64/boot/dts/apple/t8103.dtsi b/arch/arm64/boot/dts/apple/t8103.dtsi index a1e22a2ea2e53c..f0c10d1c6cf2ff 100644 --- a/arch/arm64/boot/dts/apple/t8103.dtsi +++ b/arch/arm64/boot/dts/apple/t8103.dtsi @@ -108,6 +108,7 @@ #size-cells = <2>; ranges; + dma-ranges; nonposted-mmio; serial0: serial@235200000 { @@ -131,5 +132,55 @@ interrupt-controller; reg = <0x2 0x3b100000 0x0 0x8000>; }; + + dwc3_0_dart_0: iommu@382f00000 { + compatible = "apple,t8103-dart"; + reg = <0x3 0x82f00000 0x0 0x4000>; + interrupt-parent = <&aic>; + interrupts = ; + #iommu-cells = <1>; + }; + + dwc3_0_dart_1: iommu@382f80000 { + compatible = "apple,t8103-dart"; + reg = <0x3 0x82f80000 0x0 0x4000>; + interrupt-parent = <&aic>; + interrupts = ; + #iommu-cells = <1>; + }; + + dwc3_0: usb@382280000 { + compatible = "snps,dwc3"; + reg = <0x3 0x82280000 0x0 0x100000>; + interrupt-parent = <&aic>; + interrupts = ; + dr_mode = "host"; + iommus = <&dwc3_0_dart_0 0>, <&dwc3_0_dart_1 1>; + }; + + dwc3_1_dart_0: iommu@502f00000 { + compatible = "apple,t8103-dart"; + reg = <0x5 0x02f00000 0x0 0x4000>; + interrupt-parent = <&aic>; + interrupts = ; + #iommu-cells = <1>; + }; + + dwc3_1_dart_1: iommu@502f80000 { + compatible = "apple,t8103-dart"; + reg = <0x5 0x02f80000 0x0 0x4000>; + interrupt-parent = <&aic>; + interrupts = ; + #iommu-cells = <1>; + }; + + dwc3_1: usb@502280000 { + compatible = "snps,dwc3"; + reg = <0x5 0x02280000 0x0 0x100000>; + interrupt-parent = <&aic>; + interrupts = ; + dr_mode = "host"; + iommus = <&dwc3_1_dart_0 0>, <&dwc3_1_dart_1 1>; + }; }; };
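The dwc3 nodes above are masters that span two DARTs, which is what MAX_DARTS_PER_DEVICE = 2 and the slot logic in apple_dart_of_xlate() accommodate: each iommus entry results in one of_xlate() call, which either extends the existing slot for that DART or claims a free one. A plain-C sketch of that slot filling (editor's illustration only; strings stand in for the struct apple_dart pointers of the real code):

/*
 * Illustrative sketch: how the two iommus entries of a dwc3 node land in
 * the per-device stream_maps slots.
 */
#include <stdio.h>

#define MAX_DARTS_PER_DEVICE 2

struct stream_map {
	const char *dart;	/* stands in for struct apple_dart * */
	unsigned long sidmap;
};

static struct stream_map maps[MAX_DARTS_PER_DEVICE];

static int xlate(const char *dart, unsigned int sid)
{
	int i;

	for (i = 0; i < MAX_DARTS_PER_DEVICE; i++) {
		if (maps[i].dart == dart) {	/* existing slot for this DART */
			maps[i].sidmap |= 1ul << sid;
			return 0;
		}
	}
	for (i = 0; i < MAX_DARTS_PER_DEVICE; i++) {
		if (!maps[i].dart) {		/* claim a free slot */
			maps[i].dart = dart;
			maps[i].sidmap = 1ul << sid;
			return 0;
		}
	}
	return -1;	/* master spans more DARTs than we have slots */
}

static const char dart_a[] = "dwc3_0_dart_0";
static const char dart_b[] = "dwc3_0_dart_1";

int main(void)
{
	/* iommus = <&dwc3_0_dart_0 0>, <&dwc3_0_dart_1 1>; */
	xlate(dart_a, 0);
	xlate(dart_b, 1);

	for (int i = 0; i < MAX_DARTS_PER_DEVICE; i++)
		printf("%s: sidmap 0x%lx\n", maps[i].dart, maps[i].sidmap);
	return 0;
}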