author		Linus Torvalds <torvalds@linux-foundation.org>	2015-09-08 17:22:35 -0700
committer	Linus Torvalds <torvalds@linux-foundation.org>	2015-09-08 17:22:35 -0700
commit		9a9952bbd76a13fc2c95c28f09ba1801a3664929 (patch)
tree		8ec6955ec8706dd28cc43161821b4350ebd80424 /drivers/iommu/io-pgtable-arm.c
parent		e81b594cdae73f341ea13bc9fb2b57a5b739c1a3 (diff)
parent		4ad79562577a3936b08365260f86eeb83156479f (diff)
Merge tag 'iommu-updates-v4.3' of git://git.kernel.org/pub/scm/linux/kernel/git/joro/iommu
Pull iommu updates from Joerg Roedel:
 "This time the IOMMU updates are mostly cleanups or fixes. No big new
  features or drivers this time. In particular the changes include:

   - Bigger cleanup of the Domain<->IOMMU data structures and the code
     that manages them in the Intel VT-d driver. This makes the code
     easier to understand and maintain, and also easier to keep the
     data structures in sync. It is also a preparation step to make
     use of default domains from the IOMMU core in the Intel VT-d
     driver.

   - Fixes for a couple of DMA-API misuses in ARM IOMMU drivers,
     namely in the ARM and Tegra SMMU drivers.

   - Fix for a potential buffer overflow in the OMAP iommu driver's
     debug code.

   - A couple of smaller fixes and cleanups in various drivers.

   - One small new feature: report domain-id usage in the Intel VT-d
     driver to more easily detect bugs where these are leaked"

* tag 'iommu-updates-v4.3' of git://git.kernel.org/pub/scm/linux/kernel/git/joro/iommu: (83 commits)
  iommu/vt-d: Really use upper context table when necessary
  x86/vt-d: Fix documentation of DRHD
  iommu/fsl: Really fix init section(s) content
  iommu/io-pgtable-arm: Unmap and free table when overwriting with block
  iommu/io-pgtable-arm: Move init-fn declarations to io-pgtable.h
  iommu/msm: Use BUG_ON instead of if () BUG()
  iommu/vt-d: Access iomem correctly
  iommu/vt-d: Make two functions static
  iommu/vt-d: Use BUG_ON instead of if () BUG()
  iommu/vt-d: Return false instead of 0 in irq_remapping_cap()
  iommu/amd: Use BUG_ON instead of if () BUG()
  iommu/amd: Make a symbol static
  iommu/amd: Simplify allocation in irq_remapping_alloc()
  iommu/tegra-smmu: Parameterize number of TLB lines
  iommu/tegra-smmu: Factor out tegra_smmu_set_pde()
  iommu/tegra-smmu: Extract tegra_smmu_pte_get_use()
  iommu/tegra-smmu: Use __GFP_ZERO to allocate zeroed pages
  iommu/tegra-smmu: Remove PageReserved manipulation
  iommu/tegra-smmu: Convert to use DMA API
  iommu/tegra-smmu: smmu_flush_ptc() wants device addresses
  ...
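The io-pgtable-arm changes in this file replace the old flush_pgtable()
callback with explicit DMA API calls. The core pattern can be sketched
as follows, with illustrative names only (table_alloc, table_set_entry
and the kzalloc-based allocation are not from the patch): page-table
memory is mapped once for device reads, and each CPU-side entry update
is synced so a non-coherent IOMMU walker observes it.

	#include <linux/dma-mapping.h>
	#include <linux/slab.h>

	/* Map a table once for the walker; DMA_TO_DEVICE because only the CPU writes it. */
	static u64 *table_alloc(struct device *dev, size_t size, dma_addr_t *dma)
	{
		u64 *table = kzalloc(size, GFP_KERNEL);

		if (!table)
			return NULL;

		*dma = dma_map_single(dev, table, size, DMA_TO_DEVICE);
		if (dma_mapping_error(dev, *dma)) {
			kfree(table);
			return NULL;
		}
		return table;
	}

	/* Write one entry, then make the write visible to the device. */
	static void table_set_entry(struct device *dev, u64 *table,
				    dma_addr_t dma, int idx, u64 val)
	{
		table[idx] = val;
		dma_sync_single_for_device(dev, dma + idx * sizeof(u64),
					   sizeof(u64), DMA_TO_DEVICE);
	}

This is exactly the shape of __arm_lpae_alloc_pages() and
__arm_lpae_set_pte() in the diff below, which additionally skip the DMA
calls while the selftests run without a real device.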
Diffstat (limited to 'drivers/iommu/io-pgtable-arm.c')
-rw-r--r--	drivers/iommu/io-pgtable-arm.c	143
1 file changed, 109 insertions, 34 deletions
diff --git a/drivers/iommu/io-pgtable-arm.c b/drivers/iommu/io-pgtable-arm.c
index 4e460216bd16..73c07482f487 100644
--- a/drivers/iommu/io-pgtable-arm.c
+++ b/drivers/iommu/io-pgtable-arm.c
@@ -26,6 +26,8 @@
#include <linux/slab.h>
#include <linux/types.h>
+#include <asm/barrier.h>
+
#include "io-pgtable.h"
#define ARM_LPAE_MAX_ADDR_BITS 48
@@ -200,20 +202,97 @@ typedef u64 arm_lpae_iopte;
static bool selftest_running = false;
+static dma_addr_t __arm_lpae_dma_addr(struct device *dev, void *pages)
+{
+ return phys_to_dma(dev, virt_to_phys(pages));
+}
+
+static void *__arm_lpae_alloc_pages(size_t size, gfp_t gfp,
+ struct io_pgtable_cfg *cfg)
+{
+ struct device *dev = cfg->iommu_dev;
+ dma_addr_t dma;
+ void *pages = alloc_pages_exact(size, gfp | __GFP_ZERO);
+
+ if (!pages)
+ return NULL;
+
+ if (!selftest_running) {
+ dma = dma_map_single(dev, pages, size, DMA_TO_DEVICE);
+ if (dma_mapping_error(dev, dma))
+ goto out_free;
+ /*
+ * We depend on the IOMMU being able to work with any physical
+ * address directly, so if the DMA layer suggests it can't by
+ * giving us back some translation, that bodes very badly...
+ */
+ if (dma != __arm_lpae_dma_addr(dev, pages))
+ goto out_unmap;
+ }
+
+ return pages;
+
+out_unmap:
+ dev_err(dev, "Cannot accommodate DMA translation for IOMMU page tables\n");
+ dma_unmap_single(dev, dma, size, DMA_TO_DEVICE);
+out_free:
+ free_pages_exact(pages, size);
+ return NULL;
+}
+
+static void __arm_lpae_free_pages(void *pages, size_t size,
+ struct io_pgtable_cfg *cfg)
+{
+ struct device *dev = cfg->iommu_dev;
+
+ if (!selftest_running)
+ dma_unmap_single(dev, __arm_lpae_dma_addr(dev, pages),
+ size, DMA_TO_DEVICE);
+ free_pages_exact(pages, size);
+}
+
+static void __arm_lpae_set_pte(arm_lpae_iopte *ptep, arm_lpae_iopte pte,
+ struct io_pgtable_cfg *cfg)
+{
+ struct device *dev = cfg->iommu_dev;
+
+ *ptep = pte;
+
+ if (!selftest_running)
+ dma_sync_single_for_device(dev, __arm_lpae_dma_addr(dev, ptep),
+ sizeof(pte), DMA_TO_DEVICE);
+}
+
+static int __arm_lpae_unmap(struct arm_lpae_io_pgtable *data,
+ unsigned long iova, size_t size, int lvl,
+ arm_lpae_iopte *ptep);
+
static int arm_lpae_init_pte(struct arm_lpae_io_pgtable *data,
unsigned long iova, phys_addr_t paddr,
arm_lpae_iopte prot, int lvl,
arm_lpae_iopte *ptep)
{
arm_lpae_iopte pte = prot;
+ struct io_pgtable_cfg *cfg = &data->iop.cfg;
- /* We require an unmap first */
if (iopte_leaf(*ptep, lvl)) {
+ /* We require an unmap first */
WARN_ON(!selftest_running);
return -EEXIST;
+ } else if (iopte_type(*ptep, lvl) == ARM_LPAE_PTE_TYPE_TABLE) {
+ /*
+ * We need to unmap and free the old table before
+ * overwriting it with a block entry.
+ */
+ arm_lpae_iopte *tblp;
+ size_t sz = ARM_LPAE_BLOCK_SIZE(lvl, data);
+
+ tblp = ptep - ARM_LPAE_LVL_IDX(iova, lvl, data);
+ if (WARN_ON(__arm_lpae_unmap(data, iova, sz, lvl, tblp) != sz))
+ return -EINVAL;
}
- if (data->iop.cfg.quirks & IO_PGTABLE_QUIRK_ARM_NS)
+ if (cfg->quirks & IO_PGTABLE_QUIRK_ARM_NS)
pte |= ARM_LPAE_PTE_NS;
if (lvl == ARM_LPAE_MAX_LEVELS - 1)
@@ -224,8 +303,7 @@ static int arm_lpae_init_pte(struct arm_lpae_io_pgtable *data,
pte |= ARM_LPAE_PTE_AF | ARM_LPAE_PTE_SH_IS;
pte |= pfn_to_iopte(paddr >> data->pg_shift, data);
- *ptep = pte;
- data->iop.cfg.tlb->flush_pgtable(ptep, sizeof(*ptep), data->iop.cookie);
+ __arm_lpae_set_pte(ptep, pte, cfg);
return 0;
}
@@ -234,14 +312,14 @@ static int __arm_lpae_map(struct arm_lpae_io_pgtable *data, unsigned long iova,
int lvl, arm_lpae_iopte *ptep)
{
arm_lpae_iopte *cptep, pte;
- void *cookie = data->iop.cookie;
size_t block_size = ARM_LPAE_BLOCK_SIZE(lvl, data);
+ struct io_pgtable_cfg *cfg = &data->iop.cfg;
/* Find our entry at the current level */
ptep += ARM_LPAE_LVL_IDX(iova, lvl, data);
/* If we can install a leaf entry at this level, then do so */
- if (size == block_size && (size & data->iop.cfg.pgsize_bitmap))
+ if (size == block_size && (size & cfg->pgsize_bitmap))
return arm_lpae_init_pte(data, iova, paddr, prot, lvl, ptep);
/* We can't allocate tables at the final level */
@@ -251,18 +329,15 @@ static int __arm_lpae_map(struct arm_lpae_io_pgtable *data, unsigned long iova,
/* Grab a pointer to the next level */
pte = *ptep;
if (!pte) {
- cptep = alloc_pages_exact(1UL << data->pg_shift,
- GFP_ATOMIC | __GFP_ZERO);
+ cptep = __arm_lpae_alloc_pages(1UL << data->pg_shift,
+ GFP_ATOMIC, cfg);
if (!cptep)
return -ENOMEM;
- data->iop.cfg.tlb->flush_pgtable(cptep, 1UL << data->pg_shift,
- cookie);
pte = __pa(cptep) | ARM_LPAE_PTE_TYPE_TABLE;
- if (data->iop.cfg.quirks & IO_PGTABLE_QUIRK_ARM_NS)
+ if (cfg->quirks & IO_PGTABLE_QUIRK_ARM_NS)
pte |= ARM_LPAE_PTE_NSTABLE;
- *ptep = pte;
- data->iop.cfg.tlb->flush_pgtable(ptep, sizeof(*ptep), cookie);
+ __arm_lpae_set_pte(ptep, pte, cfg);
} else {
cptep = iopte_deref(pte, data);
}
@@ -309,7 +384,7 @@ static int arm_lpae_map(struct io_pgtable_ops *ops, unsigned long iova,
{
struct arm_lpae_io_pgtable *data = io_pgtable_ops_to_data(ops);
arm_lpae_iopte *ptep = data->pgd;
- int lvl = ARM_LPAE_START_LVL(data);
+ int ret, lvl = ARM_LPAE_START_LVL(data);
arm_lpae_iopte prot;
/* If no access, then nothing to do */
@@ -317,7 +392,14 @@ static int arm_lpae_map(struct io_pgtable_ops *ops, unsigned long iova,
return 0;
prot = arm_lpae_prot_to_pte(data, iommu_prot);
- return __arm_lpae_map(data, iova, paddr, size, prot, lvl, ptep);
+ ret = __arm_lpae_map(data, iova, paddr, size, prot, lvl, ptep);
+ /*
+ * Synchronise all PTE updates for the new mapping before there's
+ * a chance for anything to kick off a table walk for the new iova.
+ */
+ wmb();
+
+ return ret;
}
static void __arm_lpae_free_pgtable(struct arm_lpae_io_pgtable *data, int lvl,
@@ -347,7 +429,7 @@ static void __arm_lpae_free_pgtable(struct arm_lpae_io_pgtable *data, int lvl,
__arm_lpae_free_pgtable(data, lvl + 1, iopte_deref(pte, data));
}
- free_pages_exact(start, table_size);
+ __arm_lpae_free_pages(start, table_size, &data->iop.cfg);
}
static void arm_lpae_free_pgtable(struct io_pgtable *iop)
@@ -366,8 +448,7 @@ static int arm_lpae_split_blk_unmap(struct arm_lpae_io_pgtable *data,
unsigned long blk_start, blk_end;
phys_addr_t blk_paddr;
arm_lpae_iopte table = 0;
- void *cookie = data->iop.cookie;
- const struct iommu_gather_ops *tlb = data->iop.cfg.tlb;
+ struct io_pgtable_cfg *cfg = &data->iop.cfg;
blk_start = iova & ~(blk_size - 1);
blk_end = blk_start + blk_size;
@@ -393,10 +474,9 @@ static int arm_lpae_split_blk_unmap(struct arm_lpae_io_pgtable *data,
}
}
- *ptep = table;
- tlb->flush_pgtable(ptep, sizeof(*ptep), cookie);
+ __arm_lpae_set_pte(ptep, table, cfg);
iova &= ~(blk_size - 1);
- tlb->tlb_add_flush(iova, blk_size, true, cookie);
+ cfg->tlb->tlb_add_flush(iova, blk_size, true, data->iop.cookie);
return size;
}
@@ -418,13 +498,12 @@ static int __arm_lpae_unmap(struct arm_lpae_io_pgtable *data,
/* If the size matches this level, we're in the right place */
if (size == blk_size) {
- *ptep = 0;
- tlb->flush_pgtable(ptep, sizeof(*ptep), cookie);
+ __arm_lpae_set_pte(ptep, 0, &data->iop.cfg);
if (!iopte_leaf(pte, lvl)) {
/* Also flush any partial walks */
tlb->tlb_add_flush(iova, size, false, cookie);
- tlb->tlb_sync(data->iop.cookie);
+ tlb->tlb_sync(cookie);
ptep = iopte_deref(pte, data);
__arm_lpae_free_pgtable(data, lvl + 1, ptep);
} else {
@@ -640,11 +719,12 @@ arm_64_lpae_alloc_pgtable_s1(struct io_pgtable_cfg *cfg, void *cookie)
cfg->arm_lpae_s1_cfg.mair[1] = 0;
/* Looking good; allocate a pgd */
- data->pgd = alloc_pages_exact(data->pgd_size, GFP_KERNEL | __GFP_ZERO);
+ data->pgd = __arm_lpae_alloc_pages(data->pgd_size, GFP_KERNEL, cfg);
if (!data->pgd)
goto out_free_data;
- cfg->tlb->flush_pgtable(data->pgd, data->pgd_size, cookie);
+ /* Ensure the empty pgd is visible before any actual TTBR write */
+ wmb();
/* TTBRs */
cfg->arm_lpae_s1_cfg.ttbr[0] = virt_to_phys(data->pgd);
@@ -728,11 +808,12 @@ arm_64_lpae_alloc_pgtable_s2(struct io_pgtable_cfg *cfg, void *cookie)
cfg->arm_lpae_s2_cfg.vtcr = reg;
/* Allocate pgd pages */
- data->pgd = alloc_pages_exact(data->pgd_size, GFP_KERNEL | __GFP_ZERO);
+ data->pgd = __arm_lpae_alloc_pages(data->pgd_size, GFP_KERNEL, cfg);
if (!data->pgd)
goto out_free_data;
- cfg->tlb->flush_pgtable(data->pgd, data->pgd_size, cookie);
+ /* Ensure the empty pgd is visible before any actual TTBR write */
+ wmb();
/* VTTBR */
cfg->arm_lpae_s2_cfg.vttbr = virt_to_phys(data->pgd);
@@ -818,16 +899,10 @@ static void dummy_tlb_sync(void *cookie)
WARN_ON(cookie != cfg_cookie);
}
-static void dummy_flush_pgtable(void *ptr, size_t size, void *cookie)
-{
- WARN_ON(cookie != cfg_cookie);
-}
-
static struct iommu_gather_ops dummy_tlb_ops __initdata = {
.tlb_flush_all = dummy_tlb_flush_all,
.tlb_add_flush = dummy_tlb_add_flush,
.tlb_sync = dummy_tlb_sync,
- .flush_pgtable = dummy_flush_pgtable,
};
static void __init arm_lpae_dump_ops(struct io_pgtable_ops *ops)
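One practical consequence of this conversion: flush_pgtable is gone
from iommu_gather_ops, and io-pgtable users must instead supply the
device used for DMA mapping via cfg->iommu_dev. A hedged sketch of what
a caller might look like after this change (smmu, my_gather_ops and the
chosen ias/oas values are illustrative, not taken from this diff):

	struct io_pgtable_cfg cfg = {
		.pgsize_bitmap	= smmu->pgsize_bitmap,
		.ias		= 48,
		.oas		= 48,
		.tlb		= &my_gather_ops,	/* no .flush_pgtable hook any more */
		.iommu_dev	= smmu->dev,		/* used by __arm_lpae_alloc_pages() */
	};
	struct io_pgtable_ops *ops = alloc_io_pgtable_ops(ARM_64_LPAE_S1, &cfg, cookie);

	if (ops)
		ops->map(ops, iova, paddr, SZ_4K, IOMMU_READ | IOMMU_WRITE);

Any driver that leaves cfg->iommu_dev unset would trip over the
dma_map_single() call during table allocation, which is the point of
routing all page-table memory through the DMA API in the first place.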
