armbian-build/patch/kernel/archive/sunxi-6.18/patches.armbian/drv-iommu-sunxi-add-iommu-driver.patch

2394 lines
73 KiB
Diff

From 0000000000000000000000000000000000000000 Mon Sep 17 00:00:00 2001
From: Marvin Wewer <mwewer37@proton.me>
Date: Mon, 25 Aug 2025 10:45:25 +0000
Subject: iommu: sunxi: add sun55i (A523) IOMMU driver
Signed-off-by: Marvin Wewer <mwewer37@proton.me>
---
drivers/iommu/Kconfig | 10 +
drivers/iommu/Makefile | 3 +
drivers/iommu/sun55i-iommu-pgtable.c | 468 +++
drivers/iommu/sun55i-iommu-pgtable.h | 125 +
drivers/iommu/sun55i-iommu.c | 1606 ++++++++++
drivers/iommu/sun55i-iommu.h | 57 +
include/sunxi-iommu.h | 50 +
7 files changed, 2319 insertions(+)
diff --git a/drivers/iommu/Kconfig b/drivers/iommu/Kconfig
index 111111111111..222222222222 100644
--- a/drivers/iommu/Kconfig
+++ b/drivers/iommu/Kconfig
@@ -244,6 +244,16 @@ config SUN50I_IOMMU
select IOMMU_API
help
Support for the IOMMU introduced in the Allwinner H6 SoCs.
+
+config SUN55I_IOMMU
+ bool "Allwinner A523 IOMMU Support"
+ depends on HAS_DMA
+ depends on ARCH_SUNXI || COMPILE_TEST
+ select ARM_DMA_USE_IOMMU
+ select IOMMU_API
+ select IOMMU_DMA
+ help
+ Support for the IOMMU introduced in the Allwinner A523 SoCs.
config TEGRA_IOMMU_SMMU
bool "NVIDIA Tegra SMMU Support"
diff --git a/drivers/iommu/Makefile b/drivers/iommu/Makefile
index 111111111111..222222222222 100644
--- a/drivers/iommu/Makefile
+++ b/drivers/iommu/Makefile
@@ -34,3 +34,6 @@ obj-$(CONFIG_IOMMU_SVA) += iommu-sva.o
obj-$(CONFIG_IOMMU_IOPF) += io-pgfault.o
obj-$(CONFIG_SPRD_IOMMU) += sprd-iommu.o
obj-$(CONFIG_APPLE_DART) += apple-dart.o
+obj-$(CONFIG_SUN55I_IOMMU) += sunxi-iommu.o
+sunxi-iommu-objs := sun55i-iommu-pgtable.o
+sunxi-iommu-objs += sun55i-iommu.o
diff --git a/drivers/iommu/sun55i-iommu-pgtable.c b/drivers/iommu/sun55i-iommu-pgtable.c
new file mode 100644
index 000000000000..111111111111
--- /dev/null
+++ b/drivers/iommu/sun55i-iommu-pgtable.c
@@ -0,0 +1,468 @@
+// SPDX-License-Identifier: GPL-2.0
+/* Copyright(c) 2020 - 2023 Allwinner Technology Co.,Ltd. All rights reserved. */
+/*
+ * Allwinner's pgtable controller
+ *
+ * Copyright (c) 2023, ouyangkun <ouyangkun@allwinnertech.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ *
+ */
+
+#include <linux/iommu.h>
+#include <linux/slab.h>
+#include "sun55i-iommu.h"
+#include <sunxi-iommu.h>
+
+#define NUM_ENTRIES_PDE (1UL << (IOMMU_VA_BITS - IOMMU_PD_SHIFT))
+#define NUM_ENTRIES_PTE (1UL << (IOMMU_PD_SHIFT - IOMMU_PT_SHIFT))
+#define PD_SIZE (NUM_ENTRIES_PDE * sizeof(u32))
+#define PT_SIZE (NUM_ENTRIES_PTE * sizeof(u32))
+
+#define PAGE_OFFSET_MASK ((1UL << IOMMU_PT_SHIFT) - 1)
+#define IOPTE_BASE_MASK (~(PT_SIZE - 1))
+
+/*
+ * Page Directory Entry Control Bits
+ */
+#define DENT_VALID 0x01
+#define DENT_PTE_SHFIT 10
+#define DENT_WRITABLE BIT(3)
+#define DENT_READABLE BIT(2)
+
+/*
+ * Page Table Entry Control Bits
+ */
+#define SUNXI_PTE_PAGE_WRITABLE BIT(3)
+#define SUNXI_PTE_PAGE_READABLE BIT(2)
+#define SUNXI_PTE_PAGE_VALID BIT(1)
+
+#define IS_VALID(x) (((x)&0x03) == DENT_VALID)
+
+#define IOPDE_INDEX(va) (((va) >> IOMMU_PD_SHIFT) & (NUM_ENTRIES_PDE - 1))
+#define IOPTE_INDEX(va) (((va) >> IOMMU_PT_SHIFT) & (NUM_ENTRIES_PTE - 1))
+
+#define IOPTE_BASE(ent) ((ent)&IOPTE_BASE_MASK)
+
+#define IOPTE_TO_PFN(ent) ((*ent) & IOMMU_PT_MASK)
+#define IOVA_PAGE_OFT(va) ((va)&PAGE_OFFSET_MASK)
+
+/* IO virtual address start page frame number */
+#define IOVA_START_PFN (1)
+#define IOVA_PFN(addr) ((addr) >> PAGE_SHIFT)
+
+/* TLB Invalid ALIGN */
+#define IOVA_4M_ALIGN(iova) ((iova) & (~0x3fffff))
+
+struct sunxi_pgtable_t {
+ unsigned int *pgtable;
+ struct kmem_cache *iopte_cache;
+ struct device *dma_dev;
+} sunxi_pgtable_params;
+
+/* pointer to l1 table entry */
+static inline u32 *iopde_offset(u32 *iopd, dma_addr_t iova)
+{
+ return iopd + IOPDE_INDEX(iova);
+}
+
+/* pointer to l2 table entry */
+static inline u32 *iopte_offset(u32 *ent, dma_addr_t iova)
+{
+ u64 iopte_base = 0;
+
+ iopte_base = IOPTE_BASE(*ent);
+ iopte_base = iommu_phy_to_cpu_phy(iopte_base);
+
+ return (u32 *)__va(iopte_base) + IOPTE_INDEX(iova);
+}
+
+static int sunxi_alloc_iopte(u32 *sent, int prot)
+{
+ u32 *pent;
+ u32 flags = 0;
+
+ flags |= (prot & IOMMU_READ) ? DENT_READABLE : 0;
+ flags |= (prot & IOMMU_WRITE) ? DENT_WRITABLE : 0;
+
+ pent = kmem_cache_zalloc(sunxi_pgtable_params.iopte_cache, GFP_ATOMIC);
+ WARN_ON((unsigned long)pent & (PT_SIZE - 1));
+ if (!pent) {
+ pr_err("%s, %d, kmalloc failed!\n", __func__, __LINE__);
+ return 0;
+ }
+ dma_sync_single_for_cpu(sunxi_pgtable_params.dma_dev,
+ virt_to_phys(sent), sizeof(*sent),
+ DMA_TO_DEVICE);
+ *sent = cpu_phy_to_iommu_phy(__pa(pent)) | DENT_VALID;
+ dma_sync_single_for_device(sunxi_pgtable_params.dma_dev,
+ virt_to_phys(sent), sizeof(*sent),
+ DMA_TO_DEVICE);
+
+ return 1;
+}
+
+static void sunxi_free_iopte(u32 *pent)
+{
+ kmem_cache_free(sunxi_pgtable_params.iopte_cache, pent);
+}
+
+static inline u32 sunxi_mk_pte(phys_addr_t page, int prot)
+{
+ u32 flags = 0;
+ u32 high_addr = 0;
+
+ flags |= (prot & IOMMU_READ) ? SUNXI_PTE_PAGE_READABLE : 0;
+ flags |= (prot & IOMMU_WRITE) ? SUNXI_PTE_PAGE_WRITABLE : 0;
+ page &= IOMMU_PT_MASK;
+
+ return page | high_addr | flags | SUNXI_PTE_PAGE_VALID;
+}
+
+int sunxi_pgtable_prepare_l1_tables(unsigned int *pgtable,
+ dma_addr_t iova_start, dma_addr_t iova_end,
+ int prot)
+{
+ u32 *dent;
+ for (; iova_start <= iova_end; iova_start += SPD_SIZE) {
+ dent = iopde_offset(pgtable, iova_start);
+ if (!IS_VALID(*dent) && !sunxi_alloc_iopte(dent, prot)) {
+ return -ENOMEM;
+ }
+ }
+ return 0;
+}
+
+int sunxi_pgtable_prepare_l2_tables(unsigned int *pgtable,
+ dma_addr_t iova_start, dma_addr_t iova_end,
+ phys_addr_t paddr, int prot)
+{
+ size_t paddr_start;
+ u32 *dent, *pent;
+ u32 iova_tail_count, iova_tail_size;
+ u32 pent_val;
+ int i;
+ paddr = cpu_phy_to_iommu_phy(paddr);
+ paddr_start = paddr & IOMMU_PT_MASK;
+ for (; iova_start < iova_end;) {
+ iova_tail_count = NUM_ENTRIES_PTE - IOPTE_INDEX(iova_start);
+ iova_tail_size = iova_tail_count * SPAGE_SIZE;
+ if (iova_start + iova_tail_size > iova_end) {
+ iova_tail_size = iova_end - iova_start;
+ iova_tail_count = iova_tail_size / SPAGE_SIZE;
+ }
+
+ dent = iopde_offset(pgtable, iova_start);
+ pent = iopte_offset(dent, iova_start);
+ pent_val = sunxi_mk_pte(paddr_start, prot);
+ for (i = 0; i < iova_tail_count; i++) {
+ WARN_ON(*pent);
+ *pent = pent_val + SPAGE_SIZE * i;
+ pent++;
+ }
+
+ dma_sync_single_for_device(
+ sunxi_pgtable_params.dma_dev,
+ virt_to_phys(iopte_offset(dent, iova_start)),
+ iova_tail_count << 2, DMA_TO_DEVICE);
+ iova_start += iova_tail_size;
+ paddr_start += iova_tail_size;
+ }
+ return 0;
+}
+
+
+int sunxi_pgtable_delete_l2_tables(unsigned int *pgtable, dma_addr_t iova_start,
+ dma_addr_t iova_end)
+{
+ u32 *dent, *pent;
+ u32 iova_tail_count, iova_tail_size;
+ iova_tail_count = NUM_ENTRIES_PTE - IOPTE_INDEX(iova_start);
+ iova_tail_size = iova_tail_count * SPAGE_SIZE;
+ if (iova_start + iova_tail_size > iova_end) {
+ iova_tail_size = iova_end - iova_start;
+ iova_tail_count = iova_tail_size / SPAGE_SIZE;
+ }
+
+ dent = iopde_offset(pgtable, iova_start);
+ if (!IS_VALID(*dent))
+ return -EINVAL;
+ pent = iopte_offset(dent, iova_start);
+ memset(pent, 0, iova_tail_count * sizeof(u32));
+ dma_sync_single_for_device(sunxi_pgtable_params.dma_dev,
+ virt_to_phys(iopte_offset(dent, iova_start)),
+ iova_tail_count << 2, DMA_TO_DEVICE);
+
+ if (iova_tail_size == SPD_SIZE) {
+ *dent = 0;
+ dma_sync_single_for_device(sunxi_pgtable_params.dma_dev,
+ virt_to_phys(dent), sizeof(*dent),
+ DMA_TO_DEVICE);
+ sunxi_free_iopte(pent);
+ }
+ return iova_tail_size;
+}
+
+
+phys_addr_t sunxi_pgtable_iova_to_phys(unsigned int *pgtable, dma_addr_t iova)
+{
+ u32 *dent, *pent;
+ phys_addr_t ret = 0;
+ dent = iopde_offset(pgtable, iova);
+ if (IS_VALID(*dent)) {
+ pent = iopte_offset(dent, iova);
+ if (*pent) {
+ ret = IOPTE_TO_PFN(pent) + IOVA_PAGE_OFT(iova);
+ ret = iommu_phy_to_cpu_phy(ret);
+ }
+ }
+ return ret;
+}
+
+
+int sunxi_pgtable_invalid_helper(unsigned int *pgtable, dma_addr_t iova)
+{
+ u32 *pte_addr, *dte_addr;
+
+ dte_addr = iopde_offset(pgtable, iova);
+ if ((*dte_addr & 0x3) != 0x1) {
+ pr_err("0x%pad is not mapped!\n", &iova);
+ return 1;
+ }
+ pte_addr = iopte_offset(dte_addr, iova);
+ if ((*pte_addr & 0x2) == 0) {
+ pr_err("0x%pad is not mapped!\n", &iova);
+ return 1;
+ }
+ pr_err("0x%pad is mapped!\n", &iova);
+
+ return 0;
+}
+
+
+void sunxi_pgtable_clear(unsigned int *pgtable)
+{
+ int i = 0;
+ u32 *dent, *pent;
+ size_t iova;
+
+ for (i = 0; i < NUM_ENTRIES_PDE; ++i) {
+ dent = pgtable + i;
+ iova = (unsigned long)i << IOMMU_PD_SHIFT;
+ if (IS_VALID(*dent)) {
+ pent = iopte_offset(dent, iova);
+ dma_sync_single_for_cpu(sunxi_pgtable_params.dma_dev,
+ virt_to_phys(pent), PT_SIZE,
+ DMA_TO_DEVICE);
+ memset(pent, 0, PT_SIZE);
+ dma_sync_single_for_device(sunxi_pgtable_params.dma_dev,
+ virt_to_phys(pent), PT_SIZE,
+ DMA_TO_DEVICE);
+ dma_sync_single_for_cpu(sunxi_pgtable_params.dma_dev,
+ virt_to_phys(dent), sizeof(*dent),
+ DMA_TO_DEVICE);
+ *dent = 0;
+ dma_sync_single_for_device(sunxi_pgtable_params.dma_dev,
+ virt_to_phys(dent),
+ sizeof(*dent),
+ DMA_TO_DEVICE);
+ sunxi_free_iopte(pent);
+ }
+ }
+}
+
+
+unsigned int *sunxi_pgtable_alloc(void)
+{
+ unsigned int *pgtable;
+ pgtable = (unsigned int *)__get_free_pages(GFP_KERNEL,
+ get_order(PD_SIZE));
+
+ if (pgtable != NULL) {
+ memset(pgtable, 0, PD_SIZE);
+ }
+ sunxi_pgtable_params.pgtable = pgtable;
+ return pgtable;
+}
+
+
+void sunxi_pgtable_free(unsigned int *pgtable)
+{
+ free_pages((unsigned long)pgtable, get_order(PD_SIZE));
+ sunxi_pgtable_params.pgtable = NULL;
+}
+
+
+static inline bool __region_ended(u32 pent)
+{
+ return !(pent & SUNXI_PTE_PAGE_VALID);
+}
+
+static inline bool __access_mask_changed(u32 pent, u32 old_mask)
+{
+ return old_mask !=
+ (pent & (SUNXI_PTE_PAGE_READABLE | SUNXI_PTE_PAGE_WRITABLE));
+}
+
+static u32 __print_region(char *buf, size_t buf_len, ssize_t len,
+ struct dump_region *active_region,
+ bool for_sysfs_show)
+{
+ if (active_region->type == DUMP_REGION_RESERVE) {
+ if (for_sysfs_show) {
+ len += sysfs_emit_at(
+ buf, len,
+ "iova:%pad size:0x%zx\n",
+ &active_region->iova, active_region->size);
+ } else {
+ len += scnprintf(
+ buf + len, buf_len - len,
+ "iova:%pad size:0x%zx\n",
+ &active_region->iova, active_region->size);
+ }
+ } else {
+ if (for_sysfs_show) {
+ len += sysfs_emit_at(
+ buf, len,
+ "iova:%pad phys:%pad %s%s size:0x%zx\n",
+ &active_region->iova, &active_region->phys,
+ active_region->access_mask &
+ SUNXI_PTE_PAGE_READABLE ?
+ "R" :
+ " ",
+ active_region->access_mask &
+ SUNXI_PTE_PAGE_WRITABLE ?
+ "W" :
+ " ",
+ active_region->size);
+ } else {
+ len += scnprintf(
+ buf + len, buf_len - len,
+ "iova:%pad phys:%pad %s%s size:0x%zx\n",
+ &active_region->iova, &active_region->phys,
+ active_region->access_mask &
+ SUNXI_PTE_PAGE_READABLE ?
+ "R" :
+ " ",
+ active_region->access_mask &
+ SUNXI_PTE_PAGE_WRITABLE ?
+ "W" :
+ " ",
+ active_region->size);
+ }
+ }
+ return len;
+}
+
+ssize_t sunxi_pgtable_dump(unsigned int *pgtable, ssize_t len, char *buf,
+ size_t buf_len, bool for_sysfs_show)
+{
+ /* walk and dump */
+ int i, j;
+ u32 *dent, *pent;
+ struct dump_region active_region;
+
+ if (for_sysfs_show) {
+ len += sysfs_emit_at(buf, len, "mapped\n");
+ } else {
+ len += scnprintf(buf + len, buf_len - len, "mapped\n");
+ }
+
+ dent = pgtable;
+ active_region.type = DUMP_REGION_MAP;
+ active_region.size = 0;
+ active_region.access_mask = 0;
+ for (i = 0; i < NUM_ENTRIES_PDE; i++) {
+ j = 0;
+ if (!IS_VALID(dent[i])) {
+ /* empty dentry means end of region, print it */
+ if (active_region.size) {
+ len = __print_region(buf, buf_len, len,
+ &active_region,
+ for_sysfs_show);
+ /* prepare next region */
+ active_region.size = 0;
+ active_region.access_mask = 0;
+ }
+ continue;
+ }
+ /* iova here use for l1 idx, safe to pass 0 to get entry for 1st page(idx 0)*/
+ pent = iopte_offset(dent + i, 0);
+ for (; j < NUM_ENTRIES_PTE; j++) {
+ if (active_region.size) {
+ /* looks like we are counting something, check if it need printing */
+ if (__region_ended(pent[j]) /* not contiguous */
+ ||
+ (active_region.access_mask &&
+ __access_mask_changed(
+ pent[j],
+ active_region
+ .access_mask)) /* different access */
+ ) {
+ len = __print_region(buf, buf_len, len,
+ &active_region,
+ for_sysfs_show);
+
+ /* prepare next region */
+ active_region.size = 0;
+ active_region.access_mask = 0;
+ }
+ }
+
+ if (pent[j] & SUNXI_PTE_PAGE_VALID) {
+ /* not currently counting a region, mark the start address */
+ if (active_region.size == 0) {
+ active_region.iova =
+ ((dma_addr_t)i
+ << IOMMU_PD_SHIFT) +
+ ((dma_addr_t)j
+ << IOMMU_PT_SHIFT);
+ active_region.phys =
+ iommu_phy_to_cpu_phy(
+ IOPTE_TO_PFN(&pent[j]));
+ active_region.access_mask =
+ (pent[j] &
+ (SUNXI_PTE_PAGE_READABLE |
+ SUNXI_PTE_PAGE_WRITABLE));
+ }
+ active_region.size += 1 << IOMMU_PT_SHIFT;
+ }
+ }
+ }
+ //dump last region (if any)
+ if (active_region.size) {
+ len = __print_region(buf, buf_len, len, &active_region,
+ for_sysfs_show);
+ }
+ return len;
+}
+
+
+struct kmem_cache *sunxi_pgtable_alloc_pte_cache(void)
+{
+ struct kmem_cache *cache;
+ cache = kmem_cache_create("sunxi-iopte-cache", PT_SIZE, PT_SIZE,
+ SLAB_HWCACHE_ALIGN, NULL);
+ sunxi_pgtable_params.iopte_cache = cache;
+ return cache;
+}
+
+
+void sunxi_pgtable_free_pte_cache(struct kmem_cache *iopte_cache)
+{
+ kmem_cache_destroy(iopte_cache);
+}
+
+
+void sunxi_pgtable_set_dma_dev(struct device *dma_dev)
+{
+ sunxi_pgtable_params.dma_dev = dma_dev;
+}
diff --git a/drivers/iommu/sun55i-iommu-pgtable.h b/drivers/iommu/sun55i-iommu-pgtable.h
new file mode 100644
index 000000000000..111111111111
--- /dev/null
+++ b/drivers/iommu/sun55i-iommu-pgtable.h
@@ -0,0 +1,125 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/* Copyright(c) 2020 - 2023 Allwinner Technology Co.,Ltd. All rights reserved. */
+/*
+ * Allwinner's pgtable controller
+ *
+ * Copyright (c) 2023, ouyangkun <ouyangkun@allwinnertech.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ *
+ */
+#ifndef __SUNXI_IOMMU_PGTABLE__
+#define __SUNXI_IOMMU_PGTABLE__
+#include <linux/iommu.h>
+
+#define SUNXI_PHYS_OFFSET 0x40000000UL
+
+#define IOMMU_VA_BITS 32
+
+#define IOMMU_PD_SHIFT 20
+#define IOMMU_PD_MASK (~((1UL << IOMMU_PD_SHIFT) - 1))
+
+#define IOMMU_PT_SHIFT 12
+#define IOMMU_PT_MASK (~((1UL << IOMMU_PT_SHIFT) - 1))
+
+#define SPAGE_SIZE (1 << IOMMU_PT_SHIFT)
+#define SPD_SIZE (1 << IOMMU_PD_SHIFT)
+#define SPAGE_ALIGN(addr) ALIGN(addr, SPAGE_SIZE)
+#define SPDE_ALIGN(addr) ALIGN(addr, SPD_SIZE)
+
+/*
+ * This version Hardware just only support 4KB page. It have
+ * a two level page table structure, where the first level has
+ * 4096 entries, and the second level has 256 entries. And, the
+ * first level is "Page Directory(PG)", every entry include a
+ * Page Table base address and a few of control bits. Second
+ * level is "Page Table(PT)", every entry include a physical
+ * page address and a few of control bits. Each entry is one
+ * 32-bit word. Most of the bits in the second level entry are
+ * used by hardware.
+ *
+ * Virtual Address Format:
+ * 31 20|19 12|11 0
+ * +-----------------+------------+--------+
+ * | PDE Index | PTE Index | offset |
+ * +-----------------+------------+--------+
+ *
+ * Table Layout:
+ *
+ * First Level Second Level
+ * (Page Directory) (Page Table)
+ * ----+---------+0
+ * ^ | PDE | ---> -+--------+----
+ * | ----------+1 | PTE | ^
+ * | | | +--------+ |
+ * ----------+2 | | 1K
+ * 16K | | +--------+ |
+ * ----------+3 | | v
+ * | | | +--------+----
+ * | ----------
+ * | | |
+ * v | |
+ * ----+--------+
+ *
+ * IOPDE:
+ * 31 10|9 0
+ * +------------------------+--------+
+ * | PTE Base Address |CTRL BIT|
+ * +------------------------+--------+
+ *
+ * IOPTE:
+ * 31 12|11 0
+ * +---------------------+-----------+
+ * | Phy Page Address | CTRL BIT |
+ * +---------------------+-----------+
+ *
+ * cpu phy 0x0000 0000 ~ 0x4000 0000 is reserved for IO access,
+ * so an iommu phy in 0x0000 0000 ~ 0x4000 0000 should not be used
+ * as a cpu phy directly; move this address space beyond the iommu
+ * phy max, so iommu phys 0x0000 0000 ~ 0x4000 0000 become
+ * iommu_phy_max + 0x0000 0000 ~ iommu_phy_max + 0x4000 0000 (as
+ * the spec says)
+ */
+
+static inline dma_addr_t iommu_phy_to_cpu_phy(dma_addr_t iommu_phy)
+{
+ return iommu_phy < SUNXI_PHYS_OFFSET ?
+ iommu_phy + (1ULL << IOMMU_VA_BITS) :
+ iommu_phy;
+}
+
+static inline dma_addr_t cpu_phy_to_iommu_phy(dma_addr_t cpu_phy)
+{
+ /* inverse of iommu_phy_to_cpu_phy(): use >= so cpu phy 1ULL << 32 maps back to iommu phy 0 */
+ return cpu_phy >= (1ULL << IOMMU_VA_BITS) ?
+ cpu_phy - (1ULL << IOMMU_VA_BITS) : cpu_phy;
+}
+
+int sunxi_pgtable_prepare_l1_tables(unsigned int *pgtable,
+ dma_addr_t iova_start, dma_addr_t iova_end,
+ int prot);
+int sunxi_pgtable_prepare_l2_tables(unsigned int *pgtable,
+ dma_addr_t iova_start, dma_addr_t iova_end,
+ phys_addr_t paddr, int prot);
+int sunxi_pgtable_delete_l2_tables(unsigned int *pgtable, dma_addr_t iova_start,
+ dma_addr_t iova_end);
+phys_addr_t sunxi_pgtable_iova_to_phys(unsigned int *pgtable, dma_addr_t iova);
+int sunxi_pgtable_invalid_helper(unsigned int *pgtable, dma_addr_t iova);
+void sunxi_pgtable_clear(unsigned int *pgtable);
+unsigned int *sunxi_pgtable_alloc(void);
+void sunxi_pgtable_free(unsigned int *pgtable);
+ssize_t sunxi_pgtable_dump(unsigned int *pgtable, ssize_t len, char *buf,
+ size_t buf_len, bool for_sysfs_show);
+struct kmem_cache *sunxi_pgtable_alloc_pte_cache(void);
+void sunxi_pgtable_free_pte_cache(struct kmem_cache *iopte_cache);
+void sunxi_pgtable_set_dma_dev(struct device *dma_dev);
+
+#endif
diff --git a/drivers/iommu/sun55i-iommu.c b/drivers/iommu/sun55i-iommu.c
new file mode 100644
index 000000000000..111111111111
--- /dev/null
+++ b/drivers/iommu/sun55i-iommu.c
@@ -0,0 +1,1606 @@
+// SPDX-License-Identifier: GPL-2.0-or-later
+/* Copyright(c) 2020 - 2023 Allwinner Technology Co.,Ltd. All rights reserved. */
+/*******************************************************************************
+ * Copyright (C) 2016-2018, Allwinner Technology CO., LTD.
+ * Author: zhuxianbin <zhuxianbin@allwinnertech.com>
+ *
+ * This file is provided under a dual BSD/GPL license. When using or
+ * redistributing this file, you may do so under either license.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ ******************************************************************************/
+#include <linux/module.h>
+#include <linux/of_platform.h>
+#include <linux/platform_device.h>
+#include <linux/interrupt.h>
+#include <linux/of_irq.h>
+#include <linux/err.h>
+#include <linux/slab.h>
+#include <linux/delay.h>
+#include <linux/iommu.h>
+#include <linux/dma-mapping.h>
+#include <linux/clk.h>
+#include <linux/sizes.h>
+#include <linux/device.h>
+#include <asm/cacheflush.h>
+#include <linux/pm_runtime.h>
+#include <linux/version.h>
+#include <linux/of_fdt.h>
+#include <linux/libfdt.h>
+
+#include <sunxi-iommu.h>
+#include "sun55i-iommu.h"
+
+/*
+ * Register of IOMMU device
+ */
+#define IOMMU_VERSION_REG 0x0000
+#define IOMMU_RESET_REG 0x0010
+#define IOMMU_ENABLE_REG 0x0020
+#define IOMMU_BYPASS_REG 0x0030
+#define IOMMU_AUTO_GATING_REG 0x0040
+#define IOMMU_WBUF_CTRL_REG 0x0044
+#define IOMMU_OOO_CTRL_REG 0x0048
+#define IOMMU_4KB_BDY_PRT_CTRL_REG 0x004C
+#define IOMMU_TTB_REG 0x0050
+#define IOMMU_TLB_ENABLE_REG 0x0060
+#define IOMMU_TLB_PREFETCH_REG 0x0070
+#define IOMMU_TLB_FLUSH_ENABLE_REG 0x0080
+#define IOMMU_TLB_IVLD_MODE_SEL_REG 0x0084
+#define IOMMU_TLB_IVLD_START_ADDR_REG 0x0088
+#define IOMMU_TLB_IVLD_END_ADDR_REG 0x008C
+#define IOMMU_TLB_IVLD_ADDR_REG 0x0090
+#define IOMMU_TLB_IVLD_ADDR_MASK_REG 0x0094
+#define IOMMU_TLB_IVLD_ENABLE_REG 0x0098
+#define IOMMU_PC_IVLD_MODE_SEL_REG 0x009C
+#define IOMMU_PC_IVLD_ADDR_REG 0x00A0
+#define IOMMU_PC_IVLD_START_ADDR_REG 0x00A4
+#define IOMMU_PC_IVLD_ENABLE_REG 0x00A8
+#define IOMMU_PC_IVLD_END_ADDR_REG 0x00AC
+#define IOMMU_DM_AUT_CTRL_REG0 0x00B0
+#define IOMMU_DM_AUT_CTRL_REG1 0x00B4
+#define IOMMU_DM_AUT_CTRL_REG2 0x00B8
+#define IOMMU_DM_AUT_CTRL_REG3 0x00BC
+#define IOMMU_DM_AUT_CTRL_REG4 0x00C0
+#define IOMMU_DM_AUT_CTRL_REG5 0x00C4
+#define IOMMU_DM_AUT_CTRL_REG6 0x00C8
+#define IOMMU_DM_AUT_CTRL_REG7 0x00CC
+#define IOMMU_DM_AUT_OVWT_REG 0x00D0
+#define IOMMU_INT_ENABLE_REG 0x0100
+#define IOMMU_INT_CLR_REG 0x0104
+#define IOMMU_INT_STA_REG 0x0108
+#define IOMMU_INT_ERR_ADDR_REG0 0x0110
+
+#define IOMMU_INT_ERR_ADDR_REG1 0x0114
+#define IOMMU_INT_ERR_ADDR_REG2 0x0118
+
+#define IOMMU_INT_ERR_ADDR_REG3 0x011C
+#define IOMMU_INT_ERR_ADDR_REG4 0x0120
+#define IOMMU_INT_ERR_ADDR_REG5 0x0124
+
+#define IOMMU_INT_ERR_ADDR_REG6 0x0128
+#define IOMMU_INT_ERR_ADDR_REG7 0x0130
+#define IOMMU_INT_ERR_ADDR_REG8 0x0134
+
+#define IOMMU_INT_ERR_DATA_REG0 0x0150
+#define IOMMU_INT_ERR_DATA_REG1 0x0154
+#define IOMMU_INT_ERR_DATA_REG2 0x0158
+#define IOMMU_INT_ERR_DATA_REG3 0x015C
+#define IOMMU_INT_ERR_DATA_REG4 0x0160
+#define IOMMU_INT_ERR_DATA_REG5 0x0164
+
+#define IOMMU_INT_ERR_DATA_REG6 0x0168
+#define IOMMU_INT_ERR_DATA_REG7 0x0170
+#define IOMMU_INT_ERR_DATA_REG8 0x0174
+
+#define IOMMU_L1PG_INT_REG 0x0180
+#define IOMMU_L2PG_INT_REG 0x0184
+#define IOMMU_VA_REG 0x0190
+#define IOMMU_VA_DATA_REG 0x0194
+#define IOMMU_VA_CONFIG_REG 0x0198
+#define IOMMU_PMU_ENABLE_REG 0x0200
+#define IOMMU_PMU_CLR_REG 0x0210
+#define IOMMU_PMU_ACCESS_LOW_REG0 0x0230
+#define IOMMU_PMU_ACCESS_HIGH_REG0 0x0234
+#define IOMMU_PMU_HIT_LOW_REG0 0x0238
+#define IOMMU_PMU_HIT_HIGH_REG0 0x023C
+#define IOMMU_PMU_ACCESS_LOW_REG1 0x0240
+#define IOMMU_PMU_ACCESS_HIGH_REG1 0x0244
+#define IOMMU_PMU_HIT_LOW_REG1 0x0248
+#define IOMMU_PMU_HIT_HIGH_REG1 0x024C
+#define IOMMU_PMU_ACCESS_LOW_REG2 0x0250
+#define IOMMU_PMU_ACCESS_HIGH_REG2 0x0254
+#define IOMMU_PMU_HIT_LOW_REG2 0x0258
+#define IOMMU_PMU_HIT_HIGH_REG2 0x025C
+#define IOMMU_PMU_ACCESS_LOW_REG3 0x0260
+#define IOMMU_PMU_ACCESS_HIGH_REG3 0x0264
+#define IOMMU_PMU_HIT_LOW_REG3 0x0268
+#define IOMMU_PMU_HIT_HIGH_REG3 0x026C
+#define IOMMU_PMU_ACCESS_LOW_REG4 0x0270
+#define IOMMU_PMU_ACCESS_HIGH_REG4 0x0274
+#define IOMMU_PMU_HIT_LOW_REG4 0x0278
+#define IOMMU_PMU_HIT_HIGH_REG4 0x027C
+#define IOMMU_PMU_ACCESS_LOW_REG5 0x0280
+#define IOMMU_PMU_ACCESS_HIGH_REG5 0x0284
+#define IOMMU_PMU_HIT_LOW_REG5 0x0288
+#define IOMMU_PMU_HIT_HIGH_REG5 0x028C
+
+#define IOMMU_PMU_ACCESS_LOW_REG6 0x0290
+#define IOMMU_PMU_ACCESS_HIGH_REG6 0x0294
+#define IOMMU_PMU_HIT_LOW_REG6 0x0298
+#define IOMMU_PMU_HIT_HIGH_REG6 0x029C
+#define IOMMU_PMU_ACCESS_LOW_REG7 0x02D0
+#define IOMMU_PMU_ACCESS_HIGH_REG7 0x02D4
+#define IOMMU_PMU_HIT_LOW_REG7 0x02D8
+#define IOMMU_PMU_HIT_HIGH_REG7 0x02DC
+#define IOMMU_PMU_ACCESS_LOW_REG8 0x02E0
+#define IOMMU_PMU_ACCESS_HIGH_REG8 0x02E4
+#define IOMMU_PMU_HIT_LOW_REG8 0x02E8
+#define IOMMU_PMU_HIT_HIGH_REG8 0x02EC
+
+#define IOMMU_PMU_TL_LOW_REG0 0x0300
+#define IOMMU_PMU_TL_HIGH_REG0 0x0304
+#define IOMMU_PMU_ML_REG0 0x0308
+
+#define IOMMU_PMU_TL_LOW_REG1 0x0310
+#define IOMMU_PMU_TL_HIGH_REG1 0x0314
+#define IOMMU_PMU_ML_REG1 0x0318
+
+#define IOMMU_PMU_TL_LOW_REG2 0x0320
+#define IOMMU_PMU_TL_HIGH_REG2 0x0324
+#define IOMMU_PMU_ML_REG2 0x0328
+
+#define IOMMU_PMU_TL_LOW_REG3 0x0330
+#define IOMMU_PMU_TL_HIGH_REG3 0x0334
+#define IOMMU_PMU_ML_REG3 0x0338
+
+#define IOMMU_PMU_TL_LOW_REG4 0x0340
+#define IOMMU_PMU_TL_HIGH_REG4 0x0344
+#define IOMMU_PMU_ML_REG4 0x0348
+
+#define IOMMU_PMU_TL_LOW_REG5 0x0350
+#define IOMMU_PMU_TL_HIGH_REG5 0x0354
+#define IOMMU_PMU_ML_REG5 0x0358
+
+#define IOMMU_PMU_TL_LOW_REG6 0x0360
+#define IOMMU_PMU_TL_HIGH_REG6 0x0364
+#define IOMMU_PMU_ML_REG6 0x0368
+
+#define IOMMU_RESET_SHIFT 31
+#define IOMMU_RESET_MASK (1U << IOMMU_RESET_SHIFT)
+#define IOMMU_RESET_SET (0U << IOMMU_RESET_SHIFT)
+#define IOMMU_RESET_RELEASE (1U << IOMMU_RESET_SHIFT)
+
+/*
+ * IOMMU enable register field
+ */
+#define IOMMU_ENABLE 0x1
+
+/*
+ * IOMMU interrupt id mask
+ */
+#define MICRO_TLB0_INVALID_INTER_MASK 0x1
+#define MICRO_TLB1_INVALID_INTER_MASK 0x2
+#define MICRO_TLB2_INVALID_INTER_MASK 0x4
+#define MICRO_TLB3_INVALID_INTER_MASK 0x8
+#define MICRO_TLB4_INVALID_INTER_MASK 0x10
+#define MICRO_TLB5_INVALID_INTER_MASK 0x20
+#define MICRO_TLB6_INVALID_INTER_MASK 0x40
+
+#define L1_PAGETABLE_INVALID_INTER_MASK 0x10000
+#define L2_PAGETABLE_INVALID_INTER_MASK 0x20000
+
+/**
+ * sun8iw15p1
+ * DE : masterID 0
+ * E_EDMA: masterID 1
+ * E_FE: masterID 2
+ * VE: masterID 3
+ * CSI: masterID 4
+ * G2D: masterID 5
+ * E_BE: masterID 6
+ *
+ * sun50iw9p1:
+ * DE : masterID 0
+ * DI: masterID 1
+ * VE_R: masterID 2
+ * VE: masterID 3
+ * CSI0: masterID 4
+ * CSI1: masterID 5
+ * G2D: masterID 6
+ * sun8iw19p1:
+ * DE :>--->-------masterID 0
+ * EISE: masterID 1
+ * AI: masterID 2
+ * VE:>---->-------masterID 3
+ * CSI: >-->----masterID 4
+ * ISP:>-->------ masterID 5
+ * G2D:>--->-------masterID 6
+ * sun8iw21:
+ * VE : masterID 0
+ * CSI: masterID 1
+ * DE: masterID 2
+ * G2D: masterID 3
+ * ISP: masterID 4
+ * RISCV: masterID 5
+ * NPU: masterID 6
+ */
+#define DEFAULT_BYPASS_VALUE 0x7f
+static const u32 master_id_bitmap[] = {0x1, 0x2, 0x4, 0x8, 0x10, 0x20, 0x40};
+
+#define sunxi_wait_when(COND, MS) ({ \
+ unsigned long timeout__ = jiffies + msecs_to_jiffies(MS) + 1; \
+ int ret__ = 0; \
+ while ((COND)) { \
+ if (time_after(jiffies, timeout__)) { \
+ ret__ = (!COND) ? 0 : -ETIMEDOUT; \
+ break; \
+ } \
+ udelay(1); \
+ } \
+ ret__; \
+})
+
+/*
+ * The format of device tree, and client device how to use it.
+ *
+ * /{
+ * ....
+ * smmu: iommu@xxxxx {
+ * compatible = "allwinner,iommu";
+ * reg = <xxx xxx xxx xxx>;
+ * interrupts = <GIC_SPI xxx IRQ_TYPE_LEVEL_HIGH>;
+ * interrupt-names = "iommu-irq";
+ * clocks = <&iommu_clk>;
+ * clock-name = "iommu-clk";
+ * #iommu-cells = <1>;
+ * status = "enabled";
+ * };
+ *
+ * de@xxxxx {
+ * .....
+ * iommus = <&smmu ID>;
+ * };
+ *
+ * }
+ *
+ * Here, ID number is 0 ~ 5, every client device have a unique id.
+ * Every id represent a micro TLB, also represent a master device.
+ *
+ */
+
+enum sunxi_iommu_version {
+ IOMMU_VERSION_V10 = 0x10,
+ IOMMU_VERSION_V11,
+ IOMMU_VERSION_V12,
+ IOMMU_VERSION_V13,
+ IOMMU_VERSION_V14,
+};
+
+struct sunxi_iommu_plat_data {
+ u32 version;
+ u32 tlb_prefetch;
+ u32 tlb_invalid_mode;
+ u32 ptw_invalid_mode;
+ const char *master[8];
+};
+
+struct sunxi_iommu_dev {
+ struct iommu_device iommu;
+ struct device *dev;
+ void __iomem *base;
+ struct clk *clk;
+ int irq;
+ u32 bypass;
+ spinlock_t iommu_lock;
+ struct list_head rsv_list;
+ const struct sunxi_iommu_plat_data *plat_data;
+ struct iommu_domain *identity_domain;
+ struct sunxi_iommu_domain *debug_domain;
+};
+
+struct sunxi_iommu_domain {
+ unsigned int *pgtable; /* first page directory, size is 16KB */
+ u32 *sg_buffer;
+ struct spinlock dt_lock; /* lock for modifying page table @ pgtable */
+ struct dma_iommu_mapping *mapping;
+ struct iommu_domain domain;
+ /* struct iova_domain iovad; */
+ /* list of master device, it represent a micro TLB */
+ struct list_head mdevs;
+ spinlock_t lock;
+};
+
+/*
+ * sunxi master device which use iommu.
+ */
+struct sunxi_mdev {
+ struct list_head node; /* for sunxi_iommu mdevs list */
+ struct device *dev; /* the master device */
+ unsigned int tlbid; /* micro TLB id, distinguish device by it */
+ bool flag;
+};
+
+struct sunxi_iommu_owner {
+ unsigned int tlbid;
+ bool flag;
+ struct sunxi_iommu_dev *data;
+ struct device *dev;
+ struct dma_iommu_mapping *mapping;
+};
+
+#define _max(x, y) (((u64)(x) > (u64)(y)) ? (x) : (y))
+
+static struct kmem_cache *iopte_cache;
+static struct sunxi_iommu_dev *global_iommu_dev;
+static bool iommu_hw_init_flag;
+static struct device *dma_dev;
+
+static sunxi_iommu_fault_cb sunxi_iommu_fault_notify_cbs[7];
+u32 sunxi_iommu_dump_rsv_list(struct list_head *rsv_list, ssize_t len,
+ char *buf, size_t buf_len, bool for_sysfs_show);
+int sunxi_iommu_check_cmd(struct device *dev, void *data);
+
+void sun55i_iommu_register_fault_cb(sunxi_iommu_fault_cb cb, unsigned int master_id)
+{
+ if (master_id >= ARRAY_SIZE(sunxi_iommu_fault_notify_cbs))
+ return;
+ sunxi_iommu_fault_notify_cbs[master_id] = cb;
+}
+EXPORT_SYMBOL_GPL(sun55i_iommu_register_fault_cb);
+
+static inline u32 sunxi_iommu_read(struct sunxi_iommu_dev *iommu,
+ u32 offset)
+{
+ return readl(iommu->base + offset);
+}
+
+static inline void sunxi_iommu_write(struct sunxi_iommu_dev *iommu,
+ u32 offset, u32 value)
+{
+ writel(value, iommu->base + offset);
+}
+
+void sun55i_reset_device_iommu(unsigned int master_id)
+{
+ unsigned int regval;
+ struct sunxi_iommu_dev *iommu = global_iommu_dev;
+
+ if (master_id >= 7)
+ return;
+
+ if (!iommu)
+ return;
+
+ regval = sunxi_iommu_read(iommu, IOMMU_RESET_REG);
+ sunxi_iommu_write(iommu, IOMMU_RESET_REG, regval & (~(1 << master_id)));
+ regval = sunxi_iommu_read(iommu, IOMMU_RESET_REG);
+ if (!(regval & ((1 << master_id)))) {
+ sunxi_iommu_write(iommu, IOMMU_RESET_REG, regval | ((1 << master_id)));
+ }
+}
+EXPORT_SYMBOL(sun55i_reset_device_iommu);
+
+void sun55i_enable_device_iommu(struct sunxi_iommu_dev *iommu, unsigned int master_id, bool flag)
+{
+ unsigned long mflag;
+
+ if (!iommu)
+ return;
+
+ if (master_id >= ARRAY_SIZE(master_id_bitmap))
+ return;
+
+ spin_lock_irqsave(&iommu->iommu_lock, mflag);
+ if (flag)
+ iommu->bypass &= ~(master_id_bitmap[master_id]);
+ else
+ iommu->bypass |= master_id_bitmap[master_id];
+ sunxi_iommu_write(iommu, IOMMU_BYPASS_REG, iommu->bypass);
+ spin_unlock_irqrestore(&iommu->iommu_lock, mflag);
+}
+EXPORT_SYMBOL(sun55i_enable_device_iommu);
+
+static int sun55i_tlb_flush(struct sunxi_iommu_dev *iommu)
+{
+ int ret;
+
+ /* enable the maximum number(7) of master to fit all platform */
+ sunxi_iommu_write(iommu, IOMMU_TLB_FLUSH_ENABLE_REG, 0x0003007f);
+ ret = sunxi_wait_when(
+ (sunxi_iommu_read(iommu, IOMMU_TLB_FLUSH_ENABLE_REG)), 2);
+ if (ret)
+ dev_err(iommu->dev, "Enable flush all request timed out\n");
+
+ return ret;
+}
+
+/*
+ * Program the IOMMU hardware: page-table base (if a domain is given),
+ * prefetch/invalidate modes, interrupt mask and bypass bits, then flush
+ * the TLB and turn translation on. Sets iommu_hw_init_flag on success.
+ * Returns 0 on success or a negative errno / timeout status.
+ */
+static int sun55i_iommu_hw_init(struct sunxi_iommu_dev *iommu, struct sunxi_iommu_domain *sunxi_domain)
+{
+	int ret = 0;
+	int iommu_enable = 0;
+	unsigned long mflag;
+	const struct sunxi_iommu_plat_data *plat_data = iommu->plat_data;
+
+	spin_lock_irqsave(&iommu->iommu_lock, mflag);
+
+	if (sunxi_domain) {
+		phys_addr_t dte_addr = __pa(sunxi_domain->pgtable);
+		sunxi_iommu_write(iommu, IOMMU_TTB_REG, dte_addr);
+	}
+
+	/*
+	 * set prefetch functions, including:
+	 * master prefetching and only prefetch valid page to TLB/PTW
+	 */
+	sunxi_iommu_write(iommu, IOMMU_TLB_PREFETCH_REG, plat_data->tlb_prefetch);
+	sunxi_iommu_write(iommu, IOMMU_TLB_IVLD_MODE_SEL_REG, plat_data->tlb_invalid_mode);
+	sunxi_iommu_write(iommu, IOMMU_PC_IVLD_MODE_SEL_REG, plat_data->ptw_invalid_mode);
+
+	/* interrupt mask 0x3003f — presumably fault irqs on, prefetch irqs off; TODO confirm bit map */
+	sunxi_iommu_write(iommu, IOMMU_INT_ENABLE_REG, 0x3003f);
+	sunxi_iommu_write(iommu, IOMMU_BYPASS_REG, iommu->bypass);
+
+	ret = sun55i_tlb_flush(iommu);
+	if (ret) {
+		dev_err(iommu->dev, "Enable flush all request timed out\n");
+		goto out;
+	}
+	sunxi_iommu_write(iommu, IOMMU_AUTO_GATING_REG, 0x1);
+	sunxi_iommu_write(iommu, IOMMU_ENABLE_REG, IOMMU_ENABLE);
+	/* read back twice to tolerate a slow first readback — TODO confirm why */
+	iommu_enable = sunxi_iommu_read(iommu, IOMMU_ENABLE_REG);
+	if (iommu_enable != 0x1) {
+		iommu_enable = sunxi_iommu_read(iommu, IOMMU_ENABLE_REG);
+		if (iommu_enable != 0x1) {
+			dev_err(iommu->dev, "iommu enable failed! No iommu in bitfile!\n");
+			ret = -ENODEV;
+			goto out;
+		}
+	}
+	iommu_hw_init_flag = true;
+
+out:
+	spin_unlock_irqrestore(&iommu->iommu_lock, mflag);
+
+	return ret;
+}
+
+/*
+ * Invalidate the TLB entries covering [iova, iova_mask).
+ * NOTE(review): despite its name, @iova_mask is used as the range end
+ * address, not a mask — consider renaming.
+ * Returns 0 on success, nonzero on hardware timeout.
+ */
+static int sun55i_tlb_invalid(dma_addr_t iova, dma_addr_t iova_mask)
+{
+	struct sunxi_iommu_dev *iommu = global_iommu_dev;
+	dma_addr_t iova_end = iova_mask;
+	int ret = 0;
+	unsigned long mflag;
+
+	spin_lock_irqsave(&iommu->iommu_lock, mflag);
+	/* new TLB invalid function: use range(start, end) to invalid TLB page */
+	pr_debug("iommu: TLB invalid:0x%x-0x%x\n", (unsigned int)iova,
+		 (unsigned int)iova_end);
+	sunxi_iommu_write(iommu, IOMMU_TLB_IVLD_START_ADDR_REG, iova);
+	sunxi_iommu_write(iommu, IOMMU_TLB_IVLD_END_ADDR_REG, iova_end);
+	sunxi_iommu_write(iommu, IOMMU_TLB_IVLD_ENABLE_REG, 0x1);
+
+	/* hardware clears the enable bit when the invalidate completes */
+	ret = sunxi_wait_when(
+		(sunxi_iommu_read(iommu, IOMMU_TLB_IVLD_ENABLE_REG)&0x1), 2);
+	if (ret) {
+		dev_err(iommu->dev, "TLB cache invalid timed out\n");
+	}
+	spin_unlock_irqrestore(&iommu->iommu_lock, mflag);
+
+	return ret;
+}
+
+/*
+ * Invalidate page-table-walk (PTW) cache entries for [iova_start, iova_end).
+ * Returns 0 on success, nonzero on hardware timeout.
+ */
+static int sun55i_ptw_cache_invalid(dma_addr_t iova_start, dma_addr_t iova_end)
+{
+	struct sunxi_iommu_dev *iommu = global_iommu_dev;
+	int ret = 0;
+	unsigned long mflag;
+
+	spin_lock_irqsave(&iommu->iommu_lock, mflag);
+	/* new PTW invalid function: use range(start, end) to invalid PTW page */
+	pr_debug("iommu: PTW invalid:0x%x-0x%x\n", (unsigned int)iova_start,
+		 (unsigned int)iova_end);
+	/* an end address of 0 indicates a caller bug (empty/underflowed range) */
+	WARN_ON(iova_end == 0);
+	sunxi_iommu_write(iommu, IOMMU_PC_IVLD_START_ADDR_REG, iova_start);
+	sunxi_iommu_write(iommu, IOMMU_PC_IVLD_END_ADDR_REG, iova_end);
+	sunxi_iommu_write(iommu, IOMMU_PC_IVLD_ENABLE_REG, 0x1);
+
+	/* hardware clears the enable bit when the invalidate completes */
+	ret = sunxi_wait_when(
+		(sunxi_iommu_read(iommu, IOMMU_PC_IVLD_ENABLE_REG)&0x1), 2);
+	if (ret) {
+		dev_err(iommu->dev, "PTW cache invalid timed out\n");
+		goto out;
+	}
+
+out:
+	spin_unlock_irqrestore(&iommu->iommu_lock, mflag);
+
+	return ret;
+}
+
+/*
+ * Partially invalidate TLB/PTW caches around a newly mapped range
+ * (head and tail, with some over-invalidation) instead of the full range.
+ * NOTE(review): assumes size >= SPAGE_SIZE — smaller sizes make
+ * "iova + size - SPAGE_SIZE" underflow; confirm callers guarantee this.
+ */
+static void sun55i_zap_tlb(unsigned long iova, size_t size)
+{
+	sun55i_tlb_invalid(iova, iova + 2 * SPAGE_SIZE);
+	sun55i_tlb_invalid(iova + size - SPAGE_SIZE, iova + size + 8 * SPAGE_SIZE);
+	sun55i_ptw_cache_invalid(iova, iova + SPD_SIZE);
+	sun55i_ptw_cache_invalid(iova + size - SPD_SIZE, iova + size);
+
+	return;
+}
+
+/*
+ * .map_pages callback: map @count pages of @size at @iova -> @paddr.
+ * Builds the L1 (directory) entries first, then fills the L2 page tables,
+ * all under dt_lock. On success *mapped is set to the full byte count.
+ * Returns 0 on success, -ENOMEM if L1 table allocation fails.
+ */
+static int sun55i_iommu_map(struct iommu_domain *domain, unsigned long iova,
+			    phys_addr_t paddr, size_t size, size_t count, int prot,
+			    gfp_t gfp, size_t *mapped)
+{
+	struct sunxi_iommu_domain *sunxi_domain;
+	size_t iova_start, iova_end;
+	unsigned long total_size = size * count;
+	int ret;
+	unsigned long flags;
+
+	sunxi_domain = container_of(domain, struct sunxi_iommu_domain, domain);
+	WARN_ON(sunxi_domain->pgtable == NULL);
+
+	/* round the range out to whole small pages */
+	iova_start = iova & IOMMU_PT_MASK;
+	iova_end = SPAGE_ALIGN(iova + total_size);
+
+	spin_lock_irqsave(&sunxi_domain->dt_lock, flags);
+
+	ret = sunxi_pgtable_prepare_l1_tables(sunxi_domain->pgtable, iova_start,
+					      iova_end, prot);
+	if (ret) {
+		spin_unlock_irqrestore(&sunxi_domain->dt_lock, flags);
+		if (mapped)
+			*mapped = 0;
+		return -ENOMEM;
+	}
+
+	sunxi_pgtable_prepare_l2_tables(sunxi_domain->pgtable,
+					iova_start, iova_end, paddr, prot);
+
+	spin_unlock_irqrestore(&sunxi_domain->dt_lock, flags);
+
+	if (mapped)
+		*mapped = total_size;
+
+	return 0;
+}
+
+/*
+ * .unmap_pages callback: tear down L2 entries for the page-aligned range,
+ * invalidating TLB and PTW caches before and while freeing tables.
+ * The iotlb_gather window is widened to cover the whole range so the core
+ * can flush it later. Returns the number of bytes unmapped (0 on error).
+ */
+static size_t sun55i_iommu_unmap(struct iommu_domain *domain, unsigned long iova,
+				 size_t size, size_t count,
+				 struct iommu_iotlb_gather *gather)
+{
+	struct sunxi_iommu_domain *sunxi_domain;
+	const struct sunxi_iommu_plat_data *plat_data;
+	size_t iova_start, iova_end;
+	unsigned long total_size = size * count;
+	int iova_tail_size;
+	unsigned long flags;
+
+	sunxi_domain = container_of(domain, struct sunxi_iommu_domain, domain);
+	plat_data = global_iommu_dev->plat_data;
+	WARN_ON(sunxi_domain->pgtable == NULL);
+
+	iova_start = iova & IOMMU_PT_MASK;
+	iova_end = SPAGE_ALIGN(iova + total_size);
+
+	if (gather) {
+		if (gather->start > iova_start)
+			gather->start = iova_start;
+		if (gather->end < iova_end)
+			gather->end = iova_end;
+	}
+
+	spin_lock_irqsave(&sunxi_domain->dt_lock, flags);
+
+	sun55i_tlb_invalid(iova_start, iova_end);
+	sun55i_ptw_cache_invalid(iova_start, iova_end);
+
+	/* delete L2 tables chunk by chunk; each pass reports how much it freed */
+	for (; iova_start < iova_end; ) {
+		iova_tail_size = sunxi_pgtable_delete_l2_tables(
+			sunxi_domain->pgtable, iova_start, iova_end);
+		if (iova_tail_size < 0) {
+			spin_unlock_irqrestore(&sunxi_domain->dt_lock, flags);
+			return 0;
+		}
+		if (iova_tail_size == 0)
+			break;
+
+		sun55i_ptw_cache_invalid(iova_start, iova_start + iova_tail_size);
+		iova_start += iova_tail_size;
+	}
+	spin_unlock_irqrestore(&sunxi_domain->dt_lock, flags);
+
+	return total_size;
+}
+
+/*
+ * .iotlb_sync_map callback: after a map, zap the TLB/PTW entries around
+ * the new range so stale translations are not used. Always returns 0.
+ */
+static int sun55i_iommu_iotlb_sync_map(struct iommu_domain *domain,
+				       unsigned long iova, size_t size)
+{
+	struct sunxi_iommu_domain *sunxi_domain =
+		container_of(domain, struct sunxi_iommu_domain, domain);
+	unsigned long flags;
+
+	spin_lock_irqsave(&sunxi_domain->dt_lock, flags);
+	sun55i_zap_tlb(iova, size);
+	spin_unlock_irqrestore(&sunxi_domain->dt_lock, flags);
+
+	return 0;
+}
+
+/*
+ * .iova_to_phys callback: walk the software page table under dt_lock and
+ * return the physical address backing @iova (0 if unmapped).
+ */
+static phys_addr_t sun55i_iommu_iova_to_phys(struct iommu_domain *domain,
+					     dma_addr_t iova)
+{
+	struct sunxi_iommu_domain *sunxi_domain =
+		container_of(domain, struct sunxi_iommu_domain, domain);
+	phys_addr_t ret = 0;
+	unsigned long flags;
+
+
+	WARN_ON(sunxi_domain->pgtable == NULL);
+	spin_lock_irqsave(&sunxi_domain->dt_lock, flags);
+	ret = sunxi_pgtable_iova_to_phys(sunxi_domain->pgtable, iova);
+	spin_unlock_irqrestore(&sunxi_domain->dt_lock, flags);
+
+	return ret;
+}
+
+/*
+ * .domain_alloc_paging callback: allocate a paging domain with a fresh
+ * page table and a scatter-gather staging buffer, covering the full
+ * 32-bit IOVA aperture. The first domain allocated also triggers
+ * one-time hardware init (its page table becomes the active TTB).
+ * Returns NULL on allocation failure.
+ */
+static struct iommu_domain *sun55i_iommu_domain_alloc_paging(struct device *dev)
+{
+	struct sunxi_iommu_domain *sunxi_domain;
+
+	sunxi_domain = kzalloc(sizeof(*sunxi_domain), GFP_KERNEL);
+	if (!sunxi_domain)
+		return NULL;
+
+	sunxi_domain->pgtable = sunxi_pgtable_alloc();
+	if (!sunxi_domain->pgtable) {
+		pr_err("sunxi domain get pgtable failed\n");
+		goto err_page;
+	}
+
+	sunxi_domain->sg_buffer = (unsigned int *)__get_free_pages(
+		GFP_KERNEL, get_order(MAX_SG_TABLE_SIZE));
+	if (!sunxi_domain->sg_buffer) {
+		pr_err("sunxi domain get sg_buffer failed\n");
+		goto err_sg_buffer;
+	}
+
+	sunxi_domain->domain.geometry.aperture_start = 0;
+	sunxi_domain->domain.geometry.aperture_end = (1ULL << 32) - 1;
+	sunxi_domain->domain.geometry.force_aperture = true;
+	spin_lock_init(&sunxi_domain->dt_lock);
+
+	/* remember the latest domain for the sysfs page_debug dump */
+	if (global_iommu_dev)
+		global_iommu_dev->debug_domain = sunxi_domain;
+
+	/* lazy one-time hardware init, keyed off iommu_hw_init_flag */
+	if (!iommu_hw_init_flag) {
+		if (sun55i_iommu_hw_init(global_iommu_dev, sunxi_domain))
+			pr_err("sunxi iommu hardware init failed\n");
+	}
+
+	return &sunxi_domain->domain;
+
+err_sg_buffer:
+	sunxi_pgtable_free(sunxi_domain->pgtable);
+	sunxi_domain->pgtable = NULL;
+err_page:
+	kfree(sunxi_domain);
+
+	return NULL;
+}
+
+/*
+ * .free callback: clear the page table and flush the hardware TLB while
+ * holding dt_lock, then release the page table, sg buffer and domain.
+ */
+static void sun55i_iommu_domain_free(struct iommu_domain *domain)
+{
+	struct sunxi_iommu_domain *sunxi_domain =
+		container_of(domain, struct sunxi_iommu_domain, domain);
+	unsigned long flags;
+
+	spin_lock_irqsave(&sunxi_domain->dt_lock, flags);
+	sunxi_pgtable_clear(sunxi_domain->pgtable);
+	sun55i_tlb_flush(global_iommu_dev);
+	spin_unlock_irqrestore(&sunxi_domain->dt_lock, flags);
+	sunxi_pgtable_free(sunxi_domain->pgtable);
+	sunxi_domain->pgtable = NULL;
+	free_pages((unsigned long)sunxi_domain->sg_buffer,
+		   get_order(MAX_SG_TABLE_SIZE));
+	sunxi_domain->sg_buffer = NULL;
+	kfree(sunxi_domain);
+}
+
+/*
+ * .attach_dev callback — intentionally a no-op: all masters share the
+ * single TTB programmed at hw init.
+ * NOTE(review): domains allocated after the first never install their own
+ * page table; confirm only one paging domain is ever attached.
+ */
+static int sun55i_iommu_attach_dev(struct iommu_domain *domain,
+				   struct device *dev)
+{
+	return 0;
+}
+
+/*
+ * .probe_finalize callback: install DMA ops for the client device and
+ * switch its master port out of bypass according to owner->flag.
+ */
+static void sun55i_iommu_probe_device_finalize(struct device *dev)
+{
+	struct sunxi_iommu_owner *owner = dev_iommu_priv_get(dev);
+
+	WARN(!dev->dma_mask || *dev->dma_mask == 0, "NULL or 0 dma mask will fail iommu setup\n");
+	iommu_setup_dma_ops(dev);
+
+	sun55i_enable_device_iommu(owner->data, owner->tlbid, owner->flag);
+}
+
+/*
+ * .probe_device callback: only devices that went through of_xlate (and so
+ * have owner data) are IOMMU clients; everyone else gets -ENODEV.
+ */
+static struct iommu_device *sun55i_iommu_probe_device(struct device *dev)
+{
+	struct sunxi_iommu_owner *owner = dev_iommu_priv_get(dev);
+
+	if (!owner) /* Not a iommu client device */
+		return ERR_PTR(-ENODEV);
+
+	return &owner->data->iommu;
+}
+
+/*
+ * .release_device callback: put the master port back into bypass and free
+ * the per-device owner data allocated in of_xlate.
+ * NOTE(review): clearing dev->iommu_group and dev->dma_parms by hand is
+ * unusual — the IOMMU/driver core normally owns these; confirm necessity.
+ */
+static void sun55i_iommu_release_device(struct device *dev)
+{
+	struct sunxi_iommu_owner *owner = dev_iommu_priv_get(dev);
+
+	if (!owner)
+		return;
+
+	sun55i_enable_device_iommu(owner->data, owner->tlbid, false);
+	dev->iommu_group = NULL;
+	devm_kfree(dev, dev->dma_parms);
+	dev->dma_parms = NULL;
+	kfree(owner);
+	owner = NULL;
+	dev_iommu_priv_set(dev, NULL);
+}
+
+/*
+ * .of_xlate callback: bind a client device to this IOMMU instance.
+ * args[0] is the master/TLB id; optional args[1] is the enable flag.
+ * NOTE(review): of_find_device_by_node() takes a device reference that is
+ * never dropped — likely intentional to pin the IOMMU, but confirm.
+ */
+static int sun55i_iommu_of_xlate(struct device *dev,
+				 const struct of_phandle_args *args)
+{
+	struct sunxi_iommu_owner *owner = dev_iommu_priv_get(dev);
+	struct platform_device *sysmmu = of_find_device_by_node(args->np);
+	struct sunxi_iommu_dev *data;
+
+	if (!sysmmu)
+		return -ENODEV;
+
+	data = platform_get_drvdata(sysmmu);
+	if (data == NULL)
+		return -ENODEV;
+
+	/* first xlate for this device allocates its owner record */
+	if (!owner) {
+		owner = kzalloc(sizeof(*owner), GFP_KERNEL);
+		if (!owner)
+			return -ENOMEM;
+		owner->tlbid = args->args[0];
+		if (args->args_count > 1)
+			owner->flag = args->args[1];
+		else
+			owner->flag = 0;
+		owner->data = data;
+		owner->dev = dev;
+		dev_iommu_priv_set(dev, owner);
+	}
+
+	return 0;
+}
+
+/*
+ * IOMMU fault interrupt handler: decode which micro TLB or page-table
+ * level faulted, report the offending master/address/data, invalidate the
+ * TLB/PTW entries around the fault address, then clear the interrupt and
+ * reset the faulting master(s) via the RESET register.
+ * Bit 31 of the combined status marks a prefetch-triggered fault, which
+ * is benign and not reported.
+ */
+static irqreturn_t sunxi_iommu_irq(int irq, void *dev_id)
+{
+
+	u32 inter_status_reg = 0;
+	u32 addr_reg = 0;
+	u32 int_masterid_bitmap = 0;
+	u32 data_reg = 0;
+	u32 l1_pgint_reg = 0;
+	u32 l2_pgint_reg = 0;
+	u32 master_id = 0;
+	unsigned long mflag;
+	struct sunxi_iommu_dev *iommu = dev_id;
+	const struct sunxi_iommu_plat_data *plat_data = iommu->plat_data;
+
+	spin_lock_irqsave(&iommu->iommu_lock, mflag);
+	inter_status_reg = sunxi_iommu_read(iommu, IOMMU_INT_STA_REG) & 0x3ffff;
+	l1_pgint_reg = sunxi_iommu_read(iommu, IOMMU_L1PG_INT_REG);
+	l2_pgint_reg = sunxi_iommu_read(iommu, IOMMU_L2PG_INT_REG);
+	int_masterid_bitmap = inter_status_reg | l1_pgint_reg | l2_pgint_reg;
+
+	/* per-master fault registers: one ADDR/DATA pair per micro TLB */
+	if (inter_status_reg & MICRO_TLB0_INVALID_INTER_MASK) {
+		pr_err("%s Invalid Authority\n", plat_data->master[0]);
+		addr_reg = sunxi_iommu_read(iommu, IOMMU_INT_ERR_ADDR_REG0);
+		data_reg = sunxi_iommu_read(iommu, IOMMU_INT_ERR_DATA_REG0);
+	} else if (inter_status_reg & MICRO_TLB1_INVALID_INTER_MASK) {
+		pr_err("%s Invalid Authority\n", plat_data->master[1]);
+		addr_reg = sunxi_iommu_read(iommu, IOMMU_INT_ERR_ADDR_REG1);
+		data_reg = sunxi_iommu_read(iommu, IOMMU_INT_ERR_DATA_REG1);
+	} else if (inter_status_reg & MICRO_TLB2_INVALID_INTER_MASK) {
+		pr_err("%s Invalid Authority\n", plat_data->master[2]);
+		addr_reg = sunxi_iommu_read(iommu, IOMMU_INT_ERR_ADDR_REG2);
+		data_reg = sunxi_iommu_read(iommu, IOMMU_INT_ERR_DATA_REG2);
+	} else if (inter_status_reg & MICRO_TLB3_INVALID_INTER_MASK) {
+		pr_err("%s Invalid Authority\n", plat_data->master[3]);
+		addr_reg = sunxi_iommu_read(iommu, IOMMU_INT_ERR_ADDR_REG3);
+		data_reg = sunxi_iommu_read(iommu, IOMMU_INT_ERR_DATA_REG3);
+	} else if (inter_status_reg & MICRO_TLB4_INVALID_INTER_MASK) {
+		pr_err("%s Invalid Authority\n", plat_data->master[4]);
+		addr_reg = sunxi_iommu_read(iommu, IOMMU_INT_ERR_ADDR_REG4);
+		data_reg = sunxi_iommu_read(iommu, IOMMU_INT_ERR_DATA_REG4);
+	} else if (inter_status_reg & MICRO_TLB5_INVALID_INTER_MASK) {
+		pr_err("%s Invalid Authority\n", plat_data->master[5]);
+		addr_reg = sunxi_iommu_read(iommu, IOMMU_INT_ERR_ADDR_REG5);
+		data_reg = sunxi_iommu_read(iommu, IOMMU_INT_ERR_DATA_REG5);
+	} else if (inter_status_reg & MICRO_TLB6_INVALID_INTER_MASK) {
+		pr_err("%s Invalid Authority\n", plat_data->master[6]);
+		addr_reg = sunxi_iommu_read(iommu, IOMMU_INT_ERR_ADDR_REG6);
+		data_reg = sunxi_iommu_read(iommu, IOMMU_INT_ERR_DATA_REG6);
+	} else if (inter_status_reg & L1_PAGETABLE_INVALID_INTER_MASK) {
+		/* It's OK to prefetch an invalid page, no need to print msg for debug. */
+		if (!(int_masterid_bitmap & (1U << 31)))
+			pr_err("L1 PageTable Invalid\n");
+		addr_reg = sunxi_iommu_read(iommu, IOMMU_INT_ERR_ADDR_REG7);
+		data_reg = sunxi_iommu_read(iommu, IOMMU_INT_ERR_DATA_REG7);
+	} else if (inter_status_reg & L2_PAGETABLE_INVALID_INTER_MASK) {
+		if (!(int_masterid_bitmap & (1U << 31)))
+			pr_err("L2 PageTable Invalid\n");
+		addr_reg = sunxi_iommu_read(iommu, IOMMU_INT_ERR_ADDR_REG8);
+		data_reg = sunxi_iommu_read(iommu, IOMMU_INT_ERR_DATA_REG8);
+	} else
+		pr_err("sunxi iommu int error!!!\n");
+
+	if (!(int_masterid_bitmap & (1U << 31))) {
+		int_masterid_bitmap &= 0xffff;
+
+		if (int_masterid_bitmap) {
+			master_id = __ffs(int_masterid_bitmap);
+			pr_err("Bug is in %s module, invalid address: 0x%x, data:0x%x, id:0x%x\n",
+			       plat_data->master[master_id], addr_reg, data_reg,
+			       int_masterid_bitmap);
+
+			/* give the owning driver a chance to recover its hardware */
+			if (sunxi_iommu_fault_notify_cbs[master_id])
+				sunxi_iommu_fault_notify_cbs[master_id]();
+		} else {
+			pr_err("Bug in unknown module (id=0), invalid address: 0x%x, data:0x%x\n",
+			       addr_reg, data_reg);
+		}
+	}
+
+	/* invalid TLB */
+	sunxi_iommu_write(iommu, IOMMU_TLB_IVLD_START_ADDR_REG, addr_reg);
+	sunxi_iommu_write(iommu, IOMMU_TLB_IVLD_END_ADDR_REG, addr_reg + 4 * SPAGE_SIZE);
+	sunxi_iommu_write(iommu, IOMMU_TLB_IVLD_ENABLE_REG, 0x1);
+	while (sunxi_iommu_read(iommu, IOMMU_TLB_IVLD_ENABLE_REG) & 0x1)
+		;
+
+	/* invalid PTW */
+	sunxi_iommu_write(iommu, IOMMU_PC_IVLD_START_ADDR_REG, addr_reg);
+	sunxi_iommu_write(iommu, IOMMU_PC_IVLD_END_ADDR_REG, addr_reg + 2 * SPD_SIZE);
+	sunxi_iommu_write(iommu, IOMMU_PC_IVLD_ENABLE_REG, 0x1);
+	while (sunxi_iommu_read(iommu, IOMMU_PC_IVLD_ENABLE_REG) & 0x1)
+		;
+
+	/* ack the fault, then pulse RESET for the faulting masters */
+	sunxi_iommu_write(iommu, IOMMU_INT_CLR_REG, inter_status_reg);
+	inter_status_reg |= (l1_pgint_reg | l2_pgint_reg);
+	inter_status_reg &= 0xffff;
+	sunxi_iommu_write(iommu, IOMMU_RESET_REG, ~inter_status_reg);
+	sunxi_iommu_write(iommu, IOMMU_RESET_REG, 0xffffffff);
+	spin_unlock_irqrestore(&iommu->iommu_lock, mflag);
+
+	return IRQ_HANDLED;
+}
+
+/*
+ * sysfs "enable" read: report bit 0 of the PMU enable register.
+ * iommu_lock is also taken from the interrupt handler, so it must be
+ * acquired with interrupts disabled — plain spin_lock() here could
+ * self-deadlock if the IOMMU fault irq fired on this CPU.
+ */
+static ssize_t sunxi_iommu_enable_show(struct device *dev,
+		struct device_attribute *attr, char *buf)
+{
+	struct sunxi_iommu_dev *iommu = global_iommu_dev;
+	unsigned long flags;
+	u32 data;
+
+	spin_lock_irqsave(&iommu->iommu_lock, flags);
+	data = sunxi_iommu_read(iommu, IOMMU_PMU_ENABLE_REG);
+	spin_unlock_irqrestore(&iommu->iommu_lock, flags);
+
+	return scnprintf(buf, PAGE_SIZE,
+			"enable = %d\n", data & 0x1 ? 1 : 0);
+}
+
+/*
+ * sysfs "enable" write: nonzero enables the PMU and clears its counters;
+ * zero clears the counters and disables the PMU.
+ * Uses spin_lock_irqsave() because iommu_lock is also taken from the
+ * interrupt handler (plain spin_lock() risked a self-deadlock).
+ */
+static ssize_t sunxi_iommu_enable_store(struct device *dev,
+		struct device_attribute *attr,
+		const char *buf, size_t count)
+{
+	struct sunxi_iommu_dev *iommu = global_iommu_dev;
+	unsigned long val;
+	unsigned long flags;
+	u32 data;
+	int retval;
+
+	if (kstrtoul(buf, 0, &val))
+		return -EINVAL;
+
+	if (val) {
+		spin_lock_irqsave(&iommu->iommu_lock, flags);
+		data = sunxi_iommu_read(iommu, IOMMU_PMU_ENABLE_REG);
+		sunxi_iommu_write(iommu, IOMMU_PMU_ENABLE_REG, data | 0x1);
+		/* hardware clears the CLR bit once the counters are reset */
+		data = sunxi_iommu_read(iommu, IOMMU_PMU_CLR_REG);
+		sunxi_iommu_write(iommu, IOMMU_PMU_CLR_REG, data | 0x1);
+		retval = sunxi_wait_when((sunxi_iommu_read(iommu,
+				IOMMU_PMU_CLR_REG) & 0x1), 1);
+		if (retval)
+			dev_err(iommu->dev, "Clear PMU Count timed out\n");
+		spin_unlock_irqrestore(&iommu->iommu_lock, flags);
+	} else {
+		spin_lock_irqsave(&iommu->iommu_lock, flags);
+		data = sunxi_iommu_read(iommu, IOMMU_PMU_CLR_REG);
+		sunxi_iommu_write(iommu, IOMMU_PMU_CLR_REG, data | 0x1);
+		retval = sunxi_wait_when((sunxi_iommu_read(iommu,
+				IOMMU_PMU_CLR_REG) & 0x1), 1);
+		if (retval)
+			dev_err(iommu->dev, "Clear PMU Count timed out\n");
+		data = sunxi_iommu_read(iommu, IOMMU_PMU_ENABLE_REG);
+		sunxi_iommu_write(iommu, IOMMU_PMU_ENABLE_REG, data & ~0x1);
+		spin_unlock_irqrestore(&iommu->iommu_lock, flags);
+	}
+
+	return count;
+}
+
+/* Read one 64-bit PMU counter that is split across a {HIGH,LOW} register pair. */
+static u64 sunxi_iommu_pmu_read64(struct sunxi_iommu_dev *iommu, u32 high_reg,
+				  u32 low_reg, u32 high_mask)
+{
+	return ((u64)(sunxi_iommu_read(iommu, high_reg) & high_mask) << 32) |
+	       sunxi_iommu_read(iommu, low_reg);
+}
+
+/*
+ * sysfs "profilling" read: snapshot all PMU counters under iommu_lock and
+ * format them into @buf. Returns the number of bytes written, 0 on OOM.
+ * Bug fixes vs. the previous revision: the stray "err: return 0;" before
+ * the formatting step made the output unreachable and leaked the snapshot
+ * buffer on every read; the lock is now taken irq-safe because the fault
+ * interrupt handler takes the same lock.
+ */
+static ssize_t sunxi_iommu_profilling_show(struct device *dev,
+					   struct device_attribute *attr,
+					   char *buf)
+{
+	struct sunxi_iommu_dev *iommu = global_iommu_dev;
+	const struct sunxi_iommu_plat_data *plat_data = iommu->plat_data;
+	/* access/hit pairs: index 0..6 = micro TLBs, 7 = macro TLB, 8 = PTW cache */
+	static const struct { u32 acc_hi, acc_lo, hit_hi, hit_lo; } cnt_regs[9] = {
+		{ IOMMU_PMU_ACCESS_HIGH_REG0, IOMMU_PMU_ACCESS_LOW_REG0,
+		  IOMMU_PMU_HIT_HIGH_REG0, IOMMU_PMU_HIT_LOW_REG0 },
+		{ IOMMU_PMU_ACCESS_HIGH_REG1, IOMMU_PMU_ACCESS_LOW_REG1,
+		  IOMMU_PMU_HIT_HIGH_REG1, IOMMU_PMU_HIT_LOW_REG1 },
+		{ IOMMU_PMU_ACCESS_HIGH_REG2, IOMMU_PMU_ACCESS_LOW_REG2,
+		  IOMMU_PMU_HIT_HIGH_REG2, IOMMU_PMU_HIT_LOW_REG2 },
+		{ IOMMU_PMU_ACCESS_HIGH_REG3, IOMMU_PMU_ACCESS_LOW_REG3,
+		  IOMMU_PMU_HIT_HIGH_REG3, IOMMU_PMU_HIT_LOW_REG3 },
+		{ IOMMU_PMU_ACCESS_HIGH_REG4, IOMMU_PMU_ACCESS_LOW_REG4,
+		  IOMMU_PMU_HIT_HIGH_REG4, IOMMU_PMU_HIT_LOW_REG4 },
+		{ IOMMU_PMU_ACCESS_HIGH_REG5, IOMMU_PMU_ACCESS_LOW_REG5,
+		  IOMMU_PMU_HIT_HIGH_REG5, IOMMU_PMU_HIT_LOW_REG5 },
+		{ IOMMU_PMU_ACCESS_HIGH_REG6, IOMMU_PMU_ACCESS_LOW_REG6,
+		  IOMMU_PMU_HIT_HIGH_REG6, IOMMU_PMU_HIT_LOW_REG6 },
+		{ IOMMU_PMU_ACCESS_HIGH_REG7, IOMMU_PMU_ACCESS_LOW_REG7,
+		  IOMMU_PMU_HIT_HIGH_REG7, IOMMU_PMU_HIT_LOW_REG7 },
+		{ IOMMU_PMU_ACCESS_HIGH_REG8, IOMMU_PMU_ACCESS_LOW_REG8,
+		  IOMMU_PMU_HIT_HIGH_REG8, IOMMU_PMU_HIT_LOW_REG8 },
+	};
+	/* per-micro-TLB total latency (18-bit HIGH half) and max latency */
+	static const struct { u32 tl_hi, tl_lo, ml; } lat_regs[7] = {
+		{ IOMMU_PMU_TL_HIGH_REG0, IOMMU_PMU_TL_LOW_REG0, IOMMU_PMU_ML_REG0 },
+		{ IOMMU_PMU_TL_HIGH_REG1, IOMMU_PMU_TL_LOW_REG1, IOMMU_PMU_ML_REG1 },
+		{ IOMMU_PMU_TL_HIGH_REG2, IOMMU_PMU_TL_LOW_REG2, IOMMU_PMU_ML_REG2 },
+		{ IOMMU_PMU_TL_HIGH_REG3, IOMMU_PMU_TL_LOW_REG3, IOMMU_PMU_ML_REG3 },
+		{ IOMMU_PMU_TL_HIGH_REG4, IOMMU_PMU_TL_LOW_REG4, IOMMU_PMU_ML_REG4 },
+		{ IOMMU_PMU_TL_HIGH_REG5, IOMMU_PMU_TL_LOW_REG5, IOMMU_PMU_ML_REG5 },
+		{ IOMMU_PMU_TL_HIGH_REG6, IOMMU_PMU_TL_LOW_REG6, IOMMU_PMU_ML_REG6 },
+	};
+	struct {
+		u64 macrotlb_access_count;
+		u64 macrotlb_hit_count;
+		u64 ptwcache_access_count;
+		u64 ptwcache_hit_count;
+		struct {
+			u64 access_count;
+			u64 hit_count;
+			u64 latency;
+			u32 max_latency;
+		} micro_tlb[7];
+	} *iommu_profile;
+	unsigned long flags;
+	int len;
+	int i;
+
+	iommu_profile = kmalloc(sizeof(*iommu_profile), GFP_KERNEL);
+	if (!iommu_profile)
+		return 0;
+
+	spin_lock_irqsave(&iommu->iommu_lock, flags);
+	for (i = 0; i < 7; i++) {
+		iommu_profile->micro_tlb[i].access_count =
+			sunxi_iommu_pmu_read64(iommu, cnt_regs[i].acc_hi,
+					       cnt_regs[i].acc_lo, 0x7ff);
+		iommu_profile->micro_tlb[i].hit_count =
+			sunxi_iommu_pmu_read64(iommu, cnt_regs[i].hit_hi,
+					       cnt_regs[i].hit_lo, 0x7ff);
+		iommu_profile->micro_tlb[i].latency =
+			sunxi_iommu_pmu_read64(iommu, lat_regs[i].tl_hi,
+					       lat_regs[i].tl_lo, 0x3ffff);
+		iommu_profile->micro_tlb[i].max_latency =
+			sunxi_iommu_read(iommu, lat_regs[i].ml);
+	}
+	iommu_profile->macrotlb_access_count =
+		sunxi_iommu_pmu_read64(iommu, cnt_regs[7].acc_hi,
+				       cnt_regs[7].acc_lo, 0x7ff);
+	iommu_profile->macrotlb_hit_count =
+		sunxi_iommu_pmu_read64(iommu, cnt_regs[7].hit_hi,
+				       cnt_regs[7].hit_lo, 0x7ff);
+	iommu_profile->ptwcache_access_count =
+		sunxi_iommu_pmu_read64(iommu, cnt_regs[8].acc_hi,
+				       cnt_regs[8].acc_lo, 0x7ff);
+	iommu_profile->ptwcache_hit_count =
+		sunxi_iommu_pmu_read64(iommu, cnt_regs[8].hit_hi,
+				       cnt_regs[8].hit_lo, 0x7ff);
+	spin_unlock_irqrestore(&iommu->iommu_lock, flags);
+
+	len = scnprintf(
+		buf, PAGE_SIZE,
+		"%s_access_count = 0x%llx\n"
+		"%s_hit_count = 0x%llx\n"
+		"%s_access_count = 0x%llx\n"
+		"%s_hit_count = 0x%llx\n"
+		"%s_access_count = 0x%llx\n"
+		"%s_hit_count = 0x%llx\n"
+		"%s_access_count = 0x%llx\n"
+		"%s_hit_count = 0x%llx\n"
+		"%s_access_count = 0x%llx\n"
+		"%s_hit_count = 0x%llx\n"
+		"%s_access_count = 0x%llx\n"
+		"%s_hit_count = 0x%llx\n"
+		"%s_access_count = 0x%llx\n"
+		"%s_hit_count = 0x%llx\n"
+		"macrotlb_access_count = 0x%llx\n"
+		"macrotlb_hit_count = 0x%llx\n"
+		"ptwcache_access_count = 0x%llx\n"
+		"ptwcache_hit_count = 0x%llx\n"
+		"%s_total_latency = 0x%llx\n"
+		"%s_total_latency = 0x%llx\n"
+		"%s_total_latency = 0x%llx\n"
+		"%s_total_latency = 0x%llx\n"
+		"%s_total_latency = 0x%llx\n"
+		"%s_total_latency = 0x%llx\n"
+		"%s_total_latency = 0x%llx\n"
+		"%s_max_latency = 0x%x\n"
+		"%s_max_latency = 0x%x\n"
+		"%s_max_latency = 0x%x\n"
+		"%s_max_latency = 0x%x\n"
+		"%s_max_latency = 0x%x\n"
+		"%s_max_latency = 0x%x\n"
+		"%s_max_latency = 0x%x\n",
+		plat_data->master[0], iommu_profile->micro_tlb[0].access_count,
+		plat_data->master[0], iommu_profile->micro_tlb[0].hit_count,
+		plat_data->master[1], iommu_profile->micro_tlb[1].access_count,
+		plat_data->master[1], iommu_profile->micro_tlb[1].hit_count,
+		plat_data->master[2], iommu_profile->micro_tlb[2].access_count,
+		plat_data->master[2], iommu_profile->micro_tlb[2].hit_count,
+		plat_data->master[3], iommu_profile->micro_tlb[3].access_count,
+		plat_data->master[3], iommu_profile->micro_tlb[3].hit_count,
+		plat_data->master[4], iommu_profile->micro_tlb[4].access_count,
+		plat_data->master[4], iommu_profile->micro_tlb[4].hit_count,
+		plat_data->master[5], iommu_profile->micro_tlb[5].access_count,
+		plat_data->master[5], iommu_profile->micro_tlb[5].hit_count,
+		plat_data->master[6], iommu_profile->micro_tlb[6].access_count,
+		plat_data->master[6], iommu_profile->micro_tlb[6].hit_count,
+		iommu_profile->macrotlb_access_count,
+		iommu_profile->macrotlb_hit_count,
+		iommu_profile->ptwcache_access_count,
+		iommu_profile->ptwcache_hit_count, plat_data->master[0],
+		iommu_profile->micro_tlb[0].latency, plat_data->master[1],
+		iommu_profile->micro_tlb[1].latency, plat_data->master[2],
+		iommu_profile->micro_tlb[2].latency, plat_data->master[3],
+		iommu_profile->micro_tlb[3].latency, plat_data->master[4],
+		iommu_profile->micro_tlb[4].latency, plat_data->master[5],
+		iommu_profile->micro_tlb[5].latency, plat_data->master[6],
+		iommu_profile->micro_tlb[6].latency, plat_data->master[0],
+		iommu_profile->micro_tlb[0].max_latency, plat_data->master[1],
+		iommu_profile->micro_tlb[1].max_latency, plat_data->master[2],
+		iommu_profile->micro_tlb[2].max_latency, plat_data->master[3],
+		iommu_profile->micro_tlb[3].max_latency, plat_data->master[4],
+		iommu_profile->micro_tlb[4].max_latency, plat_data->master[5],
+		iommu_profile->micro_tlb[5].max_latency, plat_data->master[6],
+		iommu_profile->micro_tlb[6].max_latency);
+	kfree(iommu_profile);
+	return len;
+}
+
+
+/*
+ * Append one reserved-region line ("iova:... size:...") to @buf at offset
+ * @len, via sysfs_emit_at() for sysfs reads or scnprintf() otherwise.
+ * Only DUMP_REGION_RESERVE entries are printed. Returns the new offset.
+ */
+static u32 __print_rsv_region(char *buf, size_t buf_len, ssize_t len,
+			      struct dump_region *active_region,
+			      bool for_sysfs_show)
+{
+	if (active_region->type == DUMP_REGION_RESERVE) {
+		if (for_sysfs_show) {
+			len += sysfs_emit_at(
+				buf, len,
+				"iova:%pad size:0x%zx\n",
+				&active_region->iova, active_region->size);
+		} else {
+			len += scnprintf(
+				buf + len, buf_len - len,
+				"iova:%pad size:0x%zx\n",
+				&active_region->iova, active_region->size);
+		}
+	}
+	return len;
+}
+
+/*
+ * Print a "reserved" header followed by every region on @rsv_list into
+ * @buf starting at offset @len. Returns the updated offset.
+ */
+u32 sunxi_iommu_dump_rsv_list(struct list_head *rsv_list, ssize_t len,
+			      char *buf, size_t buf_len, bool for_sysfs_show)
+{
+	struct iommu_resv_region *resv;
+	struct dump_region active_region;
+	if (for_sysfs_show) {
+		len += sysfs_emit_at(buf, len, "reserved\n");
+	} else {
+		len += scnprintf(buf + len, buf_len - len, "reserved\n");
+	}
+	list_for_each_entry(resv, rsv_list, list) {
+		/* repackage each resv region into the dump helper's format */
+		active_region.access_mask = 0;
+		active_region.iova = resv->start;
+		active_region.type = DUMP_REGION_RESERVE;
+		active_region.size = resv->length;
+		len = __print_rsv_region(buf, buf_len, len, &active_region,
+					 for_sysfs_show);
+	}
+	return len;
+}
+
+/*
+ * Dump the reserved-region list followed by the debug domain's page
+ * table into @buf. Returns the number of bytes written.
+ */
+static ssize_t sun55i_iommu_dump_pgtable(struct sunxi_iommu_dev *iommu, char *buf, size_t buf_len,
+					 bool for_sysfs_show)
+{
+	struct sunxi_iommu_domain *sunxi_domain = iommu->debug_domain;
+	ssize_t len = 0;
+
+	len = sunxi_iommu_dump_rsv_list(&iommu->rsv_list, len, buf,
+					buf_len, for_sysfs_show);
+
+	if (sunxi_domain && sunxi_domain->pgtable) {
+		len = sunxi_pgtable_dump(sunxi_domain->pgtable, len, buf, buf_len,
+					 for_sysfs_show);
+	} else {
+		if (for_sysfs_show) {
+			len += sysfs_emit_at(buf, len, "no active domain to dump\n");
+		} else {
+			len += scnprintf(buf + len, buf_len - len, "no active domain to dump\n");
+		}
+	}
+
+	return len;
+}
+
+/* sysfs "page_debug" read: dump reserved regions and the page table. */
+static ssize_t sun55i_iommu_map_show(struct device *dev,
+				     struct device_attribute *attr, char *buf)
+{
+	struct sunxi_iommu_dev *iommu = dev_get_drvdata(dev);
+
+	if (!iommu)
+		return -ENODEV;
+
+	return sun55i_iommu_dump_pgtable(iommu, buf, PAGE_SIZE, true);
+}
+
+/* sysfs attributes: enable (rw), profilling (ro), page_debug (ro) */
+static struct device_attribute sunxi_iommu_enable_attr =
+	__ATTR(enable, 0644, sunxi_iommu_enable_show,
+	       sunxi_iommu_enable_store);
+static struct device_attribute sunxi_iommu_profilling_attr =
+	__ATTR(profilling, 0444, sunxi_iommu_profilling_show, NULL);
+static struct device_attribute sun55i_iommu_map_attr =
+	__ATTR(page_debug, 0444, sun55i_iommu_map_show, NULL);
+
+/*
+ * Create the driver's debug sysfs files on the platform device.
+ * NOTE(review): device_create_file() return values are ignored — files
+ * are best-effort debug aids.
+ */
+static void sun55i_iommu_sysfs_create(struct platform_device *_pdev,
+				      struct sunxi_iommu_dev *sunxi_iommu)
+{
+	device_create_file(&_pdev->dev, &sunxi_iommu_enable_attr);
+	device_create_file(&_pdev->dev, &sunxi_iommu_profilling_attr);
+	device_create_file(&_pdev->dev, &sun55i_iommu_map_attr);
+}
+
+/* Remove the debug sysfs files created by sun55i_iommu_sysfs_create(). */
+static void sun55i_iommu_sysfs_remove(struct platform_device *_pdev)
+{
+	device_remove_file(&_pdev->dev, &sunxi_iommu_enable_attr);
+	device_remove_file(&_pdev->dev, &sunxi_iommu_profilling_attr);
+	device_remove_file(&_pdev->dev, &sun55i_iommu_map_attr);
+}
+
+
+/*
+ * bus_for_each_dev() callback: scan one device's DT node for
+ * "sunxi-iova-reserve" / "sunxi-iova-premap" u64 arrays (pairs of
+ * start,length) and append them as resv regions to @data (the rsv_list).
+ * Overlapping regions are only warned about, not rejected.
+ * Returns 0 (scan continues) or -ENOMEM if the scratch buffer fails.
+ */
+int sunxi_iommu_check_cmd(struct device *dev, void *data)
+{
+	struct iommu_resv_region *region;
+	int prot = IOMMU_WRITE | IOMMU_READ;
+	struct list_head *rsv_list = data;
+	struct {
+		const char *name;
+		u32 region_type;
+	} supported_region[2] = { { "sunxi-iova-reserve", IOMMU_RESV_RESERVED },
+				  { "sunxi-iova-premap", IOMMU_RESV_DIRECT } };
+	int i, j;
+#define REGION_CNT_MAX (8)
+	struct {
+		u64 array[REGION_CNT_MAX * 2];
+		int count;
+	} *tmp_data;
+
+	/* heap scratch: 128 bytes of u64s would be heavy on the stack */
+	tmp_data = kzalloc(sizeof(*tmp_data), GFP_KERNEL);
+	if (!tmp_data)
+		return -ENOMEM;
+
+	for (i = 0; i < ARRAY_SIZE(supported_region); i++) {
+		/* search all supported argument */
+		if (!of_find_property(dev->of_node, supported_region[i].name,
+				      NULL))
+			continue;
+
+		tmp_data->count = of_property_read_variable_u64_array(
+			dev->of_node, supported_region[i].name, tmp_data->array,
+			0, REGION_CNT_MAX);
+		if (tmp_data->count <= 0)
+			continue;
+		if ((tmp_data->count & 1) != 0) {
+			dev_err(dev, "size %d of array %s should be even\n",
+				tmp_data->count, supported_region[i].name);
+			continue;
+		}
+
+		/* two u64 describe one region */
+		tmp_data->count /= 2;
+
+		/* prepared reserve region data */
+		for (j = 0; j < tmp_data->count; j++) {
+			region = iommu_alloc_resv_region(
+				tmp_data->array[j * 2],
+				tmp_data->array[j * 2 + 1], prot,
+				supported_region[i].region_type,
+				GFP_KERNEL);
+			if (!region) {
+				dev_err(dev, "no memory for iova rsv region");
+			} else {
+				struct iommu_resv_region *walk;
+				/* warn on region overlaps */
+				list_for_each_entry(walk, rsv_list, list) {
+					phys_addr_t walk_end =
+						walk->start + walk->length;
+					phys_addr_t region_end =
+						region->start + region->length;
+					if (!(walk->start >
+						      region->start +
+							      region->length ||
+					      walk->start + walk->length <
+						      region->start)) {
+						dev_warn(
+							dev,
+							"overlap on iova-reserve %pap~%pap with %pap~%pap",
+							&walk->start, &walk_end,
+							&region->start,
+							&region_end);
+					}
+				}
+				list_add_tail(&region->list, rsv_list);
+			}
+		}
+	}
+	kfree(tmp_data);
+#undef REGION_CNT_MAX
+
+	return 0;
+}
+
+/* Collect reserved/premap IOVA regions from every platform device's DT node. */
+static int __init_reserve_mem(struct sunxi_iommu_dev *dev)
+{
+	return bus_for_each_dev(&platform_bus_type, NULL, &dev->rsv_list,
+				sunxi_iommu_check_cmd);
+}
+
+/* IOMMU core callbacks; all masters share one group and one page table. */
+static const struct iommu_ops sunxi_iommu_ops = {
+	.domain_alloc_paging = sun55i_iommu_domain_alloc_paging,
+	.probe_device = sun55i_iommu_probe_device,
+	.probe_finalize = sun55i_iommu_probe_device_finalize,
+	.release_device = sun55i_iommu_release_device,
+	.device_group = generic_device_group,
+	.of_xlate = sun55i_iommu_of_xlate,
+	.owner = THIS_MODULE,
+	.default_domain_ops = &(const struct iommu_domain_ops) {
+		.attach_dev = sun55i_iommu_attach_dev,
+		.map_pages = sun55i_iommu_map,
+		.unmap_pages = sun55i_iommu_unmap,
+		.iotlb_sync_map = sun55i_iommu_iotlb_sync_map,
+		.iova_to_phys = sun55i_iommu_iova_to_phys,
+		.free = sun55i_iommu_domain_free,
+	}
+};
+
+/*
+ * Platform probe: map registers, request the fault IRQ, enable the clock,
+ * verify the hardware version, register with the IOMMU core, then build
+ * an identity domain and pre-map any DT-declared reserved regions.
+ *
+ * Error-path fixes vs. the previous revision:
+ *  - clock-get failure used to NULL the clk before PTR_ERR(), returning 0
+ *    (false success); the error code is now captured first.
+ *  - version-mismatch and sysfs-add failures disabled the clock and then
+ *    jumped to err_clk, disabling it a second time (unbalanced refcount);
+ *    err_clk is now the single place the clock is disabled.
+ *  - stale global_iommu_dev / iopte_cache pointers are cleared on failure.
+ */
+static int sun55i_iommu_probe(struct platform_device *pdev)
+{
+	int ret, irq;
+	struct device *dev = &pdev->dev;
+	struct sunxi_iommu_dev *sunxi_iommu;
+	struct resource *res;
+
+	iopte_cache = sunxi_pgtable_alloc_pte_cache();
+	if (!iopte_cache) {
+		pr_err("%s: Failed to create sunx-iopte-cache.\n", __func__);
+		return -ENOMEM;
+	}
+
+	sunxi_iommu = devm_kzalloc(dev, sizeof(*sunxi_iommu), GFP_KERNEL);
+	if (!sunxi_iommu) {
+		ret = -ENOMEM;
+		goto err_res;
+	}
+
+	res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+	if (!res) {
+		dev_dbg(dev, "Unable to find resource region\n");
+		ret = -ENOENT;
+		goto err_res;
+	}
+
+	sunxi_iommu->base = devm_ioremap_resource(&pdev->dev, res);
+	if (IS_ERR(sunxi_iommu->base)) {
+		dev_dbg(dev, "Unable to map IOMEM @ PA:%pa\n", &res->start);
+		ret = PTR_ERR(sunxi_iommu->base);
+		goto err_res;
+	}
+
+	sunxi_iommu->bypass = DEFAULT_BYPASS_VALUE;
+
+	irq = platform_get_irq(pdev, 0);
+	if (irq <= 0) {
+		dev_dbg(dev, "Unable to find IRQ resource\n");
+		ret = -ENOENT;
+		goto err_res;
+	}
+	pr_info("sunxi iommu: irq = %d\n", irq);
+
+	ret = devm_request_irq(dev, irq, sunxi_iommu_irq, 0,
+			       dev_name(dev), (void *)sunxi_iommu);
+	if (ret < 0) {
+		dev_dbg(dev, "Unabled to register interrupt handler\n");
+		goto err_res;
+	}
+
+	sunxi_iommu->irq = irq;
+
+	sunxi_iommu->clk = of_clk_get_by_name(dev->of_node, "iommu");
+	if (IS_ERR(sunxi_iommu->clk)) {
+		dev_dbg(dev, "Unable to find clock\n");
+		/* capture the error before clearing clk, or ret would be 0 */
+		ret = PTR_ERR(sunxi_iommu->clk);
+		sunxi_iommu->clk = NULL;
+		goto err_res;
+	}
+	clk_prepare_enable(sunxi_iommu->clk);
+
+	platform_set_drvdata(pdev, sunxi_iommu);
+	sunxi_iommu->dev = dev;
+	spin_lock_init(&sunxi_iommu->iommu_lock);
+	global_iommu_dev = sunxi_iommu;
+	sunxi_iommu->plat_data = of_device_get_match_data(dev);
+
+	if (sunxi_iommu->plat_data->version !=
+	    sunxi_iommu_read(sunxi_iommu, IOMMU_VERSION_REG)) {
+		dev_err(dev, "iommu version mismatch, please check and reconfigure\n");
+		ret = -EINVAL;
+		goto err_clk;
+	}
+
+	sun55i_iommu_sysfs_create(pdev, sunxi_iommu);
+	ret = iommu_device_sysfs_add(&sunxi_iommu->iommu, dev, NULL,
+				     dev_name(dev));
+	if (ret) {
+		dev_err(dev, "Failed to register iommu in sysfs\n");
+		goto err_clk;
+	}
+
+	ret = iommu_device_register(&sunxi_iommu->iommu, &sunxi_iommu_ops, dev);
+	if (ret) {
+		dev_err(dev, "Failed to register iommu\n");
+		goto err_sysfs_remove;
+	}
+
+	INIT_LIST_HEAD(&sunxi_iommu->rsv_list);
+	__init_reserve_mem(sunxi_iommu);
+
+	sunxi_iommu->identity_domain = sun55i_iommu_domain_alloc_paging(&pdev->dev);
+	if (!sunxi_iommu->identity_domain) {
+		dev_err(dev, "Failed to allocate identity domain\n");
+		ret = -ENOMEM;
+		goto err_iommu_unregister;
+	}
+
+	if (!list_empty(&sunxi_iommu->rsv_list)) {
+		struct iommu_resv_region *entry;
+
+		dev_info(dev, "Mapping %zu reserved regions for identity domain\n",
+			 list_count_nodes(&sunxi_iommu->rsv_list));
+
+		/* identity-map (iova == phys) every reserved region */
+		list_for_each_entry(entry, &sunxi_iommu->rsv_list, list) {
+			size_t size = entry->length;
+			phys_addr_t phys = entry->start;
+
+			if (sun55i_iommu_map(sunxi_iommu->identity_domain, phys, phys, size, 1, entry->prot, GFP_KERNEL, NULL)) {
+				dev_err(dev, "Failed to map reserved region %pa [%zx]\n",
+					&phys, size);
+			}
+		}
+	}
+
+	if (!dma_dev) {
+		dma_dev = &pdev->dev;
+		sunxi_pgtable_set_dma_dev(dma_dev);
+	}
+
+	return 0;
+
+err_iommu_unregister:
+	iommu_device_unregister(&sunxi_iommu->iommu);
+err_sysfs_remove:
+	iommu_device_sysfs_remove(&sunxi_iommu->iommu);
+err_clk:
+	/* single owner of the clk-disable: every post-enable error lands here */
+	clk_disable_unprepare(sunxi_iommu->clk);
+	global_iommu_dev = NULL;
+err_res:
+	sunxi_pgtable_free_pte_cache(iopte_cache);
+	iopte_cache = NULL;
+	dev_err(dev, "Failed to initialize\n");
+
+	return ret;
+}
+
+static void sun55i_iommu_remove(struct platform_device *pdev)
+{
+	struct sunxi_iommu_dev *sunxi_iommu = platform_get_drvdata(pdev);
+	struct iommu_resv_region *entry, *next;
+
+	/* tear down in reverse probe order before freeing backing memory */
+	iommu_device_unregister(&sunxi_iommu->iommu);
+	iommu_device_sysfs_remove(&sunxi_iommu->iommu);
+	sun55i_iommu_sysfs_remove(pdev);
+	devm_free_irq(sunxi_iommu->dev, sunxi_iommu->irq, sunxi_iommu);
+	devm_iounmap(sunxi_iommu->dev, sunxi_iommu->base);
+	list_for_each_entry_safe(entry, next, &sunxi_iommu->rsv_list, list)
+		kfree(entry); /* safe on an empty list, no list_empty() check */
+	iommu_domain_free(sunxi_iommu->identity_domain);
+	sunxi_pgtable_free_pte_cache(iopte_cache);
+	iopte_cache = NULL;
+	/* balance the clk_prepare_enable() done in probe */
+	clk_disable_unprepare(sunxi_iommu->clk);
+	global_iommu_dev = NULL;
+}
+
+/* System sleep: gate the IOMMU clock on suspend, restore on resume. */
+static int sun55i_iommu_suspend(struct device *dev)
+{
+	struct sunxi_iommu_dev *iommu = dev_get_drvdata(dev);
+
+	clk_disable_unprepare(iommu->clk);
+	return 0;
+}
+
+static int sun55i_iommu_resume(struct device *dev)
+{
+	struct sunxi_iommu_dev *iommu = dev_get_drvdata(dev);
+	int ret = clk_prepare_enable(iommu->clk); /* was unchecked */
+
+	return ret ? ret : sun55i_iommu_hw_init(iommu, NULL);
+}
+
+static const struct dev_pm_ops sunxi_iommu_pm_ops = {
+	.suspend = sun55i_iommu_suspend,
+	.resume = sun55i_iommu_resume,
+};
+
+static const struct sunxi_iommu_plat_data iommu_v15_sun55iw3_data = {
+	.version = 0x15,
+	/* disable prefetch to test display rcq bug */
+	.tlb_prefetch = 0x30000,
+	.tlb_invalid_mode = 0x1,
+	.ptw_invalid_mode = 0x1,
+	.master = {"ISP", "CSI", "VE0", "VE1", "G2D", "DE",
+			"DI", "DEBUG_MODE"},
+};
+
+static const struct of_device_id sunxi_iommu_dt_ids[] = {
+	{ .compatible = "allwinner,sun55i-a523-iommu", .data = &iommu_v15_sun55iw3_data},
+	{ /* sentinel */ },
+};
+MODULE_DEVICE_TABLE(of, sunxi_iommu_dt_ids);
+
+static struct platform_driver sunxi_iommu_driver = {
+	.probe = sun55i_iommu_probe,
+	.remove = sun55i_iommu_remove,
+	.driver = {
+		/* .owner is filled in by platform_driver_register() */
+		.name = "sunxi-iommu",
+		.pm = &sunxi_iommu_pm_ops,
+		.of_match_table = sunxi_iommu_dt_ids,
+	}
+};
+
+static int __init sunxi_iommu_init(void)
+{
+	return platform_driver_register(&sunxi_iommu_driver);
+}
+
+static void __exit sunxi_iommu_exit(void)
+{
+	/* returning a void expression is a C constraint violation; plain call */
+	platform_driver_unregister(&sunxi_iommu_driver);
+}
+
+subsys_initcall(sunxi_iommu_init);
+module_exit(sunxi_iommu_exit);
+
+MODULE_LICENSE("GPL v2");
+MODULE_VERSION("1.5.1");
+MODULE_AUTHOR("huangshuosheng<huangshuosheng@allwinnertech.com>");
+MODULE_AUTHOR("ouyangkun<ouyangkun@allwinnertech.com>");
diff --git a/drivers/iommu/sun55i-iommu.h b/drivers/iommu/sun55i-iommu.h
new file mode 100644
index 000000000000..111111111111
--- /dev/null
+++ b/drivers/iommu/sun55i-iommu.h
@@ -0,0 +1,57 @@
+/* SPDX-License-Identifier: GPL-2.0-or-later */
+/* Copyright(c) 2020 - 2023 Allwinner Technology Co.,Ltd. All rights reserved. */
+/*
+ * sunxi iommu: main structures
+ *
+ * Copyright (C) 2008-2009 Nokia Corporation
+ *
+ * Written by Hiroshi DOYU <Hiroshi.DOYU@nokia.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+
+#include <linux/version.h>
+#include "sun55i-iommu-pgtable.h"
+
+//iommu domains have separate ops
+#define SEPERATE_DOMAIN_API
+//dma-iommu is folded into the iommu core
+#define DMA_IOMMU_IN_IOMMU
+//not used anywhere since refactoring
+#define GROUP_NOTIFIER_DEPRECATED
+//iommus now probe in the correct order,
+//so the bus set op workaround is no longer needed
+#define BUS_SET_OP_DEPRECATED
+//dma cookie handled by iommu core, not driver
+#define COOKIE_HANDLE_BY_CORE
+//iommu resv region allocation require gfp flags
+#define RESV_REGION_NEED_GFP_FLAG
+
+#ifdef DMA_IOMMU_IN_IOMMU
+#include <linux/iommu.h>
+/*
+ * By design an iommu driver is part of the iommu subsystem
+ * and would reach this header via ../../dma-iommu.h.
+ * The sunxi BSP has a separate source root, so use a
+ * different path to reach dma-iommu.h.
+ */
+#include <../drivers/iommu/dma-iommu.h>
+#else
+#include <linux/dma-iommu.h>
+#endif
+
+#define MAX_SG_SIZE (128 << 20) /* 128 MiB: largest scatter-gather span handled */
+#define MAX_SG_TABLE_SIZE ((MAX_SG_SIZE / SPAGE_SIZE) * sizeof(u32)) /* one u32 entry per page */
+#define DUMP_REGION_MAP 0 /* dump_region.type: a mapped region */
+#define DUMP_REGION_RESERVE 1 /* dump_region.type: a reserved region */
+struct dump_region {
+	u32 access_mask; /* NOTE(review): presumably a permission bitmask — confirm against users */
+	size_t size; /* region length in bytes */
+	u32 type; /* DUMP_REGION_MAP or DUMP_REGION_RESERVE */
+	dma_addr_t phys, iova; /* physical address and I/O virtual address of the region */
+};
+struct sunxi_iommu_dev;
+void sun55i_reset_device_iommu(unsigned int master_id);
+void sun55i_enable_device_iommu(struct sunxi_iommu_dev *iommu, unsigned int master_id, bool flag);
diff --git a/include/sunxi-iommu.h b/include/sunxi-iommu.h
new file mode 100644
index 000000000000..111111111111
--- /dev/null
+++ b/include/sunxi-iommu.h
@@ -0,0 +1,50 @@
+/* SPDX-License-Identifier: GPL-2.0-or-later */
+/* Copyright(c) 2020 - 2023 Allwinner Technology Co.,Ltd. All rights reserved. */
+/*
+ *
+ * Copyright (C) 2015 AllWinnertech Ltd.
+ *
+ * Author: huangshuosheng <huangshuosheng@allwinnertech.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+#ifndef __LINUX_SUNXI_IOMMU_H
+#define __LINUX_SUNXI_IOMMU_H
+#include <linux/iommu.h>
+#include <linux/iova.h>
+
+struct sunxi_iommu_dev;
+typedef void (*sunxi_iommu_fault_cb)(void);
+extern void sun55i_iommu_register_fault_cb(sunxi_iommu_fault_cb cb, unsigned int master_id);
+extern void sun55i_enable_device_iommu(struct sunxi_iommu_dev *iommu, unsigned int master_id, bool flag);
+extern void sun55i_reset_device_iommu(unsigned int master_id);
+
+enum iommu_dma_cookie_type { /* NOTE(review): mirrors a private core enum — keep in sync */
+	IOMMU_DMA_IOVA_COOKIE,
+	IOMMU_DMA_MSI_COOKIE,
+};
+
+struct iommu_dma_cookie { /* NOTE(review): duplicates the private definition in drivers/iommu/dma-iommu.c; any layout drift there silently corrupts — verify on kernel bumps */
+	enum iommu_dma_cookie_type type;
+	union {
+		/* Full allocator for IOMMU_DMA_IOVA_COOKIE */
+		struct iova_domain iovad;
+		/* Trivial linear page allocator for IOMMU_DMA_MSI_COOKIE */
+		dma_addr_t msi_iova;
+	};
+	struct list_head msi_page_list;
+
+	/* Domain for flush queue callback; NULL if flush queue not in use */
+	struct iommu_domain *fq_domain;
+};
+
+#endif /* __LINUX_SUNXI_IOMMU_H */
\ No newline at end of file
--
Armbian