armbian-build/patch/kernel/archive/spacemit-6.1/011-drivers-dma.patch
From 0000000000000000000000000000000000000000 Mon Sep 17 00:00:00 2001
From: Patrick Yavitz <pyavitz@armbian.com>
Date: Fri, 21 Jun 2024 11:54:06 -0400
Subject: add spacemit patch set
source: https://gitee.com/bianbu-linux/linux-6.1
Signed-off-by: Patrick Yavitz <pyavitz@armbian.com>
---
drivers/dma/Kconfig | 35 +-
drivers/dma/Makefile | 3 +
drivers/dma/adma-spacemit.c | 696 ++++++++++
drivers/dma/bcm2835-dma.c | 4 +-
drivers/dma/dma-axi-dmac.c | 4 +-
5 files changed, 735 insertions(+), 7 deletions(-)
diff --git a/drivers/dma/Kconfig b/drivers/dma/Kconfig
index 111111111111..222222222222 100644
--- a/drivers/dma/Kconfig
+++ b/drivers/dma/Kconfig
@@ -416,6 +416,12 @@ config MILBEAUT_XDMAC
Say yes here to support the Socionext Milbeaut
XDMAC device.
+menuconfig MMP_PDMA_DRIVER
+ bool "MMP_PDMA driver"
+ help
+ Choose which MMP PDMA driver implementation to build.
+
+if MMP_PDMA_DRIVER
config MMP_PDMA
tristate "MMP PDMA support"
depends on ARCH_MMP || ARCH_PXA || COMPILE_TEST
@@ -423,6 +429,27 @@ config MMP_PDMA
help
Support the MMP PDMA engine for PXA and MMP platform.
+config MMP_PDMA_SPACEMIT_K1X
+ bool "Spacemit mmp_pdma support"
+ depends on ARCH_MMP || ARCH_PXA || COMPILE_TEST || SOC_SPACEMIT_K1X
+ select DMA_ENGINE
+ help
+ Support the MMP PDMA engine for the Spacemit K1x platform.
+endif
+
+config SPACEMIT_PDMA_SUPPORT_64BIT
+ bool "MMP PDMA support the 64-bit address"
+ default y
+ help
+ Support 64-bit address in the MMP PDMA
+
+config ADMA_SPACEMIT_K1X
+ bool "Spacemit adma support"
+ depends on SOC_SPACEMIT_K1X && RPMSG_VIRTIO
+ select DMA_ENGINE
+ help
+ Support the ADMA engine for the Spacemit K1x SSPA.
+
config MMP_TDMA
tristate "MMP Two-Channel DMA support"
depends on ARCH_MMP || COMPILE_TEST
@@ -439,7 +466,7 @@ config MOXART_DMA
select DMA_VIRTUAL_CHANNELS
help
Enable support for the MOXA ART SoC DMA controller.
-
+
Say Y here if you enabled MMP ADMA, otherwise say N.
config MPC512X_DMA
@@ -755,6 +782,12 @@ config XILINX_ZYNQMP_DPDMA
driver provides the dmaengine required by the DisplayPort subsystem
display driver.
+config USERSPACE_DMA
+ bool "userspace dma driver support"
+ depends on DMA_ENGINE
+ help
+ Support dma operation in userspace
+
# driver files
source "drivers/dma/bestcomm/Kconfig"
diff --git a/drivers/dma/Makefile b/drivers/dma/Makefile
index 111111111111..222222222222 100644
--- a/drivers/dma/Makefile
+++ b/drivers/dma/Makefile
@@ -50,6 +50,8 @@ obj-$(CONFIG_LPC18XX_DMAMUX) += lpc18xx-dmamux.o
obj-$(CONFIG_MILBEAUT_HDMAC) += milbeaut-hdmac.o
obj-$(CONFIG_MILBEAUT_XDMAC) += milbeaut-xdmac.o
obj-$(CONFIG_MMP_PDMA) += mmp_pdma.o
+obj-$(CONFIG_MMP_PDMA_SPACEMIT_K1X) += mmp_pdma_k1x.o
+obj-$(CONFIG_ADMA_SPACEMIT_K1X) += adma-spacemit.o
obj-$(CONFIG_MMP_TDMA) += mmp_tdma.o
obj-$(CONFIG_MOXART_DMA) += moxart-dma.o
obj-$(CONFIG_MPC512X_DMA) += mpc512x_dma.o
@@ -81,6 +83,7 @@ obj-$(CONFIG_UNIPHIER_MDMAC) += uniphier-mdmac.o
obj-$(CONFIG_UNIPHIER_XDMAC) += uniphier-xdmac.o
obj-$(CONFIG_XGENE_DMA) += xgene-dma.o
obj-$(CONFIG_ST_FDMA) += st_fdma.o
+obj-$(CONFIG_USERSPACE_DMA) += udma.o
obj-$(CONFIG_FSL_DPAA2_QDMA) += fsl-dpaa2-qdma/
obj-$(CONFIG_INTEL_LDMA) += lgm/
diff --git a/drivers/dma/adma-spacemit.c b/drivers/dma/adma-spacemit.c
new file mode 100644
index 000000000000..111111111111
--- /dev/null
+++ b/drivers/dma/adma-spacemit.c
@@ -0,0 +1,696 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * Spacemit K1x ADMA driver. Copyright 2024 Spacemit.
+ */
+
+#include <linux/err.h>
+#include <linux/module.h>
+#include <linux/init.h>
+#include <linux/types.h>
+#include <linux/interrupt.h>
+#include <linux/dma-mapping.h>
+#include <linux/slab.h>
+#include <linux/dmaengine.h>
+#include <linux/platform_device.h>
+#include <linux/device.h>
+#include <linux/dmapool.h>
+#include <linux/genalloc.h>
+#include <linux/of_device.h>
+#include <linux/of_dma.h>
+#include <linux/of.h>
+#include <linux/delay.h>
+#include "dmaengine.h"
+
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/rpmsg.h>
+#include <linux/of_device.h>
+
+#define BCR 0x0
+#define SAR 0x10
+#define DAR 0x20
+#define NDR 0x30
+#define DCR 0x40
+#define IER 0x80
+#define ADMA_SAMPLE_BITS_MASK (0x7 << 22)
+#define ADMA_SAMPLE_BITS(x) (((x) << 22) & ADMA_SAMPLE_BITS_MASK)
+#define ADMA_CH_ABORT (1 << 20)
+#define ADMA_CLOSE_DESC_EN (1 << 17)
+#define ADMA_UNPACK_SAMPLES (1 << 16)
+#define ADMA_CH_ACTIVE (1 << 14)
+#define ADMA_FETCH_NEXT_DESC (1 << 13)
+#define ADMA_CH_EN (1 << 12)
+#define ADMA_INTERRUPT_MODE (1 << 10)
+
+#define ADMA_BURST_LIMIT_MASK (0x7 << 6)
+#define ADMA_BURST_LIMIT(x) (((x) << 6) & ADMA_BURST_LIMIT_MASK)
+
+#define ADMA_DEST_ADDR_DIR_MASK (0x3 << 4)
+#define ADMA_DEST_ADDR_INCREMENT (0x0 << 4)
+#define ADMA_DEST_ADDR_DECREMENT (0x1 << 4)
+#define ADMA_DEST_ADDR_HOLD (0x2 << 4)
+
+#define ADMA_SRC_ADDR_DIR_MASK (0x3 << 2)
+#define ADMA_SRC_ADDR_INCREMENT (0x0 << 2)
+#define ADMA_SRC_ADDR_DECREMENT (0x1 << 2)
+#define ADMA_SRC_ADDR_HOLD (0x2 << 2)
+
+/* current descriptor register */
+#define ADMA_CH_CUR_DESC_REG 0x70
+
+/* interrupt mask register */
+#define ADMA_CH_INTR_MASK_REG 0x80
+#define ADMA_FINISH_INTR_EN (0x1 << 0)
+
+/* interrupt status register */
+#define ADMA_CH_INTR_STATUS_REG 0xa0
+#define ADMA_FINISH_INTR_DONE (0x1 << 0)
+
+#define HDMI_ADMA 0x50
+#define HDMI_ENABLE (1 << 0)
+#define HDMI_DISABLE (0 << 0)
+
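+/* fixed 1 KiB physical window for hardware descriptors (presumably carved out for the audio subsystem); remapped below via the "buf_addr" resource */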
+#define DESC_BUF_BASE 0xc08d0000
+#define DESC_BUF_SIZE 0x400
+
+#define tx_to_adma_desc(tx) \
+ container_of(tx, struct adma_desc_sw, async_tx)
+#define to_adma_chan(dchan) \
+ container_of(dchan, struct adma_ch, chan)
+#define to_adma_dev(dmadev) \
+ container_of(dmadev, struct adma_dev, device)
+
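+/* handshake strings exchanged over rpmsg with the remote firmware (presumably the audio DSP) */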
+#define STARTUP_MSG "startup"
+#define STARTUP_OK_MSG "startup-ok"
+//#define DESC_BUFFER_ADDR
+
+enum {
+ AUDIO_SAMPLE_WORD_8BITS = 0x0,
+ AUDIO_SAMPLE_WORD_12BITS,
+ AUDIO_SAMPLE_WORD_16BITS,
+ AUDIO_SAMPLE_WORD_20BITS,
+ AUDIO_SAMPLE_WORD_24BITS,
+ AUDIO_SAMPLE_WORD_32BITS,
+};
+
+struct adma_desc_hw {
+ u32 byte_cnt;
+ u32 src_addr;
+ u32 dst_addr;
+ u32 nxt_desc;
+};
+
+struct adma_desc_sw {
+ struct adma_desc_hw desc;
+ struct list_head node;
+ struct list_head tx_list;
+ struct dma_async_tx_descriptor async_tx;
+};
+
+struct adma_pchan;
+
+struct adma_ch {
+ struct device *dev;
+ struct dma_chan chan;
+ struct dma_async_tx_descriptor desc;
+ struct adma_pchan *phy;
+ struct dma_slave_config slave_config;
+ enum dma_transfer_direction dir;
+ struct adma_desc_sw *cyclic_first;
+ bool unpack_sample;
+
+ struct tasklet_struct tasklet;
+ u32 dev_addr;
+
+ spinlock_t desc_lock;
+ struct list_head chain_pending;
+ struct list_head chain_running;
+ enum dma_status status;
+
+ struct gen_pool *desc_pool;
+};
+
+struct adma_pchan {
+ void __iomem *base;
+ void __iomem *ctrl_base;
+ struct adma_ch *vchan;
+};
+
+struct adma_dev {
+ int max_burst_size;
+ void __iomem *base;
+ void __iomem *ctrl_base;
+ void __iomem *desc_base;
+ struct dma_device device;
+ struct device *dev;
+ spinlock_t phy_lock;
+};
+
+static unsigned long long private_data[2];
+
+struct instance_data {
+ struct rpmsg_device *rpdev;
+ struct adma_ch *achan;
+};
+
+static void adma_ch_write_reg(struct adma_pchan *phy, u32 reg_offset, u32 value)
+{
+ writel(value, phy->base + reg_offset);
+}
+
+static u32 adma_ch_read_reg(struct adma_pchan *phy, u32 reg_offset)
+{
+ return readl(phy->base + reg_offset);
+}
+
+/* dmaengine controller implementation */
+static dma_cookie_t adma_tx_submit(struct dma_async_tx_descriptor *tx)
+{
+ struct adma_ch *achan = to_adma_chan(tx->chan);
+ struct adma_desc_sw *desc = tx_to_adma_desc(tx);
+ struct adma_desc_sw *child;
+ unsigned long flags;
+ dma_cookie_t cookie = -EBUSY;
+
+ spin_lock_irqsave(&achan->desc_lock, flags);
+ list_for_each_entry(child, &desc->tx_list, node) {
+ cookie = dma_cookie_assign(&child->async_tx);
+ }
+
+ list_splice_tail_init(&desc->tx_list, &achan->chain_pending);
+ spin_unlock_irqrestore(&achan->desc_lock, flags);
+
+ return cookie;
+}
+
+static int adma_alloc_chan_resources(struct dma_chan *dchan)
+{
+ struct adma_ch *achan = to_adma_chan(dchan);
+ struct adma_dev *adev = to_adma_dev(achan->chan.device);
+ if(achan->desc_pool)
+ return 1;
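+ /* order-7 pool: descriptors are handed out in 128-byte granules from the shared buffer */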
+ achan->desc_pool = gen_pool_create(7, -1);
+ if (!achan->desc_pool) {
+ pr_err("unable to allocate descriptor pool\n");
+ return -ENOMEM;
+ }
+ if(gen_pool_add_virt(achan->desc_pool, (long)adev->desc_base, DESC_BUF_BASE,
+ DESC_BUF_SIZE, -1) != 0) {
+ pr_err("gen_pool_add mem error!\n");
+ gen_pool_destroy(achan->desc_pool);
+ return -ENOMEM;
+ }
+
+ achan->status = DMA_COMPLETE;
+ achan->dir = 0;
+ achan->dev_addr = 0;
+ return 1;
+}
+
+static void adma_free_desc_list(struct adma_ch *chan,
+ struct list_head *list)
+{
+ struct adma_desc_sw *desc, *_desc;
+
+ list_for_each_entry_safe(desc, _desc, list, node) {
+ list_del(&desc->node);
+ gen_pool_free(chan->desc_pool, (long)desc, sizeof(struct adma_desc_sw));
+ }
+}
+
+static void adma_free_chan_resources(struct dma_chan *dchan)
+{
+ struct adma_ch *achan = to_adma_chan(dchan);
+ struct adma_dev *adev = to_adma_dev(achan->chan.device);
+ unsigned long flags;
+
+ spin_lock_irqsave(&achan->desc_lock, flags);
+ adma_free_desc_list(achan, &achan->chain_pending);
+ adma_free_desc_list(achan, &achan->chain_running);
+ spin_unlock_irqrestore(&achan->desc_lock, flags);
+ gen_pool_destroy(achan->desc_pool);
+ achan->desc_pool = NULL;
+ achan->status = DMA_COMPLETE;
+ achan->dir = 0;
+ achan->dev_addr = 0;
+ spin_lock_irqsave(&adev->phy_lock, flags);
+ spin_unlock_irqrestore(&adev->phy_lock, flags);
+ return;
+}
+
+static struct adma_desc_sw *alloc_descriptor(struct adma_ch *achan)
+{
+ struct adma_desc_sw *desc;
+ dma_addr_t pdesc;
+
+ desc = (struct adma_desc_sw*)gen_pool_alloc(achan->desc_pool, sizeof(struct adma_desc_sw));
+ if (!desc) {
+ dev_err(achan->dev, "out of memory for link descriptor\n");
+ return NULL;
+ }
+ memset(desc, 0, sizeof(struct adma_desc_sw));
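+ /* the controller fetches descriptors by bus address, so record the pool's physical mapping */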
+ pdesc = (dma_addr_t)gen_pool_virt_to_phys(achan->desc_pool, (long)desc);
+
+ INIT_LIST_HEAD(&desc->tx_list);
+ dma_async_tx_descriptor_init(&desc->async_tx, &achan->chan);
+ desc->async_tx.tx_submit = adma_tx_submit;
+ desc->async_tx.phys = pdesc;
+ return desc;
+}
+
+static struct dma_async_tx_descriptor *
+adma_prep_cyclic(struct dma_chan *dchan, dma_addr_t buf_addr,
+ size_t len, size_t period_len,
+ enum dma_transfer_direction direction,
+ unsigned long flags)
+{
+ struct adma_ch *achan;
+ struct adma_desc_sw *first = NULL, *prev = NULL, *new;
+ dma_addr_t adma_src, adma_dst;
+
+ achan = to_adma_chan(dchan);
+
+ switch(direction) {
+ case DMA_MEM_TO_DEV:
+ adma_src = buf_addr & 0xffffffff;
+ achan->dev_addr = achan->slave_config.dst_addr;
+ adma_dst = achan->dev_addr;
+ break;
+ case DMA_DEV_TO_MEM:
+ adma_dst = buf_addr & 0xffffffff;
+ achan->dev_addr = achan->slave_config.src_addr;
+ adma_src = achan->dev_addr;
+ break;
+ default:
+ dev_err(achan->dev, "Unsupported direction for cyclic DMA\n");
+ return NULL;
+ }
+ achan->dir = direction;
+ do {
+ new = alloc_descriptor(achan);
+ if (!new) {
+ dev_err(achan->dev, "no memory for desc\n");
+ if (first)
+ adma_free_desc_list(achan, &first->tx_list);
+ return NULL;
+ }
+ new->desc.byte_cnt = period_len;
+ new->desc.src_addr = adma_src;
+ new->desc.dst_addr = adma_dst;
+ if(!first)
+ first = new;
+ else
+ prev->desc.nxt_desc = new->async_tx.phys;
+ new->async_tx.cookie = 0;
+ prev = new;
+ len -= period_len;
+
+ if(achan->dir == DMA_MEM_TO_DEV)
+ adma_src += period_len;
+ else
+ adma_dst += period_len;
+ list_add_tail(&new->node, &first->tx_list);
+ }while(len);
+
+ first->async_tx.flags = flags;
+ first->async_tx.cookie = -EBUSY;
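+ /* close the ring: the last descriptor points back to the first, keeping the transfer cyclic */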
+ new->desc.nxt_desc = first->async_tx.phys;
+ achan->cyclic_first = first;
+ return &first->async_tx;
+}
+
+static int adma_config(struct dma_chan *dchan,
+ struct dma_slave_config *cfg)
+{
+ struct adma_ch *achan = to_adma_chan(dchan);
+
+ memcpy(&achan->slave_config, cfg, sizeof(*cfg));
+ return 0;
+}
+
+static void set_desc(struct adma_pchan *phy, dma_addr_t addr)
+{
+ adma_ch_write_reg(phy, NDR, addr);
+}
+
+static void set_ctrl_reg(struct adma_pchan *phy)
+{
+ u32 ctrl_reg_val = 0;
+ u32 maxburst = 0, sample_bits = 0;
+ enum dma_slave_buswidth width = DMA_SLAVE_BUSWIDTH_UNDEFINED;
+ struct adma_ch *achan = phy->vchan;
+
+ if(achan->dir == DMA_MEM_TO_DEV) {
+ maxburst = achan->slave_config.dst_maxburst;
+ width = achan->slave_config.dst_addr_width;
+ ctrl_reg_val |= ADMA_DEST_ADDR_HOLD | ADMA_SRC_ADDR_INCREMENT;
+ }
+ else if(achan->dir == DMA_DEV_TO_MEM) {
+ maxburst = achan->slave_config.src_maxburst;
+ width = achan->slave_config.src_addr_width;
+ ctrl_reg_val |= ADMA_SRC_ADDR_HOLD | ADMA_DEST_ADDR_INCREMENT;
+ }
+ else
+ ctrl_reg_val |= ADMA_SRC_ADDR_HOLD | ADMA_DEST_ADDR_HOLD;
+
+ if(width == DMA_SLAVE_BUSWIDTH_1_BYTE)
+ sample_bits = AUDIO_SAMPLE_WORD_8BITS;
+ else if(width == DMA_SLAVE_BUSWIDTH_2_BYTES)
+ sample_bits = AUDIO_SAMPLE_WORD_16BITS;
+ else if(width == DMA_SLAVE_BUSWIDTH_3_BYTES)
+ sample_bits = AUDIO_SAMPLE_WORD_24BITS;
+ else if(width == DMA_SLAVE_BUSWIDTH_4_BYTES)
+ sample_bits = AUDIO_SAMPLE_WORD_32BITS;
+ ctrl_reg_val |= ADMA_SAMPLE_BITS(sample_bits);
+
+ /* no burst length information; default to 0 */
+ ctrl_reg_val |= ADMA_BURST_LIMIT(0);
+ ctrl_reg_val |= ADMA_CH_ABORT;
+ if(achan->unpack_sample)
+ ctrl_reg_val |= ADMA_UNPACK_SAMPLES;
+ adma_ch_write_reg(phy, DCR, ctrl_reg_val);
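+ /* channels without the "hdmi-sample" DT property unpack samples; the rest keep samples packed and enable the HDMI path below */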
+ if(!achan->unpack_sample)
+ writel(HDMI_ENABLE, phy->ctrl_base);
+}
+
+static void enable_chan(struct adma_pchan *phy)
+{
+ u32 ctrl_val;
+ struct adma_ch *achan = phy->vchan;
+
+ if(achan->dir == DMA_MEM_TO_DEV)
+ adma_ch_write_reg(phy, DAR, achan->dev_addr);
+ else if(achan->dir == DMA_DEV_TO_MEM)
+ adma_ch_write_reg(phy, SAR, achan->dev_addr);
+ adma_ch_write_reg(phy, IER, 1);
+ ctrl_val = adma_ch_read_reg(phy, DCR);
+ ctrl_val |= ADMA_FETCH_NEXT_DESC;
+ ctrl_val |= ADMA_CH_EN;
+ adma_ch_write_reg(phy, DCR, ctrl_val);
+}
+
+static void start_pending_queue(struct adma_ch *achan)
+{
+ struct adma_dev *adev = to_adma_dev(achan->chan.device);
+ struct adma_pchan *phy;
+ struct adma_desc_sw *desc;
+ unsigned long flags;
+
+ if(achan->status == DMA_IN_PROGRESS) {
+ dev_dbg(achan->dev, "DMA controller still busy\n");
+ return;
+ }
+ spin_lock_irqsave(&adev->phy_lock, flags);
+ phy = achan->phy;
+ desc = list_first_entry(&achan->chain_pending,
+ struct adma_desc_sw, node);
+ list_splice_tail_init(&achan->chain_pending, &achan->chain_running);
+ set_desc(phy, desc->async_tx.phys);
+ set_ctrl_reg(phy);
+ enable_chan(phy);
+ spin_unlock_irqrestore(&adev->phy_lock, flags);
+ achan->status = DMA_IN_PROGRESS;
+}
+
+static void adma_issue_pending(struct dma_chan *dchan)
+{
+ struct adma_ch *achan = to_adma_chan(dchan);
+ unsigned long flags;
+
+ spin_lock_irqsave(&achan->desc_lock, flags);
+ start_pending_queue(achan);
+ spin_unlock_irqrestore(&achan->desc_lock, flags);
+}
+
+static enum dma_status adma_tx_status(struct dma_chan *dchan,
+ dma_cookie_t cookie,
+ struct dma_tx_state *txstate)
+{
+ /*struct adma_ch *chan = to_adma_chan(dchan);
+ enum dma_status ret;
+ unsigned long flags;
+ spin_lock_irqsave(&chan->desc_lock, flags);
+ ret = dma_cookie_status(dchan, cookie, txstate);
+ if (likely(ret != DMA_ERROR))
+ dma_set_residue(txstate, mmp_pdma_residue(chan, cookie));
+ spin_unlock_irqrestore(&chan->desc_lock, flags);
+ if (ret == DMA_COMPLETE)
+ return ret;
+ else
+ return chan->status;*/
+ return 0;
+}
+
+static void disable_chan(struct adma_pchan *phy)
+{
+ u32 reg_val = adma_ch_read_reg(phy, DCR);
+ reg_val |= ADMA_CH_ABORT;
+ adma_ch_write_reg(phy, DCR, reg_val);
+
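+ /* give the abort time to take effect before clearing the channel enable bit */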
+ udelay(500);
+ reg_val = adma_ch_read_reg(phy, DCR);
+ reg_val &= ~ADMA_CH_EN;
+ adma_ch_write_reg(phy, DCR, reg_val);
+ adma_ch_write_reg(phy, IER, 0);
+ if((!phy->vchan->unpack_sample) && ((readl(phy->ctrl_base) & HDMI_ENABLE) == 0x1))
+ writel(HDMI_DISABLE, phy->ctrl_base);
+}
+
+static int adma_terminate_all(struct dma_chan *dchan)
+{
+ struct adma_ch *achan = to_adma_chan(dchan);
+ struct adma_dev *adev = to_adma_dev(achan->chan.device);
+ unsigned long flags, phy_flags;
+
+ spin_lock_irqsave(&achan->desc_lock, flags);
+ disable_chan(achan->phy);
+ achan->status = DMA_COMPLETE;
+ spin_lock_irqsave(&adev->phy_lock, flags);
+ spin_unlock_irqrestore(&adev->phy_lock, flags);
+
+ adma_free_desc_list(achan, &achan->chain_pending);
+ adma_free_desc_list(achan, &achan->chain_running);
+ //achan->bytes_residue = 0;
+
+ spin_unlock_irqrestore(&achan->desc_lock, flags);
+ return 0;
+}
+
+static struct dma_chan *adma_dma_xlate(struct of_phandle_args *dma_spec,
+ struct of_dma *ofdma)
+{
+ struct adma_dev *d = ofdma->of_dma_data;
+ struct dma_chan *chan;
+
+ chan = dma_get_any_slave_channel(&d->device);
+ if (!chan)
+ return NULL;
+ return chan;
+}
+
+static const struct of_device_id adma_id_table[] = {
+ { .compatible = "spacemit,k1x-adma", .data =(void *)&private_data[0] },
+ {},
+};
+
+static int adma_probe(struct platform_device *pdev)
+{
+ struct adma_dev *adev;
+ struct device *dev;
+ const struct of_device_id *of_id;
+ struct rpmsg_device *rpdev;
+ struct instance_data *idata;
+ struct adma_pchan *phy;
+ struct adma_ch *achan;
+ int ret;
+ const enum dma_slave_buswidth widths =
+ DMA_SLAVE_BUSWIDTH_1_BYTE | DMA_SLAVE_BUSWIDTH_2_BYTES |
+ DMA_SLAVE_BUSWIDTH_3_BYTES | DMA_SLAVE_BUSWIDTH_4_BYTES;
+
+ of_id = of_match_device(adma_id_table, &pdev->dev);
+ if (!of_id) {
+ pr_err("Unable to match OF ID\n");
+ return -ENODEV;
+ }
+ idata = (struct instance_data *)((unsigned long long *)(of_id->data))[0];
+ rpdev = idata->rpdev;
+ ret = rpmsg_send(rpdev->ept, STARTUP_MSG, strlen(STARTUP_MSG));
+ if (ret) {
+ dev_err(&rpdev->dev, "rpmsg_send failed: %d\n", ret);
+ return ret;
+ }
+
+ /*get controller dts info*/
+ dev = &pdev->dev;
+ adev = devm_kzalloc(dev, sizeof(*adev), GFP_KERNEL);
+ if (!adev)
+ return -ENOMEM;
+ adev->dev = dev;
+ adev->base = devm_platform_ioremap_resource_byname(pdev, "adma_reg");
+ if(IS_ERR(adev->base))
+ return PTR_ERR(adev->base);
+ adev->ctrl_base = devm_platform_ioremap_resource_byname(pdev, "ctrl_reg");
+ if(IS_ERR(adev->ctrl_base))
+ return PTR_ERR(adev->ctrl_base);
+ adev->desc_base = devm_platform_ioremap_resource_byname(pdev, "buf_addr");
+ if(IS_ERR(adev->desc_base))
+ return PTR_ERR(adev->desc_base);
+ /*if(of_property_read_u32(pdev->dev->of_node, "max-burst-size", &adev->max_burst_size))
+ adev->max_burst_size = DEFAULT_MAX_BURST_SIZE;*/
+
+ /*init adma-chan*/
+ INIT_LIST_HEAD(&adev->device.channels);
+ achan = devm_kzalloc(dev, sizeof(struct adma_ch), GFP_KERNEL);
+ if(achan == NULL)
+ return -ENOMEM;
+ phy = devm_kzalloc(dev, sizeof(struct adma_pchan), GFP_KERNEL);
+ if (!phy)
+ return -ENOMEM;
+ phy->base = adev->base;
+ phy->ctrl_base = adev->ctrl_base;
+ phy->vchan = achan;
+ achan->phy = phy;
+ achan->dev = adev->dev;
+ achan->chan.device = &adev->device;
+ spin_lock_init(&achan->desc_lock);
+ spin_lock_init(&adev->phy_lock);
+ INIT_LIST_HEAD(&achan->chain_pending);
+ INIT_LIST_HEAD(&achan->chain_running);
+ achan->status = DMA_COMPLETE;
+ achan->unpack_sample = !of_property_read_bool(pdev->dev.of_node, "hdmi-sample");
+
+ /* register virt channel to dma engine */
+ list_add_tail(&achan->chan.device_node, &adev->device.channels);
+ idata->achan = achan;
+
+ dma_cap_set(DMA_SLAVE, adev->device.cap_mask);
+ dma_cap_set(DMA_CYCLIC, adev->device.cap_mask);
+ adev->device.dev = dev;
+ adev->device.device_tx_status = adma_tx_status;
+ adev->device.device_alloc_chan_resources = adma_alloc_chan_resources;
+ adev->device.device_free_chan_resources = adma_free_chan_resources;
+ adev->device.device_prep_dma_cyclic = adma_prep_cyclic;
+ adev->device.device_issue_pending = adma_issue_pending;
+ adev->device.device_config = adma_config;
+ adev->device.device_terminate_all = adma_terminate_all;
+ adev->device.copy_align = DMAENGINE_ALIGN_8_BYTES;
+ adev->device.src_addr_widths = widths;
+ adev->device.dst_addr_widths = widths;
+ adev->device.directions = BIT(DMA_MEM_TO_DEV) | BIT(DMA_DEV_TO_MEM);
+
+ dma_set_mask(adev->dev, adev->dev->coherent_dma_mask);
+
+ ret = dma_async_device_register(&adev->device);
+ if(ret) {
+ dev_err(adev->device.dev, "unable to register\n");
+ return ret;
+ }
+
+ if(pdev->dev.of_node) {
+ ret = of_dma_controller_register(pdev->dev.of_node,
+ adma_dma_xlate, adev);
+ if (ret < 0) {
+ dev_err(dev, "of_dma_controller_register failed\n");
+ dma_async_device_unregister(&adev->device);
+ return ret;
+ }
+ }
+
+ platform_set_drvdata(pdev, adev);
+ return 0;
+}
+
+static int adma_remove(struct platform_device *pdev)
+{
+ struct adma_dev *adev = platform_get_drvdata(pdev);
+
+ if(pdev->dev.of_node)
+ of_dma_controller_free(pdev->dev.of_node);
+ dma_async_device_unregister(&adev->device);
+ platform_set_drvdata(pdev, NULL);
+ return 0;
+}
+
+static struct platform_driver adma_driver = {
+ .driver = {
+ .name = "k1x-adma",
+ .of_match_table = adma_id_table,
+ },
+ .probe = adma_probe,
+ .remove = adma_remove,
+};
+
+static struct rpmsg_device_id rpmsg_driver_adma_id_table[] = {
+ { .name = "adma-service", .driver_data = 0 },
+ { },
+};
+MODULE_DEVICE_TABLE(rpmsg, rpmsg_driver_adma_id_table);
+
+static int rpmsg_adma_client_cb(struct rpmsg_device *rpdev, void *data,
+ int len, void *priv, u32 src)
+{
+ struct instance_data *idata = dev_get_drvdata(&rpdev->dev);
+ struct adma_ch *chan = idata->achan;
+
+#if 0
+ if (strcmp(data, STARTUP_OK_MSG) == 0) {
+ dev_info(&rpdev->dev, "channel: 0x%x -> 0x%x startup ok!\n",
+ rpdev->src, rpdev->dst);
+ }
+
+ if (strcmp(data, "#") == 0) {
+#endif
+ /* an adma irq happened on the remote side */
+ struct adma_desc_sw *desc;
+ LIST_HEAD(chain_cleanup);
+ unsigned long flags;
+ struct dmaengine_desc_callback cb;
+
+ spin_lock_irqsave(&chan->desc_lock, flags);
+ if (chan->status == DMA_COMPLETE) {
+ spin_unlock_irqrestore(&chan->desc_lock, flags);
+ return 0;
+ }
+ spin_unlock_irqrestore(&chan->desc_lock, flags);
+
+ spin_lock_irqsave(&chan->desc_lock, flags);
+ desc = chan->cyclic_first;
+ dmaengine_desc_get_callback(&desc->async_tx, &cb);
+ spin_unlock_irqrestore(&chan->desc_lock, flags);
+
+ dmaengine_desc_callback_invoke(&cb, NULL);
+// }
+
+ return 0;
+}
+
+static int rpmsg_adma_client_probe(struct rpmsg_device *rpdev)
+{
+ struct instance_data *idata;
+
+ dev_info(&rpdev->dev, "new channel: 0x%x -> 0x%x!\n",
+ rpdev->src, rpdev->dst);
+
+ idata = devm_kzalloc(&rpdev->dev, sizeof(*idata), GFP_KERNEL);
+ if (!idata)
+ return -ENOMEM;
+
+ dev_set_drvdata(&rpdev->dev, idata);
+ idata->rpdev = rpdev;
+
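+ /* hand this rpmsg instance to the platform driver by stashing its pointer in the match table's .data slot */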
+ ((unsigned long long *)(adma_id_table[0].data))[0] = (unsigned long long)idata;
+
+ platform_driver_register(&adma_driver);
+
+ return 0;
+}
+
+static void rpmsg_adma_client_remove(struct rpmsg_device *rpdev)
+{
+ dev_info(&rpdev->dev, "rpmsg adma client driver is removed\n");
+ platform_driver_unregister(&adma_driver);
+}
+
+static struct rpmsg_driver rpmsg_adma_client = {
+ .drv.name = KBUILD_MODNAME,
+ .id_table = rpmsg_driver_adma_id_table,
+ .probe = rpmsg_adma_client_probe,
+ .callback = rpmsg_adma_client_cb,
+ .remove = rpmsg_adma_client_remove,
+};
+module_rpmsg_driver(rpmsg_adma_client);
diff --git a/drivers/dma/bcm2835-dma.c b/drivers/dma/bcm2835-dma.c
index 111111111111..222222222222 100644
--- a/drivers/dma/bcm2835-dma.c
+++ b/drivers/dma/bcm2835-dma.c
@@ -878,7 +878,6 @@ static struct dma_chan *bcm2835_dma_xlate(struct of_phandle_args *spec,
static int bcm2835_dma_probe(struct platform_device *pdev)
{
struct bcm2835_dmadev *od;
- struct resource *res;
void __iomem *base;
int rc;
int i, j;
@@ -902,8 +901,7 @@ static int bcm2835_dma_probe(struct platform_device *pdev)
dma_set_max_seg_size(&pdev->dev, 0x3FFFFFFF);
- res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
- base = devm_ioremap_resource(&pdev->dev, res);
+ base = devm_platform_ioremap_resource(pdev, 0);
if (IS_ERR(base))
return PTR_ERR(base);
diff --git a/drivers/dma/dma-axi-dmac.c b/drivers/dma/dma-axi-dmac.c
index 111111111111..222222222222 100644
--- a/drivers/dma/dma-axi-dmac.c
+++ b/drivers/dma/dma-axi-dmac.c
@@ -910,7 +910,6 @@ static int axi_dmac_probe(struct platform_device *pdev)
{
struct dma_device *dma_dev;
struct axi_dmac *dmac;
- struct resource *res;
struct regmap *regmap;
unsigned int version;
int ret;
@@ -925,8 +924,7 @@ static int axi_dmac_probe(struct platform_device *pdev)
if (dmac->irq == 0)
return -EINVAL;
- res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
- dmac->base = devm_ioremap_resource(&pdev->dev, res);
+ dmac->base = devm_platform_ioremap_resource(pdev, 0);
if (IS_ERR(dmac->base))
return PTR_ERR(dmac->base);
--
Armbian
From 0000000000000000000000000000000000000000 Mon Sep 17 00:00:00 2001
From: Patrick Yavitz <pyavitz@xxxxx.com>
Date: Sat, 22 Jun 2024 07:54:04 -0400
Subject: drivers: dma: dw-axi-dmac: dw-axi-dmac-platform.c
Signed-off-by: Patrick Yavitz <pyavitz@xxxxx.com>
---
drivers/dma/dw-axi-dmac/dw-axi-dmac-platform.c | 70 +-
drivers/dma/dw-axi-dmac/dw-axi-dmac.h | 5 +
drivers/dma/fsl-edma.c | 8 +-
drivers/dma/fsl-qdma.c | 10 +-
drivers/dma/idma64.c | 4 +-
drivers/dma/img-mdc-dma.c | 4 +-
drivers/dma/imx-dma.c | 4 +-
drivers/dma/imx-sdma.c | 4 +-
drivers/dma/mcf-edma.c | 5 +-
drivers/dma/mediatek/mtk-hsdma.c | 4 +-
drivers/dma/mmp_pdma.c | 4 +-
drivers/dma/mmp_pdma_k1x.c | 1665 ++++++++++
drivers/dma/mmp_tdma.c | 4 +-
drivers/dma/moxart-dma.c | 4 +-
drivers/dma/mv_xor_v2.c | 7 +-
drivers/dma/mxs-dma.c | 4 +-
drivers/dma/nbpfaxi.c | 4 +-
drivers/dma/pxa_dma.c | 4 +-
drivers/dma/qcom/bam_dma.c | 4 +-
drivers/dma/sf-pdma/sf-pdma.c | 4 +-
drivers/dma/sh/usb-dmac.c | 4 +-
drivers/dma/stm32-dmamux.c | 4 +-
drivers/dma/stm32-mdma.c | 4 +-
drivers/dma/sun4i-dma.c | 4 +-
drivers/dma/sun6i-dma.c | 4 +-
drivers/dma/tegra210-adma.c | 4 +-
drivers/dma/ti/cppi41.c | 10 +-
drivers/dma/ti/omap-dma.c | 4 +-
drivers/dma/udma.c | 432 +++
drivers/dma/xilinx/zynqmp_dma.c | 4 +-
30 files changed, 2194 insertions(+), 102 deletions(-)
diff --git a/drivers/dma/dw-axi-dmac/dw-axi-dmac-platform.c b/drivers/dma/dw-axi-dmac/dw-axi-dmac-platform.c
index 111111111111..222222222222 100644
--- a/drivers/dma/dw-axi-dmac/dw-axi-dmac-platform.c
+++ b/drivers/dma/dw-axi-dmac/dw-axi-dmac-platform.c
@@ -21,10 +21,12 @@
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/of.h>
+#include <linux/of_device.h>
#include <linux/of_dma.h>
#include <linux/platform_device.h>
#include <linux/pm_runtime.h>
#include <linux/property.h>
+#include <linux/reset.h>
#include <linux/slab.h>
#include <linux/types.h>
@@ -46,6 +48,10 @@
DMA_SLAVE_BUSWIDTH_32_BYTES | \
DMA_SLAVE_BUSWIDTH_64_BYTES)
+#define AXI_DMA_FLAG_HAS_APB_REGS BIT(0)
+#define AXI_DMA_FLAG_HAS_RESETS BIT(1)
+#define AXI_DMA_FLAG_USE_CFG2 BIT(2)
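+/* per-compatible quirk flags; dw_probe() retrieves them via of_device_get_match_data() */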
+
static inline void
axi_dma_iowrite32(struct axi_dma_chip *chip, u32 reg, u32 val)
{
@@ -86,7 +92,7 @@ static inline void axi_chan_config_write(struct axi_dma_chan *chan,
cfg_lo = (config->dst_multblk_type << CH_CFG_L_DST_MULTBLK_TYPE_POS |
config->src_multblk_type << CH_CFG_L_SRC_MULTBLK_TYPE_POS);
- if (chan->chip->dw->hdata->reg_map_8_channels) {
+ if (!chan->chip->dw->hdata->use_cfg2) {
cfg_hi = config->tt_fc << CH_CFG_H_TT_FC_POS |
config->hs_sel_src << CH_CFG_H_HS_SEL_SRC_POS |
config->hs_sel_dst << CH_CFG_H_HS_SEL_DST_POS |
@@ -606,6 +612,7 @@ static int dw_axi_dma_set_hw_desc(struct axi_dma_chan *chan,
size_t block_ts;
u32 ctllo, ctlhi;
u32 burst_len;
+ u32 burst_trans_len;
axi_block_ts = chan->chip->dw->hdata->block_size[chan->id];
@@ -669,8 +676,14 @@ static int dw_axi_dma_set_hw_desc(struct axi_dma_chan *chan,
hw_desc->lli->block_ts_lo = cpu_to_le32(block_ts - 1);
- ctllo |= DWAXIDMAC_BURST_TRANS_LEN_4 << CH_CTL_L_DST_MSIZE_POS |
- DWAXIDMAC_BURST_TRANS_LEN_4 << CH_CTL_L_SRC_MSIZE_POS;
+ if (chan->fixed_burst_trans_len)
+ burst_trans_len = chan->burst_trans_len;
+ else
+ burst_trans_len = DWAXIDMAC_BURST_TRANS_LEN_4;
+
+ ctllo |= burst_trans_len << CH_CTL_L_DST_MSIZE_POS |
+ burst_trans_len << CH_CTL_L_SRC_MSIZE_POS;
+
hw_desc->lli->ctl_lo = cpu_to_le32(ctllo);
set_desc_src_master(hw_desc);
@@ -1138,7 +1151,7 @@ static int dma_chan_terminate_all(struct dma_chan *dchan)
axi_chan_disable(chan);
ret = readl_poll_timeout_atomic(chan->chip->regs + DMAC_CHEN, val,
- !(val & chan_active), 1000, 10000);
+ !(val & chan_active), 1000, 50000);
if (ret == -ETIMEDOUT)
dev_warn(dchan2dev(dchan),
"%s failed to stop\n", axi_chan_name(chan));
@@ -1290,6 +1303,13 @@ static struct dma_chan *dw_axi_dma_of_xlate(struct of_phandle_args *dma_spec,
chan = dchan_to_axi_dma_chan(dchan);
chan->hw_handshake_num = dma_spec->args[0];
+
+ /* some peripherals need a fixed burst transaction length */
+ if (dma_spec->args_count == 2 && dma_spec->args[1] > 0) {
+ chan->fixed_burst_trans_len = true;
+ chan->burst_trans_len = dma_spec->args[1];
+ }
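+ /* e.g. "dmas = <&dmac 12 8>;" (values hypothetical): a second cell > 0 pins the burst msize */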
+
return dchan;
}
@@ -1360,16 +1380,24 @@ static int parse_device_properties(struct axi_dma_chip *chip)
chip->dw->hdata->axi_rw_burst_len = tmp;
}
+ /* get the number of handshake interfaces and choose the register map */
+ ret = device_property_read_u32(dev, "snps,num-hs-if", &tmp);
+ if (!ret)
+ chip->dw->hdata->nr_hs_if = tmp;
+ if (chip->dw->hdata->nr_channels > DMA_REG_MAP_CH_REF ||
+ chip->dw->hdata->nr_hs_if > DMA_REG_MAP_HS_IF_REF)
+ chip->dw->hdata->use_cfg2 = true;
+
return 0;
}
static int dw_probe(struct platform_device *pdev)
{
- struct device_node *node = pdev->dev.of_node;
struct axi_dma_chip *chip;
- struct resource *mem;
struct dw_axi_dma *dw;
struct dw_axi_dma_hcfg *hdata;
+ struct reset_control *resets;
+ unsigned int flags;
u32 i;
int ret;
@@ -1393,17 +1421,27 @@ static int dw_probe(struct platform_device *pdev)
if (chip->irq < 0)
return chip->irq;
- mem = platform_get_resource(pdev, IORESOURCE_MEM, 0);
- chip->regs = devm_ioremap_resource(chip->dev, mem);
+ chip->regs = devm_platform_ioremap_resource(pdev, 0);
if (IS_ERR(chip->regs))
return PTR_ERR(chip->regs);
- if (of_device_is_compatible(node, "intel,kmb-axi-dma")) {
+ flags = (uintptr_t)of_device_get_match_data(&pdev->dev);
+ if (flags & AXI_DMA_FLAG_HAS_APB_REGS) {
chip->apb_regs = devm_platform_ioremap_resource(pdev, 1);
if (IS_ERR(chip->apb_regs))
return PTR_ERR(chip->apb_regs);
}
+ if (flags & AXI_DMA_FLAG_HAS_RESETS) {
+ resets = devm_reset_control_array_get_exclusive(&pdev->dev);
+ if (IS_ERR(resets))
+ return PTR_ERR(resets);
+
+ ret = reset_control_deassert(resets);
+ if (ret)
+ return ret;
+ }
+
chip->core_clk = devm_clk_get(chip->dev, "core-clk");
if (IS_ERR(chip->core_clk))
return PTR_ERR(chip->core_clk);
@@ -1554,8 +1592,18 @@ static const struct dev_pm_ops dw_axi_dma_pm_ops = {
};
static const struct of_device_id dw_dma_of_id_table[] = {
- { .compatible = "snps,axi-dma-1.01a" },
- { .compatible = "intel,kmb-axi-dma" },
+ {
+ .compatible = "snps,axi-dma-1.01a"
+ }, {
+ .compatible = "intel,kmb-axi-dma",
+ .data = (void *)AXI_DMA_FLAG_HAS_APB_REGS,
+ }, {
+ .compatible = "starfive,jh7110-axi-dma",
+ .data = (void *)(AXI_DMA_FLAG_HAS_RESETS | AXI_DMA_FLAG_USE_CFG2),
+ }, {
+ .compatible = "spacemit,k1pro-axi-dma",
+ .data = (void *)AXI_DMA_FLAG_HAS_RESETS,
+ },
{}
};
MODULE_DEVICE_TABLE(of, dw_dma_of_id_table);
diff --git a/drivers/dma/dw-axi-dmac/dw-axi-dmac.h b/drivers/dma/dw-axi-dmac/dw-axi-dmac.h
index 111111111111..222222222222 100644
--- a/drivers/dma/dw-axi-dmac/dw-axi-dmac.h
+++ b/drivers/dma/dw-axi-dmac/dw-axi-dmac.h
@@ -25,6 +25,7 @@
struct dw_axi_dma_hcfg {
u32 nr_channels;
u32 nr_masters;
+ u32 nr_hs_if;
u32 m_data_width;
u32 block_size[DMAC_MAX_CHANNELS];
u32 priority[DMAC_MAX_CHANNELS];
@@ -33,6 +34,7 @@ struct dw_axi_dma_hcfg {
/* Register map for DMAX_NUM_CHANNELS <= 8 */
bool reg_map_8_channels;
bool restrict_axi_burst_len;
+ bool use_cfg2;
};
struct axi_dma_chan {
@@ -40,6 +42,7 @@ struct axi_dma_chan {
void __iomem *chan_regs;
u8 id;
u8 hw_handshake_num;
+ s8 burst_trans_len;
atomic_t descs_allocated;
struct dma_pool *desc_pool;
@@ -48,6 +51,7 @@ struct axi_dma_chan {
struct axi_dma_desc *desc;
struct dma_slave_config config;
enum dma_transfer_direction direction;
+ bool fixed_burst_trans_len;
bool cyclic;
/* these other elements are all protected by vc.lock */
bool is_paused;
@@ -204,6 +208,7 @@ static inline struct axi_dma_chan *dchan_to_axi_dma_chan(struct dma_chan *dchan)
#define DMA_APB_HS_SEL_MASK 0xFF /* HW handshake select masks */
#define MAX_BLOCK_SIZE 0x1000 /* 1024 blocks * 4 bytes data width */
#define DMA_REG_MAP_CH_REF 0x08 /* Channel count to choose register map */
+#define DMA_REG_MAP_HS_IF_REF 0x10 /* handshake num to choose register map */
/* DMAC_CFG */
#define DMAC_EN_POS 0
diff --git a/drivers/dma/fsl-edma.c b/drivers/dma/fsl-edma.c
index 111111111111..222222222222 100644
--- a/drivers/dma/fsl-edma.c
+++ b/drivers/dma/fsl-edma.c
@@ -272,7 +272,6 @@ static int fsl_edma_probe(struct platform_device *pdev)
const struct fsl_edma_drvdata *drvdata = NULL;
struct fsl_edma_chan *fsl_chan;
struct edma_regs *regs;
- struct resource *res;
int len, chans;
int ret, i;
@@ -298,8 +297,7 @@ static int fsl_edma_probe(struct platform_device *pdev)
fsl_edma->n_chans = chans;
mutex_init(&fsl_edma->fsl_edma_mutex);
- res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
- fsl_edma->membase = devm_ioremap_resource(&pdev->dev, res);
+ fsl_edma->membase = devm_platform_ioremap_resource(pdev, 0);
if (IS_ERR(fsl_edma->membase))
return PTR_ERR(fsl_edma->membase);
@@ -323,8 +321,8 @@ static int fsl_edma_probe(struct platform_device *pdev)
for (i = 0; i < fsl_edma->drvdata->dmamuxs; i++) {
char clkname[32];
- res = platform_get_resource(pdev, IORESOURCE_MEM, 1 + i);
- fsl_edma->muxbase[i] = devm_ioremap_resource(&pdev->dev, res);
+ fsl_edma->muxbase[i] = devm_platform_ioremap_resource(pdev,
+ 1 + i);
if (IS_ERR(fsl_edma->muxbase[i])) {
/* on error: disable all previously enabled clks */
fsl_disable_clocks(fsl_edma, i);
diff --git a/drivers/dma/fsl-qdma.c b/drivers/dma/fsl-qdma.c
index 111111111111..222222222222 100644
--- a/drivers/dma/fsl-qdma.c
+++ b/drivers/dma/fsl-qdma.c
@@ -1121,7 +1121,6 @@ static int fsl_qdma_probe(struct platform_device *pdev)
int ret, i;
int blk_num, blk_off;
u32 len, chans, queues;
- struct resource *res;
struct fsl_qdma_chan *fsl_chan;
struct fsl_qdma_engine *fsl_qdma;
struct device_node *np = pdev->dev.of_node;
@@ -1185,18 +1184,15 @@ static int fsl_qdma_probe(struct platform_device *pdev)
if (!fsl_qdma->status[i])
return -ENOMEM;
}
- res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
- fsl_qdma->ctrl_base = devm_ioremap_resource(&pdev->dev, res);
+ fsl_qdma->ctrl_base = devm_platform_ioremap_resource(pdev, 0);
if (IS_ERR(fsl_qdma->ctrl_base))
return PTR_ERR(fsl_qdma->ctrl_base);
- res = platform_get_resource(pdev, IORESOURCE_MEM, 1);
- fsl_qdma->status_base = devm_ioremap_resource(&pdev->dev, res);
+ fsl_qdma->status_base = devm_platform_ioremap_resource(pdev, 1);
if (IS_ERR(fsl_qdma->status_base))
return PTR_ERR(fsl_qdma->status_base);
- res = platform_get_resource(pdev, IORESOURCE_MEM, 2);
- fsl_qdma->block_base = devm_ioremap_resource(&pdev->dev, res);
+ fsl_qdma->block_base = devm_platform_ioremap_resource(pdev, 2);
if (IS_ERR(fsl_qdma->block_base))
return PTR_ERR(fsl_qdma->block_base);
fsl_qdma->queue = fsl_qdma_alloc_queue_resources(pdev, fsl_qdma);
diff --git a/drivers/dma/idma64.c b/drivers/dma/idma64.c
index 111111111111..222222222222 100644
--- a/drivers/dma/idma64.c
+++ b/drivers/dma/idma64.c
@@ -635,7 +635,6 @@ static int idma64_platform_probe(struct platform_device *pdev)
struct idma64_chip *chip;
struct device *dev = &pdev->dev;
struct device *sysdev = dev->parent;
- struct resource *mem;
int ret;
chip = devm_kzalloc(dev, sizeof(*chip), GFP_KERNEL);
@@ -646,8 +645,7 @@ static int idma64_platform_probe(struct platform_device *pdev)
if (chip->irq < 0)
return chip->irq;
- mem = platform_get_resource(pdev, IORESOURCE_MEM, 0);
- chip->regs = devm_ioremap_resource(dev, mem);
+ chip->regs = devm_platform_ioremap_resource(pdev, 0);
if (IS_ERR(chip->regs))
return PTR_ERR(chip->regs);
diff --git a/drivers/dma/img-mdc-dma.c b/drivers/dma/img-mdc-dma.c
index 111111111111..222222222222 100644
--- a/drivers/dma/img-mdc-dma.c
+++ b/drivers/dma/img-mdc-dma.c
@@ -886,7 +886,6 @@ static int img_mdc_runtime_resume(struct device *dev)
static int mdc_dma_probe(struct platform_device *pdev)
{
struct mdc_dma *mdma;
- struct resource *res;
unsigned int i;
u32 val;
int ret;
@@ -898,8 +897,7 @@ static int mdc_dma_probe(struct platform_device *pdev)
mdma->soc = of_device_get_match_data(&pdev->dev);
- res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
- mdma->regs = devm_ioremap_resource(&pdev->dev, res);
+ mdma->regs = devm_platform_ioremap_resource(pdev, 0);
if (IS_ERR(mdma->regs))
return PTR_ERR(mdma->regs);
diff --git a/drivers/dma/imx-dma.c b/drivers/dma/imx-dma.c
index 111111111111..222222222222 100644
--- a/drivers/dma/imx-dma.c
+++ b/drivers/dma/imx-dma.c
@@ -1038,7 +1038,6 @@ static struct dma_chan *imxdma_xlate(struct of_phandle_args *dma_spec,
static int __init imxdma_probe(struct platform_device *pdev)
{
struct imxdma_engine *imxdma;
- struct resource *res;
int ret, i;
int irq, irq_err;
@@ -1049,8 +1048,7 @@ static int __init imxdma_probe(struct platform_device *pdev)
imxdma->dev = &pdev->dev;
imxdma->devtype = (uintptr_t)of_device_get_match_data(&pdev->dev);
- res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
- imxdma->base = devm_ioremap_resource(&pdev->dev, res);
+ imxdma->base = devm_platform_ioremap_resource(pdev, 0);
if (IS_ERR(imxdma->base))
return PTR_ERR(imxdma->base);
diff --git a/drivers/dma/imx-sdma.c b/drivers/dma/imx-sdma.c
index 111111111111..222222222222 100644
--- a/drivers/dma/imx-sdma.c
+++ b/drivers/dma/imx-sdma.c
@@ -2169,7 +2169,6 @@ static int sdma_probe(struct platform_device *pdev)
const char *fw_name;
int ret;
int irq;
- struct resource *iores;
struct resource spba_res;
int i;
struct sdma_engine *sdma;
@@ -2192,8 +2191,7 @@ static int sdma_probe(struct platform_device *pdev)
if (irq < 0)
return irq;
- iores = platform_get_resource(pdev, IORESOURCE_MEM, 0);
- sdma->regs = devm_ioremap_resource(&pdev->dev, iores);
+ sdma->regs = devm_platform_ioremap_resource(pdev, 0);
if (IS_ERR(sdma->regs))
return PTR_ERR(sdma->regs);
diff --git a/drivers/dma/mcf-edma.c b/drivers/dma/mcf-edma.c
index 111111111111..222222222222 100644
--- a/drivers/dma/mcf-edma.c
+++ b/drivers/dma/mcf-edma.c
@@ -182,7 +182,6 @@ static int mcf_edma_probe(struct platform_device *pdev)
struct fsl_edma_engine *mcf_edma;
struct fsl_edma_chan *mcf_chan;
struct edma_regs *regs;
- struct resource *res;
int ret, i, len, chans;
pdata = dev_get_platdata(&pdev->dev);
@@ -211,9 +210,7 @@ static int mcf_edma_probe(struct platform_device *pdev)
mutex_init(&mcf_edma->fsl_edma_mutex);
- res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
-
- mcf_edma->membase = devm_ioremap_resource(&pdev->dev, res);
+ mcf_edma->membase = devm_platform_ioremap_resource(pdev, 0);
if (IS_ERR(mcf_edma->membase))
return PTR_ERR(mcf_edma->membase);
diff --git a/drivers/dma/mediatek/mtk-hsdma.c b/drivers/dma/mediatek/mtk-hsdma.c
index 111111111111..222222222222 100644
--- a/drivers/dma/mediatek/mtk-hsdma.c
+++ b/drivers/dma/mediatek/mtk-hsdma.c
@@ -896,7 +896,6 @@ static int mtk_hsdma_probe(struct platform_device *pdev)
struct mtk_hsdma_device *hsdma;
struct mtk_hsdma_vchan *vc;
struct dma_device *dd;
- struct resource *res;
int i, err;
hsdma = devm_kzalloc(&pdev->dev, sizeof(*hsdma), GFP_KERNEL);
@@ -905,8 +904,7 @@ static int mtk_hsdma_probe(struct platform_device *pdev)
dd = &hsdma->ddev;
- res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
- hsdma->base = devm_ioremap_resource(&pdev->dev, res);
+ hsdma->base = devm_platform_ioremap_resource(pdev, 0);
if (IS_ERR(hsdma->base))
return PTR_ERR(hsdma->base);
diff --git a/drivers/dma/mmp_pdma.c b/drivers/dma/mmp_pdma.c
index 111111111111..222222222222 100644
--- a/drivers/dma/mmp_pdma.c
+++ b/drivers/dma/mmp_pdma.c
@@ -1022,7 +1022,6 @@ static int mmp_pdma_probe(struct platform_device *op)
struct mmp_pdma_device *pdev;
const struct of_device_id *of_id;
struct mmp_dma_platdata *pdata = dev_get_platdata(&op->dev);
- struct resource *iores;
int i, ret, irq = 0;
int dma_channels = 0, irq_num = 0;
const enum dma_slave_buswidth widths =
@@ -1037,8 +1036,7 @@ static int mmp_pdma_probe(struct platform_device *op)
spin_lock_init(&pdev->phy_lock);
- iores = platform_get_resource(op, IORESOURCE_MEM, 0);
- pdev->base = devm_ioremap_resource(pdev->dev, iores);
+ pdev->base = devm_platform_ioremap_resource(op, 0);
if (IS_ERR(pdev->base))
return PTR_ERR(pdev->base);
diff --git a/drivers/dma/mmp_pdma_k1x.c b/drivers/dma/mmp_pdma_k1x.c
new file mode 100644
index 000000000000..111111111111
--- /dev/null
+++ b/drivers/dma/mmp_pdma_k1x.c
@@ -0,0 +1,1665 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * Copyright 2012 Marvell International Ltd.
+ */
+
+#include <linux/err.h>
+#include <linux/module.h>
+#include <linux/init.h>
+#include <linux/types.h>
+#include <linux/interrupt.h>
+#include <linux/dma-mapping.h>
+#include <linux/slab.h>
+#include <linux/dmaengine.h>
+#include <linux/platform_device.h>
+#include <linux/device.h>
+#include <linux/platform_data/mmp_dma.h>
+#include <linux/dmapool.h>
+#include <linux/clk.h>
+#include <linux/reset.h>
+#include <linux/of_device.h>
+#include <linux/of_dma.h>
+#include <linux/of.h>
+
+#include <linux/delay.h>
+#include <linux/pm_runtime.h>
+#include <linux/pm_qos.h>
+
+#include "dmaengine.h"
+
+#define DDADRH(n) (0x0300 + ((n) << 4))
+#define DSADRH(n) (0x0304 + ((n) << 4))
+#define DTADRH(n) (0x0308 + ((n) << 4))
+#define DCSR_LPAEEN BIT(21) /* Long Physical Address Extension enable */
+#define DRCMR_INVALID 100 /* Max DMA request number + 1 */
+#define DCMD_BURST64 (4 << 16) /* 64 byte burst */
+
+#define DCSR 0x0000
+#define DALGN 0x00a0
+#define DINT 0x00f0
+#define DDADR 0x0200
+#define DSADR(n) (0x0204 + ((n) << 4))
+#define DTADR(n) (0x0208 + ((n) << 4))
+#define DCMD 0x020c
+
+#define DCSR_RUN BIT(31) /* Run Bit (read / write) */
+#define DCSR_NODESC BIT(30) /* No-Descriptor Fetch (read / write) */
+#define DCSR_STOPIRQEN BIT(29) /* Stop Interrupt Enable (read / write) */
+#define DCSR_REQPEND BIT(8) /* Request Pending (read-only) */
+#define DCSR_STOPSTATE BIT(3) /* Stop State (read-only) */
+#define DCSR_ENDINTR BIT(2) /* End Interrupt (read / write) */
+#define DCSR_STARTINTR BIT(1) /* Start Interrupt (read / write) */
+#define DCSR_BUSERR BIT(0) /* Bus Error Interrupt (read / write) */
+
+#define DCSR_EORIRQEN BIT(28) /* End of Receive Interrupt Enable (R/W) */
+#define DCSR_EORJMPEN BIT(27) /* Jump to next descriptor on EOR */
+#define DCSR_EORSTOPEN BIT(26) /* STOP on an EOR */
+#define DCSR_SETCMPST BIT(25) /* Set Descriptor Compare Status */
+#define DCSR_CLRCMPST BIT(24) /* Clear Descriptor Compare Status */
+#define DCSR_CMPST BIT(10) /* The Descriptor Compare Status */
+#define DCSR_EORINTR BIT(9) /* The end of Receive */
+
+#define DRCMR(n) ((((n) < 64) ? 0x0100 : 0x1100) + (((n) & 0x3f) << 2))
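+/* request-to-channel map: requests 0-63 sit at offset 0x0100, 64 and above at 0x1100 */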
+#define DRCMR_MAPVLD BIT(7) /* Map Valid (read / write) */
+#define DRCMR_CHLNUM 0x1f /* mask for Channel Number (read / write) */
+
+#define DDADR_DESCADDR 0xfffffff0 /* Address of next descriptor (mask) */
+#define DDADR_STOP BIT(0) /* Stop (read / write) */
+
+#define DCMD_INCSRCADDR BIT(31) /* Source Address Increment Setting. */
+#define DCMD_INCTRGADDR BIT(30) /* Target Address Increment Setting. */
+#define DCMD_FLOWSRC BIT(29) /* Flow Control by the source. */
+#define DCMD_FLOWTRG BIT(28) /* Flow Control by the target. */
+#define DCMD_STARTIRQEN BIT(22) /* Start Interrupt Enable */
+#define DCMD_ENDIRQEN BIT(21) /* End Interrupt Enable */
+#define DCMD_ENDIAN BIT(18) /* Device Endian-ness. */
+#define DCMD_BURST8 (1 << 16) /* 8 byte burst */
+#define DCMD_BURST16 (2 << 16) /* 16 byte burst */
+#define DCMD_BURST32 (3 << 16) /* 32 byte burst */
+#define DCMD_WIDTH1 (1 << 14) /* 1 byte width */
+#define DCMD_WIDTH2 (2 << 14) /* 2 byte width (HalfWord) */
+#define DCMD_WIDTH4 (3 << 14) /* 4 byte width (Word) */
+#define DCMD_LENGTH 0x01fff /* length mask (max = 8K - 1) */
+
+#define PDMA_MAX_DESC_BYTES DCMD_LENGTH
+
+#ifdef CONFIG_SPACEMIT_PDMA_SUPPORT_64BIT
+struct mmp_pdma_desc_hw {
+ u32 ddadr; /* Points to the next descriptor + flags */
+ u32 dsadr; /* DSADR value for the current transfer */
+ u32 dtadr; /* DTADR value for the current transfer */
+ u32 dcmd; /* DCMD value for the current transfer */
+ u32 ddadrh; /* High 32 bits of the next descriptor address */
+ u32 dsadrh; /* High 32 bits of the source address */
+ u32 dtadrh; /* High 32 bits of the target address */
+ u32 rsvd; /* Reserved */
+} __aligned(64);
+#else
+struct mmp_pdma_desc_hw {
+ u32 ddadr; /* Points to the next descriptor + flags */
+ u32 dsadr; /* DSADR value for the current transfer */
+ u32 dtadr; /* DTADR value for the current transfer */
+ u32 dcmd; /* DCMD value for the current transfer */
+} __aligned(32);
+#endif
+
+struct mmp_pdma_desc_sw {
+ struct mmp_pdma_desc_hw desc;
+ struct list_head node;
+ struct list_head tx_list;
+ struct dma_async_tx_descriptor async_tx;
+};
+
+struct mmp_pdma_phy;
+
+struct mmp_pdma_chan {
+ struct device *dev;
+ struct dma_chan chan;
+ struct dma_async_tx_descriptor desc;
+ struct mmp_pdma_phy *phy;
+ enum dma_transfer_direction dir;
+ struct dma_slave_config slave_config;
+
+ struct mmp_pdma_desc_sw *cyclic_first; /* first desc_sw if channel
+ * is in cyclic mode */
+
+ /* channel's basic info */
+ struct tasklet_struct tasklet;
+ u32 dcmd;
+ u32 drcmr;
+ u32 dev_addr;
+
+ /* list for desc */
+ spinlock_t desc_lock; /* Descriptor list lock */
+ struct list_head chain_pending; /* Link descriptors queue for pending */
+ struct list_head chain_running; /* Link descriptors queue for running */
+ bool idle; /* channel idle flag */
+ bool byte_align;
+
+ int user_do_qos;
+ int qos_count; /* Per-channel qos count */
+ enum dma_status status; /* channel state machine */
+ u32 bytes_residue;
+
+ struct dma_pool *desc_pool; /* Descriptors pool */
+};
+
+struct mmp_pdma_phy {
+ int idx;
+ void __iomem *base;
+ struct mmp_pdma_chan *vchan;
+};
+
+struct reserved_chan {
+ int chan_id;
+ int drcmr;
+};
+
+struct mmp_pdma_device {
+ int dma_channels;
+ int nr_reserved_channels;
+ struct reserved_chan *reserved_channels;
+ s32 lpm_qos;
+ struct clk *clk;
+ struct reset_control *resets;
+ int max_burst_size;
+ void __iomem *base;
+ struct device *dev;
+ struct dma_device device;
+ struct mmp_pdma_phy *phy;
+ spinlock_t phy_lock; /* protect alloc/free phy channels */
+};
+
+#define tx_to_mmp_pdma_desc(tx) \
+ container_of(tx, struct mmp_pdma_desc_sw, async_tx)
+#define to_mmp_pdma_desc(lh) \
+ container_of(lh, struct mmp_pdma_desc_sw, node)
+#define to_mmp_pdma_chan(dchan) \
+ container_of(dchan, struct mmp_pdma_chan, chan)
+#define to_mmp_pdma_dev(dmadev) \
+ container_of(dmadev, struct mmp_pdma_device, device)
+
+static void mmp_pdma_qos_get(struct mmp_pdma_chan *chan);
+static void mmp_pdma_qos_put(struct mmp_pdma_chan *chan);
+
+#define QSPI_PHY_CHAN 15
+
+static int mmp_pdma_config_write(struct dma_chan *dchan,
+ struct dma_slave_config *cfg,
+ enum dma_transfer_direction direction);
+
+static void set_desc(struct mmp_pdma_phy *phy, dma_addr_t addr)
+{
+#ifdef CONFIG_SPACEMIT_PDMA_SUPPORT_64BIT
+ u32 ddadrh;
+#endif
+ u32 reg = (phy->idx << 4) + DDADR;
+
+ writel(addr & 0xffffffff, phy->base + reg);
+#ifdef CONFIG_SPACEMIT_PDMA_SUPPORT_64BIT
+ /* config higher bits for desc address */
+ ddadrh = (addr >> 32);
+ writel(ddadrh, phy->base + DDADRH(phy->idx));
+#endif
+}
+
+static void enable_chan(struct mmp_pdma_phy *phy)
+{
+ u32 reg, dalgn;
+ u32 dcsr;
+ unsigned long flags;
+ struct mmp_pdma_device *pdev;
+
+ if (phy == NULL)
+ return;
+
+ if (!phy->vchan)
+ return;
+
+ pdev = to_mmp_pdma_dev(phy->vchan->chan.device);
+
+ spin_lock_irqsave(&pdev->phy_lock, flags);
+
+ reg = DRCMR(phy->vchan->drcmr);
+ writel(DRCMR_MAPVLD | phy->idx, phy->base + reg);
+
+ dalgn = readl(phy->base + DALGN);
+ if (phy->vchan->byte_align)
+ dalgn |= 1 << phy->idx;
+ else
+ dalgn &= ~(1 << phy->idx);
+ writel(dalgn, phy->base + DALGN);
+
+ reg = (phy->idx << 2) + DCSR;
+
+ dcsr = readl(phy->base + reg);
+ dcsr |= (DCSR_RUN | DCSR_EORIRQEN | DCSR_EORSTOPEN);
+#ifdef CONFIG_SPACEMIT_PDMA_SUPPORT_64BIT
+ /* use long descriptor mode: set DCSR_LPAEEN bit */
+ dcsr |= DCSR_LPAEEN;
+#endif
+ writel(dcsr, phy->base + reg);
+
+ spin_unlock_irqrestore(&pdev->phy_lock, flags);
+}
+
+static void disable_chan(struct mmp_pdma_phy *phy)
+{
+ u32 reg;
+ u32 dcsr, cnt = 1000;
+
+ if (!phy)
+ return;
+
+ reg = (phy->idx << 2) + DCSR;
+
+ dcsr = readl(phy->base + reg);
+ dcsr &= ~(DCSR_RUN | DCSR_EORIRQEN | DCSR_EORSTOPEN);
+#ifdef CONFIG_SPACEMIT_PDMA_SUPPORT_64BIT
+ /* use long descriptor mode: set DCSR_LPAEEN bit */
+ dcsr &= ~DCSR_LPAEEN;
+#endif
+ writel(dcsr, phy->base + reg);
+
+ /* ensure dma is stopped. */
+ dcsr = readl(phy->base + reg);
+ while (!(dcsr & DCSR_STOPSTATE) && --cnt) {
+ udelay(10);
+ dcsr = readl(phy->base + reg);
+ }
+
+ WARN_ON(!cnt);
+}
+
+static int clear_chan_irq(struct mmp_pdma_phy *phy)
+{
+ u32 dcsr;
+ u32 dint = readl(phy->base + DINT);
+ u32 reg = (phy->idx << 2) + DCSR;
+
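+ /* DINT holds one status bit per physical channel; bail out if ours is not set */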
+ if (!(dint & BIT(phy->idx)))
+ return -EAGAIN;
+
+ /* clear irq */
+ dcsr = readl(phy->base + reg);
+ writel(dcsr, phy->base + reg);
+ if ((dcsr & DCSR_BUSERR) && (phy->vchan))
+ dev_warn(phy->vchan->dev, "DCSR_BUSERR\n");
+
+ return 0;
+}
+
+static irqreturn_t mmp_pdma_chan_handler(int irq, void *dev_id)
+{
+ struct mmp_pdma_phy *phy = dev_id;
+ struct mmp_pdma_chan *pchan = phy->vchan;
+
+ if (clear_chan_irq(phy) != 0)
+ return IRQ_NONE;
+
+ if (pchan)
+ tasklet_schedule(&pchan->tasklet);
+
+ return IRQ_HANDLED;
+}
+
+static bool is_channel_reserved(struct mmp_pdma_device *pdev, int chan_id)
+{
+ int i;
+
+ for (i = 0; i < pdev->nr_reserved_channels; i++) {
+ if (chan_id == pdev->reserved_channels[i].chan_id)
+ return true;
+ }
+
+ return false;
+}
+
+static struct mmp_pdma_phy *lookup_phy_for_drcmr(struct mmp_pdma_device *pdev, int drcmr)
+{
+ int i;
+ int chan_id;
+ struct mmp_pdma_phy *phy;
+
+ for (i = 0; i < pdev->nr_reserved_channels; i++) {
+ if (drcmr == pdev->reserved_channels[i].drcmr) {
+ chan_id = pdev->reserved_channels[i].chan_id;
+ phy = &pdev->phy[chan_id];
+ return phy;
+ }
+ }
+
+ return NULL;
+}
+
+static irqreturn_t mmp_pdma_int_handler(int irq, void *dev_id)
+{
+ struct mmp_pdma_device *pdev = dev_id;
+ struct mmp_pdma_phy *phy;
+ u32 dint = readl(pdev->base + DINT);
+ int i, ret;
+ int irq_num = 0;
+ unsigned long flags;
+
+ while (dint) {
+ i = __ffs(dint);
+ /* only handle interrupts belonging to the pdma driver */
+ if (i >= pdev->dma_channels)
+ break;
+
+ dint &= (dint - 1);
+ phy = &pdev->phy[i];
+ spin_lock_irqsave(&pdev->phy_lock, flags);
+
+ ret = mmp_pdma_chan_handler(irq, phy);
+
+ spin_unlock_irqrestore(&pdev->phy_lock, flags);
+ if (ret == IRQ_HANDLED)
+ irq_num++;
+ }
+
+ if (irq_num)
+ return IRQ_HANDLED;
+
+ return IRQ_NONE;
+}
+
+/* lookup free phy channel as descending priority */
+static struct mmp_pdma_phy *lookup_phy(struct mmp_pdma_chan *pchan)
+{
+ int prio, i;
+ struct mmp_pdma_device *pdev = to_mmp_pdma_dev(pchan->chan.device);
+ struct mmp_pdma_phy *phy, *found = NULL;
+ unsigned long flags;
+
+ /*
+ * dma channel priorities
+ * ch 0 - 3, 16 - 19 <--> (0)
+ * ch 4 - 7, 20 - 23 <--> (1)
+ * ch 8 - 11, 24 - 27 <--> (2)
+ * ch 12 - 15, 28 - 31 <--> (3)
+ */
+
+ spin_lock_irqsave(&pdev->phy_lock, flags);
+
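+ /* reserved requesters are pinned to dedicated physical channels; try those first */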
+ phy = lookup_phy_for_drcmr(pdev, pchan->drcmr);
+
+ if (phy != NULL) {
+ if (!phy->vchan) {
+ phy->vchan = pchan;
+ found = phy;
+ }
+
+ goto out_unlock;
+ }
+
+ for (prio = 0; prio <= ((pdev->dma_channels - 1) & 0xf) >> 2; prio++) {
+ for (i = 0; i < pdev->dma_channels; i++) {
+ if (prio != (i & 0xf) >> 2)
+ continue;
+
+ if (is_channel_reserved(pdev, i))
+ continue;
+ phy = &pdev->phy[i];
+ if (!phy->vchan) {
+ phy->vchan = pchan;
+ found = phy;
+ goto out_unlock;
+ }
+ }
+ }
+
+out_unlock:
+ spin_unlock_irqrestore(&pdev->phy_lock, flags);
+ return found;
+}
+
+static void mmp_pdma_free_phy(struct mmp_pdma_chan *pchan)
+{
+ struct mmp_pdma_device *pdev = to_mmp_pdma_dev(pchan->chan.device);
+ unsigned long flags;
+ u32 reg;
+
+ if (!pchan->phy)
+ return;
+
+ /* clear the channel mapping in DRCMR */
+ reg = DRCMR(pchan->drcmr);
+ writel(0, pchan->phy->base + reg);
+
+ spin_lock_irqsave(&pdev->phy_lock, flags);
+ pchan->phy->vchan = NULL;
+ pchan->phy = NULL;
+
+ spin_unlock_irqrestore(&pdev->phy_lock, flags);
+}
+
+/*
+ * start_pending_queue - transfer any pending transactions
+ * pending list ==> running list
+ */
+static int start_pending_queue(struct mmp_pdma_chan *chan)
+{
+ struct mmp_pdma_desc_sw *desc;
+ struct mmp_pdma_desc_sw *_desc;
+
+ /* still in running, irq will start the pending list */
+ if (chan->status == DMA_IN_PROGRESS) {
+ dev_dbg(chan->dev, "DMA controller still busy\n");
+ return -1;
+ }
+
+ if (list_empty(&chan->chain_pending)) {
+ /* chance to re-fetch phy channel with higher prio */
+ mmp_pdma_free_phy(chan);
+ dev_dbg(chan->dev, "no pending list\n");
+
+ return -1;
+ }
+
+ if (!chan->phy) {
+ chan->phy = lookup_phy(chan);
+ if (!chan->phy) {
+ dev_dbg(chan->dev, "no free dma channel\n");
+
+ return -1;
+ }
+ }
+
+ /*
+ * pending -> running
+ * reinitialize the pending list
+ */
+ list_for_each_entry_safe(desc, _desc, &chan->chain_pending, node) {
+ list_del(&desc->node);
+ list_add_tail(&desc->node, &chan->chain_running);
+ if (desc->desc.ddadr & DDADR_STOP)
+ break;
+ }
+
+ desc = list_first_entry(&chan->chain_running,
+ struct mmp_pdma_desc_sw, node);
+
+ /*
+ * Program the descriptor's address into the DMA controller,
+ * then start the DMA transaction
+ */
+ set_desc(chan->phy, desc->async_tx.phys);
+ enable_chan(chan->phy);
+ chan->idle = false;
+ chan->status = DMA_IN_PROGRESS;
+ chan->bytes_residue = 0;
+ return 0;
+}
+
+
+/* desc->tx_list ==> pending list */
+static dma_cookie_t mmp_pdma_tx_submit(struct dma_async_tx_descriptor *tx)
+{
+ struct mmp_pdma_chan *chan = to_mmp_pdma_chan(tx->chan);
+ struct mmp_pdma_desc_sw *desc = tx_to_mmp_pdma_desc(tx);
+ struct mmp_pdma_desc_sw *child;
+ unsigned long flags;
+ dma_cookie_t cookie = -EBUSY;
+
+ spin_lock_irqsave(&chan->desc_lock, flags);
+
+ list_for_each_entry(child, &desc->tx_list, node) {
+ cookie = dma_cookie_assign(&child->async_tx);
+ }
+
+ /* softly link to pending list - desc->tx_list ==> pending list */
+ list_splice_tail_init(&desc->tx_list, &chan->chain_pending);
+
+ spin_unlock_irqrestore(&chan->desc_lock, flags);
+
+ return cookie;
+}
+
+static struct mmp_pdma_desc_sw *
+mmp_pdma_alloc_descriptor(struct mmp_pdma_chan *chan)
+{
+ struct mmp_pdma_desc_sw *desc;
+ dma_addr_t pdesc;
+
+ desc = dma_pool_zalloc(chan->desc_pool, GFP_ATOMIC, &pdesc);
+ if (!desc) {
+ dev_err(chan->dev, "out of memory for link descriptor\n");
+ return NULL;
+ }
+
+ INIT_LIST_HEAD(&desc->tx_list);
+ dma_async_tx_descriptor_init(&desc->async_tx, &chan->chan);
+ /* each descriptor carries its own tx_submit hook */
+ desc->async_tx.tx_submit = mmp_pdma_tx_submit;
+ desc->async_tx.phys = pdesc;
+
+ return desc;
+}
+
+/*
+ * mmp_pdma_alloc_chan_resources - Allocate resources for DMA channel.
+ *
+ * This function will create a dma pool for descriptor allocation.
+ * Request irq only when channel is requested
+ * Return - The number of allocated descriptors.
+ */
+
+static int mmp_pdma_alloc_chan_resources(struct dma_chan *dchan)
+{
+ struct mmp_pdma_chan *chan = to_mmp_pdma_chan(dchan);
+
+ if (chan->desc_pool)
+ return 1;
+
+ chan->desc_pool = dma_pool_create(dev_name(&dchan->dev->device),
+ chan->dev,
+ sizeof(struct mmp_pdma_desc_sw),
+ __alignof__(struct mmp_pdma_desc_sw),
+ 0);
+ if (!chan->desc_pool) {
+ dev_err(chan->dev, "unable to allocate descriptor pool\n");
+ return -ENOMEM;
+ }
+
+ chan->status = DMA_COMPLETE;
+ chan->dir = 0;
+ chan->dcmd = 0;
+
+ mmp_pdma_free_phy(chan);
+
+ chan->idle = true;
+ chan->dev_addr = 0;
+ return 1;
+}
+
+static void mmp_pdma_free_desc_list(struct mmp_pdma_chan *chan,
+ struct list_head *list)
+{
+ struct mmp_pdma_desc_sw *desc, *_desc;
+
+ list_for_each_entry_safe(desc, _desc, list, node) {
+ list_del(&desc->node);
+ dma_pool_free(chan->desc_pool, desc, desc->async_tx.phys);
+ }
+}
+
+static void mmp_pdma_free_chan_resources(struct dma_chan *dchan)
+{
+ struct mmp_pdma_chan *chan = to_mmp_pdma_chan(dchan);
+ unsigned long flags;
+
+ /* wait until task ends if necessary */
+ tasklet_kill(&chan->tasklet);
+
+ spin_lock_irqsave(&chan->desc_lock, flags);
+ mmp_pdma_free_desc_list(chan, &chan->chain_pending);
+ mmp_pdma_free_desc_list(chan, &chan->chain_running);
+
+ spin_unlock_irqrestore(&chan->desc_lock, flags);
+
+ dma_pool_destroy(chan->desc_pool);
+ chan->desc_pool = NULL;
+ chan->idle = true;
+ chan->dev_addr = 0;
+
+ chan->status = DMA_COMPLETE;
+ chan->dir = 0;
+ chan->dcmd = 0;
+
+ mmp_pdma_free_phy(chan);
+ return;
+}
+
+#define INVALID_BURST_SETTING -1
+#define DEFAULT_MAX_BURST_SIZE 32
+
+static int get_max_burst_setting(unsigned int max_burst_size)
+{
+ switch (max_burst_size) {
+ case 8:
+ return DCMD_BURST8;
+ case 16:
+ return DCMD_BURST16;
+ case 32:
+ return DCMD_BURST32;
+ case 64:
+ return DCMD_BURST64;
+ default:
+ return INVALID_BURST_SETTING;
+ }
+}
+
+static struct dma_async_tx_descriptor *
+mmp_pdma_prep_memcpy(struct dma_chan *dchan,
+ dma_addr_t dma_dst, dma_addr_t dma_src,
+ size_t len, unsigned long flags)
+{
+ struct mmp_pdma_chan *chan;
+ struct mmp_pdma_desc_sw *first = NULL, *prev = NULL, *new;
+ size_t copy = 0;
+ struct mmp_pdma_device *dev;
+ int value;
+
+ if (!dchan)
+ return NULL;
+
+ if (!len)
+ return NULL;
+
+ chan = to_mmp_pdma_chan(dchan);
+ chan->byte_align = false;
+
+ if (!chan->dir) {
+ chan->dir = DMA_MEM_TO_MEM;
+ chan->dcmd = DCMD_INCTRGADDR | DCMD_INCSRCADDR;
+ dev = to_mmp_pdma_dev(dchan->device);
+ value = get_max_burst_setting(dev->max_burst_size);
+
+ BUG_ON(value == INVALID_BURST_SETTING);
+
+ chan->dcmd |= value;
+ }
+
+ do {
+ /* Allocate the link descriptor from DMA pool */
+ new = mmp_pdma_alloc_descriptor(chan);
+ if (!new) {
+ dev_err(chan->dev, "no memory for desc\n");
+ goto fail;
+ }
+
+ copy = min_t(size_t, len, PDMA_MAX_DESC_BYTES);
+ if (dma_src & 0x7 || dma_dst & 0x7)
+ chan->byte_align = true;
+
+ new->desc.dcmd = chan->dcmd | (DCMD_LENGTH & copy);
+
+		/*
+		 * If the source/target address lies above 4 GiB, program
+		 * the high-address descriptor fields accordingly.
+		 */
+ if (chan->dir == DMA_MEM_TO_DEV) {
+ new->desc.dsadr = dma_src & 0xffffffff;
+ new->desc.dtadr = dma_dst;
+#ifdef CONFIG_SPACEMIT_PDMA_SUPPORT_64BIT
+ new->desc.dsadrh = (dma_src >> 32);
+ new->desc.dtadrh = 0;
+#endif
+ } else if (chan->dir == DMA_DEV_TO_MEM) {
+ new->desc.dsadr = dma_src;
+ new->desc.dtadr = dma_dst & 0xffffffff;
+#ifdef CONFIG_SPACEMIT_PDMA_SUPPORT_64BIT
+ new->desc.dsadrh = 0;
+ new->desc.dtadrh = (dma_dst >> 32);
+#endif
+ } else if (chan->dir == DMA_MEM_TO_MEM) {
+ new->desc.dsadr = dma_src & 0xffffffff;
+ new->desc.dtadr = dma_dst & 0xffffffff;
+#ifdef CONFIG_SPACEMIT_PDMA_SUPPORT_64BIT
+ new->desc.dsadrh = (dma_src >> 32);
+ new->desc.dtadrh = (dma_dst >> 32);
+#endif
+ } else {
+ dev_err(chan->dev, "wrong direction: 0x%x\n", chan->dir);
+ goto fail;
+ }
+
+ if (!first)
+ first = new;
+ else {
+ prev->desc.ddadr = new->async_tx.phys;
+#ifdef CONFIG_SPACEMIT_PDMA_SUPPORT_64BIT
+ prev->desc.ddadrh = (new->async_tx.phys >> 32);
+#endif
+ }
+
+ new->async_tx.cookie = 0;
+ async_tx_ack(&new->async_tx);
+
+ prev = new;
+ len -= copy;
+
+ if (chan->dir == DMA_MEM_TO_DEV) {
+ dma_src += copy;
+ } else if (chan->dir == DMA_DEV_TO_MEM) {
+ dma_dst += copy;
+ } else if (chan->dir == DMA_MEM_TO_MEM) {
+ dma_src += copy;
+ dma_dst += copy;
+ }
+
+ /* Insert the link descriptor to the LD ring */
+ list_add_tail(&new->node, &first->tx_list);
+ } while (len);
+
+ first->async_tx.flags = flags; /* client is in control of this ack */
+ first->async_tx.cookie = -EBUSY;
+
+ /* last desc and fire IRQ */
+ new->desc.ddadr = DDADR_STOP;
+ new->desc.dcmd |= DCMD_ENDIRQEN;
+
+ chan->cyclic_first = NULL;
+
+ return &first->async_tx;
+
+fail:
+ if (first)
+ mmp_pdma_free_desc_list(chan, &first->tx_list);
+ return NULL;
+}
+
+static struct dma_async_tx_descriptor *
+mmp_pdma_prep_slave_sg(struct dma_chan *dchan, struct scatterlist *sgl,
+ unsigned int sg_len, enum dma_transfer_direction dir,
+ unsigned long flags, void *context)
+{
+ struct mmp_pdma_chan *chan = to_mmp_pdma_chan(dchan);
+ struct mmp_pdma_desc_sw *first = NULL, *prev = NULL, *new = NULL;
+ size_t len, avail;
+ struct scatterlist *sg;
+ dma_addr_t addr;
+ int i;
+
+ if ((sgl == NULL) || (sg_len == 0))
+ return NULL;
+
+	chan->byte_align = false;
+
+ mmp_pdma_config_write(dchan, &chan->slave_config, dir);
+
+ for_each_sg(sgl, sg, sg_len, i) {
+ addr = sg_dma_address(sg);
+		avail = sg_dma_len(sg);
+
+ do {
+ len = min_t(size_t, avail, PDMA_MAX_DESC_BYTES);
+ if (addr & 0x7)
+ chan->byte_align = true;
+
+ /* allocate and populate the descriptor */
+ new = mmp_pdma_alloc_descriptor(chan);
+ if (!new) {
+ dev_err(chan->dev, "no memory for desc\n");
+ goto fail;
+ }
+
+ new->desc.dcmd = chan->dcmd | (DCMD_LENGTH & len);
+
+			/*
+			 * If the source/target address lies above 4 GiB,
+			 * program the high-address descriptor fields
+			 * accordingly.
+			 */
+ if (dir == DMA_MEM_TO_DEV) {
+ new->desc.dsadr = addr & 0xffffffff;
+ new->desc.dtadr = chan->dev_addr;
+#ifdef CONFIG_SPACEMIT_PDMA_SUPPORT_64BIT
+ new->desc.dsadrh = (addr >> 32);
+ new->desc.dtadrh = 0;
+#endif
+ } else if (dir == DMA_DEV_TO_MEM) {
+ new->desc.dsadr = chan->dev_addr;
+ new->desc.dtadr = addr & 0xffffffff;
+#ifdef CONFIG_SPACEMIT_PDMA_SUPPORT_64BIT
+ new->desc.dsadrh = 0;
+ new->desc.dtadrh = (addr >> 32);
+#endif
+ } else {
+ dev_err(chan->dev, "wrong direction: 0x%x\n", chan->dir);
+ goto fail;
+ }
+
+ if (!first)
+ first = new;
+ else {
+ prev->desc.ddadr = new->async_tx.phys;
+#ifdef CONFIG_SPACEMIT_PDMA_SUPPORT_64BIT
+ prev->desc.ddadrh = (new->async_tx.phys >> 32);
+#endif
+ }
+
+ new->async_tx.cookie = 0;
+ async_tx_ack(&new->async_tx);
+ prev = new;
+
+ /* Insert the link descriptor to the LD ring */
+ list_add_tail(&new->node, &first->tx_list);
+
+ /* update metadata */
+ addr += len;
+ avail -= len;
+ } while (avail);
+ }
+
+ first->async_tx.cookie = -EBUSY;
+ first->async_tx.flags = flags;
+
+ /* last desc and fire IRQ */
+ new->desc.ddadr = DDADR_STOP;
+ new->desc.dcmd |= DCMD_ENDIRQEN;
+
+ chan->dir = dir;
+ chan->cyclic_first = NULL;
+
+ return &first->async_tx;
+
+fail:
+ if (first)
+ mmp_pdma_free_desc_list(chan, &first->tx_list);
+ return NULL;
+}
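+
+/*
+ * Illustrative consumer sketch (assumed client code, not part of this
+ * driver): "chan", "cfg" and "sgt" are placeholders. The slave config
+ * stored by mmp_pdma_config() is applied by mmp_pdma_config_write()
+ * when the descriptor chain above is built:
+ *
+ *	dmaengine_slave_config(chan, &cfg);
+ *	tx = dmaengine_prep_slave_sg(chan, sgt->sgl, sgt->nents,
+ *				     DMA_MEM_TO_DEV, DMA_PREP_INTERRUPT);
+ *	dmaengine_submit(tx);
+ *	dma_async_issue_pending(chan);
+ */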
+
+static struct dma_async_tx_descriptor *
+mmp_pdma_prep_dma_cyclic(struct dma_chan *dchan,
+ dma_addr_t buf_addr, size_t len, size_t period_len,
+ enum dma_transfer_direction direction,
+ unsigned long flags)
+{
+ struct mmp_pdma_chan *chan;
+ struct mmp_pdma_desc_sw *first = NULL, *prev = NULL, *new;
+ dma_addr_t dma_src, dma_dst;
+#ifdef CONFIG_SPACEMIT_PDMA_SUPPORT_64BIT
+ dma_addr_t dma_srch, dma_dsth;
+#endif
+
+ if (!dchan || !len || !period_len)
+ return NULL;
+
+ /* the buffer length must be a multiple of period_len */
+ if (len % period_len != 0)
+ return NULL;
+
+ if (period_len > PDMA_MAX_DESC_BYTES)
+ return NULL;
+
+ chan = to_mmp_pdma_chan(dchan);
+ mmp_pdma_config_write(dchan, &chan->slave_config, direction);
+
+ switch (direction) {
+ case DMA_MEM_TO_DEV:
+ dma_src = buf_addr & 0xffffffff;
+ dma_dst = chan->dev_addr;
+#ifdef CONFIG_SPACEMIT_PDMA_SUPPORT_64BIT
+ dma_srch = (buf_addr >> 32);
+ dma_dsth = 0;
+#endif
+ break;
+ case DMA_DEV_TO_MEM:
+ dma_dst = buf_addr & 0xffffffff;
+ dma_src = chan->dev_addr;
+#ifdef CONFIG_SPACEMIT_PDMA_SUPPORT_64BIT
+ dma_dsth = (buf_addr >> 32);
+ dma_srch = 0;
+#endif
+ break;
+ default:
+ dev_err(chan->dev, "Unsupported direction for cyclic DMA\n");
+ return NULL;
+ }
+
+ chan->dir = direction;
+
+ do {
+ /* Allocate the link descriptor from DMA pool */
+ new = mmp_pdma_alloc_descriptor(chan);
+ if (!new) {
+ dev_err(chan->dev, "no memory for desc\n");
+ goto fail;
+ }
+
+ new->desc.dcmd = (chan->dcmd | DCMD_ENDIRQEN |
+ (DCMD_LENGTH & period_len));
+ new->desc.dsadr = dma_src;
+ new->desc.dtadr = dma_dst;
+#ifdef CONFIG_SPACEMIT_PDMA_SUPPORT_64BIT
+		new->desc.dsadrh = dma_srch;
+		new->desc.dtadrh = dma_dsth;
+#endif
+
+ if (!first)
+ first = new;
+ else {
+ prev->desc.ddadr = new->async_tx.phys;
+#ifdef CONFIG_SPACEMIT_PDMA_SUPPORT_64BIT
+ prev->desc.ddadrh = (new->async_tx.phys >> 32);
+#endif
+ }
+
+ new->async_tx.cookie = 0;
+ async_tx_ack(&new->async_tx);
+
+ prev = new;
+ len -= period_len;
+
+ if (chan->dir == DMA_MEM_TO_DEV)
+ dma_src += period_len;
+ else
+ dma_dst += period_len;
+
+ /* Insert the link descriptor to the LD ring */
+ list_add_tail(&new->node, &first->tx_list);
+ } while (len);
+
+ first->async_tx.flags = flags; /* client is in control of this ack */
+ first->async_tx.cookie = -EBUSY;
+
+	/* make the cyclic link */
+	new->desc.ddadr = first->async_tx.phys;
+#ifdef CONFIG_SPACEMIT_PDMA_SUPPORT_64BIT
+	new->desc.ddadrh = (first->async_tx.phys >> 32);
+#endif
+ chan->cyclic_first = first;
+
+ return &first->async_tx;
+
+fail:
+ if (first)
+ mmp_pdma_free_desc_list(chan, &first->tx_list);
+ return NULL;
+}
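+
+/*
+ * Illustrative client-side sketch (not part of this driver): how an
+ * audio-style consumer would typically drive the cyclic path above via
+ * the generic dmaengine API. "chan", "buf_phys", "buf_len", "period_len"
+ * and "period_done" are assumptions for the example.
+ *
+ *	tx = dmaengine_prep_dma_cyclic(chan, buf_phys, buf_len, period_len,
+ *				       DMA_MEM_TO_DEV, DMA_PREP_INTERRUPT);
+ *	tx->callback = period_done;	// fires once per ENDIRQEN period
+ *	dmaengine_submit(tx);
+ *	dma_async_issue_pending(chan);
+ */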
+
+static int mmp_pdma_config_write(struct dma_chan *dchan,
+ struct dma_slave_config *cfg,
+ enum dma_transfer_direction direction)
+{
+ struct mmp_pdma_chan *chan = to_mmp_pdma_chan(dchan);
+ u32 maxburst = 0, addr = 0;
+ enum dma_slave_buswidth width = DMA_SLAVE_BUSWIDTH_UNDEFINED;
+
+ if (!dchan)
+ return -EINVAL;
+
+ if (direction == DMA_DEV_TO_MEM) {
+ chan->dcmd = DCMD_INCTRGADDR | DCMD_FLOWSRC;
+ maxburst = cfg->src_maxburst;
+ width = cfg->src_addr_width;
+ addr = cfg->src_addr;
+ } else if (direction == DMA_MEM_TO_DEV) {
+ chan->dcmd = DCMD_INCSRCADDR | DCMD_FLOWTRG;
+ maxburst = cfg->dst_maxburst;
+ width = cfg->dst_addr_width;
+ addr = cfg->dst_addr;
+ }
+
+ if (width == DMA_SLAVE_BUSWIDTH_1_BYTE)
+ chan->dcmd |= DCMD_WIDTH1;
+ else if (width == DMA_SLAVE_BUSWIDTH_2_BYTES)
+ chan->dcmd |= DCMD_WIDTH2;
+ else if (width == DMA_SLAVE_BUSWIDTH_4_BYTES)
+ chan->dcmd |= DCMD_WIDTH4;
+
+ if (maxburst == 8)
+ chan->dcmd |= DCMD_BURST8;
+ else if (maxburst == 16)
+ chan->dcmd |= DCMD_BURST16;
+ else if (maxburst == 32)
+ chan->dcmd |= DCMD_BURST32;
+
+ chan->dir = direction;
+ chan->dev_addr = addr;
+
+ return 0;
+}
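+
+/*
+ * Illustrative mapping of dma_slave_config fields onto the DCMD bits set
+ * above (assumed client-side values; "fifo_phys" is a placeholder):
+ *
+ *	struct dma_slave_config cfg = {
+ *		.dst_addr	= fifo_phys,			// -> chan->dev_addr
+ *		.dst_addr_width	= DMA_SLAVE_BUSWIDTH_4_BYTES,	// -> DCMD_WIDTH4
+ *		.dst_maxburst	= 32,				// -> DCMD_BURST32
+ *	};
+ */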
+
+static int mmp_pdma_pause_chan(struct dma_chan *dchan)
+{
+ struct mmp_pdma_chan *chan = to_mmp_pdma_chan(dchan);
+
+ if (!chan->phy)
+ return -1;
+
+ disable_chan(chan->phy);
+ chan->status = DMA_PAUSED;
+
+ return 0;
+}
+
+static int mmp_pdma_config(struct dma_chan *dchan,
+ struct dma_slave_config *cfg)
+{
+ struct mmp_pdma_chan *chan = to_mmp_pdma_chan(dchan);
+
+ memcpy(&chan->slave_config, cfg, sizeof(*cfg));
+ return 0;
+}
+
+static int mmp_pdma_terminate_all(struct dma_chan *dchan)
+{
+ struct mmp_pdma_chan *chan = to_mmp_pdma_chan(dchan);
+ unsigned long flags;
+
+ if (!dchan)
+ return -EINVAL;
+
+ spin_lock_irqsave(&chan->desc_lock, flags);
+ disable_chan(chan->phy);
+ chan->status = DMA_COMPLETE;
+ mmp_pdma_free_phy(chan);
+
+ mmp_pdma_free_desc_list(chan, &chan->chain_pending);
+ mmp_pdma_free_desc_list(chan, &chan->chain_running);
+ chan->bytes_residue = 0;
+
+ spin_unlock_irqrestore(&chan->desc_lock, flags);
+ chan->idle = true;
+
+ mmp_pdma_qos_put(chan);
+
+ return 0;
+}
+
+static unsigned int mmp_pdma_residue(struct mmp_pdma_chan *chan,
+ dma_cookie_t cookie)
+{
+ struct mmp_pdma_desc_sw *sw;
+ u32 curr, residue = 0;
+ bool passed = false;
+ bool cyclic = chan->cyclic_first != NULL;
+
+	/*
+	 * If the channel no longer has a phy, the transaction has already
+	 * completed. Return the residue recorded at completion time; it is
+	 * non-zero only in the EORIRQEN case.
+	 */
+	if (!chan->phy)
+		return chan->bytes_residue;
+
+ if (chan->dir == DMA_DEV_TO_MEM)
+ curr = readl(chan->phy->base + DTADR(chan->phy->idx));
+ else
+ curr = readl(chan->phy->base + DSADR(chan->phy->idx));
+
+ list_for_each_entry(sw, &chan->chain_running, node) {
+ u32 start, end, len;
+
+ if (chan->dir == DMA_DEV_TO_MEM)
+ start = sw->desc.dtadr;
+ else
+ start = sw->desc.dsadr;
+
+ len = sw->desc.dcmd & DCMD_LENGTH;
+ end = start + len;
+
+		/*
+		 * 'passed' is latched once we find the descriptor whose
+		 * boundaries contain the curr pointer. All descriptors that
+		 * occur in the list _after_ that partially handled descriptor
+		 * are still to be processed and are hence added to the
+		 * residual bytes counter.
+		 */
+
+ if (passed) {
+ residue += len;
+ } else if (curr >= start && curr <= end) {
+ residue += end - curr;
+ passed = true;
+ }
+
+ /*
+ * Descriptors that have the ENDIRQEN bit set mark the end of a
+ * transaction chain, and the cookie assigned with it has been
+ * returned previously from mmp_pdma_tx_submit().
+ *
+ * In case we have multiple transactions in the running chain,
+ * and the cookie does not match the one the user asked us
+ * about, reset the state variables and start over.
+ *
+ * This logic does not apply to cyclic transactions, where all
+ * descriptors have the ENDIRQEN bit set, and for which we
+ * can't have multiple transactions on one channel anyway.
+ */
+ if (cyclic || !(sw->desc.dcmd & DCMD_ENDIRQEN))
+ continue;
+
+ if (sw->async_tx.cookie == cookie) {
+ return residue;
+ } else {
+ residue = 0;
+ passed = false;
+ }
+ }
+
+ /* We should only get here in case of cyclic transactions */
+ return residue;
+}
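+
+/*
+ * Worked example (illustrative): with two 4 KiB descriptors in
+ * chain_running and curr pointing 1 KiB into the first one, the loop
+ * above accumulates (4K - 1K) for the partially handled descriptor plus
+ * 4K for the untouched one, i.e. a residue of 7 KiB.
+ */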
+
+static enum dma_status mmp_pdma_tx_status(struct dma_chan *dchan,
+ dma_cookie_t cookie,
+ struct dma_tx_state *txstate)
+{
+ struct mmp_pdma_chan *chan = to_mmp_pdma_chan(dchan);
+ enum dma_status ret;
+ unsigned long flags;
+
+ spin_lock_irqsave(&chan->desc_lock, flags);
+ ret = dma_cookie_status(dchan, cookie, txstate);
+ if (likely(ret != DMA_ERROR))
+ dma_set_residue(txstate, mmp_pdma_residue(chan, cookie));
+
+ spin_unlock_irqrestore(&chan->desc_lock, flags);
+
+ if (ret == DMA_COMPLETE)
+ return ret;
+ else
+ return chan->status;
+}
+
+/*
+ * mmp_pdma_issue_pending - Issue the DMA start command
+ * pending list ==> running list
+ */
+static void mmp_pdma_issue_pending(struct dma_chan *dchan)
+{
+ struct mmp_pdma_chan *chan = to_mmp_pdma_chan(dchan);
+ unsigned long flags;
+ int ret = 0;
+
+ mmp_pdma_qos_get(chan);
+ spin_lock_irqsave(&chan->desc_lock, flags);
+ ret = start_pending_queue(chan);
+
+ spin_unlock_irqrestore(&chan->desc_lock, flags);
+
+ if (ret)
+ mmp_pdma_qos_put(chan);
+}
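+
+/*
+ * Minimal end-to-end sketch of the submit/issue contract implemented
+ * above (assumed client code; "chan", "dst", "src" and "len" are
+ * placeholders):
+ *
+ *	tx = dmaengine_prep_dma_memcpy(chan, dst, src, len, DMA_PREP_INTERRUPT);
+ *	cookie = dmaengine_submit(tx);		// mmp_pdma_tx_submit()
+ *	dma_async_issue_pending(chan);		// mmp_pdma_issue_pending()
+ *	status = dma_async_is_tx_complete(chan, cookie, NULL, NULL);
+ */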
+
+/*
+ * dma_do_tasklet - run the descriptor callbacks and restart the pending
+ * list.
+ */
+static void dma_do_tasklet(struct tasklet_struct *t)
+{
+ struct mmp_pdma_chan *chan = from_tasklet(chan, t, tasklet);
+ struct mmp_pdma_desc_sw *desc, *_desc;
+ LIST_HEAD(chain_cleanup);
+ unsigned long flags;
+ struct dmaengine_desc_callback cb;
+
+ int ret = 0;
+
+ /* return if this channel has been stopped */
+ spin_lock_irqsave(&chan->desc_lock, flags);
+ if (chan->status == DMA_COMPLETE) {
+ spin_unlock_irqrestore(&chan->desc_lock, flags);
+ return;
+ }
+ spin_unlock_irqrestore(&chan->desc_lock, flags);
+
+ if (chan->cyclic_first) {
+ spin_lock_irqsave(&chan->desc_lock, flags);
+ desc = chan->cyclic_first;
+ dmaengine_desc_get_callback(&desc->async_tx, &cb);
+ spin_unlock_irqrestore(&chan->desc_lock, flags);
+
+ dmaengine_desc_callback_invoke(&cb, NULL);
+
+ return;
+ }
+
+ /* submit pending list; callback for each desc; free desc */
+ spin_lock_irqsave(&chan->desc_lock, flags);
+
+ /* special for the EORIRQEN case, residue is not 0 */
+ list_for_each_entry(desc, &chan->chain_running, node) {
+ if (desc->desc.dcmd & DCMD_ENDIRQEN) {
+ chan->bytes_residue =
+ mmp_pdma_residue(chan, desc->async_tx.cookie);
+ break;
+ }
+ }
+
+ list_for_each_entry_safe(desc, _desc, &chan->chain_running, node) {
+ /*
+ * move the descriptors to a temporary list so we can drop
+ * the lock during the entire cleanup operation
+ */
+ list_move(&desc->node, &chain_cleanup);
+
+ /*
+ * Look for the first list entry which has the ENDIRQEN flag
+ * set. That is the descriptor we got an interrupt for, so
+ * complete that transaction and its cookie.
+ */
+ if (desc->desc.dcmd & DCMD_ENDIRQEN) {
+ dma_cookie_t cookie = desc->async_tx.cookie;
+ dma_cookie_complete(&desc->async_tx);
+ dev_dbg(chan->dev, "completed_cookie=%d\n", cookie);
+ break;
+ }
+ }
+
+ /*
+ * The hardware is idle and ready for more when the
+ * chain_running list is empty.
+ */
+ chan->status = list_empty(&chan->chain_running) ?
+ DMA_COMPLETE : DMA_IN_PROGRESS;
+
+ /* Start any pending transactions automatically */
+ ret = start_pending_queue(chan);
+
+ spin_unlock_irqrestore(&chan->desc_lock, flags);
+
+	/* restarting the pending transactions failed; the qos reference is no longer needed */
+ if (ret)
+ mmp_pdma_qos_put(chan);
+
+ /* Run the callback for each descriptor, in order */
+ list_for_each_entry_safe(desc, _desc, &chain_cleanup, node) {
+ struct dma_async_tx_descriptor *txd = &desc->async_tx;
+
+ /* Remove from the list of transactions */
+ list_del(&desc->node);
+ /* Run the link descriptor callback function */
+ dmaengine_desc_get_callback(txd, &cb);
+ dmaengine_desc_callback_invoke(&cb, NULL);
+
+ dma_pool_free(chan->desc_pool, desc, txd->phys);
+ }
+}
+
+static int mmp_pdma_remove(struct platform_device *op)
+{
+ struct mmp_pdma_device *pdev = platform_get_drvdata(op);
+ struct mmp_pdma_phy *phy;
+ int i, irq = 0, irq_num = 0;
+
+ if (op->dev.of_node)
+ of_dma_controller_free(op->dev.of_node);
+
+ for (i = 0; i < pdev->dma_channels; i++) {
+ if (platform_get_irq(op, i) > 0)
+ irq_num++;
+ }
+
+ if (irq_num != pdev->dma_channels) {
+ irq = platform_get_irq(op, 0);
+ devm_free_irq(&op->dev, irq, pdev);
+ } else {
+ for (i = 0; i < pdev->dma_channels; i++) {
+ phy = &pdev->phy[i];
+ irq = platform_get_irq(op, i);
+ devm_free_irq(&op->dev, irq, phy);
+ }
+ }
+
+ dma_async_device_unregister(&pdev->device);
+
+ reset_control_assert(pdev->resets);
+ clk_disable_unprepare(pdev->clk);
+
+ kfree(pdev->reserved_channels);
+ platform_set_drvdata(op, NULL);
+
+ return 0;
+}
+
+static int mmp_pdma_chan_init(struct mmp_pdma_device *pdev, int idx, int irq)
+{
+ struct mmp_pdma_phy *phy = &pdev->phy[idx];
+ struct mmp_pdma_chan *chan;
+ int ret;
+
+ chan = devm_kzalloc(pdev->dev, sizeof(*chan), GFP_KERNEL);
+ if (chan == NULL)
+ return -ENOMEM;
+
+ phy->idx = idx;
+ phy->base = pdev->base;
+
+ if (irq) {
+ ret = devm_request_irq(pdev->dev, irq, mmp_pdma_chan_handler,
+ IRQF_SHARED, "pdma", phy);
+ if (ret) {
+			dev_err(pdev->dev, "failed to request channel irq\n");
+ return ret;
+ }
+ }
+
+ spin_lock_init(&chan->desc_lock);
+ chan->dev = pdev->dev;
+ chan->chan.device = &pdev->device;
+ tasklet_setup(&chan->tasklet, dma_do_tasklet);
+ INIT_LIST_HEAD(&chan->chain_pending);
+ INIT_LIST_HEAD(&chan->chain_running);
+
+ chan->status = DMA_COMPLETE;
+ chan->bytes_residue = 0;
+ chan->qos_count = 0;
+ chan->user_do_qos = 1;
+
+ /* register virt channel to dma engine */
+ list_add_tail(&chan->chan.device_node, &pdev->device.channels);
+
+ return 0;
+}
+
+static const struct of_device_id mmp_pdma_dt_ids[] = {
+ { .compatible = "spacemit,pdma-1.0", },
+ {}
+};
+MODULE_DEVICE_TABLE(of, mmp_pdma_dt_ids);
+
+static struct dma_chan *mmp_pdma_dma_xlate(struct of_phandle_args *dma_spec,
+ struct of_dma *ofdma)
+{
+ struct mmp_pdma_device *d = ofdma->of_dma_data;
+ struct dma_chan *chan;
+#ifdef CONFIG_PM
+ struct mmp_pdma_chan *c;
+#endif
+
+ chan = dma_get_any_slave_channel(&d->device);
+ if (!chan)
+ return NULL;
+
+ to_mmp_pdma_chan(chan)->drcmr = dma_spec->args[0];
+#ifdef CONFIG_PM
+	c = to_mmp_pdma_chan(chan);
+	if (unlikely(dma_spec->args_count != 2))
+		dev_err(d->dev, "#dma-cells should be 2!\n");
+	else
+		c->user_do_qos = dma_spec->args[1] ? 1 : 0;
+
+ if (c->user_do_qos)
+ dev_dbg(d->dev, "channel %d: user does qos itself\n",
+ c->chan.chan_id);
+ else
+ dev_dbg(d->dev, "channel %d: pdma does qos\n",
+ c->chan.chan_id);
+#endif
+
+ return chan;
+}
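+
+/*
+ * Hypothetical device-tree client sketch for the two-cell binding decoded
+ * above (cell 0: drcmr request line, cell 1: who handles qos). The node
+ * name and the drcmr value are assumptions for illustration:
+ *
+ *	client: device@0 {
+ *		dmas = <&pdma 4 1>;	// drcmr 4, client does qos itself
+ *		dma-names = "rx";
+ *	};
+ */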
+
+static int mmp_pdma_probe(struct platform_device *op)
+{
+ struct mmp_pdma_device *pdev;
+ const struct of_device_id *of_id;
+ struct mmp_dma_platdata *pdata = dev_get_platdata(&op->dev);
+ struct resource *iores;
+ int i, ret, irq = 0;
+ int dma_channels = 0, irq_num = 0;
+ const enum dma_slave_buswidth widths =
+ DMA_SLAVE_BUSWIDTH_1_BYTE | DMA_SLAVE_BUSWIDTH_2_BYTES |
+ DMA_SLAVE_BUSWIDTH_4_BYTES;
+
+ int nr_reserved_channels;
+ const int *list;
+ unsigned int max_burst_size = DEFAULT_MAX_BURST_SIZE;
+
+ pdev = devm_kzalloc(&op->dev, sizeof(*pdev), GFP_KERNEL);
+ if (!pdev)
+ return -ENOMEM;
+
+ pdev->dev = &op->dev;
+
+ spin_lock_init(&pdev->phy_lock);
+
+ iores = platform_get_resource(op, IORESOURCE_MEM, 0);
+ pdev->base = devm_ioremap_resource(pdev->dev, iores);
+ if (IS_ERR(pdev->base))
+ return PTR_ERR(pdev->base);
+
+	pdev->clk = devm_clk_get(pdev->dev, NULL);
+	if (IS_ERR(pdev->clk))
+		return PTR_ERR(pdev->clk);
+
+ ret = clk_prepare_enable(pdev->clk);
+ if (ret)
+ return dev_err_probe(pdev->dev, ret, "could not enable dma bus clock\n");
+
+	pdev->resets = devm_reset_control_get_optional(pdev->dev, NULL);
+	if (IS_ERR(pdev->resets)) {
+		ret = PTR_ERR(pdev->resets);
+		goto err_rst;
+	}
+
+	ret = reset_control_deassert(pdev->resets);
+	if (ret)
+		goto err_rst;
+
+ of_id = of_match_device(mmp_pdma_dt_ids, pdev->dev);
+
+ if (of_id) {
+ int n;
+ of_property_read_u32(pdev->dev->of_node, "#dma-channels",
+ &dma_channels);
+
+ list = of_get_property(pdev->dev->of_node, "reserved-channels",
+ &n);
+
+		if (of_property_read_u32(pdev->dev->of_node, "max-burst-size",
+					 &max_burst_size)) {
+			dev_warn(pdev->dev,
+				 "no max-burst-size property in the device tree, defaulting to %d\n",
+				 DEFAULT_MAX_BURST_SIZE);
+			max_burst_size = DEFAULT_MAX_BURST_SIZE;
+		}
+
+		if (get_max_burst_setting(max_burst_size) == INVALID_BURST_SETTING) {
+			dev_warn(pdev->dev,
+				 "unsupported max-burst-size %d in the device tree, falling back to %d\n",
+				 max_burst_size, DEFAULT_MAX_BURST_SIZE);
+			max_burst_size = DEFAULT_MAX_BURST_SIZE;
+		}
+
+ if (list) {
+ int num_args = 2;
+
+ nr_reserved_channels = n / (sizeof(u32) * num_args);
+
+ pdev->nr_reserved_channels = nr_reserved_channels;
+
+			pdev->reserved_channels = kcalloc(nr_reserved_channels,
+							  sizeof(struct reserved_chan),
+							  GFP_KERNEL);
+
+ if (pdev->reserved_channels == NULL)
+ return -ENOMEM;
+
+ for (i = 0; i < nr_reserved_channels; i++) {
+ int value;
+
+ of_property_read_u32_index(pdev->dev->of_node, "reserved-channels", i * num_args, &value);
+ pdev->reserved_channels[i].chan_id = value;
+ of_property_read_u32_index(pdev->dev->of_node, "reserved-channels", i * num_args + 1, &value);
+ pdev->reserved_channels[i].drcmr = value;
+ }
+ }
+ } else if (pdata && pdata->dma_channels) {
+ dma_channels = pdata->dma_channels;
+ } else {
+		dma_channels = 32; /* default to 32 channels */
+ }
+ pdev->dma_channels = dma_channels;
+
+ pdev->max_burst_size = max_burst_size;
+ dev_dbg(pdev->dev, "set max burst size to %d\n", max_burst_size);
+
+#ifdef CONFIG_PM
+ pm_runtime_enable(&op->dev);
+	/*
+	 * The pm operations are not guaranteed to run in non-atomic context;
+	 * whether they do depends on the client drivers' behavior. Mark the
+	 * device as irq safe to cover both cases.
+	 */
+ pm_runtime_irq_safe(&op->dev);
+#endif
+ for (i = 0; i < dma_channels; i++) {
+ if (platform_get_irq_optional(op, i) > 0)
+ irq_num++;
+ }
+
+ pdev->phy = devm_kcalloc(pdev->dev, dma_channels, sizeof(*pdev->phy),
+ GFP_KERNEL);
+ if (pdev->phy == NULL)
+ return -ENOMEM;
+
+ INIT_LIST_HEAD(&pdev->device.channels);
+
+ if (irq_num != dma_channels) {
+ /* all chan share one irq, demux inside */
+ irq = platform_get_irq(op, 0);
+ ret = devm_request_irq(pdev->dev, irq, mmp_pdma_int_handler,
+ IRQF_SHARED, "pdma", pdev);
+ if (ret)
+ return ret;
+ }
+
+ for (i = 0; i < dma_channels; i++) {
+ irq = (irq_num != dma_channels) ? 0 : platform_get_irq(op, i);
+ ret = mmp_pdma_chan_init(pdev, i, irq);
+ if (ret)
+ return ret;
+ }
+
+ dma_cap_set(DMA_SLAVE, pdev->device.cap_mask);
+ dma_cap_set(DMA_MEMCPY, pdev->device.cap_mask);
+ dma_cap_set(DMA_CYCLIC, pdev->device.cap_mask);
+ dma_cap_set(DMA_PRIVATE, pdev->device.cap_mask);
+ pdev->device.dev = &op->dev;
+ pdev->device.device_alloc_chan_resources = mmp_pdma_alloc_chan_resources;
+ pdev->device.device_free_chan_resources = mmp_pdma_free_chan_resources;
+ pdev->device.device_tx_status = mmp_pdma_tx_status;
+ pdev->device.device_prep_dma_memcpy = mmp_pdma_prep_memcpy;
+ pdev->device.device_prep_slave_sg = mmp_pdma_prep_slave_sg;
+ pdev->device.device_prep_dma_cyclic = mmp_pdma_prep_dma_cyclic;
+ pdev->device.device_issue_pending = mmp_pdma_issue_pending;
+ pdev->device.device_config = mmp_pdma_config;
+ pdev->device.device_pause = mmp_pdma_pause_chan;
+ pdev->device.device_terminate_all = mmp_pdma_terminate_all;
+ pdev->device.copy_align = DMAENGINE_ALIGN_8_BYTES;
+ pdev->device.src_addr_widths = widths;
+ pdev->device.dst_addr_widths = widths;
+ pdev->device.directions = BIT(DMA_MEM_TO_DEV) | BIT(DMA_DEV_TO_MEM);
+ pdev->device.residue_granularity = DMA_RESIDUE_GRANULARITY_DESCRIPTOR;
+
+#ifdef CONFIG_SPACEMIT_PDMA_SUPPORT_64BIT
+ dma_set_mask(pdev->dev, DMA_BIT_MASK(64));
+#else
+ dma_set_mask(pdev->dev, pdev->dev->coherent_dma_mask);
+#endif
+
+ ret = dma_async_device_register(&pdev->device);
+ if (ret) {
+ dev_err(pdev->device.dev, "unable to register\n");
+ return ret;
+ }
+
+ if (op->dev.of_node) {
+ /* Device-tree DMA controller registration */
+ ret = of_dma_controller_register(op->dev.of_node,
+ mmp_pdma_dma_xlate, pdev);
+ if (ret < 0) {
+ dev_err(&op->dev, "of_dma_controller_register failed\n");
+ dma_async_device_unregister(&pdev->device);
+ return ret;
+ }
+ }
+
+ platform_set_drvdata(op, pdev);
+ dev_dbg(pdev->device.dev, "initialized %d channels\n", dma_channels);
+ return 0;
+
+err_rst:
+ clk_disable_unprepare(pdev->clk);
+ return ret;
+}
+
+/*
+ * Per-channel qos get/put helpers. They ensure that pm_runtime_get/put
+ * is not called multiple times for one channel, so the get and put
+ * calls always balance for the entire device.
+ */
+static void mmp_pdma_qos_get(struct mmp_pdma_chan *chan)
+{
+ unsigned long flags;
+
+ if (chan->user_do_qos)
+ return;
+
+ spin_lock_irqsave(&chan->desc_lock, flags);
+ if (chan->qos_count == 0) {
+ chan->qos_count = 1;
+ /*
+ * Safe in spin_lock because it's marked as irq safe.
+ * Similar case for mmp_pdma_qos_put().
+ */
+ pm_runtime_get_sync(chan->dev);
+ }
+
+ spin_unlock_irqrestore(&chan->desc_lock, flags);
+}
+
+static void mmp_pdma_qos_put(struct mmp_pdma_chan *chan)
+{
+ unsigned long flags;
+
+ if (chan->user_do_qos)
+ return;
+
+ spin_lock_irqsave(&chan->desc_lock, flags);
+ if (chan->qos_count == 1) {
+ chan->qos_count = 0;
+ pm_runtime_put_autosuspend(chan->dev);
+ }
+
+ spin_unlock_irqrestore(&chan->desc_lock, flags);
+}
+
+static const struct platform_device_id mmp_pdma_id_table[] = {
+ { "mmp-pdma", },
+ { },
+};
+
+#ifdef CONFIG_PM_SLEEP
+static int mmp_pdma_suspend_noirq(struct device *dev)
+{
+ struct mmp_pdma_device *pdev = dev_get_drvdata(dev);
+
+ clk_disable_unprepare(pdev->clk);
+
+ return 0;
+}
+
+static int mmp_pdma_resume_noirq(struct device *dev)
+{
+ struct mmp_pdma_device *pdev = dev_get_drvdata(dev);
+
+ clk_prepare_enable(pdev->clk);
+
+ return 0;
+}
+
+static const struct dev_pm_ops k1x_mmp_pdma_pm_qos = {
+ .suspend_noirq = mmp_pdma_suspend_noirq,
+ .resume_noirq = mmp_pdma_resume_noirq,
+};
+#endif
+
+static struct platform_driver mmp_pdma_driver = {
+ .driver = {
+ .name = "mmp-pdma",
+#ifdef CONFIG_PM_SLEEP
+ .pm = &k1x_mmp_pdma_pm_qos,
+#endif
+ .of_match_table = mmp_pdma_dt_ids,
+ },
+ .id_table = mmp_pdma_id_table,
+ .probe = mmp_pdma_probe,
+ .remove = mmp_pdma_remove,
+};
+
+static int __init mmp_pdma_init(void)
+{
+ return platform_driver_register(&mmp_pdma_driver);
+}
+
+static void __exit mmp_pdma_exit(void)
+{
+ platform_driver_unregister(&mmp_pdma_driver);
+}
+
+subsys_initcall(mmp_pdma_init);
+module_exit(mmp_pdma_exit);
+
+MODULE_DESCRIPTION("MARVELL MMP Peripheral DMA Driver");
+MODULE_AUTHOR("Marvell International Ltd.");
+MODULE_LICENSE("GPL v2");
diff --git a/drivers/dma/mmp_tdma.c b/drivers/dma/mmp_tdma.c
index 111111111111..222222222222 100644
--- a/drivers/dma/mmp_tdma.c
+++ b/drivers/dma/mmp_tdma.c
@@ -639,7 +639,6 @@ static int mmp_tdma_probe(struct platform_device *pdev)
enum mmp_tdma_type type;
const struct of_device_id *of_id;
struct mmp_tdma_device *tdev;
- struct resource *iores;
int i, ret;
int irq = 0, irq_num = 0;
int chan_num = TDMA_CHANNEL_NUM;
@@ -663,8 +662,7 @@ static int mmp_tdma_probe(struct platform_device *pdev)
irq_num++;
}
- iores = platform_get_resource(pdev, IORESOURCE_MEM, 0);
- tdev->base = devm_ioremap_resource(&pdev->dev, iores);
+ tdev->base = devm_platform_ioremap_resource(pdev, 0);
if (IS_ERR(tdev->base))
return PTR_ERR(tdev->base);
diff --git a/drivers/dma/moxart-dma.c b/drivers/dma/moxart-dma.c
index 111111111111..222222222222 100644
--- a/drivers/dma/moxart-dma.c
+++ b/drivers/dma/moxart-dma.c
@@ -563,7 +563,6 @@ static int moxart_probe(struct platform_device *pdev)
{
struct device *dev = &pdev->dev;
struct device_node *node = dev->of_node;
- struct resource *res;
void __iomem *dma_base_addr;
int ret, i;
unsigned int irq;
@@ -580,8 +579,7 @@ static int moxart_probe(struct platform_device *pdev)
return -EINVAL;
}
- res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
- dma_base_addr = devm_ioremap_resource(dev, res);
+ dma_base_addr = devm_platform_ioremap_resource(pdev, 0);
if (IS_ERR(dma_base_addr))
return PTR_ERR(dma_base_addr);
diff --git a/drivers/dma/mv_xor_v2.c b/drivers/dma/mv_xor_v2.c
index 111111111111..222222222222 100644
--- a/drivers/dma/mv_xor_v2.c
+++ b/drivers/dma/mv_xor_v2.c
@@ -714,7 +714,6 @@ static int mv_xor_v2_resume(struct platform_device *dev)
static int mv_xor_v2_probe(struct platform_device *pdev)
{
struct mv_xor_v2_device *xor_dev;
- struct resource *res;
int i, ret = 0;
struct dma_device *dma_dev;
struct mv_xor_v2_sw_desc *sw_desc;
@@ -726,13 +725,11 @@ static int mv_xor_v2_probe(struct platform_device *pdev)
if (!xor_dev)
return -ENOMEM;
- res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
- xor_dev->dma_base = devm_ioremap_resource(&pdev->dev, res);
+ xor_dev->dma_base = devm_platform_ioremap_resource(pdev, 0);
if (IS_ERR(xor_dev->dma_base))
return PTR_ERR(xor_dev->dma_base);
- res = platform_get_resource(pdev, IORESOURCE_MEM, 1);
- xor_dev->glob_base = devm_ioremap_resource(&pdev->dev, res);
+ xor_dev->glob_base = devm_platform_ioremap_resource(pdev, 1);
if (IS_ERR(xor_dev->glob_base))
return PTR_ERR(xor_dev->glob_base);
diff --git a/drivers/dma/mxs-dma.c b/drivers/dma/mxs-dma.c
index 111111111111..222222222222 100644
--- a/drivers/dma/mxs-dma.c
+++ b/drivers/dma/mxs-dma.c
@@ -746,7 +746,6 @@ static int mxs_dma_probe(struct platform_device *pdev)
struct device_node *np = pdev->dev.of_node;
const struct mxs_dma_type *dma_type;
struct mxs_dma_engine *mxs_dma;
- struct resource *iores;
int ret, i;
mxs_dma = devm_kzalloc(&pdev->dev, sizeof(*mxs_dma), GFP_KERNEL);
@@ -763,8 +762,7 @@ static int mxs_dma_probe(struct platform_device *pdev)
mxs_dma->type = dma_type->type;
mxs_dma->dev_id = dma_type->id;
- iores = platform_get_resource(pdev, IORESOURCE_MEM, 0);
- mxs_dma->base = devm_ioremap_resource(&pdev->dev, iores);
+ mxs_dma->base = devm_platform_ioremap_resource(pdev, 0);
if (IS_ERR(mxs_dma->base))
return PTR_ERR(mxs_dma->base);
diff --git a/drivers/dma/nbpfaxi.c b/drivers/dma/nbpfaxi.c
index 111111111111..222222222222 100644
--- a/drivers/dma/nbpfaxi.c
+++ b/drivers/dma/nbpfaxi.c
@@ -1294,7 +1294,6 @@ static int nbpf_probe(struct platform_device *pdev)
struct device_node *np = dev->of_node;
struct nbpf_device *nbpf;
struct dma_device *dma_dev;
- struct resource *iomem;
const struct nbpf_config *cfg;
int num_channels;
int ret, irq, eirq, i;
@@ -1318,8 +1317,7 @@ static int nbpf_probe(struct platform_device *pdev)
dma_dev = &nbpf->dma_dev;
dma_dev->dev = dev;
- iomem = platform_get_resource(pdev, IORESOURCE_MEM, 0);
- nbpf->base = devm_ioremap_resource(dev, iomem);
+ nbpf->base = devm_platform_ioremap_resource(pdev, 0);
if (IS_ERR(nbpf->base))
return PTR_ERR(nbpf->base);
diff --git a/drivers/dma/pxa_dma.c b/drivers/dma/pxa_dma.c
index 111111111111..222222222222 100644
--- a/drivers/dma/pxa_dma.c
+++ b/drivers/dma/pxa_dma.c
@@ -1345,7 +1345,6 @@ static int pxad_probe(struct platform_device *op)
const struct of_device_id *of_id;
const struct dma_slave_map *slave_map = NULL;
struct mmp_dma_platdata *pdata = dev_get_platdata(&op->dev);
- struct resource *iores;
int ret, dma_channels = 0, nb_requestors = 0, slave_map_cnt = 0;
const enum dma_slave_buswidth widths =
DMA_SLAVE_BUSWIDTH_1_BYTE | DMA_SLAVE_BUSWIDTH_2_BYTES |
@@ -1357,8 +1356,7 @@ static int pxad_probe(struct platform_device *op)
spin_lock_init(&pdev->phy_lock);
- iores = platform_get_resource(op, IORESOURCE_MEM, 0);
- pdev->base = devm_ioremap_resource(&op->dev, iores);
+ pdev->base = devm_platform_ioremap_resource(op, 0);
if (IS_ERR(pdev->base))
return PTR_ERR(pdev->base);
diff --git a/drivers/dma/qcom/bam_dma.c b/drivers/dma/qcom/bam_dma.c
index 111111111111..222222222222 100644
--- a/drivers/dma/qcom/bam_dma.c
+++ b/drivers/dma/qcom/bam_dma.c
@@ -1237,7 +1237,6 @@ static int bam_dma_probe(struct platform_device *pdev)
{
struct bam_device *bdev;
const struct of_device_id *match;
- struct resource *iores;
int ret, i;
bdev = devm_kzalloc(&pdev->dev, sizeof(*bdev), GFP_KERNEL);
@@ -1254,8 +1253,7 @@ static int bam_dma_probe(struct platform_device *pdev)
bdev->layout = match->data;
- iores = platform_get_resource(pdev, IORESOURCE_MEM, 0);
- bdev->regs = devm_ioremap_resource(&pdev->dev, iores);
+ bdev->regs = devm_platform_ioremap_resource(pdev, 0);
if (IS_ERR(bdev->regs))
return PTR_ERR(bdev->regs);
diff --git a/drivers/dma/sf-pdma/sf-pdma.c b/drivers/dma/sf-pdma/sf-pdma.c
index 111111111111..222222222222 100644
--- a/drivers/dma/sf-pdma/sf-pdma.c
+++ b/drivers/dma/sf-pdma/sf-pdma.c
@@ -493,7 +493,6 @@ static void sf_pdma_setup_chans(struct sf_pdma *pdma)
static int sf_pdma_probe(struct platform_device *pdev)
{
struct sf_pdma *pdma;
- struct resource *res;
int ret, n_chans;
const enum dma_slave_buswidth widths =
DMA_SLAVE_BUSWIDTH_1_BYTE | DMA_SLAVE_BUSWIDTH_2_BYTES |
@@ -518,8 +517,7 @@ static int sf_pdma_probe(struct platform_device *pdev)
pdma->n_chans = n_chans;
- res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
- pdma->membase = devm_ioremap_resource(&pdev->dev, res);
+ pdma->membase = devm_platform_ioremap_resource(pdev, 0);
if (IS_ERR(pdma->membase))
return PTR_ERR(pdma->membase);
diff --git a/drivers/dma/sh/usb-dmac.c b/drivers/dma/sh/usb-dmac.c
index 111111111111..222222222222 100644
--- a/drivers/dma/sh/usb-dmac.c
+++ b/drivers/dma/sh/usb-dmac.c
@@ -768,7 +768,6 @@ static int usb_dmac_probe(struct platform_device *pdev)
const enum dma_slave_buswidth widths = USB_DMAC_SLAVE_BUSWIDTH;
struct dma_device *engine;
struct usb_dmac *dmac;
- struct resource *mem;
unsigned int i;
int ret;
@@ -789,8 +788,7 @@ static int usb_dmac_probe(struct platform_device *pdev)
return -ENOMEM;
/* Request resources. */
- mem = platform_get_resource(pdev, IORESOURCE_MEM, 0);
- dmac->iomem = devm_ioremap_resource(&pdev->dev, mem);
+ dmac->iomem = devm_platform_ioremap_resource(pdev, 0);
if (IS_ERR(dmac->iomem))
return PTR_ERR(dmac->iomem);
diff --git a/drivers/dma/stm32-dmamux.c b/drivers/dma/stm32-dmamux.c
index 111111111111..222222222222 100644
--- a/drivers/dma/stm32-dmamux.c
+++ b/drivers/dma/stm32-dmamux.c
@@ -179,7 +179,6 @@ static int stm32_dmamux_probe(struct platform_device *pdev)
const struct of_device_id *match;
struct device_node *dma_node;
struct stm32_dmamux_data *stm32_dmamux;
- struct resource *res;
void __iomem *iomem;
struct reset_control *rst;
int i, count, ret;
@@ -238,8 +237,7 @@ static int stm32_dmamux_probe(struct platform_device *pdev)
}
pm_runtime_get_noresume(&pdev->dev);
- res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
- iomem = devm_ioremap_resource(&pdev->dev, res);
+ iomem = devm_platform_ioremap_resource(pdev, 0);
if (IS_ERR(iomem))
return PTR_ERR(iomem);
diff --git a/drivers/dma/stm32-mdma.c b/drivers/dma/stm32-mdma.c
index 111111111111..222222222222 100644
--- a/drivers/dma/stm32-mdma.c
+++ b/drivers/dma/stm32-mdma.c
@@ -1595,7 +1595,6 @@ static int stm32_mdma_probe(struct platform_device *pdev)
struct stm32_mdma_device *dmadev;
struct dma_device *dd;
struct device_node *of_node;
- struct resource *res;
struct reset_control *rst;
u32 nr_channels, nr_requests;
int i, count, ret;
@@ -1637,8 +1636,7 @@ static int stm32_mdma_probe(struct platform_device *pdev)
count);
dmadev->nr_ahb_addr_masks = count;
- res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
- dmadev->base = devm_ioremap_resource(&pdev->dev, res);
+ dmadev->base = devm_platform_ioremap_resource(pdev, 0);
if (IS_ERR(dmadev->base))
return PTR_ERR(dmadev->base);
diff --git a/drivers/dma/sun4i-dma.c b/drivers/dma/sun4i-dma.c
index 111111111111..222222222222 100644
--- a/drivers/dma/sun4i-dma.c
+++ b/drivers/dma/sun4i-dma.c
@@ -1144,15 +1144,13 @@ static irqreturn_t sun4i_dma_interrupt(int irq, void *dev_id)
static int sun4i_dma_probe(struct platform_device *pdev)
{
struct sun4i_dma_dev *priv;
- struct resource *res;
int i, j, ret;
priv = devm_kzalloc(&pdev->dev, sizeof(*priv), GFP_KERNEL);
if (!priv)
return -ENOMEM;
- res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
- priv->base = devm_ioremap_resource(&pdev->dev, res);
+ priv->base = devm_platform_ioremap_resource(pdev, 0);
if (IS_ERR(priv->base))
return PTR_ERR(priv->base);
diff --git a/drivers/dma/sun6i-dma.c b/drivers/dma/sun6i-dma.c
index 111111111111..222222222222 100644
--- a/drivers/dma/sun6i-dma.c
+++ b/drivers/dma/sun6i-dma.c
@@ -1283,7 +1283,6 @@ static int sun6i_dma_probe(struct platform_device *pdev)
{
struct device_node *np = pdev->dev.of_node;
struct sun6i_dma_dev *sdc;
- struct resource *res;
int ret, i;
sdc = devm_kzalloc(&pdev->dev, sizeof(*sdc), GFP_KERNEL);
@@ -1294,8 +1293,7 @@ static int sun6i_dma_probe(struct platform_device *pdev)
if (!sdc->cfg)
return -ENODEV;
- res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
- sdc->base = devm_ioremap_resource(&pdev->dev, res);
+ sdc->base = devm_platform_ioremap_resource(pdev, 0);
if (IS_ERR(sdc->base))
return PTR_ERR(sdc->base);
diff --git a/drivers/dma/tegra210-adma.c b/drivers/dma/tegra210-adma.c
index 111111111111..222222222222 100644
--- a/drivers/dma/tegra210-adma.c
+++ b/drivers/dma/tegra210-adma.c
@@ -837,7 +837,6 @@ static int tegra_adma_probe(struct platform_device *pdev)
{
const struct tegra_adma_chip_data *cdata;
struct tegra_adma *tdma;
- struct resource *res;
int ret, i;
cdata = of_device_get_match_data(&pdev->dev);
@@ -857,8 +856,7 @@ static int tegra_adma_probe(struct platform_device *pdev)
tdma->nr_channels = cdata->nr_channels;
platform_set_drvdata(pdev, tdma);
- res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
- tdma->base_addr = devm_ioremap_resource(&pdev->dev, res);
+ tdma->base_addr = devm_platform_ioremap_resource(pdev, 0);
if (IS_ERR(tdma->base_addr))
return PTR_ERR(tdma->base_addr);
diff --git a/drivers/dma/ti/cppi41.c b/drivers/dma/ti/cppi41.c
index 111111111111..222222222222 100644
--- a/drivers/dma/ti/cppi41.c
+++ b/drivers/dma/ti/cppi41.c
@@ -1039,7 +1039,6 @@ static int cppi41_dma_probe(struct platform_device *pdev)
struct cppi41_dd *cdd;
struct device *dev = &pdev->dev;
const struct cppi_glue_infos *glue_info;
- struct resource *mem;
int index;
int irq;
int ret;
@@ -1072,18 +1071,15 @@ static int cppi41_dma_probe(struct platform_device *pdev)
if (index < 0)
return index;
- mem = platform_get_resource(pdev, IORESOURCE_MEM, index);
- cdd->ctrl_mem = devm_ioremap_resource(dev, mem);
+ cdd->ctrl_mem = devm_platform_ioremap_resource(pdev, index);
if (IS_ERR(cdd->ctrl_mem))
return PTR_ERR(cdd->ctrl_mem);
- mem = platform_get_resource(pdev, IORESOURCE_MEM, index + 1);
- cdd->sched_mem = devm_ioremap_resource(dev, mem);
+ cdd->sched_mem = devm_platform_ioremap_resource(pdev, index + 1);
if (IS_ERR(cdd->sched_mem))
return PTR_ERR(cdd->sched_mem);
- mem = platform_get_resource(pdev, IORESOURCE_MEM, index + 2);
- cdd->qmgr_mem = devm_ioremap_resource(dev, mem);
+ cdd->qmgr_mem = devm_platform_ioremap_resource(pdev, index + 2);
if (IS_ERR(cdd->qmgr_mem))
return PTR_ERR(cdd->qmgr_mem);
diff --git a/drivers/dma/ti/omap-dma.c b/drivers/dma/ti/omap-dma.c
index 111111111111..222222222222 100644
--- a/drivers/dma/ti/omap-dma.c
+++ b/drivers/dma/ti/omap-dma.c
@@ -1658,7 +1658,6 @@ static int omap_dma_probe(struct platform_device *pdev)
{
const struct omap_dma_config *conf;
struct omap_dmadev *od;
- struct resource *res;
int rc, i, irq;
u32 val;
@@ -1666,8 +1665,7 @@ static int omap_dma_probe(struct platform_device *pdev)
if (!od)
return -ENOMEM;
- res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
- od->base = devm_ioremap_resource(&pdev->dev, res);
+ od->base = devm_platform_ioremap_resource(pdev, 0);
if (IS_ERR(od->base))
return PTR_ERR(od->base);
diff --git a/drivers/dma/udma.c b/drivers/dma/udma.c
new file mode 100644
index 000000000000..111111111111
--- /dev/null
+++ b/drivers/dma/udma.c
@@ -0,0 +1,432 @@
+#include <linux/module.h>
+#include <linux/init.h>
+#include <linux/fs.h>
+#include <linux/sched.h>
+#include <linux/device.h>
+#include <linux/string.h>
+#include <linux/errno.h>
+#include <linux/types.h>
+#include <linux/slab.h>
+#include <linux/dmaengine.h>
+#include <linux/dma-mapping.h>
+#include <linux/uaccess.h>
+#include <linux/mutex.h>
+
+#define USE_DMA_MALLOC
+// #define DMA_CONFIG_DEBUG
+
+#define DEVICE_NAME "udma"
+#define IOC_MAGIC 'c'
+#define DMA_MEMCPY_CMD _IOR(IOC_MAGIC, 0, int)
+#define DMA_VA_TO_PA _IOR(IOC_MAGIC, 1, int)
+
+static int dma_major;
+static struct class *dma_class;
+static struct dma_device *dma_dev;
+static struct dma_chan *dma_chan;
+static struct dma_async_tx_descriptor *dma_tx;
+static struct list_head dmabuf_list;
+static struct completion dma_m2m_ok;
+static struct mutex dma_mutex;
+
+typedef struct {
+ void *src;
+ void *dst;
+ size_t size;
+#ifdef DMA_CONFIG_DEBUG
+ long long time[10];
+ int time_cnt;
+ int dma_irq_subscript;
+#endif
+} memcpy_msg_t;
+
+typedef struct {
+ void *user_addr;
+ void *dma_addr;
+} va_to_pa_msg_t;
+
+typedef struct {
+ size_t size; // Size of the buffer
+ unsigned long user_addr; // User virtual address of the buffer
+ void *kern_addr; // Kernel virtual address of the buffer
+ dma_addr_t dma_addr; // DMA bus address of the buffer
+ struct list_head list; // List node pointers for dma alloc list
+} dma_map_info_t;
+
+typedef struct {
+ dma_addr_t addr;
+ size_t size;
+ int dirty;
+} va2pa_t;
+
+#ifdef DMA_CONFIG_DEBUG
+#include <linux/time.h>
+
+#define DMA_TIME_STAMP() \
+ do { \
+ g_time[g_time_cnt++] = getus(); \
+ } while (0)
+
+static volatile int g_time_cnt;
+static volatile long long g_time[10];
+static int g_dma_irq_subscript;
+
+static long long getus(void)
+{
+ return ktime_to_us(ktime_get());
+}
+#else
+#define DMA_TIME_STAMP()
+#endif
+
+static void dma_callback_func(void *priv)
+{
+#ifdef DMA_CONFIG_DEBUG
+ g_dma_irq_subscript = g_time_cnt;
+#endif
+ DMA_TIME_STAMP();
+ complete(&dma_m2m_ok);
+}
+
+static int dma_open(struct inode *inode, struct file *filp)
+{
+ return (dma_dev != NULL) ? 0 : -EPERM;
+}
+
+static int dma_release(struct inode *inode, struct file *filp)
+{
+ return 0;
+}
+
+static ssize_t dma_read(struct file *filp, char __user *buf, size_t size, loff_t *ppos)
+{
+ return size;
+}
+
+static ssize_t dma_write(struct file *filp, const char __user *buf, size_t size, loff_t *ppos)
+{
+ return size;
+}
+
+#ifdef USE_DMA_MALLOC
+static int dma_malloc(dma_map_info_t *dma_info, struct vm_area_struct *vma)
+{
+ int ret;
+
+	dma_info->kern_addr = dma_alloc_coherent(dma_dev->dev, dma_info->size,
+						 &dma_info->dma_addr, GFP_KERNEL);
+	if (!dma_info->kern_addr) {
+		dev_err(dma_dev->dev,
+			"unable to allocate contiguous DMA memory region of size %zu\n",
+			dma_info->size);
+		return -ENOMEM;
+	}
+
+	ret = dma_mmap_coherent(dma_dev->dev, vma, dma_info->kern_addr,
+				dma_info->dma_addr, dma_info->size);
+	if (ret < 0) {
+		dev_err(dma_dev->dev,
+			"unable to remap address %p to userspace address 0x%lx, size %zu\n",
+			dma_info->kern_addr, dma_info->user_addr, dma_info->size);
+		/* don't leak the coherent buffer on failure */
+		dma_free_coherent(dma_dev->dev, dma_info->size,
+				  dma_info->kern_addr, dma_info->dma_addr);
+		return ret;
+	}
+
+ return 0;
+}
+
+static int dma_free(dma_map_info_t *dma_info)
+{
+ dma_free_coherent(dma_dev->dev, dma_info->size, dma_info->kern_addr, dma_info->dma_addr);
+
+ return 0;
+}
+#else
+static int kernel_malloc(dma_map_info_t *dma_info, struct vm_area_struct *vma)
+{
+	dma_info->kern_addr = kmalloc(dma_info->size, GFP_KERNEL);
+	if (!dma_info->kern_addr) {
+		dev_err(dma_dev->dev, "kmalloc failed\n");
+		return -ENOMEM;
+	}
+
+	if (remap_pfn_range(vma,
+			    vma->vm_start,
+			    (virt_to_phys(dma_info->kern_addr) >> PAGE_SHIFT),
+			    vma->vm_end - vma->vm_start,
+			    vma->vm_page_prot)) {
+		kfree(dma_info->kern_addr);
+		return -EAGAIN;
+	}
+
+	dma_info->dma_addr = dma_map_single(dma_dev->dev, dma_info->kern_addr,
+					    dma_info->size, DMA_FROM_DEVICE);
+	if (dma_mapping_error(dma_dev->dev, dma_info->dma_addr)) {
+		dev_err(dma_dev->dev, "mapping buffer failed\n");
+		kfree(dma_info->kern_addr);
+		return -ENOMEM;
+	}
+
+	return 0;
+}
+
+static int kernel_free(dma_map_info_t *dma_info)
+{
+ dma_unmap_single(dma_dev->dev, dma_info->dma_addr, dma_info->size, DMA_FROM_DEVICE);
+ kfree(dma_info->kern_addr);
+
+ return 0;
+}
+#endif
+
+static void dma_vma_close(struct vm_area_struct *vma)
+{
+	dma_map_info_t *dma_info;
+
+	dma_info = vma->vm_private_data;
+#ifdef USE_DMA_MALLOC
+	dma_free(dma_info);
+#else
+	kernel_free(dma_info);
+#endif
+	/* drop the tracking entry added in dma_mmap() before freeing */
+	list_del(&dma_info->list);
+	kfree(dma_info);
+}
+
+static const struct vm_operations_struct dma_vm_ops = {
+ .close = dma_vma_close,
+};
+
+static int dma_mmap(struct file *file, struct vm_area_struct *vma)
+{
+ int ret;
+ dma_map_info_t *dma_info;
+
+ dma_info = kmalloc(sizeof(*dma_info), GFP_KERNEL);
+ if (dma_info == NULL) {
+		dev_err(dma_dev->dev, "unable to allocate VMA data structure\n");
+ return -ENOMEM;
+ }
+
+ dma_info->size = vma->vm_end - vma->vm_start;
+ dma_info->user_addr = vma->vm_start;
+
+#ifdef USE_DMA_MALLOC
+ ret = dma_malloc(dma_info, vma);
+#else
+ ret = kernel_malloc(dma_info, vma);
+#endif
+
+	if (ret < 0) {
+		kfree(dma_info);
+		return ret;
+	}
+
+ vma->vm_ops = &dma_vm_ops;
+ vma->vm_private_data = dma_info;
+ vma->vm_flags |= VM_DONTCOPY;
+
+ list_add(&dma_info->list, &dmabuf_list);
+ return 0;
+}
+
+static int va2pa(void *va, size_t size, va2pa_t **va2pa)
+{
+ pmd_t *pmd;
+ pte_t *pte;
+
+ unsigned long pg_offset;
+ unsigned long pg_address;
+ unsigned long old_pfn;
+ unsigned long paddr;
+ size_t total;
+ int flag = 0;
+
+ va2pa_t *p;
+	int num = DIV_ROUND_UP(size, PAGE_SIZE) + 1; /* worst case: buffer straddles page boundaries */
+ unsigned long vaddr = (unsigned long)va;
+ int i, j;
+
+	p = kcalloc(num, sizeof(va2pa_t), GFP_KERNEL);
+	*va2pa = p;
+	if (!p)
+		return 0;
+
+ j = 0;
+ old_pfn = 0;
+ total = 0;
+
+	for (i = 0; i < num; i++) {
+		pmd = pmd_off(current->mm, vaddr);
+		if (pmd_none(*pmd)) {
+			dev_err(dma_dev->dev, "not in the pmd!");
+			flag = -1;
+			break;
+		}
+
+		pte = pte_offset_map(pmd, vaddr);
+		if (pte_none(*pte)) {
+			pte_unmap(pte);
+			dev_err(dma_dev->dev, "not in the pte!");
+			flag = -1;
+			break;
+		}
+
+		pg_offset = offset_in_page(vaddr);
+		pg_address = pte_pfn(*pte) << PAGE_SHIFT;
+		paddr = pg_address | pg_offset;
+		pte_unmap(pte);
+
+ if ((old_pfn + PAGE_SIZE) == pg_address) {
+ p[j].size += (PAGE_SIZE - (vaddr - (vaddr & PAGE_MASK)));
+ total += (PAGE_SIZE - (vaddr - (vaddr & PAGE_MASK)));
+ p[j].dirty = 1;
+ } else {
+ if (p[j].dirty == 1) {
+ j++;
+ }
+ p[j].addr = paddr;
+ p[j].size = (PAGE_SIZE - (vaddr - (vaddr & PAGE_MASK)));
+ total += p[j].size;
+ p[j].dirty = 1;
+ }
+ if (total >= size) {
+			j++;
+ break;
+ }
+ old_pfn = pg_address;
+ vaddr = (vaddr & PAGE_MASK) + PAGE_SIZE;
+ }
+
+ if (flag == -1) {
+ kfree(p);
+ *va2pa = NULL;
+ j = 0;
+ }
+
+ return j;
+}
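+
+/*
+ * Illustrative example (assumed layout): for a user buffer that starts
+ * 0x800 bytes into a page and whose first two pages happen to be
+ * physically contiguous, va2pa() would fill something like
+ *
+ *	p[0] = { .addr = pa0 + 0x800, .size = 2 * PAGE_SIZE - 0x800 }
+ *	p[1] = { .addr = pa2,         .size = remainder of the buffer }
+ *
+ * and return 2, the number of physically contiguous segments.
+ */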
+
+static int dma_memcpy(dma_addr_t dst, dma_addr_t src, size_t size)
+{
+#ifndef USE_DMA_MALLOC
+	dma_sync_single_for_cpu(dma_dev->dev, src, size, DMA_FROM_DEVICE);
+#endif
+	mutex_lock(&dma_mutex);
+	dma_tx = dma_dev->device_prep_dma_memcpy(dma_chan, dst, src, size,
+						 DMA_CTRL_ACK | DMA_PREP_INTERRUPT);
+	if (!dma_tx) {
+		dev_err(dma_dev->dev, "failed to prepare DMA memcpy\n");
+		mutex_unlock(&dma_mutex);
+		return -1;
+	}
+
+	/* completion is signalled from dma_callback_func() */
+	dma_tx->callback = dma_callback_func;
+	dma_tx->callback_param = NULL;
+
+	/* re-arm the completion before the transfer can possibly finish */
+	reinit_completion(&dma_m2m_ok);
+	if (dma_submit_error(dma_tx->tx_submit(dma_tx))) {
+		dev_err(dma_dev->dev, "failed to do DMA tx_submit\n");
+		mutex_unlock(&dma_mutex);
+		return -1;
+	}
+
+	dma_async_issue_pending(dma_chan);	/* start the transfer */
+	wait_for_completion(&dma_m2m_ok);
+
+#ifndef USE_DMA_MALLOC
+	dma_sync_single_for_device(dma_dev->dev, dst, size, DMA_FROM_DEVICE);
+#endif
+	mutex_unlock(&dma_mutex);
+
+	return 0;
+}
+
+static long dma_ioctl(struct file *file, unsigned int cmd, unsigned long arg)
+{
+ if (cmd == DMA_MEMCPY_CMD) {
+ memcpy_msg_t msg;
+ va2pa_t *s_pa_l, *d_pa_l;
+ int i, loop, ret;
+ size_t off;
+
+		if (copy_from_user(&msg, (void __user *)arg, sizeof(memcpy_msg_t)))
+			return -EFAULT;
+
+		/* the source must be physically contiguous (one segment) */
+		ret = va2pa(msg.src, msg.size, &s_pa_l);
+		if (ret != 1) {
+			kfree(s_pa_l);
+			return -EFAULT;
+		}
+
+		ret = va2pa(msg.dst, msg.size, &d_pa_l);
+		if (ret == 0) {
+			kfree(s_pa_l);
+			return -EFAULT;
+		}
+
+ loop = ret;
+ off = 0;
+
+ for (i = 0; i < loop; i++) {
+ dma_memcpy(d_pa_l[i].addr, s_pa_l[0].addr + off, d_pa_l[i].size);
+ off += d_pa_l[i].size;
+ }
+
+ kfree(s_pa_l);
+ kfree(d_pa_l);
+ }
+ return 0;
+}
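+
+/*
+ * Minimal userspace sketch, assuming the /dev/udma node created in
+ * dma_init() below ("len", "src" and the open flags are placeholders):
+ *
+ *	int fd = open("/dev/udma", O_RDWR);
+ *	void *dst = mmap(NULL, len, PROT_READ | PROT_WRITE, MAP_SHARED, fd, 0);
+ *	memcpy_msg_t msg = { .src = src, .dst = dst, .size = len };
+ *	ioctl(fd, DMA_MEMCPY_CMD, &msg);
+ */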
+
+static const struct file_operations dma_fops = {
+ .owner = THIS_MODULE,
+ .read = dma_read,
+ .write = dma_write,
+ .open = dma_open,
+ .release = dma_release,
+ .mmap = dma_mmap,
+ .unlocked_ioctl = dma_ioctl,
+};
+
+static int dma_init(void)
+{
+	dma_cap_mask_t mask;
+	struct device *dev_ret;
+	int ret;
+
+	dma_major = register_chrdev(0, DEVICE_NAME, &dma_fops);
+	if (dma_major < 0)
+		return dma_major;
+
+	dma_class = class_create(THIS_MODULE, DEVICE_NAME);
+	if (IS_ERR(dma_class)) {
+		ret = PTR_ERR(dma_class);
+		goto err_chrdev;
+	}
+
+	dev_ret = device_create(dma_class, NULL, MKDEV(dma_major, 0), NULL, DEVICE_NAME);
+	if (IS_ERR(dev_ret)) {
+		ret = PTR_ERR(dev_ret);
+		goto err_class;
+	}
+
+	dma_cap_zero(mask);
+	dma_cap_set(DMA_MEMCPY, mask);	/* memory-to-memory transfers */
+	dma_chan = dma_request_channel(mask, NULL, NULL);
+	if (!dma_chan) {
+		pr_err("dma request failed\n");
+		ret = -ENODEV;
+		goto err_device;
+	}
+
+	dma_dev = dma_chan->device;
+	dma_set_mask(dma_dev->dev, DMA_BIT_MASK(32));
+
+	INIT_LIST_HEAD(&dmabuf_list);
+	dev_dbg(dma_dev->dev, "dma channel id = %d\n", dma_chan->chan_id);
+	mutex_init(&dma_mutex);
+	init_completion(&dma_m2m_ok);
+
+	return 0;
+
+err_device:
+	device_destroy(dma_class, MKDEV(dma_major, 0));
+err_class:
+	class_destroy(dma_class);
+err_chrdev:
+	unregister_chrdev(dma_major, DEVICE_NAME);
+	return ret;
+}
+
+static void dma_exit(void)
+{
+	device_destroy(dma_class, MKDEV(dma_major, 0));
+	class_destroy(dma_class);
+	unregister_chrdev(dma_major, DEVICE_NAME);
+	dma_release_channel(dma_chan);
+}
+
+module_init(dma_init);
+module_exit(dma_exit);
+
+MODULE_LICENSE("GPL");
\ No newline at end of file
diff --git a/drivers/dma/xilinx/zynqmp_dma.c b/drivers/dma/xilinx/zynqmp_dma.c
index 111111111111..222222222222 100644
--- a/drivers/dma/xilinx/zynqmp_dma.c
+++ b/drivers/dma/xilinx/zynqmp_dma.c
@@ -890,7 +890,6 @@ static int zynqmp_dma_chan_probe(struct zynqmp_dma_device *zdev,
struct platform_device *pdev)
{
struct zynqmp_dma_chan *chan;
- struct resource *res;
struct device_node *node = pdev->dev.of_node;
int err;
@@ -900,8 +899,7 @@ static int zynqmp_dma_chan_probe(struct zynqmp_dma_device *zdev,
chan->dev = zdev->dev;
chan->zdev = zdev;
- res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
- chan->regs = devm_ioremap_resource(&pdev->dev, res);
+ chan->regs = devm_platform_ioremap_resource(pdev, 0);
if (IS_ERR(chan->regs))
return PTR_ERR(chan->regs);
--
Armbian