27874 lines
833 KiB
Diff
27874 lines
833 KiB
Diff
From 0000000000000000000000000000000000000000 Mon Sep 17 00:00:00 2001
|
|
From: Patrick Yavitz <pyavitz@armbian.com>
|
|
Date: Fri, 21 Jun 2024 11:54:06 -0400
|
|
Subject: add spacemit patch set
|
|
|
|
source: https://gitee.com/bianbu-linux/linux-6.1
|
|
|
|
Signed-off-by: Patrick Yavitz <pyavitz@armbian.com>
|
|
---
|
|
drivers/media/platform/Kconfig | 1 +
|
|
drivers/media/platform/Makefile | 1 +
|
|
drivers/media/platform/spacemit/Kconfig | 7 +
|
|
drivers/media/platform/spacemit/Makefile | 3 +
|
|
drivers/media/platform/spacemit/camera/Kconfig | 56 +
|
|
drivers/media/platform/spacemit/camera/Makefile | 35 +
|
|
drivers/media/platform/spacemit/camera/cam_ccic/ccic_drv.c | 863 ++
|
|
drivers/media/platform/spacemit/camera/cam_ccic/ccic_drv.h | 224 +
|
|
drivers/media/platform/spacemit/camera/cam_ccic/ccic_hwreg.c | 353 +
|
|
drivers/media/platform/spacemit/camera/cam_ccic/ccic_hwreg.h | 237 +
|
|
drivers/media/platform/spacemit/camera/cam_ccic/csiphy.c | 364 +
|
|
drivers/media/platform/spacemit/camera/cam_ccic/csiphy.h | 25 +
|
|
drivers/media/platform/spacemit/camera/cam_ccic/dptc_drv.c | 747 ++
|
|
drivers/media/platform/spacemit/camera/cam_ccic/dptc_drv.h | 110 +
|
|
drivers/media/platform/spacemit/camera/cam_ccic/dptc_pll_setting.h | 56 +
|
|
drivers/media/platform/spacemit/camera/cam_cpp/cpp-v2p0.c | 555 +
|
|
drivers/media/platform/spacemit/camera/cam_cpp/cpp_compat_ioctl32.c | 166 +
|
|
drivers/media/platform/spacemit/camera/cam_cpp/cpp_compat_ioctl32.h | 12 +
|
|
drivers/media/platform/spacemit/camera/cam_cpp/cpp_dmabuf.c | 250 +
|
|
drivers/media/platform/spacemit/camera/cam_cpp/cpp_dmabuf.h | 85 +
|
|
drivers/media/platform/spacemit/camera/cam_cpp/cpp_iommu.c | 833 ++
|
|
drivers/media/platform/spacemit/camera/cam_cpp/cpp_iommu.h | 73 +
|
|
drivers/media/platform/spacemit/camera/cam_cpp/k1x_cpp.c | 1451 +++
|
|
drivers/media/platform/spacemit/camera/cam_cpp/k1x_cpp.h | 188 +
|
|
drivers/media/platform/spacemit/camera/cam_cpp/regs-cpp-iommu.h | 37 +
|
|
drivers/media/platform/spacemit/camera/cam_cpp/regs-cpp-v2p0.h | 147 +
|
|
drivers/media/platform/spacemit/camera/cam_cpp/regs-fbc-v2p0.h | 70 +
|
|
drivers/media/platform/spacemit/camera/cam_isp/k1x_isp_drv.c | 794 ++
|
|
drivers/media/platform/spacemit/camera/cam_isp/k1x_isp_drv.h | 469 +
|
|
drivers/media/platform/spacemit/camera/cam_isp/k1x_isp_pipe.c | 1845 ++++
|
|
drivers/media/platform/spacemit/camera/cam_isp/k1x_isp_pipe.h | 12 +
|
|
drivers/media/platform/spacemit/camera/cam_isp/k1x_isp_reg.c | 171 +
|
|
drivers/media/platform/spacemit/camera/cam_isp/k1x_isp_reg.h | 117 +
|
|
drivers/media/platform/spacemit/camera/cam_isp/k1x_isp_statistic.c | 1255 +++
|
|
drivers/media/platform/spacemit/camera/cam_isp/k1x_isp_statistic.h | 144 +
|
|
drivers/media/platform/spacemit/camera/cam_plat/cam_plat.c | 294 +
|
|
drivers/media/platform/spacemit/camera/cam_plat/cam_plat.h | 180 +
|
|
drivers/media/platform/spacemit/camera/cam_sensor/cam_sensor.c | 1419 +++
|
|
drivers/media/platform/spacemit/camera/cam_sensor/cam_sensor.h | 42 +
|
|
drivers/media/platform/spacemit/camera/cam_util/cam_dbg.c | 90 +
|
|
drivers/media/platform/spacemit/camera/cam_util/cam_dbg.h | 96 +
|
|
drivers/media/platform/spacemit/camera/vi/cam_block.c | 58 +
|
|
drivers/media/platform/spacemit/camera/vi/cam_block.h | 34 +
|
|
drivers/media/platform/spacemit/camera/vi/k1xvi/fe_isp.c | 5416 ++++++++++
|
|
drivers/media/platform/spacemit/camera/vi/k1xvi/fe_isp.h | 284 +
|
|
drivers/media/platform/spacemit/camera/vi/k1xvi/hw-seq/hw_ccic.c | 55 +
|
|
drivers/media/platform/spacemit/camera/vi/k1xvi/hw-seq/hw_ccic.h | 25 +
|
|
drivers/media/platform/spacemit/camera/vi/k1xvi/hw-seq/hw_dma.c | 346 +
|
|
drivers/media/platform/spacemit/camera/vi/k1xvi/hw-seq/hw_dma.h | 134 +
|
|
drivers/media/platform/spacemit/camera/vi/k1xvi/hw-seq/hw_iommu.c | 373 +
|
|
drivers/media/platform/spacemit/camera/vi/k1xvi/hw-seq/hw_iommu.h | 66 +
|
|
drivers/media/platform/spacemit/camera/vi/k1xvi/hw-seq/hw_isp.c | 587 +
|
|
drivers/media/platform/spacemit/camera/vi/k1xvi/hw-seq/hw_isp.h | 172 +
|
|
drivers/media/platform/spacemit/camera/vi/k1xvi/hw-seq/hw_postpipe.c | 138 +
|
|
drivers/media/platform/spacemit/camera/vi/k1xvi/hw-seq/hw_postpipe.h | 55 +
|
|
drivers/media/platform/spacemit/camera/vi/k1xvi/hw-seq/hw_reg.h | 650 ++
|
|
drivers/media/platform/spacemit/camera/vi/k1xvi/hw-seq/hw_reg_iommu.h | 40 +
|
|
drivers/media/platform/spacemit/camera/vi/k1xvi/k1xvi.c | 565 +
|
|
drivers/media/platform/spacemit/camera/vi/k1xvi/k1xvi.h | 59 +
|
|
drivers/media/platform/spacemit/camera/vi/mlink.c | 831 ++
|
|
drivers/media/platform/spacemit/camera/vi/mlink.h | 171 +
|
|
drivers/media/platform/spacemit/camera/vi/spacemit_videobuf2.h | 41 +
|
|
drivers/media/platform/spacemit/camera/vi/subdev.c | 476 +
|
|
drivers/media/platform/spacemit/camera/vi/subdev.h | 79 +
|
|
drivers/media/platform/spacemit/camera/vi/vdev.c | 2384 ++++
|
|
drivers/media/platform/spacemit/camera/vi/vdev.h | 170 +
|
|
drivers/media/platform/spacemit/camera/vi/vsensor.c | 238 +
|
|
drivers/media/platform/spacemit/camera/vi/vsensor.h | 49 +
|
|
68 files changed, 27373 insertions(+)
|
|
|
|
diff --git a/drivers/media/platform/Kconfig b/drivers/media/platform/Kconfig
|
|
index 111111111111..222222222222 100644
|
|
--- a/drivers/media/platform/Kconfig
|
|
+++ b/drivers/media/platform/Kconfig
|
|
@@ -84,5 +84,6 @@ source "drivers/media/platform/ti/Kconfig"
|
|
source "drivers/media/platform/verisilicon/Kconfig"
|
|
source "drivers/media/platform/via/Kconfig"
|
|
source "drivers/media/platform/xilinx/Kconfig"
|
|
+source "drivers/media/platform/spacemit/Kconfig"
|
|
|
|
endif # MEDIA_PLATFORM_DRIVERS
|
|
diff --git a/drivers/media/platform/Makefile b/drivers/media/platform/Makefile
|
|
index 111111111111..222222222222 100644
|
|
--- a/drivers/media/platform/Makefile
|
|
+++ b/drivers/media/platform/Makefile
|
|
@@ -27,6 +27,7 @@ obj-y += ti/
|
|
obj-y += verisilicon/
|
|
obj-y += via/
|
|
obj-y += xilinx/
|
|
+obj-y += spacemit/
|
|
|
|
# Please place here only ancillary drivers that aren't SoC-specific
|
|
# Please keep it alphabetically sorted by Kconfig name
|
|
diff --git a/drivers/media/platform/spacemit/Kconfig b/drivers/media/platform/spacemit/Kconfig
|
|
new file mode 100644
|
|
index 000000000000..111111111111
|
|
--- /dev/null
|
|
+++ b/drivers/media/platform/spacemit/Kconfig
|
|
@@ -0,0 +1,7 @@
|
|
+# SPDX-License-Identifier: GPL-2.0-only
|
|
+
|
|
+comment "Spacemit media platform drivers"
|
|
+
|
|
+source "drivers/media/platform/spacemit/vpu_k1x/Kconfig"
|
|
+source "drivers/media/platform/spacemit/camera/Kconfig"
|
|
+
|
|
diff --git a/drivers/media/platform/spacemit/Makefile b/drivers/media/platform/spacemit/Makefile
|
|
new file mode 100644
|
|
index 000000000000..111111111111
|
|
--- /dev/null
|
|
+++ b/drivers/media/platform/spacemit/Makefile
|
|
@@ -0,0 +1,3 @@
|
|
+# SPDX-License-Identifier: GPL-2.0-only
|
|
+obj-y += vpu_k1x/
|
|
+obj-y += camera/
|
|
diff --git a/drivers/media/platform/spacemit/camera/Kconfig b/drivers/media/platform/spacemit/camera/Kconfig
|
|
new file mode 100644
|
|
index 000000000000..111111111111
|
|
--- /dev/null
|
|
+++ b/drivers/media/platform/spacemit/camera/Kconfig
|
|
@@ -0,0 +1,56 @@
|
|
+# SPDX-License-Identifier: GPL-2.0
|
|
+#
|
|
+# SPACEMIT K1X camera configuration
|
|
+#
|
|
+
|
|
+comment "SPACEMIT K1X Camera And Video V2"
|
|
+
|
|
+menuconfig SPACEMIT_K1X_CAMERA_V2
|
|
+ tristate "SPACEMIT K1X camera and video capture V2 support"
|
|
+ select MEDIA_CONTROLLER
|
|
+ select VIDEO_V4L2_SUBDEV_API
|
|
+ help
|
|
+ Say Y here to enable selecting the video adapters for
|
|
+ SPACEMIT K1X camera and video v2
|
|
+
|
|
+config SPACEMIT_K1X_CCIC_V2
|
|
+ tristate "SPACEMIT K1X CCIC V2 support"
|
|
+ depends on SPACEMIT_K1X_CAMERA_V2
|
|
+ help
|
|
+ Enable support for ccic v2
|
|
+
|
|
+config SPACEMIT_K1X_VI_V2
|
|
+ tristate "SPACEMIT K1X VI V2 support"
|
|
+ select VIDEOBUF2_CORE
|
|
+ select VIDEOBUF2_V4L2
|
|
+ select VIDEOBUF2_DMA_CONTIG
|
|
+ select VIDEOBUF2_DMA_SG
|
|
+ select SPACEMIT_K1X_CCIC_V2
|
|
+ depends on SPACEMIT_K1X_CAMERA_V2
|
|
+ help
|
|
+ Enable support for vi
|
|
+
|
|
+config SPACEMIT_K1X_VI_IOMMU
|
|
+ bool "SPACEMIT K1X VI IOMMU support"
|
|
+ depends on SPACEMIT_K1X_VI_V2
|
|
+ help
|
|
+ Enable support for vi iommu
|
|
+
|
|
+
|
|
+config SPACEMIT_K1X_ISP_V2
|
|
+ tristate "SPACEMIT K1X ISP V2 support"
|
|
+ depends on SPACEMIT_K1X_CAMERA_V2
|
|
+ help
|
|
+ Enable support for isp v2
|
|
+
|
|
+config SPACEMIT_K1X_CPP_V2
|
|
+ tristate "SPACEMIT K1X CPP V2 support"
|
|
+ depends on SPACEMIT_K1X_CAMERA_V2
|
|
+ help
|
|
+ Enable support for cpp v2
|
|
+
|
|
+config SPACEMIT_K1X_SENSOR_V2
|
|
+ tristate "SPACEMIT K1X SENSOR V2 support"
|
|
+ depends on SPACEMIT_K1X_CAMERA_V2
|
|
+ help
|
|
+ Enable support for sensor v2
|
|
diff --git a/drivers/media/platform/spacemit/camera/Makefile b/drivers/media/platform/spacemit/camera/Makefile
|
|
new file mode 100644
|
|
index 000000000000..111111111111
|
|
--- /dev/null
|
|
+++ b/drivers/media/platform/spacemit/camera/Makefile
|
|
@@ -0,0 +1,35 @@
|
|
+# SPDX-License-Identifier: GPL-2.0
|
|
+subdir-ccflags-y += -I$(srctree)/drivers/media/platform/spacemit/camera/cam_util
|
|
+subdir-ccflags-y += -I$(srctree)/drivers/media/platform/spacemit/camera/cam_plat
|
|
+# subdir-ccflags-y += -DCONFIG_ARCH_ZYNQMP
|
|
+subdir-ccflags-y += -DCONFIG_ARCH_SPACEMIT
|
|
+
|
|
+obj-$(CONFIG_SPACEMIT_K1X_CAMERA_V2) += cam_plat_v2.o
|
|
+cam_plat_v2-objs = cam_plat/cam_plat.o cam_util/cam_dbg.o
|
|
+
|
|
+obj-$(CONFIG_SPACEMIT_K1X_CCIC_V2) += cam_ccic_v2.o
|
|
+cam_ccic_v2-objs += cam_ccic/ccic_hwreg.o
|
|
+cam_ccic_v2-objs += cam_ccic/csiphy.o
|
|
+cam_ccic_v2-objs += cam_ccic/ccic_drv.o
|
|
+cam_ccic_v2-objs += cam_ccic/dptc_drv.o
|
|
+
|
|
+obj-$(CONFIG_SPACEMIT_K1X_CPP_V2) += cam_cpp_v2.o
|
|
+cam_cpp_v2-objs += cam_cpp/k1x_cpp.o
|
|
+#cam_cpp_v2-objs += cam_cpp/cpp_compat_ioctl32.o
|
|
+cam_cpp_v2-objs += cam_cpp/cpp-v2p0.o
|
|
+cam_cpp_v2-objs += cam_cpp/cpp_dmabuf.o
|
|
+cam_cpp_v2-objs += cam_cpp/cpp_iommu.o
|
|
+
|
|
+obj-$(CONFIG_SPACEMIT_K1X_SENSOR_V2) += cam_sensor_v2.o
|
|
+cam_sensor_v2-objs += cam_sensor/cam_sensor.o
|
|
+
|
|
+obj-$(CONFIG_SPACEMIT_K1X_ISP_V2) += cam_isp_v2.o
|
|
+cam_isp_v2-objs += cam_isp/k1x_isp_drv.o cam_isp/k1x_isp_reg.o cam_isp/k1x_isp_statistic.o cam_isp/k1x_isp_pipe.o
|
|
+
|
|
+obj-$(CONFIG_SPACEMIT_K1X_VI_V2) += cam_vi_v2.o
|
|
+cam_vi_v2-objs += vi/mlink.o vi/cam_block.o vi/vdev.o vi/subdev.o vi/vsensor.o
|
|
+cam_vi_v2-objs += vi/k1xvi/hw-seq/hw_dma.o vi/k1xvi/hw-seq/hw_isp.o vi/k1xvi/hw-seq/hw_postpipe.o vi/k1xvi/hw-seq/hw_iommu.o
|
|
+cam_vi_v2-objs += vi/k1xvi/fe_isp.o vi/k1xvi/k1xvi.o
|
|
+#
|
|
+#obj-$(CONFIG_LEDS_AW36515) += leds-aw36515.o
|
|
+#leds-aw36515-objs += flash/leds-aw36515.o
|
|
diff --git a/drivers/media/platform/spacemit/camera/cam_ccic/ccic_drv.c b/drivers/media/platform/spacemit/camera/cam_ccic/ccic_drv.c
|
|
new file mode 100644
|
|
index 000000000000..111111111111
|
|
--- /dev/null
|
|
+++ b/drivers/media/platform/spacemit/camera/cam_ccic/ccic_drv.c
|
|
@@ -0,0 +1,863 @@
|
|
+// SPDX-License-Identifier: GPL-2.0
|
|
+/*
|
|
+ * Driver for SPACEMIT K1X IPE MODULE
|
|
+ *
|
|
+ * Copyright(C) 2023 SPACEMIT Micro Limited.
|
|
+ */
|
|
+#define DEBUG /* for pr_debug() */
|
|
+
|
|
+#include <linux/kernel.h>
|
|
+#include <linux/module.h>
|
|
+#include <linux/device.h>
|
|
+#include <linux/io.h>
|
|
+#include <linux/errno.h>
|
|
+#include <linux/platform_device.h>
|
|
+#include <linux/pm_runtime.h>
|
|
+#include <linux/interrupt.h>
|
|
+#include <media/v4l2-common.h>
|
|
+#include <media/v4l2-dev.h>
|
|
+#include <media/videobuf2-dma-contig.h>
|
|
+#include <media/videobuf2-dma-sg.h>
|
|
+#include <linux/delay.h>
|
|
+#include <linux/clk.h>
|
|
+#include "ccic_drv.h"
|
|
+#include "ccic_hwreg.h"
|
|
+#include "csiphy.h"
|
|
+
|
|
+#ifdef CONFIG_ARCH_ZYNQMP
|
|
+#include "dptc_drv.h"
|
|
+#include "dptc_pll_setting.h"
|
|
+#endif
|
|
+
|
|
+#define K1X_CCIC_DRV_NAME "k1xccic"
|
|
+
|
|
+static LIST_HEAD(ccic_devices);
|
|
+static DEFINE_MUTEX(list_lock);
|
|
+
|
|
+static void ccic_irqmask(struct ccic_ctrl *ctrl, int on)
|
|
+{
|
|
+ struct ccic_dev *ccic_dev = ctrl->ccic_dev;
|
|
+ u32 mask_val = ccic_dev->interrupt_mask_value;
|
|
+
|
|
+ if (on) {
|
|
+ ccic_reg_write(ccic_dev, REG_IRQSTAT, mask_val);
|
|
+ ccic_reg_set_bit(ccic_dev, REG_IRQMASK, mask_val);
|
|
+ } else {
|
|
+ ccic_reg_clear_bit(ccic_dev, REG_IRQMASK, mask_val);
|
|
+ }
|
|
+}
|
|
+
|
|
+#ifndef CONFIG_ARCH_ZYNQMP
|
|
+static int ccic_config_csi2(struct ccic_dev *ccic_dev, struct mipi_csi2 *csi,
|
|
+ int enable)
|
|
+{
|
|
+ unsigned int dphy5_val = 0;
|
|
+ unsigned int ctrl0_val = 0;
|
|
+ int lanes = csi->dphy_desc.nr_lane;
|
|
+
|
|
+ if (!ccic_dev->csiphy)
|
|
+ return -EINVAL;
|
|
+
|
|
+ if (enable) {
|
|
+ csiphy_start(ccic_dev->csiphy, csi);
|
|
+ dphy5_val = CSI2_DPHY5_LANE_ENA(lanes);
|
|
+ dphy5_val = dphy5_val | (dphy5_val << CSI2_DPHY5_LANE_RESC_ENA_SHIFT);
|
|
+ ctrl0_val = ccic_reg_read(ccic_dev, REG_CSI2_CTRL0);
|
|
+ ctrl0_val &= ~(CSI2_C0_LANE_NUM_MASK);
|
|
+ ctrl0_val |= CSI2_C0_LANE_NUM(lanes);
|
|
+ ctrl0_val |= CSI2_C0_ENABLE;
|
|
+ ctrl0_val &= ~(CSI2_C0_VLEN_MASK);
|
|
+ ctrl0_val |= CSI2_C0_VLEN;
|
|
+ ccic_reg_write(ccic_dev, REG_CSI2_DPHY5, dphy5_val);
|
|
+ ccic_reg_write(ccic_dev, REG_CSI2_CTRL0, ctrl0_val);
|
|
+ } else {
|
|
+ csiphy_stop(ccic_dev->csiphy);
|
|
+ ccic_reg_clear_bit(ccic_dev, REG_CSI2_CTRL0, CSI2_C0_ENABLE); //csi off
|
|
+ }
|
|
+
|
|
+ return 0;
|
|
+}
|
|
+
|
|
+#else /* connected to DPTC daughter board */
|
|
+/*ipe_fpga_rstn_adr*/
|
|
+#define REG_CSI2_FPGA_RSTN (0x1fc)
|
|
+static int ccic_config_csi2(struct ccic_dev *ccic_dev,
|
|
+ struct mipi_csi2 *csi, int enable)
|
|
+{
|
|
+ unsigned int ctrl0_val = 0;
|
|
+ int lanes = 1;
|
|
+ unsigned int sensor_width = 0, sensor_height = 0;
|
|
+
|
|
+ if (!enable)
|
|
+ goto out;
|
|
+
|
|
+ ctrl0_val = ccic_reg_read(ccic_dev, REG_CSI2_CTRL0);
|
|
+ ctrl0_val &= ~(CSI2_C0_LANE_NUM_MASK);
|
|
+ ctrl0_val |= CSI2_C0_LANE_NUM(lanes);
|
|
+ ctrl0_val |= CSI2_C0_ENABLE;
|
|
+ ctrl0_val &= ~(CSI2_C0_VLEN_MASK);
|
|
+ ctrl0_val |= CSI2_C0_VLEN;
|
|
+ ccic_reg_write(ccic_dev, REG_CSI2_CTRL0, ctrl0_val);
|
|
+
|
|
+out:
|
|
+ if (!enable) {
|
|
+ ccic_reg_clear_bit(ccic_dev, REG_CSI2_CTRL0, CSI2_C0_ENABLE); //csi off
|
|
+ DPTC_func3_close();
|
|
+ } else {
|
|
+ DPTC_func3_open();
|
|
+ /* csi_ccic_fpga_reset:bit[5:0] = 00 */
|
|
+ ccic_reg_write_mask(ccic_dev, REG_CSI2_FPGA_RSTN, 0, 0x3f);
|
|
+ /* udelay(10); */
|
|
+ dptc_csi_reg_setting(16, sensor_width, sensor_height, lanes);
|
|
+ /*csi_ccic_fpga_release: bit[5:0] = 0x3f */
|
|
+ ccic_reg_write_mask(ccic_dev, REG_CSI2_FPGA_RSTN, 0x3f, 0x3f);
|
|
+ /* udelay(10); */
|
|
+ }
|
|
+
|
|
+ return 0;
|
|
+}
|
|
+#endif
|
|
+
|
|
+static int ccic_config_csi2_dphy(struct ccic_ctrl *ctrl,
|
|
+ struct mipi_csi2 *csi, int enable)
|
|
+{
|
|
+ int ret = 0;
|
|
+ struct ccic_dev *ccic_dev = ctrl->ccic_dev;
|
|
+
|
|
+ ret = ccic_config_csi2(ccic_dev, csi, enable);
|
|
+ if (ret)
|
|
+ dev_err(ccic_dev->dev, "csi2 config failed\n");
|
|
+
|
|
+ return ret;
|
|
+}
|
|
+
|
|
+static int ccic_config_csi2_vc(struct ccic_ctrl *ctrl, int md, u8 vc0, u8 vc1)
|
|
+{
|
|
+ int ret = 0;
|
|
+ struct ccic_dev *ccic_dev = ctrl->ccic_dev;
|
|
+
|
|
+ switch (md) {
|
|
+ case CCIC_CSI2VC_NM: /* Normal mode */
|
|
+ ccic_reg_write_mask(ccic_dev, REG_CSI2_VCCTRL,
|
|
+ CSI2_VCCTRL_MD_NORMAL, CSI2_VCCTRL_MD_MASK);
|
|
+ break;
|
|
+ case CCIC_CSI2VC_VC: /* Virtual Channel mode */
|
|
+ ccic_reg_write_mask(ccic_dev, REG_CSI2_VCCTRL,
|
|
+ CSI2_VCCTRL_MD_VC, CSI2_VCCTRL_MD_MASK);
|
|
+ ccic_reg_write_mask(ccic_dev, REG_CSI2_VCCTRL, vc0 << 14,
|
|
+ CSI2_VCCTRL_VC0_MASK);
|
|
+ ccic_reg_write_mask(ccic_dev, REG_CSI2_VCCTRL, vc1 << 22,
|
|
+ CSI2_VCCTRL_VC1_MASK);
|
|
+ break;
|
|
+ case CCIC_CSI2VC_DT: /* TODO: Data-Type Interleaving */
|
|
+ ccic_reg_write_mask(ccic_dev, REG_CSI2_VCCTRL,
|
|
+ CSI2_VCCTRL_MD_DT, CSI2_VCCTRL_MD_MASK);
|
|
+ pr_err("csi2 vc mode %d todo\n", md);
|
|
+ break;
|
|
+ default:
|
|
+ dev_err(ccic_dev->dev, "invalid csi2 vc mode %d\n", md);
|
|
+ ret = -EINVAL;
|
|
+ }
|
|
+
|
|
+ return ret;
|
|
+}
|
|
+
|
|
+/**
|
|
+ * ccic_config_idi_mux - DPCM/Repack Mux Select
|
|
+ *
|
|
+ * @ctrl: ccic controller
|
|
+ * @mux: ipe mux path
|
|
+ *
|
|
+ * Return: 0 on success, error code otherwise.
|
|
+ */
|
|
+static int ccic_config_idi_mux(struct ccic_ctrl *ctrl, int mux)
|
|
+{
|
|
+ int ret = 0;
|
|
+ struct ccic_dev *ccic_dev = ctrl->ccic_dev;
|
|
+
|
|
+ /*
|
|
+ * IPE1-->
|
|
+ * 0x0 Select "local" CSI2 main output
|
|
+ * 0x1 Select "IPE2" CSI2 VC/DT output
|
|
+ * 0x2 Select "IPE2" CSI2 main output
|
|
+ * 0x3 Select "IPE3" CSI2 VC/DT output
|
|
+ *
|
|
+ * IPE3-->
|
|
+ * 0x0 Select "local" CSI2 main output
|
|
+ * 0x1 Select "IPE2" CSI2 VC/DT output
|
|
+ * 0x2 Select "IPE2" CSI2 main output
|
|
+ * 0x3 Select "IPE1" CSI2 VC/DT output
|
|
+ */
|
|
+ switch (mux) {
|
|
+ case CCIC_IDI_MUX_LOCAL_MAIN:
|
|
+ ccic_reg_write_mask(ccic_dev, REG_CSI2_CTRL2,
|
|
+ CSI2_C2_MUX_SEL_LOCAL_MAIN, CSI2_C2_MUX_SEL_MASK);
|
|
+ break;
|
|
+ case CCIC_IDI_MUX_IPE2_VCDT:
|
|
+ ccic_reg_write_mask(ccic_dev, REG_CSI2_CTRL2,
|
|
+ CSI2_C2_MUX_SEL_IPE2_VCDT, CSI2_C2_MUX_SEL_MASK);
|
|
+ break;
|
|
+ case CCIC_IDI_MUX_IPE2_MAIN:
|
|
+ ccic_reg_write_mask(ccic_dev, REG_CSI2_CTRL2,
|
|
+ CSI2_C2_MUX_SEL_IPE2_MAIN, CSI2_C2_MUX_SEL_MASK);
|
|
+ break;
|
|
+ case CCIC_IDI_MUX_REMOTE_VCDT:
|
|
+ ccic_reg_write_mask(ccic_dev, REG_CSI2_CTRL2,
|
|
+ CSI2_C2_MUX_SEL_REMOTE_VCDT, CSI2_C2_MUX_SEL_MASK);
|
|
+ break;
|
|
+ default:
|
|
+ dev_err(ccic_dev->dev, "invalid idi mux %d\n", mux);
|
|
+ ret = -EINVAL;
|
|
+ }
|
|
+
|
|
+ return ret;
|
|
+}
|
|
+
|
|
+static int ccic_config_idi_sel(struct ccic_ctrl *ctrl, int sel)
|
|
+{
|
|
+ int ret = 0;
|
|
+ struct ccic_dev *ccic_dev = ctrl->ccic_dev;
|
|
+
|
|
+ switch (sel) {
|
|
+ case CCIC_IDI_SEL_NONE:
|
|
+ /* ccic_reg_clear_bit(ccic_dev, REG_IDI_CTRL, IDI_RELEASE_RESET); */
|
|
+ ccic_reg_set_bit(ccic_dev, REG_CSI2_CTRL2, CSI2_C2_REPACK_RST);
|
|
+ ccic_reg_clear_bit(ccic_dev, REG_CSI2_CTRL2, CSI2_C2_REPACK_ENA);
|
|
+ ccic_reg_clear_bit(ccic_dev, REG_CSI2_CTRL2, CSI2_C2_DPCM_ENA);
|
|
+ ccic_reg_clear_bit(ccic_dev, REG_CSI2_VCCTRL, CSI2_VCCTRL_MD_VC);
|
|
+ ccic_reg_write_mask(ccic_dev, REG_CSI2_CTRL2,
|
|
+ CSI2_C2_MUX_SEL_LOCAL_MAIN, CSI2_C2_MUX_SEL_MASK);
|
|
+ break;
|
|
+ case CCIC_IDI_SEL_REPACK:
|
|
+ ccic_reg_write_mask(ccic_dev, REG_IDI_CTRL, IDI_SEL_DPCM_REPACK,
|
|
+ IDI_SEL_MASK);
|
|
+ ccic_reg_clear_bit(ccic_dev, REG_CSI2_CTRL2, CSI2_C2_IDI_MUX_SEL_DPCM);
|
|
+ ccic_reg_clear_bit(ccic_dev, REG_CSI2_CTRL2, CSI2_C2_REPACK_RST);
|
|
+ ccic_reg_set_bit(ccic_dev, REG_CSI2_CTRL2, CSI2_C2_REPACK_ENA);
|
|
+ break;
|
|
+ case CCIC_IDI_SEL_DPCM:
|
|
+ ccic_reg_write_mask(ccic_dev, REG_IDI_CTRL,
|
|
+ IDI_SEL_DPCM_REPACK, IDI_SEL_MASK);
|
|
+ ccic_reg_set_bit(ccic_dev, REG_CSI2_CTRL2, CSI2_C2_IDI_MUX_SEL_DPCM);
|
|
+ ccic_reg_set_bit(ccic_dev, REG_CSI2_CTRL2, CSI2_C2_DPCM_ENA);
|
|
+ break;
|
|
+ case CCIC_IDI_SEL_PARALLEL:
|
|
+ ccic_reg_write_mask(ccic_dev, REG_IDI_CTRL,
|
|
+ IDI_SEL_PARALLEL, IDI_SEL_MASK);
|
|
+ break;
|
|
+ default:
|
|
+ dev_err(ccic_dev->dev, "IDI source is error %d\n", sel);
|
|
+ ret = -EINVAL;
|
|
+ }
|
|
+
|
|
+ return ret;
|
|
+}
|
|
+
|
|
+static int __maybe_unused ccic_enable_csi2idi(struct ccic_ctrl *ctrl)
|
|
+{
|
|
+ struct ccic_dev *ccic_dev = ctrl->ccic_dev;
|
|
+
|
|
+ ccic_csi2idi_reset(ccic_dev, 0);
|
|
+
|
|
+ return 0;
|
|
+}
|
|
+
|
|
+static int __maybe_unused ccic_disable_csi2idi(struct ccic_ctrl *ctrl)
|
|
+{
|
|
+ struct ccic_dev *ccic_dev = ctrl->ccic_dev;
|
|
+
|
|
+ ccic_csi2idi_reset(ccic_dev, 1);
|
|
+
|
|
+ return 0;
|
|
+}
|
|
+
|
|
+#define ISP_BUS_CLK_FREQ (307200000)
|
|
+static int axi_set_clock_rates(struct clk *clock)
|
|
+{
|
|
+ long rate;
|
|
+ int ret;
|
|
+
|
|
+ rate = clk_round_rate(clock, ISP_BUS_CLK_FREQ);
|
|
+ if (rate < 0) {
|
|
+ pr_err("axi clk round rate failed: %ld\n", rate);
|
|
+ return -EINVAL;
|
|
+ }
|
|
+
|
|
+ ret = clk_set_rate(clock, rate);
|
|
+ if (ret < 0) {
|
|
+ pr_err("axi clk set rate failed: %d\n", ret);
|
|
+ return ret;
|
|
+ }
|
|
+
|
|
+ return 0;
|
|
+}
|
|
+
|
|
+int ccic_dma_clk_enable(struct ccic_dma *dma, int on)
|
|
+{
|
|
+ struct ccic_dev *ccic = dma->ccic_dev;
|
|
+ struct device *dev = &ccic->pdev->dev;
|
|
+ int ret;
|
|
+
|
|
+ if (on) {
|
|
+ ret = pm_runtime_get_sync(dev);
|
|
+ if (ret < 0)
|
|
+ return ret;
|
|
+
|
|
+ ret = clk_prepare_enable(ccic->axi_clk);
|
|
+ if (ret < 0) {
|
|
+ pm_runtime_put_sync(dev);
|
|
+ return ret;
|
|
+ }
|
|
+ reset_control_deassert(ccic->isp_ci_reset);
|
|
+
|
|
+ ret = axi_set_clock_rates(ccic->axi_clk);
|
|
+ if (ret < 0) {
|
|
+ pm_runtime_put_sync(dev);
|
|
+ return ret;
|
|
+ }
|
|
+ reset_control_deassert(ccic->isp_ci_reset);
|
|
+ } else {
|
|
+ clk_disable_unprepare(ccic->axi_clk);
|
|
+ reset_control_assert(ccic->isp_ci_reset);
|
|
+ pm_runtime_put_sync(dev);
|
|
+ }
|
|
+
|
|
+ return 0;
|
|
+}
|
|
+
|
|
+static struct ccic_dma_ops ccic_dma_ops = {
|
|
+ .clk_enable = ccic_dma_clk_enable,
|
|
+};
|
|
+
|
|
+/*
|
|
+ * TBD: calculate the clk rate dynamically based on
|
|
+ * fps, resolution and other arguments.
|
|
+ */
|
|
+static int ccic_clk_set_rate(struct ccic_ctrl *ctrl_dev, int mode)
|
|
+{
|
|
+ unsigned long clk_val;
|
|
+ struct ccic_dev *ccic_dev = ctrl_dev->ccic_dev;
|
|
+
|
|
+#if defined(CONFIG_SPACEMIT_ZEBU)
|
|
+ clk_val = clk_round_rate(ccic_dev->csi_clk, 1248000000);
|
|
+#else
|
|
+ clk_val = clk_round_rate(ccic_dev->csi_clk, 624000000);
|
|
+#endif
|
|
+
|
|
+ clk_set_rate(ccic_dev->csi_clk, clk_val);
|
|
+ pr_debug("cam clk[csi_func]: %ld\n", clk_val);
|
|
+
|
|
+ if (mode == SC2_MODE_ISP) {
|
|
+#if defined(CONFIG_SPACEMIT_ZEBU)
|
|
+ clk_val = clk_round_rate(ccic_dev->clk4x, 1248000000);
|
|
+#else
|
|
+ clk_val = clk_round_rate(ccic_dev->csi_clk, 624000000);
|
|
+#endif
|
|
+
|
|
+ clk_set_rate(ccic_dev->clk4x, clk_val);
|
|
+ pr_debug("cam clk[ccic4x_func]: %ld\n", clk_val);
|
|
+ }
|
|
+
|
|
+ return 0;
|
|
+}
|
|
+
|
|
+int ccic_clk_enable(struct ccic_ctrl *ctrl, int en)
|
|
+{
|
|
+ int ret = 0;
|
|
+ struct ccic_dev *ccic_dev = ctrl->ccic_dev;
|
|
+
|
|
+ if (en) {
|
|
+ ret = pm_runtime_get_sync(&ccic_dev->pdev->dev);
|
|
+ if (ret < 0) {
|
|
+ pr_err("rpm get failed\n");
|
|
+ return ret;
|
|
+ }
|
|
+
|
|
+ //clk_prepare_enable(ccic_dev->ahb_clk);
|
|
+ reset_control_deassert(ccic_dev->ahb_reset);
|
|
+
|
|
+ clk_prepare_enable(ccic_dev->clk4x);
|
|
+ reset_control_deassert(ccic_dev->ccic_4x_reset);
|
|
+ clk_prepare_enable(ccic_dev->csi_clk);
|
|
+ reset_control_deassert(ccic_dev->csi_reset);
|
|
+
|
|
+ ret = ccic_clk_set_rate(ctrl, SC2_MODE_ISP);
|
|
+ if (ret < 0) {
|
|
+ pm_runtime_put_sync(&ccic_dev->pdev->dev);
|
|
+ return ret;
|
|
+ }
|
|
+
|
|
+ } else {
|
|
+ clk_disable_unprepare(ccic_dev->csi_clk);
|
|
+ reset_control_assert(ccic_dev->csi_reset);
|
|
+
|
|
+ clk_disable_unprepare(ccic_dev->clk4x);
|
|
+ reset_control_assert(ccic_dev->ccic_4x_reset);
|
|
+
|
|
+ //clk_disable_unprepare(ccic_dev->ahb_clk);
|
|
+ reset_control_assert(ccic_dev->ahb_reset);
|
|
+
|
|
+ pm_runtime_put_sync(&ccic_dev->pdev->dev);
|
|
+ }
|
|
+
|
|
+ pr_debug("ccic%d clock %s", ccic_dev->index, en ? "enabled" : "disabled");
|
|
+
|
|
+ return ret;
|
|
+}
|
|
+
|
|
+int ccic_config_csi2_mbus(struct ccic_ctrl *ctrl, int md, u8 vc0, u8 vc1, int lanes)
|
|
+{
|
|
+ int ret;
|
|
+ struct ccic_dev *ccic_dev = ctrl->ccic_dev;
|
|
+ struct mipi_csi2 csi2para;
|
|
+
|
|
+ ret = ccic_config_csi2_vc(ctrl, md, vc0, vc1);
|
|
+ if (ret)
|
|
+ return ret;
|
|
+
|
|
+ csi2para.calc_dphy = 0;
|
|
+ csi2para.dphy[0] = 0x00000001;
|
|
+ csi2para.dphy[1] = 0xa2848888;
|
|
+ csi2para.dphy[2] = 0x0000201a; //0x0000201a: 1.0G ~ 1.5G
|
|
+ csi2para.dphy[3] = 0x000000ff;
|
|
+ csi2para.dphy[4] = 0x1001;
|
|
+ csi2para.dphy_desc.nr_lane = lanes;
|
|
+ ret = ccic_config_csi2_dphy(ctrl, &csi2para, !!lanes);
|
|
+ if (ret)
|
|
+ return ret;
|
|
+
|
|
+ pr_debug("ccic%d csi2 %s", ccic_dev->index, lanes ? "enabled" : "disabled");
|
|
+
|
|
+ return ret;
|
|
+}
|
|
+
|
|
+int ccic_config_csi2idi_mux(struct ccic_ctrl *ctrl, int chnl, int idi, int en)
|
|
+{
|
|
+ struct ccic_dev *csi2idi = NULL;
|
|
+ struct ccic_dev *tmp;
|
|
+ int csi2idi_idx;
|
|
+
|
|
+ if (idi == CCIC_CSI2IDI0) {
|
|
+ csi2idi_idx = 0;
|
|
+ } else if (idi == CCIC_CSI2IDI1) {
|
|
+ csi2idi_idx = 2;
|
|
+ } else {
|
|
+ pr_err("%s: invalid idi index %d\n", __func__, idi);
|
|
+ return -EINVAL;
|
|
+ }
|
|
+
|
|
+ list_for_each_entry(tmp, &ccic_devices, list) {
|
|
+ if (tmp->index == csi2idi_idx) {
|
|
+ csi2idi = tmp;
|
|
+ break;
|
|
+ }
|
|
+ }
|
|
+
|
|
+ if (!csi2idi) {
|
|
+ pr_err("%s: ccic%d not found\n", __func__, csi2idi_idx);
|
|
+ return -ENODEV;
|
|
+ }
|
|
+
|
|
+ if (!en) {
|
|
+ ccic_csi2idi_reset(csi2idi, 1);
|
|
+ return 0;
|
|
+ }
|
|
+
|
|
+ ccic_config_idi_sel(csi2idi->ctrl, CCIC_IDI_SEL_REPACK);
|
|
+ if (ctrl->ccic_dev->index == csi2idi->index) {
|
|
+ ccic_config_idi_mux(csi2idi->ctrl, CCIC_IDI_MUX_LOCAL_MAIN);
|
|
+ } else if (ctrl->ccic_dev->index == 1) {
|
|
+ if (chnl == CCIC_CSI2VC_MAIN)
|
|
+ ccic_config_idi_mux(csi2idi->ctrl, CCIC_IDI_MUX_IPE2_MAIN);
|
|
+ else
|
|
+ ccic_config_idi_mux(csi2idi->ctrl, CCIC_IDI_MUX_IPE2_VCDT);
|
|
+ } else {
|
|
+ ccic_config_idi_mux(csi2idi->ctrl, CCIC_IDI_MUX_REMOTE_VCDT);
|
|
+ }
|
|
+
|
|
+ ccic_csi2idi_reset(csi2idi, 0);
|
|
+ return 0;
|
|
+}
|
|
+
|
|
+int ccic_reset_csi2idi(struct ccic_ctrl *ctrl, int idi, int rst)
|
|
+{
|
|
+ struct ccic_dev *csi2idi = NULL;
|
|
+ struct ccic_dev *tmp;
|
|
+ int csi2idi_idx;
|
|
+
|
|
+ if (idi == 0) {
|
|
+ csi2idi_idx = 0;
|
|
+ } else if (idi == 1) {
|
|
+ csi2idi_idx = 2;
|
|
+ } else {
|
|
+ pr_err("%s: invalid idi index %d\n", __func__, idi);
|
|
+ return -EINVAL;
|
|
+ }
|
|
+
|
|
+ list_for_each_entry(tmp, &ccic_devices, list) {
|
|
+ if (tmp->index == csi2idi_idx) {
|
|
+ csi2idi = tmp;
|
|
+ break;
|
|
+ }
|
|
+ }
|
|
+
|
|
+ if (!csi2idi) {
|
|
+ pr_err("%s: ccic%d not found\n", __func__, csi2idi_idx);
|
|
+ return -ENODEV;
|
|
+ }
|
|
+
|
|
+ ccic_csi2idi_reset(csi2idi, rst);
|
|
+ return 0;
|
|
+}
|
|
+
|
|
+static struct ccic_ctrl_ops ccic_ctrl_ops = {
|
|
+ .irq_mask = ccic_irqmask,
|
|
+ .clk_enable = ccic_clk_enable,
|
|
+ .config_csi2_mbus = ccic_config_csi2_mbus,
|
|
+ .config_csi2idi_mux = ccic_config_csi2idi_mux,
|
|
+ .reset_csi2idi = ccic_reset_csi2idi,
|
|
+};
|
|
+
|
|
+static int ccic_init_clk(struct ccic_dev *dev)
|
|
+{
|
|
+#ifdef CONFIG_ARCH_SPACEMIT
|
|
+ dev->axi_clk = devm_clk_get(&dev->pdev->dev, "isp_axi");
|
|
+ if (IS_ERR(dev->axi_clk))
|
|
+ return PTR_ERR(dev->axi_clk);
|
|
+/*
|
|
+ dev->ahb_clk = devm_clk_get(&dev->pdev->dev, "isp_ahb");
|
|
+ if (IS_ERR(dev->ahb_clk))
|
|
+ return PTR_ERR(dev->ahb_clk);
|
|
+*/
|
|
+ dev->ahb_reset = devm_reset_control_get_optional_shared(&dev->pdev->dev, "isp_ahb_reset");
|
|
+ if (IS_ERR_OR_NULL(dev->ahb_reset))
|
|
+ return PTR_ERR(dev->ahb_reset);
|
|
+
|
|
+ dev->csi_reset = devm_reset_control_get_optional_shared(&dev->pdev->dev, "csi_reset");
|
|
+ if (IS_ERR_OR_NULL(dev->csi_reset))
|
|
+ return PTR_ERR(dev->csi_reset);
|
|
+
|
|
+ dev->ccic_4x_reset = devm_reset_control_get_optional_shared(&dev->pdev->dev, "ccic_4x_reset");
|
|
+ if (IS_ERR_OR_NULL(dev->ccic_4x_reset))
|
|
+ return PTR_ERR(dev->ccic_4x_reset);
|
|
+
|
|
+ dev->isp_ci_reset = devm_reset_control_get_optional_shared(&dev->pdev->dev, "isp_ci_reset");
|
|
+ if (IS_ERR_OR_NULL(dev->isp_ci_reset))
|
|
+ return PTR_ERR(dev->isp_ci_reset);
|
|
+
|
|
+ dev->csi_clk = devm_clk_get(&dev->pdev->dev, "csi_func");
|
|
+ if (IS_ERR(dev->csi_clk))
|
|
+ return PTR_ERR(dev->csi_clk);
|
|
+
|
|
+ dev->clk4x = devm_clk_get(&dev->pdev->dev, "ccic_func");
|
|
+ return PTR_ERR_OR_ZERO(dev->clk4x);
|
|
+#else
|
|
+ return 0;
|
|
+#endif
|
|
+}
|
|
+
|
|
+static int ccic_device_register(struct ccic_dev *ccic_dev)
|
|
+{
|
|
+ struct ccic_dev *other;
|
|
+
|
|
+ mutex_lock(&list_lock);
|
|
+ list_for_each_entry(other, &ccic_devices, list) {
|
|
+ if (other->index == ccic_dev->index) {
|
|
+ dev_warn(ccic_dev->dev, "ccic%d already registered\n",
|
|
+ ccic_dev->index);
|
|
+ mutex_unlock(&list_lock);
|
|
+ return -EBUSY;
|
|
+ }
|
|
+ }
|
|
+
|
|
+ list_add_tail(&ccic_dev->list, &ccic_devices);
|
|
+ mutex_unlock(&list_lock);
|
|
+ return 0;
|
|
+}
|
|
+
|
|
+static int ccic_device_unregister(struct ccic_dev *ccic_dev)
|
|
+{
|
|
+ mutex_lock(&list_lock);
|
|
+ list_del(&ccic_dev->list);
|
|
+ mutex_unlock(&list_lock);
|
|
+ return 0;
|
|
+}
|
|
+
|
|
+int ccic_dphy_hssettle_set(unsigned int ccic_id, unsigned int dphy_freg)
|
|
+{
|
|
+ u32 reg_settle = 0x00002b00;
|
|
+ struct ccic_dev *ccic_dev = NULL;
|
|
+ struct ccic_dev *tmp;
|
|
+
|
|
+ if (dphy_freg < 80) //dphy_clock unit: MHZ
|
|
+ return -EINVAL;
|
|
+ // RX_Tsettle > TX_HSprepare; reg unit: (1 / (dphy_clock / 2))
|
|
+ reg_settle =
|
|
+ (HS_PREP_ZERO_MIN + HS_PREP_MAX) * dphy_freg / (2 * 2 * 1000) + (6 / 2);
|
|
+ reg_settle = reg_settle << CSI2_DPHY3_HS_SETTLE_SHIFT;
|
|
+
|
|
+ mutex_lock(&list_lock);
|
|
+ list_for_each_entry(tmp, &ccic_devices, list) {
|
|
+ if (tmp->index == ccic_id) {
|
|
+ ccic_dev = tmp;
|
|
+ break;
|
|
+ }
|
|
+ }
|
|
+ if (!ccic_dev) {
|
|
+ pr_err("ccic%d not found", ccic_id);
|
|
+ return -ENODEV;
|
|
+ }
|
|
+ ccic_reg_write(ccic_dev, REG_CSI2_DPHY3, reg_settle);
|
|
+ mutex_unlock(&list_lock);
|
|
+
|
|
+ return 0;
|
|
+}
|
|
+
|
|
+EXPORT_SYMBOL_GPL(ccic_dphy_hssettle_set);
|
|
+
|
|
+int ccic_ctrl_get(struct ccic_ctrl **ctrl_host, int id,
|
|
+ irqreturn_t(*handler) (struct ccic_ctrl *, u32))
|
|
+{
|
|
+ struct ccic_dev *ccic_dev = NULL;
|
|
+ struct ccic_dev *tmp;
|
|
+ struct ccic_ctrl *ctrl = NULL;
|
|
+
|
|
+ list_for_each_entry(tmp, &ccic_devices, list) {
|
|
+ if (tmp->index == id) {
|
|
+ ccic_dev = tmp;
|
|
+ break;
|
|
+ }
|
|
+ }
|
|
+ if (!ccic_dev) {
|
|
+ pr_err("ccic%d not found", id);
|
|
+ return -ENODEV;
|
|
+ }
|
|
+
|
|
+ ctrl = ccic_dev->ctrl;
|
|
+ ctrl->handler = handler;
|
|
+ *ctrl_host = ctrl;
|
|
+ pr_debug("acquire ccic%d ctrl dev succeed\n", id);
|
|
+
|
|
+ return 0;
|
|
+}
|
|
+
|
|
+EXPORT_SYMBOL(ccic_ctrl_get);
|
|
+
|
|
+void ccic_ctrl_put(struct ccic_ctrl *ctrl)
|
|
+{
|
|
+ // TODO
|
|
+}
|
|
+
|
|
+EXPORT_SYMBOL(ccic_ctrl_put);
|
|
+
|
|
+static void ipe_error_irq_handler(struct ccic_dev *ccic, u32 ipestatus, u32 csi2status)
|
|
+{
|
|
+ static DEFINE_RATELIMIT_STATE(rs, 5 * HZ, 20);
|
|
+
|
|
+ if (__ratelimit(&rs)) {
|
|
+ pr_err("CCIC%d: interrupt status 0x%08x, csi2 status 0x%08x\n",
|
|
+ ccic->index, ipestatus, csi2status);
|
|
+ } else {
|
|
+ /* avoid soft lockup due to high frequency interrupt */
|
|
+ ccic_reg_clear_bit(ccic, REG_IRQMASK, CSI2PHYERRS);
|
|
+ pr_err("CCIC%d: too many interrupt errors, mask\n", ccic->index);
|
|
+ }
|
|
+}
|
|
+
|
|
+static irqreturn_t k1x_ccic_isr(int irq, void *data)
|
|
+{
|
|
+ struct ccic_dev *ccic_dev = data;
|
|
+ uint32_t irqs, csi2status;
|
|
+
|
|
+ irqs = ccic_reg_read(ccic_dev, REG_IRQSTAT);
|
|
+ if (!(irqs & ~IRQ_IDI_PRO_LINE))
|
|
+ return IRQ_NONE;
|
|
+
|
|
+ csi2status = ccic_reg_read(ccic_dev, 0x108);
|
|
+ ccic_reg_write(ccic_dev, REG_IRQSTAT, irqs & ~IRQ_IDI_PRO_LINE);
|
|
+
|
|
+ if (irqs & CSI2PHYERRS)
|
|
+ ipe_error_irq_handler(ccic_dev, irqs, csi2status);
|
|
+
|
|
+ if (irqs & IRQ_DMA_PRO_LINE)
|
|
+ pr_debug("CCIC%d: IRQ_DMA_PRO_LINE\n", ccic_dev->index);
|
|
+
|
|
+ if (irqs & IRQ_IDI_PRO_LINE)
|
|
+ pr_debug("CCIC%d: IRQ_IDI_PRO_LINE\n", ccic_dev->index);
|
|
+
|
|
+ if (irqs & IRQ_CSI2IDI_FLUSH)
|
|
+ pr_debug("CCIC%d: IRQ_CSI2IDI_FLUSH\n", ccic_dev->index);
|
|
+
|
|
+ if (irqs & IRQ_CSI2IDI_HBLK2HSYNC)
|
|
+ pr_debug("CCIC%d: IRQ_CSI2IDI_HBLK2HSYNC\n", ccic_dev->index);
|
|
+
|
|
+ if (irqs & IRQ_DPHY_RX_CLKULPS_ACTIVE)
|
|
+ pr_debug("CCIC%d: IRQ_DPHY_RX_CLKULPS_ACTIVE\n", ccic_dev->index);
|
|
+
|
|
+ if (irqs & IRQ_DPHY_RX_CLKULPS)
|
|
+ pr_debug("CCIC%d: IRQ_DPHY_RX_CLKULPS\n", ccic_dev->index);
|
|
+
|
|
+ if (irqs & IRQ_DPHY_LN_ULPS_ACTIVE)
|
|
+ pr_debug("CCIC%d: IRQ_DPHY_LN_ULPS_ACTIVE\n", ccic_dev->index);
|
|
+
|
|
+ return IRQ_HANDLED;
|
|
+}
|
|
+
|
|
+static int k1x_ccic_probe(struct platform_device *pdev)
|
|
+{
|
|
+ struct device_node *np = pdev->dev.of_node;
|
|
+ struct ccic_dev *ccic_dev;
|
|
+ struct ccic_ctrl *ccic_ctrl;
|
|
+ struct ccic_dma *ccic_dma;
|
|
+ struct device *dev = &pdev->dev;
|
|
+ int ret;
|
|
+ int irq;
|
|
+
|
|
+ pr_debug("%s begin to probe\n", dev_name(&pdev->dev));
|
|
+
|
|
+ ret = of_property_read_u32(np, "cell-index", &pdev->id);
|
|
+ if (ret < 0) {
|
|
+ dev_err(&pdev->dev, "failed to get alias id, errno %d\n", ret);
|
|
+ return ret;
|
|
+ }
|
|
+
|
|
+ ccic_dev = devm_kzalloc(&pdev->dev, sizeof(*ccic_dev), GFP_KERNEL);
|
|
+ if (!ccic_dev) {
|
|
+ dev_err(&pdev->dev, "camera: Could not allocate ccic dev\n");
|
|
+ return -ENOMEM;
|
|
+ }
|
|
+
|
|
+ ccic_ctrl = devm_kzalloc(&pdev->dev, sizeof(*ccic_ctrl), GFP_KERNEL);
|
|
+ if (!ccic_ctrl) {
|
|
+ dev_err(&pdev->dev, "camera: Could not allocate ctrl dev\n");
|
|
+ return -ENOMEM;
|
|
+ }
|
|
+
|
|
+ ccic_dma = devm_kzalloc(&pdev->dev, sizeof(*ccic_dma), GFP_KERNEL);
|
|
+ if (!ccic_dma) {
|
|
+ dev_err(&pdev->dev, "camera: Could not allocate dma dev\n");
|
|
+ return -ENOMEM;
|
|
+ }
|
|
+
|
|
+ /* get mem */
|
|
+ ccic_dev->mem = platform_get_resource_byname(pdev, IORESOURCE_MEM, "ccic-regs");
|
|
+ if (!ccic_dev->mem) {
|
|
+ dev_err(&pdev->dev, "no mem resource");
|
|
+ return -ENODEV;
|
|
+ }
|
|
+ ccic_dev->base = devm_ioremap(&pdev->dev, ccic_dev->mem->start,
|
|
+ resource_size(ccic_dev->mem));
|
|
+ if (IS_ERR(ccic_dev->base)) {
|
|
+ dev_err(&pdev->dev, "fail to remap iomem\n");
|
|
+ return PTR_ERR(ccic_dev->base);
|
|
+ }
|
|
+
|
|
+ /* get irqs */
|
|
+ irq = platform_get_irq_byname(pdev, "ipe-irq");
|
|
+ if (irq < 0) {
|
|
+ dev_err(&pdev->dev, "no irq resource");
|
|
+ return -ENODEV;
|
|
+ }
|
|
+ dev_dbg(&pdev->dev, "ipe irq: %d\n", irq);
|
|
+ ret = devm_request_irq(&pdev->dev, irq, k1x_ccic_isr,
|
|
+ IRQF_SHARED, K1X_CCIC_DRV_NAME, ccic_dev);
|
|
+ if (ret) {
|
|
+ dev_err(&pdev->dev, "fail to request irq\n");
|
|
+ return ret;
|
|
+ }
|
|
+
|
|
+ /* ccic device and ctrl init */
|
|
+ ccic_ctrl->ccic_dev = ccic_dev;
|
|
+ ccic_ctrl->index = pdev->id;
|
|
+ ccic_ctrl->ops = &ccic_ctrl_ops;
|
|
+ atomic_set(&ccic_ctrl->usr_cnt, 0);
|
|
+
|
|
+ ccic_dma->ccic_dev = ccic_dev;
|
|
+ ccic_dma->ops = &ccic_dma_ops;
|
|
+
|
|
+ ccic_dev->csiphy = csiphy_lookup_by_phandle(&pdev->dev, "spacemit,csiphy");
|
|
+ if (!ccic_dev->csiphy) {
|
|
+ dev_err(&pdev->dev, "fail to acquire csiphy\n");
|
|
+ return -EPROBE_DEFER;
|
|
+ }
|
|
+
|
|
+ ccic_dev->index = pdev->id;
|
|
+ ccic_dev->pdev = pdev;
|
|
+ ccic_dev->dev = &pdev->dev;
|
|
+ ccic_dev->ctrl = ccic_ctrl;
|
|
+ ccic_dev->dma = ccic_dma;
|
|
+ ccic_dev->interrupt_mask_value = CSI2PHYERRS;
|
|
+ dev_set_drvdata(dev, ccic_dev);
|
|
+
|
|
+ /* enable runtime pm */
|
|
+ pm_runtime_enable(&pdev->dev);
|
|
+
|
|
+ ccic_init_clk(ccic_dev);
|
|
+
|
|
+ ccic_device_register(ccic_dev);
|
|
+
|
|
+ pr_debug("%s probed", dev_name(&pdev->dev));
|
|
+
|
|
+ return ret;
|
|
+}
|
|
+
|
|
+static int k1x_ccic_remove(struct platform_device *pdev)
|
|
+{
|
|
+ struct ccic_dev *ccic_dev;
|
|
+ struct ccic_dma *dma;
|
|
+
|
|
+ ccic_dev = dev_get_drvdata(&pdev->dev);
|
|
+ dma = ccic_dev->dma;
|
|
+
|
|
+ ccic_device_unregister(ccic_dev);
|
|
+
|
|
+ /* disable runtime pm */
|
|
+ pm_runtime_disable(&pdev->dev);
|
|
+
|
|
+ return 0;
|
|
+}
|
|
+
|
|
+static const struct of_device_id k1x_ccic_dt_match[] = {
|
|
+ {.compatible = "zynq,k1x-ccic",.data = NULL },
|
|
+ {.compatible = "spacemit,k1xccic",.data = NULL },
|
|
+ { },
|
|
+};
|
|
+
|
|
+MODULE_DEVICE_TABLE(of, k1x_ccic_dt_match);
|
|
+
|
|
+struct platform_driver k1x_ccic_driver = {
|
|
+ .driver = {
|
|
+ .name = K1X_CCIC_DRV_NAME,
|
|
+ .of_match_table = of_match_ptr(k1x_ccic_dt_match),
|
|
+ },
|
|
+ .probe = k1x_ccic_probe,
|
|
+ .remove = k1x_ccic_remove,
|
|
+};
|
|
+
|
|
+int __init k1x_ccic_driver_init(void)
|
|
+{
|
|
+ int ret;
|
|
+
|
|
+ ret = ccic_csiphy_register();
|
|
+ if (ret < 0)
|
|
+ return ret;
|
|
+
|
|
+ ret = platform_driver_register(&k1x_ccic_driver);
|
|
+ if (ret < 0)
|
|
+ ccic_csiphy_unregister();
|
|
+
|
|
+ return ret;
|
|
+}
|
|
+
|
|
/* Module exit: tear down in reverse order of k1x_ccic_driver_init(). */
void __exit k1x_ccic_driver_exit(void)
{
	platform_driver_unregister(&k1x_ccic_driver);
	ccic_csiphy_unregister();
}
|
|
+
|
|
/*
 * Init/exit are open-coded (rather than module_platform_driver()) because
 * k1x_ccic_driver_init() must register the csiphy layer before the
 * platform driver.
 */
module_init(k1x_ccic_driver_init);
module_exit(k1x_ccic_driver_exit);
/* module_platform_driver(k1x_ccic_driver); */

MODULE_DESCRIPTION("K1X CCIC Driver");
MODULE_LICENSE("GPL");
|
|
diff --git a/drivers/media/platform/spacemit/camera/cam_ccic/ccic_drv.h b/drivers/media/platform/spacemit/camera/cam_ccic/ccic_drv.h
|
|
new file mode 100644
|
|
index 000000000000..111111111111
|
|
--- /dev/null
|
|
+++ b/drivers/media/platform/spacemit/camera/cam_ccic/ccic_drv.h
|
|
@@ -0,0 +1,224 @@
|
|
+/* SPDX-License-Identifier: GPL-2.0 */
|
|
+/*
|
|
+ * k1x_ipe.h - Driver for ccic
|
|
+ *
|
|
+ * Copyright (C) 2023 SPACEMIT Micro Limited
|
|
+ */
|
|
+
|
|
+#ifndef __K1X_IPE_H
|
|
+#define __K1X_IPE_H
|
|
+#include "linux/types.h"
|
|
+#include <linux/module.h>
|
|
+#include <linux/platform_device.h>
|
|
+#include <linux/slab.h>
|
|
+#include <linux/of_device.h>
|
|
+#include <linux/interrupt.h>
|
|
+#include <media/v4l2-device.h>
|
|
+#include <media/videobuf2-v4l2.h>
|
|
+#include <linux/reset.h>
|
|
+
|
|
+#define SC2_MODE_CCIC 1
|
|
+#define SC2_MODE_ISP 2
|
|
+
|
|
+#define MHZ 1000000
|
|
+/*
|
|
+ * the min/max is for calculating DPHY, the unit is ns.
|
|
+ */
|
|
+#define D_TERMEN_MAX (35)
|
|
+#define HS_PREP_MIN (40)
|
|
+#define HS_PREP_MAX (85)
|
|
+#define HS_PREP_ZERO_MIN (145)
|
|
+#define NS_TO_PS(nsec) ((nsec) * 1000)
|
|
+
|
|
+/* MIPI related */
|
|
+/* Sensor MIPI behavior descriptor, sensor driver should pass it to controller
|
|
+ * driver, and let controller driver decide how to config its PHY registers */
|
|
+struct csi_dphy_desc {
|
|
+ u32 clk_mul;
|
|
+ u32 clk_div; /* clock_lane_freq = input_clock * clk_mul / clk_div */
|
|
+ u32 clk_freq;
|
|
+ u32 cl_prepare; /* cl_* describes clock lane timing in the unit of ns */
|
|
+ u32 cl_zero;
|
|
+ u32 hs_prepare; /* hs_* describes data LP to HS transition timing */
|
|
+ u32 hs_zero; /* in the unit of clock lane period(DDR period) */
|
|
+ u32 nr_lane; /* When set to 0, S/W will try to figure out a value */
|
|
+};
|
|
+
|
|
+struct mipi_csi2 {
|
|
+ int dphy_type; /* 0: DPHY on chip, 1: DPTC off chip */
|
|
+ u32 dphy[5]; /* DPHY: CSI2_DPHY1, CSI2_DPHY2, CSI2_DPHY3, CSI2_DPHY5, CSI2_DPHY6 */
|
|
+ int calc_dphy;
|
|
+ int enable_dpcm;
|
|
+ struct csi_dphy_desc dphy_desc;
|
|
+};
|
|
+
|
|
+#define HS_SETTLE_POS_MAX (100)
|
|
+struct csi_dphy_calc {
|
|
+ char name[16];
|
|
+ int hs_termen_pos;
|
|
+ int hs_settle_pos; /* 0~100 */
|
|
+};
|
|
+
|
|
/*
 * Resolved CSI-2 D-PHY timing values plus channel/data-type routing,
 * as programmed into the controller registers.
 */
struct csi_dphy_reg {
	u16 cl_termen;	/* clock lane: cl_* are clock-lane timing fields */
	u16 cl_settle;
	u16 cl_miss;
	u16 hs_termen;	/* data lane: hs_* are HS timing fields */
	u16 hs_settle;
	u16 hs_rx_to;
	u16 lane; /* When set to 0, S/W will try to figure out a value */
	u16 vc; /* Virtual channel */
	u16 dt1; /* Data type 1: For video or main data type */
	u16 dt2; /* Data type 2: For thumbnail or auxiliary data type */
};
|
|
+
|
|
/*
 * Per-instance CSI2/control object. Clients obtain it through
 * ccic_ctrl_get(), which installs their IRQ callback in @handler.
 */
struct ccic_ctrl {
	int index;		/* "cell-index" of the owning CCIC */
	atomic_t usr_cnt;	/* user count, initialized to 0 at probe */
	struct mipi_csi2 csi;	/* CSI-2 bus / D-PHY configuration */
	struct ccic_dev *ccic_dev;	/* backpointer to the owning device */
	struct ccic_ctrl_ops *ops;	/* hardware control callbacks */
	irqreturn_t(*handler) (struct ccic_ctrl *, u32);	/* client IRQ callback */
};
|
|
+
|
|
+enum ccic_idi {
|
|
+ CCIC_CSI2IDI0 = 0,
|
|
+ CCIC_CSI2IDI1,
|
|
+};
|
|
+
|
|
+enum ccic_idi_sel {
|
|
+ CCIC_IDI_SEL_NONE = 0,
|
|
+ CCIC_IDI_SEL_DPCM,
|
|
+ CCIC_IDI_SEL_REPACK,
|
|
+ CCIC_IDI_SEL_PARALLEL,
|
|
+ CCIC_IDI_SEL_AHB,
|
|
+ CCIC_IDI_RELEASE_RESET,
|
|
+};
|
|
+
|
|
+enum ccic_idi_mux {
|
|
+ CCIC_IDI_MUX_LOCAL_MAIN = 0,
|
|
+ CCIC_IDI_MUX_IPE2_VCDT,
|
|
+ CCIC_IDI_MUX_IPE2_MAIN,
|
|
+ CCIC_IDI_MUX_REMOTE_VCDT,
|
|
+};
|
|
+
|
|
+enum ccic_csi2vc_mode {
|
|
+ CCIC_CSI2VC_NM = 0,
|
|
+ CCIC_CSI2VC_VC,
|
|
+ CCIC_CSI2VC_DT,
|
|
+};
|
|
+
|
|
+enum ccic_csi2vc_chnl {
|
|
+ CCIC_CSI2VC_MAIN = 0,
|
|
+ CCIC_CSI2VC_VCDT,
|
|
+};
|
|
+
|
|
/* Hardware control callbacks exposed to ccic_ctrl clients. */
struct ccic_ctrl_ops {
	void (*irq_mask)(struct ccic_ctrl *ctrl, int on);	/* (un)mask ctrl IRQs */
	int (*clk_enable)(struct ccic_ctrl *ctrl, int en);
	/* md: a CCIC_CSI2VC_* mode — presumably; verify against implementation */
	int (*config_csi2_mbus)(struct ccic_ctrl *ctrl, int md, u8 vc0, u8 vc1,
				int lanes);
	int (*config_csi2idi_mux)(struct ccic_ctrl *ctrl, int chnl, int idi, int en);
	int (*reset_csi2idi)(struct ccic_ctrl *ctrl, int idi, int rst);
};
|
|
+
|
|
+struct ccic_dma {
|
|
+ int index;
|
|
+ struct v4l2_device v4l2_dev;
|
|
+ struct video_device vdev;
|
|
+ struct ccic_dev *ccic_dev;
|
|
+ struct v4l2_pix_format pix_format;
|
|
+ struct mutex ops_mutex;
|
|
+ spinlock_t dev_lock;
|
|
+ struct list_head pending_bq;
|
|
+ struct list_head active_bq;
|
|
+ struct vb2_queue vb_queue;
|
|
+ u32 csi_sof_cnt;
|
|
+ u32 dma_sof_cnt;
|
|
+ u32 dma_eof_cnt;
|
|
+
|
|
+ struct ccic_dma_ops *ops;
|
|
+};
|
|
+
|
|
+enum ccic_dma_sel {
|
|
+ CCIC_DMA_SEL_LOCAL_MAIN = 0,
|
|
+ CCIC_DMA_SEL_LOCAL_VCDT,
|
|
+ CCIC_DMA_SEL_REMOTE_MAIN,
|
|
+ CCIC_DMA_SEL_REMOTE_VCDT,
|
|
+};
|
|
+
|
|
+struct ccic_dma_ops {
|
|
+ int (*setup_image)(struct ccic_dma *dma_dev);
|
|
+ int (*shadow_ready)(struct ccic_dma *dma_dev, int enable);
|
|
+ int (*set_addr)(struct ccic_dma *dma_dev, u8 chnl, u32 addr);
|
|
+ int (*ccic_enable)(struct ccic_dma *dma_dev, int enable);
|
|
+ int (*clk_enable)(struct ccic_dma *dma_dev, int enable);
|
|
+};
|
|
+
|
|
/*
 * Top-level per-instance state for one CCIC controller.
 * Allocated in k1x_ccic_probe() and stored as platform drvdata.
 */
struct ccic_dev {
	int index;		/* "cell-index" DT property */
	struct device *dev;
	struct platform_device *pdev;
	struct list_head list;	/* presumably linked by ccic_device_register() — verify */
	struct resource *irq;
	struct resource *mem;	/* "ccic-regs" MEM resource */
	void __iomem *base;	/* mapped register window */
	struct clk *csi_clk;
	struct clk *clk4x;
// struct clk *ahb_clk;
	struct clk *axi_clk;
	struct reset_control *ahb_reset;
	struct reset_control *csi_reset;
	struct reset_control *ccic_4x_reset;
	struct reset_control *isp_ci_reset;

	int dma_burst;		/* DMA burst length: 64/128/256 (see ccic_dma_set_burst) */
	spinlock_t ccic_lock; /* protect the struct members and HW */
	u32 interrupt_mask_value;	/* set to CSI2PHYERRS at probe */

	/* object for ccic csi part */
	struct ccic_ctrl *ctrl;
	/* object for ccic dma part */
	struct ccic_dma *dma;
	/* object for csiphy part */
	struct csiphy_device *csiphy;
};
|
|
+
|
|
+/*
|
|
+ * Device register I/O
|
|
+ */
|
|
/* Read a 32-bit CCIC register at byte offset @reg. */
static inline u32 ccic_reg_read(struct ccic_dev *ccic_dev, unsigned int reg)
{
	return ioread32(ccic_dev->base + reg);
}
|
|
+
|
|
/* Write a 32-bit CCIC register at byte offset @reg. */
static inline void ccic_reg_write(struct ccic_dev *ccic_dev, unsigned int reg, u32 val)
{
	iowrite32(val, ccic_dev->base + reg);
}
|
|
+
|
|
+static inline void ccic_reg_write_mask(struct ccic_dev *ccic_dev,
|
|
+ unsigned int reg, u32 val, u32 mask)
|
|
+{
|
|
+ u32 v = ccic_reg_read(ccic_dev, reg);
|
|
+
|
|
+ v = (v & ~mask) | (val & mask);
|
|
+ ccic_reg_write(ccic_dev, reg, v);
|
|
+}
|
|
+
|
|
/* Set the bits in @val, leaving all other bits of @reg untouched. */
static inline void ccic_reg_set_bit(struct ccic_dev *ccic_dev,
				    unsigned int reg, u32 val)
{
	ccic_reg_write_mask(ccic_dev, reg, val, val);
}
|
|
+
|
|
/* Clear the bits in @val, leaving all other bits of @reg untouched. */
static inline void ccic_reg_clear_bit(struct ccic_dev *ccic_dev,
				      unsigned int reg, u32 val)
{
	ccic_reg_write_mask(ccic_dev, reg, 0, val);
}
|
|
+
|
|
+int ccic_ctrl_get(struct ccic_ctrl **ctrl_host, int id,
|
|
+ irqreturn_t(*handler) (struct ccic_ctrl *, u32));
|
|
+int ccic_dphy_hssettle_set(unsigned int ccic_id, unsigned int dphy_freg);
|
|
+#endif
|
|
diff --git a/drivers/media/platform/spacemit/camera/cam_ccic/ccic_hwreg.c b/drivers/media/platform/spacemit/camera/cam_ccic/ccic_hwreg.c
|
|
new file mode 100644
|
|
index 000000000000..111111111111
|
|
--- /dev/null
|
|
+++ b/drivers/media/platform/spacemit/camera/cam_ccic/ccic_hwreg.c
|
|
@@ -0,0 +1,353 @@
|
|
+// SPDX-License-Identifier: GPL-2.0
|
|
+/*
|
|
+ * SPACEMIT ccic driver
|
|
+ *
|
|
+ * Copyright(C) 2023 SPACEMIT Micro Limited.
|
|
+ */
|
|
+
|
|
+#include <media/v4l2-dev.h>
|
|
+#include "ccic_drv.h"
|
|
+#include "ccic_hwreg.h"
|
|
+
|
|
+int ccic_csi2_config_dphy(struct ccic_dev *ccic_dev, int lanes, int enable)
|
|
+{
|
|
+ unsigned int dphy2_val = 0xa2848888;
|
|
+ unsigned int dphy3_val = 0x00001500;
|
|
+ /* unsigned int dphy4_val = 0x00000000; */
|
|
+ unsigned int dphy5_val = 0x000000ff; /* 4lanes */
|
|
+ unsigned int dphy6_val = 0x1001;
|
|
+
|
|
+ if (!enable) {
|
|
+ ccic_reg_write(ccic_dev, REG_CSI2_DPHY5, 0x00);
|
|
+ ccic_reg_clear_bit(ccic_dev, REG_CSI2_DPHY1, CSI2_DHPY1_ANA_PU); /* analog power off */
|
|
+ return 0;
|
|
+ }
|
|
+
|
|
+ if (lanes < 1 || lanes > 4)
|
|
+ return -EINVAL;
|
|
+
|
|
+ dphy5_val = CSI2_DPHY5_LANE_ENA(lanes);
|
|
+ dphy5_val = dphy5_val | (dphy5_val << CSI2_DPHY5_LANE_RESC_ENA_SHIFT);
|
|
+
|
|
+ ccic_reg_write(ccic_dev, REG_CSI2_DPHY2, dphy2_val);
|
|
+ ccic_reg_write(ccic_dev, REG_CSI2_DPHY3, dphy3_val);
|
|
+ /* ccic_reg_write(ccic_dev, REG_CSI2_DPHY4, dphy4_val); */
|
|
+ ccic_reg_write(ccic_dev, REG_CSI2_DPHY5, dphy5_val);
|
|
+ ccic_reg_write(ccic_dev, REG_CSI2_DPHY6, dphy6_val);
|
|
+
|
|
+ /* analog power on */
|
|
+ ccic_reg_set_bit(ccic_dev, REG_CSI2_DPHY1, CSI2_DHPY1_ANA_PU);
|
|
+
|
|
+ return 0;
|
|
+}
|
|
+
|
|
+int ccic_csi2_lanes_enable(struct ccic_dev *ccic_dev, int lanes)
|
|
+{
|
|
+ unsigned int ctrl0_val = 0;
|
|
+
|
|
+ if (lanes < 0 || lanes > 4)
|
|
+ return -EINVAL;
|
|
+
|
|
+ if (!lanes) { /* Disable MIPI CSI2 Interface */
|
|
+ ccic_reg_clear_bit(ccic_dev, REG_CSI2_CTRL0, CSI2_C0_ENABLE); //csi off
|
|
+ return 0;
|
|
+ }
|
|
+
|
|
+ ctrl0_val = ccic_reg_read(ccic_dev, REG_CSI2_CTRL0);
|
|
+ ctrl0_val &= ~(CSI2_C0_LANE_NUM_MASK);
|
|
+ ctrl0_val |= CSI2_C0_LANE_NUM(lanes);
|
|
+ ctrl0_val |= CSI2_C0_ENABLE;
|
|
+ ctrl0_val &= ~(CSI2_C0_VLEN_MASK);
|
|
+ ctrl0_val |= CSI2_C0_VLEN;
|
|
+
|
|
+ ccic_reg_write(ccic_dev, REG_CSI2_CTRL0, ctrl0_val);
|
|
+
|
|
+ return 0;
|
|
+}
|
|
+
|
|
+int ccic_csi2_vc_ctrl(struct ccic_dev *ccic_dev, int md, u8 vc0, u8 vc1)
|
|
+{
|
|
+ int ret = 0;
|
|
+
|
|
+ switch (md) {
|
|
+ case CCIC_CSI2VC_NM: /* Normal mode */
|
|
+ ccic_reg_write_mask(ccic_dev, REG_CSI2_VCCTRL,
|
|
+ CSI2_VCCTRL_MD_NORMAL, CSI2_VCCTRL_MD_MASK);
|
|
+ break;
|
|
+ case CCIC_CSI2VC_VC: /* Virtual Channel mode */
|
|
+ ccic_reg_write_mask(ccic_dev, REG_CSI2_VCCTRL,
|
|
+ CSI2_VCCTRL_MD_VC, CSI2_VCCTRL_MD_MASK);
|
|
+ ccic_reg_write_mask(ccic_dev, REG_CSI2_VCCTRL, vc0 << 14,
|
|
+ CSI2_VCCTRL_VC0_MASK);
|
|
+ ccic_reg_write_mask(ccic_dev, REG_CSI2_VCCTRL, vc1 << 22,
|
|
+ CSI2_VCCTRL_VC1_MASK);
|
|
+ break;
|
|
+ case CCIC_CSI2VC_DT: /* TODO: Data-Type Interleaving */
|
|
+ ccic_reg_write_mask(ccic_dev, REG_CSI2_VCCTRL,
|
|
+ CSI2_VCCTRL_MD_DT, CSI2_VCCTRL_MD_MASK);
|
|
+ pr_err("csi2 vc mode %d todo\n", md);
|
|
+ break;
|
|
+ default:
|
|
+ pr_err("%s: invalid csi2 vc mode %d\n", __func__, md);
|
|
+ ret = -EINVAL;
|
|
+ }
|
|
+
|
|
+ return ret;
|
|
+}
|
|
+
|
|
+int ccic_dma_src_sel(struct ccic_dev *ccic_dev, int sel)
|
|
+{
|
|
+ switch (sel) {
|
|
+ case CCIC_DMA_SEL_LOCAL_MAIN:
|
|
+ ccic_reg_clear_bit(ccic_dev, REG_CSI2_CTRL0, CSI2_C0_EXT_TIM_ENA);
|
|
+ ccic_reg_clear_bit(ccic_dev, REG_CSI2_CTRL0, CSI2_C0_VCDC_SEL);
|
|
+ /* FIXME: no need */
|
|
+ ccic_reg_set_bit(ccic_dev, REG_CSI2_CTRL0, CSI2_C0_ENABLE);
|
|
+ break;
|
|
+ case CCIC_DMA_SEL_LOCAL_VCDT:
|
|
+ ccic_reg_clear_bit(ccic_dev, REG_CSI2_CTRL0, CSI2_C0_EXT_TIM_ENA);
|
|
+ ccic_reg_set_bit(ccic_dev, REG_CSI2_CTRL0, CSI2_C0_VCDC_SEL);
|
|
+ /* FIXME: no need */
|
|
+ ccic_reg_set_bit(ccic_dev, REG_CSI2_CTRL0, CSI2_C0_ENABLE);
|
|
+ break;
|
|
+ case CCIC_DMA_SEL_REMOTE_VCDT:
|
|
+ ccic_reg_set_bit(ccic_dev, REG_CSI2_CTRL0, CSI2_C0_EXT_TIM_ENA);
|
|
+ ccic_reg_clear_bit(ccic_dev, REG_CSI2_CTRL0, CSI2_C0_VCDC_SEL);
|
|
+ /* When EXT_TIM_ENA is enabled, this field must be enabled too. */
|
|
+ ccic_reg_set_bit(ccic_dev, REG_CSI2_CTRL0, CSI2_C0_ENABLE);
|
|
+ break;
|
|
+ case CCIC_DMA_SEL_REMOTE_MAIN:
|
|
+ default:
|
|
+ return -EINVAL;
|
|
+ }
|
|
+
|
|
+ return 0;
|
|
+}
|
|
+
|
|
+int ccic_dma_set_out_format(struct ccic_dev *ccic_dev, u32 pixfmt, u32 width,
|
|
+ u32 height)
|
|
+{
|
|
+ u16 pitch_y, pitch_uv, imgsz_h, imgsz_w;
|
|
+ u32 data_fmt;
|
|
+
|
|
+ switch (pixfmt) {
|
|
+ case V4L2_PIX_FMT_SBGGR8:
|
|
+ case V4L2_PIX_FMT_SGBRG8:
|
|
+ case V4L2_PIX_FMT_SGRBG8:
|
|
+ case V4L2_PIX_FMT_SRGGB8:
|
|
+ pitch_y = width;
|
|
+ pitch_uv = 0;
|
|
+ imgsz_w = pitch_y;
|
|
+ imgsz_h = height;
|
|
+ data_fmt = C0_DF_BAYER;
|
|
+ break;
|
|
+ case V4L2_PIX_FMT_SBGGR10P:
|
|
+ case V4L2_PIX_FMT_SGBRG10P:
|
|
+ case V4L2_PIX_FMT_SGRBG10P:
|
|
+ case V4L2_PIX_FMT_SRGGB10P:
|
|
+ pitch_y = width * 5 / 4;
|
|
+ pitch_uv = 0;
|
|
+ imgsz_w = pitch_y;
|
|
+ imgsz_h = height;
|
|
+ data_fmt = C0_DF_BAYER;
|
|
+ break;
|
|
+ case V4L2_PIX_FMT_SBGGR12P:
|
|
+ case V4L2_PIX_FMT_SGBRG12P:
|
|
+ case V4L2_PIX_FMT_SGRBG12P:
|
|
+ case V4L2_PIX_FMT_SRGGB12P:
|
|
+ pitch_y = width * 3 / 2;
|
|
+ pitch_uv = 0;
|
|
+ imgsz_w = pitch_y;
|
|
+ imgsz_h = height;
|
|
+ data_fmt = C0_DF_BAYER;
|
|
+ break;
|
|
+ default:
|
|
+ pr_err("%s failed: invalid pixfmt %d\n", __func__, pixfmt);
|
|
+ return -1;
|
|
+ }
|
|
+
|
|
+ ccic_reg_write(ccic_dev, REG_IMGPITCH, pitch_uv << 16 | pitch_y);
|
|
+ ccic_reg_write(ccic_dev, REG_IMGSIZE, imgsz_h << 16 | imgsz_w);
|
|
+ ccic_reg_write(ccic_dev, REG_IMGOFFSET, 0x0);
|
|
+
|
|
+ ccic_reg_write_mask(ccic_dev, REG_CTRL0, data_fmt, C0_DF_MASK);
|
|
+ /* Make sure it knows we want to use hsync/vsync. */
|
|
+ ccic_reg_write_mask(ccic_dev, REG_CTRL0, C0_SIF_HVSYNC, C0_SIFM_MASK);
|
|
+ /* Need set following bit for auto-recovery */
|
|
+ ccic_reg_set_bit(ccic_dev, REG_CTRL0, C0_EOFFLUSH);
|
|
+
|
|
+ return 0;
|
|
+}
|
|
+
|
|
+int ccic_dma_set_burst(struct ccic_dev *ccic_dev)
|
|
+{
|
|
+ u32 dma_burst;
|
|
+
|
|
+ /* setup the DMA burst */
|
|
+ switch (ccic_dev->dma_burst) {
|
|
+ case 128:
|
|
+ dma_burst = C1_DMAB128;
|
|
+ break;
|
|
+ case 256:
|
|
+ dma_burst = C1_DMAB256;
|
|
+ break;
|
|
+ default:
|
|
+ dma_burst = C1_DMAB64;
|
|
+ break;
|
|
+ }
|
|
+ ccic_reg_write_mask(ccic_dev, REG_CTRL1, dma_burst, C1_DMAB_MASK);
|
|
+ ccic_reg_set_bit(ccic_dev, REG_CTRL1, C1_DMAB_LENSEL);
|
|
+
|
|
+ /* ccic_reg_set_bit(ccic_dev, REG_CTRL2, C2_LGCY_LNNUM); */
|
|
+ /* ccic_reg_set_bit(ccic_dev, REG_CTRL2, C2_LGCY_HBLANK); */
|
|
+ return 0;
|
|
+}
|
|
+
|
|
+void ccic_dma_enable(struct ccic_dev *ccic_dev, int en)
|
|
+{
|
|
+ if (en) {
|
|
+ ccic_reg_set_bit(ccic_dev, REG_IRQMASK, FRAMEIRQS);
|
|
+ /* 0x3c: enable ccic dma */
|
|
+ ccic_reg_set_bit(ccic_dev, REG_CTRL0, BIT(0));
|
|
+ } else {
|
|
+ ccic_reg_clear_bit(ccic_dev, REG_IRQMASK, FRAMEIRQS);
|
|
+ /* 0x3c: disable ccic dma */
|
|
+ ccic_reg_clear_bit(ccic_dev, REG_CTRL0, BIT(0));
|
|
+ }
|
|
+}
|
|
+
|
|
+int ccic_csi2idi_src_sel(struct ccic_dev *ccic_dev, int sel)
|
|
+{
|
|
+ switch (sel) {
|
|
+ case CCIC_IDI_SEL_NONE:
|
|
+ /* ccic_reg_clear_bit(ccic_dev, REG_IDI_CTRL, IDI_RELEASE_RESET); */
|
|
+ ccic_reg_set_bit(ccic_dev, REG_CSI2_CTRL2, CSI2_C2_REPACK_RST);
|
|
+ ccic_reg_clear_bit(ccic_dev, REG_CSI2_CTRL2, CSI2_C2_REPACK_ENA);
|
|
+ ccic_reg_clear_bit(ccic_dev, REG_CSI2_CTRL2, CSI2_C2_DPCM_ENA);
|
|
+ ccic_reg_clear_bit(ccic_dev, REG_CSI2_VCCTRL, CSI2_VCCTRL_MD_VC);
|
|
+ ccic_reg_write_mask(ccic_dev, REG_CSI2_CTRL2,
|
|
+ CSI2_C2_MUX_SEL_LOCAL_MAIN, CSI2_C2_MUX_SEL_MASK);
|
|
+ break;
|
|
+ case CCIC_IDI_SEL_REPACK:
|
|
+ ccic_reg_write_mask(ccic_dev, REG_IDI_CTRL, IDI_SEL_DPCM_REPACK, IDI_SEL_MASK);
|
|
+ ccic_reg_clear_bit(ccic_dev, REG_CSI2_CTRL2, CSI2_C2_IDI_MUX_SEL_DPCM);
|
|
+ ccic_reg_clear_bit(ccic_dev, REG_CSI2_CTRL2, CSI2_C2_REPACK_RST);
|
|
+ ccic_reg_set_bit(ccic_dev, REG_CSI2_CTRL2, CSI2_C2_REPACK_ENA);
|
|
+ break;
|
|
+ case CCIC_IDI_SEL_DPCM:
|
|
+ ccic_reg_write_mask(ccic_dev, REG_IDI_CTRL, IDI_SEL_DPCM_REPACK, IDI_SEL_MASK);
|
|
+ ccic_reg_set_bit(ccic_dev, REG_CSI2_CTRL2, CSI2_C2_IDI_MUX_SEL_DPCM);
|
|
+ ccic_reg_set_bit(ccic_dev, REG_CSI2_CTRL2, CSI2_C2_DPCM_ENA);
|
|
+ break;
|
|
+ case CCIC_IDI_SEL_PARALLEL:
|
|
+ ccic_reg_write_mask(ccic_dev, REG_IDI_CTRL, IDI_SEL_PARALLEL, IDI_SEL_MASK);
|
|
+ break;
|
|
+ default:
|
|
+ pr_err("%s: IDI source is error %d\n", __func__, sel);
|
|
+ return -EINVAL;
|
|
+ }
|
|
+
|
|
+ return 0;
|
|
+}
|
|
+
|
|
+void ccic_csi2idi_reset(struct ccic_dev *ccic_dev, int reset)
|
|
+{
|
|
+ if (reset) {
|
|
+ ccic_reg_clear_bit(ccic_dev, REG_IDI_CTRL, IDI_RELEASE_RESET);
|
|
+ /* assert reset to Repack module */
|
|
+ ccic_reg_set_bit(ccic_dev, REG_CSI2_CTRL2, CSI2_C2_REPACK_RST);
|
|
+ } else {
|
|
+ ccic_reg_set_bit(ccic_dev, REG_IDI_CTRL, IDI_RELEASE_RESET);
|
|
+ /* Deassert reset to Repack module */
|
|
+ ccic_reg_clear_bit(ccic_dev, REG_CSI2_CTRL2, CSI2_C2_REPACK_RST);
|
|
+ }
|
|
+}
|
|
+
|
|
+/* dump register in order to debug */
|
|
+void ccic_hw_dump_regs(struct ccic_dev *ccic_dev)
|
|
+{
|
|
+ unsigned int ret;
|
|
+
|
|
+ pr_info("CCIC%d regs dump:\n", ccic_dev->index);
|
|
+ /*
|
|
+ * CCIC IRQ REG
|
|
+ */
|
|
+ ret = ccic_reg_read(ccic_dev, REG_IRQSTAT);
|
|
+ pr_info("CCIC: REG_IRQSTAT[0x%02x] is 0x%08x\n", REG_IRQSTAT, ret);
|
|
+ ret = ccic_reg_read(ccic_dev, REG_IRQSTATRAW);
|
|
+ pr_info("CCIC: REG_IRQSTATRAW[0x%02x] is 0x%08x\n", REG_IRQSTATRAW, ret);
|
|
+ ret = ccic_reg_read(ccic_dev, REG_IRQMASK);
|
|
+ pr_info("CCIC: REG_IRQMASK[0x%02x] is 0x%08x\n\n", REG_IRQMASK, ret);
|
|
+
|
|
+ /*
|
|
+ * CCIC IMG REG
|
|
+ */
|
|
+ ret = ccic_reg_read(ccic_dev, REG_IMGPITCH);
|
|
+ pr_info("CCIC: REG_IMGPITCH[0x%02x] is 0x%08x\n", REG_IMGPITCH, ret);
|
|
+ ret = ccic_reg_read(ccic_dev, REG_IMGSIZE);
|
|
+ pr_info("CCIC: REG_IMGSIZE[0x%02x] is 0x%08x\n", REG_IMGSIZE, ret);
|
|
+ ret = ccic_reg_read(ccic_dev, REG_IMGOFFSET);
|
|
+ pr_info("CCIC: REG_IMGOFFSET[0x%02x] is 0x%08x\n\n", REG_IMGOFFSET, ret);
|
|
+
|
|
+ /*
|
|
+ * CCIC CTRL REG
|
|
+ */
|
|
+ ret = ccic_reg_read(ccic_dev, REG_CTRL0);
|
|
+ pr_info("CCIC: REG_CTRL0[0x%02x] is 0x%08x\n", REG_CTRL0, ret);
|
|
+ ret = ccic_reg_read(ccic_dev, REG_CTRL1);
|
|
+ pr_info("CCIC: REG_CTRL1[0x%02x] is 0x%08x\n", REG_CTRL1, ret);
|
|
+ ret = ccic_reg_read(ccic_dev, REG_CTRL2);
|
|
+ pr_info("CCIC: REG_CTRL2[0x%02x] is 0x%08x\n", REG_CTRL2, ret);
|
|
+ ret = ccic_reg_read(ccic_dev, REG_CTRL3);
|
|
+ pr_info("CCIC: REG_CTRL3[0x%02x] is 0x%08x\n", REG_CTRL3, ret);
|
|
+ ret = ccic_reg_read(ccic_dev, REG_IDI_CTRL);
|
|
+ pr_info("CCIC: REG_IDI_CTRL[0x%02x] is 0x%08x\n\n", REG_IDI_CTRL, ret);
|
|
+ ret = ccic_reg_read(ccic_dev, REG_CSI2_VCCTRL);
|
|
+ pr_info("CCIC: REG_CSI2_VCCTRL[0x%02x] is 0x%08x\n\n", REG_CSI2_VCCTRL, ret);
|
|
+ ret = ccic_reg_read(ccic_dev, REG_LNNUM);
|
|
+ pr_info("CCIC: REG_LNNUM[0x%02x] is 0x%08x\n", REG_LNNUM, ret);
|
|
+ ret = ccic_reg_read(ccic_dev, REG_FRAME_CNT);
|
|
+ pr_info("CCIC: REG_FRAME_CNT[0x%02x] is 0x%08x\n", REG_FRAME_CNT, ret);
|
|
+
|
|
+ /*
|
|
+ * CCIC CSI2 REG
|
|
+ */
|
|
+ ret = ccic_reg_read(ccic_dev, REG_CSI2_DPHY1);
|
|
+ pr_info("CCIC: REG_CSI2_DPHY1[0x%02x] is 0x%08x\n", REG_CSI2_DPHY1, ret);
|
|
+ ret = ccic_reg_read(ccic_dev, REG_CSI2_DPHY2);
|
|
+ pr_info("CCIC: REG_CSI2_DPHY2[0x%02x] is 0x%08x\n", REG_CSI2_DPHY2, ret);
|
|
+ ret = ccic_reg_read(ccic_dev, REG_CSI2_DPHY3);
|
|
+ pr_info("CCIC: REG_CSI2_DPHY3[0x%02x] is 0x%08x\n", REG_CSI2_DPHY3, ret);
|
|
+ ret = ccic_reg_read(ccic_dev, REG_CSI2_DPHY4);
|
|
+ pr_info("CCIC: REG_CSI2_DPHY4[0x%02x] is 0x%08x\n", REG_CSI2_DPHY4, ret);
|
|
+ ret = ccic_reg_read(ccic_dev, REG_CSI2_DPHY5);
|
|
+ pr_info("CCIC: REG_CSI2_DPHY5[0x%02x] is 0x%08x\n", REG_CSI2_DPHY5, ret);
|
|
+ ret = ccic_reg_read(ccic_dev, REG_CSI2_DPHY6);
|
|
+ pr_info("CCIC: REG_CSI2_DPHY6[0x%02x] is 0x%08x\n", REG_CSI2_DPHY6, ret);
|
|
+ ret = ccic_reg_read(ccic_dev, REG_CSI2_CTRL0);
|
|
+ pr_info("CCIC: REG_CSI2_CTRL0[0x%02x] is 0x%08x\n\n", REG_CSI2_CTRL0, ret);
|
|
+ ret = ccic_reg_read(ccic_dev, REG_CSI2_CTRL2);
|
|
+ pr_info("CCIC: REG_CSI2_CTRL2[0x%02x] is 0x%08x\n\n", REG_CSI2_CTRL2, ret);
|
|
+ ret = ccic_reg_read(ccic_dev, REG_CSI2_CTRL3);
|
|
+ pr_info("CCIC: REG_CSI2_CTRL3[0x%02x] is 0x%08x\n\n", REG_CSI2_CTRL3, ret);
|
|
+
|
|
+ /*
|
|
+ * CCIC YUV REG
|
|
+ */
|
|
+ ret = ccic_reg_read(ccic_dev, REG_Y0BAR);
|
|
+ pr_info("CCIC: REG_Y0BAR[0x%02x] 0x%08x\n", REG_Y0BAR, ret);
|
|
+ ret = ccic_reg_read(ccic_dev, REG_U0BAR);
|
|
+ pr_info("CCIC: REG_U0BAR[0x%02x] 0x%08x\n", REG_U0BAR, ret);
|
|
+ ret = ccic_reg_read(ccic_dev, REG_V0BAR);
|
|
+ pr_info("CCIC: REG_V0BAR[0x%02x] 0x%08x\n\n", REG_V0BAR, ret);
|
|
+
|
|
+#if 0
|
|
+ /*
|
|
+ * CCIC APMU REG
|
|
+ */
|
|
+ ret = __raw_readl(get_apmu_base_va() + REG_CLK_CCIC_RES);
|
|
+ pr_info("CCIC: APMU_CCIC_RES[0x%02x] is 0x%08x\n", REG_CLK_CCIC_RES, ret);
|
|
+ ret = __raw_readl(get_apmu_base_va() + REG_CLK_CCIC2_RES);
|
|
+ pr_info("CCIC: APMU_CCIC2_RES[0x%02x] is 0x%08x\n", REG_CLK_CCIC2_RES, ret);
|
|
+#endif
|
|
+}
|
|
diff --git a/drivers/media/platform/spacemit/camera/cam_ccic/ccic_hwreg.h b/drivers/media/platform/spacemit/camera/cam_ccic/ccic_hwreg.h
|
|
new file mode 100644
|
|
index 000000000000..111111111111
|
|
--- /dev/null
|
|
+++ b/drivers/media/platform/spacemit/camera/cam_ccic/ccic_hwreg.h
|
|
@@ -0,0 +1,237 @@
|
|
+/* SPDX-License-Identifier: GPL-2.0 */
|
|
+/*
|
|
+ * ccic_hwreg.h - hw register for ccic
|
|
+ *
|
|
+ * Copyright (C) 2023 SPACEMIT Micro Limited
|
|
+ */
|
|
+
|
|
+#ifndef __CCIC_HWREG_H__
|
|
+#define __CCIC_HWREG_H__
|
|
+
|
|
+#define REG_Y0BAR 0x00
|
|
+#define REG_U0BAR 0x0c
|
|
+#define REG_V0BAR 0x18
|
|
+
|
|
+#define REG_IMGPITCH 0x24 /* Image pitch register */
|
|
+#define IMGP_YP_SHFT 0 /* Y pitch params */
|
|
+#define IMGP_YP_MASK 0x00003fff /* Y pitch field */
|
|
+#define IMGP_UVP_SHFT 16 /* UV pitch (planar) */
|
|
+#define IMGP_UVP_MASK 0x3fff0000
|
|
+
|
|
+#define REG_IRQSTATRAW 0x28 /* RAW IRQ Status */
|
|
+#define REG_IRQMASK 0x2c /* IRQ mask - same bits as IRQSTAT */
|
|
+#define REG_IRQSTAT 0x30 /* IRQ status / clear */
|
|
+#define IRQ_DMA_EOF (BIT(0)) /* DMA EOF IRQ */
|
|
+#define IRQ_DMA_SOF (BIT(1)) /* DMA SOF IRQ */
|
|
+#define IRQ_CSI_EOF (BIT(2)) /* CSI EOF IRQ */
|
|
+#define IRQ_CSI_SOF (BIT(3)) /* CSI SOF IRQ */
|
|
+#define IRQ_DMA_NOT_DONE (BIT(4)) /* DMA not done at frame start IRQ */
|
|
+#define IRQ_SHADOW_NOT_RDY (BIT(5)) /* Shadow bit not ready at frame start IRQ */
|
|
+#define IRQ_DMA_OVERFLOW (BIT(6)) /* FIFO full IRQ */
|
|
+#define IRQ_DMA_PRO_LINE (BIT(7)) /* CCIC Programmable Line IRQ */
|
|
+#define IRQ_IDI_PRO_LINE (BIT(8)) /* IDI Programmable Line IRQ */
|
|
+#define IRQ_CSI2IDI_FLUSH (BIT(9)) /* CSI2IDI DATA FLUSH IRQ */
|
|
+#define IRQ_CSI2IDI_HBLK2HSYNC (BIT(10)) /* HBLK_TO_HSYNC IRQ, CSI2IDI module detects Hblank to Hsync gap too small */
|
|
+#define IRQ_DMA_WR_ERR (BIT(11)) /* AXI Write Error IRQ */
|
|
+#define IRQ_DPHY_RX_CLKULPS_ACTIVE (BIT(12)) /* DPHY Rx CLKULPS Active IRQ */
|
|
+#define IRQ_DPHY_RX_CLKULPS (BIT(13)) /* DPHY Rx CLKULPS IRQ */
|
|
+#define IRQ_DPHY_LN_ULPS_ACTIVE (BIT(14)) /* DPHY Lane ULPS Active IRQ */
|
|
+#define IRQ_DPHY_LN_ERR_CTL (BIT(15)) /* DPHY Lane Error Control IRQ */
|
|
+#define IRQ_DPHY_LN_TX_SYNC_ERR (BIT(16)) /* DPHY Lane Start of Transmission Sync Error IRQ */
|
|
+#define IRQ_DPHY_LN_TX_ERR (BIT(17)) /* DPHY Lane Start of Transmission Error IRQ */
|
|
+#define IRQ_DPHY_LN_RX_ERR (BIT(18)) /* DPHY receiver Line Error IRQ */
|
|
+#define IRQ_DPCM_REPACK_ERR (BIT(19)) /* DPCM/Repack IRQ */
|
|
+
|
|
+#define IRQ_CSI2PACKET_ERR (BIT(23))
|
|
+#define IRQ_CSI2CRC_ERR (BIT(24))
|
|
+#define IRQ_CSI2ECC2BIT_ERR (BIT(25))
|
|
+#define IRQ_CSI2PATIRY_ERR (BIT(26))
|
|
+#define IRQ_CSI2ECCCORRECTABLE_ERR (BIT(27))
|
|
+#define IRQ_CSI2LANEFIFOOVERRUN_ERR (BIT(28))
|
|
+#define IRQ_CSI2PARSE_ERR (BIT(29))
|
|
+#define IRQ_CSI2GENSHORTPACKVALID (BIT(30))
|
|
+#define IRQ_CSI2GENSHORTPACK_ERR (BIT(31))
|
|
+// #define FRAMEIRQS (IRQ_CSI_SOF | IRQ_CSI_EOF | IRQ_DMA_SOF | IRQ_DMA_EOF)
|
|
+#define FRAMEIRQS (IRQ_DMA_SOF | IRQ_DMA_EOF | IRQ_DMA_OVERFLOW | IRQ_DMA_NOT_DONE | IRQ_SHADOW_NOT_RDY)
|
|
+#define CSI2PHYERRS (0xFF0B0000)
|
|
+#define ALLIRQS (FRAMEIRQS | CSI2PHYERRS | IRQ_CSI2IDI_HBLK2HSYNC)
|
|
+
|
|
+#define REG_IMGSIZE 0x34 /* Image size */
|
|
+#define IMGSZ_V_MASK 0x1fff0000
|
|
+#define IMGSZ_V_SHIFT 16
|
|
+#define IMGSZ_H_MASK 0x00003fff
|
|
+#define IMGSZ_H_SHIFT 0
|
|
+
|
|
+#define REG_IMGOFFSET 0x38 /* IMage offset */
|
|
+
|
|
+#define REG_CTRL0 0x3c /* Control 0 */
|
|
+#define C0_ENABLE 0x00000001 /* Makes the whole thing go */
|
|
+/* Mask for all the format bits */
|
|
+#define C0_DF_MASK 0x08dffffc
|
|
+/* RGB ordering */
|
|
+#define C0_RGB4_RGBX 0x00000000
|
|
+#define C0_RGB4_XRGB 0x00000004
|
|
+#define C0_RGB4_BGRX 0x00000008
|
|
+#define C0_RGB4_XBGR 0x0000000c
|
|
+#define C0_RGB5_RGGB 0x00000000
|
|
+#define C0_RGB5_GRBG 0x00000004
|
|
+#define C0_RGB5_GBRG 0x00000008
|
|
+#define C0_RGB5_BGGR 0x0000000c
|
|
+/* YUV4222 to 420 Semi-Planar Enable */
|
|
+#define C0_YUV420SP 0x00000010
|
|
+/* Spec has two fields for DIN and DOUT, but they must match, so
|
|
+ combine them here. */
|
|
+#define C0_DF_YUV 0x00000000 /* Data is YUV */
|
|
+#define C0_DF_RGB 0x000000a0 /* Data is RGB */
|
|
+#define C0_DF_BAYER 0x00000140 /* Data is Bayer */
|
|
+/* 8-8-8 must be missing from the below - ask */
|
|
+#define C0_RGBF_565 0x00000000
|
|
+#define C0_RGBF_444 0x00000800
|
|
+#define C0_RGB_BGR 0x00001000 /* Blue comes first */
|
|
+#define C0_YUV_PLANAR 0x00000000 /* YUV 422 planar format */
|
|
+#define C0_YUV_PACKED 0x00008000 /* YUV 422 packed format */
|
|
+#define C0_YUV_420PL 0x0000a000 /* YUV 420 planar format */
|
|
+/* Think that 420 packed must be 111 - ask */
|
|
+#define C0_YUVE_YUYV 0x00000000 /* Y1CbY0Cr */
|
|
+#define C0_YUVE_YVYU 0x00010000 /* Y1CrY0Cb */
|
|
+#define C0_YUVE_VYUY 0x00020000 /* CrY1CbY0 */
|
|
+#define C0_YUVE_UYVY 0x00030000 /* CbY1CrY0 */
|
|
+#define C0_YUVE_XYUV 0x00000000 /* 420: .YUV */
|
|
+#define C0_YUVE_XYVU 0x00010000 /* 420: .YVU */
|
|
+#define C0_YUVE_XUVY 0x00020000 /* 420: .UVY */
|
|
+#define C0_YUVE_XVUY 0x00030000 /* 420: .VUY */
|
|
+/* Bayer bits 18,19 if needed */
|
|
+#define C0_HPOL_LOW 0x01000000 /* HSYNC polarity active low */
|
|
+#define C0_VPOL_LOW 0x02000000 /* VSYNC polarity active low */
|
|
+#define C0_VCLK_LOW 0x04000000 /* VCLK on falling edge */
|
|
+#define C0_420SP_UVSWAP 0x08000000 /* YUV420SP U/V Swap */
|
|
+#define C0_SIFM_MASK 0xc0000000 /* SIF mode bits */
|
|
+#define C0_SIF_HVSYNC 0x00000000 /* Use H/VSYNC */
|
|
+#define C0_SOF_NOSYNC 0x40000000 /* Use inband active signaling */
|
|
+#define C0_EOF_VSYNC 0x00400000 /* Generate EOF by VSYNC */
|
|
+#define C0_VEDGE_CTRL 0x00800000 /* Detecting falling edge of VSYNC */
|
|
+/* bit 21: fifo overrun auto-recovery */
|
|
+#define C0_EOFFLUSH 0x00200000
|
|
+/* bit 27 YUV420SP_UV_SWAP */
|
|
+#define C0_YUV420SP_UV_SWAP 0x08000000
|
|
+
|
|
+#define REG_CTRL1 0x40 /* Control 1 */
|
|
+#define C1_SENCLKGATE 0x00000001 /* Sensor Clock Gate */
|
|
+#define C1_RESVZ 0x0001fffe
|
|
+#define C1_DMAB_LENSEL 0x00020000 /* set 1, coupled CCICx */
|
|
+#define C1_444ALPHA 0x00f00000 /* Alpha field in RGB444 */
|
|
+#define C1_ALPHA_SHFT 20
|
|
+#define C1_AWCACHE 0x00100000 /* set 1. coupled CCICx */
|
|
+#define C1_DMAB64 0x00000000 /* 64-byte DMA burst */
|
|
+#define C1_DMAB128 0x02000000 /* 128-byte DMA burst */
|
|
+#define C1_DMAB256 0x04000000 /* 256-byte DMA burst */
|
|
+#define C1_DMAB_MASK 0x06000000
|
|
+#define C1_SHADOW_RDY 0x08000000 /* set it 1 when BAR is set */
|
|
+#define C1_PWRDWN 0x10000000 /* Power down */
|
|
+#define C1_DMAPOSTED 0x40000000 /* DMA Posted Select */
|
|
+
|
|
#define REG_CTRL2 0x44 /* Control 2 */
/* recommend set 1 to disable legacy calc DMA line num */
#define C2_LGCY_LNNUM 0x80000000
/* recommend set 1 to disable legacy CSI2 hblank */
#define C2_LGCY_HBLANK 0x40000000

#define REG_CTRL3 0x48 /* Control 3 */
|
|
+
|
|
+#define REG_LNNUM 0x60 /* Lines num DMA filled */
|
|
+
|
|
+#define CLK_DIV_MASK 0x0000ffff /* Upper bits RW "reserved" */
|
|
+#define REG_FRAME_CNT 0x23C
|
|
+
|
|
+/* MIPI */
|
|
+#define REG_CSI2_CTRL0 0x100
|
|
+#define CSI2_C0_ENABLE 0x01
|
|
+#define CSI2_C0_LANE_NUM(n) (((n)-1) << 1)
|
|
+#define CSI2_C0_LANE_NUM_MASK 0x06
|
|
+#define CSI2_C0_EXT_TIM_ENA (0x1 << 3)
|
|
+#define CSI2_C0_VLEN (0x4 << 4)
|
|
+#define CSI2_C0_VLEN_MASK (0xf << 4)
|
|
+#define CSI2_C0_VCDC_SEL (0x1 << 13)
|
|
+#define REG_CSI2_VCCTRL 0x114
|
|
+#define CSI2_VCCTRL_MD_MASK (0x3 << 0)
|
|
+#define CSI2_VCCTRL_MD_NORMAL (0x0 << 0)
|
|
+#define CSI2_VCCTRL_MD_VC (0x1 << 0)
|
|
+#define CSI2_VCCTRL_MD_DT (0x2 << 0)
|
|
+#define CSI2_VCCTRL_VC0_MASK (0x3 << 14)
|
|
+#define CSI2_VCCTRL_DT1_MASK (0x3 << 16)
|
|
+#define CSI2_VCCTRL_VC1_MASK (0x3 << 22)
|
|
+#define REG_CSI2_DPHY1 0x124
|
|
+#define CSI2_DHPY1_ANA_PU (0x1 << 0)
|
|
+#define CSI2_DHPY1_BIF_EN (0x1 << 1)
|
|
+#define REG_CSI2_DPHY2 0x128
|
|
+#define CSI2_DPHY2_SEL_IREF(n) ((n & 0x03) << 30)
|
|
+#define CSI2_DPHY2_VTH_LPRX_H(n) ((n & 0x07) << 27)
|
|
+#define CSI2_DPHY2_VTH_LPRX_L(n) ((n & 0x07) << 24)
|
|
+#define CSI2_DPHY2_CK_ENA 0x00800000
|
|
+#define CSI2_DPHY2_CK_DELAY(n) ((n & 0x07) << 20)
|
|
+#define CSI2_DPHY2_LPRX_CTL 0x00080000
|
|
+#define CSI2_DPHY2_HSRX_TERM(n) ((n & 0x07) << 16)
|
|
+#define CSI2_DPHY2_CH2_ENA 0x00008000
|
|
+#define CSI2_DPHY2_CH2_DELAY(n) ((n & 0x07) << 12)
|
|
+#define CSI2_DPHY2_CH3_ENA 0x00000800
|
|
+#define CSI2_DPHY2_CH3_DELAY(n) ((n & 0x07) << 8)
|
|
+#define CSI2_DPHY2_CH0_ENA 0x00000080
|
|
+#define CSI2_DPHY2_CH0_DELAY(n) ((n & 0x07) << 4)
|
|
+#define CSI2_DPHY2_CH1_ENA 0x00000008
|
|
+#define CSI2_DPHY2_CH1_DELAY(n) ((n & 0x07) << 0)
|
|
+#define REG_CSI2_DPHY3 0x12c
|
|
+#define CSI2_DPHY3_HS_SETTLE_SHIFT 8
|
|
+#define REG_CSI2_DPHY4 0x130
|
|
+#define CSI2_DHPY4_BIF_EN (0x1 << 23)
|
|
+#define CSI2_DPHY4_CHK_ENA 0x00000040
|
|
+#define CSI2_DPHY4_ERR_OUT_SEL 0x00000020
|
|
+#define CSI2_DPHY4_ERR_CLEAR 0x00000010
|
|
+#define CSI2_DPHY4_LANE_SEL(n) ((n & 0x03) << 2)
|
|
+#define CSI2_DPHY4_PATTERN_SEL(n) ((n & 0x03) << 0)
|
|
+#define REG_CSI2_DPHY5 0x134
|
|
+#define CSI2_DPHY5_LANE_RESC_ENA_SHIFT 4
|
|
+#define CSI2_DPHY5_LANE_ENA(n) ((1 << (n)) - 1)
|
|
+#define REG_CSI2_DPHY6 0x138
|
|
+#define CSI2_DPHY6_CK_SETTLE_SHIFT 8
|
|
+#define REG_CSI2_CTRL2 0x140
|
|
+#define CSI2_C2_MUX_SEL_MASK 0x06000000
|
|
+#define CSI2_C2_MUX_SEL_LOCAL_MAIN 0x00000000
|
|
+#define CSI2_C2_MUX_SEL_IPE2_VCDT 0x02000000
|
|
+#define CSI2_C2_MUX_SEL_IPE2_MAIN 0x04000000
|
|
+#define CSI2_C2_MUX_SEL_REMOTE_VCDT 0x06000000
|
|
+#define CSI2_C2_IDI_MUX_SEL_DPCM 0x01000000
|
|
+#define CSI2_C2_REPACK_RST 0x00200000
|
|
+#define CSI2_C2_REPACK_ENA 0x00010000
|
|
+#define CSI2_C2_DPCM_ENA 0x00000001
|
|
+#define REG_CSI2_CTRL3 0x144
|
|
+/* IDI */
|
|
+#define REG_IDI_CTRL 0x310
|
|
+#define IDI_SEL_MASK 0x06
|
|
+#define IDI_SEL_DPCM_REPACK 0x00
|
|
+#define IDI_SEL_PARALLEL 0x02
|
|
+#define IDI_SEL_AHB 0x04
|
|
+#define IDI_RELEASE_RESET 0x01
|
|
+
|
|
+#define REG_IDI_TRIG_LINE_NUM 0x330
|
|
+#define REG_CSI_LANE_STATE_DBG 0x334
|
|
+
|
|
+/* APMU */
|
|
+#define REG_CLK_CCIC_RES 0x50
|
|
+#define REG_CLK_CCIC2_RES 0x24
|
|
+
|
|
+#define CF_SINGLE_BUF 0
|
|
+#define CF_FRAME_SOF0 1
|
|
+#define CF_FRAME_OVERFLOW 2
|
|
+
|
|
+int ccic_csi2_config_dphy(struct ccic_dev *ccic_dev, int lanes, int enable);
|
|
+int ccic_csi2_lanes_enable(struct ccic_dev *ccic_dev, int lanes);
|
|
+int ccic_csi2_vc_ctrl(struct ccic_dev *ccic_dev, int md, u8 vc0, u8 vc1);
|
|
+int ccic_dma_src_sel(struct ccic_dev *ccic_dev, int sel);
|
|
+int ccic_dma_set_out_format(struct ccic_dev *ccic_dev, u32 pixfmt, u32 width,
|
|
+ u32 height);
|
|
+int ccic_dma_set_burst(struct ccic_dev *ccic_dev);
|
|
+void ccic_dma_enable(struct ccic_dev *ccic_dev, int en);
|
|
+int ccic_csi2idi_src_sel(struct ccic_dev *ccic_dev, int sel);
|
|
+void ccic_csi2idi_reset(struct ccic_dev *ccic_dev, int reset);
|
|
+void ccic_hw_dump_regs(struct ccic_dev *ccic_dev);
|
|
+#endif
|
|
diff --git a/drivers/media/platform/spacemit/camera/cam_ccic/csiphy.c b/drivers/media/platform/spacemit/camera/cam_ccic/csiphy.c
|
|
new file mode 100644
|
|
index 000000000000..111111111111
|
|
--- /dev/null
|
|
+++ b/drivers/media/platform/spacemit/camera/cam_ccic/csiphy.c
|
|
@@ -0,0 +1,364 @@
|
|
+// SPDX-License-Identifier: GPL-2.0
|
|
+/*
|
|
+ * Driver for SPACEMIT CCIC MIPI D-PHY MODULE
|
|
+ *
|
|
+ * Copyright(C) 2023 SPACEMIT Micro Limited.
|
|
+ */
|
|
+#define DEBUG /* for pr_debug() */
|
|
+
|
|
+#include <linux/device.h>
|
|
+#include <linux/io.h>
|
|
+#include <linux/clk.h>
|
|
+#include <linux/platform_device.h>
|
|
+#include <linux/of.h>
|
|
+#include <linux/printk.h>
|
|
+#include <linux/mutex.h>
|
|
+#include <linux/list.h>
|
|
+#include <linux/atomic.h>
|
|
+#include <linux/reset.h>
|
|
+#include "ccic_drv.h"
|
|
+#include "ccic_hwreg.h"
|
|
+#include "csiphy.h"
|
|
+
|
|
+#define K1X_CSIPHY_DRV_NAME "k1x-csiphy"
|
|
+
|
|
+struct csiphy_device {
|
|
+ struct list_head list;
|
|
+ struct platform_device *pdev;
|
|
+ struct device *dev;
|
|
+ struct resource *mem;
|
|
+ void __iomem *base;
|
|
+ struct clk *csiphy_clk;
|
|
+ struct reset_control *cphy_reset;
|
|
+ atomic_t usecnt;
|
|
+ bool is_bifmode;
|
|
+};
|
|
+
|
|
+/*
|
|
+ * Device register I/O
|
|
+ */
|
|
+static inline u32 csiphy_readl(struct csiphy_device *csiphy, unsigned int reg)
|
|
+{
|
|
+ return readl(csiphy->base + reg);
|
|
+}
|
|
+
|
|
+static inline void csiphy_writel(struct csiphy_device *csiphy, unsigned int reg,
|
|
+ u32 val)
|
|
+{
|
|
+ writel(val, csiphy->base + reg);
|
|
+}
|
|
+
|
|
+static inline void csiphy_mask_writel(struct csiphy_device *csiphy,
|
|
+ unsigned int reg, u32 val, u32 mask)
|
|
+{
|
|
+ u32 v = csiphy_readl(csiphy, reg);
|
|
+
|
|
+ v = (v & ~mask) | (val & mask);
|
|
+ csiphy_writel(csiphy, reg, v);
|
|
+}
|
|
+
|
|
+static inline void csiphy_set_bit(struct csiphy_device *csiphy,
|
|
+ unsigned int reg, u32 val)
|
|
+{
|
|
+ csiphy_mask_writel(csiphy, reg, val, val);
|
|
+}
|
|
+
|
|
+static inline void csiphy_clear_bit(struct csiphy_device *csiphy,
|
|
+ unsigned int reg, u32 val)
|
|
+{
|
|
+ csiphy_mask_writel(csiphy, reg, 0, val);
|
|
+}
|
|
+
|
|
+/**
|
|
+ * csiphy_set_power - Power on/off csiphy module
|
|
+ *
|
|
+ * @csiphy_dev: csiphy device
|
|
+ * @on: requested power state
|
|
+ *
|
|
+ * Return: 0 on success, error code otherwise.
|
|
+ */
|
|
+static int csiphy_set_power(struct csiphy_device *csiphy_dev, int on)
|
|
+{
|
|
+ int ret;
|
|
+
|
|
+ if (on) {
|
|
+ ret = clk_prepare_enable(csiphy_dev->csiphy_clk);
|
|
+ if (ret < 0) {
|
|
+ pr_err("%s failed, %d", __func__, ret);
|
|
+ return ret;
|
|
+ }
|
|
+ reset_control_deassert(csiphy_dev->cphy_reset);
|
|
+ } else {
|
|
+ clk_disable_unprepare(csiphy_dev->csiphy_clk);
|
|
+ reset_control_assert(csiphy_dev->cphy_reset);
|
|
+ }
|
|
+
|
|
+ return 0;
|
|
+}
|
|
+
|
|
+/**
|
|
+ * csiphy_set_2to2dphy -
|
|
+ *
|
|
+ * Return: 0 on success, error code otherwise.
|
|
+ */
|
|
+int csiphy_set_2to2dphy(struct csiphy_device *csiphy_dev, int enable)
|
|
+{
|
|
+ if (enable) {
|
|
+ /* REG_CSI2_DPHY1[1]: analog bif mode on */
|
|
+ csiphy_set_bit(csiphy_dev, REG_CSI2_DPHY1, CSI2_DHPY1_BIF_EN);
|
|
+ /* ccic3:REG_CSI2_DPHY4[23]: dphy3 2+2 lane mux */
|
|
+ csiphy_set_bit(csiphy_dev, REG_CSI2_DPHY4, CSI2_DHPY4_BIF_EN);
|
|
+ } else {
|
|
+ csiphy_clear_bit(csiphy_dev, REG_CSI2_DPHY1, CSI2_DHPY1_BIF_EN);
|
|
+ csiphy_clear_bit(csiphy_dev, REG_CSI2_DPHY4, CSI2_DHPY4_BIF_EN);
|
|
+ }
|
|
+
|
|
+ return 0;
|
|
+}
|
|
+
|
|
+static struct csi_dphy_calc dphy_calc_profiles[] = {
|
|
+ {
|
|
+ .hs_termen_pos = 0,
|
|
+ .hs_settle_pos = 50,
|
|
+ },
|
|
+};
|
|
+
|
|
+static int ccic_calc_dphy(struct csi_dphy_desc *desc,
|
|
+ struct csi_dphy_calc *algo, struct csi_dphy_reg *reg)
|
|
+{
|
|
+ u32 ps_period, ps_ui, ps_termen_max, ps_prep_max, ps_prep_min;
|
|
+ u32 ps_sot_min, ps_termen, ps_settle;
|
|
+
|
|
+ ps_period = MHZ * 1000 / (desc->clk_freq / 1000);
|
|
+ ps_ui = ps_period / 2;
|
|
+ ps_termen_max = NS_TO_PS(D_TERMEN_MAX) + 4 * ps_ui;
|
|
+ ps_prep_min = NS_TO_PS(HS_PREP_MIN) + 4 * ps_ui;
|
|
+ ps_prep_max = NS_TO_PS(HS_PREP_MAX) + 6 * ps_ui;
|
|
+ ps_sot_min = NS_TO_PS(HS_PREP_ZERO_MIN) + 10 * ps_ui;
|
|
+ ps_termen = ps_termen_max + algo->hs_termen_pos * ps_period;
|
|
+ ps_settle = NS_TO_PS(desc->hs_prepare + desc->hs_zero *
|
|
+ algo->hs_settle_pos / HS_SETTLE_POS_MAX);
|
|
+
|
|
+ reg->cl_termen = 0x01;
|
|
+ reg->cl_settle = 0x10;
|
|
+ reg->cl_miss = 0x00;
|
|
+ /* term_en = round_up(ps_termen / ps_period) - 1 */
|
|
+ reg->hs_termen = (ps_termen + ps_period - 1) / ps_period - 1;
|
|
+ /* For Marvell DPHY, Ths-settle started from HS-0, not VILmax */
|
|
+ ps_settle -= (reg->hs_termen + 1) * ps_period;
|
|
+ /* DE recommend this value reset to zero */
|
|
+ reg->hs_termen = 0x0;
|
|
+ /* round_up(ps_settle / ps_period) - 1 */
|
|
+ reg->hs_settle = (ps_settle + ps_period - 1) / ps_period - 1;
|
|
+ reg->hs_rx_to = 0xFFFF;
|
|
+ reg->lane = desc->nr_lane;
|
|
+ return 0;
|
|
+}
|
|
+
|
|
+int csiphy_stop(struct csiphy_device *csiphy_dev)
|
|
+{
|
|
+ if (csiphy_dev->is_bifmode)
|
|
+ csiphy_set_2to2dphy(csiphy_dev, 0);
|
|
+
|
|
+ csiphy_clear_bit(csiphy_dev, REG_CSI2_CTRL0, CSI2_C0_ENABLE);
|
|
+ csiphy_writel(csiphy_dev, REG_CSI2_DPHY5, 0x00);
|
|
+ /* analog power off */
|
|
+ csiphy_clear_bit(csiphy_dev, REG_CSI2_DPHY1, CSI2_DHPY1_ANA_PU);
|
|
+
|
|
+ if (atomic_dec_if_positive(&csiphy_dev->usecnt) == 0)
|
|
+ csiphy_set_power(csiphy_dev, 0);
|
|
+
|
|
+ return 0;
|
|
+}
|
|
+
|
|
+int csiphy_start(struct csiphy_device *csiphy_dev, struct mipi_csi2 *csi)
|
|
+{
|
|
+ unsigned int dphy2_val = 0;
|
|
+ unsigned int dphy3_val = 0;
|
|
+ unsigned int dphy5_val = 0;
|
|
+ unsigned int dphy6_val = 0;
|
|
+ unsigned int ctrl0_val = 0;
|
|
+ struct csi_dphy_reg dphy_reg;
|
|
+ int lanes = csi->dphy_desc.nr_lane;
|
|
+
|
|
+ if (lanes < 1 || lanes > (csiphy_dev->is_bifmode ? 2 : 4)) {
|
|
+ dev_err(csiphy_dev->dev, "wrong lanes num %d\n", lanes);
|
|
+ return -EINVAL;
|
|
+ }
|
|
+
|
|
+ if (atomic_inc_return(&csiphy_dev->usecnt) == 1)
|
|
+ csiphy_set_power(csiphy_dev, 1);
|
|
+
|
|
+ if (csi->calc_dphy) {
|
|
+ ccic_calc_dphy(&csi->dphy_desc, dphy_calc_profiles, &dphy_reg);
|
|
+ dphy3_val = dphy_reg.hs_settle & 0xFF;
|
|
+ dphy3_val =
|
|
+ dphy_reg.hs_termen | (dphy3_val << CSI2_DPHY3_HS_SETTLE_SHIFT);
|
|
+ dphy6_val = dphy_reg.cl_settle & 0xFF;
|
|
+ dphy6_val =
|
|
+ dphy_reg.cl_termen | (dphy6_val << CSI2_DPHY6_CK_SETTLE_SHIFT);
|
|
+ } else {
|
|
+ dphy2_val = csi->dphy[1];
|
|
+ dphy3_val = csi->dphy[2];
|
|
+ dphy6_val = csi->dphy[4];
|
|
+ }
|
|
+
|
|
+ dphy5_val = CSI2_DPHY5_LANE_ENA(lanes);
|
|
+ dphy5_val = dphy5_val | (dphy5_val << CSI2_DPHY5_LANE_RESC_ENA_SHIFT);
|
|
+
|
|
+ ctrl0_val = csiphy_readl(csiphy_dev, REG_CSI2_CTRL0);
|
|
+ ctrl0_val &= ~(CSI2_C0_LANE_NUM_MASK);
|
|
+ ctrl0_val |= CSI2_C0_LANE_NUM(lanes);
|
|
+ ctrl0_val |= CSI2_C0_ENABLE;
|
|
+ ctrl0_val &= ~(CSI2_C0_VLEN_MASK);
|
|
+ ctrl0_val |= CSI2_C0_VLEN;
|
|
+
|
|
+ //CSI2_DPHY1 must set bit,otherwise may cover someone else's config.
|
|
+ /* analog power on */
|
|
+ csiphy_set_bit(csiphy_dev, REG_CSI2_DPHY1, CSI2_DHPY1_ANA_PU);
|
|
+ csiphy_writel(csiphy_dev, REG_CSI2_DPHY2, dphy2_val);
|
|
+ if (!csiphy_readl(csiphy_dev, REG_CSI2_DPHY3))
|
|
+ csiphy_writel(csiphy_dev, REG_CSI2_DPHY3, dphy3_val);
|
|
+ csiphy_writel(csiphy_dev, REG_CSI2_DPHY5, dphy5_val);
|
|
+ csiphy_writel(csiphy_dev, REG_CSI2_DPHY6, dphy6_val);
|
|
+ csiphy_writel(csiphy_dev, REG_CSI2_CTRL0, ctrl0_val);
|
|
+
|
|
+ if (csiphy_dev->is_bifmode)
|
|
+ csiphy_set_2to2dphy(csiphy_dev, 1);
|
|
+
|
|
+ return 0;
|
|
+}
|
|
+
|
|
+static DEFINE_MUTEX(csiphy_list_mutex);
|
|
+static LIST_HEAD(csiphy_list);
|
|
+struct csiphy_device *csiphy_lookup_by_phandle(struct device *dev, const char *name)
|
|
+{
|
|
+ struct device_node *csiphy_node = of_parse_phandle(dev->of_node, name, 0);
|
|
+ struct csiphy_device *csiphy_dev;
|
|
+
|
|
+ mutex_lock(&csiphy_list_mutex);
|
|
+ list_for_each_entry(csiphy_dev, &csiphy_list, list) {
|
|
+ if (csiphy_node == csiphy_dev->dev->of_node) {
|
|
+ mutex_unlock(&csiphy_list_mutex);
|
|
+ of_node_put(csiphy_node);
|
|
+ return csiphy_dev;
|
|
+ }
|
|
+ }
|
|
+ mutex_unlock(&csiphy_list_mutex);
|
|
+
|
|
+ of_node_put(csiphy_node);
|
|
+ return NULL;
|
|
+}
|
|
+
|
|
+static int k1x_csiphy_probe(struct platform_device *pdev)
|
|
+{
|
|
+ struct csiphy_device *csiphy_dev;
|
|
+ struct device_node *np = pdev->dev.of_node;
|
|
+ int ret;
|
|
+
|
|
+ pr_debug("%s begin to probe\n", dev_name(&pdev->dev));
|
|
+
|
|
+ csiphy_dev = devm_kzalloc(&pdev->dev, sizeof(struct csiphy_device), GFP_KERNEL);
|
|
+ if (!csiphy_dev) {
|
|
+ dev_err(&pdev->dev, "no enough memory\n");
|
|
+ return -ENOMEM;
|
|
+ }
|
|
+
|
|
+ ret = of_property_read_u32(np, "cell-index", &pdev->id);
|
|
+ if (ret < 0) {
|
|
+ dev_err(&pdev->dev, "failed to get alias id, errno %d\n", ret);
|
|
+ return ret;
|
|
+ }
|
|
+
|
|
+ /* get mem */
|
|
+ csiphy_dev->mem = platform_get_resource_byname(pdev, IORESOURCE_MEM,
|
|
+ "csiphy-regs");
|
|
+ if (!csiphy_dev->mem) {
|
|
+ dev_err(&pdev->dev, "no mem resource");
|
|
+ return -ENODEV;
|
|
+ }
|
|
+
|
|
+ csiphy_dev->base = devm_ioremap(&pdev->dev, csiphy_dev->mem->start,
|
|
+ resource_size(csiphy_dev->mem));
|
|
+ if (IS_ERR(csiphy_dev->base)) {
|
|
+ dev_err(&pdev->dev, "fail to remap iomem\n");
|
|
+ return PTR_ERR(csiphy_dev->base);
|
|
+ }
|
|
+
|
|
+#ifdef CONFIG_ARCH_SPACEMIT
|
|
+ /* get clock(s) */
|
|
+ csiphy_dev->csiphy_clk = devm_clk_get(&pdev->dev, "csi_dphy");
|
|
+ if (IS_ERR(csiphy_dev->csiphy_clk)) {
|
|
+ ret = PTR_ERR(csiphy_dev->csiphy_clk);
|
|
+ dev_err(&pdev->dev, "failed to get csiphy clock: %d\n", ret);
|
|
+ return ret;
|
|
+ }
|
|
+ csiphy_dev->cphy_reset = devm_reset_control_get_optional(&pdev->dev, "cphy_reset");
|
|
+ if (IS_ERR_OR_NULL(csiphy_dev->cphy_reset)) {
|
|
+ dev_err(&pdev->dev, "not found core cphy_reset\n");
|
|
+ return PTR_ERR(csiphy_dev->cphy_reset);
|
|
+ }
|
|
+
|
|
+#endif
|
|
+
|
|
+ csiphy_dev->is_bifmode = of_property_read_bool(np, "spacemit,bifmode-enable");
|
|
+ atomic_set(&csiphy_dev->usecnt, 0);
|
|
+ csiphy_dev->pdev = pdev;
|
|
+ csiphy_dev->dev = &pdev->dev;
|
|
+ platform_set_drvdata(pdev, csiphy_dev);
|
|
+
|
|
+ mutex_lock(&csiphy_list_mutex);
|
|
+ list_add(&csiphy_dev->list, &csiphy_list);
|
|
+ mutex_unlock(&csiphy_list_mutex);
|
|
+
|
|
+ pr_debug("%s probed", dev_name(&pdev->dev));
|
|
+
|
|
+ return ret;
|
|
+}
|
|
+
|
|
+static int k1x_csiphy_remove(struct platform_device *pdev)
|
|
+{
|
|
+ struct csiphy_device *csiphy_dev;
|
|
+
|
|
+ csiphy_dev = platform_get_drvdata(pdev);
|
|
+ if (!csiphy_dev) {
|
|
+ dev_err(&pdev->dev, "csiphy device is NULL");
|
|
+ return 0;
|
|
+ }
|
|
+
|
|
+ devm_kfree(&pdev->dev, csiphy_dev);
|
|
+ pr_debug("%s removed", dev_name(&pdev->dev));
|
|
+
|
|
+ return 0;
|
|
+}
|
|
+
|
|
+static const struct of_device_id k1x_csiphy_dt_match[] = {
|
|
+ {.compatible = "spacemit,csi-dphy",.data = NULL },
|
|
+ { },
|
|
+};
|
|
+
|
|
+MODULE_DEVICE_TABLE(of, k1x_csiphy_dt_match);
|
|
+
|
|
+struct platform_driver k1x_csiphy_driver = {
|
|
+ .driver = {
|
|
+ .name = K1X_CSIPHY_DRV_NAME,
|
|
+ .of_match_table = of_match_ptr(k1x_csiphy_dt_match),
|
|
+ },
|
|
+ .probe = k1x_csiphy_probe,
|
|
+ .remove = k1x_csiphy_remove,
|
|
+};
|
|
+
|
|
+int ccic_csiphy_register(void)
|
|
+{
|
|
+ return platform_driver_register(&k1x_csiphy_driver);
|
|
+}
|
|
+
|
|
+void ccic_csiphy_unregister(void)
|
|
+{
|
|
+ platform_driver_unregister(&k1x_csiphy_driver);
|
|
+}
|
|
+
|
|
+/* module_platform_driver(k1x_csiphy_driver); */
|
|
+
|
|
+MODULE_DESCRIPTION("K1X CSIPHY Driver");
|
|
+MODULE_LICENSE("GPL");
|
|
diff --git a/drivers/media/platform/spacemit/camera/cam_ccic/csiphy.h b/drivers/media/platform/spacemit/camera/cam_ccic/csiphy.h
|
|
new file mode 100644
|
|
index 000000000000..111111111111
|
|
--- /dev/null
|
|
+++ b/drivers/media/platform/spacemit/camera/cam_ccic/csiphy.h
|
|
@@ -0,0 +1,25 @@
|
|
+// SPDX-License-Identifier: GPL-2.0
|
|
+/*
|
|
+ * Driver for SPACEMIT CCIC MIPI D-PHY MODULE
|
|
+ *
|
|
+ * Copyright(C) 2023 SPACEMIT Micro Limited.
|
|
+ */
|
|
+#ifndef __CSIPHY_H__
|
|
+#define __CSIPHY_H__
|
|
+
|
|
+#include "ccic_drv.h"
|
|
+#include <linux/io.h>
|
|
+#include <linux/clk.h>
|
|
+#include <linux/platform_device.h>
|
|
+#include <linux/list.h>
|
|
+
|
|
+struct csiphy_device;
|
|
+struct mipi_csi2;
|
|
+
|
|
+struct csiphy_device *csiphy_lookup_by_phandle(struct device *dev, const char *name);
|
|
+int csiphy_stop(struct csiphy_device *csiphy_dev);
|
|
+int csiphy_start(struct csiphy_device *csiphy_dev, struct mipi_csi2 *csi);
|
|
+
|
|
+int ccic_csiphy_register(void);
|
|
+void ccic_csiphy_unregister(void);
|
|
+#endif /* ifndef __CSIPHY_H__ */
|
|
diff --git a/drivers/media/platform/spacemit/camera/cam_ccic/dptc_drv.c b/drivers/media/platform/spacemit/camera/cam_ccic/dptc_drv.c
|
|
new file mode 100644
|
|
index 000000000000..111111111111
|
|
--- /dev/null
|
|
+++ b/drivers/media/platform/spacemit/camera/cam_ccic/dptc_drv.c
|
|
@@ -0,0 +1,747 @@
|
|
+// SPDX-License-Identifier: GPL-2.0
|
|
+/*
|
|
+ * dptc_drv.c - Driver for dptc
|
|
+ *
|
|
+ * Copyright(C) 2023 SPACEMIT Micro Limited.
|
|
+ */
|
|
+
|
|
+#ifdef CONFIG_ARCH_ZYNQMP
|
|
+#include <linux/module.h>
|
|
+#include <linux/platform_device.h>
|
|
+#include <linux/slab.h>
|
|
+#include <linux/of_device.h>
|
|
+#include <linux/i2c.h>
|
|
+#include <linux/delay.h>
|
|
+
|
|
+#include "dptc_drv.h"
|
|
+#include "dptc_pll_setting.h"
|
|
+
|
|
+#define DPTCDELAY_US(us) usleep_range(us, us)
|
|
+#define DPTCDELAY_MS(ms) DPTCDELAY_US(ms * 1000)
|
|
+
|
|
+u8 g_current_function = DPTC_FUNC_LIMIT;
|
|
+
|
|
+struct s_pll_setting DP_pll_configs[PLL_SSC_LIMIT][PLL_RATE_LIMIT] = {
|
|
+ { /*PLL_SSC_0 */
|
|
+ { 0x27, 0x76, 0x62, 0x3E, 0x20, 0x44, 0x85, 0x42, 0x0}, /*PLL_RATE_1620 */
|
|
+ { 0x76, 0x62, 0x7F, 0x34, 0x20, 0x4B, 0x95, 0x42, 0x0}, /*PLL_RATE_2700 */
|
|
+ { 0x9E, 0xD8, 0x61, 0x45, 0x20, 0x5B, 0xA5, 0x42, 0x0}, /*PLL_RATE_5400 */
|
|
+ },
|
|
+ { /*PLL_SSC_5000 */
|
|
+ { 0x27, 0x76, 0x62, 0x3E, 0x2A, 0x44, 0x85, 0x42, 0x0}, /*PLL_RATE_1620 */
|
|
+ { 0x76, 0x62, 0x7F, 0x34, 0x2A, 0x4B, 0x95, 0x42, 0x0}, /*PLL_RATE_2700 */
|
|
+ { 0x9E, 0xD8, 0x61, 0x45, 0x2A, 0x5B, 0xA5, 0x42, 0x0}, /*PLL_RATE_5400 */
|
|
+ }
|
|
+};
|
|
+
|
|
+typedef enum {
|
|
+ STANDARD_MODE = 0, /*100Kbps */
|
|
+ FAST_MODE, /*400Kbps */
|
|
+ HS_MODE, /*3.4 Mbps slave/3.3 Mbps master,standard mode when not doing a high speed transfer */
|
|
+ HS_MODE_FAST, /*3.4 Mbps slave/3.3 Mbps master,fast mode when not doing a high speed transfer */
|
|
+} I2C_FAST_MODE;
|
|
+
|
|
+struct k1x_twsi_data dptc_i2c_data;
|
|
+static DEFINE_MUTEX(cmd_mutex);
|
|
+
|
|
+u32 swap_n(u32 addr, unsigned int cmdlen)
|
|
+{
|
|
+ u32 res;
|
|
+
|
|
+ switch (cmdlen) {
|
|
+ case 1:
|
|
+ res = addr;
|
|
+ break;
|
|
+ case 2:
|
|
+ res = (addr & 0xff) << 8 | ((addr >> 8) & 0xff);
|
|
+ break;
|
|
+ case 3:
|
|
+ res = (addr & 0xff) << 16 | (addr & 0xff00) | ((addr >> 16) & 0xff);
|
|
+ break;
|
|
+ case 4:
|
|
+ res = (addr & 0xff) << 24 | ((addr & 0xff00) << 8) |
|
|
+ ((addr & 0xff0000) >> 8) | ((addr >> 24) & 0xff);
|
|
+ break;
|
|
+ default:
|
|
+ res = addr;
|
|
+ break;
|
|
+ }
|
|
+
|
|
+ return res;
|
|
+}
|
|
+
|
|
+int twsi_write_i2c(struct k1x_twsi_data *data)
|
|
+{
|
|
+ int ret = 0;
|
|
+ struct i2c_adapter *adapter;
|
|
+ struct i2c_msg msg;
|
|
+ u8 val[8];
|
|
+ int i, j = 0;
|
|
+
|
|
+ if (!data || !data->addr || !data->reg_len || !data->val_len) {
|
|
+ pr_err("Error: %s, %d", __func__, __LINE__);
|
|
+ return -EINVAL;
|
|
+ }
|
|
+
|
|
+ msg.addr = data->addr;
|
|
+ msg.flags = 0;
|
|
+ msg.len = data->reg_len + data->val_len;
|
|
+ msg.buf = val;
|
|
+
|
|
+ adapter = i2c_get_adapter(data->twsi_no);
|
|
+ if (!adapter)
|
|
+ return -1;
|
|
+
|
|
+ mutex_lock(&cmd_mutex);
|
|
+ for (i = 0; i < data->reg_len; i++)
|
|
+ val[j++] = ((u8 *) (&data->reg))[i];
|
|
+ for (i = 0; i < data->val_len; i++)
|
|
+ val[j++] = ((u8 *) (&data->val))[i];
|
|
+ ret = i2c_transfer(adapter, &msg, 1);
|
|
+ if (ret < 0) {
|
|
+ mutex_unlock(&cmd_mutex);
|
|
+ return ret;
|
|
+ }
|
|
+ mutex_unlock(&cmd_mutex);
|
|
+
|
|
+ return ret;
|
|
+}
|
|
+
|
|
+int twsi_read_i2c(struct k1x_twsi_data *data)
|
|
+{
|
|
+ struct i2c_adapter *adapter;
|
|
+ struct i2c_msg msg;
|
|
+ int ret = 0;
|
|
+ u8 val[4];
|
|
+
|
|
+ if (!data || !data->addr || !data->reg_len || !data->val_len) {
|
|
+ pr_err("%s, error param", __func__);
|
|
+ return -EINVAL;
|
|
+ }
|
|
+
|
|
+ msg.addr = data->addr;
|
|
+ msg.flags = 0;
|
|
+ msg.len = data->reg_len;
|
|
+ msg.buf = val;
|
|
+
|
|
+ adapter = i2c_get_adapter(data->twsi_no);
|
|
+ if (!adapter)
|
|
+ return -1;
|
|
+
|
|
+ mutex_lock(&cmd_mutex);
|
|
+ if (data->reg_len == I2C_8BIT) {
|
|
+ val[0] = data->reg & 0xff;
|
|
+ } else if (data->reg_len == I2C_16BIT) {
|
|
+ val[0] = (data->reg >> 8) & 0xff;
|
|
+ val[1] = data->reg & 0xff;
|
|
+ }
|
|
+ msg.len = data->reg_len;
|
|
+ ret = i2c_transfer(adapter, &msg, 1);
|
|
+ if (ret < 0) {
|
|
+ mutex_unlock(&cmd_mutex);
|
|
+ goto err;
|
|
+ }
|
|
+
|
|
+ msg.flags = I2C_M_RD;
|
|
+ msg.len = data->val_len;
|
|
+ ret = i2c_transfer(adapter, &msg, 1);
|
|
+ if (ret < 0) {
|
|
+ mutex_unlock(&cmd_mutex);
|
|
+ goto err;
|
|
+ }
|
|
+
|
|
+ if (data->val_len == I2C_8BIT)
|
|
+ data->val = val[0];
|
|
+ else if (data->val_len == I2C_16BIT)
|
|
+ data->val = (val[0] << 8) + val[1];
|
|
+ else if (data->val_len == I2C_32BIT)
|
|
+ data->val = (val[3] << 24) + (val[2] << 16) + (val[1] << 8) + val[0];
|
|
+ //pr_info("twsi_read_i2c: val[0]=0x%x,val[1]=0x%x,val[2]=0x%x,val[3]=0x%x\n",val[0],val[1],val[2],val[3]);
|
|
+ mutex_unlock(&cmd_mutex);
|
|
+
|
|
+ return 0;
|
|
+
|
|
+err:
|
|
+ pr_err("Failed reading register 0x%02x!", data->reg);
|
|
+ return ret;
|
|
+}
|
|
+
|
|
+void TWSI_Init(I2C_FAST_MODE mode, unsigned int i2c_no)
|
|
+{
|
|
+ dptc_i2c_data.twsi_no = i2c_no;
|
|
+ dptc_i2c_data.addr = 0x6c;
|
|
+ dptc_i2c_data.reg_len = 1; //I2C_8BIT;
|
|
+ dptc_i2c_data.val_len = 4; //I2C_32BIT;
|
|
+}
|
|
+
|
|
+int TWSI_REG_READ_DPTC(u8 i2c_no, u8 slaveaddress, u8 addr)
|
|
+{
|
|
+ int ret = 0;
|
|
+
|
|
+ dptc_i2c_data.twsi_no = i2c_no;
|
|
+ dptc_i2c_data.addr = slaveaddress;
|
|
+ dptc_i2c_data.reg = addr;
|
|
+ dptc_i2c_data.val = 0x00;
|
|
+ ret = twsi_read_i2c(&dptc_i2c_data);
|
|
+ if (ret == 0)
|
|
+ return dptc_i2c_data.val;
|
|
+ else
|
|
+ return -1;
|
|
+}
|
|
+
|
|
+int TWSI_REG_WRITE_DPTC(u8 i2c_no, u8 slaveaddress, u8 addr, u32 data)
|
|
+{
|
|
+ int ret = 0;
|
|
+
|
|
+ dptc_i2c_data.twsi_no = i2c_no;
|
|
+ dptc_i2c_data.addr = slaveaddress;
|
|
+ dptc_i2c_data.reg = addr;
|
|
+ dptc_i2c_data.val = data;
|
|
+ ret = twsi_write_i2c(&dptc_i2c_data);
|
|
+ if (ret == 0)
|
|
+ return 1;
|
|
+ else
|
|
+ return -1;
|
|
+}
|
|
+
|
|
+int TWSI_REG_WRITE_CAM(u8 i2c_no, u8 slaveaddress, u16 addr, u8 data)
|
|
+{
|
|
+#if 0
|
|
+ u32 twsi_param = 0x21; /*2 byte address, 1 byte data */
|
|
+ int ret = 0;
|
|
+
|
|
+ ret = TWSIput(i2c_no, slaveaddress, addr, data, &twsi_param);
|
|
+ if (ret == 0)
|
|
+ return 1;
|
|
+ else
|
|
+ return -1;
|
|
+#else
|
|
+ return -1;
|
|
+#endif
|
|
+}
|
|
+
|
|
+int TWSI_REG_READ_CAM(u8 i2c_no, u8 slaveaddress, u16 addr)
|
|
+{
|
|
+#if 0
|
|
+ u32 twsi_param = 0x21; /*2 byte address, 1 byte data */
|
|
+ int ret = 0;
|
|
+
|
|
+ ret = TWSIget(i2c_no, slaveaddress, addr, &twsi_param);
|
|
+ if (ret == 0)
|
|
+ return twsi_param;
|
|
+ else
|
|
+ return -1;
|
|
+#else
|
|
+ return -1;
|
|
+#endif
|
|
+}
|
|
+
|
|
+static u32 top_read(u8 reg)
|
|
+{
|
|
+ return TWSI_REG_READ_DPTC(DPTC_I2C_PORT, TOP_SLAVE_ADDR, reg >> 2);
|
|
+}
|
|
+
|
|
+static void top_write(u8 reg, u32 data)
|
|
+{
|
|
+ //pr_info("DPTC:top_write: 0x%x = 0x%x\r\n", reg, data);
|
|
+ TWSI_REG_WRITE_DPTC(DPTC_I2C_PORT, TOP_SLAVE_ADDR, reg >> 2, data);
|
|
+}
|
|
+
|
|
+static void top_set_bits(u8 reg, u32 bits)
|
|
+{
|
|
+ top_write(reg, top_read(reg) | bits);
|
|
+}
|
|
+
|
|
+static void top_clear_bits(u8 reg, u32 bits)
|
|
+{
|
|
+ top_write(reg, top_read(reg) & ~bits);
|
|
+}
|
|
+
|
|
+static void top_write_bits(u8 reg, u32 value, u8 shifts, u8 width)
|
|
+{
|
|
+ u32 reg_val;
|
|
+ u32 mask = 0;
|
|
+
|
|
+ mask = (1 << width) - 1;
|
|
+ reg_val = top_read(reg);
|
|
+ reg_val &= ~(mask << shifts);
|
|
+ reg_val |= (value & mask) << shifts;
|
|
+ top_write(reg, reg_val);
|
|
+}
|
|
+
|
|
+u32 top_getverion(void)
|
|
+{
|
|
+ u32 reg = 0;
|
|
+ u32 version = 0;
|
|
+
|
|
+ pr_info("DPTC:top_getverion ++\r\n");
|
|
+ reg = top_read(VERSION_REG);
|
|
+ pr_info("DPTC:top_getverion 0x0 = 0x%x\r\n", reg);
|
|
+
|
|
+ version = reg & 0xFF;
|
|
+ pr_info("DPTC:top_getverion version is 0x%x\r\n", version);
|
|
+
|
|
+ return version;
|
|
+}
|
|
+
|
|
+int32_t top_powerup(void)
|
|
+{
|
|
+ u32 reg = 0;
|
|
+ u32 timeout = 1000;
|
|
+
|
|
+ pr_info("DPTC:top_powerup ++\r\n");
|
|
+
|
|
+ reg = top_read(PLL_CTRL);
|
|
+ while (0 == (reg & (RCAL_DONE | RCAL_TIMEOUT)) && timeout > 0) {
|
|
+ timeout--;
|
|
+ DPTCDELAY_US(10);
|
|
+ reg = top_read(PLL_CTRL);
|
|
+ }
|
|
+ if (timeout == 0) {
|
|
+ pr_err("DPTC:top_powerup timeout(0x%x)!\r\n", reg);
|
|
+ return -1;
|
|
+ }
|
|
+ pr_info("DPTC:top_powerup OK (0x%x)!\r\n", reg);
|
|
+ return 0;
|
|
+}
|
|
+
|
|
+int32_t top_init(u8 func)
|
|
+{
|
|
+ pr_info("DPTC:top_init ++ (%d)\r\n", func);
|
|
+
|
|
+ g_current_function = func;
|
|
+
|
|
+ switch (func) {
|
|
+ case DPTC_FUNC_0: /*DP PHY TEST MODE */
|
|
+ top_write(FUNCTION_SEL, 0);
|
|
+ top_clear_bits(PLL_CTRL, PLL_CTRL_SEL); /*DP control pll */
|
|
+ top_write(CLK_RESET, 0); /*reset devices */
|
|
+ DPTCDELAY_MS(5);
|
|
+ top_write(CLK_RESET, DPC_SW_RST); /*release DP */
|
|
+ break;
|
|
+ case DPTC_FUNC_1: /*DP CONTROLLER + PHY TEST MODE */
|
|
+ top_write(FUNCTION_SEL, 1);
|
|
+ top_clear_bits(PLL_CTRL, PLL_CTRL_SEL); /*DP control pll */
|
|
+ top_write(CLK_RESET, 0); /*reset devices */
|
|
+ DPTCDELAY_MS(5);
|
|
+ top_write(CLK_RESET, DPC_SW_RST); /*release DP */
|
|
+ break;
|
|
+ case DPTC_FUNC_2: /*CSI + DP TEST MODE */
|
|
+ top_write(FUNCTION_SEL, 2);
|
|
+ top_clear_bits(PLL_CTRL, PLL_CTRL_SEL); /*DP control pll */
|
|
+ top_write(CLK_RESET, 0); /*reset devices */
|
|
+ DPTCDELAY_MS(5);
|
|
+ top_write(CLK_RESET, DPC_SW_RST | CSI_SW_RST); /*release DP & CSI */
|
|
+ break;
|
|
+ case DPTC_FUNC_3: /*CSI PHY TEST MODE */
|
|
+ top_write(FUNCTION_SEL, 3);
|
|
+ top_set_bits(PLL_CTRL, PLL_CTRL_SEL); /*TOP control pll */
|
|
+ top_write(CLK_RESET, 0); /*reset devices */
|
|
+ DPTCDELAY_US(5);
|
|
+ top_write(CLK_RESET, CSI_SW_RST | (FROM_PLL << 4)); /*release CSI */
|
|
+ break;
|
|
+ case DPTC_FUNC_4: /*DSI PHY TEST MODE */
|
|
+ top_write(FUNCTION_SEL, 4);
|
|
+ top_set_bits(PLL_CTRL, PLL_CTRL_SEL); /*TOP control pll */
|
|
+ top_write(CLK_RESET, 0); /*reset devices */
|
|
+ DPTCDELAY_US(5);
|
|
+ top_write(CLK_RESET, DSI_SW_RST); /*release DP & DSI */
|
|
+ break;
|
|
+ default:
|
|
+ g_current_function = DPTC_FUNC_LIMIT;
|
|
+ pr_err("DPTC:top_sel_func Invalid param(%d)!\r\n", func);
|
|
+ return -1;
|
|
+ }
|
|
+ return 0;
|
|
+}
|
|
+
|
|
+static void top_enable_clks(u32 clks, u32 enable)
|
|
+{
|
|
+ u32 reg = 0;
|
|
+
|
|
+ pr_info("top_enable_clks ++ (0x%x, enable=%d)\r\n", clks, enable);
|
|
+
|
|
+ if (0 != (clks & DP_CLK_DSI))
|
|
+ reg |= PLL_CLK_DSI;
|
|
+ if (0 != (clks & DP_CLK_CSI))
|
|
+ reg |= PLL_CLK_CSI;
|
|
+ if (0 != (clks & DP_CLK_LS))
|
|
+ reg |= PLL_CLK_LS;
|
|
+ if (0 != (clks & DP_CLK_ESC))
|
|
+ reg |= PLL_CLK_ESC;
|
|
+ if (0 != (clks & DP_CLK_20X_40X))
|
|
+ reg |= PLL_CLK_20X_40X;
|
|
+
|
|
+ if (enable)
|
|
+ top_write_bits(PLL_CFG2, reg, 8, 8);
|
|
+ else
|
|
+ top_clear_bits(PLL_CFG2, reg << 8);
|
|
+}
|
|
+
|
|
+static int top_set_pllrate(u8 freq, u8 ssc)
|
|
+{
|
|
+ struct s_pll_setting *pll = NULL;
|
|
+ u32 reg0 = 0, reg1 = 0, reg2 = 0;
|
|
+ u32 reg = 0;
|
|
+ u32 timeout = 50;
|
|
+
|
|
+ pr_info("DP:top_set_pllrate ++ (%d)\r\n", freq);
|
|
+
|
|
+ if (freq >= PLL_RATE_LIMIT || ssc >= PLL_SSC_LIMIT) {
|
|
+ pr_err("DP:top_set_pllrate Invalid param(%d)!\r\n", freq);
|
|
+ return -1;
|
|
+ }
|
|
+
|
|
+ pll = &DP_pll_configs[ssc][freq];
|
|
+
|
|
+ reg0 = (pll->reg3 << 24) | (pll->reg2 << 16) | (pll->reg1 << 8) | pll->reg0;
|
|
+ reg1 = (pll->reg7 << 24) | (pll->reg6 << 16) | (pll->reg5 << 8) | pll->reg4;
|
|
+ reg2 = pll->reg8;
|
|
+
|
|
+ /*power down pll */
|
|
+ top_clear_bits(PLL_CTRL, PLL_POWER_UP);
|
|
+
|
|
+ /*config pll */
|
|
+ top_write(PLL_CFG0, reg0);
|
|
+ top_write(PLL_CFG1, reg1);
|
|
+ top_write_bits(PLL_CFG2, reg2, 0, 8);
|
|
+
|
|
+ /*power up pll */
|
|
+ top_set_bits(PLL_CTRL, PLL_POWER_UP);
|
|
+
|
|
+ /*wait for pll_lk */
|
|
+ reg = top_read(PLL_CTRL);
|
|
+ while (((reg & PLL_LOCK) != PLL_LOCK) && timeout > 0) {
|
|
+ timeout--;
|
|
+ DPTCDELAY_US(10);
|
|
+ reg = top_read(PLL_CTRL);
|
|
+ }
|
|
+
|
|
+ if (timeout == 0) {
|
|
+ pr_err("DP:top_set_pllrate pll lock timeout(0x%x)!\r\n", reg);
|
|
+ return -1;
|
|
+ }
|
|
+ pr_info("DP:top_set_pllrate pll locked at %d KHz\r\n", freq);
|
|
+
|
|
+ return 0;
|
|
+}
|
|
+
|
|
+int32_t top_pll_init(u32 func)
|
|
+{
|
|
+ int ret = 0;
|
|
+
|
|
+ pr_info("top_pll_init (func: %d)+++!\r\n", func);
|
|
+
|
|
+ switch (func) {
|
|
+ case DPTC_FUNC_0: /*DP PHY TEST MODE */
|
|
+ break;
|
|
+ case DPTC_FUNC_1: /*DP CONTROLLER + PHY TEST MODE */
|
|
+ break;
|
|
+ case DPTC_FUNC_2: /*CSI + DP TEST MODE */
|
|
+ break;
|
|
+ case DPTC_FUNC_3: /*CSI PHY TEST MODE */
|
|
+ top_enable_clks(DP_CLK_FUNC3, 1);
|
|
+ ret = top_set_pllrate(PLL_RATE_5400, PLL_SSC_0); //PLL_SSC_5000
|
|
+ break;
|
|
+ case DPTC_FUNC_4: /*DSI PHY TEST MODE */
|
|
+ break;
|
|
+ default:
|
|
+ g_current_function = DPTC_FUNC_LIMIT;
|
|
+ pr_err("top_pll_init Invalid param(%d)!\r\n", func);
|
|
+ return -1;
|
|
+ }
|
|
+
|
|
+ if (ret != 0)
|
|
+ pr_info("top_pll_init fail (%d)!\r\n", ret);
|
|
+
|
|
+ return ret;
|
|
+}
|
|
+
|
|
+static u32 dp_read(u8 reg)
|
|
+{
|
|
+ return TWSI_REG_READ_DPTC(DPTC_I2C_PORT, DP_SLAVE_ADDR, reg >> 2);
|
|
+}
|
|
+
|
|
+static void dp_write(u8 reg, u32 data)
|
|
+{
|
|
+ //pr_info("DPTC:top_write: 0x%x = 0x%x\r\n", reg, data);
|
|
+ TWSI_REG_WRITE_DPTC(DPTC_I2C_PORT, DP_SLAVE_ADDR, reg >> 2, data);
|
|
+}
|
|
+
|
|
+static void dp_set_bits(u8 reg, u32 bits)
|
|
+{
|
|
+ dp_write(reg, dp_read(reg) | bits);
|
|
+}
|
|
+
|
|
+static void dp_clear_bits(u8 reg, u32 bits)
|
|
+{
|
|
+ dp_write(reg, dp_read(reg) & ~bits);
|
|
+}
|
|
+
|
|
+static void dp_write_bits(u8 reg, u32 value, u8 shifts, u8 width)
|
|
+{
|
|
+ u32 reg_val;
|
|
+ u32 mask = 0;
|
|
+
|
|
+ mask = (1 << width) - 1;
|
|
+ reg_val = dp_read(reg);
|
|
+ reg_val &= ~(mask << shifts);
|
|
+ reg_val |= (value & mask) << shifts;
|
|
+ dp_write(reg, reg_val);
|
|
+}
|
|
+
|
|
+static void dp_enable_clks(u32 clks, u32 enable)
|
|
+{
|
|
+ u32 reg = 0;
|
|
+
|
|
+ pr_info("DP:DP_enable_clks ++ (0x%x, enable=%d)\r\n", clks, enable);
|
|
+
|
|
+ if (0 != (clks & DP_CLK_DSI))
|
|
+ reg |= PLL_CLK_DSI;
|
|
+ if (0 != (clks & DP_CLK_CSI))
|
|
+ reg |= PLL_CLK_CSI;
|
|
+ if (0 != (clks & DP_CLK_LS))
|
|
+ reg |= PLL_CLK_LS;
|
|
+ if (0 != (clks & DP_CLK_ESC))
|
|
+ reg |= PLL_CLK_ESC;
|
|
+ if (0 != (clks & DP_CLK_20X_40X))
|
|
+ reg |= PLL_CLK_20X_40X;
|
|
+
|
|
+ if (enable)
|
|
+ dp_write_bits(PHY_CTRL2, reg, 8, 6);
|
|
+ else
|
|
+ dp_clear_bits(PHY_CTRL2, reg << 8);
|
|
+}
|
|
+
|
|
+static int dp_set_pllrate(u8 freq, u8 ssc)
|
|
+{
|
|
+ struct s_pll_setting *pll = NULL;
|
|
+ u32 reg0 = 0, reg1 = 0, reg2 = 0;
|
|
+ u32 reg = 0;
|
|
+ u32 timeout = 50;
|
|
+
|
|
+ pr_info("DP:DP_set_pllrate ++ (%d)\r\n", freq);
|
|
+
|
|
+ if (freq >= PLL_RATE_LIMIT || ssc >= PLL_SSC_LIMIT) {
|
|
+ pr_err("DP:DP_set_pllrate Invalid param(%d)!\r\n", freq);
|
|
+ return -1;
|
|
+ }
|
|
+
|
|
+ pll = &DP_pll_configs[ssc][freq];
|
|
+ reg0 = (pll->reg3 << 24) | (pll->reg2 << 16) | (pll->reg1 << 8) | pll->reg0;
|
|
+ reg1 = (pll->reg7 << 24) | (pll->reg6 << 16) | (pll->reg5 << 8) | pll->reg4;
|
|
+ reg2 = pll->reg8;
|
|
+
|
|
+ /*power down pll */
|
|
+ dp_clear_bits(PHY_PU_CTRL, PU_HPD | PU_AUX | PU_LANE1 | PU_LANE0 | PU_PLL);
|
|
+
|
|
+ /*config pll */
|
|
+ dp_write(PHY_CTRL0, reg0);
|
|
+ dp_write(PHY_CTRL1, reg1);
|
|
+ dp_write_bits(PHY_CTRL2, reg2, 0, 8);
|
|
+
|
|
+ /*power up pll, aux, hpd */
|
|
+ dp_set_bits(PHY_PU_CTRL, PU_HPD | PU_AUX | PU_PLL);
|
|
+
|
|
+ /*wait for pll_lk */
|
|
+ reg = dp_read(PHY_PU_CTRL);
|
|
+ while (((reg & PLL_LK) != PLL_LK) && timeout > 0) {
|
|
+ timeout--;
|
|
+ DPTCDELAY_US(10);
|
|
+ reg = dp_read(PHY_PU_CTRL);
|
|
+ }
|
|
+
|
|
+ if (timeout == 0) {
|
|
+ pr_err("DP:DP_set_pllrate pll lock timeout(0x%x)!\r\n", reg);
|
|
+ return -1;
|
|
+ }
|
|
+ pr_info("DP:DP_set_pllrate pll locked at %d KHz\r\n", freq);
|
|
+
|
|
+ return 0;
|
|
+}
|
|
+
|
|
+int32_t dp_init(u32 func)
|
|
+{
|
|
+	int ret = 0;
|
|
+	pr_info("DP:DP_init (func: %d)+++!\r\n", func);
|
|
+
|
|
+	switch (func) {
|
|
+	case DPTC_FUNC_0:	/*DP PHY TEST MODE */
|
|
+		break;
|
|
+	case DPTC_FUNC_1:	/*DP CONTROLLER + PHY TEST MODE */
|
|
+		break;
|
|
+	case DPTC_FUNC_2:	/*CSI + DP TEST MODE */
|
|
+		dp_enable_clks(DP_CLK_ALL, 1);
|
|
+		ret = dp_set_pllrate(PLL_RATE_5400, PLL_SSC_5000);
|
|
+		break;
|
|
+	case DPTC_FUNC_3:	/*CSI PHY TEST MODE */
|
|
+		dp_enable_clks(DP_CLK_FUNC3, 1);
|
|
+		ret = dp_set_pllrate(PLL_RATE_5400, PLL_SSC_5000);
|
|
+		break;
|
|
+	case DPTC_FUNC_4:	/*DSI PHY TEST MODE */
|
|
+		break;
|
|
+	default:
|
|
+		g_current_function = DPTC_FUNC_LIMIT;
|
|
+		/* fix: error message identified the wrong function (copy-paste) */
|
|
+		pr_err("DP:DP_init Invalid param(%d)!\r\n", func);
|
|
+		return -1;
|
|
+	}
|
|
+
|
|
+	if (ret != 0)
|
|
+		pr_err("DP:DP_init fail (%d)!\r\n", ret);
|
|
+
|
|
+	return ret;
|
|
+}
|
|
+
|
|
+static u32 dptc_csi_read(u8 reg)
|
|
+{
|
|
+ return TWSI_REG_READ_DPTC(DPTC_I2C_PORT, CSI_SLAVE_ADDR, reg >> 2);
|
|
+}
|
|
+
|
|
+static void dptc_csi_write(u8 reg, u32 data)
|
|
+{
|
|
+ //pr_info("DPTC:top_write: 0x%x = 0x%x\r\n", reg, data);
|
|
+ TWSI_REG_WRITE_DPTC(DPTC_I2C_PORT, CSI_SLAVE_ADDR, reg >> 2, data);
|
|
+}
|
|
+
|
|
+#if 0
|
|
+static void dptc_csi_set_bits(u8 reg, u32 bits)
|
|
+{
|
|
+ dptc_csi_write(reg, dptc_csi_read(reg) | bits);
|
|
+}
|
|
+
|
|
+static void dptc_csi_clear_bits(u8 reg, u32 bits)
|
|
+{
|
|
+ dptc_csi_write(reg, dptc_csi_read(reg) & ~bits);
|
|
+}
|
|
+#endif
|
|
+static void dptc_csi_write_bits(u8 reg, u32 value, u8 shifts, u8 width)
|
|
+{
|
|
+	u32 reg_val;
|
|
+	u32 mask = 0;
|
|
+
|
|
+	mask = (1 << width) - 1;
|
|
+	reg_val = dptc_csi_read(reg);	/* fix: was top_read(); RMW must read the CSI slave, not TOP */
|
|
+	reg_val &= ~(mask << shifts);
|
|
+	reg_val |= (value & mask) << shifts;
|
|
+	dptc_csi_write(reg, reg_val);
|
|
+}
|
|
+
|
|
+void dptc_csi_dphy_setting(u8 lane_num)
|
|
+{
|
|
+ dptc_csi_write_bits(dptc_csi_phy_ctrl_adr, 0x01, 8, 1); //analog power on:bit8
|
|
+ //bit[31:30] adjust the 25uA bias current for HSRX. 10b:def, 11b:max
|
|
+ //bit[18:16] hsrx termination adjustment.
|
|
+ dptc_csi_write(dptc_csi_phy_ana_cfg0_adr, 0xa28c8888); //0xa2848888 //0xe2848888 //0xa2848888
|
|
+ dptc_csi_write(dptc_csi_phy_timing_adr, 0x15001500); //thomaszhang dptc_csi_write_bits(dptc_csi_phy_timing_adr, 0x00000820, 0, 16);// 0x00001501 bit[15:8]:HS_SETTLE, bit[7:0]:HS_TERM_ENA xUI
|
|
+
|
|
+ if (lane_num == 1) //bit[3:0]:Lane Enable,Each bit controls each of the MIPI lane
|
|
+ dptc_csi_write_bits(dptc_csi_phy_ctrl_adr, 0x11, 0, 8);
|
|
+ else if (lane_num == 2)
|
|
+ dptc_csi_write_bits(dptc_csi_phy_ctrl_adr, 0x33, 0, 8);
|
|
+ else if (lane_num == 4)
|
|
+ dptc_csi_write_bits(dptc_csi_phy_ctrl_adr, 0xff, 0, 8);
|
|
+}
|
|
+
|
|
+void dptc_csi_reg_setting(u8 RAW_type, u32 sensor_width, u32 sensor_height,
|
|
+ u8 csi_lanes)
|
|
+{
|
|
+ u32 csi2_laneend = 0;
|
|
+ u32 csi2_eccend = 0;
|
|
+ u32 csi2_eot = 0;
|
|
+ u32 csi2_parse_error = 0;
|
|
+ u32 csi2_vsynclen = 4;
|
|
+ u32 csi2_hsynclen = 4;
|
|
+// u32 csi2_ana_pu = 1;
|
|
+ u32 csi2_hs_settle = 0x8;
|
|
+ u32 csi2_hs_termen = 0x20;
|
|
+ u32 csi2_ck_settle = 0x10;
|
|
+ u32 csi2_ck_termen = 0x1;
|
|
+// u32 csi2_dsi_mode = 0x0; //1: dsi mode; 0:csi mode
|
|
+// u32 csi2_packet_code = 0x2b;
|
|
+// u32 csi2_hsync_start_code = 0x02;
|
|
+// u32 csi2_vsync_start_code = 0x00;
|
|
+
|
|
+ dptc_csi_write(dptc_csi_ctrl_adr,
|
|
+ ((csi2_hsynclen << 10) + (csi2_vsynclen << 6) +
|
|
+ (csi2_parse_error << 5) + (csi2_eccend << 4) + (csi2_eot << 3) +
|
|
+ (csi2_laneend << 2) + (csi_lanes - 1)));
|
|
+ dptc_csi_write(dptc_csi_image_size_adr, (sensor_height << 16) + sensor_width);
|
|
+
|
|
+ dptc_csi_write(dptc_csi_status_adr, 0xffffffff);
|
|
+
|
|
+// dptc_csi_write(dptc_csi_phy_ctrl_adr, ((csi2_ana_pu << 8) + (CSI2_DPHY5_LANE_ENA(csi_lanes) << 4) + CSI2_DPHY5_LANE_ENA(csi_lanes)));
|
|
+ dptc_csi_write(dptc_csi_phy_timing_adr,
|
|
+ ((csi2_ck_settle << 24) + (csi2_ck_termen << 16) +
|
|
+ (csi2_hs_settle << 8) + csi2_hs_termen));
|
|
+ dptc_csi_write(dptc_csi_phy_ana_cfg0_adr, 0xa28c8888);
|
|
+ dptc_csi_write(dptc_csi_phy_ana_cfg1_adr, 0x00000000);
|
|
+ dptc_csi_write(dptc_csi_status_adr, 0xffffffff);
|
|
+ dptc_csi_write_bits(dptc_csi_phy_status_adr, 0x7f, 1, 7); //dptc_csi_write(dptc_csi_phy_status_adr, 0x0000000f);
|
|
+// dptc_csi_write(dptc_csi_sync_code_adr, (csi2_dsi_mode << 24) + (csi2_packet_code << 16) + (csi2_hsync_start_code << 8) + csi2_vsync_start_code);
|
|
+ dptc_csi_dphy_setting(csi_lanes);
|
|
+}
|
|
+
|
|
+int DPTC_func3_open(void)
|
|
+{
|
|
+ s32 ret = 0;
|
|
+
|
|
+ pr_info("DPTC: DPTC_func3_open enter!\n");
|
|
+
|
|
+ TWSI_Init(STANDARD_MODE, DPTC_I2C_PORT); //STANDARD_MODE
|
|
+ DPTCDELAY_US(100); //thomaszhang DPTCDELAY_MS(100);
|
|
+
|
|
+ ret = top_powerup();
|
|
+ if (ret != 0) {
|
|
+ pr_err("DPTC: DPTC_func3_open: top_powerup fail!\r\n");
|
|
+ return -1;
|
|
+ }
|
|
+ top_getverion();
|
|
+ ret = top_init(DPTC_FUNC_3);
|
|
+ if (ret != 0) {
|
|
+ pr_err("DPTC: DPTC_func3_open: top_init fail!\r\n");
|
|
+ return -1;
|
|
+ }
|
|
+ ret = top_pll_init(DPTC_FUNC_3);
|
|
+ if (ret != 0) {
|
|
+ pr_err("DPTC: DPTC_func3_open: top_pll_init fail!\r\n");
|
|
+ return -1;
|
|
+ }
|
|
+
|
|
+ return 0;
|
|
+}
|
|
+
|
|
+int DPTC_func3_close(void)
|
|
+{
|
|
+ return 0;
|
|
+}
|
|
+
|
|
+void dptc_csi_status_handler(u32 idx)
|
|
+{
|
|
+ u32 irqs, i;
|
|
+
|
|
+ DPTCDELAY_MS(1000);
|
|
+
|
|
+ for (i = 0; i <= 0x28; i += 4) {
|
|
+ irqs = dptc_csi_read(i);
|
|
+ pr_info("dptc_csi_status_adr: 0x%x = 0x%08x\n", i, irqs);
|
|
+ }
|
|
+
|
|
+ for (i = 0; i <= 0x18; i += 4) {
|
|
+ irqs = top_read(i);
|
|
+ pr_info("top_read: 0x%x = 0x%08x\n", i, irqs);
|
|
+ }
|
|
+ dptc_csi_write(dptc_csi_status_adr, 0xffffffff);
|
|
+ dptc_csi_write_bits(dptc_csi_phy_status_adr, 0x7f, 1, 7);
|
|
+}
|
|
+
|
|
+void dptc_csi_status_read(void)
|
|
+{
|
|
+ u32 irqs;
|
|
+
|
|
+ DPTCDELAY_MS(1000);
|
|
+
|
|
+ irqs = dptc_csi_read(dptc_csi_status_adr);
|
|
+ pr_info("dptc_csi_status_adr: 0x%x = 0x%08x\n", dptc_csi_status_adr, irqs);
|
|
+}
|
|
+#endif
|
|
diff --git a/drivers/media/platform/spacemit/camera/cam_ccic/dptc_drv.h b/drivers/media/platform/spacemit/camera/cam_ccic/dptc_drv.h
|
|
new file mode 100644
|
|
index 000000000000..111111111111
|
|
--- /dev/null
|
|
+++ b/drivers/media/platform/spacemit/camera/cam_ccic/dptc_drv.h
|
|
@@ -0,0 +1,110 @@
|
|
+/* SPDX-License-Identifier: GPL-2.0 */
|
|
+/*
|
|
+ * dptc_drv.h
|
|
+ *
|
|
+ * Copyright (C) 2023 SPACEMIT Micro Limited
|
|
+ */
|
|
+
|
|
+#ifndef __DPTC_DRV_H__
|
|
+#define __DPTC_DRV_H__
|
|
+
|
|
+#define DPTC_I2C_PORT 0 //8
|
|
+#define TOP_SLAVE_ADDR 0x30 /*7-bit, 8-bit: 0x60 */
|
|
+#define DP_SLAVE_ADDR 0x31 /*7-bit, 8-bit: 0x62 */
|
|
+#define CSI_SLAVE_ADDR 0x32 /*7-bit, 8-bit: 0x64 */
|
|
+
|
|
+/* top reg */
|
|
+#define VERSION_REG 0x0
|
|
+#define FUNCTION_SEL 0x4
|
|
+#define PLL_CTRL 0x8
|
|
+#define PLL_CFG0 0xC
|
|
+#define PLL_CFG1 0x10
|
|
+#define PLL_CFG2 0x14
|
|
+#define CLK_RESET 0x18
|
|
+
|
|
+/*0x8*/
|
|
+#define PLL_CTRL_SEL BIT(0)
|
|
+#define PLL_POWER_UP BIT(1)
|
|
+#define RCAL_POWER_UP BIT(2)
|
|
+#define PLL_LOCK BIT(3)
|
|
+#define RCAL_DONE BIT(4)
|
|
+#define RCAL_TIMEOUT BIT(5)
|
|
+
|
|
+/*0x18*/
|
|
+#define DSI_SW_RST BIT(2)
|
|
+#define CSI_SW_RST BIT(1)
|
|
+#define DPC_SW_RST BIT(0)
|
|
+
|
|
+/* DP PLL */
|
|
+#define PHY_SWING_CTRL 0x8C
|
|
+#define PHY_PU_CTRL 0x90
|
|
+#define PHY_CTRL0 0x94
|
|
+#define PHY_CTRL1 0x98
|
|
+#define PHY_CTRL2 0x9C
|
|
+
|
|
+/*0x90*/
|
|
+#define PHY_PN_SWAP BIT(30)
|
|
+#define PHY_LANE_SWAP BIT(29)
|
|
+#define PLL_LK BIT(28)
|
|
+#define AUX_SINGLE_END BIT(27)
|
|
+#define PU_HPD BIT(26)
|
|
+#define PU_AUX BIT(25)
|
|
+#define PU_PLL BIT(24)
|
|
+#define PU_LANE1 BIT(21)
|
|
+#define PU_LANE0 BIT(20)
|
|
+#define EN_20B_MODE BIT(19)
|
|
+
|
|
+/* csi register */
|
|
+#define dptc_csi_ctrl_adr 0x00
|
|
+#define dptc_csi_image_size_adr 0x04
|
|
+#define dptc_csi_gate_ctrl_adr 0x08
|
|
+#define dptc_csi_status_adr 0x0c
|
|
+#define dptc_csi_phy_ctrl_adr 0x10
|
|
+#define dptc_csi_phy_timing_adr 0x14
|
|
+#define dptc_csi_phy_ana_cfg0_adr 0x18
|
|
+#define dptc_csi_phy_ana_cfg1_adr 0x1c
|
|
+#define dptc_csi_phy_status_adr 0x20
|
|
+#define dptc_csi_sync_code_adr 0x24
|
|
+#define dptc_csi_mem_cfg_adr 0x28
|
|
+
|
|
+#define CSI2_DPHY5_LANE_ENA(n) ((1 << (n)) - 1)
|
|
+
|
|
+enum {
|
|
+ DPTC_FUNC_0, /*DP PHY TEST MODE */
|
|
+ DPTC_FUNC_1, /*DP CONTROLLER + PHY TEST MODE */
|
|
+ DPTC_FUNC_2, /*CSI + DP TEST MODE */
|
|
+ DPTC_FUNC_3, /*CSI PHY TEST MODE */
|
|
+ DPTC_FUNC_4, /*DSI PHY TEST MODE */
|
|
+ DPTC_FUNC_LIMIT,
|
|
+};
|
|
+
|
|
+typedef enum {
|
|
+ FROM_PLL = 0,
|
|
+ FROM_PAD = 1,
|
|
+ FROM_REF_CLK = 2,
|
|
+ FROM_RESERVE,
|
|
+} CSI_ESC_CLK_SEL;
|
|
+
|
|
+struct k1x_twsi_data {
|
|
+ u8 twsi_no;
|
|
+ u8 reg_len; /* byte num */
|
|
+ u8 val_len; /* byte num */
|
|
+ u8 addr; /* 7 bit i2c address */
|
|
+ u16 reg;
|
|
+ u32 val;
|
|
+};
|
|
+
|
|
+enum sensor_i2c_len {
|
|
+ I2C_8BIT = 1,
|
|
+ I2C_16BIT = 2,
|
|
+ I2C_24BIT = 3,
|
|
+ I2C_32BIT = 4,
|
|
+};
|
|
+
|
|
+u32 top_getverion(void);
|
|
+int DPTC_func3_open(void);
|
|
+int DPTC_func3_close(void);
|
|
+void dptc_csi_status_handler(u32 idx);
|
|
+void dptc_csi_reg_setting(u8 RAW_type, u32 sensor_width, u32 sensor_height,
|
|
+ u8 csi_lanes);
|
|
+#endif /* ifndef __DPTC_DRV_H__ */
|
|
diff --git a/drivers/media/platform/spacemit/camera/cam_ccic/dptc_pll_setting.h b/drivers/media/platform/spacemit/camera/cam_ccic/dptc_pll_setting.h
|
|
new file mode 100644
|
|
index 000000000000..111111111111
|
|
--- /dev/null
|
|
+++ b/drivers/media/platform/spacemit/camera/cam_ccic/dptc_pll_setting.h
|
|
@@ -0,0 +1,56 @@
|
|
+/* SPDX-License-Identifier: GPL-2.0 */
|
|
+/*
|
|
+ * dptc_pll_setting.h - pll setting for dptc
|
|
+ *
|
|
+ * Copyright (C) 2023 SPACEMIT Micro Limited
|
|
+ */
|
|
+
|
|
+#ifndef DPTC_PLL_SETTING_H_
|
|
+#define DPTC_PLL_SETTING_H_
|
|
+
|
|
+enum {
|
|
+ PLL_RATE_1620,
|
|
+ PLL_RATE_2700,
|
|
+ PLL_RATE_5400,
|
|
+ PLL_RATE_LIMIT
|
|
+};
|
|
+
|
|
+enum {
|
|
+ PLL_SSC_0,
|
|
+ PLL_SSC_5000,
|
|
+ PLL_SSC_LIMIT,
|
|
+};
|
|
+
|
|
+enum {
|
|
+ DP_CLK_DSI = 0x1,
|
|
+ DP_CLK_CSI = 0x2,
|
|
+ DP_CLK_LS = 0x4,
|
|
+ DP_CLK_ESC = 0x8,
|
|
+ DP_CLK_20X_40X = 0x10,
|
|
+ DP_CLK_LIMIT
|
|
+};
|
|
+
|
|
+#define DP_CLK_ALL (DP_CLK_DSI|DP_CLK_CSI|DP_CLK_LS|DP_CLK_ESC|DP_CLK_20X_40X)
|
|
+#define DP_CLK_FUNC3 DP_CLK_ESC //(DP_CLK_CSI|DP_CLK_LS|DP_CLK_ESC)//(DP_CLK_CSI|DP_CLK_ESC)
|
|
+
|
|
+#define PLL_CLK_20X_40X BIT(1)
|
|
+#define PLL_CLK_ESC BIT(2)
|
|
+#define PLL_CLK_LS BIT(3)
|
|
+#define PLL_CLK_CSI BIT(4)
|
|
+#define PLL_CLK_DSI BIT(5)
|
|
+
|
|
+struct s_pll_setting {
|
|
+ uint8_t reg0;
|
|
+ uint8_t reg1;
|
|
+ uint8_t reg2;
|
|
+ uint8_t reg3;
|
|
+ uint8_t reg4;
|
|
+ uint8_t reg5;
|
|
+ uint8_t reg6;
|
|
+ uint8_t reg7;
|
|
+ uint8_t reg8;
|
|
+ uint8_t reserved1;
|
|
+ uint16_t reserved2;
|
|
+};
|
|
+
|
|
+#endif /*DPTC_PLL_SETTING_H_ */
|
|
diff --git a/drivers/media/platform/spacemit/camera/cam_cpp/cpp-v2p0.c b/drivers/media/platform/spacemit/camera/cam_cpp/cpp-v2p0.c
|
|
new file mode 100644
|
|
index 000000000000..111111111111
|
|
--- /dev/null
|
|
+++ b/drivers/media/platform/spacemit/camera/cam_cpp/cpp-v2p0.c
|
|
@@ -0,0 +1,555 @@
|
|
+// SPDX-License-Identifier: GPL-2.0
|
|
+/*
|
|
+ * cpp-v2p0.c
|
|
+ *
|
|
+ * Driver for SPACEMIT K1X Camera Post Process v2.0
|
|
+ *
|
|
+ * Copyright (C) 2023 SPACEMIT Micro Limited
|
|
+ */
|
|
+
|
|
+#include <linux/kernel.h>
|
|
+#include <linux/clk.h>
|
|
+#include <linux/clk-provider.h>
|
|
+#include "regs-cpp-v2p0.h"
|
|
+#include "regs-fbc-v2p0.h"
|
|
+#include "k1x_cpp.h"
|
|
+#include "cpp_dmabuf.h"
|
|
+#include "cam_dbg.h"
|
|
+
|
|
+#undef CAM_MODULE_TAG
|
|
+#define CAM_MODULE_TAG CAM_MDL_CPP
|
|
+
|
|
+#ifdef CONFIG_SPACEMIT_FPGA
|
|
+#define CPP_RESET_TIMEOUT_MS (1000)
|
|
+#else
|
|
+#define CPP_RESET_TIMEOUT_MS (500)
|
|
+#endif
|
|
+
|
|
+static void cpp20_3dnr_src_dmad_cfg(struct cpp_device *cpp_dev,
|
|
+ uint64_t yll0_dmad, uint64_t yll1_dmad,
|
|
+ uint64_t yll2_dmad, uint64_t yll3_dmad,
|
|
+ uint64_t yll4_dmad, uint64_t uvll0_dmad,
|
|
+ uint64_t uvll1_dmad, uint64_t uvll2_dmad,
|
|
+ uint64_t uvll3_dmad, uint64_t uvll4_dmad)
|
|
+{
|
|
+ cpp_reg_write(cpp_dev, REG_CPP_YINPUTBASEADDR_L4, lower_32_bits(yll4_dmad));
|
|
+ cpp_reg_write(cpp_dev, REG_CPP_YINPUTBASEADDR_L3, lower_32_bits(yll3_dmad));
|
|
+ cpp_reg_write(cpp_dev, REG_CPP_YINPUTBASEADDR_L2, lower_32_bits(yll2_dmad));
|
|
+ cpp_reg_write(cpp_dev, REG_CPP_YINPUTBASEADDR_L1, lower_32_bits(yll1_dmad));
|
|
+ cpp_reg_write(cpp_dev, REG_CPP_YINPUTBASEADDR_L0, lower_32_bits(yll0_dmad));
|
|
+ cpp_reg_write(cpp_dev, REG_CPP_UVINPUTBASEADDR_L4, lower_32_bits(uvll4_dmad));
|
|
+ cpp_reg_write(cpp_dev, REG_CPP_UVINPUTBASEADDR_L3, lower_32_bits(uvll3_dmad));
|
|
+ cpp_reg_write(cpp_dev, REG_CPP_UVINPUTBASEADDR_L2, lower_32_bits(uvll2_dmad));
|
|
+ cpp_reg_write(cpp_dev, REG_CPP_UVINPUTBASEADDR_L1, lower_32_bits(uvll1_dmad));
|
|
+ cpp_reg_write(cpp_dev, REG_CPP_UVINPUTBASEADDR_L0, lower_32_bits(uvll0_dmad));
|
|
+
|
|
+ cpp_reg_write(cpp_dev, REG_CPP_YINPUTBASEADDR_L4_H, upper_32_bits(yll4_dmad));
|
|
+ cpp_reg_write(cpp_dev, REG_CPP_YINPUTBASEADDR_L3_H, upper_32_bits(yll3_dmad));
|
|
+ cpp_reg_write(cpp_dev, REG_CPP_YINPUTBASEADDR_L2_H, upper_32_bits(yll2_dmad));
|
|
+ cpp_reg_write(cpp_dev, REG_CPP_YINPUTBASEADDR_L1_H, upper_32_bits(yll1_dmad));
|
|
+ cpp_reg_write(cpp_dev, REG_CPP_YINPUTBASEADDR_L0_H, upper_32_bits(yll0_dmad));
|
|
+ cpp_reg_write(cpp_dev, REG_CPP_UVINPUTBASEADDR_L4_H, upper_32_bits(uvll4_dmad));
|
|
+ cpp_reg_write(cpp_dev, REG_CPP_UVINPUTBASEADDR_L3_H, upper_32_bits(uvll3_dmad));
|
|
+ cpp_reg_write(cpp_dev, REG_CPP_UVINPUTBASEADDR_L2_H, upper_32_bits(uvll2_dmad));
|
|
+ cpp_reg_write(cpp_dev, REG_CPP_UVINPUTBASEADDR_L1_H, upper_32_bits(uvll1_dmad));
|
|
+ cpp_reg_write(cpp_dev, REG_CPP_UVINPUTBASEADDR_L0_H, upper_32_bits(uvll0_dmad));
|
|
+}
|
|
+
|
|
+static void cpp20_3dnr_pre_dmad_cfg(struct cpp_device *cpp_dev,
|
|
+ uint64_t yll0_dmad, uint64_t yll1_dmad,
|
|
+ uint64_t yll2_dmad, uint64_t yll3_dmad,
|
|
+ uint64_t yll4_dmad, uint64_t uvll0_dmad,
|
|
+ uint64_t uvll1_dmad, uint64_t uvll2_dmad,
|
|
+ uint64_t uvll3_dmad, uint64_t uvll4_dmad)
|
|
+{
|
|
+ cpp_reg_write(cpp_dev, REG_CPP_PRE_YINPUTBASEADDR_L4, lower_32_bits(yll4_dmad));
|
|
+ cpp_reg_write(cpp_dev, REG_CPP_PRE_YINPUTBASEADDR_L3, lower_32_bits(yll3_dmad));
|
|
+ cpp_reg_write(cpp_dev, REG_CPP_PRE_YINPUTBASEADDR_L2, lower_32_bits(yll2_dmad));
|
|
+ cpp_reg_write(cpp_dev, REG_CPP_PRE_YINPUTBASEADDR_L1, lower_32_bits(yll1_dmad));
|
|
+ cpp_reg_write(cpp_dev, REG_CPP_PRE_YINPUTBASEADDR_L0, lower_32_bits(yll0_dmad));
|
|
+ cpp_reg_write(cpp_dev, REG_CPP_PRE_UVINPUTBASEADDR_L4, lower_32_bits(uvll4_dmad));
|
|
+ cpp_reg_write(cpp_dev, REG_CPP_PRE_UVINPUTBASEADDR_L3, lower_32_bits(uvll3_dmad));
|
|
+ cpp_reg_write(cpp_dev, REG_CPP_PRE_UVINPUTBASEADDR_L2, lower_32_bits(uvll2_dmad));
|
|
+ cpp_reg_write(cpp_dev, REG_CPP_PRE_UVINPUTBASEADDR_L1, lower_32_bits(uvll1_dmad));
|
|
+ cpp_reg_write(cpp_dev, REG_CPP_PRE_UVINPUTBASEADDR_L0, lower_32_bits(uvll0_dmad));
|
|
+
|
|
+ cpp_reg_write(cpp_dev, REG_CPP_PRE_YINPUTBASEADDR_L4_H, upper_32_bits(yll4_dmad));
|
|
+ cpp_reg_write(cpp_dev, REG_CPP_PRE_YINPUTBASEADDR_L3_H, upper_32_bits(yll3_dmad));
|
|
+ cpp_reg_write(cpp_dev, REG_CPP_PRE_YINPUTBASEADDR_L2_H, upper_32_bits(yll2_dmad));
|
|
+ cpp_reg_write(cpp_dev, REG_CPP_PRE_YINPUTBASEADDR_L1_H, upper_32_bits(yll1_dmad));
|
|
+ cpp_reg_write(cpp_dev, REG_CPP_PRE_YINPUTBASEADDR_L0_H, upper_32_bits(yll0_dmad));
|
|
+ cpp_reg_write(cpp_dev, REG_CPP_PRE_UVINPUTBASEADDR_L4_H, upper_32_bits(uvll4_dmad));
|
|
+ cpp_reg_write(cpp_dev, REG_CPP_PRE_UVINPUTBASEADDR_L3_H, upper_32_bits(uvll3_dmad));
|
|
+ cpp_reg_write(cpp_dev, REG_CPP_PRE_UVINPUTBASEADDR_L2_H, upper_32_bits(uvll2_dmad));
|
|
+ cpp_reg_write(cpp_dev, REG_CPP_PRE_UVINPUTBASEADDR_L1_H, upper_32_bits(uvll1_dmad));
|
|
+ cpp_reg_write(cpp_dev, REG_CPP_PRE_UVINPUTBASEADDR_L0_H, upper_32_bits(uvll0_dmad));
|
|
+}
|
|
+
|
|
+static void cpp20_3dnr_pre_kgain_cfg(struct cpp_device *cpp_dev,
|
|
+ uint64_t kll0_dmad, uint64_t kll1_dmad,
|
|
+ uint64_t kll2_dmad, uint64_t kll3_dmad,
|
|
+ uint64_t kll4_dmad)
|
|
+{
|
|
+ cpp_reg_write(cpp_dev, REG_CPP_PRE_KINPUTBASEADDR_L4, lower_32_bits(kll4_dmad));
|
|
+ cpp_reg_write(cpp_dev, REG_CPP_PRE_KINPUTBASEADDR_L3, lower_32_bits(kll3_dmad));
|
|
+ cpp_reg_write(cpp_dev, REG_CPP_PRE_KINPUTBASEADDR_L2, lower_32_bits(kll2_dmad));
|
|
+ cpp_reg_write(cpp_dev, REG_CPP_PRE_KINPUTBASEADDR_L1, lower_32_bits(kll1_dmad));
|
|
+ cpp_reg_write(cpp_dev, REG_CPP_PRE_KINPUTBASEADDR_L0, lower_32_bits(kll0_dmad));
|
|
+
|
|
+ cpp_reg_write(cpp_dev, REG_CPP_PRE_KINPUTBASEADDR_L4_H, upper_32_bits(kll4_dmad));
|
|
+ cpp_reg_write(cpp_dev, REG_CPP_PRE_KINPUTBASEADDR_L3_H, upper_32_bits(kll3_dmad));
|
|
+ cpp_reg_write(cpp_dev, REG_CPP_PRE_KINPUTBASEADDR_L2_H, upper_32_bits(kll2_dmad));
|
|
+ cpp_reg_write(cpp_dev, REG_CPP_PRE_KINPUTBASEADDR_L1_H, upper_32_bits(kll1_dmad));
|
|
+ cpp_reg_write(cpp_dev, REG_CPP_PRE_KINPUTBASEADDR_L0_H, upper_32_bits(kll0_dmad));
|
|
+}
|
|
+
|
|
+static void cpp20_3dnr_out_dmad_cfg(struct cpp_device *cpp_dev,
|
|
+ uint64_t yll0_dmad, uint64_t yll1_dmad,
|
|
+ uint64_t yll2_dmad, uint64_t yll3_dmad,
|
|
+ uint64_t yll4_dmad, uint64_t uvll0_dmad,
|
|
+ uint64_t uvll1_dmad, uint64_t uvll2_dmad,
|
|
+ uint64_t uvll3_dmad, uint64_t uvll4_dmad)
|
|
+{
|
|
+ cpp_reg_write(cpp_dev, REG_CPP_YWBASEADDR_L4, lower_32_bits(yll4_dmad));
|
|
+ cpp_reg_write(cpp_dev, REG_CPP_YWBASEADDR_L3, lower_32_bits(yll3_dmad));
|
|
+ cpp_reg_write(cpp_dev, REG_CPP_YWBASEADDR_L2, lower_32_bits(yll2_dmad));
|
|
+ cpp_reg_write(cpp_dev, REG_CPP_YWBASEADDR_L1, lower_32_bits(yll1_dmad));
|
|
+ cpp_reg_write(cpp_dev, REG_CPP_YWBASEADDR_L0, lower_32_bits(yll0_dmad));
|
|
+ cpp_reg_write(cpp_dev, REG_CPP_UVWBASEADDR_L4, lower_32_bits(uvll4_dmad));
|
|
+ cpp_reg_write(cpp_dev, REG_CPP_UVWBASEADDR_L3, lower_32_bits(uvll3_dmad));
|
|
+ cpp_reg_write(cpp_dev, REG_CPP_UVWBASEADDR_L2, lower_32_bits(uvll2_dmad));
|
|
+ cpp_reg_write(cpp_dev, REG_CPP_UVWBASEADDR_L1, lower_32_bits(uvll1_dmad));
|
|
+ cpp_reg_write(cpp_dev, REG_CPP_UVWBASEADDR_L0, lower_32_bits(uvll0_dmad));
|
|
+
|
|
+ cpp_reg_write(cpp_dev, REG_CPP_YWBASEADDR_L4_H, upper_32_bits(yll4_dmad));
|
|
+ cpp_reg_write(cpp_dev, REG_CPP_YWBASEADDR_L3_H, upper_32_bits(yll3_dmad));
|
|
+ cpp_reg_write(cpp_dev, REG_CPP_YWBASEADDR_L2_H, upper_32_bits(yll2_dmad));
|
|
+ cpp_reg_write(cpp_dev, REG_CPP_YWBASEADDR_L1_H, upper_32_bits(yll1_dmad));
|
|
+ cpp_reg_write(cpp_dev, REG_CPP_YWBASEADDR_L0_H, upper_32_bits(yll0_dmad));
|
|
+ cpp_reg_write(cpp_dev, REG_CPP_UVWBASEADDR_L4_H, upper_32_bits(uvll4_dmad));
|
|
+ cpp_reg_write(cpp_dev, REG_CPP_UVWBASEADDR_L3_H, upper_32_bits(uvll3_dmad));
|
|
+ cpp_reg_write(cpp_dev, REG_CPP_UVWBASEADDR_L2_H, upper_32_bits(uvll2_dmad));
|
|
+ cpp_reg_write(cpp_dev, REG_CPP_UVWBASEADDR_L1_H, upper_32_bits(uvll1_dmad));
|
|
+ cpp_reg_write(cpp_dev, REG_CPP_UVWBASEADDR_L0_H, upper_32_bits(uvll0_dmad));
|
|
+}
|
|
+
|
|
+static void cpp20_3dnr_out_kgain_cfg(struct cpp_device *cpp_dev,
|
|
+ uint64_t kll0_dmad, uint64_t kll1_dmad,
|
|
+ uint64_t kll2_dmad, uint64_t kll3_dmad,
|
|
+ uint64_t kll4_dmad)
|
|
+{
|
|
+ cpp_reg_write(cpp_dev, REG_CPP_KWBASEADDR_L4, lower_32_bits(kll4_dmad));
|
|
+ cpp_reg_write(cpp_dev, REG_CPP_KWBASEADDR_L3, lower_32_bits(kll3_dmad));
|
|
+ cpp_reg_write(cpp_dev, REG_CPP_KWBASEADDR_L2, lower_32_bits(kll2_dmad));
|
|
+ cpp_reg_write(cpp_dev, REG_CPP_KWBASEADDR_L1, lower_32_bits(kll1_dmad));
|
|
+ cpp_reg_write(cpp_dev, REG_CPP_KWBASEADDR_L0, lower_32_bits(kll0_dmad));
|
|
+
|
|
+ cpp_reg_write(cpp_dev, REG_CPP_KWBASEADDR_L4_H, upper_32_bits(kll4_dmad));
|
|
+ cpp_reg_write(cpp_dev, REG_CPP_KWBASEADDR_L3_H, upper_32_bits(kll3_dmad));
|
|
+ cpp_reg_write(cpp_dev, REG_CPP_KWBASEADDR_L2_H, upper_32_bits(kll2_dmad));
|
|
+ cpp_reg_write(cpp_dev, REG_CPP_KWBASEADDR_L1_H, upper_32_bits(kll1_dmad));
|
|
+ cpp_reg_write(cpp_dev, REG_CPP_KWBASEADDR_L0_H, upper_32_bits(kll0_dmad));
|
|
+}
|
|
+
|
|
+static void cpp20_tnrdec_dmad_cfg(struct cpp_device *cpp_dev, uint64_t dmad)
|
|
+{
|
|
+ cpp_reg_write(cpp_dev, REG_FBC_TNRDEC_HL_ADDR, lower_32_bits(dmad));
|
|
+ cpp_reg_write(cpp_dev, REG_FBC_TNRDEC_HH_ADDR, upper_32_bits(dmad));
|
|
+}
|
|
+
|
|
+static void cpp20_tnrenc_dmad_cfg(struct cpp_device *cpp_dev, uint64_t dmad0,
|
|
+ uint64_t dmad1)
|
|
+{
|
|
+ cpp_reg_write(cpp_dev, REG_FBC_TNRENC_HL_ADDR, lower_32_bits(dmad0));
|
|
+ cpp_reg_write(cpp_dev, REG_FBC_TNRENC_HH_ADDR, upper_32_bits(dmad0));
|
|
+ cpp_reg_write(cpp_dev, REG_FBC_TNRENC_PL_ADDR, lower_32_bits(dmad1));
|
|
+ cpp_reg_write(cpp_dev, REG_FBC_TNRENC_PH_ADDR, upper_32_bits(dmad1));
|
|
+ cpp_reg_write(cpp_dev, REG_FBC_TNRENC_Y_ADDR, 0x00000000);
|
|
+ cpp_reg_write(cpp_dev, REG_FBC_TNRENC_C_ADDR, 0x10000000);
|
|
+}
|
|
+
|
|
+static int cpp_global_reset(struct cpp_device *cpp_dev)
|
|
+{
|
|
+ unsigned long time;
|
|
+
|
|
+ reinit_completion(&cpp_dev->reset_complete);
|
|
+ cpp_dev->state = CPP_STATE_RST;
|
|
+
|
|
+ cpp_reg_set_bit(cpp_dev, REG_CPP_IRQ_MASK, QIRQ_STAT_GLB_RST_DONE);
|
|
+ cpp_reg_write_mask(cpp_dev, REG_CPP_QUEUE_CTRL, (0x10 << 24),
|
|
+ QCTRL_GLB_RST_CYC_MASK);
|
|
+ cpp_reg_set_bit(cpp_dev, REG_CPP_QUEUE_CTRL, QCTRL_GLB_RST);
|
|
+
|
|
+ time = wait_for_completion_timeout(&cpp_dev->reset_complete,
|
|
+ msecs_to_jiffies(CPP_RESET_TIMEOUT_MS));
|
|
+ if (!time) {
|
|
+ cam_err("reset timeout waiting %dms", CPP_RESET_TIMEOUT_MS);
|
|
+ return -EIO;
|
|
+ }
|
|
+
|
|
+ return 0;
|
|
+}
|
|
+
|
|
+static uint32_t cpp20_get_hw_version(struct cpp_device *cpp_dev)
|
|
+{
|
|
+	uint32_t chipId, hwVersion;
|
|
+
|
|
+	chipId = cpp_reg_read(cpp_dev, REG_CPP_CHIP_ID);
|
|
+	if (chipId == 0x69950) {
|
|
+		hwVersion = CPP_HW_VERSION_2_0;
|
|
+	} else if (chipId == 0x699a0) {	/* fix: was "else if (0x699a0)" - always true */
|
|
+		hwVersion = CPP_HW_VERSION_2_1;
|
|
+	} else {
|
|
+		pr_err("%s: invalid chip id 0x%x\n", __func__, chipId);
|
|
+		hwVersion = 0;
|
|
+	}
|
|
+
|
|
+	return hwVersion;
|
|
+}
|
|
+
|
|
+static void cpp_enable_clk_gating(struct cpp_device *cpp_dev, u8 enable)
|
|
+{
|
|
+ if (enable) {
|
|
+ cpp_reg_set_bit(cpp_dev, REG_FBC_TNRDEC_CG_EN, 0x1);
|
|
+ cpp_reg_set_bit(cpp_dev, REG_CPP_3DNR_CG_CTRL, 0x3);
|
|
+ cpp_reg_set_bit(cpp_dev, REG_CPP_QUEUE_CTRL, 0x70000);
|
|
+ } else {
|
|
+ cpp_reg_clr_bit(cpp_dev, REG_FBC_TNRDEC_CG_EN, 0x1);
|
|
+ cpp_reg_clr_bit(cpp_dev, REG_CPP_3DNR_CG_CTRL, 0x3);
|
|
+ cpp_reg_clr_bit(cpp_dev, REG_CPP_QUEUE_CTRL, 0x70000);
|
|
+ }
|
|
+}
|
|
+
|
|
+/**
|
|
+ * val: 0-64byte 1-128byte 2-256byte 3-512byte 4-1024byte
|
|
+ */
|
|
+static void set_3dnr_rd_burst(struct cpp_device *cpp, u8 val)
|
|
+{
|
|
+ val &= 0xff;
|
|
+ cpp_reg_write_mask(cpp, REG_CPP_3DNR_BST_LEN, val << 8, 0xff00);
|
|
+}
|
|
+
|
|
+/**
|
|
+ * no more than 0x40 on z1, or hardware hang when axi slow
|
|
+ * fixed on asic_a0
|
|
+
|
|
+ * val: burst length=val*16 byte
|
|
+ */
|
|
+static void set_3dnr_wr_burst(struct cpp_device *cpp, u8 val)
|
|
+{
|
|
+ val &= 0xff;
|
|
+ cpp_reg_write_mask(cpp, REG_CPP_3DNR_BST_LEN, val, 0xff);
|
|
+}
|
|
+
|
|
+static void set_fbc_dec_burst(struct cpp_device *cpp, u8 val)
|
|
+{
|
|
+ val &= 0xf;
|
|
+ cpp_reg_write_mask(cpp, REG_FBC_TNRDEC_PERF_CTRL, val << 4, 0xf0);
|
|
+}
|
|
+
|
|
+static void set_fbc_enc_burst(struct cpp_device *cpp, u8 val)
|
|
+{
|
|
+ val &= 0x7f;
|
|
+ cpp_reg_write_mask(cpp, REG_FBC_TNRENC_DMAC_LENGTH, val, 0x7f);
|
|
+}
|
|
+
|
|
+static int cpp_set_burst_len(struct cpp_device *cpp)
|
|
+{
|
|
+ set_3dnr_wr_burst(cpp, 0x10);
|
|
+ set_3dnr_rd_burst(cpp, 0x2);
|
|
+ set_fbc_dec_burst(cpp, 0x7);
|
|
+ set_fbc_enc_burst(cpp, 0x7);
|
|
+
|
|
+ return 0;
|
|
+}
|
|
+
|
|
+static void cpp_enable_irqs_common(struct cpp_device *cpp_dev, u8 enable)
|
|
+{
|
|
+ if (enable) {
|
|
+ cpp_reg_set_bit(cpp_dev, REG_CPP_IRQ_MASK, QIRQ_MASK_GEN);
|
|
+ cpp_reg_set_bit(cpp_dev, REG_FBC_TNRDEC_IRQ_MASK,
|
|
+ FIRQ_MASK_DEC_GEN);
|
|
+ cpp_reg_set_bit(cpp_dev, REG_FBC_TNRENC_IRQ_MASK,
|
|
+ FIRQ_MASK_ENC_GEN);
|
|
+ cpp_dev->mmu_dev->ops->setup_timeout_address(cpp_dev->mmu_dev);
|
|
+ } else {
|
|
+ cpp_reg_clr_bit(cpp_dev, REG_FBC_TNRENC_IRQ_MASK,
|
|
+ FIRQ_MASK_ENC_GEN);
|
|
+ cpp_reg_clr_bit(cpp_dev, REG_FBC_TNRDEC_IRQ_MASK,
|
|
+ FIRQ_MASK_DEC_GEN);
|
|
+ cpp_reg_clr_bit(cpp_dev, REG_CPP_IRQ_MASK, QIRQ_MASK_GEN);
|
|
+ }
|
|
+}
|
|
+
|
|
+__maybe_unused static void iommu_isr(struct cpp_device *cpp_dev)
|
|
+{
|
|
+ u32 iommu_status;
|
|
+
|
|
+ iommu_status = cpp_reg_read(cpp_dev, 0x1010);
|
|
+ cpp_reg_write(cpp_dev, 0x1010, iommu_status);
|
|
+ cam_err("cpp iommu irq status: 0x%08x", iommu_status);
|
|
+}
|
|
+
|
|
+static void cpp_isr_err_process(struct cpp_device *cpp_dev, u32 irq_status,
|
|
+ u32 iommu_status)
|
|
+{
|
|
+ if (irq_status & QIRQ_MASK_ERR) {
|
|
+ cpp_dev->state = CPP_STATE_ERR;
|
|
+ cam_err("irq err status: 0x%08x", irq_status);
|
|
+ }
|
|
+
|
|
+ if (iommu_status) {
|
|
+ cpp_dev->state = CPP_STATE_ERR;
|
|
+ cam_err("iommu irq status: 0x%08x", iommu_status);
|
|
+ }
|
|
+}
|
|
+
|
|
+static void cpp20_fbc_dec_handler(struct cpp_device *cpp_dev)
|
|
+{
|
|
+ uint32_t fbcDecStat;
|
|
+
|
|
+ /*
|
|
+ * static const char *const dec_irq_msg[] = {
|
|
+ * "decode_eof", "cfg_swaped", "hdr_rdma", "pld_rdma",
|
|
+ * "core_eof", "wlbuf_eof", "hdr_err", "payload_err",
|
|
+ * "slv_req_err", "rdma_timeout", "dmac_err",
|
|
+ * };
|
|
+ */
|
|
+ fbcDecStat = cpp_reg_read(cpp_dev, REG_FBC_TNRDEC_IRQ_STATUS);
|
|
+ cpp_reg_write(cpp_dev, REG_FBC_TNRDEC_IRQ_STATUS, fbcDecStat);
|
|
+
|
|
+ if (fbcDecStat & FIRQ_MASK_DEC_ERR)
|
|
+ cam_err("tnrdec: 0x%x = 0x%08x", REG_FBC_TNRDEC_IRQ_STATUS, fbcDecStat);
|
|
+}
|
|
+
|
|
+static void cpp20_fbc_enc_handler(struct cpp_device *cpp_dev)
|
|
+{
|
|
+ uint32_t fbcEncStat;
|
|
+
|
|
+ fbcEncStat = cpp_reg_read(cpp_dev, REG_FBC_TNRENC_IRQ_STATUS);
|
|
+ cpp_reg_write(cpp_dev, REG_FBC_TNRENC_IRQ_RAW, fbcEncStat);
|
|
+
|
|
+ if (fbcEncStat & FIRQ_MASK_ENC_ERR)
|
|
+ cam_err("tnrenc: 0x%x = 0x%08x", REG_FBC_TNRENC_IRQ_STATUS, fbcEncStat);
|
|
+
|
|
+ if (fbcEncStat & BIT(16))
|
|
+ cam_dbg("tnrenc: dma_wr_eof");
|
|
+
|
|
+ if (fbcEncStat & BIT(17))
|
|
+ cam_dbg("tnrenc: cfg_update_done");
|
|
+}
|
|
+
|
|
+static irqreturn_t cpp_isr(int irq, void *data)
|
|
+{
|
|
+ struct cpp_device *cpp_dev = (struct cpp_device *)data;
|
|
+ u32 irq_status, iommu_status = 0;
|
|
+ int i;
|
|
+
|
|
+ static const char *const cpp_irq_msg[] = {
|
|
+ "frame_done", "slice_done",
|
|
+ "slice_sof", "top_ctrl_out_done",
|
|
+ "rdma_done", "dmac_werr",
|
|
+ "dmac_rerr", "wdma_timeout",
|
|
+ "rdma_timeout", "global_reset_done",
|
|
+ "afbc_dec0", "afbc_dec1",
|
|
+ "afbc_enc", "iommu",
|
|
+ "3dnr_eof", "rsvd",
|
|
+ "rsvd", "rsvd",
|
|
+ "rsvd", "rsvd",
|
|
+ "rsvd", "rsvd",
|
|
+ "rsvd", "rsvd",
|
|
+ "rsvd", "rsvd",
|
|
+ "rsvd", "rsvd",
|
|
+ "rsvd", "rsvd",
|
|
+ "rsvd", "rsvd",
|
|
+ };
|
|
+
|
|
+ irq_status = cpp_reg_read(cpp_dev, REG_CPP_IRQ_STATUS);
|
|
+
|
|
+ if (irq_status & QIRQ_MASK_IOMMU) {
|
|
+ iommu_status = cpp_reg_read(cpp_dev, 0x1010);
|
|
+ cpp_reg_write(cpp_dev, 0x1010, iommu_status);
|
|
+ }
|
|
+
|
|
+ /* clear 2nd level irq before 1st */
|
|
+ if (irq_status & QIRQ_STAT_FBC_DEC0)
|
|
+ cpp20_fbc_dec_handler(cpp_dev);
|
|
+ if (irq_status & QIRQ_STAT_FBC_ENC)
|
|
+ cpp20_fbc_enc_handler(cpp_dev);
|
|
+ cpp_reg_write(cpp_dev, REG_CPP_IRQ_STATUS, irq_status);
|
|
+
|
|
+ cpp_isr_err_process(cpp_dev, irq_status, iommu_status);
|
|
+
|
|
+ if (irq_status & QIRQ_STAT_GLB_RST_DONE) {
|
|
+ complete(&cpp_dev->reset_complete);
|
|
+ cam_dbg("global reset done");
|
|
+ return IRQ_HANDLED;
|
|
+ }
|
|
+
|
|
+ if (irq_status & QIRQ_STAT_FRM_DONE || cpp_dev->state == CPP_STATE_ERR)
|
|
+ complete(&cpp_dev->run_work.run_complete);
|
|
+
|
|
+ for (i = 0; i < 32; i++)
|
|
+ if (irq_status & (1 << i))
|
|
+ cam_dbg("isr %s", cpp_irq_msg[i]);
|
|
+
|
|
+ return IRQ_HANDLED;
|
|
+}
|
|
+
|
|
+static int cpp20_3dnr_dmad_cfg(struct cpp_device *cpp_dev,
|
|
+ struct cpp_dma_port_info *port_info, u8 port_id)
|
|
+{
|
|
+ if (port_info == NULL || port_id >= MAX_DMA_PORT) {
|
|
+ cam_err("%s: invalid port_info %p, port_id %d", __func__,
|
|
+ port_info, port_id);
|
|
+ return -EINVAL;
|
|
+ }
|
|
+
|
|
+ if (port_id == MAC_DMA_PORT_R0 && port_info->fbc_enabled == true) {
|
|
+ cam_err("fbc is not supported on MAC_DMA_PORT_R0");
|
|
+ return -1;
|
|
+ }
|
|
+
|
|
+ if (port_id == MAC_DMA_PORT_R0) {
|
|
+ cpp20_3dnr_src_dmad_cfg(cpp_dev,
|
|
+ port_info->dma_chnls[MAC_DMA_CHNL_DWT_Y_L0].phy_addr,
|
|
+ port_info->dma_chnls[MAC_DMA_CHNL_DWT_Y_L1].phy_addr,
|
|
+ port_info->dma_chnls[MAC_DMA_CHNL_DWT_Y_L2].phy_addr,
|
|
+ port_info->dma_chnls[MAC_DMA_CHNL_DWT_Y_L3].phy_addr,
|
|
+ port_info->dma_chnls[MAC_DMA_CHNL_DWT_Y_L4].phy_addr,
|
|
+ port_info->dma_chnls[MAC_DMA_CHNL_DWT_C_L0].phy_addr,
|
|
+ port_info->dma_chnls[MAC_DMA_CHNL_DWT_C_L1].phy_addr,
|
|
+ port_info->dma_chnls[MAC_DMA_CHNL_DWT_C_L2].phy_addr,
|
|
+ port_info->dma_chnls[MAC_DMA_CHNL_DWT_C_L3].phy_addr,
|
|
+ port_info->dma_chnls[MAC_DMA_CHNL_DWT_C_L4].phy_addr);
|
|
+ } else if (port_id == MAC_DMA_PORT_R1) {
|
|
+ cpp20_3dnr_pre_dmad_cfg(cpp_dev,
|
|
+ port_info->fbc_enabled == true ? 0x200000000 : port_info->dma_chnls[MAC_DMA_CHNL_DWT_Y_L0].phy_addr,
|
|
+ port_info->dma_chnls[MAC_DMA_CHNL_DWT_Y_L1].phy_addr,
|
|
+ port_info->dma_chnls[MAC_DMA_CHNL_DWT_Y_L2].phy_addr,
|
|
+ port_info->dma_chnls[MAC_DMA_CHNL_DWT_Y_L3].phy_addr,
|
|
+ port_info->dma_chnls[MAC_DMA_CHNL_DWT_Y_L4].phy_addr,
|
|
+ port_info->fbc_enabled == true ? 0x210000000 : port_info->dma_chnls[MAC_DMA_CHNL_DWT_C_L0].phy_addr,
|
|
+ port_info->dma_chnls[MAC_DMA_CHNL_DWT_C_L1].phy_addr,
|
|
+ port_info->dma_chnls[MAC_DMA_CHNL_DWT_C_L2].phy_addr,
|
|
+ port_info->dma_chnls[MAC_DMA_CHNL_DWT_C_L3].phy_addr,
|
|
+ port_info->dma_chnls[MAC_DMA_CHNL_DWT_C_L4].phy_addr);
|
|
+
|
|
+ if (port_info->fbc_enabled == true)
|
|
+ cpp20_tnrdec_dmad_cfg(cpp_dev,
|
|
+ port_info->dma_chnls[MAC_DMA_CHNL_FBC_HEADER].phy_addr);
|
|
+
|
|
+ cpp20_3dnr_pre_kgain_cfg(cpp_dev,
|
|
+ port_info->dma_chnls[MAC_DMA_CHNL_KGAIN_L0].phy_addr,
|
|
+ port_info->dma_chnls[MAC_DMA_CHNL_KGAIN_L1].phy_addr,
|
|
+ port_info->dma_chnls[MAC_DMA_CHNL_KGAIN_L2].phy_addr,
|
|
+ port_info->dma_chnls[MAC_DMA_CHNL_KGAIN_L3].phy_addr,
|
|
+ port_info->dma_chnls[MAC_DMA_CHNL_KGAIN_L4].phy_addr);
|
|
+ } else if (port_id == MAC_DMA_PORT_W0) {
|
|
+ cpp20_3dnr_out_dmad_cfg(cpp_dev,
|
|
+ port_info->fbc_enabled == true ? 0x200000000 : port_info->dma_chnls[MAC_DMA_CHNL_DWT_Y_L0].phy_addr,
|
|
+ port_info->dma_chnls[MAC_DMA_CHNL_DWT_Y_L1].phy_addr,
|
|
+ port_info->dma_chnls[MAC_DMA_CHNL_DWT_Y_L2].phy_addr,
|
|
+ port_info->dma_chnls[MAC_DMA_CHNL_DWT_Y_L3].phy_addr,
|
|
+ port_info->dma_chnls[MAC_DMA_CHNL_DWT_Y_L4].phy_addr,
|
|
+ port_info->fbc_enabled == true ? 0x210000000 : port_info->dma_chnls[MAC_DMA_CHNL_DWT_C_L0].phy_addr,
|
|
+ port_info->dma_chnls[MAC_DMA_CHNL_DWT_C_L1].phy_addr,
|
|
+ port_info->dma_chnls[MAC_DMA_CHNL_DWT_C_L2].phy_addr,
|
|
+ port_info->dma_chnls[MAC_DMA_CHNL_DWT_C_L3].phy_addr,
|
|
+ port_info->dma_chnls[MAC_DMA_CHNL_DWT_C_L4].phy_addr);
|
|
+
|
|
+ if (port_info->fbc_enabled == true)
|
|
+ cpp20_tnrenc_dmad_cfg(cpp_dev,
|
|
+ port_info->dma_chnls[MAC_DMA_CHNL_FBC_HEADER].phy_addr,
|
|
+ port_info->dma_chnls[MAC_DMA_CHNL_FBC_HEADER].phy_addr +
|
|
+ port_info->dma_chnls[MAC_DMA_CHNL_FBC_PAYLOAD].offset -
|
|
+ port_info->dma_chnls[MAC_DMA_CHNL_FBC_HEADER].offset);
|
|
+
|
|
+ cpp20_3dnr_out_kgain_cfg(cpp_dev,
|
|
+ port_info->dma_chnls[MAC_DMA_CHNL_KGAIN_L0].phy_addr,
|
|
+ port_info->dma_chnls[MAC_DMA_CHNL_KGAIN_L1].phy_addr,
|
|
+ port_info->dma_chnls[MAC_DMA_CHNL_KGAIN_L2].phy_addr,
|
|
+ port_info->dma_chnls[MAC_DMA_CHNL_KGAIN_L3].phy_addr,
|
|
+ port_info->dma_chnls[MAC_DMA_CHNL_KGAIN_L4].phy_addr);
|
|
+ } else {
|
|
+ pr_err("%s: invalid dma port id %d\n", __func__, port_id);
|
|
+ return -EINVAL;
|
|
+ }
|
|
+
|
|
+ return 0;
|
|
+}
|
|
+
|
|
/*
 * cpp20_debug_dump() - print CPP v2.0 DMA address registers for debugging.
 *
 * Reads back the programmed 64-bit base addresses for every DWT layer of
 * the source, previous-frame, kgain and output buffers, plus the FBC
 * encoder/decoder header/payload addresses, and logs them via cam_info().
 * Intended to be called from the error/ISR path (hooked as .debug_dump).
 */
static void cpp20_debug_dump(struct cpp_device *cpp_dev)
{
	/* raw status/config words around 0x3f8 and 0x7000 */
	cam_info("0x3f8 : 0x%08x 0x%08x 0x%08x 0x%08x 0x%08x 0x%08x",
		 cpp_reg_read(cpp_dev, 0x3f8), cpp_reg_read(cpp_dev, 0x3fc),
		 cpp_reg_read(cpp_dev, 0x400), cpp_reg_read(cpp_dev, 0x404),
		 cpp_reg_read(cpp_dev, 0x408), cpp_reg_read(cpp_dev, 0x40c));
	cam_info("0x7000: 0x%08x 0x%08x 0x%08x 0x%08x",
		 cpp_reg_read(cpp_dev, 0x7000), cpp_reg_read(cpp_dev, 0x7004),
		 cpp_reg_read(cpp_dev, 0x7008), cpp_reg_read(cpp_dev, 0x700c));
	/* current-frame luma input base addresses, DWT layers 0..4 */
	cam_info("src-y: 0x%llx 0x%llx 0x%llx 0x%llx 0x%llx",
		 cpp_reg_read64(cpp_dev, REG_CPP_YINPUTBASEADDR_L0, REG_CPP_YINPUTBASEADDR_L0_H),
		 cpp_reg_read64(cpp_dev, REG_CPP_YINPUTBASEADDR_L1, REG_CPP_YINPUTBASEADDR_L1_H),
		 cpp_reg_read64(cpp_dev, REG_CPP_YINPUTBASEADDR_L2, REG_CPP_YINPUTBASEADDR_L2_H),
		 cpp_reg_read64(cpp_dev, REG_CPP_YINPUTBASEADDR_L3, REG_CPP_YINPUTBASEADDR_L3_H),
		 cpp_reg_read64(cpp_dev, REG_CPP_YINPUTBASEADDR_L4, REG_CPP_YINPUTBASEADDR_L4_H));
	/* current-frame chroma input base addresses, DWT layers 0..4 */
	cam_info("src-c: 0x%llx 0x%llx 0x%llx 0x%llx 0x%llx",
		 cpp_reg_read64(cpp_dev, REG_CPP_UVINPUTBASEADDR_L0, REG_CPP_UVINPUTBASEADDR_L0_H),
		 cpp_reg_read64(cpp_dev, REG_CPP_UVINPUTBASEADDR_L1, REG_CPP_UVINPUTBASEADDR_L1_H),
		 cpp_reg_read64(cpp_dev, REG_CPP_UVINPUTBASEADDR_L2, REG_CPP_UVINPUTBASEADDR_L2_H),
		 cpp_reg_read64(cpp_dev, REG_CPP_UVINPUTBASEADDR_L3, REG_CPP_UVINPUTBASEADDR_L3_H),
		 cpp_reg_read64(cpp_dev, REG_CPP_UVINPUTBASEADDR_L4, REG_CPP_UVINPUTBASEADDR_L4_H));
	/* previous-frame (3DNR reference) luma input addresses */
	cam_info("pre-y: 0x%llx 0x%llx 0x%llx 0x%llx 0x%llx",
		 cpp_reg_read64(cpp_dev, REG_CPP_PRE_YINPUTBASEADDR_L0, REG_CPP_PRE_YINPUTBASEADDR_L0_H),
		 cpp_reg_read64(cpp_dev, REG_CPP_PRE_YINPUTBASEADDR_L1, REG_CPP_PRE_YINPUTBASEADDR_L1_H),
		 cpp_reg_read64(cpp_dev, REG_CPP_PRE_YINPUTBASEADDR_L2, REG_CPP_PRE_YINPUTBASEADDR_L2_H),
		 cpp_reg_read64(cpp_dev, REG_CPP_PRE_YINPUTBASEADDR_L3, REG_CPP_PRE_YINPUTBASEADDR_L3_H),
		 cpp_reg_read64(cpp_dev, REG_CPP_PRE_YINPUTBASEADDR_L4, REG_CPP_PRE_YINPUTBASEADDR_L4_H));
	/* previous-frame chroma input addresses */
	cam_info("pre-c: 0x%llx 0x%llx 0x%llx 0x%llx 0x%llx",
		 cpp_reg_read64(cpp_dev, REG_CPP_PRE_UVINPUTBASEADDR_L0, REG_CPP_PRE_UVINPUTBASEADDR_L0_H),
		 cpp_reg_read64(cpp_dev, REG_CPP_PRE_UVINPUTBASEADDR_L1, REG_CPP_PRE_UVINPUTBASEADDR_L1_H),
		 cpp_reg_read64(cpp_dev, REG_CPP_PRE_UVINPUTBASEADDR_L2, REG_CPP_PRE_UVINPUTBASEADDR_L2_H),
		 cpp_reg_read64(cpp_dev, REG_CPP_PRE_UVINPUTBASEADDR_L3, REG_CPP_PRE_UVINPUTBASEADDR_L3_H),
		 cpp_reg_read64(cpp_dev, REG_CPP_PRE_UVINPUTBASEADDR_L4, REG_CPP_PRE_UVINPUTBASEADDR_L4_H));
	/* previous-frame kgain input addresses */
	cam_info("pre-k: 0x%llx 0x%llx 0x%llx 0x%llx 0x%llx",
		 cpp_reg_read64(cpp_dev, REG_CPP_PRE_KINPUTBASEADDR_L0, REG_CPP_PRE_KINPUTBASEADDR_L0_H),
		 cpp_reg_read64(cpp_dev, REG_CPP_PRE_KINPUTBASEADDR_L1, REG_CPP_PRE_KINPUTBASEADDR_L1_H),
		 cpp_reg_read64(cpp_dev, REG_CPP_PRE_KINPUTBASEADDR_L2, REG_CPP_PRE_KINPUTBASEADDR_L2_H),
		 cpp_reg_read64(cpp_dev, REG_CPP_PRE_KINPUTBASEADDR_L3, REG_CPP_PRE_KINPUTBASEADDR_L3_H),
		 cpp_reg_read64(cpp_dev, REG_CPP_PRE_KINPUTBASEADDR_L4, REG_CPP_PRE_KINPUTBASEADDR_L4_H));
	/* output (write-back) luma addresses */
	cam_info("dst-y: 0x%llx 0x%llx 0x%llx 0x%llx 0x%llx",
		 cpp_reg_read64(cpp_dev, REG_CPP_YWBASEADDR_L0, REG_CPP_YWBASEADDR_L0_H),
		 cpp_reg_read64(cpp_dev, REG_CPP_YWBASEADDR_L1, REG_CPP_YWBASEADDR_L1_H),
		 cpp_reg_read64(cpp_dev, REG_CPP_YWBASEADDR_L2, REG_CPP_YWBASEADDR_L2_H),
		 cpp_reg_read64(cpp_dev, REG_CPP_YWBASEADDR_L3, REG_CPP_YWBASEADDR_L3_H),
		 cpp_reg_read64(cpp_dev, REG_CPP_YWBASEADDR_L4, REG_CPP_YWBASEADDR_L4_H));
	/* output chroma addresses */
	cam_info("dst-c: 0x%llx 0x%llx 0x%llx 0x%llx 0x%llx",
		 cpp_reg_read64(cpp_dev, REG_CPP_UVWBASEADDR_L0, REG_CPP_UVWBASEADDR_L0_H),
		 cpp_reg_read64(cpp_dev, REG_CPP_UVWBASEADDR_L1, REG_CPP_UVWBASEADDR_L1_H),
		 cpp_reg_read64(cpp_dev, REG_CPP_UVWBASEADDR_L2, REG_CPP_UVWBASEADDR_L2_H),
		 cpp_reg_read64(cpp_dev, REG_CPP_UVWBASEADDR_L3, REG_CPP_UVWBASEADDR_L3_H),
		 cpp_reg_read64(cpp_dev, REG_CPP_UVWBASEADDR_L4, REG_CPP_UVWBASEADDR_L4_H));
	/* output kgain addresses */
	cam_info("dst-k: 0x%llx 0x%llx 0x%llx 0x%llx 0x%llx",
		 cpp_reg_read64(cpp_dev, REG_CPP_KWBASEADDR_L0, REG_CPP_KWBASEADDR_L0_H),
		 cpp_reg_read64(cpp_dev, REG_CPP_KWBASEADDR_L1, REG_CPP_KWBASEADDR_L1_H),
		 cpp_reg_read64(cpp_dev, REG_CPP_KWBASEADDR_L2, REG_CPP_KWBASEADDR_L2_H),
		 cpp_reg_read64(cpp_dev, REG_CPP_KWBASEADDR_L3, REG_CPP_KWBASEADDR_L3_H),
		 cpp_reg_read64(cpp_dev, REG_CPP_KWBASEADDR_L4, REG_CPP_KWBASEADDR_L4_H));
	/* FBC decoder header address and encoder header/payload addresses */
	cam_info("tnrdec: 0x%llx",
		 cpp_reg_read64(cpp_dev, REG_FBC_TNRDEC_HL_ADDR, REG_FBC_TNRDEC_HH_ADDR));
	cam_info("tnrenc: 0x%llx 0x%llx",
		 cpp_reg_read64(cpp_dev, REG_FBC_TNRENC_HL_ADDR, REG_FBC_TNRENC_HH_ADDR),
		 cpp_reg_read64(cpp_dev, REG_FBC_TNRENC_PL_ADDR, REG_FBC_TNRENC_PH_ADDR));
}
|
|
+
|
|
/* Hardware operation callbacks for the CPP v2.0 IP revision. */
const struct cpp_hw_ops cpp_ops_2_0 = {
	.global_reset = cpp_global_reset,
	.enable_clk_gating = cpp_enable_clk_gating,
	.set_burst_len = cpp_set_burst_len,
	.enable_irqs_common = cpp_enable_irqs_common,
	.isr = cpp_isr,
	.debug_dump = cpp20_debug_dump,		/* register dump on error */
	.cfg_port_dmad = cpp20_3dnr_dmad_cfg,	/* per-port DMA descriptor setup */
	.hw_version = cpp20_get_hw_version,
};
|
|
diff --git a/drivers/media/platform/spacemit/camera/cam_cpp/cpp_compat_ioctl32.c b/drivers/media/platform/spacemit/camera/cam_cpp/cpp_compat_ioctl32.c
|
|
new file mode 100644
|
|
index 000000000000..111111111111
|
|
--- /dev/null
|
|
+++ b/drivers/media/platform/spacemit/camera/cam_cpp/cpp_compat_ioctl32.c
|
|
@@ -0,0 +1,166 @@
|
|
+// SPDX-License-Identifier: GPL-2.0
|
|
+/*
|
|
+ * camera cpp compat ioctl32
|
|
+ *
|
|
+ * Copyright (C) 2023 SPACEMIT Micro Limited
|
|
+ */
|
|
+
|
|
+#ifdef CONFIG_COMPAT
|
|
+#include "linux/uaccess.h"
|
|
+#include <linux/compat.h>
|
|
+#include <media/v4l2-subdev.h>
|
|
+#include <media/k1x/k1x_cpp_uapi.h>
|
|
+
|
|
+#include "cpp_compat_ioctl32.h"
|
|
+
|
|
/*
 * assign_in_user() - copy one scalar field from a __user source to a
 * __user destination.  Evaluates to non-zero if either get_user() or
 * put_user() faults.
 */
#define assign_in_user(to, from) \
	({ \
		typeof(*from) __assign_tmp; \
		\
		get_user(__assign_tmp, from) || put_user(__assign_tmp, to); \
	})

/*
 * put_user_force() - put_user() that force-casts away the sparse
 * __user annotation on the stored value; used when writing a
 * kernel-computed pointer (e.g. compat_ptr() result) into a user struct.
 */
#define put_user_force(__x, __ptr) \
	({ \
		put_user((typeof(*__x) __force *)(__x), __ptr); \
	})
|
|
+
|
|
/*
 * 32-bit userspace layout of struct cpp_reg_cfg_cmd: the native pointer
 * member is replaced by a compat_caddr_t.
 */
struct cpp_reg_cfg_cmd32 {
	enum cpp_reg_cfg_type reg_type;
	uint32_t reg_len;
	compat_caddr_t reg_data;	/* 32-bit user pointer to register data */
};

/*
 * 32-bit userspace layout of struct cpp_frame_info.  Only regs[] differs
 * from the native layout; the cpp_buffer_info members are copied verbatim
 * (assumed layout-identical between 32- and 64-bit ABIs — see
 * get_cpp_frame_info32()).
 */
struct cpp_frame_info32 {
	uint32_t frame_id;
	uint32_t client_id;
	struct cpp_reg_cfg_cmd32 regs[MAX_REG_CMDS];
	struct cpp_buffer_info src_buf_info;
	struct cpp_buffer_info dst_buf_info;
	struct cpp_buffer_info pre_buf_info;
};
|
|
+
|
|
/*
 * get_cpp_reg_cfg_cmd32() - convert one 32-bit reg-config command at
 * @p32 into the native layout at @p64 (both in user memory).  The
 * compat reg_data pointer is widened with compat_ptr().
 *
 * Return: 0 on success, -EFAULT on any user-access fault.
 */
static int get_cpp_reg_cfg_cmd32(struct cpp_reg_cfg_cmd __user *p64,
				 struct cpp_reg_cfg_cmd32 __user *p32)
{
	compat_caddr_t tmp;

	if (!access_ok(p32, sizeof(*p32)) ||
	    get_user(tmp, &p32->reg_data) ||
	    put_user_force(compat_ptr(tmp), &p64->reg_data) ||
	    assign_in_user(&p64->reg_type, &p32->reg_type) ||
	    assign_in_user(&p64->reg_len, &p32->reg_len)) {
		pr_err("%s:%-5d: Error\n", __func__, __LINE__);
		return -EFAULT;
	}

	return 0;
}
|
|
+
|
|
/*
 * get_cpp_frame_info32() - convert a 32-bit struct cpp_frame_info at
 * @p32 into the native layout at @p64 (both in user memory).
 *
 * Scalar fields and the layout-identical cpp_buffer_info members are
 * copied directly; each regs[] entry needs pointer widening and is
 * converted individually (iterated last-to-first).
 *
 * Return: 0 on success, -EFAULT on any user-access fault.
 */
static int get_cpp_frame_info32(struct cpp_frame_info __user *p64,
				struct cpp_frame_info32 __user *p32)
{
	u32 count = MAX_REG_CMDS;

	if (!access_ok(p32, sizeof(*p32)) ||
	    assign_in_user(&p64->frame_id, &p32->frame_id) ||
	    assign_in_user(&p64->client_id, &p32->client_id) ||
	    copy_in_user(&p64->src_buf_info, &p32->src_buf_info,
			 sizeof(p64->src_buf_info)) ||
	    copy_in_user(&p64->dst_buf_info, &p32->dst_buf_info,
			 sizeof(p64->dst_buf_info)) ||
	    copy_in_user(&p64->pre_buf_info, &p32->pre_buf_info,
			 sizeof(p64->pre_buf_info))) {
		pr_err("%s:%-5d: Error\n", __func__, __LINE__);
		return -EFAULT;
	}

	while (count--)
		if (get_cpp_reg_cfg_cmd32(&p64->regs[count], &p32->regs[count]))
			return -EFAULT;

	return 0;
}
|
|
+
|
|
+#define VIDIOC_K1X_CPP_PROCESS_FRAME32 \
|
|
+ _IOWR('V', BASE_VIDIOC_PRIVATE + 1, struct cpp_frame_info32)
|
|
+
|
|
/**
 * alloc_userspace() - Allocates a 64-bits userspace pointer compatible
 * for calling the native 64-bits version of an ioctl.
 *
 * @size: size of the structure itself to be allocated.
 * @aux_space: extra size needed to store "extra" data, e.g. space for
 *	       other __user data that is pointed to fields inside the
 *	       structure.
 * @new_p64: pointer to a pointer to be filled with the allocated struct.
 *
 * Return:
 *
 * if it can't allocate memory, either -ENOMEM or -EFAULT will be returned.
 * Zero otherwise.
 */
static int alloc_userspace(unsigned int size, u32 aux_space, void __user **new_p64)
{
	/* carve scratch space out of the user stack for the native-layout copy */
	*new_p64 = compat_alloc_user_space(size + aux_space);
	if (!*new_p64)
		return -ENOMEM;
	/* zero the struct so fields not filled by the converter read as 0 */
	if (clear_user(*new_p64, size))
		return -EFAULT;
	return 0;
}
|
|
+
|
|
+static long native_ioctl(struct file *file, unsigned int cmd, unsigned long arg)
|
|
+{
|
|
+ struct video_device *vdev = video_devdata(file);
|
|
+ long ret = -ENODEV;
|
|
+
|
|
+ if (vdev->fops->unlocked_ioctl)
|
|
+ if (video_is_registered(vdev))
|
|
+ ret = vdev->fops->unlocked_ioctl(file, cmd, arg);
|
|
+ else
|
|
+ ret = -ENOTTY;
|
|
+
|
|
+ return ret;
|
|
+}
|
|
+
|
|
+long k1x_cpp_compat_ioctl32(struct file *file, unsigned int cmd, unsigned long arg)
|
|
+{
|
|
+ void __user *p32 = compat_ptr(arg);
|
|
+ void __user *new_p64 = NULL;
|
|
+ int compatible_arg = 1;
|
|
+ unsigned int ncmd;
|
|
+ long ret = 0;
|
|
+
|
|
+ switch (cmd) {
|
|
+ case VIDIOC_K1X_CPP_PROCESS_FRAME32:
|
|
+ ncmd = VIDIOC_K1X_CPP_PROCESS_FRAME;
|
|
+ break;
|
|
+ default:
|
|
+ ncmd = cmd;
|
|
+ break;
|
|
+ }
|
|
+
|
|
+ switch (cmd) {
|
|
+ case VIDIOC_K1X_CPP_PROCESS_FRAME32:
|
|
+ ret = alloc_userspace(sizeof(struct cpp_frame_info), 0, &new_p64);
|
|
+ if (!ret)
|
|
+ ret = get_cpp_frame_info32(new_p64, p32);
|
|
+ compatible_arg = 0;
|
|
+ break;
|
|
+ }
|
|
+ if (ret)
|
|
+ return ret;
|
|
+
|
|
+ if (compatible_arg)
|
|
+ ret = native_ioctl(file, ncmd, (unsigned long)p32);
|
|
+ else
|
|
+ ret = native_ioctl(file, ncmd, (unsigned long)new_p64);
|
|
+
|
|
+ if (ret)
|
|
+ pr_err("%s: unknown ioctl '%c', dir=%d, #%d (0x%08x)\n",
|
|
+ __func__, _IOC_TYPE(cmd), _IOC_DIR(cmd), _IOC_NR(cmd), cmd);
|
|
+
|
|
+ return ret;
|
|
+}
|
|
+#endif /* CONFIG_COMPAT */
|
|
diff --git a/drivers/media/platform/spacemit/camera/cam_cpp/cpp_compat_ioctl32.h b/drivers/media/platform/spacemit/camera/cam_cpp/cpp_compat_ioctl32.h
|
|
new file mode 100644
|
|
index 000000000000..111111111111
|
|
--- /dev/null
|
|
+++ b/drivers/media/platform/spacemit/camera/cam_cpp/cpp_compat_ioctl32.h
|
|
@@ -0,0 +1,12 @@
|
|
+/* SPDX-License-Identifier: GPL-2.0 */
|
|
+/*
|
|
+ * Copyright (C) 2023 SPACEMIT Micro Limited
|
|
+ */
|
|
+
|
|
#ifndef __CPP_COMPAT_IOCTL32_H__
#define __CPP_COMPAT_IOCTL32_H__

#include <media/v4l2-subdev.h>

/* 32-bit compat ioctl entry point for the CPP video device. */
long k1x_cpp_compat_ioctl32(struct file *file, unsigned int cmd, unsigned long arg);
#endif /* end of include guard: __CPP_COMPAT_IOCTL32_H__ */
|
|
diff --git a/drivers/media/platform/spacemit/camera/cam_cpp/cpp_dmabuf.c b/drivers/media/platform/spacemit/camera/cam_cpp/cpp_dmabuf.c
|
|
new file mode 100644
|
|
index 000000000000..111111111111
|
|
--- /dev/null
|
|
+++ b/drivers/media/platform/spacemit/camera/cam_cpp/cpp_dmabuf.c
|
|
@@ -0,0 +1,250 @@
|
|
+// SPDX-License-Identifier: GPL-2.0
|
|
+/*
|
|
+ * lizhirong <zhirong.li@spacemit.com>
|
|
+ *
|
|
+ * Copyright (C) 2023 SPACEMIT Micro Limited
|
|
+ */
|
|
+//#define DEBUG
|
|
+
|
|
+#include <linux/device.h>
|
|
+#include <linux/dma-direction.h>
|
|
+#include <linux/dma-mapping.h>
|
|
+#include <linux/err.h>
|
|
+#include <linux/gfp.h>
|
|
+#include <linux/scatterlist.h>
|
|
+#include <linux/types.h>
|
|
+#include <linux/platform_device.h>
|
|
+#include <linux/dma-buf.h>
|
|
+#include <media/k1x/k1x_cpp_uapi.h>
|
|
+#include "k1x_cpp.h"
|
|
+#include "cpp_iommu.h"
|
|
+#include "cpp_dmabuf.h"
|
|
+
|
|
/*
 * cpp_dma_alloc_iommu_channels() - acquire an IOMMU channel for every
 * dmabuf-mapped DMA channel of @dma_info, marking each successfully
 * acquired channel with mmu_attached = 1.
 *
 * NOTE(review): acquire_channel() failures are silently skipped and the
 * function always returns 0, so a port may end up only partially
 * attached — confirm callers tolerate this best-effort behavior.
 */
int cpp_dma_alloc_iommu_channels(struct cpp_device *cpp_dev,
				 struct cpp_dma_port_info *dma_info)
{
	struct cpp_iommu_device *mmu_dev = cpp_dev->mmu_dev;
	struct cpp_dma_chnl_info *dma_chnl;
	unsigned int tid, chnl;
	int rc;

	for (chnl = 0; chnl < MAX_DMA_CHNLS; ++chnl) {
		dma_chnl = &dma_info->dma_chnls[chnl];
		tid = dma_chnl->tid;
		if (dma_chnl->dbuf_mapped) {
			rc = mmu_dev->ops->acquire_channel(mmu_dev, tid);
			if (rc)
				continue;
			dma_info->dma_chnls[chnl].mmu_attached = 1;
		}
	}

	return 0;
}
|
|
+
|
|
/*
 * cpp_dma_fill_iommu_channels() - program every attached IOMMU channel
 * of @dma_info: build the scatter list for the channel's dmabuf,
 * configure and enable the channel, then record the resulting device
 * address (iova, adjusted by the channel's data offset) in phy_addr.
 *
 * Always returns 0.
 */
int cpp_dma_fill_iommu_channels(struct cpp_device *cpp_dev,
				struct cpp_dma_port_info *dma_info)
{
	struct cpp_iommu_device *mmu_dev = cpp_dev->mmu_dev;
	struct cpp_dma_chnl_info *dma_chnl;
	unsigned int tid, chnl;

	for (chnl = 0; chnl < MAX_DMA_CHNLS; ++chnl) {
		dma_chnl = &dma_info->dma_chnls[chnl];
		tid = dma_chnl->tid;
		if (dma_chnl->mmu_attached) {
			mmu_dev->ops->setup_sglist(mmu_dev, tid, dma_chnl->fd,
						   dma_chnl->offset, dma_chnl->length);
			mmu_dev->ops->config_channel(mmu_dev, tid, NULL, 0);
			mmu_dev->ops->enable_channel(mmu_dev, tid);
			dma_chnl->phy_addr =
				mmu_dev->ops->get_iova(mmu_dev, tid, dma_chnl->offset);

			pr_debug
			    ("channel tid-%x: dma addr 0x%llx, tt base %p, tt size %d\n",
			     tid, dma_chnl->phy_addr, dma_chnl->tt_base,
			     dma_chnl->tt_size);
		}
	}

	return 0;
}
|
|
+
|
|
+int cpp_dma_free_iommu_channels(struct cpp_device *cpp_dev,
|
|
+ struct cpp_dma_port_info *dma_info)
|
|
+{
|
|
+ struct cpp_iommu_device *mmu_dev = cpp_dev->mmu_dev;
|
|
+ struct cpp_dma_chnl_info *dma_chnl;
|
|
+ unsigned int tid, chnl;
|
|
+
|
|
+ for (chnl = 0; chnl < MAX_DMA_CHNLS; ++chnl) {
|
|
+ dma_chnl = &dma_info->dma_chnls[chnl];
|
|
+ tid = dma_chnl->tid;
|
|
+ if (dma_chnl->mmu_attached) {
|
|
+ mmu_dev->ops->disable_channel(mmu_dev, tid);
|
|
+ mmu_dev->ops->release_channel(mmu_dev, tid);
|
|
+ dma_chnl->mmu_attached = 0;
|
|
+ }
|
|
+ }
|
|
+
|
|
+ return 0;
|
|
+}
|
|
+
|
|
+static int get_dma_channel_id(unsigned int layer_idx, unsigned int plane_idx,
|
|
+ unsigned int is_kgain, enum cpp_pix_format format)
|
|
+{
|
|
+ int chnl_id;
|
|
+
|
|
+ if (layer_idx >= CPP_MAX_LAYERS || plane_idx >= CPP_MAX_PLANAR) {
|
|
+ pr_err("invalid layer %d or plane %d", layer_idx, plane_idx);
|
|
+ return -EINVAL;
|
|
+ }
|
|
+
|
|
+ if (is_kgain) {
|
|
+ chnl_id = MAC_DMA_CHNL_KGAIN_L0;
|
|
+ chnl_id += layer_idx;
|
|
+ } else {
|
|
+ chnl_id = (format == PIXFMT_FBC_DWT && layer_idx == 0) ?
|
|
+ MAC_DMA_CHNL_FBC_HEADER : MAC_DMA_CHNL_DWT_Y_L0;
|
|
+ chnl_id += (layer_idx * 2 + plane_idx);
|
|
+ }
|
|
+
|
|
+ return chnl_id;
|
|
+}
|
|
+
|
|
/*
 * cpp_dmabuf_prepare() - map all dmabufs of one MAC DMA port.
 *
 * Allocates a cpp_dma_port_info and, for every DWT plane of every layer
 * of @buf_info (plus one kgain plane per layer when kgain_used is set),
 * maps the plane's dmabuf fd into the CPP IOMMU and records the
 * fd/offset/length/tid bookkeeping on the corresponding DMA channel.
 *
 * Return: the newly allocated port info, or NULL on failure (planes
 * mapped so far are released via cpp_dmabuf_cleanup()).
 */
struct cpp_dma_port_info *cpp_dmabuf_prepare(struct cpp_device *cpp_dev,
					     struct cpp_buffer_info *buf_info,
					     uint8_t port_id)
{
	struct cpp_dma_port_info *port_info;
	struct cpp_plane_info *plane_info;
	struct cpp_dma_chnl_info *chnl_info;
	int layer, plane, chnl;
	uint32_t map_flags = 0;
	int rc;

	port_info = kzalloc(sizeof(*port_info), GFP_KERNEL);
	if (!port_info) {
		pr_err("%s: alloc dma port info failed\n", __func__);
		return NULL;
	}
	port_info->port_id = port_id;

	/* write port buffers are mapped write-only; read ports read-only */
	if (port_id == MAC_DMA_PORT_W0)
		map_flags |= IOMMU_MAP_FLAG_WRITE_ONLY;
	else
		map_flags |= IOMMU_MAP_FLAG_READ_ONLY;

	/* NOTE(review): index > 1 skips cache sync — confirm the buffer
	 * indexing convention that makes this safe. */
	if (buf_info->index > 1)
		map_flags |= IOMMU_MAP_FLAG_NOSYNC;

	/* map every DWT plane (Y/C per layer) of the buffer */
	for (layer = 0; layer < buf_info->num_layers; ++layer) {
		for (plane = 0; plane < CPP_MAX_PLANAR; ++plane) {
			chnl = get_dma_channel_id(layer, plane, 0, buf_info->format);
			if (chnl < 0) {
				pr_err
				    ("%s: port%d, dwt layer%d, plane%d failed to get channel id\n",
				     __func__, port_id, layer, plane);
				goto err;
			}
			chnl_info = &port_info->dma_chnls[chnl];
			plane_info = &buf_info->dwt_planes[layer][plane];

			rc = cpp_iommu_map_dmabuf(cpp_dev->mmu_dev,
						  plane_info->m.fd, map_flags,
						  &chnl_info->phy_addr);
			if (rc) {
				pr_err("%s: port%d, dwt layer%d, plane%d map failed\n",
				       __func__, port_id, layer, plane);
				goto err;
			}

			chnl_info->fd = plane_info->m.fd;
			chnl_info->dbuf_mapped = 1;
			chnl_info->chnl_id = chnl;
			chnl_info->tid = MMU_TID(port_id, chnl);
			chnl_info->length = plane_info->length;
			chnl_info->offset = plane_info->data_offset;
			/* device address points at the plane data, not the buffer start */
			if (chnl_info->offset)
				chnl_info->phy_addr += chnl_info->offset;
		}
	}

	if (buf_info->format == PIXFMT_FBC_DWT) {
		/* workaround: suppose header and payload exist the same dmabuf fd */
		port_info->dma_chnls[MAC_DMA_CHNL_FBC_HEADER].length +=
			port_info->dma_chnls[MAC_DMA_CHNL_FBC_PAYLOAD].length;
		port_info->fbc_enabled = true;
	}

	/* map one kgain plane per layer (format argument unused when is_kgain=1) */
	for (layer = 0; buf_info->kgain_used && (layer < buf_info->num_layers); ++layer) {
		chnl = get_dma_channel_id(layer, 0, 1, 0);
		if (chnl < 0) {
			pr_err("%s: port%d, kgain layer%d failed to get channel id\n",
			       __func__, port_id, layer);
			goto err;
		}
		chnl_info = &port_info->dma_chnls[chnl];
		plane_info = &buf_info->kgain_planes[layer];

		rc = cpp_iommu_map_dmabuf(cpp_dev->mmu_dev, plane_info->m.fd,
					  map_flags, &chnl_info->phy_addr);
		if (rc) {
			pr_err("%s: port%d, kgain layer%d map failed\n",
			       __func__, port_id, layer);
			goto err;
		}

		chnl_info->fd = plane_info->m.fd;
		chnl_info->dbuf_mapped = 1;
		chnl_info->chnl_id = chnl;
		chnl_info->tid = MMU_TID(port_id, chnl);
		chnl_info->length = plane_info->length;
		chnl_info->offset = plane_info->data_offset;
		if (chnl_info->offset)
			chnl_info->phy_addr += chnl_info->offset;
	}

	return port_info;

err:
	cpp_dmabuf_cleanup(cpp_dev, port_info);
	return NULL;
}
|
|
+
|
|
+void cpp_dmabuf_cleanup(struct cpp_device *cpp_dev, struct cpp_dma_port_info *port_info)
|
|
+{
|
|
+ struct cpp_dma_chnl_info *chnl_info;
|
|
+ unsigned int chnl;
|
|
+
|
|
+ if (IS_ERR_OR_NULL(port_info))
|
|
+ return;
|
|
+
|
|
+ for (chnl = 0; chnl < MAX_DMA_CHNLS; ++chnl) {
|
|
+ chnl_info = &port_info->dma_chnls[chnl];
|
|
+ if (!chnl_info->dbuf_mapped)
|
|
+ continue;
|
|
+ cpp_iommu_unmap_dmabuf(cpp_dev->mmu_dev, chnl_info->fd);
|
|
+ chnl_info->dbuf_mapped = 0;
|
|
+ }
|
|
+
|
|
+ kfree(port_info);
|
|
+}
|
|
+
|
|
+void cpp_dmabuf_debug_dump(struct cpp_dma_port_info *port_info)
|
|
+{
|
|
+ unsigned int chnl;
|
|
+
|
|
+ for (chnl = 0; chnl < MAX_DMA_CHNLS; ++chnl) {
|
|
+ if (port_info->dma_chnls[chnl].dbuf_mapped == 0)
|
|
+ continue;
|
|
+ pr_info
|
|
+ ("P%d-CHNL%d: fd=%d, phy_addr=0x%llx, offset=0x%x, length=0x%x, chnl_id=0x%x, tid=0x%x\n",
|
|
+ port_info->port_id, chnl, port_info->dma_chnls[chnl].fd,
|
|
+ port_info->dma_chnls[chnl].phy_addr,
|
|
+ port_info->dma_chnls[chnl].offset,
|
|
+ port_info->dma_chnls[chnl].length,
|
|
+ port_info->dma_chnls[chnl].chnl_id,
|
|
+ port_info->dma_chnls[chnl].tid);
|
|
+ }
|
|
+}
|
|
diff --git a/drivers/media/platform/spacemit/camera/cam_cpp/cpp_dmabuf.h b/drivers/media/platform/spacemit/camera/cam_cpp/cpp_dmabuf.h
|
|
new file mode 100644
|
|
index 000000000000..111111111111
|
|
--- /dev/null
|
|
+++ b/drivers/media/platform/spacemit/camera/cam_cpp/cpp_dmabuf.h
|
|
@@ -0,0 +1,85 @@
|
|
+/* SPDX-License-Identifier: GPL-2.0 */
|
|
+/*
|
|
+ * lizhirong <zhirong.li@spacemit.com>
|
|
+ *
|
|
+ * Copyright (C) 2023 SPACEMIT Micro Limited
|
|
+ */
|
|
+
|
|
#ifndef __CPP_DMABUF_H__
#define __CPP_DMABUF_H__

#include <linux/types.h>
#include <linux/device.h>
#include <media/k1x/k1x_cpp_uapi.h>

struct cpp_device;

/*
 * MAC DMA ports.  R1 feeds the 3DNR previous-frame input and W0 is the
 * output port (see cpp20_3dnr_*_cfg in cpp-v2p0.c); R0 is the remaining
 * read port.
 */
enum cpp_mac_dma_port {
	MAC_DMA_PORT_R0,
	MAC_DMA_PORT_R1,
	MAC_DMA_PORT_W0,
	MAX_DMA_PORT,
};

/*
 * Per-port DMA channels: interleaved luma/chroma channels for DWT
 * layers 0..4, one kgain channel per layer, and the FBC header/payload
 * pair (which replaces the layer-0 Y/C channels for PIXFMT_FBC_DWT).
 */
enum mac_dma_channel_type {
	MAC_DMA_CHNL_DWT_Y_L0,
	MAC_DMA_CHNL_DWT_C_L0,
	MAC_DMA_CHNL_DWT_Y_L1,
	MAC_DMA_CHNL_DWT_C_L1,
	MAC_DMA_CHNL_DWT_Y_L2,
	MAC_DMA_CHNL_DWT_C_L2,
	MAC_DMA_CHNL_DWT_Y_L3,
	MAC_DMA_CHNL_DWT_C_L3,
	MAC_DMA_CHNL_DWT_Y_L4,
	MAC_DMA_CHNL_DWT_C_L4,
	MAC_DMA_CHNL_KGAIN_L0,
	MAC_DMA_CHNL_KGAIN_L1,
	MAC_DMA_CHNL_KGAIN_L2,
	MAC_DMA_CHNL_KGAIN_L3,
	MAC_DMA_CHNL_KGAIN_L4,
	MAC_DMA_CHNL_FBC_HEADER,
	MAC_DMA_CHNL_FBC_PAYLOAD,
	MAX_DMA_CHNLS,
};

/* Bookkeeping for one DMA channel's dmabuf mapping. */
struct cpp_dma_chnl_info {
	int32_t fd;			/* dmabuf fd backing this channel */
	unsigned int dbuf_mapped:1;	/* dmabuf is mapped into the IOMMU */
	unsigned int synced:1;
	unsigned int prepared:1;
	unsigned int need_cache_sync:1;
	unsigned int tid;		/* IOMMU transaction id (MMU_TID) */
	uint8_t chnl_id;		/* enum mac_dma_channel_type value */
	uint32_t offset;		/* data offset inside the dmabuf */
	uint32_t length;		/* plane data length */
	int mmu_attached;		/* IOMMU channel acquired for this tid */
	dma_addr_t phy_addr; /* cpp dmac addr */
	uint32_t *tt_base; /* translation table cpu base */
	uint32_t tt_size; /* translation table size */
};

/* All DMA channels of one MAC port plus port-level state. */
struct cpp_dma_port_info {
	uint8_t port_id;		/* enum cpp_mac_dma_port */
	bool fbc_enabled;		/* buffer uses FBC compression */
	struct cpp_dma_chnl_info dma_chnls[MAX_DMA_CHNLS];
	// dma_addr_t trans_tab_dma_addr;
	// void *trans_tab_cpu_addr;
	// size_t total_trans_tab_sz;
};

int cpp_dma_alloc_iommu_channels(struct cpp_device *cpp_dev,
				 struct cpp_dma_port_info *dma_info);

int cpp_dma_free_iommu_channels(struct cpp_device *cpp_dev,
				struct cpp_dma_port_info *dma_info);

int cpp_dma_fill_iommu_channels(struct cpp_device *cpp_dev,
				struct cpp_dma_port_info *dma_info);

struct cpp_dma_port_info *cpp_dmabuf_prepare(struct cpp_device *cpp_dev,
					     struct cpp_buffer_info *buf_info,
					     uint8_t blk_id);
void cpp_dmabuf_cleanup(struct cpp_device *cpp_dev, struct cpp_dma_port_info *dma_info);

void cpp_dmabuf_debug_dump(struct cpp_dma_port_info *dma_info);
#endif
|
|
diff --git a/drivers/media/platform/spacemit/camera/cam_cpp/cpp_iommu.c b/drivers/media/platform/spacemit/camera/cam_cpp/cpp_iommu.c
|
|
new file mode 100644
|
|
index 000000000000..111111111111
|
|
--- /dev/null
|
|
+++ b/drivers/media/platform/spacemit/camera/cam_cpp/cpp_iommu.c
|
|
@@ -0,0 +1,833 @@
|
|
+// SPDX-License-Identifier: GPL-2.0
|
|
+/*
|
|
+ * cpp_iommu.c - Driver for CPP IOMMU
|
|
+ *
|
|
+ * Copyright (C) 2023 SPACEMIT Micro Limited
|
|
+ */
|
|
+//#define DEBUG
|
|
+
|
|
+#include <linux/mutex.h>
|
|
+#include <linux/list.h>
|
|
+#include <linux/kernel.h>
|
|
+#include <linux/device.h>
|
|
+#include <linux/types.h>
|
|
+#include <linux/platform_device.h>
|
|
+#include <linux/spinlock.h>
|
|
+#include <linux/bitops.h>
|
|
+#include <linux/err.h>
|
|
+#include <linux/io.h>
|
|
+#include <linux/printk.h>
|
|
+#include <linux/dma-buf.h>
|
|
+#include <linux/dma-direction.h>
|
|
+#include <linux/dma-mapping.h>
|
|
+#include <linux/scatterlist.h>
|
|
+#include "cam_dbg.h"
|
|
+#include "k1x_cpp.h"
|
|
+#include "cpp_dmabuf.h"
|
|
+#include "regs-cpp-iommu.h"
|
|
+#include "cpp_iommu.h"
|
|
+
|
|
+#undef CAM_MODULE_TAG
|
|
+#define CAM_MODULE_TAG CAM_MDL_CPP
|
|
+
|
|
/* Read a 32-bit CPP IOMMU register at byte offset @reg. */
static inline uint32_t iommu_reg_read(struct cpp_iommu_device *mmu_dev, uint32_t reg)
{
	return ioread32(mmu_dev->regs_base + reg);
}

/* Write @val to the CPP IOMMU register at byte offset @reg. */
static inline void iommu_reg_write(struct cpp_iommu_device *mmu_dev,
				   uint32_t reg, uint32_t val)
{
	iowrite32(val, mmu_dev->regs_base + reg);
}

/* Read-modify-write: replace the bits selected by @mask with @val. */
static inline void iommu_reg_write_mask(struct cpp_iommu_device *mmu_dev,
					uint32_t reg, uint32_t val, uint32_t mask)
{
	uint32_t v;

	v = iommu_reg_read(mmu_dev, reg);
	v = (v & ~mask) | (val & mask);
	iommu_reg_write(mmu_dev, reg, v);
}

/* Set the bits given in @val, leaving the rest of the register intact. */
static inline void iommu_reg_set_bit(struct cpp_iommu_device *mmu_dev,
				     uint32_t reg, uint32_t val)
{
	iommu_reg_write_mask(mmu_dev, reg, val, val);
}

/* Clear the bits given in @val, leaving the rest of the register intact. */
static inline void iommu_reg_clr_bit(struct cpp_iommu_device *mmu_dev,
				     uint32_t reg, uint32_t val)
{
	iommu_reg_write_mask(mmu_dev, reg, 0, val);
}
|
|
+
|
|
/* Enable TBU @tbu (TCR0 bit 0). */
static void iommu_enable_tbu(struct cpp_iommu_device *mmu_dev, int tbu)
{
	iommu_reg_set_bit(mmu_dev, REG_IOMMU_TCR0(tbu), 0x1);
}

/* Disable TBU @tbu (clear TCR0 bit 0). */
static void iommu_disable_tbu(struct cpp_iommu_device *mmu_dev, int tbu)
{
	iommu_reg_clr_bit(mmu_dev, REG_IOMMU_TCR0(tbu), 0x1);
}

/*
 * Program the translation-table base address for TBU @tbu: low 32 bits
 * in TTBL, bit 32 of the address in TTBH (33-bit physical range).
 */
static void iommu_set_tbu_ttaddr(struct cpp_iommu_device *mmu_dev, int tbu,
				 uint64_t addr)
{
	iommu_reg_write(mmu_dev, REG_IOMMU_TTBL(tbu), addr & 0xffffffff);
	iommu_reg_write(mmu_dev, REG_IOMMU_TTBH(tbu), (addr >> 32) & 0x1);
}

/* Program the translation-table size field, TCR0 bits [28:16]. */
static void iommu_set_tbu_ttsize(struct cpp_iommu_device *mmu_dev, int tbu, int size)
{
	iommu_reg_write_mask(mmu_dev, REG_IOMMU_TCR0(tbu),
			     (size & 0x1fff) << 16, 0x1fff << 16);
}

/* Program the TBU QoS field, TCR0 bits [7:4]. */
static void __maybe_unused iommu_set_tbu_qos(struct cpp_iommu_device *mmu_dev,
					     int tbu, int qos)
{
	iommu_reg_write_mask(mmu_dev, REG_IOMMU_TCR0(tbu), (qos & 0xf) << 4, 0xf << 4);
}
|
|
+
|
|
/**
 * iommu_update_trans_table - TBU translation table update
 *
 * Sets the table-update bit (TCR0 bit 2); this bit will be cleared to 0
 * by hardware after TLB preload.
 * Only works for a full-frame TBU.
 */
static void iommu_update_trans_table(struct cpp_iommu_device *mmu_dev, int tbu)
{
	iommu_reg_set_bit(mmu_dev, REG_IOMMU_TCR0(tbu), 0x1 << 2);
}
|
|
+
|
|
/* Unmask all 17 global IOMMU interrupt sources (GIRQ_ENA[16:0]). */
static void iommu_enable_irqs(struct cpp_iommu_device *mmu_dev)
{
	iommu_reg_write_mask(mmu_dev, REG_IOMMU_GIRQ_ENA, 0x1ffff, 0x1ffff);
}

/* Read the low 32 bits of the IOMMU base-virtual-address register. */
static inline uint32_t iommu_bva_low(struct cpp_iommu_device *mmu_dev)
{
	return iommu_reg_read(mmu_dev, REG_IOMMU_BVAL);
}
|
|
+
|
|
/*
 * Legacy channel-to-TBU sketch (kept for reference):
 *
 * curY_L0-----TBU0
 * curY_L1-----TBU1
 * curY_L2-----TBU2
 * curY_L3-----TBU3
 * preY_L0-----TBU4
 * preY_L1-----TBU5
 * preY_L2-----TBU6
 * preY_L3-----TBU7
 * wbY_L0------TBU8
 * wbY_L1------TBU9
 * wbY_L2------TBU10
 * wbY_L3------TBU11
 * curUV_L0----TBU12
 * curUV_L1----TBU13
 * curUV_L2----TBU14
 * curUV_L3----TBU15
 * preUV_L0----TBU16
 * preUV_L1----TBU17
 * preUV_L2----TBU18
 * preUV_L3----TBU19
 * wbUV_L0-----TBU20
 * wbUV_L1-----TBU21
 * wbUV_L2-----TBU22
 * wbUV_L3-----TBU23
 * preK_L0-----TBU24
 * preK_L1-----TBU25
 * preK_L2-----TBU26
 * preK_L3-----TBU27
 * wbK_L0------TBU28
 * wbK_L1------TBU29
 * wbK_L2------TBU30
 * wbK_L3------TBU31
 *
 * NOTE(review): the sketch above does NOT match iommu_ch_tid_map[]
 * below (e.g. TBU0 is W0/DWT_Y_L4 in the table); treat the table as
 * authoritative and confirm against the hardware spec.
 */
/* Encode a TBU index into the upper 16 bits of a map entry. */
#define MMU_TBU(tbu_id) ((tbu_id) << 16)
/* Each entry: (port, channel) tid in bits [15:0], TBU index in [31:16]. */
static const uint32_t iommu_ch_tid_map[] = {
	MMU_TID(MAC_DMA_PORT_W0, MAC_DMA_CHNL_DWT_Y_L4) | MMU_TBU(0),
	MMU_TID(MAC_DMA_PORT_W0, MAC_DMA_CHNL_DWT_C_L4) | MMU_TBU(1),
	MMU_TID(MAC_DMA_PORT_W0, MAC_DMA_CHNL_DWT_Y_L3) | MMU_TBU(2),
	MMU_TID(MAC_DMA_PORT_W0, MAC_DMA_CHNL_DWT_C_L3) | MMU_TBU(3),
	MMU_TID(MAC_DMA_PORT_W0, MAC_DMA_CHNL_DWT_Y_L2) | MMU_TBU(4),
	MMU_TID(MAC_DMA_PORT_W0, MAC_DMA_CHNL_DWT_C_L2) | MMU_TBU(5),
	MMU_TID(MAC_DMA_PORT_W0, MAC_DMA_CHNL_DWT_Y_L1) | MMU_TBU(6),
	MMU_TID(MAC_DMA_PORT_W0, MAC_DMA_CHNL_DWT_C_L1) | MMU_TBU(7),
	MMU_TID(MAC_DMA_PORT_W0, MAC_DMA_CHNL_DWT_Y_L0) | MMU_TBU(8), /* if fbc enc mode is on, the tbu is invalid */
	MMU_TID(MAC_DMA_PORT_W0, MAC_DMA_CHNL_DWT_C_L0) | MMU_TBU(9), /* if fbc enc mode is on, the tbu works in full frame mode, need to set tbu_update 1 */
	MMU_TID(MAC_DMA_PORT_W0, MAC_DMA_CHNL_FBC_HEADER) | MMU_TBU(9),
	MMU_TID(MAC_DMA_PORT_R1, MAC_DMA_CHNL_DWT_Y_L4) | MMU_TBU(10),
	MMU_TID(MAC_DMA_PORT_R1, MAC_DMA_CHNL_DWT_C_L4) | MMU_TBU(11),
	MMU_TID(MAC_DMA_PORT_R1, MAC_DMA_CHNL_DWT_Y_L3) | MMU_TBU(12),
	MMU_TID(MAC_DMA_PORT_R1, MAC_DMA_CHNL_DWT_C_L3) | MMU_TBU(13),
	MMU_TID(MAC_DMA_PORT_R1, MAC_DMA_CHNL_DWT_Y_L2) | MMU_TBU(14),
	MMU_TID(MAC_DMA_PORT_R1, MAC_DMA_CHNL_DWT_C_L2) | MMU_TBU(15),
	MMU_TID(MAC_DMA_PORT_R1, MAC_DMA_CHNL_DWT_Y_L0) | MMU_TBU(16), /* if fbc dec mode is on, the tbu is invalid */
	MMU_TID(MAC_DMA_PORT_R1, MAC_DMA_CHNL_DWT_C_L0) | MMU_TBU(17), /* if fbc dec mode is on, the tbu works in full frame mode, need to set tbu_update 1 */
	MMU_TID(MAC_DMA_PORT_R1, MAC_DMA_CHNL_FBC_HEADER) | MMU_TBU(17),
	MMU_TID(MAC_DMA_PORT_R1, MAC_DMA_CHNL_DWT_Y_L1) | MMU_TBU(18),
	MMU_TID(MAC_DMA_PORT_R1, MAC_DMA_CHNL_DWT_C_L1) | MMU_TBU(19),
	MMU_TID(MAC_DMA_PORT_R0, MAC_DMA_CHNL_DWT_Y_L4) | MMU_TBU(20),
	MMU_TID(MAC_DMA_PORT_R0, MAC_DMA_CHNL_DWT_C_L4) | MMU_TBU(21),
	MMU_TID(MAC_DMA_PORT_R0, MAC_DMA_CHNL_DWT_Y_L3) | MMU_TBU(22),
	MMU_TID(MAC_DMA_PORT_R0, MAC_DMA_CHNL_DWT_C_L3) | MMU_TBU(23),
	MMU_TID(MAC_DMA_PORT_R0, MAC_DMA_CHNL_DWT_Y_L2) | MMU_TBU(24),
	MMU_TID(MAC_DMA_PORT_R0, MAC_DMA_CHNL_DWT_C_L2) | MMU_TBU(25),
	MMU_TID(MAC_DMA_PORT_R0, MAC_DMA_CHNL_DWT_Y_L1) | MMU_TBU(26),
	MMU_TID(MAC_DMA_PORT_R0, MAC_DMA_CHNL_DWT_C_L1) | MMU_TBU(27),
	MMU_TID(MAC_DMA_PORT_R0, MAC_DMA_CHNL_DWT_Y_L0) | MMU_TBU(28),
	MMU_TID(MAC_DMA_PORT_R0, MAC_DMA_CHNL_DWT_C_L0) | MMU_TBU(29),
	MMU_TID(MAC_DMA_PORT_W0, MAC_DMA_CHNL_KGAIN_L0) | MMU_TBU(30),
	MMU_TID(MAC_DMA_PORT_R1, MAC_DMA_CHNL_KGAIN_L0) | MMU_TBU(31),
};
|
|
+
|
|
+static int tid_to_tbu(struct cpp_iommu_device *mmu_dev, uint32_t tid)
|
|
+{
|
|
+ int i;
|
|
+
|
|
+ for (i = 0; i < ARRAY_SIZE(iommu_ch_tid_map); ++i)
|
|
+ if ((iommu_ch_tid_map[i] & 0xffff) == tid)
|
|
+ return iommu_ch_tid_map[i] >> 16;
|
|
+
|
|
+ return -1;
|
|
+}
|
|
+
|
|
/*
 * cpp iommu api
 **/
/* Result of looking a dmabuf fd up in the device's mapping list. */
enum cpp_iommu_buf_state {
	CPP_IOMMU_BUFF_EXIST,
	CPP_IOMMU_BUFF_NOT_EXIST,
};

/* One cached dmabuf mapping, kept on cpp_iommu_device's iommu_buf_list. */
struct cam_dmabuf_info {
	int fd;				/* userspace dmabuf fd */
	struct dma_buf *buf;
	struct dma_buf_attachment *attach;
	struct sg_table *table;		/* mapped scatterlist */
	enum dma_data_direction dir;
	int ref_count;			/* number of outstanding users */
	dma_addr_t paddr;		/* device address of the first segment */
	struct list_head list;
	size_t len;			/* total dmabuf size */
	size_t phys_len;
};
|
|
+
|
|
/*
 * cpp_iommu_check_fd_in_list() - look up @fd in the device's cached
 * mapping list.  On a hit the cached device address is returned through
 * @paddr_ptr and — despite the "check" name — the mapping's refcount is
 * incremented as a side effect.
 *
 * NOTE(review): no lock is taken here; confirm callers serialize access
 * to iommu_buf_list.
 */
static enum cpp_iommu_buf_state
cpp_iommu_check_fd_in_list(struct cpp_iommu_device *mmu_dev, int fd,
			   dma_addr_t *paddr_ptr)
{
	struct cam_dmabuf_info *mapping;

	list_for_each_entry(mapping, &mmu_dev->iommu_buf_list, list) {
		if (mapping->fd == fd) {
			*paddr_ptr = mapping->paddr;
			// *len_ptr = mapping->len;
			mapping->ref_count++;
			return CPP_IOMMU_BUFF_EXIST;
		}
	}

	return CPP_IOMMU_BUFF_NOT_EXIST;
}
|
|
+
|
|
+static int
|
|
+cpp_iommu_map_buffer_and_add_to_list(struct cpp_iommu_device *mmu_dev, int fd,
|
|
+ enum dma_data_direction dma_dir, bool sync,
|
|
+ dma_addr_t *paddr_ptr)
|
|
+{
|
|
+ struct dma_buf *dbuf;
|
|
+ struct dma_buf_attachment *dba;
|
|
+ struct device *dev = &mmu_dev->cpp_dev->pdev->dev;
|
|
+ struct sg_table *sgt;
|
|
+ dma_addr_t paddr;
|
|
+ struct cam_dmabuf_info *mapping_info;
|
|
+ int rc = 0;
|
|
+
|
|
+ dbuf = dma_buf_get(fd);
|
|
+ if (IS_ERR(dbuf)) {
|
|
+ pr_err("invalid dmabuf fd %d, %ld\n", fd, PTR_ERR(dbuf));
|
|
+ return PTR_ERR(dbuf);
|
|
+ }
|
|
+
|
|
+ dba = dma_buf_attach(dbuf, dev);
|
|
+ if (IS_ERR(dba)) {
|
|
+ pr_err("failed to attach dmabuf, %ld\n", PTR_ERR(dba));
|
|
+ dma_buf_put(dbuf);
|
|
+ return PTR_ERR(dba);
|
|
+ }
|
|
+ // FIXME
|
|
+ // 'struct dma_buf_attachment' has no member named 'dma_map_attrs'
|
|
+ //if (!sync)
|
|
+ // dba->dma_map_attrs |= DMA_ATTR_SKIP_CPU_SYNC;
|
|
+
|
|
+ /* get the associated scatterlist for this buffer */
|
|
+ sgt = dma_buf_map_attachment(dba, dma_dir);
|
|
+ if (IS_ERR(sgt)) {
|
|
+ pr_err("Error getting dmabuf scatterlist, %ld\n", PTR_ERR(sgt));
|
|
+ dma_buf_detach(dbuf, dba);
|
|
+ dma_buf_put(dbuf);
|
|
+ return PTR_ERR(sgt);
|
|
+ }
|
|
+ paddr = sg_dma_address(sgt->sgl);
|
|
+
|
|
+ /* fill up mapping_info */
|
|
+ mapping_info = kzalloc(sizeof(*mapping_info), GFP_KERNEL);
|
|
+ if (!mapping_info) {
|
|
+ rc = -ENOMEM;
|
|
+ goto err_alloc;
|
|
+ }
|
|
+
|
|
+ mapping_info->fd = fd;
|
|
+ mapping_info->buf = dbuf;
|
|
+ mapping_info->attach = dba;
|
|
+ mapping_info->table = sgt;
|
|
+ mapping_info->paddr = paddr;
|
|
+ mapping_info->len = dbuf->size;
|
|
+ mapping_info->dir = dma_dir;
|
|
+ mapping_info->ref_count = 1;
|
|
+
|
|
+ if (!mapping_info->paddr || !mapping_info->len) {
|
|
+ pr_err("Error dynamic dma mapping\n");
|
|
+ kfree(mapping_info);
|
|
+ mapping_info = NULL;
|
|
+ rc = -ENOSPC;
|
|
+ goto err_alloc;
|
|
+ }
|
|
+
|
|
+ *paddr_ptr = mapping_info->paddr;
|
|
+ /* add to the list */
|
|
+ list_add(&mapping_info->list, &mmu_dev->iommu_buf_list);
|
|
+
|
|
+ pr_debug("fd=%d, dmabuf=%p, paddr=0x%llx, len=%lu\n", fd, dbuf, paddr,
|
|
+ dbuf->size);
|
|
+
|
|
+ return 0;
|
|
+
|
|
+err_alloc:
|
|
+ dma_buf_unmap_attachment(dba, sgt, dma_dir);
|
|
+ dma_buf_detach(dbuf, dba);
|
|
+ dma_buf_put(dbuf);
|
|
+
|
|
+ return rc;
|
|
+}
|
|
+
|
|
+int cpp_iommu_map_dmabuf(struct cpp_iommu_device *mmu_dev, int fd,
|
|
+ uint32_t map_flags, dma_addr_t *paddr_ptr)
|
|
+{
|
|
+ enum cpp_iommu_buf_state buf_state;
|
|
+ enum dma_data_direction dma_dir;
|
|
+ bool sync;
|
|
+ int rc = 0;
|
|
+
|
|
+ if (fd < 0) {
|
|
+ cam_err("%s: invalid fd=%d", __func__, fd);
|
|
+ return -EINVAL;
|
|
+ }
|
|
+
|
|
+ if (map_flags & IOMMU_MAP_FLAG_READ_ONLY)
|
|
+ dma_dir = DMA_TO_DEVICE;
|
|
+ else if (map_flags & IOMMU_MAP_FLAG_WRITE_ONLY)
|
|
+ dma_dir = DMA_FROM_DEVICE;
|
|
+ else if (map_flags & IOMMU_MAP_FLAG_READ_WRITE)
|
|
+ dma_dir = DMA_BIDIRECTIONAL;
|
|
+ else {
|
|
+ cam_err("%s: map dmabuf without direction", __func__);
|
|
+ return -EINVAL;
|
|
+ }
|
|
+ sync = !(map_flags & IOMMU_MAP_FLAG_NOSYNC);
|
|
+
|
|
+ mutex_lock(&mmu_dev->list_lock);
|
|
+ if (mmu_dev->state != CPP_IOMMU_ATTACHED) {
|
|
+ cam_err("attach mmu before map dma buffer");
|
|
+ rc = -EINVAL;
|
|
+ goto map_dmabuf_end;
|
|
+ }
|
|
+
|
|
+ buf_state = cpp_iommu_check_fd_in_list(mmu_dev, fd, paddr_ptr);
|
|
+ if (buf_state == CPP_IOMMU_BUFF_EXIST) {
|
|
+ pr_debug("fd=%d already in list", fd);
|
|
+ rc = 0;
|
|
+ goto map_dmabuf_end;
|
|
+ }
|
|
+
|
|
+ rc = cpp_iommu_map_buffer_and_add_to_list(mmu_dev, fd, dma_dir, sync,
|
|
+ paddr_ptr);
|
|
+ if (rc < 0)
|
|
+ cam_err("mapping or add list fail, fd=%d, rc=%d", fd, rc);
|
|
+
|
|
+map_dmabuf_end:
|
|
+ mutex_unlock(&mmu_dev->list_lock);
|
|
+ return rc;
|
|
+}
|
|
+
|
|
+EXPORT_SYMBOL(cpp_iommu_map_dmabuf);
|
|
+
|
|
+static struct cam_dmabuf_info *cpp_iommu_find_mapping_by_fd(struct cpp_iommu_device
|
|
+ *mmu_dev, int fd)
|
|
+{
|
|
+ struct cam_dmabuf_info *mapping;
|
|
+
|
|
+ list_for_each_entry(mapping, &mmu_dev->iommu_buf_list, list) {
|
|
+ if (mapping->fd == fd)
|
|
+ return mapping;
|
|
+ }
|
|
+
|
|
+ return NULL;
|
|
+}
|
|
+
|
|
+static int cpp_iommu_unmap_buffer_and_remove_from_list(struct cam_dmabuf_info
|
|
+ *mapping_info)
|
|
+{
|
|
+ if (!mapping_info->buf || !mapping_info->table || !mapping_info->attach) {
|
|
+ cam_err("%s: invalid params fd=%d, buf=%pK, table=%pK, attach=%pk",
|
|
+ __func__, mapping_info->fd, mapping_info->buf,
|
|
+ mapping_info->table, mapping_info->attach);
|
|
+ return -EINVAL;
|
|
+ }
|
|
+
|
|
+ dma_buf_unmap_attachment(mapping_info->attach, mapping_info->table,
|
|
+ mapping_info->dir);
|
|
+ dma_buf_detach(mapping_info->buf, mapping_info->attach);
|
|
+ dma_buf_put(mapping_info->buf);
|
|
+
|
|
+ mapping_info->buf = NULL;
|
|
+
|
|
+ list_del_init(&mapping_info->list);
|
|
+
|
|
+ kfree(mapping_info);
|
|
+ return 0;
|
|
+}
|
|
+
|
|
+int cpp_iommu_unmap_dmabuf(struct cpp_iommu_device *mmu_dev, int fd)
|
|
+{
|
|
+ struct cam_dmabuf_info *mapping_info;
|
|
+ int rc = 0;
|
|
+
|
|
+ if (fd < 0) {
|
|
+ cam_err("%s: invalid fd=%d", __func__, fd);
|
|
+ return -EINVAL;
|
|
+ }
|
|
+
|
|
+ mutex_lock(&mmu_dev->list_lock);
|
|
+ if (mmu_dev->state != CPP_IOMMU_ATTACHED) {
|
|
+ cam_err("attach mmu before unmap dma buffer");
|
|
+ rc = -EINVAL;
|
|
+ goto unmap_dmabuf_end;
|
|
+ }
|
|
+
|
|
+ mapping_info = cpp_iommu_find_mapping_by_fd(mmu_dev, fd);
|
|
+ if (!mapping_info) {
|
|
+ cam_err("%s: fd=%d mapping not found", __func__, fd);
|
|
+ rc = -EINVAL;
|
|
+ goto unmap_dmabuf_end;
|
|
+ }
|
|
+
|
|
+ mapping_info->ref_count--;
|
|
+ if (mapping_info->ref_count > 0) {
|
|
+ rc = 0;
|
|
+ goto unmap_dmabuf_end;
|
|
+ }
|
|
+
|
|
+ rc = cpp_iommu_unmap_buffer_and_remove_from_list(mapping_info);
|
|
+ if (rc < 0)
|
|
+ cam_err("unmapping or remove list fail, fd=%d, rc=%d", fd, rc);
|
|
+
|
|
+unmap_dmabuf_end:
|
|
+ mutex_unlock(&mmu_dev->list_lock);
|
|
+ return rc;
|
|
+}
|
|
+
|
|
+EXPORT_SYMBOL(cpp_iommu_unmap_dmabuf);
|
|
+
|
|
+static int cpp_iommu_acquire_channel(struct cpp_iommu_device *mmu_dev, uint32_t tid)
|
|
+{
|
|
+ int tbu;
|
|
+ unsigned long flags;
|
|
+
|
|
+ tbu = tid_to_tbu(mmu_dev, tid);
|
|
+ if (tbu < 0) {
|
|
+ pr_debug("%s: no such channel %x to acquire\n", __func__, tid);
|
|
+ return -ENODEV;
|
|
+ }
|
|
+
|
|
+ spin_lock_irqsave(&mmu_dev->ops_lock, flags);
|
|
+ if (test_bit(tbu, &mmu_dev->ch_map)) {
|
|
+ spin_unlock_irqrestore(&mmu_dev->ops_lock, flags);
|
|
+ pr_err("%s: channel %x not free\n", __func__, tid);
|
|
+ return -EBUSY;
|
|
+ }
|
|
+ set_bit(tbu, &mmu_dev->ch_map);
|
|
+ spin_unlock_irqrestore(&mmu_dev->ops_lock, flags);
|
|
+
|
|
+ return 0;
|
|
+}
|
|
+
|
|
+static int cpp_iommu_release_channel(struct cpp_iommu_device *mmu_dev, uint32_t tid)
|
|
+{
|
|
+ int tbu;
|
|
+ unsigned long flags;
|
|
+
|
|
+ tbu = tid_to_tbu(mmu_dev, tid);
|
|
+ if (tbu < 0) {
|
|
+ pr_err("%s: no such channel %x to release\n", __func__, tid);
|
|
+ return -ENODEV;
|
|
+ }
|
|
+
|
|
+ spin_lock_irqsave(&mmu_dev->ops_lock, flags);
|
|
+ clear_bit(tbu, &mmu_dev->ch_map);
|
|
+ spin_unlock_irqrestore(&mmu_dev->ops_lock, flags);
|
|
+
|
|
+ return 0;
|
|
+}
|
|
+
|
|
+static int cpp_iommu_enable_channel(struct cpp_iommu_device *mmu_dev, uint32_t tid)
|
|
+{
|
|
+ int tbu;
|
|
+ unsigned long flags;
|
|
+
|
|
+ tbu = tid_to_tbu(mmu_dev, tid);
|
|
+ if (tbu < 0) {
|
|
+ pr_err("%s: no such channel %x to enable\n", __func__, tid);
|
|
+ return -ENODEV;
|
|
+ }
|
|
+
|
|
+ spin_lock_irqsave(&mmu_dev->ops_lock, flags);
|
|
+ if (!test_bit(tbu, &mmu_dev->ch_map)) {
|
|
+ spin_unlock_irqrestore(&mmu_dev->ops_lock, flags);
|
|
+ return -EPERM;
|
|
+ }
|
|
+
|
|
+ iommu_enable_tbu(mmu_dev, tbu);
|
|
+ spin_unlock_irqrestore(&mmu_dev->ops_lock, flags);
|
|
+
|
|
+ return 0;
|
|
+}
|
|
+
|
|
+static int cpp_iommu_disable_channel(struct cpp_iommu_device *mmu_dev, uint32_t tid)
|
|
+{
|
|
+ int tbu;
|
|
+ unsigned long flags;
|
|
+
|
|
+ tbu = tid_to_tbu(mmu_dev, tid);
|
|
+ if (tbu < 0) {
|
|
+ pr_err("%s: no such channel %x to disable\n", __func__, tid);
|
|
+ return -ENODEV;
|
|
+ }
|
|
+
|
|
+ spin_lock_irqsave(&mmu_dev->ops_lock, flags);
|
|
+ if (!test_bit(tbu, &mmu_dev->ch_map)) {
|
|
+ spin_unlock_irqrestore(&mmu_dev->ops_lock, flags);
|
|
+ return -EPERM;
|
|
+ }
|
|
+
|
|
+ iommu_disable_tbu(mmu_dev, tbu);
|
|
+ spin_unlock_irqrestore(&mmu_dev->ops_lock, flags);
|
|
+
|
|
+ return 0;
|
|
+}
|
|
+
|
|
+/**
|
|
+ * cpp_iommu_config_channel - copy tt into iommu rsvd contig memory
|
|
+ *
|
|
+ * @mmu_dev:
|
|
+ * @tid:
|
|
+ * @ttAddr:
|
|
+ * @ttSize:
|
|
+ *
|
|
+ * Return: 0 on success, error code otherwise.
|
|
+ */
|
|
+static int cpp_iommu_config_channel(struct cpp_iommu_device *mmu_dev,
|
|
+ uint32_t tid, uint32_t *ttAddr, uint32_t ttSize)
|
|
+{
|
|
+ int tbu;
|
|
+ unsigned long flags;
|
|
+
|
|
+ tbu = tid_to_tbu(mmu_dev, tid);
|
|
+ if (tbu < 0) {
|
|
+ pr_err("%s: no such channel %x to configure\n", __func__, tid);
|
|
+ return -ENODEV;
|
|
+ }
|
|
+
|
|
+ spin_lock_irqsave(&mmu_dev->ops_lock, flags);
|
|
+ if (!test_bit(tbu, &mmu_dev->ch_map)) {
|
|
+ spin_unlock_irqrestore(&mmu_dev->ops_lock, flags);
|
|
+ return -EPERM;
|
|
+ }
|
|
+
|
|
+ if (ttAddr && ttSize) {
|
|
+ memcpy(mmu_dev->info[tbu].ttVirt, ttAddr, ttSize * sizeof(uint32_t));
|
|
+ mmu_dev->info[tbu].ttSize = ttSize;
|
|
+ }
|
|
+
|
|
+ /* iommu_set_tbu_qos(mmu_dev, tbu, 4); */
|
|
+ iommu_set_tbu_ttaddr(mmu_dev, tbu, mmu_dev->info[tbu].ttPhys);
|
|
+ iommu_set_tbu_ttsize(mmu_dev, tbu, mmu_dev->info[tbu].ttSize);
|
|
+ iommu_update_trans_table(mmu_dev, tbu);
|
|
+ iommu_enable_irqs(mmu_dev);
|
|
+ spin_unlock_irqrestore(&mmu_dev->ops_lock, flags);
|
|
+
|
|
+ return 0;
|
|
+}
|
|
+
|
|
+const static uint64_t IOMMU_VADDR_BASE = 0x80000000;
|
|
+static uint64_t cpp_iommu_get_iova(struct cpp_iommu_device *mmu_dev,
|
|
+ uint32_t tid, uint32_t offset)
|
|
+{
|
|
+ int tbu;
|
|
+ uint64_t iovAddr;
|
|
+
|
|
+ tbu = tid_to_tbu(mmu_dev, tid);
|
|
+ if (tbu < 0) {
|
|
+ pr_err("%s: no such channel %x to get sva\n", __func__, tid);
|
|
+ return -ENODEV;
|
|
+ }
|
|
+
|
|
+ iovAddr = iommu_bva_low(mmu_dev) + 0x2000000ULL * tbu + (offset & 0xfff);
|
|
+
|
|
+ return iovAddr;
|
|
+}
|
|
+
|
|
+static int cpp_iommu_setup_timeout_address(struct cpp_iommu_device *mmu_dev)
|
|
+{
|
|
+ iommu_reg_write(mmu_dev, REG_IOMMU_TOAL,
|
|
+ mmu_dev->to_dma_addr & 0xffffffff);
|
|
+ iommu_reg_write(mmu_dev, REG_IOMMU_TOAH,
|
|
+ (mmu_dev->to_dma_addr >> 32) & 0x1);
|
|
+
|
|
+ return 0;
|
|
+}
|
|
+
|
|
/*
 * __iommu_fill_ttb_by_sg - flatten a scatterlist window into 4K page entries
 *
 * Walks @sgt until the byte range [@offset, @offset + @length) is covered
 * and writes one entry per 4 KiB page into @tt_base; each entry is the
 * page frame number masked to bits [21:0]. Three extra entries pointing
 * just past the covered range are appended ("extend trans table" —
 * presumably a hardware prefetch guard; confirm against the IOMMU spec).
 * Returns the number of entries written.
 */
static int __iommu_fill_ttb_by_sg(struct sg_table *sgt, uint32_t offset,
				  uint32_t length, uint32_t *tt_base)
{
	size_t temp_size = 0, temp_offset, temp_length;
	dma_addr_t start_addr, end_addr, dmad = 0;
	struct scatterlist *sg;
	int i, tt_size = 0;

	sg = sgt->sgl;
	for (i = 0; i < sgt->nents; ++i, sg = sg_next(sg)) {
		pr_debug("sg%d: addr 0x%llx, size 0x%x", i, sg_phys(sg),
			 sg_dma_len(sg));
		/* running total of bytes seen so far across segments */
		temp_size += sg_dma_len(sg);
		/* skip segments that lie wholly before the window */
		if (temp_size <= offset)
			continue;

		/* byte offset of the window start inside this segment */
		if (offset > temp_size - sg_dma_len(sg))
			temp_offset = offset - temp_size + sg_dma_len(sg);
		else
			temp_offset = 0;

		/* first covered page, rounded down to a 4K boundary */
		start_addr = ((phys_cpu2cam(sg_phys(sg)) + temp_offset) >> 12) << 12;

		/* how many bytes of the window this segment reaches */
		temp_length = temp_size - offset;
		if (temp_length >= length)
			temp_offset = sg_dma_len(sg) - temp_length + length;
		else
			temp_offset = sg_dma_len(sg);

		/* one past the last covered byte, rounded up to a 4K boundary */
		end_addr = ((phys_cpu2cam(sg_phys(sg)) + temp_offset + 0xfff) >> 12) << 12;

		for (dmad = start_addr; dmad < end_addr; dmad += 0x1000)
			tt_base[tt_size++] = (dmad >> 12) & 0x3fffff;

		/* window fully covered — stop walking */
		if (temp_length >= length)
			break;
	}

	if (dmad) { /* extend trans table */
		tt_base[tt_size++] = (dmad >> 12) & 0x3fffff;
		tt_base[tt_size++] = (dmad >> 12) & 0x3fffff;
		tt_base[tt_size++] = (dmad >> 12) & 0x3fffff;
	}

	return tt_size;
}
|
|
+
|
|
+/**
|
|
+ * cpp_iommu_setup_sglist - setup sglist for tbu
|
|
+ *
|
|
+ * @mmu_dev:
|
|
+ * @tid:
|
|
+ * @fd: mapped fd
|
|
+ * @offset: planar data offset in dma buffer
|
|
+ * @length: planar length
|
|
+ *
|
|
+ * Return: 0 on success, error code otherwise.
|
|
+ */
|
|
+static int cpp_iommu_setup_sglist(struct cpp_iommu_device *mmu_dev,
|
|
+ uint32_t tid, int fd, uint32_t offset,
|
|
+ uint32_t length)
|
|
+{
|
|
+ struct cam_dmabuf_info *mapping_info;
|
|
+ int tbu;
|
|
+ uint32_t *tt_base; /* translation table cpu base */
|
|
+
|
|
+ tbu = tid_to_tbu(mmu_dev, tid);
|
|
+ if (tbu < 0) {
|
|
+ pr_err("%s: invalid tid 0x%x\n", __func__, tid);
|
|
+ return -ENODEV;
|
|
+ }
|
|
+
|
|
+ mutex_lock(&mmu_dev->list_lock);
|
|
+ mapping_info = cpp_iommu_find_mapping_by_fd(mmu_dev, fd);
|
|
+ if (!mapping_info) {
|
|
+ mutex_unlock(&mmu_dev->list_lock);
|
|
+ cam_err("%s: tid=0x%x, fd=%d mapping not found", __func__, tid, fd);
|
|
+ return -EINVAL;
|
|
+ }
|
|
+
|
|
+ tt_base = (uint32_t *) mmu_dev->info[tbu].ttVirt;
|
|
+ mmu_dev->info[tbu].ttSize =
|
|
+ __iommu_fill_ttb_by_sg(mapping_info->table, offset, length, tt_base);
|
|
+
|
|
+ mutex_unlock(&mmu_dev->list_lock);
|
|
+ return 0;
|
|
+}
|
|
+
|
|
+__maybe_unused static int cpp_iommu_dump_register(struct cpp_iommu_device *mmu_dev)
|
|
+{
|
|
+ int tbu;
|
|
+ struct cam_dmabuf_info *mapping, *_mapping;
|
|
+
|
|
+ mutex_lock(&mmu_dev->list_lock);
|
|
+ if (!list_empty(&mmu_dev->iommu_buf_list)) {
|
|
+ cam_err("%s: iommu buffer liset not empty==>", __func__);
|
|
+ list_for_each_entry_safe(mapping, _mapping,
|
|
+ &mmu_dev->iommu_buf_list, list) {
|
|
+ cam_err("fd=%d, dmabuf=%p, length=%ld, direction=%d",
|
|
+ mapping->fd, mapping->buf, mapping->len, mapping->dir);
|
|
+ cpp_iommu_unmap_buffer_and_remove_from_list(mapping);
|
|
+ }
|
|
+ }
|
|
+ mutex_unlock(&mmu_dev->list_lock);
|
|
+
|
|
+ for (tbu = 0; tbu < 32; ++tbu) {
|
|
+ cam_dbg("TBU%d: ttAddr = 0x%llx, ttSize = 0x%zx\n", tbu,
|
|
+ mmu_dev->info[tbu].ttPhys, mmu_dev->info[tbu].ttSize);
|
|
+ cam_dbg("REG_IOMMU_TTBL%d 0x%x=0x%08x\n", tbu,
|
|
+ REG_IOMMU_TTBL(tbu),
|
|
+ iommu_reg_read(mmu_dev, REG_IOMMU_TTBL(tbu)));
|
|
+ cam_dbg("REG_IOMMU_TTBH%d 0x%x=0x%08x\n", tbu,
|
|
+ REG_IOMMU_TTBH(tbu),
|
|
+ iommu_reg_read(mmu_dev, REG_IOMMU_TTBH(tbu)));
|
|
+ cam_dbg("REG_IOMMU_TCR0%d 0x%x=0x%08x\n", tbu,
|
|
+ REG_IOMMU_TCR0(tbu),
|
|
+ iommu_reg_read(mmu_dev, REG_IOMMU_TCR0(tbu)));
|
|
+ cam_dbg("REG_IOMMU_TCR1%d 0x%x=0x%08x\n", tbu,
|
|
+ REG_IOMMU_TCR1(tbu),
|
|
+ iommu_reg_read(mmu_dev, REG_IOMMU_TCR1(tbu)));
|
|
+ cam_dbg("REG_IOMMU_STAT%d 0x%x=0x%08x\n", tbu,
|
|
+ REG_IOMMU_STAT(tbu),
|
|
+ iommu_reg_read(mmu_dev, REG_IOMMU_STAT(tbu)));
|
|
+ }
|
|
+
|
|
+ return 0;
|
|
+}
|
|
+
|
|
+static int cpp_iommu_dump_dmabuf(struct cpp_iommu_device *mmu_dev)
|
|
+{
|
|
+ struct cam_dmabuf_info *mapping, *_mapping;
|
|
+
|
|
+ mutex_lock(&mmu_dev->list_lock);
|
|
+ if (!list_empty(&mmu_dev->iommu_buf_list)) {
|
|
+ cam_err("%s: iommu buffer liset not empty==>", __func__);
|
|
+ list_for_each_entry_safe (mapping, _mapping,
|
|
+ &mmu_dev->iommu_buf_list, list) {
|
|
+ cam_err("fd=%d, dmabuf=%p, length=%zd, direction=%d",
|
|
+ mapping->fd, mapping->buf, mapping->len,
|
|
+ mapping->dir);
|
|
+ cpp_iommu_unmap_buffer_and_remove_from_list(mapping);
|
|
+ }
|
|
+ }
|
|
+ mutex_unlock(&mmu_dev->list_lock);
|
|
+
|
|
+ return 0;
|
|
+}
|
|
/* Operation table handed to the CPP core via cpp_iommu_device->ops. */
static struct cpp_iommu_ops mmu_ops = {
	.acquire_channel = cpp_iommu_acquire_channel,
	.release_channel = cpp_iommu_release_channel,
	.enable_channel = cpp_iommu_enable_channel,
	.disable_channel = cpp_iommu_disable_channel,
	.config_channel = cpp_iommu_config_channel,
	.setup_timeout_address = cpp_iommu_setup_timeout_address,
	.get_iova = cpp_iommu_get_iova,
	.setup_sglist = cpp_iommu_setup_sglist,
	.dump_status = cpp_iommu_dump_dmabuf,	/* leak report + forced cleanup */
};
|
|
+
|
|
+int cpp_iommu_register(struct cpp_device *cpp_dev)
|
|
+{
|
|
+ struct cpp_iommu_device *mmu_dev;
|
|
+ size_t size, offset;
|
|
+ int i;
|
|
+
|
|
+ mmu_dev = devm_kzalloc(&(cpp_dev->pdev->dev),
|
|
+ sizeof(struct cpp_iommu_device), GFP_KERNEL);
|
|
+ if (!mmu_dev)
|
|
+ return -ENOMEM;
|
|
+
|
|
+ size = IOMMU_TRANS_TAB_MAX_NUM * sizeof(uint32_t) * CPP_IOMMU_CH_NUM;
|
|
+ mmu_dev->rsvd_cpu_addr =
|
|
+ dmam_alloc_coherent(&cpp_dev->pdev->dev, size, &mmu_dev->rsvd_dma_addr,
|
|
+ GFP_KERNEL);
|
|
+ if (!mmu_dev->rsvd_cpu_addr) {
|
|
+ pr_err("%s: alloc reserved memory failed\n", __func__);
|
|
+ return -ENOMEM;
|
|
+ }
|
|
+
|
|
+ for (i = 0; i < CPP_IOMMU_CH_NUM; ++i) {
|
|
+ offset = IOMMU_TRANS_TAB_MAX_NUM * sizeof(uint32_t) * i;
|
|
+ mmu_dev->info[i].ttPhys = mmu_dev->rsvd_dma_addr + offset;
|
|
+ mmu_dev->info[i].ttVirt = mmu_dev->rsvd_cpu_addr + offset;
|
|
+ }
|
|
+
|
|
+ mmu_dev->to_cpu_addr = dmam_alloc_coherent(
|
|
+ &cpp_dev->pdev->dev, 0x1000, &mmu_dev->to_dma_addr, GFP_KERNEL);
|
|
+ if (!mmu_dev->to_cpu_addr) {
|
|
+ pr_err("%s: alloc timeout memory failed\n", __func__);
|
|
+ return -ENOMEM;
|
|
+ } else {
|
|
+ pr_debug("%s: timeout memory %pad\n", __func__,
|
|
+ &mmu_dev->to_dma_addr);
|
|
+ }
|
|
+
|
|
+ mmu_dev->regs_base = cpp_dev->regs_base;
|
|
+ mmu_dev->ops = &mmu_ops;
|
|
+
|
|
+ INIT_LIST_HEAD(&mmu_dev->iommu_buf_list);
|
|
+ mutex_init(&mmu_dev->list_lock);
|
|
+ spin_lock_init(&mmu_dev->ops_lock);
|
|
+ mmu_dev->cpp_dev = cpp_dev;
|
|
+ cpp_dev->mmu_dev = mmu_dev;
|
|
+
|
|
+ pr_debug("%s X\n", __func__);
|
|
+
|
|
+ return 0;
|
|
+}
|
|
+
|
|
+EXPORT_SYMBOL(cpp_iommu_register);
|
|
+
|
|
+void cpp_iommu_unregister(struct cpp_device *cpp_dev)
|
|
+{
|
|
+ struct cpp_iommu_device *mmu_dev = cpp_dev->mmu_dev;
|
|
+ size_t size;
|
|
+
|
|
+ mutex_destroy(&mmu_dev->list_lock);
|
|
+ size = IOMMU_TRANS_TAB_MAX_NUM * sizeof(uint32_t) * CPP_IOMMU_CH_NUM;
|
|
+ dmam_free_coherent(&cpp_dev->pdev->dev, size, mmu_dev->rsvd_cpu_addr,
|
|
+ mmu_dev->rsvd_dma_addr);
|
|
+ dmam_free_coherent(&cpp_dev->pdev->dev, size, mmu_dev->to_cpu_addr,
|
|
+ mmu_dev->to_dma_addr);
|
|
+ devm_kfree(&(cpp_dev->pdev->dev), cpp_dev->mmu_dev);
|
|
+ cpp_dev->mmu_dev = NULL;
|
|
+
|
|
+ pr_debug("%s X\n", __func__);
|
|
+}
|
|
+
|
|
+EXPORT_SYMBOL(cpp_iommu_unregister);
|
|
diff --git a/drivers/media/platform/spacemit/camera/cam_cpp/cpp_iommu.h b/drivers/media/platform/spacemit/camera/cam_cpp/cpp_iommu.h
|
|
new file mode 100644
|
|
index 000000000000..111111111111
|
|
--- /dev/null
|
|
+++ b/drivers/media/platform/spacemit/camera/cam_cpp/cpp_iommu.h
|
|
@@ -0,0 +1,73 @@
|
|
+/* SPDX-License-Identifier: GPL-2.0 */
|
|
+/*
|
|
+ * cpp_iommu.h - Driver for CPP IOMMU
|
|
+ *
|
|
+ * Copyright (C) 2023 SPACEMIT Micro Limited
|
|
+ */
|
|
+
|
|
+#ifndef __CPP_IOMMU_H__
|
|
+#define __CPP_IOMMU_H__
|
|
+
|
|
+#include <linux/types.h>
|
|
+#include <linux/spinlock.h>
|
|
+#include <linux/scatterlist.h>
|
|
+
|
|
#define CPP_IOMMU_TBU_NUM (32)
#define CPP_IOMMU_CH_NUM (CPP_IOMMU_TBU_NUM)
#define IOMMU_TRANS_TAB_MAX_NUM (8192)
/* Transaction id = port in bits [15:8], channel in bits [7:0].
 * fix: arguments fully parenthesized so expression arguments (e.g. a | b)
 * are not broken by operator precedence inside the expansion. */
#define MMU_TID(port_id, chnl_id) (((port_id) << 8) | (chnl_id))

/* dmabuf mapping flags for cpp_iommu_map_dmabuf() */
#define IOMMU_MAP_FLAG_READ_ONLY (1 << 0)
#define IOMMU_MAP_FLAG_WRITE_ONLY (1 << 1)
#define IOMMU_MAP_FLAG_READ_WRITE (1 << 2)
#define IOMMU_MAP_FLAG_NOSYNC (1 << 3)
|
|
+
|
|
/* Attachment state; dmabufs may only be (un)mapped while ATTACHED. */
enum cpp_iommu_state {
	CPP_IOMMU_DETACHED,
	CPP_IOMMU_ATTACHED,
};
|
|
+
|
|
/* Per-TBU translation table location inside the reserved coherent region. */
struct iommu_ch_info {
	void *ttVirt;		/* CPU address of the translation table */
	uint64_t ttPhys;	/* DMA address programmed into the TBU */
	size_t ttSize;		/* number of valid 32-bit entries */
};
|
|
+
|
|
/* Per-CPP iommu instance: TBU channel bookkeeping plus dmabuf tracking. */
struct cpp_iommu_device {
	struct cpp_device *cpp_dev;	/* owning CPP device */
	void __iomem *regs_base;	/* shared with cpp_dev->regs_base */
	unsigned long ch_map;		/* bitmap of acquired TBU channels */
	struct iommu_ch_info info[CPP_IOMMU_CH_NUM];
	spinlock_t ops_lock;		/* guards ch_map and TBU register ops */
	dma_addr_t rsvd_dma_addr;	/* reserved translation-table region */
	void *rsvd_cpu_addr;
	dma_addr_t to_dma_addr;		/* one-page timeout/fault fallback buffer */
	void *to_cpu_addr;
	struct mutex list_lock;		/* guards iommu_buf_list and state checks */
	struct list_head iommu_buf_list;	/* mapped dmabufs (cam_dmabuf_info) */
	enum cpp_iommu_state state;

	struct cpp_iommu_ops *ops;	/* points at the driver's static op table */
};
|
|
+
|
|
/* Channel-level operations the CPP core invokes on its iommu (tid selects a TBU). */
struct cpp_iommu_ops {
	int (*acquire_channel)(struct cpp_iommu_device *mmu_dev, uint32_t tid);
	int (*release_channel)(struct cpp_iommu_device *mmu_dev, uint32_t tid);
	int (*enable_channel)(struct cpp_iommu_device *mmu_dev, uint32_t tid);
	int (*disable_channel)(struct cpp_iommu_device *mmu_dev, uint32_t tid);
	/* copy a translation table and program it into the channel's TBU */
	int (*config_channel)(struct cpp_iommu_device *mmu_dev, uint32_t tid,
			      uint32_t *ttAddr, uint32_t ttSize);
	/* device-virtual address for the channel's window + page offset */
	uint64_t(*get_iova) (struct cpp_iommu_device *mmu_dev, uint32_t tid,
			     uint32_t offset);
	int (*setup_timeout_address)(struct cpp_iommu_device *mmu_dev);
	/* build the channel's translation table from a mapped dmabuf plane */
	int (*setup_sglist)(struct cpp_iommu_device *mmu_dev, uint32_t tid,
			    int fd, uint32_t offset, uint32_t length);
	int (*dump_status)(struct cpp_iommu_device *mmu_dev);
};
|
|
+
|
|
/* Allocate/tear down the iommu state attached to @cpp_dev. */
int cpp_iommu_register(struct cpp_device *cpp_dev);
void cpp_iommu_unregister(struct cpp_device *cpp_dev);
/* Map/unmap a dmabuf fd for CPP DMA; map_flags are IOMMU_MAP_FLAG_*. */
int cpp_iommu_map_dmabuf(struct cpp_iommu_device *mmu_dev, int fd,
			 uint32_t map_flags, dma_addr_t *paddr_ptr);
int cpp_iommu_unmap_dmabuf(struct cpp_iommu_device *mmu_dev, int fd);
#endif /* ifndef __CPP_IOMMU_H__ */
|
|
diff --git a/drivers/media/platform/spacemit/camera/cam_cpp/k1x_cpp.c b/drivers/media/platform/spacemit/camera/cam_cpp/k1x_cpp.c
|
|
new file mode 100644
|
|
index 000000000000..111111111111
|
|
--- /dev/null
|
|
+++ b/drivers/media/platform/spacemit/camera/cam_cpp/k1x_cpp.c
|
|
@@ -0,0 +1,1451 @@
|
|
+// SPDX-License-Identifier: GPL-2.0
|
|
+/*
|
|
+ * k1x_cpp.c - Driver for SPACEMIT K1X Camera Post Process
|
|
+ * lizhirong <zhirong.li@spacemit.com>
|
|
+ *
|
|
+ * Copyright (C) 2023 SPACEMIT Micro Limited
|
|
+ */
|
|
+
|
|
+#include <linux/completion.h>
|
|
+#include <linux/spinlock.h>
|
|
+#include <linux/kernel.h>
|
|
+#include <linux/module.h>
|
|
+#include <linux/platform_device.h>
|
|
+#include <linux/dma-mapping.h>
|
|
+#include <linux/pm_runtime.h>
|
|
+#include <linux/clk.h>
|
|
+#include <linux/clk-provider.h>
|
|
+#include <linux/slab.h>
|
|
+#include <linux/of_device.h>
|
|
+#include <linux/io.h>
|
|
+#include <linux/ioport.h>
|
|
+#include <linux/mm.h>
|
|
+#include <linux/workqueue.h>
|
|
+#include <linux/timekeeping.h>
|
|
+#include <linux/pm_qos.h>
|
|
+#include <media/v4l2-event.h>
|
|
+#include <media/k1x/k1x_plat_cam.h>
|
|
+#include <media/k1x/k1x_cpp_uapi.h>
|
|
+#include "cam_dbg.h"
|
|
+//#include "cpp_compat_ioctl32.h"
|
|
+#include "cpp_dmabuf.h"
|
|
+#include "cpp_iommu.h"
|
|
+#include "k1x_cpp.h"
|
|
+
|
|
+#ifdef CONFIG_ARCH_SPACEMIT
|
|
+//#include <soc/spm/plat.h>
|
|
+#endif
|
|
+
|
|
#undef CAM_MODULE_TAG
#define CAM_MODULE_TAG CAM_MDL_CPP	/* tag cam_dbg/cam_err output as the CPP module */

#define CPP_DRV_NAME "mars-cpp"

/* default functional and ISP bus clock rates, in Hz */
#define CPP_FNC_DEFAULT_FREQ (307200000)
#define ISP_BUS_DEFAULT_FREQ (307200000)

/* FPGA prototypes run slower, so frames get a longer completion timeout */
#ifdef CONFIG_SPACEMIT_FPGA
#define CPP_FRMCMD_TIMEOUT_MS (800)
#else
#define CPP_FRMCMD_TIMEOUT_MS (300)
#endif
|
|
+
|
|
#ifdef CONFIG_SPACEMIT_DEBUG
/* Tracks whether the CPP is mid-frame so a clock-off request can be vetoed. */
struct dev_running_info {
	bool b_dev_running;	/* set while the device is processing */
	bool (*is_dev_running)(struct dev_running_info *p_devinfo);
	struct notifier_block nb;	/* registered as a clk rate-change notifier */
} cpp_running_info;

/* Trivial accessor used as the is_dev_running callback. */
static bool check_dev_running_status(struct dev_running_info *p_devinfo)
{
	return p_devinfo->b_dev_running;
}

#define to_devinfo(_nb) container_of(_nb, struct dev_running_info, nb)

/*
 * Clock notifier: veto (NOTIFY_BAD) a pending rate change to 0 (i.e. clock
 * gating) while the device reports itself running; allow everything else.
 */
static int dev_clkoffdet_notifier_handler(struct notifier_block *nb,
					  unsigned long msg, void *data)
{
	struct clk_notifier_data *cnd = data;
	struct dev_running_info *p_devinfo = to_devinfo(nb);

	if ((__clk_is_enabled(cnd->clk)) && (msg & PRE_RATE_CHANGE) &&
	    (cnd->new_rate == 0) && (cnd->old_rate != 0)) {
		if (p_devinfo->is_dev_running(p_devinfo))
			return NOTIFY_BAD;
	}

	return NOTIFY_OK;
}
#endif
|
|
+
|
|
//#if IS_ENABLED(CONFIG_SPACEMIT_DDR_FC) && defined(CONFIG_PM)
#if 0 // FIXME
/* Real DDR bandwidth-QoS hooks; disabled until the spm API is available. */
static struct spm_bw_con *ddr_qos_cons;
static int cpp_init_bandwidth(void)
{
	ddr_qos_cons =
	    register_spm_ddr_bw_cons(CPP_DRV_NAME,
				     PM_QOS_CPUIDLE_BLOCK_DEFAULT_VALUE,
				     PM_QOS_CPUIDLE_BLOCK_DEFAULT_VALUE);

	if (IS_ERR_OR_NULL(ddr_qos_cons)) {
		cam_err("freq qos regsiter failed\n");
		return -EINVAL;
	}
	return 0;
}

static int cpp_deinit_bandwidth(void)
{
	rm_spm_ddr_bw_cons(ddr_qos_cons);
	return 0;
}

/* Push new read/write bandwidth requests to the DDR QoS controller. */
static int cpp_update_bandwidth(int32_t rsum, int32_t wsum)
{
	update_spm_ddr_bw_read_req(ddr_qos_cons, rsum);
	update_spm_ddr_bw_write_req(ddr_qos_cons, wsum);

	return 0;
}
#else
/* No-op stubs so callers need no #ifdefs when DDR QoS is unavailable. */
static int cpp_init_bandwidth(void)
{
	return 0;
}

static int cpp_deinit_bandwidth(void)
{
	return 0;
}

static int cpp_update_bandwidth(s32 rsum, s32 wsum)
{
	return 0;
}
#endif
|
|
+
|
|
+static int cpp_hw_reg_config(struct cpp_device *cpp_dev, void *arg)
|
|
+{
|
|
+ struct k1x_cpp_reg_cfg *reg_cfg = arg;
|
|
+
|
|
+ /* validate argument */
|
|
+ if (reg_cfg->u.rw_info.reg_offset > resource_size(cpp_dev->mem)) {
|
|
+ cam_err("%s: reg offset 0x%08x res len 0x%llx", __func__,
|
|
+ reg_cfg->u.rw_info.reg_offset, resource_size(cpp_dev->mem));
|
|
+ return -EINVAL;
|
|
+ }
|
|
+
|
|
+ switch (reg_cfg->cmd_type) {
|
|
+ case CPP_WRITE32:
|
|
+ cpp_reg_write_mask(cpp_dev, reg_cfg->u.rw_info.reg_offset,
|
|
+ reg_cfg->u.rw_info.val, reg_cfg->u.rw_info.mask);
|
|
+ break;
|
|
+ case CPP_READ32:
|
|
+ reg_cfg->u.rw_info.val =
|
|
+ cpp_reg_read(cpp_dev, reg_cfg->u.rw_info.reg_offset);
|
|
+ break;
|
|
+ default:
|
|
+ break;
|
|
+ }
|
|
+
|
|
+ return 0;
|
|
+}
|
|
+
|
|
/* Reset @queue to an empty, named state; len/max are debug statistics. */
static void cmd_queue_init(struct device_queue *queue, const char *name)
{
	unsigned long flags;

	cam_dbg("%s E", __func__);

	spin_lock_init(&queue->lock);
	spin_lock_irqsave(&queue->lock, flags);
	INIT_LIST_HEAD(&queue->list);
	queue->len = 0;		/* current depth */
	queue->max = 0;		/* high-water mark */
	queue->name = name;
	spin_unlock_irqrestore(&queue->lock, flags);
}
|
|
+
|
|
+static void cmd_enqueue(struct device_queue *queue, struct list_head *entry)
|
|
+{
|
|
+ unsigned long flags;
|
|
+
|
|
+ spin_lock_irqsave(&queue->lock, flags);
|
|
+ queue->len++;
|
|
+ if (queue->len > queue->max) {
|
|
+ queue->max = queue->len;
|
|
+ cam_dbg("%s new max is %d", queue->name, queue->max);
|
|
+ }
|
|
+ list_add_tail(entry, &queue->list);
|
|
+ spin_unlock_irqrestore(&queue->lock, flags);
|
|
+}
|
|
+
|
|
+static struct cpp_queue_cmd *cmd_dequeue(struct device_queue *queue)
|
|
+{
|
|
+ unsigned long flags;
|
|
+ struct cpp_queue_cmd *qcmd;
|
|
+
|
|
+ spin_lock_irqsave(&queue->lock, flags);
|
|
+ qcmd = list_first_entry_or_null(&queue->list, struct cpp_queue_cmd, list_frame);
|
|
+ if (!qcmd) {
|
|
+ spin_unlock_irqrestore(&queue->lock, flags);
|
|
+ return NULL;
|
|
+ }
|
|
+ list_del(&qcmd->list_frame);
|
|
+ queue->len--;
|
|
+ spin_unlock_irqrestore(&queue->lock, flags);
|
|
+
|
|
+ return qcmd;
|
|
+}
|
|
+
|
|
+static struct cpp_queue_cmd *queue_cmd_alloc(void)
|
|
+{
|
|
+ struct cpp_queue_cmd *qcmd;
|
|
+ int i;
|
|
+
|
|
+ qcmd = kzalloc(sizeof(struct cpp_queue_cmd), GFP_KERNEL);
|
|
+ if (!qcmd) {
|
|
+ cam_err("failed to allocate memory for cpp_queue_cmd");
|
|
+ goto err_qcmd_alloc;
|
|
+ }
|
|
+
|
|
+ for (i = 0; i < MAX_REG_CMDS; i++) {
|
|
+ qcmd->hw_cmds[i].reg_data =
|
|
+ kzalloc(sizeof(struct reg_val_mask_info) * MAX_REG_DATA,
|
|
+ GFP_KERNEL);
|
|
+ if (!qcmd->hw_cmds[i].reg_data) {
|
|
+ cam_err("failed to allocate memory for reg cmd %d", i);
|
|
+ goto err_cmds_alloc;
|
|
+ }
|
|
+ }
|
|
+
|
|
+ return qcmd;
|
|
+
|
|
+err_cmds_alloc:
|
|
+ for (i = 0; i < MAX_REG_CMDS; i++)
|
|
+ kfree(qcmd->hw_cmds[i].reg_data);
|
|
+
|
|
+ kfree(qcmd);
|
|
+
|
|
+err_qcmd_alloc:
|
|
+ return NULL;
|
|
+}
|
|
+
|
|
+static int queue_cmd_free(struct cpp_queue_cmd *qcmd)
|
|
+{
|
|
+ int i;
|
|
+
|
|
+ if (!qcmd) {
|
|
+ cam_err("queue cmd is NULL");
|
|
+ return -EINVAL;
|
|
+ }
|
|
+
|
|
+ for (i = 0; i < MAX_REG_CMDS; i++)
|
|
+ kfree(qcmd->hw_cmds[i].reg_data);
|
|
+
|
|
+ kfree(qcmd);
|
|
+
|
|
+ return 0;
|
|
+}
|
|
+
|
|
+static void cmd_queue_empty(struct device_queue *queue)
|
|
+{
|
|
+ unsigned long flags;
|
|
+ struct cpp_queue_cmd *qcmd = NULL;
|
|
+
|
|
+ if (queue) {
|
|
+ cam_dbg("%s len %d, is empty", queue->name, queue->len);
|
|
+
|
|
+ spin_lock_irqsave(&queue->lock, flags);
|
|
+ while (!list_empty(&queue->list)) {
|
|
+ queue->len--;
|
|
+ qcmd =
|
|
+ list_first_entry(&queue->list, struct cpp_queue_cmd,
|
|
+ list_frame);
|
|
+ list_del_init(&qcmd->list_frame);
|
|
+ queue_cmd_free(qcmd); /* release frame qcmd */
|
|
+ qcmd = NULL;
|
|
+ }
|
|
+ queue->len = 0;
|
|
+ queue->max = 0;
|
|
+ spin_unlock_irqrestore(&queue->lock, flags);
|
|
+ }
|
|
+}
|
|
+
|
|
+static int cmd_queue_request(struct device_queue *queue, int len)
|
|
+{
|
|
+ int i;
|
|
+ static struct cpp_queue_cmd *qcmd;
|
|
+
|
|
+ if (!queue) {
|
|
+ cam_err("device queue is NULL");
|
|
+ return -EINVAL;
|
|
+ }
|
|
+
|
|
+ for (i = 0; i < len; i++) {
|
|
+ qcmd = queue_cmd_alloc();
|
|
+ if (qcmd)
|
|
+ cmd_enqueue(queue, &qcmd->list_frame);
|
|
+ else
|
|
+ goto err_queue_alloc;
|
|
+ }
|
|
+
|
|
+ return 0;
|
|
+
|
|
+err_queue_alloc:
|
|
+ cmd_queue_empty(queue);
|
|
+
|
|
+ return -ENOMEM;
|
|
+}
|
|
+
|
|
+/*
|
|
+ * cpp_low_power_mode_set - enable auto clock gating to save power
|
|
+ * @cpp_dev: cpp device
|
|
+ * @en: switch on/off
|
|
+ *
|
|
+ * Return 0 on success or a negative error code otherwise
|
|
+ */
|
|
+static int cpp_low_power_mode_set(struct cpp_device *cpp_dev, int en)
|
|
+{
|
|
+ if (cpp_dev->hw_info.cpp_hw_version == CPP_HW_VERSION_1_0) {
|
|
+ cam_err("CPP_HW_VERSION_1_0 not support low power mode");
|
|
+ return -ENOTSUPP;
|
|
+ }
|
|
+
|
|
+ if (en) {
|
|
+ cpp_dev->ops->enable_clk_gating(cpp_dev, 1);
|
|
+ cpp_dev->hw_info.low_pwr_mode = 1;
|
|
+ } else {
|
|
+ cpp_dev->ops->enable_clk_gating(cpp_dev, 0);
|
|
+ cpp_dev->hw_info.low_pwr_mode = 0;
|
|
+ }
|
|
+
|
|
+ return 0;
|
|
+}
|
|
+
|
|
+static int k1x_cpp_send_reg_cmd(struct cpp_device *cpp_dev,
|
|
+ struct cpp_reg_cfg_cmd *reg_cmd)
|
|
+{
|
|
+ int i;
|
|
+
|
|
+ if (!cpp_dev || !reg_cmd) {
|
|
+ cam_err("invalid args cpp_dev %p reg_cmd %p\n", cpp_dev, reg_cmd);
|
|
+ return -EINVAL;
|
|
+ }
|
|
+
|
|
+ /* reg cmd skip */
|
|
+ if (!reg_cmd->reg_data || !reg_cmd->reg_len)
|
|
+ return 0;
|
|
+
|
|
+ switch (reg_cmd->reg_type) {
|
|
+ case CPP_WRITE32:
|
|
+ for (i = 0; i < reg_cmd->reg_len; i++) {
|
|
+ if (reg_cmd->reg_data[i].mask) /* mask 0x0 skip */
|
|
+ cpp_reg_write_mask(cpp_dev,
|
|
+ reg_cmd->reg_data[i].
|
|
+ reg_offset,
|
|
+ reg_cmd->reg_data[i].val,
|
|
+ reg_cmd->reg_data[i].mask);
|
|
+ }
|
|
+ break;
|
|
+ case CPP_WRITE32_RLX:
|
|
+ for (i = 0; i < reg_cmd->reg_len; i++) {
|
|
+ cpp_reg_write_relaxed(cpp_dev,
|
|
+ reg_cmd->reg_data[i].reg_offset,
|
|
+ reg_cmd->reg_data[i].val);
|
|
+ }
|
|
+ break;
|
|
+ case CPP_WRITE32_NOP:
|
|
+ cam_dbg("cpp write32 %d nops", reg_cmd->reg_len);
|
|
+ return 0;
|
|
+ default:
|
|
+ cam_err("invalid reg cmd type %d", reg_cmd->reg_type);
|
|
+ return -EINVAL;
|
|
+ }
|
|
+
|
|
+ return 0;
|
|
+}
|
|
+
|
|
+static void cpp_send_frame_to_hardware(struct cpp_device *cpp_dev,
|
|
+ struct cpp_queue_cmd *qcmd)
|
|
+{
|
|
+ int i;
|
|
+
|
|
+ if (atomic_read(&qcmd->in_processing)) {
|
|
+ cam_err("frame_cmd has been processed");
|
|
+ return;
|
|
+ }
|
|
+
|
|
+ atomic_set(&qcmd->in_processing, 1);
|
|
+ qcmd->ts_reg_config = ktime_get_ns();
|
|
+
|
|
+ for (i = 0; i < MAX_REG_CMDS; i++)
|
|
+ k1x_cpp_send_reg_cmd(cpp_dev, &qcmd->hw_cmds[i]);
|
|
+ qcmd->ts_frm_trigger = ktime_get_ns();
|
|
+}
|
|
+
|
|
+static void cpp_device_run(struct cpp_device *cdev)
|
|
+{
|
|
+ struct cpp_run_work *run_work = &cdev->run_work;
|
|
+
|
|
+ queue_work(run_work->run_wq, &run_work->work);
|
|
+}
|
|
+
|
|
/*
 * Pick the first context on the device job queue and start it, unless a
 * context is already running.  curr_ctx and job_queue are protected by
 * cdev->job_spinlock; the actual frame programming happens later on the
 * run workqueue (via cpp_device_run).
 */
static void k1x_cpp_try_run(struct cpp_device *cdev)
{
	unsigned long flags_job;

	spin_lock_irqsave(&cdev->job_spinlock, flags_job);
	if (NULL != cdev->curr_ctx) {
		/* hardware busy: the job-finish path will re-schedule */
		spin_unlock_irqrestore(&cdev->job_spinlock, flags_job);
		cam_dbg("cpp_ctx: %p is running, won't run now", cdev->curr_ctx);
		return;
	}

	if (list_empty(&cdev->job_queue)) {
		spin_unlock_irqrestore(&cdev->job_spinlock, flags_job);
		cam_dbg("No job pending");
		return;
	}

	cdev->curr_ctx = list_first_entry(&cdev->job_queue, struct cpp_ctx, queue);
	cdev->curr_ctx->job_flags |= TRANS_RUNNING;
	spin_unlock_irqrestore(&cdev->job_spinlock, flags_job);

	cam_dbg("Running job on cpp_ctx: %p", cdev->curr_ctx);
	cpp_device_run(cdev);
}
|
|
+
|
|
/*
 * Put @ctx on the device job queue if it has at least one pending frame
 * command and is neither aborting nor already queued.
 *
 * job_flags and job_queue are protected by cdev->job_spinlock; the
 * context's frame queue has its own lock, taken nested inside
 * job_spinlock here.
 */
static void k1x_cpp_try_queue(struct cpp_device *cdev, struct cpp_ctx *ctx)
{
	unsigned long flags_job, flags_frmq;

	spin_lock_irqsave(&cdev->job_spinlock, flags_job);

	if (ctx->job_flags & TRANS_ABORT) {
		spin_unlock_irqrestore(&cdev->job_spinlock, flags_job);
		cam_dbg("Abort context");
		return;
	}

	if (ctx->job_flags & TRANS_QUEUED) {
		spin_unlock_irqrestore(&cdev->job_spinlock, flags_job);
		cam_dbg("On job queue already");
		return;
	}

	/* only queue the context when a frame command is actually waiting */
	spin_lock_irqsave(&ctx->frmq.lock, flags_frmq);
	if (list_empty(&ctx->frmq.list)) {
		spin_unlock_irqrestore(&ctx->frmq.lock, flags_frmq);
		spin_unlock_irqrestore(&cdev->job_spinlock, flags_job);
		cam_dbg("no frame cmd available");
		return;
	}
	spin_unlock_irqrestore(&ctx->frmq.lock, flags_frmq);

	list_add_tail(&ctx->queue, &cdev->job_queue);
	ctx->job_flags |= TRANS_QUEUED;

	spin_unlock_irqrestore(&cdev->job_spinlock, flags_job);
}
|
|
+
|
|
+static void k1x_cpp_try_schedule(struct cpp_ctx *ctx)
|
|
+{
|
|
+ struct cpp_device *cdev = ctx->cpp_dev;
|
|
+
|
|
+ k1x_cpp_try_queue(cdev, ctx);
|
|
+ k1x_cpp_try_run(cdev);
|
|
+}
|
|
+
|
|
/*
 * Mark the currently running job done: drop the context from the job
 * queue, clear its scheduling flags and release curr_ctx so the next
 * queued context can run.  Rejects callers whose context is not the one
 * currently running.
 */
static void k1x_cpp_job_finish(struct cpp_device *cdev, struct cpp_ctx *ctx)
{
	unsigned long flags_job;

	spin_lock_irqsave(&cdev->job_spinlock, flags_job);
	if (!cdev->curr_ctx || cdev->curr_ctx != ctx) {
		spin_unlock_irqrestore(&cdev->job_spinlock, flags_job);
		cam_err("Called by an instance not currently running\n");
		return;
	}

	list_del(&cdev->curr_ctx->queue);
	cdev->curr_ctx->job_flags &= ~(TRANS_QUEUED | TRANS_RUNNING);
	cdev->curr_ctx = NULL;

	spin_unlock_irqrestore(&cdev->job_spinlock, flags_job);
}
|
|
+
|
|
/*
 * Copy a userspace frame description into @qcmd: the per-frame register
 * command lists (bounded by MAX_REG_CMDS x MAX_REG_DATA) and the three
 * DMA buffer mappings (src -> R0, pre -> R1, dst -> W0).
 *
 * Returns 0 on success, -EINVAL on an oversized register list or a
 * failed buffer prepare, -EFAULT if a register list cannot be copied
 * from user memory.  Buffers prepared before a failure are cleaned up.
 */
static int k1x_cpp_get_frame(struct cpp_device *cdev,
			     struct cpp_queue_cmd *qcmd,
			     struct cpp_frame_info *frame_info)
{
	int ret, i;

	for (i = 0; i < MAX_REG_CMDS; i++) {
		/* reject lists that would overflow the fixed reg_data array */
		if (frame_info->regs[i].reg_len > MAX_REG_DATA) {
			cam_err("insufficient to copy reg cmd %d with %d entries",
				i, frame_info->regs[i].reg_len);
			return -EINVAL;
		}

		if (frame_info->regs[i].reg_len) {
			if (copy_from_user(qcmd->hw_cmds[i].reg_data,
					   (void __user *)frame_info->regs[i].reg_data,
					   sizeof(struct reg_val_mask_info) *
					   frame_info->regs[i].reg_len)) {
				cam_err("failed to copy reg cmd %d from user", i);
				return -EFAULT;
			}
		}
		qcmd->hw_cmds[i].reg_len = frame_info->regs[i].reg_len;
		qcmd->hw_cmds[i].reg_type = frame_info->regs[i].reg_type;
	}

	qcmd->dma_ports[MAC_DMA_PORT_R0] =
	    cpp_dmabuf_prepare(cdev, &frame_info->src_buf_info, MAC_DMA_PORT_R0);
	if (IS_ERR_OR_NULL(qcmd->dma_ports[MAC_DMA_PORT_R0])) {
		cam_err("failed to prepare dmabuf R0");
		return -EINVAL;
	}

	qcmd->dma_ports[MAC_DMA_PORT_R1] =
	    cpp_dmabuf_prepare(cdev, &frame_info->pre_buf_info, MAC_DMA_PORT_R1);
	if (IS_ERR_OR_NULL(qcmd->dma_ports[MAC_DMA_PORT_R1])) {
		cam_err("failed to prepare dmabuf R1");
		ret = -EINVAL;
		goto err_r1;
	}

	qcmd->dma_ports[MAC_DMA_PORT_W0] =
	    cpp_dmabuf_prepare(cdev, &frame_info->dst_buf_info, MAC_DMA_PORT_W0);
	if (IS_ERR_OR_NULL(qcmd->dma_ports[MAC_DMA_PORT_W0])) {
		cam_err("failed to prepare dmabuf W0");
		ret = -EINVAL;
		goto err_r2;
	}

	qcmd->frame_id = frame_info->frame_id;
	qcmd->client_id = frame_info->client_id;

	return 0;

err_r2:
	cpp_dmabuf_cleanup(cdev, qcmd->dma_ports[MAC_DMA_PORT_R1]);
err_r1:
	cpp_dmabuf_cleanup(cdev, qcmd->dma_ports[MAC_DMA_PORT_R0]);

	return ret;
}
|
|
+
|
|
+static int k1x_cpp_process_frame(struct cpp_ctx *ctx, struct cpp_frame_info *info)
|
|
+{
|
|
+ int ret;
|
|
+ struct cpp_queue_cmd *qcmd;
|
|
+
|
|
+ qcmd = cmd_dequeue(&ctx->idleq);
|
|
+ if (!qcmd) {
|
|
+ cam_err("%s: %s is not enough", __func__, ctx->idleq.name);
|
|
+ return -EAGAIN;
|
|
+ }
|
|
+
|
|
+ ret = k1x_cpp_get_frame(ctx->cpp_dev, qcmd, info);
|
|
+ if (ret) {
|
|
+ cmd_enqueue(&ctx->idleq, &qcmd->list_frame);
|
|
+ } else {
|
|
+ cmd_enqueue(&ctx->frmq, &qcmd->list_frame);
|
|
+ k1x_cpp_try_schedule(ctx);
|
|
+ }
|
|
+
|
|
+ return ret;
|
|
+}
|
|
+
|
|
+int k1x_cpp_send_event(struct cpp_device *cpp_dev, u32 event_type,
|
|
+ struct k1x_cpp_event_data *event_data)
|
|
+{
|
|
+ struct v4l2_event cpp_event;
|
|
+
|
|
+ memset(&cpp_event, 0, sizeof(struct v4l2_event));
|
|
+ cpp_event.id = 0;
|
|
+ cpp_event.type = event_type;
|
|
+ memcpy(&cpp_event.u.data[0], event_data, sizeof(struct k1x_cpp_event_data));
|
|
+ v4l2_event_queue(cpp_dev->csd.sd.devnode, &cpp_event);
|
|
+
|
|
+ return 0;
|
|
+}
|
|
+
|
|
+static void cpp_update_axi_cfg(struct cpp_device *cpp_dev,
|
|
+ struct cpp_dma_port_info **dma_info)
|
|
+{
|
|
+ cpp_dev->ops->cfg_port_dmad(cpp_dev, dma_info[MAC_DMA_PORT_R0],
|
|
+ MAC_DMA_PORT_R0);
|
|
+ cpp_dev->ops->cfg_port_dmad(cpp_dev, dma_info[MAC_DMA_PORT_R1],
|
|
+ MAC_DMA_PORT_R1);
|
|
+ cpp_dev->ops->cfg_port_dmad(cpp_dev, dma_info[MAC_DMA_PORT_W0],
|
|
+ MAC_DMA_PORT_W0);
|
|
+}
|
|
+
|
|
+static void k1x_cpp_device_run_work(struct work_struct *work)
|
|
+{
|
|
+ struct cpp_run_work *run_work = container_of(work, struct cpp_run_work, work);
|
|
+ struct cpp_device *cdev = container_of(run_work, struct cpp_device, run_work);
|
|
+ struct cpp_ctx *curr_ctx = cdev->curr_ctx;
|
|
+ struct cpp_queue_cmd *frm_cmd = NULL;
|
|
+ u32 evt_type;
|
|
+ struct k1x_cpp_event_data data;
|
|
+
|
|
+ int ret, port_id;
|
|
+ int iommu_state = cdev->mmu_dev->state;
|
|
+
|
|
+ if (!curr_ctx) {
|
|
+ cam_err("current ctx is Null when device running");
|
|
+ return;
|
|
+ }
|
|
+
|
|
+ frm_cmd = cmd_dequeue(&curr_ctx->frmq);
|
|
+ if (!frm_cmd) {
|
|
+ cam_err("cpp_ctx: %p %s is Null when device running", curr_ctx,
|
|
+ curr_ctx->frmq.name);
|
|
+ goto exit;
|
|
+ }
|
|
+
|
|
+ if (iommu_state == CPP_IOMMU_ATTACHED) {
|
|
+ for (port_id = 0; port_id < MAX_DMA_PORT; ++port_id) {
|
|
+ ret = cpp_dma_alloc_iommu_channels(cdev,
|
|
+ frm_cmd->dma_ports[port_id]);
|
|
+ if (ret) {
|
|
+ pr_err
|
|
+ ("%s: dma port%d failed to alloc iommu channels\n",
|
|
+ __func__, port_id);
|
|
+ goto done;
|
|
+ }
|
|
+ cpp_dma_fill_iommu_channels(cdev, frm_cmd->dma_ports[port_id]);
|
|
+ }
|
|
+ }
|
|
+
|
|
+ cpp_update_axi_cfg(cdev, frm_cmd->dma_ports);
|
|
+
|
|
+ reinit_completion(&run_work->run_complete);
|
|
+ cpp_send_frame_to_hardware(cdev, frm_cmd);
|
|
+
|
|
+ if (!wait_for_completion_timeout(&run_work->run_complete,
|
|
+ msecs_to_jiffies(CPP_FRMCMD_TIMEOUT_MS))) {
|
|
+ evt_type = V4L2_EVENT_CPP_FRAME_ERR;
|
|
+ data.u.err_info.err_type = 0;
|
|
+ data.u.err_info.frame_id = frm_cmd->frame_id;
|
|
+ data.u.err_info.client_id = frm_cmd->client_id;
|
|
+ cam_err("c%dframe%d run timeout", frm_cmd->client_id,
|
|
+ frm_cmd->frame_id);
|
|
+ cdev->ops->debug_dump(cdev);
|
|
+ } else {
|
|
+ if (cdev->state == CPP_STATE_ERR) {
|
|
+ evt_type = V4L2_EVENT_CPP_FRAME_ERR;
|
|
+ data.u.err_info.err_type = 1;
|
|
+ data.u.err_info.frame_id = frm_cmd->frame_id;
|
|
+ data.u.err_info.client_id = frm_cmd->client_id;
|
|
+ cam_err("c%dframe%d run error", frm_cmd->client_id,
|
|
+ frm_cmd->frame_id);
|
|
+ cdev->ops->debug_dump(cdev);
|
|
+ } else {
|
|
+ frm_cmd->ts_frm_finish = ktime_get_ns();
|
|
+
|
|
+ evt_type = V4L2_EVENT_CPP_FRAME_DONE;
|
|
+ data.u.done_info.success = 1;
|
|
+ data.u.done_info.frame_id = frm_cmd->frame_id;
|
|
+ data.u.done_info.client_id = frm_cmd->client_id;
|
|
+ data.u.done_info.seg_reg_cfg = frm_cmd->ts_frm_trigger -
|
|
+ frm_cmd->ts_reg_config;
|
|
+ data.u.done_info.seg_stream = frm_cmd->ts_frm_finish -
|
|
+ frm_cmd->ts_frm_trigger;
|
|
+ cam_dbg("c%dframe%d run finish", frm_cmd->client_id,
|
|
+ frm_cmd->frame_id);
|
|
+ }
|
|
+ }
|
|
+
|
|
+done:
|
|
+ if (iommu_state == CPP_IOMMU_ATTACHED) {
|
|
+ for (port_id = 0; port_id < MAX_DMA_PORT; ++port_id)
|
|
+ cpp_dma_free_iommu_channels(cdev, frm_cmd->dma_ports[port_id]);
|
|
+ }
|
|
+
|
|
+ cpp_dmabuf_cleanup(cdev, frm_cmd->dma_ports[MAC_DMA_PORT_W0]);
|
|
+ cpp_dmabuf_cleanup(cdev, frm_cmd->dma_ports[MAC_DMA_PORT_R1]);
|
|
+ cpp_dmabuf_cleanup(cdev, frm_cmd->dma_ports[MAC_DMA_PORT_R0]);
|
|
+ k1x_cpp_send_event(cdev, evt_type, &data);
|
|
+
|
|
+ atomic_set(&frm_cmd->in_processing, 0);
|
|
+ cmd_enqueue(&curr_ctx->idleq, &frm_cmd->list_frame);
|
|
+
|
|
+exit:
|
|
+ k1x_cpp_job_finish(cdev, curr_ctx);
|
|
+
|
|
+ /* process next frame */
|
|
+ k1x_cpp_try_schedule(curr_ctx);
|
|
+}
|
|
+
|
|
+static int cpp_setup_run_work(struct cpp_device *cdev)
|
|
+{
|
|
+ struct cpp_run_work *run_work = &cdev->run_work;
|
|
+
|
|
+ cam_dbg("Installing cpp run work");
|
|
+
|
|
+ run_work->run_wq = create_singlethread_workqueue(CPP_DRV_NAME);
|
|
+ if (!run_work->run_wq) {
|
|
+ cam_err("Can't create %s run wq", CPP_DRV_NAME);
|
|
+ return -ENOMEM;
|
|
+ }
|
|
+ INIT_WORK(&run_work->work, k1x_cpp_device_run_work);
|
|
+ init_completion(&run_work->run_complete);
|
|
+
|
|
+ return 0;
|
|
+}
|
|
+
|
|
/*
 * Abort the (single) private context: set TRANS_ABORT so no new job is
 * queued, then either wait for a running job to drain via
 * flush_workqueue(), or drop the context from the job queue if it was
 * merely queued but not yet running.
 */
static void cpp_cancel_run_work(struct cpp_device *cdev)
{
	struct cpp_ctx *ctx = &cdev->priv;
	struct cpp_run_work *run_work = &cdev->run_work;
	unsigned long flags;

	cam_dbg("Canceling cpp run work");

	spin_lock_irqsave(&cdev->job_spinlock, flags);

	ctx->job_flags |= TRANS_ABORT;
	if (ctx->job_flags & TRANS_RUNNING) {
		/* must drop the lock before flushing: the work handler
		 * takes job_spinlock itself in k1x_cpp_job_finish() */
		spin_unlock_irqrestore(&cdev->job_spinlock, flags);
		cam_dbg("cpp_ctx %p running, will wait to complete", ctx);
		if (run_work->run_wq)
			flush_workqueue(run_work->run_wq);
	} else if (ctx->job_flags & TRANS_QUEUED) {
		list_del(&ctx->queue);
		ctx->job_flags &= ~(TRANS_QUEUED | TRANS_RUNNING);
		spin_unlock_irqrestore(&cdev->job_spinlock, flags);
	} else {
		/* Do nothing, was not on queue/running */
		spin_unlock_irqrestore(&cdev->job_spinlock, flags);
	}
}
|
|
+
|
|
+static void cpp_destroy_run_work(struct cpp_device *cdev)
|
|
+{
|
|
+ struct cpp_run_work *run_work = &cdev->run_work;
|
|
+
|
|
+ cam_dbg("Destroying cpp run work");
|
|
+
|
|
+ if (run_work->run_wq) {
|
|
+ flush_workqueue(run_work->run_wq);
|
|
+ destroy_workqueue(run_work->run_wq);
|
|
+ }
|
|
+}
|
|
+
|
|
+static int cpp_update_clock_rate(struct cpp_device *cpp_dev,
|
|
+ unsigned long func_rate, unsigned long bus_rate);
|
|
+
|
|
+static long k1x_cpp_subdev_ioctl(struct v4l2_subdev *sd, unsigned int cmd, void *arg)
|
|
+{
|
|
+ struct cpp_device *cpp_dev;
|
|
+ int ret = 0;
|
|
+
|
|
+ cpp_dev = v4l2_get_subdevdata(sd);
|
|
+ if (!cpp_dev) {
|
|
+ cam_err("cpp_dev is null");
|
|
+ return -EINVAL;
|
|
+ }
|
|
+
|
|
+ if (mutex_lock_interruptible(&cpp_dev->mutex))
|
|
+ return -ERESTARTSYS;
|
|
+
|
|
+ switch (cmd) {
|
|
+ case VIDIOC_K1X_CPP_HW_INFO: {
|
|
+ struct cpp_hw_info *hw_info = arg;
|
|
+
|
|
+ cam_dbg("VIDIOC_K1X_CPP_HW_INFO");
|
|
+ memset(hw_info, 0, sizeof(*hw_info));
|
|
+ hw_info->cpp_hw_version = cpp_dev->hw_info.cpp_hw_version;
|
|
+ hw_info->low_pwr_mode = cpp_dev->hw_info.low_pwr_mode;
|
|
+ break;
|
|
+ }
|
|
+ case VIDIOC_K1X_CPP_REG_CFG: {
|
|
+ cam_dbg("VIDIOC_K1X_CPP_REG_CFG");
|
|
+ if (cpp_dev->state != CPP_STATE_IDLE) {
|
|
+ cam_err("check cpp state %d when reg cfg",
|
|
+ cpp_dev->state);
|
|
+ return -EIO;
|
|
+ }
|
|
+ ret = cpp_hw_reg_config(cpp_dev, arg);
|
|
+ break;
|
|
+ }
|
|
+ case VIDIOC_K1X_CPP_HW_RST: {
|
|
+ cam_dbg("VIDIOC_K1X_CPP_HW_RST");
|
|
+ if (cpp_dev->state == CPP_STATE_OFF) {
|
|
+ cam_err("check cpp state %d when hw reset",
|
|
+ cpp_dev->state);
|
|
+ return -EIO;
|
|
+ }
|
|
+
|
|
+ cam_dbg("cpp state %d when hw reset", cpp_dev->state);
|
|
+ ret = cpp_dev->ops->global_reset(cpp_dev);
|
|
+ /* recover submodule registers */
|
|
+ cpp_dev->ops->enable_irqs_common(cpp_dev, 1);
|
|
+ cpp_dev->ops->set_burst_len(cpp_dev);
|
|
+ cpp_dev->state = CPP_STATE_IDLE;
|
|
+ break;
|
|
+ }
|
|
+ case VIDIOC_K1X_CPP_LOW_PWR: {
|
|
+ cam_dbg("VIDIOC_K1X_CPP_LOW_PWR");
|
|
+ ret = cpp_low_power_mode_set(cpp_dev, *((int *)arg));
|
|
+ break;
|
|
+ }
|
|
+ case VIDIOC_K1X_CPP_PROCESS_FRAME: {
|
|
+ struct cpp_frame_info *proc_info = arg;
|
|
+
|
|
+ cam_dbg("VIDIOC_K1X_CPP_PROCESS_FRAME");
|
|
+ ret = k1x_cpp_process_frame(&cpp_dev->priv, proc_info);
|
|
+ break;
|
|
+ }
|
|
+ case VIDIOC_K1X_CPP_FLUSH_QUEUE: {
|
|
+ cam_dbg("VIDIOC_K1X_CPP_FLUSH_QUEUE");
|
|
+ if (cpp_dev->state == CPP_STATE_OFF) {
|
|
+ cam_err("check cpp state %d when flush",
|
|
+ cpp_dev->state);
|
|
+ return -EIO;
|
|
+ }
|
|
+ flush_workqueue(cpp_dev->run_work.run_wq);
|
|
+ break;
|
|
+ }
|
|
+ case VIDIOC_K1X_CPP_IOMMU_ATTACH: {
|
|
+ cam_dbg("VIDIOC_K1X_CPP_IOMMU_ATTACH");
|
|
+
|
|
+ cpp_dev->mmu_dev->state = CPP_IOMMU_ATTACHED;
|
|
+ break;
|
|
+ }
|
|
+ case VIDIOC_K1X_CPP_IOMMU_DETACH: {
|
|
+ cam_dbg("VIDIOC_K1X_CPP_IOMMU_DETACH");
|
|
+
|
|
+ cpp_dev->mmu_dev->state = CPP_IOMMU_DETACHED;
|
|
+ break;
|
|
+ }
|
|
+ case VIDIOC_K1X_CPP_UPDATE_BANDWIDTH: {
|
|
+ struct cpp_bandwidth_info *bw_info =
|
|
+ (struct cpp_bandwidth_info *)arg;
|
|
+
|
|
+ cam_dbg("VIDIOC_K1X_CPP_UPDATE_BANDWIDTH");
|
|
+ ret = cpp_update_bandwidth(bw_info->rsum, bw_info->wsum);
|
|
+ break;
|
|
+ }
|
|
+ case VIDIOC_K1X_CPP_UPDATE_CLOCKRATE: {
|
|
+ struct cpp_clock_info *clk_info = (struct cpp_clock_info *)arg;
|
|
+
|
|
+ cam_dbg("VIDIOC_K1X_CPP_UPDATE_CLOCKRATE");
|
|
+ ret = cpp_update_clock_rate(cpp_dev, clk_info->func_rate, -1);
|
|
+ break;
|
|
+ }
|
|
+ default:
|
|
+ ret = -ENOTTY;
|
|
+ break;
|
|
+ }
|
|
+ mutex_unlock(&cpp_dev->mutex);
|
|
+
|
|
+ return ret;
|
|
+}
|
|
+
|
|
/* Subscribe a file handle to CPP events; queue depth MAX_CPP_V4L2_EVENTS. */
static int k1x_cpp_subscribe_event(struct v4l2_subdev *sd, struct v4l2_fh *fh,
				   struct v4l2_event_subscription *sub)
{
	cam_dbg("%s E", __func__);
	return v4l2_event_subscribe(fh, sub, MAX_CPP_V4L2_EVENTS, NULL);
}
|
|
+
|
|
/* Drop a file handle's CPP event subscription. */
static int k1x_cpp_unsubscribe_event(struct v4l2_subdev *sd,
				     struct v4l2_fh *fh,
				     struct v4l2_event_subscription *sub)
{
	cam_dbg("%s E", __func__);
	return v4l2_event_unsubscribe(fh, sub);
}
|
|
+
|
|
/* v4l2 subdev core ops: custom ioctls plus event (un)subscription. */
static struct v4l2_subdev_core_ops k1x_cpp_subdev_core_ops = {
	.ioctl = k1x_cpp_subdev_ioctl,
	.subscribe_event = k1x_cpp_subscribe_event,
	.unsubscribe_event = k1x_cpp_unsubscribe_event,
};

/* only the core op group is implemented for this subdev */
static struct v4l2_subdev_ops cpp_subdev_ops = {
	.core = &k1x_cpp_subdev_core_ops,
};
|
|
+
|
|
+static int cpp_update_clock_rate(struct cpp_device *cpp_dev,
|
|
+ unsigned long func_rate, unsigned long bus_rate)
|
|
+{
|
|
+ long clk_val;
|
|
+ int ret;
|
|
+
|
|
+ if (func_rate > 0) {
|
|
+ clk_val = clk_round_rate(cpp_dev->fnc_clk, func_rate);
|
|
+ if (clk_val < 0) {
|
|
+ cam_err("fnc clk round rate failed: %ld", clk_val);
|
|
+ return -EINVAL;
|
|
+ }
|
|
+ ret = clk_set_rate(cpp_dev->fnc_clk, clk_val);
|
|
+ if (ret < 0) {
|
|
+ cam_err("fnc clk set rate failed: %d", ret);
|
|
+ return ret;
|
|
+ }
|
|
+
|
|
+ }
|
|
+
|
|
+ if (bus_rate > 0) {
|
|
+ clk_val = clk_round_rate(cpp_dev->bus_clk, bus_rate);
|
|
+ if (clk_val < 0) {
|
|
+ cam_err("bus clk round rate failed: %ld", clk_val);
|
|
+ return -EINVAL;
|
|
+ }
|
|
+ ret = clk_set_rate(cpp_dev->bus_clk, clk_val);
|
|
+ if (ret < 0) {
|
|
+ cam_err("bus clk set rate failed: %d", ret);
|
|
+ return ret;
|
|
+ }
|
|
+ }
|
|
+
|
|
+ cam_dbg("func clock rate: %ld, bus clock rate: %ld",
|
|
+ clk_get_rate(cpp_dev->fnc_clk), clk_get_rate(cpp_dev->bus_clk));
|
|
+
|
|
+ return 0;
|
|
+}
|
|
+
|
|
/*
 * De-assert resets and enable the clock chain in order: AHB reset,
 * function clock (+CPP reset), bus clock (+CI reset), DPU clock
 * (+LCD mclk reset).  On failure everything enabled so far is rolled
 * back in reverse order.
 *
 * NOTE(review): the ahb_clk enable is commented out upstream; only the
 * AHB reset line is managed here — confirm the clock is owned elsewhere.
 */
static int cpp_enable_clocks(struct cpp_device *cpp_dev)
{
	int ret;

	reset_control_deassert(cpp_dev->ahb_reset);

//	ret = clk_prepare_enable(cpp_dev->ahb_clk);
//	if (ret)
//		return ret;

	ret = clk_prepare_enable(cpp_dev->fnc_clk);
	if (ret)
		goto err_clks_ahb;
	reset_control_deassert(cpp_dev->isp_cpp_reset);

	ret = clk_prepare_enable(cpp_dev->bus_clk);
	if (ret)
		goto err_clks_fnc;
	reset_control_deassert(cpp_dev->isp_ci_reset);

	ret = clk_prepare_enable(cpp_dev->dpu_clk);
	if (ret)
		goto err_clks_bus;
	reset_control_deassert(cpp_dev->lcd_mclk_reset);

	return 0;

	/* unwind: each label undoes the step that succeeded before it */
err_clks_bus:
	reset_control_assert(cpp_dev->isp_ci_reset);
	clk_disable_unprepare(cpp_dev->bus_clk);
err_clks_fnc:
	reset_control_assert(cpp_dev->isp_cpp_reset);
	clk_disable_unprepare(cpp_dev->fnc_clk);
err_clks_ahb:
//	clk_disable_unprepare(cpp_dev->ahb_clk);
	reset_control_assert(cpp_dev->ahb_reset);

	return ret;
}
|
|
+
|
|
/*
 * Counterpart of cpp_enable_clocks(): assert resets and disable clocks.
 * The ordering (bus, function/AHB, then DPU) mirrors the original code.
 */
static void cpp_disable_clocks(struct cpp_device *cpp_dev)
{
	reset_control_assert(cpp_dev->isp_ci_reset);
	clk_disable_unprepare(cpp_dev->bus_clk);

	reset_control_assert(cpp_dev->isp_cpp_reset);
	clk_disable_unprepare(cpp_dev->fnc_clk);
//	clk_disable_unprepare(cpp_dev->ahb_clk);
	reset_control_assert(cpp_dev->ahb_reset);

	reset_control_assert(cpp_dev->lcd_mclk_reset);
	clk_disable_unprepare(cpp_dev->dpu_clk);
}
|
|
+
|
|
+/**
|
|
+ * cpp_init_hardware - pd, clock on
|
|
+ *
|
|
+ * @cpp_dev:
|
|
+ *
|
|
+ * Return: 0 on success, error code otherwise.
|
|
+ */
|
|
+static int cpp_init_hardware(struct cpp_device *cpp_dev)
|
|
+{
|
|
+ int ret;
|
|
+
|
|
+ /* get runtime pm */
|
|
+ ret = pm_runtime_get_sync(&cpp_dev->pdev->dev);
|
|
+ if (ret < 0) {
|
|
+ cam_err("rpm get failed: %d", ret);
|
|
+ return ret;
|
|
+ }
|
|
+
|
|
+
|
|
+ ret = cpp_enable_clocks(cpp_dev);
|
|
+ if (ret) {
|
|
+ pm_runtime_put_sync(&cpp_dev->pdev->dev);
|
|
+ return ret;
|
|
+ }
|
|
+
|
|
+ ret = cpp_update_clock_rate(cpp_dev, CPP_FNC_DEFAULT_FREQ,
|
|
+ ISP_BUS_DEFAULT_FREQ);
|
|
+ if (ret) {
|
|
+ pm_runtime_put_sync(&cpp_dev->pdev->dev);
|
|
+ return ret;
|
|
+ }
|
|
+
|
|
+ /* Do HW Reset, checking cpp function properly */
|
|
+ ret = cpp_dev->ops->global_reset(cpp_dev);
|
|
+ if (ret) {
|
|
+ cpp_disable_clocks(cpp_dev);
|
|
+ pm_runtime_put_sync(&cpp_dev->pdev->dev);
|
|
+ return ret;
|
|
+ }
|
|
+
|
|
+ cpp_dev->ops->enable_irqs_common(cpp_dev, 1);
|
|
+ cpp_dev->ops->set_burst_len(cpp_dev);
|
|
+ cpp_dev->hw_info.cpp_hw_version = cpp_dev->ops->hw_version(cpp_dev);
|
|
+ cpp_dev->hw_info.low_pwr_mode = 1;
|
|
+
|
|
+ return 0;
|
|
+}
|
|
+
|
|
/**
 * cpp_release_hardware - power down: zero the bandwidth request, reset
 * the core (hang workaround), mask all irqs, disable clocks and drop
 * the runtime-PM reference taken in cpp_init_hardware().
 *
 * @cpp_dev: the CPP device
 */
static void cpp_release_hardware(struct cpp_device *cpp_dev)
{
	/* reset bandwidth */
	cpp_update_bandwidth(0, 0);

	/* hang workaround */
	cpp_dev->ops->global_reset(cpp_dev);

	/* disable all irqs */
	cpp_dev->ops->enable_irqs_common(cpp_dev, 0);

	/* disable clock(s) */
	cpp_disable_clocks(cpp_dev);

	/* put runtime pm */
	pm_runtime_put_sync(&cpp_dev->pdev->dev);
}
|
|
+
|
|
/*
 * Subdev open hook.  The first opener powers the hardware up,
 * pre-allocates the idle command queue and clears the scheduling flags;
 * the open count is capped at MAX_ACTIVE_CPP_INSTANCE.  Any stale
 * shared-memory mapping from a previous session is dropped on every
 * open.
 */
static int cpp_open_node(struct v4l2_subdev *sd, struct v4l2_subdev_fh *fh)
{
	struct cpp_device *cpp_dev;
	unsigned long flags;
	int ret;

	cam_dbg("%s E", __func__);

	cpp_dev = v4l2_get_subdevdata(sd);
	if (!cpp_dev) {
		cam_err("cpp_dev is null\n");
		return -EINVAL;
	}

	if (mutex_lock_interruptible(&cpp_dev->mutex))
		return -ERESTARTSYS;

	if (cpp_dev->open_cnt == MAX_ACTIVE_CPP_INSTANCE) {
		cam_err("no available cpp instance");
		mutex_unlock(&cpp_dev->mutex);
		return -ENODEV;
	}

	cpp_dev->open_cnt++;
	if (cpp_dev->open_cnt == 1) {
		ret = cpp_init_hardware(cpp_dev);
		if (ret < 0) {
			cam_err("cpp init hardware failed!");
			cpp_dev->open_cnt--;
			mutex_unlock(&cpp_dev->mutex);
			return ret;
		}
		cpp_dev->state = CPP_STATE_IDLE;

		/* pre-allocate 6 queue commands for frame submission */
		ret = cmd_queue_request(&cpp_dev->priv.idleq, 6);
		if (ret) {
			cpp_release_hardware(cpp_dev);
			cpp_dev->open_cnt--;
			mutex_unlock(&cpp_dev->mutex);
			return ret;
		}

#ifdef CONFIG_SPACEMIT_DEBUG
		cpp_running_info.b_dev_running = true;
#endif
		spin_lock_irqsave(&cpp_dev->job_spinlock, flags);
		cpp_dev->priv.job_flags = 0; /* stream on */
		spin_unlock_irqrestore(&cpp_dev->job_spinlock, flags);
	}

	/* drop any stale shared-memory mapping left by a previous session */
	if (cpp_dev->mapped) {
		vfree(cpp_dev->shared_mem);
		cpp_dev->shared_mem = NULL;
		cpp_dev->shared_size = 0;
		cpp_dev->mapped = 0;
	}

	mutex_unlock(&cpp_dev->mutex);
	cam_dbg("%s X", __func__);

	return 0;
}
|
|
+
|
|
/*
 * Subdev close hook.  The last closer drains the run work, empties both
 * command queues, powers the hardware down and calls pm_relax().
 * NOTE(review): the matching pm_stay_awake() is not visible in this
 * file — confirm the wakeup source is taken elsewhere.
 */
static int cpp_close_node(struct v4l2_subdev *sd, struct v4l2_subdev_fh *fh)
{
	struct cpp_device *cpp_dev;
	int ret = 0;

	cam_dbg("%s E", __func__);

	cpp_dev = v4l2_get_subdevdata(sd);
	if (!cpp_dev) {
		pr_err("cpp_dev is null\n");
		return -EINVAL;
	}

	mutex_lock(&cpp_dev->mutex);
	if (cpp_dev->open_cnt == 0) {
		cam_err("no existing cpp instance");
		mutex_unlock(&cpp_dev->mutex);
		return -ENODEV;
	}

	cpp_dev->open_cnt--;
	if (cpp_dev->open_cnt == 0) {
#ifdef CONFIG_SPACEMIT_DEBUG
		cpp_running_info.b_dev_running = false;
#endif
		cpp_cancel_run_work(cpp_dev);
		cmd_queue_empty(&cpp_dev->priv.frmq);
		cmd_queue_empty(&cpp_dev->priv.idleq);
		cpp_release_hardware(cpp_dev);
		cpp_dev->state = CPP_STATE_OFF;
		cpp_dev->mmu_dev->ops->dump_status(cpp_dev->mmu_dev);
		pm_relax(&cpp_dev->pdev->dev);
	}

	mutex_unlock(&cpp_dev->mutex);
	cam_dbg("%s X", __func__);

	return ret;
}
|
|
+
|
|
+static int cpp_mmap(struct file *file, struct vm_area_struct *vma)
|
|
+{
|
|
+ struct video_device *vdev = video_devdata(file);
|
|
+ struct v4l2_subdev *sd = vdev_to_v4l2_subdev(vdev);
|
|
+ struct cpp_device *cpp_dev = v4l2_get_subdevdata(sd);
|
|
+
|
|
+ vma->vm_page_prot = pgprot_noncached(vma->vm_page_prot);
|
|
+
|
|
+ return vm_iomap_memory(vma, cpp_dev->mem->start, resource_size(cpp_dev->mem));
|
|
+}
|
|
+
|
|
+static int cpp_subdev_registered(struct v4l2_subdev *sd)
|
|
+{
|
|
+ int ret = 0;
|
|
+ struct v4l2_file_operations *fops;
|
|
+
|
|
+ fops = kzalloc(sizeof(*fops), GFP_KERNEL);
|
|
+ if (!fops)
|
|
+ return -ENOMEM;
|
|
+
|
|
+ *fops = *sd->devnode->fops;
|
|
+ fops->mmap = cpp_mmap;
|
|
+#ifdef CONFIG_COMPAT
|
|
+ //fops->compat_ioctl32 = k1x_cpp_compat_ioctl32;
|
|
+#endif
|
|
+
|
|
+ sd->devnode->fops = fops;
|
|
+
|
|
+ return ret;
|
|
+}
|
|
+
|
|
+static void cpp_subdev_unregistered(struct v4l2_subdev *sd)
|
|
+{
|
|
+ const struct v4l2_file_operations *fops;
|
|
+
|
|
+ fops = sd->devnode->fops;
|
|
+ kfree(fops);
|
|
+ sd->devnode->fops = NULL;
|
|
+}
|
|
+
|
|
/* open/close lifecycle hooks for the subdev device node */
static const struct v4l2_subdev_internal_ops k1x_cpp_internal_ops = {
	.open = cpp_open_node,
	.close = cpp_close_node,
};

/* platform-camera (spm) registration callbacks: install/remove fops clone */
static const struct spm_v4l2_subdev_ops cpp_spm_subdev_ops = {
	.registered = cpp_subdev_registered,
	.unregistered = cpp_subdev_unregistered,
};
|
|
+
|
|
/*
 * Describe and register this device as a platform camera subdev: it
 * gets a device node, delivers V4L2 events, and exposes no media pads.
 */
static int cpp_init_subdev(struct cpp_device *cpp_dev)
{
	int ret = 0;

	cpp_dev->csd.internal_ops = &k1x_cpp_internal_ops;
	cpp_dev->csd.ops = &cpp_subdev_ops;
	cpp_dev->csd.spm_ops = &cpp_spm_subdev_ops;
	cpp_dev->csd.name = CPP_DRV_NAME;
	cpp_dev->csd.sd_flags = V4L2_SUBDEV_FL_HAS_DEVNODE;
	cpp_dev->csd.sd_flags |= V4L2_SUBDEV_FL_HAS_EVENTS;
	cpp_dev->csd.ent_function = MEDIA_ENT_F_K1X_CPP;
	cpp_dev->csd.pads_cnt = 0;
	cpp_dev->csd.token = cpp_dev;	/* opaque back-pointer to this device */

	ret = plat_cam_register_subdev(&cpp_dev->csd);
	if (ret)
		pr_err("Fail to create platform camera subdev ");

	return ret;
}
|
|
+
|
|
/* OF match table: .data selects the hw ops set for each compatible. */
static const struct of_device_id k1xcpp_dt_match[] = {
	{
		.compatible = "spacemit,k1xcpp",
		.data = &cpp_ops_2_0,
	},
	{}
};

MODULE_DEVICE_TABLE(of, k1xcpp_dt_match);
|
|
+
|
|
+static int cpp_probe(struct platform_device *pdev)
|
|
+{
|
|
+ struct cpp_device *cpp_dev;
|
|
+ const struct of_device_id *match_dev;
|
|
+ int ret = 0;
|
|
+ int irq = 0;
|
|
+
|
|
+ cam_dbg("enter cpp_probe\n");
|
|
+ match_dev = of_match_device(k1xcpp_dt_match, &pdev->dev);
|
|
+ if (!match_dev || !match_dev->data) {
|
|
+ dev_err(&pdev->dev, "no match data\n");
|
|
+ return -EINVAL;
|
|
+ }
|
|
+
|
|
+ cpp_dev = devm_kzalloc(&pdev->dev, sizeof(struct cpp_device), GFP_KERNEL);
|
|
+ if (!cpp_dev)
|
|
+ return -ENOMEM;
|
|
+ cpp_dev->ops = (struct cpp_hw_ops *)match_dev->data;
|
|
+
|
|
+ /* get mem */
|
|
+ cpp_dev->mem = platform_get_resource_byname(pdev, IORESOURCE_MEM, "cpp");
|
|
+ if (!cpp_dev->mem) {
|
|
+ dev_err(&pdev->dev, "no mem resource");
|
|
+ return -ENODEV;
|
|
+ }
|
|
+ cpp_dev->regs_base = devm_ioremap_resource(&pdev->dev, cpp_dev->mem);
|
|
+ if (IS_ERR(cpp_dev->regs_base)) {
|
|
+ dev_err(&pdev->dev, "fail to remap iomem\n");
|
|
+ return PTR_ERR(cpp_dev->regs_base);
|
|
+ }
|
|
+
|
|
+ irq = platform_get_irq_byname(pdev, "cpp");
|
|
+ if (irq < 0) {
|
|
+ dev_err(&pdev->dev, "no irq resource");
|
|
+ return -ENODEV;
|
|
+ }
|
|
+ dev_dbg(&pdev->dev, "cpp irq: %d\n", irq);
|
|
+ ret = devm_request_irq(&pdev->dev, irq,
|
|
+ cpp_dev->ops->isr, 0, CPP_DRV_NAME, cpp_dev);
|
|
+ if (ret) {
|
|
+ dev_err(&pdev->dev, "fail to request irq\n");
|
|
+ return ret;
|
|
+ }
|
|
+
|
|
+ /* get clock(s) */
|
|
+#ifdef CONFIG_ARCH_SPACEMIT
|
|
+/*
|
|
+ cpp_dev->ahb_clk = devm_clk_get(&pdev->dev, "isp_ahb");
|
|
+ if (IS_ERR(cpp_dev->ahb_clk)) {
|
|
+ ret = PTR_ERR(cpp_dev->ahb_clk);
|
|
+ dev_err(&pdev->dev, "failed to get ahb clock: %d\n", ret);
|
|
+ return ret;
|
|
+ }
|
|
+*/
|
|
+ cpp_dev->ahb_reset = devm_reset_control_get_optional_shared(&pdev->dev, "isp_ahb_reset");
|
|
+ if (IS_ERR_OR_NULL(cpp_dev->ahb_reset)) {
|
|
+ dev_err(&pdev->dev, "not found core isp_ahb_reset\n");
|
|
+ return PTR_ERR(cpp_dev->ahb_reset);
|
|
+ }
|
|
+
|
|
+ cpp_dev->isp_cpp_reset = devm_reset_control_get_optional_shared(&pdev->dev, "isp_cpp_reset");
|
|
+ if (IS_ERR_OR_NULL(cpp_dev->isp_cpp_reset)) {
|
|
+ dev_err(&pdev->dev, "not found core isp_cpp_reset\n");
|
|
+ return PTR_ERR(cpp_dev->isp_cpp_reset);
|
|
+ }
|
|
+
|
|
+ cpp_dev->isp_ci_reset = devm_reset_control_get_optional_shared(&pdev->dev, "isp_ci_reset");
|
|
+ if (IS_ERR_OR_NULL(cpp_dev->isp_ci_reset)) {
|
|
+ dev_err(&pdev->dev, "not found core isp_ci_reset\n");
|
|
+ return PTR_ERR(cpp_dev->isp_ci_reset);
|
|
+ }
|
|
+
|
|
+ cpp_dev->lcd_mclk_reset = devm_reset_control_get_optional_shared(&pdev->dev, "lcd_mclk_reset");
|
|
+ if (IS_ERR_OR_NULL(cpp_dev->lcd_mclk_reset)) {
|
|
+ dev_err(&pdev->dev, "not found core lcd_mclk_reset\n");
|
|
+ return PTR_ERR(cpp_dev->lcd_mclk_reset);
|
|
+ }
|
|
+
|
|
+ cpp_dev->fnc_clk = devm_clk_get(&pdev->dev, "cpp_func");
|
|
+ if (IS_ERR(cpp_dev->fnc_clk)) {
|
|
+ ret = PTR_ERR(cpp_dev->fnc_clk);
|
|
+ dev_err(&pdev->dev, "failed to get function clock: %d\n", ret);
|
|
+ return ret;
|
|
+ }
|
|
+
|
|
+#ifdef CONFIG_SPACEMIT_DEBUG
|
|
+ cpp_running_info.is_dev_running = check_dev_running_status;
|
|
+ cpp_running_info.nb.notifier_call = dev_clkoffdet_notifier_handler;
|
|
+ clk_notifier_register(cpp_dev->fnc_clk, &cpp_running_info.nb);
|
|
+ clk_notifier_register(cpp_dev->bus_clk, &cpp_running_info.nb);
|
|
+// clk_notifier_register(cpp_dev->ahb_clk, &cpp_running_info.nb);
|
|
+#endif
|
|
+
|
|
+ cpp_dev->bus_clk = devm_clk_get(&pdev->dev, "isp_axi");
|
|
+ if (IS_ERR(cpp_dev->bus_clk)) {
|
|
+ ret = PTR_ERR(cpp_dev->bus_clk);
|
|
+ dev_err(&pdev->dev, "failed to get bus clock: %d\n", ret);
|
|
+ return ret;
|
|
+ }
|
|
+
|
|
+ cpp_dev->dpu_clk = devm_clk_get(&pdev->dev, "dpu_mclk");
|
|
+ if (IS_ERR(cpp_dev->dpu_clk)) {
|
|
+ ret = PTR_ERR(cpp_dev->dpu_clk);
|
|
+ dev_err(&pdev->dev, "failed to get dpu clock: %d\n", ret);
|
|
+ return ret;
|
|
+ }
|
|
+#endif
|
|
+
|
|
+ ret = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(33));
|
|
+ if (ret)
|
|
+ return ret;
|
|
+
|
|
+ cpp_dev->pdev = pdev;
|
|
+ platform_set_drvdata(pdev, cpp_dev);
|
|
+ ret = cpp_iommu_register(cpp_dev);
|
|
+ if (ret)
|
|
+ return ret;
|
|
+ cpp_dev->mmu_dev->state = CPP_IOMMU_ATTACHED;
|
|
+
|
|
+ ret = cpp_init_subdev(cpp_dev);
|
|
+ if (ret)
|
|
+ return ret;
|
|
+
|
|
+ mutex_init(&cpp_dev->mutex);
|
|
+ init_completion(&cpp_dev->reset_complete);
|
|
+ spin_lock_init(&cpp_dev->job_spinlock);
|
|
+ INIT_LIST_HEAD(&cpp_dev->job_queue);
|
|
+ cmd_queue_init(&cpp_dev->priv.idleq, "cpp idle queue");
|
|
+ cmd_queue_init(&cpp_dev->priv.frmq, "cpp frm queue");
|
|
+ cpp_dev->priv.cpp_dev = cpp_dev;
|
|
+ cpp_dev->priv.job_flags = 0;
|
|
+ cpp_dev->state = CPP_STATE_OFF;
|
|
+
|
|
+ ret = cpp_setup_run_work(cpp_dev);
|
|
+ if (ret)
|
|
+ goto err_work;
|
|
+
|
|
+ ret = cpp_init_bandwidth();
|
|
+ if (ret)
|
|
+ goto err_work;
|
|
+
|
|
+ /* enable runtime pm */
|
|
+ pm_runtime_enable(&pdev->dev);
|
|
+ device_init_wakeup(&pdev->dev, true);
|
|
+
|
|
+ cam_dbg("%s probed", dev_name(&pdev->dev));
|
|
+ return ret;
|
|
+
|
|
+err_work:
|
|
+ mutex_destroy(&cpp_dev->mutex);
|
|
+ plat_cam_unregister_subdev(&cpp_dev->csd);
|
|
+
|
|
+ return ret;
|
|
+}
|
|
+
|
|
/*
 * Platform remove: tear down in reverse probe order — wakeup source,
 * runtime PM, bandwidth accounting, run workqueue, subdev, IOMMU.
 * The explicit devm_kfree is redundant (devres frees it anyway) but
 * harmless.
 */
static int cpp_remove(struct platform_device *pdev)
{
	struct cpp_device *cpp_dev;

	cpp_dev = platform_get_drvdata(pdev);
	if (!cpp_dev) {
		dev_err(&pdev->dev, "cpp device is NULL");
		return 0;
	}
	device_init_wakeup(&pdev->dev, false);
	pm_runtime_disable(&pdev->dev);
	cpp_deinit_bandwidth();

	cpp_destroy_run_work(cpp_dev);
	plat_cam_unregister_subdev(&cpp_dev->csd);
	cpp_iommu_unregister(cpp_dev);
	mutex_destroy(&cpp_dev->mutex);
	devm_kfree(&pdev->dev, cpp_dev);
	cam_dbg("%s removed", dev_name(&pdev->dev));

	return 0;
}
|
|
+
|
|
#ifdef CONFIG_PM_SLEEP
/* System sleep hooks: no context to save/restore yet (placeholders). */
static int k1xcpp_suspend(struct device *dev)
{
	/* TODO: */
	return 0;
}

static int k1xcpp_resume(struct device *dev)
{
	/* TODO: */
	return 0;
}
#endif
|
|
+
|
|
#ifdef CONFIG_PM
/* Runtime PM hooks: currently placeholders, power is managed manually. */
static int k1xcpp_runtime_suspend(struct device *dev)
{
	/* TODO: */
	return 0;
}

static int k1xcpp_runtime_resume(struct device *dev)
{
	/* TODO: */
	return 0;
}
#endif
|
|
+
|
|
/* PM operations: runtime + system sleep hooks defined above. */
static const struct dev_pm_ops k1xcpp_pm_ops = {
	SET_RUNTIME_PM_OPS(k1xcpp_runtime_suspend, k1xcpp_runtime_resume, NULL)
	SET_SYSTEM_SLEEP_PM_OPS(k1xcpp_suspend, k1xcpp_resume)
};

static struct platform_driver cpp_driver = {
	.driver = {
		.name = CPP_DRV_NAME,
		.of_match_table = k1xcpp_dt_match,
		.pm = &k1xcpp_pm_ops,
	},
	.probe = cpp_probe,
	.remove = cpp_remove,
};

module_platform_driver(cpp_driver);

MODULE_DESCRIPTION("SPACEMIT Camera Post Process Driver");
MODULE_LICENSE("GPL");
|
|
diff --git a/drivers/media/platform/spacemit/camera/cam_cpp/k1x_cpp.h b/drivers/media/platform/spacemit/camera/cam_cpp/k1x_cpp.h
|
|
new file mode 100644
|
|
index 000000000000..111111111111
|
|
--- /dev/null
|
|
+++ b/drivers/media/platform/spacemit/camera/cam_cpp/k1x_cpp.h
|
|
@@ -0,0 +1,188 @@
|
|
+/* SPDX-License-Identifier: GPL-2.0 */
|
|
+/*
|
|
+ * k1x_cpp.h - Driver for SPACEMIT K1X Camera Post Process
|
|
+ * lizhirong <zhirong.li@spacemit.com>
|
|
+ *
|
|
+ * Copyright (C) 2023 SPACEMIT Micro Limited
|
|
+ */
|
|
+
|
|
+#ifndef __K1X_CPP_H__
|
|
+#define __K1X_CPP_H__
|
|
+
|
|
+#include <linux/types.h>
|
|
+#include <linux/completion.h>
|
|
+#include <linux/interrupt.h>
|
|
+#include <media/k1x/k1x_cpp_uapi.h>
|
|
+#include <linux/reset.h>
|
|
+#include "cam_plat.h"
|
|
+#include "cpp_dmabuf.h"
|
|
+#include "cpp_iommu.h"
|
|
+
|
|
+#define MAX_ACTIVE_CPP_INSTANCE (1)
|
|
+#define MAX_CPP_V4L2_EVENTS (8)
|
|
+
|
|
/*
 * Lifecycle states of the CPP core as tracked in struct cpp_device::state.
 * (Exact transitions are driven by the open/reset/IRQ paths in k1x_cpp.c;
 * names are taken at face value here.)
 */
enum cpp_state {
	CPP_STATE_OFF,
	CPP_STATE_IDLE,
	CPP_STATE_ACTIVE,
	CPP_STATE_RST,
	CPP_STATE_ERR,
};
|
|
+
|
|
+struct cpp_queue_cmd {
|
|
+ struct list_head list_frame;
|
|
+ u64 ts_reg_config;
|
|
+ u64 ts_frm_trigger;
|
|
+ u64 ts_frm_finish;
|
|
+ u32 frame_id;
|
|
+ u32 client_id;
|
|
+ atomic_t in_processing;
|
|
+ // struct cpp_frame_info frame_info;
|
|
+ struct cpp_reg_cfg_cmd hw_cmds[MAX_REG_CMDS];
|
|
+ struct cpp_dma_port_info *dma_ports[MAX_DMA_PORT];
|
|
+};
|
|
+
|
|
+struct device_queue {
|
|
+ struct list_head list;
|
|
+ spinlock_t lock;
|
|
+ int max;
|
|
+ int len;
|
|
+ const char *name;
|
|
+};
|
|
+
|
|
+struct cpp_tasklet_event {
|
|
+ u8 used;
|
|
+ u32 irq_status;
|
|
+};
|
|
+
|
|
+struct cpp_run_work {
|
|
+ struct workqueue_struct *run_wq;
|
|
+ struct work_struct work;
|
|
+ struct completion run_complete;
|
|
+};
|
|
+
|
|
+/* Instance is already queued on the job_queue */
|
|
+#define TRANS_QUEUED (1 << 0)
|
|
+/* Instance is currently running in hardware */
|
|
+#define TRANS_RUNNING (1 << 1)
|
|
+/* Instance is currently aborting */
|
|
+#define TRANS_ABORT (1 << 2)
|
|
+
|
|
+struct cpp_device;
|
|
+
|
|
+struct cpp_ctx {
|
|
+ struct cpp_device *cpp_dev;
|
|
+ struct device_queue idleq;
|
|
+ struct device_queue frmq;
|
|
+
|
|
+ /* For device job queue */
|
|
+ struct list_head queue;
|
|
+ unsigned long job_flags;
|
|
+};
|
|
+
|
|
+struct cpp_hw_ops {
|
|
+ int (*global_reset)(struct cpp_device *vfe);
|
|
+ void (*enable_clk_gating)(struct cpp_device *cpp, u8 enable);
|
|
+ int (*set_burst_len)(struct cpp_device *cpp);
|
|
+ void (*enable_irqs_common)(struct cpp_device *cpp, u8 enable);
|
|
+ irqreturn_t (*isr)(int irq, void *data);
|
|
+ void (*debug_dump)(struct cpp_device *cpp);
|
|
+ int (*cfg_port_dmad)(struct cpp_device *cpp,
|
|
+ struct cpp_dma_port_info *port_info, u8 port_id);
|
|
+ u32 (*hw_version)(struct cpp_device *cpp);
|
|
+};
|
|
+
|
|
+struct cpp_device {
|
|
+ struct platform_device *pdev;
|
|
+ struct plat_cam_subdev csd;
|
|
+ struct resource *mem;
|
|
+ struct resource *irq;
|
|
+ void __iomem *regs_base;
|
|
+// struct clk *ahb_clk;
|
|
+ struct reset_control *ahb_reset;
|
|
+ struct reset_control *isp_cpp_reset;
|
|
+ struct reset_control *isp_ci_reset;
|
|
+ struct reset_control *lcd_mclk_reset;
|
|
+
|
|
+ struct clk *fnc_clk;
|
|
+ struct clk *bus_clk;
|
|
+ struct clk *dpu_clk;
|
|
+ struct mutex mutex;
|
|
+ struct completion reset_complete;
|
|
+ u32 open_cnt;
|
|
+ enum cpp_state state;
|
|
+ u8 mapped;
|
|
+ void *shared_mem;
|
|
+ unsigned long shared_size;
|
|
+
|
|
+ struct cpp_hw_info hw_info;
|
|
+ struct cpp_run_work run_work;
|
|
+
|
|
+ spinlock_t job_spinlock;
|
|
+ struct list_head job_queue;
|
|
+ struct cpp_ctx *curr_ctx;
|
|
+ struct cpp_ctx priv;
|
|
+
|
|
+ const struct cpp_hw_ops *ops;
|
|
+
|
|
+ struct cpp_iommu_device *mmu_dev;
|
|
+};
|
|
+
|
|
+extern const struct cpp_hw_ops cpp_ops_2_0;
|
|
+
|
|
/* Read a CPP register without a memory barrier (no ordering guarantee). */
static inline u32 cpp_reg_read_relaxed(struct cpp_device *cpp_dev, u32 reg)
{
	return readl_relaxed(cpp_dev->regs_base + reg);
}
|
|
+
|
|
/*
 * Write a CPP register without a memory barrier.
 *
 * Offsets with any bit of 0xffff8000 set lie outside the 32 KiB window
 * this accessor is allowed to touch; such writes are rejected and logged
 * instead of being forwarded to the bus.
 */
static inline void cpp_reg_write_relaxed(struct cpp_device *cpp_dev, u32 reg, u32 val)
{
	if (0xffff8000 & reg) { /* block reg violation */
		pr_err("reg write relaxed violation, 0x%x", reg);
		return;
	}
	writel_relaxed(val, cpp_dev->regs_base + reg);
}
|
|
+
|
|
+static inline u64 cpp_reg_read64(struct cpp_device *cpp_dev, u32 lower, u32 upper)
|
|
+{
|
|
+ u64 val;
|
|
+ val = (u64)ioread32(cpp_dev->regs_base + upper) << 32;
|
|
+ val += (u64)ioread32(cpp_dev->regs_base + lower);
|
|
+ return val;
|
|
+}
|
|
+
|
|
/* Ordered (non-relaxed) read of a CPP register. */
static inline u32 cpp_reg_read(struct cpp_device *cpp_dev, u32 reg)
{
	return ioread32(cpp_dev->regs_base + reg);
}

/* Ordered (non-relaxed) write of a CPP register. */
static inline void cpp_reg_write(struct cpp_device *cpp_dev, u32 reg, u32 val)
{
	iowrite32(val, cpp_dev->regs_base + reg);
}
|
|
+
|
|
+static inline void cpp_reg_write_mask(struct cpp_device *cpp_dev, u32 reg,
|
|
+ u32 val, u32 mask)
|
|
+{
|
|
+ u32 v;
|
|
+
|
|
+ if (!mask)
|
|
+ return;
|
|
+
|
|
+ v = cpp_reg_read(cpp_dev, reg);
|
|
+ v = (v & ~mask) | (val & mask);
|
|
+ cpp_reg_write(cpp_dev, reg, v);
|
|
+}
|
|
+
|
|
/* Set the bits of @val in @reg (read-modify-write). */
static inline void cpp_reg_set_bit(struct cpp_device *cpp_dev, u32 reg, u32 val)
{
	cpp_reg_write_mask(cpp_dev, reg, val, val);
}

/* Clear the bits of @val in @reg (read-modify-write). */
static inline void cpp_reg_clr_bit(struct cpp_device *cpp_dev, u32 reg, u32 val)
{
	cpp_reg_write_mask(cpp_dev, reg, 0, val);
}
|
|
+
|
|
+#endif /* ifndef __K1X_CPP_H__ */
|
|
diff --git a/drivers/media/platform/spacemit/camera/cam_cpp/regs-cpp-iommu.h b/drivers/media/platform/spacemit/camera/cam_cpp/regs-cpp-iommu.h
|
|
new file mode 100644
|
|
index 000000000000..111111111111
|
|
--- /dev/null
|
|
+++ b/drivers/media/platform/spacemit/camera/cam_cpp/regs-cpp-iommu.h
|
|
@@ -0,0 +1,37 @@
|
|
+/* SPDX-License-Identifier: GPL-2.0 */
|
|
+/*
|
|
+ * regs-cpp-iommu.h
|
|
+ *
|
|
+ * register for cpp iommu
|
|
+ *
|
|
+ * Copyright (C) 2023 SPACEMIT Micro Limited
|
|
+ */
|
|
+
|
|
+#ifndef __REGS_CPP_IOMMU_H__
|
|
+#define __REGS_CPP_IOMMU_H__
|
|
+
|
|
+/* TBU(n) registers */
|
|
+#define REG_IOMMU_TTBL(n) (0x1040 + 0x20 * (n))
|
|
+#define REG_IOMMU_TTBH(n) (0x1044 + 0x20 * (n))
|
|
+#define REG_IOMMU_TCR0(n) (0x1048 + 0x20 * (n))
|
|
+#define REG_IOMMU_TCR1(n) (0x104c + 0x20 * (n))
|
|
+#define REG_IOMMU_STAT(n) (0x1050 + 0x20 * (n))
|
|
+
|
|
+/* TOP registers */
|
|
+#define REG_IOMMU_BVAL (0x1000)
|
|
+#define REG_IOMMU_BVAH (0x1004)
|
|
+#define REG_IOMMU_TVAL (0x1008)
|
|
+#define REG_IOMMU_TVAH (0x100c)
|
|
+#define REG_IOMMU_GIRQ_STAT (0x1010)
|
|
+#define REG_IOMMU_GIRQ_ENA (0x1014)
|
|
+#define REG_IOMMU_TIMEOUT (0x1018)
|
|
+#define REG_IOMMU_ERR_CLR (0x101c)
|
|
+#define REG_IOMMU_LVAL (0x1020)
|
|
+#define REG_IOMMU_LVAH (0x1024)
|
|
+#define REG_IOMMU_LPAL (0x1028)
|
|
+#define REG_IOMMU_LPAH (0x102c)
|
|
+#define REG_IOMMU_TOAL (0x1034)
|
|
+#define REG_IOMMU_TOAH (0x1038)
|
|
+#define REG_IOMMU_VER (0x103c)
|
|
+
|
|
+#endif /* ifndef __REGS_CPP_IOMMU_H__ */
|
|
diff --git a/drivers/media/platform/spacemit/camera/cam_cpp/regs-cpp-v2p0.h b/drivers/media/platform/spacemit/camera/cam_cpp/regs-cpp-v2p0.h
|
|
new file mode 100644
|
|
index 000000000000..111111111111
|
|
--- /dev/null
|
|
+++ b/drivers/media/platform/spacemit/camera/cam_cpp/regs-cpp-v2p0.h
|
|
@@ -0,0 +1,147 @@
|
|
+/* SPDX-License-Identifier: GPL-2.0 */
|
|
+/*
|
|
+ * regs-cpp-v2p0.h
|
|
+ *
|
|
+ * register settings for SPACEMIT K1X Camera Post Process
|
|
+ *
|
|
+ * Copyright (C) 2023 SPACEMIT Micro Limited
|
|
+ */
|
|
+
|
|
+#ifndef __REGS_CPP_V2P0_H__
|
|
+#define __REGS_CPP_V2P0_H__
|
|
+
|
|
+#define REG_CPP_3DNR_BST_LEN (0x018C)
|
|
+#define REG_CPP_3DNR_CG_CTRL (0x0194)
|
|
+#define REG_CPP_3DNR_TOP_CTRL (0x01bc)
|
|
+#define REG_CPP_QUEUE_CTRL (0x7000)
|
|
+#define REG_CPP_IRQ_STATUS (0x7004)
|
|
+#define REG_CPP_IRQ_MASK (0x7008)
|
|
+#define REG_CPP_IRQ_RAW (0x700C)
|
|
+#define REG_CPP_CHIP_ID (0x7020)
|
|
+
|
|
+/* REG_CPP_QUEUE_CTRL */
|
|
+#define QCTRL_GLB_RST_CYC_MASK (0xff << 24)
|
|
+#define QCTRL_AXI_FAB_CG (0x1 << 18)
|
|
+#define QCTRL_FBC_ENC_CG (0x1 << 17)
|
|
+#define QCTRL_QUEUE_CG (0x1 << 16)
|
|
+#define QCTRL_QUEUE_FBC (0x1 << 12)
|
|
+#define QCTRL_GLB_RST (0x1 << 1)
|
|
+
|
|
+/* REG_CPP_IRQ_STATUS */
|
|
+#define QIRQ_STAT_3DNR_EOF (0x1 << 14)
|
|
+#define QIRQ_STAT_IOMMU (0x1 << 13)
|
|
+#define QIRQ_STAT_FBC_ENC (0x1 << 12)
|
|
+#define QIRQ_STAT_FBC_DEC1 (0x1 << 11)
|
|
+#define QIRQ_STAT_FBC_DEC0 (0x1 << 10)
|
|
+#define QIRQ_STAT_GLB_RST_DONE (0x1 << 9)
|
|
+#define QIRQ_STAT_RDMA_TIMEOUT (0x1 << 8)
|
|
+#define QIRQ_STAT_WDMA_TIMEOUT (0x1 << 7)
|
|
+#define QIRQ_STAT_DMAC_RD_ERR (0x1 << 6)
|
|
+#define QIRQ_STAT_DMAC_WR_ERR (0x1 << 5)
|
|
+#define QIRQ_STAT_RDMA_DONE (0x1 << 4)
|
|
+#define QIRQ_STAT_TOP_CTRL_DONE (0x1 << 3)
|
|
+#define QIRQ_STAT_SLC_START (0x1 << 2)
|
|
+#define QIRQ_STAT_SLC_DONE (0x1 << 1)
|
|
+#define QIRQ_STAT_FRM_DONE (0x1 << 0)
|
|
+
|
|
+/* REG_CPP_IRQ_MASK */
|
|
+#define QIRQ_MASK_ERR \
|
|
+ (QIRQ_STAT_RDMA_TIMEOUT | QIRQ_STAT_WDMA_TIMEOUT | \
|
|
+ QIRQ_STAT_DMAC_RD_ERR | QIRQ_STAT_DMAC_WR_ERR)
|
|
+
|
|
+#define QIRQ_MASK_FBC \
|
|
+ (QIRQ_STAT_FBC_ENC | QIRQ_STAT_FBC_DEC0)
|
|
+
|
|
+#define QIRQ_MASK_IOMMU (QIRQ_STAT_IOMMU)
|
|
+
|
|
+#define QIRQ_MASK_GEN \
|
|
+ (QIRQ_MASK_ERR | QIRQ_STAT_GLB_RST_DONE | QIRQ_STAT_FRM_DONE | \
|
|
+ QIRQ_MASK_FBC | QIRQ_MASK_IOMMU)
|
|
+
|
|
+#define REG_CPP_YINPUTBASEADDR_L4 (0x144)
|
|
+#define REG_CPP_YINPUTBASEADDR_L3 (0x148)
|
|
+#define REG_CPP_YINPUTBASEADDR_L2 (0x14c)
|
|
+#define REG_CPP_YINPUTBASEADDR_L1 (0x150)
|
|
+#define REG_CPP_YINPUTBASEADDR_L0 (0x154)
|
|
+#define REG_CPP_UVINPUTBASEADDR_L4 (0x158)
|
|
+#define REG_CPP_UVINPUTBASEADDR_L3 (0x15c)
|
|
+#define REG_CPP_UVINPUTBASEADDR_L2 (0x160)
|
|
+#define REG_CPP_UVINPUTBASEADDR_L1 (0x164)
|
|
+#define REG_CPP_UVINPUTBASEADDR_L0 (0x168)
|
|
+
|
|
+#define REG_CPP_PRE_YINPUTBASEADDR_L4 (0x2a0)
|
|
+#define REG_CPP_PRE_YINPUTBASEADDR_L3 (0x2a4)
|
|
+#define REG_CPP_PRE_YINPUTBASEADDR_L2 (0x2a8)
|
|
+#define REG_CPP_PRE_YINPUTBASEADDR_L1 (0x2ac)
|
|
+#define REG_CPP_PRE_YINPUTBASEADDR_L0 (0x2b0)
|
|
+#define REG_CPP_PRE_UVINPUTBASEADDR_L4 (0x2b4)
|
|
+#define REG_CPP_PRE_UVINPUTBASEADDR_L3 (0x2b8)
|
|
+#define REG_CPP_PRE_UVINPUTBASEADDR_L2 (0x2bc)
|
|
+#define REG_CPP_PRE_UVINPUTBASEADDR_L1 (0x2c0)
|
|
+#define REG_CPP_PRE_UVINPUTBASEADDR_L0 (0x2c4)
|
|
+#define REG_CPP_PRE_KINPUTBASEADDR_L4 (0x2c8)
|
|
+#define REG_CPP_PRE_KINPUTBASEADDR_L3 (0x2cc)
|
|
+#define REG_CPP_PRE_KINPUTBASEADDR_L2 (0x2d0)
|
|
+#define REG_CPP_PRE_KINPUTBASEADDR_L1 (0x2d4)
|
|
+#define REG_CPP_PRE_KINPUTBASEADDR_L0 (0x2d8)
|
|
+
|
|
+#define REG_CPP_YWBASEADDR_L4 (0x2f0)
|
|
+#define REG_CPP_YWBASEADDR_L3 (0x2f4)
|
|
+#define REG_CPP_YWBASEADDR_L2 (0x2f8)
|
|
+#define REG_CPP_YWBASEADDR_L1 (0x2fc)
|
|
+#define REG_CPP_YWBASEADDR_L0 (0x300)
|
|
+#define REG_CPP_UVWBASEADDR_L4 (0x304)
|
|
+#define REG_CPP_UVWBASEADDR_L3 (0x308)
|
|
+#define REG_CPP_UVWBASEADDR_L2 (0x30c)
|
|
+#define REG_CPP_UVWBASEADDR_L1 (0x310)
|
|
+#define REG_CPP_UVWBASEADDR_L0 (0x314)
|
|
+#define REG_CPP_KWBASEADDR_L4 (0x318)
|
|
+#define REG_CPP_KWBASEADDR_L3 (0x31c)
|
|
+#define REG_CPP_KWBASEADDR_L2 (0x320)
|
|
+#define REG_CPP_KWBASEADDR_L1 (0x324)
|
|
+#define REG_CPP_KWBASEADDR_L0 (0x328)
|
|
+
|
|
+/* DMA address high extension */
|
|
+#define REG_CPP_YINPUTBASEADDR_L4_H (0x354)
|
|
+#define REG_CPP_YINPUTBASEADDR_L3_H (0x358)
|
|
+#define REG_CPP_YINPUTBASEADDR_L2_H (0x35c)
|
|
+#define REG_CPP_YINPUTBASEADDR_L1_H (0x360)
|
|
+#define REG_CPP_YINPUTBASEADDR_L0_H (0x364)
|
|
+#define REG_CPP_UVINPUTBASEADDR_L4_H (0x368)
|
|
+#define REG_CPP_UVINPUTBASEADDR_L3_H (0x36c)
|
|
+#define REG_CPP_UVINPUTBASEADDR_L2_H (0x370)
|
|
+#define REG_CPP_UVINPUTBASEADDR_L1_H (0x374)
|
|
+#define REG_CPP_UVINPUTBASEADDR_L0_H (0x378)
|
|
+
|
|
+#define REG_CPP_PRE_YINPUTBASEADDR_L4_H (0x37c)
|
|
+#define REG_CPP_PRE_YINPUTBASEADDR_L3_H (0x380)
|
|
+#define REG_CPP_PRE_YINPUTBASEADDR_L2_H (0x384)
|
|
+#define REG_CPP_PRE_YINPUTBASEADDR_L1_H (0x388)
|
|
+#define REG_CPP_PRE_YINPUTBASEADDR_L0_H (0x38c)
|
|
+#define REG_CPP_PRE_UVINPUTBASEADDR_L4_H (0x390)
|
|
+#define REG_CPP_PRE_UVINPUTBASEADDR_L3_H (0x394)
|
|
+#define REG_CPP_PRE_UVINPUTBASEADDR_L2_H (0x398)
|
|
+#define REG_CPP_PRE_UVINPUTBASEADDR_L1_H (0x39c)
|
|
+#define REG_CPP_PRE_UVINPUTBASEADDR_L0_H (0x3a0)
|
|
+#define REG_CPP_PRE_KINPUTBASEADDR_L4_H (0x3a4)
|
|
+#define REG_CPP_PRE_KINPUTBASEADDR_L3_H (0x3a8)
|
|
+#define REG_CPP_PRE_KINPUTBASEADDR_L2_H (0x3ac)
|
|
+#define REG_CPP_PRE_KINPUTBASEADDR_L1_H (0x3b0)
|
|
+#define REG_CPP_PRE_KINPUTBASEADDR_L0_H (0x3b4)
|
|
+
|
|
+#define REG_CPP_YWBASEADDR_L4_H (0x3b8)
|
|
+#define REG_CPP_YWBASEADDR_L3_H (0x3bc)
|
|
+#define REG_CPP_YWBASEADDR_L2_H (0x3c0)
|
|
+#define REG_CPP_YWBASEADDR_L1_H (0x3c4)
|
|
+#define REG_CPP_YWBASEADDR_L0_H (0x3c8)
|
|
+#define REG_CPP_UVWBASEADDR_L4_H (0x3cc)
|
|
+#define REG_CPP_UVWBASEADDR_L3_H (0x3d0)
|
|
+#define REG_CPP_UVWBASEADDR_L2_H (0x3d4)
|
|
+#define REG_CPP_UVWBASEADDR_L1_H (0x3d8)
|
|
+#define REG_CPP_UVWBASEADDR_L0_H (0x3dc)
|
|
+#define REG_CPP_KWBASEADDR_L4_H (0x3e0)
|
|
+#define REG_CPP_KWBASEADDR_L3_H (0x3e4)
|
|
+#define REG_CPP_KWBASEADDR_L2_H (0x3e8)
|
|
+#define REG_CPP_KWBASEADDR_L1_H (0x3ec)
|
|
+#define REG_CPP_KWBASEADDR_L0_H (0x3f0)
|
|
+#endif
|
|
diff --git a/drivers/media/platform/spacemit/camera/cam_cpp/regs-fbc-v2p0.h b/drivers/media/platform/spacemit/camera/cam_cpp/regs-fbc-v2p0.h
|
|
new file mode 100644
|
|
index 000000000000..111111111111
|
|
--- /dev/null
|
|
+++ b/drivers/media/platform/spacemit/camera/cam_cpp/regs-fbc-v2p0.h
|
|
@@ -0,0 +1,70 @@
|
|
+/* SPDX-License-Identifier: GPL-2.0 */
|
|
+/*
|
|
+ * regs-fbc-v2p0.h
|
|
+ *
|
|
+ * Copyright (C) 2023 SPACEMIT Micro Limited
|
|
+ */
|
|
+
|
|
+#ifndef __REGS_FBC_V2P0_H__
|
|
+#define __REGS_FBC_V2P0_H__
|
|
+
|
|
+#define REG_FBC_TNRDEC_HL_ADDR (0x800)
|
|
+#define REG_FBC_TNRDEC_HH_ADDR (0x804)
|
|
+#define REG_FBC_TNRDEC_BBOX_X (0x808)
|
|
+#define REG_FBC_TNRDEC_BBOX_Y (0x80C)
|
|
+#define REG_FBC_TNRDEC_IMG_SIZE (0x810)
|
|
+#define REG_FBC_TNRDEC_SPLIT_MODE (0x814)
|
|
+#define REG_FBC_TNRDEC_PERF_CTRL (0x818)
|
|
+#define REG_FBC_TNRDEC_CG_EN (0x81C)
|
|
+#define REG_FBC_TNRDEC_DMAC_CTRL (0x820)
|
|
+#define REG_FBC_TNRDEC_TRIG_CTRL (0x824)
|
|
+#define REG_FBC_TNRDEC_IRQ_MASK (0x828)
|
|
+#define REG_FBC_TNRDEC_IRQ_RAW (0x82C)
|
|
+#define REG_FBC_TNRDEC_IRQ_STATUS (0x830)
|
|
+
|
|
+#define REG_FBC_TNRENC_HL_ADDR (0xa00)
|
|
+#define REG_FBC_TNRENC_HH_ADDR (0xa04)
|
|
+#define REG_FBC_TNRENC_PL_ADDR (0xa08)
|
|
+#define REG_FBC_TNRENC_PH_ADDR (0xa0c)
|
|
+#define REG_FBC_TNRENC_BBOX_X (0xa10)
|
|
+#define REG_FBC_TNRENC_BBOX_Y (0xa14)
|
|
+#define REG_FBC_TNRENC_Y_ADDR (0xa18)
|
|
+#define REG_FBC_TNRENC_PITCH_Y (0xa1C)
|
|
+#define REG_FBC_TNRENC_C_ADDR (0xa20)
|
|
+#define REG_FBC_TNRENC_PITCH_UV (0xa24)
|
|
+#define REG_FBC_TNRENC_Y_BUF_SIZE (0xa28)
|
|
+#define REG_FBC_TNRENC_C_BUF_SIZE (0xa2C)
|
|
+#define REG_FBC_TNRENC_SWAP_CTRL (0xa30)
|
|
+#define REG_FBC_TNRENC_IRQ_MASK (0xa34)
|
|
+#define REG_FBC_TNRENC_IRQ_RAW (0xa38)
|
|
+#define REG_FBC_TNRENC_IRQ_STATUS (0xa48)
|
|
+#define REG_FBC_TNRENC_MODE_CTRL (0xa40)
|
|
+#define REG_FBC_TNRENC_DMAC_LENGTH (0xa44)
|
|
+
|
|
+/* fbc irq status */
|
|
+#define FIRQ_DEC_DMAC_ERR (0x1 << 10)
|
|
+#define FIRQ_DEC_RDMA_TIMEOUT (0x1 << 9)
|
|
+#define FIRQ_DEC_SLV_REQ_ERR (0x1 << 8)
|
|
+#define FIRQ_DEC_PAYLOAD_ERR (0x1 << 7)
|
|
+#define FIRQ_DEC_HDR_ERR (0x1 << 6)
|
|
+#define FIRQ_DEC_WLBUF_EOF (0x1 << 5)
|
|
+#define FIRQ_DEC_CORE_EOF (0x1 << 4)
|
|
+#define FIRQ_DEC_PLD_RDMA_EOF (0x1 << 3)
|
|
+#define FIRQ_DEC_HDR_RDMA_EOF (0x1 << 2)
|
|
+#define FIRQ_DEC_CFG_SWAPED (0x1 << 1)
|
|
+#define FIRQ_DEC_EOF (0x1 << 0)
|
|
+
|
|
+#define FIRQ_STAT_CFG_DONE (0x1 << 17)
|
|
+#define FIRQ_STAT_ENC_EOF (0x1 << 16)
|
|
+#define FIRQ_STAT_ENC_ERR (0xFFFF)
|
|
+
|
|
+/* fbc irq mask */
|
|
+#define FIRQ_MASK_DEC_ERR \
|
|
+ (FIRQ_DEC_HDR_ERR | FIRQ_DEC_PAYLOAD_ERR | FIRQ_DEC_SLV_REQ_ERR | \
|
|
+ FIRQ_DEC_RDMA_TIMEOUT | FIRQ_DEC_DMAC_ERR)
|
|
+#define FIRQ_MASK_ENC_ERR (FIRQ_STAT_ENC_ERR)
|
|
+
|
|
+#define FIRQ_MASK_DEC_GEN (FIRQ_MASK_DEC_ERR)
|
|
+#define FIRQ_MASK_ENC_GEN (FIRQ_MASK_ENC_ERR)
|
|
+
|
|
+#endif
|
|
diff --git a/drivers/media/platform/spacemit/camera/cam_isp/k1x_isp_drv.c b/drivers/media/platform/spacemit/camera/cam_isp/k1x_isp_drv.c
|
|
new file mode 100644
|
|
index 000000000000..111111111111
|
|
--- /dev/null
|
|
+++ b/drivers/media/platform/spacemit/camera/cam_isp/k1x_isp_drv.c
|
|
@@ -0,0 +1,794 @@
|
|
+// SPDX-License-Identifier: GPL-2.0
|
|
+/*
|
|
+ * Description on this file
|
|
+ *
|
|
+ * Copyright (C) 2023 SPACEMIT Micro Limited
|
|
+ */
|
|
+
|
|
+#include "k1x_isp_drv.h"
|
|
+#include "k1x_isp_pipe.h"
|
|
+#include <cam_plat.h>
|
|
+
|
|
+#include <linux/clk.h>
|
|
+#include <linux/clk-provider.h>
|
|
+#include <linux/init.h>
|
|
+#include <linux/fs.h>
|
|
+#include <linux/slab.h>
|
|
+#include <linux/module.h>
|
|
+#include <linux/pm_runtime.h>
|
|
+#include <linux/kernel.h>
|
|
+#include <linux/device.h>
|
|
+#include <linux/of.h>
|
|
+#include <linux/uaccess.h>
|
|
+#include <media/v4l2-device.h>
|
|
+#include <linux/dma-buf.h>
|
|
+
|
|
+#define ISP_FUNC_CLK_FREQ (307200000)
|
|
+#define ISP_AXI_CLK_FREQ (307200000)
|
|
+
|
|
+struct isp_char_device g_isp_cdevice = { 0 };
|
|
+
|
|
+struct k1xisp_dev *g_isp_dev = NULL;
|
|
+struct spm_camera_ispfirm_ops g_ispfirm_ops;
|
|
+
|
|
+#define GET_ISP_DEV(isp_dev) (isp_dev = g_isp_dev)
|
|
+#define SET_ISP_DEV(isp_dev) (g_isp_dev = isp_dev)
|
|
+
|
|
+#define IS_COMBINATION_PIPE_MODE(mode, work_mode) { \
|
|
+ mode = ((ISP_WORK_MODE_HDR == work_mode) \
|
|
+ || (ISP_WORK_MODE_RGBW == work_mode) \
|
|
+ || (ISP_WORK_MODE_RGBIR == work_mode)); \
|
|
+ }
|
|
+
|
|
+int k1xisp_vi_send_cmd(unsigned int cmd, void *cmd_payload, unsigned int payload_len);
|
|
+int k1xisp_irq_callback(int irq_num, void *irq_data, unsigned int data_len);
|
|
+
|
|
+/*********************************export to outside********************************************/
|
|
+int k1xisp_dev_open(void)
|
|
+{
|
|
+ int ret = 0;
|
|
+ struct k1xisp_dev *isp_dev = NULL;
|
|
+ struct isp_firm isp_vi_ops = { };
|
|
+ struct v4l2_device *v4l2_dev = NULL;
|
|
+
|
|
+ GET_ISP_DEV(isp_dev);
|
|
+ ISP_DRV_CHECK_POINTER(isp_dev);
|
|
+
|
|
+ isp_dev->open_cnt++;
|
|
+ if (1 == isp_dev->open_cnt) {
|
|
+ v4l2_dev = plat_cam_v4l2_device_get();
|
|
+ if (!v4l2_dev) {
|
|
+ isp_log_err("isp get v4l2 device fail");
|
|
+ return -ENODEV;
|
|
+ }
|
|
+ //register callback to vi modulej, just need register once.
|
|
+ isp_vi_ops.frameinfo_size = 0;
|
|
+ g_ispfirm_ops.send_cmd = k1xisp_vi_send_cmd;
|
|
+ g_ispfirm_ops.irq_callback = k1xisp_irq_callback;
|
|
+ isp_vi_ops.ispfirm_ops = &g_ispfirm_ops;
|
|
+ //should use v4l2_subdev_notify instead, if isp registed as v4l2 subdev.
|
|
+ v4l2_dev->notify(NULL, PLAT_SD_NOTIFY_REGISTER_ISPFIRM, &isp_vi_ops);
|
|
+ isp_dev->vi_funs = isp_vi_ops.vi_ops;
|
|
+ plat_cam_v4l2_device_put(v4l2_dev);
|
|
+
|
|
+ pm_runtime_get_sync(&isp_dev->plat_dev->dev);
|
|
+ }
|
|
+
|
|
+ return ret;
|
|
+}
|
|
+
|
|
+int k1xisp_dev_release(void)
|
|
+{
|
|
+ int ret = 0;
|
|
+ struct k1xisp_dev *isp_dev = NULL;
|
|
+ struct v4l2_device *v4l2_dev = NULL;
|
|
+
|
|
+ GET_ISP_DEV(isp_dev);
|
|
+ ISP_DRV_CHECK_POINTER(isp_dev);
|
|
+
|
|
+ isp_dev->open_cnt--;
|
|
+ if (0 == isp_dev->open_cnt) {
|
|
+ v4l2_dev = plat_cam_v4l2_device_get();
|
|
+ if (!v4l2_dev) {
|
|
+ isp_log_err("isp get v4l2 device fail");
|
|
+ return -ENODEV;
|
|
+ }
|
|
+ //should use v4l2_subdev_notify instead, if isp registed as v4l2 subdev.
|
|
+ v4l2_dev->notify(NULL, PLAT_SD_NOTIFY_REGISTER_ISPFIRM, NULL);
|
|
+ isp_dev->vi_funs = NULL;
|
|
+ plat_cam_v4l2_device_put(v4l2_dev);
|
|
+
|
|
+ if (atomic_read(&isp_dev->clk_ref)) {
|
|
+ isp_log_warn("ispdev clks haven't been closed, now shutdown!");
|
|
+ reset_control_assert(isp_dev->ahb_reset);
|
|
+// clk_disable_unprepare(isp_dev->ahb_clk);
|
|
+ clk_disable_unprepare(isp_dev->fnc_clk);
|
|
+ reset_control_assert(isp_dev->isp_reset);
|
|
+
|
|
+ clk_disable_unprepare(isp_dev->axi_clk);
|
|
+ reset_control_assert(isp_dev->isp_ci_reset);
|
|
+
|
|
+ clk_disable_unprepare(isp_dev->dpu_clk);
|
|
+ reset_control_assert(isp_dev->lcd_mclk_reset);
|
|
+ atomic_set(&isp_dev->clk_ref, 0);
|
|
+ }
|
|
+
|
|
+ pm_runtime_put_sync(&isp_dev->plat_dev->dev);
|
|
+ }
|
|
+
|
|
+ return ret;
|
|
+}
|
|
+
|
|
+int k1xisp_dev_clock_set(int enable)
|
|
+{
|
|
+ int ret = 0;
|
|
+ // unsigned long clk_val = 0;
|
|
+ struct k1xisp_dev *isp_dev = NULL;
|
|
+
|
|
+ GET_ISP_DEV(isp_dev);
|
|
+ ISP_DRV_CHECK_POINTER(isp_dev);
|
|
+
|
|
+ if (0 == isp_dev->open_cnt) {
|
|
+ isp_log_err("Please open ispdev first before operate clock!");
|
|
+ return -1;
|
|
+ }
|
|
+
|
|
+ if (enable) {
|
|
+ atomic_inc(&isp_dev->clk_ref);
|
|
+ if (1 == atomic_read(&isp_dev->clk_ref)) {
|
|
+ reset_control_deassert(isp_dev->ahb_reset);
|
|
+// clk_prepare_enable(isp_dev->ahb_clk);
|
|
+ clk_prepare_enable(isp_dev->fnc_clk);
|
|
+ reset_control_deassert(isp_dev->isp_reset);
|
|
+
|
|
+ clk_prepare_enable(isp_dev->axi_clk);
|
|
+ reset_control_deassert(isp_dev->isp_ci_reset);
|
|
+
|
|
+ clk_prepare_enable(isp_dev->dpu_clk);
|
|
+ reset_control_deassert(isp_dev->lcd_mclk_reset);
|
|
+ }
|
|
+ } else {
|
|
+ atomic_dec(&isp_dev->clk_ref);
|
|
+ if (0 == atomic_read(&isp_dev->clk_ref)) {
|
|
+ reset_control_assert(isp_dev->ahb_reset);
|
|
+// clk_disable_unprepare(isp_dev->ahb_clk);
|
|
+ clk_disable_unprepare(isp_dev->fnc_clk);
|
|
+ reset_control_assert(isp_dev->isp_reset);
|
|
+
|
|
+ clk_disable_unprepare(isp_dev->axi_clk);
|
|
+ reset_control_assert(isp_dev->isp_ci_reset);
|
|
+
|
|
+ clk_disable_unprepare(isp_dev->dpu_clk);
|
|
+ reset_control_assert(isp_dev->lcd_mclk_reset);
|
|
+ }
|
|
+ }
|
|
+
|
|
+ return ret;
|
|
+}
|
|
+
|
|
+int k1xisp_dev_get_pipedev(u32 hw_pipe_id, struct k1xisp_pipe_dev **pp_pipedev)
|
|
+{
|
|
+ struct k1xisp_dev *isp_dev = NULL;
|
|
+ u32 pipedev_id = hw_pipe_id;
|
|
+
|
|
+ GET_ISP_DEV(isp_dev);
|
|
+ ISP_DRV_CHECK_POINTER(isp_dev);
|
|
+
|
|
+ *pp_pipedev = isp_dev->pipe_devs[pipedev_id];
|
|
+
|
|
+ return 0;
|
|
+}
|
|
+
|
|
/*
 * k1xisp_dev_get_vi_ops() - fetch the VI callback table registered during
 * k1xisp_dev_open(); may be NULL before the first open or after release.
 *
 * Returns 0 (or the ISP_DRV_CHECK_POINTER error when the device is gone).
 */
int k1xisp_dev_get_vi_ops(struct spm_camera_vi_ops **pp_vi_ops)
{
	struct k1xisp_dev *isp_dev = NULL;

	GET_ISP_DEV(isp_dev);
	ISP_DRV_CHECK_POINTER(isp_dev);

	*pp_vi_ops = isp_dev->vi_funs;
	return 0;
}
|
|
+
|
|
+int k1xisp_dev_get_viraddr_from_dma_buf(struct dma_buf *dma_buffer, void **pp_vir_addr)
|
|
+{
|
|
+ int ret = 0;
|
|
+ struct iosys_map map;
|
|
+
|
|
+ ISP_DRV_CHECK_POINTER(dma_buffer);
|
|
+
|
|
+ //dma-heap buffer
|
|
+ if (dma_buffer->ops->begin_cpu_access) {
|
|
+ ret = dma_buffer->ops->begin_cpu_access(dma_buffer, DMA_TO_DEVICE);
|
|
+ if (ret < 0) {
|
|
+ return -EPERM;
|
|
+ }
|
|
+
|
|
+ ret = dma_buffer->ops->vmap(dma_buffer, &map);
|
|
+ if (0 == ret) {
|
|
+ *pp_vir_addr = map.vaddr;
|
|
+ isp_log_dbg("%s: get dma buf vir addr=0x%p!", __func__,
|
|
+ map.vaddr);
|
|
+ } else {
|
|
+ isp_log_info("%s: get dma buf vir addr failed!", __func__);
|
|
+ ret = -EPERM;
|
|
+ }
|
|
+ } else {
|
|
+ isp_log_err("%s: this dma buf has no begin_cpu_access function!",
|
|
+ __func__);
|
|
+ ret = -EPERM;
|
|
+ }
|
|
+
|
|
+ return ret;
|
|
+}
|
|
+
|
|
/*
 * k1xisp_dev_put_viraddr_to_dma_buf() - undo
 * k1xisp_dev_get_viraddr_from_dma_buf(): vunmap the buffer and close the
 * CPU-access window. Always returns 0.
 */
int k1xisp_dev_put_viraddr_to_dma_buf(struct dma_buf *dma_buffer, void *vir_addr)
{
	struct iosys_map map;
	ISP_DRV_CHECK_POINTER(dma_buffer);
	{
		//dma-heap buffer
		map.vaddr = vir_addr;
		map.is_iomem = false;	/* mapping came from vmap, not ioremap */
		if (dma_buffer->ops->vunmap)
			dma_buffer->ops->vunmap(dma_buffer, &map);

		if (dma_buffer->ops->end_cpu_access)
			dma_buffer->ops->end_cpu_access(dma_buffer, DMA_TO_DEVICE);
	}

	return 0;
}
|
|
+
|
|
/*
 * _isp_dev_put_phyaddr_to_dma_buf() - tear down an address lookup:
 * unmap the attachment, detach, and drop ONE dma-buf reference.
 * The caller is responsible for any additional references it holds.
 */
int _isp_dev_put_phyaddr_to_dma_buf(struct dma_buf *dma_buffer,
				    struct dma_buf_attachment *attach,
				    struct sg_table *sgt)
{
	dma_buf_unmap_attachment(attach, sgt, DMA_BIDIRECTIONAL);
	dma_buf_detach(dma_buffer, attach);
	dma_buf_put(dma_buffer);

	return 0;
}
|
|
+
|
|
/*
 * k1xisp_dev_get_phyaddr_from_dma_buf() - resolve a dma-buf fd to the DMA
 * address of its single contiguous segment.
 *
 * Reference accounting (subtle but balanced):
 *   dma_buf_get()   +1
 *   get_dma_buf()   +1  (taken because the helper below puts one ref)
 * Success path: helper puts 1, explicit dma_buf_put() puts 1  -> net 0.
 * fail_detach falls through to fail_put, so that path puts twice,
 * matching the two refs held at that point; fail_put alone puts the
 * single ref held when attach fails.
 *
 * Returns 0 on success, -EBADF for a bad fd, -EPERM on map/attach errors
 * or when the buffer is not physically contiguous (orig_nents != 1).
 */
int k1xisp_dev_get_phyaddr_from_dma_buf(int fd, __u64 *phy_addr)
{
	int ret = 0;
	struct dma_buf *dma_buffer = NULL;
	struct dma_buf_attachment *attach;
	struct k1xisp_dev *isp_dev = NULL;
	struct sg_table *sgt;

	dma_buffer = dma_buf_get(fd);
	if (IS_ERR(dma_buffer)) {
		isp_log_err("%s: get dma buffer failed!", __func__);
		return -EBADF;
	}

	GET_ISP_DEV(isp_dev);
	attach = dma_buf_attach(dma_buffer, &isp_dev->plat_dev->dev);
	if (IS_ERR(attach)) {
		ret = -EPERM;
		goto fail_put;
	}

	get_dma_buf(dma_buffer);	/* extra ref, consumed by the helper */

	sgt = dma_buf_map_attachment(attach, DMA_BIDIRECTIONAL);
	if (IS_ERR(sgt)) {
		ret = -EPERM;
		goto fail_detach;
	}

	/* a DMA address is only meaningful for a single-segment buffer */
	if (sgt->orig_nents != 1) {
		isp_log_err
		    ("%s: the count of sg table in this dma buffer isn't one, but %d!",
		     __func__, sgt->orig_nents);
		ret = -EPERM;
	} else {
		*phy_addr = sg_dma_address(sgt->sgl);
	}

	//we only get phy addr
	_isp_dev_put_phyaddr_to_dma_buf(dma_buffer, attach, sgt);
	dma_buf_put(dma_buffer);
	return ret;

fail_detach:
	dma_buf_detach(dma_buffer, attach);
	dma_buf_put(dma_buffer);
	/* falls through: second put balances the get_dma_buf() above */
fail_put:
	dma_buf_put(dma_buffer);
	return ret;
}
|
|
+
|
|
+/*********************************end export to outside********************************************/
|
|
+
|
|
#ifdef CONFIG_SPACEMIT_DEBUG
/*
 * Debug aid: tracks whether the ISP is busy so an attempt to gate the
 * functional clock (rate change to 0) can be vetoed while running.
 */
struct dev_running_info {
	bool b_dev_running;	/* set elsewhere while the ISP is active */
	bool (*is_dev_running)(struct dev_running_info *p_devinfo);
	struct notifier_block nb;
} isp_running_info;

/* Trivial accessor used as the is_dev_running callback. */
static bool check_dev_running_status(struct dev_running_info *p_devinfo)
{
	return p_devinfo->b_dev_running;
}

#define to_devinfo(_nb) container_of(_nb, struct dev_running_info, nb)

/*
 * Clock notifier: reject (NOTIFY_BAD) a PRE_RATE_CHANGE that would drop
 * an enabled clock's rate from non-zero to zero while the device runs.
 */
static int dev_clkoffdet_notifier_handler(struct notifier_block *nb,
					  unsigned long msg, void *data)
{
	struct clk_notifier_data *cnd = data;
	struct dev_running_info *p_devinfo = to_devinfo(nb);

	if ((__clk_is_enabled(cnd->clk)) && (msg & PRE_RATE_CHANGE) &&
	    (cnd->new_rate == 0) && (cnd->old_rate != 0)) {
		if (p_devinfo->is_dev_running(p_devinfo))
			return NOTIFY_BAD;
	}

	return NOTIFY_OK;
}
#endif
|
|
+
|
|
+int k1xisp_dev_context_create(struct platform_device *pdev)
|
|
+{
|
|
+ int ret = 0;
|
|
+ struct k1xisp_dev *isp_dev = NULL;
|
|
+
|
|
+ //1. self struct initiate
|
|
+ isp_dev = devm_kzalloc(&pdev->dev, sizeof(struct k1xisp_dev), GFP_KERNEL);
|
|
+ if (unlikely(isp_dev == NULL)) {
|
|
+ dev_err(&pdev->dev, "could not allocate memory");
|
|
+ ret = -ENOMEM;
|
|
+ return ret;
|
|
+ }
|
|
+
|
|
+ isp_dev->plat_dev = pdev;
|
|
+
|
|
+ //2. analyze dts and get reg addr base and length.
|
|
+ /* get registers mem */
|
|
+ isp_dev->isp_reg_source =
|
|
+ platform_get_resource_byname(pdev, IORESOURCE_MEM, "isp");
|
|
+ if (!isp_dev->isp_reg_source) {
|
|
+ dev_err(&pdev->dev, "no mem resource");
|
|
+ ret = -ENODEV;
|
|
+ return ret;
|
|
+ }
|
|
+
|
|
+ isp_dev->isp_regbase =
|
|
+ (ulong) devm_ioremap(&pdev->dev, isp_dev->isp_reg_source->start,
|
|
+ resource_size(isp_dev->isp_reg_source));
|
|
+ if (IS_ERR((void *)isp_dev->isp_regbase)) {
|
|
+ dev_err(&pdev->dev, "fail to remap iomem\n");
|
|
+ ret = -EPERM;
|
|
+ return ret;
|
|
+ }
|
|
+#ifdef CONFIG_ARCH_SPACEMIT
|
|
+ /* get clock(s) */
|
|
+/*
|
|
+ isp_dev->ahb_clk = devm_clk_get(&pdev->dev, "isp_ahb");
|
|
+ if (IS_ERR(isp_dev->ahb_clk)) {
|
|
+ ret = PTR_ERR(isp_dev->ahb_clk);
|
|
+ dev_err(&pdev->dev, "failed to get ahb clock: %d\n", ret);
|
|
+ return ret;
|
|
+ }
|
|
+*/
|
|
+ isp_dev->ahb_reset = devm_reset_control_get_optional_shared(&pdev->dev, "isp_ahb_reset");
|
|
+ if (IS_ERR_OR_NULL(isp_dev->ahb_reset)) {
|
|
+ dev_err(&pdev->dev, "not found core isp_ahb_reset\n");
|
|
+ return PTR_ERR(isp_dev->ahb_reset);
|
|
+ }
|
|
+
|
|
+ isp_dev->isp_reset = devm_reset_control_get_optional_shared(&pdev->dev, "isp_reset");
|
|
+ if (IS_ERR_OR_NULL(isp_dev->isp_reset)) {
|
|
+ dev_err(&pdev->dev, "not found core isp_reset\n");
|
|
+ return PTR_ERR(isp_dev->isp_reset);
|
|
+ }
|
|
+
|
|
+ isp_dev->isp_ci_reset = devm_reset_control_get_optional_shared(&pdev->dev, "isp_ci_reset");
|
|
+ if (IS_ERR_OR_NULL(isp_dev->isp_ci_reset)) {
|
|
+ dev_err(&pdev->dev, "not found core isp_ci_reset\n");
|
|
+ return PTR_ERR(isp_dev->isp_ci_reset);
|
|
+ }
|
|
+
|
|
+ isp_dev->lcd_mclk_reset = devm_reset_control_get_optional_shared(&pdev->dev, "lcd_mclk_reset");
|
|
+ if (IS_ERR_OR_NULL(isp_dev->lcd_mclk_reset)) {
|
|
+ dev_err(&pdev->dev, "not found core lcd_mclk_reset\n");
|
|
+ return PTR_ERR(isp_dev->lcd_mclk_reset);
|
|
+ }
|
|
+
|
|
+ isp_dev->fnc_clk = devm_clk_get(&pdev->dev, "isp_func");
|
|
+ if (IS_ERR(isp_dev->fnc_clk)) {
|
|
+ ret = PTR_ERR(isp_dev->fnc_clk);
|
|
+ dev_err(&pdev->dev, "failed to get function clock: %d\n", ret);
|
|
+ return ret;
|
|
+ }
|
|
+#ifdef CONFIG_SPACEMIT_DEBUG
|
|
+ isp_running_info.is_dev_running = check_dev_running_status;
|
|
+ isp_running_info.nb.notifier_call = dev_clkoffdet_notifier_handler;
|
|
+ clk_notifier_register(isp_dev->fnc_clk, &isp_running_info.nb);
|
|
+#endif
|
|
+
|
|
+ isp_dev->axi_clk = devm_clk_get(&pdev->dev, "isp_axi");
|
|
+ if (IS_ERR(isp_dev->axi_clk)) {
|
|
+ ret = PTR_ERR(isp_dev->axi_clk);
|
|
+ dev_err(&pdev->dev, "failed to get bus clock: %d\n", ret);
|
|
+ return ret;
|
|
+ }
|
|
+
|
|
+ isp_dev->dpu_clk = devm_clk_get(&pdev->dev, "dpu_mclk");
|
|
+ if (IS_ERR(isp_dev->dpu_clk)) {
|
|
+ ret = PTR_ERR(isp_dev->dpu_clk);
|
|
+ dev_err(&pdev->dev, "failed to get dpu clock: %d\n", ret);
|
|
+ return ret;
|
|
+ }
|
|
+#endif
|
|
+
|
|
+ isp_dev->isp_regend =
|
|
+ isp_dev->isp_regbase + resource_size(isp_dev->isp_reg_source) - 1;
|
|
+ k1xisp_reg_set_base_addr(isp_dev->isp_regbase, isp_dev->isp_regend);
|
|
+
|
|
+ ret = k1xisp_pipe_dev_init(pdev, isp_dev->pipe_devs);
|
|
+ if (ret) {
|
|
+ dev_err(&pdev->dev, "pipedev init failed:%d!\n", ret);
|
|
+ return ret;
|
|
+ }
|
|
+
|
|
+ init_completion(&isp_dev->reset_irq_complete);
|
|
+ init_completion(&isp_dev->restart_complete);
|
|
+ atomic_set(&isp_dev->clk_ref, 0);
|
|
+ SET_ISP_DEV(isp_dev);
|
|
+
|
|
+ return ret;
|
|
+}
|
|
+
|
|
+long k1xisp_dev_copy_user(struct file *file, unsigned int cmd, void *arg,
|
|
+ k1xisp_ioctl_func func)
|
|
+{
|
|
+ char sbuf[128];
|
|
+ void *mbuf = NULL;
|
|
+ void *parg = arg;
|
|
+ long err = -EINVAL;
|
|
+
|
|
+ /* Copy arguments into temp kernel buffer */
|
|
+ if (_IOC_DIR(cmd) != _IOC_NONE) {
|
|
+ if (_IOC_SIZE(cmd) <= sizeof(sbuf)) {
|
|
+ parg = sbuf;
|
|
+ } else {
|
|
+ /* too big to allocate from stack */
|
|
+ mbuf = kmalloc(_IOC_SIZE(cmd), GFP_KERNEL);
|
|
+ if (NULL == mbuf)
|
|
+ return -ENOMEM;
|
|
+ parg = mbuf;
|
|
+ }
|
|
+
|
|
+ err = -EFAULT;
|
|
+ if (_IOC_DIR(cmd) & _IOC_WRITE) {
|
|
+ unsigned int n = _IOC_SIZE(cmd);
|
|
+
|
|
+ if (copy_from_user(parg, (void __user *)arg, n))
|
|
+ goto out;
|
|
+
|
|
+ /* zero out anything we don't copy from userspace */
|
|
+ if (n < _IOC_SIZE(cmd))
|
|
+ memset((u8 *)parg + n, 0, _IOC_SIZE(cmd) - n);
|
|
+ } else {
|
|
+ /* read-only ioctl */
|
|
+ memset(parg, 0, _IOC_SIZE(cmd));
|
|
+ }
|
|
+ }
|
|
+
|
|
+ /* Handles IOCTL */
|
|
+ err = func(file, cmd, (unsigned long)parg);
|
|
+ if (err == -ENOIOCTLCMD)
|
|
+ err = -ENOTTY;
|
|
+
|
|
+ if (_IOC_DIR(cmd) & _IOC_READ) {
|
|
+ unsigned int n = _IOC_SIZE(cmd);
|
|
+ if (copy_to_user((void __user *)arg, parg, n))
|
|
+ goto out;
|
|
+ }
|
|
+out:
|
|
+ if (mbuf) {
|
|
+ kfree(mbuf);
|
|
+ mbuf = NULL;
|
|
+ }
|
|
+ return err;
|
|
+}
|
|
+
|
|
/*
 * k1xisp_vi_send_cmd() - ISP-firmware -> VI command hook registered in
 * k1xisp_dev_open(). Currently a stub that accepts every command.
 */
int k1xisp_vi_send_cmd(unsigned int cmd, void *cmd_payload, unsigned int payload_len)
{
	int ret = 0;

	return ret;
}
|
|
+
|
|
/*
 * k1xisp_dma_irq_handler() - fan a DMA interrupt out to every pipeline
 * device. Runs in interrupt context (called from k1xisp_irq_callback()).
 */
int k1xisp_dma_irq_handler(void *irq_data)
{
	struct k1xisp_dev *isp_dev = NULL;
	struct k1xisp_pipe_dev *pipe_dev = NULL;
	int i;

	GET_ISP_DEV(isp_dev);
	ISP_DRV_CHECK_POINTER(isp_dev);

	for (i = 0; i < ISP_PIPE_DEV_ID_MAX; i++) {
		pipe_dev = isp_dev->pipe_devs[i];
		k1xisp_pipe_dma_irq_handler(pipe_dev, irq_data);
	}

	return 0;
}
|
|
+
|
|
+/*
|
|
+* the irq handler is registed in vi module,so isp just set callback to vi and called by vi. these happens in interrupt context.
|
|
+*/
|
|
+int k1xisp_irq_callback(int irq_num, void *irq_data, unsigned int data_len)
|
|
+{
|
|
+ int ret = 0;
|
|
+
|
|
+ if (ISP_IRQ == irq_num)
|
|
+ k1xisp_pipe_dev_irq_handler(irq_data); /* isp irq */
|
|
+ else if (DMA_IRQ == irq_num)
|
|
+ k1xisp_dma_irq_handler(irq_data); /* dma irq */
|
|
+
|
|
+ return ret;
|
|
+}
|
|
+
|
|
+int k1xisp_cdevs_create(void)
|
|
+{
|
|
+ int ret = 0, cdev_count = 0, minor = ISP_PIPE_DEV_ID_0;
|
|
+ dev_t devNum;
|
|
+ int i = 0, devno = 0;
|
|
+ struct cdev *cdev_array[ISP_PIPE_LINE_COUNT] = { NULL };
|
|
+ struct device *device_array[ISP_PIPE_LINE_COUNT] = { NULL };
|
|
+ struct file_operations *fops = NULL;
|
|
+
|
|
+ //1. alloc cdev obj
|
|
+ //two hardware pipelines + abstract pipes(1) + isp device
|
|
+ memset(&g_isp_cdevice, 0, sizeof(struct isp_char_device));
|
|
+ cdev_count = ISP_PIPE_LINE_COUNT;
|
|
+ g_isp_cdevice.cdev_info =
|
|
+ (struct isp_cdev_info *)kzalloc(sizeof(struct isp_cdev_info) * cdev_count, GFP_KERNEL);
|
|
+ if (NULL == g_isp_cdevice.cdev_info)
|
|
+ return -ENOMEM;
|
|
+
|
|
+ g_isp_cdevice.isp_cdev_cnt = cdev_count;
|
|
+ ret = alloc_chrdev_region(&devNum, minor, cdev_count, K1X_ISP_DEV_NAME);
|
|
+ if (ret)
|
|
+ goto ERR_STEP;
|
|
+
|
|
+ g_isp_cdevice.cdev_num = devNum;
|
|
+ g_isp_cdevice.cdev_major = MAJOR(devNum);
|
|
+
|
|
+ g_isp_cdevice.isp_class = class_create(THIS_MODULE, K1X_ISP_DEV_NAME);
|
|
+ if (IS_ERR(g_isp_cdevice.isp_class)) {
|
|
+ ret = PTR_ERR(g_isp_cdevice.isp_class);
|
|
+ goto ERR_STEP1;
|
|
+ }
|
|
+
|
|
+ fops = k1xisp_pipe_get_fops();
|
|
+ for (i = ISP_PIPE_DEV_ID_0; i < cdev_count; i++) {
|
|
+ //2. init cdev obj
|
|
+ cdev_array[i] = &g_isp_cdevice.cdev_info[i].isp_cdev;
|
|
+ if (i <= ISP_HW_PIPELINE_ID_MAX) {
|
|
+ //isp pipe dev
|
|
+ cdev_init(cdev_array[i], fops);
|
|
+ } else {
|
|
+ //isp device
|
|
+ // cdev_init(cdev_array[i], &g_isp_dev_fops);
|
|
+ }
|
|
+ cdev_array[i]->owner = THIS_MODULE;
|
|
+
|
|
+ //3. register cdev obj
|
|
+ devno = MKDEV(g_isp_cdevice.cdev_major, i);
|
|
+ ret = cdev_add(cdev_array[i], devno, 1);
|
|
+ if (ret) {
|
|
+ cdev_array[i] = NULL;
|
|
+ goto ERR_STEP2;
|
|
+ }
|
|
+ //4. create device
|
|
+ if (i < ISP_HW_PIPELINE_ID_MAX) {
|
|
+ //normal pipeline
|
|
+ device_array[i] =
|
|
+ device_create(g_isp_cdevice.isp_class, NULL, devno, NULL,
|
|
+ "%s%d", K1X_ISP_PIPE_DEV_NAME, i);
|
|
+ }
|
|
+
|
|
+ if (IS_ERR(device_array[i])) {
|
|
+ ret = PTR_ERR(device_array[i]);
|
|
+ device_array[i] = NULL;
|
|
+ goto ERR_STEP2;
|
|
+ }
|
|
+ }
|
|
+
|
|
+ return ret;
|
|
+
|
|
+ERR_STEP2:
|
|
+ for (i = ISP_PIPE_DEV_ID_0; i < cdev_count; i++) {
|
|
+ if (cdev_array[i])
|
|
+ cdev_del(cdev_array[i]);
|
|
+
|
|
+ if (device_array[i])
|
|
+ device_destroy(g_isp_cdevice.isp_class,
|
|
+ MKDEV(g_isp_cdevice.cdev_major, i));
|
|
+ }
|
|
+ class_destroy(g_isp_cdevice.isp_class);
|
|
+
|
|
+ERR_STEP1:
|
|
+ unregister_chrdev_region(devNum, cdev_count);
|
|
+
|
|
+ERR_STEP:
|
|
+ kfree(g_isp_cdevice.cdev_info);
|
|
+
|
|
+ isp_log_err("%s : %s : %d - fail!", __FILE__, __func__, __LINE__);
|
|
+ return ret;
|
|
+}
|
|
+
|
|
+void k1xisp_cdevs_destroy(void)
|
|
+{
|
|
+ int i = 0;
|
|
+
|
|
+ for (i = ISP_PIPE_DEV_ID_0; i < g_isp_cdevice.isp_cdev_cnt; i++) {
|
|
+ device_destroy(g_isp_cdevice.isp_class,
|
|
+ MKDEV(g_isp_cdevice.cdev_major, i));
|
|
+ cdev_del(&(g_isp_cdevice.cdev_info[i]).isp_cdev);
|
|
+ }
|
|
+
|
|
+ class_destroy(g_isp_cdevice.isp_class);
|
|
+ unregister_chrdev_region(g_isp_cdevice.cdev_num, g_isp_cdevice.isp_cdev_cnt);
|
|
+ kfree(g_isp_cdevice.cdev_info);
|
|
+ g_isp_cdevice.cdev_info = NULL;
|
|
+}
|
|
+
|
|
+void k1xisp_cdev_link_devices(void)
|
|
+{
|
|
+ int i = 0;
|
|
+ struct k1xisp_dev *isp_dev = NULL;
|
|
+
|
|
+ GET_ISP_DEV(isp_dev);
|
|
+ if (isp_dev && g_isp_cdevice.cdev_info) {
|
|
+ for (i = ISP_PIPE_DEV_ID_0; i < g_isp_cdevice.isp_cdev_cnt; i++) {
|
|
+ if (i < ISP_PIPE_DEV_ID_MAX)
|
|
+ g_isp_cdevice.cdev_info[i].p_dev =
|
|
+ (void *)isp_dev->pipe_devs[i]; //pipe devices
|
|
+ else
|
|
+ g_isp_cdevice.cdev_info[i].p_dev = (void *)isp_dev; //isp device
|
|
+ }
|
|
+ }
|
|
+}
|
|
+
|
|
+static int k1xisp_dev_probe(struct platform_device *pdev)
|
|
+{
|
|
+ int ret = 0;
|
|
+ struct k1xisp_dev *isp_dev = NULL;
|
|
+
|
|
+ isp_log_dbg("k1xisp begin to probe");
|
|
+
|
|
+ ret = k1xisp_cdevs_create();
|
|
+ if (ret) {
|
|
+ isp_log_info("k1xisp create cdevs fail!");
|
|
+ return ret;
|
|
+ }
|
|
+
|
|
+ dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(33));
|
|
+ ret = k1xisp_dev_context_create(pdev);
|
|
+ if (ret)
|
|
+ goto ERR_STEP;
|
|
+
|
|
+ k1xisp_cdev_link_devices();
|
|
+ /* enable runtime pm */
|
|
+ pm_runtime_enable(&pdev->dev);
|
|
+
|
|
+ GET_ISP_DEV(isp_dev);
|
|
+ isp_log_dbg
|
|
+ ("k1xisp device drvier probe successful,regBase=0x%lx,regEnd=0x%lx!",
|
|
+ isp_dev->isp_regbase, isp_dev->isp_regend);
|
|
+ return 0;
|
|
+
|
|
+ERR_STEP:
|
|
+ k1xisp_cdevs_destroy();
|
|
+ isp_log_err("%s : %s : %d - fail!", __FILE__, __func__, __LINE__);
|
|
+ return ret;
|
|
+}
|
|
+
|
|
+int k1xisp_dev_context_destroy(struct platform_device *pdev)
|
|
+{
|
|
+ int ret = 0;
|
|
+ struct k1xisp_dev *isp_dev = NULL;
|
|
+
|
|
+ GET_ISP_DEV(isp_dev);
|
|
+ ISP_DRV_CHECK_POINTER(isp_dev);
|
|
+
|
|
+ ret = k1xisp_pipe_dev_exit(pdev, isp_dev->pipe_devs);
|
|
+
|
|
+ /* put clock(s) */
|
|
+#ifdef CONFIG_ARCH_SPACEMIT
|
|
+// devm_clk_put(&pdev->dev, isp_dev->ahb_clk);
|
|
+ devm_clk_put(&pdev->dev, isp_dev->axi_clk);
|
|
+
|
|
+#ifdef CONFIG_SPACEMIT_DEBUG
|
|
+ clk_notifier_unregister(isp_dev->fnc_clk, &isp_running_info.nb);
|
|
+#endif
|
|
+ devm_clk_put(&pdev->dev, isp_dev->fnc_clk);
|
|
+#endif
|
|
+
|
|
+ // iounmap
|
|
+ devm_iounmap(&pdev->dev, (void __iomem *)isp_dev->isp_regbase);
|
|
+ //destory self struct
|
|
+ devm_kfree(&pdev->dev, (void *)isp_dev);
|
|
+ SET_ISP_DEV(NULL);
|
|
+
|
|
+ return ret;
|
|
+}
|
|
+
|
|
+static int k1xisp_dev_remove(struct platform_device *pdev)
|
|
+{
|
|
+ /* disable runtime pm */
|
|
+ pm_runtime_disable(&pdev->dev);
|
|
+ k1xisp_dev_context_destroy(pdev);
|
|
+ k1xisp_cdevs_destroy();
|
|
+
|
|
+ isp_log_dbg("k1xisp device drvier remove successful!");
|
|
+ return 0;
|
|
+}
|
|
+
|
|
+
|
|
+#ifdef CONFIG_PM_SLEEP
|
|
+static int k1xisp_dev_suspend(struct device *dev)
|
|
+{
|
|
+ /* TODO: */
|
|
+ return 0;
|
|
+}
|
|
+
|
|
+static int k1xisp_dev_resume(struct device *dev)
|
|
+{
|
|
+ /* TODO: */
|
|
+ return 0;
|
|
+}
|
|
+#endif
|
|
+
|
|
+#ifdef CONFIG_PM
|
|
+static int k1xisp_dev_runtime_suspend(struct device *dev)
|
|
+{
|
|
+ /* TODO: */
|
|
+ return 0;
|
|
+}
|
|
+
|
|
+static int k1xisp_dev_runtime_resume(struct device *dev)
|
|
+{
|
|
+ /* TODO: */
|
|
+ return 0;
|
|
+}
|
|
+#endif
|
|
+
|
|
+static const struct dev_pm_ops k1xisp_pm_ops = {
|
|
+ SET_RUNTIME_PM_OPS(k1xisp_dev_runtime_suspend, k1xisp_dev_runtime_resume, NULL)
|
|
+ SET_SYSTEM_SLEEP_PM_OPS(k1xisp_dev_suspend, k1xisp_dev_resume)
|
|
+};
|
|
+static const struct of_device_id k1xisp_dev_match[] = {
|
|
+ {.compatible = "spacemit,k1xisp" },
|
|
+ { },
|
|
+};
|
|
+
|
|
+MODULE_DEVICE_TABLE(of, k1xisp_dev_match);
|
|
+
|
|
+struct platform_driver k1xisp_dev_driver = {
|
|
+ .driver = {
|
|
+ .name = K1X_ISP_DEV_NAME,
|
|
+ .of_match_table = of_match_ptr(k1xisp_dev_match),
|
|
+ .pm = &k1xisp_pm_ops,
|
|
+ },
|
|
+ .probe = k1xisp_dev_probe,
|
|
+ .remove = k1xisp_dev_remove,
|
|
+};
|
|
+
|
|
+module_platform_driver(k1xisp_dev_driver);
|
|
+
|
|
+MODULE_AUTHOR("SPACEMIT Inc.");
|
|
+MODULE_DESCRIPTION("SPACEMIT K1X ISP device driver");
|
|
+MODULE_LICENSE("GPL");
|
|
diff --git a/drivers/media/platform/spacemit/camera/cam_isp/k1x_isp_drv.h b/drivers/media/platform/spacemit/camera/cam_isp/k1x_isp_drv.h
|
|
new file mode 100644
|
|
index 000000000000..111111111111
|
|
--- /dev/null
|
|
+++ b/drivers/media/platform/spacemit/camera/cam_isp/k1x_isp_drv.h
|
|
@@ -0,0 +1,469 @@
|
|
+/* SPDX-License-Identifier: GPL-2.0 */
|
|
+#ifndef k1x_ISP_DRV_H
|
|
+#define k1x_ISP_DRV_H
|
|
+
|
|
+#include "k1x_isp_reg.h"
|
|
+
|
|
+#include <media/k1x/k1x_isp_drv_uapi.h>
|
|
+#include <linux/io.h>
|
|
+#include <linux/interrupt.h>
|
|
+#include <linux/cdev.h>
|
|
+#include <linux/platform_device.h>
|
|
+#include <linux/reset.h>
|
|
+
|
|
+#define k1x_ISPDEV_DRV_NAME "k1x-ispdev-drv"
|
|
+
|
|
+#define CAM_MODULE_TAG CAM_MDL_ISP
|
|
+#include "cam_dbg.h"
|
|
+
|
|
+/* isp log print */
|
|
+#define isp_log_err cam_err
|
|
+#define isp_log_info cam_info
|
|
+#define isp_log_warn cam_warn
|
|
+#define isp_log_dbg cam_dbg
|
|
+
|
|
+#define ISP_DEV_COUNT (1)
|
|
+#define ISP_PIPE_LINE_COUNT (ISP_PIPE_DEV_ID_MAX)
|
|
+#define ISP_STAT_THROUGH_DMA_COUNT 2 //eis and pdc write through dma
|
|
+#define ISP_STAT_DMA_IRQ_BIT_MAX 32
|
|
+#define ISP_STAT_THROUGH_MEM_COUNT 4 //ae,awb,af,ltm result read from mem reg.
|
|
+#define ISP_VOTER_MAX_NUM 2
|
|
+
|
|
+#define ISP_DRV_CHECK_POINTER(ptr) \
|
|
+ do { \
|
|
+ if (NULL == ptr) { \
|
|
+ isp_log_err("%s:Null Pointer!", __FUNCTION__); \
|
|
+ return -EINVAL; \
|
|
+ } \
|
|
+ } while (0)
|
|
+
|
|
+#define ISP_DRV_CHECK_PARAMETERS(value, min, max, name) \
|
|
+ do { \
|
|
+ if (value < min || value > max) { \
|
|
+ isp_log_err("%s: invalid parameter(%s):%d!", __FUNCTION__, name, value); \
|
|
+ return -EINVAL; \
|
|
+ } \
|
|
+ } while(0)
|
|
+
|
|
+#define ISP_DRV_CHECK_MAX_PARAMETERS(value, max, name) \
|
|
+ do { \
|
|
+ if (value > max) { \
|
|
+ isp_log_err("%s: invalid parameter(%s):%d!", __FUNCTION__, name, value); \
|
|
+ return -EINVAL; \
|
|
+ } \
|
|
+ } while(0)
|
|
+
|
|
+#define ISP_DRV_ARRAY_LENGTH(x) (sizeof(x) / sizeof(x[0]))
|
|
+
|
|
+#define ISP_STAT_ID_IR_AVG ISP_STAT_ID_MAX
|
|
+
|
|
+enum isp_slave_stat_id {
|
|
+ ISP_SLAVE_STAT_ID_AE = ISP_STAT_ID_IR_AVG + 1,
|
|
+ ISP_SLAVE_STAT_ID_AWB,
|
|
+ ISP_SLAVE_STAT_ID_EIS,
|
|
+ ISP_SLAVE_STAT_ID_AF,
|
|
+ ISP_SLAVE_STAT_ID_MAX,
|
|
+};
|
|
+
|
|
+enum isp_pipe_dev_id {
|
|
+ ISP_PIPE_DEV_ID_0, // hardware pipeline0
|
|
+ ISP_PIPE_DEV_ID_1, // hardware pipeline1
|
|
+ // ISP_PIPE_DEV_ID_COMBINATION, //HDR,RGBW,RGBIR, use two pipelines once.
|
|
+ ISP_PIPE_DEV_ID_MAX,
|
|
+};
|
|
+
|
|
+enum isp_buffer_status {
|
|
+ ISP_BUFFER_STATUS_INVALID = 0,
|
|
+ ISP_BUFFER_STATUS_IDLE = 1,
|
|
+ ISP_BUFFER_STATUS_BUSY,
|
|
+ ISP_BUFFER_STATUS_DONE,
|
|
+ ISP_BUFFER_STATUS_ERROR,
|
|
+ ISP_BUFFER_STATUS_MAX = ISP_BUFFER_STATUS_ERROR,
|
|
+};
|
|
+
|
|
+/**
|
|
+ * enum isp_dma_irq_type - the types of dma irq.
|
|
+ *
|
|
+ * @ISP_DMA_IRQ_TYPE_ERR: deal with error first,because eof also come when error.
|
|
+ * @ISP_DMA_IRQ_TYPE_EOF: deal with eof sencod,because the sof of next frame may
|
|
+ * come before current eof.
|
|
+ * @ISP_DMA_IRQ_TYPE_SOF: deal with sof last.
|
|
+ */
|
|
+enum isp_dma_irq_type {
|
|
+ ISP_DMA_IRQ_TYPE_ERR,
|
|
+ ISP_DMA_IRQ_TYPE_EOF,
|
|
+ ISP_DMA_IRQ_TYPE_SOF,
|
|
+ ISP_DMA_IRQ_TYPE_NUM,
|
|
+};
|
|
+
|
|
+typedef long (*k1xisp_ioctl_func)(struct file *file, unsigned int cmd,
|
|
+ unsigned long arg);
|
|
+
|
|
+struct isp_irq_func_params {
|
|
+ struct k1xisp_pipe_dev *pipe_dev;
|
|
+ u32 frame_num;
|
|
+ u32 irq_status;
|
|
+ u32 hw_pipe_id;
|
|
+};
|
|
+
|
|
+/**
|
|
+ * struct k1xisp_irq_handler - the function for handling isp irq.
|
|
+ *
|
|
+ * @pipe_dev: point to the current pipe devices.
|
|
+ * @irq_status: status of irq.
|
|
+ *
|
|
+ * The return values:
|
|
+ * 1 : need schedule the bottom of irq context.
|
|
+ * 0 : needn't schedule the bottom of irq context.
|
|
+ */
|
|
+typedef int (*k1xisp_irq_handler)(struct isp_irq_func_params *param);
|
|
+
|
|
+struct isp_cdev_info {
|
|
+ struct cdev isp_cdev;
|
|
+ void *p_dev;
|
|
+};
|
|
+
|
|
+struct isp_char_device {
|
|
+ struct isp_cdev_info *cdev_info;
|
|
+ u32 cdev_major;
|
|
+ dev_t cdev_num;
|
|
+ u32 isp_cdev_cnt;
|
|
+ struct class *isp_class;
|
|
+ void *isp_dev;
|
|
+};
|
|
+
|
|
+struct isp_kmem_info {
|
|
+ union {
|
|
+ u64 phy_addr;
|
|
+ int fd;
|
|
+ } mem;
|
|
+ void *kvir_addr; //virtual addr of kenel
|
|
+ u32 mem_size;
|
|
+ u32 config;
|
|
+ struct dma_buf *dma_buffer;
|
|
+};
|
|
+
|
|
+/**
|
|
+ * struct isp_kbuffer_info - the description of buffer used in kernel.
|
|
+ *
|
|
+ * @frame_id: the frame id of this buffer.
|
|
+ * @buf_index: the index of this buffer in the queue.
|
|
+ * @plane_count: the count of planes in this buffer.
|
|
+ * @buf_status: the status of the buffer whose value is defined by &enum isp_buffer_status.
|
|
+ * @kvir_addr: the virtual addr of the buffer used in kernel.
|
|
+ * @phy_addr: record the phy addr of this buffer when fd memory.
|
|
+ * @hook: the list hook.
|
|
+ * @buf_planes: planes in this buffer.
|
|
+ * @dma_buffer: the dma buffer according to fd in the plane.
|
|
+ */
|
|
+struct isp_kbuffer_info {
|
|
+ int frame_id;
|
|
+ int buf_index;
|
|
+ u32 plane_count;
|
|
+ u32 buf_status;
|
|
+ void *kvir_addr; //kernel vir addr.
|
|
+ u64 phy_addr;
|
|
+ struct list_head hook;
|
|
+ struct isp_buffer_plane buf_planes[K1X_ISP_MAX_PLANE_NUM];
|
|
+ struct dma_buf *dma_buffer;
|
|
+};
|
|
+
|
|
+/**
|
|
+ * struct isp_stat_buffer_queue - buffer queue used in stat node.
|
|
+ *
|
|
+ * @fd_memory: use fd to identify the buffer if true.
|
|
+ * @fill_by_cpu: datas are filled by cpu.
|
|
+ * @stat_id: the queue belongs to which stats, defined by &enum isp_stat_id.
|
|
+ * @buf_count: the count of buffers in the queue.
|
|
+ * @busy_bufcnt: the count of buffers in busy queue.
|
|
+ * @@idle_bufcnt: the count of buffers in idle queue.
|
|
+ * @queue_lock: the spinlock.
|
|
+ * @busy_buflist: busy buffer queue.
|
|
+ * @idle_buflist: idle buffer queue.
|
|
+ * @buf_info: all the buffer can used in this queue on kernel space.
|
|
+ */
|
|
+struct isp_stat_buffer_queue {
|
|
+ u8 fd_memory;
|
|
+ u8 fill_by_cpu;
|
|
+ u32 stat_id;
|
|
+ u32 buf_count;
|
|
+ u32 busy_bufcnt;
|
|
+ u32 idle_bufcnt;
|
|
+ spinlock_t queue_lock;
|
|
+ struct list_head busy_buflist;
|
|
+ struct list_head idle_buflist;
|
|
+ struct isp_kbuffer_info buf_info[K1X_ISP_MAX_BUFFER_NUM];
|
|
+ // bool bNextEofMiss; //when sof find no idle buffer, the next eof must miss.
|
|
+ // bool bLastEofMiss;
|
|
+};
|
|
+
|
|
+/**
|
|
+ * struct isp_stat_done_info - buffer info for isp stat done.
|
|
+ *
|
|
+ * @done_cnt: the count of buffers on done list.
|
|
+ * @done_list: all the stat buffer are ready for user(dequeued).
|
|
+ * @done_lock: spinlock for protecting the access to done list.
|
|
+ */
|
|
+struct isp_stat_done_info {
|
|
+ u32 done_cnt;
|
|
+ struct list_head done_list;
|
|
+ spinlock_t done_lock;
|
|
+};
|
|
+
|
|
+/**
|
|
+ * struct stat_dma_irq_bits - irq bit info of isp stat dma.
|
|
+ *
|
|
+ * @stat_id: the dma belongs to which stat, defined by &enum isp_stat_id.
|
|
+ * @dma_ch_id: the dma channel id.
|
|
+ * @irq_bit: the irq bit value.
|
|
+ */
|
|
+struct stat_dma_irq_bits {
|
|
+ u32 stat_id;
|
|
+ u32 dma_ch_id;
|
|
+ u32 irq_bit[ISP_DMA_IRQ_TYPE_NUM];
|
|
+};
|
|
+
|
|
+enum stat_dma_switch_status {
|
|
+ STAT_DMA_SWITCH_DYNAMIC_ON = 1,
|
|
+ STAT_DMA_SWITCH_DYNAMIC_OFF,
|
|
+ STAT_DMA_SWITCH_STATUS_MAX,
|
|
+};
|
|
+
|
|
+/**
|
|
+ * struct stat_dma_irq_info - current irq info of isp stat dma.
|
|
+ *
|
|
+ * @stat_id: the dma belongs to which stat, defined by &enum isp_stat_id.
|
|
+ * @flag_lock: spinlock for irq_flag.
|
|
+ * @irq_flag: it's true if irq happens, such as sof, eof, error.
|
|
+ * @dynamic_switch: dma on/off is dynamic by user, 1 means always open,defined by &enum stat_dma_switch_status
|
|
+ */
|
|
+struct stat_dma_irq_info {
|
|
+ u32 stat_id;
|
|
+ spinlock_t flag_lock;
|
|
+ u8 irq_flag[ISP_DMA_IRQ_TYPE_NUM];
|
|
+ u32 dynamic_switch;
|
|
+ u32 dynamic_trigger; //1->trigger on; 2->trigger off
|
|
+};
|
|
+
|
|
+struct stat_mem_irq_info {
|
|
+ u32 stat_id;
|
|
+ u8 start_read;
|
|
+ spinlock_t mem_flag_lock;
|
|
+};
|
|
+
|
|
+enum pipe_event_type {
|
|
+ PIPE_EVENT_TRIGGER_VOTE_SYS = 1,
|
|
+ PIPE_EVENT_CAST_VOTE,
|
|
+ PIPE_EVENT_TYPE_MAX,
|
|
+};
|
|
+
|
|
+struct stats_notify_params {
|
|
+ u32 stat_id;
|
|
+ u32 event_enable;
|
|
+ u32 frame_id;
|
|
+};
|
|
+
|
|
+/**
|
|
+ * struct k1xisp_stats_node - isp stat node including all stats on one pipeline.
|
|
+ *
|
|
+ * @stat_active: the stat is active if true.
|
|
+ * @hw_pipe_id: this stats node belong to which hardware pipeline.
|
|
+ * @stat_bufqueue: the buffer queue in this stats node.
|
|
+ * @stat_done_info: buffer info for isp stat done.
|
|
+ * @stat_dma_irq_bits: irq bits of stat written through dma.
|
|
+ * @dma_irq_info: the stat's dma irq info, the results of this stat are written through dma.
|
|
+ * @mem_irq_index: the index in mem_irq_info array for every stat.
|
|
+ * @mem_irq_info: the stat's mem irq info, the results of this stat are read through registers.
|
|
+ */
|
|
+struct k1xisp_stats_node {
|
|
+ void *private_dev;
|
|
+ bool stat_active[ISP_STAT_ID_MAX];
|
|
+ u32 hw_pipe_id;
|
|
+ struct isp_stat_buffer_queue stat_bufqueue[ISP_STAT_ID_MAX];
|
|
+ struct isp_stat_done_info stat_done_info[ISP_STAT_ID_MAX];
|
|
+ struct stat_dma_irq_bits stat_dma_irq_bitmap[ISP_STAT_THROUGH_DMA_COUNT];
|
|
+ struct stat_dma_irq_info dma_irq_info[ISP_STAT_THROUGH_DMA_COUNT];
|
|
+ int mem_irq_index[ISP_STAT_ID_MAX];
|
|
+ struct stat_mem_irq_info mem_irq_info[ISP_STAT_THROUGH_MEM_COUNT];
|
|
+ int (*notify_event)(void *private_dev, u32 event, void *payload,
|
|
+ u32 payload_len);
|
|
+};
|
|
+
|
|
+/**
|
|
+ * struct k1x_isp_irq_context - all isp irq work
|
|
+ *
|
|
+ * @hw_pipe_id: the master hardware pipeline for this irq ctx.
|
|
+ * @isp_irq_bitmap: isp irq bit map.
|
|
+ * @cur_frame_num: the current frame number.
|
|
+ * @isp_irq_tasklet: the bottom of isp irq handler.
|
|
+ * @isp_dma_irq_tasklet: the bottom of isp dma irq handler.
|
|
+ */
|
|
+struct k1xisp_irq_context {
|
|
+ u32 hw_pipe_id;
|
|
+ ulong isp_irq_bitmap;
|
|
+ atomic_t cur_frame_num;
|
|
+ k1xisp_irq_handler isp_irq_handler[ISP_IRQ_BIT_MAX_NUM];
|
|
+ struct tasklet_struct isp_irq_tasklet;
|
|
+ struct tasklet_struct isp_dma_irq_tasklet;
|
|
+};
|
|
+
|
|
+struct pipe_task_stat_config {
|
|
+ u32 frame_num;
|
|
+ ulong mem_stat_bitmap;
|
|
+};
|
|
+
|
|
+enum task_voter_type {
|
|
+ TASK_VOTER_SDE_SOF,
|
|
+ TASK_VOTER_AE_EOF,
|
|
+ TASK_VOTER_AF_EOF,
|
|
+ TASK_VOTER_PDC_EOF,
|
|
+ TASK_VOTER_TYP_MAX,
|
|
+};
|
|
+
|
|
+/**
|
|
+ * struct task_voting_system - a simple vote system for deal with all irq conditions.
|
|
+ *
|
|
+ * @sys_trigger_num: the trigger number(voter number) of this vote system.
|
|
+ * @cur_ticket_cnt: current vote tickets.
|
|
+ * @voter_index: the index of voter in the system(voter_validity array).
|
|
+ * @voter_validity: the flag to recording ticket for voter.
|
|
+ * @voter_frameID: the frame number for current ticket.
|
|
+ */
|
|
+struct task_voting_system {
|
|
+ u32 sys_trigger_num;
|
|
+ u32 cur_ticket_cnt;
|
|
+ int voter_index[TASK_VOTER_TYP_MAX];
|
|
+ u8 voter_validity[ISP_VOTER_MAX_NUM];
|
|
+ u32 voter_frameID[ISP_VOTER_MAX_NUM];
|
|
+ spinlock_t vote_lock;
|
|
+};
|
|
+
|
|
+/**
|
|
+ * struct isp_pipe_task - the pipe's main task oriented to user.
|
|
+ *
|
|
+ * @task_type: task's type defined by &enum isp_pipe_task_type
|
|
+ * @frame_num: the current frame number.
|
|
+ * @task_trigger: wakeup user for this task if ture;
|
|
+ * @complete_cnt: task complete count.
|
|
+ * @complete_frame_num: current complete frame num.
|
|
+ * @stat_bits_cnt: the total count of bits in stat_bitmap
|
|
+ * @stat_bitmap: which stats are used in this task.
|
|
+ * @wait_complete: the completion for user.
|
|
+ * @task_lock: spinlock for irq ctx and tasklet.
|
|
+ * @complete_lock: spinlock for user wait complete, used between takslet and thread.
|
|
+ * @user_stat_cfg: the stat config setted by user for this task.
|
|
+ * @vote_system: voting system for multi condtions to wakeup user
|
|
+ * @use_vote_sys: if ture use voting system.
|
|
+ */
|
|
+struct isp_pipe_task {
|
|
+ u32 task_type;
|
|
+ atomic_t frame_num;
|
|
+ u32 task_trigger;
|
|
+ int complete_cnt;
|
|
+ u32 complete_frame_num;
|
|
+ u32 stat_bits_cnt;
|
|
+ ulong stat_bitmap;
|
|
+ struct completion wait_complete;
|
|
+ spinlock_t task_lock;
|
|
+ spinlock_t complete_lock;
|
|
+ struct pipe_task_stat_config user_stat_cfg;
|
|
+ struct task_voting_system vote_system;
|
|
+ u8 use_vote_sys;
|
|
+};
|
|
+
|
|
+/**
|
|
+ * struct k1xisp_pipe_dev - abstraction of isp pipeline
|
|
+ *
|
|
+ * @dev_num: belongs to which isp device.
|
|
+ * @open_cnt: the count of open isp pipe device.
|
|
+ * @pipedev_id: this pipeline's ID whose value is defined by &enum isp_pipe_dev_id. Notice that the isp
|
|
+ * hardware has two pipelines, but the combined pipe(like HDR,RGBW) is abstracted by software.
|
|
+ * @stats_node_cnt: the count of stats node.
|
|
+ * @work_status: current work status of pipedev, defined by &enum isp_work_status.
|
|
+ * @work_type: current work type of pipedev, defined by enum isp_pipe_work_type
|
|
+ * @fd_buffer: use fd to describe buffer if true;
|
|
+ * @capture_client_num: the number of capture clinets on this pipe dev.
|
|
+ * @stream_restart: the stream restart and frameid comes for zero.
|
|
+ * @eof_task_hd_by_sof: eof task process at sof task(ae handle at startframe).
|
|
+ * @isp_reg_mem: the register buffer memory setted by user space.
|
|
+ * @frame_info_mem: the frame info buffer memory setted by user space.
|
|
+ * @pipe_task: info of pipe task.
|
|
+ * @stats_nodes: ae,awb,af,eis stat work of pipeline, combination pipe may have two nodes, like hdr mode.
|
|
+ * @hook: list hook.
|
|
+ * @isp_pipedev_mutex: mutex for isp pipe device.
|
|
+ * @pipedev_capture_mutex: mutex for isp pipe device do capture.
|
|
+ * @slice_reg_mem: the memory for a slice regs.
|
|
+ */
|
|
+struct k1xisp_pipe_dev {
|
|
+ u32 dev_num;
|
|
+ u32 open_cnt;
|
|
+ u32 pipedev_id;
|
|
+ u32 stats_node_cnt;
|
|
+ u32 work_status;
|
|
+ u32 work_type;
|
|
+ u32 fd_buffer;
|
|
+ u32 capture_client_num;
|
|
+ u32 stream_restart;
|
|
+ u32 eof_task_hd_by_sof;
|
|
+ u32 frameinfo_get_by_eof;
|
|
+ struct isp_kmem_info isp_reg_mem[ISP_PIPE_WORK_TYPE_CAPTURE + 1]; //0:preview,1:capture_a, 2: capture_b
|
|
+ // struct isp_kmem_info frame_info_mem;
|
|
+ struct isp_pipe_task pipe_tasks[ISP_PIPE_TASK_TYPE_MAX];
|
|
+ struct k1xisp_stats_node *stats_nodes;
|
|
+ struct k1xisp_irq_context isp_irq_ctx;
|
|
+ struct list_head hook;
|
|
+ struct mutex isp_pipedev_mutex;
|
|
+ struct mutex pipedev_capture_mutex;
|
|
+ void *slice_reg_mem;
|
|
+};
|
|
+
|
|
+/**
|
|
+ * struct k1x_isp_dev - abstraction of isp device.
|
|
+ *
|
|
+ * @dev_num: this isp device's ID, we may support two isp devices.;
|
|
+ * @open_cnt: the count of open isp device.
|
|
+ * @clk_ref: the reference of clock.
|
|
+ * @pdev: platform device.
|
|
+ * @isp_reg_source: isp registers from dts file.
|
|
+ * @ahb_clk: ahb clock of isp
|
|
+ * @fnc_clk: isp func clk
|
|
+ * @axi_clk: axi clock
|
|
+ * @pipe_devs: isp pipeline devices.
|
|
+ * @reset_irq_complete: global reset irq done complete
|
|
+ * @restart_complete: restart done complete, which called by vi moudles.
|
|
+ * @isp_dev_lock: spinlock for isp device.
|
|
+ */
|
|
+struct k1xisp_dev {
|
|
+ u8 dev_num;
|
|
+ u32 open_cnt;
|
|
+ atomic_t clk_ref;
|
|
+ struct platform_device *plat_dev;
|
|
+ struct resource *isp_reg_source;
|
|
+ ulong __iomem isp_regbase;
|
|
+ ulong __iomem isp_regend;
|
|
+// struct clk *ahb_clk;
|
|
+ struct reset_control *ahb_reset;
|
|
+ struct reset_control *isp_reset;
|
|
+ struct reset_control *isp_ci_reset;
|
|
+ struct reset_control *lcd_mclk_reset;
|
|
+
|
|
+ struct clk *fnc_clk;
|
|
+ struct clk *axi_clk;
|
|
+ struct clk *dpu_clk;
|
|
+ struct k1xisp_pipe_dev *pipe_devs[ISP_PIPE_DEV_ID_MAX];
|
|
+ struct completion reset_irq_complete;
|
|
+ struct completion restart_complete;
|
|
+ struct spm_camera_vi_ops *vi_funs;
|
|
+};
|
|
+
|
|
+int k1xisp_dev_clock_set(int enable);
|
|
+int k1xisp_dev_open(void);
|
|
+int k1xisp_dev_release(void);
|
|
+long k1xisp_dev_copy_user(struct file *file, unsigned int cmd, void *arg,
|
|
+ k1xisp_ioctl_func func);
|
|
+int k1xisp_dev_get_pipedev(u32 hw_pipe_id, struct k1xisp_pipe_dev **pp_pipedev);
|
|
+int k1xisp_dev_get_viraddr_from_dma_buf(struct dma_buf *dma_buffer, void **pp_vir_addr);
|
|
+int k1xisp_dev_put_viraddr_to_dma_buf(struct dma_buf *dma_buffer, void *vir_addr);
|
|
+int k1xisp_dev_get_phyaddr_from_dma_buf(int fd, __u64 *phy_addr);
|
|
+int k1xisp_dev_get_vi_ops(struct spm_camera_vi_ops **pp_vi_ops);
|
|
+#endif
|
|
diff --git a/drivers/media/platform/spacemit/camera/cam_isp/k1x_isp_pipe.c b/drivers/media/platform/spacemit/camera/cam_isp/k1x_isp_pipe.c
|
|
new file mode 100644
|
|
index 000000000000..111111111111
|
|
--- /dev/null
|
|
+++ b/drivers/media/platform/spacemit/camera/cam_isp/k1x_isp_pipe.c
|
|
@@ -0,0 +1,1845 @@
|
|
+// SPDX-License-Identifier: GPL-2.0
|
|
+/*
|
|
+ * Description on this file
|
|
+ *
|
|
+ * Copyright (C) 2023 SPACEMIT Micro Limited
|
|
+ */
|
|
+
|
|
+#include "k1x_isp_drv.h"
|
|
+#include "k1x_isp_statistic.h"
|
|
+#include <cam_plat.h>
|
|
+
|
|
+#include <linux/fs.h>
|
|
+#include <linux/slab.h>
|
|
+#include <linux/compat.h>
|
|
+#include <linux/dma-buf.h>
|
|
+
|
|
+#define PIPE_DEVID_TO_HW_PIPELINE_ID(dev_id, hw_pipe_id) ({ \
|
|
+ if (dev_id <= ISP_PIPE_DEV_ID_1) \
|
|
+ hw_pipe_id = dev_id; \
|
|
+ else \
|
|
+ hw_pipe_id = ISP_HW_PIPELINE_ID_0; \
|
|
+ })
|
|
+
|
|
+#define PIPE_WORK_TYPE_TO_MEM_INDEX(type, mem_index) { \
|
|
+ mem_index = type - ISP_PIPE_WORK_TYPE_PREVIEW; \
|
|
+ }
|
|
+
|
|
+int isp_pipe_sof_irq_handler(struct isp_irq_func_params *param);
|
|
+int isp_pipe_irq_err_print_handler(struct isp_irq_func_params *param);
|
|
+int isp_pipe_sde_sof_irq_handler(struct isp_irq_func_params *param);
|
|
+int isp_pipe_sde_eof_irq_handler(struct isp_irq_func_params *param);
|
|
+int isp_pipe_reset_done_irq_handler(struct isp_irq_func_params *param);
|
|
+int isp_pipe_afc_eof_irq_handler(struct isp_irq_func_params *param);
|
|
+int isp_pipe_aem_eof_irq_handler(struct isp_irq_func_params *param);
|
|
+
|
|
+int isp_pipe_task_job_init(struct isp_pipe_task *pipe_task);
|
|
+int _isp_pipe_job_clear(struct k1xisp_pipe_dev *pipe_dev);
|
|
+int isp_pipe_task_vote_handler(struct k1xisp_pipe_dev *pipe_dev,
|
|
+ struct isp_pipe_task *pipe_task, u32 frame_num,
|
|
+ u32 voter_type);
|
|
+
|
|
+struct isp_irq_handler_info {
|
|
+ u32 irq_bit;
|
|
+ k1xisp_irq_handler irq_handler;
|
|
+};
|
|
+
|
|
+static struct isp_irq_handler_info g_host_irq_handler_infos[] = {
|
|
+ { ISP_IRQ_BIT_PIPE_SOF, isp_pipe_sof_irq_handler },
|
|
+ { ISP_IRQ_BIT_STAT_ERR, isp_pipe_irq_err_print_handler },
|
|
+ { ISP_IRQ_BIT_SDE_SOF, isp_pipe_sde_sof_irq_handler },
|
|
+ { ISP_IRQ_BIT_SDE_EOF, isp_pipe_sde_eof_irq_handler },
|
|
+ // {ISP_IRQ_BIT_RESET_DONE, isp_pipe_reset_done_irq_handler}, //always pipe0, vi do it
|
|
+ { ISP_IRQ_BIT_AEM_EOF, isp_pipe_aem_eof_irq_handler },
|
|
+ { ISP_IRQ_BIT_AFC_EOF, isp_pipe_afc_eof_irq_handler },
|
|
+ // {ISP_IRQ_BIT_ISP_ERR, isp_pipe_irq_err_print_handler},
|
|
+};
|
|
+
|
|
+struct isp_task_stat_map_info {
|
|
+ u8 count;
|
|
+ char stat_ids[3]; //max 3 stat at once
|
|
+};
|
|
+
|
|
+static struct isp_task_stat_map_info g_task_stat_map_infos[ISP_PIPE_TASK_TYPE_MAX] = {
|
|
+ { 3, { ISP_STAT_ID_AWB, ISP_STAT_ID_LTM, ISP_STAT_ID_EIS} }, //sof firmware calc task
|
|
+ { 1, { ISP_STAT_ID_AE, -1, -1} }, //eof firmware calc task
|
|
+ { 1, { ISP_STAT_ID_AF, -1, -1} }, // af
|
|
+};
|
|
+
|
|
+int isp_pipe_get_task_type_by_stat_id(u32 stat_id)
|
|
+{
|
|
+ int task_type = -1;
|
|
+
|
|
+ if (stat_id == ISP_STAT_ID_AE || stat_id == ISP_STAT_ID_AWB)
|
|
+ task_type = ISP_PIPE_TASK_TYPE_SOF;
|
|
+ else if (stat_id == ISP_STAT_ID_AF)
|
|
+ task_type = ISP_PIPE_TASK_TYPE_AF;
|
|
+ else
|
|
+ isp_log_err("%s:unsupported stat id(%d) for task type!", __func__, stat_id);
|
|
+
|
|
+ return task_type;
|
|
+}
|
|
+
|
|
+/**
|
|
+ * the functions prefixed with k1xisp are all exposured to external.
|
|
+ */
|
|
+
|
|
+int isp_pipe_config_irqmask(struct k1xisp_pipe_dev *pipe_dev)
|
|
+{
|
|
+ int ret = 0;
|
|
+ u32 reg_addr = 0, reg_value = 0, reg_mask = 0;
|
|
+ u32 hw_pipe_id;
|
|
+
|
|
+ /*
|
|
+ * need irq:
|
|
+ * bit[0]:host_isp_pipe_sof_irq_mask
|
|
+ * bit[11]:host_isp_statistics_err_irq_mask
|
|
+ * bit[12]:host_isp_sde_sof_irq_mask
|
|
+ * bit[26]:host_isp_aem_eof_irq
|
|
+ * bit[29]:host_isp_afc_eof_irq_mask
|
|
+ */
|
|
+
|
|
+ hw_pipe_id = pipe_dev->isp_irq_ctx.hw_pipe_id;
|
|
+ if (pipe_dev->pipedev_id <= ISP_PIPE_DEV_ID_1) {
|
|
+ //1. clear irq raw status
|
|
+ reg_addr = ISP_REG_OFFSET_TOP_PIPE(hw_pipe_id) + ISP_REG_IRQ_STATUS;
|
|
+ k1xisp_reg_writel(reg_addr, 0xffffffff, 0xffffffff);
|
|
+
|
|
+ if (pipe_dev->frameinfo_get_by_eof || !pipe_dev->eof_task_hd_by_sof)
|
|
+ reg_value = BIT(0) | BIT(11) | BIT(12) | BIT(26) | BIT(29);
|
|
+ else
|
|
+ reg_value = BIT(0) | BIT(11) | BIT(12) | BIT(29);
|
|
+
|
|
+ if (pipe_dev->eof_task_hd_by_sof)
|
|
+ bitmap_set(&pipe_dev->pipe_tasks[ISP_PIPE_TASK_TYPE_SOF].
|
|
+ stat_bitmap, ISP_STAT_ID_AE, 1);
|
|
+
|
|
+ reg_mask = reg_value;
|
|
+ reg_addr = ISP_REG_OFFSET_TOP_PIPE(hw_pipe_id) + ISP_REG_IRQ_MASK;
|
|
+ k1xisp_reg_writel(reg_addr, reg_value, reg_mask);
|
|
+ } else {
|
|
+ ret = -1;
|
|
+ isp_log_err("unsupport k1xisp pipe dev%d!", pipe_dev->pipedev_id);
|
|
+ }
|
|
+
|
|
+ return ret;
|
|
+}
|
|
+
|
|
+int isp_pipe_clear_irqmask(struct k1xisp_pipe_dev *pipe_dev)
|
|
+{
|
|
+ u32 reg_addr = 0, reg_value = 0, reg_mask = 0;
|
|
+ u32 hw_pipe_id;
|
|
+
|
|
+ hw_pipe_id = pipe_dev->isp_irq_ctx.hw_pipe_id;
|
|
+ if (pipe_dev->pipedev_id <= ISP_PIPE_DEV_ID_1) {
|
|
+ reg_value = 0;
|
|
+ reg_mask = BIT(0) | BIT(11) | BIT(12) | BIT(26) | BIT(29);
|
|
+ reg_addr = ISP_REG_OFFSET_TOP_PIPE(hw_pipe_id) + ISP_REG_IRQ_MASK;
|
|
+ k1xisp_reg_writel(reg_addr, reg_value, reg_mask);
|
|
+ }
|
|
+
|
|
+ if (pipe_dev->eof_task_hd_by_sof)
|
|
+ bitmap_clear(&pipe_dev->pipe_tasks[ISP_PIPE_TASK_TYPE_SOF].stat_bitmap,
|
|
+ ISP_STAT_ID_AE, 1);
|
|
+
|
|
+ return 0;
|
|
+}
|
|
+
|
|
+int _isp_pipe_prepare_capture_memory(struct k1xisp_pipe_dev *pipe_dev)
|
|
+{
|
|
+ pipe_dev->slice_reg_mem =
|
|
+ kzalloc(K1XISP_SLICE_REG_MAX_NUM * sizeof(struct isp_reg_unit), GFP_KERNEL);
|
|
+ if (!pipe_dev->slice_reg_mem) {
|
|
+ isp_log_err("%s: alloc memory for slice regs failed!", __func__);
|
|
+ return -ENOMEM;
|
|
+ }
|
|
+
|
|
+ return 0;
|
|
+}
|
|
+
|
|
+int _isp_pipe_free_capture_memory(struct k1xisp_pipe_dev *pipe_dev)
|
|
+{
|
|
+ if (pipe_dev->slice_reg_mem) {
|
|
+ kfree(pipe_dev->slice_reg_mem);
|
|
+ pipe_dev->slice_reg_mem = NULL;
|
|
+ }
|
|
+
|
|
+ return 0;
|
|
+}
|
|
+
|
|
+static int k1xisp_pipe_open(struct inode *inode, struct file *filp)
|
|
+{
|
|
+ int ret = 0;
|
|
+ struct isp_cdev_info *cdev_info =
|
|
+ container_of(inode->i_cdev, struct isp_cdev_info, isp_cdev);
|
|
+ struct k1xisp_pipe_dev *pipe_dev = NULL;
|
|
+
|
|
+ pipe_dev = (struct k1xisp_pipe_dev *)(cdev_info->p_dev);
|
|
+ ISP_DRV_CHECK_POINTER(pipe_dev);
|
|
+ mutex_lock(&pipe_dev->isp_pipedev_mutex);
|
|
+ pipe_dev->open_cnt++;
|
|
+ if (1 == pipe_dev->open_cnt) {
|
|
+ k1xisp_dev_open();
|
|
+ ret = k1xisp_dev_clock_set(1);
|
|
+ if (ret)
|
|
+ goto fail_close_dev;;
|
|
+ ret = _isp_pipe_prepare_capture_memory(pipe_dev);
|
|
+ if (ret)
|
|
+ goto fail_clock;
|
|
+ }
|
|
+
|
|
+ filp->private_data = pipe_dev;
|
|
+ mutex_unlock(&pipe_dev->isp_pipedev_mutex);
|
|
+ isp_log_dbg("open k1xisp pipe dev%d!", pipe_dev->pipedev_id);
|
|
+ return ret;
|
|
+
|
|
+fail_clock:
|
|
+ k1xisp_dev_clock_set(0);
|
|
+fail_close_dev:
|
|
+ k1xisp_dev_release();
|
|
+ pipe_dev->open_cnt--;
|
|
+ mutex_unlock(&pipe_dev->isp_pipedev_mutex);
|
|
+ return ret;
|
|
+}
|
|
+
|
|
+int _isp_pipe_undeploy_driver(struct k1xisp_pipe_dev *pipe_dev)
|
|
+{
|
|
+ int mem_index = 0;
|
|
+
|
|
+ for (mem_index = 0; mem_index < ISP_PIPE_WORK_TYPE_CAPTURE + 1; mem_index++) {
|
|
+ if (pipe_dev->isp_reg_mem[mem_index].config) {
|
|
+ if (pipe_dev->fd_buffer) {
|
|
+ k1xisp_dev_put_viraddr_to_dma_buf(pipe_dev->isp_reg_mem[mem_index].dma_buffer,
|
|
+ pipe_dev->isp_reg_mem[mem_index].kvir_addr);
|
|
+ dma_buf_put(pipe_dev->isp_reg_mem[mem_index].dma_buffer);
|
|
+ } else {
|
|
+ //phy addr to viraddr in kernel.
|
|
+ isp_log_err("%s: need to realize!", __func__);
|
|
+ return -EPERM;
|
|
+ }
|
|
+
|
|
+ pipe_dev->isp_reg_mem[mem_index].config = false;
|
|
+ memset(&pipe_dev->isp_reg_mem[mem_index], 0, sizeof(struct isp_kmem_info));
|
|
+ }
|
|
+ }
|
|
+
|
|
+ return 0;
|
|
+}
|
|
+
|
|
+int _isp_pipe_close_with_exception(struct k1xisp_pipe_dev *pipe_dev)
|
|
+{
|
|
+ int i, cur_work_type = 0;
|
|
+
|
|
+ if (ISP_WORK_STATUS_START == pipe_dev->work_status) {
|
|
+ //stop and clear job first.
|
|
+ cur_work_type = pipe_dev->work_type;
|
|
+ _isp_pipe_job_clear(pipe_dev);
|
|
+ }
|
|
+ //try flush buffers when exit by unusual.
|
|
+ for (i = 0; i < pipe_dev->stats_node_cnt; i++)
|
|
+ k1xisp_stat_try_flush_buffer(&pipe_dev->stats_nodes[i]);
|
|
+
|
|
+ if (atomic_read(&pipe_dev->isp_irq_ctx.cur_frame_num) > 0)
|
|
+ atomic_set(&pipe_dev->isp_irq_ctx.cur_frame_num, 0);
|
|
+
|
|
+ return 0;
|
|
+}
|
|
+
|
|
+static int k1xisp_pipe_release(struct inode *inode, struct file *filp)
|
|
+{
|
|
+ int ret = 0, i;
|
|
+ struct isp_cdev_info *cdev_info =
|
|
+ container_of(inode->i_cdev, struct isp_cdev_info, isp_cdev);
|
|
+ struct k1xisp_pipe_dev *pipe_dev = NULL;
|
|
+
|
|
+ pipe_dev = (struct k1xisp_pipe_dev *)(cdev_info->p_dev);
|
|
+ ISP_DRV_CHECK_POINTER(pipe_dev);
|
|
+ mutex_lock(&pipe_dev->isp_pipedev_mutex);
|
|
+ pipe_dev->open_cnt--;
|
|
+ if (0 == pipe_dev->open_cnt) {
|
|
+ _isp_pipe_close_with_exception(pipe_dev);
|
|
+ for (i = ISP_PIPE_TASK_TYPE_SOF; i <= ISP_PIPE_TASK_TYPE_AF; i++)
|
|
+ isp_pipe_task_job_init(&pipe_dev->pipe_tasks[i]);
|
|
+ _isp_pipe_undeploy_driver(pipe_dev);
|
|
+ k1xisp_dev_clock_set(0);
|
|
+ k1xisp_dev_release();
|
|
+ _isp_pipe_free_capture_memory(pipe_dev);
|
|
+ pipe_dev->capture_client_num = 0;
|
|
+ }
|
|
+
|
|
+ filp->private_data = NULL;
|
|
+ mutex_unlock(&pipe_dev->isp_pipedev_mutex);
|
|
+ isp_log_dbg("close k1xisp pipe dev%d!", pipe_dev->pipedev_id);
|
|
+ return ret;
|
|
+}
|
|
+
|
|
+void isp_pipe_fill_user_task_stat_result(struct isp_ubuf_uint *ubuf_uint,
|
|
+ struct isp_kbuffer_info *kbuf_info)
|
|
+{
|
|
+ int plane_size = 0;
|
|
+
|
|
+ if (ubuf_uint) {
|
|
+ if (kbuf_info) {
|
|
+ ubuf_uint->plane_count = kbuf_info->plane_count;
|
|
+ ubuf_uint->buf_index = kbuf_info->buf_index;
|
|
+ plane_size = kbuf_info->plane_count * sizeof(struct isp_buffer_plane);
|
|
+ memcpy(ubuf_uint->buf_planes, kbuf_info->buf_planes, plane_size);
|
|
+ } else {
|
|
+ memset(ubuf_uint, 0, sizeof(struct isp_ubuf_uint));
|
|
+ }
|
|
+ }
|
|
+}
|
|
+
|
|
+void isp_pipe_fill_user_task_data(struct isp_user_task_info *user_task, struct isp_kbuffer_info *kbuf_info, u32 stat_id)
|
|
+{
|
|
+ if (ISP_PIPE_TASK_TYPE_AF == user_task->task_type) {
|
|
+ if (ISP_STAT_ID_AF == stat_id)
|
|
+ isp_pipe_fill_user_task_stat_result(&user_task->stats_result.af_task.af_result, kbuf_info);
|
|
+ else if (ISP_STAT_ID_PDC == stat_id)
|
|
+ isp_pipe_fill_user_task_stat_result(&user_task->stats_result.af_task.pdc_result, kbuf_info);
|
|
+ } else if (ISP_PIPE_TASK_TYPE_SOF == user_task->task_type) {
|
|
+ if (ISP_STAT_ID_AWB == stat_id)
|
|
+ isp_pipe_fill_user_task_stat_result(&user_task->stats_result.sof_task.awb_result, kbuf_info);
|
|
+ else if (ISP_STAT_ID_EIS == stat_id)
|
|
+ isp_pipe_fill_user_task_stat_result(&user_task->stats_result.sof_task.eis_result, kbuf_info);
|
|
+ else if (ISP_STAT_ID_LTM == stat_id)
|
|
+ isp_pipe_fill_user_task_stat_result(&user_task->stats_result.sof_task.ltm_result, kbuf_info);
|
|
+ else if (ISP_STAT_ID_AE == stat_id)
|
|
+ isp_pipe_fill_user_task_stat_result(&user_task->stats_result.sof_task.ae_result, kbuf_info);
|
|
+ } else if (ISP_PIPE_TASK_TYPE_EOF == user_task->task_type) {
|
|
+ isp_pipe_fill_user_task_stat_result(&user_task->stats_result.eof_task.ae_result, kbuf_info);
|
|
+ }
|
|
+}
|
|
+
|
|
+void isp_pipe_task_get_stats_result(struct k1xisp_pipe_dev *pipe_dev,
|
|
+ struct isp_pipe_task *pipe_task,
|
|
+ struct isp_user_task_info *user_task, u32 frame_num,
|
|
+ u32 discard)
|
|
+{
|
|
+ int set_bit = -1, node_index = 0, stat_id = 0;
|
|
+ struct isp_kbuffer_info *kbuf_info = NULL;
|
|
+ u32 find_frame = 0;
|
|
+ struct k1xisp_stats_node *stats_node = NULL;
|
|
+
|
|
+ for_each_set_bit(set_bit, &pipe_task->stat_bitmap, ISP_STAT_ID_MAX) {
|
|
+ if (set_bit > ISP_STAT_ID_MAX) {
|
|
+ //slave pipe stat
|
|
+ if (pipe_dev->stats_node_cnt < 2) {
|
|
+ isp_log_err
|
|
+ ("fatal error:stat bitmap use slave pipe, but stat node count is %d!",
|
|
+ pipe_dev->stats_node_cnt);
|
|
+ continue;
|
|
+ }
|
|
+ node_index = 1;
|
|
+ } else {
|
|
+ node_index = 0;
|
|
+ }
|
|
+
|
|
+ stat_id = set_bit - (node_index * ISP_SLAVE_STAT_ID_AE);
|
|
+ stats_node = &pipe_dev->stats_nodes[node_index];
|
|
+ if (stat_id == ISP_STAT_ID_EIS)
|
|
+ find_frame = frame_num - 1; //EIS must find the previous frame.
|
|
+ else
|
|
+ find_frame = frame_num;
|
|
+ if (discard) {
|
|
+ k1xisp_stat_get_donebuf_by_frameid(stats_node, stat_id, find_frame, true);
|
|
+ } else {
|
|
+ kbuf_info = k1xisp_stat_get_donebuf_by_frameid(stats_node, stat_id,
|
|
+ find_frame, false);
|
|
+ isp_pipe_fill_user_task_data(user_task, kbuf_info, stat_id);
|
|
+ }
|
|
+ }
|
|
+}
|
|
+
|
|
+int isp_pipe_task_job_init(struct isp_pipe_task *pipe_task)
|
|
+{
|
|
+ int i;
|
|
+
|
|
+ atomic_set(&pipe_task->frame_num, 0);
|
|
+ pipe_task->complete_cnt = 0;
|
|
+ pipe_task->complete_frame_num = 0;
|
|
+ reinit_completion(&pipe_task->wait_complete);
|
|
+
|
|
+ pipe_task->task_trigger = false;
|
|
+
|
|
+ pipe_task->vote_system.cur_ticket_cnt = 0;
|
|
+ pipe_task->use_vote_sys = false;
|
|
+ for (i = 0; i < pipe_task->vote_system.sys_trigger_num; i++)
|
|
+ pipe_task->vote_system.voter_validity[i] = false;
|
|
+
|
|
+ return 0;
|
|
+}
|
|
+
|
|
+int k1xisp_pipe_wait_interrupts(struct k1xisp_pipe_dev *pipe_dev,
|
|
+ struct isp_user_task_info *user_task)
|
|
+{
|
|
+ int ret = 0, hw_pipe_id = 0, task_type = -1, work_cnt = 0;
|
|
+ struct completion *wait_complete = NULL;
|
|
+ struct isp_pipe_task *cur_pipe_task = NULL;
|
|
+ unsigned long timeout = 0;
|
|
+ int frame_num = 0;
|
|
+
|
|
+ PIPE_DEVID_TO_HW_PIPELINE_ID(pipe_dev->pipedev_id, hw_pipe_id);
|
|
+ if (hw_pipe_id >= ISP_HW_PIPELINE_ID_MAX) {
|
|
+ isp_log_err("%s: Please check the pipeline id(%d)!", __func__,
|
|
+ hw_pipe_id);
|
|
+ return -EINVAL;
|
|
+ }
|
|
+
|
|
+ task_type = user_task->task_type;
|
|
+ if (task_type >= ISP_PIPE_TASK_TYPE_MAX) {
|
|
+ isp_log_err("%s: Invalid task type(%d)!", __func__, task_type);
|
|
+ return -EINVAL;
|
|
+ }
|
|
+
|
|
+ cur_pipe_task = &pipe_dev->pipe_tasks[task_type];
|
|
+ wait_complete = &cur_pipe_task->wait_complete;
|
|
+
|
|
+ timeout = msecs_to_jiffies(3000);
|
|
+ ret = wait_for_completion_timeout(wait_complete, timeout);
|
|
+ if (0 == ret) {
|
|
+ if (ISP_PIPE_WORK_TYPE_PREVIEW == pipe_dev->work_type)
|
|
+ isp_log_warn
|
|
+ ("wait for isp(p%d) interrupt irq timeout for task(%d), state=%ld,%ld!",
|
|
+ hw_pipe_id, task_type,
|
|
+ pipe_dev->isp_irq_ctx.isp_irq_tasklet.state,
|
|
+ pipe_dev->isp_irq_ctx.isp_dma_irq_tasklet.state);
|
|
+ //wakeup user
|
|
+ return -EAGAIN;
|
|
+ }
|
|
+ //lock used only between thread and soft irq.
|
|
+ spin_lock_bh(&cur_pipe_task->complete_lock);
|
|
+ work_cnt = cur_pipe_task->complete_cnt--;
|
|
+ frame_num = cur_pipe_task->complete_frame_num;
|
|
+ spin_unlock_bh(&cur_pipe_task->complete_lock);
|
|
+ //get stat result.
|
|
+ if (work_cnt > 0) {
|
|
+ user_task->result_valid = true;
|
|
+ user_task->frame_number = frame_num;
|
|
+ user_task->work_status = pipe_dev->work_status;
|
|
+ if (ISP_WORK_STATUS_START == pipe_dev->work_status) {
|
|
+ isp_pipe_task_get_stats_result(pipe_dev, cur_pipe_task,
|
|
+ user_task, frame_num, false);
|
|
+ } else if (ISP_WORK_STATUS_STOP == pipe_dev->work_status) {
|
|
+ //clear some flag during job.
|
|
+ }
|
|
+ ret = 0;
|
|
+ } else {
|
|
+ isp_log_err("%s: fatal error! the complete count is %d!", __func__,
|
|
+ work_cnt);
|
|
+ ret = -EPERM;
|
|
+ }
|
|
+
|
|
+ return ret;
|
|
+}
|
|
+
|
|
+int isp_pipe_start_task_vote(struct k1xisp_pipe_dev *pipe_dev, unsigned int enable)
|
|
+{
|
|
+ int ret = 0;
|
|
+ struct isp_pipe_task *af_task = NULL;
|
|
+
|
|
+ af_task = &pipe_dev->pipe_tasks[ISP_PIPE_TASK_TYPE_AF];
|
|
+ if (enable) {
|
|
+ //atomic bitops
|
|
+ set_bit(ISP_STAT_ID_PDC, &af_task->stat_bitmap);
|
|
+ af_task->use_vote_sys = true;
|
|
+ af_task->stat_bits_cnt++;
|
|
+ isp_log_dbg("enable pdc voter,bitmap=0x%lx!", af_task->stat_bitmap);
|
|
+ } else {
|
|
+ clear_bit(ISP_STAT_ID_PDC, &af_task->stat_bitmap);
|
|
+ af_task->stat_bits_cnt--;
|
|
+ af_task->use_vote_sys = false;
|
|
+ isp_log_dbg("disable pdc voter,bitmap=0x%lx!", af_task->stat_bitmap);
|
|
+ }
|
|
+
|
|
+ return ret;
|
|
+}
|
|
+
|
|
+//tasklet context
|
|
+int k1xisp_pipe_notify_event(void *pdev, u32 event, void *payload, u32 load_len)
|
|
+{
|
|
+ int ret = 0;
|
|
+ struct k1xisp_pipe_dev *pipe_dev = (struct k1xisp_pipe_dev *)pdev;
|
|
+ struct stats_notify_params *event_params = NULL;
|
|
+
|
|
+ ISP_DRV_CHECK_POINTER(pipe_dev);
|
|
+ ISP_DRV_CHECK_POINTER(payload);
|
|
+
|
|
+ event_params = (struct stats_notify_params *)payload;
|
|
+ if (event_params->stat_id != ISP_STAT_ID_PDC) {
|
|
+ isp_log_err("only support pdc dma trigger vote system!");
|
|
+ return -EPERM;
|
|
+ }
|
|
+
|
|
+ switch (event) {
|
|
+ case PIPE_EVENT_TRIGGER_VOTE_SYS:
|
|
+ ret = isp_pipe_start_task_vote(pipe_dev, event_params->event_enable);
|
|
+ break;
|
|
+ case PIPE_EVENT_CAST_VOTE:
|
|
+ ret = isp_pipe_task_vote_handler(pipe_dev,
|
|
+ &pipe_dev->pipe_tasks[ISP_PIPE_TASK_TYPE_AF],
|
|
+ event_params->frame_id,
|
|
+ TASK_VOTER_PDC_EOF);
|
|
+ break;
|
|
+ default:
|
|
+ isp_log_err("unknown this envent:%d!", event);
|
|
+ return -EINVAL;
|
|
+ }
|
|
+ return ret;
|
|
+}
|
|
+
|
|
+int k1xisp_pipe_enable_pdc_af(struct k1xisp_pipe_dev *pipe_dev, u32 *enable)
|
|
+{
|
|
+ int ret = 0, i;
|
|
+ u32 pdc_enable = *enable;
|
|
+
|
|
+ for (i = 0; i < pipe_dev->stats_node_cnt; i++) {
|
|
+ ret = k1xisp_stat_dma_dynamic_enable(&pipe_dev->stats_nodes[i],
|
|
+ ISP_STAT_ID_PDC, pdc_enable);
|
|
+ }
|
|
+
|
|
+ return ret;
|
|
+}
|
|
+
|
|
+int __isp_pipe_start_preview_job(struct k1xisp_pipe_dev *pipe_dev, u32 switch_stream)
|
|
+{
|
|
+ int i, ret = 0;
|
|
+
|
|
+ //preview need all stats while capture don't
|
|
+ for (i = 0; i < pipe_dev->stats_node_cnt; i++) {
|
|
+ if (!switch_stream) {
|
|
+ ret = k1xisp_stat_node_streamon_dma_port(&pipe_dev->stats_nodes[i]);
|
|
+ if (ret)
|
|
+ return ret;
|
|
+ }
|
|
+
|
|
+ k1xisp_stat_node_cfg_dma_irqmask(&pipe_dev->stats_nodes[i]);
|
|
+ }
|
|
+
|
|
+ isp_pipe_config_irqmask(pipe_dev);
|
|
+
|
|
+ return 0;
|
|
+}
|
|
+
|
|
+int isp_pipe_start_job(struct k1xisp_pipe_dev *pipe_dev, u32 work_type)
|
|
+{
|
|
+ int ret = 0;
|
|
+
|
|
+ mutex_lock(&pipe_dev->isp_pipedev_mutex);
|
|
+ if (ISP_WORK_STATUS_START == pipe_dev->work_status) {
|
|
+ isp_log_err("the isp pipedev(%d) is already start work at type:%d!",
|
|
+ pipe_dev->pipedev_id, pipe_dev->work_type);
|
|
+ ret = -EPERM;
|
|
+ goto Safe_Exit;
|
|
+ }
|
|
+
|
|
+ if (ISP_PIPE_WORK_TYPE_PREVIEW == work_type) {
|
|
+ ret = __isp_pipe_start_preview_job(pipe_dev, false);
|
|
+ if (ret)
|
|
+ goto Safe_Exit;
|
|
+ }
|
|
+
|
|
+ pipe_dev->work_type = work_type;
|
|
+ pipe_dev->work_status = ISP_WORK_STATUS_START;
|
|
+
|
|
+Safe_Exit:
|
|
+ mutex_unlock(&pipe_dev->isp_pipedev_mutex);
|
|
+
|
|
+ return ret;
|
|
+}
|
|
+
|
|
+int __isp_pipe_clear_preview_job(struct k1xisp_pipe_dev *pipe_dev, u32 switch_stream)
|
|
+{
|
|
+ int i = 0;
|
|
+
|
|
+ //wait tasklet run and disable it.
|
|
+ tasklet_disable(&pipe_dev->isp_irq_ctx.isp_irq_tasklet);
|
|
+ tasklet_disable(&pipe_dev->isp_irq_ctx.isp_dma_irq_tasklet);
|
|
+
|
|
+ for (i = 0; i < pipe_dev->stats_node_cnt; i++) {
|
|
+ k1xisp_stat_node_clear_dma_irqmask(&pipe_dev->stats_nodes[i]);
|
|
+ k1xisp_stat_reset_dma_busybuf_frameid(&pipe_dev->stats_nodes[i]);
|
|
+ if (!switch_stream) {
|
|
+ k1xisp_stat_node_streamoff_dma_port(&pipe_dev->stats_nodes[i]);
|
|
+ k1xisp_stat_job_flags_init(&pipe_dev->stats_nodes[i]);
|
|
+ }
|
|
+ }
|
|
+
|
|
+ isp_pipe_clear_irqmask(pipe_dev);
|
|
+ //enable tasklet, make sure enable/disable tasklet operation is symmetric.
|
|
+ tasklet_enable(&pipe_dev->isp_irq_ctx.isp_irq_tasklet);
|
|
+ tasklet_enable(&pipe_dev->isp_irq_ctx.isp_dma_irq_tasklet);
|
|
+ atomic_set(&pipe_dev->isp_irq_ctx.cur_frame_num, 0);
|
|
+
|
|
+ return 0;
|
|
+}
|
|
+
|
|
+int _isp_pipe_job_clear(struct k1xisp_pipe_dev *pipe_dev)
|
|
+{
|
|
+ //declare stop work flag first.
|
|
+ pipe_dev->work_status = ISP_WORK_STATUS_STOP;
|
|
+
|
|
+ if (ISP_PIPE_WORK_TYPE_PREVIEW == pipe_dev->work_type)
|
|
+ __isp_pipe_clear_preview_job(pipe_dev, false);
|
|
+
|
|
+ pipe_dev->work_type = ISP_PIPE_WORK_TYPE_INIT;
|
|
+ return 0;
|
|
+}
|
|
+
|
|
+int isp_pipe_switch_stream_job(struct k1xisp_pipe_dev *pipe_dev, u32 work_type)
|
|
+{
|
|
+ int ret = 0;
|
|
+
|
|
+ if (pipe_dev->open_cnt == 1)
|
|
+ return ret; //needn't switch
|
|
+
|
|
+ mutex_lock(&pipe_dev->isp_pipedev_mutex);
|
|
+ //clear last work
|
|
+ if (work_type != pipe_dev->work_type)
|
|
+ if (ISP_PIPE_WORK_TYPE_PREVIEW == pipe_dev->work_type)
|
|
+ __isp_pipe_clear_preview_job(pipe_dev, true);
|
|
+ //start next work
|
|
+ if (ISP_PIPE_WORK_TYPE_CAPTURE == work_type) {
|
|
+ pipe_dev->capture_client_num++;
|
|
+ if (1 == pipe_dev->capture_client_num) {
|
|
+ pipe_dev->work_type = work_type;
|
|
+ pipe_dev->work_status = ISP_WORK_STATUS_START;
|
|
+ }
|
|
+ } else {
|
|
+ //start preview wait all capture client done
|
|
+ pipe_dev->capture_client_num--;
|
|
+ if (0 == pipe_dev->capture_client_num) {
|
|
+ __isp_pipe_start_preview_job(pipe_dev, true);
|
|
+ pipe_dev->work_type = work_type;
|
|
+ pipe_dev->work_status = ISP_WORK_STATUS_START;
|
|
+ }
|
|
+ }
|
|
+ mutex_unlock(&pipe_dev->isp_pipedev_mutex);
|
|
+
|
|
+ return ret;
|
|
+}
|
|
+
|
|
+int isp_pipe_stop_job(struct k1xisp_pipe_dev *pipe_dev, u32 work_type)
|
|
+{
|
|
+ int ret = 0, i;
|
|
+ struct isp_pipe_task *pipe_task = NULL;
|
|
+
|
|
+ mutex_lock(&pipe_dev->isp_pipedev_mutex);
|
|
+ if (pipe_dev->work_status != ISP_WORK_STATUS_START) {
|
|
+ isp_log_err("the isp pipedev(%d) isn't start work!", pipe_dev->pipedev_id);
|
|
+ ret = -EPERM;
|
|
+ goto Safe_Exit;
|
|
+ }
|
|
+
|
|
+ if (pipe_dev->work_type != work_type) {
|
|
+ isp_log_warn("the isp pipedev(%d) worktype(%d) isn't match your's(%d)!",
|
|
+ pipe_dev->pipedev_id, pipe_dev->work_type, work_type);
|
|
+ }
|
|
+
|
|
+ _isp_pipe_job_clear(pipe_dev);
|
|
+
|
|
+ //wakeup user to stop work
|
|
+ if (ISP_PIPE_WORK_TYPE_PREVIEW == work_type) {
|
|
+ for (i = ISP_PIPE_TASK_TYPE_SOF; i <= ISP_PIPE_TASK_TYPE_AF; i++) {
|
|
+ pipe_task = &pipe_dev->pipe_tasks[i];
|
|
+ //lock used only between thread and soft irq.
|
|
+ spin_lock_bh(&pipe_task->complete_lock);
|
|
+ pipe_task->complete_cnt++;
|
|
+ complete(&pipe_task->wait_complete);
|
|
+ spin_unlock_bh(&pipe_task->complete_lock);
|
|
+ }
|
|
+ }
|
|
+
|
|
+Safe_Exit:
|
|
+ mutex_unlock(&pipe_dev->isp_pipedev_mutex);
|
|
+
|
|
+ return ret;
|
|
+}
|
|
+
|
|
+int k1xisp_pipe_notify_jobs(struct k1xisp_pipe_dev *pipe_dev,
|
|
+ struct isp_job_describer *job_action)
|
|
+{
|
|
+ int ret = 0;
|
|
+
|
|
+ ISP_DRV_CHECK_POINTER(job_action);
|
|
+ ISP_DRV_CHECK_PARAMETERS(job_action->work_type, ISP_PIPE_WORK_TYPE_PREVIEW,
|
|
+ ISP_PIPE_WORK_TYPE_CAPTURE, "job type");
|
|
+
|
|
+ if (ISP_JOB_ACTION_START == job_action->action) {
|
|
+ ret = isp_pipe_start_job(pipe_dev, job_action->work_type);
|
|
+ } else if (ISP_JOB_ACTION_STOP == job_action->action) {
|
|
+ ret = isp_pipe_stop_job(pipe_dev, job_action->work_type);
|
|
+ } else if (ISP_JOB_ACTION_RESTART == job_action->action) {
|
|
+
|
|
+ } else if (ISP_JOB_ACTION_SWITCH == job_action->action) {
|
|
+ ret = isp_pipe_switch_stream_job(pipe_dev, job_action->work_type);
|
|
+ }
|
|
+
|
|
+ return ret;
|
|
+}
|
|
+
|
|
+int k1xisp_pipe_deploy_driver(struct k1xisp_pipe_dev *pipe_dev, struct isp_drv_deployment *drv_deploy)
|
|
+{
|
|
+ int ret = 0, mem_index = 0;
|
|
+ struct dma_buf *dma_buffer = NULL;
|
|
+
|
|
+ //such as reg mem,we should map kvir_addr?
|
|
+ ISP_DRV_CHECK_POINTER(drv_deploy);
|
|
+ ISP_DRV_CHECK_PARAMETERS(drv_deploy->work_type, ISP_PIPE_WORK_TYPE_PREVIEW,
|
|
+ ISP_PIPE_WORK_TYPE_CAPTURE, "pipe_work_type");
|
|
+ PIPE_WORK_TYPE_TO_MEM_INDEX(drv_deploy->work_type, mem_index);
|
|
+ mutex_lock(&pipe_dev->isp_pipedev_mutex);
|
|
+ if (ISP_PIPE_WORK_TYPE_PREVIEW == drv_deploy->work_type) {
|
|
+ if (pipe_dev->isp_reg_mem[mem_index].config) {
|
|
+ isp_log_err("%s:redeploy isp pipe(%d), index:%d!", __func__, pipe_dev->pipedev_id, mem_index);
|
|
+ ret = -EPERM;
|
|
+ goto Safe_Exit;
|
|
+ }
|
|
+ } else {
|
|
+ if (pipe_dev->isp_reg_mem[mem_index].config
|
|
+ && pipe_dev->isp_reg_mem[mem_index + 1].config) {
|
|
+ isp_log_err("%s:redeploy isp pipe(%d), index:%d!", __func__, pipe_dev->pipedev_id, mem_index);
|
|
+ ret = -EPERM;
|
|
+ goto Safe_Exit;
|
|
+ }
|
|
+
|
|
+ if (pipe_dev->isp_reg_mem[mem_index].config)
|
|
+ mem_index += 1;
|
|
+ }
|
|
+
|
|
+ pipe_dev->fd_buffer = drv_deploy->fd_buffer;
|
|
+ if (pipe_dev->fd_buffer) {
|
|
+ dma_buffer = dma_buf_get(drv_deploy->reg_mem.fd);
|
|
+ if (IS_ERR(dma_buffer)) {
|
|
+ isp_log_err("%s: get dma buffer failed!", __func__);
|
|
+ ret = -EBADF;
|
|
+ goto Safe_Exit;
|
|
+ }
|
|
+ pipe_dev->isp_reg_mem[mem_index].mem.fd = drv_deploy->reg_mem.fd;
|
|
+ pipe_dev->isp_reg_mem[mem_index].dma_buffer = dma_buffer;
|
|
+ pipe_dev->isp_reg_mem[mem_index].mem_size = drv_deploy->reg_mem_size;
|
|
+ ret = k1xisp_dev_get_viraddr_from_dma_buf(dma_buffer, &pipe_dev->isp_reg_mem[mem_index].kvir_addr);
|
|
+ if (ret)
|
|
+ goto Safe_Exit;
|
|
+ } else {
|
|
+ //phy addr to viraddr in kernel.
|
|
+ isp_log_err("%s: need to realize!", __func__);
|
|
+ ret = -EPERM;
|
|
+ goto Safe_Exit;
|
|
+ }
|
|
+
|
|
+ pipe_dev->isp_reg_mem[mem_index].config = true;
|
|
+ drv_deploy->reg_mem_index = mem_index;
|
|
+
|
|
+Safe_Exit:
|
|
+ mutex_unlock(&pipe_dev->isp_pipedev_mutex);
|
|
+ return ret;
|
|
+}
|
|
+
|
|
+int k1xisp_pipe_undeploy_driver(struct k1xisp_pipe_dev *pipe_dev, u32 mem_index)
|
|
+{
|
|
+ ISP_DRV_CHECK_MAX_PARAMETERS(mem_index, ISP_PIPE_WORK_TYPE_CAPTURE, "pipe mem index");
|
|
+ if (pipe_dev->isp_reg_mem[mem_index].config) {
|
|
+ if (pipe_dev->fd_buffer) {
|
|
+ k1xisp_dev_put_viraddr_to_dma_buf(pipe_dev->isp_reg_mem[mem_index].dma_buffer,
|
|
+ pipe_dev->isp_reg_mem[mem_index].kvir_addr);
|
|
+ dma_buf_put(pipe_dev->isp_reg_mem[mem_index].dma_buffer);
|
|
+ } else {
|
|
+ //phy addr to viraddr in kernel.
|
|
+ isp_log_err("%s: need to realize!", __func__);
|
|
+ return -EPERM;
|
|
+ }
|
|
+
|
|
+ pipe_dev->isp_reg_mem[mem_index].config = false;
|
|
+ memset(&pipe_dev->isp_reg_mem[mem_index], 0, sizeof(struct isp_kmem_info));
|
|
+ }
|
|
+
|
|
+ return 0;
|
|
+}
|
|
+
|
|
+int k1xisp_pipe_request_stats_buffer(struct k1xisp_pipe_dev *pipe_dev,
|
|
+ struct isp_buffer_request_info *request_info)
|
|
+{
|
|
+ int ret = 0, i;
|
|
+
|
|
+ for (i = 0; i < pipe_dev->stats_node_cnt; i++) //normal 1
|
|
+ ret = k1xisp_stat_reqbuffer(&pipe_dev->stats_nodes[i], request_info);
|
|
+
|
|
+ return ret;
|
|
+}
|
|
+
|
|
+int k1xisp_pipe_enqueue_stats_buffer(struct k1xisp_pipe_dev *pipe_dev,
|
|
+ struct isp_buffer_enqueue_info *enqueue_info)
|
|
+{
|
|
+ int ret = 0, i;
|
|
+
|
|
+ for (i = 0; i < pipe_dev->stats_node_cnt; i++) //normal 1
|
|
+ ret = k1xisp_stat_qbuffer(&pipe_dev->stats_nodes[i], enqueue_info);
|
|
+
|
|
+ return ret;
|
|
+}
|
|
+
|
|
+int k1xisp_pipe_flush_stats_buffer(struct k1xisp_pipe_dev *pipe_dev)
|
|
+{
|
|
+ int ret = 0, i;
|
|
+
|
|
+ for (i = 0; i < pipe_dev->stats_node_cnt; i++) //normal 1
|
|
+ ret = k1xisp_stat_flush_buffer(&pipe_dev->stats_nodes[i]);
|
|
+
|
|
+ return ret;
|
|
+}
|
|
+
|
|
+int _isp_pipe_set_slice_regs(void *kreg_mem, struct isp_slice_regs *slice_reg,
|
|
+ int slice_index)
|
|
+{
|
|
+ int ret = 0, reg_count;
|
|
+
|
|
+ reg_count = slice_reg->reg_count;
|
|
+ if (reg_count > K1XISP_SLICE_REG_MAX_NUM) {
|
|
+ isp_log_err("%s:slice reg count:%d is greater than maxnum(%d)!",
|
|
+ __func__, reg_count, K1XISP_SLICE_REG_MAX_NUM);
|
|
+ return -EINVAL;
|
|
+ }
|
|
+
|
|
+ if (copy_from_user(kreg_mem, (void __user *)slice_reg->data,
|
|
+ sizeof(struct isp_reg_unit) * reg_count)) {
|
|
+ isp_log_err("failed to copy slice(%d) reg from user", slice_index);
|
|
+ return -EFAULT;
|
|
+ }
|
|
+
|
|
+ ret = k1xisp_reg_write_brust(kreg_mem, reg_count, false, NULL);
|
|
+ memset(kreg_mem, 0, sizeof(struct isp_reg_unit) * reg_count);
|
|
+
|
|
+ return ret;
|
|
+}
|
|
+
|
|
+int k1xisp_pipe_trigger_capture(struct k1xisp_pipe_dev *pipe_dev,
|
|
+ struct isp_capture_package *capture_package)
|
|
+{
|
|
+ int ret = 0, slice_count, i, stop_job = 0, switch_job = 0;
|
|
+ struct camera_capture_slice_info camera_slice_info;
|
|
+ struct spm_camera_vi_ops *vi_ops = NULL;
|
|
+ u32 wait_time = 5000, vi_exit = 0;
|
|
+
|
|
+ ISP_DRV_CHECK_POINTER(capture_package);
|
|
+ slice_count = capture_package->slice_count;
|
|
+ if (slice_count > K1XISP_SLICE_MAX_NUM) {
|
|
+ isp_log_err("%s:slice count:%d is greater than maxnum(%d)!", __func__,
|
|
+ slice_count, K1XISP_SLICE_MAX_NUM);
|
|
+ return -EINVAL;
|
|
+ }
|
|
+
|
|
+ if (ISP_WORK_STATUS_START == pipe_dev->work_status
|
|
+ && ISP_PIPE_WORK_TYPE_PREVIEW == pipe_dev->work_type) {
|
|
+ isp_log_err("%s:isp pipedev%d is working on preview!", __func__,
|
|
+ pipe_dev->pipedev_id);
|
|
+ return -EPERM;
|
|
+ }
|
|
+
|
|
+ k1xisp_dev_get_vi_ops(&vi_ops);
|
|
+ if (vi_ops == NULL) {
|
|
+ isp_log_err("%s:isp pipedev%d get vi operation failed!", __func__,
|
|
+ pipe_dev->pipedev_id);
|
|
+ // return -EPERM;
|
|
+ }
|
|
+
|
|
+ mutex_lock(&pipe_dev->pipedev_capture_mutex);
|
|
+ if (pipe_dev->work_status != ISP_WORK_STATUS_START) {
|
|
+ isp_pipe_start_job(pipe_dev, ISP_PIPE_WORK_TYPE_CAPTURE);
|
|
+ stop_job = true;
|
|
+ } else {
|
|
+ switch_job = true;
|
|
+ }
|
|
+
|
|
+ memset(&camera_slice_info, 0, sizeof(struct camera_capture_slice_info));
|
|
+ camera_slice_info.total_slice_cnt = slice_count;
|
|
+ for (i = 0; i < slice_count; i++) {
|
|
+ //1. set slice regs
|
|
+ ret = _isp_pipe_set_slice_regs(pipe_dev->slice_reg_mem,
|
|
+ &capture_package->capture_slice_packs[i].slice_reg,
|
|
+ i);
|
|
+ if (ret)
|
|
+ goto vi_fail;
|
|
+ //2. notify vi start and wait done
|
|
+ camera_slice_info.hw_pipe_id = pipe_dev->pipedev_id;
|
|
+ camera_slice_info.slice_width =
|
|
+ capture_package->capture_slice_packs[i].slice_width;
|
|
+ camera_slice_info.raw_read_offset =
|
|
+ capture_package->capture_slice_packs[i].raw_read_offset;
|
|
+ camera_slice_info.yuv_out_offset =
|
|
+ capture_package->capture_slice_packs[i].yuv_out_offset;
|
|
+ camera_slice_info.dwt_offset[0] =
|
|
+ capture_package->capture_slice_packs[i].dwt_offset[0];
|
|
+ camera_slice_info.dwt_offset[1] =
|
|
+ capture_package->capture_slice_packs[i].dwt_offset[1];
|
|
+ camera_slice_info.dwt_offset[2] =
|
|
+ capture_package->capture_slice_packs[i].dwt_offset[2];
|
|
+ camera_slice_info.dwt_offset[3] =
|
|
+ capture_package->capture_slice_packs[i].dwt_offset[3];
|
|
+ if (vi_ops) {
|
|
+ ret = vi_ops->notify_caputre_until_done(i, &camera_slice_info, wait_time); //ms
|
|
+ if (ret) {
|
|
+ isp_log_err("isp pipedev%d capture slice%d failed!",
|
|
+ pipe_dev->pipedev_id, i);
|
|
+ goto job_exit;
|
|
+ }
|
|
+
|
|
+ if (0 == vi_exit)
|
|
+ vi_exit = 1;
|
|
+ }
|
|
+ }
|
|
+
|
|
+ isp_log_dbg("isp pipe(%d) capture done!", pipe_dev->pipedev_id);
|
|
+ if (stop_job)
|
|
+ isp_pipe_stop_job(pipe_dev, ISP_PIPE_WORK_TYPE_CAPTURE);
|
|
+
|
|
+ if (switch_job)
|
|
+ isp_pipe_switch_stream_job(pipe_dev, ISP_PIPE_WORK_TYPE_PREVIEW);
|
|
+
|
|
+ mutex_unlock(&pipe_dev->pipedev_capture_mutex);
|
|
+ return ret;
|
|
+
|
|
+vi_fail:
|
|
+ if (vi_exit) {
|
|
+ camera_slice_info.exception_exit = 1;
|
|
+ vi_ops->notify_caputre_until_done(0, &camera_slice_info, wait_time);
|
|
+ }
|
|
+job_exit:
|
|
+ if (stop_job)
|
|
+ isp_pipe_stop_job(pipe_dev, ISP_PIPE_WORK_TYPE_CAPTURE);
|
|
+
|
|
+ if (switch_job)
|
|
+ isp_pipe_switch_stream_job(pipe_dev, ISP_PIPE_WORK_TYPE_PREVIEW);
|
|
+
|
|
+ mutex_unlock(&pipe_dev->pipedev_capture_mutex);
|
|
+ return ret;
|
|
+}
|
|
+
|
|
+int k1xisp_pipe_set_endframe_work(struct k1xisp_pipe_dev *pipe_dev,
|
|
+ struct isp_endframe_work_info *end_info)
|
|
+{
|
|
+ int ret = 0;
|
|
+
|
|
+ pipe_dev->eof_task_hd_by_sof = end_info->process_ae_by_sof;
|
|
+ pipe_dev->frameinfo_get_by_eof = end_info->get_frameinfo_by_eof;
|
|
+ return ret;
|
|
+}
|
|
+
|
|
+long k1xisp_pipe_ioctl_core(struct file *file, unsigned int cmd, unsigned long arg)
|
|
+{
|
|
+ int ret = 0;
|
|
+ struct k1xisp_pipe_dev *pipe_dev = file->private_data;
|
|
+
|
|
+ ISP_DRV_CHECK_POINTER(pipe_dev);
|
|
+ switch (cmd) {
|
|
+ case ISP_IOC_DEPLOY_DRV:
|
|
+ ret = k1xisp_pipe_deploy_driver(pipe_dev, (struct isp_drv_deployment*)arg);
|
|
+ break;
|
|
+ case ISP_IOC_UNDEPLOY_DRV:
|
|
+ ret = k1xisp_pipe_undeploy_driver(pipe_dev, *((u32*)arg));
|
|
+ break;
|
|
+ case ISP_IOC_SET_REG:
|
|
+ struct isp_regs_info *user_regs = (struct isp_regs_info*)arg;
|
|
+
|
|
+ if (pipe_dev->isp_reg_mem[user_regs->mem_index].config) {
|
|
+ if (pipe_dev->isp_reg_mem[user_regs->mem_index].mem.fd != user_regs->mem.fd) {
|
|
+ isp_log_err("please use deploy isp driver reg memory(fd:%d), your's %d!",
|
|
+ pipe_dev->isp_reg_mem[user_regs->mem_index].mem.fd, user_regs->mem.fd);
|
|
+ ret = -EINVAL;
|
|
+ } else {
|
|
+ ret = k1xisp_reg_write_brust(user_regs->data, user_regs->size, true,
|
|
+ pipe_dev->isp_reg_mem[user_regs->mem_index].kvir_addr);
|
|
+ }
|
|
+ } else {
|
|
+ isp_log_err("please deploy isp driver first, index:%d!", user_regs->mem_index);
|
|
+ ret = -EPERM;
|
|
+ }
|
|
+ break;
|
|
+ case ISP_IOC_GET_REG:
|
|
+ ret = k1xisp_reg_read_brust((struct isp_regs_info*)arg);
|
|
+ break;
|
|
+ case ISP_IOC_SET_PDC:
|
|
+ ret = k1xisp_pipe_enable_pdc_af(pipe_dev, (u32*)arg);
|
|
+ break;
|
|
+ case ISP_IOC_SET_JOB:
|
|
+ ret = k1xisp_pipe_notify_jobs(pipe_dev, (struct isp_job_describer*)arg);
|
|
+ break;
|
|
+ case ISP_IOC_GET_INTERRUPT:
|
|
+ ret = k1xisp_pipe_wait_interrupts(pipe_dev, (struct isp_user_task_info*)arg);
|
|
+ break;
|
|
+ case ISP_IOC_REQUEST_BUFFER:
|
|
+ ret = k1xisp_pipe_request_stats_buffer(pipe_dev, (struct isp_buffer_request_info*)arg);
|
|
+ break;
|
|
+ case ISP_IOC_ENQUEUE_BUFFER:
|
|
+ ret = k1xisp_pipe_enqueue_stats_buffer(pipe_dev, (struct isp_buffer_enqueue_info*)arg);
|
|
+ break;
|
|
+ case ISP_IOC_FLUSH_BUFFER:
|
|
+ ret = k1xisp_pipe_flush_stats_buffer(pipe_dev);
|
|
+ break;
|
|
+ case ISP_IOC_TRIGGER_CAPTURE:
|
|
+ ret = k1xisp_pipe_trigger_capture(pipe_dev, (struct isp_capture_package*)arg);
|
|
+ break;
|
|
+ case ISP_IOC_SET_SINGLE_REG:
|
|
+ ret = k1xisp_reg_write_single((struct isp_reg_unit*)arg);
|
|
+ break;
|
|
+ case ISP_IOC_SET_END_FRAME_WORK:
|
|
+ ret = k1xisp_pipe_set_endframe_work(pipe_dev, ((struct isp_endframe_work_info*)arg));
|
|
+ break;
|
|
+ default:
|
|
+ isp_log_err("unsupport the cmd: %d!", cmd);
|
|
+ ret = -ENOIOCTLCMD;
|
|
+ break;
|
|
+ }
|
|
+
|
|
+ if (ret)
|
|
+ isp_log_dbg("the cmd: %d! ioctl failed, ret=%d!", cmd, ret);
|
|
+
|
|
+ return ret;
|
|
+}
|
|
+
|
|
+static long k1xisp_pipe_unlocked_ioctl(struct file *file, unsigned int cmd,
|
|
+ unsigned long arg)
|
|
+{
|
|
+ long ret = 0;
|
|
+
|
|
+ ret = k1xisp_dev_copy_user(file, cmd, (void *)arg, k1xisp_pipe_ioctl_core);
|
|
+ return ret;
|
|
+}
|
|
+
|
|
+//fixme: add compat in the future
|
|
+#if 0
|
|
+//#ifdef CONFIG_COMPAT
|
|
+
|
|
+struct isp_regs_info32 {
|
|
+ union {
|
|
+ __u64 phy_addr;
|
|
+ __s32 fd;
|
|
+ } mem;
|
|
+ __u32 size;
|
|
+ compat_caddr_t data; /* contains some isp_reg_unit */
|
|
+ __u32 mem_index;
|
|
+};
|
|
+
|
|
+struct isp_slice_regs32 {
|
|
+ __u32 reg_count;
|
|
+ compat_caddr_t data; /* contains some isp_reg_unit */
|
|
+};
|
|
+
|
|
+struct isp_capture_slice_pack32 {
|
|
+ __s32 slice_width;
|
|
+ __s32 raw_read_offset;
|
|
+ __s32 yuv_out_offset;
|
|
+ __s32 dwt_offset[4];
|
|
+ struct isp_slice_regs32 slice_reg;
|
|
+};
|
|
+
|
|
+struct isp_capture_package32 {
|
|
+ __u32 slice_count;
|
|
+ struct isp_capture_slice_pack32 capture_slice_packs[K1XISP_SLICE_MAX_NUM];
|
|
+};
|
|
+
|
|
+#define ISP_IOC_SET_REG32 _IOW(IOC_K1X_ISP_TYPE, ISP_IOC_NR_SET_REG, struct isp_regs_info32)
|
|
+#define ISP_IOC_GET_REG32 _IOWR(IOC_K1X_ISP_TYPE, ISP_IOC_NR_GET_REG, struct isp_regs_info32)
|
|
+#define ISP_IOC_TRIGGER_CAPTURE32 _IOWR(IOC_K1X_ISP_TYPE, ISP_IOC_NR_TRIGGER_CAPTURE, struct isp_capture_package32)
|
|
+
|
|
+/* Use the same argument order as copy_in_user */
|
|
+#define assign_in_user(to, from) \
|
|
+({ \
|
|
+ typeof(*from) __assign_tmp; \
|
|
+ \
|
|
+ get_user(__assign_tmp, from) || put_user(__assign_tmp, to); \
|
|
+})
|
|
+
|
|
+static int k1xisp_alloc_userspace(unsigned int size, u32 aux_space,
|
|
+ void __user **up_native)
|
|
+{
|
|
+ *up_native = compat_alloc_user_space(size + aux_space);
|
|
+ if (!*up_native)
|
|
+ return -ENOMEM;
|
|
+ if (clear_user(*up_native, size))
|
|
+ return -EFAULT;
|
|
+ return 0;
|
|
+}
|
|
+
|
|
+static int k1xisp_get_user_regs(struct isp_regs_info __user *kp,
|
|
+ struct isp_regs_info32 __user *up)
|
|
+{
|
|
+ compat_uptr_t tmp;
|
|
+
|
|
+ if (!access_ok(up, sizeof(struct isp_regs_info32)) ||
|
|
+ assign_in_user(&kp->size, &up->size) ||
|
|
+ assign_in_user(&kp->mem_index, &up->mem_index) ||
|
|
+ get_user(tmp, &up->data) ||
|
|
+ put_user(compat_ptr(tmp), &kp->data) ||
|
|
+ copy_in_user(&kp->mem, &up->mem, sizeof(kp->mem)))
|
|
+ return -EFAULT;
|
|
+
|
|
+ return 0;
|
|
+}
|
|
+
|
|
+static int k1xisp_put_user_regs(struct isp_regs_info __user *kp,
|
|
+ struct isp_regs_info32 __user *up)
|
|
+{
|
|
+ void *edid;
|
|
+
|
|
+ if (!access_ok(up, sizeof(*up)) ||
|
|
+ assign_in_user(&up->size, &kp->size) ||
|
|
+ assign_in_user(&up->mem_index, &kp->mem_index) ||
|
|
+ get_user(edid, &kp->data) ||
|
|
+ put_user(ptr_to_compat(edid), &up->data) ||
|
|
+ copy_in_user(&up->mem, &kp->mem, sizeof(kp->mem)))
|
|
+ return -EFAULT;
|
|
+
|
|
+ return 0;
|
|
+}
|
|
+
|
|
+static int k1xisp_get_user_capture_package(struct isp_capture_package __user *kp,
|
|
+ struct isp_capture_package32 __user *up)
|
|
+{
|
|
+ compat_uptr_t tmp;
|
|
+ __u32 i, count;
|
|
+
|
|
+ if (!access_ok(up, sizeof(struct isp_capture_package32)) ||
|
|
+ assign_in_user(&kp->slice_count, &up->slice_count) ||
|
|
+ get_user(count, &up->slice_count))
|
|
+ return -EFAULT;
|
|
+
|
|
+ for (i = 0; i < count; i++) {
|
|
+ if (assign_in_user(&kp->capture_slice_packs[i].slice_width, &up->capture_slice_packs[i].slice_width) ||
|
|
+ assign_in_user(&kp->capture_slice_packs[i].raw_read_offset, &up->capture_slice_packs[i].raw_read_offset) ||
|
|
+ assign_in_user(&kp->capture_slice_packs[i].yuv_out_offset, &up->capture_slice_packs[i].yuv_out_offset) ||
|
|
+ copy_in_user(&kp->capture_slice_packs[i].dwt_offset, &up->capture_slice_packs[i].dwt_offset,
|
|
+ sizeof(kp->capture_slice_packs[i].dwt_offset)))
|
|
+ return -EFAULT;
|
|
+
|
|
+ if (assign_in_user(&kp->capture_slice_packs[i].slice_reg.reg_count, &up->capture_slice_packs[i].slice_reg.reg_count) ||
|
|
+ get_user(tmp, &up->capture_slice_packs[i].slice_reg.data) ||
|
|
+ put_user(compat_ptr(tmp), &kp->capture_slice_packs[i].slice_reg.data))
|
|
+ return -EFAULT;
|
|
+ }
|
|
+
|
|
+ return 0;
|
|
+}
|
|
+
|
|
+static long k1xisp_pipe_compat_ioctl(struct file *file, unsigned int cmd,
|
|
+ unsigned long arg)
|
|
+{
|
|
+ long ret = 0;
|
|
+ void __user *up_native = NULL;
|
|
+ void __user *up = compat_ptr(arg);
|
|
+ int compatible_arg = 1;
|
|
+
|
|
+	/* maybe do some conversion, like user space memory or 64bit to 32bit */
|
|
+
|
|
+ switch (cmd) {
|
|
+ case ISP_IOC_SET_REG32:
|
|
+ cmd = ISP_IOC_SET_REG;
|
|
+ ret =
|
|
+ k1xisp_alloc_userspace(sizeof(struct isp_regs_info), 0, &up_native);
|
|
+ if (!ret)
|
|
+ k1xisp_get_user_regs(up_native, up);
|
|
+ else
|
|
+ isp_log_err("alloc userspace for ISP_IOC_SET_REG failed!");
|
|
+ compatible_arg = 0;
|
|
+ break;
|
|
+ case ISP_IOC_GET_REG32:
|
|
+ cmd = ISP_IOC_GET_REG;
|
|
+ ret =
|
|
+ k1xisp_alloc_userspace(sizeof(struct isp_regs_info), 0, &up_native);
|
|
+ if (!ret)
|
|
+ k1xisp_get_user_regs(up_native, up);
|
|
+ else
|
|
+ isp_log_err("alloc userspace for ISP_IOC_GET_REG failed!");
|
|
+ compatible_arg = 0;
|
|
+ break;
|
|
+ case ISP_IOC_TRIGGER_CAPTURE32:
|
|
+ cmd = ISP_IOC_TRIGGER_CAPTURE;
|
|
+ ret =
|
|
+ k1xisp_alloc_userspace(sizeof(struct isp_capture_package), 0,
|
|
+ &up_native);
|
|
+ if (!ret)
|
|
+ k1xisp_get_user_capture_package(up_native, up);
|
|
+ else
|
|
+ isp_log_err
|
|
+ ("alloc userspace for ISP_IOC_TRIGGER_CAPTURE failed!");
|
|
+ compatible_arg = 0;
|
|
+ break;
|
|
+ case ISP_IOC_DEPLOY_DRV:
|
|
+ case ISP_IOC_UNDEPLOY_DRV:
|
|
+ case ISP_IOC_SET_PDC:
|
|
+ case ISP_IOC_SET_JOB:
|
|
+ case ISP_IOC_GET_INTERRUPT:
|
|
+ case ISP_IOC_REQUEST_BUFFER:
|
|
+ case ISP_IOC_ENQUEUE_BUFFER:
|
|
+ case ISP_IOC_FLUSH_BUFFER:
|
|
+ case ISP_IOC_SET_SINGLE_REG:
|
|
+ case ISP_IOC_SET_END_FRAME_WORK:
|
|
+ break;
|
|
+ default:
|
|
+ isp_log_err("unsupport the cmd: %d!", cmd);
|
|
+ ret = -ENOIOCTLCMD;
|
|
+ break;
|
|
+ }
|
|
+
|
|
+ if (ret)
|
|
+ return ret;
|
|
+
|
|
+ if (compatible_arg)
|
|
+ ret = k1xisp_dev_copy_user(file, cmd, up, k1xisp_pipe_ioctl_core);
|
|
+ else
|
|
+ ret = k1xisp_dev_copy_user(file, cmd, up_native, k1xisp_pipe_ioctl_core);
|
|
+
|
|
+ if (ret) {
|
|
+ isp_log_err("k1x_isp_copy_user for cmd 0x%x failed!", cmd);
|
|
+ goto ERROR_EXIT;
|
|
+ }
|
|
+ // TODO:if need to copy to user space pointer, should do more things.
|
|
+ switch (cmd) {
|
|
+ case ISP_IOC_GET_REG:
|
|
+ ret = k1xisp_put_user_regs(up_native, up);
|
|
+ if (ret)
|
|
+ isp_log_err("k1x_isp_put_user_regs for cmd 0x%x failed!", cmd);
|
|
+ break;
|
|
+ }
|
|
+
|
|
+ERROR_EXIT:
|
|
+ return ret;
|
|
+}
|
|
+
|
|
+#endif
|
|
+
|
|
+static struct file_operations g_isp_pipe_fops = {
|
|
+ .owner = THIS_MODULE,
|
|
+ .open = k1xisp_pipe_open,
|
|
+ .release = k1xisp_pipe_release,
|
|
+ .unlocked_ioctl = k1xisp_pipe_unlocked_ioctl,
|
|
+#ifdef CONFIG_COMPAT
|
|
+ //fixme: add compat in the future
|
|
+ //.compat_ioctl = k1xisp_pipe_compat_ioctl,
|
|
+#endif
|
|
+};
|
|
+
|
|
+struct file_operations *k1xisp_pipe_get_fops(void)
|
|
+{
|
|
+ return &g_isp_pipe_fops;
|
|
+}
|
|
+
|
|
+int isp_pipe_task_wakeup_user(struct k1xisp_pipe_dev *pipe_dev,
|
|
+ struct isp_pipe_task *pipe_task, u32 frame_num)
|
|
+{
|
|
+ static DEFINE_RATELIMIT_STATE(rs, 5 * HZ, 6);
|
|
+ u32 last_num = 0;
|
|
+
|
|
+ spin_lock(&pipe_task->complete_lock); //lock used only between thread and soft irq.
|
|
+ pipe_task->complete_cnt++;
|
|
+ last_num = pipe_task->complete_frame_num;
|
|
+ pipe_task->complete_frame_num = frame_num;
|
|
+ if (pipe_task->complete_cnt >= 2) {
|
|
+ //user task may delay, needn't wakeup this time.
|
|
+ if (__ratelimit(&rs))
|
|
+ isp_log_warn("%s:task(%d) delay at frame%d,now frame%d !",
|
|
+ __func__, pipe_task->task_type, last_num,
|
|
+ frame_num);
|
|
+
|
|
+ pipe_task->complete_cnt--;
|
|
+ spin_unlock(&pipe_task->complete_lock);
|
|
+		//return the previous buffer because it had no chance to be handled.
|
|
+ isp_pipe_task_get_stats_result(pipe_dev, pipe_task, NULL, frame_num - 1, true);
|
|
+ } else {
|
|
+ complete(&pipe_task->wait_complete);
|
|
+ spin_unlock(&pipe_task->complete_lock);
|
|
+ }
|
|
+
|
|
+ return 0;
|
|
+}
|
|
+
|
|
+void isp_pipe_task_ignore_a_vote(struct k1xisp_stats_node *stats_node, u32 voter_type,
|
|
+ u32 frame_num)
|
|
+{
|
|
+ u32 stat_id = 0;
|
|
+
|
|
+ if (TASK_VOTER_AF_EOF == voter_type)
|
|
+ stat_id = ISP_STAT_ID_AF;
|
|
+ else if (TASK_VOTER_PDC_EOF == voter_type)
|
|
+ stat_id = ISP_STAT_ID_PDC;
|
|
+
|
|
+ k1xisp_stat_get_donebuf_by_frameid(stats_node, stat_id, frame_num - 1, true);
|
|
+}
|
|
+
|
|
+int isp_pipe_task_vote_handler(struct k1xisp_pipe_dev *pipe_dev,
|
|
+ struct isp_pipe_task *pipe_task, u32 frame_num,
|
|
+ u32 voter_type)
|
|
+{
|
|
+ int ret = 0, vote_index, system_trigger = 0, i, j;
|
|
+ struct task_voting_system *voting_sys = NULL;
|
|
+ static DEFINE_RATELIMIT_STATE(limt_print, 5 * HZ, 3);
|
|
+ u32 ticket_frame = 0, fail_vote = 0;
|
|
+
|
|
+ voting_sys = &pipe_task->vote_system;
|
|
+ ISP_DRV_CHECK_POINTER(voting_sys);
|
|
+ ISP_DRV_CHECK_MAX_PARAMETERS(voter_type, TASK_VOTER_PDC_EOF, "voter_type");
|
|
+
|
|
+ if (false == pipe_task->use_vote_sys) {
|
|
+ //user disable vote system.
|
|
+ pr_err_ratelimited("user disable vote system, ignore this result!");
|
|
+ //frame_num+1:discard the current frame.
|
|
+ isp_pipe_task_ignore_a_vote(&pipe_dev->stats_nodes[0], voter_type,
|
|
+ frame_num + 1);
|
|
+ //clear flag
|
|
+ spin_lock(&voting_sys->vote_lock);
|
|
+ voting_sys->cur_ticket_cnt = 0;
|
|
+ for (i = 0; i < voting_sys->sys_trigger_num; i++)
|
|
+ voting_sys->voter_validity[i] = false;
|
|
+ spin_unlock(&voting_sys->vote_lock);
|
|
+ return ret;
|
|
+ }
|
|
+
|
|
+ vote_index = voting_sys->voter_index[voter_type];
|
|
+ if (vote_index < 0) {
|
|
+ isp_log_err("valide index(%d) for task voter(%d)!", vote_index, voter_type);
|
|
+ return -EPERM;
|
|
+ }
|
|
+
|
|
+ if (true == voting_sys->voter_validity[vote_index]) {
|
|
+ if (__ratelimit(&limt_print)) {
|
|
+ isp_log_warn
|
|
+ ("voter(%d)'s previous isn't clear at frame%d, pdc's status:0x%lx!",
|
|
+ voter_type, frame_num,
|
|
+ k1xisp_reg_readl(REG_ISP_PDC_BASE(pipe_dev->pipedev_id)));
|
|
+ }
|
|
+ spin_lock(&voting_sys->vote_lock);
|
|
+ voting_sys->voter_frameID[vote_index] = frame_num;
|
|
+ spin_unlock(&voting_sys->vote_lock);
|
|
+ isp_pipe_task_ignore_a_vote(&pipe_dev->stats_nodes[0], voter_type, frame_num);
|
|
+ } else {
|
|
+ spin_lock(&voting_sys->vote_lock);
|
|
+ voting_sys->voter_validity[vote_index] = true;
|
|
+ voting_sys->voter_frameID[vote_index] = frame_num;
|
|
+ voting_sys->cur_ticket_cnt++;
|
|
+ if (voting_sys->cur_ticket_cnt == voting_sys->sys_trigger_num) {
|
|
+ ticket_frame = voting_sys->voter_frameID[0];
|
|
+ for (i = 1; i < voting_sys->sys_trigger_num; i++) {
|
|
+ if (ticket_frame < voting_sys->voter_frameID[i]) {
|
|
+ voting_sys->voter_validity[i - 1] = false;
|
|
+ fail_vote = true;
|
|
+ } else if (ticket_frame > voting_sys->voter_frameID[i]) {
|
|
+ voting_sys->voter_validity[i] = false;
|
|
+ fail_vote = true;
|
|
+ }
|
|
+ ticket_frame = voting_sys->voter_frameID[i];
|
|
+ }
|
|
+
|
|
+ //trigger system
|
|
+ if (fail_vote)
|
|
+ voting_sys->cur_ticket_cnt--;
|
|
+ else {
|
|
+ system_trigger = 1;
|
|
+ //clear flag
|
|
+ voting_sys->cur_ticket_cnt = 0;
|
|
+ for (i = 0; i < voting_sys->sys_trigger_num; i++)
|
|
+ voting_sys->voter_validity[i] = false;
|
|
+ }
|
|
+ }
|
|
+ spin_unlock(&voting_sys->vote_lock);
|
|
+ }
|
|
+
|
|
+ if (system_trigger)
|
|
+ isp_pipe_task_wakeup_user(pipe_dev, pipe_task, frame_num);
|
|
+
|
|
+ if (fail_vote) {
|
|
+ for (i = 0; i < voting_sys->sys_trigger_num; i++) {
|
|
+ if (voting_sys->voter_validity[i] == false) {
|
|
+ for (j = TASK_VOTER_SDE_SOF; j < TASK_VOTER_TYP_MAX; j++) {
|
|
+ if (i == voting_sys->voter_index[j])
|
|
+ break;
|
|
+ }
|
|
+ //discard current frame
|
|
+ isp_pipe_task_ignore_a_vote(&pipe_dev->stats_nodes[0],
|
|
+ j,
|
|
+ voting_sys->voter_frameID[i] + 1);
|
|
+ }
|
|
+ }
|
|
+ }
|
|
+
|
|
+ return ret;
|
|
+}
|
|
+
|
|
+//tasklet context
|
|
+int isp_pipe_task_mem_stat_handler(struct k1xisp_pipe_dev *pipe_dev, u32 task_type, u32 frame_num)
|
|
+{
|
|
+ int i = 0, ret = 0;
|
|
+
|
|
+ ISP_DRV_CHECK_POINTER(pipe_dev);
|
|
+
|
|
+ for (i = 0; i < pipe_dev->stats_node_cnt; i++) {
|
|
+ if (task_type == ISP_PIPE_TASK_TYPE_SOF) {
|
|
+ k1xisp_stat_mem_lower_half_irq(&pipe_dev->stats_nodes[i], ISP_STAT_ID_AWB, frame_num);
|
|
+ k1xisp_stat_mem_lower_half_irq(&pipe_dev->stats_nodes[i], ISP_STAT_ID_LTM, frame_num);
|
|
+ if (pipe_dev->eof_task_hd_by_sof)
|
|
+ k1xisp_stat_mem_lower_half_irq(&pipe_dev->stats_nodes[i], ISP_STAT_ID_AE, frame_num);
|
|
+ } else if (task_type == ISP_PIPE_TASK_TYPE_EOF) {
|
|
+ k1xisp_stat_mem_lower_half_irq(&pipe_dev->stats_nodes[i], ISP_STAT_ID_AE, frame_num);
|
|
+ } else if (task_type == ISP_PIPE_TASK_TYPE_AF) {
|
|
+ k1xisp_stat_mem_lower_half_irq(&pipe_dev->stats_nodes[i], ISP_STAT_ID_AF, frame_num);
|
|
+ }
|
|
+ }
|
|
+
|
|
+ return ret;
|
|
+}
|
|
+
|
|
+static void isp_pipe_tasklet_handler(unsigned long data)
|
|
+{
|
|
+ struct k1xisp_pipe_dev *pipe_dev = (struct k1xisp_pipe_dev *)data;
|
|
+ u8 trigger_task = false;
|
|
+ int type_index = 0, frame_num = 0, i;
|
|
+ struct k1xisp_irq_context *isp_irq_ctx = NULL;
|
|
+ struct isp_pipe_task *pipe_task = NULL;
|
|
+ unsigned long flags = 0;
|
|
+
|
|
+ if (!pipe_dev) {
|
|
+ isp_log_err("%s: invalid pointer!", __func__);
|
|
+ return;
|
|
+ }
|
|
+
|
|
+ if (ISP_PIPE_WORK_TYPE_PREVIEW != pipe_dev->work_type) {
|
|
+ pr_err_ratelimited("isp pipe tasklet triggered by none-preview work!");
|
|
+ return;
|
|
+ }
|
|
+
|
|
+ if (pipe_dev->stream_restart) {
|
|
+ for (i = 0; i < pipe_dev->stats_node_cnt; i++)
|
|
+ k1xisp_stat_reset_dma_busybuf_frameid(&pipe_dev->stats_nodes[i]);
|
|
+ pipe_dev->stream_restart = false;
|
|
+ }
|
|
+ isp_irq_ctx = &pipe_dev->isp_irq_ctx;
|
|
+
|
|
+	//handle af and ae first due to their done may come together with next sof
|
|
+ for (type_index = ISP_PIPE_TASK_TYPE_AF; type_index >= 0; type_index--) {
|
|
+ pipe_task = &pipe_dev->pipe_tasks[type_index];
|
|
+ frame_num = atomic_read(&pipe_task->frame_num);
|
|
+ spin_lock_irqsave(&pipe_task->task_lock, flags);
|
|
+ trigger_task = pipe_task->task_trigger;
|
|
+ if (trigger_task == true)
|
|
+ pipe_task->task_trigger = false;
|
|
+ spin_unlock_irqrestore(&pipe_task->task_lock, flags);
|
|
+ if (trigger_task) {
|
|
+ isp_pipe_task_mem_stat_handler(pipe_dev, type_index, frame_num);
|
|
+ //wakeup user
|
|
+
|
|
+ if (pipe_task->use_vote_sys) {
|
|
+ if (ISP_PIPE_TASK_TYPE_AF != type_index) {
|
|
+ isp_log_err("%s: Only AF task use voting system", __func__);
|
|
+ return;
|
|
+ }
|
|
+ isp_pipe_task_vote_handler(pipe_dev, pipe_task, frame_num, TASK_VOTER_AF_EOF);
|
|
+ } else {
|
|
+ isp_pipe_task_wakeup_user(pipe_dev, pipe_task, frame_num);
|
|
+ }
|
|
+ }
|
|
+ }
|
|
+}
|
|
+
|
|
+//the upper half irq context
|
|
+void isp_pipe_dev_call_each_irqbit_handler(u32 irq_status, u32 hw_pipe_id, u32 frame_id)
|
|
+{
|
|
+ struct k1xisp_pipe_dev *pipe_dev = NULL;
|
|
+ struct k1xisp_irq_context *isp_irq_ctx = NULL;
|
|
+ int set_bit = -1, schedule_lower_irq = 0;
|
|
+ struct isp_irq_func_params irq_func_param;
|
|
+
|
|
+ k1xisp_dev_get_pipedev(hw_pipe_id, &pipe_dev);
|
|
+ if (!pipe_dev) {
|
|
+ isp_log_err("Can't find work pipe device for hw pipeline%d!", hw_pipe_id);
|
|
+ return;
|
|
+ }
|
|
+
|
|
+ if (pipe_dev->work_status != ISP_WORK_STATUS_START) {
|
|
+ pr_err_ratelimited("irq(fn%d) with isp pipedev(%d) doesn't start work!",
|
|
+ frame_id, hw_pipe_id);
|
|
+ return;
|
|
+ }
|
|
+
|
|
+ isp_irq_ctx = &pipe_dev->isp_irq_ctx;
|
|
+ irq_func_param.pipe_dev = pipe_dev;
|
|
+ irq_func_param.frame_num = frame_id;
|
|
+ irq_func_param.irq_status = irq_status;
|
|
+ irq_func_param.hw_pipe_id = hw_pipe_id;
|
|
+ for_each_set_bit(set_bit, &isp_irq_ctx->isp_irq_bitmap, ISP_IRQ_BIT_MAX_NUM) {
|
|
+ if (irq_status & BIT(set_bit)) {
|
|
+ if (isp_irq_ctx->isp_irq_handler[set_bit])
|
|
+ schedule_lower_irq |= isp_irq_ctx->isp_irq_handler[set_bit](&irq_func_param);
|
|
+ }
|
|
+ }
|
|
+
|
|
+ if (schedule_lower_irq)
|
|
+ tasklet_hi_schedule(&isp_irq_ctx->isp_irq_tasklet);
|
|
+}
|
|
+
|
|
+//the upper half irq context
|
|
+void k1xisp_pipe_dev_irq_handler(void *irq_data)
|
|
+{
|
|
+ struct isp_irq_data *isp_irq = (struct isp_irq_data *)irq_data;
|
|
+
|
|
+ if (isp_irq->pipe0_irq_status)
|
|
+ isp_pipe_dev_call_each_irqbit_handler(isp_irq->pipe0_irq_status,
|
|
+ ISP_HW_PIPELINE_ID_0,
|
|
+ isp_irq->pipe0_frame_id);
|
|
+
|
|
+ if (isp_irq->pipe1_irq_status)
|
|
+ isp_pipe_dev_call_each_irqbit_handler(isp_irq->pipe1_irq_status,
|
|
+ ISP_HW_PIPELINE_ID_1,
|
|
+ isp_irq->pipe1_frame_id);
|
|
+}
|
|
+
|
|
+//the upper half irq context
|
|
+void k1xisp_pipe_dma_irq_handler(struct k1xisp_pipe_dev *pipe_dev, void *irq_data)
|
|
+{
|
|
+ int i = 0;
|
|
+ u32 schedule_lower_irq = 0;
|
|
+ struct k1xisp_irq_context *isp_irq_ctx = NULL;
|
|
+
|
|
+ if (pipe_dev) {
|
|
+ if (ISP_WORK_STATUS_START != pipe_dev->work_status)
|
|
+ return;
|
|
+
|
|
+ for (i = 0; i < pipe_dev->stats_node_cnt; i++) // max is 2.
|
|
+ schedule_lower_irq |= k1xisp_stat_dma_irq_handler(&pipe_dev->stats_nodes[i], irq_data);
|
|
+
|
|
+ if (schedule_lower_irq) {
|
|
+ isp_irq_ctx = &pipe_dev->isp_irq_ctx;
|
|
+ tasklet_hi_schedule(&isp_irq_ctx->isp_dma_irq_tasklet);
|
|
+ }
|
|
+ }
|
|
+}
|
|
+
|
|
+static void isp_dma_tasklet_handler(unsigned long data)
|
|
+{
|
|
+ int i = 0;
|
|
+ struct k1xisp_pipe_dev *pipe_dev = (struct k1xisp_pipe_dev *)data;
|
|
+ u32 frame_id = 0;
|
|
+
|
|
+ if (!pipe_dev) {
|
|
+ isp_log_err("Invalid pipe dev pointer for isp dma tasklet!");
|
|
+ return;
|
|
+ }
|
|
+
|
|
+ frame_id = atomic_read(&pipe_dev->isp_irq_ctx.cur_frame_num);
|
|
+ for (i = 0; i < pipe_dev->stats_node_cnt; i++)
|
|
+ k1xisp_stat_dma_lower_half_irq(&pipe_dev->stats_nodes[i], frame_id);
|
|
+}
|
|
+
|
|
+//pipe sof irq handler:get frame id from vi
|
|
+int isp_pipe_sof_irq_handler(struct isp_irq_func_params *param)
|
|
+{
|
|
+ int schedule_lower_irq = 0;
|
|
+ struct k1xisp_pipe_dev *pipe_dev = NULL;
|
|
+ struct k1xisp_irq_context *irq_ctx = NULL;
|
|
+ u32 hw_pipe_id = 0, last_frame_num = 0;
|
|
+
|
|
+ ISP_DRV_CHECK_POINTER(param);
|
|
+ pipe_dev = param->pipe_dev;
|
|
+ if (ISP_PIPE_WORK_TYPE_PREVIEW != pipe_dev->work_type)
|
|
+ return 0;
|
|
+ irq_ctx = &pipe_dev->isp_irq_ctx;
|
|
+ last_frame_num = atomic_read(&irq_ctx->cur_frame_num);
|
|
+ atomic_set(&irq_ctx->cur_frame_num, param->frame_num);
|
|
+
|
|
+ if (last_frame_num != 0 && (last_frame_num != param->frame_num - 1)) {
|
|
+ hw_pipe_id = param->hw_pipe_id;
|
|
+ if (0 == param->frame_num) { //stream restart and frameid comes from zero.
|
|
+ pipe_dev->stream_restart = true;
|
|
+ schedule_lower_irq = 1;
|
|
+ } else {
|
|
+ pr_err_ratelimited ("the frameID is not serial on pipe%d,%d to %d!",
|
|
+ hw_pipe_id, last_frame_num, param->frame_num);
|
|
+ }
|
|
+ }
|
|
+
|
|
+ return schedule_lower_irq;
|
|
+}
|
|
+
|
|
+//just print err log.
|
|
+int isp_pipe_irq_err_print_handler(struct isp_irq_func_params *param)
|
|
+{
|
|
+ int schedule_lower_irq = 0;
|
|
+ u32 irq_value = 0, hw_pipe_id = 0;
|
|
+
|
|
+ ISP_DRV_CHECK_POINTER(param);
|
|
+ irq_value = param->irq_status;
|
|
+ hw_pipe_id = param->hw_pipe_id;
|
|
+
|
|
+ // if (irq_value & BIT(ISP_IRQ_BIT_STAT_ERR))
|
|
+ // isp_log_err("host_isp_statistics_err in pipe:%d!", hw_pipe_id);
|
|
+
|
|
+ if (irq_value & BIT(ISP_IRQ_BIT_ISP_ERR))
|
|
+ isp_log_err("host_isp_err_irq in pipe%d!", hw_pipe_id);
|
|
+
|
|
+ return schedule_lower_irq;
|
|
+}
|
|
+
|
|
+int isp_pipe_sde_sof_irq_handler(struct isp_irq_func_params *param)
|
|
+{
|
|
+ int schedule_lower_irq = 1, frame_num = 0;
|
|
+ struct k1xisp_pipe_dev *pipe_dev = NULL;
|
|
+ struct isp_pipe_task *pipe_task = NULL;
|
|
+ int i = 0;
|
|
+
|
|
+ ISP_DRV_CHECK_POINTER(param);
|
|
+ pipe_dev = param->pipe_dev;
|
|
+ frame_num = atomic_read(&pipe_dev->isp_irq_ctx.cur_frame_num);
|
|
+ pipe_task = &pipe_dev->pipe_tasks[ISP_PIPE_TASK_TYPE_SOF];
|
|
+ atomic_set(&pipe_task->frame_num, frame_num);
|
|
+ spin_lock(&pipe_task->task_lock);
|
|
+ // use_frame_id = pipe_task->user_stat_cfg.frame_num;
|
|
+ pipe_task->task_trigger = true;
|
|
+ spin_unlock(&pipe_task->task_lock);
|
|
+
|
|
+ //get awb ltm read sof
|
|
+ for (i = 0; i < pipe_dev->stats_node_cnt; i++) {
|
|
+ k1xisp_stat_mem_set_irq_flag(&pipe_dev->stats_nodes[i], ISP_STAT_ID_AWB,
|
|
+ param->hw_pipe_id);
|
|
+ k1xisp_stat_mem_set_irq_flag(&pipe_dev->stats_nodes[i], ISP_STAT_ID_LTM,
|
|
+ param->hw_pipe_id);
|
|
+ if (pipe_dev->eof_task_hd_by_sof)
|
|
+ k1xisp_stat_mem_set_irq_flag(&pipe_dev->stats_nodes[i],
|
|
+ ISP_STAT_ID_AE, param->hw_pipe_id);
|
|
+ }
|
|
+
|
|
+ return schedule_lower_irq;
|
|
+}
|
|
+
|
|
+int isp_pipe_sde_eof_irq_handler(struct isp_irq_func_params *param)
|
|
+{
|
|
+ int schedule_lower_irq = 0;
|
|
+
|
|
+ return schedule_lower_irq;
|
|
+}
|
|
+
|
|
+int isp_pipe_aem_eof_irq_handler(struct isp_irq_func_params *param)
|
|
+{
|
|
+ int schedule_lower_irq = 1, frame_num = 0;
|
|
+ struct k1xisp_pipe_dev *pipe_dev = NULL;
|
|
+ struct isp_pipe_task *pipe_task = NULL;
|
|
+
|
|
+ ISP_DRV_CHECK_POINTER(param);
|
|
+ pipe_dev = param->pipe_dev;
|
|
+ frame_num = atomic_read(&pipe_dev->isp_irq_ctx.cur_frame_num);
|
|
+ pipe_task = &pipe_dev->pipe_tasks[ISP_PIPE_TASK_TYPE_EOF];
|
|
+ atomic_set(&pipe_task->frame_num, frame_num);
|
|
+
|
|
+ if (pipe_dev->stats_node_cnt == 1)
|
|
+ k1xisp_stat_mem_set_irq_flag(pipe_dev->stats_nodes, ISP_STAT_ID_AE, param->hw_pipe_id);
|
|
+ // else {
|
|
+ // //combine pipe
|
|
+ // k1xisp_stat_mem_set_irq_flag(&pipe_dev->stats_nodes[param->hw_pipe_id], ISP_STAT_ID_AF, param->hw_pipe_id);
|
|
+ // }
|
|
+
|
|
+ spin_lock(&pipe_task->task_lock);
|
|
+ pipe_task->task_trigger = true;
|
|
+ spin_unlock(&pipe_task->task_lock);
|
|
+
|
|
+ return schedule_lower_irq;
|
|
+}
|
|
+
|
|
+int isp_pipe_afc_eof_irq_handler(struct isp_irq_func_params *param)
|
|
+{
|
|
+ int schedule_lower_irq = 1, frame_num = 0;
|
|
+ struct k1xisp_pipe_dev *pipe_dev = NULL;
|
|
+ struct isp_pipe_task *pipe_task = NULL;
|
|
+
|
|
+ ISP_DRV_CHECK_POINTER(param);
|
|
+ pipe_dev = param->pipe_dev;
|
|
+ frame_num = atomic_read(&pipe_dev->isp_irq_ctx.cur_frame_num);
|
|
+ pipe_task = &pipe_dev->pipe_tasks[ISP_PIPE_TASK_TYPE_AF];
|
|
+ atomic_set(&pipe_task->frame_num, frame_num);
|
|
+
|
|
+ if (pipe_dev->stats_node_cnt == 1)
|
|
+ k1xisp_stat_mem_set_irq_flag(pipe_dev->stats_nodes, ISP_STAT_ID_AF, param->hw_pipe_id);
|
|
+ // else {
|
|
+ // //combine pipe
|
|
+ // k1xisp_stat_mem_set_irq_flag(&pipe_dev->stats_nodes[param->hw_pipe_id], ISP_STAT_ID_AF, param->hw_pipe_id);
|
|
+ // }
|
|
+
|
|
+ spin_lock(&pipe_task->task_lock);
|
|
+ pipe_task->task_trigger = true;
|
|
+ spin_unlock(&pipe_task->task_lock);
|
|
+
|
|
+ return schedule_lower_irq;
|
|
+}
|
|
+
|
|
+int isp_pipe_irq_ctx_constructed(struct k1xisp_pipe_dev *pipe_dev, u32 pipedev_id)
|
|
+{
|
|
+ int ret = 0, i = 0, irq_bit = 0;
|
|
+ u32 hw_pipe_id = 0;
|
|
+ struct k1xisp_irq_context *irq_ctx = NULL;
|
|
+
|
|
+ PIPE_DEVID_TO_HW_PIPELINE_ID(pipedev_id, hw_pipe_id);
|
|
+ irq_ctx = &pipe_dev->isp_irq_ctx;
|
|
+ irq_ctx->hw_pipe_id = hw_pipe_id;
|
|
+
|
|
+ //define isp irq we need.
|
|
+ bitmap_zero(&irq_ctx->isp_irq_bitmap, ISP_IRQ_BIT_MAX_NUM);
|
|
+ memset(irq_ctx->isp_irq_handler, 0, sizeof(irq_ctx->isp_irq_handler));
|
|
+
|
|
+ for (i = 0; i < ISP_DRV_ARRAY_LENGTH(g_host_irq_handler_infos); i++) {
|
|
+ irq_bit = g_host_irq_handler_infos[i].irq_bit;
|
|
+ bitmap_set(&irq_ctx->isp_irq_bitmap, irq_bit, 1);
|
|
+ irq_ctx->isp_irq_handler[irq_bit] =
|
|
+ g_host_irq_handler_infos[i].irq_handler;
|
|
+ }
|
|
+
|
|
+ //init tasklet for the bottom of interrupt handler.
|
|
+ tasklet_init(&irq_ctx->isp_irq_tasklet, isp_pipe_tasklet_handler,
|
|
+ (unsigned long)pipe_dev);
|
|
+ tasklet_init(&irq_ctx->isp_dma_irq_tasklet, isp_dma_tasklet_handler,
|
|
+ (unsigned long)pipe_dev);
|
|
+
|
|
+ atomic_set(&irq_ctx->cur_frame_num, 0);
|
|
+ isp_log_dbg("construct pipe irq_ctx, bitmap=0x%lx", irq_ctx->isp_irq_bitmap);
|
|
+ return ret;
|
|
+}
|
|
+
|
|
+int isp_pipe_dev_stats_node_create(struct k1xisp_pipe_dev *pipe_dev)
|
|
+{
|
|
+ int ret = -1, count = 1;
|
|
+
|
|
+ if (pipe_dev) {
|
|
+ // if (pipe_dev->pipedev_id > ISP_PIPE_DEV_ID_1 && pipe_dev->pipedev_id < ISP_PIPE_DEV_ID_MAX) {
|
|
+ // count = ISP_HW_PIPELINE_ID_MAX;
|
|
+ // }
|
|
+
|
|
+ pipe_dev->stats_nodes =
|
|
+ kzalloc(sizeof(struct k1xisp_stats_node) * count, GFP_KERNEL);
|
|
+ if (unlikely(pipe_dev->stats_nodes == NULL)) {
|
|
+ isp_log_err("could not allocate memory for isp stats node!");
|
|
+ return -ENOMEM;
|
|
+ }
|
|
+
|
|
+ pipe_dev->stats_nodes->private_dev = (void *)pipe_dev;
|
|
+ pipe_dev->stats_nodes->notify_event = k1xisp_pipe_notify_event;
|
|
+ pipe_dev->stats_node_cnt = count;
|
|
+ ret = 0;
|
|
+ }
|
|
+
|
|
+ return ret;
|
|
+}
|
|
+
|
|
+int isp_pipe_task_constructed(struct isp_pipe_task *pipe_tasks)
|
|
+{
|
|
+ int ret = 0, task_type, i = 0;
|
|
+
|
|
+ for (task_type = ISP_PIPE_TASK_TYPE_SOF; task_type < ISP_PIPE_TASK_TYPE_MAX; task_type++) {
|
|
+ pipe_tasks[task_type].task_type = task_type;
|
|
+
|
|
+ memset(&pipe_tasks[task_type].vote_system, 0, sizeof(struct task_voting_system));
|
|
+ for (i = 0; i < TASK_VOTER_TYP_MAX; i++)
|
|
+ pipe_tasks[task_type].vote_system.voter_index[i] = -1;
|
|
+
|
|
+ if (ISP_PIPE_TASK_TYPE_AF == task_type) {
|
|
+ pipe_tasks[task_type].vote_system.sys_trigger_num = 2; //af eof and pdc eof
|
|
+ pipe_tasks[task_type].vote_system.voter_index[TASK_VOTER_AF_EOF] = 0;
|
|
+ pipe_tasks[task_type].vote_system.voter_index[TASK_VOTER_PDC_EOF] = 1;
|
|
+ spin_lock_init(&pipe_tasks[task_type].vote_system.vote_lock);
|
|
+ }
|
|
+ //define stat bitmap for task.
|
|
+ bitmap_zero(&pipe_tasks[task_type].stat_bitmap, ISP_STAT_ID_MAX);
|
|
+ for (i = 0; i < g_task_stat_map_infos[task_type].count; i++) {
|
|
+ bitmap_set(&pipe_tasks[task_type].stat_bitmap,
|
|
+ g_task_stat_map_infos[task_type].stat_ids[i], 1);
|
|
+ }
|
|
+ pipe_tasks[task_type].stat_bits_cnt = g_task_stat_map_infos[task_type].count;
|
|
+
|
|
+ // memset(&pipe_tasks[task_type].user_stat_cfg, 0, sizeof(struct pipe_task_stat_config));
|
|
+
|
|
+ init_completion(&pipe_tasks[task_type].wait_complete);
|
|
+ spin_lock_init(&pipe_tasks[task_type].task_lock);
|
|
+ spin_lock_init(&pipe_tasks[task_type].complete_lock);
|
|
+ }
|
|
+
|
|
+ return ret;
|
|
+}
|
|
+
|
|
+void isp_pipe_task_exit(struct isp_pipe_task *pipe_tasks)
|
|
+{
|
|
+ if (pipe_tasks)
|
|
+ memset(pipe_tasks, 0, sizeof(struct isp_pipe_task) * ISP_PIPE_TASK_TYPE_MAX);
|
|
+}
|
|
+
|
|
+int k1xisp_pipe_dev_init(struct platform_device *pdev,
|
|
+ struct k1xisp_pipe_dev *isp_pipe_dev[])
|
|
+{
|
|
+ int ret = 0, i = 0, j = 0;
|
|
+ struct k1xisp_pipe_dev *pipe_dev = NULL;
|
|
+ u32 hw_pipe_id = 0;
|
|
+
|
|
+ for (i = ISP_PIPE_DEV_ID_0; i < ISP_PIPE_DEV_ID_MAX; i++) {
|
|
+ pipe_dev =
|
|
+ devm_kzalloc(&pdev->dev, sizeof(struct k1xisp_pipe_dev), GFP_KERNEL);
|
|
+ if (unlikely(pipe_dev == NULL)) {
|
|
+ dev_err(&pdev->dev, "could not allocate memory");
|
|
+ ret = -ENOMEM;
|
|
+ return ret;
|
|
+ }
|
|
+
|
|
+ pipe_dev->dev_num = 0;
|
|
+ pipe_dev->pipedev_id = i;
|
|
+ isp_pipe_task_constructed(pipe_dev->pipe_tasks);
|
|
+ for (j = ISP_PIPE_TASK_TYPE_SOF; j < ISP_PIPE_TASK_TYPE_MAX; j++)
|
|
+ isp_pipe_task_job_init(&pipe_dev->pipe_tasks[j]);
|
|
+ isp_pipe_irq_ctx_constructed(pipe_dev, i);
|
|
+
|
|
+ //isp stats node create and init.
|
|
+ ret = isp_pipe_dev_stats_node_create(pipe_dev);
|
|
+ if (ret)
|
|
+ return ret;
|
|
+
|
|
+ PIPE_DEVID_TO_HW_PIPELINE_ID(i, hw_pipe_id);
|
|
+ for (j = 0; j < pipe_dev->stats_node_cnt; j++) {
|
|
+ if (j > 0)
|
|
+ hw_pipe_id = ISP_HW_PIPELINE_ID_1; //combine pipe, need stats of another pipeline
|
|
+ k1xisp_stat_node_init(&pipe_dev->stats_nodes[j], hw_pipe_id);
|
|
+ k1xisp_stat_job_flags_init(&pipe_dev->stats_nodes[j]);
|
|
+ }
|
|
+
|
|
+ mutex_init(&pipe_dev->isp_pipedev_mutex);
|
|
+ mutex_init(&pipe_dev->pipedev_capture_mutex);
|
|
+ isp_pipe_dev[i] = pipe_dev;
|
|
+ }
|
|
+
|
|
+ return ret;
|
|
+}
|
|
+
|
|
+int isp_pipe_irq_ctx_exit(struct k1xisp_irq_context *isp_irq_ctx)
|
|
+{
|
|
+ int ret = 0;
|
|
+
|
|
+ bitmap_zero(&isp_irq_ctx->isp_irq_bitmap, ISP_IRQ_BIT_MAX_NUM);
|
|
+ memset(isp_irq_ctx->isp_irq_handler, 0, sizeof(isp_irq_ctx->isp_irq_handler));
|
|
+ tasklet_kill(&isp_irq_ctx->isp_irq_tasklet);
|
|
+ tasklet_kill(&isp_irq_ctx->isp_dma_irq_tasklet);
|
|
+
|
|
+ return ret;
|
|
+}
|
|
+
|
|
+int k1xisp_pipe_dev_exit(struct platform_device *pdev,
|
|
+ struct k1xisp_pipe_dev *isp_pipe_dev[])
|
|
+{
|
|
+ int ret = 0, i = 0;
|
|
+ struct k1xisp_pipe_dev *pipe_dev = NULL;
|
|
+
|
|
+ for (i = ISP_PIPE_DEV_ID_0; i < ISP_PIPE_DEV_ID_MAX; i++) {
|
|
+ pipe_dev = isp_pipe_dev[i];
|
|
+ if (pipe_dev) {
|
|
+ kfree((void *)pipe_dev->stats_nodes);
|
|
+ pipe_dev->stats_nodes = NULL;
|
|
+ isp_pipe_irq_ctx_exit(&pipe_dev->isp_irq_ctx);
|
|
+ isp_pipe_task_exit(pipe_dev->pipe_tasks);
|
|
+ devm_kfree(&pdev->dev, (void *)pipe_dev);
|
|
+ mutex_destroy(&pipe_dev->isp_pipedev_mutex);
|
|
+ mutex_destroy(&pipe_dev->pipedev_capture_mutex);
|
|
+ isp_pipe_dev[i] = NULL;
|
|
+ }
|
|
+ }
|
|
+
|
|
+ return ret;
|
|
+}
|
|
diff --git a/drivers/media/platform/spacemit/camera/cam_isp/k1x_isp_pipe.h b/drivers/media/platform/spacemit/camera/cam_isp/k1x_isp_pipe.h
|
|
new file mode 100644
|
|
index 000000000000..111111111111
|
|
--- /dev/null
|
|
+++ b/drivers/media/platform/spacemit/camera/cam_isp/k1x_isp_pipe.h
|
|
@@ -0,0 +1,12 @@
|
|
+/* SPDX-License-Identifier: GPL-2.0 */
|
|
+#ifndef K1X_ISP_PIPE_DEV_H
|
|
+#define K1X_ISP_PIPE_DEV_H
|
|
+
|
|
+struct file_operations *k1xisp_pipe_get_fops(void);
|
|
+int k1xisp_pipe_dev_init(struct platform_device *pdev,
|
|
+ struct k1xisp_pipe_dev *isp_pipe_dev[]);
|
|
+int k1xisp_pipe_dev_exit(struct platform_device *pdev,
|
|
+ struct k1xisp_pipe_dev *isp_pipe_dev[]);
|
|
+void k1xisp_pipe_dev_irq_handler(void *irq_data);
|
|
+void k1xisp_pipe_dma_irq_handler(struct k1xisp_pipe_dev *pipe_dev, void *irq_data);
|
|
+#endif
|
|
diff --git a/drivers/media/platform/spacemit/camera/cam_isp/k1x_isp_reg.c b/drivers/media/platform/spacemit/camera/cam_isp/k1x_isp_reg.c
|
|
new file mode 100644
|
|
index 000000000000..111111111111
|
|
--- /dev/null
|
|
+++ b/drivers/media/platform/spacemit/camera/cam_isp/k1x_isp_reg.c
|
|
@@ -0,0 +1,171 @@
|
|
+// SPDX-License-Identifier: GPL-2.0
|
|
+/*
|
|
+ * Description on this file
|
|
+ *
|
|
+ * Copyright (C) 2023 SPACEMIT Micro Limited
|
|
+ */
|
|
+
|
|
+#include "k1x_isp_reg.h"
|
|
+#include "k1x_isp_drv.h"
|
|
+
|
|
+#include <linux/uaccess.h>
|
|
+#include <linux/slab.h>
|
|
+#include <linux/i2c.h>
|
|
+
|
|
+ulong __iomem g_base_addr = 0, g_end_addr = 0;
|
|
+
|
|
+void k1xisp_reg_set_base_addr(ulong __iomem base_reg_addr, ulong __iomem end_reg_addr)
|
|
+{
|
|
+ g_base_addr = base_reg_addr;
|
|
+ g_end_addr = end_reg_addr;
|
|
+}
|
|
+
|
|
+/*
|
|
+ * This function must make sure the addr is subtraction ISP_REG_BASE_OFFSET(0x30000)
|
|
+ */
|
|
+ulong k1xisp_reg_readl(ulong __iomem addr)
|
|
+{
|
|
+ ulong value = 0, temp_addr = 0;
|
|
+ ulong __iomem reg_addr = 0;
|
|
+
|
|
+ temp_addr = (addr & ISP_REG_MASK) - ISP_REG_BASE_OFFSET;
|
|
+ reg_addr = g_base_addr + temp_addr;
|
|
+
|
|
+ if (reg_addr > g_end_addr) {
|
|
+ isp_log_err
|
|
+ ("read the reg_addr(0x%lx) is beyond the range[0x%lx-0x%lx]!",
|
|
+ reg_addr, g_base_addr, g_end_addr);
|
|
+ return 0;
|
|
+ }
|
|
+
|
|
+ value = readl((void __iomem *)reg_addr);
|
|
+// isp_log_dbg("reg[addr:0x%lx] read, reg_addr=0x%lx!", addr, reg_addr);
|
|
+
|
|
+ return value;
|
|
+}
|
|
+
|
|
+/*
|
|
+* This function must make sure the addr is subtraction ISP_REG_BASE_OFFSET(0x30000)
|
|
+*/
|
|
+int k1xisp_reg_writel(ulong __iomem addr, ulong value, ulong mask)
|
|
+{
|
|
+ ulong temp_value = 0, temp_addr = 0;
|
|
+ ulong __iomem reg_addr = 0;
|
|
+ static DEFINE_RATELIMIT_STATE(rs, 5 * HZ, 20);
|
|
+
|
|
+ if (0 == mask && __ratelimit(&rs)) {
|
|
+ isp_log_err("reg[addr:0x%lx] mask is zeor!", addr);
|
|
+ return -1;
|
|
+ }
|
|
+
|
|
+ temp_addr = (addr & ISP_REG_MASK) - ISP_REG_BASE_OFFSET;
|
|
+ reg_addr = g_base_addr + temp_addr;
|
|
+
|
|
+ if (reg_addr < g_base_addr || reg_addr > g_end_addr) {
|
|
+ isp_log_err
|
|
+ ("write the reg_addr(0x%lx) is beyond the range[0x%lx-0x%lx]!",
|
|
+ reg_addr, g_base_addr, g_end_addr);
|
|
+ return -1;
|
|
+ }
|
|
+
|
|
+ if (mask != 0xffffffff) {
|
|
+ temp_value = readl((void __iomem *)reg_addr);
|
|
+ temp_value = (temp_value & ~mask) | (value & mask);
|
|
+ } else
|
|
+ temp_value = value;
|
|
+
|
|
+ writel(temp_value, (void __iomem *)reg_addr);
|
|
+// isp_log_dbg("reg[addr:0x%lx] write=0x%lx, mask=0x%lx, temp_addr=0x%lx, reg_addr=0x%lx!", addr, value, mask, temp_addr, reg_addr);
|
|
+
|
|
+#ifdef ISP_REG_DEBUG
|
|
+ ulong after = 0;
|
|
+ after = readl(reg_addr);
|
|
+ if (after != temp_value) {
|
|
+ isp_log_info
|
|
+ ("reg[addr:0x%llx] write may be failed, write=0x%x, after=0x%x!",
|
|
+ addr, value, after);
|
|
+ }
|
|
+#endif
|
|
+
|
|
+ return 0;
|
|
+}
|
|
+
|
|
+int k1xisp_reg_write_single(struct isp_reg_unit *reg_unit)
|
|
+{
|
|
+ int ret = 0;
|
|
+
|
|
+ if (!reg_unit)
|
|
+ return -EINVAL;
|
|
+
|
|
+ ret = k1xisp_reg_writel(reg_unit->reg_addr, reg_unit->reg_value, reg_unit->reg_mask);
|
|
+
|
|
+ return ret;
|
|
+}
|
|
+
|
|
+int k1xisp_reg_write_brust(void *reg_data, u32 reg_size, bool user_space,
|
|
+ void *kvir_addr)
|
|
+{
|
|
+ int i = 0, ret = 0;
|
|
+ struct isp_reg_unit *reg_list = NULL;
|
|
+
|
|
+ if (!reg_data)
|
|
+ return -EINVAL;
|
|
+
|
|
+ if (user_space) {
|
|
+ reg_list = (struct isp_reg_unit *)kvir_addr;
|
|
+ if (!reg_list) {
|
|
+ isp_log_err("the reg mem hasn't been kmap!");
|
|
+ return -EPERM;
|
|
+ }
|
|
+ } else {
|
|
+ reg_list = (struct isp_reg_unit *)reg_data;
|
|
+ }
|
|
+
|
|
+ for (i = 0; i < reg_size; i++) {
|
|
+ // isp_log_info("the reg_addr(0x%x),value=0x%x, mask=0x%x!", reg_list->reg_addr, reg_list->reg_value, reg_list->reg_mask);
|
|
+ k1xisp_reg_writel(reg_list->reg_addr, reg_list->reg_value, reg_list->reg_mask);
|
|
+ reg_list++;
|
|
+ }
|
|
+
|
|
+ return ret;
|
|
+}
|
|
+
|
|
+int k1xisp_reg_read_brust(struct isp_regs_info *regs_info)
|
|
+{
|
|
+ int i = 0, ret = 0;
|
|
+ struct isp_reg_unit *reg_list = NULL;
|
|
+ void *temp_buf = NULL;
|
|
+ ulong buf_size = 0;
|
|
+
|
|
+ if (!regs_info)
|
|
+ return -EINVAL;
|
|
+
|
|
+ buf_size = regs_info->size * sizeof(struct isp_reg_unit);
|
|
+ temp_buf = kmalloc(buf_size, GFP_KERNEL);
|
|
+ if (NULL == temp_buf)
|
|
+ return -ENOMEM;
|
|
+
|
|
+ reg_list = (struct isp_reg_unit *)temp_buf;
|
|
+ if (copy_from_user((void *)reg_list, regs_info->data, buf_size)) {
|
|
+ isp_log_err("copy isp reg from user failed!");
|
|
+ kfree(temp_buf);
|
|
+ temp_buf = NULL;
|
|
+ return -EPERM;
|
|
+ }
|
|
+
|
|
+ for (i = 0; i < regs_info->size; i++) {
|
|
+ reg_list->reg_value = k1xisp_reg_readl(reg_list->reg_addr);
|
|
+ // isp_log_info("the reg_addr(0x%x),value=0x%x, mask=0x%x!", reg_list->reg_addr, reg_list->reg_value, reg_list->reg_mask);
|
|
+ reg_list++;
|
|
+ }
|
|
+
|
|
+ if (copy_to_user(regs_info->data, temp_buf, buf_size)) {
|
|
+ isp_log_err("copy isp reg to user failed!");
|
|
+ ret = -EPERM;
|
|
+ }
|
|
+
|
|
+ kfree(temp_buf);
|
|
+ temp_buf = NULL;
|
|
+ return ret;
|
|
+}
|
|
+
|
|
diff --git a/drivers/media/platform/spacemit/camera/cam_isp/k1x_isp_reg.h b/drivers/media/platform/spacemit/camera/cam_isp/k1x_isp_reg.h
|
|
new file mode 100644
|
|
index 000000000000..111111111111
|
|
--- /dev/null
|
|
+++ b/drivers/media/platform/spacemit/camera/cam_isp/k1x_isp_reg.h
|
|
@@ -0,0 +1,117 @@
|
|
+/* SPDX-License-Identifier: GPL-2.0 */
|
|
+#ifndef K1X_ISP_REG_H
|
|
+#define K1X_ISP_REG_H
|
|
+
|
|
+#include <media/k1x/k1x_isp_drv_uapi.h>
|
|
+
|
|
+#include <linux/types.h>
|
|
+
|
|
+#define ISP_REG_MASK 0x7ffff
|
|
+#define ISP_REG_BASE_OFFSET 0x30000
|
|
+
|
|
+#define ISP_REG_OFFSET_TOP_PIPE(n) (0x31700 + 0x8000 * (n))
|
|
+#define ISP_REG_OFFSET_GLOBALRESET 0x158
|
|
+#define ISP_REG_IRQ_STATUS 0x80
|
|
+#define ISP_REG_IRQ_MASK 0x84
|
|
+#define ISP_RE_IRQ_RAW 0x88
|
|
+#define ISP_REG_IDI_GAP_OFFSET 0x134
|
|
+
|
|
+//RGBIR AVG
|
|
+#define ISP_REG_OFFSET_AVG0 0x150
|
|
+#define ISP_REG_OFFSET_AVG1 0x154
|
|
+
|
|
+//dma base addr 0xc0241000, isp base 0xc0230000
|
|
+#define REG_ISP_OFFSET_DMASYS 0x41000
|
|
+//dma mux ctrl
|
|
+#define REG_ISP_OFFSET_DMA_MUX_CTRL 0x40038
|
|
+//dma channel master
|
|
+#define REG_ISP_DMA_CHANNEL_MASTER(n) (0x410d0 + 4 * (n))
|
|
+//pdc dma master
|
|
+#define REG_ISP_PDC_DMA_MASTER (REG_ISP_OFFSET_DMASYS + 0x108)
|
|
+
|
|
+//write pitch
|
|
+#define REG_ISP_DMA_CHANNEL_WR_PITCH(n) (0x41098 + 4 * (n))
|
|
+//dma irq mask
|
|
+#define REG_ISP_DMA_IRQ_MASK1 (REG_ISP_OFFSET_DMASYS + 0x124) //ch0~ch10(except ch10 err)
|
|
+#define REG_ISP_DMA_IRQ_MASK2 (REG_ISP_OFFSET_DMASYS + 0x128) //ch10(only err)~ch15, read ch0~ch2
|
|
+
|
|
+//dma channel addr, ch0 ~ ch13
|
|
+#define REG_ISP_DMA_Y_ADDR(n) (REG_ISP_OFFSET_DMASYS + ((n) * 0x8))
|
|
+
|
|
+//dma channel high addr, ch0 ~ ch13
|
|
+#define REG_ISP_DMA_Y_HIGH_ADDR(n) (REG_ISP_OFFSET_DMASYS + 0X158 + ((n) * 0x8))
|
|
+
|
|
+//pdc dma channel addr, p0 and p1
|
|
+#define REG_ISP_PDC_DMA_BASE_ADDR(n) (REG_ISP_OFFSET_DMASYS + 0x70 + ((n) * 0x10))
|
|
+#define REG_ISP_PDC_DMA_HIGH_BASE_ADDR(n) (REG_ISP_OFFSET_DMASYS + 0X1c8 + ((n) * 0x10))
|
|
+
|
|
+//stat mem result
|
|
+#define REG_STAT_AEM_RESULT_MEM(n) (0x34000 + ((n) * 0x8000) + 0x50)
|
|
+#define REG_STAT_AFC_RESULT_MEM(n) (0x32000 + ((n) * 0x8000) + 0x2c)
|
|
+#define REG_STAT_WBM_RESULT_MEM(n) (0x32800 + ((n) * 0x8000) + 0x46c)
|
|
+#define REG_STAT_LTM_RESULT_MEM(n) (0x35000 + ((n) * 0x8000) + 0xc14)
|
|
+
|
|
+#define REG_ISP_PDC_BASE(n) (0x30100 + ((n) * 0x8000))
|
|
+
|
|
+/*isp irq info*/
|
|
+enum isp_host_irq_bit {
|
|
+ ISP_IRQ_BIT_PIPE_SOF = 0,
|
|
+ ISP_IRQ_BIT_PDC_SOF,
|
|
+ ISP_IRQ_BIT_PDF_SOF,
|
|
+ ISP_IRQ_BIT_BPC_SOF,
|
|
+ ISP_IRQ_BIT_LSC_SOF,
|
|
+ ISP_IRQ_BIT_DENOISE_SOF,
|
|
+ ISP_IRQ_BIT_BINNING_SOF,
|
|
+ ISP_IRQ_BIT_DEMOSAIC_SOF,
|
|
+ ISP_IRQ_BIT_HDR_SOF,
|
|
+ ISP_IRQ_BIT_LTM_SOF,
|
|
+ ISP_IRQ_BIT_MCU_TRIGGER,
|
|
+ ISP_IRQ_BIT_STAT_ERR, // 11
|
|
+ ISP_IRQ_BIT_SDE_SOF, // 12
|
|
+ ISP_IRQ_BIT_SDE_EOF, // 13
|
|
+ ISP_IRQ_BIT_RESET_DONE,
|
|
+ ISP_IRQ_BIT_IDI_SHADOW_DONE,
|
|
+ ISP_IRQ_BIT_PIPE_EOF,
|
|
+ ISP_IRQ_BIT_PDC_EOF,
|
|
+ ISP_IRQ_BIT_PDF_EOF,
|
|
+ ISP_IRQ_BIT_BPC_EOF,
|
|
+ ISP_IRQ_BIT_LSC_EOF,
|
|
+ ISP_IRQ_BIT_DENOISE_EOF,
|
|
+ ISP_IRQ_BIT_BINNING_EOF,
|
|
+ ISP_IRQ_BIT_DEMOSAIC_EOF,
|
|
+ ISP_IRQ_BIT_HDR_EOF,
|
|
+ ISP_IRQ_BIT_LTM_EOF, //25
|
|
+ ISP_IRQ_BIT_AEM_EOF,
|
|
+ ISP_IRQ_BIT_WBM_EOF,
|
|
+ ISP_IRQ_BIT_LSCM_EOF,
|
|
+ ISP_IRQ_BIT_AFC_EOF, // 29
|
|
+ ISP_IRQ_BIT_FICKER_EOF,
|
|
+ ISP_IRQ_BIT_ISP_ERR,
|
|
+ ISP_IRQ_BIT_MAX_NUM,
|
|
+};
|
|
+
|
|
+void k1xisp_reg_set_base_addr(ulong __iomem base_reg_addr, ulong __iomem end_reg_addr);
|
|
+
|
|
+/**
|
|
+ * k1xisp_reg_write_brust - write some registers together.
|
|
+ * @reg_data: pointer to reg memory contains some struct isp_reg_unit, come from user space or ourself.
|
|
+ * @reg_size: the number of struct isp_reg_unit.
|
|
+ * @user_space: true if the regs come from user space.
|
|
+ * @kvir_addr: the kernel virtual addr for reg memory which alloced by userspace. Only valid when
|
|
+ * user_space is true.
|
|
+ *
|
|
+ * The return values:
|
|
+ * 0 : success.
|
|
+ * <0 : failed.
|
|
+ */
|
|
+int k1xisp_reg_write_brust(void *reg_data, u32 reg_size, bool user_space, void *kvir_addr);
|
|
+
|
|
+int k1xisp_reg_read_brust(struct isp_regs_info *regs_info);
|
|
+
|
|
+int k1xisp_reg_write_single(struct isp_reg_unit *reg_unit);
|
|
+
|
|
+ulong k1xisp_reg_readl(ulong __iomem addr);
|
|
+
|
|
+int k1xisp_reg_writel(ulong __iomem addr, ulong value, ulong mask);
|
|
+
|
|
+#endif //K1X_ISP_REG_H
|
|
diff --git a/drivers/media/platform/spacemit/camera/cam_isp/k1x_isp_statistic.c b/drivers/media/platform/spacemit/camera/cam_isp/k1x_isp_statistic.c
|
|
new file mode 100644
|
|
index 000000000000..111111111111
|
|
--- /dev/null
|
|
+++ b/drivers/media/platform/spacemit/camera/cam_isp/k1x_isp_statistic.c
|
|
@@ -0,0 +1,1255 @@
|
|
+// SPDX-License-Identifier: GPL-2.0
|
|
+/*
|
|
+ * Description on this file
|
|
+ *
|
|
+ * Copyright (C) 2023 SPACEMIT Micro Limited
|
|
+ */
|
|
+#include "k1x_isp_drv.h"
|
|
+#include "k1x_isp_reg.h"
|
|
+#include <cam_plat.h>
|
|
+
|
|
+#include <linux/dma-buf.h>
|
|
+
|
|
+#define PIPE0_EIS_DMA_CH_ID 9
|
|
+#define PIPE1_EIS_DMA_CH_ID 10
|
|
+
|
|
+#define PIPE0_PDC_DMA_CH_ID 14
|
|
+#define PIPE1_PDC_DMA_CH_ID 15
|
|
+
|
|
+static struct stat_dma_irq_bits g_dma_irq_bits_p0[ISP_STAT_THROUGH_DMA_COUNT] = {
|
|
+ { ISP_STAT_ID_PDC, PIPE0_PDC_DMA_CH_ID, { 44, 43, 42} }, //> 32 means locate at another dma status register.
|
|
+ { ISP_STAT_ID_EIS, PIPE0_EIS_DMA_CH_ID, { 29, 28, 27} },
|
|
+};
|
|
+
|
|
+static struct stat_dma_irq_bits g_dma_irq_bits_p1[ISP_STAT_THROUGH_DMA_COUNT] = {
|
|
+ { ISP_STAT_ID_PDC, PIPE1_PDC_DMA_CH_ID, { 47, 46, 45} }, //> 32 means locate at another dma status register.
|
|
+ { ISP_STAT_ID_EIS, PIPE1_EIS_DMA_CH_ID, { 32, 31, 30} }, //> 32 means locate at another dma status register.
|
|
+};
|
|
+
|
|
+static int g_mem_stat_ids[ISP_STAT_THROUGH_MEM_COUNT] = {
|
|
+ ISP_STAT_ID_AE,
|
|
+ ISP_STAT_ID_AWB,
|
|
+ ISP_STAT_ID_AF,
|
|
+ ISP_STAT_ID_LTM,
|
|
+};
|
|
+
|
|
+int isp_stat_dma_sof_handler(struct k1xisp_stats_node *stats_node, u32 stat_id,
|
|
+ u32 dma_ch_id, u32 frame_id);
|
|
+
|
|
+void k1xisp_stat_bufqueue_init(struct k1xisp_stats_node *isp_stats_node)
|
|
+{
|
|
+ int i = 0;
|
|
+ struct isp_stat_buffer_queue *stat_bufqueue = NULL;
|
|
+
|
|
+ for (i = 0; i < ISP_STAT_ID_MAX; i++) {
|
|
+ /* array of bufferque is AE,AWB,EIS,AF */
|
|
+ stat_bufqueue = &isp_stats_node->stat_bufqueue[i];
|
|
+ stat_bufqueue->stat_id = i;
|
|
+ stat_bufqueue->buf_count = 0;
|
|
+ stat_bufqueue->busy_bufcnt = 0;
|
|
+ stat_bufqueue->idle_bufcnt = 0;
|
|
+ stat_bufqueue->fd_memory = true;
|
|
+ if (i != ISP_STAT_ID_PDC && i != ISP_STAT_ID_EIS)
|
|
+ stat_bufqueue->fill_by_cpu = true;
|
|
+ else
|
|
+ stat_bufqueue->fill_by_cpu = false;
|
|
+ spin_lock_init(&stat_bufqueue->queue_lock);
|
|
+ INIT_LIST_HEAD(&stat_bufqueue->busy_buflist);
|
|
+ INIT_LIST_HEAD(&stat_bufqueue->idle_buflist);
|
|
+ memset((void *)stat_bufqueue->buf_info, 0, sizeof(stat_bufqueue->buf_info));
|
|
+
|
|
+ //done list init.
|
|
+ isp_stats_node->stat_done_info[i].done_cnt = 0;
|
|
+ INIT_LIST_HEAD(&isp_stats_node->stat_done_info[i].done_list);
|
|
+ spin_lock_init(&isp_stats_node->stat_done_info[i].done_lock);
|
|
+ }
|
|
+}
|
|
+
|
|
+int k1xisp_stat_job_flags_init(struct k1xisp_stats_node *stats_node)
|
|
+{
|
|
+ int i, j;
|
|
+
|
|
+ ISP_DRV_CHECK_POINTER(stats_node);
|
|
+ //dma irq flag init
|
|
+ for (i = 0; i < ISP_STAT_THROUGH_DMA_COUNT; i++) {
|
|
+ for (j = 0; j < ISP_DMA_IRQ_TYPE_NUM; j++) {
|
|
+ stats_node->dma_irq_info[i].irq_flag[j] = false;
|
|
+ stats_node->dma_irq_info[i].dynamic_switch = 0;
|
|
+ stats_node->dma_irq_info[i].dynamic_trigger = 0;
|
|
+ }
|
|
+ }
|
|
+
|
|
+ //mem stat irq flag init.
|
|
+ for (i = 0; i < ISP_STAT_THROUGH_MEM_COUNT; i++)
|
|
+ stats_node->mem_irq_info[i].start_read = false;
|
|
+
|
|
+ return 0;
|
|
+}
|
|
+
|
|
+int k1xisp_stat_node_init(struct k1xisp_stats_node *stats_node, u32 hw_pipe_id)
|
|
+{
|
|
+ int i = 0, stat_id = 0;
|
|
+
|
|
+ ISP_DRV_CHECK_POINTER(stats_node);
|
|
+
|
|
+ stats_node->hw_pipe_id = hw_pipe_id;
|
|
+ for (i = 0; i < ISP_STAT_ID_MAX; i++) {
|
|
+ stats_node->stat_active[i] = true;
|
|
+ stats_node->mem_irq_index[i] = -1;
|
|
+ }
|
|
+
|
|
+ k1xisp_stat_bufqueue_init(stats_node);
|
|
+
|
|
+ //irq bit init.
|
|
+ if (ISP_HW_PIPELINE_ID_0 == hw_pipe_id)
|
|
+ memcpy(stats_node->stat_dma_irq_bitmap, g_dma_irq_bits_p0, sizeof(g_dma_irq_bits_p0));
|
|
+ else if (ISP_HW_PIPELINE_ID_1 == hw_pipe_id)
|
|
+ memcpy(stats_node->stat_dma_irq_bitmap, g_dma_irq_bits_p1, sizeof(g_dma_irq_bits_p1));
|
|
+ //dma irq info init.
|
|
+ memset(&stats_node->dma_irq_info, 0, sizeof(stats_node->dma_irq_info));
|
|
+ for (i = 0; i < ISP_STAT_THROUGH_DMA_COUNT; i++) {
|
|
+ stats_node->dma_irq_info[i].stat_id =
|
|
+ stats_node->stat_dma_irq_bitmap[i].stat_id;
|
|
+ spin_lock_init(&stats_node->dma_irq_info[i].flag_lock);
|
|
+ }
|
|
+
|
|
+ //mem stat irq info init.
|
|
+ for (i = 0; i < ISP_STAT_THROUGH_MEM_COUNT; i++) {
|
|
+ stat_id = g_mem_stat_ids[i];
|
|
+ stats_node->mem_irq_info[i].stat_id = stat_id;
|
|
+ stats_node->mem_irq_index[stat_id] = i;
|
|
+ spin_lock_init(&stats_node->mem_irq_info[i].mem_flag_lock);
|
|
+ }
|
|
+
|
|
+ return 0;
|
|
+}
|
|
+
|
|
+int k1xisp_stat_node_streamon_dma_port(struct k1xisp_stats_node *stats_node)
|
|
+{
|
|
+ int ret = 0, array_index = 0, dma_cnt, dma_ch_id;
|
|
+ struct isp_reg_unit reg_unit[10] = { 0 };
|
|
+ struct isp_stat_buffer_queue *stat_bufqueue = NULL;
|
|
+ u32 reg_tmp_value = 0;
|
|
+
|
|
+ ISP_DRV_CHECK_POINTER(stats_node);
|
|
+ for (dma_cnt = 0; dma_cnt < ISP_STAT_THROUGH_DMA_COUNT; dma_cnt++) { // normal is 2.
|
|
+ dma_ch_id = stats_node->stat_dma_irq_bitmap[dma_cnt].dma_ch_id;
|
|
+ if (PIPE0_EIS_DMA_CH_ID == dma_ch_id || PIPE1_EIS_DMA_CH_ID == dma_ch_id) {
|
|
+ //dma mux only for eis
|
|
+ reg_unit[array_index].reg_addr = REG_ISP_OFFSET_DMA_MUX_CTRL;
|
|
+ reg_unit[array_index].reg_value = 3 << (6 + ((dma_ch_id - PIPE0_EIS_DMA_CH_ID) * 2));
|
|
+ reg_unit[array_index].reg_mask = reg_unit[array_index].reg_value;
|
|
+ array_index++;
|
|
+
|
|
+ //pitch
|
|
+ stat_bufqueue = &stats_node->stat_bufqueue[ISP_STAT_ID_EIS];
|
|
+ if (0 == stat_bufqueue->buf_count) {
|
|
+ isp_log_err("no buffer for eis stat queue on pipe%d!", stats_node->hw_pipe_id);
|
|
+ return -EPERM;
|
|
+ }
|
|
+ reg_tmp_value = stat_bufqueue->buf_info[0].buf_planes[0].pitch & 0xffff;
|
|
+ reg_tmp_value |= (stat_bufqueue->buf_info[0].buf_planes[1].pitch << 16) & 0xffff0000;
|
|
+ reg_unit[array_index].reg_addr = REG_ISP_DMA_CHANNEL_WR_PITCH(dma_ch_id);
|
|
+ reg_unit[array_index].reg_value = reg_tmp_value;
|
|
+ reg_unit[array_index].reg_mask = 0xffffffff;
|
|
+ array_index++;
|
|
+ }
|
|
+
|
|
+ //for the first frame addr and ready.
|
|
+ ret = isp_stat_dma_sof_handler(stats_node, stats_node->stat_dma_irq_bitmap[dma_cnt].stat_id, dma_ch_id, 0);
|
|
+ if (ret)
|
|
+ return ret;
|
|
+ }
|
|
+
|
|
+ if (array_index > 0)
|
|
+ k1xisp_reg_write_brust(reg_unit, array_index, false, NULL);
|
|
+
|
|
+ return ret;
|
|
+}
|
|
+
|
|
+int isp_stat_node_clear_dma_ready(u32 dma_ch_id)
|
|
+{
|
|
+ int ret = 0, i, array_index = 0;
|
|
+ u32 reg_base_addr = 0, plane_count = 0;
|
|
+ struct isp_reg_unit reg_unit[10] = { 0 };
|
|
+
|
|
+ if (dma_ch_id < PIPE0_PDC_DMA_CH_ID) {
|
|
+ //dma channel y and uv addr.
|
|
+ reg_base_addr = REG_ISP_DMA_Y_ADDR(dma_ch_id);
|
|
+ plane_count = 2;
|
|
+ } else if (dma_ch_id <= PIPE1_PDC_DMA_CH_ID) {
|
|
+ //pdc dma
|
|
+ reg_base_addr = REG_ISP_PDC_DMA_BASE_ADDR(dma_ch_id - PIPE0_PDC_DMA_CH_ID);
|
|
+ plane_count = 4;
|
|
+ } else {
|
|
+ isp_log_err("unknown this dma ch ID:%d!", dma_ch_id);
|
|
+ return -EINVAL;
|
|
+ }
|
|
+
|
|
+ for (i = 0; i < plane_count; i++) {
|
|
+ reg_unit[array_index].reg_addr = reg_base_addr + (i * 0x4);
|
|
+ reg_unit[array_index].reg_value = 0;
|
|
+ reg_unit[array_index].reg_mask = 0xffffffff;
|
|
+ array_index++;
|
|
+ }
|
|
+
|
|
+ //dma channel ready.
|
|
+ if (dma_ch_id < PIPE0_PDC_DMA_CH_ID) {
|
|
+ reg_unit[array_index].reg_addr = REG_ISP_DMA_CHANNEL_MASTER(dma_ch_id);
|
|
+ reg_unit[array_index].reg_value = 0;
|
|
+ reg_unit[array_index].reg_mask = BIT(31);
|
|
+ array_index++;
|
|
+ } else if (dma_ch_id <= PIPE1_PDC_DMA_CH_ID) {
|
|
+ reg_unit[array_index].reg_addr = REG_ISP_PDC_DMA_MASTER;
|
|
+ if (PIPE0_PDC_DMA_CH_ID == dma_ch_id)
|
|
+ reg_unit[array_index].reg_mask = BIT(30);
|
|
+ else
|
|
+ reg_unit[array_index].reg_mask = BIT(31);
|
|
+ reg_unit[array_index].reg_value = 0;
|
|
+ array_index++;
|
|
+ }
|
|
+
|
|
+ k1xisp_reg_write_brust(reg_unit, array_index, false, NULL);
|
|
+
|
|
+ return ret;
|
|
+}
|
|
+
|
|
+int k1xisp_stat_node_streamoff_dma_port(struct k1xisp_stats_node *stats_node)
|
|
+{
|
|
+ int ret = 0, array_index = 0, dma_cnt, dma_ch_id;
|
|
+ struct isp_reg_unit reg_unit[10] = { 0 };
|
|
+
|
|
+ ISP_DRV_CHECK_POINTER(stats_node);
|
|
+ for (dma_cnt = 0; dma_cnt < ISP_STAT_THROUGH_DMA_COUNT; dma_cnt++) { // normal is 2.
|
|
+ dma_ch_id = stats_node->stat_dma_irq_bitmap[dma_cnt].dma_ch_id;
|
|
+ if (PIPE0_EIS_DMA_CH_ID == dma_ch_id
|
|
+ || PIPE1_EIS_DMA_CH_ID == dma_ch_id) {
|
|
+ //dma mux only for eis
|
|
+ reg_unit[array_index].reg_addr = REG_ISP_OFFSET_DMA_MUX_CTRL;
|
|
+ reg_unit[array_index].reg_value = 0;
|
|
+ reg_unit[array_index].reg_mask =
|
|
+ 3 << (6 + ((dma_ch_id - PIPE0_EIS_DMA_CH_ID) * 2));
|
|
+ array_index++;
|
|
+
|
|
+ //pitch
|
|
+ reg_unit[array_index].reg_addr =
|
|
+ REG_ISP_DMA_CHANNEL_WR_PITCH(dma_ch_id);
|
|
+ reg_unit[array_index].reg_value = 0;
|
|
+ reg_unit[array_index].reg_mask = 0xffffffff;
|
|
+ array_index++;
|
|
+ }
|
|
+ //clear addr and ready.
|
|
+ isp_stat_node_clear_dma_ready(dma_ch_id);
|
|
+ }
|
|
+
|
|
+ if (array_index > 0)
|
|
+ k1xisp_reg_write_brust(reg_unit, array_index, false, NULL);
|
|
+
|
|
+ return ret;
|
|
+}
|
|
+
|
|
+int k1xisp_stat_node_cfg_dma_irqmask(struct k1xisp_stats_node *stats_node)
|
|
+{
|
|
+ int ret = 0, array_index = 0, dma_cnt, dma_ch_id;
|
|
+ struct isp_reg_unit reg_unit[10] = { 0 };
|
|
+
|
|
+ ISP_DRV_CHECK_POINTER(stats_node);
|
|
+ for (dma_cnt = 0; dma_cnt < ISP_STAT_THROUGH_DMA_COUNT; dma_cnt++) { // normal is 2.
|
|
+ dma_ch_id = stats_node->stat_dma_irq_bitmap[dma_cnt].dma_ch_id;
|
|
+ if (PIPE0_EIS_DMA_CH_ID == dma_ch_id) {
|
|
+ reg_unit[array_index].reg_addr = REG_ISP_DMA_IRQ_MASK1;
|
|
+ reg_unit[array_index].reg_value = BIT(27) | BIT(28) | BIT(29);
|
|
+ reg_unit[array_index].reg_mask =
|
|
+ reg_unit[array_index].reg_value;
|
|
+ } else if (PIPE1_EIS_DMA_CH_ID == dma_ch_id) {
|
|
+ reg_unit[array_index].reg_addr = REG_ISP_DMA_IRQ_MASK1;
|
|
+ reg_unit[array_index].reg_value = BIT(30) | BIT(31);
|
|
+ reg_unit[array_index].reg_mask =
|
|
+ reg_unit[array_index].reg_value;
|
|
+ array_index++;
|
|
+ //error
|
|
+ reg_unit[array_index].reg_addr = REG_ISP_DMA_IRQ_MASK2;
|
|
+ reg_unit[array_index].reg_value = BIT(0);
|
|
+ reg_unit[array_index].reg_mask =
|
|
+ reg_unit[array_index].reg_value;
|
|
+ } else if (PIPE0_PDC_DMA_CH_ID == dma_ch_id) {
|
|
+ reg_unit[array_index].reg_addr = REG_ISP_DMA_IRQ_MASK2;
|
|
+ reg_unit[array_index].reg_value = BIT(10) | BIT(11) | BIT(12);
|
|
+ reg_unit[array_index].reg_mask =
|
|
+ reg_unit[array_index].reg_value;
|
|
+ } else if (PIPE1_PDC_DMA_CH_ID == dma_ch_id) {
|
|
+ reg_unit[array_index].reg_addr = REG_ISP_DMA_IRQ_MASK2;
|
|
+ reg_unit[array_index].reg_value = BIT(13) | BIT(14) | BIT(15);
|
|
+ reg_unit[array_index].reg_mask =
|
|
+ reg_unit[array_index].reg_value;
|
|
+ }
|
|
+
|
|
+ array_index++;
|
|
+ }
|
|
+
|
|
+ k1xisp_reg_write_brust(reg_unit, array_index, false, NULL);
|
|
+
|
|
+ return ret;
|
|
+}
|
|
+
|
|
+int k1xisp_stat_node_clear_dma_irqmask(struct k1xisp_stats_node *stats_node)
|
|
+{
|
|
+ int ret = 0, array_index = 0, dma_cnt, dma_ch_id;
|
|
+ struct isp_reg_unit reg_unit[10] = { 0 };
|
|
+
|
|
+ ISP_DRV_CHECK_POINTER(stats_node);
|
|
+ for (dma_cnt = 0; dma_cnt < ISP_STAT_THROUGH_DMA_COUNT; dma_cnt++) { // normal is 2.
|
|
+ dma_ch_id = stats_node->stat_dma_irq_bitmap[dma_cnt].dma_ch_id;
|
|
+ if (PIPE0_EIS_DMA_CH_ID == dma_ch_id) {
|
|
+ reg_unit[array_index].reg_addr = REG_ISP_DMA_IRQ_MASK1;
|
|
+ reg_unit[array_index].reg_mask = BIT(27) | BIT(28) | BIT(29);
|
|
+ } else if (PIPE1_EIS_DMA_CH_ID == dma_ch_id) {
|
|
+ reg_unit[array_index].reg_addr = REG_ISP_DMA_IRQ_MASK1;
|
|
+ reg_unit[array_index].reg_mask = BIT(30) | BIT(31);
|
|
+ array_index++;
|
|
+ //error
|
|
+ reg_unit[array_index].reg_addr = REG_ISP_DMA_IRQ_MASK2;
|
|
+ reg_unit[array_index].reg_mask = BIT(0);
|
|
+ } else if (PIPE0_PDC_DMA_CH_ID == dma_ch_id) {
|
|
+ reg_unit[array_index].reg_addr = REG_ISP_DMA_IRQ_MASK2;
|
|
+ reg_unit[array_index].reg_mask = BIT(10) | BIT(11) | BIT(12);
|
|
+ } else if (PIPE1_PDC_DMA_CH_ID == dma_ch_id) {
|
|
+ reg_unit[array_index].reg_addr = REG_ISP_DMA_IRQ_MASK2;
|
|
+ reg_unit[array_index].reg_mask = BIT(13) | BIT(14) | BIT(15);
|
|
+ }
|
|
+
|
|
+ reg_unit[array_index].reg_value = 0;
|
|
+ array_index++;
|
|
+ }
|
|
+
|
|
+ k1xisp_reg_write_brust(reg_unit, array_index, false, NULL);
|
|
+
|
|
+ return ret;
|
|
+}
|
|
+
|
|
+//thread context
|
|
+int k1xisp_stat_dma_dynamic_enable(struct k1xisp_stats_node *stats_node, u32 stat_id,
|
|
+ u32 enable)
|
|
+{
|
|
+ int ret = 0, i;
|
|
+
|
|
+ ISP_DRV_CHECK_POINTER(stats_node);
|
|
+ ISP_DRV_CHECK_PARAMETERS(stat_id, ISP_STAT_ID_PDC, ISP_STAT_ID_EIS,
|
|
+ "dma stat id");
|
|
+
|
|
+ for (i = 0; i < ISP_STAT_THROUGH_DMA_COUNT; i++) {
|
|
+ if (stat_id == stats_node->dma_irq_info[i].stat_id) {
|
|
+ if (enable)
|
|
+ stats_node->dma_irq_info[i].dynamic_switch =
|
|
+ STAT_DMA_SWITCH_DYNAMIC_ON;
|
|
+ else
|
|
+ stats_node->dma_irq_info[i].dynamic_switch =
|
|
+ STAT_DMA_SWITCH_DYNAMIC_OFF;
|
|
+ break;
|
|
+ }
|
|
+ }
|
|
+
|
|
+ return ret;
|
|
+}
|
|
+
|
|
+int k1xisp_stat_put_idlebuffer(struct isp_stat_buffer_queue *stat_bufqueue,
|
|
+ struct isp_kbuffer_info *kbuf_info)
|
|
+{
|
|
+ int ret = 0;
|
|
+
|
|
+ ISP_DRV_CHECK_POINTER(stat_bufqueue);
|
|
+ ISP_DRV_CHECK_POINTER(kbuf_info);
|
|
+
|
|
+ //lock used only between thread and soft irq.
|
|
+ spin_lock_bh(&stat_bufqueue->queue_lock);
|
|
+ kbuf_info->frame_id = -1;
|
|
+ kbuf_info->buf_status = ISP_BUFFER_STATUS_IDLE;
|
|
+ list_add_tail(&kbuf_info->hook, &stat_bufqueue->idle_buflist);
|
|
+ stat_bufqueue->idle_bufcnt++;
|
|
+ spin_unlock_bh(&stat_bufqueue->queue_lock);
|
|
+
|
|
+ return ret;
|
|
+}
|
|
+
|
|
+int k1xisp_stat_reqbuffer(struct k1xisp_stats_node *stats_node,
|
|
+ struct isp_buffer_request_info *req_info)
|
|
+{
|
|
+ int i = 0, buf_index = 0;
|
|
+ struct isp_stat_buffer_queue *stat_bufqueue = NULL;
|
|
+
|
|
+ ISP_DRV_CHECK_POINTER(stats_node);
|
|
+ ISP_DRV_CHECK_POINTER(req_info);
|
|
+
|
|
+ stat_bufqueue = stats_node->stat_bufqueue;
|
|
+ for (i = 0; i < ISP_STAT_ID_MAX; i++) {
|
|
+ if (req_info->stat_buf_count[i] > K1X_ISP_MAX_BUFFER_NUM)
|
|
+ req_info->stat_buf_count[i] = K1X_ISP_MAX_BUFFER_NUM;
|
|
+
|
|
+ stat_bufqueue[i].buf_count = req_info->stat_buf_count[i];
|
|
+ if (req_info->stat_buf_count[i] == 0 && stats_node->stat_active[i]) {
|
|
+ isp_log_err("zero buffer count, but we need this stat(%d)!", i);
|
|
+ return -EPERM;
|
|
+ }
|
|
+
|
|
+ for (buf_index = 0; buf_index < stat_bufqueue[i].buf_count; buf_index++) {
|
|
+ stat_bufqueue[i].buf_info[buf_index].buf_status = ISP_BUFFER_STATUS_DONE;
|
|
+ INIT_LIST_HEAD(&stat_bufqueue[i].buf_info[buf_index].hook);
|
|
+ stat_bufqueue[i].buf_info[buf_index].buf_index = buf_index;
|
|
+ }
|
|
+ }
|
|
+
|
|
+ return 0;
|
|
+}
|
|
+
|
|
+int isp_stat_buffer_verfied(struct isp_kbuffer_info *kbuf_info, struct isp_ubuf_uint *ubuf_uint, u8 fd_memory)
|
|
+{
|
|
+ int verified = 1, i = 0;
|
|
+
|
|
+ if (kbuf_info->plane_count != ubuf_uint->plane_count)
|
|
+ return 0;
|
|
+
|
|
+ if (fd_memory) {
|
|
+ for (i = 0; i < ubuf_uint->plane_count; i++) {
|
|
+ if ((kbuf_info->buf_planes[i].m.fd != ubuf_uint->buf_planes[i].m.fd)
|
|
+ || (kbuf_info->buf_planes[i].length != ubuf_uint->buf_planes[i].length)) {
|
|
+ verified = 0;
|
|
+ break;
|
|
+ }
|
|
+ }
|
|
+ } else {
|
|
+ for (i = 0; i < ubuf_uint->plane_count; i++) {
|
|
+ if ((kbuf_info->buf_planes[i].m.phy_addr != ubuf_uint->buf_planes[i].m.phy_addr)
|
|
+ || (kbuf_info->buf_planes[i].length != ubuf_uint->buf_planes[i].length)) {
|
|
+ verified = 0;
|
|
+ break;
|
|
+ }
|
|
+ }
|
|
+ }
|
|
+
|
|
+ return verified;
|
|
+}
|
|
+
|
|
+void _isp_stat_put_kvir_addr(u8 fd_memory, struct isp_kbuffer_info *kbuf_info)
|
|
+{
|
|
+ if (kbuf_info->kvir_addr) {
|
|
+ if (fd_memory) {
|
|
+ k1xisp_dev_put_viraddr_to_dma_buf(kbuf_info->dma_buffer,
|
|
+ (char*)kbuf_info->kvir_addr - kbuf_info->buf_planes[0].offset);
|
|
+ kbuf_info->kvir_addr = NULL;
|
|
+ dma_buf_put(kbuf_info->dma_buffer);
|
|
+ kbuf_info->dma_buffer = NULL;
|
|
+ }
|
|
+ }
|
|
+}
|
|
+
|
|
+int _isp_stat_get_kvir_addr(u8 fd_memory, struct isp_kbuffer_info *kbuf_info)
|
|
+{
|
|
+ int ret = 0;
|
|
+ void *vir_addr = NULL;
|
|
+ struct dma_buf *dma_buffer = NULL;
|
|
+
|
|
+ if (fd_memory) {
|
|
+ if (kbuf_info->kvir_addr)
|
|
+ _isp_stat_put_kvir_addr(fd_memory, kbuf_info);
|
|
+
|
|
+ dma_buffer = dma_buf_get(kbuf_info->buf_planes[0].m.fd);
|
|
+ if (IS_ERR(dma_buffer)) {
|
|
+ isp_log_err("%s: get dma buffer failed!", __func__);
|
|
+ return -EBADF;
|
|
+ }
|
|
+
|
|
+ kbuf_info->dma_buffer = dma_buffer;
|
|
+ ret = k1xisp_dev_get_viraddr_from_dma_buf(dma_buffer, &vir_addr);
|
|
+ if (ret)
|
|
+ kbuf_info->kvir_addr = NULL;
|
|
+ else
|
|
+ kbuf_info->kvir_addr = (void*)((char*)vir_addr + kbuf_info->buf_planes[0].offset);
|
|
+ } else {
|
|
+ isp_log_err("%s:we get viraddr just only support fd memory!", __func__);
|
|
+ ret = -1;
|
|
+ }
|
|
+
|
|
+ return ret;
|
|
+}
|
|
+
|
|
+int _isp_stat_get_phy_addr(u8 fd_memory, struct isp_kbuffer_info *kbuf_info)
|
|
+{
|
|
+ int ret = 0;
|
|
+ u64 phy_addr = 0;
|
|
+
|
|
+ if (fd_memory) {
|
|
+ ret = k1xisp_dev_get_phyaddr_from_dma_buf(kbuf_info->buf_planes[0].m.fd, &phy_addr);
|
|
+ if (ret)
|
|
+ kbuf_info->phy_addr = 0;
|
|
+ else
|
|
+ //multi stats alloc a whole buffer together, so we need offset.x
|
|
+ kbuf_info->phy_addr = phy_addr + kbuf_info->buf_planes[0].offset;
|
|
+ } else {
|
|
+ isp_log_err("%s:we get phyaddr just only support fd memory!", __func__);
|
|
+ ret = -1;
|
|
+ }
|
|
+
|
|
+ return ret;
|
|
+}
|
|
+
|
|
+int k1xisp_stat_qbuffer(struct k1xisp_stats_node *stats_node,
|
|
+ struct isp_buffer_enqueue_info *qbuf_info)
|
|
+{
|
|
+ struct isp_stat_buffer_queue *buf_queue = NULL;
|
|
+ struct isp_kbuffer_info *kbuf_info = NULL;
|
|
+ int i = 0, buf_index = -1, ret = 0, plane_size = 0, verified = 0;
|
|
+ struct isp_ubuf_uint *ubuf_uint = NULL;
|
|
+
|
|
+ ISP_DRV_CHECK_POINTER(stats_node);
|
|
+ ISP_DRV_CHECK_POINTER(qbuf_info);
|
|
+
|
|
+ for (i = 0; i < ISP_STAT_ID_MAX; i++) {
|
|
+ buf_queue = &stats_node->stat_bufqueue[i];
|
|
+ ubuf_uint = &qbuf_info->ubuf_uint[i];
|
|
+ buf_index = ubuf_uint->buf_index;
|
|
+
|
|
+ //this stat may have no buffer this time.
|
|
+ if (ubuf_uint->plane_count == 0)
|
|
+ continue;
|
|
+
|
|
+ //this stat may have no buffer this time.
|
|
+ if (buf_index >= buf_queue->buf_count || buf_index < 0)
|
|
+ continue;
|
|
+
|
|
+ if (ubuf_uint->plane_count > K1X_ISP_MAX_PLANE_NUM) {
|
|
+ isp_log_err
|
|
+ ("the planeNum(%d) is valid in stat(%d) for enqueue on pipe%d!",
|
|
+ ubuf_uint->plane_count, i, stats_node->hw_pipe_id);
|
|
+ return -EINVAL;
|
|
+ }
|
|
+
|
|
+ if (ubuf_uint->buf_planes[0].m.fd == 0) {
|
|
+ isp_log_err
|
|
+ ("the buffer(%d) fd is zeor in stat(%d) for enqueue on pipe%d!",
|
|
+ buf_index, i, stats_node->hw_pipe_id);
|
|
+ return -EINVAL;
|
|
+ }
|
|
+
|
|
+ kbuf_info = &buf_queue->buf_info[buf_index];
|
|
+ if (kbuf_info->buf_status != ISP_BUFFER_STATUS_DONE) {
|
|
+ isp_log_err
|
|
+ ("the status(%d) of buffer(%d) isn't dequeued on bufque(%d)!",
|
|
+ kbuf_info->buf_status, buf_index, i);
|
|
+ return -EPERM;
|
|
+ }
|
|
+
|
|
+ verified = isp_stat_buffer_verfied(kbuf_info, ubuf_uint, buf_queue->fd_memory);
|
|
+ if (!verified) {
|
|
+ //buffer has not been already verified
|
|
+ // kbuf_info->buf_index = buf_index;
|
|
+ kbuf_info->plane_count = ubuf_uint->plane_count;
|
|
+ plane_size = sizeof(struct isp_buffer_plane) * ubuf_uint->plane_count;
|
|
+ memcpy(kbuf_info->buf_planes, ubuf_uint->buf_planes, plane_size);
|
|
+
|
|
+ if (buf_queue->fill_by_cpu) {
|
|
+ //some stat result read from regs,so we need kernel virtual addr.
|
|
+ ret = _isp_stat_get_kvir_addr(buf_queue->fd_memory, kbuf_info);
|
|
+ } else {
|
|
+ //dma fill, need phy addr
|
|
+ ret = _isp_stat_get_phy_addr(buf_queue->fd_memory, kbuf_info);
|
|
+ }
|
|
+
|
|
+ if (ret) {
|
|
+ isp_log_err
|
|
+ ("get kaddr or phyaddr failed in stat(%d) for pipe%d!",
|
|
+ i, stats_node->hw_pipe_id);
|
|
+ ret = -EPERM;
|
|
+ goto Error_Exit;
|
|
+ }
|
|
+ }
|
|
+
|
|
+ k1xisp_stat_put_idlebuffer(buf_queue, kbuf_info);
|
|
+ isp_log_dbg
|
|
+ ("the %d stat has index(%d) buffer,fd=%d, kaddr=0x%lx, phy=0x%llx!\n",
|
|
+ i, buf_index, kbuf_info->buf_planes[0].m.fd,
|
|
+ (unsigned long)kbuf_info->kvir_addr, kbuf_info->phy_addr);
|
|
+ }
|
|
+
|
|
+ return ret;
|
|
+
|
|
+Error_Exit:
|
|
+ for (i = 0; i < ISP_STAT_ID_MAX; i++) {
|
|
+ buf_queue = &stats_node->stat_bufqueue[i];
|
|
+ ubuf_uint = &qbuf_info->ubuf_uint[i];
|
|
+ if (ubuf_uint->plane_count > 0 && buf_queue->fill_by_cpu) {
|
|
+ kbuf_info = &buf_queue->buf_info[ubuf_uint->buf_index];
|
|
+ _isp_stat_put_kvir_addr(buf_queue->fd_memory, kbuf_info);
|
|
+ }
|
|
+ }
|
|
+
|
|
+ return ret;
|
|
+}
|
|
+
|
|
+int k1xisp_stat_flush_buffer(struct k1xisp_stats_node *stats_node)
|
|
+{
|
|
+ int i = 0, j = 0;
|
|
+ struct isp_stat_buffer_queue *buf_queue = NULL;
|
|
+ struct isp_kbuffer_info *kbuf_info = NULL;
|
|
+
|
|
+ ISP_DRV_CHECK_POINTER(stats_node);
|
|
+
|
|
+ for (i = 0; i < ISP_STAT_ID_MAX; i++) {
|
|
+ buf_queue = &stats_node->stat_bufqueue[i];
|
|
+ if (buf_queue->fill_by_cpu) {
|
|
+ for (j = 0; j < buf_queue->buf_count; j++) {
|
|
+ kbuf_info = &buf_queue->buf_info[j];
|
|
+ _isp_stat_put_kvir_addr(buf_queue->fd_memory, kbuf_info);
|
|
+ kbuf_info->buf_status = ISP_BUFFER_STATUS_INVALID;
|
|
+ }
|
|
+ }
|
|
+ isp_log_dbg("flush the %d stat bufferque!", i);
|
|
+ }
|
|
+
|
|
+ k1xisp_stat_bufqueue_init(stats_node);
|
|
+ return 0;
|
|
+}
|
|
+
|
|
+//call at close pipe dev for abnormal
|
|
+int k1xisp_stat_try_flush_buffer(struct k1xisp_stats_node *stats_node)
|
|
+{
|
|
+ int i, j, reinit = 0;
|
|
+ struct isp_stat_buffer_queue *buf_queue = NULL;
|
|
+ struct isp_kbuffer_info *kbuf_info = NULL;
|
|
+
|
|
+ ISP_DRV_CHECK_POINTER(stats_node);
|
|
+ for (i = 0; i < ISP_STAT_ID_MAX; i++) {
|
|
+ buf_queue = &stats_node->stat_bufqueue[i];
|
|
+ if (buf_queue->buf_count > 0 && buf_queue->fill_by_cpu) {
|
|
+ for (j = 0; j < buf_queue->buf_count; j++) {
|
|
+ kbuf_info = &buf_queue->buf_info[j];
|
|
+ _isp_stat_put_kvir_addr(buf_queue->fd_memory, kbuf_info);
|
|
+ kbuf_info->buf_status = ISP_BUFFER_STATUS_INVALID;
|
|
+ }
|
|
+ reinit = 1;
|
|
+ }
|
|
+ }
|
|
+
|
|
+ if (reinit) {
|
|
+ k1xisp_stat_bufqueue_init(stats_node);
|
|
+ isp_log_info("flush stat bufferque for abnormal situation!");
|
|
+ }
|
|
+
|
|
+ return 0;
|
|
+}
|
|
+
|
|
+/*get idle buffer and put into busy queue.*/
|
|
+struct isp_kbuffer_info *k1xisp_stat_get_idlebuffer(struct isp_stat_buffer_queue
|
|
+ *buffer_queue, u32 hw_pipe_id)
|
|
+{
|
|
+ struct isp_kbuffer_info *kbuf_info = NULL;
|
|
+
|
|
+ //lock used only between thread and soft irq.
|
|
+ spin_lock_bh(&buffer_queue->queue_lock);
|
|
+
|
|
+ if (buffer_queue->busy_bufcnt > 1) {
|
|
+ /* the previous eof may lost, use this busy buffer. */
|
|
+ kbuf_info =
|
|
+ list_first_entry(&buffer_queue->busy_buflist, struct isp_kbuffer_info, hook);
|
|
+ list_del_init(&kbuf_info->hook);
|
|
+ buffer_queue->busy_bufcnt--;
|
|
+ isp_log_warn
|
|
+ ("pre eof lost in stat(%d) on pipeid: %d, bufindex:%d,status:%d,fn:%d,cnt=%d!",
|
|
+ buffer_queue->stat_id, hw_pipe_id, kbuf_info->buf_index,
|
|
+ kbuf_info->buf_status, kbuf_info->frame_id,
|
|
+ buffer_queue->busy_bufcnt);
|
|
+ } else {
|
|
+ if (buffer_queue->idle_bufcnt) {
|
|
+ kbuf_info =
|
|
+ list_first_entry(&buffer_queue->idle_buflist, struct isp_kbuffer_info, hook);
|
|
+ list_del_init(&kbuf_info->hook);
|
|
+ buffer_queue->idle_bufcnt--;
|
|
+ }
|
|
+ }
|
|
+
|
|
+ spin_unlock_bh(&buffer_queue->queue_lock);
|
|
+ return kbuf_info;
|
|
+}
|
|
+
|
|
+int k1xisp_stat_put_busybuffer(struct isp_stat_buffer_queue *stat_bufqueue,
|
|
+ struct isp_kbuffer_info *kbuf_info)
|
|
+{
|
|
+ int ret = 0;
|
|
+
|
|
+ ISP_DRV_CHECK_POINTER(stat_bufqueue);
|
|
+ ISP_DRV_CHECK_POINTER(kbuf_info);
|
|
+
|
|
+ //lock used only between thread and soft irq.
|
|
+ spin_lock_bh(&stat_bufqueue->queue_lock);
|
|
+ kbuf_info->buf_status = ISP_BUFFER_STATUS_BUSY;
|
|
+ list_add_tail(&kbuf_info->hook, &stat_bufqueue->busy_buflist);
|
|
+ stat_bufqueue->busy_bufcnt++;
|
|
+ spin_unlock_bh(&stat_bufqueue->queue_lock);
|
|
+
|
|
+ return ret;
|
|
+}
|
|
+
|
|
+struct isp_kbuffer_info* k1xisp_stat_get_busybuffer(struct isp_stat_buffer_queue *stat_bufqueue, u32 wr_err, u32 hw_pipe_id)
|
|
+{
|
|
+ struct isp_kbuffer_info *kbuf_info = NULL;
|
|
+
|
|
+ if (!stat_bufqueue) {
|
|
+ isp_log_err("%s: Invalid pointer!", __func__);
|
|
+ return NULL;
|
|
+ }
|
|
+ //lock used only between thread and soft irq.
|
|
+ spin_lock_bh(&stat_bufqueue->queue_lock);
|
|
+
|
|
+ if (stat_bufqueue->busy_bufcnt) {
|
|
+ kbuf_info = list_first_entry(&stat_bufqueue->busy_buflist, struct isp_kbuffer_info, hook);
|
|
+ if (!wr_err) {
|
|
+ //eof: just take out from list.
|
|
+ list_del_init(&kbuf_info->hook);
|
|
+ stat_bufqueue->busy_bufcnt--;
|
|
+ if (kbuf_info->buf_status != ISP_BUFFER_STATUS_BUSY && kbuf_info->buf_status != ISP_BUFFER_STATUS_ERROR) {
|
|
+ isp_log_err("buffer(%d,%d) busy status can't match(%d) on stat%d!", kbuf_info->buf_index,
|
|
+ kbuf_info->frame_id, kbuf_info->buf_status, stat_bufqueue->stat_id);
|
|
+ kbuf_info->buf_status = ISP_BUFFER_STATUS_ERROR;
|
|
+ }
|
|
+ } else {
|
|
+ /* dma write err: keep the buffer on the list, because eof may come out after err.
|
|
+ * If no eof come out, we should find lost pre eof at the next sof.
|
|
+ */
|
|
+ kbuf_info->buf_status = ISP_BUFFER_STATUS_ERROR;
|
|
+ }
|
|
+ } else
|
|
+ isp_log_err("there is no buffer in stat(%d) busy queue on pipe%d!", stat_bufqueue->stat_id, hw_pipe_id);
|
|
+
|
|
+ spin_unlock_bh(&stat_bufqueue->queue_lock);
|
|
+
|
|
+ return kbuf_info;
|
|
+}
|
|
+
|
|
+int k1xisp_stat_put_donebuffer(u32 stat_id, struct isp_stat_done_info *stat_done_info,
|
|
+ struct isp_kbuffer_info *kbuf_info)
|
|
+{
|
|
+ int ret = 0;
|
|
+
|
|
+ ISP_DRV_CHECK_POINTER(stat_done_info);
|
|
+ ISP_DRV_CHECK_POINTER(kbuf_info);
|
|
+
|
|
+ //lock used only between thread and soft irq.
|
|
+ spin_lock_bh(&stat_done_info->done_lock);
|
|
+ kbuf_info->buf_status = ISP_BUFFER_STATUS_DONE;
|
|
+ list_add_tail(&kbuf_info->hook, &stat_done_info->done_list);
|
|
+ stat_done_info->done_cnt++;
|
|
+ spin_unlock_bh(&stat_done_info->done_lock);
|
|
+
|
|
+ if (stat_done_info->done_cnt > 1) {
|
|
+ isp_log_dbg("previous stat(%d) buffer may not deal on time,count=%d!",
|
|
+ stat_id, stat_done_info->done_cnt);
|
|
+ }
|
|
+
|
|
+ return ret;
|
|
+}
|
|
+
|
|
+struct isp_kbuffer_info* k1xisp_stat_get_donebuf_by_frameid(struct k1xisp_stats_node *stats_node,
|
|
+ u32 stat_id, u32 frame_num, u32 return_idle)
|
|
+{
|
|
+ struct isp_stat_done_info *stat_done_info = NULL;
|
|
+ struct isp_kbuffer_info *kbuf_info = NULL;
|
|
+ struct isp_kbuffer_info *kbuf_idle[K1X_ISP_MAX_BUFFER_NUM] = {NULL, NULL, NULL, NULL};
|
|
+ struct list_head *pos, *n;
|
|
+ int find_item = 0, idle_buf_cnt = 0, i = 0;
|
|
+
|
|
+ if (!stats_node) {
|
|
+ return NULL;
|
|
+ }
|
|
+
|
|
+ stat_done_info = &stats_node->stat_done_info[stat_id];
|
|
+ //lock used only between thread and soft irq.
|
|
+ spin_lock_bh(&stat_done_info->done_lock);
|
|
+
|
|
+ list_for_each_safe(pos, n, &stat_done_info->done_list) {
|
|
+ kbuf_info = list_entry(pos, struct isp_kbuffer_info, hook);
|
|
+ if (kbuf_info->frame_id <= frame_num) {
|
|
+ list_del_init(&kbuf_info->hook);
|
|
+ stat_done_info->done_cnt--;
|
|
+ if (kbuf_info->frame_id == frame_num) {
|
|
+ if (!return_idle) {
|
|
+ find_item = 1;
|
|
+ } else {
|
|
+ kbuf_idle[idle_buf_cnt] = kbuf_info;
|
|
+ idle_buf_cnt++;
|
|
+ }
|
|
+
|
|
+ break;
|
|
+ } else {
|
|
+ // we have to put the buffer whose frameid is less than frame_num to idle list.
|
|
+ isp_log_dbg
|
|
+ ("find stat%d buf(frame%d,index%d,cnt=%d), but we need frame%d!",
|
|
+ stat_id, kbuf_info->frame_id, kbuf_info->buf_index,
|
|
+ stat_done_info->done_cnt, frame_num);
|
|
+ kbuf_idle[idle_buf_cnt] = kbuf_info;
|
|
+ idle_buf_cnt++;
|
|
+ }
|
|
+ }
|
|
+ }
|
|
+
|
|
+ if (!find_item) {
|
|
+ kbuf_info = NULL;
|
|
+ isp_log_dbg("there is no buffer in stat(%d) done queue on pipe%d!",
|
|
+ stat_id, stats_node->hw_pipe_id);
|
|
+ }
|
|
+
|
|
+ spin_unlock_bh(&stat_done_info->done_lock);
|
|
+
|
|
+ for (i = 0; i < idle_buf_cnt; i ++)
|
|
+ k1xisp_stat_put_idlebuffer(&stats_node->stat_bufqueue[stat_id], kbuf_idle[i]);
|
|
+
|
|
+ return kbuf_info;
|
|
+}
|
|
+
|
|
+int k1xisp_stat_reset_dma_busybuf_frameid(struct k1xisp_stats_node *stats_node)
|
|
+{
|
|
+ struct isp_kbuffer_info *kbuf_info = NULL;
|
|
+ struct list_head *pos, *n;
|
|
+ u32 stat_id, dma_cnt;
|
|
+ struct isp_stat_buffer_queue *buffer_queue = NULL;
|
|
+
|
|
+ ISP_DRV_CHECK_POINTER(stats_node);
|
|
+
|
|
+ for (dma_cnt = 0; dma_cnt < ISP_STAT_THROUGH_DMA_COUNT; dma_cnt++) { // normal is 2.
|
|
+ stat_id = stats_node->stat_dma_irq_bitmap[dma_cnt].stat_id;
|
|
+ buffer_queue = &stats_node->stat_bufqueue[stat_id];
|
|
+ //lock used only between thread and soft irq.
|
|
+ spin_lock_bh(&buffer_queue->queue_lock);
|
|
+ list_for_each_safe(pos, n, &buffer_queue->busy_buflist) {
|
|
+ kbuf_info = list_entry(pos, struct isp_kbuffer_info, hook);
|
|
+ kbuf_info->frame_id = 0;
|
|
+ }
|
|
+ spin_unlock_bh(&buffer_queue->queue_lock);
|
|
+ }
|
|
+
|
|
+ return 0;
|
|
+}
|
|
+
|
|
+// the upper half irq context
|
|
+int k1xisp_stat_dma_irq_handler(struct k1xisp_stats_node *stats_node, void *irq_data)
|
|
+{
|
|
+ struct stat_dma_irq_bits *dma_irq_bitmap = NULL;
|
|
+ struct stat_dma_irq_info *dma_irq_info = NULL;
|
|
+ int dma_cnt = 0, type_index = 0, irq_happen = 0;
|
|
+ u32 dma_status = 0, irq_bit = 0;
|
|
+ struct dma_irq_data *dma_irq = (struct dma_irq_data *)irq_data;
|
|
+
|
|
+ ISP_DRV_CHECK_POINTER(stats_node);
|
|
+
|
|
+ dma_irq_bitmap = stats_node->stat_dma_irq_bitmap;
|
|
+ dma_irq_info = stats_node->dma_irq_info;
|
|
+ for (dma_cnt = 0; dma_cnt < ISP_STAT_THROUGH_DMA_COUNT; dma_cnt++) { // normal is 2.
|
|
+ spin_lock(&dma_irq_info[dma_cnt].flag_lock);
|
|
+ for (type_index = 0; type_index < ISP_DMA_IRQ_TYPE_NUM; type_index++) {
|
|
+ irq_bit = dma_irq_bitmap[dma_cnt].irq_bit[type_index];
|
|
+ if (irq_bit >= ISP_STAT_DMA_IRQ_BIT_MAX) {
|
|
+ //use dma1 status
|
|
+ irq_bit = irq_bit - ISP_STAT_DMA_IRQ_BIT_MAX;
|
|
+ dma_status = dma_irq->status2;
|
|
+ } else
|
|
+ dma_status = dma_irq->status1;
|
|
+
|
|
+ if (dma_status & BIT(irq_bit)) {
|
|
+ dma_irq_info[dma_cnt].irq_flag[type_index] = true;
|
|
+ if (!irq_happen)
|
|
+ irq_happen = true;
|
|
+ }
|
|
+ }
|
|
+ spin_unlock(&dma_irq_info[dma_cnt].flag_lock);
|
|
+ }
|
|
+
|
|
+ return irq_happen;
|
|
+}
|
|
+
|
|
+//tasklet context
|
|
+int isp_stat_dma_err_handler(struct k1xisp_stats_node *stats_node, u32 stat_id)
|
|
+{
|
|
+ int ret = 0;
|
|
+ struct isp_stat_buffer_queue *buffer_queue = NULL;
|
|
+
|
|
+ buffer_queue = &stats_node->stat_bufqueue[stat_id];
|
|
+ k1xisp_stat_get_busybuffer(buffer_queue, 1, stats_node->hw_pipe_id);
|
|
+
|
|
+ return ret;
|
|
+}
|
|
+
|
|
+/*the actiual dma write size of pdc is changing by hardware*/
|
|
+int isp_stat_get_pdc_real_ch_size(u32 hw_pipe_id, struct isp_kbuffer_info *kbuf_info)
|
|
+{
|
|
+ int ret = 0, i;
|
|
+ ulong reg_addr = 0;
|
|
+
|
|
+ ISP_DRV_CHECK_POINTER(kbuf_info);
|
|
+ if (K1X_ISP_PDC_CHANNEL_NUM != kbuf_info->plane_count) {
|
|
+ isp_log_err("the error plane count(%d) for pdc buffer!", kbuf_info->plane_count);
|
|
+ ret = -EINVAL;
|
|
+ }
|
|
+
|
|
+ for (i = 0; i < kbuf_info->plane_count; i++) {
|
|
+ reg_addr = REG_ISP_PDC_BASE(hw_pipe_id) + 0x2f8 + i * 4;
|
|
+ //bytes peer channel(channel_cnt)
|
|
+ kbuf_info->buf_planes[i].reserved[0] = k1xisp_reg_readl(reg_addr);
|
|
+ //width peer channel, channel_cnt = channel_width * height;
|
|
+ kbuf_info->buf_planes[i].reserved[1] = k1xisp_reg_readl(reg_addr + 0x10);
|
|
+ isp_log_dbg("pdc channel%d: count=0x%x, width=0x%x!", i, kbuf_info->buf_planes[i].reserved[0],
|
|
+ kbuf_info->buf_planes[i].reserved[1]);
|
|
+ }
|
|
+
|
|
+ return ret;
|
|
+}
|
|
+
|
|
+//tasklet context:dma eof.
|
|
+int isp_stat_dma_eof_handler(struct k1xisp_stats_node *stats_node, u32 stat_id)
|
|
+{
|
|
+ int ret = 0, return_idle = 0;
|
|
+ struct isp_kbuffer_info *kbuf_info = NULL;
|
|
+ struct isp_stat_buffer_queue *buffer_queue = NULL;
|
|
+ struct stats_notify_params notify_param;
|
|
+
|
|
+ buffer_queue = &stats_node->stat_bufqueue[stat_id];
|
|
+ kbuf_info = k1xisp_stat_get_busybuffer(buffer_queue, 0, stats_node->hw_pipe_id);
|
|
+
|
|
+ if (kbuf_info) {
|
|
+ if (kbuf_info->buf_status == ISP_BUFFER_STATUS_ERROR) {
|
|
+ //error status handle
|
|
+ k1xisp_stat_put_idlebuffer(buffer_queue, kbuf_info);
|
|
+ isp_log_warn("the error buffer(%d) on stat%d return to idle!", kbuf_info->buf_index, stat_id);
|
|
+ } else {
|
|
+ if (ISP_STAT_ID_PDC == stat_id) {
|
|
+ if (stats_node->dma_irq_info[0].dynamic_switch != STAT_DMA_SWITCH_DYNAMIC_ON) {
|
|
+ //pdc hardware open, but software have no idea.
|
|
+ return_idle = 1;
|
|
+ } else {
|
|
+ isp_stat_get_pdc_real_ch_size(stats_node->hw_pipe_id, kbuf_info);
|
|
+ }
|
|
+ }
|
|
+
|
|
+ if (return_idle)
|
|
+ k1xisp_stat_put_idlebuffer(buffer_queue, kbuf_info);
|
|
+ else
|
|
+ k1xisp_stat_put_donebuffer(stat_id, &stats_node->stat_done_info[stat_id], kbuf_info);
|
|
+
|
|
+ if (ISP_STAT_ID_PDC == stat_id && !return_idle) {
|
|
+ notify_param.stat_id = stat_id;
|
|
+ notify_param.event_enable = true;
|
|
+ notify_param.frame_id = kbuf_info->frame_id;
|
|
+ stats_node->notify_event(stats_node->private_dev, PIPE_EVENT_CAST_VOTE, (void*)¬ify_param,
|
|
+ sizeof(struct stats_notify_params));
|
|
+ }
|
|
+ }
|
|
+ } else
|
|
+ ret = -EPERM;
|
|
+
|
|
+ return ret;
|
|
+}
|
|
+
|
|
+//tasklet context and when streamon
|
|
+int isp_stat_dma_sof_handler(struct k1xisp_stats_node *stats_node, u32 stat_id,
|
|
+ u32 dma_ch_id, u32 frame_id)
|
|
+{
|
|
+ int ret = 0, array_index = 0, i = 0;
|
|
+ struct isp_kbuffer_info *kbuf_info = NULL;
|
|
+ struct isp_stat_buffer_queue *buffer_queue = NULL;
|
|
+ struct isp_reg_unit reg_unit[12] = { 0 };
|
|
+ u32 reg_base_addr = 0, addr_offset = 0;
|
|
+ u32 reg_high_addr = 0, low_addr = 0, high_addr = 0;
|
|
+
|
|
+ buffer_queue = &stats_node->stat_bufqueue[stat_id];
|
|
+ kbuf_info = k1xisp_stat_get_idlebuffer(buffer_queue, stats_node->hw_pipe_id);
|
|
+ if (!kbuf_info) {
|
|
+ isp_log_dbg("no buffer in stat(%d) idle for pipe%d,frameID:%d!",
|
|
+ stat_id, stats_node->hw_pipe_id, frame_id);
|
|
+ // pBufQue->bNextEofMiss = true;
|
|
+ return -EPERM;
|
|
+ }
|
|
+
|
|
+ kbuf_info->frame_id = frame_id;
|
|
+ if (dma_ch_id < PIPE0_PDC_DMA_CH_ID) {
|
|
+ //dma channel y and uv addr.
|
|
+ reg_base_addr = REG_ISP_DMA_Y_ADDR(dma_ch_id);
|
|
+ reg_high_addr = REG_ISP_DMA_Y_HIGH_ADDR(dma_ch_id);
|
|
+ } else if (dma_ch_id <= PIPE1_PDC_DMA_CH_ID) {
|
|
+ //pdc dma
|
|
+ reg_base_addr =
|
|
+ REG_ISP_PDC_DMA_BASE_ADDR(dma_ch_id - PIPE0_PDC_DMA_CH_ID);
|
|
+ reg_high_addr =
|
|
+ REG_ISP_PDC_DMA_HIGH_BASE_ADDR(dma_ch_id - PIPE0_PDC_DMA_CH_ID);
|
|
+ } else {
|
|
+ isp_log_err("unknown this dma ch ID:%d!", dma_ch_id);
|
|
+ return -EINVAL;
|
|
+ }
|
|
+
|
|
+ low_addr = kbuf_info->phy_addr & 0xffffffff;
|
|
+ high_addr = (kbuf_info->phy_addr >> 32) & 0x3;
|
|
+ for (i = 0; i < kbuf_info->plane_count; i++) {
|
|
+ reg_unit[array_index].reg_addr = reg_base_addr + (i * 0x4);
|
|
+ reg_unit[array_index].reg_value = low_addr + addr_offset;
|
|
+ reg_unit[array_index].reg_mask = 0xffffffff;
|
|
+ addr_offset += kbuf_info->buf_planes[i].length;
|
|
+ array_index++;
|
|
+
|
|
+ if (high_addr) {
|
|
+ reg_unit[array_index].reg_addr = reg_high_addr + (i * 0x4);
|
|
+ reg_unit[array_index].reg_value = high_addr;
|
|
+ reg_unit[array_index].reg_mask = 0x3;
|
|
+ array_index++;
|
|
+ }
|
|
+ }
|
|
+
|
|
+ //dma channel ready.
|
|
+ if (dma_ch_id < PIPE0_PDC_DMA_CH_ID) {
|
|
+ reg_unit[array_index].reg_addr = REG_ISP_DMA_CHANNEL_MASTER(dma_ch_id);
|
|
+ reg_unit[array_index].reg_value = BIT(31);
|
|
+ reg_unit[array_index].reg_mask = BIT(31);
|
|
+ array_index++;
|
|
+ } else if (dma_ch_id <= PIPE1_PDC_DMA_CH_ID) {
|
|
+ reg_unit[array_index].reg_addr = REG_ISP_PDC_DMA_MASTER;
|
|
+ if (PIPE0_PDC_DMA_CH_ID == dma_ch_id) {
|
|
+ reg_unit[array_index].reg_value = BIT(30);
|
|
+ reg_unit[array_index].reg_mask = BIT(30);
|
|
+ } else {
|
|
+ reg_unit[array_index].reg_value = BIT(31);
|
|
+ reg_unit[array_index].reg_mask = BIT(31);
|
|
+ }
|
|
+ array_index++;
|
|
+ }
|
|
+
|
|
+ ret = k1xisp_reg_write_brust(reg_unit, array_index, false, NULL);
|
|
+ if (ret)
|
|
+ isp_log_err("isp stat dma sof config registers failed!");
|
|
+
|
|
+ k1xisp_stat_put_busybuffer(buffer_queue, kbuf_info);
|
|
+
|
|
+ return ret;
|
|
+}
|
|
+
|
|
+int isp_stat_get_dma_ch_id(struct k1xisp_stats_node *stats_node, u32 stat_id)
|
|
+{
|
|
+ int ch_id = -1, i = 0;
|
|
+ struct stat_dma_irq_bits *dma_irq_bitmap = NULL;
|
|
+
|
|
+ if (stats_node) {
|
|
+ for (i = 0; i < ISP_STAT_THROUGH_DMA_COUNT; i++) {
|
|
+ dma_irq_bitmap = &stats_node->stat_dma_irq_bitmap[i];
|
|
+ if (stat_id == dma_irq_bitmap->stat_id)
|
|
+ ch_id = dma_irq_bitmap->dma_ch_id;
|
|
+ }
|
|
+ }
|
|
+
|
|
+ return ch_id;
|
|
+}
|
|
+
|
|
+int isp_stat_dma_trigger_dynamic_condition(struct k1xisp_stats_node *stats_node,
|
|
+ u32 irq_info_index)
|
|
+{
|
|
+ int ret = 0;
|
|
+ struct stat_dma_irq_info *dma_irq_info = NULL;
|
|
+ struct stats_notify_params notify_param;
|
|
+
|
|
+ dma_irq_info = &stats_node->dma_irq_info[irq_info_index];
|
|
+ notify_param.stat_id = dma_irq_info->stat_id;
|
|
+ notify_param.frame_id = 0;
|
|
+ if (STAT_DMA_SWITCH_DYNAMIC_ON == dma_irq_info->dynamic_switch) {
|
|
+ if (dma_irq_info->dynamic_trigger != 1) {
|
|
+ notify_param.event_enable = true;
|
|
+ stats_node->notify_event(stats_node->private_dev,
|
|
+ PIPE_EVENT_TRIGGER_VOTE_SYS,
|
|
+ (void *)¬ify_param,
|
|
+ sizeof(struct stats_notify_params));
|
|
+ dma_irq_info->dynamic_trigger = 1;
|
|
+ }
|
|
+ } else if (STAT_DMA_SWITCH_DYNAMIC_OFF == dma_irq_info->dynamic_switch) {
|
|
+ if (dma_irq_info->dynamic_trigger != 2) {
|
|
+ notify_param.event_enable = false;
|
|
+ stats_node->notify_event(stats_node->private_dev,
|
|
+ PIPE_EVENT_TRIGGER_VOTE_SYS,
|
|
+ (void *)¬ify_param,
|
|
+ sizeof(struct stats_notify_params));
|
|
+ dma_irq_info->dynamic_trigger = 2;
|
|
+ }
|
|
+ }
|
|
+
|
|
+ return ret;
|
|
+}
|
|
+
|
|
+//tasklet context
|
|
+int k1xisp_stat_dma_lower_half_irq(struct k1xisp_stats_node *stats_node, u32 frame_id)
|
|
+{
|
|
+ int dma_cnt = 0, type_index = 0, ret = 0;
|
|
+ struct stat_dma_irq_info dest_irq_info[ISP_STAT_THROUGH_DMA_COUNT] = { 0 };
|
|
+ struct stat_dma_irq_info *dma_irq_info = NULL;
|
|
+ unsigned long flags = 0;
|
|
+
|
|
+ ISP_DRV_CHECK_POINTER(stats_node);
|
|
+ dma_irq_info = stats_node->dma_irq_info;
|
|
+ for (dma_cnt = 0; dma_cnt < ISP_STAT_THROUGH_DMA_COUNT; dma_cnt++) { // normal is 2.
|
|
+ spin_lock_irqsave(&dma_irq_info[dma_cnt].flag_lock, flags);
|
|
+
|
|
+ for(type_index = 0; type_index < ISP_DMA_IRQ_TYPE_NUM; type_index++) {
|
|
+ dest_irq_info[dma_cnt].irq_flag[type_index] = dma_irq_info[dma_cnt].irq_flag[type_index];
|
|
+ dest_irq_info[dma_cnt].stat_id = dma_irq_info[dma_cnt].stat_id;
|
|
+ if (dest_irq_info[dma_cnt].irq_flag[type_index])
|
|
+ dma_irq_info[dma_cnt].irq_flag[type_index] = false;
|
|
+ }
|
|
+
|
|
+ spin_unlock_irqrestore(&dma_irq_info[dma_cnt].flag_lock, flags);
|
|
+ }
|
|
+
|
|
+ // handler the irq
|
|
+ for (dma_cnt = 0; dma_cnt < ISP_STAT_THROUGH_DMA_COUNT; dma_cnt++) { // normal is 2.
|
|
+ for (type_index = 0; type_index < ISP_DMA_IRQ_TYPE_NUM; type_index++) {
|
|
+ if (dest_irq_info[dma_cnt].irq_flag[type_index]) {
|
|
+ switch (type_index) {
|
|
+ case ISP_DMA_IRQ_TYPE_ERR:
|
|
+ isp_stat_dma_err_handler(stats_node, dest_irq_info[dma_cnt].stat_id);
|
|
+ break;
|
|
+ case ISP_DMA_IRQ_TYPE_EOF:
|
|
+ isp_stat_dma_eof_handler(stats_node, dest_irq_info[dma_cnt].stat_id);
|
|
+ break;
|
|
+ case ISP_DMA_IRQ_TYPE_SOF:
|
|
+ {
|
|
+ int dma_ch = -1;
|
|
+ dma_ch = isp_stat_get_dma_ch_id(stats_node, dest_irq_info[dma_cnt].stat_id);
|
|
+ if (dma_ch < 0) {
|
|
+ isp_log_err("get isp stat(%d) dma channel ID failed!", dest_irq_info[dma_cnt].stat_id);
|
|
+ ret = -EPERM;
|
|
+ } else {
|
|
+ isp_stat_dma_trigger_dynamic_condition(stats_node, dma_cnt);
|
|
+ isp_stat_dma_sof_handler(stats_node, dest_irq_info[dma_cnt].stat_id, dma_ch, frame_id + 1);
|
|
+ }
|
|
+ break;
|
|
+ }
|
|
+ default:
|
|
+ isp_log_err("unsupport dma irq type:%d!", type_index);
|
|
+ ret = -EINVAL;
|
|
+ break;
|
|
+ }
|
|
+ }
|
|
+ }
|
|
+ }
|
|
+
|
|
+ return ret;
|
|
+}
|
|
+
|
|
+/*tasklet context: ae,af,awb result read from mem*/
|
|
+struct isp_kbuffer_info* k1xisp_stat_read_from_mem(struct k1xisp_stats_node *stats_node, u32 stat_id, u32 frame_num)
|
|
+{
|
|
+ struct isp_kbuffer_info *kbuf_info = NULL;
|
|
+ u32 *buf_temp = NULL;
|
|
+ int i = 0, read_cnt = 0;
|
|
+ ulong reg_addr = 0, reg_base = 0;
|
|
+
|
|
+ if (!stats_node) {
|
|
+ isp_log_err("%s: Invalid pointer!", __func__);
|
|
+ return NULL;
|
|
+ }
|
|
+
|
|
+ if (ISP_STAT_ID_AE == stat_id) {
|
|
+ reg_base = REG_STAT_AEM_RESULT_MEM(stats_node->hw_pipe_id) + 512 * 4;
|
|
+ } else if (ISP_STAT_ID_AF == stat_id) {
|
|
+ reg_base = REG_STAT_AFC_RESULT_MEM(stats_node->hw_pipe_id);
|
|
+ } else if (ISP_STAT_ID_AWB == stat_id) {
|
|
+ reg_base = REG_STAT_WBM_RESULT_MEM(stats_node->hw_pipe_id);
|
|
+ } else if (ISP_STAT_ID_LTM == stat_id) {
|
|
+ reg_base = REG_STAT_LTM_RESULT_MEM(stats_node->hw_pipe_id);
|
|
+ } else {
|
|
+ isp_log_err("%s: invalid stat(%d) for pipe%d!", __func__, stat_id, stats_node->hw_pipe_id);
|
|
+ return NULL;
|
|
+ }
|
|
+
|
|
+ kbuf_info = k1xisp_stat_get_idlebuffer(&stats_node->stat_bufqueue[stat_id], stats_node->hw_pipe_id);
|
|
+ if (!kbuf_info) {
|
|
+ isp_log_info("no buffer in stat(%d) idle queue for pipe%d!", stat_id, stats_node->hw_pipe_id);
|
|
+ return NULL;
|
|
+ }
|
|
+
|
|
+ if (!kbuf_info->kvir_addr) {
|
|
+ isp_log_err("the kaddr is NULL in stat(%d) for pipe%d!", stat_id, stats_node->hw_pipe_id);
|
|
+ k1xisp_stat_put_idlebuffer(&stats_node->stat_bufqueue[stat_id], kbuf_info);
|
|
+ return NULL;
|
|
+ }
|
|
+
|
|
+ read_cnt = kbuf_info->buf_planes[0].length / 4;
|
|
+ if (ISP_STAT_ID_AE == stat_id) {
|
|
+ read_cnt = read_cnt - 512;
|
|
+ kbuf_info->buf_planes[0].reserved[0] = read_cnt * 4;
|
|
+ }
|
|
+
|
|
+ kbuf_info->frame_id = frame_num;
|
|
+ isp_log_dbg("ready to read the addr(0x%lx),size=%d!", reg_base, read_cnt);
|
|
+ for (i = 0; i < read_cnt; i++) {
|
|
+ buf_temp = (u32 *) kbuf_info->kvir_addr + i;
|
|
+ reg_addr = reg_base + i * 4;
|
|
+ *buf_temp = k1xisp_reg_readl(reg_addr);
|
|
+ }
|
|
+
|
|
+ return kbuf_info;
|
|
+}
|
|
+
|
|
+//isp tasklet context
|
|
+void k1xisp_stat_mem_lower_half_irq(struct k1xisp_stats_node *stats_node, u32 stat_id, u32 frame_num)
|
|
+{
|
|
+ unsigned long flags = 0;
|
|
+ struct isp_kbuffer_info *kbuf_info = NULL;
|
|
+ u8 start_read = false;
|
|
+ int info_index = 0;
|
|
+
|
|
+ if (!stats_node) {
|
|
+ isp_log_err("%s: Invalid pointer!", __func__);
|
|
+ return;
|
|
+ }
|
|
+
|
|
+ if (stat_id >= ISP_STAT_ID_MAX) {
|
|
+ isp_log_err("%s: Invalid stat id:%d!", __func__, stat_id);
|
|
+ return;
|
|
+ }
|
|
+
|
|
+ info_index = stats_node->mem_irq_index[stat_id];
|
|
+ if (stats_node->stat_active[stat_id]) {
|
|
+ spin_lock_irqsave(&stats_node->mem_irq_info[info_index].mem_flag_lock, flags);
|
|
+ start_read = stats_node->mem_irq_info[info_index].start_read;
|
|
+ if (start_read)
|
|
+ stats_node->mem_irq_info[info_index].start_read = false;
|
|
+ spin_unlock_irqrestore(&stats_node->mem_irq_info[info_index].mem_flag_lock, flags);
|
|
+ }
|
|
+
|
|
+ if (start_read) {
|
|
+ kbuf_info = k1xisp_stat_read_from_mem(stats_node, stat_id, frame_num);
|
|
+ if (kbuf_info)
|
|
+ k1xisp_stat_put_donebuffer(stat_id, &stats_node->stat_done_info[stat_id], kbuf_info);
|
|
+ }
|
|
+}
|
|
+
|
|
+/* the upper half irq context */
|
|
+int k1xisp_stat_mem_set_irq_flag(struct k1xisp_stats_node *stats_node, u32 stat_id, u32 hw_pipe_id)
|
|
+{
|
|
+ int info_index = -1;
|
|
+
|
|
+ ISP_DRV_CHECK_POINTER(stats_node);
|
|
+
|
|
+ if (hw_pipe_id != stats_node->hw_pipe_id) {
|
|
+ isp_log_err("%s: Invalid pipe:%d for stats node(%d)!", __func__,
|
|
+ hw_pipe_id, stats_node->hw_pipe_id);
|
|
+ return -EINVAL;
|
|
+ }
|
|
+
|
|
+ if (!stats_node->stat_active[stat_id]) {
|
|
+ isp_log_err("%s: stat%d isn't active on pipe:%d!", __func__, stat_id, hw_pipe_id);
|
|
+ return -EINVAL;
|
|
+ }
|
|
+
|
|
+ info_index = stats_node->mem_irq_index[stat_id];
|
|
+ if (info_index < 0) {
|
|
+ isp_log_err("%s: stat%d isn't memregs on pipe:%d!", __func__, stat_id, hw_pipe_id);
|
|
+ return -EINVAL;
|
|
+ }
|
|
+
|
|
+ spin_lock(&stats_node->mem_irq_info[info_index].mem_flag_lock);
|
|
+ stats_node->mem_irq_info[info_index].start_read = true;
|
|
+ spin_unlock(&stats_node->mem_irq_info[info_index].mem_flag_lock);
|
|
+
|
|
+ return 0;
|
|
+}
|
|
diff --git a/drivers/media/platform/spacemit/camera/cam_isp/k1x_isp_statistic.h b/drivers/media/platform/spacemit/camera/cam_isp/k1x_isp_statistic.h
|
|
new file mode 100644
|
|
index 000000000000..111111111111
|
|
--- /dev/null
|
|
+++ b/drivers/media/platform/spacemit/camera/cam_isp/k1x_isp_statistic.h
|
|
@@ -0,0 +1,144 @@
|
|
+/* SPDX-License-Identifier: GPL-2.0 */
|
|
+#ifndef K1X_ISP_STATISTIC_H
|
|
+#define K1X_ISP_STATISTIC_H
|
|
+
|
|
+#include "k1x_isp_drv.h"
|
|
+
|
|
+int k1xisp_stat_node_init(struct k1xisp_stats_node *stats_node, u32 hw_pipe_id);
|
|
+int k1xisp_stat_job_flags_init(struct k1xisp_stats_node *stats_node);
|
|
+
|
|
+/**
|
|
+ * k1xisp_stat_node_streamon_dma_port - config the stats node when streamon
|
|
+ * @stats_node: pointer to &struct k1xisp_stats_node.
|
|
+ *
|
|
+ * Should be called after k1xisp_stat_node_init.
|
|
+ *
|
|
+ * This function:
|
|
+ *
|
|
+ * 1) config dma port when streamon isp.
|
|
+ *
|
|
+ * The return values:
|
|
+ * 0 : success.
|
|
+ * <0 : failed.
|
|
+ */
|
|
+int k1xisp_stat_node_streamon_dma_port(struct k1xisp_stats_node *stats_node);
|
|
+int k1xisp_stat_node_streamoff_dma_port(struct k1xisp_stats_node *stats_node);
|
|
+
|
|
+int k1xisp_stat_node_cfg_dma_irqmask(struct k1xisp_stats_node *stats_node);
|
|
+int k1xisp_stat_node_clear_dma_irqmask(struct k1xisp_stats_node *stats_node);
|
|
+
|
|
+/**
|
|
+ * k1xisp_stat_reqbuffer - the function of request buffer for user space.
|
|
+ * @stats_node: pointer to &struct k1xisp_stats_node.
|
|
+ * @req_info: pointer to &struct isp_buffer_request_info, filled by user space.
|
|
+ *
|
|
+ * The return values:
|
|
+ * 0 : success.
|
|
+ * <0 : failed.
|
|
+ */
|
|
+int k1xisp_stat_reqbuffer(struct k1xisp_stats_node *stats_node,
|
|
+ struct isp_buffer_request_info *req_info);
|
|
+
|
|
+int k1xisp_stat_qbuffer(struct k1xisp_stats_node *stats_node,
|
|
+ struct isp_buffer_enqueue_info *qbuf_info);
|
|
+
|
|
+int k1xisp_stat_flush_buffer(struct k1xisp_stats_node *stats_node);
|
|
+int k1xisp_stat_try_flush_buffer(struct k1xisp_stats_node *stats_node);
|
|
+
|
|
+/**
|
|
+ * k1xisp_stat_dma_irq_handler - the upper half of dma irq.
|
|
+ * @stats_node: pointer to &struct k1xisp_stats_node.
|
|
+ *
|
|
+ * This function: record some dma irq flag when them happen.
|
|
+ *
|
|
+ * The return values:
|
|
+ * <= 0 : no dma irq happens or some error.
|
|
+ * > 0 : some dma irq happens, at least one.
|
|
+ */
|
|
+int k1xisp_stat_dma_irq_handler(struct k1xisp_stats_node *stats_node, void *irq_data);
|
|
+
|
|
+/**
|
|
+ * k1xisp_stat_dma_lower_half_irq - the lower half of dma irq realized by tasklet.
|
|
+ * @stats_node: pointer to &struct k1xisp_stats_node.
|
|
+ * @frame_id: the current frame number.
|
|
+ *
|
|
+ * This function: handle some dma irq.
|
|
+ *
|
|
+ * The return values:
|
|
+ * < 0 : some error happen.
|
|
+ */
|
|
+int k1xisp_stat_dma_lower_half_irq(struct k1xisp_stats_node *stats_node, u32 frame_id);
|
|
+
|
|
+/**
|
|
+ * k1xisp_stat_get_donebuf_by_frameid - get done buffer by frameid from done queue.
|
|
+ *
|
|
+ * @stats_node: pointer to &struct k1xisp_stats_node.
|
|
+ * @stat_id: the stat id, defined by &enum isp_stat_id
|
|
+ * @frame_num: the frame number.
|
|
+ * @return_idle: find the buffer return to idle queue if ture.
|
|
+ *
|
|
+ * This function:
|
|
+ * 1. get the buffer whose frameid is equal to frame_num from done queue.
|
|
+ * 2. put the buffer whose frameid is less than frame_num to idle queue.
|
|
+ *
|
|
+ * The return values:
|
|
+ * NULL : have not found the buffer.
|
|
+ * pointer to the done buffer.
|
|
+ */
|
|
+struct isp_kbuffer_info* k1xisp_stat_get_donebuf_by_frameid(struct k1xisp_stats_node *stats_node,
|
|
+ u32 stat_id, u32 frame_num, u32 return_idle);
|
|
+
|
|
+/**
|
|
+ * k1xisp_stat_read_from_mem - read stat's result registers to fill the buffer.
|
|
+ *
|
|
+ * @stats_node: pointer to &struct k1xisp_stats_node.
|
|
+ * @stat_id: the stat id, defined by &enum isp_stat_id
|
|
+ * @frame_num: the frame number.
|
|
+ *
|
|
+ * This function likes dma sof and eof, but we do not need to busy queue:
|
|
+ * 1. get the buffer from idle list.
|
|
+ * 2. fill the stat result to this buffer.
|
|
+ *
|
|
+ * The return values:
|
|
+ * the buffer or NULL.
|
|
+ */
|
|
+struct isp_kbuffer_info *k1xisp_stat_read_from_mem(struct k1xisp_stats_node *stats_node,
|
|
+ u32 stat_id, u32 frame_num);
|
|
+
|
|
+/**
|
|
+ * k1xisp_stat_mem_set_irq_flag - set start read for this stat.
|
|
+ *
|
|
+ * @stats_node: pointer to &struct k1xisp_stats_node.
|
|
+ * @stat_id: the stat id, defined by &enum isp_stat_id.
|
|
+ * @hw_pipe_id: hardware pipeline id.
|
|
+ *
|
|
+ * The return values:
|
|
+ * < 0 : some error happen.
|
|
+ */
|
|
+int k1xisp_stat_mem_set_irq_flag(struct k1xisp_stats_node *stats_node, u32 stat_id,
|
|
+ u32 hw_pipe_id);
|
|
+
|
|
+/**
|
|
+ * k1xisp_stat_mem_lower_half_irq - the lower half irq realized by tasklet for reading mem regs.
|
|
+ *
|
|
+ * @stats_node: pointer to &struct k1xisp_stats_node.
|
|
+ * @stat_id: the stat id, defined by &enum isp_stat_id.
|
|
+ * @frame_num: the current frame num.
|
|
+ *
|
|
+ */
|
|
+void k1xisp_stat_mem_lower_half_irq(struct k1xisp_stats_node *stats_node, u32 stat_id,
|
|
+ u32 frame_num);
|
|
+
|
|
+int k1xisp_stat_dma_dynamic_enable(struct k1xisp_stats_node *stats_node, u32 stat_id,
|
|
+ u32 enable);
|
|
+
|
|
+/**
|
|
+ * k1xisp_stat_reset_dma_busybuf_frameid - reset frameid of busy buffer to zero.
|
|
+ *
|
|
+ * @stats_node: pointer to &struct k1xisp_stats_node.
|
|
+ *
|
|
+ * This function calls when stream stop and start by frameid comes from zero.
|
|
+ */
|
|
+int k1xisp_stat_reset_dma_busybuf_frameid(struct k1xisp_stats_node *stats_node);
|
|
+
|
|
+#endif
|
|
diff --git a/drivers/media/platform/spacemit/camera/cam_plat/cam_plat.c b/drivers/media/platform/spacemit/camera/cam_plat/cam_plat.c
|
|
new file mode 100644
|
|
index 000000000000..111111111111
|
|
--- /dev/null
|
|
+++ b/drivers/media/platform/spacemit/camera/cam_plat/cam_plat.c
|
|
@@ -0,0 +1,294 @@
|
|
+// SPDX-License-Identifier: GPL-2.0
|
|
+/*
|
|
+ * plat_cam.c - Driver for SPACEMIT K1X Platform Camera Manager
|
|
+ *
|
|
+ * Copyright(C) 2023 SPACEMIT Micro Limited.
|
|
+ */
|
|
+#include <linux/module.h>
|
|
+#include <linux/platform_device.h>
|
|
+#include <linux/slab.h>
|
|
+#include <linux/of_device.h>
|
|
+#include <media/v4l2-common.h>
|
|
+#include <media/v4l2-device.h>
|
|
+#include <media/v4l2-event.h>
|
|
+#include <media/v4l2-ioctl.h>
|
|
+#include <media/k1x/k1x_plat_cam.h>
|
|
+#include "cam_dbg.h"
|
|
+#include "cam_plat.h"
|
|
+
|
|
+#define PLAT_CAM_DEVICE_COMPATIBLE "spacemit,plat-cam"
|
|
+
|
|
+struct plat_cam_device {
|
|
+ struct v4l2_device v4l2_dev;
|
|
+ struct kref ref;
|
|
+ struct mutex mutex;
|
|
+};
|
|
+
|
|
+#define to_plat_cam_dev(v4l2_dev) \
|
|
+ (struct plat_cam_device *)(v4l2_dev)
|
|
+
|
|
+unsigned long phys_cam2cpu(unsigned long phys_addr)
|
|
+{
|
|
+ if (phys_addr >= 0x80000000UL) {
|
|
+ phys_addr += 0x80000000UL;
|
|
+ }
|
|
+ return phys_addr;
|
|
+}
|
|
+
|
|
+unsigned long phys_cpu2cam(unsigned long phys_addr)
|
|
+{
|
|
+ if (phys_addr >= 0x100000000UL) {
|
|
+ phys_addr -= 0x80000000UL;
|
|
+ }
|
|
+ return phys_addr;
|
|
+}
|
|
+
|
|
+static void plat_cam_sd_notify(struct v4l2_subdev *sd,
|
|
+ unsigned int notification, void *arg)
|
|
+{
|
|
+ struct v4l2_subdev *subdev;
|
|
+ struct plat_cam_device *plat_cam_dev;
|
|
+ struct plat_cam_subdev *csd;
|
|
+ struct v4l2_device *v4l2_dev;
|
|
+
|
|
+ //remove after k1xisp register as v4l2 subdev
|
|
+ v4l2_dev = plat_cam_v4l2_device_get();
|
|
+ plat_cam_dev = to_plat_cam_dev(v4l2_dev);
|
|
+ if (unlikely(sd && sd->v4l2_dev != v4l2_dev))
|
|
+ goto done;
|
|
+
|
|
+ mutex_lock(&plat_cam_dev->mutex);
|
|
+ v4l2_device_for_each_subdev(subdev, v4l2_dev) {
|
|
+ if (subdev == sd)
|
|
+ continue;
|
|
+ csd = subdev_to_plat_csd(subdev);
|
|
+ if (csd->spm_ops && csd->spm_ops->notify)
|
|
+ csd->spm_ops->notify(subdev, notification, arg);
|
|
+ }
|
|
+ mutex_unlock(&plat_cam_dev->mutex);
|
|
+
|
|
+done:
|
|
+ plat_cam_v4l2_device_put(v4l2_dev);
|
|
+ return;
|
|
+}
|
|
+
|
|
+int plat_cam_register_subdev(struct plat_cam_subdev *csd)
|
|
+{
|
|
+ struct v4l2_subdev *sd;
|
|
+ struct plat_cam_device *plat_cam_dev;
|
|
+ struct v4l2_device *v4l2_dev;
|
|
+ int ret;
|
|
+
|
|
+ if (!csd || !csd->name) {
|
|
+ cam_err("invalid arguments");
|
|
+ return -EINVAL;
|
|
+ }
|
|
+ sd = &csd->sd;
|
|
+ v4l2_dev = plat_cam_v4l2_device_get();
|
|
+ if (!v4l2_dev) {
|
|
+ cam_err("failed to get v4l2 device");
|
|
+ return -ENODEV;
|
|
+ }
|
|
+ plat_cam_dev = to_plat_cam_dev(v4l2_dev);
|
|
+
|
|
+ mutex_lock(&plat_cam_dev->mutex);
|
|
+
|
|
+ v4l2_subdev_init(sd, csd->ops);
|
|
+ sd->owner = NULL;
|
|
+ sd->internal_ops = csd->internal_ops;
|
|
+ snprintf(sd->name, ARRAY_SIZE(sd->name), csd->name);
|
|
+ v4l2_set_subdevdata(sd, csd->token);
|
|
+
|
|
+ sd->flags = csd->sd_flags;
|
|
+
|
|
+ if (csd->pads_cnt == 0 || csd->pads == NULL)
|
|
+ ret = media_entity_pads_init(&sd->entity, 0, NULL);
|
|
+ else
|
|
+ ret = media_entity_pads_init(&sd->entity, csd->pads_cnt, csd->pads);
|
|
+ if (ret) {
|
|
+ cam_err("Failed to register subdev\n");
|
|
+ goto reg_fail;
|
|
+ }
|
|
+
|
|
+ sd->entity.function = csd->ent_function;
|
|
+
|
|
+ ret = v4l2_device_register_subdev(v4l2_dev, sd);
|
|
+ if (ret) {
|
|
+ cam_err("register subdev failed");
|
|
+ goto reg_fail;
|
|
+ }
|
|
+
|
|
+ ret = v4l2_device_register_subdev_nodes(v4l2_dev);
|
|
+ if (ret) {
|
|
+ cam_err("failed to register %s node", sd->name);
|
|
+ goto reg_fail;
|
|
+ }
|
|
+
|
|
+ sd->entity.name = video_device_node_name(sd->devnode);
|
|
+
|
|
+ if (csd->spm_ops && csd->spm_ops->registered) {
|
|
+ ret = csd->spm_ops->registered(sd);
|
|
+ if (ret)
|
|
+ goto reg_fail;
|
|
+ }
|
|
+
|
|
+ mutex_unlock(&plat_cam_dev->mutex);
|
|
+ return 0;
|
|
+
|
|
+reg_fail:
|
|
+ mutex_unlock(&plat_cam_dev->mutex);
|
|
+ plat_cam_v4l2_device_put(v4l2_dev);
|
|
+ return ret;
|
|
+}
|
|
+
|
|
+EXPORT_SYMBOL(plat_cam_register_subdev);
|
|
+
|
|
+int plat_cam_unregister_subdev(struct plat_cam_subdev *csd)
|
|
+{
|
|
+ struct plat_cam_device *plat_cam_dev;
|
|
+ struct v4l2_device *v4l2_dev;
|
|
+
|
|
+ v4l2_dev = csd->sd.v4l2_dev;
|
|
+ plat_cam_dev = to_plat_cam_dev(v4l2_dev);
|
|
+
|
|
+ mutex_lock(&plat_cam_dev->mutex);
|
|
+ if (csd->spm_ops && csd->spm_ops->unregistered)
|
|
+ csd->spm_ops->unregistered(&csd->sd);
|
|
+ v4l2_device_unregister_subdev(&csd->sd);
|
|
+ mutex_unlock(&plat_cam_dev->mutex);
|
|
+
|
|
+ plat_cam_v4l2_device_put(v4l2_dev);
|
|
+
|
|
+ return 0;
|
|
+}
|
|
+
|
|
+EXPORT_SYMBOL(plat_cam_unregister_subdev);
|
|
+
|
|
+static struct plat_cam_device *g_dev;
|
|
+static int plat_cam_probe(struct platform_device *pdev)
|
|
+{
|
|
+ int ret = 0;
|
|
+ struct plat_cam_device *plat_cam_dev;
|
|
+ struct v4l2_device *v4l2_dev;
|
|
+
|
|
+ plat_cam_dev = kzalloc(sizeof(*plat_cam_dev), GFP_KERNEL);
|
|
+ if (!plat_cam_dev) {
|
|
+ cam_err("could not allocate memory\n");
|
|
+ return -ENOMEM;
|
|
+ }
|
|
+ kref_init(&plat_cam_dev->ref);
|
|
+ mutex_init(&plat_cam_dev->mutex);
|
|
+
|
|
+ /* setup v4l2 device */
|
|
+ v4l2_dev = &plat_cam_dev->v4l2_dev;
|
|
+ ret = v4l2_device_register(&(pdev->dev), v4l2_dev);
|
|
+ if (ret)
|
|
+ return ret;
|
|
+ g_dev = plat_cam_dev;
|
|
+
|
|
+#if defined(CONFIG_MEDIA_CONTROLLER)
|
|
+ /* setup media device */
|
|
+ v4l2_dev->mdev = kzalloc(sizeof(*v4l2_dev->mdev), GFP_KERNEL);
|
|
+ if (!v4l2_dev->mdev) {
|
|
+ cam_err("could not allocate memory\n");
|
|
+ ret = -ENOMEM;
|
|
+ goto media_fail;
|
|
+ }
|
|
+
|
|
+ media_device_init(v4l2_dev->mdev);
|
|
+ v4l2_dev->mdev->dev = &(pdev->dev);
|
|
+ strlcpy(v4l2_dev->mdev->model, PLAT_CAM_NAME, sizeof(v4l2_dev->mdev->model));
|
|
+
|
|
+ ret = __media_device_register(v4l2_dev->mdev, NULL);
|
|
+ if (ret)
|
|
+ goto media_fail;
|
|
+#endif
|
|
+ v4l2_dev->notify = plat_cam_sd_notify;
|
|
+
|
|
+ return 0;
|
|
+
|
|
+#if defined(CONFIG_MEDIA_CONTROLLER)
|
|
+media_fail:
|
|
+ v4l2_device_unregister(v4l2_dev);
|
|
+ kfree(plat_cam_dev);
|
|
+#endif
|
|
+ return ret;
|
|
+}
|
|
+
|
|
+static int plat_cam_remove(struct platform_device *pdev)
|
|
+{
|
|
+ struct v4l2_device *v4l2_dev = dev_get_drvdata(&pdev->dev);
|
|
+ struct plat_cam_device *plat_cam_dev = to_plat_cam_dev(v4l2_dev);
|
|
+
|
|
+#if defined(CONFIG_MEDIA_CONTROLLER)
|
|
+ media_device_unregister(v4l2_dev->mdev);
|
|
+ kfree(v4l2_dev->mdev);
|
|
+ v4l2_dev->mdev = NULL;
|
|
+#endif
|
|
+ v4l2_device_unregister(v4l2_dev);
|
|
+ g_dev = NULL;
|
|
+
|
|
+ mutex_destroy(&plat_cam_dev->mutex);
|
|
+ kfree(plat_cam_dev);
|
|
+
|
|
+ return 0;
|
|
+}
|
|
+
|
|
+static const struct of_device_id plat_cam_dt_match[] = {
|
|
+ { .compatible = PLAT_CAM_DEVICE_COMPATIBLE, .data = NULL },
|
|
+ {},
|
|
+};
|
|
+
|
|
+static struct platform_driver plat_cam_driver = {
|
|
+ .driver = {
|
|
+ .name = PLAT_CAM_NAME,
|
|
+ .of_match_table = plat_cam_dt_match,
|
|
+ },
|
|
+ .probe = plat_cam_probe,
|
|
+ .remove = plat_cam_remove,
|
|
+};
|
|
+
|
|
+struct v4l2_device *plat_cam_v4l2_device_get(void)
|
|
+{
|
|
+ if (g_dev) {
|
|
+ kref_get(&g_dev->ref);
|
|
+ return &(g_dev->v4l2_dev);
|
|
+ } else
|
|
+ return NULL;
|
|
+}
|
|
+
|
|
+EXPORT_SYMBOL(plat_cam_v4l2_device_get);
|
|
+
|
|
+int plat_cam_v4l2_device_put(struct v4l2_device *v4l2_dev)
|
|
+{
|
|
+#if 0
|
|
+ struct plat_cam_device *plat_cam_dev = to_plat_cam_dev(v4l2_dev);
|
|
+ struct platform_driver *plat_driver;
|
|
+
|
|
+ if (!v4l2_dev || !v4l2_dev->dev)
|
|
+ return -ENODEV;
|
|
+
|
|
+ plat_driver = to_platform_driver(v4l2_dev->dev->driver);
|
|
+
|
|
+ kref_put(&plat_cam_dev->ref, NULL);
|
|
+
|
|
+ if (kref_read(&plat_cam_dev->ref) == 1) {
|
|
+ plat_driver->remove = plat_cam_remove;
|
|
+ platform_driver_unregister(plat_driver);
|
|
+ kfree(plat_driver->driver.name);
|
|
+ kfree(plat_driver->driver.of_match_table);
|
|
+ kfree(plat_driver);
|
|
+ }
|
|
+#else
|
|
+ if (g_dev)
|
|
+ kref_put(&g_dev->ref, NULL);
|
|
+#endif
|
|
+ return 0;
|
|
+}
|
|
+
|
|
+EXPORT_SYMBOL(plat_cam_v4l2_device_put);
|
|
+
|
|
+module_platform_driver(plat_cam_driver);
|
|
+
|
|
+MODULE_DESCRIPTION("SPACEMIT Camera Platform Driver");
|
|
+MODULE_LICENSE("GPL");
|
|
diff --git a/drivers/media/platform/spacemit/camera/cam_plat/cam_plat.h b/drivers/media/platform/spacemit/camera/cam_plat/cam_plat.h
|
|
new file mode 100644
|
|
index 000000000000..111111111111
|
|
--- /dev/null
|
|
+++ b/drivers/media/platform/spacemit/camera/cam_plat/cam_plat.h
|
|
@@ -0,0 +1,180 @@
|
|
+/* SPDX-License-Identifier: GPL-2.0 */
|
|
+/*
|
|
+ * cam_plat.h - Driver for SPACEMIT K1X Platform Camera Manager
|
|
+ *
|
|
+ * Copyright(C) 2023 SPACEMIT Micro Limited.
|
|
+ */
|
|
+
|
|
+#ifndef __PLAT_CAM_H__
|
|
+#define __PLAT_CAM_H__
|
|
+
|
|
+#include <media/v4l2-subdev.h>
|
|
+#include <media/v4l2-ioctl.h>
|
|
+
|
|
/*
 * Lifecycle and notification hooks a client driver may attach to its
 * plat_cam_subdev; invoked by the platform camera manager.
 */
struct spm_v4l2_subdev_ops {
	int (*registered)(struct v4l2_subdev *sd);
	void (*unregistered)(struct v4l2_subdev *sd);
	void (*notify)(struct v4l2_subdev *sd, unsigned int notification, void *arg);
};
|
|
+
|
|
+/**
|
|
+ * struct plat_cam_subdev - describes a camera subdevice
|
|
+ *
|
|
+ * @sd: V4l2 subdevice
|
|
+ * @ops: V4l2 subdevice operations
|
|
+ * @internal_ops: V4l2 subdevice internal operations
|
|
+ * @spm_ops: spm internal operations
|
|
+ * @name: Name of the sub-device. Please notice that the name
|
|
+ * must be unique.
|
|
+ * @sd_flags: Subdev flags. Can be:
|
|
+ * %V4L2_SUBDEV_FL_HAS_DEVNODE - Set this flag if
|
|
+ * this subdev needs a device node.
|
|
+ * %V4L2_SUBDEV_FL_HAS_EVENTS - Set this flag if
|
|
+ * this subdev generates events.
|
|
+ * @ent_function: Media entity function type.
|
|
+ * @pads_cnt: Number of sink and source pads.
|
|
+ * @pads: Pads array with the size defined by @pads_cnt.
|
|
+ * @token: Pointer to cookie of the client driver
|
|
+ *
|
|
+ * Each instance of a subdev driver should create this struct, either
|
|
+ * stand-alone or embedded in a larger struct. This structure should be
|
|
+ * initialized/registered by plat_cam_register_subdev
|
|
+ *
|
|
+ */
|
|
/* See the kernel-doc comment above for per-field documentation. */
struct plat_cam_subdev {
	struct v4l2_subdev sd;
	const struct v4l2_subdev_ops *ops;
	const struct v4l2_subdev_internal_ops *internal_ops;
	const struct spm_v4l2_subdev_ops *spm_ops;
	char *name;		/* must be unique among registered subdevs */
	uint32_t sd_flags;	/* V4L2_SUBDEV_FL_* */
	uint32_t ent_function;	/* MEDIA_ENT_F_* entity function */

	uint16_t pads_cnt;
	struct media_pad *pads;	/* array of pads_cnt entries */

	void *token;		/* opaque cookie owned by the client driver */
};
|
|
+
|
|
/* Notification codes passed through spm_v4l2_subdev_ops::notify(). */
enum {
	PLAT_SD_NOTIFY_REGISTER_ISPFIRM = 1,
	PLAT_SD_NOTIFY_EIS_DATA,
	PLAT_SD_NOTIFY_SENSOR_STRM_CTRL,
	PLAT_SD_NOTIFY_REGISTER_SENSOR_OPS,
};

/* Sensor stream control commands (see spm_camera_sensor_strm_ctrl::cmd). */
enum {
	SC_SENSOR_STRM_PAUSE = 0,
	SC_SENSOR_STRM_RESUME,
};

struct spm_camera_sensor_strm_ctrl {
	uint32_t sensor_idx;	/* index of the sensor to control */
	uint32_t cmd;		/* SC_SENSOR_STRM_PAUSE / SC_SENSOR_STRM_RESUME */
};

/* Command space for ISP-firmware send_cmd(); custom IDs start at 1000. */
#define SC_ISPFIRM_CMD_CUSTOM (1000)
#define SC_ISPFIRM_CMD_GET_FRAME_INFO (SC_ISPFIRM_CMD_CUSTOM + 1)
#define SC_ISPFIRM_CMD_PIPE_RESET_START (SC_ISPFIRM_CMD_CUSTOM + 2)
#define SC_ISPFIRM_CMD_PIPE_RESET_END (SC_ISPFIRM_CMD_CUSTOM + 3)

/* Command space for sensor send_cmd(); custom IDs start at 2000. */
#define SC_SENSOR_CMD_CUSTOM (2000)
#define SC_SENSOR_CMD_STRM_CTRL (SC_SENSOR_CMD_CUSTOM + 1)

/* Callback table registered via PLAT_SD_NOTIFY_REGISTER_SENSOR_OPS. */
struct spm_camera_sensor_ops {
	int (*send_cmd)(unsigned int cmd, void *cmd_payload, unsigned int payload_len);
};

/* Callback table registered via PLAT_SD_NOTIFY_REGISTER_ISPFIRM. */
struct spm_camera_ispfirm_ops {
	int (*send_cmd)(unsigned int cmd, void *cmd_payload, unsigned int payload_len);
	int (*irq_callback)(int irq_num, void *irq_data, unsigned int data_len);
};
|
|
+
|
|
/* Per-slice parameters for a sliced (tiled) still capture. */
struct camera_capture_slice_info {
	int32_t hw_pipe_id;
	int32_t total_slice_cnt;
	int32_t slice_width;
	int32_t raw_read_offset;
	int32_t yuv_out_offset;
	int32_t dwt_offset[4];
	int32_t exception_exit; //stop this capture immediately because some error happen if true.
};

/**
 * struct spm_camera_vi_ops - vi to extern(isp) operation.
 *
 * @notify_caputre_until_done: function pointer that notifies VI to start a
 *		slice capture and waits for it to finish; the return value
 *		indicates success(0) or failure(negative).  Timeout is in ms.
 *		(Field name keeps the historical "caputre" spelling -- it is
 *		part of the ABI shared with client drivers.)
 */
struct spm_camera_vi_ops {
	int (*notify_caputre_until_done)(int slice_index, struct camera_capture_slice_info *slice_info, int timeout);
};

/* Bundle handed over when the ISP firmware module registers itself. */
struct isp_firm {
	size_t frameinfo_size;
	struct spm_camera_ispfirm_ops *ispfirm_ops;
	struct spm_camera_vi_ops *vi_ops;
};
|
|
+
|
|
/* EIS (electronic image stabilization) offsets reported per frame. */
struct isp_eis_data {
	uint32_t pipeLineID;
	int32_t frameId;
	int32_t offsetX;
	int32_t offsetY;
};

/* frame_info::type values. */
enum {
	SC_FRM_INFO_T_INTERNAL = 0,
	SC_FRM_INFO_T_VRF,
};

/* Payload for SC_ISPFIRM_CMD_GET_FRAME_INFO. */
struct frame_info {
	unsigned int pipe_id;
	unsigned int frame_id;
	int type;	/* SC_FRM_INFO_T_* */
	void *vaddr;
};

/* irq_callback() irq_num values; selects the *_irq_data payload below. */
enum {
	ISP_IRQ,
	DMA_IRQ,
	CCIC_IRQ,
};

/* Payload for SC_ISPFIRM_CMD_PIPE_RESET_START/_END. */
struct isp_pipe_reset {
	unsigned int pipe_id;
};

/* ISP_IRQ payload: per-pipe frame counters and status words. */
struct isp_irq_data {
	unsigned int pipe0_frame_id;
	unsigned int pipe1_frame_id;
	unsigned int pipe0_irq_status;
	unsigned int pipe1_irq_status;
};

/* DMA_IRQ payload. */
struct dma_irq_data {
	unsigned int status1;
	unsigned int status2;
};

/* CCIC_IRQ payload. */
struct ccic_irq_data {
	unsigned int frame_id;
	unsigned int snapshot;
	unsigned int pipe_shadow_ready;
};
|
|
+
|
|
/* Map an embedded v4l2_subdev back to its plat_cam_subdev container. */
#define subdev_to_plat_csd(subdev) \
	container_of(subdev, struct plat_cam_subdev, sd)

/* Translate physical addresses between the camera and CPU address views. */
unsigned long phys_cam2cpu(unsigned long phys_addr);
unsigned long phys_cpu2cam(unsigned long phys_addr);

/* Register/unregister a client subdevice with the platform camera manager. */
int plat_cam_register_subdev(struct plat_cam_subdev *csd);

int plat_cam_unregister_subdev(struct plat_cam_subdev *csd);

/* Reference-counted access to the shared v4l2_device (get/put pair). */
struct v4l2_device *plat_cam_v4l2_device_get(void);

int plat_cam_v4l2_device_put(struct v4l2_device *v4l2_dev);

#endif /* ifndef __PLAT_CAM_H__ */
|
|
diff --git a/drivers/media/platform/spacemit/camera/cam_sensor/cam_sensor.c b/drivers/media/platform/spacemit/camera/cam_sensor/cam_sensor.c
|
|
new file mode 100644
|
|
index 000000000000..111111111111
|
|
--- /dev/null
|
|
+++ b/drivers/media/platform/spacemit/camera/cam_sensor/cam_sensor.c
|
|
@@ -0,0 +1,1419 @@
|
|
+/* SPDX-License-Identifier: GPL-2.0 */
|
|
+/*
|
|
+ *cam_sensor.c - camera sensor driver
|
|
+ *
|
|
+ * Copyright (C) 2023 SPACEMIT Micro Limited
|
|
+ * All Rights Reserved.
|
|
+ */
|
|
+/* #define DEBUG */
|
|
+
|
|
+#include <linux/atomic.h>
|
|
+#include <linux/compat.h>
|
|
+#include <linux/kernel.h>
|
|
+#include <linux/init.h>
|
|
+#include <linux/module.h>
|
|
+#include <linux/slab.h>
|
|
+#include <linux/io.h>
|
|
+#include <linux/device.h>
|
|
+#include <linux/errno.h>
|
|
+#include <linux/cdev.h>
|
|
+#include <linux/i2c.h>
|
|
+#include <linux/delay.h>
|
|
+
|
|
+#include <linux/of_address.h>
|
|
+#include <linux/of_device.h>
|
|
+#include <linux/of_platform.h>
|
|
+#include <linux/timekeeping.h>
|
|
+#include <linux/regulator/driver.h>
|
|
+#include <linux/clk.h>
|
|
+#include <linux/clk-provider.h>
|
|
+
|
|
+#include "cam_dbg.h"
|
|
+#include <media/k1x/cam_sensor_uapi.h>
|
|
+#include "cam_sensor.h"
|
|
+#include "../cam_ccic/ccic_drv.h"
|
|
+
|
|
/* Standard module information, edit as appropriate */
MODULE_LICENSE("GPL v2");
MODULE_AUTHOR("SPACEMIT Inc.");
MODULE_DESCRIPTION("SPACEMIT Camera Sensor Driver");

#undef CAM_MODULE_TAG
#define CAM_MODULE_TAG CAM_MDL_SNR

#define DRIVER_NAME "cam_sensor"

/* Registers transferred per i2c burst chunk (currently one at a time). */
#define BURST_I2C_REG_SIZE 1

/* Per-sensor device table indexed by sns_rst_source_t / minor number. */
static struct cam_sensor_device *g_sdev[CAM_SNS_MAX_DEV_NUM];
static int camsnr_major;
//static struct cdev camsnr_cdev;
static struct class *camsnr_class;
/* Board-level supply-enable GPIOs shared by all sensors. */
struct gpio_desc *gpio_dvdden = NULL;
struct gpio_desc *gpio_dcdcen = NULL;

/* Bail out of the enclosing int-returning function on a NULL pointer. */
#define SENSOR_DRIVER_CHECK_POINTER(ptr) \
	do { \
		if (NULL == (ptr)) { \
			cam_err("%s: line %d: Invalid Pointer!", __func__, __LINE__); \
			return -EINVAL; \
		} \
	} while (0)
|
|
+
|
|
+/*********************************************************************************/
|
|
+#define SENSOR_MCLK_CLK_RATE 24000000
|
|
+static int cam_sensor_power_set(struct cam_sensor_device *msnr_dev, u32 on)
|
|
+{
|
|
+ int ret = 0;
|
|
+
|
|
+ SENSOR_DRIVER_CHECK_POINTER(msnr_dev);
|
|
+
|
|
+ if (IS_ERR_OR_NULL(msnr_dev->pwdn) && IS_ERR_OR_NULL(msnr_dev->rst)) {
|
|
+ cam_err("%s: sensor%d pwdn or reset gpio is error", __func__,
|
|
+ msnr_dev->id);
|
|
+ return -EINVAL;
|
|
+ }
|
|
+
|
|
+ if (on) {
|
|
+ if (msnr_dev->mclk) {
|
|
+ ret = clk_prepare_enable(msnr_dev->mclk);
|
|
+ if (ret < 0)
|
|
+ goto avdd_err;
|
|
+ ret = clk_set_rate(msnr_dev->mclk, SENSOR_MCLK_CLK_RATE);
|
|
+ if (ret)
|
|
+ return ret;
|
|
+ }
|
|
+
|
|
+ if (!IS_ERR_OR_NULL(msnr_dev->avdd)) {
|
|
+ regulator_set_voltage(msnr_dev->avdd, 2800000, 2800000);
|
|
+ ret = regulator_enable(msnr_dev->avdd);
|
|
+ if (ret < 0)
|
|
+ goto avdd_err;
|
|
+ }
|
|
+ if (!IS_ERR_OR_NULL(msnr_dev->dovdd)) {
|
|
+ regulator_set_voltage(msnr_dev->dovdd, 1800000, 1800000);
|
|
+ ret = regulator_enable(msnr_dev->dovdd);
|
|
+ if (ret < 0)
|
|
+ goto dovdd_err;
|
|
+ }
|
|
+ if (!IS_ERR_OR_NULL(msnr_dev->dvdd)) {
|
|
+ regulator_set_voltage(msnr_dev->dvdd, 1200000, 1200000);
|
|
+ ret = regulator_enable(msnr_dev->dvdd);
|
|
+ if (ret < 0)
|
|
+ goto dvdd_err;
|
|
+ }
|
|
+ /* dvdden-gpios */
|
|
+ if (!IS_ERR_OR_NULL(gpio_dvdden)) // msnr_dev->dvdden
|
|
+ gpiod_direction_output(gpio_dvdden, 1); // msnr_dev->dvdden
|
|
+ if (!IS_ERR_OR_NULL(msnr_dev->afvdd)) {
|
|
+ regulator_set_voltage(msnr_dev->afvdd, 2800000, 2800000);
|
|
+ ret = regulator_enable(msnr_dev->afvdd);
|
|
+ if (ret < 0)
|
|
+ goto af_err;
|
|
+ }
|
|
+
|
|
+ /* pwdn-gpios */
|
|
+ if (!IS_ERR_OR_NULL(msnr_dev->pwdn))
|
|
+ gpiod_set_value_cansleep(msnr_dev->pwdn, 1);
|
|
+
|
|
+ /* rst-gpios */
|
|
+ if (!IS_ERR_OR_NULL(msnr_dev->rst)) {
|
|
+ gpiod_set_value_cansleep(msnr_dev->rst, 0);
|
|
+ usleep_range(5 * 1000, 5 * 1000);
|
|
+ gpiod_set_value_cansleep(msnr_dev->rst, 1);
|
|
+ usleep_range(10 * 1000, 10 * 1000);
|
|
+ }
|
|
+ cam_dbg("sensor%d unreset", msnr_dev->id);
|
|
+ } else {
|
|
+ /* rst-gpios */
|
|
+ if (!IS_ERR_OR_NULL(msnr_dev->rst))
|
|
+ gpiod_set_value_cansleep(msnr_dev->rst, 0);
|
|
+
|
|
+ /* pwdn-gpios */
|
|
+ if (!IS_ERR_OR_NULL(msnr_dev->pwdn))
|
|
+ gpiod_set_value_cansleep(msnr_dev->pwdn, 0);
|
|
+
|
|
+ if (!IS_ERR_OR_NULL(msnr_dev->dvdd))
|
|
+ regulator_disable(msnr_dev->dvdd);
|
|
+ if (!IS_ERR_OR_NULL(msnr_dev->avdd))
|
|
+ regulator_disable(msnr_dev->avdd);
|
|
+ if (!IS_ERR_OR_NULL(msnr_dev->dovdd))
|
|
+ regulator_disable(msnr_dev->dovdd);
|
|
+ /* dvdden-gpios */
|
|
+ if (!IS_ERR_OR_NULL(gpio_dvdden)) // msnr_dev->dvdden
|
|
+ gpiod_direction_output(gpio_dvdden, 0); //msnr_dev->dvdden
|
|
+ if (!IS_ERR_OR_NULL(msnr_dev->afvdd))
|
|
+ regulator_disable(msnr_dev->afvdd);
|
|
+
|
|
+ clk_disable_unprepare(msnr_dev->mclk);
|
|
+ cam_dbg("sensor%d reset", msnr_dev->id);
|
|
+ }
|
|
+
|
|
+ return ret;
|
|
+
|
|
+af_err:
|
|
+ if (msnr_dev->dvdd)
|
|
+ regulator_disable(msnr_dev->dvdd);
|
|
+dvdd_err:
|
|
+ if (msnr_dev->dovdd)
|
|
+ regulator_disable(msnr_dev->dovdd);
|
|
+dovdd_err:
|
|
+ if (msnr_dev->avdd)
|
|
+ regulator_disable(msnr_dev->afvdd);
|
|
+avdd_err:
|
|
+ return ret;
|
|
+}
|
|
+
|
|
/*
 * camsnr_set_power_voltage() - ioctl backend: set one regulator's voltage.
 *
 * @arg points to a userspace struct cam_sensor_power selecting the
 * regulator and the voltage (min == max).  Unknown regulator ids are only
 * logged, matching the original behavior.
 *
 * Return: 0 on success (or unknown id), -EFAULT/-EINVAL on error.
 */
static int camsnr_set_power_voltage(unsigned long arg,
				    struct cam_sensor_device *msnr_dev)
{
	uint32_t voltage = 0;
	cam_sensor_power_regulator_id regulator_id = 0;
	struct cam_sensor_power sensor_power;

	SENSOR_DRIVER_CHECK_POINTER(msnr_dev);

	if (copy_from_user((void *)&sensor_power, (void *)arg, sizeof(sensor_power))) {
		cam_err("Failed to copy args from user");
		return -EFAULT;
	}
	regulator_id = sensor_power.regulator_id;
	voltage = sensor_power.voltage;

	switch (regulator_id) {
	case SENSOR_REGULATOR_AFVDD:
		if (!IS_ERR_OR_NULL(msnr_dev->afvdd)) {
			regulator_set_voltage(msnr_dev->afvdd, voltage, voltage);
		} else {
			cam_err("afvdd is NULL!");
			return -EINVAL;
		}
		break;
	case SENSOR_REGULATOR_AVDD:
		if (!IS_ERR_OR_NULL(msnr_dev->avdd)) {
			regulator_set_voltage(msnr_dev->avdd, voltage, voltage);
		} else {
			cam_err("avdd is NULL!");
			return -EINVAL;
		}
		break;
	case SENSOR_REGULATOR_DOVDD:
		if (!IS_ERR_OR_NULL(msnr_dev->dovdd)) {
			regulator_set_voltage(msnr_dev->dovdd, voltage, voltage);
		} else {
			cam_err("dovdd is NULL!");
			return -EINVAL;
		}
		break;
	case SENSOR_REGULATOR_DVDD:
		if (!IS_ERR_OR_NULL(msnr_dev->dvdd)) {
			regulator_set_voltage(msnr_dev->dvdd, voltage, voltage);
		} else {
			cam_err("dvdd is NULL!");
			return -EINVAL;
		}
		break;
	default:
		/* NOTE: regulator_set_voltage() results are intentionally unchecked. */
		cam_err("err regulator id");
		break;
	}

	return 0;
}
|
|
+
|
|
/*
 * camsnr_set_power_on() - ioctl backend: enable/disable one regulator.
 *
 * @arg points to a userspace struct cam_sensor_power selecting the
 * regulator (regulator_id) and the desired state (on).  Unknown regulator
 * ids are only logged and the function still returns 0, matching the
 * original behavior.
 *
 * Return: 0 on success, -EFAULT/-EINVAL or a regulator errno on failure.
 */
static int camsnr_set_power_on(unsigned long arg, struct cam_sensor_device *msnr_dev)
{
	int ret = 0;
	uint32_t on = 0;
	cam_sensor_power_regulator_id regulator_id = 0;
	struct cam_sensor_power sensor_power;

	SENSOR_DRIVER_CHECK_POINTER(msnr_dev);

	if (copy_from_user((void *)&sensor_power, (void *)arg, sizeof(sensor_power))) {
		cam_err("Failed to copy args from user");
		return -EFAULT;
	}
	regulator_id = sensor_power.regulator_id;
	on = sensor_power.on;

	if (on) {
		switch (regulator_id) {
		case SENSOR_REGULATOR_AFVDD:
			if (!IS_ERR_OR_NULL(msnr_dev->afvdd)) {
				ret = regulator_enable(msnr_dev->afvdd);
				if (ret < 0) {
					cam_err("enable afvdd failed");
					return ret;
				}
			} else {
				cam_err("afvdd is NULL!");
				return -EINVAL;
			}
			break;
		case SENSOR_REGULATOR_AVDD:
			if (!IS_ERR_OR_NULL(msnr_dev->avdd)) {
				ret = regulator_enable(msnr_dev->avdd);
				if (ret < 0) {
					cam_err("enable avdd failed");
					return ret;
				}
			} else {
				cam_err("avdd is NULL!");
				return -EINVAL;
			}
			break;
		case SENSOR_REGULATOR_DOVDD:
			if (!IS_ERR_OR_NULL(msnr_dev->dovdd)) {
				ret = regulator_enable(msnr_dev->dovdd);
				if (ret < 0) {
					cam_err("enable dovdd failed");
					return ret;
				}
			} else {
				cam_err("dovdd is NULL!");
				return -EINVAL;
			}
			break;
		case SENSOR_REGULATOR_DVDD:
			if (!IS_ERR_OR_NULL(msnr_dev->dvdd)) {
				ret = regulator_enable(msnr_dev->dvdd);
				if (ret < 0) {
					cam_err("enable dvdd failed");
					return ret;
				}
			} else {
				cam_err("dvdd is NULL!");
				return -EINVAL;
			}
			break;
		default:
			cam_err("err regulator id");
			break;
		}
	} else {
		switch (regulator_id) {
		case SENSOR_REGULATOR_AFVDD:
			if (!IS_ERR_OR_NULL(msnr_dev->afvdd)) {
				ret = regulator_disable(msnr_dev->afvdd);
			} else {
				cam_err("afvdd is NULL!");
				return -EINVAL;
			}
			break;
		case SENSOR_REGULATOR_AVDD:
			if (!IS_ERR_OR_NULL(msnr_dev->avdd)) {
				ret = regulator_disable(msnr_dev->avdd);
			} else {
				cam_err("avdd is NULL!");
				return -EINVAL;
			}
			break;
		case SENSOR_REGULATOR_DOVDD:
			if (!IS_ERR_OR_NULL(msnr_dev->dovdd)) {
				ret = regulator_disable(msnr_dev->dovdd);
			} else {
				cam_err("dovdd is NULL!");
				return -EINVAL;
			}
			break;
		case SENSOR_REGULATOR_DVDD:
			if (!IS_ERR_OR_NULL(msnr_dev->dvdd)) {
				ret = regulator_disable(msnr_dev->dvdd);
			} else {
				cam_err("dvdd is NULL!");
				return -EINVAL;
			}
			break;
		default:
			cam_err("err regulator id");
			break;
		}
	}
	/* NOTE(review): disable-path ret is computed but 0 is returned -- a
	 * regulator_disable() failure is silently dropped; confirm intent. */
	return 0;
}
|
|
+
|
|
/*
 * camsnr_set_gpio_enable() - ioctl backend: drive one sensor control GPIO.
 *
 * @arg points to a userspace struct cam_sensor_gpio selecting the GPIO
 * (gpio_id) and the output level (enable).  Missing/invalid GPIO descs are
 * silently skipped for known ids.
 *
 * Return: 0 on success, -EFAULT on copy failure, -EINVAL on unknown id.
 */
static int camsnr_set_gpio_enable(unsigned long arg, struct cam_sensor_device *msnr_dev)
{
	uint8_t enable = 0;
	cam_sensor_gpio_id gpio_id = 0;
	struct cam_sensor_gpio sensor_gpio;

	SENSOR_DRIVER_CHECK_POINTER(msnr_dev);

	if (copy_from_user((void *)&sensor_gpio, (void *)arg, sizeof(sensor_gpio))) {
		cam_err("Failed to copy args from user");
		return -EFAULT;
	}
	gpio_id = sensor_gpio.gpio_id;
	enable = sensor_gpio.enable;

	switch (gpio_id) {
	case SENSOR_GPIO_PWDN:
		if (!IS_ERR_OR_NULL(msnr_dev->pwdn))
			gpiod_direction_output(msnr_dev->pwdn, enable);
		break;
	case SENSOR_GPIO_RST:
		if (!IS_ERR_OR_NULL(msnr_dev->rst))
			gpiod_direction_output(msnr_dev->rst, enable);
		break;
	case SENSOR_GPIO_DVDDEN:
		/* board-level shared enable, not per-sensor */
		if (!IS_ERR_OR_NULL(gpio_dvdden))
			gpiod_direction_output(gpio_dvdden, enable);
		break;
	case SENSOR_GPIO_DCDCEN:
		if (!IS_ERR_OR_NULL(msnr_dev->dcdcen))
			gpiod_direction_output(msnr_dev->dcdcen, enable);
		break;
	default:
		cam_err("wrong gpio_id %d", gpio_id);
		return -EINVAL;
		break;
	}
	return 0;
}
|
|
+
|
|
+static int camsnr_set_mclk_rate(unsigned long arg, struct cam_sensor_device *msnr_dev)
|
|
+{
|
|
+ int ret = 0;
|
|
+ uint32_t clk_rate = 0;
|
|
+
|
|
+ SENSOR_DRIVER_CHECK_POINTER(msnr_dev);
|
|
+
|
|
+ if (copy_from_user((void *)&clk_rate, (void *)arg, sizeof(clk_rate))) {
|
|
+ cam_err("Failed to copy args from user");
|
|
+ return -EFAULT;
|
|
+ }
|
|
+
|
|
+ clk_rate = (clk_rate == 0) ? SENSOR_MCLK_CLK_RATE : clk_rate;
|
|
+ if (msnr_dev->mclk) {
|
|
+ ret = clk_set_rate(msnr_dev->mclk, clk_rate);
|
|
+ if (ret)
|
|
+ return ret;
|
|
+ } else {
|
|
+ cam_err("%s: mclk is NULL", __func__);
|
|
+ return -EINVAL;
|
|
+ }
|
|
+ return 0;
|
|
+}
|
|
+
|
|
/*
 * camsnr_set_mclk_enable() - ioctl backend: gate the sensor master clock.
 *
 * @arg points to a userspace uint32_t flag (non-zero = enable).  A request
 * that matches the current state is only logged as a warning.
 *
 * Return: 0 on success, -EFAULT/-EINVAL or a clk errno on failure.
 */
static int camsnr_set_mclk_enable(unsigned long arg, struct cam_sensor_device *msnr_dev)
{
	int ret = 0;
	uint32_t clk_enable = 0;

	SENSOR_DRIVER_CHECK_POINTER(msnr_dev);

	if (copy_from_user((void *)&clk_enable, (void *)arg,
			   sizeof(clk_enable))) {
		cam_err("Failed to copy args from user");
		return -EFAULT;
	}

	if (msnr_dev->mclk) {
		/* __clk_is_enabled() guards against double enable/disable */
		if (clk_enable && !__clk_is_enabled(msnr_dev->mclk)) {
			ret = clk_prepare_enable(msnr_dev->mclk);
			if (ret < 0)
				return ret;
		} else if (!clk_enable && __clk_is_enabled(msnr_dev->mclk)) {
			clk_disable_unprepare(msnr_dev->mclk);
		} else {
			cam_warn("%s: mclk%d is already %s", __func__,
				 msnr_dev->id,
				 clk_enable ? "enabled" : "disabled");
		}
	} else {
		cam_err("%s: mclk is NULL", __func__);
		return -EINVAL;
	}
	return 0;
}
|
|
+
|
|
+static int camsnr_reset_sensor(unsigned long arg)
|
|
+{
|
|
+ int ret = 0;
|
|
+ sns_rst_source_t sns_reset_source;
|
|
+ struct cam_sensor_device *msnr_dev;
|
|
+
|
|
+ if (copy_from_user((void *)&sns_reset_source, (void *)arg,
|
|
+ sizeof(sns_reset_source))) {
|
|
+ cam_err("%s: Failed to copy args from user", __func__);
|
|
+ return -EFAULT;
|
|
+ }
|
|
+
|
|
+ if (sns_reset_source >= CAM_SNS_MAX_DEV_NUM) {
|
|
+ cam_err("%s: Invalid snr reset source %d", __func__, sns_reset_source);
|
|
+ return -EINVAL;
|
|
+ }
|
|
+
|
|
+ msnr_dev = g_sdev[sns_reset_source];
|
|
+ if (IS_ERR_OR_NULL(msnr_dev)) {
|
|
+ cam_err("%s: Invalid cam_sensor_device", __func__);
|
|
+ return -ENODEV;
|
|
+ }
|
|
+
|
|
+ ret = cam_sensor_power_set(msnr_dev, 0);
|
|
+ return ret;
|
|
+}
|
|
+
|
|
+static int camsnr_unreset_sensor(unsigned long arg)
|
|
+{
|
|
+ int ret = 0;
|
|
+ sns_rst_source_t sns_reset_source;
|
|
+ struct cam_sensor_device *msnr_dev;
|
|
+
|
|
+ if (copy_from_user((void *)&sns_reset_source, (void *)arg,
|
|
+ sizeof(sns_reset_source))) {
|
|
+ cam_err("Failed to copy args from user");
|
|
+ return -EFAULT;
|
|
+ }
|
|
+
|
|
+ if (sns_reset_source >= CAM_SNS_MAX_DEV_NUM) {
|
|
+ cam_err("Invalid snr reset source %d", sns_reset_source);
|
|
+ return -EINVAL;
|
|
+ }
|
|
+
|
|
+ msnr_dev = g_sdev[sns_reset_source];
|
|
+ if (IS_ERR_OR_NULL(msnr_dev)) {
|
|
+ cam_err("%s: Invalid cam_sensor_device", __func__);
|
|
+ return -ENODEV;
|
|
+ }
|
|
+
|
|
+ ret = cam_sensor_power_set(msnr_dev, 1);
|
|
+
|
|
+ return ret;
|
|
+}
|
|
+
|
|
+static void cam_sensor_i2c_dumpinfo(struct i2c_msg *msg_array, int num)
|
|
+{
|
|
+ int i;
|
|
+
|
|
+ cam_info("%s: dump i2c transfer data info, msg number is %d", __func__, num);
|
|
+ for(i = 0; i < num; i ++)
|
|
+ cam_info("%s, i2c transfer msg_array[%d], addr 0x%x, flags %d, len %d, val [0x%x, 0x%x, 0x%x, 0x%x]",
|
|
+ __func__, i, msg_array[i].addr, msg_array[i].flags, msg_array[i].len,
|
|
+ msg_array[i].buf[0], msg_array[i].buf[1], msg_array[i].buf[2], msg_array[i].buf[3]);
|
|
+}
|
|
+
|
|
+static int cam_sensor_write(struct cam_i2c_data *data,
|
|
+ struct cam_sensor_device *sensor_dev)
|
|
+{
|
|
+ struct i2c_adapter *adapter;
|
|
+ struct i2c_msg msg;
|
|
+ u8 val[4];
|
|
+ int ret = 0;
|
|
+ struct mutex *pcmd_mutex = NULL;
|
|
+ u8 twsi_no, addr;
|
|
+ u16 reg_len, val_len, reg, reg_val;
|
|
+
|
|
+ SENSOR_DRIVER_CHECK_POINTER(data);
|
|
+ SENSOR_DRIVER_CHECK_POINTER(sensor_dev);
|
|
+
|
|
+ if (!data->addr || !data->reg_len || !data->val_len) {
|
|
+ cam_err("%s: invalid addr 0x%x or reg_len %d or val_len %d",
|
|
+ __func__, data->addr, data->reg_len, data->val_len);
|
|
+ return -EINVAL;
|
|
+ }
|
|
+
|
|
+ twsi_no = sensor_dev->twsi_no;
|
|
+ addr = data->addr;
|
|
+ reg_len = data->reg_len;
|
|
+ val_len = data->val_len;
|
|
+ reg = data->tab.reg;
|
|
+ reg_val = data->tab.val;
|
|
+
|
|
+ adapter = i2c_get_adapter(twsi_no);
|
|
+ if (!adapter) {
|
|
+ cam_err("%s: i2c get adapter fail", __func__);
|
|
+ return -ENODEV;
|
|
+ }
|
|
+
|
|
+ msg.addr = addr;
|
|
+ msg.flags = 0;
|
|
+ msg.buf = val;
|
|
+ msg.len = reg_len + val_len;
|
|
+
|
|
+ pcmd_mutex = &sensor_dev->lock;
|
|
+ mutex_lock(pcmd_mutex);
|
|
+ if (msg.len == 2) {
|
|
+ /* reg:8bit; val:8bit */
|
|
+ val[0] = reg & 0xff;
|
|
+ val[1] = reg_val & 0xff;
|
|
+ } else if (msg.len == 3) {
|
|
+ /* reg:16bit; val:8bit */
|
|
+ val[0] = (reg >> 8) & 0xff;
|
|
+ val[1] = reg & 0xff;
|
|
+ val[2] = reg_val & 0xff;
|
|
+ } else if (msg.len == 4) {
|
|
+ /* reg:16bit; val:16bit */
|
|
+ val[0] = (reg >> 8) & 0xff;
|
|
+ val[1] = reg & 0xff;
|
|
+ val[2] = (reg_val >> 8) & 0xff;
|
|
+ val[3] = reg_val & 0xff;
|
|
+ }
|
|
+ ret = i2c_transfer(adapter, &msg, 1);
|
|
+ if (ret < 0) {
|
|
+ cam_err("%s: i2c transfer data fail", __func__);
|
|
+ cam_sensor_i2c_dumpinfo(&msg, 1);
|
|
+ mutex_unlock(pcmd_mutex);
|
|
+ return ret;
|
|
+ }
|
|
+ mutex_unlock(pcmd_mutex);
|
|
+
|
|
+ return 0;
|
|
+}
|
|
+
|
|
+static int cam_sensor_read(struct cam_i2c_data *data,
|
|
+ struct cam_sensor_device *sensor_dev)
|
|
+{
|
|
+ int ret;
|
|
+ u8 val[2];
|
|
+ struct i2c_adapter *adapter;
|
|
+ struct i2c_msg msg;
|
|
+ struct mutex *pcmd_mutex = NULL;
|
|
+ u8 twsi_no, addr;
|
|
+ u16 reg_len, val_len, reg, reg_val = 0;
|
|
+
|
|
+ SENSOR_DRIVER_CHECK_POINTER(data);
|
|
+ SENSOR_DRIVER_CHECK_POINTER(sensor_dev);
|
|
+
|
|
+ if (!data->addr || !data->reg_len || !data->val_len) {
|
|
+ cam_err("%s: invalid addr 0x%x or reg_len %d or val_len %d",
|
|
+ __func__, data->addr, data->reg_len, data->val_len);
|
|
+ return -EINVAL;
|
|
+ }
|
|
+
|
|
+ twsi_no = sensor_dev->twsi_no;
|
|
+ addr = data->addr;
|
|
+ reg_len = data->reg_len;
|
|
+ val_len = data->val_len;
|
|
+ reg = data->tab.reg;
|
|
+
|
|
+ adapter = i2c_get_adapter(twsi_no);
|
|
+ if (!adapter) {
|
|
+ cam_err("%s: i2c get adapter fail", __func__);
|
|
+ return -ENODEV;
|
|
+ }
|
|
+
|
|
+ msg.addr = addr;
|
|
+ msg.flags = 0;
|
|
+ msg.len = 1;
|
|
+ msg.buf = val;
|
|
+
|
|
+ pcmd_mutex = &sensor_dev->lock;
|
|
+ mutex_lock(pcmd_mutex);
|
|
+ if (reg_len == I2C_8BIT) {
|
|
+ msg.len = 1;
|
|
+ val[0] = reg & 0xff;
|
|
+ } else if (reg_len == I2C_16BIT) {
|
|
+ msg.len = 2;
|
|
+ val[0] = (reg >> 8) & 0xff;
|
|
+ val[1] = reg & 0xff;
|
|
+ }
|
|
+ ret = i2c_transfer(adapter, &msg, 1);
|
|
+ if (ret < 0) {
|
|
+ mutex_unlock(pcmd_mutex);
|
|
+ goto err;
|
|
+ }
|
|
+
|
|
+ if (val_len == I2C_8BIT)
|
|
+ msg.len = 1;
|
|
+ else if (val_len == I2C_16BIT)
|
|
+ msg.len = 2;
|
|
+ msg.flags = I2C_M_RD;
|
|
+ ret = i2c_transfer(adapter, &msg, 1);
|
|
+ if (ret < 0) {
|
|
+ mutex_unlock(pcmd_mutex);
|
|
+ goto err;
|
|
+ }
|
|
+ if (val_len == I2C_8BIT)
|
|
+ reg_val = val[0];
|
|
+ else if (val_len == I2C_16BIT)
|
|
+ reg_val = (val[0] << 8) + val[1];
|
|
+ mutex_unlock(pcmd_mutex);
|
|
+
|
|
+ data->tab.val = reg_val;
|
|
+
|
|
+ return 0;
|
|
+
|
|
+err:
|
|
+ cam_err("%s: Failed reading register 0x%02x!", __func__, reg);
|
|
+ return ret;
|
|
+}
|
|
+
|
|
+static int cam_sensor_burst_write(struct cam_burst_i2c_data *data,
|
|
+ struct cam_sensor_device *sensor_dev)
|
|
+{
|
|
+ struct i2c_adapter *adapter;
|
|
+ struct regval_tab *tab;
|
|
+ struct i2c_msg msg_array[BURST_I2C_REG_SIZE];
|
|
+ u8 val[BURST_I2C_REG_SIZE][4];
|
|
+ struct regval_tab buf[BURST_I2C_REG_SIZE];
|
|
+ int ret = 0;
|
|
+ struct mutex *pcmd_mutex = NULL;
|
|
+ u32 num, i;
|
|
+ u8 twsi_no;
|
|
+
|
|
+ SENSOR_DRIVER_CHECK_POINTER(data);
|
|
+ SENSOR_DRIVER_CHECK_POINTER(sensor_dev);
|
|
+
|
|
+ if (!data->addr || !data->reg_len || !data->val_len) {
|
|
+ cam_err("%s: invalid addr 0x%x or reg_len %d or val_len %d",
|
|
+ __func__, data->addr, data->reg_len, data->val_len);
|
|
+ return -EINVAL;
|
|
+ }
|
|
+
|
|
+ twsi_no = sensor_dev->twsi_no;
|
|
+ adapter = i2c_get_adapter(twsi_no);
|
|
+ if (!adapter) {
|
|
+ cam_err("%s: i2c get adapter fail, twsi_no %d", __func__, twsi_no);
|
|
+ return -ENODEV;
|
|
+ }
|
|
+
|
|
+ pcmd_mutex = &sensor_dev->lock;
|
|
+
|
|
+ do {
|
|
+ num = (data->num > BURST_I2C_REG_SIZE) ? BURST_I2C_REG_SIZE : data->num;
|
|
+ if (copy_from_user(buf, data->tab, sizeof(struct regval_tab) * num)) {
|
|
+ cam_err("%s: copy_from_user", __func__);
|
|
+ return -EFAULT;
|
|
+ }
|
|
+
|
|
+ memset(msg_array, 0, BURST_I2C_REG_SIZE * sizeof(struct i2c_msg));
|
|
+ mutex_lock(pcmd_mutex);
|
|
+ for (i = 0; i < num; i++) {
|
|
+ msg_array[i].addr = data->addr;
|
|
+ msg_array[i].flags = 0;
|
|
+ msg_array[i].len = 1;
|
|
+ msg_array[i].buf = val[i];
|
|
+
|
|
+ if (data->reg_len == I2C_8BIT)
|
|
+ msg_array[i].len = 1;
|
|
+ else if (data->reg_len == I2C_16BIT)
|
|
+ msg_array[i].len = 2;
|
|
+
|
|
+ if (data->val_len == I2C_8BIT)
|
|
+ msg_array[i].len += 1;
|
|
+ else if (data->val_len == I2C_16BIT)
|
|
+ msg_array[i].len += 2;
|
|
+
|
|
+ tab = &buf[i];
|
|
+ if (msg_array[i].len == 2) {
|
|
+ /* reg:8bit; val:8bit */
|
|
+ val[i][0] = tab->reg & 0xff;
|
|
+ val[i][1] = tab->val & 0xff;
|
|
+ } else if (msg_array[i].len == 3) {
|
|
+ /* reg:16bit; val:8bit */
|
|
+ val[i][0] = (tab->reg >> 8) & 0xff;
|
|
+ val[i][1] = tab->reg & 0xff;
|
|
+ val[i][2] = tab->val & 0xff;
|
|
+ } else if (msg_array[i].len == 4) {
|
|
+ /* reg:16bit; val:16bit */
|
|
+ val[i][0] = (tab->reg >> 8) & 0xff;
|
|
+ val[i][1] = tab->reg & 0xff;
|
|
+ val[i][2] = (tab->val >> 8) & 0xff;
|
|
+ val[i][3] = tab->val & 0xff;
|
|
+ }
|
|
+ }
|
|
+
|
|
+ ret = i2c_transfer(adapter, msg_array, i);
|
|
+ if (ret != i) {
|
|
+ cam_err("%s, i2c transfer fail, ret %d, i %d", __func__, ret, i);
|
|
+ cam_sensor_i2c_dumpinfo(msg_array, i);
|
|
+ ret = -EIO;
|
|
+ mutex_unlock(pcmd_mutex);
|
|
+ goto out;
|
|
+ } else
|
|
+ ret = 0;
|
|
+
|
|
+ mutex_unlock(pcmd_mutex);
|
|
+ data->num -= num;
|
|
+ data->tab += num;
|
|
+ } while (data->num > 0);
|
|
+
|
|
+out:
|
|
+ return ret;
|
|
+}
|
|
+
|
|
+static int cam_sensor_burst_read(struct cam_burst_i2c_data *data,
|
|
+ struct cam_sensor_device *sensor_dev)
|
|
+{
|
|
+ int ret = 0;
|
|
+ struct regval_tab buf[BURST_I2C_REG_SIZE];
|
|
+ u32 i, num;
|
|
+ struct cam_i2c_data i2c_data;
|
|
+
|
|
+ SENSOR_DRIVER_CHECK_POINTER(data);
|
|
+ SENSOR_DRIVER_CHECK_POINTER(sensor_dev);
|
|
+
|
|
+ if (!data->addr || !data->reg_len || !data->val_len) {
|
|
+ cam_err("%s: invalid addr 0x%x or reg_len %d or val_len %d",
|
|
+ __func__, data->addr, data->reg_len, data->val_len);
|
|
+ return -EINVAL;
|
|
+ }
|
|
+
|
|
+ i2c_data.reg_len = data->reg_len;
|
|
+ i2c_data.val_len = data->val_len;
|
|
+ i2c_data.addr = data->addr;
|
|
+
|
|
+ do {
|
|
+ num = (data->num > BURST_I2C_REG_SIZE) ? BURST_I2C_REG_SIZE : data->num;
|
|
+ if (copy_from_user(buf, data->tab, sizeof(struct regval_tab) * num)) {
|
|
+ cam_err("%s: copy_from_user", __func__);
|
|
+ return -EFAULT;
|
|
+ }
|
|
+ for (i = 0; i < num; i++) {
|
|
+ i2c_data.tab.reg = buf[i].reg;
|
|
+ ret = cam_sensor_read(&i2c_data, sensor_dev);
|
|
+ if (ret < 0)
|
|
+ return ret;
|
|
+ buf[i].val = i2c_data.tab.val;
|
|
+ }
|
|
+ if (copy_to_user(data->tab, buf, sizeof(struct regval_tab) * num)) {
|
|
+ cam_err("%s: copy read value to user failed!", __func__);
|
|
+ ret = -EPERM;
|
|
+ }
|
|
+ data->num -= num;
|
|
+ data->tab += num;
|
|
+ } while (data->num > 0);
|
|
+
|
|
+ return ret;
|
|
+}
|
|
+
|
|
/*
 * cam_sensor_get_info() - fill @sensor_info with this sensor's i2c bus id.
 *
 * Return: 0 on success, -EINVAL on NULL input.
 */
static int cam_sensor_get_info(struct cam_sensor_info *sensor_info,
			       struct cam_sensor_device *sensor_dev)
{
	SENSOR_DRIVER_CHECK_POINTER(sensor_info);
	SENSOR_DRIVER_CHECK_POINTER(sensor_dev);

	sensor_info->twsi_no = sensor_dev->twsi_no;

	return 0;
}
|
|
+
|
|
/*
 * camsnr_mipi_clock_set() - ioctl backend: program the CSI D-PHY HS-settle
 * time for the given MIPI clock rate.
 *
 * @arg points to a userspace sns_mipi_clock_t; @dphy_no selects the D-PHY.
 *
 * Return: 0 on success, -EFAULT or ccic_dphy_hssettle_set()'s errno.
 */
static int camsnr_mipi_clock_set(unsigned long arg, unsigned int dphy_no)
{
	int ret = 0;
	sns_mipi_clock_t sns_mipi_clock;
	if (copy_from_user((void *)&sns_mipi_clock, (void *)arg, sizeof(sns_mipi_clock))) {
		cam_err("Failed to copy args from user");
		return -EFAULT;
	}

	ret = ccic_dphy_hssettle_set(dphy_no, sns_mipi_clock);
	if (!ret)
		cam_dbg("mipi%d: set mipi clock\n", dphy_no);

	return ret;
}
|
|
+
|
|
+static long camsnr_ioctl(struct file *file, unsigned int cmd, unsigned long arg)
|
|
+{
|
|
+ int ret = 0;
|
|
+ struct cam_sensor_device *msnr_dev = NULL;
|
|
+
|
|
+ SENSOR_DRIVER_CHECK_POINTER(file);
|
|
+ msnr_dev = (struct cam_sensor_device *)file->private_data;
|
|
+
|
|
+ if (_IOC_TYPE(cmd) != CAM_SENSOR_IOC_MAGIC) {
|
|
+ cam_err("%s: invalid cmd %d", __func__, cmd);
|
|
+ return -ENOTTY;
|
|
+ }
|
|
+
|
|
+ switch (cmd) {
|
|
+ case CAM_SENSOR_RESET:
|
|
+ ret = camsnr_reset_sensor(arg);
|
|
+ break;
|
|
+ case CAM_SENSOR_UNRESET:
|
|
+ ret = camsnr_unreset_sensor(arg);
|
|
+ break;
|
|
+ case CAM_SENSOR_I2C_WRITE:{
|
|
+ struct cam_i2c_data data;
|
|
+ if (copy_from_user((void *)&data, (void *)arg, sizeof(data))) {
|
|
+ cam_err("%s: Line %d: Failed to copy args from user",
|
|
+ __func__, __LINE__);
|
|
+ return -EFAULT;
|
|
+ }
|
|
+
|
|
+ ret = cam_sensor_write(&data, msnr_dev);
|
|
+ break;
|
|
+ }
|
|
+ case CAM_SENSOR_I2C_READ:{
|
|
+ struct cam_i2c_data data;
|
|
+ if (copy_from_user((void *)&data, (void *)arg, sizeof(data))) {
|
|
+ cam_err("%s: Line %d: Failed to copy args from user",
|
|
+ __func__, __LINE__);
|
|
+ return -EFAULT;
|
|
+ }
|
|
+
|
|
+ ret = cam_sensor_read(&data, msnr_dev);
|
|
+ if (copy_to_user((void *)arg, (void *)&data, sizeof(data))) {
|
|
+ cam_err("%s: Line %d: Failed to copy args to user",
|
|
+ __func__, __LINE__);
|
|
+ return -EFAULT;
|
|
+ }
|
|
+ break;
|
|
+ }
|
|
+ case CAM_SENSOR_I2C_BURST_WRITE:{
|
|
+ struct cam_burst_i2c_data data;
|
|
+ if (copy_from_user((void *)&data, (void *)arg, sizeof(data))) {
|
|
+ cam_err("%s: Line %d: Failed to copy args from user",
|
|
+ __func__, __LINE__);
|
|
+ return -EFAULT;
|
|
+ }
|
|
+
|
|
+ ret = cam_sensor_burst_write(&data, msnr_dev);
|
|
+ break;
|
|
+ }
|
|
+ case CAM_SENSOR_I2C_BURST_READ:{
|
|
+ struct cam_burst_i2c_data data;
|
|
+ if (copy_from_user((void *)&data, (void *)arg, sizeof(data))) {
|
|
+ cam_err("%s: Line %d: Failed to copy args from user",
|
|
+ __func__, __LINE__);
|
|
+ return -EFAULT;
|
|
+ }
|
|
+
|
|
+ ret = cam_sensor_burst_read(&data, msnr_dev);
|
|
+ break;
|
|
+ }
|
|
+ case CAM_SENSOR_GET_INFO:{
|
|
+ struct cam_sensor_info data;
|
|
+
|
|
+ ret = cam_sensor_get_info(&data, msnr_dev);
|
|
+ if (copy_to_user((void *)arg, (void *)&data, sizeof(data))) {
|
|
+ cam_err("%s: Line %d: Failed to copy args to user",
|
|
+ __func__, __LINE__);
|
|
+ return -EFAULT;
|
|
+ }
|
|
+ break;
|
|
+ }
|
|
+ case CAM_SENSOR_SET_MIPI_CLOCK:
|
|
+ ret = camsnr_mipi_clock_set(arg, msnr_dev->dphy_no);
|
|
+ break;
|
|
+ case CAM_SENSOR_SET_POWER_VOLTAGE:
|
|
+ ret = camsnr_set_power_voltage(arg, msnr_dev);
|
|
+ break;
|
|
+ case CAM_SENSOR_SET_POWER_ON:
|
|
+ ret = camsnr_set_power_on(arg, msnr_dev);
|
|
+ break;
|
|
+ case CAM_SENSOR_SET_GPIO_ENABLE:
|
|
+ ret = camsnr_set_gpio_enable(arg, msnr_dev);
|
|
+ break;
|
|
+ case CAM_SENSOR_SET_MCLK_RATE:
|
|
+ ret = camsnr_set_mclk_rate(arg, msnr_dev);
|
|
+ break;
|
|
+ case CAM_SENSOR_SET_MCLK_ENABLE:
|
|
+ ret = camsnr_set_mclk_enable(arg, msnr_dev);
|
|
+ break;
|
|
+ default:
|
|
+ cam_err("unknown IOCTL code 0x%x", cmd);
|
|
+ ret = -ENOTTY;
|
|
+ }
|
|
+
|
|
+ cam_dbg("%s OUT, cmd %x", __func__, cmd);
|
|
+
|
|
+ return ret;
|
|
+}
|
|
+
|
|
+//fixme: add compat in the future
|
|
+#if 0
|
|
+//#ifdef CONFIG_COMPAT
|
|
+
|
|
+#define assign_in_user(to, from) \
|
|
+({ \
|
|
+ typeof(*from) __assign_tmp; \
|
|
+ \
|
|
+ get_user(__assign_tmp, from) || put_user(__assign_tmp, to); \
|
|
+})
|
|
+
|
|
+struct cam_burst_i2c_data32 {
|
|
+ enum sensor_i2c_len reg_len;
|
|
+ enum sensor_i2c_len val_len;
|
|
+ uint8_t addr;
|
|
+ uint32_t tab;
|
|
+ uint32_t num;
|
|
+};
|
|
+
|
|
+static int alloc_userspace(unsigned int size, u32 aux_space, void __user **new_p64)
|
|
+{
|
|
+ *new_p64 = compat_alloc_user_space(size + aux_space);
|
|
+ if (!*new_p64)
|
|
+ return -ENOMEM;
|
|
+ if (clear_user(*new_p64, size))
|
|
+ return -EFAULT;
|
|
+ return 0;
|
|
+}
|
|
+
|
|
+#define CAM_SENSOR_I2C_BURST_WRITE32 _IOW(CAM_SENSOR_IOC_MAGIC, SENSOR_IOC_I2C_BURST_WRITE, struct cam_burst_i2c_data32)
|
|
+#define CAM_SENSOR_I2C_BURST_READ32 _IOW(CAM_SENSOR_IOC_MAGIC, SENSOR_IOC_I2C_BURST_READ, struct cam_burst_i2c_data32)
|
|
+
|
|
+static int get_burst_i2c_data(struct cam_burst_i2c_data __user *p64, struct cam_burst_i2c_data32 __user *p32)
|
|
+{
|
|
+ compat_caddr_t p = 0;
|
|
+ struct regval_tab __user *tab32 = NULL;
|
|
+
|
|
+ if (!access_ok(p32, sizeof(*p32))
|
|
+ || assign_in_user(&p64->reg_len, &p32->reg_len)
|
|
+ || assign_in_user(&p64->val_len, &p32->val_len)
|
|
+ || assign_in_user(&p64->addr, &p32->addr)
|
|
+ || assign_in_user(&p64->num, &p32->num)) {
|
|
+ cam_err("%s assign in user failed", __func__);
|
|
+ return -EFAULT;
|
|
+ }
|
|
+ if (get_user(p, &p32->tab)) {
|
|
+ cam_err("%s get tab failed", __func__);
|
|
+ return -EFAULT;
|
|
+ }
|
|
+ tab32 = compat_ptr(p);
|
|
+ if (put_user(tab32, &p64->tab)) {
|
|
+ cam_err("%s tab put user failed", __func__);
|
|
+ return -EFAULT;
|
|
+ }
|
|
+ return 0;
|
|
+}
|
|
+
|
|
+static int put_burst_i2c_data(struct cam_burst_i2c_data __user *p64,
|
|
+ struct cam_burst_i2c_data32 __user *p32)
|
|
+{
|
|
+ if (!access_ok(p32, sizeof(*p32))
|
|
+ || assign_in_user(&p32->reg_len, &p64->reg_len)
|
|
+ || assign_in_user(&p32->val_len, &p64->val_len)
|
|
+ || assign_in_user(&p32->addr, &p64->addr)
|
|
+ || assign_in_user(&p32->num, &p64->num)) {
|
|
+ cam_err("%s assign in user failed", __func__);
|
|
+ return -EFAULT;
|
|
+ }
|
|
+ return 0;
|
|
+}
|
|
+
|
|
+static long camsnr_compat_ioctl(struct file *file, unsigned int cmd, unsigned long arg)
|
|
+{
|
|
+ void __user *p32 = compat_ptr(arg);
|
|
+ void __user *new_p64 = NULL;
|
|
+ //void __user *aux_buf;
|
|
+ //u32 aux_space;
|
|
+ long err = 0;
|
|
+ unsigned int ncmd = 0;
|
|
+ //size_t size32 = 0;
|
|
+ size_t size64 = 0;
|
|
+
|
|
+ //size32 = _IOC_SIZE(cmd);
|
|
+ //switch (_IOC_NR(cmd)) {
|
|
+ //case _IOC_NR(CAM_SENSOR_RESET):
|
|
+ // size64 = sizeof(sns_rst_source_t);
|
|
+ // break;
|
|
+ //case _IOC_NR(CAM_SENSOR_UNRESET):
|
|
+ // size64 = sizeof(sns_rst_source_t);
|
|
+ // break;
|
|
+ //case _IOC_NR(CAM_SENSOR_I2C_WRITE):
|
|
+ // size64 = sizeof(struct cam_i2c_data);
|
|
+ // break;
|
|
+ //case _IOC_NR(CAM_SENSOR_I2C_READ):
|
|
+ // size64 = sizeof(struct cam_i2c_data);
|
|
+ // break;
|
|
+ //case _IOC_NR(CAM_SENSOR_I2C_BURST_WRITE):
|
|
+ // size64 = sizeof(struct cam_burst_i2c_data);
|
|
+ // break;
|
|
+ //case _IOC_NR(CAM_SENSOR_I2C_BURST_READ):
|
|
+ // size64 = sizeof(struct cam_burst_i2c_data);
|
|
+ // break;
|
|
+ //case _IOC_NR(CAM_SENSOR_GET_INFO):
|
|
+ // size64 = sizeof(struct cam_sensor_info);
|
|
+ // break;
|
|
+ //case _IOC_NR(CAM_SENSOR_SET_MIPI_CLOCK):
|
|
+ // size64 = sizeof(sns_mipi_clock_t);
|
|
+ // break;
|
|
+ //}
|
|
+ //cam_dbg("%s:cmd_nr=%d size32=%u size64=%u", __func__, _IOC_NR(cmd), size32, size64);
|
|
+
|
|
+ switch (cmd) {
|
|
+ case CAM_SENSOR_I2C_BURST_WRITE32:
|
|
+ case CAM_SENSOR_I2C_BURST_READ32:
|
|
+ size64 = sizeof(struct cam_burst_i2c_data);
|
|
+ if (cmd == CAM_SENSOR_I2C_BURST_WRITE32) {
|
|
+ ncmd = CAM_SENSOR_I2C_BURST_WRITE;
|
|
+ } else {
|
|
+ ncmd = CAM_SENSOR_I2C_BURST_READ;
|
|
+ }
|
|
+ err = alloc_userspace(size64, 0, &new_p64);
|
|
+ if (err) {
|
|
+ cam_err("%s alloc userspace failed err=%ld cmd=%d ioc_size=%zu",
|
|
+ __func__, err, _IOC_NR(cmd), size64);
|
|
+ return err;
|
|
+ }
|
|
+ err = (long)get_burst_i2c_data(new_p64, p32);
|
|
+ if (err) {
|
|
+ return err;
|
|
+ }
|
|
+ break;
|
|
+ default:
|
|
+ size64 = _IOC_SIZE(cmd);
|
|
+ ncmd = cmd;
|
|
+ if (size64 > 0) {
|
|
+ err = alloc_userspace(size64, 0, &new_p64);
|
|
+ if (err) {
|
|
+ cam_err
|
|
+ ("%s alloc userspace failed err=%ld cmd=%d ioc_size=%zu",
|
|
+ __func__, err, _IOC_NR(cmd), size64);
|
|
+ return err;
|
|
+ }
|
|
+ err = copy_in_user(new_p64, p32, size64);
|
|
+ if (err) {
|
|
+ cam_err
|
|
+ ("%s copy in user 1 failed err=%ld cmd=%d ioc_size=%zu",
|
|
+ __func__, err, _IOC_NR(cmd), size64);
|
|
+ return err;
|
|
+ }
|
|
+ }
|
|
+ break;
|
|
+ }
|
|
+
|
|
+ err = camsnr_ioctl(file, ncmd, (unsigned long)new_p64);
|
|
+ if (err == 0) {
|
|
+ switch (cmd) {
|
|
+ case CAM_SENSOR_I2C_BURST_WRITE32:
|
|
+ case CAM_SENSOR_I2C_BURST_READ32:
|
|
+ err = (long)put_burst_i2c_data(new_p64, p32);
|
|
+ if (err) {
|
|
+ cam_err("%s put_burst_i2c_data failed err=%ld", __func__,
|
|
+ err);
|
|
+ return err;
|
|
+ }
|
|
+ break;
|
|
+ default:
|
|
+ err = copy_in_user(p32, new_p64, size64);
|
|
+ if (err) {
|
|
+ cam_err
|
|
+ ("%s copy in user 2 failed err=%ld cmd=%d ioc_size=%zu",
|
|
+ __func__, err, _IOC_NR(cmd), size64);
|
|
+ return err;
|
|
+ }
|
|
+ break;
|
|
+ }
|
|
+ }
|
|
+ return 0;
|
|
+}
|
|
+#endif
|
|
+
|
|
+static int camsnr_open(struct inode *inode, struct file *file)
|
|
+{
|
|
+ struct cam_sensor_device *msnr_dev =
|
|
+ container_of(inode->i_cdev, struct cam_sensor_device, cdev);
|
|
+
|
|
+ cam_dbg("%s open %s%d, twsi_no %d\n", __func__, DRIVER_NAME, msnr_dev->id,
|
|
+ msnr_dev->twsi_no);
|
|
+ file->private_data = msnr_dev;
|
|
+ return 0;
|
|
+}
|
|
+
|
|
+static int camsnr_release(struct inode *inode, struct file *file)
|
|
+{
|
|
+ struct cam_sensor_device *msnr_dev =
|
|
+ container_of(inode->i_cdev, struct cam_sensor_device, cdev);
|
|
+ cam_dbg("%s close %s%d, twsi_no %d\n", __func__, DRIVER_NAME, msnr_dev->id,
|
|
+ msnr_dev->twsi_no);
|
|
+ return 0;
|
|
+}
|
|
+
|
|
+static const struct file_operations camsnr_fops = {
|
|
+ .owner = THIS_MODULE,
|
|
+ .open = camsnr_open,
|
|
+ .release = camsnr_release,
|
|
+ .unlocked_ioctl = camsnr_ioctl,
|
|
+#ifdef CONFIG_COMPAT
|
|
+ //fixme: add compat in the future
|
|
+ //.compat_ioctl = camsnr_compat_ioctl,
|
|
+#endif
|
|
+};
|
|
+
|
|
+static void cam_snr_drv_deinit(void)
|
|
+{
|
|
+ dev_t dev_id = MKDEV(camsnr_major, 0);
|
|
+
|
|
+ unregister_chrdev_region(dev_id, CAM_SNS_MAX_DEV_NUM);
|
|
+ if (camsnr_class)
|
|
+ class_destroy(camsnr_class);
|
|
+}
|
|
+
|
|
+static int cam_snr_drv_init(void)
|
|
+{
|
|
+ int ret = 0;
|
|
+ dev_t dev_id;
|
|
+
|
|
+ ret = alloc_chrdev_region(&dev_id, 0, CAM_SNS_MAX_DEV_NUM, DRIVER_NAME);
|
|
+ if (ret) {
|
|
+ cam_err("%s: can't get major number", __func__);
|
|
+ goto out;
|
|
+ }
|
|
+
|
|
+ camsnr_major = MAJOR(dev_id);
|
|
+
|
|
+ camsnr_class = class_create(THIS_MODULE, DRIVER_NAME);
|
|
+ if (IS_ERR(camsnr_class)) {
|
|
+ cam_err("%s: camsnr_class is error", __func__);
|
|
+ ret = PTR_ERR(camsnr_class);
|
|
+ goto error_cdev;
|
|
+ }
|
|
+
|
|
+out:
|
|
+ return ret;
|
|
+
|
|
+error_cdev:
|
|
+ unregister_chrdev_region(dev_id, CAM_SNS_MAX_DEV_NUM);
|
|
+ return ret;
|
|
+}
|
|
+
|
|
+static int cam_snr_dev_destroy(struct cdev *cdev, int index)
|
|
+{
|
|
+ SENSOR_DRIVER_CHECK_POINTER(cdev);
|
|
+ device_destroy(camsnr_class, MKDEV(camsnr_major, index));
|
|
+ cdev_del(cdev);
|
|
+
|
|
+ return 0;
|
|
+}
|
|
+
|
|
+static int cam_snr_dev_create(struct cdev *cdev, int index)
|
|
+{
|
|
+ int ret = 0;
|
|
+
|
|
+ SENSOR_DRIVER_CHECK_POINTER(cdev);
|
|
+
|
|
+ cdev_init(cdev, &camsnr_fops);
|
|
+ ret = cdev_add(cdev, MKDEV(camsnr_major, index), 1);
|
|
+ if (ret < 0) {
|
|
+ cam_err("add device %d cdev fail", index);
|
|
+ goto out;
|
|
+ }
|
|
+
|
|
+ /* create device node */
|
|
+ device_create(camsnr_class, NULL, MKDEV(camsnr_major, index), NULL, "%s%d",
|
|
+ DRIVER_NAME, index);
|
|
+
|
|
+out:
|
|
+ return ret;
|
|
+}
|
|
+
|
|
+static int camsnr_of_parse(struct cam_sensor_device *sensor)
|
|
+{
|
|
+ struct device *dev = NULL;
|
|
+ struct device_node *of_node = NULL;
|
|
+ u32 cell_id, twsi_no, dphy_no;
|
|
+ int ret;
|
|
+ const char *mclk_name;
|
|
+
|
|
+ SENSOR_DRIVER_CHECK_POINTER(sensor);
|
|
+ dev = &sensor->pdev->dev;
|
|
+ SENSOR_DRIVER_CHECK_POINTER(dev);
|
|
+ of_node = dev->of_node;
|
|
+
|
|
+ /* cell-index */
|
|
+ ret = of_property_read_u32(of_node, "cell-index", &cell_id);
|
|
+ if (ret < 0) {
|
|
+ cam_err("%s: cell-index read failed", __func__);
|
|
+ return ret;
|
|
+ }
|
|
+ if (cell_id >= CAM_SNS_MAX_DEV_NUM) {
|
|
+ cam_err("%s: invalid cell-index %d", __func__, cell_id);
|
|
+ return -EINVAL;
|
|
+ }
|
|
+ sensor->id = cell_id;
|
|
+ if (g_sdev[cell_id]) {
|
|
+ cam_err("%s: cell-index %d already exists", __func__, cell_id);
|
|
+ return -EINVAL;
|
|
+ }
|
|
+ cam_snr_dev_create(&sensor->cdev, sensor->id);
|
|
+
|
|
+ /*twsi_index */
|
|
+ ret = of_property_read_u32(of_node, "twsi-index", &twsi_no);
|
|
+ if (ret < 0) {
|
|
+ cam_err("%s: twsi-index read failed", __func__);
|
|
+ return ret;
|
|
+ }
|
|
+ sensor->twsi_no = (u8) twsi_no;
|
|
+
|
|
+ /*dphy_index */
|
|
+ ret = of_property_read_u32(of_node, "dphy-index", &dphy_no);
|
|
+ if (ret < 0) {
|
|
+ cam_err("%s: dphy-index read failed", __func__);
|
|
+ return ret;
|
|
+ }
|
|
+ sensor->dphy_no = (u8) dphy_no;
|
|
+
|
|
+ ret = of_property_read_string(of_node, "clock-names", &mclk_name);
|
|
+ if (!ret) {
|
|
+ if (strcmp(mclk_name, "cam_mclk0") && strcmp(mclk_name, "cam_mclk1") && strcmp(mclk_name, "cam_mclk2")) {
|
|
+ cam_err("%s: error! only support cam_mclk0~2!", __func__);
|
|
+ return -EINVAL;
|
|
+ }
|
|
+ } else {
|
|
+ cam_err("%s: clock-names read failed", __func__);
|
|
+ return ret;
|
|
+ }
|
|
+
|
|
+ /* mclks */
|
|
+ sensor->mclk = devm_clk_get(dev, mclk_name);
|
|
+ if (IS_ERR(sensor->mclk)) {
|
|
+ cam_err("unable to get cam_mclk%d\n", cell_id);
|
|
+ ret = PTR_ERR(sensor->mclk);
|
|
+ goto st_err;
|
|
+ }
|
|
+
|
|
+ sensor->afvdd = devm_regulator_get(dev, "af_2v8");
|
|
+ if (IS_ERR(sensor->afvdd)) {
|
|
+ dev_warn(dev, "Failed to get regulator af_2v8\n");
|
|
+ sensor->afvdd = NULL;
|
|
+ }
|
|
+
|
|
+ sensor->avdd = devm_regulator_get(dev, "avdd_2v8");
|
|
+ if (IS_ERR(sensor->avdd)) {
|
|
+ dev_warn(dev, "Failed to get regulator avdd_2v8\n");
|
|
+ sensor->avdd = NULL;
|
|
+ }
|
|
+
|
|
+ sensor->dovdd = devm_regulator_get(dev, "dovdd_1v8");
|
|
+ if (IS_ERR(sensor->dovdd)) {
|
|
+ dev_warn(dev, "Failed to get regulator dovdd_1v8\n");
|
|
+ sensor->dovdd = NULL;
|
|
+ }
|
|
+
|
|
+ sensor->dvdd = devm_regulator_get(dev, "dvdd_1v2");
|
|
+ if (IS_ERR(sensor->dvdd)) {
|
|
+ dev_warn(dev, "Failed to get regulator dvdd_1v2\n");
|
|
+ sensor->dvdd = NULL;
|
|
+ }
|
|
+
|
|
+ /* pwdn-gpios */
|
|
+ sensor->pwdn = devm_gpiod_get(dev, "pwdn", GPIOD_OUT_HIGH);
|
|
+ if (IS_ERR(sensor->pwdn)) {
|
|
+ cam_info("%s: unable to parse sensor%d pwdn gpio", __func__, cell_id);
|
|
+ ret = PTR_ERR(sensor->pwdn);
|
|
+ } else {
|
|
+ ret = gpiod_direction_output(sensor->pwdn, 0);
|
|
+ if (ret < 0) {
|
|
+ cam_err("%s: Failed to init sensor%d pwdn gpio", __func__, cell_id);
|
|
+ goto st_err;
|
|
+ }
|
|
+ }
|
|
+
|
|
+ /* rst-gpios */
|
|
+ sensor->rst = devm_gpiod_get(dev, "reset", GPIOD_OUT_HIGH);
|
|
+ if (IS_ERR(sensor->rst)) {
|
|
+ cam_info("%s: unable to parse sensor%d reset gpio", __func__, cell_id);
|
|
+ ret = PTR_ERR(sensor->rst);
|
|
+ } else {
|
|
+ ret = gpiod_direction_output(sensor->rst, 0);
|
|
+ if (ret < 0) {
|
|
+ cam_err("%s: Failed to init sensor%d reset gpio", __func__, cell_id);
|
|
+ goto st_err;
|
|
+ }
|
|
+ }
|
|
+#ifdef CONFIG_ARCH_ZYNQMP
|
|
+ cam_dbg("dptc-gpios,cell_id =0x%x",cell_id);
|
|
+ /* dptc-gpios */
|
|
+ sensor->dptc = devm_gpiod_get(dev, "dptc", GPIOD_OUT_HIGH);
|
|
+ if (IS_ERR(sensor->dptc)) {
|
|
+ cam_err("%s: unable to parse sensor%d dptc gpio", __func__, cell_id);
|
|
+ ret = PTR_ERR(sensor->dptc);
|
|
+ } else {
|
|
+ ret = gpiod_direction_output(sensor->dptc, 1);
|
|
+ if (ret < 0) {
|
|
+ cam_err("%s: Failed to init sensor%d dptc gpio", __func__, cell_id);
|
|
+ goto st_err;
|
|
+ }
|
|
+ gpiod_set_value_cansleep(sensor->dptc, 1);
|
|
+ usleep_range(100 * 1000, 100 * 1000);
|
|
+ gpiod_set_value_cansleep(sensor->dptc, 0);
|
|
+ usleep_range(100 * 1000, 100 * 1000);
|
|
+ gpiod_set_value_cansleep(sensor->dptc, 1);
|
|
+ usleep_range(100 * 1000, 100 * 1000);
|
|
+ }
|
|
+#endif
|
|
+
|
|
+ return ret;
|
|
+
|
|
+st_err:
|
|
+ return ret;
|
|
+}
|
|
+
|
|
+static int cam_sensor_remove(struct platform_device *pdev)
|
|
+{
|
|
+ struct cam_sensor_device *msnr_dev;
|
|
+
|
|
+ msnr_dev = platform_get_drvdata(pdev);
|
|
+ if (!msnr_dev) {
|
|
+ dev_err(&pdev->dev, "camera sensor device is NULL");
|
|
+ return 0;
|
|
+ }
|
|
+ mutex_destroy(&msnr_dev->lock);
|
|
+ cam_snr_dev_destroy(&msnr_dev->cdev, msnr_dev->id);
|
|
+ cam_dbg("camera sensor%d removed", msnr_dev->id);
|
|
+ devm_kfree(&pdev->dev, msnr_dev);
|
|
+
|
|
+ return 0;
|
|
+}
|
|
+
|
|
+static int cam_sensor_probe(struct platform_device *pdev)
|
|
+{
|
|
+ struct cam_sensor_device *msnr_dev;
|
|
+ int ret;
|
|
+
|
|
+ cam_dbg("camera sensor begin to probed");
|
|
+
|
|
+ msnr_dev = devm_kzalloc(&pdev->dev, sizeof(struct cam_sensor_device),
|
|
+ GFP_KERNEL);
|
|
+ if (!msnr_dev)
|
|
+ return -ENOMEM;
|
|
+
|
|
+ platform_set_drvdata(pdev, msnr_dev);
|
|
+ msnr_dev->pdev = pdev;
|
|
+
|
|
+ ret = camsnr_of_parse(msnr_dev);
|
|
+ if (ret)
|
|
+ return ret;
|
|
+
|
|
+ atomic_set(&msnr_dev->usr_cnt, 0);
|
|
+ mutex_init(&msnr_dev->lock);
|
|
+
|
|
+ g_sdev[msnr_dev->id] = msnr_dev;
|
|
+ cam_dbg("camera sensor%d probed", msnr_dev->id);
|
|
+
|
|
+ return ret;
|
|
+}
|
|
+
|
|
+static const struct of_device_id cam_sensor_dt_match[] = {
|
|
+ { .compatible = "spacemit,cam-sensor" },
|
|
+ {}
|
|
+};
|
|
+
|
|
+MODULE_DEVICE_TABLE(of, cam_sensor_dt_match);
|
|
+
|
|
+static struct platform_driver camsnr_driver = {
|
|
+ .probe = cam_sensor_probe,
|
|
+ .remove = cam_sensor_remove,
|
|
+ .driver = {
|
|
+ .name = DRIVER_NAME,
|
|
+ .of_match_table = cam_sensor_dt_match,
|
|
+ },
|
|
+};
|
|
+
|
|
+static int __init cam_sensor_init(void)
|
|
+{
|
|
+ int ret;
|
|
+
|
|
+ ret = cam_snr_drv_init();
|
|
+ if (ret < 0) {
|
|
+ printk("camsnr cdev create failed\n");
|
|
+ return ret;
|
|
+ }
|
|
+
|
|
+ return platform_driver_register(&camsnr_driver);
|
|
+}
|
|
+
|
|
+static void __exit cam_sensor_exit(void)
|
|
+{
|
|
+ platform_driver_unregister(&camsnr_driver);
|
|
+ cam_snr_drv_deinit();
|
|
+}
|
|
+
|
|
+late_initcall(cam_sensor_init);
|
|
+module_exit(cam_sensor_exit);
|
|
diff --git a/drivers/media/platform/spacemit/camera/cam_sensor/cam_sensor.h b/drivers/media/platform/spacemit/camera/cam_sensor/cam_sensor.h
|
|
new file mode 100644
|
|
index 000000000000..111111111111
|
|
--- /dev/null
|
|
+++ b/drivers/media/platform/spacemit/camera/cam_sensor/cam_sensor.h
|
|
@@ -0,0 +1,42 @@
|
|
+/* SPDX-License-Identifier: GPL-2.0 */
|
|
+/*
|
|
+ * cam_sensor.h - camera sensor driver
|
|
+ *
|
|
+ * Copyright (C) 2023 SPACEMIT Micro Limited
|
|
+ * All Rights Reserved.
|
|
+ */
|
|
+
|
|
+#ifndef __CAM_SENSOR_H__
|
|
+#define __CAM_SENSOR_H__
|
|
+
|
|
+#include <linux/types.h>
|
|
+#include <linux/gpio/consumer.h>
|
|
+#include <linux/pinctrl/consumer.h>
|
|
+
|
|
+struct cam_sensor_device {
|
|
+ struct platform_device *pdev;
|
|
+ struct cdev cdev;
|
|
+ u32 id;
|
|
+ u32 is_probe_succeed;
|
|
+ u8 twsi_no;
|
|
+ u8 dphy_no;
|
|
+
|
|
+ struct regulator *afvdd;
|
|
+ struct regulator *avdd;
|
|
+ struct regulator *dovdd;
|
|
+ struct regulator *dvdd;
|
|
+
|
|
+ struct gpio_desc *dvdden;
|
|
+ struct gpio_desc *dcdcen;
|
|
+ struct gpio_desc *pwdn;
|
|
+ struct gpio_desc *rst;
|
|
+#ifdef CONFIG_ARCH_ZYNQMP
|
|
+ struct gpio_desc *dptc;
|
|
+#endif
|
|
+ struct clk *mclk;
|
|
+
|
|
+ atomic_t usr_cnt;
|
|
+ struct mutex lock; /* Protects streaming, format, interval */
|
|
+};
|
|
+
|
|
+#endif
|
|
diff --git a/drivers/media/platform/spacemit/camera/cam_util/cam_dbg.c b/drivers/media/platform/spacemit/camera/cam_util/cam_dbg.c
|
|
new file mode 100644
|
|
index 000000000000..111111111111
|
|
--- /dev/null
|
|
+++ b/drivers/media/platform/spacemit/camera/cam_util/cam_dbg.c
|
|
@@ -0,0 +1,90 @@
|
|
+// SPDX-License-Identifier: GPL-2.0
|
|
+/*
|
|
+ * cam_dbg.c - camera debug utility
|
|
+ *
|
|
+ * Copyright(C) 2023 SPACEMIT Micro Limited
|
|
+ */
|
|
+#define DEBUG /* for pr_debug() */
|
|
+
|
|
+#include <linux/module.h>
|
|
+#include <linux/moduleparam.h>
|
|
+#include <media/v4l2-device.h>
|
|
+#include <media/v4l2-ioctl.h>
|
|
+#include "cam_dbg.h"
|
|
+
|
|
+static uint debug_mdl = 0x0; /* disable all modules at default */
|
|
+//static uint debug_mdl = 0x1FF; /* enable all modules for debug */
|
|
+
|
|
+static const char *cam_mdl_str[] = {
|
|
+ [CAM_MDL_VI] = "vi",
|
|
+ [CAM_MDL_ISP] = "isp",
|
|
+ [CAM_MDL_CPP] = "cpp",
|
|
+ [CAM_MDL_VBE] = "vbe",
|
|
+ [CAM_MDL_SNR] = "snr",
|
|
+ [CAM_MDL_IRCUT] = "ircut",
|
|
+ [CAM_MDL_COMMON] = "",
|
|
+};
|
|
+
|
|
+void cam_printk(int module_tag, const char *cam_level, const char *kern_level,
|
|
+ const char *func, int line, const char *format, ...)
|
|
+{
|
|
+ struct va_format vaf;
|
|
+ va_list args;
|
|
+
|
|
+ va_start(args, format);
|
|
+
|
|
+ vaf.fmt = format;
|
|
+ vaf.va = &args;
|
|
+
|
|
+ printk("%s" "%s:%s (%s %d): %pV\n", kern_level, cam_level, cam_mdl_str[module_tag], func, line, &vaf);
|
|
+ va_end(args);
|
|
+}
|
|
+
|
|
+EXPORT_SYMBOL(cam_printk);
|
|
+
|
|
+void cam_printk_ratelimited(int module_tag, const char *cam_level,
|
|
+ const char *kern_level, const char *format, ...)
|
|
+{
|
|
+ struct va_format vaf;
|
|
+ va_list args;
|
|
+
|
|
+ va_start(args, format);
|
|
+
|
|
+ vaf.fmt = format;
|
|
+ vaf.va = &args;
|
|
+
|
|
+ printk_ratelimited("%s" "%s: %s: %pV\n", kern_level, cam_level,
|
|
+ cam_mdl_str[module_tag], &vaf);
|
|
+ va_end(args);
|
|
+}
|
|
+
|
|
+EXPORT_SYMBOL(cam_printk_ratelimited);
|
|
+
|
|
+void cam_debug(int module_tag, const char *cam_level, const char *func, int line, const char *format, ...)
|
|
+{
|
|
+ struct va_format vaf;
|
|
+ va_list args;
|
|
+
|
|
+ if (!(debug_mdl & (1 << module_tag)))
|
|
+ return;
|
|
+
|
|
+ va_start(args, format);
|
|
+
|
|
+ vaf.fmt = format;
|
|
+ vaf.va = &args;
|
|
+
|
|
+ pr_debug("%s:%s (%s %d): %pV\n", cam_level, cam_mdl_str[module_tag], func, line, &vaf);
|
|
+ va_end(args);
|
|
+}
|
|
+
|
|
+EXPORT_SYMBOL(cam_debug);
|
|
+
|
|
+MODULE_PARM_DESC(debug_mdl, "Enable debug output, where each bit enables a module.\n"
|
|
+ "\t\tBit 0 (0x01) will enable VI messages\n"
|
|
+ "\t\tBit 1 (0x02) will enable ISP messages\n"
|
|
+ "\t\tBit 2 (0x04) will enable CPP messages\n"
|
|
+ "\t\tBit 3 (0x08) will enable VBE messages\n"
|
|
+ "\t\tBit 4 (0x10) will enable SENSOR messages\n"
|
|
+ "\t\tBit 5 (0x20) will enable IRCUT messages\n"
|
|
+ "\t\tBit 8 (0x100) will enable COMMON messages");
|
|
+module_param(debug_mdl, uint, 0644);
|
|
diff --git a/drivers/media/platform/spacemit/camera/cam_util/cam_dbg.h b/drivers/media/platform/spacemit/camera/cam_util/cam_dbg.h
|
|
new file mode 100644
|
|
index 000000000000..111111111111
|
|
--- /dev/null
|
|
+++ b/drivers/media/platform/spacemit/camera/cam_util/cam_dbg.h
|
|
@@ -0,0 +1,96 @@
|
|
+/* SPDX-License-Identifier: GPL-2.0 */
|
|
+/*
|
|
+ * cam_dbg.h - camera debug utility
|
|
+ *
|
|
+ * Copyright(C) 2023 SPACEMIT Micro Limited
|
|
+ */
|
|
+#ifndef __CAM_DBG_H__
|
|
+#define __CAM_DBG_H__
|
|
+
|
|
+#include <linux/printk.h>
|
|
+
|
|
+enum dbg_module_tag {
|
|
+ CAM_MDL_VI = 0,
|
|
+ CAM_MDL_ISP = 1,
|
|
+ CAM_MDL_CPP = 2,
|
|
+ CAM_MDL_VBE = 3,
|
|
+ CAM_MDL_SNR = 4,
|
|
+ CAM_MDL_IRCUT = 5,
|
|
+ CAM_MDL_COMMON = 8,
|
|
+};
|
|
+
|
|
+#ifndef CAM_MODULE_TAG
|
|
+#define CAM_MODULE_TAG CAM_MDL_COMMON
|
|
+#endif
|
|
+
|
|
+__printf(6, 7)
|
|
+void cam_printk(int module_tag, const char *cam_level, const char *kern_level,
|
|
+ const char *func, int line, const char *format, ...);
|
|
+
|
|
+__printf(4, 5)
|
|
+void cam_printk_ratelimited(int module_tag, const char *cam_level,
|
|
+ const char *kern_level, const char *format, ...);
|
|
+
|
|
+__printf(5, 6)
|
|
+void cam_debug(int module_tag, const char *cam_level, const char *func, int line, const char *format, ...);
|
|
+
|
|
+/**
|
|
+ * camera error output.
|
|
+ *
|
|
+ * @format: printf() like format string.
|
|
+ */
|
|
+#define cam_err(format, ...) \
|
|
+ cam_printk(CAM_MODULE_TAG, "cam_err", KERN_ERR, \
|
|
+ __func__, __LINE__, format, ##__VA_ARGS__)
|
|
+
|
|
+/**
|
|
+ * camera error output.
|
|
+ *
|
|
+ * @format: printf() like format string.
|
|
+ */
|
|
+#define cam_err_ratelimited(format, ...) \
|
|
+ cam_printk_ratelimited(CAM_MODULE_TAG, "cam_err", KERN_ERR, \
|
|
+ format, ##__VA_ARGS__)
|
|
+
|
|
+/**
|
|
+ * camera warning output.
|
|
+ *
|
|
+ * @format: printf() like format string.
|
|
+ */
|
|
+#define cam_warn(format, ...) \
|
|
+ cam_printk(CAM_MODULE_TAG, "cam_wrn", KERN_WARNING, \
|
|
+ __func__, __LINE__, format, ##__VA_ARGS__)
|
|
+
|
|
+/**
|
|
+ * camera notice output.
|
|
+ *
|
|
+ * @format: printf() like format string.
|
|
+ */
|
|
+#define cam_not(format, ...) \
|
|
+ cam_printk(CAM_MODULE_TAG, "cam_not", KERN_NOTICE, \
|
|
+ __func__, __LINE__, format, ##__VA_ARGS__)
|
|
+
|
|
+/**
|
|
+ * camera information output.
|
|
+ *
|
|
+ * @format: printf() like format string.
|
|
+ */
|
|
+#define cam_info(format, ...) \
|
|
+ cam_printk(CAM_MODULE_TAG, "cam_inf", KERN_INFO, \
|
|
+ __func__, __LINE__, format, ##__VA_ARGS__)
|
|
+
|
|
+/**
|
|
+ * camera debug output.
|
|
+ *
|
|
+ * @format: printf() like format string.
|
|
+ */
|
|
+#define cam_dbg(format, ...) \
|
|
+ cam_debug(CAM_MODULE_TAG, "cam_dbg", __func__, __LINE__, format, ##__VA_ARGS__)
|
|
+
|
|
+#define CAM_DBG_TRACE
|
|
+#ifdef CAM_DBG_TRACE
|
|
+#define cam_trace(f, args...) trace_printk(f, ##args)
|
|
+#else
|
|
+#define cam_trace(f, args...) no_printk(f, ##args)
|
|
+#endif
|
|
+#endif /* ifndef __CAM_DBG_H__ */
|
|
diff --git a/drivers/media/platform/spacemit/camera/vi/cam_block.c b/drivers/media/platform/spacemit/camera/vi/cam_block.c
|
|
new file mode 100644
|
|
index 000000000000..111111111111
|
|
--- /dev/null
|
|
+++ b/drivers/media/platform/spacemit/camera/vi/cam_block.c
|
|
@@ -0,0 +1,58 @@
|
|
+// SPDX-License-Identifier: GPL-2.0
|
|
+/*
|
|
+ * cam_block.c - camera block functions
|
|
+ *
|
|
+ * Copyright(C) 2023 SPACEMIT Micro Limited
|
|
+ */
|
|
+
|
|
+#include <linux/list.h>
|
|
+#include <asm/atomic.h>
|
|
+#define CAM_MODULE_TAG CAM_MDL_VI
|
|
+#include <cam_dbg.h>
|
|
+#include "cam_block.h"
|
|
+#include "vdev.h"
|
|
+#include "subdev.h"
|
|
+#include "mlink.h"
|
|
+
|
|
+void spm_camera_block_init(struct spm_camera_block *b, struct spm_camera_block_ops *ops)
|
|
+{
|
|
+ atomic_set(&b->ref_cnt, 1);
|
|
+ b->ops = ops;
|
|
+}
|
|
+
|
|
+void spm_camera_block_set_base_addr(struct spm_camera_block *b, unsigned long base_addr)
|
|
+{
|
|
+ b->base_addr = base_addr;
|
|
+}
|
|
+
|
|
+int spm_camera_block_get(struct spm_camera_block *b)
|
|
+{
|
|
+ return atomic_inc_return(&b->ref_cnt);
|
|
+}
|
|
+
|
|
+static int __spm_camera_block_put(struct spm_camera_block *b)
|
|
+{
|
|
+ int ret = atomic_dec_return(&b->ref_cnt);
|
|
+
|
|
+ if (0 == ret && b->ops && b->ops->release)
|
|
+ b->ops->release(b);
|
|
+ return ret;
|
|
+}
|
|
+
|
|
+int spm_camera_block_put(struct media_entity *me)
|
|
+{
|
|
+ struct spm_camera_vnode *spm_vnode = NULL;
|
|
+ struct spm_camera_subdev *sc_subdev = NULL;
|
|
+ struct spm_camera_block *b = NULL;
|
|
+
|
|
+ if (is_subdev(me)) {
|
|
+ sc_subdev = (struct spm_camera_subdev *)me;
|
|
+ b = &sc_subdev->sc_block;
|
|
+ } else {
|
|
+ spm_vnode = (struct spm_camera_vnode *)me;
|
|
+ b = &spm_vnode->sc_block;
|
|
+ }
|
|
+
|
|
+ return __spm_camera_block_put(b);
|
|
+}
|
|
+
|
|
diff --git a/drivers/media/platform/spacemit/camera/vi/cam_block.h b/drivers/media/platform/spacemit/camera/vi/cam_block.h
|
|
new file mode 100644
|
|
index 000000000000..111111111111
|
|
--- /dev/null
|
|
+++ b/drivers/media/platform/spacemit/camera/vi/cam_block.h
|
|
@@ -0,0 +1,34 @@
|
|
+/* SPDX-License-Identifier: GPL-2.0 */
|
|
+/*
|
|
+ * cam_block.h - camera block functions
|
|
+ *
|
|
+ * Copyright(C) 2023 SPACEMIT Micro Limited
|
|
+ */
|
|
+
|
|
+#ifndef _SPACEMIT_CAM_BLOCK_H_
|
|
+#define _SPACEMIT_CAM_BLOCK_H_
|
|
+#include <linux/types.h>
|
|
+#include <media/media-entity.h>
|
|
+
|
|
+struct spm_camera_block;
|
|
+
|
|
+struct spm_camera_block_ops {
|
|
+ void (*release)(struct spm_camera_block *b);
|
|
+};
|
|
+
|
|
+struct spm_camera_block {
|
|
+ atomic_t ref_cnt;
|
|
+ unsigned long base_addr;
|
|
+ int irq_num;
|
|
+ struct spm_camera_block_ops *ops;
|
|
+};
|
|
+
|
|
+#define SC_BLOCK(p) (is_subdev((struct media_entity*)(p)) ? (&((struct spm_camera_subdev*)(p))->sc_block) : (&((struct spm_camera_vnode*)(p))->sc_block))
|
|
+
|
|
+void spm_camera_block_init(struct spm_camera_block *b,
|
|
+ struct spm_camera_block_ops *ops);
|
|
+void spm_camera_block_set_base_addr(struct spm_camera_block *b,
|
|
+ unsigned long base_addr);
|
|
+int spm_camera_block_get(struct spm_camera_block *b);
|
|
+int spm_camera_block_put(struct media_entity *me);
|
|
+#endif
|
|
diff --git a/drivers/media/platform/spacemit/camera/vi/k1xvi/fe_isp.c b/drivers/media/platform/spacemit/camera/vi/k1xvi/fe_isp.c
|
|
new file mode 100644
|
|
index 000000000000..111111111111
|
|
--- /dev/null
|
|
+++ b/drivers/media/platform/spacemit/camera/vi/k1xvi/fe_isp.c
|
|
@@ -0,0 +1,5416 @@
|
|
+// SPDX-License-Identifier: GPL-2.0
|
|
+/*
|
|
+ * fe_isp.c - k1xisp front end
|
|
+ *
|
|
+ * Copyright(C) 2023 SPACEMIT Micro Limited
|
|
+ */
|
|
+
|
|
+#include "k1xvi.h"
|
|
+#include "fe_isp.h"
|
|
+#include "hw-seq/hw_isp.h"
|
|
+#include "hw-seq/hw_dma.h"
|
|
+#include "hw-seq/hw_postpipe.h"
|
|
+#include "hw-seq/hw_reg.h"
|
|
+#ifdef CONFIG_SPACEMIT_K1X_VI_IOMMU
|
|
+#include "hw-seq/hw_iommu.h"
|
|
+#include <linux/dma-mapping.h>
|
|
+#endif
|
|
+#include "../vdev.h"
|
|
+#include "../spacemit_videobuf2.h"
|
|
+#include "../vsensor.h"
|
|
+#include <linux/interrupt.h>
|
|
+#include <linux/clk.h>
|
|
+#include <linux/clk-provider.h>
|
|
+#include <linux/io.h>
|
|
+#include <linux/time.h>
|
|
+#include <linux/pm_runtime.h>
|
|
+#include <linux/pm_qos.h>
|
|
+#include <linux/dma-map-ops.h>
|
|
+#include <linux/cma.h>
|
|
+#include <media/videobuf2-core.h>
|
|
+#include <media/k1x/k1x_videodev2.h>
|
|
+#include <media/k1x/k1x_media_bus_format.h>
|
|
+#include <linux/reset.h>
|
|
+#include "../../cam_ccic/ccic_drv.h"
|
|
+//#include <soc/spm/plat.h>
|
|
+//#include <soc/spm/clk-plat.h>
|
|
+#ifdef CAM_MODULE_TAG
|
|
+#undef CAM_MODULE_TAG
|
|
+#endif
|
|
+#define CAM_MODULE_TAG CAM_MDL_VI
|
|
+#include <cam_dbg.h>
|
|
+
|
|
#define USE_TASKLET (1)	/* bottom halves run as tasklets */
#define USE_WORKQ (0)	/* alternative: defer to a workqueue (disabled) */
#define CCIC_MAX_CNT (3)	/* number of CCIC (CSI controller) instances */
#define DMA_START_CNT_WITH_DWT (5)
#define FAKE_CCIC_IRQ (-1)	/* placeholder IRQ for the fake CCIC slot */
#define FAKE_CCIC_ID (CCIC_MAX_CNT)

/* ISP functional/bus clock operating points, in Hz. */
#define ISP_FNC_CLK_FREQ_LOW (307200000)
#define ISP_BUS_CLK_FREQ_LOW (307200000)
#define ISP_FNC_CLK_FREQ_HIGH (416000000)
#define ISP_BUS_CLK_FREQ_HIGH (307200000)

#define MMU_RESERVED_MEM_SIZE (4 * 1024)	/* size of the reserved fallback page pool */

/* Selector for fe_isp_set_clk(): low or high clock operating point. */
enum {
	ISP_CLK_LOW = 0,
	ISP_CLK_HIGH,
};
|
|
+
|
|
#ifdef CONFIG_SPACEMIT_DEBUG
/* Tracks whether the VI device is busy so clock-off requests can be vetoed. */
struct dev_running_info {
	bool b_dev_running;	/* set while the device is actively streaming */
	bool (*is_dev_running)(struct dev_running_info *p_devinfo);
	struct notifier_block nb;	/* clock rate-change notifier */
} vi_running_info;

/* Default is_dev_running() implementation: report the cached flag. */
static bool __maybe_unused check_dev_running_status(struct dev_running_info *p_devinfo)
{
	return p_devinfo->b_dev_running;
}

#define to_devinfo(_nb) container_of(_nb, struct dev_running_info, nb)

/*
 * Clock notifier: veto (NOTIFY_BAD) a rate change that would gate an
 * enabled clock (new rate 0, old rate non-zero) while the device is
 * still running.
 */
static int __maybe_unused dev_clkoffdet_notifier_handler(struct notifier_block *nb,
							 unsigned long msg, void *data)
{
	struct clk_notifier_data *cnd = data;
	struct dev_running_info *p_devinfo = to_devinfo(nb);

	if ((__clk_is_enabled(cnd->clk)) && (msg & PRE_RATE_CHANGE) &&
	    (cnd->new_rate == 0) && (cnd->old_rate != 0)) {
		if (p_devinfo->is_dev_running(p_devinfo))
			return NOTIFY_BAD;
	}

	return NOTIFY_OK;
}
#endif
|
|
+
|
|
struct isp_context;

/* Frame sequence counter, kept per pipe / per DMA context. */
struct frame_id {
	__u64 id;		/* monotonically increasing frame number */
	struct list_head entry;
};

#define ISP_DMA_WORK_MAX_CNT (16)	/* pre-allocated work items per DMA context */
struct isp_dma_context;
/* One deferred-work item used to finish a DMA interrupt outside hard-IRQ context. */
struct isp_dma_work_struct {
	struct work_struct dma_work;		/* workqueue variant of the bottom half */
	struct tasklet_struct dma_tasklet;	/* tasklet variant of the bottom half */
	struct list_head idle_list_entry;	/* on dma_work_idle_list while free */
	struct list_head busy_list_entry;	/* on dma_work_busy_list while in flight */
	unsigned int irq_status;		/* IRQ status snapshot for the bottom half */
	struct isp_dma_context *dma_ctx;	/* owning DMA channel context */
};

/* Per-DMA-channel state (one per input/output port). */
struct isp_dma_context {
	struct list_head dma_work_idle_list;	/* free bottom-half work items */
	struct list_head dma_work_busy_list;	/* in-flight bottom-half work items */
	struct list_head list_entry;
	spinlock_t slock;			/* protects the two work lists */
	struct spm_camera_vnode *vnode;		/* video node bound to this channel */
	struct isp_context *isp_ctx;		/* back-pointer to the driver context */
	struct frame_id frame_id;		/* frame counter for this channel */
	int used_for_hdr;
	int trig_dma_reload;
	struct wait_queue_head waitq_head;
	struct wait_queue_head waitq_eof;	/* woken at end-of-frame */
	int in_streamoff;			/* set while stream-off is in progress */
	int in_irq;				/* set while the IRQ bottom half runs */
	atomic_t busy_cnt;			/* outstanding bottom-half count */
	int id;
#ifdef CONFIG_SPACEMIT_K1X_VI_IOMMU
	/* Ping-pong IOMMU translation tables, indexed [slot][plane]. */
	dma_addr_t tt_addr[2][2];		/* DMA addresses of the tables */
	unsigned int *tt_base[2][2];		/* CPU mappings of the tables */
	unsigned int tbu_update_cnt[2];		/* selects the ping/pong slot per plane */
#endif
};
|
|
+
|
|
/* Per-CCIC (CSI controller) bookkeeping. */
struct ccic {
	atomic_t pwr_cnt;			/* power-on reference count */
	struct spm_camera_sensor *sc_sensor;	/* sensor attached to this CCIC */
	struct ccic_ctrl *csi_ctrl;		/* low-level CSI controller handle */
};

/* IOMMU TBU reload state values (isp_pipeline_context::mmu_tbu_reload). */
enum {
	MMU_TBU_OK = 0,
	//MMU_TBU_TRIGGER_READY,
	MMU_TBU_RELOAD,
	MMU_TBU_RELOAD_START = 4,
};

/* Per-pipeline state attached to spm_camera_pipeline::usr_data. */
struct isp_pipeline_context {
	struct list_head fmt_wdma_list[FORMATTER_NUM];
	struct list_head wdma_list;
	/* Buffers waiting for all WDMA ports of a formatter to finish one frame. */
	struct list_head fmt_wdma_sync[FORMATTER_NUM][VB2_MAX_FRAME];
	unsigned int fmt_wdma_sync_cnt[FORMATTER_NUM][VB2_MAX_FRAME];
	unsigned int fmt_wdma_start_cnt[FORMATTER_NUM];
	unsigned int fmt_wdma_cnt[FORMATTER_NUM];
	unsigned int mmu_tbu_reload;		/* see MMU_TBU_* above */
	struct camera_capture_slice_info cc_slice_info;	/* slice-mode offsets/counts */
};

#define ISP_PRINT_WORK_MAX_CNT (64)	/* pre-allocated deferred-print slots */
/* Deferred printing from IRQ context (see vi_irq_print). */
struct isp_print_work_struct {
	struct work_struct print_work;
	struct list_head list;		/* on isp_context::print_work_list while free */
	char msg_string[128];		/* pre-formatted message to print */
	struct isp_context *isp_ctx;
};
|
|
+
|
|
/* Fatal error flags accumulated in isp_context::isp_fatal_error. */
#define ISP_FATAL_ERR_PIPE0_OVERRUN (1 << 0)
#define ISP_FATAL_ERR_PIPE1_OVERRUN (1 << 1)
#define ISP_FATAL_ERR_DMA_OVERLAP (1 << 2)

/* Top-level driver state for the k1x VI/ISP front end. */
struct isp_context {
	struct fe_rawdump *rawdumps[RAWDUMP_NUM];
	struct fe_pipe *pipes[PIPE_NUM];
	struct fe_formatter *formatters[FORMATTER_NUM];
	struct isp_dma_context dma_out_ctx[AOUT_NUM];	/* write-DMA (output) channels */
	struct isp_dma_context dma_in_ctx[AIN_NUM];	/* read-DMA (input) channels */
	struct frame_id pipe_frame_id[PIPE_NUM];
	struct platform_device *pdev;
	unsigned long base_addr;	/* ISP register base */
//	struct clk *ahb_clk;
	struct reset_control *ahb_reset;
	struct reset_control *isp_reset;
	struct reset_control *isp_ci_reset;
	struct reset_control *lcd_mclk_reset;

	struct clk *fnc_clk;	/* ISP functional clock */
	struct clk *bus_clk;	/* ISP bus clock */
	struct clk *dpu_clk;
	struct spm_camera_block *dma_block;
	struct ccic ccic[CCIC_MAX_CNT];
	struct isp_print_work_struct print_works[ISP_PRINT_WORK_MAX_CNT];
	struct list_head print_work_list;	/* free deferred-print slots */
	atomic_t pwr_cnt;			/* power-on reference count */
	unsigned int isp_fatal_error;		/* ISP_FATAL_ERR_* bits */
	unsigned int dma_overlap_cnt;
	spinlock_t slock;			/* protects print_work_list and error state */
	struct completion global_reset_done;
#ifdef CONFIG_SPACEMIT_K1X_VI_IOMMU
	struct isp_iommu_device *mmu_dev;
	dma_addr_t trans_tab_dma_addr;	/* backing storage for all translation tables */
	void *trans_tab_cpu_addr;
	size_t total_trans_tab_sz;
	dma_addr_t rsvd_phy_addr;	/* fallback page for SC_BUF_FLAG_RSVD_Z1 buffers */
	unsigned char *rsvd_vaddr;
#endif
};

/* Per-port configuration collected before stream-on. */
struct vi_port_cfg {
	struct wdma_fifo_ctrl w_fifo_ctrl;
	unsigned int usage;
	unsigned int buf_required_min;	/* minimum queued buffers required to start */
};
|
|
+
|
|
/*
 * Print from IRQ context without formatting on the console synchronously:
 * take a free isp_print_work_struct under isp_ctx->slock, format the
 * message into it and schedule the actual print on the system workqueue.
 * Falls back to a direct cam_err() when no slot is free.  A variable
 * named 'isp_ctx' (struct isp_context *) must be in scope at the call
 * site.  NOTE(review): the "d " prefix in the fallback cam_err() looks
 * like leftover debug text — confirm before relying on log format.
 */
#define vi_irq_print(fmt, ...) do { \
		unsigned long vi_irq_flags = 0; \
		struct isp_print_work_struct *print_work = NULL; \
		spin_lock_irqsave(&isp_ctx->slock, vi_irq_flags); \
		print_work = list_first_entry_or_null(&isp_ctx->print_work_list, struct isp_print_work_struct, list); \
		if (print_work) { \
			list_del_init(&print_work->list); \
			spin_unlock_irqrestore(&isp_ctx->slock, vi_irq_flags);\
			snprintf(print_work->msg_string, 128, fmt, ##__VA_ARGS__); \
			schedule_work(&print_work->print_work); \
		} else { \
			spin_unlock_irqrestore(&isp_ctx->slock, vi_irq_flags);\
			cam_err("d " fmt, ##__VA_ARGS__); \
		} \
	} while(0)
|
|
+
|
|
/* Forward declarations for the interrupt and buffer-flush paths. */
static irqreturn_t fe_isp_irq_handler(int irq, void *dev_id);
static irqreturn_t fe_isp_dma_irq_handler(int irq, void *dev_id);
static irqreturn_t fe_isp_process_dma_reload(struct isp_context *isp_ctx,
					     struct spm_camera_pipeline *sc_pipeline);
static void fe_isp_reset_frame_id(struct spm_camera_pipeline *sc_pipeline);
static void fe_isp_flush_pipeline_buffers(struct isp_context *isp_ctx,
					  struct spm_camera_pipeline *sc_pipeline);
static void fe_isp_export_camera_vbuffer(struct spm_camera_vnode *sc_vnode,
					 struct spm_camera_vbuffer *sc_vb);
static void fe_isp_dma_bh_handler(struct isp_dma_work_struct *isp_dma_work);

/* Capability flags for entries of idi_fmts_table. */
#define IDI_FMT_FLAG_OFFLINE_INPUT (1 << 0)	/* usable as offline (memory) input */
#define IDI_FMT_FLAG_ONLINE_INPUT (1 << 1)	/* usable as online (sensor) input */
#define IDI_FMT_FLAG_OUTPUT (1 << 2)		/* usable as rawdump/IDI output */
|
|
+static struct {
|
|
+ unsigned int fmt_code;
|
|
+ int cfa_pattern;
|
|
+ unsigned int bit_depth;
|
|
+ unsigned int flags;
|
|
+} idi_fmts_table[] = {
|
|
+ {
|
|
+ .fmt_code = MEDIA_BUS_FMT_SRGB8_SPACEMITPACK_1X8,
|
|
+ .cfa_pattern = CFA_IGNR,
|
|
+ .bit_depth = 8,
|
|
+ .flags = IDI_FMT_FLAG_OFFLINE_INPUT | IDI_FMT_FLAG_ONLINE_INPUT | IDI_FMT_FLAG_OUTPUT,
|
|
+ },
|
|
+ {
|
|
+ .fmt_code = MEDIA_BUS_FMT_SBGGR8_1X8,
|
|
+ .cfa_pattern = BGGR,
|
|
+ .bit_depth = 8,
|
|
+ .flags = IDI_FMT_FLAG_OFFLINE_INPUT | IDI_FMT_FLAG_ONLINE_INPUT | IDI_FMT_FLAG_OUTPUT,
|
|
+ },
|
|
+ {
|
|
+ .fmt_code = MEDIA_BUS_FMT_SGBRG8_1X8,
|
|
+ .cfa_pattern = GBRG,
|
|
+ .bit_depth = 8,
|
|
+ .flags = IDI_FMT_FLAG_OFFLINE_INPUT | IDI_FMT_FLAG_ONLINE_INPUT | IDI_FMT_FLAG_OUTPUT,
|
|
+ },
|
|
+ {
|
|
+ .fmt_code = MEDIA_BUS_FMT_SGRBG8_1X8,
|
|
+ .cfa_pattern = GRBG,
|
|
+ .bit_depth = 8,
|
|
+ .flags = IDI_FMT_FLAG_OFFLINE_INPUT | IDI_FMT_FLAG_ONLINE_INPUT | IDI_FMT_FLAG_OUTPUT,
|
|
+ },
|
|
+ {
|
|
+ .fmt_code = MEDIA_BUS_FMT_SRGGB8_1X8,
|
|
+ .cfa_pattern = RGGB,
|
|
+ .bit_depth = 8,
|
|
+ .flags = IDI_FMT_FLAG_OFFLINE_INPUT | IDI_FMT_FLAG_ONLINE_INPUT | IDI_FMT_FLAG_OUTPUT,
|
|
+ },
|
|
+ {
|
|
+ .fmt_code = MEDIA_BUS_FMT_SRGB10_SPACEMITPACK_1X10,
|
|
+ .cfa_pattern = CFA_IGNR,
|
|
+ .bit_depth = 10,
|
|
+ .flags = IDI_FMT_FLAG_OFFLINE_INPUT | IDI_FMT_FLAG_ONLINE_INPUT | IDI_FMT_FLAG_OUTPUT,
|
|
+ },
|
|
+ {
|
|
+ .fmt_code = MEDIA_BUS_FMT_SBGGR10_1X10,
|
|
+ .cfa_pattern = BGGR,
|
|
+ .bit_depth = 10,
|
|
+ .flags = IDI_FMT_FLAG_OFFLINE_INPUT | IDI_FMT_FLAG_ONLINE_INPUT | IDI_FMT_FLAG_OUTPUT,
|
|
+ },
|
|
+ {
|
|
+ .fmt_code = MEDIA_BUS_FMT_SGBRG10_1X10,
|
|
+ .cfa_pattern = GBRG,
|
|
+ .bit_depth = 10,
|
|
+ .flags = IDI_FMT_FLAG_OFFLINE_INPUT | IDI_FMT_FLAG_ONLINE_INPUT | IDI_FMT_FLAG_OUTPUT,
|
|
+ },
|
|
+ {
|
|
+ .fmt_code = MEDIA_BUS_FMT_SGRBG10_1X10,
|
|
+ .cfa_pattern = GRBG,
|
|
+ .bit_depth = 10,
|
|
+ .flags = IDI_FMT_FLAG_OFFLINE_INPUT | IDI_FMT_FLAG_ONLINE_INPUT | IDI_FMT_FLAG_OUTPUT,
|
|
+ },
|
|
+ {
|
|
+ .fmt_code = MEDIA_BUS_FMT_SRGGB10_1X10,
|
|
+ .cfa_pattern = RGGB,
|
|
+ .bit_depth = 10,
|
|
+ .flags = IDI_FMT_FLAG_OFFLINE_INPUT | IDI_FMT_FLAG_ONLINE_INPUT | IDI_FMT_FLAG_OUTPUT,
|
|
+ },
|
|
+ {
|
|
+ .fmt_code = MEDIA_BUS_FMT_SRGB12_SPACEMITPACK_1X12,
|
|
+ .cfa_pattern = CFA_IGNR,
|
|
+ .bit_depth = 12,
|
|
+ .flags = IDI_FMT_FLAG_OFFLINE_INPUT | IDI_FMT_FLAG_ONLINE_INPUT | IDI_FMT_FLAG_OUTPUT,
|
|
+ },
|
|
+ {
|
|
+ .fmt_code = MEDIA_BUS_FMT_SBGGR12_1X12,
|
|
+ .cfa_pattern = BGGR,
|
|
+ .bit_depth = 12,
|
|
+ .flags = IDI_FMT_FLAG_OFFLINE_INPUT | IDI_FMT_FLAG_ONLINE_INPUT | IDI_FMT_FLAG_OUTPUT,
|
|
+ },
|
|
+ {
|
|
+ .fmt_code = MEDIA_BUS_FMT_SGBRG12_1X12,
|
|
+ .cfa_pattern = GBRG,
|
|
+ .bit_depth = 12,
|
|
+ .flags = IDI_FMT_FLAG_OFFLINE_INPUT | IDI_FMT_FLAG_ONLINE_INPUT | IDI_FMT_FLAG_OUTPUT,
|
|
+ },
|
|
+ {
|
|
+ .fmt_code = MEDIA_BUS_FMT_SGRBG12_1X12,
|
|
+ .cfa_pattern = GRBG,
|
|
+ .bit_depth = 12,
|
|
+ .flags = IDI_FMT_FLAG_OFFLINE_INPUT | IDI_FMT_FLAG_ONLINE_INPUT | IDI_FMT_FLAG_OUTPUT,
|
|
+ },
|
|
+ {
|
|
+ .fmt_code = MEDIA_BUS_FMT_SRGGB12_1X12,
|
|
+ .cfa_pattern = RGGB,
|
|
+ .bit_depth = 12,
|
|
+ .flags = IDI_FMT_FLAG_OFFLINE_INPUT | IDI_FMT_FLAG_ONLINE_INPUT | IDI_FMT_FLAG_OUTPUT,
|
|
+ },
|
|
+ {
|
|
+ .fmt_code = MEDIA_BUS_FMT_SRGB14_SPACEMITPACK_1X14,
|
|
+ .cfa_pattern = CFA_IGNR,
|
|
+ .bit_depth = 14,
|
|
+ .flags = IDI_FMT_FLAG_ONLINE_INPUT | IDI_FMT_FLAG_OUTPUT,
|
|
+ },
|
|
+ {
|
|
+ .fmt_code = MEDIA_BUS_FMT_SBGGR14_1X14,
|
|
+ .cfa_pattern = BGGR,
|
|
+ .bit_depth = 14,
|
|
+ .flags = IDI_FMT_FLAG_ONLINE_INPUT | IDI_FMT_FLAG_OUTPUT,
|
|
+ },
|
|
+ {
|
|
+ .fmt_code = MEDIA_BUS_FMT_SGBRG14_1X14,
|
|
+ .cfa_pattern = GBRG,
|
|
+ .bit_depth = 14,
|
|
+ .flags = IDI_FMT_FLAG_ONLINE_INPUT | IDI_FMT_FLAG_OUTPUT,
|
|
+ },
|
|
+ {
|
|
+ .fmt_code = MEDIA_BUS_FMT_SGRBG14_1X14,
|
|
+ .cfa_pattern = GRBG,
|
|
+ .bit_depth = 14,
|
|
+ .flags = IDI_FMT_FLAG_ONLINE_INPUT | IDI_FMT_FLAG_OUTPUT,
|
|
+ },
|
|
+ {
|
|
+ .fmt_code = MEDIA_BUS_FMT_SRGGB14_1X14,
|
|
+ .cfa_pattern = RGGB,
|
|
+ .bit_depth = 14,
|
|
+ .flags = IDI_FMT_FLAG_ONLINE_INPUT | IDI_FMT_FLAG_OUTPUT,
|
|
+ },
|
|
+};
|
|
+
|
|
+static struct {
|
|
+ unsigned int fmt_code;
|
|
+ int format;
|
|
+} formatter_fmts_table[] = {
|
|
+ {
|
|
+ .fmt_code = MEDIA_BUS_FMT_RGB565_1X16,
|
|
+ .format = RGB565,
|
|
+ },
|
|
+ {
|
|
+ .fmt_code = MEDIA_BUS_FMT_RGB888_1X24,
|
|
+ .format = RGB888,
|
|
+ },
|
|
+ {
|
|
+ .fmt_code = MEDIA_BUS_FMT_YUYV8_1_5X8,
|
|
+ .format = NV12,
|
|
+ },
|
|
+ {
|
|
+ .fmt_code = MEDIA_BUS_FMT_YVYU8_1_5X8,
|
|
+ .format = NV21,
|
|
+ },
|
|
+ {
|
|
+ .fmt_code = MEDIA_BUS_FMT_YUYV10_1X20,
|
|
+ .format = Y210,
|
|
+ },
|
|
+ {
|
|
+ .fmt_code = MEDIA_BUS_FMT_YUYV10_2X10,
|
|
+ .format = P210,
|
|
+ },
|
|
+ {
|
|
+ .fmt_code = MEDIA_BUS_FMT_YUYV10_1_5X10,
|
|
+ .format = P010,
|
|
+ },
|
|
+};
|
|
+
|
|
+static struct {
|
|
+ unsigned int fmt_code;
|
|
+ int layer_idx;
|
|
+}dwt_fmts_table[] = {
|
|
+ {
|
|
+ .fmt_code = MEDIA_BUS_FMT_YUYV10_1_5X10_D1,
|
|
+ .layer_idx = 1,
|
|
+ },
|
|
+ {
|
|
+ .fmt_code = MEDIA_BUS_FMT_YUYV10_1_5X10_D2,
|
|
+ .layer_idx = 2,
|
|
+ },
|
|
+ {
|
|
+ .fmt_code = MEDIA_BUS_FMT_YUYV10_1_5X10_D3,
|
|
+ .layer_idx = 3,
|
|
+ },
|
|
+ {
|
|
+ .fmt_code = MEDIA_BUS_FMT_YUYV10_1_5X10_D4,
|
|
+ .layer_idx = 4,
|
|
+ },
|
|
+ {
|
|
+ .fmt_code = MEDIA_BUS_FMT_YVYU10_1_5X10_D1,
|
|
+ .layer_idx = 1,
|
|
+ },
|
|
+ {
|
|
+ .fmt_code = MEDIA_BUS_FMT_YVYU10_1_5X10_D2,
|
|
+ .layer_idx = 2,
|
|
+ },
|
|
+ {
|
|
+ .fmt_code = MEDIA_BUS_FMT_YVYU10_1_5X10_D3,
|
|
+ .layer_idx = 3,
|
|
+ },
|
|
+ {
|
|
+ .fmt_code = MEDIA_BUS_FMT_YVYU10_1_5X10_D4,
|
|
+ .layer_idx = 4,
|
|
+ },
|
|
+};
|
|
+
|
|
/*
 * Blanking timings for the test pattern generator, per resolution.
 * Fields are, in order: width, height, hblank, vblank.
 */
static struct {
	unsigned int width;
	unsigned int height;
	unsigned int hblank;
	unsigned int vblank;
} tpg_timming_table[] = {
	{ 2560, 1440, 3218, 455830 },
	{ 3840, 2160, 3864, 32495 },
	{ 4608, 3456, 207, 32495 },
};
|
|
+
|
|
+static void fe_isp_set_clk(struct isp_context *isp_ctx, int clk_mode)
|
|
+{
|
|
+ unsigned long clk_val = 0;
|
|
+
|
|
+ if (clk_mode == ISP_CLK_LOW) {
|
|
+ clk_val = clk_round_rate(isp_ctx->fnc_clk, ISP_FNC_CLK_FREQ_LOW);
|
|
+ clk_set_rate(isp_ctx->fnc_clk, clk_val);
|
|
+ clk_val = clk_round_rate(isp_ctx->bus_clk, ISP_BUS_CLK_FREQ_LOW);
|
|
+ clk_set_rate(isp_ctx->bus_clk, clk_val);
|
|
+ } else {
|
|
+ clk_val = clk_round_rate(isp_ctx->fnc_clk, ISP_FNC_CLK_FREQ_HIGH);
|
|
+ clk_set_rate(isp_ctx->fnc_clk, clk_val);
|
|
+ clk_val = clk_round_rate(isp_ctx->bus_clk, ISP_BUS_CLK_FREQ_HIGH);
|
|
+ clk_set_rate(isp_ctx->bus_clk, clk_val);
|
|
+ }
|
|
+}
|
|
+
|
|
+#ifdef CONFIG_SPACEMIT_K1X_VI_IOMMU
|
|
+
|
|
/*
 * Build an IOMMU translation table covering the byte window
 * [offset, offset + length) of the scatter-gather list @sgt.  One 22-bit
 * page-frame number per 4 KiB page is written to @tt_base.  Returns the
 * number of entries written.
 * NOTE(review): mixes sg_phys() with sg_dma_len(); assumes CPU-physical
 * and DMA lengths agree for these buffers — confirm against the mapping
 * path.
 */
static uint32_t fe_isp_fill_trans_tab_by_sg(uint32_t *tt_base, struct sg_table *sgt,
					    uint32_t offset, uint32_t length)
{
	struct scatterlist *sg = NULL;
	size_t temp_size = 0, temp_offset = 0, temp_length = 0;
	dma_addr_t start_addr = 0, end_addr = 0, dmad = 0;
	int i = 0;
	uint32_t tt_size = 0;

	sg = sgt->sgl;
	for (i = 0; i < sgt->nents; ++i, sg = sg_next(sg)) {
		cam_dbg("sg%d: addr 0x%llx, size 0x%x", i, sg_phys(sg), sg_dma_len(sg));
		/* total bytes seen so far; skip segments wholly before the window */
		temp_size += sg_dma_len(sg);
		if (temp_size <= offset)
			continue;

		/* byte offset of the window start within this segment */
		if (offset > temp_size - sg_dma_len(sg))
			temp_offset = offset - temp_size + sg_dma_len(sg);
		else
			temp_offset = 0;
		/* round window start down to a 4 KiB page boundary */
		start_addr = ((phys_cpu2cam(sg_phys(sg)) + temp_offset) >> 12) << 12;

		/* bytes of the window this and earlier segments can cover */
		temp_length = temp_size - offset;
		if (temp_length >= length)
			temp_offset = sg_dma_len(sg) - temp_length + length;
		else
			temp_offset = sg_dma_len(sg);
		/* round window end up to a 4 KiB page boundary */
		end_addr = ((phys_cpu2cam(sg_phys(sg)) + temp_offset + 0xfff) >> 12) << 12;

		/* emit one page-frame number per page in [start_addr, end_addr) */
		for (dmad = start_addr; dmad < end_addr; dmad += 0x1000)
			tt_base[tt_size++] = (dmad >> 12) & 0x3fffff;

		if (temp_length >= length)
			break;
	}

	return tt_size;
}
|
|
+
|
|
/*
 * Resolve the device address the ISP DMA should use for plane @plane_no
 * of @vb.  Reserved "Z1" buffers map to the driver's fallback page;
 * physically contiguous buffers return their physical address directly;
 * scattered buffers get an IOMMU translation table built and return a
 * TBU virtual address.  Output channels ping-pong between two tables
 * per plane (mutates tbu_update_cnt), so callers must serialize per
 * channel.
 */
static dma_addr_t spm_vb2_buf_paddr(struct vb2_buffer *vb, unsigned int plane_no)
{
	unsigned int offset = 0, length = 0, tt_size = 0, tid = 0;
	int index = 0;
	uint32_t *tt_base = NULL;
	dma_addr_t tt_addr = 0;
	dma_addr_t paddr = 0;
	struct spm_camera_vbuffer *sc_vb = NULL;
	struct spm_camera_vnode *sc_vnode = NULL;
	struct isp_context *isp_ctx = NULL;
	struct scatterlist *sg = NULL;
	struct sg_table *sgt = NULL;
	struct platform_device *pdev = k1xvi_get_platform_device();
	struct k1xvi_platform_data *drvdata = NULL;

	drvdata = platform_get_drvdata(pdev);
	BUG_ON(!drvdata);
	isp_ctx = drvdata->isp_ctx;
	BUG_ON(!isp_ctx);
	sc_vb = vb2_buffer_to_spm_camera_vbuffer(vb);
	if (sc_vb->flags & SC_BUF_FLAG_RSVD_Z1) {
		/* dummy buffer: point the DMA at the reserved fallback page */
		return isp_ctx->rsvd_phy_addr;
	}
	sc_vnode = sc_vb->sc_vnode;
	BUG_ON(!sc_vnode);
	sgt = (struct sg_table *)vb2_plane_cookie(vb, plane_no);
	offset = sc_vnode->planes_offset[vb->index][plane_no];
	length = vb->planes[plane_no].length;
	if (sc_vb->flags & SC_BUF_FLAG_CONTINOUS) {
		/* physically contiguous: no IOMMU translation needed */
		sg = sgt->sgl;
		paddr = sg_phys(sg) + offset;
	} else {
		if (SPACEMIT_VNODE_DIR_OUT == sc_vnode->direction) {
			/* alternate between the two per-plane tables */
			index = (isp_ctx->dma_out_ctx[sc_vnode->idx].tbu_update_cnt[plane_no])++ & 0x1;
			tt_base = isp_ctx->dma_out_ctx[sc_vnode->idx].tt_base[index][plane_no];
			tt_addr = isp_ctx->dma_out_ctx[sc_vnode->idx].tt_addr[index][plane_no];
		} else {
			tt_base = isp_ctx->dma_in_ctx[sc_vnode->idx].tt_base[0][plane_no];
			tt_addr = isp_ctx->dma_in_ctx[sc_vnode->idx].tt_addr[0][plane_no];
		}
		tid = MMU_TID(sc_vnode->direction, sc_vnode->idx, plane_no);
		tt_size = fe_isp_fill_trans_tab_by_sg(tt_base, sgt, offset, length);
		isp_mmu_call(isp_ctx->mmu_dev, config_channel, tid, tt_addr, tt_size);
		isp_mmu_call(isp_ctx->mmu_dev, enable_channel, tid);
		paddr = (dma_addr_t)isp_ctx->mmu_dev->ops->get_sva(isp_ctx->mmu_dev, tid, offset);
	}

	return paddr;
}
|
|
+#endif
|
|
+
|
|
+static int __maybe_unused fe_isp_lookup_tpg_timming_table(unsigned int width,
|
|
+ unsigned int height,
|
|
+ unsigned int *hblank,
|
|
+ unsigned int *vblank)
|
|
+{
|
|
+ int i = 0;
|
|
+
|
|
+ if (!hblank || !vblank)
|
|
+ return -1;
|
|
+ for (i = 0; i < ARRAY_SIZE(tpg_timming_table); i++) {
|
|
+ if (tpg_timming_table[i].width == width
|
|
+ && tpg_timming_table[i].height == height) {
|
|
+ *hblank = tpg_timming_table[i].hblank;
|
|
+ *vblank = tpg_timming_table[i].vblank;
|
|
+ return 0;
|
|
+ }
|
|
+ }
|
|
+
|
|
+ return -1;
|
|
+}
|
|
+
|
|
+static int list_add_no_repeat(struct list_head *new, struct list_head *head)
|
|
+{
|
|
+ struct list_head *pos = NULL;
|
|
+
|
|
+ list_for_each(pos, head) {
|
|
+ if (pos == new)
|
|
+ return 1;
|
|
+ }
|
|
+ list_add(new, head);
|
|
+ return 0;
|
|
+}
|
|
+
|
|
+static int fe_isp_lookup_dwt_fmts_table(struct v4l2_subdev_format *sd_format,
|
|
+ int layer_idx)
|
|
+{
|
|
+ int i = 0;
|
|
+
|
|
+ for (i = 0; i < ARRAY_SIZE(dwt_fmts_table); i++) {
|
|
+ if (sd_format->format.code == dwt_fmts_table[i].fmt_code
|
|
+ && layer_idx == dwt_fmts_table[i].layer_idx)
|
|
+ return 1;
|
|
+ }
|
|
+
|
|
+ return 0;
|
|
+}
|
|
+
|
|
+static int fe_isp_lookup_formatter_fmts_table(struct v4l2_subdev_format *sd_format,
|
|
+ int *format, int dwt_mode)
|
|
+{
|
|
+ int i = 0;
|
|
+
|
|
+ if (dwt_mode && sd_format->format.code != MEDIA_BUS_FMT_YUYV8_1_5X8
|
|
+ && sd_format->format.code != MEDIA_BUS_FMT_YVYU8_1_5X8)
|
|
+ return 0;
|
|
+ for (i = 0; i < ARRAY_SIZE(formatter_fmts_table); i++) {
|
|
+ if (sd_format->format.code == formatter_fmts_table[i].fmt_code) {
|
|
+ *format = formatter_fmts_table[i].format;
|
|
+ return 1;
|
|
+ }
|
|
+ }
|
|
+
|
|
+ return 0;
|
|
+}
|
|
+
|
|
/*
 * Program the read-DMA (input) address for @sc_vnode from plane 0 of
 * @sc_vbuf, advanced by @offset bytes (used by slice processing).
 */
static void fe_isp_update_ain_dma_addr(struct spm_camera_vnode *sc_vnode,
				       struct spm_camera_vbuffer *sc_vbuf,
				       unsigned int offset)
{
	hw_dma_update_rdma_address(SC_BLOCK(sc_vnode),
				   sc_vnode->idx,
				   (uint64_t)spm_vb2_buf_paddr(&(sc_vbuf->vb2_v4l2_buf.vb2_buf), 0) + offset);
}
|
|
+
|
|
+static void fe_isp_update_aout_dma_addr(struct spm_camera_vnode *sc_vnode,
|
|
+ struct spm_camera_vbuffer *sc_vbuf,
|
|
+ unsigned int offset)
|
|
+{
|
|
+ dma_addr_t p0 = 0, p1 = 0;
|
|
+ struct vb2_buffer *vb2_buf = &(sc_vbuf->vb2_v4l2_buf.vb2_buf);
|
|
+ struct media_pad *remote_pad = media_entity_remote_pad(&sc_vnode->pad);
|
|
+ struct v4l2_subdev *remote_sd = NULL;
|
|
+ struct k1xvi_platform_data *drvdata = NULL;
|
|
+ struct isp_context *isp_ctx = NULL;
|
|
+
|
|
+ if (!remote_pad) {
|
|
+ cam_err("%s(%s) remote_pad was null", __func__, sc_vnode->name);
|
|
+ return;
|
|
+ }
|
|
+ remote_sd = media_entity_to_v4l2_subdev(remote_pad->entity);
|
|
+ if (!remote_sd) {
|
|
+ cam_err("%s(%s) remote_sd was null", __func__, sc_vnode->name);
|
|
+ return;
|
|
+ }
|
|
+ drvdata = dev_get_drvdata(remote_sd->dev);
|
|
+ if (!drvdata) {
|
|
+ cam_err("%s(%s) drvdata was null", __func__, sc_vnode->name);
|
|
+ return;
|
|
+ }
|
|
+ isp_ctx = drvdata->isp_ctx;
|
|
+ p0 = spm_vb2_buf_paddr(vb2_buf, 0) + offset;
|
|
+ if (vb2_buf->num_planes > 1) {
|
|
+ p1 = spm_vb2_buf_paddr(vb2_buf, 1) + offset;
|
|
+ }
|
|
+ hw_dma_update_wdma_address(SC_BLOCK(sc_vnode), sc_vnode->idx, p0, p1);
|
|
+}
|
|
+
|
|
/*
 * Video-node event notifier shared by the fe_* subdevs (csi, dwt,
 * formatter, offline channel, rawdump).  Handles:
 *  - BUF_QUEUED: in offline non-slice mode, kick the read DMA when a
 *    buffer lands on an empty input queue; in slice mode, program the
 *    per-slice read/write offsets and trigger/arm the DMA; for rawdump,
 *    arm force-shadow buffers when the busy list is empty.
 *  - BUF_PREPARE: currently a no-op (old DWT padding code kept as
 *    reference).
 *  - STREAM_OFF: flush this node's buffers out of the formatter/WDMA
 *    sync lists back to userspace.
 * Always runs with the relevant vnode/pipeline spinlock held around
 * list manipulation.  Returns NOTIFY_DONE (or NOTIFY_STOP when slice
 * mode finds no idle buffer).
 */
static int fe_isp_vnode_notifier_handler(struct notifier_block *nb,
					 unsigned long action, void *data)
{
	struct spm_camera_subdev *sc_subdev =
		container_of(nb, struct spm_camera_subdev, vnode_nb);
	struct media_pipeline *pipe = media_entity_pipeline(&sc_subdev->pcsd.sd.entity);
	struct spm_camera_vnode *sc_vnode = NULL;
	struct spm_camera_pipeline *sc_pipeline = NULL;
	struct isp_pipeline_context *pipe_ctx = NULL;
	struct isp_context *isp_ctx = NULL;
	struct csi *csi = NULL;
	struct fe_dwt *dwt = NULL;
	struct fe_formatter *formatter = NULL;
	struct fe_offline_channel *offline_channel = NULL;
	struct fe_rawdump *rawdump = NULL;
	//struct media_pad *remote_pad = NULL;
	int ret = 0, i = 0, j = 0;
	struct spm_camera_vbuffer *sc_vb = NULL, *pos = NULL, *n = NULL;
	unsigned long flags = 0;
	unsigned int offset = 0;
	//unsigned char *buf_vaddr = NULL;
	//static const char uv_padding[] = {0x00, 0x02, 0x08, 0x20, 0x80};

	if (!pipe)
		return NOTIFY_DONE;
	/* Identify which kind of subdev this notifier belongs to
	 * (all casts below return NULL on type mismatch). */
	dwt = v4l2_subdev_to_dwt(&sc_subdev->pcsd.sd);
	csi = v4l2_subdev_to_csi(&sc_subdev->pcsd.sd);
	formatter = v4l2_subdev_to_formatter(&sc_subdev->pcsd.sd);
	offline_channel = v4l2_subdev_to_offline_channel(&sc_subdev->pcsd.sd);
	rawdump = v4l2_subdev_to_rawdump(&sc_subdev->pcsd.sd);
	isp_ctx = spm_subdev_get_drvdata(sc_subdev);
	sc_pipeline = media_pipeline_to_sc_pipeline(pipe);
	pipe_ctx = (struct isp_pipeline_context *)sc_pipeline->usr_data;
	if (action == SPACEMIT_VNODE_NOTIFY_BUF_QUEUED) {
		sc_vnode = (struct spm_camera_vnode *)data;
		if (!sc_vnode)
			return NOTIFY_DONE;
		if (!(sc_pipeline->is_online_mode) && is_vnode_streaming(sc_vnode)) {
			if (!sc_pipeline->is_slice_mode) {
				/* offline, whole-frame: restart the read DMA when a
				 * buffer arrives on an otherwise empty input queue */
				spin_lock_irqsave(&sc_vnode->slock, flags);
				if (sc_vnode->direction == SPACEMIT_VNODE_DIR_IN
				    && __spm_vdev_busy_list_empty(sc_vnode)) {
					ret = __spm_vdev_dq_idle_vbuffer(sc_vnode, &sc_vb);
					if (ret) {
						spin_unlock_irqrestore(&sc_vnode->slock, flags);
						return NOTIFY_DONE;
					}
					fe_isp_update_ain_dma_addr(sc_vnode, sc_vb, 0);
					__spm_vdev_q_busy_vbuffer(sc_vnode, sc_vb);
					hw_dma_rdma_trigger(SC_BLOCK(sc_vnode), sc_vnode->idx);
				}
				spin_unlock_irqrestore(&sc_vnode->slock, flags);
			} else {
				/* slice mode: pick the per-subdev slice offset */
				BUG_ON(!pipe_ctx);
				if (offline_channel) {
					offset = pipe_ctx->cc_slice_info.raw_read_offset;
					cam_dbg("%s(%s) raw_read_offset=%d", __func__, sc_vnode->name, offset);
				} else if (formatter) {
					offset = pipe_ctx->cc_slice_info.yuv_out_offset;
					cam_dbg("%s(%s) yuv_out_offset=%d", __func__, sc_vnode->name, offset);
				} else if (dwt) {
					offset = pipe_ctx->cc_slice_info.dwt_offset[dwt->layer_idx - 1];
					cam_dbg("%s(%s) dwt[%d]_offset=%d", __func__, sc_vnode->name, dwt->layer_idx, offset);
				} else {
					BUG_ON(1);
				}
				spin_lock_irqsave(&sc_vnode->slock, flags);
				ret = __spm_vdev_dq_idle_vbuffer(sc_vnode, &sc_vb);
				if (ret) {
					spin_unlock_irqrestore(&sc_vnode->slock, flags);
					return NOTIFY_STOP;
				}
				__spm_vdev_q_busy_vbuffer(sc_vnode, sc_vb);
				spin_unlock_irqrestore(&sc_vnode->slock, flags);
				/* mark the buffer when this is the final slice */
				if ((sc_pipeline->slice_id + 1) == pipe_ctx->cc_slice_info.total_slice_cnt)
					sc_vb->vb2_v4l2_buf.flags |= V4L2_BUF_FLAG_SLICES_DONE;
				if (offline_channel) {
					fe_isp_update_ain_dma_addr(sc_vnode, sc_vb, offset);
					hw_dma_rdma_trigger(SC_BLOCK(sc_vnode), sc_vnode->idx);
				} else {
					fe_isp_update_aout_dma_addr(sc_vnode, sc_vb, offset);
					hw_dma_set_wdma_ready(SC_BLOCK(sc_vnode), sc_vnode->idx, 1);
				}
			}
		}
		if (rawdump && is_vnode_streaming(sc_vnode)) {
			/* rawdump: immediately arm a force-shadow buffer when idle */
			spin_lock_irqsave(&sc_vnode->slock, flags);
			if (__spm_vdev_busy_list_empty(sc_vnode)) {
				sc_vb = NULL;
				__spm_vdev_pick_idle_vbuffer(sc_vnode, &sc_vb);
				if (sc_vb && sc_vb->flags & SC_BUF_FLAG_FORCE_SHADOW) {
					__spm_vdev_dq_idle_vbuffer(sc_vnode, &sc_vb);
					__spm_vdev_q_busy_vbuffer(sc_vnode, sc_vb);
					fe_isp_update_aout_dma_addr(sc_vnode, sc_vb, 0);
					hw_dma_set_wdma_ready(SC_BLOCK(sc_vnode), sc_vnode->idx, 1);
				}
			}
			spin_unlock_irqrestore(&sc_vnode->slock, flags);
		}
	} else if(action == SPACEMIT_VNODE_NOTIFY_BUF_PREPARE) {
		/* Historical DWT buffer pre-fill, kept for reference. */
		//sc_vb = (struct spm_camera_vbuffer*)data;
		//if (!sc_vb)
		//	return NOTIFY_DONE;
		//if (dwt && !sc_vb->reset_flag) {
		//	remote_pad = media_entity_remote_pad(&(dwt->pads[PAD_OUT]));
		//	if (!remote_pad)
		//		return NOTIFY_DONE;
		//	sc_vnode = media_entity_to_sc_vnode(remote_pad->entity);
		//	if (!sc_vnode)
		//		return NOTIFY_DONE;
		//	sc_vb->reset_flag = 1;
		//	buf_vaddr = (unsigned char*)vb2_plane_vaddr(&sc_vb->vb2_v4l2_buf.vb2_buf, 0);
		//	BUG_ON(!buf_vaddr);
		//	memset(buf_vaddr, 0, sc_vnode->cur_fmt.fmt.pix_mp.plane_fmt[0].sizeimage);
		//	buf_vaddr = vb2_plane_vaddr(&sc_vb->vb2_v4l2_buf.vb2_buf, 1);
		//	for (i = 0; (i + 5) <= sc_vnode->cur_fmt.fmt.pix_mp.plane_fmt[1].sizeimage; i += 5) {
		//		memcpy(buf_vaddr + i, uv_padding, 5);
		//	}
		//}
	} else if (action == SPACEMIT_VNODE_NOTIFY_STREAM_OFF) {
		sc_vnode = (struct spm_camera_vnode *)data;
		if (!csi && pipe_ctx) {
			/* return this node's buffers parked on the WDMA sync lists */
			spin_lock_irqsave(&sc_pipeline->slock, flags);
			for (i = 0; i < FORMATTER_NUM; i++) {
				for (j = 0; j < VB2_MAX_FRAME; j++) {
					list_for_each_entry_safe(pos, n, &pipe_ctx->fmt_wdma_sync[i][j], list_entry) {
						if (sc_vnode->idx == pos->sc_vnode->idx) {
							spm_vdev_export_camera_vbuffer(pos, 1);
							list_del_init(&(pos->list_entry));
						}
					}
				}
			}
			spin_unlock_irqrestore(&sc_pipeline->slock, flags);
		}
	}
	return NOTIFY_DONE;
}
|
|
+
|
|
/*
 * Initialize one DMA channel context: list heads, lock, wait queues, and
 * @dma_worker_cnt pre-allocated bottom-half work items, each initialized
 * both as a work_struct and as a tasklet (which one runs is a compile-
 * time choice).  Returns 0 or -ENOMEM; allocations are devm-managed, so
 * partial initialization needs no manual unwind.
 */
static int fe_isp_init_dma_context(struct isp_dma_context *dma_ctx,
				   struct isp_context *isp_ctx,
				   unsigned int dma_worker_cnt,
				   void (*dma_work_handler)(struct work_struct *),
				   void (*dma_tasklet_handler)(unsigned long),
				   struct device *dev)
{
	int i = 0;
	struct isp_dma_work_struct *isp_dma_work = NULL;

	INIT_LIST_HEAD(&dma_ctx->dma_work_idle_list);
	INIT_LIST_HEAD(&dma_ctx->dma_work_busy_list);
	INIT_LIST_HEAD(&dma_ctx->list_entry);
	INIT_LIST_HEAD(&dma_ctx->frame_id.entry);
	spin_lock_init(&dma_ctx->slock);
	init_waitqueue_head(&dma_ctx->waitq_head);
	init_waitqueue_head(&dma_ctx->waitq_eof);
	dma_ctx->in_streamoff = 0;
	dma_ctx->in_irq = 0;
	atomic_set(&dma_ctx->busy_cnt, 0);
	for (i = 0; i < dma_worker_cnt; i++) {
		isp_dma_work = devm_kzalloc(dev, sizeof(*isp_dma_work), GFP_KERNEL);
		if (!isp_dma_work) {
			cam_err("%s not enough mem", __func__);
			return -ENOMEM;
		}
		INIT_WORK(&isp_dma_work->dma_work, dma_work_handler);
		tasklet_init(&isp_dma_work->dma_tasklet, dma_tasklet_handler,
			     (unsigned long)isp_dma_work);
		INIT_LIST_HEAD(&isp_dma_work->idle_list_entry);
		INIT_LIST_HEAD(&isp_dma_work->busy_list_entry);
		isp_dma_work->dma_ctx = dma_ctx;
		/* new items start on the idle list */
		list_add(&isp_dma_work->idle_list_entry, &dma_ctx->dma_work_idle_list);
	}
	dma_ctx->isp_ctx = isp_ctx;

	return 0;
}
|
|
+
|
|
+static int fe_isp_get_dma_work(struct isp_dma_context *dma_ctx,
|
|
+ struct isp_dma_work_struct **isp_dma_work)
|
|
+{
|
|
+ unsigned long flags = 0;
|
|
+
|
|
+ spin_lock_irqsave(&dma_ctx->slock, flags);
|
|
+ *isp_dma_work =
|
|
+ list_first_entry_or_null(&dma_ctx->dma_work_idle_list,
|
|
+ struct isp_dma_work_struct, idle_list_entry);
|
|
+ if (NULL == *isp_dma_work) {
|
|
+ spin_unlock_irqrestore(&dma_ctx->slock, flags);
|
|
+ return -1;
|
|
+ }
|
|
+ list_del_init(&((*isp_dma_work)->idle_list_entry));
|
|
+ list_add(&((*isp_dma_work)->busy_list_entry), &dma_ctx->dma_work_busy_list);
|
|
+ spin_unlock_irqrestore(&dma_ctx->slock, flags);
|
|
+
|
|
+ return 0;
|
|
+}
|
|
+
|
|
+static int fe_isp_put_dma_work(struct isp_dma_context *dma_ctx,
|
|
+ struct isp_dma_work_struct *isp_dma_work)
|
|
+{
|
|
+ unsigned long flags = 0;
|
|
+
|
|
+ spin_lock_irqsave(&dma_ctx->slock, flags);
|
|
+ list_del_init(&isp_dma_work->busy_list_entry);
|
|
+ list_add(&isp_dma_work->idle_list_entry, &dma_ctx->dma_work_idle_list);
|
|
+ spin_unlock_irqrestore(&dma_ctx->slock, flags);
|
|
+
|
|
+ return 0;
|
|
+}
|
|
+
|
|
+static int fe_rawdump_subdev_pad_get_fmt(struct v4l2_subdev *sd,
|
|
+ struct v4l2_subdev_state *state,
|
|
+ struct v4l2_subdev_format *format)
|
|
+{
|
|
+ struct fe_rawdump *rawdump = v4l2_subdev_to_rawdump(sd);
|
|
+
|
|
+ if (format->pad >= RAWDUMP_PAD_NUM) {
|
|
+ cam_dbg("%s(%s) invalid pad%d.", __func__, rawdump->sc_subdev.name,
|
|
+ format->pad);
|
|
+ return -EINVAL;
|
|
+ }
|
|
+
|
|
+ format->format = rawdump->pad_fmts[format->pad].format;
|
|
+
|
|
+ return 0;
|
|
+}
|
|
+
|
|
+static int fe_isp_lookup_raw_fmts_table(struct v4l2_subdev_format *format,
|
|
+ unsigned int flags,
|
|
+ int *cfa_pattern, unsigned int *bit_depth)
|
|
+{
|
|
+ int i = 0;
|
|
+
|
|
+ for (i = 0; i < ARRAY_SIZE(idi_fmts_table); i++) {
|
|
+ if (format->format.code == idi_fmts_table[i].fmt_code
|
|
+ && (flags & idi_fmts_table[i].flags)) {
|
|
+ if (cfa_pattern)
|
|
+ *cfa_pattern = idi_fmts_table[i].cfa_pattern;
|
|
+ if (bit_depth)
|
|
+ *bit_depth = idi_fmts_table[i].bit_depth;
|
|
+ return 1;
|
|
+ }
|
|
+ }
|
|
+
|
|
+ return 0;
|
|
+}
|
|
+
|
|
+static int fe_rawdump_subdev_pad_set_fmt(struct v4l2_subdev *sd,
|
|
+ struct v4l2_subdev_state *state,
|
|
+ struct v4l2_subdev_format *format)
|
|
+{
|
|
+ struct fe_rawdump *rawdump = v4l2_subdev_to_rawdump(sd);
|
|
+ struct spm_camera_subdev *sc_subdev = &rawdump->sc_subdev;
|
|
+ struct v4l2_format v4l2_fmt;
|
|
+ struct v4l2_subdev_format *pad_in_fmt = NULL;
|
|
+ unsigned int bit_depth = 0;
|
|
+ struct isp_context *isp_ctx = v4l2_get_subdevdata(sd);
|
|
+ struct spm_camera_vnode *sc_vnode = NULL;
|
|
+ struct media_pad *remote_pad = NULL;
|
|
+ struct media_entity *me = &sd->entity;
|
|
+ struct spm_camera_pipeline *sc_pipeline = NULL;
|
|
+ struct media_pipeline *mpipe = media_entity_pipeline(me);
|
|
+ struct fe_pipe *pipe = NULL;
|
|
+ int is_mix_hdr = 0;
|
|
+
|
|
+ if (format->which != V4L2_SUBDEV_FORMAT_ACTIVE) {
|
|
+ cam_err("%s(%s) didn't support format which(%d)", __func__,
|
|
+ sc_subdev->name, format->which);
|
|
+ return -EINVAL;
|
|
+ }
|
|
+ if (format->pad >= RAWDUMP_PAD_NUM) {
|
|
+ cam_err("%s(%s) invalid pad%d.", __func__, sc_subdev->name,
|
|
+ format->pad);
|
|
+ return -EINVAL;
|
|
+ }
|
|
+
|
|
+ switch (format->pad) {
|
|
+ case PAD_IN:
|
|
+ if (!mpipe) {
|
|
+ cam_err("%s(%s) pipe is null", __func__, sc_subdev->name);
|
|
+ return -1;
|
|
+ }
|
|
+ sc_pipeline = media_pipeline_to_sc_pipeline(mpipe);
|
|
+ //if (format->format.width > sc_pipeline->max_width[0]
|
|
+ // || format->format.height > sc_pipeline->max_height[0]
|
|
+ // || format->format.width < sc_pipeline->min_width[0]
|
|
+ // || format->format.height < sc_pipeline->min_height[0]) {
|
|
+ // cam_err("%s(%s) %ux%u exceeded max %ux%u min %ux%u", __func__, sc_subdev->name,
|
|
+ // format->format.width, format->format.height,
|
|
+ // sc_pipeline->max_width[0], sc_pipeline->max_height[0],
|
|
+ // sc_pipeline->min_width[0], sc_pipeline->min_height[0]);
|
|
+ // return -1;
|
|
+ //}
|
|
+ if (!fe_isp_lookup_raw_fmts_table(format, IDI_FMT_FLAG_ONLINE_INPUT, NULL, &bit_depth)) {
|
|
+ cam_err("%s(%s) pad%d didn't support format(%dx%d code:0x%08x).", __func__, sc_subdev->name, format->pad,
|
|
+ format->format.width, format->format.height, format->format.code);
|
|
+ return -1;
|
|
+ }
|
|
+ hw_isp_top_set_rawdump_fmt(SC_BLOCK(isp_ctx->pipes[0]),
|
|
+ rawdump->idx,
|
|
+ format->format.width,
|
|
+ format->format.height, bit_depth);
|
|
+ rawdump->pad_fmts[PAD_IN].format = format->format;
|
|
+ rawdump->pad_fmts[PAD_OUT].format = format->format;
|
|
+ cam_dbg("%s(%s) pad%d set format(%dx%d code:0x%08x bit_depth:%u).", __func__, sc_subdev->name, format->pad,
|
|
+ format->format.width, format->format.height, format->format.code, bit_depth);
|
|
+ return 0;
|
|
+ case PAD_OUT:
|
|
+ pad_in_fmt = &rawdump->pad_fmts[PAD_IN];
|
|
+ remote_pad = media_entity_remote_pad(&rawdump->pads[format->pad]);
|
|
+ if (!remote_pad) {
|
|
+ cam_err("%s(%s) PAD_OUT had no link.", __func__, sc_subdev->name);
|
|
+ return -1;
|
|
+ }
|
|
+ sc_vnode = media_entity_to_sc_vnode(remote_pad->entity);
|
|
+ BUG_ON(!sc_vnode);
|
|
+ if (!fe_isp_lookup_raw_fmts_table(format, IDI_FMT_FLAG_OUTPUT, NULL, &bit_depth)) {
|
|
+ cam_err("%s(%s) pad%d didn't support format(%dx%d code:0x%08x).", __func__, sc_subdev->name, format->pad,
|
|
+ format->format.width, format->format.height, format->format.code);
|
|
+ return -1;
|
|
+ }
|
|
+ if (format->format.code != pad_in_fmt->format.code
|
|
+ || format->format.width != pad_in_fmt->format.width
|
|
+ || format->format.height != pad_in_fmt->format.height) {
|
|
+ cam_err("%s(%s) PAD_OUT format(%dx%d code:0x%08x) is not the same with PAD_IN format(%dx%d code:0x%08x).",
|
|
+ __func__, sc_subdev->name,
|
|
+ format->format.width, format->format.height, format->format.code,
|
|
+ pad_in_fmt->format.width, pad_in_fmt->format.height, pad_in_fmt->format.code);
|
|
+ return -1;
|
|
+ }
|
|
+ remote_pad = media_entity_remote_pad(&rawdump->pads[PAD_IN]);
|
|
+ if (remote_pad) {
|
|
+ pipe = media_entity_to_pipe(remote_pad->entity);
|
|
+ if (pipe)
|
|
+ is_mix_hdr = 1;
|
|
+ }
|
|
+ spm_vdev_fill_v4l2_format(format, &v4l2_fmt);
|
|
+ //fill HW sequence
|
|
+ //hw_isp_top_set_rawdump_fmt(&(isp_ctx->pipes[0]->sc_subdev.sc_block),
|
|
+ // rawdump->idx,
|
|
+ // format->format.width,
|
|
+ // format->format.height,
|
|
+ // bit_depth);
|
|
+ hw_dma_set_wdma_pitch(SC_BLOCK(sc_vnode),
|
|
+ sc_vnode->idx,
|
|
+ v4l2_fmt.fmt.pix_mp.num_planes,
|
|
+ v4l2_fmt.fmt.pix_mp.plane_fmt[0].bytesperline,
|
|
+ v4l2_fmt.fmt.pix_mp.plane_fmt[1].bytesperline);
|
|
+ if (is_mix_hdr)
|
|
+ hw_dma_set_rdma_pitch(SC_BLOCK(sc_vnode), 0, v4l2_fmt.fmt.pix_mp.plane_fmt[0].bytesperline);
|
|
+ return 0;
|
|
+ default:
|
|
+ cam_dbg("%s(%s) didn't support set fmt for pad%d.", __func__, sc_subdev->name, format->pad);
|
|
+ return -1;
|
|
+ }
|
|
+
|
|
+ return 0;
|
|
+}
|
|
+
|
|
+static int fe_rawdump_subdev_video_s_stream(struct v4l2_subdev *sd, int enable)
|
|
+{
|
|
+ struct fe_rawdump *rawdump = v4l2_subdev_to_rawdump(sd);
|
|
+ struct spm_camera_subdev *sc_subdev = &rawdump->sc_subdev;
|
|
+ struct media_pad *remote_pad = NULL;
|
|
+ struct csi *csi = NULL;
|
|
+ struct fe_pipe *pipe = NULL;
|
|
+ struct isp_context *isp_ctx = v4l2_get_subdevdata(sd);
|
|
+ struct media_entity *me_pipe0 = (struct media_entity*)isp_ctx->pipes[0];
|
|
+ struct media_entity *me_pipe1 = (struct media_entity*)isp_ctx->pipes[1];
|
|
+ struct spm_camera_pipeline *sc_pipeline = NULL, *sc_pipeline0 = NULL, *sc_pipeline1 = NULL;
|
|
+ struct media_pipeline *mpipe = media_entity_pipeline(&sd->entity);
|
|
+ unsigned int irq_bitmap = 0, cap_to_preview = 0;
|
|
+ unsigned int vi_flags = 0, clk_high = 0;
|
|
+ int ret = 0, source = 0, rawdump_only = 0, sensor_id = 0;
|
|
+
|
|
+ BUG_ON(!me_pipe0);
|
|
+ BUG_ON(!me_pipe1);
|
|
+ if (!mpipe) {
|
|
+ cam_err("%s(%s) pipe was null", __func__, sc_subdev->name);
|
|
+ return -1;
|
|
+ }
|
|
+
|
|
+ sc_pipeline0 = media_pipeline_to_sc_pipeline(me_pipe0);
|
|
+ sc_pipeline1 = media_pipeline_to_sc_pipeline(me_pipe1);
|
|
+ vi_flags = (rawdump->pad_fmts[PAD_IN].format.field >> SPACEMIT_VI_SWITCH_FLAGS_SHIFT) & SPACEMIT_VI_PRI_DATA_MASK;
|
|
+ cap_to_preview = vi_flags & SPACEMIT_VI_FLAG_BACK_TO_PREVIEW;
|
|
+ clk_high = vi_flags & SPACEMIT_VI_FLAG_CLK_HIGH;
|
|
+ sensor_id = rawdump->pad_fmts[PAD_IN].format.field & SPACEMIT_VI_PRI_DATA_MASK;
|
|
+ sensor_id = (sensor_id & SPACEMIT_VI_SENSOR_ID_MASK) >> SPACEMIT_VI_SENSOR_ID_SHIFT;
|
|
+ sc_pipeline = media_pipeline_to_sc_pipeline(mpipe);
|
|
+ remote_pad = media_entity_remote_pad(&rawdump->pads[PAD_IN]);
|
|
+ if (!remote_pad) {
|
|
+ cam_err("%s(%s) PAD_IN had no link", __func__, sc_subdev->name);
|
|
+ return -1;
|
|
+ }
|
|
+ csi = media_entity_to_csi(remote_pad->entity);
|
|
+ pipe = media_entity_to_pipe(remote_pad->entity);
|
|
+
|
|
+ if (pipe && !csi) {
|
|
+ remote_pad = media_entity_remote_pad(&(pipe->pads[PIPE_PAD_IN]));
|
|
+ if (!remote_pad) {
|
|
+ cam_err("%s(%s) PAD_IN->pipe had no active input link", __func__, sc_subdev->name);
|
|
+ return -1;
|
|
+ }
|
|
+ csi = media_entity_to_csi(remote_pad->entity);
|
|
+ }
|
|
+ if (!csi) {
|
|
+ cam_err("%s(%s) PAD_IN had no link to csi or pipe->csi.", __func__, sc_subdev->name);
|
|
+ return -1;
|
|
+ }
|
|
+ ret = blocking_notifier_call_chain(&sc_pipeline->blocking_notify_chain,
|
|
+ PIPELINE_ACTION_PIPE_ACK, NULL);
|
|
+ if (NOTIFY_STOP == ret) {
|
|
+ rawdump_only = 0;
|
|
+ rawdump->rawdump_only = 0;
|
|
+ } else {
|
|
+ rawdump_only = 1;
|
|
+ rawdump->rawdump_only = 1;
|
|
+ }
|
|
+ if (rawdump->idx == 0)
|
|
+ irq_bitmap = POSTERR_IRQ_RDP0_SDW_CLOSE_DONE;
|
|
+ else
|
|
+ irq_bitmap = POSTERR_IRQ_RDP1_SDW_CLOSE_DONE;
|
|
+ hw_isp_top_set_rdp_cfg_rdy(SC_BLOCK(isp_ctx->pipes[0]), rawdump->idx, 0);
|
|
+ if (enable) {
|
|
+ if (clk_high)
|
|
+ fe_isp_set_clk(isp_ctx, ISP_CLK_HIGH);
|
|
+ else
|
|
+ fe_isp_set_clk(isp_ctx, ISP_CLK_LOW);
|
|
+ atomic_set(&rawdump->close_done, 0);
|
|
+ fe_isp_reset_frame_id(sc_pipeline);
|
|
+ isp_ctx->dma_overlap_cnt = 0;
|
|
+ if (sensor_id == 0)
|
|
+ source = SENSOR0_CH0;
|
|
+ else
|
|
+ source = SENSOR1_CH0;
|
|
+ hw_isp_top_clr_posterr_irq_status(SC_BLOCK(isp_ctx->pipes[0]), irq_bitmap);
|
|
+ hw_isp_top_set_posterr_irq_enable(SC_BLOCK(isp_ctx->pipes[0]), irq_bitmap, 0);
|
|
+ hw_isp_top_set_rawdump_source(SC_BLOCK(isp_ctx->pipes[0]), rawdump->idx, source);
|
|
+ hw_isp_top_enable_rawdump(SC_BLOCK(isp_ctx->pipes[rawdump->idx]), 1, rawdump_only);
|
|
+ if (isp_ctx->dma_block)
|
|
+ hw_dma_reset(isp_ctx->dma_block);
|
|
+ if (sc_pipeline0 && sc_pipeline1) {
|
|
+ if ((sc_pipeline0->is_online_mode && !sc_pipeline1->is_online_mode)
|
|
+ || (!sc_pipeline0->is_online_mode && sc_pipeline1->is_online_mode)) {
|
|
+ hw_isp_top_set_speed_ctrl(SC_BLOCK(isp_ctx->pipes[0]), 1);
|
|
+ } else {
|
|
+ hw_isp_top_set_speed_ctrl(SC_BLOCK(isp_ctx->pipes[0]), 0);
|
|
+ }
|
|
+ } else {
|
|
+ hw_isp_top_set_speed_ctrl(SC_BLOCK(isp_ctx->pipes[0]), 0);
|
|
+ }
|
|
+ if (!cap_to_preview)
|
|
+ hw_isp_top_shadow_latch(SC_BLOCK(isp_ctx->pipes[rawdump->idx]));
|
|
+ } else {
|
|
+ hw_isp_top_set_rawdump_source(SC_BLOCK(isp_ctx->pipes[0]), rawdump->idx, INVALID_CH);
|
|
+ hw_isp_top_enable_rawdump(SC_BLOCK(isp_ctx->pipes[rawdump->idx]), 0, rawdump_only);
|
|
+ hw_isp_top_set_posterr_irq_enable(SC_BLOCK(isp_ctx->pipes[0]), 0, irq_bitmap);
|
|
+ if (rawdump_only) {
|
|
+ if (!atomic_read(&rawdump->close_done))
|
|
+ cam_warn("%s(%s) stream off not signaled", __func__, sc_subdev->name);
|
|
+ else
|
|
+ cam_dbg("%s(%s) stream off ok", __func__, sc_subdev->name);
|
|
+ }
|
|
+ }
|
|
+ hw_isp_top_set_rdp_cfg_rdy(SC_BLOCK(isp_ctx->pipes[0]), rawdump->idx, 1);
|
|
+ return 0;
|
|
+}
|
|
+
|
|
+static int csi_subdev_core_s_power(struct v4l2_subdev *sd, int on);
|
|
+
|
|
+static int csi_subdev_video_s_stream(struct v4l2_subdev *sd, int enable)
|
|
+{
|
|
+ struct csi *csi = v4l2_subdev_to_csi(sd);
|
|
+ struct spm_camera_subdev *sc_subdev = &csi->sc_subdev;
|
|
+ struct isp_context *isp_ctx = v4l2_get_subdevdata(sd);
|
|
+ struct ccic_ctrl *csi_ctrl = NULL;
|
|
+ int ret = 0, csi2vc = 0, csi2idi = 0, mipi_lane_num = 0, sensor_id = 0;
|
|
+
|
|
+ sensor_id = csi->pad_fmts[CSI_PAD_IN].format.field & SPACEMIT_VI_PRI_DATA_MASK;
|
|
+ sensor_id = (sensor_id & SPACEMIT_VI_SENSOR_ID_MASK) >> SPACEMIT_VI_SENSOR_ID_SHIFT;
|
|
+ csi_ctrl = isp_ctx->ccic[sensor_id].csi_ctrl;
|
|
+ BUG_ON(!csi_ctrl);
|
|
+ if (sensor_id == 0)
|
|
+ csi2idi = CCIC_CSI2IDI0;
|
|
+ else
|
|
+ csi2idi = CCIC_CSI2IDI1;
|
|
+ if (csi->channel_type == CSI_MAIN)
|
|
+ csi2vc = CCIC_CSI2VC_MAIN;
|
|
+ else
|
|
+ csi2vc = CCIC_CSI2VC_VCDT;
|
|
+ if (enable) {
|
|
+#ifndef CONFIG_SPACEMIT_XILINX_ZYNQMP
|
|
+ csi_subdev_core_s_power(sd, 1);
|
|
+#endif
|
|
+ mipi_lane_num = csi->pad_fmts[CSI_PAD_IN].format.field & SPACEMIT_VI_PRI_DATA_MASK;
|
|
+ mipi_lane_num &= SPACEMIT_VI_MIPI_LANE_MASK;
|
|
+ cam_dbg("%s(%s) mipi lane num:%d", __func__, sc_subdev->name, mipi_lane_num);
|
|
+ ret = csi_ctrl->ops->config_csi2idi_mux(csi_ctrl, csi2vc, csi2idi, 1);
|
|
+ if (ret) {
|
|
+ cam_err("%s(%s) config mux(enable) failed ret=%d", __func__, sc_subdev->name, ret);
|
|
+ return ret;
|
|
+ }
|
|
+ ret = csi_ctrl->ops->config_csi2_mbus(csi_ctrl, CCIC_CSI2VC_NM, 0, 0, mipi_lane_num);
|
|
+ if (ret) {
|
|
+ cam_err("%s(%s) config mbus(enable) lane=%d failed ret=%d", __func__, sc_subdev->name, 4, ret);
|
|
+ return ret;
|
|
+ }
|
|
+ csi_ctrl->ops->irq_mask(csi_ctrl, 1);
|
|
+ } else {
|
|
+ csi_ctrl->ops->irq_mask(csi_ctrl, 0);
|
|
+ csi_ctrl->ops->config_csi2_mbus(csi_ctrl, CCIC_CSI2VC_NM, 0, 0, 0);
|
|
+ csi_ctrl->ops->config_csi2idi_mux(csi_ctrl, csi2vc, csi2idi, 0);
|
|
+ csi_subdev_core_s_power(sd, 0);
|
|
+ }
|
|
+
|
|
+ return 0;
|
|
+}
|
|
+
|
|
+static struct isp_pipeline_context *fe_pipeline_create_ctx(struct device *dev)
|
|
+{
|
|
+ int i = 0, j = 0;
|
|
+ struct isp_pipeline_context *pipe_ctx = devm_kzalloc(dev, sizeof(*pipe_ctx), GFP_KERNEL);
|
|
+
|
|
+ if (!pipe_ctx)
|
|
+ return NULL;
|
|
+ for (i = 0; i < FORMATTER_NUM; i++)
|
|
+ INIT_LIST_HEAD(&pipe_ctx->fmt_wdma_list[i]);
|
|
+ for (i = 0; i < FORMATTER_NUM; i++) {
|
|
+ for (j = 0; j < VB2_MAX_FRAME; j++)
|
|
+ INIT_LIST_HEAD(&pipe_ctx->fmt_wdma_sync[i][j]);
|
|
+ }
|
|
+ INIT_LIST_HEAD(&pipe_ctx->wdma_list);
|
|
+
|
|
+ return pipe_ctx;
|
|
+}
|
|
+
|
|
+static int fe_offline_channel_subdev_pad_set_fmt(struct v4l2_subdev *sd,
|
|
+ struct v4l2_subdev_state *state,
|
|
+ struct v4l2_subdev_format *format)
|
|
+{
|
|
+ struct fe_offline_channel *offline_channel = v4l2_subdev_to_offline_channel(sd);
|
|
+ struct spm_camera_subdev *sc_subdev = &offline_channel->sc_subdev;
|
|
+ struct media_pad *remote_pad_in = NULL, *remote_pad_p0out = NULL, *remote_pad_p1out = NULL;
|
|
+ struct spm_camera_vnode *sc_vnode = NULL;
|
|
+ struct v4l2_format v4l2_fmt;
|
|
+
|
|
+ if (format->which != V4L2_SUBDEV_FORMAT_ACTIVE) {
|
|
+ cam_err("%s(%s) didn't support format which(%d)", __func__,
|
|
+ sc_subdev->name, format->which);
|
|
+ return -EINVAL;
|
|
+ }
|
|
+ if (format->pad >= OFFLINE_CH_PAD_NUM) {
|
|
+ cam_err("%s(%s) invalid pad%d.", __func__, sc_subdev->name, format->pad);
|
|
+ return -EINVAL;
|
|
+ }
|
|
+ remote_pad_in = media_entity_remote_pad(&(offline_channel->pads[OFFLINE_CH_PAD_IN]));
|
|
+ remote_pad_p0out = media_entity_remote_pad(&(offline_channel->pads[OFFLINE_CH_PAD_P0OUT]));
|
|
+ remote_pad_p1out = media_entity_remote_pad(&(offline_channel->pads[OFFLINE_CH_PAD_P1OUT]));
|
|
+ if (!remote_pad_in || (!remote_pad_p0out && !remote_pad_p1out)) {
|
|
+ cam_err("%s didn't have valid link.", sc_subdev->name);
|
|
+ return -1;
|
|
+ }
|
|
+ sc_vnode = media_entity_to_sc_vnode(remote_pad_in->entity);
|
|
+ if (!sc_vnode) {
|
|
+ cam_err("%s(%s) OFFLINE_CH_PAD_IN should link to ain", __func__, sc_subdev->name);
|
|
+ return -1;
|
|
+ }
|
|
+ switch (format->pad) {
|
|
+ case OFFLINE_CH_PAD_IN:
|
|
+ if (!fe_isp_lookup_raw_fmts_table(format, IDI_FMT_FLAG_OFFLINE_INPUT, NULL, NULL)) {
|
|
+ cam_err("%s(%s) pad%d didn't support format(%dx%d code:0x%08x).",
|
|
+ __func__, sc_subdev->name, format->pad, format->format.width,
|
|
+ format->format.height, format->format.code);
|
|
+ return -1;
|
|
+ }
|
|
+ spm_vdev_fill_v4l2_format(format, &v4l2_fmt);
|
|
+ hw_dma_set_rdma_pitch(SC_BLOCK(sc_vnode),
|
|
+ sc_vnode->idx,
|
|
+ v4l2_fmt.fmt.pix_mp.plane_fmt[0].bytesperline);
|
|
+ offline_channel->pad_fmts[OFFLINE_CH_PAD_IN].format = format->format;
|
|
+ offline_channel->pad_fmts[OFFLINE_CH_PAD_P0OUT].format = format->format;
|
|
+ offline_channel->pad_fmts[OFFLINE_CH_PAD_P1OUT].format = format->format;
|
|
+ return 0;
|
|
+ default:
|
|
+ cam_dbg("%s(%s) didn't support set fmt for pad%d.", __func__, sc_subdev->name, format->pad);
|
|
+ return -1;
|
|
+ }
|
|
+
|
|
+ return 0;
|
|
+}
|
|
+
|
|
+static int fe_offline_channel_subdev_pad_get_fmt(struct v4l2_subdev *sd,
|
|
+ struct v4l2_subdev_state *state,
|
|
+ struct v4l2_subdev_format *format)
|
|
+{
|
|
+ struct fe_offline_channel *offline_channel = v4l2_subdev_to_offline_channel(sd);
|
|
+
|
|
+ if (format->pad >= OFFLINE_CH_PAD_NUM) {
|
|
+ cam_dbg("%s didn't have pad%d.", offline_channel->sc_subdev.name,
|
|
+ format->pad);
|
|
+ return -EINVAL;
|
|
+ }
|
|
+
|
|
+ format->format = offline_channel->pad_fmts[format->pad].format;
|
|
+
|
|
+ return 0;
|
|
+}
|
|
+
|
|
+static int __fe_offline_channel_pad_s_stream(struct fe_offline_channel *offline_channel,
|
|
+ unsigned int pad, int enable)
|
|
+{
|
|
+ struct spm_camera_vnode *sc_vnode = NULL;
|
|
+ struct spm_camera_subdev *sc_subdev = &offline_channel->sc_subdev;
|
|
+ //struct spm_camera_vbuffer *sc_vbuf = NULL;
|
|
+ struct isp_context *isp_ctx = spm_subdev_get_drvdata(sc_subdev);
|
|
+ int ret = 0;
|
|
+ unsigned long flags = 0;
|
|
+
|
|
+ switch (pad) {
|
|
+ case OFFLINE_CH_PAD_IN:
|
|
+ sc_vnode = spm_vdev_remote_vnode(&offline_channel->pads[pad]);
|
|
+ if (!sc_vnode) {
|
|
+ cam_err("%s(%s) ain was not found", __func__, sc_subdev->name);
|
|
+ return -1;
|
|
+ }
|
|
+ if (enable) { // stream on
|
|
+ //ret = spm_vdev_dq_idle_vbuffer(sc_vnode, &sc_vbuf);
|
|
+ //if (ret) {
|
|
+ // cam_err("%s(%s) enable:1 dq buf from %s failed.", __func__, sc_subdev->name, sc_vnode->name);
|
|
+ // return ret;
|
|
+ //}
|
|
+ //hw_isp_top_enable_hw_gap(SC_BLOCK(isp_ctx->pipes[0]), 0);
|
|
+ //fe_isp_update_ain_dma_addr(sc_vnode, sc_vbuf, 0);
|
|
+ //spm_vdev_q_busy_vbuffer(sc_vnode, sc_vbuf);
|
|
+ isp_ctx->dma_in_ctx[sc_vnode->idx].vnode = sc_vnode;
|
|
+ hw_isp_top_set_idi_rd_burst_len(SC_BLOCK(isp_ctx->pipes[0]), offline_channel->idx, 22, 8);
|
|
+ hw_dma_set_irq_enable(SC_BLOCK(sc_vnode),
|
|
+ DMA_IRQ_SRC_RDMA_CH0 + sc_vnode->idx,
|
|
+ DMA_IRQ_START | DMA_IRQ_DONE | DMA_IRQ_ERR,
|
|
+ 0);
|
|
+ //hw_dma_rdma_trigger(SC_BLOCK(sc_vnode), sc_vnode->idx);
|
|
+ } else {// stream off
|
|
+ spin_lock_irqsave(&(isp_ctx->dma_in_ctx[sc_vnode->idx].waitq_head.lock), flags);
|
|
+ wait_event_interruptible_locked_irq(isp_ctx->dma_in_ctx[sc_vnode->idx].waitq_head,
|
|
+ !isp_ctx->dma_in_ctx[sc_vnode->idx].in_irq);
|
|
+ isp_ctx->dma_in_ctx[sc_vnode->idx].in_streamoff = 1;
|
|
+ spin_unlock_irqrestore(&(isp_ctx->dma_in_ctx[sc_vnode->idx].waitq_head.lock), flags);
|
|
+ hw_dma_set_irq_enable(SC_BLOCK(sc_vnode),
|
|
+ DMA_IRQ_SRC_RDMA_CH0 + sc_vnode->idx,
|
|
+ 0,
|
|
+ DMA_IRQ_ALL);
|
|
+ //hw_isp_top_enable_hw_gap(SC_BLOCK(isp_ctx->pipes[0]), 1);
|
|
+ isp_ctx->dma_in_ctx[sc_vnode->idx].vnode = NULL;
|
|
+ spin_lock_irqsave(&(isp_ctx->dma_in_ctx[sc_vnode->idx].waitq_head.lock), flags);
|
|
+ isp_ctx->dma_in_ctx[sc_vnode->idx].in_streamoff = 0;
|
|
+ spin_unlock_irqrestore(&(isp_ctx->dma_in_ctx[sc_vnode->idx].waitq_head.lock), flags);
|
|
+ }
|
|
+ break;
|
|
+ default:
|
|
+ cam_err("%s(%s) pad s_stream not supported on pad%d", __func__, sc_subdev->name, pad);
|
|
+ return -1;
|
|
+ }
|
|
+ ret = spm_subdev_pad_s_stream(sc_subdev, pad, enable);
|
|
+ if (ret)
|
|
+ cam_err("%s(%s) s_stream on pad%u failed", __func__, sc_subdev->name, pad);
|
|
+ return 0;
|
|
+}
|
|
+
|
|
+static int fe_offline_channel_subdev_pad_s_stream(struct v4l2_subdev *sd,
|
|
+ struct media_link *link,
|
|
+ struct v4l2_subdev_format *source_fmt,
|
|
+ struct v4l2_subdev_format *sink_fmt)
|
|
+{
|
|
+ struct fe_offline_channel *offline_channel = v4l2_subdev_to_offline_channel(sd);
|
|
+ unsigned int pad = source_fmt->pad;
|
|
+ int enable = source_fmt->which;
|
|
+
|
|
+ return __fe_offline_channel_pad_s_stream(offline_channel, pad, enable);
|
|
+}
|
|
+
|
|
+static int fe_formatter_subdev_pad_set_fmt(struct v4l2_subdev *sd,
|
|
+ struct v4l2_subdev_state *state,
|
|
+ struct v4l2_subdev_format *format)
|
|
+{
|
|
+ struct fe_formatter *formatter = v4l2_subdev_to_formatter(sd);
|
|
+ struct spm_camera_subdev *sc_subdev = &formatter->sc_subdev;
|
|
+ struct media_pad *remote_pad_in = NULL, *remote_pad_aout = NULL;
|
|
+ struct media_pad *remote_pad_d1out = NULL, *remote_pad_d2out = NULL;
|
|
+ struct media_pad *remote_pad_d3out = NULL, *remote_pad_d4out = NULL;
|
|
+ struct media_pad *remote_pad_out = NULL;
|
|
+ int formatter_fmt = 0;
|
|
+ struct v4l2_subdev_format *pad_fmt_in = NULL;
|
|
+ struct v4l2_format v4l2_fmt;
|
|
+ struct spm_camera_vnode *sc_vnode = NULL;
|
|
+ struct spm_camera_pipeline *sc_pipeline = NULL;
|
|
+ struct isp_pipeline_context *pipe_ctx = NULL;
|
|
+ struct media_pipeline *pipe = media_entity_pipeline(&sd->entity);
|
|
+ int valid_link = 0, dwt_mode = 0;
|
|
+
|
|
+ if (format->which != V4L2_SUBDEV_FORMAT_ACTIVE) {
|
|
+ cam_err("%s(%s) didn't support format which(%d)", __func__,
|
|
+ sc_subdev->name, format->which);
|
|
+ return -EINVAL;
|
|
+ }
|
|
+ if (format->pad >= FORMATTER_PAD_NUM) {
|
|
+ cam_err("%s(%s) invalid pad%d.", __func__, sc_subdev->name, format->pad);
|
|
+ return -EINVAL;
|
|
+ }
|
|
+ if (!pipe) {
|
|
+ cam_err("%s(%s) pipe was null", __func__, sc_subdev->name);
|
|
+ return -1;
|
|
+ }
|
|
+ sc_pipeline = media_pipeline_to_sc_pipeline(pipe);
|
|
+ mutex_lock(&sc_pipeline->mlock);
|
|
+ if (!sc_pipeline->usr_data) {
|
|
+ pipe_ctx = fe_pipeline_create_ctx(formatter->sc_subdev.pcsd.sd.dev);
|
|
+ if (!pipe_ctx) {
|
|
+ mutex_unlock(&sc_pipeline->mlock);
|
|
+ cam_err("%s(%s) create pipe_ctx failed", __func__, sc_subdev->name);
|
|
+ return -1;
|
|
+ }
|
|
+ sc_pipeline->usr_data = pipe_ctx;
|
|
+ } else {
|
|
+ pipe_ctx = (struct isp_pipeline_context *)sc_pipeline->usr_data;
|
|
+ }
|
|
+ mutex_unlock(&sc_pipeline->mlock);
|
|
+ remote_pad_in = media_entity_remote_pad(&(formatter->pads[FMT_PAD_IN]));
|
|
+ remote_pad_aout = media_entity_remote_pad(&(formatter->pads[FMT_PAD_AOUT]));
|
|
+ remote_pad_d1out = media_entity_remote_pad(&(formatter->pads[FMT_PAD_D1OUT]));
|
|
+ remote_pad_d2out = media_entity_remote_pad(&(formatter->pads[FMT_PAD_D2OUT]));
|
|
+ remote_pad_d3out = media_entity_remote_pad(&(formatter->pads[FMT_PAD_D3OUT]));
|
|
+ remote_pad_d4out = media_entity_remote_pad(&(formatter->pads[FMT_PAD_D4OUT]));
|
|
+ if (remote_pad_in) {
|
|
+ if (remote_pad_aout && !remote_pad_d1out && !remote_pad_d2out
|
|
+ && !remote_pad_d3out && !remote_pad_d4out) {
|
|
+ valid_link = 1;
|
|
+ dwt_mode = 0;
|
|
+ } else if (remote_pad_aout && remote_pad_d1out && remote_pad_d2out
|
|
+ && remote_pad_d3out && remote_pad_d4out) {
|
|
+ valid_link = 1;
|
|
+ dwt_mode = 1;
|
|
+ }
|
|
+ }
|
|
+
|
|
+ if (!valid_link) {
|
|
+ cam_err("%s didn't have valid link.", sc_subdev->name);
|
|
+ return -1;
|
|
+ }
|
|
+
|
|
+ switch (format->pad) {
|
|
+ case FMT_PAD_IN:
|
|
+ formatter->pad_fmts[FMT_PAD_IN].format = format->format;
|
|
+ if (!sc_subdev->is_resetting) {
|
|
+ formatter->pad_fmts[FMT_PAD_AOUT].format = format->format;
|
|
+ formatter->pad_fmts[FMT_PAD_D1OUT].format = format->format;
|
|
+ formatter->pad_fmts[FMT_PAD_D2OUT].format = format->format;
|
|
+ formatter->pad_fmts[FMT_PAD_D3OUT].format = format->format;
|
|
+ formatter->pad_fmts[FMT_PAD_D4OUT].format = format->format;
|
|
+ }
|
|
+ return 0;
|
|
+ break;
|
|
+ case FMT_PAD_AOUT:
|
|
+ fallthrough;
|
|
+ case FMT_PAD_D1OUT:
|
|
+ case FMT_PAD_D2OUT:
|
|
+ case FMT_PAD_D3OUT:
|
|
+ case FMT_PAD_D4OUT:
|
|
+ if (!fe_isp_lookup_formatter_fmts_table(format, &formatter_fmt, /*dwt_mode*/0)) {
|
|
+ if (!sc_subdev->is_resetting)
|
|
+ cam_err("%s(%s) mbus format code(0x%08x) not supported in dwt_mode(0)",
|
|
+ __func__, sc_subdev->name, format->format.code);
|
|
+ else
|
|
+ cam_dbg("%s(%s) mbus format code(0x%08x) not supported in dwt_mode(0)",
|
|
+ __func__, sc_subdev->name, format->format.code);
|
|
+ return -1;
|
|
+ }
|
|
+ if (dwt_mode) {
|
|
+ if (NV12 == formatter_fmt || NV21 == formatter_fmt)
|
|
+ pipe_ctx->fmt_wdma_start_cnt[formatter->idx] = DMA_START_CNT_WITH_DWT;
|
|
+ else
|
|
+ pipe_ctx->fmt_wdma_start_cnt[formatter->idx] = 1;
|
|
+ }
|
|
+ pad_fmt_in = &(formatter->pad_fmts[FMT_PAD_IN]);
|
|
+ if (format->format.width > pad_fmt_in->format.width
|
|
+ || format->format.height > pad_fmt_in->format.height) {
|
|
+ cam_err("%s(%s) FMT_PAD_AOUT(%ux%u) didn't match FMT_PAD_IN(%ux%u)",
|
|
+ __func__, sc_subdev->name, format->format.width, format->format.height,
|
|
+ pad_fmt_in->format.width, pad_fmt_in->format.height);
|
|
+ return -1;
|
|
+ }
|
|
+ formatter->pad_fmts[format->pad].format = format->format;
|
|
+ hw_postpipe_set_formatter_format(SC_BLOCK(formatter), formatter->idx, formatter_fmt);
|
|
+ remote_pad_out = media_entity_remote_pad(&(formatter->pads[format->pad]));
|
|
+ sc_vnode = media_entity_to_sc_vnode(remote_pad_out->entity);
|
|
+ BUG_ON((format->pad == FMT_PAD_AOUT) && !sc_vnode);
|
|
+ if (sc_vnode) {
|
|
+ spm_vdev_fill_v4l2_format(format, &v4l2_fmt);
|
|
+ hw_dma_set_wdma_pitch(SC_BLOCK(sc_vnode),
|
|
+ sc_vnode->idx,
|
|
+ v4l2_fmt.fmt.pix_mp.num_planes,
|
|
+ v4l2_fmt.fmt.pix_mp.plane_fmt[0].bytesperline,
|
|
+ v4l2_fmt.fmt.pix_mp.plane_fmt[1].bytesperline);
|
|
+ }
|
|
+ return 0;
|
|
+ break;
|
|
+ default:
|
|
+ cam_dbg("%s(%s) didn't support set fmt for pad%d.", __func__, sc_subdev->name, format->pad);
|
|
+ return -1;
|
|
+ break;
|
|
+ }
|
|
+
|
|
+ return 0;
|
|
+}
|
|
+
|
|
+static int fe_formatter_subdev_pad_get_fmt(struct v4l2_subdev *sd,
|
|
+ struct v4l2_subdev_state *state,
|
|
+ struct v4l2_subdev_format *format)
|
|
+{
|
|
+ struct fe_formatter *formatter = v4l2_subdev_to_formatter(sd);
|
|
+
|
|
+ if (format->pad >= FORMATTER_PAD_NUM) {
|
|
+ cam_dbg("%s(%s) invalid pad%d.", __func__, formatter->sc_subdev.name, format->pad);
|
|
+ return -EINVAL;
|
|
+ }
|
|
+
|
|
+ format->format = formatter->pad_fmts[format->pad].format;
|
|
+
|
|
+ return 0;
|
|
+}
|
|
+
|
|
+static int __fe_formatter_pad_s_stream(struct fe_formatter *formatter, unsigned int pad, int enable)
|
|
+{
|
|
+ int source = 0, ret = 0, need_initial_load = 1;
|
|
+ struct media_pad *remote_pad_aout = NULL;
|
|
+ struct spm_camera_vnode *sc_vnode = NULL;
|
|
+ struct spm_camera_subdev *sc_subdev = &formatter->sc_subdev;
|
|
+ struct device *dev = sc_subdev->pcsd.sd.dev;
|
|
+ struct media_pipeline *pipe = media_entity_pipeline(&sc_subdev->pcsd.sd.entity);
|
|
+ struct spm_camera_pipeline *sc_pipeline = NULL;
|
|
+ struct isp_pipeline_context *pipe_ctx = NULL;
|
|
+ struct isp_context *isp_ctx = spm_subdev_get_drvdata(sc_subdev);
|
|
+ struct vi_port_cfg *port_cfg = NULL;
|
|
+ unsigned int wdma_fifo_offset = 0, wdma_fifo_depth = 0, wdma_weight = 0, wdma_fifo_div_mode = 8;
|
|
+ unsigned long flags = 0;
|
|
+ struct spm_camera_vbuffer *sc_vb = NULL;
|
|
+
|
|
+ if (!pipe) {
|
|
+ cam_err("%s(%s) pipe was null", __func__, sc_subdev->name);
|
|
+ return -1;
|
|
+ }
|
|
+ sc_pipeline = media_pipeline_to_sc_pipeline(pipe);
|
|
+ switch (pad) {
|
|
+ case FMT_PAD_AOUT:
|
|
+ remote_pad_aout = media_entity_remote_pad(&(formatter->pads[pad]));
|
|
+ if (!remote_pad_aout) {
|
|
+ cam_err("%s(%s) FMT_PAD_AOUT had no active link.", __func__, sc_subdev->name);
|
|
+ return -1;
|
|
+ }
|
|
+ sc_vnode = media_entity_to_sc_vnode(remote_pad_aout->entity);
|
|
+ BUG_ON(!sc_vnode);
|
|
+ if (enable) {
|
|
+ atomic_set(&isp_ctx->dma_out_ctx[sc_vnode->idx].busy_cnt, 0);
|
|
+ mutex_lock(&sc_pipeline->mlock);
|
|
+ if (!sc_pipeline->usr_data) {
|
|
+ pipe_ctx = fe_pipeline_create_ctx(dev);
|
|
+ if (!pipe_ctx) {
|
|
+ mutex_unlock(&sc_pipeline->mlock);
|
|
+ cam_err("%s(%s) create pipeline context failed", __func__, sc_subdev->name);
|
|
+ return -ENOMEM;
|
|
+ }
|
|
+ sc_pipeline->usr_data = pipe_ctx;
|
|
+ } else {
|
|
+ pipe_ctx = (struct isp_pipeline_context*)sc_pipeline->usr_data;
|
|
+ }
|
|
+ mutex_unlock(&sc_pipeline->mlock);
|
|
+ if (pipe_ctx->fmt_wdma_start_cnt[formatter->idx] == 1)
|
|
+ isp_ctx->dma_out_ctx[sc_vnode->idx].trig_dma_reload = 1;
|
|
+ else
|
|
+ isp_ctx->dma_out_ctx[sc_vnode->idx].trig_dma_reload = 0;
|
|
+ source = FORMATTER0 + formatter->idx;
|
|
+ port_cfg = sc_vnode_get_usrdata(sc_vnode);
|
|
+ if (port_cfg) {
|
|
+ wdma_fifo_offset = port_cfg->w_fifo_ctrl.offset;
|
|
+ wdma_fifo_depth = port_cfg->w_fifo_ctrl.depth;
|
|
+ wdma_weight = port_cfg->w_fifo_ctrl.weight;
|
|
+ wdma_fifo_div_mode = port_cfg->w_fifo_ctrl.div_mode;
|
|
+ }
|
|
+ hw_dma_set_wdma_source(SC_BLOCK(sc_vnode), sc_vnode->idx, source, wdma_fifo_offset,
|
|
+ wdma_fifo_depth, wdma_weight, wdma_fifo_div_mode);
|
|
+ if (isp_ctx->dma_out_ctx[sc_vnode->idx].trig_dma_reload) {
|
|
+ hw_dma_set_irq_enable(SC_BLOCK(sc_vnode),
|
|
+ DMA_IRQ_SRC_WDMA_CH0 + sc_vnode->idx,
|
|
+ DMA_IRQ_START | DMA_IRQ_DONE | DMA_IRQ_ERR,
|
|
+ 0);
|
|
+ } else {
|
|
+ hw_dma_set_irq_enable(SC_BLOCK(sc_vnode),
|
|
+ DMA_IRQ_SRC_WDMA_CH0 + sc_vnode->idx,
|
|
+ DMA_IRQ_DONE | DMA_IRQ_ERR,
|
|
+ DMA_IRQ_START);
|
|
+ }
|
|
+ isp_ctx->dma_out_ctx[sc_vnode->idx].vnode = sc_vnode;
|
|
+ spin_lock_irqsave(&sc_pipeline->slock, flags);
|
|
+ ret = list_add_no_repeat(&isp_ctx->dma_out_ctx[sc_vnode->idx].list_entry, &pipe_ctx->fmt_wdma_list[formatter->idx]);
|
|
+ if (0 == ret)
|
|
+ pipe_ctx->fmt_wdma_cnt[formatter->idx]++;
|
|
+ spin_unlock_irqrestore(&sc_pipeline->slock, flags);
|
|
+ if (need_initial_load) {
|
|
+ ret = spm_vdev_dq_idle_vbuffer(sc_vnode, &sc_vb);
|
|
+ if (ret) {
|
|
+ if (sc_vnode->sc_vb)
|
|
+ sc_vb = sc_vnode->sc_vb;
|
|
+ else
|
|
+ cam_info("%s(%s) no initial buffer available", __func__, sc_subdev->name);
|
|
+ } else {
|
|
+ spm_vdev_q_busy_vbuffer(sc_vnode, sc_vb);
|
|
+ }
|
|
+ if (sc_vb) {
|
|
+ fe_isp_update_aout_dma_addr(sc_vnode, sc_vb, 0);
|
|
+ hw_dma_set_wdma_ready(SC_BLOCK(sc_vnode), sc_vnode->idx, 1);
|
|
+ }
|
|
+ }
|
|
+ if (pipe_ctx->fmt_wdma_start_cnt[formatter->idx] == 1)
|
|
+ isp_ctx->dma_out_ctx[sc_vnode->idx].trig_dma_reload = 1;
|
|
+ else
|
|
+ isp_ctx->dma_out_ctx[sc_vnode->idx].trig_dma_reload = 0;
|
|
+ } else {// stream off
|
|
+ if (sc_pipeline->is_online_mode) {
|
|
+ ret = wait_event_interruptible_timeout(isp_ctx->dma_out_ctx[sc_vnode->idx].waitq_eof,
|
|
+ (atomic_read(&isp_ctx->dma_out_ctx[sc_vnode->idx].busy_cnt) <= 0),
|
|
+ msecs_to_jiffies(60));
|
|
+ if (0 == ret)
|
|
+ cam_warn("%s(%s) stream off wait eof timeout", __func__, sc_subdev->name);
|
|
+ else if (ret < 0)
|
|
+ cam_warn("%s(%s) stream off wait eof error ret=%d", __func__, sc_subdev->name, ret);
|
|
+ }
|
|
+ spin_lock_irqsave(&(isp_ctx->dma_out_ctx[sc_vnode->idx].waitq_head.lock), flags);
|
|
+ wait_event_interruptible_locked_irq(isp_ctx->dma_out_ctx[sc_vnode->idx].waitq_head,
|
|
+ !isp_ctx->dma_out_ctx[sc_vnode->idx].in_irq);
|
|
+ isp_ctx->dma_out_ctx[sc_vnode->idx].in_streamoff = 1;
|
|
+ spin_unlock_irqrestore(&(isp_ctx->dma_out_ctx[sc_vnode->idx].waitq_head.lock), flags);
|
|
+ hw_dma_set_irq_enable(SC_BLOCK(sc_vnode), DMA_IRQ_SRC_WDMA_CH0 + sc_vnode->idx, 0, DMA_IRQ_ALL);
|
|
+ pipe_ctx = (struct isp_pipeline_context*)sc_pipeline->usr_data;
|
|
+ if (pipe_ctx) {
|
|
+ spin_lock_irqsave(&sc_pipeline->slock, flags);
|
|
+ list_del_init(&isp_ctx->dma_out_ctx[sc_vnode->idx].list_entry);
|
|
+ pipe_ctx->fmt_wdma_cnt[formatter->idx]--;
|
|
+ spin_unlock_irqrestore(&sc_pipeline->slock, flags);
|
|
+ }
|
|
+ isp_ctx->dma_out_ctx[sc_vnode->idx].vnode = NULL;
|
|
+ spin_lock_irqsave(&(isp_ctx->dma_out_ctx[sc_vnode->idx].waitq_head.lock), flags);
|
|
+ isp_ctx->dma_out_ctx[sc_vnode->idx].in_streamoff = 0;
|
|
+ spin_unlock_irqrestore(&(isp_ctx->dma_out_ctx[sc_vnode->idx].waitq_head.lock), flags);
|
|
+ }
|
|
+ break;
|
|
+ default:
|
|
+ return -1;
|
|
+ }
|
|
+ ret = spm_subdev_pad_s_stream(sc_subdev, pad, enable);
|
|
+ if (ret) {
|
|
+ cam_err("%s(%s) s_stream on pad%u failed", __func__, sc_subdev->name, pad);
|
|
+ return ret;
|
|
+ }
|
|
+ return 0;
|
|
+}
|
|
+
|
|
+static int fe_formatter_subdev_pad_s_stream(struct v4l2_subdev *sd,
|
|
+ struct media_link *link,
|
|
+ struct v4l2_subdev_format *source_fmt,
|
|
+ struct v4l2_subdev_format *sink_fmt)
|
|
+{
|
|
+ struct fe_formatter *formatter = v4l2_subdev_to_formatter(sd);
|
|
+ unsigned int pad = source_fmt->pad;
|
|
+ int enable = source_fmt->which;
|
|
+
|
|
+ return __fe_formatter_pad_s_stream(formatter, pad, enable);
|
|
+}
|
|
+
|
|
+static int fe_formatter_subdev_video_s_stream(struct v4l2_subdev *sd, int enable)
|
|
+{
|
|
+ //int source = 0, source_twin = 0;
|
|
+ //struct media_pad *remote_pad_in = NULL;
|
|
+ //struct fe_formatter *formatter = v4l2_subdev_to_formatter(sd), *formatter_twin = NULL;
|
|
+ //struct spm_camera_subdev *sc_subdev = &formatter->sc_subdev;
|
|
+ //struct fe_pipe *pipe = NULL;
|
|
+ //struct fe_hdr_combine *hdr_combine = NULL;
|
|
+ //struct isp_context *isp_ctx = v4l2_get_subdevdata(sd);
|
|
+ //struct media_entity *me = NULL;
|
|
+
|
|
+ //if (formatter->idx == 0)
|
|
+ // formatter_twin = isp_ctx->formatters[1];
|
|
+ //else
|
|
+ // formatter_twin = isp_ctx->formatters[0];
|
|
+ //me = &formatter_twin->sc_subdev.pcsd.sd.entity;
|
|
+ //remote_pad_in = media_entity_remote_pad(&(formatter->pads[FMT_PAD_IN]));
|
|
+ //if (!remote_pad_in) {
|
|
+ // cam_err("%s(%s) FMT_PAD_IN had no active link", __func__, sc_subdev->name);
|
|
+ // return -1;
|
|
+ //}
|
|
+ //pipe = media_entity_to_pipe(remote_pad_in->entity);
|
|
+ //if (pipe) {
|
|
+ // if (pipe->idx == 0) {
|
|
+ // source = SCL_SRC_SEL_PIPE0;
|
|
+ // source_twin = SCL_SRC_SEL_PIPE1;
|
|
+ // }
|
|
+ // else {
|
|
+ // source = SCL_SRC_SEL_PIPE1;
|
|
+ // source_twin = SCL_SRC_SEL_PIPE0;
|
|
+ // }
|
|
+ //} else {
|
|
+ // hdr_combine = media_entity_to_hdr_combine(remote_pad_in->entity);
|
|
+ // if (!hdr_combine) {
|
|
+ // cam_err("%s(%s) FMT_PAD_IN should link to pipe or hdr_combine", __func__, sc_subdev->name);
|
|
+ // return -1;
|
|
+ // }
|
|
+ // source = SCL_SRC_SEL_PIPE0;
|
|
+ // source_twin = SCL_SRC_SEL_PIPE1;
|
|
+ //}
|
|
+ //hw_postpipe_set_scaler_source(SC_BLOCK(formatter), formatter->idx, source);
|
|
+ //if (!me->pipe) {
|
|
+ // hw_postpipe_set_scaler_source(SC_BLOCK(formatter_twin), formatter_twin->idx, source_twin);
|
|
+ //}
|
|
+ return 0;
|
|
+}
|
|
+
|
|
+static int fe_dwt_subdev_pad_set_fmt(struct v4l2_subdev *sd,
|
|
+ struct v4l2_subdev_state *state,
|
|
+ struct v4l2_subdev_format *format)
|
|
+{
|
|
+ struct fe_dwt *dwt = v4l2_subdev_to_dwt(sd);
|
|
+ struct spm_camera_subdev *sc_subdev = &dwt->sc_subdev;
|
|
+ struct media_pad *remote_pad_in = NULL, *remote_pad_out = NULL;
|
|
+ struct v4l2_subdev_format sd_format;
|
|
+ struct v4l2_subdev *remote_sd_in = NULL;
|
|
+ struct spm_camera_vnode *sc_vnode = NULL;
|
|
+ struct v4l2_format v4l2_fmt;
|
|
+ int i = 1, ret = 0;
|
|
+
|
|
+ if (format->which != V4L2_SUBDEV_FORMAT_ACTIVE) {
|
|
+ cam_err("%s(%s) didn't support format which(%d)", __func__,
|
|
+ sc_subdev->name, format->which);
|
|
+ return -EINVAL;
|
|
+ }
|
|
+ if (format->pad >= DWT_PAD_NUM) {
|
|
+ cam_err("%s(%s) invalid pad%d.", __func__, sc_subdev->name, format->pad);
|
|
+ return -EINVAL;
|
|
+ }
|
|
+ remote_pad_in = media_entity_remote_pad(&(dwt->pads[PAD_IN]));
|
|
+ remote_pad_out = media_entity_remote_pad(&(dwt->pads[PAD_OUT]));
|
|
+ if (!remote_pad_in || !remote_pad_out) {
|
|
+ cam_err("%s didn't have valid link.", sc_subdev->name);
|
|
+ return -1;
|
|
+ }
|
|
+
|
|
+ switch (format->pad) {
|
|
+ case PAD_IN:
|
|
+ dwt->pad_fmts[PAD_IN].format = format->format;
|
|
+ if (!sc_subdev->is_resetting) {
|
|
+ dwt->pad_fmts[PAD_OUT].format = format->format;
|
|
+ for (i = 1; i <= dwt->layer_idx; i++) {
|
|
+ dwt->pad_fmts[PAD_OUT].format.width =
|
|
+ (dwt->pad_fmts[PAD_OUT].format.width + 1) >> 1;
|
|
+ dwt->pad_fmts[PAD_OUT].format.height =
|
|
+ (dwt->pad_fmts[PAD_OUT].format.height + 1) >> 1;
|
|
+ }
|
|
+ }
|
|
+ return 0;
|
|
+ case PAD_OUT:
|
|
+ sc_vnode = media_entity_to_sc_vnode(remote_pad_out->entity);
|
|
+ BUG_ON(!sc_vnode);
|
|
+ if (!fe_isp_lookup_dwt_fmts_table(format, dwt->layer_idx)) {
|
|
+ if (!sc_subdev->is_resetting)
|
|
+ cam_err("%s(%s) mbus format code(0x%08x) not supported",
|
|
+ __func__, sc_subdev->name, format->format.code);
|
|
+ else
|
|
+ cam_dbg("%s(%s) mbus format code(0x%08x) not supported",
|
|
+ __func__, sc_subdev->name, format->format.code);
|
|
+ return -1;
|
|
+ }
|
|
+ //if (format->format.width != dwt->pad_fmts[PAD_OUT].format.width
|
|
+ // || format->format.height != dwt->pad_fmts[PAD_OUT].format.height) {
|
|
+ // cam_err("%s(%s) PAD_OUT(%ux%u) didn't match PAD_IN(%ux%u)",
|
|
+ // __func__, sc_subdev->name, format->format.width, format->format.height,
|
|
+ // dwt->pad_fmts[PAD_OUT].format.width, dwt->pad_fmts[PAD_OUT].format.height);
|
|
+ // return -1;
|
|
+ //}
|
|
+ //dwt->pad_fmts[PAD_OUT].format.code = format->format.code;
|
|
+ dwt->pad_fmts[PAD_OUT].format = format->format;
|
|
+ sd_format.which = V4L2_SUBDEV_FORMAT_ACTIVE;
|
|
+ sd_format.pad = remote_pad_in->index;
|
|
+ if (format->format.code >= MEDIA_BUS_FMT_YUYV10_1_5X10_D1
|
|
+ && format->format.code <= MEDIA_BUS_FMT_YUYV10_1_5X10_D4)
|
|
+ sd_format.format.code = MEDIA_BUS_FMT_YUYV8_1_5X8;
|
|
+ else
|
|
+ sd_format.format.code = MEDIA_BUS_FMT_YVYU8_1_5X8;
|
|
+ sd_format.format.width = dwt->pad_fmts[PAD_OUT].format.width;
|
|
+ sd_format.format.height = dwt->pad_fmts[PAD_OUT].format.height;
|
|
+ remote_sd_in = media_entity_to_v4l2_subdev(remote_pad_in->entity);
|
|
+ ret = v4l2_subdev_call(remote_sd_in, pad, set_fmt, NULL, &sd_format);
|
|
+ if (ret) {
|
|
+ cam_err("%s(%s) remote_pad_in set format(%ux%u code=0x%08x) failed",
|
|
+ __func__, sc_subdev->name, sd_format.format.width, sd_format.format.height,
|
|
+ sd_format.format.code);
|
|
+ return ret;
|
|
+ }
|
|
+ dwt->pad_fmts[PAD_IN].format = dwt->pad_fmts[PAD_OUT].format;
|
|
+ spm_vdev_fill_v4l2_format(format, &v4l2_fmt);
|
|
+ hw_dma_set_wdma_pitch(SC_BLOCK(sc_vnode),
|
|
+ sc_vnode->idx,
|
|
+ v4l2_fmt.fmt.pix_mp.num_planes,
|
|
+ v4l2_fmt.fmt.pix_mp.plane_fmt[0].bytesperline,
|
|
+ v4l2_fmt.fmt.pix_mp.plane_fmt[1].bytesperline);
|
|
+ return 0;
|
|
+ default:
|
|
+ cam_dbg("%s(%s) didn't support set fmt for pad%d.", __func__, sc_subdev->name, format->pad);
|
|
+ return -1;
|
|
+ }
|
|
+
|
|
+ return 0;
|
|
+}
|
|
+
|
|
+static int fe_dwt_subdev_pad_get_fmt(struct v4l2_subdev *sd,
|
|
+ struct v4l2_subdev_state *state,
|
|
+ struct v4l2_subdev_format *format)
|
|
+{
|
|
+ struct fe_dwt *dwt = v4l2_subdev_to_dwt(sd);
|
|
+
|
|
+ if (format->pad >= DWT_PAD_NUM) {
|
|
+ cam_dbg("%s didn't have pad%d.", dwt->sc_subdev.name, format->pad);
|
|
+ return -EINVAL;
|
|
+ }
|
|
+
|
|
+ format->format = dwt->pad_fmts[format->pad].format;
|
|
+
|
|
+ return 0;
|
|
+}
|
|
+
|
|
/*
 * Start/stop streaming on a DWT layer's output pad.
 *
 * On enable: selects the DWT WDMA source (and, for DWT0, the DMA mux),
 * applies the per-port write-FIFO tuning from the vnode's user data,
 * arms the WDMA interrupts, registers this output in the pipeline
 * context's per-formatter WDMA list, and pre-loads one idle buffer into
 * the WDMA so the first frame has a destination.
 *
 * On disable: waits for the in-flight frame (EOF) and any running IRQ
 * handler to drain, masks the WDMA interrupts, unlinks this output from
 * the pipeline context, and powers the DWT block down once the last
 * layer referencing the formatter is gone.
 *
 * Only PAD_OUT is supported.  Returns 0 on success, negative otherwise.
 * Statement order here (IRQ masking vs. list surgery vs. refcounting) is
 * deliberate; do not reorder casually.
 */
static int __fe_dwt_pad_s_stream(struct fe_dwt *dwt, unsigned int pad, int enable)
{
	int source = 0, mux_sel = 0, ret = 0, need_initial_load = 1;
	struct media_pad *remote_pad = NULL;
	struct spm_camera_vnode *sc_vnode = NULL;
	struct spm_camera_subdev *sc_subdev = &dwt->sc_subdev;
	struct device *dev = sc_subdev->pcsd.sd.dev;
	struct media_pipeline *pipe = media_entity_pipeline(&sc_subdev->pcsd.sd.entity);
	struct spm_camera_pipeline *sc_pipeline = NULL;
	struct isp_pipeline_context *pipe_ctx = NULL;
	struct fe_formatter *formatter = NULL;
	struct isp_context *isp_ctx = spm_subdev_get_drvdata(sc_subdev);
	struct vi_port_cfg *port_cfg = NULL;
	struct spm_camera_vbuffer *sc_vb = NULL;
	unsigned int wdma_fifo_offset = 0, wdma_fifo_depth = 0, wdma_weight = 0, wdma_fifo_div_mode = 8;
	unsigned long flags = 0;

	if (!pipe) {
		cam_err("%s(%s) pipe was null", __func__, sc_subdev->name);
		return -1;
	}
	sc_pipeline = media_pipeline_to_sc_pipeline(pipe);
	switch (pad) {
	case PAD_OUT:
		/* Downstream must be a video node, upstream a formatter. */
		remote_pad = media_entity_remote_pad(&(dwt->pads[pad]));
		if (!remote_pad) {
			cam_err("%s(%s) PAD_OUT had no active link.", __func__, sc_subdev->name);
			return -1;
		}
		sc_vnode = media_entity_to_sc_vnode(remote_pad->entity);
		BUG_ON(!sc_vnode);
		remote_pad = media_entity_remote_pad(&(dwt->pads[PAD_IN]));
		if (!remote_pad) {
			cam_err("%s(%s) PAD_IN had no active link", __func__, sc_subdev->name);
			return -1;
		}
		formatter = media_entity_to_formatter(remote_pad->entity);
		BUG_ON(!formatter);
		if (enable) {
			atomic_set(&isp_ctx->dma_out_ctx[sc_vnode->idx].busy_cnt, 0);
			/* Layer 1 is the reload trigger for the DWT chain. */
			if (dwt->layer_idx == 1)
				isp_ctx->dma_out_ctx[sc_vnode->idx].trig_dma_reload = 1;
			else
				isp_ctx->dma_out_ctx[sc_vnode->idx].trig_dma_reload = 0;
			/* First layer referencing this formatter powers the block up. */
			if (sc_subdev->is_resetting || atomic_inc_return(&formatter->dwt_refcnt) == 1)
				hw_postpipe_enable_dwt(SC_BLOCK(dwt), dwt->idx, DWT_SRC_SEL_FORMATTER0 + formatter->idx, 1);
			if (dwt->idx == 0) {
				/* DWT0 additionally routes through the post-pipe DMA mux. */
				mux_sel = MUX_SEL_DWT0_LAYER1 + dwt->layer_idx - 1;
				hw_postpipe_dma_mux_enable(SC_BLOCK(dwt), mux_sel);
				source = DWT0_LAYER1 + dwt->layer_idx - 1;
			} else {
				source = DWT1_LAYER1 + dwt->layer_idx - 1;
			}
			/* Per-port write-FIFO tuning, if userspace supplied any. */
			port_cfg = sc_vnode_get_usrdata(sc_vnode);
			if (port_cfg) {
				wdma_fifo_offset = port_cfg->w_fifo_ctrl.offset;
				wdma_fifo_depth = port_cfg->w_fifo_ctrl.depth;
				wdma_weight = port_cfg->w_fifo_ctrl.weight;
				wdma_fifo_div_mode = port_cfg->w_fifo_ctrl.div_mode;
			}
			hw_dma_set_wdma_source(SC_BLOCK(sc_vnode), sc_vnode->idx, source, wdma_fifo_offset,
					       wdma_fifo_depth, wdma_weight, wdma_fifo_div_mode);
			/* Reload-triggering channels also need the START IRQ. */
			if (isp_ctx->dma_out_ctx[sc_vnode->idx].trig_dma_reload) {
				hw_dma_set_irq_enable(SC_BLOCK(sc_vnode),
						      DMA_IRQ_SRC_WDMA_CH0 + sc_vnode->idx,
						      DMA_IRQ_START | DMA_IRQ_DONE | DMA_IRQ_ERR,
						      0);
			} else {
				hw_dma_set_irq_enable(SC_BLOCK(sc_vnode),
						      DMA_IRQ_SRC_WDMA_CH0 + sc_vnode->idx,
						      DMA_IRQ_DONE | DMA_IRQ_ERR,
						      DMA_IRQ_START);
			}
			isp_ctx->dma_out_ctx[sc_vnode->idx].vnode = sc_vnode;
			/* Lazily create the shared per-pipeline context. */
			mutex_lock(&sc_pipeline->mlock);
			if (!sc_pipeline->usr_data) {
				pipe_ctx = fe_pipeline_create_ctx(dev);
				if (!pipe_ctx) {
					mutex_unlock(&sc_pipeline->mlock);
					cam_err("%s(%s) create pipeline context failed", __func__, sc_subdev->name);
					return -ENOMEM;
				}
				sc_pipeline->usr_data = pipe_ctx;
			} else {
				pipe_ctx = (struct isp_pipeline_context*)sc_pipeline->usr_data;
			}
			mutex_unlock(&sc_pipeline->mlock);
			/* Register this WDMA output under its formatter. */
			spin_lock_irqsave(&sc_pipeline->slock, flags);
			ret = list_add_no_repeat(&isp_ctx->dma_out_ctx[sc_vnode->idx].list_entry, &pipe_ctx->fmt_wdma_list[formatter->idx]);
			if (0 == ret)
				pipe_ctx->fmt_wdma_cnt[formatter->idx]++;
			spin_unlock_irqrestore(&sc_pipeline->slock, flags);
			if (need_initial_load) {
				/* Pre-load one buffer so the first frame has a target;
				 * fall back to the vnode's held buffer if the idle
				 * queue is empty. */
				ret = spm_vdev_dq_idle_vbuffer(sc_vnode, &sc_vb);
				if (ret) {
					if (sc_vnode->sc_vb)
						sc_vb = sc_vnode->sc_vb;
					else
						cam_info("%s(%s) no initial buffer available", __func__, sc_subdev->name);
				} else {
					spm_vdev_q_busy_vbuffer(sc_vnode, sc_vb);
				}
				if (sc_vb) {
					fe_isp_update_aout_dma_addr(sc_vnode, sc_vb, 0);
					hw_dma_set_wdma_ready(SC_BLOCK(sc_vnode), sc_vnode->idx, 1);
				}
			}
			if (dwt->layer_idx == 1)
				isp_ctx->dma_out_ctx[sc_vnode->idx].trig_dma_reload = 1;
			else
				isp_ctx->dma_out_ctx[sc_vnode->idx].trig_dma_reload = 0;
		} else { // stream off
			/* In online mode, give the in-flight frame up to 60 ms
			 * to hit EOF before tearing down. */
			if (sc_pipeline->is_online_mode) {
				ret = wait_event_interruptible_timeout(isp_ctx->dma_out_ctx[sc_vnode->idx].waitq_eof,
								       (atomic_read(&isp_ctx->dma_out_ctx[sc_vnode->idx].busy_cnt) <= 0),
								       msecs_to_jiffies(60));
				if (0 == ret)
					cam_warn("%s(%s) stream off wait eof timeout", __func__, sc_subdev->name);
				else if (ret < 0)
					cam_warn("%s(%s) stream off wait eof error ret=%d", __func__, sc_subdev->name, ret);
			}
			/* Drain any running IRQ handler, then flag stream-off
			 * while still holding the waitqueue lock so the IRQ
			 * path observes a consistent state. */
			spin_lock_irqsave(&(isp_ctx->dma_out_ctx[sc_vnode->idx].waitq_head.lock), flags);
			wait_event_interruptible_locked_irq(isp_ctx->dma_out_ctx[sc_vnode->idx].waitq_head,
							    !isp_ctx->dma_out_ctx[sc_vnode->idx].in_irq);
			isp_ctx->dma_out_ctx[sc_vnode->idx].in_streamoff = 1;
			spin_unlock_irqrestore(&(isp_ctx->dma_out_ctx[sc_vnode->idx].waitq_head.lock), flags);
			hw_dma_set_irq_enable(SC_BLOCK(sc_vnode), DMA_IRQ_SRC_WDMA_CH0 + sc_vnode->idx, 0, DMA_IRQ_ALL);
			pipe_ctx = (struct isp_pipeline_context*)sc_pipeline->usr_data;
			if (pipe_ctx) {
				spin_lock_irqsave(&sc_pipeline->slock, flags);
				list_del_init(&isp_ctx->dma_out_ctx[sc_vnode->idx].list_entry);
				pipe_ctx->fmt_wdma_cnt[formatter->idx]--;
				spin_unlock_irqrestore(&sc_pipeline->slock, flags);
			}
			isp_ctx->dma_out_ctx[sc_vnode->idx].vnode = NULL;
			/* Last layer on this formatter disables the DWT block. */
			if (atomic_dec_and_test(&formatter->dwt_refcnt))
				hw_postpipe_enable_dwt(SC_BLOCK(dwt), dwt->idx, DWT_SRC_SEL_FORMATTER0 + formatter->idx, 0);
			spin_lock_irqsave(&(isp_ctx->dma_out_ctx[sc_vnode->idx].waitq_head.lock), flags);
			isp_ctx->dma_out_ctx[sc_vnode->idx].in_streamoff = 0;
			spin_unlock_irqrestore(&(isp_ctx->dma_out_ctx[sc_vnode->idx].waitq_head.lock), flags);
		}
		break;
	default:
		return -1;
	}

	/* Forward the pad-level stream state to the generic subdev layer. */
	ret = spm_subdev_pad_s_stream(sc_subdev, pad, enable);
	if (ret) {
		cam_err("%s(%s) s_stream on pad%u failed", __func__, sc_subdev->name, pad);
		return ret;
	}
	return 0;
}
|
|
+
|
|
/*
 * Pad-level stream on/off entry point for the DWT subdev.
 *
 * NOTE(review): the signature matches the media link_validate prototype,
 * but the arguments are repurposed here: source_fmt->pad carries the pad
 * index and source_fmt->which carries the enable flag, while @link and
 * @sink_fmt are ignored.  Presumably the callers encode pad/enable this
 * way on purpose — confirm against the op table this is installed into.
 */
static int fe_dwt_subdev_pad_s_stream(struct v4l2_subdev *sd, struct media_link *link,
				      struct v4l2_subdev_format *source_fmt,
				      struct v4l2_subdev_format *sink_fmt)
{
	struct fe_dwt *dwt = v4l2_subdev_to_dwt(sd);
	unsigned int pad = source_fmt->pad;
	int enable = source_fmt->which;

	return __fe_dwt_pad_s_stream(dwt, pad, enable);
}
|
|
+
|
|
/*
 * Set the input format on an ISP pipe subdev (PIPE_PAD_IN only).
 *
 * Determines whether the pipe is fed online (CSI) or offline (offline
 * channel) from the remote entity on PIPE_PAD_IN, validates the raw
 * format against the matching table (extracting CFA pattern and bit
 * depth), derives the HDR mode from the HDROUT/rawdump links, and then
 * programs the IDI input format registers accordingly.  The accepted
 * format is mirrored into every pad's cached format.
 *
 * Only V4L2_SUBDEV_FORMAT_ACTIVE is supported.  Returns 0 on success,
 * -EINVAL for a bad which/pad, a negative value otherwise.
 */
static int fe_pipe_subdev_pad_set_fmt(struct v4l2_subdev *sd,
				      struct v4l2_subdev_state *state,
				      struct v4l2_subdev_format *format)
{
	struct fe_pipe *pipe = v4l2_subdev_to_pipe(sd);
	struct isp_context *isp_ctx = v4l2_get_subdevdata(sd);
	struct spm_camera_subdev *sc_subdev = &pipe->sc_subdev;
	struct media_pad *remote_pad = NULL;
	int cfa_pattern = 0, hdr_mode = HDR_NONE, offline_channel_idx = 0;
	unsigned int bit_depth = 0, flags = 0;
	struct fe_offline_channel *offline_channel = NULL;
	/* NOTE(review): sc_pipeline is looked up for the pipeline-membership
	 * check below but otherwise unused since the bounds check was
	 * commented out. */
	struct spm_camera_pipeline *sc_pipeline = NULL;
	struct fe_rawdump *rawdump = NULL;
	struct fe_hdr_combine *hdr_combine = NULL;
	struct csi *csi = NULL;
	struct media_pipeline *mpipe = media_entity_pipeline(&sd->entity);
	int i = 0;

	if (format->which != V4L2_SUBDEV_FORMAT_ACTIVE) {
		cam_err("%s(%s) didn't support format which(%d)", __func__,
			sc_subdev->name, format->which);
		return -EINVAL;
	}
	if (format->pad >= PIPE_PAD_NUM) {
		cam_err("%s(%s) invalid pad%d.", __func__, sc_subdev->name, format->pad);
		return -EINVAL;
	}

	/* set_fmt is only valid while the entity belongs to a pipeline. */
	if (!mpipe) {
		cam_err("%s(%s) pipe is null", __func__, sc_subdev->name);
		return -1;
	}
	sc_pipeline = media_pipeline_to_sc_pipeline(mpipe);

	switch (format->pad) {
	case PIPE_PAD_IN:
		//if (format->format.width > sc_pipeline->max_width[0]
		//	|| format->format.height > sc_pipeline->max_height[0]
		//	|| format->format.width < sc_pipeline->min_width[0]
		//	|| format->format.height < sc_pipeline->min_height[0]) {
		//	cam_err("%s(%s) %ux%u exceeded max %ux%u min %ux%u", __func__, sc_subdev->name,
		//		format->format.width, format->format.height,
		//		sc_pipeline->max_width[0], sc_pipeline->max_height[0],
		//		sc_pipeline->min_width[0], sc_pipeline->min_height[0]);
		//	return -EINVAL;
		//}
		/* The remote on PIPE_PAD_IN decides online vs. offline input. */
		remote_pad = media_entity_remote_pad(&(pipe->pads[format->pad]));
		if (!remote_pad) {
			cam_err("%s(%s) PIPE_PAD_IN had no active link", __func__, sc_subdev->name);
			return -1;
		}
		csi = media_entity_to_csi(remote_pad->entity);
		if (csi) {
			flags = IDI_FMT_FLAG_ONLINE_INPUT;
		} else {
			flags = IDI_FMT_FLAG_OFFLINE_INPUT;
			offline_channel = media_entity_to_offline_channel(remote_pad->entity);
			if (!offline_channel) {
				cam_err("%s(%s) PIPE_PAD_IN should link to sensor or offline channel", __func__, sc_subdev->name);
				return -1;
			}
			offline_channel_idx = offline_channel->idx;
		}
		/* Resolve CFA pattern and bit depth from the raw-format table. */
		if (!fe_isp_lookup_raw_fmts_table(format, flags, &cfa_pattern, &bit_depth)) {
			cam_err("%s(%s) pad%d didn't support format(%dx%d code:0x%08x).", __func__, sc_subdev->name, format->pad,
				format->format.width, format->format.height, format->format.code);
			return -1;
		}
		/* HDR mode is inferred from the HDROUT and rawdump0 links. */
		remote_pad = media_entity_remote_pad(&(pipe->pads[PIPE_PAD_HDROUT]));
		if (remote_pad)
			hdr_combine = media_entity_to_hdr_combine(remote_pad->entity);
		remote_pad = media_entity_remote_pad(&(isp_ctx->pipes[0]->pads[PIPE_PAD_RAWDUMP0OUT]));
		if (remote_pad)
			rawdump = media_entity_to_rawdump(remote_pad->entity);
		if (hdr_combine) {
			if (offline_channel) {
				hdr_mode = HDR_OFFLINE;
			} else {
				if (rawdump)
					hdr_mode = HDR_MIX;
				else
					hdr_mode = HDR_ONLINE;
			}
		} else {
			hdr_mode = HDR_NONE;
		}

		/* Mix-HDR on pipe0 also feeds the IDI through the offline path. */
		if (offline_channel || (pipe->idx == 0 && hdr_mode == HDR_MIX)) { // offline
			hw_isp_top_set_idi_offline_input_fmt(SC_BLOCK(pipe),
							     offline_channel_idx,
							     format->format.width,
							     format->format.height,
							     cfa_pattern, bit_depth);
		} else { // online
			hw_isp_top_set_idi_online_input_fmt(SC_BLOCK(pipe),
							    format->format.width,
							    format->format.height,
							    cfa_pattern);
		}
		/* Mirror the accepted input format across all pads. */
		for (i = 0; i < PIPE_PAD_NUM; i++)
			pipe->pad_fmts[i].format = format->format;
		break;
	default:
		cam_dbg("%s(%s) didn't support pad%d.", __func__, sc_subdev->name, format->pad);
		return -EINVAL;
	}
	return 0;
}
|
|
+
|
|
+static int fe_pipe_subdev_pad_get_fmt(struct v4l2_subdev *sd,
|
|
+ struct v4l2_subdev_state *state,
|
|
+ struct v4l2_subdev_format *format)
|
|
+{
|
|
+ struct fe_pipe *pipe = v4l2_subdev_to_pipe(sd);
|
|
+
|
|
+ if (format->pad >= PIPE_PAD_NUM) {
|
|
+ cam_dbg("%s(%s) invalid pad%d.", __func__, pipe->sc_subdev.name, format->pad);
|
|
+ return -EINVAL;
|
|
+ }
|
|
+
|
|
+ format->format = pipe->pad_fmts[format->pad].format;
|
|
+
|
|
+ return 0;
|
|
+}
|
|
+
|
|
/*
 * Start/stop streaming on an ISP pipe subdev.
 *
 * On enable: decodes flags and sensor id packed into the cached input
 * format's .field, picks the ISP clock rate, resets DMA state, splits
 * the shared IDI FIFO between the two pipes (the constant 4750 is
 * presumably the FIFO's total depth in units of 4 pixels — confirm
 * against the hardware manual), resolves the HDR mode from the media
 * links, selects the IDI input source, and programs the vsync/gap/IRQ
 * and speed-control registers before latching the shadow registers.
 *
 * On disable: signals any slice waiters, switches the IDI input to
 * INVALID_CH, and (for CSI-fed pipes) waits up to 500 ms for the
 * hardware shadow-close-done completion before masking the IRQs.
 *
 * Returns 0 on success, -1 on topology errors.
 */
static int fe_pipe_subdev_video_s_stream(struct v4l2_subdev *sd, int enable)
{
	struct fe_pipe *pipe = v4l2_subdev_to_pipe(sd);
	struct isp_context *isp_ctx = v4l2_get_subdevdata(sd);
	struct spm_camera_subdev *sc_subdev = &pipe->sc_subdev;
	unsigned int pipe0_width = 0, pipe1_width = 0, idi0_fifo_depth = 0, idi1_fifo_depth = 0;
	unsigned int idi_line_depth = 0, idi_pix_depth = 0, mix_hdr_line = 0, idi_insert_dummy_line = 8;
	struct media_entity *me_pipe0 = (struct media_entity*)isp_ctx->pipes[0];
	struct media_entity *me_pipe1 = (struct media_entity*)isp_ctx->pipes[1];
	struct media_pipeline *mpipe0 = media_entity_pipeline(me_pipe0);
	struct media_pipeline *mpipe1 = media_entity_pipeline(me_pipe1);
	struct media_pad *remote_pad = NULL;
	int idi_source = 0, hdr_mode = HDR_NONE, enable_read_outstanding = 0, sensor_id = 0;
	struct spm_camera_sensor *sc_sensor = NULL;
	struct fe_offline_channel *offline_channel = NULL;
	struct spm_camera_pipeline *sc_pipeline = NULL, *sc_pipeline0 = NULL, *sc_pipeline1 = NULL;
	struct fe_rawdump *rawdump = NULL;
	struct fe_hdr_combine *hdr_combine = NULL;
	struct csi *csi = NULL;
	unsigned int irq_bitmap = 0, cap_to_preview = 0;
	unsigned int vi_flags = 0, clk_high = 0, force_sw_gap = 0;
	struct media_pipeline *mpipe = media_entity_pipeline(&sd->entity);
	long l_ret = 0;

	BUG_ON(!me_pipe0);
	BUG_ON(!me_pipe1);
	/* Out-of-band data is packed into the input format's .field by the
	 * userspace/bridge layer: switch flags in the upper bits, sensor id
	 * in the lower ones. */
	vi_flags = (pipe->pad_fmts[PIPE_PAD_IN].format.field >> SPACEMIT_VI_SWITCH_FLAGS_SHIFT) & SPACEMIT_VI_PRI_DATA_MASK;
	cap_to_preview = vi_flags & SPACEMIT_VI_FLAG_BACK_TO_PREVIEW;
	//clk_high = vi_flags & SPACEMIT_VI_FLAG_CLK_HIGH;
	//force_sw_gap = vi_flags & SPACEMIT_VI_FLAG_FORCE_SW_GAP;
	sensor_id = pipe->pad_fmts[PIPE_PAD_IN].format.field & SPACEMIT_VI_PRI_DATA_MASK;
	sensor_id = (sensor_id & SPACEMIT_VI_SENSOR_ID_MASK) >> SPACEMIT_VI_SENSOR_ID_SHIFT;
	/* Dual concurrent online pipes force the high clock and SW gap. */
	if (mpipe0 && mpipe1) {
		sc_pipeline0 = media_pipeline_to_sc_pipeline(mpipe0);
		sc_pipeline1 = media_pipeline_to_sc_pipeline(mpipe1);
		if (sc_pipeline0->is_online_mode && sc_pipeline1->is_online_mode) {
			clk_high = 1;
			force_sw_gap = 1;
		}
	}
	/* Resolve HDR-combine / rawdump / input topology from the links. */
	remote_pad = media_entity_remote_pad(&(pipe->pads[PIPE_PAD_HDROUT]));
	if (remote_pad)
		hdr_combine = media_entity_to_hdr_combine(remote_pad->entity);
	remote_pad = media_entity_remote_pad(&(isp_ctx->pipes[0]->pads[PIPE_PAD_RAWDUMP0OUT]));
	if (remote_pad)
		rawdump = media_entity_to_rawdump(remote_pad->entity);
	remote_pad = media_entity_remote_pad(&(pipe->pads[PIPE_PAD_IN]));
	if (!remote_pad) {
		cam_err("%s(%s) PIPE_PAD_IN had no active link", __func__, sc_subdev->name);
		return -1;
	}
	csi = media_entity_to_csi(remote_pad->entity);
	if (csi) {
		remote_pad = media_entity_remote_pad(&(csi->pads[CSI_PAD_IN]));
		BUG_ON(!remote_pad);
		sc_sensor = media_entity_to_sc_sensor(remote_pad->entity);
	}
	if (!sc_sensor) {
		offline_channel = media_entity_to_offline_channel(remote_pad->entity);
	}
	if (pipe->idx == 0)
		irq_bitmap = POSTERR_IRQ_PIP0_SDW_CLOSE_DONE;
	else
		irq_bitmap = POSTERR_IRQ_PIP1_SDW_CLOSE_DONE;
	if (!mpipe) {
		cam_err("%s entity pipe is null", __func__);
		return -1;
	}
	sc_pipeline = media_pipeline_to_sc_pipeline(mpipe);
	BUG_ON(!sc_pipeline);
	if (enable) {
		if (clk_high)
			fe_isp_set_clk(isp_ctx, ISP_CLK_HIGH);
		else
			fe_isp_set_clk(isp_ctx, ISP_CLK_LOW);
		isp_ctx->isp_fatal_error = 0;
		isp_ctx->dma_overlap_cnt = 0;
		if (isp_ctx->dma_block)
			hw_dma_reset(isp_ctx->dma_block);
		atomic_set(&sc_pipeline->slice_info_update, 0);
		fe_isp_reset_frame_id(sc_pipeline);
		/* Use 8-pixel-aligned widths; 1920 as fallback for an
		 * unconfigured-but-active pipe. */
		if (mpipe0) {
			pipe0_width = CAM_ALIGN(isp_ctx->pipes[0]->pad_fmts[PIPE_PAD_IN].format.width, 8);
			if (pipe0_width == 0)
				pipe0_width = 1920;
		}
		if (mpipe1) {
			pipe1_width = CAM_ALIGN(isp_ctx->pipes[1]->pad_fmts[PIPE_PAD_IN].format.width, 8);
			if (pipe1_width == 0)
				pipe1_width = 1920;
		}
		if (hdr_combine) {
			if (offline_channel) {
				hdr_mode = HDR_OFFLINE;
			} else if (sc_sensor) {
				if (rawdump)
					hdr_mode = HDR_MIX;
				else
					hdr_mode = HDR_ONLINE;
			} else {
				cam_err("%s(%s) PIPE_PAD_IN should link to offline_channel or sensor", __func__, sc_subdev->name);
				return -1;
			}
		} else {
			hdr_mode = HDR_NONE;
		}
		if (hdr_mode == HDR_MIX) {
			enable_read_outstanding = 1;
			idi_insert_dummy_line = 9;
		} else {
			idi_insert_dummy_line = 8;
		}
		BUG_ON(0 == (pipe0_width + pipe1_width));
		/* Split the shared IDI FIFO between the two pipes,
		 * proportional to their widths (50/50 when both active). */
		if (hdr_mode == HDR_MIX) {
			idi1_fifo_depth = pipe1_width >> 1;
			idi0_fifo_depth = 4750 - idi1_fifo_depth;
			BUG_ON(0 == isp_ctx->pipes[0]->pad_fmts[PIPE_PAD_IN].format.width);
		} else {
			if (pipe0_width > 0 && pipe1_width > 0)
				idi0_fifo_depth = 4750 >> 1;
			else
				idi0_fifo_depth = (4750 * pipe0_width) / (pipe0_width + pipe1_width);
			idi1_fifo_depth = 4750 - idi0_fifo_depth;
		}
		if (!enable_read_outstanding) {
			if (pipe0_width == 0)
				pipe0_width = 1920;
			if (pipe1_width == 0)
				pipe1_width = 1920;
			/* NOTE(review): the pipe0 results here are immediately
			 * overwritten by the pipe1 computation and neither is
			 * consumed afterwards in this branch — looks like
			 * leftovers of a removed hw_isp_top_set_idi_linebuf
			 * call; verify before relying on these values. */
			idi_line_depth = (idi0_fifo_depth << 2) / pipe0_width;
			idi_pix_depth = (idi0_fifo_depth << 2) - idi_line_depth * pipe0_width;
			idi_line_depth = (idi1_fifo_depth << 2) / pipe1_width;
			idi_pix_depth = (idi1_fifo_depth << 2) - idi_line_depth * pipe1_width;
			hw_isp_top_enable_rd_outstanding(SC_BLOCK(isp_ctx->pipes[0]), 0);
		} else { // enable read outstanding
			BUG_ON(0 == pipe0_width);
			BUG_ON(0 == pipe1_width);
			BUG_ON(pipe0_width != pipe1_width);
			/* Round the line depth down to an even value and
			 * recompute the FIFO split from it. */
			idi_line_depth = (idi0_fifo_depth << 2) / pipe0_width;
			idi_line_depth >>= 1;
			idi_line_depth <<= 1;
			idi0_fifo_depth = (idi_line_depth * pipe0_width) >> 2;
			idi1_fifo_depth = 4750 - idi0_fifo_depth;
			if (0 == pipe->idx) {
				idi_pix_depth = (idi0_fifo_depth << 2) - idi_line_depth * pipe0_width;
			} else {
				idi_line_depth = (idi1_fifo_depth << 2) / pipe1_width;
				idi_pix_depth = (idi1_fifo_depth << 2) - idi_line_depth * pipe1_width;
			}
			hw_isp_top_enable_rd_outstanding(SC_BLOCK(isp_ctx->pipes[0]), 1);
		}
		/* Pick the IDI input source: sensor channels for online
		 * input, offline channels otherwise.  In mix-HDR, pipe0
		 * replays from DDR while pipe1 takes the sensor's second
		 * channel. */
		if (csi) {
			if (hdr_mode == HDR_MIX) {
				if (pipe->idx == 0) {
					idi_source = OFFLINE_CH0;
				} else {
					if (sensor_id == 0)
						idi_source = SENSOR1_CH1;
					else
						idi_source = SENSOR0_CH1;
				}
			} else {
				if (sensor_id == 0)
					idi_source = SENSOR0_CH0;
				else
					idi_source = SENSOR1_CH0;
			}

		} else {
			if (!offline_channel) {
				cam_err("%s(%s) PIPE_PAD_IN should link to offline_channel or sensor", __func__, sc_subdev->name);
				return -1;
			}
			if (offline_channel->idx == 0)
				idi_source = OFFLINE_CH0;
			else
				idi_source = OFFLINE_CH1;
		}
		hw_isp_top_set_idi_input_source(SC_BLOCK(pipe), idi_source);
		hw_isp_top_set_idi_dummyline(SC_BLOCK(isp_ctx->pipes[0]), idi_insert_dummy_line);
		/* Online input passes vsync through with a short delay;
		 * offline input uses a longer synthesized delay. */
		if (csi) {
			hw_isp_top_enable_vsync_pass_through(SC_BLOCK(isp_ctx->pipes[0]), pipe->idx, 1);
			hw_isp_top_set_vsync2href_dly_cnt(SC_BLOCK(isp_ctx->pipes[0]), pipe->idx, 0xc8);
		} else {
			hw_isp_top_enable_vsync_pass_through(SC_BLOCK(isp_ctx->pipes[0]), pipe->idx, 0);
			hw_isp_top_set_vsync2href_dly_cnt(SC_BLOCK(isp_ctx->pipes[0]), pipe->idx, 0x3e8);
		}
		hw_isp_top_enable_hdr(SC_BLOCK(isp_ctx->pipes[0]), hdr_mode);
		if (hdr_mode == HDR_MIX) {
			mix_hdr_line = CAM_ALIGN((idi0_fifo_depth << 3) / isp_ctx->pipes[0]->pad_fmts[PIPE_PAD_IN].format.width, 2);
			hw_isp_top_set_mix_hdr_line(SC_BLOCK(isp_ctx->pipes[0]), mix_hdr_line);
			hw_isp_top_set_ddr_wr_line(SC_BLOCK(isp_ctx->pipes[0]), 2);
		}
		if (csi)
			hw_isp_top_set_irq_enable(SC_BLOCK(pipe), ISP_IRQ_SDE_SOF, 0);
		/* Clear then unmask all error IRQs plus this pipe's
		 * shadow-close-done bit. */
		hw_isp_top_clr_err0_irq_status(SC_BLOCK(isp_ctx->pipes[0]), 0xffffffff);
		hw_isp_top_set_err0_irq_enable(SC_BLOCK(isp_ctx->pipes[0]), 0xffffffff, 0);
		hw_isp_top_clr_err2_irq_status(SC_BLOCK(isp_ctx->pipes[0]), 0xffffffff);
		hw_isp_top_set_err2_irq_enable(SC_BLOCK(isp_ctx->pipes[0]), 0xffffffff, 0);
		hw_isp_top_clr_posterr_irq_status(SC_BLOCK(isp_ctx->pipes[0]), irq_bitmap);
		hw_isp_top_set_posterr_irq_enable(SC_BLOCK(isp_ctx->pipes[0]), irq_bitmap, 0);
		hw_isp_top_set_gap_value(SC_BLOCK(isp_ctx->pipes[0]), 200, 200, 200);
		if (csi && !force_sw_gap) {
			hw_isp_top_enable_hw_gap(SC_BLOCK(isp_ctx->pipes[0]), pipe->idx, 1);
		} else {
			hw_isp_top_enable_hw_gap(SC_BLOCK(isp_ctx->pipes[0]), pipe->idx, 0);
		}
		/* Speed control only when exactly one pipe is online. */
		if (sc_pipeline0 && sc_pipeline1) {
			if ((sc_pipeline0->is_online_mode && !sc_pipeline1->is_online_mode)
			    || (!sc_pipeline0->is_online_mode && sc_pipeline1->is_online_mode)) {
				hw_isp_top_set_speed_ctrl(SC_BLOCK(isp_ctx->pipes[0]), 1);
			} else {
				hw_isp_top_set_speed_ctrl(SC_BLOCK(isp_ctx->pipes[0]), 0);
			}
		} else {
			hw_isp_top_set_speed_ctrl(SC_BLOCK(isp_ctx->pipes[0]), 0);
		}
		if (!cap_to_preview)
			hw_isp_top_shadow_latch(SC_BLOCK(pipe));
	} else {
		complete_all(&sc_pipeline->slice_done);
		if (csi) {
			reinit_completion(&(pipe->close_done));
		}
		/* Detach the IDI input, then latch via cfg_rdy so the
		 * hardware performs a clean shadow close. */
		hw_isp_top_set_idi_input_source(SC_BLOCK(pipe), INVALID_CH);
		hw_isp_top_set_cfg_rdy(SC_BLOCK(pipe), 1);
		if (csi) {
			l_ret = wait_for_completion_interruptible_timeout(&(pipe->close_done), msecs_to_jiffies(500));
			if (l_ret == 0)
				cam_warn("%s(%s) wait stream off timeout", __func__, sc_subdev->name);
			else if (l_ret < 0)
				cam_warn("%s(%s) wait stream off interrputed by user app", __func__, sc_subdev->name);
			else
				cam_dbg("%s(%s) wait stream off ok", __func__, sc_subdev->name);
		}
		hw_isp_top_enable_vsync_pass_through(SC_BLOCK(isp_ctx->pipes[0]), pipe->idx, 0);
		hw_isp_top_set_vsync2href_dly_cnt(SC_BLOCK(isp_ctx->pipes[0]), pipe->idx, 0);
		hw_isp_top_set_posterr_irq_enable(SC_BLOCK(isp_ctx->pipes[0]), 0, irq_bitmap);
	}
	return 0;
}
|
|
+
|
|
/*
 * Pipeline notifier callback shared by the CSI and pipe subdevs
 * (dispatched on the fe_x wrapper embedded in both).
 *
 * Actions handled:
 *  - PIPE_ACK: a pipe subdev acknowledges membership (stops the chain).
 *  - CLEAN_USR_DATA: under the pipeline spinlock, empties the
 *    per-formatter WDMA lists, resets the per-frame sync lists/counters
 *    and the global WDMA list, and clears the MMU TBU reload flag.
 *  - SENSOR_STREAM_ON/OFF: forwarded to the CSI subdev's s_stream.
 *  - SLICE_READY: reprograms the IDI offline input format for the new
 *    slice width taken from the pipeline context.
 *
 * Returns NOTIFY_OK | NOTIFY_STOP_MASK when the action was consumed,
 * NOTIFY_BAD on CSI failure, NOTIFY_DONE otherwise.
 */
static int fe_isp_pipeline_notifier_handler(struct notifier_block *nb,
					    unsigned long action, void *data)
{
	struct fe_x *x = container_of(nb, struct fe_x, pipeline_notify_block);
	struct fe_pipe *pipe = NULL;
	struct csi *csi = NULL;
	struct fe_offline_channel *offline_channel = NULL;
	struct media_pad *remote_pad = NULL;
	struct spm_camera_pipeline *sc_pipeline = NULL;
	struct isp_pipeline_context *pipe_ctx = NULL;
	struct list_head *pos = NULL, *n = NULL;
	unsigned long flags = 0;
	int i = 0, j = 0, ret = 0;
	unsigned int idi_fifo_depth = 0, idi_line_depth = 0, idi_pix_depth = 0;
	struct v4l2_subdev_format format = { 0 };
	unsigned int bit_depth = 0;

	/* At most one of these casts succeeds, identifying which subdev
	 * kind this notifier instance is attached to. */
	csi = v4l2_subdev_to_csi(&(x->sc_subdev.pcsd.sd));
	pipe = v4l2_subdev_to_pipe(&(x->sc_subdev.pcsd.sd));
	switch (action) {
	case PIPELINE_ACTION_PIPE_ACK:
		//pipe = v4l2_subdev_to_pipe(&(x->sc_subdev.pcsd.sd));
		if (pipe)
			return NOTIFY_OK | NOTIFY_STOP_MASK;
		return NOTIFY_DONE;
		break;
	case PIPELINE_ACTION_CLEAN_USR_DATA:
		sc_pipeline = (struct spm_camera_pipeline *)data;
		pipe_ctx = (struct isp_pipeline_context *)sc_pipeline->usr_data;
		if (pipe_ctx) {
			spin_lock_irqsave(&sc_pipeline->slock, flags);
			/* Drop every queued WDMA entry per formatter... */
			for (i = 0; i < FORMATTER_NUM; i++) {
				list_for_each_safe(pos, n, &pipe_ctx->fmt_wdma_list[i]) {
					list_del_init(pos);
				}
			}
			/* ...reset the per-frame sync buckets... */
			for (i = 0; i < FORMATTER_NUM; i++) {
				for (j = 0; j < VB2_MAX_FRAME; j++) {
					INIT_LIST_HEAD(&pipe_ctx->fmt_wdma_sync[i][j]);
					pipe_ctx->fmt_wdma_sync_cnt[i][j] = 0;
				}
			}
			/* ...and flush the global WDMA list. */
			list_for_each_safe(pos, n, &(pipe_ctx->wdma_list)) {
				list_del_init(pos);
			}
			pipe_ctx->mmu_tbu_reload = MMU_TBU_OK;
			spin_unlock_irqrestore(&sc_pipeline->slock, flags);
		}
		return NOTIFY_OK | NOTIFY_STOP_MASK;
		break;
	case PIPELINE_ACTION_SENSOR_STREAM_ON:
		if (csi) {
			ret = csi_subdev_video_s_stream(&(csi->sc_subdev.pcsd.sd), 1);
			if (ret)
				return NOTIFY_BAD;
			return NOTIFY_OK | NOTIFY_STOP_MASK;
		}
		break;
	case PIPELINE_ACTION_SENSOR_STREAM_OFF:
		if (csi) {
			ret = csi_subdev_video_s_stream(&(csi->sc_subdev.pcsd.sd), 0);
			if (ret)
				return NOTIFY_BAD;
			return NOTIFY_OK | NOTIFY_STOP_MASK;
		}
		break;
	case PIPELINE_ACTION_SLICE_READY:
		if (pipe) {
			sc_pipeline = (struct spm_camera_pipeline *)data;
			BUG_ON(!sc_pipeline);
			pipe_ctx = (struct isp_pipeline_context *)sc_pipeline->usr_data;
			BUG_ON(!pipe_ctx);
			/* NOTE(review): line/pix depth are computed but only
			 * logged since the set_idi_linebuf call was commented
			 * out below. */
			idi_fifo_depth = hw_isp_top_get_idi_fifo_depth(SC_BLOCK(pipe));
			idi_line_depth = (idi_fifo_depth << 2) / pipe_ctx->cc_slice_info.slice_width;
			idi_pix_depth = (idi_fifo_depth << 2) - idi_line_depth * pipe_ctx->cc_slice_info.slice_width;
			//hw_isp_top_set_idi_linebuf(SC_BLOCK(pipe), idi_fifo_depth, idi_line_depth, idi_pix_depth);
			format.format = pipe->pad_fmts[PIPE_PAD_IN].format;
			ret = fe_isp_lookup_raw_fmts_table(&format, IDI_FMT_FLAG_OFFLINE_INPUT, NULL, &bit_depth);
			BUG_ON(!ret);
			remote_pad = media_entity_remote_pad(&pipe->pads[PIPE_PAD_IN]);
			BUG_ON(!remote_pad);
			offline_channel = media_entity_to_offline_channel(remote_pad->entity);
			BUG_ON(!offline_channel);
			/* Reprogram the offline input for the new slice width;
			 * CFA pattern is forced to 0 here. */
			hw_isp_top_set_idi_offline_input_fmt(SC_BLOCK(pipe),
							     offline_channel->idx,
							     pipe_ctx->cc_slice_info.slice_width,
							     format.format.height,
							     0, bit_depth);
			cam_not("slice width=%d idi_fifo_depth=%u", pipe_ctx->cc_slice_info.slice_width, idi_fifo_depth);
		}
		break;
	default:
		return NOTIFY_DONE;
	}

	return NOTIFY_DONE;
}
|
|
+
|
|
+static int __fe_rawdump_pad_s_stream(struct fe_rawdump *rawdump, unsigned int pad, int enable)
|
|
+{
|
|
+ int source = 0, ret = 0, need_initial_load = 0, rawdump_only = 0;
|
|
+ struct media_pad *remote_pad_out = NULL, *remote_pad_in = NULL;
|
|
+ //struct spm_camera_sensor *sc_sensor = NULL;
|
|
+ struct fe_pipe *pipe = NULL;
|
|
+ struct spm_camera_vnode *sc_vnode = NULL;
|
|
+ struct spm_camera_subdev *sc_subdev = &rawdump->sc_subdev;
|
|
+ struct device *dev = sc_subdev->pcsd.sd.dev;
|
|
+ struct media_pipeline *mpipe = media_entity_pipeline(&sc_subdev->pcsd.sd.entity);
|
|
+ struct spm_camera_pipeline *sc_pipeline = NULL;
|
|
+ struct isp_pipeline_context *pipe_ctx = NULL;
|
|
+ struct isp_context *isp_ctx = spm_subdev_get_drvdata(sc_subdev);
|
|
+ struct spm_camera_vbuffer *sc_vb = NULL;
|
|
+ struct vi_port_cfg *port_cfg = NULL;
|
|
+ unsigned int wdma_fifo_offset = 0, wdma_fifo_depth = 0, wdma_weight = 0, wdma_fifo_div_mode = 8;
|
|
+ unsigned long flags = 0;
|
|
+
|
|
+ if (!mpipe) {
|
|
+ cam_err("%s(%s) pipe was null", __func__, sc_subdev->name);
|
|
+ return -1;
|
|
+ }
|
|
+ sc_pipeline = media_pipeline_to_sc_pipeline(mpipe);
|
|
+ switch (pad) {
|
|
+ case PAD_OUT:
|
|
+ remote_pad_out = media_entity_remote_pad(&(rawdump->pads[pad]));
|
|
+ if (!remote_pad_out) {
|
|
+ cam_err("%s(%s) PAD_OUT had no active link.", __func__, sc_subdev->name);
|
|
+ return -1;
|
|
+ }
|
|
+ sc_vnode = media_entity_to_sc_vnode(remote_pad_out->entity);
|
|
+ if (!sc_vnode) {
|
|
+ cam_err("%s(%s) PAD_OUT had no link to vnode.", __func__, sc_subdev->name);
|
|
+ return -1;
|
|
+ }
|
|
+ remote_pad_in = media_entity_remote_pad(&(rawdump->pads[PAD_IN]));
|
|
+ if (!remote_pad_in) {
|
|
+ cam_err("%s(%s) PAD_IN had no active link.", __func__, sc_subdev->name);
|
|
+ return -1;
|
|
+ }
|
|
+ //sc_sensor = media_entity_to_sc_sensor(remote_pad_in->entity);
|
|
+ pipe = media_entity_to_pipe(remote_pad_in->entity);
|
|
+ //if (pipe && !sc_sensor) {
|
|
+ // remote_pad_in = media_entity_remote_pad(&(pipe->pads[PIPE_PAD_IN]));
|
|
+ // if (!remote_pad_in) {
|
|
+ // cam_err("%s(%s) PAD_IN->pipe had no active input link", __func__, sc_subdev->name);
|
|
+ // return -1;
|
|
+ // }
|
|
+ // sc_sensor = media_entity_to_sc_sensor(remote_pad_in->entity);
|
|
+ //}
|
|
+ //if (!sc_sensor) {
|
|
+ // cam_err("%s(%s) PAD_IN had no link to sensor or pipe->sensor.", __func__, sc_subdev->name);
|
|
+ // return -1;
|
|
+ //}
|
|
+ if (enable) {
|
|
+ atomic_set(&isp_ctx->dma_out_ctx[sc_vnode->idx].busy_cnt, 0);
|
|
+ if (rawdump->idx == 0)
|
|
+ source = RAWDUMP0;
|
|
+ else
|
|
+ source = RAWDUMP1;
|
|
+ port_cfg = sc_vnode_get_usrdata(sc_vnode);
|
|
+ if (port_cfg) {
|
|
+ wdma_fifo_offset = port_cfg->w_fifo_ctrl.offset;
|
|
+ wdma_fifo_depth = port_cfg->w_fifo_ctrl.depth;
|
|
+ wdma_weight = port_cfg->w_fifo_ctrl.weight;
|
|
+ wdma_fifo_div_mode = port_cfg->w_fifo_ctrl.div_mode;
|
|
+ ret = blocking_notifier_call_chain(&sc_pipeline->blocking_notify_chain,
|
|
+ PIPELINE_ACTION_PIPE_ACK, NULL);
|
|
+ if (NOTIFY_STOP == ret) {
|
|
+ port_cfg->buf_required_min = 0;
|
|
+ rawdump_only = 0;
|
|
+ } else {
|
|
+ port_cfg->buf_required_min = 0;
|
|
+ need_initial_load = 1;
|
|
+ rawdump_only = 1;
|
|
+ }
|
|
+ }
|
|
+ if (rawdump_only)
|
|
+ isp_ctx->dma_out_ctx[sc_vnode->idx].trig_dma_reload = 1;
|
|
+ else
|
|
+ isp_ctx->dma_out_ctx[sc_vnode->idx].trig_dma_reload = 0;
|
|
+ hw_dma_set_wdma_source(SC_BLOCK(sc_vnode),
|
|
+ sc_vnode->idx,
|
|
+ source,
|
|
+ wdma_fifo_offset,
|
|
+ wdma_fifo_depth,
|
|
+ wdma_weight,
|
|
+ wdma_fifo_div_mode);
|
|
+ if (pipe || need_initial_load) { //mix hdr or need_initial_load
|
|
+ spin_lock_irqsave(&sc_vnode->slock, flags);
|
|
+ ret = __spm_vdev_dq_idle_vbuffer(sc_vnode, &sc_vb);
|
|
+ if (ret) {
|
|
+ if (pipe) {
|
|
+ ret = __spm_vdev_pick_busy_vbuffer(sc_vnode, &sc_vb);
|
|
+ if (ret) {
|
|
+ spin_unlock_irqrestore(&sc_vnode->slock, flags);
|
|
+ cam_err("%s(%s) no buffer available for mix hdr rawdump", __func__, sc_subdev->name);
|
|
+ return ret;
|
|
+ }
|
|
+ } else {
|
|
+ //if (rawdump_only) {
|
|
+ // cam_err("%s(%s) no initial buffer available for rawdump only", __func__, sc_subdev->name);
|
|
+ // return ret;
|
|
+ //} else {
|
|
+ // if (sc_vnode->sc_vb) {
|
|
+ // sc_vb = sc_vnode->sc_vb;
|
|
+ // } else {
|
|
+ cam_info("%s(%s) no initial buffer available", __func__, sc_subdev->name);
|
|
+ // }
|
|
+ //}
|
|
+ }
|
|
+ } else {
|
|
+ __spm_vdev_q_busy_vbuffer(sc_vnode, sc_vb);
|
|
+ }
|
|
+ if (sc_vb) {
|
|
+ fe_isp_update_aout_dma_addr(sc_vnode, sc_vb, 0);
|
|
+ hw_dma_set_wdma_ready(SC_BLOCK(sc_vnode), sc_vnode->idx, 1);
|
|
+ }
|
|
+ spin_unlock_irqrestore(&sc_vnode->slock, flags);
|
|
+ }
|
|
+ hw_dma_enable_rawdump(SC_BLOCK(sc_vnode), rawdump->idx, 1);
|
|
+ if (isp_ctx->dma_out_ctx[sc_vnode->idx].trig_dma_reload && !pipe) {
|
|
+ hw_dma_set_irq_enable(SC_BLOCK(sc_vnode),
|
|
+ DMA_IRQ_SRC_WDMA_CH0 + sc_vnode->idx,
|
|
+ DMA_IRQ_START | DMA_IRQ_DONE | DMA_IRQ_ERR,
|
|
+ 0);
|
|
+ } else {
|
|
+ hw_dma_set_irq_enable(SC_BLOCK(sc_vnode),
|
|
+ DMA_IRQ_SRC_WDMA_CH0 + sc_vnode->idx,
|
|
+ DMA_IRQ_DONE | DMA_IRQ_ERR,
|
|
+ DMA_IRQ_START);
|
|
+ }
|
|
+ isp_ctx->dma_out_ctx[sc_vnode->idx].vnode = sc_vnode;
|
|
+ if (pipe) { //mix hdr
|
|
+ BUG_ON(!sc_vb);
|
|
+ sc_vb->flags |= SC_BUF_FLAG_SPECIAL_USE;
|
|
+ hw_dma_set_irq_enable(SC_BLOCK(sc_vnode),
|
|
+ DMA_IRQ_SRC_RDMA_CH0,
|
|
+ DMA_IRQ_START | DMA_IRQ_DONE | DMA_IRQ_ERR,
|
|
+ 0);
|
|
+ hw_dma_update_rdma_address(SC_BLOCK(sc_vnode), 0, (uint64_t)spm_vb2_buf_paddr(&(sc_vb->vb2_v4l2_buf.vb2_buf), 0));
|
|
+ hw_isp_top_set_idi_rd_burst_len(SC_BLOCK(isp_ctx->pipes[0]), 0, 22, 32);
|
|
+ } else {
|
|
+ mutex_lock(&sc_pipeline->mlock);
|
|
+ if (!sc_pipeline->usr_data) {
|
|
+ pipe_ctx = fe_pipeline_create_ctx(dev);
|
|
+ if (!pipe_ctx) {
|
|
+ mutex_unlock(&sc_pipeline->mlock);
|
|
+ cam_err("%s(%s) create pipeline context failed", __func__, sc_subdev->name);
|
|
+ return -ENOMEM;
|
|
+ }
|
|
+ sc_pipeline->usr_data = pipe_ctx;
|
|
+ } else {
|
|
+ pipe_ctx = (struct isp_pipeline_context*)sc_pipeline->usr_data;
|
|
+ }
|
|
+ mutex_unlock(&sc_pipeline->mlock);
|
|
+ spin_lock_irqsave(&sc_pipeline->slock, flags);
|
|
+ list_add_no_repeat(&isp_ctx->dma_out_ctx[sc_vnode->idx].list_entry, &pipe_ctx->wdma_list);
|
|
+ spin_unlock_irqrestore(&sc_pipeline->slock, flags);
|
|
+ }
|
|
+ } else {// stream off
|
|
+ ret = wait_event_interruptible_timeout(isp_ctx->dma_out_ctx[sc_vnode->idx].waitq_eof,
|
|
+ (atomic_read(&isp_ctx->dma_out_ctx[sc_vnode->idx].busy_cnt) <= 0),
|
|
+ msecs_to_jiffies(60));
|
|
+ if (0 == ret)
|
|
+ cam_warn("%s(%s) stream off wait eof timeout", __func__, sc_subdev->name);
|
|
+ else if (ret < 0)
|
|
+ cam_warn("%s(%s) stream off wait eof error ret=%d", __func__, sc_subdev->name, ret);
|
|
+
|
|
+ spin_lock_irqsave(&(isp_ctx->dma_out_ctx[sc_vnode->idx].waitq_head.lock), flags);
|
|
+ wait_event_interruptible_locked_irq(isp_ctx->dma_out_ctx[sc_vnode->idx].waitq_head,
|
|
+ !isp_ctx->dma_out_ctx[sc_vnode->idx].in_irq);
|
|
+ isp_ctx->dma_out_ctx[sc_vnode->idx].in_streamoff = 1;
|
|
+ spin_unlock_irqrestore(&(isp_ctx->dma_out_ctx[sc_vnode->idx].waitq_head.lock), flags);
|
|
+ hw_dma_set_irq_enable(SC_BLOCK(sc_vnode),
|
|
+ DMA_IRQ_SRC_WDMA_CH0 + sc_vnode->idx,
|
|
+ 0,
|
|
+ DMA_IRQ_ALL);
|
|
+ hw_dma_enable_rawdump(SC_BLOCK(sc_vnode), rawdump->idx, 0);
|
|
+ if (!pipe) {
|
|
+ pipe_ctx = (struct isp_pipeline_context*)sc_pipeline->usr_data;
|
|
+ if (pipe_ctx) {
|
|
+ spin_lock_irqsave(&sc_pipeline->slock, flags);
|
|
+ list_del_init(&isp_ctx->dma_out_ctx[sc_vnode->idx].list_entry);
|
|
+ spin_unlock_irqrestore(&sc_pipeline->slock, flags);
|
|
+ }
|
|
+ } else { //mix hdr
|
|
+ hw_dma_set_irq_enable(SC_BLOCK(sc_vnode),
|
|
+ DMA_IRQ_SRC_RDMA_CH0,
|
|
+ 0,
|
|
+ DMA_IRQ_ALL);
|
|
+ }
|
|
+ isp_ctx->dma_out_ctx[sc_vnode->idx].vnode = NULL;
|
|
+ spin_lock_irqsave(&(isp_ctx->dma_out_ctx[sc_vnode->idx].waitq_head.lock), flags);
|
|
+ isp_ctx->dma_out_ctx[sc_vnode->idx].in_streamoff = 0;
|
|
+ spin_unlock_irqrestore(&(isp_ctx->dma_out_ctx[sc_vnode->idx].waitq_head.lock), flags);
|
|
+ }
|
|
+ break;
|
|
+ default:
|
|
+ return -1;
|
|
+ }
|
|
+
|
|
+ ret = spm_subdev_pad_s_stream(sc_subdev, pad, enable);
|
|
+ if (ret) {
|
|
+ cam_err("%s(%s) s_stream on pad%u failed", __func__, sc_subdev->name, pad);
|
|
+ return ret;
|
|
+ }
|
|
+ return 0;
|
|
+}
|
|
+
|
|
+static int fe_rawdump_subdev_pad_s_stream(struct v4l2_subdev *sd,
|
|
+ struct media_link *link,
|
|
+ struct v4l2_subdev_format *source_fmt,
|
|
+ struct v4l2_subdev_format *sink_fmt)
|
|
+{
|
|
+ struct fe_rawdump *rawdump = v4l2_subdev_to_rawdump(sd);
|
|
+ unsigned int pad = source_fmt->pad;
|
|
+ int enable = source_fmt->which;
|
|
+
|
|
+ return __fe_rawdump_pad_s_stream(rawdump, pad, enable);
|
|
+}
|
|
+
|
|
+static int fe_hdr_combine_subdev_pad_set_fmt(struct v4l2_subdev *sd,
|
|
+ struct v4l2_subdev_state *state,
|
|
+ struct v4l2_subdev_format *format)
|
|
+{
|
|
+ struct fe_hdr_combine *hdr_combine = v4l2_subdev_to_hdr_combine(sd);
|
|
+ struct spm_camera_subdev *sc_subdev = NULL;
|
|
+ int i = 0;
|
|
+
|
|
+ if (!hdr_combine) {
|
|
+ cam_err("%sinvalid sd(%s)", __func__, sd->name);
|
|
+ return -EINVAL;
|
|
+ }
|
|
+ sc_subdev = &hdr_combine->sc_subdev;
|
|
+
|
|
+ if (format->which != V4L2_SUBDEV_FORMAT_ACTIVE) {
|
|
+ cam_err("%s(%s) didn't support format which(%d)", __func__,
|
|
+ sc_subdev->name, format->which);
|
|
+ return -EINVAL;
|
|
+ }
|
|
+ if (format->pad >= HDR_COMBINE_PAD_NUM) {
|
|
+ cam_err("%s(%s) invalid pad%d.", __func__, sc_subdev->name, format->pad);
|
|
+ return -EINVAL;
|
|
+ }
|
|
+
|
|
+ switch (format->pad) {
|
|
+ case HDR_PAD_P0IN:
|
|
+ case HDR_PAD_P1IN:
|
|
+ for (i = 0; i < HDR_COMBINE_PAD_NUM; i++)
|
|
+ hdr_combine->pad_fmts[i].format = format->format;
|
|
+
|
|
+ break;
|
|
+ default:
|
|
+ cam_dbg("%s(%s) didn't support pad%d.", __func__, sc_subdev->name, format->pad);
|
|
+ return -EINVAL;
|
|
+ }
|
|
+
|
|
+ return 0;
|
|
+}
|
|
+
|
|
+static int fe_hdr_combine_subdev_pad_get_fmt(struct v4l2_subdev *sd,
|
|
+ struct v4l2_subdev_state *state,
|
|
+ struct v4l2_subdev_format *format)
|
|
+{
|
|
+ struct fe_hdr_combine *hdr_combine = v4l2_subdev_to_hdr_combine(sd);
|
|
+
|
|
+ if (!hdr_combine) {
|
|
+ cam_err("%s invalid sd(%s)", __func__, sd->name);
|
|
+ return -EINVAL;
|
|
+ }
|
|
+
|
|
+ if (format->pad >= HDR_COMBINE_PAD_NUM) {
|
|
+ cam_dbg("%s(%s) invalid pad%d.", __func__, hdr_combine->sc_subdev.name,
|
|
+ format->pad);
|
|
+ return -EINVAL;
|
|
+ }
|
|
+
|
|
+ format->format = hdr_combine->pad_fmts[format->pad].format;
|
|
+
|
|
+ return 0;
|
|
+}
|
|
+
|
|
+static int csi_subdev_pad_set_fmt(struct v4l2_subdev *sd,
|
|
+ struct v4l2_subdev_state *state,
|
|
+ struct v4l2_subdev_format *format)
|
|
+{
|
|
+ struct csi *csi = v4l2_subdev_to_csi(sd);
|
|
+ struct spm_camera_subdev *sc_subdev = &csi->sc_subdev;
|
|
+ int i = 0;
|
|
+
|
|
+ if (format->which != V4L2_SUBDEV_FORMAT_ACTIVE) {
|
|
+ cam_err("%s(%s) didn't support format which(%d)", __func__,
|
|
+ sc_subdev->name, format->which);
|
|
+ return -EINVAL;
|
|
+ }
|
|
+
|
|
+ switch (format->pad) {
|
|
+ case CSI_PAD_IN:
|
|
+ for (i = 0; i < CSI_PAD_NUM; i++)
|
|
+ csi->pad_fmts[i].format = format->format;
|
|
+ break;
|
|
+ default:
|
|
+ cam_dbg("%s(%s) didn't support pad%d.", __func__, sc_subdev->name, format->pad);
|
|
+ return -EINVAL;
|
|
+ }
|
|
+ return 0;
|
|
+}
|
|
+
|
|
+static int csi_subdev_pad_get_fmt(struct v4l2_subdev *sd,
|
|
+ struct v4l2_subdev_state *state,
|
|
+ struct v4l2_subdev_format *format)
|
|
+{
|
|
+ struct csi *csi = v4l2_subdev_to_csi(sd);
|
|
+
|
|
+ if (format->pad >= CSI_PAD_NUM) {
|
|
+ cam_dbg("%s didn't have pad%d.", csi->sc_subdev.name, format->pad);
|
|
+ return -EINVAL;
|
|
+ }
|
|
+
|
|
+ format->format = csi->pad_fmts[format->pad].format;
|
|
+
|
|
+ return 0;
|
|
+}
|
|
+
|
|
+static int csi_subdev_pad_s_stream(struct v4l2_subdev *sd, struct media_link *link,
|
|
+ struct v4l2_subdev_format *source_fmt,
|
|
+ struct v4l2_subdev_format *sink_fmt)
|
|
+{
|
|
+ return 0;
|
|
+}
|
|
+
|
|
+static struct v4l2_subdev_pad_ops rawdump_subdev_pad_ops = {
|
|
+ .set_fmt = fe_rawdump_subdev_pad_set_fmt,
|
|
+ .get_fmt = fe_rawdump_subdev_pad_get_fmt,
|
|
+ .link_validate = fe_rawdump_subdev_pad_s_stream,
|
|
+};
|
|
+
|
|
+static struct v4l2_subdev_pad_ops offline_channel_subdev_pad_ops = {
|
|
+ .set_fmt = fe_offline_channel_subdev_pad_set_fmt,
|
|
+ .get_fmt = fe_offline_channel_subdev_pad_get_fmt,
|
|
+ .link_validate = fe_offline_channel_subdev_pad_s_stream,
|
|
+};
|
|
+
|
|
+static struct v4l2_subdev_pad_ops formatter_subdev_pad_ops = {
|
|
+ .set_fmt = fe_formatter_subdev_pad_set_fmt,
|
|
+ .get_fmt = fe_formatter_subdev_pad_get_fmt,
|
|
+ .link_validate = fe_formatter_subdev_pad_s_stream,
|
|
+};
|
|
+
|
|
+static struct v4l2_subdev_pad_ops dwt_subdev_pad_ops = {
|
|
+ .set_fmt = fe_dwt_subdev_pad_set_fmt,
|
|
+ .get_fmt = fe_dwt_subdev_pad_get_fmt,
|
|
+ .link_validate = fe_dwt_subdev_pad_s_stream,
|
|
+};
|
|
+
|
|
+static struct v4l2_subdev_pad_ops pipe_subdev_pad_ops = {
|
|
+ .set_fmt = fe_pipe_subdev_pad_set_fmt,
|
|
+ .get_fmt = fe_pipe_subdev_pad_get_fmt,
|
|
+};
|
|
+
|
|
+static struct v4l2_subdev_pad_ops hdr_combine_subdev_pad_ops = {
|
|
+ .set_fmt = fe_hdr_combine_subdev_pad_set_fmt,
|
|
+ .get_fmt = fe_hdr_combine_subdev_pad_get_fmt,
|
|
+};
|
|
+
|
|
+static struct v4l2_subdev_pad_ops csi_subdev_pad_ops = {
|
|
+ .set_fmt = csi_subdev_pad_set_fmt,
|
|
+ .get_fmt = csi_subdev_pad_get_fmt,
|
|
+ .link_validate = csi_subdev_pad_s_stream,
|
|
+};
|
|
+
|
|
+static long fe_isp_global_reset(struct isp_context *isp_ctx)
|
|
+{
|
|
+ reinit_completion(&isp_ctx->global_reset_done);
|
|
+ hw_isp_top_global_reset(SC_BLOCK(isp_ctx->pipes[0]));
|
|
+ return wait_for_completion_interruptible_timeout(&isp_ctx->global_reset_done, msecs_to_jiffies(500));
|
|
+}
|
|
+extern void dpu_mclk_exclusive_put(void);
|
|
+extern bool dpu_mclk_exclusive_get(void);
|
|
+
|
|
+static int __fe_isp_s_power(struct v4l2_subdev *sd, int on)
|
|
+{
|
|
+ struct spm_camera_subdev *sc_subdev = v4l2_subdev_to_sc_subdev(sd);
|
|
+ struct isp_context *isp_ctx = v4l2_get_subdevdata(sd);
|
|
+ int v = 0;
|
|
+ int ret = 0;
|
|
+ long l_ret = 0;
|
|
+ unsigned int idi0_fifo_depth = 0, idi1_fifo_depth = 0;
|
|
+
|
|
+ //HW sequence
|
|
+ if (on) {
|
|
+ if (atomic_inc_return(&isp_ctx->pwr_cnt) == 1) {
|
|
+ cam_not("vi s_power 1");
|
|
+#ifdef CONFIG_ARCH_SPACEMIT
|
|
+ ret = pm_runtime_get_sync(&isp_ctx->pdev->dev);
|
|
+ if (ret < 0) {
|
|
+ cam_err("rpm get failed");
|
|
+ return -1;
|
|
+ }
|
|
+ pm_stay_awake(&isp_ctx->pdev->dev);
|
|
+
|
|
+// fe_isp_set_clk(isp_ctx, ISP_CLK_LOW);
|
|
+
|
|
+ reset_control_deassert(isp_ctx->ahb_reset);
|
|
+// clk_prepare_enable(isp_ctx->ahb_clk);
|
|
+
|
|
+ clk_prepare_enable(isp_ctx->fnc_clk);
|
|
+ reset_control_deassert(isp_ctx->isp_reset);
|
|
+
|
|
+ clk_prepare_enable(isp_ctx->bus_clk);
|
|
+ reset_control_deassert(isp_ctx->isp_ci_reset);
|
|
+
|
|
+ clk_prepare_enable(isp_ctx->dpu_clk);
|
|
+ reset_control_deassert(isp_ctx->lcd_mclk_reset);
|
|
+ fe_isp_set_clk(isp_ctx, ISP_CLK_LOW);
|
|
+
|
|
+#endif
|
|
+#ifdef CONFIG_SPACEMIT_DEBUG
|
|
+ vi_running_info.b_dev_running = true;
|
|
+#endif
|
|
+ hw_isp_top_enable_debug_clk(SC_BLOCK(isp_ctx->pipes[0]), 1);
|
|
+ hw_isp_top_set_irq_enable(SC_BLOCK(isp_ctx->pipes[0]), ISP_IRQ_G_RST_DONE, 0);
|
|
+ hw_isp_top_set_irq_enable(SC_BLOCK(isp_ctx->pipes[0]),
|
|
+ ISP_IRQ_PIPE_SOF | ISP_IRQ_STATS_ERR | ISP_IRQ_IDI_SHADOW_DONE,
|
|
+ 0);
|
|
+ hw_isp_top_set_irq_enable(SC_BLOCK(isp_ctx->pipes[1]),
|
|
+ ISP_IRQ_PIPE_SOF | ISP_IRQ_STATS_ERR | ISP_IRQ_IDI_SHADOW_DONE,
|
|
+ 0);
|
|
+ idi0_fifo_depth = 4750 >> 1;
|
|
+ idi1_fifo_depth = 4750 - idi0_fifo_depth;
|
|
+ hw_isp_top_set_idi_linebuf(SC_BLOCK(isp_ctx->pipes[0]), idi0_fifo_depth, 0, 0);
|
|
+ hw_isp_top_set_idi_linebuf(SC_BLOCK(isp_ctx->pipes[1]), idi1_fifo_depth, 0, 0);
|
|
+ hw_dma_reset(isp_ctx->dma_block);
|
|
+#if IS_ENABLED(CONFIG_DRM_SPACEMIT)
|
|
+ while (1) {
|
|
+ if (dpu_mclk_exclusive_get()) {
|
|
+ clk_set_rate(isp_ctx->dpu_clk, 409600000);
|
|
+ if (ret < 0 && ret != -EBUSY) {
|
|
+ cam_err("%s lock dpu clk failed ret=%d", __func__, ret);
|
|
+ return ret;
|
|
+ } else if (ret == 0) {
|
|
+ break;
|
|
+ }
|
|
+ } else {
|
|
+ continue;
|
|
+ }
|
|
+ }
|
|
+#endif
|
|
+#ifdef CONFIG_SPACEMIT_K1X_VI_IOMMU
|
|
+ isp_ctx->mmu_dev->ops->set_timeout_default_addr(isp_ctx->mmu_dev, (uint64_t)isp_ctx->rsvd_phy_addr);
|
|
+#endif
|
|
+ }
|
|
+ } else {
|
|
+ v = atomic_dec_return(&isp_ctx->pwr_cnt);
|
|
+ if (v == 0) {
|
|
+#if IS_ENABLED(CONFIG_DRM_SPACEMIT)
|
|
+ dpu_mclk_exclusive_put();
|
|
+#endif
|
|
+ l_ret = fe_isp_global_reset(isp_ctx);
|
|
+ if (l_ret == 0)
|
|
+ cam_err("%s global reset timeout", __func__);
|
|
+ else if (l_ret < 0)
|
|
+ cam_err("%s global reset is interrupted by user app", __func__);
|
|
+ else
|
|
+ cam_dbg("%s global reset done", __func__);
|
|
+
|
|
+#ifdef CONFIG_SPACEMIT_DEBUG
|
|
+ vi_running_info.b_dev_running = false;
|
|
+#endif
|
|
+ cam_not("vi s_power 0");
|
|
+#ifdef CONFIG_ARCH_SPACEMIT
|
|
+// clk_disable_unprepare(isp_ctx->ahb_clk);
|
|
+ reset_control_assert(isp_ctx->ahb_reset);
|
|
+
|
|
+ reset_control_deassert(isp_ctx->isp_reset);
|
|
+ clk_disable_unprepare(isp_ctx->fnc_clk);
|
|
+
|
|
+ reset_control_deassert(isp_ctx->isp_ci_reset);
|
|
+ clk_disable_unprepare(isp_ctx->bus_clk);
|
|
+
|
|
+ reset_control_deassert(isp_ctx->lcd_mclk_reset);
|
|
+ clk_disable_unprepare(isp_ctx->dpu_clk);
|
|
+
|
|
+ pm_relax(&isp_ctx->pdev->dev);
|
|
+ pm_runtime_put_sync(&isp_ctx->pdev->dev);
|
|
+#endif
|
|
+ } else if (v < 0) {
|
|
+ atomic_inc(&isp_ctx->pwr_cnt);
|
|
+ cam_err("%s(%s) invalid power off", __func__, sc_subdev->name);
|
|
+ }
|
|
+ }
|
|
+
|
|
+ return 0;
|
|
+}
|
|
+
|
|
+int fe_isp_s_power(void *isp_context, int on)
|
|
+{
|
|
+ struct isp_context *isp_ctx = isp_context;
|
|
+
|
|
+ if (isp_ctx->pipes[0] == NULL)
|
|
+ return -10;
|
|
+
|
|
+ return __fe_isp_s_power(&(isp_ctx->pipes[0]->sc_subdev.pcsd.sd), on);
|
|
+}
|
|
+
|
|
+static int csi_subdev_core_s_power(struct v4l2_subdev *sd, int on)
|
|
+{
|
|
+ struct spm_camera_subdev *sc_subdev = v4l2_subdev_to_sc_subdev(sd);
|
|
+ struct isp_context *isp_ctx = v4l2_get_subdevdata(sd);
|
|
+ struct csi *csi = v4l2_subdev_to_csi(sd);
|
|
+ struct ccic_ctrl *csi_ctrl = NULL;
|
|
+ int v = 0, sensor_id = 0;
|
|
+
|
|
+ sensor_id = csi->pad_fmts[CSI_PAD_IN].format.field & SPACEMIT_VI_PRI_DATA_MASK;
|
|
+ sensor_id = (sensor_id & SPACEMIT_VI_SENSOR_ID_MASK) >> SPACEMIT_VI_SENSOR_ID_SHIFT;
|
|
+ csi_ctrl = isp_ctx->ccic[sensor_id].csi_ctrl;
|
|
+ BUG_ON(!csi_ctrl);
|
|
+ if (on) {
|
|
+ if (atomic_inc_return(&(isp_ctx->ccic[sensor_id].pwr_cnt)) == 1) {
|
|
+ cam_not("csi%d s_power 1", sensor_id);
|
|
+ csi_ctrl->ops->clk_enable(csi_ctrl, 1);
|
|
+ }
|
|
+ } else {
|
|
+ v = atomic_dec_return(&(isp_ctx->ccic[sensor_id].pwr_cnt));
|
|
+ if (v == 0) {
|
|
+ cam_not("csi s_power 0");
|
|
+ csi_ctrl->ops->clk_enable(csi_ctrl, 0);
|
|
+ } else if (v < 0) {
|
|
+ atomic_inc(&(isp_ctx->ccic[sensor_id].pwr_cnt));
|
|
+ cam_err("%s(%s) invalid power off", __func__, sc_subdev->name);
|
|
+ }
|
|
+ }
|
|
+ return 0;
|
|
+}
|
|
+
|
|
+static int csi_subdev_core_reset(struct v4l2_subdev *sd, u32 val)
|
|
+{
|
|
+ if (RESET_STAGE1 == val) {
|
|
+ //do csi reset
|
|
+ }
|
|
+ return 0;
|
|
+}
|
|
+
|
|
+static struct v4l2_subdev_core_ops isp_subdev_core_ops = {
|
|
+ .ioctl = spm_subdev_ioctl,
|
|
+ .s_power = __fe_isp_s_power,
|
|
+ .reset = spm_subdev_reset,
|
|
+//#ifdef CONFIG_COMPAT
|
|
+#if 0
|
|
+ .compat_ioctl32 = spm_subdev_compat_ioctl32,
|
|
+#endif
|
|
+};
|
|
+
|
|
+static struct v4l2_subdev_core_ops csi_subdev_core_ops = {
|
|
+ .ioctl = spm_subdev_ioctl,
|
|
+ //.s_power = csi_subdev_core_s_power,
|
|
+ .reset = csi_subdev_core_reset,
|
|
+//#ifdef CONFIG_COMPAT
|
|
+#if 0
|
|
+ .compat_ioctl32 = spm_subdev_compat_ioctl32,
|
|
+#endif
|
|
+
|
|
+};
|
|
+
|
|
+static struct v4l2_subdev_video_ops formatter_subdev_video_ops = {
|
|
+ .s_stream = fe_formatter_subdev_video_s_stream,
|
|
+};
|
|
+
|
|
+static struct v4l2_subdev_video_ops pipe_subdev_video_ops = {
|
|
+ .s_stream = fe_pipe_subdev_video_s_stream,
|
|
+};
|
|
+
|
|
+static struct v4l2_subdev_video_ops rawdump_subdev_video_ops = {
|
|
+ .s_stream = fe_rawdump_subdev_video_s_stream,
|
|
+};
|
|
+
|
|
+//static struct v4l2_subdev_video_ops csi_subdev_video_ops = {
|
|
+// .s_stream = csi_subdev_video_s_stream,
|
|
+//};
|
|
+
|
|
+static struct v4l2_subdev_ops fe_rawdump_subdev_ops = {
|
|
+ .core = &isp_subdev_core_ops,
|
|
+ .pad = &rawdump_subdev_pad_ops,
|
|
+ .video = &rawdump_subdev_video_ops,
|
|
+};
|
|
+
|
|
+static struct v4l2_subdev_ops fe_offline_channel_subdev_ops = {
|
|
+ .core = &isp_subdev_core_ops,
|
|
+ .pad = &offline_channel_subdev_pad_ops,
|
|
+};
|
|
+
|
|
+static struct v4l2_subdev_ops fe_formatter_subdev_ops = {
|
|
+ .core = &isp_subdev_core_ops,
|
|
+ .pad = &formatter_subdev_pad_ops,
|
|
+ .video = &formatter_subdev_video_ops,
|
|
+};
|
|
+
|
|
+static struct v4l2_subdev_ops fe_dwt_subdev_ops = {
|
|
+ .core = &isp_subdev_core_ops,
|
|
+ .pad = &dwt_subdev_pad_ops,
|
|
+};
|
|
+
|
|
+static struct v4l2_subdev_ops fe_pipe_subdev_ops = {
|
|
+ .core = &isp_subdev_core_ops,
|
|
+ .pad = &pipe_subdev_pad_ops,
|
|
+ .video = &pipe_subdev_video_ops,
|
|
+};
|
|
+
|
|
+static struct v4l2_subdev_ops fe_hdr_combine_subdev_ops = {
|
|
+ .core = &isp_subdev_core_ops,
|
|
+ .pad = &hdr_combine_subdev_pad_ops,
|
|
+};
|
|
+
|
|
+static struct v4l2_subdev_ops csi_subdev_ops = {
|
|
+ .core = &csi_subdev_core_ops,
|
|
+ .pad = &csi_subdev_pad_ops,
|
|
+ //.video = &csi_subdev_video_ops,
|
|
+};
|
|
+
|
|
+static void fe_pipe_subdev_release(struct spm_camera_subdev *sc_subdev)
|
|
+{
|
|
+ struct isp_context *isp_ctx = NULL;
|
|
+ struct fe_pipe *pipe = container_of(sc_subdev, struct fe_pipe, sc_subdev);
|
|
+
|
|
+ isp_ctx = spm_subdev_get_drvdata(sc_subdev);
|
|
+ isp_ctx->pipes[pipe->idx] = NULL;
|
|
+}
|
|
+
|
|
+static void fe_rawdump_subdev_release(struct spm_camera_subdev *sc_subdev)
|
|
+{
|
|
+ struct isp_context *isp_ctx = NULL;
|
|
+ struct fe_rawdump *rawdump = container_of(sc_subdev, struct fe_rawdump, sc_subdev);
|
|
+ isp_ctx = spm_subdev_get_drvdata(sc_subdev);
|
|
+ isp_ctx->rawdumps[rawdump->idx] = NULL;
|
|
+}
|
|
+
|
|
+static void fe_formatter_subdev_release(struct spm_camera_subdev *sc_subdev)
|
|
+{
|
|
+ struct isp_context *isp_ctx = NULL;
|
|
+ struct fe_formatter *formatter = container_of(sc_subdev, struct fe_formatter, sc_subdev);
|
|
+
|
|
+ isp_ctx = spm_subdev_get_drvdata(sc_subdev);
|
|
+ isp_ctx->formatters[formatter->idx] = NULL;
|
|
+}
|
|
+
|
|
+static int fe_isp_media_link_setup(struct media_entity *entity,
|
|
+ const struct media_pad *local,
|
|
+ const struct media_pad *remote, u32 flags)
|
|
+{
|
|
+ struct spm_camera_subdev *sc_subdev = media_entity_to_sc_subdev(entity);
|
|
+ struct csi *csi = media_entity_to_csi(entity);
|
|
+ struct spm_camera_vnode *sc_vnode = media_entity_to_sc_vnode(remote->entity);
|
|
+ int ret = 0, irq = 0;
|
|
+ struct isp_context *isp_ctx = NULL;
|
|
+ struct platform_device *pdev = NULL;
|
|
+ struct device *dev = NULL;
|
|
+
|
|
+ BUG_ON(!sc_subdev);
|
|
+ isp_ctx = spm_subdev_get_drvdata(sc_subdev);
|
|
+ pdev = isp_ctx->pdev;
|
|
+ dev = &pdev->dev;
|
|
+ if (sc_vnode) {
|
|
+ if (flags & MEDIA_LNK_FL_ENABLED) {
|
|
+ if (!csi) {
|
|
+ spm_camera_block_set_base_addr(SC_BLOCK(sc_vnode), isp_ctx->base_addr + SPACEMIT_ISP_DMA_OFFSET);
|
|
+ cam_dbg("install baseaddr(0x%08lx) for vnode(%s)", isp_ctx->base_addr + SPACEMIT_ISP_DMA_OFFSET, sc_vnode->name);
|
|
+ if (!isp_ctx->dma_block) {
|
|
+ isp_ctx->dma_block = SC_BLOCK(sc_vnode);
|
|
+ irq = platform_get_irq_byname(pdev, "feisp-dma-irq");
|
|
+ if (irq < 0) {
|
|
+ cam_err("get irq resource for feisp-dma failed ret=%d", irq);
|
|
+ return -1;
|
|
+ }
|
|
+ ret = devm_request_irq(dev, irq, fe_isp_dma_irq_handler, IRQF_SHARED, "feisp-dma", isp_ctx);
|
|
+ if (ret) {
|
|
+ cam_err("request irq for dma failed ret=%d", ret);
|
|
+ return ret;
|
|
+ }
|
|
+ }
|
|
+ }
|
|
+ ret = spm_vdev_register_vnode_notify(sc_vnode, &(sc_subdev->vnode_nb));
|
|
+ if (ret) {
|
|
+ cam_err("%s(%s) register notifier to vnode(%s) failed", __func__, sc_subdev->name, sc_vnode->name);
|
|
+ return ret;
|
|
+ }
|
|
+ cam_dbg("%s(%s) register notifier to vnode(%s)", __func__, sc_subdev->name, sc_vnode->name);
|
|
+ } else {
|
|
+ ret = spm_vdev_unregister_vnode_notify(sc_vnode, &(sc_subdev->vnode_nb));
|
|
+ if (ret) {
|
|
+ cam_err("%s(%s) unregister notifier to vnode(%s) failed", __func__, sc_subdev->name, sc_vnode->name);
|
|
+ return ret;
|
|
+ }
|
|
+ cam_dbg("%s(%s) unregister notifier to vnode(%s)", __func__, sc_subdev->name, sc_vnode->name);
|
|
+ if (sc_vnode->direction == SPACEMIT_VNODE_DIR_OUT) {
|
|
+ isp_ctx->dma_out_ctx[sc_vnode->idx].used_for_hdr = 0;
|
|
+ isp_ctx->dma_out_ctx[sc_vnode->idx].trig_dma_reload = 0;
|
|
+ }
|
|
+ }
|
|
+ }
|
|
+ return 0;
|
|
+}
|
|
+
|
|
+static int fe_rawdump_link_validate(struct media_link *link)
|
|
+{
|
|
+ int ret = 0;
|
|
+ struct media_entity *me = NULL;
|
|
+ struct fe_rawdump *rawdump = NULL;
|
|
+ struct media_pad *remote_pad_in = NULL, *remote_pad_out = NULL;
|
|
+ struct spm_camera_pipeline *sc_pipeline = NULL;
|
|
+ struct media_pipeline *mpipe = NULL;
|
|
+ struct device *dev = NULL;
|
|
+ struct k1xvi_platform_data *drvdata = NULL;
|
|
+ struct csi *csi = NULL;
|
|
+ struct spm_camera_vnode *sc_vnode = NULL;
|
|
+ struct fe_pipe *pipe = NULL;
|
|
+ struct isp_context *isp_ctx = NULL;
|
|
+ unsigned long flags = 0;
|
|
+
|
|
+ me = link->sink->entity;
|
|
+ rawdump = media_entity_to_rawdump(me);
|
|
+ if (!rawdump)
|
|
+ return -1;
|
|
+ isp_ctx = spm_subdev_get_drvdata(&rawdump->sc_subdev);
|
|
+ remote_pad_in = media_entity_remote_pad(&(rawdump->pads[PAD_IN]));
|
|
+ remote_pad_out = media_entity_remote_pad(&(rawdump->pads[PAD_OUT]));
|
|
+ dev = rawdump->sc_subdev.pcsd.sd.dev;
|
|
+ drvdata = dev_get_drvdata(dev);
|
|
+
|
|
+ if (!remote_pad_in)
|
|
+ return -2;
|
|
+ if (!remote_pad_out)
|
|
+ return -3;
|
|
+ csi = media_entity_to_csi(remote_pad_in->entity);
|
|
+ pipe = media_entity_to_pipe(remote_pad_in->entity);
|
|
+ sc_vnode = media_entity_to_sc_vnode(remote_pad_out->entity);
|
|
+ if (!csi) {
|
|
+ if ((rawdump->idx == 0 && !pipe) || rawdump->idx == 1)
|
|
+ return -4;
|
|
+ if (!pipe)
|
|
+ return -7;
|
|
+ remote_pad_in = media_entity_remote_pad(&(pipe->pads[PIPE_PAD_IN]));
|
|
+ if (!remote_pad_in)
|
|
+ return -8;
|
|
+ csi = media_entity_to_csi(remote_pad_in->entity);
|
|
+ if (!csi)
|
|
+ return -9;
|
|
+ }
|
|
+ if (!sc_vnode)
|
|
+ return -5;
|
|
+ mpipe = media_entity_pipeline(me);
|
|
+ if (!mpipe)
|
|
+ return -6;
|
|
+ sc_pipeline = media_pipeline_to_sc_pipeline(mpipe);
|
|
+ sc_pipeline->max_width[0] = FE_ISP_MAX_WIDTH;
|
|
+ sc_pipeline->max_height[0] = FE_ISP_MAX_HEIGHT;
|
|
+ sc_pipeline->min_width[0] = FE_ISP_MIN_WIDTH;
|
|
+ sc_pipeline->min_height[0] = FE_ISP_MIN_HEIGHT;
|
|
+ ret = blocking_notifier_chain_register(&sc_pipeline->blocking_notify_chain, &rawdump->pipeline_notify_block);
|
|
+ if (ret)
|
|
+ return ret;
|
|
+ if (drvdata->isp_firm)
|
|
+ sc_pipeline->ispfirm_ops = drvdata->isp_firm->ispfirm_ops;
|
|
+ else
|
|
+ sc_pipeline->ispfirm_ops = NULL;
|
|
+ sc_pipeline->sensor_ops = drvdata->sensor_ops;
|
|
+ spin_lock_irqsave(&sc_pipeline->slock, flags);
|
|
+ list_add(&isp_ctx->dma_out_ctx[sc_vnode->idx].frame_id.entry, &sc_pipeline->frame_id_list);
|
|
+ spin_unlock_irqrestore(&sc_pipeline->slock, flags);
|
|
+ if (pipe)
|
|
+ isp_ctx->dma_out_ctx[sc_vnode->idx].used_for_hdr = 1;
|
|
+ return 0;
|
|
+}
|
|
+
|
|
+static struct media_entity_operations rawdump_media_entity_ops = {
|
|
+ .link_setup = fe_isp_media_link_setup,
|
|
+ .link_validate = fe_rawdump_link_validate,
|
|
+};
|
|
+
|
|
+static int fe_offline_channel_link_validate(struct media_link *link)
|
|
+{
|
|
+ struct media_entity *me = link->sink->entity;
|
|
+ struct fe_offline_channel *offline_channel = media_entity_to_offline_channel(me);
|
|
+ struct media_pad *remote_pad_in = NULL, *remote_pad_p0out = NULL, *remote_pad_p1out = NULL;
|
|
+ struct spm_camera_vnode *sc_vnode = NULL;
|
|
+ struct fe_pipe *pipe = NULL;
|
|
+
|
|
+ BUG_ON(!offline_channel);
|
|
+ remote_pad_in = media_entity_remote_pad(&(offline_channel->pads[OFFLINE_CH_PAD_IN]));
|
|
+ remote_pad_p0out = media_entity_remote_pad(&(offline_channel->pads[OFFLINE_CH_PAD_P0OUT]));
|
|
+ remote_pad_p1out = media_entity_remote_pad(&(offline_channel->pads[OFFLINE_CH_PAD_P1OUT]));
|
|
+
|
|
+ if (!remote_pad_in)
|
|
+ return -1;
|
|
+ if (!remote_pad_p0out && !remote_pad_p1out)
|
|
+ return -2;
|
|
+
|
|
+ sc_vnode = media_entity_to_sc_vnode(remote_pad_in->entity);
|
|
+ if (!sc_vnode)
|
|
+ return -3;
|
|
+ if (remote_pad_p0out) {
|
|
+ pipe = media_entity_to_pipe(remote_pad_p0out->entity);
|
|
+ if (!pipe)
|
|
+ return -4;
|
|
+ }
|
|
+ if (remote_pad_p1out) {
|
|
+ pipe = media_entity_to_pipe(remote_pad_p1out->entity);
|
|
+ if (!pipe)
|
|
+ return -5;
|
|
+ }
|
|
+ return 0;
|
|
+}
|
|
+
|
|
+static struct media_entity_operations offline_channel_media_entity_ops = {
|
|
+ .link_setup = fe_isp_media_link_setup,
|
|
+ .link_validate = fe_offline_channel_link_validate,
|
|
+};
|
|
+
|
|
+static int fe_formatter_link_validate(struct media_link *link)
|
|
+{
|
|
+ struct media_entity *me = link->sink->entity;
|
|
+ struct fe_formatter *formatter = NULL;
|
|
+ struct media_pad *remote_pad_in = NULL, *remote_pad_aout = NULL;
|
|
+ struct media_pad *remote_pad_d1out = NULL, *remote_pad_d2out = NULL;
|
|
+ struct media_pad *remote_pad_d3out = NULL, *remote_pad_d4out = NULL;
|
|
+ struct spm_camera_vnode *sc_vnode = NULL;
|
|
+ struct v4l2_subdev *sd = NULL;
|
|
+ struct spm_camera_pipeline *sc_pipeline = NULL;
|
|
+ struct media_pipeline *mpipe = NULL;
|
|
+ struct isp_context *isp_ctx = NULL;
|
|
+ struct isp_pipeline_context *pipe_ctx = NULL;
|
|
+ //struct fe_pipe *pipe = NULL;
|
|
+ //struct spm_camera_sensor *sc_sensor = NULL;
|
|
+ int valid_link = 0;
|
|
+ unsigned int dma_start_cnt = 0;
|
|
+ unsigned long flags = 0;
|
|
+
|
|
+ formatter = media_entity_to_formatter(me);
|
|
+ if (!formatter)
|
|
+ return -100;
|
|
+ isp_ctx = spm_subdev_get_drvdata(&formatter->sc_subdev);
|
|
+
|
|
+ remote_pad_in = media_entity_remote_pad(&(formatter->pads[FMT_PAD_IN]));
|
|
+ remote_pad_aout = media_entity_remote_pad(&(formatter->pads[FMT_PAD_AOUT]));
|
|
+ remote_pad_d1out = media_entity_remote_pad(&(formatter->pads[FMT_PAD_D1OUT]));
|
|
+ remote_pad_d2out = media_entity_remote_pad(&(formatter->pads[FMT_PAD_D2OUT]));
|
|
+ remote_pad_d3out = media_entity_remote_pad(&(formatter->pads[FMT_PAD_D3OUT]));
|
|
+ remote_pad_d4out = media_entity_remote_pad(&(formatter->pads[FMT_PAD_D4OUT]));
|
|
+
|
|
+ if (remote_pad_in) {
|
|
+ if (remote_pad_aout && !remote_pad_d1out
|
|
+ && !remote_pad_d2out && !remote_pad_d3out
|
|
+ && !remote_pad_d4out) {
|
|
+ valid_link = 1;
|
|
+ dma_start_cnt = 1;
|
|
+ } else if (remote_pad_aout && remote_pad_d1out
|
|
+ && remote_pad_d2out && remote_pad_d3out
|
|
+ && remote_pad_d4out) {
|
|
+ valid_link = 2;
|
|
+ dma_start_cnt = DMA_START_CNT_WITH_DWT;
|
|
+ }
|
|
+ }
|
|
+ if (!valid_link)
|
|
+ return -1;
|
|
+ sd = media_entity_to_v4l2_subdev(remote_pad_in->entity);
|
|
+ if (SD_GRP(sd->grp_id) != FE_ISP)
|
|
+ return -2;
|
|
+ if (SD_SUB(sd->grp_id) != PIPE && SD_SUB(sd->grp_id) != HDR_COMBINE)
|
|
+ return -3;
|
|
+
|
|
+ sc_vnode = media_entity_to_sc_vnode(remote_pad_aout->entity);
|
|
+ if (!sc_vnode)
|
|
+ return -4;
|
|
+ if (2 == valid_link) {
|
|
+ if (!is_subdev(remote_pad_d1out->entity))
|
|
+ return -5;
|
|
+ sd = media_entity_to_v4l2_subdev(remote_pad_d1out->entity);
|
|
+ if (SD_GRP(sd->grp_id) != FE_ISP)
|
|
+ return -6;
|
|
+ if (SD_SUB(sd->grp_id) != DWT0 && SD_SUB(sd->grp_id) != DWT1)
|
|
+ return -7;
|
|
+ if (!is_subdev(remote_pad_d2out->entity))
|
|
+ return -8;
|
|
+ sd = media_entity_to_v4l2_subdev(remote_pad_d2out->entity);
|
|
+ if (SD_GRP(sd->grp_id) != FE_ISP)
|
|
+ return -9;
|
|
+ if (SD_SUB(sd->grp_id) != DWT0 && SD_SUB(sd->grp_id) != DWT1)
|
|
+ return -10;
|
|
+ if (!is_subdev(remote_pad_d3out->entity))
|
|
+ return -11;
|
|
+ sd = media_entity_to_v4l2_subdev(remote_pad_d3out->entity);
|
|
+ if (SD_GRP(sd->grp_id) != FE_ISP)
|
|
+ return -12;
|
|
+ if (SD_SUB(sd->grp_id) != DWT0 && SD_SUB(sd->grp_id) != DWT1)
|
|
+ return -13;
|
|
+ if (!is_subdev(remote_pad_d4out->entity))
|
|
+ return -14;
|
|
+ sd = media_entity_to_v4l2_subdev(remote_pad_d4out->entity);
|
|
+ if (SD_GRP(sd->grp_id) != FE_ISP)
|
|
+ return -15;
|
|
+ if (SD_SUB(sd->grp_id) != DWT0 && SD_SUB(sd->grp_id) != DWT1)
|
|
+ return -16;
|
|
+ }
|
|
+ //pipe = media_entity_to_pipe(remote_pad_in->entity);
|
|
+ //if (pipe) {
|
|
+ // remote_pad_in = media_entity_remote_pad(&pipe->pads[PIPE_PAD_IN]);
|
|
+ // if (remote_pad_in) {
|
|
+ // sc_sensor = media_entity_to_sc_sensor(remote_pad_in->entity);
|
|
+ // if (sc_sensor && sc_sensor->idx == 3 && valid_link == 1) //tpg
|
|
+ // isp_ctx->dma_out_ctx[sc_vnode->idx].trig_dma_reload = 1;
|
|
+ // }
|
|
+ //}
|
|
+ mpipe = media_entity_pipeline(me);
|
|
+ if (!mpipe)
|
|
+ return -17;
|
|
+ sc_pipeline = media_pipeline_to_sc_pipeline(mpipe);
|
|
+ mutex_lock(&sc_pipeline->mlock);
|
|
+ if (!sc_pipeline->usr_data) {
|
|
+ pipe_ctx = fe_pipeline_create_ctx(formatter->sc_subdev.pcsd.sd.dev);
|
|
+ if (!pipe_ctx) {
|
|
+ mutex_unlock(&sc_pipeline->mlock);
|
|
+ return -18;
|
|
+ }
|
|
+ sc_pipeline->usr_data = pipe_ctx;
|
|
+ } else {
|
|
+ pipe_ctx = (struct isp_pipeline_context *)sc_pipeline->usr_data;
|
|
+ }
|
|
+ mutex_unlock(&sc_pipeline->mlock);
|
|
+ spin_lock_irqsave(&sc_pipeline->slock, flags);
|
|
+ list_add(&isp_ctx->dma_out_ctx[sc_vnode->idx].frame_id.entry, &sc_pipeline->frame_id_list);
|
|
+ pipe_ctx->fmt_wdma_start_cnt[formatter->idx] = dma_start_cnt;
|
|
+ spin_unlock_irqrestore(&sc_pipeline->slock, flags);
|
|
+ return 0;
|
|
+}
|
|
+
|
|
+static struct media_entity_operations formatter_media_entity_ops = {
|
|
+ .link_setup = fe_isp_media_link_setup,
|
|
+ .link_validate = fe_formatter_link_validate,
|
|
+};
|
|
+
|
|
/*
 * Validate a media link whose sink is a DWT (wavelet downscale) entity.
 *
 * Topology requirements checked here:
 *   - the DWT sink pad must be driven by a v4l2 subdev of the FE_ISP
 *     group whose sub-id is FORMATTER;
 *   - the DWT source pad must be connected to a video node
 *     (spm_camera_vnode);
 *   - the entity must already belong to a media pipeline.
 *
 * Side effect on success: the frame-id tracker of the vnode's DMA output
 * context is linked into the pipeline's frame_id_list (under the
 * pipeline spinlock) so this output participates in frame-id accounting.
 *
 * Returns 0 on success or a driver-private negative code identifying the
 * failed check (these are not errno values).
 */
static int fe_dwt_link_validate(struct media_link *link)
{
	struct media_entity *me = link->sink->entity;
	struct fe_dwt *dwt = media_entity_to_dwt(me);
	struct media_pad *remote_pad_in = NULL, *remote_pad_out = NULL;
	struct spm_camera_vnode *sc_vnode = NULL;
	struct v4l2_subdev *sd = NULL;
	struct spm_camera_pipeline *sc_pipeline = NULL;
	struct media_pipeline *mpipe = NULL;
	struct isp_context *isp_ctx = NULL;
	unsigned long flags = 0;

	/* -999: sink entity is not actually a DWT (container lookup failed) */
	if (!dwt) {
		return -999;
	}
	isp_ctx = spm_subdev_get_drvdata(&dwt->sc_subdev);
	remote_pad_in = media_entity_remote_pad(&(dwt->pads[PAD_IN]));
	remote_pad_out = media_entity_remote_pad(&(dwt->pads[PAD_OUT]));

	/* both sides of the DWT must be connected */
	if (!remote_pad_in)
		return -1;
	if (!remote_pad_out)
		return -2;

	/* output must go to a video node */
	sc_vnode = media_entity_to_sc_vnode(remote_pad_out->entity);
	if (!sc_vnode)
		return -3;
	/* input must come from a FE_ISP formatter subdev */
	if (!is_subdev(remote_pad_in->entity))
		return -4;
	sd = media_entity_to_v4l2_subdev(remote_pad_in->entity);
	if (SD_GRP(sd->grp_id) != FE_ISP)
		return -5;
	if (SD_SUB(sd->grp_id) != FORMATTER)
		return -6;
	mpipe = media_entity_pipeline(me);
	if (!mpipe)
		return -7;
	sc_pipeline = media_pipeline_to_sc_pipeline(mpipe);
	/* register this output's frame-id node with the pipeline */
	spin_lock_irqsave(&sc_pipeline->slock, flags);
	list_add(&isp_ctx->dma_out_ctx[sc_vnode->idx].frame_id.entry, &sc_pipeline->frame_id_list);
	spin_unlock_irqrestore(&sc_pipeline->slock, flags);

	return 0;
}
|
|
+
|
|
+static struct media_entity_operations dwt_media_entity_ops = {
|
|
+ .link_setup = fe_isp_media_link_setup,
|
|
+ .link_validate = fe_dwt_link_validate,
|
|
+};
|
|
+
|
|
+static int fe_pipe_link_validate(struct media_link *link)
|
|
+{
|
|
+ struct media_entity *me = NULL;
|
|
+ struct fe_pipe *pipe = NULL;
|
|
+ struct media_pad *remote_pad_in = NULL, *remote_pad_out = NULL;
|
|
+ int out_link_num = 0, i = 0, ret = 0;
|
|
+ struct csi *csi = NULL;
|
|
+ struct fe_offline_channel *offline_channel = NULL;
|
|
+ struct fe_hdr_combine *hdr_combine = NULL;
|
|
+ struct fe_formatter *formatter = NULL;
|
|
+ struct fe_rawdump *rawdump = NULL;
|
|
+ struct spm_camera_pipeline *sc_pipeline = NULL;
|
|
+ struct media_pipeline *mpipe = NULL;
|
|
+ struct device *dev = NULL;
|
|
+ struct k1xvi_platform_data *drvdata = NULL;
|
|
+ struct isp_context *isp_ctx = NULL;
|
|
+ unsigned long flags = 0;
|
|
+
|
|
+ me = link->sink->entity;
|
|
+ pipe = media_entity_to_pipe(me);
|
|
+ if (!pipe)
|
|
+ return -100;
|
|
+ isp_ctx = spm_subdev_get_drvdata(&pipe->sc_subdev);
|
|
+ dev = pipe->sc_subdev.pcsd.sd.dev;
|
|
+ drvdata = dev_get_drvdata(dev);
|
|
+ remote_pad_in = media_entity_remote_pad(&(pipe->pads[PIPE_PAD_IN]));
|
|
+ if (!remote_pad_in)
|
|
+ return -1;
|
|
+ csi = media_entity_to_csi(remote_pad_in->entity);
|
|
+ offline_channel = media_entity_to_offline_channel(remote_pad_in->entity);
|
|
+ if (!csi && !offline_channel)
|
|
+ return -2;
|
|
+ for (i = 1; i < PIPE_PAD_NUM; i++) {
|
|
+ remote_pad_out = media_entity_remote_pad(&(pipe->pads[i]));
|
|
+ if (remote_pad_out) {
|
|
+ if (i == PIPE_PAD_HDROUT) {
|
|
+ hdr_combine = media_entity_to_hdr_combine(remote_pad_out->entity);
|
|
+ if (!hdr_combine)
|
|
+ return -3;
|
|
+ } else if (i == PIPE_PAD_RAWDUMP0OUT) {
|
|
+ rawdump = media_entity_to_rawdump(remote_pad_out->entity);
|
|
+ if (!rawdump)
|
|
+ return -4;
|
|
+ if (rawdump->idx != 0)
|
|
+ return -5;
|
|
+ } else {
|
|
+ formatter = media_entity_to_formatter(remote_pad_out->entity);
|
|
+ if (!formatter)
|
|
+ return -6;
|
|
+ }
|
|
+ out_link_num++;
|
|
+ }
|
|
+ }
|
|
+ if (out_link_num <= 0)
|
|
+ return -7;
|
|
+ if (pipe->idx == 0) {
|
|
+ if (!hdr_combine && rawdump)
|
|
+ return -8;
|
|
+ } else if (hdr_combine && out_link_num > 1) {
|
|
+ return -6;
|
|
+ }
|
|
+ mpipe = media_entity_pipeline(me);
|
|
+ if (!mpipe) {
|
|
+ return -7;
|
|
+ }
|
|
+ sc_pipeline = media_pipeline_to_sc_pipeline(mpipe);
|
|
+ sc_pipeline->max_width[0] = FE_ISP_MAX_WIDTH;
|
|
+ sc_pipeline->max_height[0] = FE_ISP_MAX_HEIGHT;
|
|
+ sc_pipeline->min_width[0] = FE_ISP_MIN_WIDTH;
|
|
+ sc_pipeline->min_height[0] = FE_ISP_MIN_HEIGHT;
|
|
+ ret = blocking_notifier_chain_register(&sc_pipeline->blocking_notify_chain, &pipe->pipeline_notify_block);
|
|
+ if (ret)
|
|
+ return ret;
|
|
+ cam_dbg("%s register pipe(%d) notify block to pipeline blocking notify chain", __func__, pipe->idx);
|
|
+ if (drvdata->isp_firm)
|
|
+ sc_pipeline->ispfirm_ops = drvdata->isp_firm->ispfirm_ops;
|
|
+ else
|
|
+ sc_pipeline->ispfirm_ops = NULL;
|
|
+ sc_pipeline->sensor_ops = drvdata->sensor_ops;
|
|
+ if (hdr_combine) {
|
|
+ if (pipe->idx == 0)
|
|
+ sc_pipeline->id = MAKE_SC_PIPELINE_ID(PIPELINE_TYPE_HDR, pipe->idx);
|
|
+ } else {
|
|
+ sc_pipeline->id = MAKE_SC_PIPELINE_ID(PIPELINE_TYPE_SINGLE, pipe->idx);
|
|
+ }
|
|
+ spin_lock_irqsave(&sc_pipeline->slock, flags);
|
|
+ list_add(&(isp_ctx->pipe_frame_id[pipe->idx].entry), &(sc_pipeline->frame_id_list));
|
|
+ spin_unlock_irqrestore(&sc_pipeline->slock, flags);
|
|
+ return 0;
|
|
+}
|
|
+
|
|
+static struct media_entity_operations pipe_media_entity_ops = {
|
|
+ .link_validate = fe_pipe_link_validate,
|
|
+};
|
|
+
|
|
+static int fe_hdr_combine_link_validate(struct media_link *link)
|
|
+{
|
|
+ struct media_entity *me = link->sink->entity;
|
|
+ struct fe_hdr_combine *hdr_combine = NULL;
|
|
+ struct media_pad *remote_pad = NULL;
|
|
+ int in_link_num = 0, out_link_num = 0, i = 0;
|
|
+ struct fe_pipe *pipe = NULL;
|
|
+ struct fe_formatter *formatter = NULL;
|
|
+
|
|
+ hdr_combine = media_entity_to_hdr_combine(me);
|
|
+ if (!hdr_combine) {
|
|
+ return -1;
|
|
+ }
|
|
+ for (i = 0; i < HDR_COMBINE_PAD_NUM; i++) {
|
|
+ remote_pad = media_entity_remote_pad(&(hdr_combine->pads[i]));
|
|
+ if (remote_pad) {
|
|
+ if (i <= HDR_PAD_P1IN) {
|
|
+ pipe = media_entity_to_pipe(remote_pad->entity);
|
|
+ if (pipe)
|
|
+ in_link_num++;
|
|
+ } else {
|
|
+ formatter = media_entity_to_formatter(remote_pad->entity);
|
|
+ if (formatter)
|
|
+ out_link_num++;
|
|
+ }
|
|
+ }
|
|
+ }
|
|
+ if (in_link_num < 2)
|
|
+ return -2;
|
|
+ if (out_link_num < 1)
|
|
+ return -3;
|
|
+
|
|
+ return 0;
|
|
+}
|
|
+
|
|
+static struct media_entity_operations hdr_combine_media_entity_ops = {
|
|
+ .link_validate = fe_hdr_combine_link_validate,
|
|
+};
|
|
+
|
|
+static int csi_link_validate(struct media_link *link)
|
|
+{
|
|
+ struct media_entity *me = link->sink->entity;
|
|
+ struct csi *csi = media_entity_to_csi(me);
|
|
+ struct spm_camera_pipeline *sc_pipeline = NULL;
|
|
+ struct media_pipeline *mpipe = NULL;
|
|
+ int ret = 0;
|
|
+
|
|
+ BUG_ON(!csi);
|
|
+ mpipe = media_entity_pipeline(me);
|
|
+ BUG_ON(!mpipe);
|
|
+ sc_pipeline = media_pipeline_to_sc_pipeline(mpipe);
|
|
+ BUG_ON(!sc_pipeline);
|
|
+ ret = blocking_notifier_chain_register(&sc_pipeline->blocking_notify_chain, &csi->pipeline_notify_block);
|
|
+ if (ret)
|
|
+ return -9;
|
|
+ return 0;
|
|
+}
|
|
+
|
|
+static struct media_entity_operations csi_media_entity_ops = {
|
|
+ .link_setup = fe_isp_media_link_setup,
|
|
+ .link_validate = csi_link_validate,
|
|
+};
|
|
+
|
|
/*
 * Subdev ioctl dispatcher shared by all fe_isp entities (pipe, rawdump,
 * formatter, dwt, offline channel, csi).
 *
 * @sd:  the v4l2 subdev the ioctl was issued on
 * @cmd: one of the driver-private VIDIOC_* codes handled below
 * @arg: command-specific payload (already copied into kernel space)
 *
 * Returns 0 on success, a negative value on failure, or -ENOIOCTLCMD
 * for unknown commands.  Note some paths return bare -1 rather than an
 * errno value (kept as-is; callers treat any negative as failure).
 */
static long fe_isp_subdev_ioctl(struct v4l2_subdev *sd, unsigned int cmd, void *arg)
{
	struct v4l2_vi_port_cfg *v4l2_port_cfg = NULL;
	struct vi_port_cfg *port_cfg = NULL;
	struct v4l2_vi_input_interface *input_intf = NULL;
	struct device *dev = sd->dev;
	struct isp_context *isp_ctx = v4l2_get_subdevdata(sd);
	struct spm_camera_pipeline *sc_pipeline = NULL;
	struct v4l2_vi_dbg_reg *dbg_reg = NULL;
	struct entity_usrdata usrdata;
	struct media_entity *me = NULL;
	struct spm_camera_sensor *sc_sensor = NULL;
	struct media_pipeline *pipe = media_entity_pipeline(&sd->entity);
	unsigned int reg_val = 0, offset = 0, isp_fatal_error = 0;
	unsigned int *pipe_status = NULL;
	int ret = 0, p0_overrun = 0, p1_overrun = 0;
	long l_ret = 0;

	switch (cmd) {
	case VIDIOC_G_PIPE_STATUS:
		/*
		 * Report 1 in *arg when a recorded fatal error (pipe
		 * overrun or DMA overlap) affects this entity's pipeline,
		 * 0 otherwise.  Without a pipeline the status is 0.
		 */
		pipe_status = (unsigned int *)arg;
		*pipe_status = 0;
		if (!pipe) {
			cam_warn("%s(VIDIOC_G_PIPE_STATUS): pipe was null", __func__);
			return 0;
		}
		sc_pipeline = media_pipeline_to_sc_pipeline(pipe);
		BUG_ON(!sc_pipeline);
		isp_fatal_error = isp_ctx->isp_fatal_error;
		if (isp_fatal_error & ISP_FATAL_ERR_PIPE0_OVERRUN)
			p0_overrun = 1;
		if (isp_fatal_error & ISP_FATAL_ERR_PIPE1_OVERRUN)
			p1_overrun = 1;
		if (isp_fatal_error & ISP_FATAL_ERR_DMA_OVERLAP) {
			cam_err("fatal error: dma overlap");
			*pipe_status = 1;
		}
		/*
		 * A single-pipe pipeline only cares about its own pipe's
		 * overrun flag; an HDR pipeline fails on either pipe.
		 */
		if (PIPELINE_TYPE_SINGLE == PIPELINE_TYPE(sc_pipeline->id)) {
			if ((PIPELINE_ID(sc_pipeline->id) == 0 && p0_overrun)
			    || (PIPELINE_ID(sc_pipeline->id) == 1 && p1_overrun))
				*pipe_status = 1;
		} else if (p0_overrun || p1_overrun) {
			*pipe_status = 1;
		}
		return 0;
		break;
	case VIDIOC_S_PORT_CFG:
		/*
		 * Cache a write-FIFO configuration for one entity.  The
		 * per-entity storage lives in the pipeline; fetch (or
		 * lazily allocate) it through the notify chain.
		 */
		BUG_ON(!pipe);
		sc_pipeline = media_pipeline_to_sc_pipeline(pipe);
		BUG_ON(!sc_pipeline);
		v4l2_port_cfg = (struct v4l2_vi_port_cfg *)arg;
		usrdata.entity_id = v4l2_port_cfg->port_entity_id;
		usrdata.usr_data = NULL;
		ret = blocking_notifier_call_chain(&sc_pipeline->blocking_notify_chain,
						   PIPELINE_ACTION_GET_ENTITY_USRDATA,
						   &usrdata);
		if (ret != NOTIFY_STOP) {
			cam_err("%s: get entity(%d) usrdata fail.", __func__, usrdata.entity_id);
			return -1;
		}
		if (!usrdata.usr_data) {
			/* first config for this entity: allocate and attach */
			port_cfg = devm_kzalloc(dev, sizeof(*port_cfg), GFP_KERNEL);
			if (!port_cfg) {
				cam_err("%s: no mem.", __func__);
				return -ENOMEM;
			}
			usrdata.usr_data = port_cfg;
			blocking_notifier_call_chain(&sc_pipeline->blocking_notify_chain,
						     PIPELINE_ACTION_SET_ENTITY_USRDATA,
						     &usrdata);
		} else {
			port_cfg = (struct vi_port_cfg *)usrdata.usr_data;
		}
		port_cfg->w_fifo_ctrl.offset = v4l2_port_cfg->offset;
		port_cfg->w_fifo_ctrl.depth = v4l2_port_cfg->depth;
		port_cfg->w_fifo_ctrl.weight = v4l2_port_cfg->weight;
		port_cfg->w_fifo_ctrl.div_mode = v4l2_port_cfg->div_mode;
		port_cfg->usage = v4l2_port_cfg->usage;
		return 0;
		break;
	case VIDIOC_CFG_INPUT_INTF:
		/*
		 * Select the VI input interface.  MIPI input requires a
		 * sensor entity in the pipeline and binds it to the given
		 * CCIC; offline input requires no sensor, and the slice
		 * variant flags the pipeline as slice mode.
		 */
		BUG_ON(!pipe);
		sc_pipeline = media_pipeline_to_sc_pipeline(pipe);
		BUG_ON(!sc_pipeline);
		input_intf = (struct v4l2_vi_input_interface *)arg;
		me = spm_mlink_find_sensor(&(sd->entity));
		sc_pipeline->is_slice_mode = 0;
		if (input_intf->type == VI_INPUT_INTERFACE_MIPI) {
			if (!me) {
				cam_err("config vi input interface(mipi), but no sensor entity founded");
				return -1;
			}
			sc_sensor = (struct spm_camera_sensor *)me;
			if (input_intf->ccic_idx >= CCIC_MAX_CNT) {
				cam_err("invalid ccic idx(%u)", input_intf->ccic_idx);
				return -EINVAL;
			}
			isp_ctx->ccic[input_intf->ccic_idx].sc_sensor = sc_sensor;
		} else {
			if (me) {
				cam_err("config vi input interface(offline), but sensor entity was founded");
				return -1;
			}
			if (input_intf->type == VI_INPUT_INTERFACE_OFFLINE_SLICE) {
				sc_pipeline->is_slice_mode = 1;
			}
		}
		return 0;
		break;
	case VIDIOC_S_BANDWIDTH:
		/* currently a no-op: ret is always 0 here */
		return ret;
		break;
	case VIDIOC_DBG_REG_READ:
		/*
		 * Debug read of an ISP register.  dbg_reg->addr is an
		 * offset relative to the mapped ISP base (the absolute
		 * address translation is commented out).
		 */
		dbg_reg = (struct v4l2_vi_dbg_reg *)arg;
		//offset = dbg_reg->addr - 0xa3430000;
		offset = dbg_reg->addr;
		dbg_reg->value = read32(isp_ctx->base_addr + offset);
		return 0;
		break;
	case VIDIOC_DBG_REG_WRITE:
		/*
		 * Debug write of an ISP register; a partial mask performs
		 * a read-modify-write of only the masked bits.
		 */
		dbg_reg = (struct v4l2_vi_dbg_reg *)arg;
		//offset = dbg_reg->addr - 0xa3430000;
		offset = dbg_reg->addr;
		if (dbg_reg->mask != 0xffffffff) {
			reg_val = read32(isp_ctx->base_addr + offset);
			reg_val &= ~(dbg_reg->mask);
			dbg_reg->value &= dbg_reg->mask;
			reg_val |= dbg_reg->value;
		} else
			reg_val = dbg_reg->value;
		write32(isp_ctx->base_addr + offset, reg_val);
		return 0;
		break;
	case VIDIOC_GLOBAL_RESET:
		/*
		 * fe_isp_global_reset() follows completion-wait
		 * conventions: 0 = timeout, <0 = interrupted by signal,
		 * >0 = reset completed.
		 */
		l_ret = fe_isp_global_reset(isp_ctx);
		if (l_ret == 0) {
			cam_err("%s global reset timeout", __func__);
			return -ETIME;
		} else if (l_ret < 0) {
			cam_err("%s global reset is interrupted by user app", __func__);
			return -1;
		} else {
			cam_dbg("%s global reset done", __func__);
		}
		return 0;
		break;
	case VIDIOC_FLUSH_BUFFERS:
		/* flush queued buffers; skipped in slice mode */
		BUG_ON(!pipe);
		sc_pipeline = media_pipeline_to_sc_pipeline(pipe);
		BUG_ON(!sc_pipeline);
		if (!sc_pipeline->is_slice_mode) {
			fe_isp_flush_pipeline_buffers(isp_ctx, sc_pipeline);
		}
		return 0;
		break;
	default:
		return -ENOIOCTLCMD;
	}
	/* unreachable: every case above returns */
	return -ENOIOCTLCMD;
}
|
|
+
|
|
+static void fe_isp_subdev_notify(struct spm_camera_subdev *sc_subdev,
|
|
+ unsigned int notification, void *arg)
|
|
+{
|
|
+ struct spm_camera_pipeline *sc_pipeline = NULL;
|
|
+ struct media_entity *me = &sc_subdev->pcsd.sd.entity;
|
|
+ struct k1xvi_platform_data *drvdata = NULL;
|
|
+ struct media_pipeline *mpipe = media_entity_pipeline(me);
|
|
+
|
|
+ if (mpipe) {
|
|
+ sc_pipeline = media_pipeline_to_sc_pipeline(mpipe);
|
|
+ }
|
|
+ drvdata = dev_get_drvdata(sc_subdev->pcsd.sd.dev);
|
|
+ switch (notification) {
|
|
+ case PLAT_SD_NOTIFY_REGISTER_ISPFIRM:
|
|
+ k1xvi_register_isp_firmware((struct isp_firm *)arg);
|
|
+ if (sc_pipeline && drvdata && drvdata->isp_firm)
|
|
+ sc_pipeline->ispfirm_ops = drvdata->isp_firm->ispfirm_ops;
|
|
+ break;
|
|
+ case PLAT_SD_NOTIFY_REGISTER_SENSOR_OPS:
|
|
+ k1xvi_register_sensor_ops((struct spm_camera_sensor_ops *)arg);
|
|
+ if (sc_pipeline)
|
|
+ sc_pipeline->sensor_ops = (struct spm_camera_sensor_ops *)arg;
|
|
+ break;
|
|
+ default:
|
|
+ break;
|
|
+ }
|
|
+}
|
|
+
|
|
+struct fe_pipe *fe_pipe_create(unsigned int grp_id, void *isp_ctx)
|
|
+{
|
|
+ char name[SPACEMIT_VI_ENTITY_NAME_LEN];
|
|
+ struct fe_pipe *pipe = NULL;
|
|
+ struct device *dev = NULL;
|
|
+ struct platform_device *pdev = NULL;
|
|
+ int ret = 0;
|
|
+ struct isp_context *isp_context = (struct isp_context *)isp_ctx;
|
|
+ int i = 0, irq = 0;
|
|
+
|
|
+ if (!isp_ctx) {
|
|
+ pr_err("%s invalid arguments.", __func__);
|
|
+ return NULL;
|
|
+ }
|
|
+ pdev = isp_context->pdev;
|
|
+ dev = &isp_context->pdev->dev;
|
|
+ if (SD_GRP(grp_id) != FE_ISP || SD_SUB(grp_id) != PIPE) {
|
|
+ cam_err("%s invalid grp_id(%u).", __func__, grp_id);
|
|
+ return NULL;
|
|
+ }
|
|
+ if (SD_IDX(grp_id) >= PIPE_NUM) {
|
|
+ cam_err("%s id(%d) is greater than %d.", __func__, SD_IDX(grp_id), PIPE_NUM - 1);
|
|
+ return NULL;
|
|
+ }
|
|
+ if (isp_context->pipes[SD_IDX(grp_id)]) {
|
|
+ cam_err("%s pipe%u had been already created before.",
|
|
+ __func__, SD_IDX(grp_id));
|
|
+ return NULL;
|
|
+ }
|
|
+ pipe = devm_kzalloc(dev, sizeof(*pipe), GFP_KERNEL);
|
|
+ if (!pipe) {
|
|
+ cam_err("%s not enough mem.", __func__);
|
|
+ return NULL;
|
|
+ }
|
|
+
|
|
+ pipe->idx = SD_IDX(grp_id);
|
|
+ pipe->pads[0].flags = MEDIA_PAD_FL_SINK;
|
|
+ for (i = 1; i < PIPE_PAD_NUM; i++) {
|
|
+ pipe->pads[i].flags = MEDIA_PAD_FL_SOURCE;
|
|
+ }
|
|
+ pipe->sc_subdev.pcsd.sd.entity.ops = &pipe_media_entity_ops;
|
|
+
|
|
+ snprintf(name, SPACEMIT_VI_ENTITY_NAME_LEN, "pipe%d", pipe->idx);
|
|
+ ret = spm_subdev_init(grp_id, name, 0, &fe_pipe_subdev_ops,
|
|
+ PIPE_PAD_NUM, pipe->pads, isp_ctx, &pipe->sc_subdev);
|
|
+ if (ret) {
|
|
+ cam_err("%s spm_subdev_init fail ret=%d.", __func__, ret);
|
|
+ goto pipe_sc_subdev_init_fail;
|
|
+ }
|
|
+ spm_camera_block_set_base_addr(&pipe->sc_subdev.sc_block, isp_context->base_addr + SPACEMIT_ISP_TOP0_OFFSET + pipe->idx * SPACEMIT_PIPE_OFFSET);
|
|
+ pipe->sc_subdev.ioctl = fe_isp_subdev_ioctl;
|
|
+ pipe->sc_subdev.release = fe_pipe_subdev_release;
|
|
+ pipe->sc_subdev.notify = fe_isp_subdev_notify;
|
|
+ pipe->sc_subdev.pcsd.sd.dev = dev;
|
|
+ pipe->pipeline_notify_block.notifier_call = fe_isp_pipeline_notifier_handler;
|
|
+ pipe->pipeline_notify_block.priority = SC_PIPE_NOTIFY_PRIO_NORMAL;
|
|
+ init_completion(&(pipe->close_done));
|
|
+ init_completion(&(pipe->sde_sof));
|
|
+ isp_context->pipes[pipe->idx] = pipe;
|
|
+ if (pipe->idx == (PIPE_NUM - 1)) {
|
|
+ irq = platform_get_irq_byname(pdev, "feisp-irq");
|
|
+ if (irq < 0) {
|
|
+ cam_err("get irq resource for feisp failed ret=%d", irq);
|
|
+ goto pipe_sc_subdev_init_fail;
|
|
+ }
|
|
+ ret = devm_request_irq(dev, irq, fe_isp_irq_handler, IRQF_SHARED, "feisp", isp_ctx);
|
|
+ if (ret) {
|
|
+ cam_err("request irq for isp failed ret=%d", ret);
|
|
+ goto pipe_sc_subdev_init_fail;
|
|
+ }
|
|
+ }
|
|
+ return pipe;
|
|
+pipe_sc_subdev_init_fail:
|
|
+ devm_kfree(dev, pipe);
|
|
+ return NULL;
|
|
+}
|
|
+
|
|
/*
 * Create and register the rawdump subdev identified by @grp_id.
 *
 * @grp_id:  packed group id; must decode to FE_ISP/RAWDUMP with an
 *           index below RAWDUMP_NUM.
 * @isp_ctx: the owning struct isp_context (opaque to callers).
 *
 * The new subdev is published into isp_context->rawdumps[] only as the
 * final step, after initialization fully succeeded.
 *
 * Returns the new rawdump, or NULL on failure.
 */
struct fe_rawdump *fe_rawdump_create(unsigned int grp_id, void *isp_ctx)
{
	char name[SPACEMIT_VI_ENTITY_NAME_LEN];
	struct fe_rawdump *rawdump = NULL;
	struct device *dev = NULL;
	int ret = 0;
	struct isp_context *isp_context = (struct isp_context *)isp_ctx;

	if (!isp_ctx) {
		pr_err("%s invalid arguments.", __func__);
		return NULL;
	}
	dev = &isp_context->pdev->dev;
	if (SD_GRP(grp_id) != FE_ISP || SD_SUB(grp_id) != RAWDUMP) {
		cam_err("%s invalid grp_id(%u).", __func__, grp_id);
		return NULL;
	}
	if (SD_IDX(grp_id) >= RAWDUMP_NUM) {
		cam_err("%s id(%d) is greater than %d.", __func__, SD_IDX(grp_id), RAWDUMP_NUM - 1);
		return NULL;
	}
	/* refuse duplicate creation for the same index */
	if (isp_context->rawdumps[SD_IDX(grp_id)]) {
		cam_err("%s rawdump with the same id(%u) had been already created before.",
			__func__, SD_IDX(grp_id));
		return NULL;
	}

	rawdump = devm_kzalloc(dev, sizeof(*rawdump), GFP_KERNEL);
	if (!rawdump) {
		cam_err("%s not enough mem.", __func__);
		return NULL;
	}

	/* pad 0 is the sink, pad 1 the source */
	rawdump->idx = SD_IDX(grp_id);
	rawdump->pads[0].flags = MEDIA_PAD_FL_SINK;
	rawdump->pads[1].flags = MEDIA_PAD_FL_SOURCE;
	rawdump->sc_subdev.pcsd.sd.entity.ops = &rawdump_media_entity_ops;

	snprintf(name, SPACEMIT_VI_ENTITY_NAME_LEN, "rawdump%d", rawdump->idx);
	ret = spm_subdev_init(grp_id, name, 0, &fe_rawdump_subdev_ops,
			      RAWDUMP_PAD_NUM, rawdump->pads, isp_ctx,
			      &rawdump->sc_subdev);
	if (ret) {
		cam_err("%s spm_subdev_init fail ret=%d.", __func__, ret);
		goto rawdump_sc_subdev_init_fail;
	}
	/* rawdump registers sit in the ISP_TOP0 window */
	spm_camera_block_set_base_addr(&rawdump->sc_subdev.sc_block,
				       isp_context->base_addr +
				       SPACEMIT_ISP_TOP0_OFFSET);
	rawdump->sc_subdev.ioctl = fe_isp_subdev_ioctl;
	rawdump->sc_subdev.release = fe_rawdump_subdev_release;
	rawdump->sc_subdev.vnode_nb.notifier_call = fe_isp_vnode_notifier_handler;
	rawdump->sc_subdev.pcsd.sd.dev = dev;
	rawdump->pipeline_notify_block.notifier_call = fe_isp_pipeline_notifier_handler;
	rawdump->pipeline_notify_block.priority = SC_PIPE_NOTIFY_PRIO_NORMAL;
	atomic_set(&rawdump->close_done, 0);
	isp_context->rawdumps[rawdump->idx] = rawdump;
	return rawdump;
rawdump_sc_subdev_init_fail:
	devm_kfree(dev, rawdump);
	return NULL;
}
|
|
+
|
|
/*
 * Create and register an offline channel subdev identified by @grp_id.
 *
 * @grp_id:  packed group id; must decode to FE_ISP/OFFLINE_CHANNEL with
 *           an index below OFFLINE_CH_NUM.
 * @isp_ctx: the owning struct isp_context (opaque to callers).
 *
 * Returns the new channel, or NULL on failure.
 */
struct fe_offline_channel *fe_offline_channel_create(unsigned int grp_id, void *isp_ctx)
{
	char name[SPACEMIT_VI_ENTITY_NAME_LEN];
	struct fe_offline_channel *offline_channel = NULL;
	struct device *dev = NULL;
	struct isp_context *isp_context = isp_ctx;
	int ret = 0;

	if (!isp_ctx) {
		pr_err("%s invalid arguments.", __func__);
		return NULL;
	}
	dev = &isp_context->pdev->dev;
	if (SD_GRP(grp_id) != FE_ISP || SD_SUB(grp_id) != OFFLINE_CHANNEL) {
		cam_err("%s invalid grp_id(%u).", __func__, grp_id);
		return NULL;
	}
	if (SD_IDX(grp_id) >= OFFLINE_CH_NUM) {
		cam_err("%s id(%d) is greater than %d.", __func__, SD_IDX(grp_id),
			OFFLINE_CH_NUM - 1);
		return NULL;
	}

	offline_channel = devm_kzalloc(dev, sizeof(*offline_channel), GFP_KERNEL);
	if (!offline_channel) {
		cam_err("%s not enough mem.", __func__);
		return NULL;
	}

	/* pad 0 sinks the offline input; pads 1-2 feed downstream */
	offline_channel->idx = SD_IDX(grp_id);
	offline_channel->pads[0].flags = MEDIA_PAD_FL_SINK;
	offline_channel->pads[1].flags = MEDIA_PAD_FL_SOURCE;
	offline_channel->pads[2].flags = MEDIA_PAD_FL_SOURCE;
	offline_channel->sc_subdev.pcsd.sd.entity.ops = &offline_channel_media_entity_ops;
	snprintf(name, SPACEMIT_VI_ENTITY_NAME_LEN, "offline_channel%d", offline_channel->idx);
	ret = spm_subdev_init(grp_id, name, 0, &fe_offline_channel_subdev_ops,
			      OFFLINE_CH_PAD_NUM, offline_channel->pads, isp_ctx, &offline_channel->sc_subdev);
	if (ret) {
		cam_err("%s spm_subdev_init fail ret=%d.", __func__, ret);
		goto offline_channel_sc_subdev_init_fail;
	}
	/* channel registers sit in the ISP_TOP0 window */
	spm_camera_block_set_base_addr(SC_BLOCK(offline_channel), isp_context->base_addr + SPACEMIT_ISP_TOP0_OFFSET);
	offline_channel->sc_subdev.ioctl = fe_isp_subdev_ioctl;
	offline_channel->sc_subdev.vnode_nb.notifier_call = fe_isp_vnode_notifier_handler;
	offline_channel->sc_subdev.pcsd.sd.dev = dev;
	return offline_channel;
offline_channel_sc_subdev_init_fail:
	devm_kfree(dev, offline_channel);
	return NULL;
}
|
|
+
|
|
/*
 * Create and register a formatter subdev identified by @grp_id.
 *
 * @grp_id:  packed group id; must decode to FE_ISP/FORMATTER with an
 *           index below FORMATTER_NUM.
 * @isp_ctx: the owning struct isp_context (opaque to callers).
 *
 * The new subdev is published into isp_context->formatters[] as the
 * final step, after initialization fully succeeded.
 *
 * Returns the new formatter, or NULL on failure.
 */
struct fe_formatter *fe_formatter_create(unsigned int grp_id, void *isp_ctx)
{
	char name[SPACEMIT_VI_ENTITY_NAME_LEN];
	struct fe_formatter *formatter = NULL;
	struct device *dev = NULL;
	struct isp_context *isp_context = isp_ctx;
	int ret = 0;

	if (!isp_ctx) {
		cam_err("%s invalid arguments.", __func__);
		return NULL;
	}
	dev = &isp_context->pdev->dev;
	if (SD_GRP(grp_id) != FE_ISP || SD_SUB(grp_id) != FORMATTER) {
		cam_err("%s invalid grp_id(%u).", __func__, grp_id);
		return NULL;
	}
	if (SD_IDX(grp_id) >= FORMATTER_NUM) {
		cam_err("%s id(%d) is greater than %d.", __func__, SD_IDX(grp_id),
			FORMATTER_NUM - 1);
		return NULL;
	}

	formatter = devm_kzalloc(dev, sizeof(*formatter), GFP_KERNEL);
	if (!formatter) {
		cam_err("%s not enough mem.", __func__);
		return NULL;
	}

	/* no DWT consumers attached yet */
	formatter->idx = SD_IDX(grp_id);
	atomic_set(&formatter->dwt_refcnt, 0);
	/* pad 0 sinks the pipe output; pads 1-5 are source pads */
	formatter->pads[0].flags = MEDIA_PAD_FL_SINK;
	formatter->pads[1].flags = MEDIA_PAD_FL_SOURCE;
	formatter->pads[2].flags = MEDIA_PAD_FL_SOURCE;
	formatter->pads[3].flags = MEDIA_PAD_FL_SOURCE;
	formatter->pads[4].flags = MEDIA_PAD_FL_SOURCE;
	formatter->pads[5].flags = MEDIA_PAD_FL_SOURCE;
	snprintf(name, SPACEMIT_VI_ENTITY_NAME_LEN, "formatter%d", formatter->idx);
	ret =
	    spm_subdev_init(grp_id, name, 0, &fe_formatter_subdev_ops,
			    FORMATTER_PAD_NUM, formatter->pads, isp_ctx,
			    &formatter->sc_subdev);
	if (ret) {
		cam_err("%s spm_subdev_init fail ret=%d.", __func__, ret);
		goto formatter_sc_subdev_init_fail;
	}
	formatter->sc_subdev.pcsd.sd.entity.ops = &formatter_media_entity_ops;
	/* formatter registers sit in the post-pipe window */
	spm_camera_block_set_base_addr(SC_BLOCK(formatter), isp_context->base_addr + SPACEMIT_POSTPIPE_OFFSET);
	formatter->sc_subdev.ioctl = fe_isp_subdev_ioctl;
	formatter->sc_subdev.release = fe_formatter_subdev_release;
	formatter->sc_subdev.vnode_nb.notifier_call = fe_isp_vnode_notifier_handler;
	formatter->sc_subdev.pcsd.sd.dev = dev;
	isp_context->formatters[formatter->idx] = formatter;
	return formatter;
formatter_sc_subdev_init_fail:
	devm_kfree(dev, formatter);
	return NULL;
}
|
|
+
|
|
/*
 * Create and register a DWT-layer subdev identified by @grp_id.
 *
 * @grp_id:  packed group id; the sub-id selects the DWT unit (DWT0 or
 *           DWT1) and the index selects the layer within that unit.
 * @isp_ctx: the owning struct isp_context (opaque to callers).
 *
 * Returns the new dwt, or NULL on failure.
 */
struct fe_dwt *fe_dwt_create(unsigned int grp_id, void *isp_ctx)
{
	char name[SPACEMIT_VI_ENTITY_NAME_LEN];
	struct fe_dwt *dwt = NULL;
	struct device *dev = NULL;
	struct isp_context *isp_context = isp_ctx;
	int ret = 0;

	if (!isp_ctx) {
		cam_err("%s invalid arguments.", __func__);
		return NULL;
	}
	dev = &isp_context->pdev->dev;
	if (SD_GRP(grp_id) != FE_ISP || !(SD_SUB(grp_id) == DWT0 || SD_SUB(grp_id) == DWT1)) {
		cam_err("%s invalid grp_id(%u).", __func__, grp_id);
		return NULL;
	}
	/*
	 * NOTE(review): this uses '>' where the sibling creators use '>=',
	 * so layer index == DWT_LAYER_NUM is accepted here.  Presumably
	 * layer ids are 1-based for DWT — TODO confirm against the
	 * DWT_LAYER_NUM definition.
	 */
	if (SD_IDX(grp_id) > DWT_LAYER_NUM) {
		cam_err("%s layer id(%d) is greater than %d.", __func__, SD_IDX(grp_id), DWT_LAYER_NUM);
		return NULL;
	}

	dwt = devm_kzalloc(dev, sizeof(*dwt), GFP_KERNEL);
	if (!dwt) {
		cam_err("%s not enough mem.", __func__);
		return NULL;
	}

	/* unit index derives from the sub-id; layer index from SD_IDX */
	dwt->idx = SD_SUB(grp_id) - DWT0;
	dwt->layer_idx = SD_IDX(grp_id);
	dwt->pads[0].flags = MEDIA_PAD_FL_SINK;
	dwt->pads[1].flags = MEDIA_PAD_FL_SOURCE;
	snprintf(name, SPACEMIT_VI_ENTITY_NAME_LEN, "dwt%d_layer%d", dwt->idx, dwt->layer_idx);
	ret = spm_subdev_init(grp_id, name, 0, &fe_dwt_subdev_ops, DWT_PAD_NUM, dwt->pads, isp_ctx, &dwt->sc_subdev);
	if (ret) {
		cam_err("%s spm_subdev_init fail ret=%d.", __func__, ret);
		goto dwt_sc_subdev_init_fail;
	}
	dwt->sc_subdev.pcsd.sd.entity.ops = &dwt_media_entity_ops;
	/* DWT registers sit in the post-pipe window */
	spm_camera_block_set_base_addr(SC_BLOCK(dwt), isp_context->base_addr + SPACEMIT_POSTPIPE_OFFSET);
	dwt->sc_subdev.ioctl = fe_isp_subdev_ioctl;
	dwt->sc_subdev.vnode_nb.notifier_call = fe_isp_vnode_notifier_handler;
	dwt->sc_subdev.pcsd.sd.dev = dev;
	return dwt;
dwt_sc_subdev_init_fail:
	devm_kfree(dev, dwt);
	return NULL;
}
|
|
+
|
|
/*
 * Create and register the hdr_combine subdev identified by @grp_id.
 *
 * @grp_id:  packed group id; must decode to FE_ISP/HDR_COMBINE with an
 *           index below HDR_COMBINE_NUM.
 * @isp_ctx: the owning struct isp_context (opaque to callers).
 *
 * Returns the new hdr_combine, or NULL on failure.
 */
struct fe_hdr_combine *fe_hdr_combine_create(unsigned int grp_id, void *isp_ctx)
{
	char name[SPACEMIT_VI_ENTITY_NAME_LEN];
	struct fe_hdr_combine *hdr_combine = NULL;
	struct device *dev = NULL;
	struct isp_context *isp_context = isp_ctx;
	int ret = 0, i = 0;

	if (!isp_ctx) {
		cam_err("%s invalid arguments.", __func__);
		return NULL;
	}
	dev = &isp_context->pdev->dev;
	if (SD_GRP(grp_id) != FE_ISP || SD_SUB(grp_id) != HDR_COMBINE) {
		cam_err("%s invalid grp_id(%u).", __func__, grp_id);
		return NULL;
	}
	if (SD_IDX(grp_id) >= HDR_COMBINE_NUM) {
		cam_err("%s id(%d) is greater than %d.", __func__, SD_IDX(grp_id), HDR_COMBINE_NUM - 1);
		return NULL;
	}

	hdr_combine = devm_kzalloc(dev, sizeof(*hdr_combine), GFP_KERNEL);
	if (!hdr_combine) {
		cam_err("%s not enough mem.", __func__);
		return NULL;
	}

	/* P0/P1 input pads sink the two pipes; the rest feed formatters */
	for (i = HDR_PAD_P0IN; i <= HDR_PAD_P1IN; i++) {
		hdr_combine->pads[i].flags = MEDIA_PAD_FL_SINK;
	}
	for (i = HDR_PAD_F0OUT; i < HDR_COMBINE_PAD_NUM; i++) {
		hdr_combine->pads[i].flags = MEDIA_PAD_FL_SOURCE;
	}
	strlcpy(name, "hdr_combine", SPACEMIT_VI_ENTITY_NAME_LEN);
	ret = spm_subdev_init(grp_id, name, 0, &fe_hdr_combine_subdev_ops, HDR_COMBINE_PAD_NUM, hdr_combine->pads, isp_ctx, &hdr_combine->sc_subdev);
	if (ret) {
		cam_err("%s spm_subdev_init fail ret=%d.", __func__, ret);
		goto hdr_combine_sc_subdev_init_fail;
	}
	hdr_combine->sc_subdev.pcsd.sd.entity.ops = &hdr_combine_media_entity_ops;
	/* hdr_combine registers sit in the ISP_TOP0 window */
	spm_camera_block_set_base_addr(SC_BLOCK(hdr_combine), isp_context->base_addr + SPACEMIT_ISP_TOP0_OFFSET);
	hdr_combine->sc_subdev.ioctl = fe_isp_subdev_ioctl;
	hdr_combine->sc_subdev.pcsd.sd.dev = dev;
	return hdr_combine;
hdr_combine_sc_subdev_init_fail:
	devm_kfree(dev, hdr_combine);
	return NULL;
}
|
|
+
|
|
/*
 * Create and register a CSI subdev identified by @grp_id.
 *
 * @grp_id:  packed group id; group must be MIPI and the sub-id selects
 *           the channel type (CSI_MAIN or CSI_VCDT); index below
 *           CSI_NUM.
 * @isp_ctx: the owning struct isp_context (opaque to callers).
 *
 * Returns the new csi, or NULL on failure.
 */
struct csi *csi_create(unsigned int grp_id, void *isp_ctx)
{
	char name[SPACEMIT_VI_ENTITY_NAME_LEN];
	struct csi *csi = NULL;
	struct device *dev = NULL;
	struct isp_context *isp_context = isp_ctx;
	int i = 0, ret = 0;

	if (!isp_ctx) {
		cam_err("%s invalid arguments.", __func__);
		return NULL;
	}
	dev = &isp_context->pdev->dev;
	if (SD_GRP(grp_id) != MIPI || (SD_SUB(grp_id) != CSI_MAIN && SD_SUB(grp_id) != CSI_VCDT)) {
		cam_err("%s invalid grp_id(%u).", __func__, grp_id);
		return NULL;
	}
	if (SD_IDX(grp_id) >= CSI_NUM) {
		cam_err("%s id(%d) is greater than %d.", __func__, SD_IDX(grp_id), CSI_NUM - 1);
		return NULL;
	}

	csi = devm_kzalloc(dev, sizeof(*csi), GFP_KERNEL);
	if (!csi) {
		cam_err("%s not enough mem.", __func__);
		return NULL;
	}

	/* pad 0 sinks the sensor; the rawdump/output pads are sources */
	csi->pads[0].flags = MEDIA_PAD_FL_SINK;
	for (i = CSI_PAD_RAWDUMP0; i < CSI_PAD_NUM; i++) {
		csi->pads[i].flags = MEDIA_PAD_FL_SOURCE;
	}
	csi->idx = SD_IDX(grp_id);
	/* entity name encodes the channel type (main vs virtual-channel) */
	if (SD_SUB(grp_id) == CSI_MAIN) {
		csi->channel_type = CSI_MAIN;
		snprintf(name, SPACEMIT_VI_ENTITY_NAME_LEN, "csi%d_main", csi->idx);
	} else {
		csi->channel_type = CSI_VCDT;
		snprintf(name, SPACEMIT_VI_ENTITY_NAME_LEN, "csi%d_vcdt", csi->idx);
	}
	csi->sc_subdev.pcsd.sd.entity.ops = &csi_media_entity_ops;
	ret = spm_subdev_init(grp_id, name, 0, &csi_subdev_ops,
			      CSI_PAD_NUM, csi->pads, isp_ctx, &csi->sc_subdev);
	if (ret) {
		cam_err("%s spm_subdev_init fail ret=%d.", __func__, ret);
		goto csi_sc_subdev_init_fail;
	}
	csi->sc_subdev.ioctl = fe_isp_subdev_ioctl;
	csi->sc_subdev.vnode_nb.notifier_call = fe_isp_vnode_notifier_handler;
	csi->sc_subdev.pcsd.sd.dev = dev;
	csi->pipeline_notify_block.notifier_call = fe_isp_pipeline_notifier_handler;
	csi->pipeline_notify_block.priority = SC_PIPE_NOTIFY_PRIO_NORMAL;
	return csi;
csi_sc_subdev_init_fail:
	devm_kfree(dev, csi);
	return NULL;
}
|
|
+
|
|
+static struct frame_id *fe_isp_frame_id(struct list_head *frame_id_list)
|
|
+{
|
|
+ struct frame_id *pos = NULL, *max_frame_id = NULL;
|
|
+ max_frame_id = list_first_entry_or_null(frame_id_list, struct frame_id, entry);
|
|
+ if (max_frame_id == NULL)
|
|
+ return NULL;
|
|
+ list_for_each_entry(pos, frame_id_list, entry) {
|
|
+ if (pos->id >= max_frame_id->id)
|
|
+ max_frame_id = pos;
|
|
+ }
|
|
+
|
|
+ return max_frame_id;
|
|
+}
|
|
+
|
|
+static void fe_isp_reset_frame_id(struct spm_camera_pipeline *sc_pipeline)
|
|
+{
|
|
+ struct frame_id *pos = NULL;
|
|
+ unsigned long flags = 0;
|
|
+
|
|
+ if (!sc_pipeline)
|
|
+ return;
|
|
+ spin_lock_irqsave(&sc_pipeline->slock, flags);
|
|
+ list_for_each_entry(pos, &sc_pipeline->frame_id_list, entry) {
|
|
+ pos->id = 0;
|
|
+ }
|
|
+ spin_unlock_irqrestore(&sc_pipeline->slock, flags);
|
|
+}
|
|
+
|
|
/* Bit position of the per-pipe error field within the irq status word. */
#define PIPE_ERR_SHIFT (21)
/* Shift a per-pipe error value @a into its field in the status word. */
#define PIPE_ERR(a) ((a) << PIPE_ERR_SHIFT)
/* Consecutive DMA-overlap hits tolerated before treating it as fatal. */
#define DMA_OVERLAP_CNT_MAX (5)
|
|
+static irqreturn_t fe_isp_dma_irq_handler(int irq, void *dev_id)
|
|
+{
|
|
+ unsigned int status1 = 0, status2 = 0, irq_status = 0, isp_fatal_error = 0, tmp = 0;
|
|
+ unsigned int tmp_status1 = 0, tmp_status2 = 0;
|
|
+#ifdef CONFIG_SPACEMIT_K1X_VI_IOMMU
|
|
+ unsigned int mmu_irq_status = 0;
|
|
+ struct media_pipeline *pipe = NULL;
|
|
+#endif
|
|
+ int irq_src = 0, is_mix_hdr = 0, i = 0, p0_overrun = 0, p1_overrun = 0;
|
|
+ unsigned int *hw_err_code = NULL;
|
|
+ struct isp_context *isp_ctx = (struct isp_context *)dev_id;
|
|
+ struct isp_dma_context *dma_ctx = NULL, *dma_ctx_pos = NULL;
|
|
+ struct media_entity *me = NULL;
|
|
+ struct spm_camera_pipeline *sc_pipeline = NULL, *sc_pipelines[2] = {NULL, NULL};
|
|
+ struct media_pipeline *mpipe = NULL;
|
|
+ struct isp_pipeline_context *pipe_ctx = NULL;
|
|
+ struct fe_rawdump *rawdump = NULL;
|
|
+ struct media_pad *remote_pad = NULL;
|
|
+ struct frame_id *frame_id = NULL;
|
|
+ uint32_t frame_idx = 0;
|
|
+ struct spm_camera_vbuffer *pos = NULL;
|
|
+ struct isp_dma_work_struct *isp_dma_work = NULL;
|
|
+ struct dma_irq_data irq_data = { 0 };
|
|
+ struct k1xvi_platform_data *drvdata = platform_get_drvdata(isp_ctx->pdev);
|
|
+ struct spm_camera_ispfirm_ops *ispfirm_ops = NULL;
|
|
+ unsigned long tasklet_state[ISP_DMA_WORK_MAX_CNT];
|
|
+ int ret = 0;
|
|
+ static unsigned long print_jiffies = 0;
|
|
+
|
|
+ if (!isp_ctx->dma_block)
|
|
+ return IRQ_HANDLED;
|
|
+
|
|
+#ifdef CONFIG_SPACEMIT_K1X_VI_IOMMU
|
|
+ mmu_irq_status = isp_ctx->mmu_dev->ops->irq_status(isp_ctx->mmu_dev);
|
|
+ if (mmu_irq_status & MMU_RD_TIMEOUT) {
|
|
+ cam_err("isp iommu RD_Timeout_error_IRQ");
|
|
+ isp_ctx->mmu_dev->ops->dump_channel_regs(isp_ctx->mmu_dev, 3);
|
|
+ }
|
|
+ if (mmu_irq_status & MMU_WR_TIMEOUT) {
|
|
+ for (i = 0; i < PIPE_NUM; i++) {
|
|
+ pipe = media_entity_pipeline(&isp_ctx->pipes[i]->sc_subdev.pcsd.sd.entity);
|
|
+ if (pipe) {
|
|
+ sc_pipeline = media_pipeline_to_sc_pipeline(pipe);
|
|
+ spin_lock(&sc_pipeline->slock);
|
|
+ if (sc_pipeline->usr_data) {
|
|
+ pipe_ctx = (struct isp_pipeline_context*)sc_pipeline->usr_data;
|
|
+ pipe_ctx->mmu_tbu_reload = MMU_TBU_RELOAD_START;
|
|
+ }
|
|
+ spin_unlock(&sc_pipeline->slock);
|
|
+ }
|
|
+ }
|
|
+ cam_err("isp iommu WR_Timeout_error_IRQ");
|
|
+ isp_ctx->mmu_dev->ops->dump_channel_regs(isp_ctx->mmu_dev, 4);
|
|
+ }
|
|
+ for (i = 0; i < 16; i++) {
|
|
+ if (mmu_irq_status & (0x1 << i))
|
|
+ cam_err("isp iommu tbu%d/%d dma err", 2 * i, 2 * i + 1);
|
|
+ }
|
|
+#endif
|
|
+ isp_fatal_error = isp_ctx->isp_fatal_error;
|
|
+ if (isp_fatal_error & ISP_FATAL_ERR_PIPE0_OVERRUN)
|
|
+ p0_overrun = 1;
|
|
+ if (isp_fatal_error & ISP_FATAL_ERR_PIPE1_OVERRUN)
|
|
+ p1_overrun = 1;
|
|
+
|
|
+ if (drvdata && drvdata->isp_firm)
|
|
+ ispfirm_ops = drvdata->isp_firm->ispfirm_ops;
|
|
+ status1 = hw_dma_get_irq_status1(isp_ctx->dma_block);
|
|
+ if (status1)
|
|
+ hw_dma_clr_irq_status1(isp_ctx->dma_block, status1);
|
|
+ status2 = hw_dma_get_irq_status2(isp_ctx->dma_block);
|
|
+ if (status2)
|
|
+ hw_dma_clr_irq_status2(isp_ctx->dma_block, status2);
|
|
+
|
|
+ if (status2 & DMA_IRQ_OVERRUN) {
|
|
+ vi_irq_print("dma overrun occured!");
|
|
+ }
|
|
+ if (status2 & DMA_IRQ_OVERLAP) {
|
|
+ vi_irq_print("dma overlap occured!");
|
|
+ if (isp_ctx->dma_overlap_cnt++ >= DMA_OVERLAP_CNT_MAX) {
|
|
+ isp_ctx->dma_overlap_cnt = 0;
|
|
+ isp_ctx->isp_fatal_error |= ISP_FATAL_ERR_DMA_OVERLAP;
|
|
+ }
|
|
+ } else {
|
|
+ isp_ctx->dma_overlap_cnt = 0;
|
|
+ isp_ctx->isp_fatal_error &= ~ISP_FATAL_ERR_DMA_OVERLAP;
|
|
+ }
|
|
+ //if (status1 || status2)
|
|
+ // cam_dbg("dma irq status1=0x%08x status2=0x%08x", status1, status2);
|
|
+ for (irq_src = DMA_IRQ_SRC_WDMA_CH0; irq_src <= DMA_IRQ_SRC_WDMA_CH13; irq_src++) {
|
|
+ dma_ctx = &(isp_ctx->dma_out_ctx[irq_src - DMA_IRQ_SRC_WDMA_CH0]);
|
|
+ spin_lock(&dma_ctx->waitq_head.lock);
|
|
+ dma_ctx->in_irq = 1;
|
|
+ if (dma_ctx->in_streamoff) {
|
|
+ dma_ctx->in_irq = 0;
|
|
+ wake_up_locked(&dma_ctx->waitq_head);
|
|
+ spin_unlock(&dma_ctx->waitq_head.lock);
|
|
+ continue;
|
|
+ }
|
|
+ spin_unlock(&dma_ctx->waitq_head.lock);
|
|
+ if (dma_ctx->vnode) {
|
|
+ irq_status = hw_dma_irq_analyze(irq_src, status1, status2);
|
|
+ me = &dma_ctx->vnode->vnode.entity;
|
|
+ mpipe = media_entity_pipeline(me);
|
|
+ if (mpipe)
|
|
+ sc_pipeline = media_pipeline_to_sc_pipeline(mpipe);
|
|
+ else
|
|
+ sc_pipeline = NULL;
|
|
+ if (sc_pipeline) {
|
|
+ spin_lock(&sc_pipeline->slock);
|
|
+ pipe_ctx = (struct isp_pipeline_context*)sc_pipeline->usr_data;
|
|
+ if (pipe_ctx && (irq_status & DMA_IRQ_START) && dma_ctx->trig_dma_reload) {
|
|
+ for (i = 0; i < FORMATTER_NUM; i++) {
|
|
+ list_for_each_entry(dma_ctx_pos, &pipe_ctx->fmt_wdma_list[i], list_entry) {
|
|
+ __hw_dma_set_irq_enable(DMA_IRQ_SRC_WDMA_CH0 + dma_ctx_pos->id,
|
|
+ DMA_IRQ_START, 0, &tmp_status1, &tmp_status2);
|
|
+ }
|
|
+ }
|
|
+ list_for_each_entry(dma_ctx_pos, &pipe_ctx->wdma_list, list_entry) {
|
|
+ __hw_dma_set_irq_enable(DMA_IRQ_SRC_WDMA_CH0 + dma_ctx_pos->id,
|
|
+ DMA_IRQ_START, 0, &tmp_status1, &tmp_status2);
|
|
+ }
|
|
+ status1 |= tmp_status1;
|
|
+ status2 |= tmp_status2;
|
|
+ if (PIPELINE_ID(sc_pipeline->id) == 0)
|
|
+ sc_pipelines[0] = sc_pipeline;
|
|
+ else
|
|
+ sc_pipelines[1] = sc_pipeline;
|
|
+ }
|
|
+ spin_unlock(&sc_pipeline->slock);
|
|
+ }
|
|
+ }
|
|
+ spin_lock(&dma_ctx->waitq_head.lock);
|
|
+ dma_ctx->in_irq = 0;
|
|
+ wake_up_locked(&dma_ctx->waitq_head);
|
|
+ spin_unlock(&dma_ctx->waitq_head.lock);
|
|
+ }
|
|
+ for (irq_src = DMA_IRQ_SRC_WDMA_CH0; irq_src <= DMA_IRQ_SRC_WDMA_CH13; irq_src++) {
|
|
+ dma_ctx = &(isp_ctx->dma_out_ctx[irq_src - DMA_IRQ_SRC_WDMA_CH0]);
|
|
+ spin_lock(&dma_ctx->waitq_head.lock);
|
|
+ dma_ctx->in_irq = 1;
|
|
+ if (dma_ctx->in_streamoff) {
|
|
+ dma_ctx->in_irq = 0;
|
|
+ wake_up_locked(&dma_ctx->waitq_head);
|
|
+ spin_unlock(&dma_ctx->waitq_head.lock);
|
|
+ continue;
|
|
+ }
|
|
+ spin_unlock(&dma_ctx->waitq_head.lock);
|
|
+ if (dma_ctx->vnode) {
|
|
+ if (dma_ctx->used_for_hdr)
|
|
+ is_mix_hdr = 1;
|
|
+ irq_status = hw_dma_irq_analyze(irq_src, status1, status2);
|
|
+ me = &dma_ctx->vnode->vnode.entity;
|
|
+ mpipe = media_entity_pipeline(me);
|
|
+ if (mpipe)
|
|
+ sc_pipeline = media_pipeline_to_sc_pipeline(mpipe);
|
|
+ else
|
|
+ sc_pipeline = NULL;
|
|
+ if (sc_pipeline) {
|
|
+ if (PIPELINE_TYPE_SINGLE == PIPELINE_TYPE(sc_pipeline->id)) {
|
|
+ if ((PIPELINE_ID(sc_pipeline->id) == 0 && p0_overrun)
|
|
+ || (PIPELINE_ID(sc_pipeline->id) == 1 && p1_overrun))
|
|
+ irq_status |= PIPE_ERR(1);
|
|
+ } else if (p0_overrun || p1_overrun) {
|
|
+ irq_status |= PIPE_ERR(1);
|
|
+ }
|
|
+ }
|
|
+ remote_pad = media_entity_remote_pad(&dma_ctx->vnode->pad);
|
|
+ BUG_ON(!remote_pad);
|
|
+ rawdump = media_entity_to_rawdump(remote_pad->entity);
|
|
+ if (irq_status) {
|
|
+ if (((irq_status & DMA_IRQ_START) && sc_pipeline && sc_pipeline->is_online_mode)
|
|
+ || ((irq_status & DMA_IRQ_DONE) && sc_pipeline && !sc_pipeline->is_online_mode)) {
|
|
+ spin_lock(&sc_pipeline->slock);
|
|
+ dma_ctx->frame_id.id++;
|
|
+ frame_id = fe_isp_frame_id(&sc_pipeline->frame_id_list);
|
|
+ if (frame_id) {
|
|
+ if (dma_ctx->frame_id.id < frame_id->id)
|
|
+ dma_ctx->frame_id.id = frame_id->id;
|
|
+ } else {
|
|
+ cam_warn("%s:frame id list is null", __func__);
|
|
+ }
|
|
+ frame_idx = dma_ctx->frame_id.id - 1;
|
|
+ spin_unlock(&sc_pipeline->slock);
|
|
+ dma_ctx->vnode->total_frm++;
|
|
+ }
|
|
+ if (!dma_ctx->used_for_hdr) {
|
|
+ tmp = irq_status & (~PIPE_ERR(1));
|
|
+ spin_lock(&(dma_ctx->vnode->slock));
|
|
+ list_for_each_entry(pos, &(dma_ctx->vnode->busy_list), list_entry) {
|
|
+ if (!tmp)
|
|
+ break;
|
|
+ if (tmp & DMA_IRQ_ERR) {
|
|
+ if (!(pos->flags & SC_BUF_FLAG_SOF_TOUCH)) {
|
|
+ vi_irq_print("%s dma err(0x%08x) without sof, drop it", dma_ctx->vnode->name, tmp);
|
|
+ tmp &= ~DMA_IRQ_ERR;
|
|
+ } else if (!(pos->flags & SC_BUF_FLAG_HW_ERR)) {
|
|
+ pos->flags |= SC_BUF_FLAG_HW_ERR;
|
|
+ hw_err_code = (unsigned int*)(&pos->reserved[0]);
|
|
+ *hw_err_code = irq_status;
|
|
+ tmp &= ~DMA_IRQ_ERR;
|
|
+ pos->timestamp_eof = ktime_get_boottime_ns();
|
|
+ }
|
|
+ atomic_dec(&dma_ctx->busy_cnt);
|
|
+ wake_up_interruptible_all(&dma_ctx->waitq_eof);
|
|
+ }
|
|
+ if (tmp & DMA_IRQ_DONE) {
|
|
+ if (!(pos->flags & SC_BUF_FLAG_SOF_TOUCH) && sc_pipeline && sc_pipeline->is_online_mode) {
|
|
+ cam_dbg("%s dma done without sof, drop it", dma_ctx->vnode->name);
|
|
+ tmp &= ~DMA_IRQ_DONE;
|
|
+ } else if (!(pos->flags & SC_BUF_FLAG_DONE_TOUCH)) {
|
|
+ if (sc_pipeline && !sc_pipeline->is_online_mode) {
|
|
+ pos->vb2_v4l2_buf.sequence = frame_idx;
|
|
+ pos->vb2_v4l2_buf.vb2_buf.timestamp = ktime_get_boottime_ns();
|
|
+ }
|
|
+ pos->timestamp_eof = ktime_get_boottime_ns();
|
|
+ pos->flags |= SC_BUF_FLAG_DONE_TOUCH;
|
|
+ tmp &= ~DMA_IRQ_DONE;
|
|
+ if (irq_status & PIPE_ERR(1)) {
|
|
+ hw_err_code = (unsigned int*)(&pos->reserved[0]);
|
|
+ *hw_err_code = irq_status;
|
|
+ pos->flags |= SC_BUF_FLAG_HW_ERR;
|
|
+ }
|
|
+ }
|
|
+ atomic_dec(&dma_ctx->busy_cnt);
|
|
+ wake_up_interruptible_all(&dma_ctx->waitq_eof);
|
|
+ }
|
|
+ if (tmp & DMA_IRQ_START) {
|
|
+ if (pos->flags & SC_BUF_FLAG_SOF_TOUCH) {
|
|
+ if (!(pos->flags & (SC_BUF_FLAG_DONE_TOUCH | SC_BUF_FLAG_HW_ERR | SC_BUF_FLAG_SW_ERR))
|
|
+ && !(pos->flags & SC_BUF_FLAG_FORCE_SHADOW)) {
|
|
+ cam_warn("%s next sof arrived without dma done or err", dma_ctx->vnode->name);
|
|
+ pos->flags |= SC_BUF_FLAG_SW_ERR;
|
|
+ atomic_dec(&dma_ctx->busy_cnt);
|
|
+ }
|
|
+ } else {
|
|
+ pos->flags |= (SC_BUF_FLAG_SOF_TOUCH | SC_BUF_FLAG_TIMESTAMPED);
|
|
+ tmp &= ~DMA_IRQ_START;
|
|
+ if (sc_pipeline && sc_pipeline->is_online_mode) {
|
|
+ pos->vb2_v4l2_buf.sequence = frame_idx;
|
|
+ pos->vb2_v4l2_buf.vb2_buf.timestamp = ktime_get_boottime_ns();
|
|
+ }
|
|
+ atomic_inc(&dma_ctx->busy_cnt);
|
|
+ if (rawdump && rawdump->rawdump_only) {
|
|
+ if (__spm_vdev_idle_list_empty(dma_ctx->vnode)) {
|
|
+ hw_isp_top_set_rdp_cfg_rdy(SC_BLOCK(isp_ctx->pipes[0]), rawdump->idx, 0);
|
|
+ hw_isp_top_set_rawdump_source(SC_BLOCK(isp_ctx->pipes[0]), rawdump->idx, INVALID_CH);
|
|
+ hw_isp_top_set_rdp_cfg_rdy(SC_BLOCK(isp_ctx->pipes[0]), rawdump->idx, 1);
|
|
+ pos->vb2_v4l2_buf.flags |= V4L2_BUF_FLAG_CLOSE_DOWN;
|
|
+ }
|
|
+ }
|
|
+ }
|
|
+ }
|
|
+ }
|
|
+ spin_unlock(&(dma_ctx->vnode->slock));
|
|
+
|
|
+ ret = fe_isp_get_dma_work(dma_ctx, &isp_dma_work);
|
|
+ if (ret) {
|
|
+ if (printk_timed_ratelimit(&print_jiffies, 5000)) {
|
|
+ cam_warn("%s dma work idle list was null", dma_ctx->vnode->name);
|
|
+ i = 0;
|
|
+ spin_lock(&dma_ctx->slock);
|
|
+ list_for_each_entry(isp_dma_work, &dma_ctx->dma_work_busy_list, busy_list_entry) {
|
|
+ tasklet_state[i++] = isp_dma_work->dma_tasklet.state;
|
|
+ }
|
|
+ spin_unlock(&dma_ctx->slock);
|
|
+ while (--i >= 0) {
|
|
+ cam_warn("%s tasklet(%d) state=%lu", dma_ctx->vnode->name, i, tasklet_state[i]);
|
|
+ }
|
|
+ }
|
|
+ } else {
|
|
+ isp_dma_work->irq_status = irq_status;
|
|
+#if USE_TASKLET
|
|
+ //if (irq_status & DMA_IRQ_DONE && sc_pipeline && !sc_pipeline->is_online_mode) {
|
|
+ // cam_dbg("ao[%d] t", dma_ctx->id);
|
|
+ //}
|
|
+ tasklet_schedule(&(isp_dma_work->dma_tasklet));
|
|
+#elif USE_WORKQ
|
|
+ if(!schedule_work(&(isp_dma_work->dma_work)))
|
|
+ cam_err("%s schedule work failed", dma_ctx->vnode->name);
|
|
+#else
|
|
+ fe_isp_dma_bh_handler(isp_dma_work);
|
|
+#endif
|
|
+ }
|
|
+
|
|
+ if (irq_status & DMA_IRQ_DONE) {
|
|
+ //if (sc_pipeline && !sc_pipeline->is_online_mode) {
|
|
+ // cam_dbg("ao[%d] d", dma_ctx->id);
|
|
+ //}
|
|
+ if (dma_ctx->id == 0)
|
|
+ hw_isp_top_clr_irq_status(SC_BLOCK(isp_ctx->pipes[0]), 0x5bff0000);
|
|
+ if (dma_ctx->id == 1)
|
|
+ hw_isp_top_clr_irq_status(SC_BLOCK(isp_ctx->pipes[1]), 0x5bff0000);
|
|
+ }
|
|
+ } else if (irq_status & DMA_IRQ_START) {
|
|
+ hw_dma_set_wdma_ready(SC_BLOCK(dma_ctx->vnode), dma_ctx->vnode->idx, 1);
|
|
+ }
|
|
+ }
|
|
+ }
|
|
+ spin_lock(&dma_ctx->waitq_head.lock);
|
|
+ dma_ctx->in_irq = 0;
|
|
+ wake_up_locked(&dma_ctx->waitq_head);
|
|
+ spin_unlock(&dma_ctx->waitq_head.lock);
|
|
+ }
|
|
+ for (irq_src = DMA_IRQ_SRC_RDMA_CH0; irq_src <= DMA_IRQ_SRC_RDMA_CH1; irq_src++) {
|
|
+ dma_ctx = &(isp_ctx->dma_in_ctx[irq_src - DMA_IRQ_SRC_RDMA_CH0]);
|
|
+ spin_lock(&dma_ctx->waitq_head.lock);
|
|
+ dma_ctx->in_irq = 1;
|
|
+ if (dma_ctx->in_streamoff) {
|
|
+ dma_ctx->in_irq = 0;
|
|
+ wake_up_locked(&dma_ctx->waitq_head);
|
|
+ spin_unlock(&dma_ctx->waitq_head.lock);
|
|
+ continue;
|
|
+ }
|
|
+ spin_unlock(&dma_ctx->waitq_head.lock);
|
|
+ if (dma_ctx->vnode || (is_mix_hdr && irq_src == DMA_IRQ_SRC_RDMA_CH0)) {
|
|
+ irq_status = hw_dma_irq_analyze(irq_src, status1, status2);
|
|
+ if (is_mix_hdr && (irq_status & DMA_IRQ_ERR)) {
|
|
+ cam_err("ain0 DMA_IRQ_ERR");
|
|
+ }
|
|
+ if (irq_status && dma_ctx->vnode) {
|
|
+ tmp = irq_status;
|
|
+ spin_lock(&(dma_ctx->vnode->slock));
|
|
+ list_for_each_entry(pos, &dma_ctx->vnode->busy_list, list_entry) {
|
|
+ if (!tmp)
|
|
+ break;
|
|
+ if ((tmp & DMA_IRQ_ERR) && !(pos->flags & SC_BUF_FLAG_HW_ERR)) {
|
|
+ pos->flags |= SC_BUF_FLAG_HW_ERR;
|
|
+ tmp &= ~DMA_IRQ_ERR;
|
|
+ }
|
|
+ if ((tmp & DMA_IRQ_DONE) && !(pos->flags & SC_BUF_FLAG_DONE_TOUCH)) {
|
|
+ pos->flags |= SC_BUF_FLAG_DONE_TOUCH;
|
|
+ tmp &= ~DMA_IRQ_DONE;
|
|
+ }
|
|
+ if (tmp & DMA_IRQ_START) {
|
|
+ if (pos->flags & SC_BUF_FLAG_SOF_TOUCH) {
|
|
+ if (!(pos->flags & (SC_BUF_FLAG_DONE_TOUCH | SC_BUF_FLAG_HW_ERR | SC_BUF_FLAG_SW_ERR))) {
|
|
+ cam_warn("%s next sof arrived without dma done or err", dma_ctx->vnode->name);
|
|
+ pos->flags |= SC_BUF_FLAG_SW_ERR;
|
|
+ }
|
|
+ } else {
|
|
+ pos->flags |= SC_BUF_FLAG_SOF_TOUCH;
|
|
+ }
|
|
+ tmp &= ~DMA_IRQ_START;
|
|
+ }
|
|
+ }
|
|
+ spin_unlock(&(dma_ctx->vnode->slock));
|
|
+ ret = fe_isp_get_dma_work(dma_ctx, &isp_dma_work);
|
|
+ if (ret) {
|
|
+ cam_warn("%s dma work idle list was null", dma_ctx->vnode->name);
|
|
+ } else {
|
|
+ isp_dma_work->irq_status = irq_status;
|
|
+#if USE_TASKLET
|
|
+ tasklet_schedule(&(isp_dma_work->dma_tasklet));
|
|
+#elif USE_WORKQ
|
|
+ if (!schedule_work(&(isp_dma_work->dma_work)))
|
|
+ cam_err("%s schedule work failed", dma_ctx->vnode->name);
|
|
+#else
|
|
+ fe_isp_dma_bh_handler(isp_dma_work);
|
|
+#endif
|
|
+ }
|
|
+ //if (irq_status & DMA_IRQ_DONE) {
|
|
+ // cam_dbg("ai[%d] d", dma_ctx->id);
|
|
+ //}
|
|
+ }
|
|
+ }
|
|
+ spin_lock(&dma_ctx->waitq_head.lock);
|
|
+ dma_ctx->in_irq = 0;
|
|
+ wake_up_locked(&dma_ctx->waitq_head);
|
|
+ spin_unlock(&dma_ctx->waitq_head.lock);
|
|
+ }
|
|
+
|
|
+ for (i = 0; i < 2; i++) {
|
|
+ if (sc_pipelines[i]) {
|
|
+ fe_isp_process_dma_reload(isp_ctx, sc_pipelines[i]);
|
|
+ }
|
|
+ }
|
|
+ // dma9, dma10,dma14 and dma15 irqs forward to isp firmware
|
|
+ if ((status1 & 0xf8000000) || (status2 & 0xfc01)) {
|
|
+ irq_data.status1 = status1;
|
|
+ irq_data.status2 = status2;
|
|
+ sc_ispfirm_call(ispfirm_ops, irq_callback, DMA_IRQ, &irq_data, sizeof(irq_data));
|
|
+ }
|
|
+
|
|
+ return IRQ_HANDLED;
|
|
+}
|
|
+
|
|
+static irqreturn_t fe_isp_process_dma_reload(struct isp_context *isp_ctx, struct spm_camera_pipeline *sc_pipeline)
|
|
+{
|
|
+ int i = 0, buf_ready = 0, ret = 0, flag = 0, dma_busy = 0;
|
|
+ unsigned int buf_index = 0;
|
|
+ struct isp_pipeline_context *pipe_ctx = NULL;
|
|
+ struct spm_camera_vnode *sc_vnode = NULL;
|
|
+ struct isp_dma_context *dma_ctx = NULL;
|
|
+ struct spm_camera_vbuffer *sc_vb = NULL, *pos = NULL;
|
|
+
|
|
+ pipe_ctx = (struct isp_pipeline_context *)sc_pipeline->usr_data;
|
|
+ if (!pipe_ctx) {
|
|
+ cam_err("%s pipe_ctx was null", __func__);
|
|
+ return IRQ_HANDLED;
|
|
+ }
|
|
+ spin_lock(&sc_pipeline->slock);
|
|
+ if (pipe_ctx->mmu_tbu_reload > 0) {
|
|
+ pipe_ctx->mmu_tbu_reload--;
|
|
+ }
|
|
+ if (sc_pipeline->state < PIPELINE_ST_STARTED) {
|
|
+ spin_unlock(&sc_pipeline->slock);
|
|
+ return IRQ_HANDLED;
|
|
+ }
|
|
+ for (i = 0; i < FORMATTER_NUM && pipe_ctx->mmu_tbu_reload <= MMU_TBU_RELOAD; i++) {
|
|
+ buf_ready = 1;
|
|
+ if (pipe_ctx->fmt_wdma_cnt[i] < pipe_ctx->fmt_wdma_start_cnt[i])
|
|
+ buf_ready = 0;
|
|
+ if (buf_ready) {
|
|
+ list_for_each_entry(dma_ctx, &pipe_ctx->fmt_wdma_list[i], list_entry) {
|
|
+ sc_vnode = dma_ctx->vnode;
|
|
+ flag = 0;
|
|
+ if (sc_vnode) {
|
|
+ spin_lock(&sc_vnode->slock);
|
|
+ list_for_each_entry(pos, &sc_vnode->queued_list, list_entry) {
|
|
+ if (!(pos->flags & SC_BUF_FLAG_CCIC_TOUCH)) {
|
|
+ flag = 1;
|
|
+ break;
|
|
+ }
|
|
+ }
|
|
+ if (!flag) {
|
|
+ buf_ready = 0;
|
|
+ spin_unlock(&sc_vnode->slock);
|
|
+ break;
|
|
+ }
|
|
+ spin_unlock(&sc_vnode->slock);
|
|
+ }
|
|
+ }
|
|
+ }
|
|
+ if (buf_ready) {
|
|
+ flag = 0;
|
|
+ list_for_each_entry(dma_ctx, &pipe_ctx->fmt_wdma_list[i], list_entry) {
|
|
+ sc_vnode = dma_ctx->vnode;
|
|
+ if (sc_vnode) {
|
|
+ //hw_dma_set_wdma_ready(SC_BLOCK(sc_vnode), sc_vnode->idx, 0);
|
|
+ spin_lock(&sc_vnode->slock);
|
|
+ //if (pipe_ctx->mmu_tbu_reload == MMU_TBU_OK) {
|
|
+ list_for_each_entry(pos, &sc_vnode->queued_list, list_entry) {
|
|
+ if (!(pos->flags & SC_BUF_FLAG_CCIC_TOUCH)) {
|
|
+ pos->flags |= SC_BUF_FLAG_CCIC_TOUCH;
|
|
+ break;
|
|
+ }
|
|
+ }
|
|
+ ret = __spm_vdev_dq_idle_vbuffer(sc_vnode, &sc_vb);
|
|
+ //} else { // MMU_TBU_RELOAD
|
|
+ // ret = __spm_vdev_pick_idle_vbuffer(sc_vnode, &sc_vb);
|
|
+ //}
|
|
+ if (0 == ret) {
|
|
+ if (flag == 0)
|
|
+ buf_index = sc_vb->vb2_v4l2_buf.vb2_buf.index;
|
|
+ else if (buf_index != sc_vb->vb2_v4l2_buf.vb2_buf.index) {
|
|
+ vi_irq_print("%s(%s) buf index miss match (%u vs %u)", __func__, sc_vnode->name, buf_index, sc_vb->vb2_v4l2_buf.vb2_buf.index);
|
|
+ }
|
|
+ fe_isp_update_aout_dma_addr(sc_vnode, sc_vb, 0);
|
|
+ //if (pipe_ctx->mmu_tbu_reload == MMU_TBU_OK) {
|
|
+ hw_dma_set_wdma_ready(SC_BLOCK(sc_vnode), sc_vnode->idx, 1);
|
|
+ __spm_vdev_q_busy_vbuffer(sc_vnode, sc_vb);
|
|
+ //}
|
|
+ } else {
|
|
+ vi_irq_print("%s(%s) failed to dq idle buf", __func__, sc_vnode->name);
|
|
+ }
|
|
+ spin_unlock(&sc_vnode->slock);
|
|
+ flag++;
|
|
+ }
|
|
+ }
|
|
+ } else {
|
|
+ list_for_each_entry(dma_ctx, &pipe_ctx->fmt_wdma_list[i], list_entry) {
|
|
+ sc_vnode = dma_ctx->vnode;
|
|
+ if (sc_vnode) {
|
|
+ //hw_dma_set_wdma_ready(SC_BLOCK(sc_vnode), sc_vnode->idx, 0);
|
|
+ spin_lock(&sc_vnode->slock);
|
|
+ if (sc_vnode->sc_vb) {
|
|
+ sc_vb = sc_vnode->sc_vb;
|
|
+ fe_isp_update_aout_dma_addr(sc_vnode, sc_vb, 0);
|
|
+ hw_dma_set_wdma_ready(SC_BLOCK(sc_vnode), sc_vnode->idx, 1);
|
|
+ }
|
|
+ spin_unlock(&sc_vnode->slock);
|
|
+ }
|
|
+ }
|
|
+ }
|
|
+ }
|
|
+ if (pipe_ctx->mmu_tbu_reload <= MMU_TBU_RELOAD) {
|
|
+ list_for_each_entry(dma_ctx, &pipe_ctx->wdma_list, list_entry) {
|
|
+ sc_vnode = dma_ctx->vnode;
|
|
+ if (sc_vnode) {
|
|
+ //hw_dma_set_wdma_ready(SC_BLOCK(sc_vnode), sc_vnode->idx, 0);
|
|
+ spin_lock(&sc_vnode->slock);
|
|
+ //list_for_each_entry(pos, &sc_vnode->queued_list, list_entry) {
|
|
+ // if (!(pos->flags & SC_BUF_FLAG_CCIC_TOUCH)) {
|
|
+ // pos->flags |= SC_BUF_FLAG_CCIC_TOUCH;
|
|
+ // break;
|
|
+ // }
|
|
+ //}
|
|
+ if ((sc_vnode->idx == 12 || sc_vnode->idx == 13) && sc_vnode->sc_vb) {// rawdump
|
|
+ if (__spm_vdev_busy_list_empty(sc_vnode)) {
|
|
+ dma_busy = 0;
|
|
+ } else {
|
|
+ dma_busy = 1;
|
|
+ }
|
|
+ if (dma_busy) {
|
|
+ ret = __spm_vdev_dq_idle_vbuffer(sc_vnode, &sc_vb);
|
|
+ if (0 == ret) {
|
|
+ __spm_vdev_q_busy_vbuffer(sc_vnode, sc_vb);
|
|
+ }
|
|
+ } else {
|
|
+ ret = __spm_vdev_pick_idle_vbuffer(sc_vnode, &sc_vb);
|
|
+ if (0 == ret) {
|
|
+ if (sc_vb->flags & SC_BUF_FLAG_GEN_EOF) {
|
|
+ __spm_vdev_dq_idle_vbuffer(sc_vnode, &sc_vb);
|
|
+ __spm_vdev_q_busy_vbuffer(sc_vnode, sc_vb);
|
|
+ } else {
|
|
+ sc_vb->flags |= SC_BUF_FLAG_GEN_EOF;
|
|
+ sc_vb = sc_vnode->sc_vb;
|
|
+ }
|
|
+ }
|
|
+ }
|
|
+ } else {
|
|
+ ret = __spm_vdev_dq_idle_vbuffer(sc_vnode, &sc_vb);
|
|
+ if (0 == ret) {
|
|
+ __spm_vdev_q_busy_vbuffer(sc_vnode, sc_vb);
|
|
+ } else {
|
|
+ if (sc_vnode->sc_vb) {
|
|
+ sc_vb = sc_vnode->sc_vb;
|
|
+ }
|
|
+ }
|
|
+ }
|
|
+ if (sc_vb) {
|
|
+ fe_isp_update_aout_dma_addr(sc_vnode, sc_vb, 0);
|
|
+ hw_dma_set_wdma_ready(SC_BLOCK(sc_vnode), sc_vnode->idx, 1);
|
|
+ }
|
|
+ spin_unlock(&sc_vnode->slock);
|
|
+ }
|
|
+ }
|
|
+ }
|
|
+ spin_unlock(&sc_pipeline->slock);
|
|
+ return IRQ_HANDLED;
|
|
+}
|
|
+
|
|
+static void fe_isp_flush_pipeline_buffers(struct isp_context *isp_ctx, struct spm_camera_pipeline *sc_pipeline)
|
|
+{
|
|
+ int i = 0;
|
|
+ unsigned long flags = 0;
|
|
+ struct isp_pipeline_context *pipe_ctx = NULL;
|
|
+ struct spm_camera_vnode *sc_vnode = NULL;
|
|
+ struct isp_dma_context *dma_ctx = NULL;
|
|
+ struct spm_camera_vbuffer *pos = NULL, *n = NULL;
|
|
+
|
|
+ pipe_ctx = (struct isp_pipeline_context *)sc_pipeline->usr_data;
|
|
+ if (!pipe_ctx) {
|
|
+ cam_err("%s pipe_ctx was null", __func__);
|
|
+ return;
|
|
+ }
|
|
+ spin_lock_irqsave(&sc_pipeline->slock, flags);
|
|
+ for (i = 0; i < FORMATTER_NUM; i++) {
|
|
+ list_for_each_entry(dma_ctx, &pipe_ctx->fmt_wdma_list[i], list_entry) {
|
|
+ sc_vnode = dma_ctx->vnode;
|
|
+ if (sc_vnode) {
|
|
+ spin_lock(&sc_vnode->slock);
|
|
+ list_for_each_entry_safe(pos, n, &sc_vnode->queued_list, list_entry) {
|
|
+ list_del_init(&(pos->list_entry));
|
|
+ pos->vb2_v4l2_buf.flags |= V4L2_BUF_FLAG_IGNOR;
|
|
+ fe_isp_export_camera_vbuffer(sc_vnode, pos);
|
|
+ }
|
|
+ spin_unlock(&sc_vnode->slock);
|
|
+ }
|
|
+ }
|
|
+ }
|
|
+ list_for_each_entry(dma_ctx, &pipe_ctx->wdma_list, list_entry) {
|
|
+ sc_vnode = dma_ctx->vnode;
|
|
+ if (sc_vnode) {
|
|
+ spin_lock(&sc_vnode->slock);
|
|
+ list_for_each_entry_safe(pos, n, &sc_vnode->queued_list, list_entry) {
|
|
+ list_del_init(&(pos->list_entry));
|
|
+ pos->vb2_v4l2_buf.flags |= V4L2_BUF_FLAG_IGNOR;
|
|
+ fe_isp_export_camera_vbuffer(sc_vnode, pos);
|
|
+ }
|
|
+ spin_unlock(&sc_vnode->slock);
|
|
+ }
|
|
+ }
|
|
+ spin_unlock_irqrestore(&sc_pipeline->slock, flags);
|
|
+}
|
|
+
|
|
+static irqreturn_t fe_isp_irq_handler(int irq, void *dev_id)
|
|
+{
|
|
+ unsigned int pipe0_irq_status = 0, pipe1_irq_status = 0, err0_irq_status = 0, err2_irq_status = 0, err1_irq_status = 0;
|
|
+ unsigned int posterr_status = 0, pipe0_irq_raw_status = 0, pipe1_irq_raw_status = 0;
|
|
+ unsigned int err0_detect_mask = 0;
|
|
+ struct isp_context *isp_ctx = (struct isp_context *)dev_id;
|
|
+ struct spm_camera_pipeline *sc_pipeline = NULL;
|
|
+ struct media_entity *p0_me = &(isp_ctx->pipes[0]->sc_subdev.pcsd.sd.entity);
|
|
+ struct media_entity *p1_me = &(isp_ctx->pipes[1]->sc_subdev.pcsd.sd.entity);
|
|
+ struct media_pipeline *mpipe0 = media_entity_pipeline(p0_me);
|
|
+ struct media_pipeline *mpipe1 = media_entity_pipeline(p1_me);
|
|
+ struct frame_id *frame_id = NULL;
|
|
+ struct isp_irq_data irq_data = { 0 };
|
|
+ struct spm_camera_ispfirm_ops *ispfirm_ops = NULL;
|
|
+ struct k1xvi_platform_data *drvdata = platform_get_drvdata(isp_ctx->pdev);
|
|
+ __u64 p0_frame_id = 0, p1_frame_id = 0;
|
|
+ static DEFINE_RATELIMIT_STATE(rs, HZ / 10, 5);
|
|
+
|
|
+ if (drvdata && drvdata->isp_firm)
|
|
+ ispfirm_ops = drvdata->isp_firm->ispfirm_ops;
|
|
+
|
|
+ pipe0_irq_status = hw_isp_top_get_irq_status(SC_BLOCK(isp_ctx->pipes[0]));
|
|
+ if (pipe0_irq_status) {
|
|
+ hw_isp_top_clr_irq_status(SC_BLOCK(isp_ctx->pipes[0]), pipe0_irq_status);
|
|
+ if ((pipe0_irq_status & ISP_IRQ_PIPE_SOF) && mpipe0) {
|
|
+ sc_pipeline = media_pipeline_to_sc_pipeline(mpipe0);
|
|
+ spin_lock(&sc_pipeline->slock);
|
|
+ isp_ctx->pipe_frame_id[0].id++;
|
|
+ frame_id = fe_isp_frame_id(&sc_pipeline->frame_id_list);
|
|
+ if (frame_id) {
|
|
+ if (isp_ctx->pipe_frame_id[0].id < frame_id->id)
|
|
+ isp_ctx->pipe_frame_id[0].id = frame_id->id;
|
|
+ p0_frame_id = frame_id->id;
|
|
+ } else {
|
|
+ cam_warn("p0 irq handler:frame id list is null");
|
|
+ }
|
|
+ irq_data.pipe0_frame_id = isp_ctx->pipe_frame_id[0].id - 1;
|
|
+ spin_unlock(&sc_pipeline->slock);
|
|
+ }
|
|
+ if (pipe0_irq_status & ISP_IRQ_G_RST_DONE)
|
|
+ complete_all(&isp_ctx->global_reset_done);
|
|
+ if (pipe0_irq_status & ISP_IRQ_SDE_SOF)
|
|
+ complete_all(&isp_ctx->pipes[0]->sde_sof);
|
|
+ }
|
|
+ pipe1_irq_status = hw_isp_top_get_irq_status(SC_BLOCK(isp_ctx->pipes[1]));
|
|
+ if (pipe1_irq_status) {
|
|
+ hw_isp_top_clr_irq_status(SC_BLOCK(isp_ctx->pipes[1]), pipe1_irq_status);
|
|
+ if ((pipe1_irq_status & ISP_IRQ_PIPE_SOF) && mpipe1) {
|
|
+ sc_pipeline = media_pipeline_to_sc_pipeline(mpipe1);
|
|
+ spin_lock(&sc_pipeline->slock);
|
|
+ isp_ctx->pipe_frame_id[1].id++;
|
|
+ frame_id = fe_isp_frame_id(&sc_pipeline->frame_id_list);
|
|
+ if (frame_id) {
|
|
+ if (isp_ctx->pipe_frame_id[1].id < frame_id->id)
|
|
+ isp_ctx->pipe_frame_id[1].id = frame_id->id;
|
|
+ p1_frame_id = frame_id->id;
|
|
+ } else {
|
|
+ cam_warn("p1 irq handler:frame id list is null");
|
|
+ }
|
|
+ irq_data.pipe1_frame_id = isp_ctx->pipe_frame_id[1].id - 1;
|
|
+ spin_unlock(&sc_pipeline->slock);
|
|
+ }
|
|
+ if (pipe1_irq_status & ISP_IRQ_SDE_SOF)
|
|
+ complete_all(&isp_ctx->pipes[1]->sde_sof);
|
|
+ }
|
|
+ posterr_status = hw_isp_top_get_posterr_irq_status(SC_BLOCK(isp_ctx->pipes[0]));
|
|
+ if (posterr_status) {
|
|
+ hw_isp_top_clr_posterr_irq_status(SC_BLOCK(isp_ctx->pipes[0]), posterr_status);
|
|
+ if (posterr_status & POSTERR_IRQ_PIP0_SDW_CLOSE_DONE) {
|
|
+ //cam_dbg("POSTERR_IRQ_PIP0_SDW_CLOSE_DONE");
|
|
+ complete_all(&(isp_ctx->pipes[0]->close_done));
|
|
+ }
|
|
+ if (posterr_status & POSTERR_IRQ_PIP1_SDW_CLOSE_DONE) {
|
|
+ //cam_dbg("POSTERR_IRQ_PIP1_SDW_CLOSE_DONE");
|
|
+ complete_all(&(isp_ctx->pipes[1]->close_done));
|
|
+ }
|
|
+ if (posterr_status & POSTERR_IRQ_RDP0_SDW_CLOSE_DONE) {
|
|
+ //cam_dbg("POSTERR_IRQ_RDP0_SDW_CLOSE_DONE");
|
|
+ atomic_set(&(isp_ctx->rawdumps[0]->close_done), 1);
|
|
+ }
|
|
+ if (posterr_status & POSTERR_IRQ_RDP1_SDW_CLOSE_DONE) {
|
|
+ //cam_dbg("POSTERR_IRQ_RDP1_SDW_CLOSE_DONE");
|
|
+ atomic_set(&(isp_ctx->rawdumps[1]->close_done), 1);
|
|
+ }
|
|
+ }
|
|
+ if (p0_frame_id == 0 && mpipe0) {
|
|
+ sc_pipeline = media_pipeline_to_sc_pipeline(mpipe0);
|
|
+ spin_lock(&sc_pipeline->slock);
|
|
+ frame_id = fe_isp_frame_id(&sc_pipeline->frame_id_list);
|
|
+ if (frame_id)
|
|
+ p0_frame_id = frame_id->id;
|
|
+ spin_unlock(&sc_pipeline->slock);
|
|
+ }
|
|
+ if (p1_frame_id == 0 && mpipe1) {
|
|
+ sc_pipeline = media_pipeline_to_sc_pipeline(mpipe1);
|
|
+ spin_lock(&sc_pipeline->slock);
|
|
+ frame_id = fe_isp_frame_id(&sc_pipeline->frame_id_list);
|
|
+ if (frame_id)
|
|
+ p1_frame_id = frame_id->id;
|
|
+ spin_unlock(&sc_pipeline->slock);
|
|
+ }
|
|
+ err0_irq_status = hw_isp_top_get_err0_irq_status(SC_BLOCK(isp_ctx->pipes[0]));
|
|
+ if (err0_irq_status)
|
|
+ hw_isp_top_clr_err0_irq_status(SC_BLOCK(isp_ctx->pipes[0]), err0_irq_status);
|
|
+ err2_irq_status = hw_isp_top_get_err2_irq_status(SC_BLOCK(isp_ctx->pipes[0]));
|
|
+ if (err2_irq_status)
|
|
+ hw_isp_top_clr_err2_irq_status(SC_BLOCK(isp_ctx->pipes[0]), err2_irq_status);
|
|
+ irq_data.pipe0_irq_status = pipe0_irq_status;
|
|
+ irq_data.pipe1_irq_status = pipe1_irq_status;
|
|
+ sc_ispfirm_call(ispfirm_ops, irq_callback, ISP_IRQ, &irq_data, sizeof(irq_data));
|
|
+ //cam_dbg("pipe0 irq status=0x%08x, pipe1 irq status=0x%08x", pipe0_irq_status, pipe1_irq_status);
|
|
+ if (mpipe0)
|
|
+ sc_pipeline = media_pipeline_to_sc_pipeline(mpipe0);
|
|
+ else if (mpipe1)
|
|
+ sc_pipeline = media_pipeline_to_sc_pipeline(mpipe1);
|
|
+ else
|
|
+ sc_pipeline = NULL;
|
|
+ if (!sc_pipeline || PIPELINE_TYPE(sc_pipeline->id) == PIPELINE_TYPE_SINGLE)
|
|
+ err0_detect_mask = 0xffff001f;
|
|
+ else
|
|
+ err0_detect_mask = 0xffff001c;
|
|
+ if (err2_irq_status & ERR2_PIPE0_OVERRUN)
|
|
+ isp_ctx->isp_fatal_error |= ISP_FATAL_ERR_PIPE0_OVERRUN;
|
|
+ if (err2_irq_status & ERR2_PIPE1_OVERRUN)
|
|
+ isp_ctx->isp_fatal_error |= ISP_FATAL_ERR_PIPE1_OVERRUN;
|
|
+
|
|
+ err1_irq_status = hw_isp_top_get_err1_irq_status(SC_BLOCK(isp_ctx->pipes[0]));
|
|
+ if (err1_irq_status)
|
|
+ hw_isp_top_clr_err1_irq_status(SC_BLOCK(isp_ctx->pipes[0]), err1_irq_status);
|
|
+
|
|
+ if ((err0_irq_status & err0_detect_mask) || (err2_irq_status & 0xf80001ff) || err1_irq_status) {
|
|
+ pipe0_irq_raw_status = hw_isp_top_get_irq_raw_status(SC_BLOCK(isp_ctx->pipes[0]));
|
|
+ pipe1_irq_raw_status = hw_isp_top_get_irq_raw_status(SC_BLOCK(isp_ctx->pipes[1]));
|
|
+ if (__ratelimit(&rs)) {
|
|
+ cam_err("err0_irq_status=0x%08x err1_irq_status=0x%08x err2_irq_status=0x%08x p0_frame_id=%llu p1_frame_id=%llu",
|
|
+ err0_irq_status, err1_irq_status, err2_irq_status, p0_frame_id, p1_frame_id);
|
|
+ cam_err("p0_irq_raw_status=0x%08x p1_irq_raw_status=0x%08x", pipe0_irq_raw_status, pipe1_irq_raw_status);
|
|
+ hw_isp_top_pipe0_debug_dump(SC_BLOCK(isp_ctx->pipes[0]));
|
|
+ hw_isp_top_pipe1_debug_dump(SC_BLOCK(isp_ctx->pipes[1]));
|
|
+ }
|
|
+ }
|
|
+ return IRQ_HANDLED;
|
|
+}
|
|
+
|
|
+static void fe_isp_print_work_handler(struct work_struct *work)
|
|
+{
|
|
+ struct isp_print_work_struct *print_work = container_of(work, struct isp_print_work_struct, print_work);
|
|
+ struct isp_context *isp_ctx = print_work->isp_ctx;
|
|
+ unsigned long flags = 0;
|
|
+
|
|
+ cam_err("%s", print_work->msg_string);
|
|
+ spin_lock_irqsave(&isp_ctx->slock, flags);
|
|
+ list_add(&print_work->list, &isp_ctx->print_work_list);
|
|
+ spin_unlock_irqrestore(&isp_ctx->slock, flags);
|
|
+}
|
|
+
|
|
+static void fe_isp_dma_work_handler(struct work_struct *work)
|
|
+{
|
|
+}
|
|
+
|
|
+static void fe_isp_export_camera_vbuffer(struct spm_camera_vnode *sc_vnode,
|
|
+ struct spm_camera_vbuffer *sc_vb)
|
|
+{
|
|
+ struct spm_camera_pipeline *sc_pipeline = NULL;
|
|
+ struct isp_pipeline_context *pipe_ctx = NULL;
|
|
+ struct media_pad *remote_pad = NULL;
|
|
+ struct fe_formatter *formatter = NULL;
|
|
+ struct fe_dwt *dwt = NULL;
|
|
+ struct spm_camera_vbuffer *pos = NULL, *n = NULL;
|
|
+ struct media_pipeline *pipe = media_entity_pipeline(&sc_vnode->vnode.entity);
|
|
+ unsigned int buf_index = sc_vb->vb2_v4l2_buf.vb2_buf.index;
|
|
+
|
|
+ if (!pipe)
|
|
+ goto export_buffer;
|
|
+ sc_pipeline = media_pipeline_to_sc_pipeline(pipe);
|
|
+ pipe_ctx = (struct isp_pipeline_context *)sc_pipeline->usr_data;
|
|
+ if (!pipe_ctx)
|
|
+ goto export_buffer;
|
|
+ remote_pad = media_entity_remote_pad(&sc_vnode->pad);
|
|
+ if (!remote_pad)
|
|
+ goto export_buffer;
|
|
+ formatter = media_entity_to_formatter(remote_pad->entity);
|
|
+ dwt = media_entity_to_dwt(remote_pad->entity);
|
|
+ if (!formatter) {
|
|
+ if (!dwt)
|
|
+ goto export_buffer;
|
|
+ remote_pad = media_entity_remote_pad(&dwt->pads[PAD_IN]);
|
|
+ if (!remote_pad)
|
|
+ goto export_buffer;
|
|
+ formatter = media_entity_to_formatter(remote_pad->entity);
|
|
+ if (!formatter)
|
|
+ goto export_buffer;
|
|
+ }
|
|
+ if (pipe_ctx->fmt_wdma_start_cnt[formatter->idx] <= 1) {
|
|
+ goto export_buffer;
|
|
+ }
|
|
+ if (++(pipe_ctx->fmt_wdma_sync_cnt[formatter->idx][buf_index]) >= 5) {
|
|
+ list_for_each_entry_safe(pos, n, &pipe_ctx->fmt_wdma_sync[formatter->idx][buf_index], list_entry) {
|
|
+ list_del_init(&(pos->list_entry));
|
|
+ if (pos->flags & (SC_BUF_FLAG_HW_ERR | SC_BUF_FLAG_SW_ERR))
|
|
+ spm_vdev_export_camera_vbuffer(pos, 1);
|
|
+ else
|
|
+ spm_vdev_export_camera_vbuffer(pos, 0);
|
|
+ }
|
|
+ pipe_ctx->fmt_wdma_sync_cnt[formatter->idx][buf_index] = 0;
|
|
+ } else {
|
|
+ list_add(&sc_vb->list_entry, &pipe_ctx->fmt_wdma_sync[formatter->idx][buf_index]);
|
|
+ return;
|
|
+ }
|
|
+
|
|
+export_buffer:
|
|
+ if (sc_vb->flags & (SC_BUF_FLAG_HW_ERR | SC_BUF_FLAG_SW_ERR))
|
|
+ spm_vdev_export_camera_vbuffer(sc_vb, 1);
|
|
+ else
|
|
+ spm_vdev_export_camera_vbuffer(sc_vb, 0);
|
|
+}
|
|
+
|
|
+static void fe_isp_dma_bh_handler(struct isp_dma_work_struct *isp_dma_work)
|
|
+{
|
|
+ struct isp_dma_context *dma_ctx = isp_dma_work->dma_ctx;
|
|
+ struct spm_camera_vnode *sc_vnode = dma_ctx->vnode;
|
|
+ //struct isp_context *isp_ctx = dma_ctx->isp_ctx;
|
|
+ struct spm_camera_pipeline *sc_pipeline = NULL;
|
|
+ struct media_pipeline *pipe = media_entity_pipeline(&sc_vnode->vnode.entity);
|
|
+ struct spm_camera_vbuffer *n = NULL, *pos = NULL;
|
|
+ unsigned int *hw_err_code = NULL;
|
|
+ unsigned int irq_status = isp_dma_work->irq_status;
|
|
+ LIST_HEAD(export_list);
|
|
+ unsigned long flags = 0;
|
|
+
|
|
+ if (!sc_vnode || !pipe) {
|
|
+ cam_dbg("a[%d] debug 1", dma_ctx->id);
|
|
+ goto dma_tasklet_finish;
|
|
+ }
|
|
+
|
|
+ sc_pipeline = media_pipeline_to_sc_pipeline(pipe);
|
|
+
|
|
+ if (irq_status & DMA_IRQ_DONE && sc_pipeline && !sc_pipeline->is_online_mode)
|
|
+ cam_dbg("a[%d] direction=%d debug 2", dma_ctx->id, sc_vnode->direction);
|
|
+ spin_lock(&sc_vnode->waitq_head.lock);
|
|
+ sc_vnode->in_tasklet = 1;
|
|
+ if (sc_vnode->in_streamoff || !is_vnode_streaming(sc_vnode)) {
|
|
+ wake_up_locked(&sc_vnode->waitq_head);
|
|
+ spin_unlock(&sc_vnode->waitq_head.lock);
|
|
+ if (irq_status & DMA_IRQ_DONE && sc_pipeline && !sc_pipeline->is_online_mode)
|
|
+ cam_dbg("a[%d] debug 3", dma_ctx->id);
|
|
+ goto dma_tasklet_finish;
|
|
+ }
|
|
+ wake_up_locked(&sc_vnode->waitq_head);
|
|
+ spin_unlock(&sc_vnode->waitq_head.lock);
|
|
+
|
|
+ if (irq_status & DMA_IRQ_START) {
|
|
+ if (dma_ctx->used_for_hdr) {
|
|
+ cam_dbg("a[%d] debug 4", dma_ctx->id);
|
|
+ goto dma_tasklet_finish;
|
|
+ }
|
|
+ }
|
|
+
|
|
+ spin_lock_irqsave(&sc_vnode->slock, flags);
|
|
+ list_for_each_entry_safe(pos, n, &sc_vnode->busy_list, list_entry) {
|
|
+ if (pos->flags & (SC_BUF_FLAG_HW_ERR | SC_BUF_FLAG_SW_ERR | SC_BUF_FLAG_DONE_TOUCH)) {
|
|
+ list_del_init(&(pos->list_entry));
|
|
+ atomic_dec(&sc_vnode->busy_buf_cnt);
|
|
+ list_add_tail(&(pos->list_entry), &export_list);
|
|
+ }
|
|
+ }
|
|
+ spin_unlock_irqrestore(&sc_vnode->slock, flags);
|
|
+ list_for_each_entry_safe(pos, n, &export_list, list_entry) {
|
|
+ if (!(pos->flags & SC_BUF_FLAG_SOF_TOUCH) && sc_pipeline && sc_pipeline->is_online_mode) {
|
|
+ cam_warn("%s export buf index=%u frameid=%u without sof touch", sc_vnode->name, pos->vb2_v4l2_buf.vb2_buf.index, pos->vb2_v4l2_buf.sequence);
|
|
+ }
|
|
+ if (pos->flags & SC_BUF_FLAG_HW_ERR) {
|
|
+ hw_err_code = (unsigned int *)(&(pos->reserved[0]));
|
|
+ pos->vb2_v4l2_buf.flags |= V4L2_BUF_FLAG_ERROR_HW;
|
|
+ if (*hw_err_code & PIPE_ERR(1)) {
|
|
+ pos->vb2_v4l2_buf.flags |= V4L2_BUF_FLAG_IDI_OVERRUN;
|
|
+ }
|
|
+ //vi_irq_print("%s export buf index=%u frameid=%u with hw error(0x%08x)", sc_vnode->name, pos->vb2_v4l2_buf.vb2_buf.index, pos->vb2_v4l2_buf.sequence, *hw_err_code);
|
|
+ spin_lock_irqsave(&sc_pipeline->slock, flags);
|
|
+ fe_isp_export_camera_vbuffer(sc_vnode, pos);
|
|
+ spin_unlock_irqrestore(&sc_pipeline->slock, flags);
|
|
+ sc_vnode->hw_err_frm++;
|
|
+ } else if (pos->flags & SC_BUF_FLAG_SW_ERR) {
|
|
+ pos->vb2_v4l2_buf.flags |= V4L2_BUF_FLAG_ERROR_SW;
|
|
+ cam_warn("%s export buf index=%u frameid=%u with sw error", sc_vnode->name, pos->vb2_v4l2_buf.vb2_buf.index, pos->vb2_v4l2_buf.sequence);
|
|
+ spin_lock_irqsave(&sc_pipeline->slock, flags);
|
|
+ fe_isp_export_camera_vbuffer(sc_vnode, pos);
|
|
+ spin_unlock_irqrestore(&sc_pipeline->slock, flags);
|
|
+ sc_vnode->sw_err_frm++;
|
|
+ } else if (pos->flags & SC_BUF_FLAG_DONE_TOUCH) {
|
|
+ if (sc_pipeline && !sc_pipeline->is_online_mode) {
|
|
+ cam_dbg("a[%d] direction=%d export buf index=%u frameid=%u",
|
|
+ dma_ctx->id, sc_vnode->direction, pos->vb2_v4l2_buf.vb2_buf.index, pos->vb2_v4l2_buf.sequence);
|
|
+ }
|
|
+ spin_lock_irqsave(&sc_pipeline->slock, flags);
|
|
+ fe_isp_export_camera_vbuffer(sc_vnode, pos);
|
|
+ spin_unlock_irqrestore(&sc_pipeline->slock, flags);
|
|
+ sc_vnode->ok_frm++;
|
|
+ }
|
|
+ }
|
|
+
|
|
+dma_tasklet_finish:
|
|
+ if (sc_vnode) {
|
|
+ spin_lock(&sc_vnode->waitq_head.lock);
|
|
+ sc_vnode->in_tasklet = 0;
|
|
+ wake_up_locked(&sc_vnode->waitq_head);
|
|
+ spin_unlock(&sc_vnode->waitq_head.lock);
|
|
+ }
|
|
+ fe_isp_put_dma_work(dma_ctx, isp_dma_work);
|
|
+
|
|
+}
|
|
+
|
|
/* Tasklet entry point: unpack the work pointer and run the bottom half. */
static void fe_isp_dma_tasklet_handler(unsigned long param)
{
	fe_isp_dma_bh_handler((struct isp_dma_work_struct *)param);
}
|
|
+
|
|
/*
 * fe_isp_create_ctx() - allocate and initialize the ISP front-end context.
 * @pdev: VI platform device.
 *
 * Maps the "vi" register resource, acquires resets and clocks, sets up the
 * optional IOMMU translation tables, initializes all DMA in/out channel
 * contexts, grabs the CCIC controllers and prepares the print-work pool.
 *
 * All allocations use devm_*/dmam_* helpers, so error paths simply return
 * NULL and rely on device-managed cleanup.
 *
 * Return: opaque context pointer (struct isp_context *) or NULL on failure.
 */
void *fe_isp_create_ctx(struct platform_device *pdev)
{
	struct device *dev = NULL;
	struct isp_context *isp_ctx = NULL;
	int i = 0;
	int ret = 0;
	struct resource *pdev_resc = NULL;
	void __iomem *io_base = NULL;
#ifdef CONFIG_SPACEMIT_K1X_VI_IOMMU
	size_t tabs_size = 0, tab_offset = 0;
	int j = 0;
#endif
	//unsigned long rsvd_mem_size = 0, align = 0;
	//struct page **pages = NULL;

	if (!pdev) {
		pr_err("%s invalid arguments.", __func__);
		return NULL;
	}
	dev = &pdev->dev;
	isp_ctx = devm_kzalloc(dev, sizeof(*isp_ctx), GFP_KERNEL);
	if (!isp_ctx) {
		cam_err("%s not enough mem.", __func__);
		return NULL;
	}
	isp_ctx->pdev = pdev;
	/* Map the "vi" register window. */
	pdev_resc = platform_get_resource_byname(pdev, IORESOURCE_MEM, "vi");
	if (!pdev_resc) {
		cam_err("get reg resource for vi failed");
		return NULL;
	}
	io_base = devm_ioremap_resource(dev, pdev_resc);
	if (IS_ERR(io_base)) {
		cam_err("ioremap for isp-frontend failed");
		return NULL;
	}

	/* get clock(s) */
#ifdef CONFIG_ARCH_SPACEMIT
/*
	isp_ctx->ahb_clk = devm_clk_get(&pdev->dev, "isp_ahb");
	if (IS_ERR(isp_ctx->ahb_clk)) {
		ret = PTR_ERR(isp_ctx->ahb_clk);
		cam_err("failed to get ahb clock: %d\n", ret);
		return NULL;
	}
*/
	/* Shared reset lines: ahb, isp core, isp ci and lcd mclk. */
	isp_ctx->ahb_reset = devm_reset_control_get_optional_shared(&pdev->dev, "isp_ahb_reset");
	if (IS_ERR_OR_NULL(isp_ctx->ahb_reset)) {
		ret = PTR_ERR(isp_ctx->ahb_reset);
		dev_err(&pdev->dev, "not found core isp_ahb_reset, %d\n", ret);
		return NULL;
	}
	isp_ctx->isp_reset = devm_reset_control_get_optional_shared(&pdev->dev, "isp_reset");
	if (IS_ERR_OR_NULL(isp_ctx->isp_reset)) {
		ret = PTR_ERR(isp_ctx->isp_reset);
		dev_err(&pdev->dev, "not found core isp_reset, %d\n", ret);
		return NULL;
	}

	isp_ctx->isp_ci_reset = devm_reset_control_get_optional_shared(&pdev->dev, "isp_ci_reset");
	if (IS_ERR_OR_NULL(isp_ctx->isp_ci_reset)) {
		ret = PTR_ERR(isp_ctx->isp_ci_reset);
		dev_err(&pdev->dev, "not found core isp_ci_reset, %d\n", ret);
		return NULL;
	}

	isp_ctx->lcd_mclk_reset = devm_reset_control_get_optional_shared(&pdev->dev, "lcd_mclk_reset");
	if (IS_ERR_OR_NULL(isp_ctx->lcd_mclk_reset)) {
		ret = PTR_ERR(isp_ctx->lcd_mclk_reset);
		dev_err(&pdev->dev, "not found core lcd_mclk_reset, %d\n", ret);
		return NULL;
	}

	/* Function, bus and dpu clocks. */
	isp_ctx->fnc_clk = devm_clk_get(&pdev->dev, "isp_func");
	if (IS_ERR(isp_ctx->fnc_clk)) {
		ret = PTR_ERR(isp_ctx->fnc_clk);
		cam_err("failed to get function clock: %d\n", ret);
		return NULL;
	}
#ifdef CONFIG_SPACEMIT_DEBUG
	vi_running_info.is_dev_running = check_dev_running_status;
	vi_running_info.nb.notifier_call = dev_clkoffdet_notifier_handler;
	clk_notifier_register(isp_ctx->fnc_clk, &vi_running_info.nb);
#endif
	isp_ctx->bus_clk = devm_clk_get(&pdev->dev, "isp_axi");
	if (IS_ERR(isp_ctx->bus_clk)) {
		ret = PTR_ERR(isp_ctx->bus_clk);
		cam_err("failed to get bus clock: %d\n", ret);
		return NULL;
	}

	isp_ctx->dpu_clk = devm_clk_get(&pdev->dev, "dpu_mclk");
	if (IS_ERR(isp_ctx->dpu_clk)) {
		ret = PTR_ERR(isp_ctx->dpu_clk);
		cam_err("failed to get dpu clock: %d\n", ret);
		return NULL;
	}
#endif

	isp_ctx->base_addr = (unsigned long)io_base;
	for (i = 0; i < PIPE_NUM; i++) {
		INIT_LIST_HEAD(&isp_ctx->pipe_frame_id[i].entry);
	}

#ifdef CONFIG_SPACEMIT_K1X_VI_IOMMU
	/* Reserved scratch page the MMU points unused entries at (filled 0xff). */
	isp_ctx->rsvd_vaddr = devm_kmalloc(&pdev->dev, MMU_RESERVED_MEM_SIZE, GFP_KERNEL);
	if (!isp_ctx->rsvd_vaddr) {
		cam_err("failed to alloc mem for mmu reserved");
		return NULL;
	}
	isp_ctx->rsvd_phy_addr = phys_cpu2cam(virt_to_phys(isp_ctx->rsvd_vaddr));
	cam_dbg("rsvd_phy_addr=0x%llx size=%d", (uint64_t)isp_ctx->rsvd_phy_addr, MMU_RESERVED_MEM_SIZE);
	memset(isp_ctx->rsvd_vaddr, 0xff, MMU_RESERVED_MEM_SIZE);

	isp_ctx->mmu_dev = isp_iommu_create(dev, isp_ctx->base_addr);
	if (!isp_ctx->mmu_dev) {
		cam_err("failed to create iommu device");
		return NULL;
	}
	/* One coherent buffer holds every channel's translation tables. */
	tabs_size = 2 * IOMMU_TRANS_TAB_MAX_NUM * sizeof(uint32_t) * ISP_IOMMU_TBU_NUM;
	isp_ctx->trans_tab_cpu_addr = dmam_alloc_coherent(dev,
							  tabs_size,
							  &isp_ctx->trans_tab_dma_addr,
							  GFP_KERNEL);
	if (!isp_ctx->trans_tab_cpu_addr) {
		cam_err("%s alloc page tables failed", __func__);
		return NULL;
	}
	isp_ctx->total_trans_tab_sz = tabs_size;
	tab_offset = 0;
#endif

	/* Output DMA channels: init context, then carve per-plane table slices. */
	for (i = 0; i < AOUT_NUM; i++) {
		isp_ctx->dma_out_ctx[i].id = i;
		ret = fe_isp_init_dma_context(&isp_ctx->dma_out_ctx[i],
					      isp_ctx,
					      ISP_DMA_WORK_MAX_CNT,
					      fe_isp_dma_work_handler,
					      fe_isp_dma_tasklet_handler,
					      dev);
		if (ret)
			return NULL;
#ifdef CONFIG_SPACEMIT_K1X_VI_IOMMU
		/* NOTE(review): channels 9/10 get no tables, and 12/13 are
		 * single-plane — presumably hw-specific; confirm against the
		 * channel map in hw documentation. */
		if (i == 9 || i == 10)
			continue;
		for (j = 0; j < 2; j++) {
			isp_ctx->dma_out_ctx[i].tt_addr[j][0] = isp_ctx->trans_tab_dma_addr + tab_offset;
			isp_ctx->dma_out_ctx[i].tt_base[j][0] = isp_ctx->trans_tab_cpu_addr + tab_offset;
			tab_offset += IOMMU_TRANS_TAB_MAX_NUM * sizeof(uint32_t);
			if (i != 12 && i != 13) {
				isp_ctx->dma_out_ctx[i].tt_addr[j][1] = isp_ctx->trans_tab_dma_addr + tab_offset;
				isp_ctx->dma_out_ctx[i].tt_base[j][1] = isp_ctx->trans_tab_cpu_addr + tab_offset;
				tab_offset += IOMMU_TRANS_TAB_MAX_NUM * sizeof(uint32_t);
			}
		}
#endif
	}
	/* Input DMA channels: single-plane table slices. */
	for (i = 0; i < AIN_NUM; i++) {
		isp_ctx->dma_in_ctx[i].id = i;
		ret = fe_isp_init_dma_context(&isp_ctx->dma_in_ctx[i],
					      isp_ctx,
					      ISP_DMA_WORK_MAX_CNT,
					      fe_isp_dma_work_handler,
					      fe_isp_dma_tasklet_handler,
					      dev);
		if (ret)
			return NULL;
#ifdef CONFIG_SPACEMIT_K1X_VI_IOMMU
		for (j = 0; j < 2; j++) {
			isp_ctx->dma_in_ctx[i].tt_addr[j][0] = isp_ctx->trans_tab_dma_addr + tab_offset;
			isp_ctx->dma_in_ctx[i].tt_base[j][0] = isp_ctx->trans_tab_cpu_addr + tab_offset;
			tab_offset += IOMMU_TRANS_TAB_MAX_NUM * sizeof(uint32_t);
		}
#endif
	}

	/* Grab a control handle for each CCIC (CSI) instance. */
	for (i = 0; i < CCIC_MAX_CNT; i++) {
		ret = ccic_ctrl_get(&(isp_ctx->ccic[i].csi_ctrl), i, NULL);
		if (ret) {
			cam_err("get csi%d ctrl failed ret=%d", i, ret);
			return NULL;
		}
	}

	/* Pre-populate the free pool of deferred print work items. */
	INIT_LIST_HEAD(&isp_ctx->print_work_list);
	for (i = 0; i < ISP_PRINT_WORK_MAX_CNT; i++) {
		isp_ctx->print_works[i].isp_ctx = isp_ctx;
		INIT_WORK(&(isp_ctx->print_works[i].print_work), fe_isp_print_work_handler);
		list_add(&(isp_ctx->print_works[i].list), &isp_ctx->print_work_list);
	}
	spin_lock_init(&isp_ctx->slock);
	atomic_set(&isp_ctx->pwr_cnt, 0);
	init_completion(&isp_ctx->global_reset_done);

	return isp_ctx;
}
|
|
+
|
|
/*
 * fe_isp_release_ctx() - tear down a context made by fe_isp_create_ctx().
 * Only the IOMMU device needs explicit unregistration; everything else is
 * device-managed and released with the platform device.
 */
void fe_isp_release_ctx(void *isp_context)
{
#ifdef CONFIG_SPACEMIT_K1X_VI_IOMMU
	struct isp_context *ctx = (struct isp_context *)isp_context;

	isp_iommu_unregister(ctx->mmu_dev);
#endif
}
|
|
+
|
|
+static int notify_caputre_until_done(int slice_index,
|
|
+ struct camera_capture_slice_info *slice_info,
|
|
+ int timeout)
|
|
+{
|
|
+ struct platform_device *pdev = k1xvi_get_platform_device();
|
|
+ struct k1xvi_platform_data *drvdata = NULL;
|
|
+ struct isp_context *isp_ctx = NULL;
|
|
+ struct media_pipeline *pipe = NULL;
|
|
+ struct spm_camera_pipeline *sc_pipeline = NULL;
|
|
+ struct isp_pipeline_context *pipe_ctx = NULL;
|
|
+ long l_ret = 0;
|
|
+
|
|
+ cam_not("slice(%d/%d) slice_width:%d raw_read_offset:%d yuv_out_offset:%d dwt[1]_offset:%d dwt[2]_offset:%d dwt[3]_offset:%d dwt[4]_offset:%d notify",
|
|
+ slice_index, slice_info->total_slice_cnt,
|
|
+ slice_info->slice_width, slice_info->raw_read_offset,
|
|
+ slice_info->yuv_out_offset,
|
|
+ slice_info->dwt_offset[0], slice_info->dwt_offset[1],
|
|
+ slice_info->dwt_offset[2], slice_info->dwt_offset[3]);
|
|
+ if (!pdev) {
|
|
+ cam_err("%s pdev is null", __func__);
|
|
+ return -1;
|
|
+ }
|
|
+ if (slice_info->exception_exit) {
|
|
+ cam_err("%s isp exception exit", __func__);
|
|
+ return 0;
|
|
+ }
|
|
+ drvdata = platform_get_drvdata(pdev);
|
|
+ BUG_ON(!drvdata);
|
|
+ isp_ctx = drvdata->isp_ctx;
|
|
+ BUG_ON(!isp_ctx);
|
|
+ if (slice_info->hw_pipe_id < 0 || slice_info->hw_pipe_id > 1) {
|
|
+ cam_err("%s hw_pipe_id %d is invalid", __func__, slice_info->hw_pipe_id);
|
|
+ return -1;
|
|
+ }
|
|
+ pipe = media_entity_pipeline(&isp_ctx->pipes[slice_info->hw_pipe_id]->sc_subdev.pcsd.sd.entity);
|
|
+ BUG_ON(!pipe);
|
|
+ sc_pipeline = media_pipeline_to_sc_pipeline(pipe);
|
|
+ BUG_ON(!sc_pipeline);
|
|
+ mutex_lock(&sc_pipeline->mlock);
|
|
+ if (!sc_pipeline->usr_data) {
|
|
+ pipe_ctx = fe_pipeline_create_ctx(&pdev->dev);
|
|
+ if (!pipe_ctx) {
|
|
+ mutex_unlock(&sc_pipeline->mlock);
|
|
+ cam_err("%s create pipe_ctx failed", __func__);
|
|
+ return -1;
|
|
+ }
|
|
+ sc_pipeline->usr_data = pipe_ctx;
|
|
+ } else {
|
|
+ pipe_ctx = (struct isp_pipeline_context *)sc_pipeline->usr_data;
|
|
+ }
|
|
+ pipe_ctx->cc_slice_info = *slice_info;
|
|
+ sc_pipeline->slice_id = slice_index;
|
|
+ sc_pipeline->total_slice_cnt = slice_info->total_slice_cnt;
|
|
+ sc_pipeline->slice_result = 0;
|
|
+ reinit_completion(&sc_pipeline->slice_done);
|
|
+ mutex_unlock(&sc_pipeline->mlock);
|
|
+ atomic_set(&sc_pipeline->slice_info_update, 1);
|
|
+ wake_up_interruptible_all(&sc_pipeline->slice_waitq);
|
|
+ if (sc_pipeline->state <= PIPELINE_ST_STOPPING) {
|
|
+ return 0;
|
|
+ }
|
|
+ l_ret = wait_for_completion_interruptible_timeout(&sc_pipeline->slice_done, msecs_to_jiffies(timeout));
|
|
+ if (sc_pipeline->state <= PIPELINE_ST_STOPPING) {
|
|
+ return 0;
|
|
+ }
|
|
+ if (l_ret == 0) {
|
|
+ cam_err("%s wait for slice(%d/%d) done timeout(%d)", __func__, slice_index, slice_info->total_slice_cnt, timeout);
|
|
+ hw_dma_dump_regs(isp_ctx->dma_block);
|
|
+ return -1;
|
|
+ } else if (l_ret < 0) {
|
|
+ cam_err("%s wait for slice doen interrupted by user app", __func__);
|
|
+ return -1;
|
|
+ }
|
|
+ return sc_pipeline->slice_result;
|
|
+}
|
|
+
|
|
/* VI callbacks exported to the capture layer (slice-by-slice capture). */
struct spm_camera_vi_ops vi_ops = {
	.notify_caputre_until_done = notify_caputre_until_done,
};
|
|
diff --git a/drivers/media/platform/spacemit/camera/vi/k1xvi/fe_isp.h b/drivers/media/platform/spacemit/camera/vi/k1xvi/fe_isp.h
|
|
new file mode 100644
|
|
index 000000000000..111111111111
|
|
--- /dev/null
|
|
+++ b/drivers/media/platform/spacemit/camera/vi/k1xvi/fe_isp.h
|
|
@@ -0,0 +1,284 @@
|
|
+// SPDX-License-Identifier: GPL-2.0
|
|
+/*
|
|
+ * fe_isp.h - k1xisp front end
|
|
+ *
|
|
+ * Copyright(C) 2023 SPACEMIT Micro Limited
|
|
+ */
|
|
+
|
|
+#ifndef _SPACEMIT_ISP_IDI_H_
|
|
+#define _SPACEMIT_ISP_IDI_H_
|
|
+
|
|
+#include <linux/types.h>
|
|
+#include <media/media-entity.h>
|
|
+#include <media/v4l2-device.h>
|
|
+#include <media/v4l2-subdev.h>
|
|
+#include <linux/platform_device.h>
|
|
+#include "../subdev.h"
|
|
+#include "../mlink.h"
|
|
+
|
|
/* Frame geometry limits accepted by the ISP front end. */
#define FE_ISP_MAX_WIDTH (5000)
#define FE_ISP_MAX_HEIGHT (5000)

#define FE_ISP_MIN_WIDTH (256)
#define FE_ISP_MIN_HEIGHT (144)

/* Driver-private pipeline actions, extending the common action set. */
enum {
	PIPELINE_ACTION_PIPE_ACK = PIPELINE_ACTION_CUSTOM_BASE + 1,
};

/* Offline (memory-to-memory) channels: one input pad, two pipe outputs. */
#define OFFLINE_CH_NUM (2)
enum {
	OFFLINE_CH_PAD_IN = 0,
	OFFLINE_CH_PAD_P0OUT,
	OFFLINE_CH_PAD_P1OUT,
	OFFLINE_CH_PAD_NUM,
};

/* Base subdev wrapper: a camera subdev plus its pipeline notifier hook. */
struct fe_x {
	struct spm_camera_subdev sc_subdev;
	struct notifier_block pipeline_notify_block;
};
|
|
+
|
|
/* Offline channel subdev: pads/formats indexed by OFFLINE_CH_PAD_*. */
struct fe_offline_channel {
	struct spm_camera_subdev sc_subdev;
	struct media_pad pads[OFFLINE_CH_PAD_NUM];
	struct v4l2_subdev_format pad_fmts[OFFLINE_CH_PAD_NUM];
	int idx;	/* channel instance index */
};

/* CSI receivers: sink pad plus rawdump/pipe/axi-out source pads. */
#define CSI_NUM (3)
enum {
	CSI_PAD_IN = 0,
	CSI_PAD_RAWDUMP0,
	CSI_PAD_RAWDUMP1,
	CSI_PAD_PIPE0,
	CSI_PAD_PIPE1,
	CSI_PAD_AOUT,
	CSI_PAD_NUM
};
/* CSI subdev instance. */
struct csi {
	struct spm_camera_subdev sc_subdev;
	struct notifier_block pipeline_notify_block;
	struct media_pad pads[CSI_PAD_NUM];
	struct v4l2_subdev_format pad_fmts[CSI_PAD_NUM];
	int idx;		/* CSI instance index */
	int channel_type;	/* main vs. vcdt channel (see SD_SUB users) */
};
|
|
+
|
|
/* Rawdump subdevs: dump raw sensor data, one sink and one source pad. */
#define RAWDUMP_NUM (2)
#define RAWDUMP_PAD_NUM (2)
struct fe_rawdump {
	struct spm_camera_subdev sc_subdev;
	struct notifier_block pipeline_notify_block;
	struct media_pad pads[RAWDUMP_PAD_NUM];
	struct v4l2_subdev_format pad_fmts[RAWDUMP_PAD_NUM];
	atomic_t close_done;	/* set when teardown has completed */
	int idx;		/* rawdump instance index */
	int rawdump_only;	/* nonzero: no pipe output, rawdump path only */
};

/* Formatters: one input, one axi out, four downscaled (D1..D4) outputs. */
#define FORMATTER_NUM (2)
enum {
	FMT_PAD_IN = 0,
	FMT_PAD_AOUT,
	FMT_PAD_D1OUT,
	FMT_PAD_D2OUT,
	FMT_PAD_D3OUT,
	FMT_PAD_D4OUT,
	FORMATTER_PAD_NUM,
};

struct fe_formatter {
	struct spm_camera_subdev sc_subdev;
	struct media_pad pads[FORMATTER_PAD_NUM];
	struct v4l2_subdev_format pad_fmts[FORMATTER_PAD_NUM];
	atomic_t dwt_refcnt;	/* number of DWT users attached */
	int idx;		/* formatter instance index */
};
|
|
+
|
|
/* DWT (wavelet) subdevs: one per formatter, four pyramid layers each. */
#define DWT_NUM (2)
#define DWT_LAYER_NUM (4)
#define DWT_PAD_NUM (2)
struct fe_dwt {
	struct spm_camera_subdev sc_subdev;
	struct media_pad pads[DWT_PAD_NUM];
	struct v4l2_subdev_format pad_fmts[DWT_PAD_NUM];
	int idx;	/* DWT instance index */
	int layer_idx;	/* pyramid layer this instance produces */
};

/* ISP pipes: sink, HDR out, six formatter outputs and a rawdump out. */
#define PIPE_NUM (2)
enum {
	PIPE_PAD_IN = 0,
	PIPE_PAD_HDROUT,
	PIPE_PAD_F0OUT,
	PIPE_PAD_F1OUT,
	PIPE_PAD_F2OUT,
	PIPE_PAD_F3OUT,
	PIPE_PAD_F4OUT,
	PIPE_PAD_F5OUT,
	PIPE_PAD_RAWDUMP0OUT,
	PIPE_PAD_NUM,
};

struct fe_pipe {
	struct spm_camera_subdev sc_subdev;
	struct notifier_block pipeline_notify_block;
	struct media_pad pads[PIPE_PAD_NUM];
	struct v4l2_subdev_format pad_fmts[PIPE_PAD_NUM];
	struct completion close_done;	/* signalled when the pipe shut down */
	struct completion sde_sof;	/* signalled on SDE start-of-frame */
	int idx;			/* pipe instance index */
};
|
|
+
|
|
/* HDR combine: merges the two pipes (P0/P1 sinks) into six outputs. */
#define HDR_COMBINE_NUM (1)
enum {
	HDR_PAD_P0IN = 0,
	HDR_PAD_P1IN,
	HDR_PAD_F0OUT,
	HDR_PAD_F1OUT,
	HDR_PAD_F2OUT,
	HDR_PAD_F3OUT,
	HDR_PAD_F4OUT,
	HDR_PAD_F5OUT,
	HDR_COMBINE_PAD_NUM,
};

struct fe_hdr_combine {
	struct spm_camera_subdev sc_subdev;
	struct media_pad pads[HDR_COMBINE_PAD_NUM];
	struct v4l2_subdev_format pad_fmts[HDR_COMBINE_PAD_NUM];
};
|
|
+
|
|
+static inline struct csi *v4l2_subdev_to_csi(struct v4l2_subdev *sd)
|
|
+{
|
|
+ if (SD_GRP(sd->grp_id) != MIPI || (SD_SUB(sd->grp_id) != CSI_MAIN && SD_SUB(sd->grp_id) != CSI_VCDT))
|
|
+ return NULL;
|
|
+ return (struct csi *)sd;
|
|
+}
|
|
+
|
|
+static inline struct csi *media_entity_to_csi(struct media_entity *me)
|
|
+{
|
|
+ struct v4l2_subdev *sd = NULL;
|
|
+ if (!is_subdev(me))
|
|
+ return NULL;
|
|
+
|
|
+ sd = media_entity_to_v4l2_subdev(me);
|
|
+ return v4l2_subdev_to_csi(sd);
|
|
+}
|
|
+
|
|
+static inline struct fe_hdr_combine *v4l2_subdev_to_hdr_combine(struct v4l2_subdev *sd)
|
|
+{
|
|
+ if (SD_GRP(sd->grp_id) != FE_ISP || SD_SUB(sd->grp_id) != HDR_COMBINE)
|
|
+ return NULL;
|
|
+ return (struct fe_hdr_combine *)sd;
|
|
+}
|
|
+
|
|
+static inline struct fe_hdr_combine *media_entity_to_hdr_combine(struct media_entity *me)
|
|
+{
|
|
+ struct v4l2_subdev *sd = NULL;
|
|
+ if (!is_subdev(me))
|
|
+ return NULL;
|
|
+
|
|
+ sd = media_entity_to_v4l2_subdev(me);
|
|
+ return v4l2_subdev_to_hdr_combine(sd);
|
|
+}
|
|
+
|
|
+static inline struct fe_pipe *v4l2_subdev_to_pipe(struct v4l2_subdev *sd)
|
|
+{
|
|
+ if (SD_GRP(sd->grp_id) != FE_ISP || SD_SUB(sd->grp_id) != PIPE)
|
|
+ return NULL;
|
|
+ return (struct fe_pipe *)sd;
|
|
+}
|
|
+
|
|
+static inline struct fe_pipe *media_entity_to_pipe(struct media_entity *me)
|
|
+{
|
|
+ struct v4l2_subdev *sd = NULL;
|
|
+ if (!is_subdev(me))
|
|
+ return NULL;
|
|
+
|
|
+ sd = media_entity_to_v4l2_subdev(me);
|
|
+ return v4l2_subdev_to_pipe(sd);
|
|
+}
|
|
+
|
|
+static inline struct fe_dwt *v4l2_subdev_to_dwt(struct v4l2_subdev *sd)
|
|
+{
|
|
+ if (SD_GRP(sd->grp_id) != FE_ISP || (SD_SUB(sd->grp_id) != DWT0 && SD_SUB(sd->grp_id) != DWT1))
|
|
+ return NULL;
|
|
+ return (struct fe_dwt *)sd;
|
|
+}
|
|
+
|
|
+static inline struct fe_dwt *media_entity_to_dwt(struct media_entity *me)
|
|
+{
|
|
+ struct v4l2_subdev *sd = NULL;
|
|
+ if (!is_subdev(me))
|
|
+ return NULL;
|
|
+
|
|
+ sd = media_entity_to_v4l2_subdev(me);
|
|
+ return v4l2_subdev_to_dwt(sd);
|
|
+}
|
|
+
|
|
+static inline struct fe_formatter *v4l2_subdev_to_formatter(struct v4l2_subdev *sd)
|
|
+{
|
|
+ if (SD_GRP(sd->grp_id) != FE_ISP || SD_SUB(sd->grp_id) != FORMATTER)
|
|
+ return NULL;
|
|
+ return (struct fe_formatter *)sd;
|
|
+}
|
|
+
|
|
+static inline struct fe_formatter *media_entity_to_formatter(struct media_entity *me)
|
|
+{
|
|
+ struct v4l2_subdev *sd = NULL;
|
|
+ if (!is_subdev(me))
|
|
+ return NULL;
|
|
+
|
|
+ sd = media_entity_to_v4l2_subdev(me);
|
|
+ return v4l2_subdev_to_formatter(sd);
|
|
+}
|
|
+
|
|
+static inline struct fe_rawdump *v4l2_subdev_to_rawdump(struct v4l2_subdev *sd)
|
|
+{
|
|
+ if (SD_GRP(sd->grp_id) != FE_ISP || SD_SUB(sd->grp_id) != RAWDUMP)
|
|
+ return NULL;
|
|
+ return (struct fe_rawdump *)sd;
|
|
+}
|
|
+
|
|
+static inline struct fe_rawdump *media_entity_to_rawdump(struct media_entity *me)
|
|
+{
|
|
+ struct v4l2_subdev *sd = NULL;
|
|
+ if (!is_subdev(me))
|
|
+ return NULL;
|
|
+
|
|
+ sd = media_entity_to_v4l2_subdev(me);
|
|
+ return v4l2_subdev_to_rawdump(sd);
|
|
+}
|
|
+
|
|
+static inline struct fe_offline_channel *v4l2_subdev_to_offline_channel(struct v4l2_subdev *sd)
|
|
+{
|
|
+ if (SD_GRP(sd->grp_id) != FE_ISP || SD_SUB(sd->grp_id) != OFFLINE_CHANNEL)
|
|
+ return NULL;
|
|
+ return (struct fe_offline_channel *)sd;
|
|
+}
|
|
+
|
|
+static inline struct fe_offline_channel *media_entity_to_offline_channel(struct media_entity *me)
|
|
+{
|
|
+ struct v4l2_subdev *sd = NULL;
|
|
+ if (!is_subdev(me))
|
|
+ return NULL;
|
|
+
|
|
+ sd = media_entity_to_v4l2_subdev(me);
|
|
+ return v4l2_subdev_to_offline_channel(sd);
|
|
+}
|
|
+
|
|
/* Subdev factories; isp_ctx is the opaque pointer from fe_isp_create_ctx(). */
struct fe_pipe *fe_pipe_create(unsigned int grp_id, void *isp_ctx);
struct fe_rawdump *fe_rawdump_create(unsigned int grp_id, void *isp_ctx);
struct fe_offline_channel *fe_offline_channel_create(unsigned int grp_id,
						     void *isp_ctx);
struct fe_formatter *fe_formatter_create(unsigned int grp_id, void *isp_ctx);
struct fe_dwt *fe_dwt_create(unsigned int grp_id, void *isp_ctx);
struct fe_hdr_combine *fe_hdr_combine_create(unsigned int grp_id, void *isp_ctx);
struct csi *csi_create(unsigned int grp_id, void *isp_ctx);
// return isp context pointer
void *fe_isp_create_ctx(struct platform_device *pdev);
void fe_isp_release_ctx(void *isp_context);
/* Power the front end up (on != 0) or down; returns 0 on success. */
int fe_isp_s_power(void *isp_context, int on);
#endif
|
|
diff --git a/drivers/media/platform/spacemit/camera/vi/k1xvi/hw-seq/hw_ccic.c b/drivers/media/platform/spacemit/camera/vi/k1xvi/hw-seq/hw_ccic.c
|
|
new file mode 100644
|
|
index 000000000000..111111111111
|
|
--- /dev/null
|
|
+++ b/drivers/media/platform/spacemit/camera/vi/k1xvi/hw-seq/hw_ccic.c
|
|
@@ -0,0 +1,55 @@
|
|
+// SPDX-License-Identifier: GPL-2.0
|
|
+/*
|
|
+ * hw_ccic.c - ccic hw sequence for the isp front end
|
|
+ *
|
|
+ * Copyright(C) 2023 SPACEMIT Micro Limited
|
|
+ */
|
|
+
|
|
+#include "hw_reg.h"
|
|
+#include "hw_ccic.h"
|
|
+
|
|
+void hw_ccic_set_irq_enable(struct spm_camera_block *sc_block, unsigned int enable, unsigned int disable)
|
|
+{
|
|
+ unsigned int value = 0;
|
|
+
|
|
+ value = read32(sc_block->base_addr + REG_IRQMASK);
|
|
+ value &= ~disable;
|
|
+ value |= enable;
|
|
+ write32(sc_block->base_addr + REG_IRQMASK, value);
|
|
+}
|
|
+
|
|
+unsigned int hw_ccic_get_irq_status(struct spm_camera_block *sc_block)
|
|
+{
|
|
+ unsigned int value = 0;
|
|
+
|
|
+ value = read32(sc_block->base_addr + REG_IRQSTAT);
|
|
+ return value;
|
|
+}
|
|
+
|
|
/* Clear the given CCIC IRQ status bits (write-1-to-clear register). */
void hw_ccic_clr_irq_status(struct spm_camera_block *sc_block, unsigned int clr)
{
	write32(sc_block->base_addr + REG_IRQSTAT, clr);
}
|
|
+
|
|
+void hw_ccic_set_trig_line_num(struct spm_camera_block *sc_block, unsigned int trig_line_num)
|
|
+{
|
|
+ unsigned int value = 0;
|
|
+
|
|
+ value = read32(sc_block->base_addr + REG_IDI_TRIG_LINE_NUM);
|
|
+ value &= ~IDI_LINE_NUM_MASK;
|
|
+ trig_line_num &= IDI_LINE_NUM_MASK;
|
|
+ value |= trig_line_num;
|
|
+ write32(sc_block->base_addr + REG_IDI_TRIG_LINE_NUM, value);
|
|
+}
|
|
+
|
|
+void hw_ccic_set_trig_src(struct spm_camera_block *sc_block, int src)
|
|
+{
|
|
+ unsigned int value = 0;
|
|
+
|
|
+ value = read32(sc_block->base_addr + REG_IDI_TRIG_LINE_NUM);
|
|
+ if (src)
|
|
+ value |= IDI_LINE_TRIG_SRC;
|
|
+ else
|
|
+ value &= ~IDI_LINE_TRIG_SRC;
|
|
+ write32(sc_block->base_addr + REG_IDI_TRIG_LINE_NUM, value);
|
|
+}
|
|
diff --git a/drivers/media/platform/spacemit/camera/vi/k1xvi/hw-seq/hw_ccic.h b/drivers/media/platform/spacemit/camera/vi/k1xvi/hw-seq/hw_ccic.h
|
|
new file mode 100644
|
|
index 000000000000..111111111111
|
|
--- /dev/null
|
|
+++ b/drivers/media/platform/spacemit/camera/vi/k1xvi/hw-seq/hw_ccic.h
|
|
@@ -0,0 +1,25 @@
|
|
// SPDX-License-Identifier: GPL-2.0
/*
 * hw_ccic.h - ccic hw sequence helpers for the isp front end
 *
 * Copyright(C) 2023 SPACEMIT Micro Limited
 */

#ifndef _HW_CCIC_H_
#define _HW_CCIC_H_
#include "../../cam_block.h"

#define REG_IRQMASK (0x2c) /* IRQ mask - same bits as IRQSTAT */
#define REG_IRQSTAT (0x30) /* IRQ status / clear (write-1-to-clear) */
#define REG_IDI_TRIG_LINE_NUM (0x330) /* IDI line trigger config */
#define IRQ_IPE_IDI_PRO_LINE (1 << 8)
#define IDI_LINE_TRIG_SRC (1 << 15)  /* trigger source select bit */
#define IDI_LINE_NUM_MASK (IDI_LINE_TRIG_SRC - 1) /* line number field */

void hw_ccic_set_irq_enable(struct spm_camera_block *sc_block, unsigned int enable, unsigned int disable);
unsigned int hw_ccic_get_irq_status(struct spm_camera_block *sc_block);
void hw_ccic_set_trig_line_num(struct spm_camera_block *sc_block, unsigned int trig_line_num);
void hw_ccic_set_trig_src(struct spm_camera_block *sc_block, int src);
void hw_ccic_clr_irq_status(struct spm_camera_block *sc_block, unsigned int clr);

#endif
|
|
diff --git a/drivers/media/platform/spacemit/camera/vi/k1xvi/hw-seq/hw_dma.c b/drivers/media/platform/spacemit/camera/vi/k1xvi/hw-seq/hw_dma.c
|
|
new file mode 100644
|
|
index 000000000000..111111111111
|
|
--- /dev/null
|
|
+++ b/drivers/media/platform/spacemit/camera/vi/k1xvi/hw-seq/hw_dma.c
|
|
@@ -0,0 +1,346 @@
|
|
+// SPDX-License-Identifier: GPL-2.0
|
|
+/*
|
|
+ * hw_dma.c - isp front end dma hw sequence
|
|
+ *
|
|
+ * Copyright(C) 2023 SPACEMIT Micro Limited
|
|
+ */
|
|
+
|
|
+#include "hw_reg.h"
|
|
+#include "hw_dma.h"
|
|
+#include <linux/delay.h>
|
|
+
|
|
+#define DMA_REG TOP_REG
|
|
+
|
|
+void hw_dma_set_wdma_pitch(struct spm_camera_block *sc_block,
|
|
+ unsigned int wdma_ch,
|
|
+ unsigned int num_plane,
|
|
+ unsigned int p0_pitch, unsigned int p1_pitch)
|
|
+{
|
|
+ union dma_reg_38 reg_38;
|
|
+ reg_38.value = 0;
|
|
+
|
|
+ reg_38.field.ch0_wr_pitch0 = p0_pitch;
|
|
+ if (num_plane > 1)
|
|
+ reg_38.field.ch0_wr_pitch1 = p1_pitch;
|
|
+ write32(DMA_REG(38 + wdma_ch), reg_38.value);
|
|
+}
|
|
+
|
|
+void hw_dma_set_rdma_pitch(struct spm_camera_block *sc_block,
|
|
+ unsigned int rdma_ch, unsigned int pitch)
|
|
+{
|
|
+ union dma_reg_79 reg_79;
|
|
+
|
|
+ reg_79.value = read32(DMA_REG(79 + rdma_ch));
|
|
+ reg_79.field.ch0_rd_pitch = pitch;
|
|
+ reg_79.field.ch0_rd_trigger = 0;
|
|
+ write32(DMA_REG(79 + rdma_ch), reg_79.value);
|
|
+}
|
|
+
|
|
+void hw_dma_update_rdma_address(struct spm_camera_block *sc_block,
|
|
+ unsigned int rdma_ch, uint64_t buf_addr)
|
|
+{
|
|
+ unsigned int low = 0, high = 0;
|
|
+
|
|
+ low = (unsigned int)(buf_addr & 0xffffffffUL);
|
|
+ high = (unsigned int)((buf_addr >> 32) & 0x3UL);
|
|
+ write32(DMA_REG(75 + rdma_ch), low);
|
|
+ write32(DMA_REG(124 + rdma_ch), high);
|
|
+}
|
|
+
|
|
+/*
|
|
+void hw_dma_set_rdma_weight(struct spm_camera_block *sc_block,
|
|
+ unsigned int rdma_ch,
|
|
+ unsigned int weight)
|
|
+{
|
|
+ union dma_reg_79 reg_79;
|
|
+
|
|
+ reg_79.value = read32(DMA_REG(79 + rdma_ch));
|
|
+ reg_79.field.ch0_rd_weight = weight;
|
|
+ reg_79.field.ch0_rd_trigger = 0;
|
|
+ write32(DMA_REG(79 + rdma_ch), reg_79.value);
|
|
+}
|
|
+*/
|
|
+void hw_dma_rdma_trigger(struct spm_camera_block *sc_block, unsigned int rdma_ch)
|
|
+{
|
|
+ union dma_reg_79 reg_79;
|
|
+
|
|
+ reg_79.value = read32(DMA_REG(79 + rdma_ch));
|
|
+ reg_79.field.ch0_rd_trigger = 1;
|
|
+ reg_79.field.ch0_rd_weight = 0;
|
|
+ write32(DMA_REG(79 + rdma_ch), reg_79.value);
|
|
+}
|
|
+
|
|
+static void hw_dma_set_wdma_burst_length(struct spm_camera_block *sc_block,
|
|
+ unsigned char burst_len)
|
|
+{
|
|
+ union dma_reg_68 reg_68;
|
|
+
|
|
+ reg_68.value = read32(DMA_REG(68));
|
|
+ reg_68.field.wr_burst_length = burst_len;
|
|
+ write32(DMA_REG(68), reg_68.value);
|
|
+}
|
|
+
|
|
/* Reset the DMA block to its default configuration (burst length 0x30). */
void hw_dma_reset(struct spm_camera_block *sc_block)
{
	hw_dma_set_wdma_burst_length(sc_block, 0x30);
}
|
|
+
|
|
+void hw_dma_enable_rawdump(struct spm_camera_block *sc_block, int rawdump_id,
|
|
+ unsigned int enable)
|
|
+{
|
|
+ union dma_reg_83 reg_83;
|
|
+
|
|
+ reg_83.value = read32(DMA_REG(83));
|
|
+ if (rawdump_id == 0)
|
|
+ reg_83.field.rawdump0_enable = enable;
|
|
+ else
|
|
+ reg_83.field.rawdump1_enable = enable;
|
|
+ write32(DMA_REG(83), reg_83.value);
|
|
+}
|
|
+
|
|
+void hw_dma_enable_afbc(struct spm_camera_block *sc_block, int afbc_id,
|
|
+ unsigned int enable)
|
|
+{
|
|
+ union dma_reg_83 reg_83;
|
|
+
|
|
+ reg_83.value = read32(DMA_REG(83));
|
|
+ if (afbc_id == 0)
|
|
+ reg_83.field.afbc0_enable = enable;
|
|
+ else
|
|
+ reg_83.field.afbc1_enable = enable;
|
|
+ write32(DMA_REG(83), reg_83.value);
|
|
+}
|
|
+
|
|
+void hw_dma_set_wdma_weight(struct spm_camera_block *sc_block, unsigned int wdma_ch,
|
|
+ unsigned int wr_weight)
|
|
+{
|
|
+ union dma_reg_52 reg_52;
|
|
+
|
|
+ reg_52.value = read32(DMA_REG(52 + wdma_ch));
|
|
+ reg_52.field.ch0_wr_weight = wr_weight;
|
|
+ write32(DMA_REG(52 + wdma_ch), reg_52.value);
|
|
+}
|
|
+
|
|
/*
 * Configure a write-DMA channel: weight, fifo divide mode, ready cleared.
 *
 * NOTE(review): the 'source', 'wr_offset' and 'wr_fifo_depth' parameters
 * are accepted but never used by this body — presumably leftovers from an
 * earlier register layout; confirm against callers before removing.
 */
void hw_dma_set_wdma_source(struct spm_camera_block *sc_block,
			    unsigned int wdma_ch,
			    int source,
			    unsigned int wr_offset,
			    unsigned int wr_fifo_depth,
			    unsigned int wr_weight, unsigned int div_mode)
{
	union dma_reg_52 reg_52;
	reg_52.value = read32(DMA_REG(52 + wdma_ch));

	reg_52.field.ch0_wr_weight = wr_weight;
	reg_52.field.ch0_fifo_div_mode = div_mode;
	reg_52.field.ch0_wr_ready = 0;
	write32(DMA_REG(52 + wdma_ch), reg_52.value);
}
|
|
+
|
|
+void hw_dma_set_wdma_ready(struct spm_camera_block *sc_block,
|
|
+ unsigned int wdma_ch, unsigned int ready)
|
|
+{
|
|
+ union dma_reg_52 reg_52;
|
|
+
|
|
+ reg_52.value = read32(DMA_REG(52 + wdma_ch));
|
|
+ reg_52.field.ch0_wr_ready = ready;
|
|
+ write32(DMA_REG(52 + wdma_ch), reg_52.value);
|
|
+}
|
|
+
|
|
+void hw_dma_update_wdma_address(struct spm_camera_block *sc_block,
|
|
+ unsigned int wdma_ch,
|
|
+ uint64_t p0_addr, uint64_t p1_addr)
|
|
+{
|
|
+ unsigned int low = 0, high = 0;
|
|
+
|
|
+ low = (unsigned int)(p0_addr & 0xffffffffUL);
|
|
+ high = (unsigned int)((p0_addr >> 32) & 0x3UL);
|
|
+ write32(DMA_REG(0 + wdma_ch * 2), low);
|
|
+ write32(DMA_REG(86 + wdma_ch * 2), high);
|
|
+ low = (unsigned int)(p1_addr & 0xffffffffUL);
|
|
+ high = (unsigned int)((p1_addr >> 32) & 0x3UL);
|
|
+ write32(DMA_REG(1 + wdma_ch * 2), low);
|
|
+ write32(DMA_REG(87 + wdma_ch * 2), high);
|
|
+}
|
|
+
|
|
/* Read masked DMA IRQ status register 1 (reg 69). */
unsigned int hw_dma_get_irq_status1(struct spm_camera_block *sc_block)
{
	return read32(DMA_REG(69));
}
|
|
+
|
|
/* Clear bits in DMA IRQ status register 1 (write-1-to-clear). */
void hw_dma_clr_irq_status1(struct spm_camera_block *sc_block, unsigned int clr)
{
	unsigned int bits = clr;

	write32(DMA_REG(69), bits);
}
|
|
+
|
|
/* Read masked DMA IRQ status register 2 (reg 70). */
unsigned int hw_dma_get_irq_status2(struct spm_camera_block *sc_block)
{
	return read32(DMA_REG(70));
}
|
|
+
|
|
/* Clear bits in DMA IRQ status register 2 (write-1-to-clear). */
void hw_dma_clr_irq_status2(struct spm_camera_block *sc_block, unsigned int clr)
{
	unsigned int bits = clr;

	write32(DMA_REG(70), bits);
}
|
|
+
|
|
/* Read raw (unmasked) DMA IRQ status register 1 (reg 71). */
unsigned int hw_dma_get_irq_raw_status1(struct spm_camera_block *sc_block)
{
	return read32(DMA_REG(71));
}
|
|
+
|
|
+unsigned int hw_dma_get_irq_raw_status2(struct spm_camera_block *sc_block)
|
|
+{
|
|
+ unsigned int val = 0;
|
|
+
|
|
+ val = read32(DMA_REG(72));
|
|
+ return val;
|
|
+}
|
|
+
|
|
+void __hw_dma_set_irq_enable(int irq_src,
|
|
+ unsigned int enable,
|
|
+ unsigned int disable,
|
|
+ unsigned int *value1, unsigned int *value2)
|
|
+{
|
|
+ unsigned int val = 0;
|
|
+ union dma_reg_69 *reg_1 = NULL;
|
|
+ union dma_reg_70 *reg_2 = NULL;
|
|
+
|
|
+ if (irq_src >= DMA_IRQ_SRC_WDMA_CH0 && irq_src <= DMA_IRQ_SRC_WDMA_CH9) {
|
|
+ val = ~(disable << ((irq_src - DMA_IRQ_SRC_WDMA_CH0) * 3));
|
|
+ *value1 &= val;
|
|
+ val = (enable << ((irq_src - DMA_IRQ_SRC_WDMA_CH0) * 3));
|
|
+ *value1 |= val;
|
|
+ } else if (irq_src == DMA_IRQ_SRC_WDMA_CH10) {
|
|
+ reg_1 = (union dma_reg_69 *)value1;
|
|
+ reg_2 = (union dma_reg_70 *)value2;
|
|
+ if (disable & DMA_IRQ_START)
|
|
+ reg_1->field.ch10_wr_start = 0;
|
|
+ if (disable & DMA_IRQ_DONE)
|
|
+ reg_1->field.ch10_wr_done = 0;
|
|
+ if (disable & DMA_IRQ_ERR)
|
|
+ reg_2->field.ch10_wr_err = 0;
|
|
+ if (enable & DMA_IRQ_START)
|
|
+ reg_1->field.ch10_wr_start = 1;
|
|
+ if (enable & DMA_IRQ_DONE)
|
|
+ reg_1->field.ch10_wr_done = 1;
|
|
+ if (enable & DMA_IRQ_ERR)
|
|
+ reg_2->field.ch10_wr_err = 1;
|
|
+ } else if (irq_src >= DMA_IRQ_SRC_WDMA_CH11 && irq_src <= DMA_IRQ_SRC_RDMA_CH2) {
|
|
+ val = ~(disable << ((irq_src - DMA_IRQ_SRC_WDMA_CH11) * 3 + 1));
|
|
+ *value2 &= val;
|
|
+ val = (enable << ((irq_src - DMA_IRQ_SRC_WDMA_CH11) * 3 + 1));
|
|
+ *value2 |= val;
|
|
+ } else {
|
|
+ pr_err ("set irq src %d no support", irq_src);
|
|
+ }
|
|
+}
|
|
+
|
|
+void hw_dma_set_irq_enable(struct spm_camera_block *sc_block, int irq_src,
|
|
+ unsigned int enable, unsigned int disable)
|
|
+{
|
|
+ unsigned int value1 = 0, value1_old = 0, value2 = 0, value2_old = 0;
|
|
+ if (irq_src < DMA_IRQ_SRC_ALL || irq_src > DMA_IRQ_SRC_RDMA_CH2)
|
|
+ return;
|
|
+ disable &= 0x07;
|
|
+ enable &= 0x07;
|
|
+ value1 = value1_old = read32(DMA_REG(73));
|
|
+ value2 = value2_old = read32(DMA_REG(74));
|
|
+ if (irq_src == DMA_IRQ_SRC_ALL)
|
|
+ for (irq_src = DMA_IRQ_SRC_WDMA_CH0; irq_src <= DMA_IRQ_SRC_RDMA_CH2; irq_src++)
|
|
+ __hw_dma_set_irq_enable(irq_src, enable, disable, &value1, &value2);
|
|
+ else
|
|
+ __hw_dma_set_irq_enable(irq_src, enable, disable, &value1, &value2);
|
|
+
|
|
+ if (value1 || (value2 & ((1 << 19) - 1)))
|
|
+ value2 |= (DMA_IRQ_OVERRUN | DMA_IRQ_OVERLAP);
|
|
+ else
|
|
+ value2 &= ~(DMA_IRQ_OVERRUN | DMA_IRQ_OVERLAP);
|
|
+ if (value1 != value1_old)
|
|
+ write32(DMA_REG(73), value1);
|
|
+ if (value2 != value2_old)
|
|
+ write32(DMA_REG(74), value2);
|
|
+}
|
|
+
|
|
+void hw_dma_set_fbc_irq_enable(struct spm_camera_block *sc_block, unsigned int enable,
|
|
+ unsigned int disable)
|
|
+{
|
|
+ unsigned int value = 0;
|
|
+
|
|
+ enable &= (DMA_IRQ_FBC_ENC0 | DMA_IRQ_FBC_ENC1);
|
|
+ disable &= (DMA_IRQ_FBC_ENC0 | DMA_IRQ_FBC_ENC1);
|
|
+ value = read32(DMA_REG(74));
|
|
+ value &= ~disable;
|
|
+ value |= enable;
|
|
+ write32(DMA_REG(74), value);
|
|
+}
|
|
+
|
|
+unsigned int hw_dma_irq_analyze(int irq_src, unsigned int status1, unsigned int status2)
|
|
+{
|
|
+ unsigned int val = 0;
|
|
+ union dma_reg_69 *reg_1 = (union dma_reg_69 *)(&status1);
|
|
+ union dma_reg_70 *reg_2 = (union dma_reg_70 *)(&status2);
|
|
+
|
|
+ if (irq_src < DMA_IRQ_SRC_WDMA_CH0 || irq_src > DMA_IRQ_SRC_RDMA_CH2)
|
|
+ return 0;
|
|
+ if (irq_src >= DMA_IRQ_SRC_WDMA_CH0 && irq_src <= DMA_IRQ_SRC_WDMA_CH9) {
|
|
+ val = status1 >> ((irq_src - DMA_IRQ_SRC_WDMA_CH0) * 3);
|
|
+ } else if (irq_src == DMA_IRQ_SRC_WDMA_CH10) {
|
|
+ val = reg_2->field.ch10_wr_err;
|
|
+ val <<= 1;
|
|
+ val += reg_1->field.ch10_wr_done;
|
|
+ val <<= 1;
|
|
+ val += reg_1->field.ch10_wr_start;
|
|
+ } else if (irq_src >= DMA_IRQ_SRC_WDMA_CH11 && irq_src <= DMA_IRQ_SRC_RDMA_CH2) {
|
|
+ val = status2 >> ((irq_src - DMA_IRQ_SRC_WDMA_CH11) * 3 + 1);
|
|
+ }
|
|
+ val &= 0x07;
|
|
+ return val;
|
|
+}
|
|
+
|
|
+void hw_dma_trigger_pipe_tbu_load(struct spm_camera_block *sc_block, int pipe_id)
|
|
+{
|
|
+ union dma_reg_86 reg_86;
|
|
+
|
|
+ reg_86.value = read32(DMA_REG(86));
|
|
+ if (pipe_id == 0)
|
|
+ reg_86.field.mmu_start_p0 = 1;
|
|
+ else
|
|
+ reg_86.field.mmu_start_p1 = 1;
|
|
+ write32(DMA_REG(86), reg_86.value);
|
|
+}
|
|
+
|
|
+void hw_dma_trigger_rdp_tbu_load(struct spm_camera_block *sc_block, int rawdump_id)
|
|
+{
|
|
+ union dma_reg_86 reg_86;
|
|
+
|
|
+ reg_86.value = read32(DMA_REG(86));
|
|
+ if (rawdump_id == 0)
|
|
+ reg_86.field.mmu_start_p0 = 1;
|
|
+ else
|
|
+ reg_86.field.mmu_start_p1 = 1;
|
|
+ write32(DMA_REG(86), reg_86.value);
|
|
+}
|
|
+
|
|
+void hw_dma_dump_regs(struct spm_camera_block *sc_block)
|
|
+{
|
|
+ unsigned int val_69 = 0, val_70 = 0, val_71 = 0;
|
|
+ unsigned int val_72 = 0, val_73 = 0, val_74 = 0;
|
|
+
|
|
+ val_69 = read32(DMA_REG(69));
|
|
+ val_70 = read32(DMA_REG(70));
|
|
+ val_71 = read32(DMA_REG(71));
|
|
+ val_72 = read32(DMA_REG(72));
|
|
+ val_73 = read32(DMA_REG(73));
|
|
+ val_74 = read32(DMA_REG(74));
|
|
+ pr_info("cam_not: vi: dma reg_69=0x%08x reg_70=0x%08x reg_71=0x%08x reg_72=0x%08x reg_73=0x%08x reg_74=0x%08x\n",
|
|
+ val_69, val_70, val_71, val_72, val_73, val_74);
|
|
+}
|
|
diff --git a/drivers/media/platform/spacemit/camera/vi/k1xvi/hw-seq/hw_dma.h b/drivers/media/platform/spacemit/camera/vi/k1xvi/hw-seq/hw_dma.h
|
|
new file mode 100644
|
|
index 000000000000..111111111111
|
|
--- /dev/null
|
|
+++ b/drivers/media/platform/spacemit/camera/vi/k1xvi/hw-seq/hw_dma.h
|
|
@@ -0,0 +1,134 @@
|
|
+// SPDX-License-Identifier: GPL-2.0
|
|
+/*
|
|
+ * hw_dma.h - isp front end dma hw sequence
|
|
+ *
|
|
+ * Copyright(C) 2023 SPACEMIT Micro Limited
|
|
+ */
|
|
+
|
|
+#ifndef _HW_DMA_H_
|
|
+#define _HW_DMA_H_
|
|
+#include "../../cam_block.h"
|
|
+#define SPACEMIT_ISP_DMA_OFFSET (0x00011000)
|
|
+
|
|
+struct wdma_fifo_ctrl {
|
|
+ unsigned int offset;
|
|
+ unsigned int depth;
|
|
+ unsigned int weight;
|
|
+ unsigned int div_mode;
|
|
+};
|
|
+
|
|
+void hw_dma_set_wdma_pitch(struct spm_camera_block *sc_block,
|
|
+ unsigned int wdma_ch,
|
|
+ unsigned int num_plane,
|
|
+ unsigned int p0_pitch, unsigned int p1_pitch);
|
|
+void hw_dma_set_rdma_pitch(struct spm_camera_block *sc_block,
|
|
+ unsigned int rdma_ch, unsigned int pitch);
|
|
+void hw_dma_update_rdma_address(struct spm_camera_block *sc_block,
|
|
+ unsigned int rdma_ch, uint64_t buf_addr);
|
|
+void hw_dma_update_wdma_address(struct spm_camera_block *sc_block,
|
|
+ unsigned int wdma_ch,
|
|
+ uint64_t p0_addr, uint64_t p1_addr);
|
|
+void hw_dma_set_wdma_ready(struct spm_camera_block *sc_block,
|
|
+ unsigned int wdma_ch, unsigned int ready);
|
|
+/*
|
|
+void hw_dma_set_rdma_weight(unsigned int base_addr,
|
|
+ unsigned int rdma_ch,
|
|
+ unsigned int weight);
|
|
+*/
|
|
+void hw_dma_rdma_trigger(struct spm_camera_block *sc_block, unsigned int rdma_ch);
|
|
+//void hw_dma_set_wdma_burst_length(struct spm_camera_block *sc_block, unsigned char burst_len);
|
|
+void hw_dma_reset(struct spm_camera_block *sc_block);
|
|
+
|
|
+enum {
|
|
+ RAWDUMP0 = 0,
|
|
+ RAWDUMP1,
|
|
+ FORMATTER0,
|
|
+ FORMATTER1,
|
|
+ FORMATTER2,
|
|
+ WBMP0 = FORMATTER2,
|
|
+ EISP0 = FORMATTER2,
|
|
+ FORMATTER3,
|
|
+ WBMP1 = FORMATTER3,
|
|
+ EISP1 = FORMATTER3,
|
|
+ FORMATTER4,
|
|
+ AEMP0 = FORMATTER4,
|
|
+ FORMATTER5,
|
|
+ AEMP1 = FORMATTER5,
|
|
+ DWT0_LAYER1,
|
|
+ AFCP0 = DWT0_LAYER1,
|
|
+ DWT0_LAYER2,
|
|
+ AFCP1 = DWT0_LAYER2,
|
|
+ DWT0_LAYER3,
|
|
+ DWT0_LAYER4,
|
|
+ DWT1_LAYER1,
|
|
+ DWT1_LAYER2,
|
|
+ DWT1_LAYER3,
|
|
+ DWT1_LAYER4,
|
|
+};
|
|
+void hw_dma_set_wdma_source(struct spm_camera_block *sc_block,
|
|
+ unsigned int wdma_ch,
|
|
+ int source,
|
|
+ unsigned int wr_offset,
|
|
+ unsigned int wr_fifo_depth,
|
|
+ unsigned int wr_weight, unsigned int div_mode);
|
|
+
|
|
+unsigned int hw_dma_get_irq_status1(struct spm_camera_block *sc_block);
|
|
+void hw_dma_clr_irq_status1(struct spm_camera_block *sc_block, unsigned int clr);
|
|
+unsigned int hw_dma_get_irq_status2(struct spm_camera_block *sc_block);
|
|
+void hw_dma_clr_irq_status2(struct spm_camera_block *sc_block, unsigned int clr);
|
|
+unsigned int hw_dma_get_irq_raw_status1(struct spm_camera_block *sc_block);
|
|
+unsigned int hw_dma_get_irq_raw_status2(struct spm_camera_block *sc_block);
|
|
+enum {
|
|
+ DMA_IRQ_SRC_ALL = -1,
|
|
+ DMA_IRQ_SRC_WDMA_CH0,
|
|
+ DMA_IRQ_SRC_WDMA_CH1,
|
|
+ DMA_IRQ_SRC_WDMA_CH2,
|
|
+ DMA_IRQ_SRC_WDMA_CH3,
|
|
+ DMA_IRQ_SRC_WDMA_CH4,
|
|
+ DMA_IRQ_SRC_WDMA_CH5,
|
|
+ DMA_IRQ_SRC_WDMA_CH6,
|
|
+ DMA_IRQ_SRC_WDMA_CH7,
|
|
+ DMA_IRQ_SRC_WDMA_CH8,
|
|
+ DMA_IRQ_SRC_WDMA_CH9,
|
|
+ DMA_IRQ_SRC_WDMA_CH10,
|
|
+ DMA_IRQ_SRC_WDMA_CH11,
|
|
+ DMA_IRQ_SRC_WDMA_CH12,
|
|
+ DMA_IRQ_SRC_WDMA_CH13,
|
|
+ DMA_IRQ_SRC_WDMA_CH14_P0,
|
|
+ DMA_IRQ_SRC_WDMA_CH14_P1,
|
|
+ DMA_IRQ_SRC_WDMA_CH15,
|
|
+ DMA_IRQ_SRC_RDMA_CH0,
|
|
+ DMA_IRQ_SRC_RDMA_CH1,
|
|
+ DMA_IRQ_SRC_RDMA_CH2,
|
|
+};
|
|
+
|
|
+#define DMA_IRQ_START (1 << 0)
|
|
+#define DMA_IRQ_DONE (1 << 1)
|
|
+#define DMA_IRQ_ERR (1 << 2)
|
|
+#define DMA_IRQ_ALL (DMA_IRQ_START | DMA_IRQ_DONE | DMA_IRQ_ERR)
|
|
+
|
|
+void hw_dma_set_irq_enable(struct spm_camera_block *sc_block, int irq_src,
|
|
+ unsigned int enable, unsigned int disable);
|
|
+unsigned int hw_dma_irq_analyze(int irq_src, unsigned int status1,
|
|
+ unsigned int status2);
|
|
+
|
|
+#define DMA_IRQ_FBC_ENC0 (1 << 28)
|
|
+#define DMA_IRQ_FBC_ENC1 (1 << 29)
|
|
+#define DMA_IRQ_OVERRUN (1 << 30)
|
|
+#define DMA_IRQ_OVERLAP (1 << 31)
|
|
+void hw_dma_set_fbc_irq_enable(struct spm_camera_block *sc_block, unsigned int enable,
|
|
+ unsigned int disable);
|
|
+void hw_dma_enable_rawdump(struct spm_camera_block *sc_block, int rawdump_id,
|
|
+ unsigned int enable);
|
|
+void hw_dma_set_wdma_weight(struct spm_camera_block *sc_block, unsigned int wdma_ch,
|
|
+ unsigned int wr_weight);
|
|
+void hw_dma_enable_afbc(struct spm_camera_block *sc_block, int afbc_id,
|
|
+ unsigned int enable);
|
|
+void hw_dma_trigger_pipe_tbu_load(struct spm_camera_block *sc_block, int pipe_id);
|
|
+void hw_dma_trigger_rdp_tbu_load(struct spm_camera_block *sc_block, int rawdump_id);
|
|
+void __hw_dma_set_irq_enable(int irq_src,
|
|
+ unsigned int enable,
|
|
+ unsigned int disable,
|
|
+ unsigned int *value1, unsigned int *value2);
|
|
+void hw_dma_dump_regs(struct spm_camera_block *sc_block);
|
|
+#endif
|
|
diff --git a/drivers/media/platform/spacemit/camera/vi/k1xvi/hw-seq/hw_iommu.c b/drivers/media/platform/spacemit/camera/vi/k1xvi/hw-seq/hw_iommu.c
|
|
new file mode 100644
|
|
index 000000000000..111111111111
|
|
--- /dev/null
|
|
+++ b/drivers/media/platform/spacemit/camera/vi/k1xvi/hw-seq/hw_iommu.c
|
|
@@ -0,0 +1,373 @@
|
|
+// SPDX-License-Identifier: GPL-2.0
|
|
+/*
|
|
+ * isp_iommu.c - Driver for ISP IOMMU
|
|
+ *
|
|
+ * Copyright (C) 2023 SPACEMIT Micro Limited
|
|
+ */
|
|
+//#define DEBUG
|
|
+
|
|
+#ifdef CONFIG_SPACEMIT_K1X_VI_IOMMU
|
|
+//#include <soc/spm/plat.h>
|
|
+#include <linux/device.h>
|
|
+#include <linux/types.h>
|
|
+#include <linux/platform_device.h>
|
|
+#include <linux/spinlock.h>
|
|
+#include <linux/bitops.h>
|
|
+#include <linux/err.h>
|
|
+#include <linux/io.h>
|
|
+#include <linux/printk.h>
|
|
+#include "hw_reg.h"
|
|
+#include "hw_reg_iommu.h"
|
|
+#include "hw_iommu.h"
|
|
+#undef CAM_MODULE_TAG
|
|
+#define CAM_MODULE_TAG CAM_MDL_VI
|
|
+#include <cam_dbg.h>
|
|
+
|
|
+static inline uint32_t iommu_reg_read(struct isp_iommu_device *mmu_dev, uint32_t reg)
|
|
+{
|
|
+ return read32(mmu_dev->regs_base + reg);
|
|
+}
|
|
+
|
|
+static inline void iommu_reg_write(struct isp_iommu_device *mmu_dev,
|
|
+ uint32_t reg, uint32_t val)
|
|
+{
|
|
+ write32(mmu_dev->regs_base + reg, val);
|
|
+}
|
|
+
|
|
+static inline void iommu_reg_write_mask(struct isp_iommu_device *mmu_dev,
|
|
+ uint32_t reg, uint32_t val, uint32_t mask)
|
|
+{
|
|
+ uint32_t v;
|
|
+
|
|
+ v = iommu_reg_read(mmu_dev, reg);
|
|
+ v = (v & ~mask) | (val & mask);
|
|
+ iommu_reg_write(mmu_dev, reg, v);
|
|
+}
|
|
+
|
|
+static inline void iommu_reg_set_bit(struct isp_iommu_device *mmu_dev,
|
|
+ uint32_t reg, uint32_t val)
|
|
+{
|
|
+ iommu_reg_write_mask(mmu_dev, reg, val, val);
|
|
+}
|
|
+
|
|
+static inline void iommu_reg_clr_bit(struct isp_iommu_device *mmu_dev,
|
|
+ uint32_t reg, uint32_t val)
|
|
+{
|
|
+ iommu_reg_write_mask(mmu_dev, reg, 0, val);
|
|
+}
|
|
+
|
|
+static void iommu_enable_tbu(struct isp_iommu_device *mmu_dev, int tbu)
|
|
+{
|
|
+ iommu_reg_set_bit(mmu_dev, REG_IOMMU_TCR0(tbu), 0x1);
|
|
+}
|
|
+
|
|
+static void iommu_disable_tbu(struct isp_iommu_device *mmu_dev, int tbu)
|
|
+{
|
|
+ iommu_reg_clr_bit(mmu_dev, REG_IOMMU_TCR0(tbu), 0x1);
|
|
+}
|
|
+
|
|
+static void iommu_set_tbu_ttaddr(struct isp_iommu_device *mmu_dev, int tbu,
|
|
+ uint64_t addr)
|
|
+{
|
|
+ iommu_reg_write(mmu_dev, REG_IOMMU_TTBL(tbu), addr & 0xffffffff);
|
|
+ iommu_reg_write(mmu_dev, REG_IOMMU_TTBH(tbu), (addr >> 32) & 0x1);
|
|
+}
|
|
+
|
|
+static void iommu_set_tbu_ttsize(struct isp_iommu_device *mmu_dev, int tbu, int size)
|
|
+{
|
|
+ iommu_reg_write_mask(mmu_dev, REG_IOMMU_TCR0(tbu),
|
|
+ ((size - 1) & 0x1fff) << 16, 0x1fff << 16);
|
|
+}
|
|
+
|
|
+static void __maybe_unused iommu_set_tbu_qos(struct isp_iommu_device *mmu_dev, int tbu,
|
|
+ int qos)
|
|
+{
|
|
+ iommu_reg_write_mask(mmu_dev, REG_IOMMU_TCR0(tbu), (qos & 0xf) << 4, 0xf << 4);
|
|
+}
|
|
+
|
|
+/**
|
|
+ * iommu_update_trans_table - TBU translation table update
|
|
+ *
|
|
+ * this bit will be cleared to 0 after TLB preload.
|
|
+ * only work for full frame tbu.
|
|
+ */
|
|
+void iommu_update_trans_table(struct isp_iommu_device *mmu_dev, int tbu)
|
|
+{
|
|
+ iommu_reg_set_bit(mmu_dev, REG_IOMMU_TCR0(tbu), 0x1 << 2);
|
|
+}
|
|
+
|
|
+static void iommu_enable_irqs(struct isp_iommu_device *mmu_dev)
|
|
+{
|
|
+ iommu_reg_write_mask(mmu_dev, REG_IOMMU_GIRQ_ENA, 0xffffffff, 0xffffffff);
|
|
+}
|
|
+
|
|
+static inline uint32_t iommu_bva_low(struct isp_iommu_device *mmu_dev)
|
|
+{
|
|
+ return iommu_reg_read(mmu_dev, REG_IOMMU_BVAL);
|
|
+}
|
|
+
|
|
+static int tid_to_tbu(struct isp_iommu_device *mmu_dev, uint32_t tid)
|
|
+{
|
|
+ int i;
|
|
+
|
|
+ for (i = 0; i < ISP_IOMMU_CH_NUM; ++i)
|
|
+ if (mmu_dev->ch_matrix[i] == tid)
|
|
+ return i;
|
|
+
|
|
+ return -1;
|
|
+}
|
|
+
|
|
+static int isp_iommu_acquire_channel(struct isp_iommu_device *mmu_dev, uint32_t tid)
|
|
+{
|
|
+ int tbu;
|
|
+ unsigned long flags;
|
|
+
|
|
+ tbu = tid_to_tbu(mmu_dev, tid);
|
|
+ if (tbu < 0) {
|
|
+ pr_debug("no such channel %x to acquire\n", tid);
|
|
+ return -ENODEV;
|
|
+ }
|
|
+
|
|
+ spin_lock_irqsave(&mmu_dev->ops_lock, flags);
|
|
+ if (test_bit(tbu, &mmu_dev->ch_map)) {
|
|
+ spin_unlock_irqrestore(&mmu_dev->ops_lock, flags);
|
|
+ pr_err("channel %x not free\n", tid);
|
|
+ return -EBUSY;
|
|
+ }
|
|
+ set_bit(tbu, &mmu_dev->ch_map);
|
|
+ spin_unlock_irqrestore(&mmu_dev->ops_lock, flags);
|
|
+
|
|
+ return 0;
|
|
+}
|
|
+
|
|
+static int isp_iommu_release_channel(struct isp_iommu_device *mmu_dev, uint32_t tid)
|
|
+{
|
|
+ int tbu;
|
|
+ unsigned long flags;
|
|
+
|
|
+ tbu = tid_to_tbu(mmu_dev, tid);
|
|
+ if (tbu < 0) {
|
|
+ pr_err("no such channel %x to release\n", tid);
|
|
+ return -ENODEV;
|
|
+ }
|
|
+
|
|
+ spin_lock_irqsave(&mmu_dev->ops_lock, flags);
|
|
+ clear_bit(tbu, &mmu_dev->ch_map);
|
|
+ spin_unlock_irqrestore(&mmu_dev->ops_lock, flags);
|
|
+
|
|
+ return 0;
|
|
+}
|
|
+
|
|
+static int isp_iommu_enable_channel(struct isp_iommu_device *mmu_dev, uint32_t tid)
|
|
+{
|
|
+ int tbu;
|
|
+ unsigned long flags;
|
|
+
|
|
+ tbu = tid_to_tbu(mmu_dev, tid);
|
|
+ if (tbu < 0) {
|
|
+ pr_err("no such channel %x to enable\n", tid);
|
|
+ return -ENODEV;
|
|
+ }
|
|
+
|
|
+ spin_lock_irqsave(&mmu_dev->ops_lock, flags);
|
|
+ //if (!test_bit(tbu, &mmu_dev->ch_map)) {
|
|
+ // spin_unlock_irqrestore(&mmu_dev->ops_lock, flags);
|
|
+ // return -EPERM;
|
|
+ //}
|
|
+
|
|
+ iommu_enable_tbu(mmu_dev, tbu);
|
|
+ spin_unlock_irqrestore(&mmu_dev->ops_lock, flags);
|
|
+
|
|
+ return 0;
|
|
+}
|
|
+
|
|
+static int isp_iommu_disable_channel(struct isp_iommu_device *mmu_dev, uint32_t tid)
|
|
+{
|
|
+ int tbu;
|
|
+ unsigned long flags;
|
|
+
|
|
+ tbu = tid_to_tbu(mmu_dev, tid);
|
|
+ if (tbu < 0) {
|
|
+ pr_err("no such channel %x to disable\n", tid);
|
|
+ return -ENODEV;
|
|
+ }
|
|
+
|
|
+ spin_lock_irqsave(&mmu_dev->ops_lock, flags);
|
|
+ //if (!test_bit(tbu, &mmu_dev->ch_map)) {
|
|
+ // spin_unlock_irqrestore(&mmu_dev->ops_lock, flags);
|
|
+ // return -EPERM;
|
|
+ //}
|
|
+
|
|
+ iommu_disable_tbu(mmu_dev, tbu);
|
|
+ spin_unlock_irqrestore(&mmu_dev->ops_lock, flags);
|
|
+
|
|
+ return 0;
|
|
+}
|
|
+
|
|
+static int isp_iommu_config_channel(struct isp_iommu_device *mmu_dev,
|
|
+ uint32_t tid, uint64_t ttAddr, uint32_t ttSize)
|
|
+{
|
|
+ int tbu;
|
|
+ unsigned long flags;
|
|
+
|
|
+ tbu = tid_to_tbu(mmu_dev, tid);
|
|
+ if (tbu < 0) {
|
|
+ pr_err("no such channel %x to configure\n", tid);
|
|
+ return -ENODEV;
|
|
+ }
|
|
+
|
|
+ spin_lock_irqsave(&mmu_dev->ops_lock, flags);
|
|
+ //if (!test_bit(tbu, &mmu_dev->ch_map)) {
|
|
+ // spin_unlock_irqrestore(&mmu_dev->ops_lock, flags);
|
|
+ // return -EPERM;
|
|
+ //}
|
|
+
|
|
+ //iommu_set_tbu_qos(mmu_dev, tbu, 4);
|
|
+ iommu_set_tbu_ttaddr(mmu_dev, tbu, ttAddr);
|
|
+ iommu_set_tbu_ttsize(mmu_dev, tbu, ttSize);
|
|
+ // iommu_update_trans_table(mmu_dev, tbu);
|
|
+ iommu_enable_irqs(mmu_dev);
|
|
+ spin_unlock_irqrestore(&mmu_dev->ops_lock, flags);
|
|
+
|
|
+ return 0;
|
|
+}
|
|
+
|
|
+static const uint64_t IOMMU_VADDR_BASE = 0x80000000;
|
|
+static uint64_t isp_iommu_get_sva(struct isp_iommu_device *mmu_dev,
|
|
+ uint32_t tid, uint32_t offset)
|
|
+{
|
|
+ int tbu;
|
|
+ uint64_t svAddr;
|
|
+
|
|
+ tbu = tid_to_tbu(mmu_dev, tid);
|
|
+ if (tbu < 0) {
|
|
+ pr_err("no such channel %x to get sva\n", tid);
|
|
+ return -ENODEV;
|
|
+ }
|
|
+
|
|
+ svAddr = iommu_bva_low(mmu_dev) + 0x2000000 * (uint64_t) tbu + (offset & 0xfff);
|
|
+ return svAddr;
|
|
+}
|
|
+
|
|
+static unsigned int isp_iommu_irq_status(struct isp_iommu_device *mmu_dev)
|
|
+{
|
|
+ unsigned int status = 0;
|
|
+ status = iommu_reg_read(mmu_dev, REG_IOMMU_GIRQ_STAT);
|
|
+ if (status)
|
|
+ iommu_reg_write(mmu_dev, REG_IOMMU_GIRQ_STAT, status);
|
|
+ return status;
|
|
+}
|
|
+
|
|
+static int isp_iommu_dump_regs(struct isp_iommu_device *mmu_dev, uint32_t ch_id)
|
|
+{
|
|
+ int ret = 0;
|
|
+ unsigned int status = 0, tlb_size = 0;
|
|
+ uint64_t addr1 = 0, addr2 = 0, addr3 = 0;
|
|
+
|
|
+ pr_info("**************start dump isp iommu ch%d regs:\n", ch_id);
|
|
+ addr1 = iommu_reg_read(mmu_dev, REG_IOMMU_LVAL);
|
|
+ status = iommu_reg_read(mmu_dev, REG_IOMMU_LVAH);
|
|
+ if (status)
|
|
+ addr1 = addr1 | (1ULL << 32);
|
|
+
|
|
+ addr2 = iommu_reg_read(mmu_dev, REG_IOMMU_LPAL);
|
|
+ status = iommu_reg_read(mmu_dev, REG_IOMMU_LPAH);
|
|
+ if (status)
|
|
+ addr2 |= (1ULL << 32);
|
|
+
|
|
+ addr3 = iommu_reg_read(mmu_dev, REG_IOMMU_TVAL);
|
|
+ status = iommu_reg_read(mmu_dev, REG_IOMMU_TVAH);
|
|
+ if (status)
|
|
+ addr3 = addr3 | (1ULL << 32);
|
|
+ pr_info("isp mmu: last virtual addr=0x%llx,last phy addr=0x%llx, timeout addr=0x%llx\n", addr1, addr2, addr3);
|
|
+
|
|
+ addr1 = iommu_reg_read(mmu_dev, REG_IOMMU_TTBL(ch_id));
|
|
+ status = iommu_reg_read(mmu_dev, REG_IOMMU_TTBH(ch_id));
|
|
+ if (status)
|
|
+ addr1 |= (1ULL << 32);
|
|
+ status = iommu_reg_read(mmu_dev, REG_IOMMU_TCR0(ch_id));
|
|
+ tlb_size = (status & 0x1fff0000) >> 16;
|
|
+ pr_info("isp mmu ch%d: tlb addr=0x%llx,tcr0=0x%x, tlb size=%d\n", ch_id, addr1, status, tlb_size);
|
|
+
|
|
+ return ret;
|
|
+}
|
|
+
|
|
+static void isp_iommu_set_timeout_default_addr(struct isp_iommu_device *mmu_dev,
|
|
+ uint64_t timeout_default_addr)
|
|
+{
|
|
+ unsigned int high = 0, low = 0;
|
|
+
|
|
+ low = timeout_default_addr & 0xffffffffULL;
|
|
+ high = (timeout_default_addr >> 32) & 0xffffffffULL;
|
|
+ iommu_reg_write(mmu_dev, REG_IOMMU_TIMEOUT_ADDR_LOW, low);
|
|
+ iommu_reg_write(mmu_dev, REG_IOMMU_TIMEOUT_ADDR_HIGH, high);
|
|
+}
|
|
+
|
|
+static struct isp_iommu_ops mmu_ops = {
|
|
+ .acquire_channel = isp_iommu_acquire_channel,
|
|
+ .release_channel = isp_iommu_release_channel,
|
|
+ .enable_channel = isp_iommu_enable_channel,
|
|
+ .disable_channel = isp_iommu_disable_channel,
|
|
+ .config_channel = isp_iommu_config_channel,
|
|
+ .get_sva = isp_iommu_get_sva,
|
|
+ .irq_status = isp_iommu_irq_status,
|
|
+ .dump_channel_regs = isp_iommu_dump_regs,
|
|
+ .set_timeout_default_addr = isp_iommu_set_timeout_default_addr,
|
|
+};
|
|
+
|
|
+static const uint32_t iommu_ch_dmac_mapping[ISP_IOMMU_CH_NUM] = {
|
|
+ MMU_TID(1, 0, 0), // fmt0_y aout0 TBU0
|
|
+ MMU_TID(1, 0, 1), // fmt0_uv aout0 TBU1
|
|
+ MMU_TID(0, 0, 0), // ain0 TBU2
|
|
+ MMU_TID(0, 1, 0), // ain1 TBU3
|
|
+ MMU_TID(1, 12, 0), // rawdump0 aout12 TBU4
|
|
+ MMU_TID(1, 13, 0), // rawdump1 aout13 TBU5
|
|
+ MMU_TID(1, 6, 0), // dwt0_l1_y aout6 TBU6
|
|
+ MMU_TID(1, 6, 1), // dwt0_l1_uv aout6 TBU7
|
|
+ MMU_TID(1, 7, 0), // dwt0_l2_y aout7 TBU8
|
|
+ MMU_TID(1, 7, 1), // dwt0_l2_uv aout7 TBU9
|
|
+ MMU_TID(1, 8, 0), // dwt0_l3_y aout8 TBU10
|
|
+ MMU_TID(1, 8, 1), // dwt0_l3_uv aout8 TBU11
|
|
+ MMU_TID(1, 11, 0), // dwt0_l4_y aout11 TBU12
|
|
+ MMU_TID(1, 11, 1), // dwt0_l4_uv aout11 TBU13
|
|
+ MMU_TID(1, 2, 0), // dwt1_l1_y aout2 TBU14
|
|
+ MMU_TID(1, 2, 1), // dwt1_l1_uv aout2 TBU15
|
|
+ MMU_TID(1, 3, 0), // dwt1_l2_y aout3 TBU16
|
|
+ MMU_TID(1, 3, 1), // dwt1_l2_uv aout3 TBU17
|
|
+ MMU_TID(1, 4, 0), // dwt1_l3_y aout4 TBU18
|
|
+ MMU_TID(1, 4, 1), // dwt1_l3_uv aout4 TBU19
|
|
+ MMU_TID(1, 5, 0), // dwt1_l4_y aout5 TBU20
|
|
+ MMU_TID(1, 5, 1), // dwt1_l4_uv aout5 TBU21
|
|
+ MMU_TID(1, 1, 0), // fmt1_y aout1 TBU22
|
|
+ MMU_TID(1, 1, 1), // fmt1_uv aout1 TBU23
|
|
+};
|
|
+
|
|
+struct isp_iommu_device *isp_iommu_create(struct device *dev, unsigned long regs_base)
|
|
+{
|
|
+ struct isp_iommu_device *mmu_dev = NULL;
|
|
+
|
|
+ mmu_dev = devm_kzalloc(dev, sizeof(struct isp_iommu_device), GFP_KERNEL);
|
|
+ if (!mmu_dev)
|
|
+ return NULL;
|
|
+
|
|
+ mmu_dev->regs_base = regs_base;
|
|
+ mmu_dev->ops = &mmu_ops;
|
|
+ memcpy(mmu_dev->ch_matrix, iommu_ch_dmac_mapping, sizeof(iommu_ch_dmac_mapping));
|
|
+
|
|
+ spin_lock_init(&mmu_dev->ops_lock);
|
|
+ mmu_dev->dev = dev;
|
|
+
|
|
+ pr_debug("%s X\n", __func__);
|
|
+
|
|
+ return mmu_dev;
|
|
+}
|
|
+
|
|
+void isp_iommu_unregister(struct isp_iommu_device *mmu_dev)
|
|
+{
|
|
+ struct device *dev = mmu_dev->dev;
|
|
+ devm_kfree(dev, mmu_dev);
|
|
+
|
|
+ pr_debug("%s X\n", __func__);
|
|
+}
|
|
+
|
|
+#endif
|
|
diff --git a/drivers/media/platform/spacemit/camera/vi/k1xvi/hw-seq/hw_iommu.h b/drivers/media/platform/spacemit/camera/vi/k1xvi/hw-seq/hw_iommu.h
|
|
new file mode 100644
|
|
index 000000000000..111111111111
|
|
--- /dev/null
|
|
+++ b/drivers/media/platform/spacemit/camera/vi/k1xvi/hw-seq/hw_iommu.h
|
|
@@ -0,0 +1,66 @@
|
|
+/* SPDX-License-Identifier: GPL-2.0 */
|
|
+/*
|
|
+ * isp_iommu.h - Driver for ISP IOMMU
|
|
+ *
|
|
+ * Copyright (C) 2023 SPACEMIT Micro Limited
|
|
+ */
|
|
+
|
|
+#ifndef __ISP_IOMMU_H__
|
|
+#define __ISP_IOMMU_H__
|
|
+
|
|
+#include <linux/types.h>
|
|
+#include <linux/spinlock.h>
|
|
+#include "hw_reg_iommu.h"
|
|
+
|
|
+#define ISP_IOMMU_TBU_NUM (24)
|
|
+#define ISP_IOMMU_CH_NUM (ISP_IOMMU_TBU_NUM)
|
|
+#define IOMMU_TRANS_TAB_MAX_NUM (8192)
|
|
+#define MMU_TID(direction, port_id, plane_id) (((direction) << 8) | ((port_id) << 4) | (plane_id))
|
|
+
|
|
+#define isp_mmu_call(mmu_dev, f, args...) \
|
|
+ ({ \
|
|
+ struct isp_iommu_device *__mmu_dev = (mmu_dev); \
|
|
+ int __result; \
|
|
+ if (!__mmu_dev) \
|
|
+ __result = -ENODEV; \
|
|
+ else if (!(__mmu_dev->ops && __mmu_dev->ops->f)) \
|
|
+ __result = -ENOIOCTLCMD; \
|
|
+ else \
|
|
+ __result = __mmu_dev->ops->f(__mmu_dev, ##args); \
|
|
+ __result; \
|
|
+ })
|
|
+
|
|
+struct iommu_ch_info {
|
|
+ uint32_t tid;
|
|
+ uint32_t ttSize;
|
|
+ uint64_t ttAddr;
|
|
+};
|
|
+
|
|
+struct isp_iommu_device {
|
|
+ struct device *dev;
|
|
+ unsigned long regs_base;
|
|
+ unsigned long ch_map;
|
|
+ uint32_t ch_matrix[ISP_IOMMU_CH_NUM];
|
|
+ struct iommu_ch_info info[ISP_IOMMU_CH_NUM];
|
|
+ spinlock_t ops_lock;
|
|
+
|
|
+ struct isp_iommu_ops *ops;
|
|
+};
|
|
+
|
|
+struct isp_iommu_ops {
|
|
+ int (*acquire_channel)(struct isp_iommu_device *mmu_dev, uint32_t tid);
|
|
+ int (*release_channel)(struct isp_iommu_device *mmu_dev, uint32_t tid);
|
|
+ int (*enable_channel)(struct isp_iommu_device *mmu_dev, uint32_t tid);
|
|
+ int (*disable_channel)(struct isp_iommu_device *mmu_dev, uint32_t tid);
|
|
+ int (*config_channel)(struct isp_iommu_device *mmu_dev, uint32_t tid,
|
|
+ uint64_t ttAddr, uint32_t ttSize);
|
|
+ uint64_t (*get_sva)(struct isp_iommu_device *mmu_dev, uint32_t tid,
|
|
+ uint32_t offset);
|
|
+ unsigned int (*irq_status)(struct isp_iommu_device *mmu_dev);
|
|
+ int (*dump_channel_regs)(struct isp_iommu_device *mmu_dev, uint32_t tid);
|
|
+ void (*set_timeout_default_addr)(struct isp_iommu_device *mmu_dev, uint64_t timeout_default_addr);
|
|
+};
|
|
+
|
|
+struct isp_iommu_device *isp_iommu_create(struct device *dev, unsigned long regs_base);
|
|
+void isp_iommu_unregister(struct isp_iommu_device *mmu_dev);
|
|
+#endif /* ifndef __ISP_IOMMU_H__ */
|
|
diff --git a/drivers/media/platform/spacemit/camera/vi/k1xvi/hw-seq/hw_isp.c b/drivers/media/platform/spacemit/camera/vi/k1xvi/hw-seq/hw_isp.c
|
|
new file mode 100644
|
|
index 000000000000..111111111111
|
|
--- /dev/null
|
|
+++ b/drivers/media/platform/spacemit/camera/vi/k1xvi/hw-seq/hw_isp.c
|
|
@@ -0,0 +1,587 @@
|
|
+// SPDX-License-Identifier: GPL-2.0
|
|
+/*
|
|
+ * hw_isp.c - isp top hw sequence
|
|
+ *
|
|
+ * Copyright(C) 2023 SPACEMIT Micro Limited
|
|
+ */
|
|
+
|
|
+#include "hw_reg.h"
|
|
+#include "hw_isp.h"
|
|
+#define CAM_MODULE_TAG CAM_MDL_VI
|
|
+#include <cam_dbg.h>
|
|
+
|
|
+#define CAM_ALIGN(a, b) ({ \
|
|
+ unsigned int ___tmp1 = (a); \
|
|
+ unsigned int ___tmp2 = (b); \
|
|
+ unsigned int ___tmp3 = ___tmp1 % ___tmp2; \
|
|
+ ___tmp1 /= ___tmp2; \
|
|
+ if (___tmp3) \
|
|
+ ___tmp1++; \
|
|
+ ___tmp1 *= ___tmp2; \
|
|
+ ___tmp1; \
|
|
+ })
|
|
+
|
|
+#define ISP_TOP_REG TOP_REG
|
|
+
|
|
+#define PIPE_ID(sc_block) ({ \
|
|
+ unsigned int ___tmp1 = 0; \
|
|
+ unsigned long ___addr = (sc_block)->base_addr; \
|
|
+ ___addr &= 0x0000ffff; \
|
|
+ if (___addr >= SPACEMIT_ISP_TOP1_OFFSET) \
|
|
+ ___tmp1 = 1; \
|
|
+ ___tmp1; \
|
|
+ })
|
|
+
|
|
+void hw_isp_top_set_idi_linebuf(struct spm_camera_block *sc_block,
|
|
+ unsigned int fifo_depth,
|
|
+ unsigned int line_depth, unsigned int pix_depth)
|
|
+{
|
|
+ union isp_top_reg_3 reg_3;
|
|
+ union isp_top_reg_4 reg_4;
|
|
+
|
|
+ reg_3.value = reg_4.value = 0;
|
|
+
|
|
+ reg_3.field.idi_fifo_depth = fifo_depth;
|
|
+ reg_3.field.idi_line_depth = line_depth;
|
|
+ write32(ISP_TOP_REG(3), reg_3.value);
|
|
+ reg_4.field.idi_pix_depth = pix_depth;
|
|
+ reg_4.field.idi_fifo_line_th = 2;
|
|
+ write32(ISP_TOP_REG(4), reg_4.value);
|
|
+}
|
|
+
|
|
+unsigned int hw_isp_top_get_idi_fifo_depth(struct spm_camera_block *sc_block)
|
|
+{
|
|
+ union isp_top_reg_3 reg_3;
|
|
+
|
|
+ reg_3.value = read32(ISP_TOP_REG(3));
|
|
+ return reg_3.field.idi_fifo_depth;
|
|
+}
|
|
+
|
|
+static void hw_isp_top_set_idi_input_crop(struct spm_camera_block *sc_block,
|
|
+ unsigned int crop_width_offset,
|
|
+ unsigned int crop_height_offset,
|
|
+ unsigned int crop_width,
|
|
+ unsigned int crop_height)
|
|
+{
|
|
+ union isp_top_reg_93 reg_93;
|
|
+ union isp_top_reg_94 reg_94;
|
|
+
|
|
+ reg_93.value = 0;
|
|
+ reg_93.field.m_nwidth_offset = crop_width_offset;
|
|
+ reg_93.field.m_nheight_offset = crop_height_offset;
|
|
+ write32(ISP_TOP_REG(93), reg_93.value);
|
|
+ reg_94.value = 0;
|
|
+ reg_94.field.m_ncrop_width = crop_width;
|
|
+ reg_94.field.m_ncrop_height = crop_height;
|
|
+ write32(ISP_TOP_REG(94), reg_94.value);
|
|
+}
|
|
+
|
|
+void hw_isp_top_set_cfg_rdy(struct spm_camera_block *sc_block, unsigned int ready)
|
|
+{
|
|
+ union isp_top_reg_1 reg_1;
|
|
+
|
|
+ reg_1.value = read32(ISP_TOP_REG(1));
|
|
+ reg_1.field.m_cfg_ready = ready;
|
|
+ write32(ISP_TOP_REG(1), reg_1.value);
|
|
+}
|
|
+
|
|
+int hw_isp_top_set_idi_online_input_fmt(struct spm_camera_block *sc_block,
|
|
+ unsigned int width,
|
|
+ unsigned int height, int cfa_pattern)
|
|
+{
|
|
+ union isp_top_reg_0 reg_0;
|
|
+ union isp_top_reg_1 reg_1;
|
|
+
|
|
+ reg_0.value = 0;
|
|
+ reg_0.field.m_nwidth = width;
|
|
+ reg_0.field.m_nheight = height;
|
|
+ write32(ISP_TOP_REG(0), reg_0.value);
|
|
+ hw_isp_top_set_idi_input_crop(sc_block, 0, 0, width, height);
|
|
+
|
|
+ reg_1.value = read32(ISP_TOP_REG(1));
|
|
+ if (cfa_pattern != CFA_IGNR)
|
|
+ reg_1.field.m_ncfapattern = cfa_pattern;
|
|
+ //reg_1.field.m_cfg_ready = 1;
|
|
+ write32(ISP_TOP_REG(1), reg_1.value);
|
|
+
|
|
+ return 0;
|
|
+}
|
|
+
|
|
+int hw_isp_top_set_idi_offline_input_fmt(struct spm_camera_block *sc_block,
|
|
+ unsigned int rdma_ch,
|
|
+ unsigned int width,
|
|
+ unsigned int height,
|
|
+ int cfa_pattern, unsigned int bit_depth)
|
|
+{
|
|
+ //union isp_top_reg_0 reg_0;
|
|
+ //union isp_top_reg_1 reg_1;
|
|
+ union isp_top_reg_21 reg_21;
|
|
+ union isp_top_reg_22 reg_22;
|
|
+ unsigned int pixel_align = 128 / bit_depth;
|
|
+ unsigned int pipe_idx = PIPE_ID(sc_block);
|
|
+
|
|
+ //reg_0.value = 0;
|
|
+ //reg_0.field.m_nwidth = width;
|
|
+ //reg_0.field.m_nheight = height;
|
|
+ //write32(ISP_TOP_REG(0), reg_0.value);
|
|
+
|
|
+ hw_isp_top_set_idi_input_crop(sc_block, 0, 0, width, height);
|
|
+
|
|
+ //reg_1.value = read32(ISP_TOP_REG(1));
|
|
+ //if (cfa_pattern != CFA_IGNR)
|
|
+ // reg_1.field.m_ncfapattern = cfa_pattern;
|
|
+ //write32(ISP_TOP_REG(1), reg_1.value);
|
|
+
|
|
+ reg_21.value = 0;
|
|
+ reg_21.field.idi_ch0_rd_burst_len_sel = 1;
|
|
+ reg_21.field.idi_ch0_rd_fifo_depth = 0;
|
|
+ reg_21.field.idi_ch0_dma_sync_fifo_th = 4;
|
|
+ reg_21.field.idi_ch0_img_rd_width_byte = (CAM_ALIGN(width, pixel_align) / pixel_align) * 16;
|
|
+ write32(ISP_TOP_REG(21 + rdma_ch * 2) - pipe_idx * SPACEMIT_PIPE_OFFSET, reg_21.value);
|
|
+
|
|
+ reg_22.value = 0;
|
|
+ reg_22.field.idi_ch0_img_rd_raw1214_type = (bit_depth == 12) ? 1 : 0;
|
|
+ reg_22.field.idi_ch0_img_rd_height = height;
|
|
+ reg_22.field.idi_ch0_img_rd_raw10_type = (bit_depth == 10) ? 1 : 0;
|
|
+ reg_22.field.idi_ch0_img_rd_raw8_type = (bit_depth == 8) ? 1 : 0;
|
|
+ reg_22.field.idi_ch0_img_rd_width_pix = width;
|
|
+ write32(ISP_TOP_REG(22 + rdma_ch * 2) - pipe_idx * SPACEMIT_PIPE_OFFSET, reg_22.value);
|
|
+ return 0;
|
|
+}
|
|
+
|
|
+void hw_isp_top_set_idi_dummyline(struct spm_camera_block *sc_block,
|
|
+ unsigned int idi_insert_dummy_line)
|
|
+{
|
|
+ union isp_top_reg_14 reg_14;
|
|
+
|
|
+ reg_14.value = read32(ISP_TOP_REG(14));
|
|
+ //reg_14.field.idi_gap_value = 7 * width / 93;
|
|
+ reg_14.field.idi_gap_value = 0xc8;
|
|
+ //reg_14.field.idi_fifo_pix_th = 0;
|
|
+ reg_14.field.idi_insert_dummy_line = idi_insert_dummy_line;
|
|
+ write32(ISP_TOP_REG(14), reg_14.value);
|
|
+}
|
|
+
|
|
+void hw_isp_top_enable_hw_gap(struct spm_camera_block *sc_block, int pipe_id,
|
|
+ unsigned int enable)
|
|
+{
|
|
+ union isp_top_reg_14 reg_14;
|
|
+ union isp_top_reg_76 reg_76;
|
|
+
|
|
+ if (pipe_id == 0) {
|
|
+ reg_14.value = read32(ISP_TOP_REG(14));
|
|
+ reg_14.field.gap_hardware_mode = enable;
|
|
+ write32(ISP_TOP_REG(14), reg_14.value);
|
|
+ } else {
|
|
+ reg_76.value = read32(ISP_TOP_REG(76));
|
|
+ reg_76.field.pip1_gap_hardware_mode = enable;
|
|
+ write32(ISP_TOP_REG(76), reg_76.value);
|
|
+ }
|
|
+}
|
|
+
|
|
+void hw_isp_top_enable_vsync_pass_through(struct spm_camera_block *sc_block,
|
|
+ int pipe_id, unsigned int enable)
|
|
+{
|
|
+ union isp_top_reg_25 reg_25;
|
|
+
|
|
+ reg_25.value = read32(ISP_TOP_REG(25));
|
|
+ if (pipe_id == 0)
|
|
+ reg_25.field.pip0_vsync_pass_through = enable;
|
|
+ else
|
|
+ reg_25.field.pip1_vsync_pass_through = enable;
|
|
+ write32(ISP_TOP_REG(25), reg_25.value);
|
|
+}
|
|
+
|
|
+void hw_isp_top_set_vsync2href_dly_cnt(struct spm_camera_block *sc_block, int pipe_id,
|
|
+ unsigned int dly_cnt)
|
|
+{
|
|
+ union isp_top_reg_26 reg_26;
|
|
+
|
|
+ reg_26.value = read32(ISP_TOP_REG(26));
|
|
+ if (pipe_id == 0)
|
|
+ reg_26.field.pip0_vsync2href_dly_cnt = dly_cnt;
|
|
+ else
|
|
+ reg_26.field.pip1_vsync2href_dly_cnt = dly_cnt;
|
|
+ write32(ISP_TOP_REG(26), reg_26.value);
|
|
+}
|
|
+
|
|
+/*
|
|
+void hw_isp_top_set_vsync2href_dly_cnt(struct spm_camera_block *sc_block, unsigned int p0_dly_cnt, unsigned int p1_dly_cnt)
|
|
+{
|
|
+ union isp_top_reg_26 reg_26;
|
|
+ reg_26.value = 0;
|
|
+ reg_26.field.pip0_vsync2href_dly_cnt = p0_dly_cnt;
|
|
+ reg_26.field.pip1_vsync2href_dly_cnt = p1_dly_cnt;
|
|
+ write32(ISP_TOP_REG(26), reg_26.value);
|
|
+}
|
|
+*/
|
|
+void hw_isp_top_set_rawdump_fmt(struct spm_camera_block *sc_block,
|
|
+ unsigned int idi_wdma_ch,
|
|
+ unsigned int width,
|
|
+ unsigned int height, unsigned int bit_depth)
|
|
+{
|
|
+ union isp_top_reg_17 reg_17;
|
|
+ union isp_top_reg_18 reg_18;
|
|
+ unsigned int n = 128 / bit_depth;
|
|
+
|
|
+ reg_17.value = read32(ISP_TOP_REG(17 + idi_wdma_ch * 2));
|
|
+ reg_17.field.idi_ch0_wr_burst_len_sel = 4; //0:64byte 1:128byte 2:256byte 3:512byte 4:1024byte
|
|
+ reg_17.field.idi_ch0_wr_14bit_en = (bit_depth == 14) ? 1 : 0;
|
|
+ reg_17.field.idi_ch0_img_wr_width_byte =
|
|
+ CAM_ALIGN((((width % n) * bit_depth) >> 3) + ((width / n) << 4), 16);
|
|
+ write32(ISP_TOP_REG(17 + idi_wdma_ch * 2), reg_17.value);
|
|
+
|
|
+ reg_18.value = 0;
|
|
+ reg_18.field.idi_ch0_img_wr_height = height;
|
|
+ reg_18.field.idi_ch0_img_wr_width_pix = width;
|
|
+ write32(ISP_TOP_REG(18 + idi_wdma_ch * 2), reg_18.value);
|
|
+}
|
|
+
|
|
+void hw_isp_top_set_idi_input_source(struct spm_camera_block *sc_block, int source)
|
|
+{
|
|
+ union isp_top_reg_13 reg_13;
|
|
+
|
|
+ reg_13.value = read32(ISP_TOP_REG(13));
|
|
+ if (source >= SENSOR0_CH0 && source <= OFFLINE_CH1) {
|
|
+ reg_13.field.idi_src_sel = 1 << source;
|
|
+ reg_13.field.idi_online_ena = (source >= SENSOR0_CH0 && source <= SENSOR1_CH3) ? 1 : 0;
|
|
+ reg_13.field.idi_offline_ena = (reg_13.field.idi_online_ena) ? 0 : 1;
|
|
+ } else {
|
|
+ reg_13.field.idi_src_sel = 0;
|
|
+ //reg_13.field.idi_online_ena = 1;
|
|
+ //reg_13.field.idi_offline_ena = 0;
|
|
+ }
|
|
+ write32(ISP_TOP_REG(13), reg_13.value);
|
|
+}
|
|
+
|
|
+void hw_isp_top_enable_rawdump(struct spm_camera_block *sc_block,
|
|
+ int enable, int rawdump_only)
|
|
+{
|
|
+ union isp_top_reg_13 reg_13;
|
|
+
|
|
+ reg_13.value = read32(ISP_TOP_REG(13));
|
|
+ if (enable) {
|
|
+ reg_13.field.idi_rdp_ena = 1;
|
|
+ if (rawdump_only) {
|
|
+ reg_13.field.idi_offline_ena = 1;
|
|
+ reg_13.field.idi_online_ena = 0;
|
|
+ }
|
|
+ } else {
|
|
+ reg_13.field.idi_rdp_ena = 0;
|
|
+ if (rawdump_only) {
|
|
+ reg_13.field.idi_offline_ena = 0;
|
|
+ reg_13.field.idi_online_ena = 0;
|
|
+ }
|
|
+ }
|
|
+ write32(ISP_TOP_REG(13), reg_13.value);
|
|
+}
|
|
+
|
|
+void hw_isp_top_set_rawdump_source(struct spm_camera_block *sc_block,
|
|
+ unsigned int rawdump_idx, int source)
|
|
+{
|
|
+ union isp_top_reg_17 reg_17;
|
|
+
|
|
+ reg_17.value = read32(ISP_TOP_REG(17 + rawdump_idx * 2));
|
|
+ if (source >= SENSOR0_CH0 && source <= SENSOR1_CH3)
|
|
+ reg_17.field.idi_wdma_ch0_src_sel = 1 << source;
|
|
+ else
|
|
+ reg_17.field.idi_wdma_ch0_src_sel = 0;
|
|
+ write32(ISP_TOP_REG(17 + rawdump_idx * 2), reg_17.value);
|
|
+}
|
|
+
|
|
+void hw_isp_top_enable_hdr(struct spm_camera_block *sc_block, int hdr_mode)
|
|
+{
|
|
+ union isp_top_reg_76 reg_76;
|
|
+
|
|
+ reg_76.value = read32(ISP_TOP_REG(76));
|
|
+ reg_76.field.idi_online_hdr_ena = 0;
|
|
+ reg_76.field.idi_offline_hdr_ena = 0;
|
|
+ reg_76.field.idi_mix_hdr_ena = 0;
|
|
+ reg_76.field.mix_hdr_rdp_ena = 0;
|
|
+ if (hdr_mode == HDR_ONLINE) {
|
|
+ reg_76.field.idi_online_hdr_ena = 1;
|
|
+ } else if (hdr_mode == HDR_OFFLINE) {
|
|
+ reg_76.field.idi_offline_hdr_ena = 1;
|
|
+ } else if (hdr_mode == HDR_MIX) {
|
|
+ reg_76.field.idi_mix_hdr_ena = 1;
|
|
+ reg_76.field.mix_hdr_rdp_ena = 1;
|
|
+ }
|
|
+ write32(ISP_TOP_REG(76), reg_76.value);
|
|
+}
|
|
+
|
|
+void hw_isp_top_enable_rd_outstanding(struct spm_camera_block *sc_block, unsigned int enable)
|
|
+{
|
|
+ union isp_top_reg_76 reg_76;
|
|
+
|
|
+ reg_76.value = read32(ISP_TOP_REG(76));
|
|
+ reg_76.field.outstanding_read_en = enable;
|
|
+ write32(ISP_TOP_REG(76), reg_76.value);
|
|
+}
|
|
+
|
|
+void hw_isp_top_set_mix_hdr_line(struct spm_camera_block *sc_block, unsigned int mix_hdr_line)
|
|
+{
|
|
+ union isp_top_reg_76 reg_76;
|
|
+
|
|
+ mix_hdr_line = 4; /* NOTE(review): caller's value is discarded and forced to 4 — confirm this override is intentional */
|
|
+ reg_76.value = read32(ISP_TOP_REG(76));
|
|
+ reg_76.field.idi_mix_hdr_line = mix_hdr_line;
|
|
+ write32(ISP_TOP_REG(76), reg_76.value);
|
|
+}
|
|
+
|
|
+void hw_isp_top_set_ddr_wr_line(struct spm_camera_block *sc_block, unsigned int ddr_wr_line_cnt)
|
|
+{
|
|
+ union isp_top_reg_76 reg_76;
|
|
+
|
|
+ reg_76.value = read32(ISP_TOP_REG(76));
|
|
+ reg_76.field.idi_ddr_wr_line_cnt = ddr_wr_line_cnt;
|
|
+ write32(ISP_TOP_REG(76), reg_76.value);
|
|
+}
|
|
+
|
|
+void hw_isp_pwr(unsigned int on)
|
|
+{
|
|
+}
|
|
+
|
|
+unsigned int hw_isp_top_get_irq_status(struct spm_camera_block *sc_block)
|
|
+{
|
|
+ unsigned int val = 0;
|
|
+
|
|
+ val = read32(ISP_TOP_REG(32));
|
|
+ return val;
|
|
+}
|
|
+
|
|
+void hw_isp_top_clr_irq_status(struct spm_camera_block *sc_block, unsigned int clr)
|
|
+{
|
|
+ write32(ISP_TOP_REG(32), clr);
|
|
+}
|
|
+
|
|
+unsigned int hw_isp_top_get_irq_raw_status(struct spm_camera_block *sc_block)
|
|
+{
|
|
+ unsigned int val = 0;
|
|
+
|
|
+ val = read32(ISP_TOP_REG(34));
|
|
+ return val;
|
|
+}
|
|
+
|
|
+void hw_isp_top_set_irq_enable(struct spm_camera_block *sc_block, unsigned int enable,
|
|
+ unsigned int disable)
|
|
+{
|
|
+ unsigned int value = 0;
|
|
+
|
|
+ value = read32(ISP_TOP_REG(33));
|
|
+ value &= ~disable;
|
|
+ value |= enable;
|
|
+ write32(ISP_TOP_REG(33), value);
|
|
+}
|
|
+
|
|
+void hw_isp_top_set_gap_value(struct spm_camera_block *sc_block, unsigned int p0_gap,
|
|
+ unsigned int p1_gap, unsigned int p01_gap)
|
|
+{
|
|
+ union isp_top_reg_77 reg_77;
|
|
+ union isp_top_reg_78 reg_78;
|
|
+ reg_77.value = 0; /* zero first: bits outside the two gap fields were previously uninitialized stack garbage written to HW */
|
|
+ reg_77.field.idi_pip0_gap_value = p0_gap;
|
|
+ reg_77.field.idi_pip1_gap_value = p1_gap;
|
|
+ reg_78.value = read32(ISP_TOP_REG(78));
|
|
+ reg_78.field.idi_pip01_gap_value = p01_gap;
|
|
+ write32(ISP_TOP_REG(77), reg_77.value);
|
|
+ write32(ISP_TOP_REG(78), reg_78.value);
|
|
+}
|
|
+
|
|
+void hw_isp_top_set_idi_rd_burst_len(struct spm_camera_block *sc_block,
|
|
+ unsigned int rdma_ch,
|
|
+ unsigned int speed_cnt, unsigned int bstlen_sel)
|
|
+{
|
|
+ union isp_top_reg_82 reg_82;
|
|
+
|
|
+ reg_82.value = read32(ISP_TOP_REG(82 + rdma_ch));
|
|
+ reg_82.field.idi_pip0_rd_speed_end_cnt = speed_cnt;
|
|
+ reg_82.field.idi_ch0_rd_bst_len_sel = bstlen_sel;
|
|
+ write32(ISP_TOP_REG(82 + rdma_ch), reg_82.value);
|
|
+}
|
|
+
|
|
+void hw_isp_top_set_err0_irq_enable(struct spm_camera_block *sc_block,
|
|
+ unsigned int enable, unsigned int disable)
|
|
+{
|
|
+ unsigned int value = 0;
|
|
+
|
|
+ value = read32(ISP_TOP_REG(39));
|
|
+ value &= ~disable;
|
|
+ value |= enable;
|
|
+ write32(ISP_TOP_REG(39), value);
|
|
+}
|
|
+
|
|
+void hw_isp_top_set_err2_irq_enable(struct spm_camera_block *sc_block,
|
|
+ unsigned int enable, unsigned int disable)
|
|
+{
|
|
+ unsigned int value = 0;
|
|
+
|
|
+ value = read32(ISP_TOP_REG(41));
|
|
+ value &= ~disable;
|
|
+ value |= enable;
|
|
+ write32(ISP_TOP_REG(41), value);
|
|
+}
|
|
+
|
|
+unsigned int hw_isp_top_get_err0_irq_status(struct spm_camera_block *sc_block)
|
|
+{
|
|
+ unsigned int val = 0;
|
|
+
|
|
+ val = read32(ISP_TOP_REG(36));
|
|
+ return val;
|
|
+}
|
|
+
|
|
+void hw_isp_top_clr_err0_irq_status(struct spm_camera_block *sc_block, unsigned int clr)
|
|
+{
|
|
+ write32(ISP_TOP_REG(36), clr);
|
|
+}
|
|
+
|
|
+unsigned int hw_isp_top_get_err1_irq_status(struct spm_camera_block *sc_block)
|
|
+{
|
|
+ unsigned int val = 0;
|
|
+
|
|
+ val = read32(ISP_TOP_REG(37));
|
|
+ return val;
|
|
+}
|
|
+
|
|
+unsigned int hw_isp_top_get_err2_irq_status(struct spm_camera_block *sc_block)
|
|
+{
|
|
+ unsigned int val = 0;
|
|
+
|
|
+ val = read32(ISP_TOP_REG(38));
|
|
+ return val;
|
|
+}
|
|
+
|
|
+void hw_isp_top_clr_err1_irq_status(struct spm_camera_block *sc_block, unsigned int clr)
|
|
+{
|
|
+ write32(ISP_TOP_REG(37), clr);
|
|
+}
|
|
+
|
|
+void hw_isp_top_clr_err2_irq_status(struct spm_camera_block *sc_block, unsigned int clr)
|
|
+{
|
|
+ write32(ISP_TOP_REG(38), clr);
|
|
+}
|
|
+
|
|
+void hw_isp_top_config_tpg(struct spm_camera_block *sc_block,
|
|
+ unsigned int pipe_id,
|
|
+ unsigned int rolling,
|
|
+ unsigned int dummy_line,
|
|
+ unsigned int hblank, unsigned int vblank)
|
|
+{
|
|
+ union isp_top_reg_15 reg_15;
|
|
+ union isp_top_reg_16 reg_16;
|
|
+
|
|
+ reg_15.value = 0;
|
|
+ reg_15.field.sensor_timing_en = 0;
|
|
+ reg_15.field.rolling_en = rolling;
|
|
+ reg_15.field.tpg_select = pipe_id;
|
|
+ reg_15.field.hblank = hblank; //3218;
|
|
+ reg_15.field.dummy_line = dummy_line;
|
|
+
|
|
+ reg_16.value = 0;
|
|
+ reg_16.field.vblank = vblank;
|
|
+ reg_16.field.valid_blank = 0x0a;
|
|
+
|
|
+ write32(ISP_TOP_REG(16), reg_16.value);
|
|
+ write32(ISP_TOP_REG(15), reg_15.value);
|
|
+}
|
|
+
|
|
+void hw_isp_top_enable_tpg(struct spm_camera_block *sc_block, unsigned int enable)
|
|
+{
|
|
+ union isp_top_reg_15 reg_15;
|
|
+
|
|
+ reg_15.value = read32(ISP_TOP_REG(15));
|
|
+ reg_15.field.tpg_en = enable;
|
|
+
|
|
+ write32(ISP_TOP_REG(15), reg_15.value);
|
|
+}
|
|
+
|
|
+void hw_isp_top_enable_debug_clk(struct spm_camera_block *sc_block, unsigned int enable)
|
|
+{
|
|
+ union isp_top_reg_75 reg_75;
|
|
+
|
|
+ reg_75.value = read32(ISP_TOP_REG(75));
|
|
+ reg_75.field.debug_clk_en = enable;
|
|
+
|
|
+ write32(ISP_TOP_REG(75), reg_75.value);
|
|
+}
|
|
+
|
|
+void hw_isp_top_set_posterr_irq_enable(struct spm_camera_block *sc_block,
|
|
+ unsigned int enable, unsigned int disable)
|
|
+{
|
|
+ unsigned int value = 0;
|
|
+
|
|
+ value = read32(ISP_TOP_REG(62));
|
|
+ value &= ~disable;
|
|
+ value |= enable;
|
|
+ write32(ISP_TOP_REG(62), value);
|
|
+}
|
|
+
|
|
+unsigned int hw_isp_top_get_posterr_irq_status(struct spm_camera_block *sc_block)
|
|
+{
|
|
+ unsigned int val = 0;
|
|
+
|
|
+ val = read32(ISP_TOP_REG(61));
|
|
+ return val;
|
|
+}
|
|
+
|
|
+void hw_isp_top_clr_posterr_irq_status(struct spm_camera_block *sc_block,
|
|
+ unsigned int clr)
|
|
+{
|
|
+ write32(ISP_TOP_REG(61), clr);
|
|
+}
|
|
+
|
|
+void hw_isp_top_shadow_latch(struct spm_camera_block *sc_block)
|
|
+{
|
|
+ union isp_top_reg_1 reg_1;
|
|
+
|
|
+ reg_1.value = read32(ISP_TOP_REG(1));
|
|
+ reg_1.field.idi_reg_latch_trig_pip = 1;
|
|
+ write32(ISP_TOP_REG(1), reg_1.value);
|
|
+}
|
|
+
|
|
+void hw_isp_top_set_rdp_cfg_rdy(struct spm_camera_block *sc_block,
|
|
+ unsigned int rawdump_id, unsigned int ready)
|
|
+{
|
|
+ union isp_top_reg_17 reg_17;
|
|
+
|
|
+ reg_17.value = read32(ISP_TOP_REG(17 + rawdump_id * 2));
|
|
+ reg_17.field.rdp0_cfg_ready = ready; /* reg_17 layout reused for rawdump 1 (reg 19) — assumes identical bit position; TODO confirm */
|
|
+ write32(ISP_TOP_REG(17 + rawdump_id * 2), reg_17.value);
|
|
+}
|
|
+
|
|
+void hw_isp_top_global_reset(struct spm_camera_block *sc_block)
|
|
+{
|
|
+ union isp_top_reg_86 reg_86;
|
|
+
|
|
+ reg_86.value = read32(ISP_TOP_REG(86));
|
|
+ reg_86.field.global_reset = 1;
|
|
+ write32(ISP_TOP_REG(86), reg_86.value);
|
|
+}
|
|
+
|
|
+void hw_isp_top_pipe0_debug_dump(struct spm_camera_block *sc_block)
|
|
+{
|
|
+ union isp_top_reg_0 reg_0;
|
|
+ union isp_top_reg_14 reg_14;
|
|
+
|
|
+ reg_0.value = read32(ISP_TOP_REG(0));
|
|
+ reg_14.value = read32(ISP_TOP_REG(14));
|
|
+ cam_not("p0 regs dump: reg0.m_nWidth=%d reg0.m_nHeight=%d reg14.dummy_line=%d",
|
|
+ reg_0.field.m_nwidth, reg_0.field.m_nheight,
|
|
+ reg_14.field.idi_insert_dummy_line);
|
|
+}
|
|
+
|
|
+void hw_isp_top_pipe1_debug_dump(struct spm_camera_block *sc_block)
|
|
+{
|
|
+ union isp_top_reg_0 reg_0;
|
|
+
|
|
+ reg_0.value = read32(ISP_TOP_REG(0));
|
|
+ cam_not("p1 regs dump: reg0.m_nWidth=%d reg0.m_nHeight=%d",
|
|
+ reg_0.field.m_nwidth, reg_0.field.m_nheight);
|
|
+}
|
|
+void hw_isp_top_set_speed_ctrl(struct spm_camera_block *sc_block, unsigned int speed_ctrl)
|
|
+{
|
|
+ union isp_top_reg_76 reg_76;
|
|
+
|
|
+ reg_76.value = read32(ISP_TOP_REG(76));
|
|
+ reg_76.field.send_speed_ctrl = speed_ctrl;
|
|
+ write32(ISP_TOP_REG(76), reg_76.value);
|
|
+}
|
|
\ No newline at end of file
|
|
diff --git a/drivers/media/platform/spacemit/camera/vi/k1xvi/hw-seq/hw_isp.h b/drivers/media/platform/spacemit/camera/vi/k1xvi/hw-seq/hw_isp.h
|
|
new file mode 100644
|
|
index 000000000000..111111111111
|
|
--- /dev/null
|
|
+++ b/drivers/media/platform/spacemit/camera/vi/k1xvi/hw-seq/hw_isp.h
|
|
@@ -0,0 +1,172 @@
|
|
+/* SPDX-License-Identifier: GPL-2.0 */
|
|
+/*
|
|
+ * hw_isp.h - isp top hw sequence
|
|
+ *
|
|
+ * Copyright(C) 2023 SPACEMIT Micro Limited
|
|
+ */
|
|
+
|
|
+#ifndef _HW_ISP_H_
|
|
+#define _HW_ISP_H_
|
|
+#include "../../cam_block.h"
|
|
+#define SPACEMIT_PIPE_OFFSET (0x00008000)
|
|
+#define SPACEMIT_ISP_TOP0_OFFSET (0x00001700)
|
|
+#define SPACEMIT_ISP_TOP1_OFFSET (SPACEMIT_ISP_TOP0_OFFSET + SPACEMIT_PIPE_OFFSET)
|
|
+#define ERR2_PIPE0_OVERRUN (1 << 28)
|
|
+#define ERR2_PIPE1_OVERRUN (1 << 29)
|
|
+
|
|
+enum {
|
|
+ RGGB = 0,
|
|
+ GRBG,
|
|
+ GBRG,
|
|
+ BGGR,
|
|
+ CFA_IGNR,
|
|
+};
|
|
+
|
|
+int hw_isp_top_set_idi_online_input_fmt(struct spm_camera_block *sc_block,
|
|
+ unsigned int width,
|
|
+ unsigned int height, int cfa_pattern);
|
|
+int hw_isp_top_set_idi_offline_input_fmt(struct spm_camera_block *sc_block,
|
|
+ unsigned int rdma_ch,
|
|
+ unsigned int width,
|
|
+ unsigned int height,
|
|
+ int cfa_pattern, unsigned int bit_depth);
|
|
+void hw_isp_top_set_rawdump_fmt(struct spm_camera_block *sc_block,
|
|
+ unsigned int idi_wdma_ch,
|
|
+ unsigned int width,
|
|
+ unsigned int height, unsigned int bit_depth);
|
|
+
|
|
+//void hw_isp_top_set_reg_14(struct spm_camera_block *sc_block, unsigned int width, unsigned int idi_insert_dummy_line);
|
|
+void hw_isp_top_set_idi_dummyline(struct spm_camera_block *sc_block,
|
|
+ unsigned int idi_insert_dummy_line);
|
|
+void hw_isp_top_set_idi_linebuf_depth(struct spm_camera_block *sc_block,
|
|
+ unsigned int width, unsigned int percentage);
|
|
+void hw_isp_top_set_idi_linebuf(struct spm_camera_block *sc_block,
|
|
+ unsigned int fifo_depth, unsigned int line_depth,
|
|
+ unsigned int pix_depth);
|
|
+
|
|
+enum {
|
|
+ INVALID_CH = -1,
|
|
+ SENSOR0_CH0 = 0,
|
|
+ SENSOR0_CH1,
|
|
+ SENSOR0_CH2,
|
|
+ SENSOR0_CH3,
|
|
+ SENSOR1_CH0,
|
|
+ SENSOR1_CH1,
|
|
+ SENSOR1_CH2,
|
|
+ SENSOR1_CH3,
|
|
+ OFFLINE_CH0,
|
|
+ OFFLINE_CH1,
|
|
+};
|
|
+
|
|
+void hw_isp_top_set_idi_input_source(struct spm_camera_block *sc_block, int source);
|
|
+void hw_isp_top_set_rawdump_source(struct spm_camera_block *sc_block,
|
|
+ unsigned int rawdump_idx, int source);
|
|
+void hw_isp_top_enable_rawdump(struct spm_camera_block *sc_block, int enable,
|
|
+ int rawdump_only);
|
|
+//void hw_isp_top_set_vsync2href_dly_cnt(struct spm_camera_block *sc_block, unsigned int p0_dly_cnt, unsigned int p1_dly_cnt);
|
|
+void hw_isp_top_set_vsync2href_dly_cnt(struct spm_camera_block *sc_block, int pipe_id,
|
|
+ unsigned int dly_cnt);
|
|
+void hw_isp_top_set_gap_value(struct spm_camera_block *sc_block, unsigned int p0_gap,
|
|
+ unsigned int p1_gap, unsigned int p01_gap);
|
|
+void hw_isp_top_set_idi_rd_burst_len(struct spm_camera_block *sc_block,
|
|
+ unsigned int rdma_ch, unsigned int speed_cnt,
|
|
+ unsigned int bstlen_sel);
|
|
+void hw_isp_top_enable_hw_gap(struct spm_camera_block *sc_block, int pipe_id,
|
|
+ unsigned int enable);
|
|
+
|
|
+enum {
|
|
+ HDR_NONE = 0,
|
|
+ HDR_OFFLINE,
|
|
+ HDR_ONLINE,
|
|
+ HDR_MIX,
|
|
+};
|
|
+
|
|
+void hw_isp_top_enable_hdr(struct spm_camera_block *sc_block, int hdr_mode);
|
|
+void hw_isp_top_enable_rd_outstanding(struct spm_camera_block *sc_block, unsigned int enable);
|
|
+void hw_isp_top_set_mix_hdr_line(struct spm_camera_block *sc_block, unsigned int mix_hdr_line);
|
|
+void hw_isp_top_set_ddr_wr_line(struct spm_camera_block *sc_block, unsigned int ddr_wr_line_cnt);
|
|
+void hw_isp_top_enable_vsync_pass_through(struct spm_camera_block *sc_block, int pipe_id, unsigned int enable);
|
|
+void hw_isp_pwr(unsigned int on);
|
|
+unsigned int hw_isp_top_get_irq_status(struct spm_camera_block *sc_block);
|
|
+void hw_isp_top_clr_irq_status(struct spm_camera_block *sc_block, unsigned int clr);
|
|
+unsigned int hw_isp_top_get_irq_raw_status(struct spm_camera_block *sc_block);
|
|
+void hw_isp_top_set_cfg_rdy(struct spm_camera_block *sc_block, unsigned int ready);
|
|
+void hw_isp_top_set_rdp_cfg_rdy(struct spm_camera_block *sc_block,
|
|
+ unsigned int rawdump_id, unsigned int ready);
|
|
+void hw_isp_top_shadow_latch(struct spm_camera_block *sc_block);
|
|
+
|
|
+#define ISP_IRQ_PIPE_SOF (1 << 0)
|
|
+#define ISP_IRQ_PDC_SOF (1 << 1)
|
|
+#define ISP_IRQ_PDF_SOF (1 << 2)
|
|
+#define ISP_IRQ_BPC_SOF (1 << 3)
|
|
+#define ISP_IRQ_LSC_SOF (1 << 4)
|
|
+#define ISP_IRQ_DNS_SOF (1 << 5)
|
|
+#define ISP_IRQ_BINNING_SOF (1 << 6)
|
|
+#define ISP_IRQ_DEMOSAIC_SOF (1 << 7)
|
|
+#define ISP_IRQ_HDR_SOF (1 << 8)
|
|
+#define ISP_IRQ_LTM_SOF (1 << 9)
|
|
+#define ISP_IRQ_MCU_TRIGGER (1 << 10)
|
|
+#define ISP_IRQ_STATS_ERR (1 << 11)
|
|
+#define ISP_IRQ_SDE_SOF (1 << 12)
|
|
+#define ISP_IRQ_SDE_EOF (1 << 13)
|
|
+#define ISP_IRQ_G_RST_DONE (1 << 14)
|
|
+#define ISP_IRQ_IDI_SHADOW_DONE (1 << 15)
|
|
+#define ISP_IRQ_PIPE_EOF (1 << 16)
|
|
+#define ISP_IRQ_PDC_EOF (1 << 17)
|
|
+#define ISP_IRQ_PDF_EOF (1 << 18)
|
|
+#define ISP_IRQ_BPC_EOF (1 << 19)
|
|
+#define ISP_IRQ_LSC_EOF (1 << 20)
|
|
+#define ISP_IRQ_DNS_EOF (1 << 21)
|
|
+#define ISP_IRQ_BINNING_EOF (1 << 22)
|
|
+#define ISP_IRQ_DEMOSAIC_EOF (1 << 23)
|
|
+#define ISP_IRQ_HDR_EOF (1 << 24)
|
|
+#define ISP_IRQ_LTM_EOF (1 << 25)
|
|
+#define ISP_IRQ_AEM_EOF (1 << 26)
|
|
+#define ISP_IRQ_WBM_EOF (1 << 27)
|
|
+#define ISP_IRQ_LSCM_EOF (1 << 28)
|
|
+#define ISP_IRQ_AFC_EOF (1 << 29)
|
|
+#define ISP_IRQ_FLICKER_EOF (1 << 30)
|
|
+#define ISP_IRQ_ERR (1 << 31)
|
|
+#define ISP_IRQ_ALL (0xffffffff)
|
|
+
|
|
+void hw_isp_top_set_irq_enable(struct spm_camera_block *sc_block, unsigned int enable,
|
|
+ unsigned int disable);
|
|
+void hw_isp_top_set_err0_irq_enable(struct spm_camera_block *sc_block,
|
|
+ unsigned int enable, unsigned int disable);
|
|
+void hw_isp_top_set_err2_irq_enable(struct spm_camera_block *sc_block,
|
|
+ unsigned int enable, unsigned int disable);
|
|
+#define POSTERR_IRQ_RDP0_SDW_OPEN_DONE (1 << 18)
|
|
+#define POSTERR_IRQ_RDP1_SDW_OPEN_DONE (1 << 19) /* was duplicate (1 << 18); all other RDP0/RDP1 pairs use adjacent bits (28/29) */
|
|
+#define POSTERR_IRQ_RDP0_SDW_CLOSE_DONE (1 << 28)
|
|
+#define POSTERR_IRQ_RDP1_SDW_CLOSE_DONE (1 << 29)
|
|
+#define POSTERR_IRQ_PIP0_SDW_OPEN_DONE (1 << 26)
|
|
+#define POSTERR_IRQ_PIP1_SDW_OPEN_DONE (1 << 27)
|
|
+#define POSTERR_IRQ_PIP0_SDW_CLOSE_DONE (1 << 30)
|
|
+#define POSTERR_IRQ_PIP1_SDW_CLOSE_DONE (1 << 31)
|
|
+
|
|
+void hw_isp_top_set_posterr_irq_enable(struct spm_camera_block *sc_block,
|
|
+ unsigned int enable, unsigned int disable);
|
|
+unsigned int hw_isp_top_get_err0_irq_status(struct spm_camera_block *sc_block);
|
|
+void hw_isp_top_clr_err0_irq_status(struct spm_camera_block *sc_block,
|
|
+ unsigned int clr);
|
|
+unsigned int hw_isp_top_get_err2_irq_status(struct spm_camera_block *sc_block);
|
|
+void hw_isp_top_clr_err2_irq_status(struct spm_camera_block *sc_block,
|
|
+ unsigned int clr);
|
|
+unsigned int hw_isp_top_get_err1_irq_status(struct spm_camera_block *sc_block);
|
|
+void hw_isp_top_clr_err1_irq_status(struct spm_camera_block *sc_block,
|
|
+ unsigned int clr);
|
|
+unsigned int hw_isp_top_get_posterr_irq_status(struct spm_camera_block *sc_block);
|
|
+void hw_isp_top_clr_posterr_irq_status(struct spm_camera_block *sc_block,
|
|
+ unsigned int clr);
|
|
+void hw_isp_top_config_tpg(struct spm_camera_block *sc_block, unsigned int pipe_id,
|
|
+ unsigned int rolling, unsigned int dummy_line,
|
|
+ unsigned int hblank, unsigned int vblank);
|
|
+void hw_isp_top_enable_tpg(struct spm_camera_block *sc_block, unsigned int enable);
|
|
+void hw_isp_top_enable_debug_clk(struct spm_camera_block *sc_block,
|
|
+ unsigned int enable);
|
|
+unsigned int hw_isp_top_get_idi_fifo_depth(struct spm_camera_block *sc_block);
|
|
+void hw_isp_top_global_reset(struct spm_camera_block *sc_block);
|
|
+void hw_isp_top_pipe0_debug_dump(struct spm_camera_block *sc_block);
|
|
+void hw_isp_top_pipe1_debug_dump(struct spm_camera_block *sc_block);
|
|
+void hw_isp_top_set_speed_ctrl(struct spm_camera_block *sc_block, unsigned int speed_ctrl);
|
|
+#endif
|
|
diff --git a/drivers/media/platform/spacemit/camera/vi/k1xvi/hw-seq/hw_postpipe.c b/drivers/media/platform/spacemit/camera/vi/k1xvi/hw-seq/hw_postpipe.c
|
|
new file mode 100644
|
|
index 000000000000..111111111111
|
|
--- /dev/null
|
|
+++ b/drivers/media/platform/spacemit/camera/vi/k1xvi/hw-seq/hw_postpipe.c
|
|
@@ -0,0 +1,138 @@
|
|
+// SPDX-License-Identifier: GPL-2.0
|
|
+/*
|
|
+ * hw_postpipe.c - postpipe hw sequence
|
|
+ *
|
|
+ * Copyright(C) 2023 SPACEMIT Micro Limited
|
|
+ */
|
|
+
|
|
+#include "hw_reg.h"
|
|
+#include "hw_postpipe.h"
|
|
+
|
|
+#define SPACEMIT_POSTPIPE_SCL0_OFFSET (0x100)
|
|
+#define SPACEMIT_POSTPIPE_SCL1_OFFSET (0x200)
|
|
+#define PP_REG TOP_REG
|
|
+#define SCL_REG(n) (TOP_REG_ADDR(sc_block->base_addr + offset, (n)))
|
|
+
|
|
+void hw_postpipe_set_formatter_format(struct spm_camera_block *sc_block,
|
|
+ unsigned int idx, int format)
|
|
+{
|
|
+ union pp_reg_2 reg_2;
|
|
+
|
|
+ reg_2.value = read32(PP_REG(2 + idx));
|
|
+ reg_2.field.fmt0_m_bSwitchYCFlag = 0;
|
|
+ reg_2.field.fmt0_m_bConvertDithering = 1;
|
|
+ reg_2.field.fmt0_m_bCompressDithering = 1;
|
|
+ reg_2.field.fmt0_m_bSwitchUVFlag = 0;
|
|
+ reg_2.field.fmt0_m_bCompress = 0;
|
|
+ switch (format) {
|
|
+ case NV12:
|
|
+ reg_2.field.fmt0_m_nFormat = 1;
|
|
+ break;
|
|
+ case NV21:
|
|
+ reg_2.field.fmt0_m_nFormat = 1;
|
|
+ reg_2.field.fmt0_m_bSwitchUVFlag = 1;
|
|
+ break;
|
|
+ case RGB888:
|
|
+ reg_2.field.fmt0_m_bSwitchUVFlag = 1;
|
|
+ fallthrough;
|
|
+ case P210:
|
|
+ case Y210:
|
|
+ case P010:
|
|
+ case RGB565:
|
|
+ reg_2.field.fmt0_m_nFormat = 2 + (format - P210);
|
|
+ break;
|
|
+ default:
|
|
+ reg_2.field.fmt0_m_nFormat = 1;
|
|
+ }
|
|
+ write32(PP_REG(2 + idx), reg_2.value);
|
|
+}
|
|
+
|
|
+void hw_postpipe_dma_mux_enable(struct spm_camera_block *sc_block, int mux_select)
|
|
+{
|
|
+ union pp_reg_14 reg_14;
|
|
+
|
|
+ reg_14.value = read32(PP_REG(14));
|
|
+ switch (mux_select) {
|
|
+ case MUX_SEL_FORMATTER2:
|
|
+ reg_14.field.dma_mux_ctrl_6 = 0;
|
|
+ break;
|
|
+ case MUX_SEL_FORMATTER3:
|
|
+ reg_14.field.dma_mux_ctrl_7 = 0;
|
|
+ break;
|
|
+ case MUX_SEL_FORMATTER4:
|
|
+ reg_14.field.dma_mux_ctrl_0 = 0;
|
|
+ break;
|
|
+ case MUX_SEL_FORMATTER5:
|
|
+ reg_14.field.dma_mux_ctrl_1 = 0;
|
|
+ break;
|
|
+ case MUX_SEL_DWT0_LAYER1:
|
|
+ reg_14.field.dma_mux_ctrl_2 = 0;
|
|
+ break;
|
|
+ case MUX_SEL_DWT0_LAYER2:
|
|
+ reg_14.field.dma_mux_ctrl_3 = 0;
|
|
+ break;
|
|
+ case MUX_SEL_DWT0_LAYER3:
|
|
+ reg_14.field.dma_mux_ctrl_4 = 0;
|
|
+ break;
|
|
+ case MUX_SEL_DWT0_LAYER4:
|
|
+ reg_14.field.dma_mux_ctrl_5 = 0;
|
|
+ break;
|
|
+ }
|
|
+
|
|
+ write32(PP_REG(14), reg_14.value);
|
|
+}
|
|
+
|
|
+void hw_postpipe_enable_dwt(struct spm_camera_block *sc_block, unsigned int idx, int src, int enable)
|
|
+{
|
|
+ union pp_reg_8 reg_8;
|
|
+
|
|
+ reg_8.value = read32(PP_REG(8 + idx));
|
|
+ if (enable) {
|
|
+ reg_8.field.dwt0_src_sel = src;
|
|
+ if (src < DWT_SRC_SEL_FORMATTER2)
|
|
+ reg_8.field.dwt0_mode_sel = 0;
|
|
+ else
|
|
+ reg_8.field.dwt0_mode_sel = 1;
|
|
+ }
|
|
+ reg_8.field.dwt0_ena = enable ? 1 : 0;
|
|
+ write32(PP_REG(8 + idx), reg_8.value);
|
|
+}
|
|
+
|
|
+/*
|
|
+void hw_postpipe_set_scaler(struct spm_camera_block *sc_block, unsigned int idx,
|
|
+ unsigned int in_width, unsigned int in_height,
|
|
+ unsigned int out_width, unsigned int out_height)
|
|
+{
|
|
+ union scl_reg_12 reg_12;
|
|
+ union scl_reg_13 reg_13;
|
|
+ unsigned long offset = 0;
|
|
+
|
|
+ reg_12.value = 0;
|
|
+ if (idx == 0)
|
|
+ offset = SPACEMIT_POSTPIPE_SCL0_OFFSET;
|
|
+ else
|
|
+ offset = SPACEMIT_POSTPIPE_SCL1_OFFSET;
|
|
+ reg_12.field.m_nintrim_out_width = in_width;
|
|
+ reg_12.field.m_nintrim_out_height = in_height;
|
|
+ write32(SCL_REG(12), reg_12.value);
|
|
+ reg_13.value = 0;
|
|
+ reg_13.field.m_nouttrim_out_width = in_width;
|
|
+ reg_13.field.m_nouttrim_out_height = in_height;
|
|
+ write32(SCL_REG(13), reg_13.value);
|
|
+}
|
|
+*/
|
|
+void hw_postpipe_set_scaler_source(struct spm_camera_block *sc_block, unsigned int idx, int source)
|
|
+{
|
|
+ union scl_reg_11 reg_11;
|
|
+ unsigned long offset = 0;
|
|
+
|
|
+ reg_11.value = 0;
|
|
+ if (idx == 0)
|
|
+ offset = SPACEMIT_POSTPIPE_SCL0_OFFSET;
|
|
+ else
|
|
+ offset = SPACEMIT_POSTPIPE_SCL1_OFFSET;
|
|
+ //disable scaler & in/out trim
|
|
+ write32(SCL_REG(0), 0);
|
|
+ reg_11.field.pipe_sel = source;
|
|
+ write32(SCL_REG(11), reg_11.value);
|
|
+}
|
|
diff --git a/drivers/media/platform/spacemit/camera/vi/k1xvi/hw-seq/hw_postpipe.h b/drivers/media/platform/spacemit/camera/vi/k1xvi/hw-seq/hw_postpipe.h
|
|
new file mode 100644
|
|
index 000000000000..111111111111
|
|
--- /dev/null
|
|
+++ b/drivers/media/platform/spacemit/camera/vi/k1xvi/hw-seq/hw_postpipe.h
|
|
@@ -0,0 +1,55 @@
|
|
+// SPDX-License-Identifier: GPL-2.0
|
|
+/*
|
|
+ * hw_postpipe.h - postpipe hw sequence
|
|
+ *
|
|
+ * Copyright(C) 2023 SPACEMIT Micro Limited
|
|
+ */
|
|
+
|
|
+#ifndef _HW_POSTPIPE_H_
|
|
+#define _HW_POSTPIPE_H_
|
|
+#include "../../cam_block.h"
|
|
+#define SPACEMIT_POSTPIPE_OFFSET (0x00010000)
|
|
+
|
|
+enum {
|
|
+ NV12 = 2,
|
|
+ NV21,
|
|
+ P210,
|
|
+ Y210,
|
|
+ P010,
|
|
+ RGB888,
|
|
+ RGB565,
|
|
+};
|
|
+
|
|
+enum {
|
|
+ MUX_SEL_FORMATTER2 = 0,
|
|
+ MUX_SEL_FORMATTER3,
|
|
+ MUX_SEL_FORMATTER4,
|
|
+ MUX_SEL_FORMATTER5,
|
|
+ MUX_SEL_DWT0_LAYER1,
|
|
+ MUX_SEL_DWT0_LAYER2,
|
|
+ MUX_SEL_DWT0_LAYER3,
|
|
+ MUX_SEL_DWT0_LAYER4,
|
|
+};
|
|
+
|
|
+enum {
|
|
+ DWT_SRC_SEL_FORMATTER0 = 0,
|
|
+ DWT_SRC_SEL_FORMATTER1,
|
|
+ DWT_SRC_SEL_FORMATTER2,
|
|
+ DWT_SRC_SEL_FORMATTER3,
|
|
+ DWT_SRC_SEL_FORMATTER4,
|
|
+ DWT_SRC_SEL_FORMATTER5,
|
|
+};
|
|
+
|
|
+enum {
|
|
+ SCL_SRC_SEL_PIPE0 = 0,
|
|
+ SCL_SRC_SEL_PIPE1,
|
|
+};
|
|
+
|
|
+void hw_postpipe_set_formatter_format(struct spm_camera_block *sc_block, unsigned int idx, int format);
|
|
+void hw_postpipe_dma_mux_enable(struct spm_camera_block *sc_block, int mux_select);
|
|
+void hw_postpipe_enable_dwt(struct spm_camera_block *sc_block, unsigned int idx, int src, int enable);
|
|
+//void hw_postpipe_set_scaler(struct spm_camera_block *sc_block, unsigned int idx,
|
|
+// unsigned int in_width, unsigned int in_height,
|
|
+// unsigned int out_width, unsigned int out_height);
|
|
+void hw_postpipe_set_scaler_source(struct spm_camera_block *sc_block, unsigned int idx, int source);
|
|
+#endif
|
|
diff --git a/drivers/media/platform/spacemit/camera/vi/k1xvi/hw-seq/hw_reg.h b/drivers/media/platform/spacemit/camera/vi/k1xvi/hw-seq/hw_reg.h
|
|
new file mode 100644
|
|
index 000000000000..111111111111
|
|
--- /dev/null
|
|
+++ b/drivers/media/platform/spacemit/camera/vi/k1xvi/hw-seq/hw_reg.h
|
|
@@ -0,0 +1,650 @@
|
|
+// SPDX-License-Identifier: GPL-2.0
|
|
+/*
|
|
+ * hw_reg.h - definition of isp hw registers
|
|
+ *
|
|
+ * Copyright(C) 2023 SPACEMIT Micro Limited
|
|
+ */
|
|
+
|
|
+#ifndef _SPACEMIT_ISP_HW_REG_H_
|
|
+#define _SPACEMIT_ISP_HW_REG_H_
|
|
+
|
|
+#include <asm/io.h>
|
|
+//#include <soc/spm/plat.h>
|
|
+#ifdef CAM_MODULE_TAG
|
|
+#undef CAM_MODULE_TAG
|
|
+#endif
|
|
+#define CAM_MODULE_TAG CAM_MDL_VI
|
|
+#include <cam_dbg.h>
|
|
+#define read32(a) readl((volatile void __iomem *)(a))
|
|
+#define write32(a, v) writel((v), (volatile void __iomem *)(a))
|
|
+
|
|
+
|
|
+#define TOP_REG_OFFSET(n) ((n) * 4)
|
|
+#define TOP_REG_ADDR(base, n) ((base) + TOP_REG_OFFSET((n)))
|
|
+#define TOP_REG(n) (TOP_REG_ADDR(sc_block->base_addr, (n)))
|
|
+
|
|
+union isp_top_reg_0 {
|
|
+ struct {
|
|
+ unsigned int m_nwidth : 13;
|
|
+ unsigned int rsvd0 : 3;
|
|
+ unsigned int m_nheight : 13;
|
|
+ unsigned int rsvd1 : 3;
|
|
+ }field;
|
|
+ unsigned int value;
|
|
+};
|
|
+
|
|
+union isp_top_reg_1 {
|
|
+ struct {
|
|
+ unsigned int m_nglobalblacklevel : 9;
|
|
+ unsigned int m_ncfapattern : 2;
|
|
+ unsigned int idi_reg_latch_trig_pip : 1;
|
|
+ unsigned int m_cfg_ready : 1;
|
|
+ unsigned int rsvd1 : 3;
|
|
+ unsigned int m_nglobalblccompensategain : 13;
|
|
+ unsigned int rsvd2 : 3;
|
|
+ }field;
|
|
+ unsigned int value;
|
|
+};
|
|
+
|
|
+union isp_top_reg_2 {
|
|
+ struct {
|
|
+ unsigned int m_nbinningwidth : 13;
|
|
+ unsigned int rsvd0 : 3;
|
|
+ unsigned int m_nbinningheight : 13;
|
|
+ unsigned int rsvd1 : 1;
|
|
+ unsigned int m_nratio_binning : 2;
|
|
+ }field;
|
|
+ unsigned int value;
|
|
+};
|
|
+
|
|
+union isp_top_reg_3 {
|
|
+ struct {
|
|
+ unsigned int idi_line_depth : 6;
|
|
+ unsigned int rsvd0 : 2;
|
|
+ unsigned int idi_fifo_depth : 13;
|
|
+ unsigned int rsvd1 : 11;
|
|
+ }field;
|
|
+ unsigned int value;
|
|
+};
|
|
+
|
|
+union isp_top_reg_4 {
|
|
+ struct {
|
|
+ unsigned int idi_pix_depth : 13;
|
|
+ unsigned int rsvd0 : 3;
|
|
+ unsigned int idi_fifo_line_th : 6;
|
|
+ unsigned int rsvd1 : 10;
|
|
+ }field;
|
|
+ unsigned int value;
|
|
+};
|
|
+
|
|
+union isp_top_reg_13 {
|
|
+ struct {
|
|
+ unsigned int idi_src_sel : 10;
|
|
+ unsigned int idi_online_ena : 1;
|
|
+ unsigned int idi_offline_ena : 1;
|
|
+ unsigned int idi_rdp_ena : 1;
|
|
+ unsigned int rsvd : 19;
|
|
+ }field;
|
|
+ unsigned int value;
|
|
+};
|
|
+
|
|
+union isp_top_reg_14 {
|
|
+ struct {
|
|
+ unsigned int idi_insert_dummy_line : 8;
|
|
+ unsigned int idi_gap_value : 10;
|
|
+ unsigned int idi_fifo_pix_th : 13;
|
|
+ unsigned int gap_hardware_mode : 1;
|
|
+ }field;
|
|
+ unsigned int value;
|
|
+};
|
|
+
|
|
+union isp_top_reg_15 {
|
|
+ struct {
|
|
+ unsigned int tpg_en : 1;
|
|
+ unsigned int sensor_timing_en : 1;
|
|
+ unsigned int rolling_en : 1;
|
|
+ unsigned int tpg_select : 1;
|
|
+ unsigned int rsvd : 4;
|
|
+ unsigned int hblank : 12;
|
|
+ unsigned int dummy_line : 10;
|
|
+ unsigned int rsvd1 : 2;
|
|
+ }field;
|
|
+ unsigned int value;
|
|
+};
|
|
+
|
|
+union isp_top_reg_16 {
|
|
+ struct {
|
|
+ unsigned int vblank : 21;
|
|
+ unsigned int rsvd : 3;
|
|
+ unsigned int valid_blank : 8;
|
|
+ }field;
|
|
+ unsigned int value;
|
|
+};
|
|
+
|
|
+union isp_top_reg_17 {
|
|
+ struct {
|
|
+ unsigned int idi_wdma_ch0_src_sel : 8;
|
|
+ unsigned int idi_ch0_img_wr_width_byte : 15;
|
|
+ unsigned int idi_ch0_wr_14bit_en : 1;
|
|
+ unsigned int idi_ch0_wr_burst_len_sel : 3;
|
|
+ unsigned int rsvd : 4;
|
|
+ unsigned int rdp0_cfg_ready : 1;
|
|
+ }field;
|
|
+ unsigned int value;
|
|
+};
|
|
+
|
|
+union isp_top_reg_18 {
|
|
+ struct {
|
|
+ unsigned int idi_ch0_img_wr_width_pix : 16;
|
|
+ unsigned int idi_ch0_img_wr_height : 16;
|
|
+ }field;
|
|
+ unsigned int value;
|
|
+};
|
|
+
|
|
+union isp_top_reg_19 {
|
|
+ struct {
|
|
+ unsigned int idi_wdma_ch1_src_sel : 8;
|
|
+ unsigned int idi_ch1_img_wr_width_byte : 15;
|
|
+ unsigned int idi_ch1_wr_14bit_en : 1;
|
|
+ unsigned int idi_ch1_wr_burst_len_sel : 3;
|
|
+ unsigned int rsvd : 4;
|
|
+ unsigned int rdp1_cfg_ready : 1;
|
|
+ }field;
|
|
+ unsigned int value;
|
|
+};
|
|
+
|
|
+union isp_top_reg_20 {
|
|
+ struct {
|
|
+ unsigned int idi_ch1_img_wr_width_pix : 16;
|
|
+ unsigned int idi_ch1_img_wr_height : 16;
|
|
+ }field;
|
|
+ unsigned int value;
|
|
+};
|
|
+
|
|
+union isp_top_reg_21 {
|
|
+ struct {
|
|
+ unsigned int idi_ch0_img_rd_width_byte : 16;
|
|
+ unsigned int idi_ch0_dma_sync_fifo_th : 8;
|
|
+ unsigned int idi_ch0_rd_fifo_depth : 7;
|
|
+ unsigned int idi_ch0_rd_burst_len_sel : 1;
|
|
+ }field;
|
|
+ unsigned int value;
|
|
+};
|
|
+
|
|
+union isp_top_reg_22 {
|
|
+ struct {
|
|
+ unsigned int idi_ch0_img_rd_width_pix : 14;
|
|
+ unsigned int idi_ch0_img_rd_raw8_type : 1;
|
|
+ unsigned int idi_ch0_img_rd_raw10_type : 1;
|
|
+ unsigned int idi_ch0_img_rd_height : 14;
|
|
+ unsigned int idi_ch0_img_rd_raw1214_type : 1;
|
|
+ unsigned int rsvd : 1;
|
|
+ }field;
|
|
+ unsigned int value;
|
|
+};
|
|
+
|
|
+union isp_top_reg_23 {
|
|
+ struct {
|
|
+ unsigned int idi_ch1_img_rd_width_byte : 16;
|
|
+ unsigned int idi_ch1_dma_sync_fifo_th : 8;
|
|
+ unsigned int idi_ch1_rd_fifo_depth : 7;
|
|
+ unsigned int idi_ch1_rd_burst_len_sel : 1;
|
|
+ }field;
|
|
+ unsigned int value;
|
|
+};
|
|
+
|
|
+union isp_top_reg_24 {
|
|
+ struct {
|
|
+ unsigned int idi_ch1_img_rd_width_pix : 14;
|
|
+ unsigned int idi_ch1_img_rd_raw8_type : 1;
|
|
+ unsigned int idi_ch1_img_rd_raw10_type : 1;
|
|
+ unsigned int idi_ch1_img_rd_height : 14;
|
|
+ unsigned int idi_ch1_img_rd_raw1214_type : 1;
|
|
+ unsigned int rsvd : 1;
|
|
+ }field;
|
|
+ unsigned int value;
|
|
+};
|
|
+
|
|
+union isp_top_reg_25 {
|
|
+ struct {
|
|
+ unsigned int offline_hdr_ena : 1;
|
|
+ unsigned int pip0_vsync_pass_through : 1;
|
|
+ unsigned int pip1_vsync_pass_through : 1;
|
|
+ unsigned int rsvd : 29;
|
|
+ }field;
|
|
+ unsigned int value;
|
|
+};
|
|
+
|
|
+union isp_top_reg_26 {
|
|
+ struct {
|
|
+ unsigned int pip0_vsync2href_dly_cnt : 11;
|
|
+ unsigned int rsvd0 : 5;
|
|
+ unsigned int pip1_vsync2href_dly_cnt : 11;
|
|
+ unsigned int rsvd1 : 5;
|
|
+ }field;
|
|
+ unsigned int value;
|
|
+};
|
|
+
|
|
+union isp_top_reg_75 {
|
|
+ struct {
|
|
+ unsigned int wbc_clk_auto_ena : 1;
|
|
+ unsigned int rdns_clk_auto_ena : 1;
|
|
+ unsigned int afc_clk_auto_ena : 1;
|
|
+ unsigned int wbm_clk_auto_ena : 1;
|
|
+ unsigned int bpc_clk_auto_ena : 1;
|
|
+ unsigned int binning_clk_auto_ena : 1;
|
|
+ unsigned int ltm_clk_auto_ena : 1;
|
|
+ unsigned int aem_clk_auto_ena : 1;
|
|
+ unsigned int eis_clk_auto_ena : 1;
|
|
+ unsigned int hdr_clk_auto_ena : 1;
|
|
+ unsigned int cmc_clk_auto_ena : 1;
|
|
+ unsigned int idi_clk_auto_ena : 1;
|
|
+ unsigned int demosaic_clk_auto_ena : 1;
|
|
+ unsigned int gtm_clk_auto_ena : 1;
|
|
+ unsigned int lsc_clk_auto_ena : 1;
|
|
+ unsigned int lscm_clk_auto_ena : 1;
|
|
+ unsigned int nlc_clk_auto_ena : 1;
|
|
+ unsigned int rgb4gain_clk_auto_ena : 1;
|
|
+ unsigned int top_reg_clk_auto_ena : 1;
|
|
+ unsigned int debug_clk_en : 1;
|
|
+ unsigned int auto_gate_clk_ctrl : 12;
|
|
+ }field;
|
|
+ unsigned int value;
|
|
+};
|
|
+
|
|
+union isp_top_reg_76 {
|
|
+ struct {
|
|
+ unsigned int idi_online_hdr_ena : 1;
|
|
+ unsigned int idi_offline_hdr_ena : 1;
|
|
+ unsigned int idi_mix_hdr_ena : 1;
|
|
+ unsigned int idi_rgb_ir_mode : 1;
|
|
+ unsigned int outstanding_read_en : 1;
|
|
+ unsigned int idi_sony_hdr_mode : 1;
|
|
+ unsigned int idi_low_pr_mode : 1;
|
|
+ unsigned int mix_hdr_rdp_ena : 1;
|
|
+ unsigned int idi_mix_hdr_line : 10;
|
|
+ unsigned int rsvd1 : 2;
|
|
+ unsigned int idi_ddr_wr_line_cnt : 5;
|
|
+ unsigned int send_speed_ctrl : 5;
|
|
+ unsigned int rsvd2 : 1;
|
|
+ unsigned int pip1_gap_hardware_mode : 1;
|
|
+ }field;
|
|
+ unsigned int value;
|
|
+};
|
|
+
|
|
+union isp_top_reg_77 {
|
|
+ struct {
|
|
+ unsigned int idi_pip0_gap_value : 16;
|
|
+ unsigned int idi_pip1_gap_value : 16;
|
|
+ }field;
|
|
+ unsigned int value;
|
|
+};
|
|
+
|
|
+union isp_top_reg_78 {
|
|
+ struct {
|
|
+ unsigned int idi_pip01_gap_value : 16;
|
|
+ unsigned int idi_dma_timeout : 16;
|
|
+ }field;
|
|
+ unsigned int value;
|
|
+};
|
|
+
|
|
+union isp_top_reg_82 {
|
|
+ struct {
|
|
+ unsigned int idi_pip0_rd_speed_end_cnt : 20;
|
|
+ unsigned int idi_ch0_rd_bst_len_sel : 6;
|
|
+ unsigned int rsvd : 6;
|
|
+ }field;
|
|
+ unsigned int value;
|
|
+};
|
|
+
|
|
+union isp_top_reg_83 {
|
|
+ struct {
|
|
+ unsigned int idi_pip1_rd_speed_end_cnt : 20;
|
|
+ unsigned int idi_ch1_rd_bst_len_sel : 5;
|
|
+ unsigned int rsvd : 7;
|
|
+ }field;
|
|
+ unsigned int value;
|
|
+};
|
|
+
|
|
+union isp_top_reg_86 {
|
|
+ struct {
|
|
+ unsigned int global_reset : 1;
|
|
+ unsigned int rsvd1 : 7;
|
|
+ unsigned int global_reset_cyc : 8;
|
|
+ unsigned int rsvd2 : 16;
|
|
+ }field;
|
|
+ unsigned int value;
|
|
+};
|
|
+
|
|
+union isp_top_reg_93 {
|
|
+ struct {
|
|
+ unsigned int m_nwidth_offset : 13;
|
|
+ unsigned int rsvd0 : 3;
|
|
+ unsigned int m_nheight_offset : 13;
|
|
+ unsigned int rsvd1 : 3;
|
|
+ }field;
|
|
+ unsigned int value;
|
|
+};
|
|
+
|
|
+union isp_top_reg_94 {
|
|
+ struct {
|
|
+ unsigned int m_ncrop_width : 13;
|
|
+ unsigned int rsvd0 : 3;
|
|
+ unsigned int m_ncrop_height : 13;
|
|
+ unsigned int rsvd1 : 3;
|
|
+ }field;
|
|
+ unsigned int value;
|
|
+};
|
|
+
|
|
+//dma regs
|
|
+union dma_reg_38 {
|
|
+ struct {
|
|
+ unsigned int ch0_wr_pitch0 : 16;
|
|
+ unsigned int ch0_wr_pitch1 : 16;
|
|
+ }field;
|
|
+ unsigned int value;
|
|
+};
|
|
+
|
|
+union dma_reg_52 {
|
|
+ struct {
|
|
+ unsigned int ch0_wr_fifo_depth : 8;
|
|
+ unsigned int ch0_wr_offset : 8;
|
|
+ unsigned int ch0_wr_src_sel : 4;
|
|
+ unsigned int ch0_wr_weight : 3;
|
|
+ unsigned int rsvd0 : 1;
|
|
+ unsigned int ch0_fifo_div_mode : 4;
|
|
+ unsigned int rsvd1 : 3;
|
|
+ unsigned int ch0_wr_ready : 1;
|
|
+ }field;
|
|
+ unsigned int value;
|
|
+};
|
|
+
|
|
+union dma_reg_68 {
|
|
+ struct {
|
|
+ unsigned int wr_burst_length : 8;
|
|
+ unsigned int rsvd : 24;
|
|
+ }field;
|
|
+ unsigned int value;
|
|
+};
|
|
+
|
|
+union dma_reg_69 {
|
|
+ struct {
|
|
+ unsigned int ch0_wr_start : 1;
|
|
+ unsigned int ch0_wr_done : 1;
|
|
+ unsigned int ch0_wr_err : 1;
|
|
+ unsigned int ch1_wr_start : 1;
|
|
+ unsigned int ch1_wr_done : 1;
|
|
+ unsigned int ch1_wr_err : 1;
|
|
+ unsigned int ch2_wr_start : 1;
|
|
+ unsigned int ch2_wr_done : 1;
|
|
+ unsigned int ch2_wr_err : 1;
|
|
+ unsigned int ch3_wr_start : 1;
|
|
+ unsigned int ch3_wr_done : 1;
|
|
+ unsigned int ch3_wr_err : 1;
|
|
+ unsigned int ch4_wr_start : 1;
|
|
+ unsigned int ch4_wr_done : 1;
|
|
+ unsigned int ch4_wr_err : 1;
|
|
+ unsigned int ch5_wr_start : 1;
|
|
+ unsigned int ch5_wr_done : 1;
|
|
+ unsigned int ch5_wr_err : 1;
|
|
+ unsigned int ch6_wr_start : 1;
|
|
+ unsigned int ch6_wr_done : 1;
|
|
+ unsigned int ch6_wr_err : 1;
|
|
+ unsigned int ch7_wr_start : 1;
|
|
+ unsigned int ch7_wr_done : 1;
|
|
+ unsigned int ch7_wr_err : 1;
|
|
+ unsigned int ch8_wr_start : 1;
|
|
+ unsigned int ch8_wr_done : 1;
|
|
+ unsigned int ch8_wr_err : 1;
|
|
+ unsigned int ch9_wr_start : 1;
|
|
+ unsigned int ch9_wr_done : 1;
|
|
+ unsigned int ch9_wr_err : 1;
|
|
+ unsigned int ch10_wr_start : 1;
|
|
+ unsigned int ch10_wr_done : 1;
|
|
+ }field;
|
|
+ unsigned int value;
|
|
+};
|
|
+
|
|
+union dma_reg_70 {
|
|
+ struct {
|
|
+ unsigned int ch10_wr_err : 1;
|
|
+ unsigned int ch11_wr_start : 1;
|
|
+ unsigned int ch11_wr_done : 1;
|
|
+ unsigned int ch11_wr_err : 1;
|
|
+ unsigned int ch12_wr_start : 1;
|
|
+ unsigned int ch12_wr_done : 1;
|
|
+ unsigned int ch12_wr_err : 1;
|
|
+ unsigned int ch13_wr_start : 1;
|
|
+ unsigned int ch13_wr_done : 1;
|
|
+ unsigned int ch13_wr_err : 1;
|
|
+ unsigned int ch14_p0_wr_start : 1;
|
|
+ unsigned int ch14_p0_wr_done : 1;
|
|
+ unsigned int ch14_p0_wr_err : 1;
|
|
+ unsigned int ch14_p1_wr_start : 1;
|
|
+ unsigned int ch14_p1_wr_done : 1;
|
|
+ unsigned int ch14_p1_wr_err : 1;
|
|
+ unsigned int ch15_wr_start : 1;
|
|
+ unsigned int ch15_wr_done : 1;
|
|
+ unsigned int ch15_wr_err : 1;
|
|
+ unsigned int ch0_rd_start : 1;
|
|
+ unsigned int ch0_rd_done : 1;
|
|
+ unsigned int ch0_rd_err : 1;
|
|
+ unsigned int ch1_rd_start : 1;
|
|
+ unsigned int ch1_rd_done : 1;
|
|
+ unsigned int ch1_rd_err : 1;
|
|
+ unsigned int ch2_rd_start : 1;
|
|
+ unsigned int ch2_rd_done : 1;
|
|
+ unsigned int ch2_rd_err : 1;
|
|
+ unsigned int rsvd : 4;
|
|
+ }field;
|
|
+ unsigned int value;
|
|
+};
|
|
+
|
|
+union dma_reg_79 {
|
|
+ struct {
|
|
+ unsigned int ch0_rd_pitch : 16;
|
|
+ unsigned int ch0_rd_weight : 3;
|
|
+ unsigned int rsvd : 12;
|
|
+ unsigned int ch0_rd_trigger : 1;
|
|
+ }field;
|
|
+ unsigned int value;
|
|
+};
|
|
+
|
|
+union dma_reg_82 {
|
|
+ struct {
|
|
+ unsigned int dmac_postwr_en : 16;
|
|
+ unsigned int dmac_arqos : 4;
|
|
+ unsigned int dmac_awqos : 4;
|
|
+ unsigned int dmac_arb_mode : 2;
|
|
+ unsigned int dmac_max_req_num : 3;
|
|
+ unsigned int dmac_axi_sec : 1;
|
|
+ unsigned int dmac_rst_req : 1;
|
|
+ unsigned int dmac_rst_n_pwr : 1;
|
|
+ }field;
|
|
+ unsigned int value;
|
|
+};
|
|
+
|
|
+union dma_reg_83 {
|
|
+ struct {
|
|
+ unsigned int cfg_dmac_wr_int_clr : 1;
|
|
+ unsigned int cfg_dmac_rd_int_clr : 1;
|
|
+ unsigned int fbc_enc0_clk_auto_ena : 1;
|
|
+ unsigned int fbc_enc1_clk_auto_ena : 1;
|
|
+ unsigned int dma_master_ctrl_clk_auto_ena : 1;
|
|
+ unsigned int dmac_top_cfg_clk_auto_ena : 1;
|
|
+ unsigned int dma_err_sel : 1;
|
|
+ unsigned int rsvd : 19;
|
|
+ unsigned int dma_overrun_recover_en : 1;
|
|
+ unsigned int dma_overlap_recover_en : 1;
|
|
+ unsigned int afbc0_enable : 1;
|
|
+ unsigned int afbc1_enable : 1;
|
|
+ unsigned int rawdump0_enable : 1;
|
|
+ unsigned int rawdump1_enable : 1;
|
|
+ }field;
|
|
+ unsigned int value;
|
|
+};
|
|
+
|
|
+union dma_reg_86 {
|
|
+ struct {
|
|
+ unsigned int mmu_start_p0 : 1;
|
|
+ unsigned int mmu_start_p1 : 1;
|
|
+ unsigned int rsvd0 : 6;
|
|
+ unsigned int sw_mmu_shadow_en_p0 : 1;
|
|
+ unsigned int sw_mmu_shadow_en_p1 : 1;
|
|
+ unsigned int rsvd1 : 22;
|
|
+ }field;
|
|
+ unsigned int value;
|
|
+};
|
|
+
|
|
+//postpipe regs
|
|
+union pp_reg_2 {
|
|
+ struct {
|
|
+ unsigned int fmt0_m_nFormat : 3;
|
|
+ unsigned int fmt0_m_bCompress : 1;
|
|
+ unsigned int fmt0_m_bSwitchUVFlag : 1;
|
|
+ unsigned int fmt0_m_bCompressDithering : 1;
|
|
+ unsigned int fmt0_m_bConvertDithering : 1;
|
|
+ unsigned int fmt0_m_bSwitchYCFlag : 1;
|
|
+ unsigned int rsvd : 24;
|
|
+ }field;
|
|
+ unsigned int value;
|
|
+};
|
|
+
|
|
+union pp_reg_14 {
|
|
+ struct {
|
|
+ unsigned int dma_mux_ctrl_0 : 1;
|
|
+ unsigned int dma_mux_ctrl_1 : 1;
|
|
+ unsigned int dma_mux_ctrl_2 : 1;
|
|
+ unsigned int dma_mux_ctrl_3 : 1;
|
|
+ unsigned int dma_mux_ctrl_4 : 1;
|
|
+ unsigned int dma_mux_ctrl_5 : 1;
|
|
+ unsigned int dma_mux_ctrl_6 : 2;
|
|
+ unsigned int dma_mux_ctrl_7 : 2;
|
|
+ unsigned int rsvd : 22;
|
|
+ }field;
|
|
+ unsigned int value;
|
|
+};
|
|
+
|
|
+union pp_reg_8 {
|
|
+ struct {
|
|
+ unsigned int dwt0_time_limit : 16;
|
|
+ unsigned int dwt0_src_sel : 3;
|
|
+ unsigned int dwt0_mode_sel : 1;
|
|
+ unsigned int dwt0_ena : 1;
|
|
+ unsigned int rsvd : 11;
|
|
+ }field;
|
|
+ unsigned int value;
|
|
+};
|
|
+//scaler regs
|
|
+union scl_reg_0 {
|
|
+ struct {
|
|
+ unsigned int m_nintrimEb : 1;
|
|
+ unsigned int m_nouttrimEb : 1;
|
|
+ unsigned int m_nscalerEb : 1;
|
|
+ unsigned int m_nblock_en : 1;
|
|
+ unsigned int rsvd0 : 4;
|
|
+ unsigned int m_nintrimStartX : 13;
|
|
+ unsigned int rsvd1 : 11;
|
|
+ }field;
|
|
+ unsigned int value;
|
|
+};
|
|
+
|
|
+union scl_reg_11 {
|
|
+ struct {
|
|
+ unsigned int pipe_sel : 1;
|
|
+ unsigned int rsvd : 31;
|
|
+ }field;
|
|
+ unsigned int value;
|
|
+};
|
|
+/*
|
|
+union scl_reg_12 {
|
|
+ struct {
|
|
+ unsigned int m_nintrim_out_width : 13;
|
|
+ unsigned int rsvd0 : 3;
|
|
+ unsigned int m_nintrim_out_height : 13;
|
|
+ unsigned int rsvd1 : 3;
|
|
+ }field;
|
|
+ unsigned int value;
|
|
+};
|
|
+
|
|
+union scl_reg_13 {
|
|
+ struct {
|
|
+ unsigned int m_nouttrim_out_width : 13;
|
|
+ unsigned int rsvd0 : 3;
|
|
+ unsigned int m_nouttrim_out_height : 13;
|
|
+ unsigned int rsvd1 : 3;
|
|
+ }field;
|
|
+ unsigned int value;
|
|
+};
|
|
+*/
|
|
+//isp afbc
|
|
+#define ISP_AFBC_HEADER_BASE_ADDR_LOW (0x00)
|
|
+#define ISP_AFBC_HEADER_BASE_ADDR_HIGH (0x04)
|
|
+#define ISP_AFBC_PAYLOAD_BASE_ADDR_LOW (0x08)
|
|
+#define ISP_AFBC_PAYLOAD_BASE_ADDR_HIGH (0x0c)
|
|
+#define ISP_AFBC_Bbox_coor_x (0x10)
|
|
+#define ISP_AFBC_Bbox_coor_y (0x14)
|
|
+#define ISP_AFBC_Y_BUF_BASE_ADDR (0x18)
|
|
+#define ISP_AFBC_Y_BUF_PITCH (0x1c)
|
|
+#define ISP_AFBC_UV_BUF_BASE_ADDR (0x20)
|
|
+#define ISP_AFBC_UV_BUF_PITCH (0x24)
|
|
+#define ISP_AFBC_Y_BUF_SIZE (0x28)
|
|
+#define ISP_AFBC_UV_BUF_SIZE (0x2c)
|
|
+#define ISP_AFBC_REG_SHADOW_CTRL (0x30)
|
|
+#define ISP_AFBC_IRQ_MASK (0x34)
|
|
+#define ISP_AFBC_IRQ_CLEAR (0x38)
|
|
+#define ISP_AFBC_DMA_CTRL0 (0x3c)
|
|
+#define ISP_AFBC_ENC_MODE (0x40)
|
|
+#define ISP_AFBC_DMAC_LENGTH (0x44)
|
|
+#define ISP_AFBC_IRQ_STATUS (0x48)
|
|
+union afbc_reg_irq {
|
|
+ struct {
|
|
+ unsigned int dma_wr_err : 16;
|
|
+ unsigned int dma_wr_eof : 1;
|
|
+ unsigned int cfg_update_done : 1;
|
|
+ unsigned int rsvd : 14;
|
|
+ }field;
|
|
+ unsigned int value;
|
|
+};
|
|
+
|
|
+union afbc_reg_bbox_coor_x {
|
|
+ struct {
|
|
+ unsigned int bbox_start_x : 16;
|
|
+ unsigned int bbox_end_x : 16;
|
|
+ }field;
|
|
+ unsigned int value;
|
|
+};
|
|
+
|
|
+union afbc_reg_bbox_coor_y {
|
|
+ struct {
|
|
+ unsigned int bbox_start_y : 16;
|
|
+ unsigned int bbox_end_y : 16;
|
|
+ }field;
|
|
+ unsigned int value;
|
|
+};
|
|
+
|
|
+union afbc_reg_y_buf_size {
|
|
+ struct {
|
|
+ unsigned int y_buf_size_x : 16;
|
|
+ unsigned int y_buf_size_y : 16;
|
|
+ }field;
|
|
+ unsigned int value;
|
|
+};
|
|
+
|
|
+union afbc_reg_uv_buf_size {
|
|
+ struct {
|
|
+ unsigned int uv_buf_size_x : 16;
|
|
+ unsigned int uv_buf_size_y : 16;
|
|
+ }field;
|
|
+ unsigned int value;
|
|
+};
|
|
+
|
|
+union afbc_reg_shadow_ctrl {
|
|
+ struct {
|
|
+ unsigned int direct_swap : 1;
|
|
+ unsigned int pending_swap : 1;
|
|
+ unsigned int rsvd : 30;
|
|
+ }field;
|
|
+ unsigned int value;
|
|
+};
|
|
+#endif
|
|
diff --git a/drivers/media/platform/spacemit/camera/vi/k1xvi/hw-seq/hw_reg_iommu.h b/drivers/media/platform/spacemit/camera/vi/k1xvi/hw-seq/hw_reg_iommu.h
|
|
new file mode 100644
|
|
index 000000000000..111111111111
|
|
--- /dev/null
|
|
+++ b/drivers/media/platform/spacemit/camera/vi/k1xvi/hw-seq/hw_reg_iommu.h
|
|
@@ -0,0 +1,40 @@
|
|
+/* SPDX-License-Identifier: GPL-2.0 */
|
|
+/*
|
|
+ * hw_reg_iommu.h
|
|
+ *
|
|
+ * register for isp iommu
|
|
+ *
|
|
+ * Copyright (C) 2023 SPACEMIT Micro Limited
|
|
+ */
|
|
+
|
|
+#ifndef __REGS_ISP_IOMMU_H__
|
|
+#define __REGS_ISP_IOMMU_H__
|
|
+
|
|
+/* TBU(n) registers */
|
|
+#define REG_IOMMU_TTBL(n) (0x12240 + 0x20 * (n))
|
|
+#define REG_IOMMU_TTBH(n) (0x12244 + 0x20 * (n))
|
|
+#define REG_IOMMU_TCR0(n) (0x12248 + 0x20 * (n))
|
|
+#define REG_IOMMU_TCR1(n) (0x1224c + 0x20 * (n))
|
|
+#define REG_IOMMU_STAT(n) (0x12250 + 0x20 * (n))
|
|
+
|
|
+/* TOP registers */
|
|
+#define REG_IOMMU_BVAL (0x12200)
|
|
+#define REG_IOMMU_BVAH (0x12204)
|
|
+#define REG_IOMMU_TVAL (0x12208)
|
|
+#define REG_IOMMU_TVAH (0x1220c)
|
|
+#define REG_IOMMU_GIRQ_STAT (0x12210)
|
|
+#define REG_IOMMU_GIRQ_ENA (0x12214)
|
|
+#define REG_IOMMU_TIMEOUT (0x12218)
|
|
+#define REG_IOMMU_ERR_CLR (0x1221c)
|
|
+#define REG_IOMMU_LVAL (0x12220)
|
|
+#define REG_IOMMU_LVAH (0x12224)
|
|
+#define REG_IOMMU_LPAL (0x12228)
|
|
+#define REG_IOMMU_LPAH (0x1222c)
|
|
+#define REG_IOMMU_TIMEOUT_ADDR_LOW (0x12234)
|
|
+#define REG_IOMMU_TIMEOUT_ADDR_HIGH (0x12238)
|
|
+#define REG_IOMMU_VER (0x1223c)
|
|
+
|
|
+#define MMU_RD_TIMEOUT (1 << 16)
|
|
+#define MMU_WR_TIMEOUT (1 << 17)
|
|
+
|
|
+#endif /* ifndef __REGS_ISP_IOMMU_H__ */
|
|
diff --git a/drivers/media/platform/spacemit/camera/vi/k1xvi/k1xvi.c b/drivers/media/platform/spacemit/camera/vi/k1xvi/k1xvi.c
|
|
new file mode 100644
|
|
index 000000000000..111111111111
|
|
--- /dev/null
|
|
+++ b/drivers/media/platform/spacemit/camera/vi/k1xvi/k1xvi.c
|
|
@@ -0,0 +1,565 @@
|
|
+// SPDX-License-Identifier: GPL-2.0
|
|
+/*
|
|
+ * k1xvi.c - k1xisp vi platform device driver
|
|
+ *
|
|
+ * Copyright(C) 2023 SPACEMIT Micro Limited
|
|
+ */
|
|
+
|
|
+#include <linux/module.h>
|
|
+#include <linux/kernel.h>
|
|
+#include <linux/init.h>
|
|
+#include <linux/device.h>
|
|
+#include <linux/platform_device.h>
|
|
+#include <linux/pm_runtime.h>
|
|
+#include <linux/of_device.h>
|
|
+#include <linux/slab.h>
|
|
+#include "k1xvi.h"
|
|
+#include "fe_isp.h"
|
|
+#include "../spacemit_videobuf2.h"
|
|
+#include "../vdev.h"
|
|
+#include "../subdev.h"
|
|
+#include "../vsensor.h"
|
|
+#define CAM_MODULE_TAG CAM_MDL_VI
|
|
+#include <cam_dbg.h>
|
|
+
|
|
+static DEFINE_MUTEX(g_init_mlock);
|
|
+static struct isp_firm *g_isp_firm = NULL;
|
|
+static struct platform_device *g_pdev = NULL;
|
|
+static struct v4l2_device *g_v4l2_dev = NULL;
|
|
+
|
|
+static int k1xvi_create_entities(struct platform_device *pdev, struct k1xvi_platform_data *drvdata)
|
|
+{
|
|
+ int i = 0;
|
|
+ char name[SPACEMIT_VI_ENTITY_NAME_LEN];
|
|
+ struct device *dev = &pdev->dev;
|
|
+ unsigned int min_buffers_needed = 0;
|
|
+
|
|
+ //2 SENSORs
|
|
+ for (i = 0; i < SENSOR_NUM; i++) {
|
|
+ drvdata->entities[MIPI][SENSOR][i] = (struct media_entity*)spm_sensor_create(SD_GRP_ID(MIPI, SENSOR, i), dev);
|
|
+ if (!drvdata->entities[MIPI][SENSOR][i]) {
|
|
+ cam_err("%s create sensor%d failed.", __func__, i);
|
|
+ goto SENSORs_FAIL;
|
|
+ }
|
|
+ }
|
|
+ //2 AINs
|
|
+ for (i = 0; i < AIN_NUM; i++) {
|
|
+ snprintf(name, 32, "ain%d", i);
|
|
+ drvdata->entities[DMA][AIN][i] =
|
|
+ (struct media_entity *)spm_vdev_create_vnode(name, SPACEMIT_VNODE_DIR_IN, i, g_v4l2_dev, dev, 0);
|
|
+ if (!drvdata->entities[DMA][AIN][i]) {
|
|
+ cam_err("%s create ain%d failed.", __func__, i);
|
|
+ goto AINs_FAIL;
|
|
+ }
|
|
+ }
|
|
+ //14 AOUTs
|
|
+ for (i = 0; i < AOUT_NUM; i++) {
|
|
+ snprintf(name, 32, "aout%d", i);
|
|
+ if (i == 12 || i == 13) { //raw dump
|
|
+ min_buffers_needed = 0;
|
|
+ } else {
|
|
+ min_buffers_needed = 0;
|
|
+ }
|
|
+ drvdata->entities[DMA][AOUT][i] =
|
|
+ (struct media_entity *)spm_vdev_create_vnode(name, SPACEMIT_VNODE_DIR_OUT, i, g_v4l2_dev, dev,
|
|
+ min_buffers_needed);
|
|
+ if (!drvdata->entities[DMA][AOUT][i]) {
|
|
+ cam_err("%s create aout%d failed.", __func__, i);
|
|
+ goto AOUTs_FAIL;
|
|
+ }
|
|
+ }
|
|
+ //2 RAWDUMPs
|
|
+ for (i = 0; i < RAWDUMP_NUM; i++) {
|
|
+ drvdata->entities[FE_ISP][RAWDUMP][i] =
|
|
+ (struct media_entity *)fe_rawdump_create(SD_GRP_ID(FE_ISP, RAWDUMP, i), drvdata->isp_ctx);
|
|
+ if (!drvdata->entities[FE_ISP][RAWDUMP][i]) {
|
|
+ cam_err("%s create rawdump%d failed.", __func__, i);
|
|
+ goto RAWDUMPs_FAIL;
|
|
+ }
|
|
+ }
|
|
+ //OFFLINE_CHANNELs
|
|
+ for (i = 0; i < OFFLINE_CH_NUM; i++) {
|
|
+ drvdata->entities[FE_ISP][OFFLINE_CHANNEL][i] =
|
|
+ (struct media_entity *)fe_offline_channel_create(SD_GRP_ID(FE_ISP, OFFLINE_CHANNEL, i), drvdata->isp_ctx);
|
|
+ if (!drvdata->entities[FE_ISP][OFFLINE_CHANNEL][i]) {
|
|
+ cam_err("%s create offline_channel%d failed", __func__, i);
|
|
+ goto OFFLINE_CHANNELs_FAIL;
|
|
+ }
|
|
+ }
|
|
+ //FORMATTERs
|
|
+ for (i = 0; i < FORMATTER_NUM; i++) {
|
|
+ drvdata->entities[FE_ISP][FORMATTER][i] =
|
|
+ (struct media_entity *)fe_formatter_create(SD_GRP_ID(FE_ISP, FORMATTER, i), drvdata->isp_ctx);
|
|
+ if (!drvdata->entities[FE_ISP][FORMATTER][i]) {
|
|
+ cam_err("%s create formatter%d failed", __func__, i);
|
|
+ goto FORMATTERs_FAIL;
|
|
+ }
|
|
+ }
|
|
+ //DWT0
|
|
+ for (i = 1; i <= DWT_LAYER_NUM; i++) {
|
|
+ drvdata->entities[FE_ISP][DWT0][i] =
|
|
+ (struct media_entity *)fe_dwt_create(SD_GRP_ID(FE_ISP, DWT0, i),
|
|
+ drvdata->isp_ctx);
|
|
+ if (!drvdata->entities[FE_ISP][DWT0][i]) {
|
|
+ cam_err("%s create dwt0_layer%d failed", __func__, i);
|
|
+ goto DWT0_FAIL;
|
|
+ }
|
|
+ }
|
|
+ //DWT1
|
|
+ for (i = 1; i <= DWT_LAYER_NUM; i++) {
|
|
+ drvdata->entities[FE_ISP][DWT1][i] =
|
|
+ (struct media_entity *)fe_dwt_create(SD_GRP_ID(FE_ISP, DWT1, i), drvdata->isp_ctx);
|
|
+ if (!drvdata->entities[FE_ISP][DWT1][i]) {
|
|
+ cam_err("%s create dwt1_layer%d failed", __func__, i);
|
|
+ goto DWT1_FAIL;
|
|
+ }
|
|
+ }
|
|
+ //PIPEs
|
|
+ for (i = 0; i < PIPE_NUM; i++) {
|
|
+ drvdata->entities[FE_ISP][PIPE][i] =
|
|
+ (struct media_entity *)fe_pipe_create(SD_GRP_ID(FE_ISP, PIPE, i), drvdata->isp_ctx);
|
|
+ if (!drvdata->entities[FE_ISP][PIPE][i]) {
|
|
+ cam_err("%s create pipe%d failed", __func__, i);
|
|
+ goto PIPEs_FAIL;
|
|
+ }
|
|
+ }
|
|
+ //HDR_COMBINE
|
|
+ drvdata->entities[FE_ISP][HDR_COMBINE][0] =
|
|
+ (struct media_entity *)fe_hdr_combine_create(SD_GRP_ID(FE_ISP, HDR_COMBINE, 0), drvdata->isp_ctx);
|
|
+ if (!drvdata->entities[FE_ISP][HDR_COMBINE][0]) {
|
|
+ cam_err("%s create hdr_combine failed", __func__);
|
|
+ goto HDR_COMBINE_FAIL;
|
|
+ }
|
|
+ //3 CSI_MAINs
|
|
+ for (i = 0; i < CSI_NUM; i++) {
|
|
+ drvdata->entities[MIPI][CSI_MAIN][i] =
|
|
+ (struct media_entity *)csi_create(SD_GRP_ID(MIPI, CSI_MAIN, i), drvdata->isp_ctx);
|
|
+ if (!drvdata->entities[MIPI][CSI_MAIN][i]) {
|
|
+ cam_err("%s create csi%d_main failed", __func__, i);
|
|
+ goto CSI_MAIN_FAIL;
|
|
+ }
|
|
+ }
|
|
+ //3 CSI_VCDTs
|
|
+ for (i = 0; i < CSI_NUM; i++) {
|
|
+ drvdata->entities[MIPI][CSI_VCDT][i] =
|
|
+ (struct media_entity *)csi_create(SD_GRP_ID(MIPI, CSI_VCDT, i), drvdata->isp_ctx);
|
|
+ if (!drvdata->entities[MIPI][CSI_VCDT][i]) {
|
|
+ cam_err("%s create csi%d_main failed", __func__, i);
|
|
+ goto CSI_VCDT_FAIL;
|
|
+ }
|
|
+ }
|
|
+ return 0;
|
|
+CSI_VCDT_FAIL:
|
|
+ while (i > 0) {
|
|
+ spm_camera_block_put(drvdata->entities[MIPI][CSI_VCDT][--i]);
|
|
+ drvdata->entities[MIPI][CSI_VCDT][i] = NULL;
|
|
+ }
|
|
+ i = CSI_NUM;
|
|
+CSI_MAIN_FAIL:
|
|
+ while (i > 0) {
|
|
+ spm_camera_block_put(drvdata->entities[MIPI][CSI_MAIN][--i]);
|
|
+ drvdata->entities[MIPI][CSI_MAIN][i] = NULL;
|
|
+ }
|
|
+ spm_camera_block_put(drvdata->entities[FE_ISP][HDR_COMBINE][0]);
|
|
+ drvdata->entities[FE_ISP][HDR_COMBINE][0] = NULL;
|
|
+HDR_COMBINE_FAIL:
|
|
+ i = PIPE_NUM;
|
|
+PIPEs_FAIL:
|
|
+ while (i > 0) {
|
|
+ spm_camera_block_put(drvdata->entities[FE_ISP][PIPE][--i]);
|
|
+ drvdata->entities[FE_ISP][PIPE][i] = NULL;
|
|
+ }
|
|
+ i = DWT_LAYER_NUM + 1;
|
|
+DWT1_FAIL:
|
|
+ while (i > 1) {
|
|
+ spm_camera_block_put(drvdata->entities[FE_ISP][DWT1][--i]);
|
|
+ drvdata->entities[FE_ISP][DWT1][i] = NULL;
|
|
+ }
|
|
+ i = DWT_LAYER_NUM + 1;
|
|
+DWT0_FAIL:
|
|
+ while (i > 1) {
|
|
+ spm_camera_block_put(drvdata->entities[FE_ISP][DWT0][--i]);
|
|
+ drvdata->entities[FE_ISP][DWT0][i] = NULL;
|
|
+ }
|
|
+ i = FORMATTER_NUM;
|
|
+FORMATTERs_FAIL:
|
|
+ while (i > 0) {
|
|
+ spm_camera_block_put(drvdata->entities[FE_ISP][FORMATTER][--i]);
|
|
+ drvdata->entities[FE_ISP][FORMATTER][i] = NULL;
|
|
+ }
|
|
+ i = OFFLINE_CH_NUM;
|
|
+OFFLINE_CHANNELs_FAIL:
|
|
+ while (i > 0) {
|
|
+ spm_camera_block_put(drvdata->entities[FE_ISP][OFFLINE_CHANNEL][--i]);
|
|
+ drvdata->entities[FE_ISP][OFFLINE_CHANNEL][i] = NULL;
|
|
+ }
|
|
+ i = RAWDUMP_NUM;
|
|
+RAWDUMPs_FAIL:
|
|
+ while (i > 0) {
|
|
+ spm_camera_block_put(drvdata->entities[FE_ISP][RAWDUMP][--i]);
|
|
+ drvdata->entities[FE_ISP][RAWDUMP][i] = NULL;
|
|
+ }
|
|
+ i = AOUT_NUM;
|
|
+AOUTs_FAIL:
|
|
+ while (i > 0) {
|
|
+ spm_camera_block_put(drvdata->entities[DMA][AOUT][--i]);
|
|
+ drvdata->entities[DMA][AOUT][i] = NULL;
|
|
+ }
|
|
+ i = AIN_NUM;
|
|
+AINs_FAIL:
|
|
+ while (i > 0) {
|
|
+ spm_camera_block_put(drvdata->entities[DMA][AIN][--i]);
|
|
+ drvdata->entities[DMA][AIN][i] = NULL;
|
|
+ }
|
|
+ i = SENSOR_NUM;
|
|
+SENSORs_FAIL:
|
|
+ while (i > 0) {
|
|
+ spm_camera_block_put(drvdata->entities[MIPI][SENSOR][--i]);
|
|
+ drvdata->entities[MIPI][SENSOR][i] = NULL;
|
|
+ }
|
|
+ return -1;
|
|
+}
|
|
+
|
|
+static void k1xvi_release_entities(struct k1xvi_platform_data *drvdata)
|
|
+{
|
|
+ int i = 0, j = 0, k = 0;
|
|
+
|
|
+ for (i = 0; i < GRP_MAX; i++) {
|
|
+ for (j = 0; j < SUB_MAX; j++) {
|
|
+ for (k = 0; k < ID_MAX; k++) {
|
|
+ if (drvdata->entities[i][j][k]) {
|
|
+ spm_camera_block_put(drvdata->entities[i][j][k]);
|
|
+ drvdata->entities[i][j][k] = NULL;
|
|
+ }
|
|
+ }
|
|
+ }
|
|
+ }
|
|
+}
|
|
+
|
|
/* Shorthand into the probe-time entity table: entities[group][sub-type][id]. */
#define ENTITY(a, b, c) (drvdata->entities[(a)][(b)][(c)])
/*
 * Create one media link and bail out to LINKS_FAIL on error; relies on the
 * local variables 'ret' and the LINKS_FAIL label of the enclosing function.
 */
#define CREATE_LINK(source, source_pad, sink, sink_pad) do { \
	ret = SPACEMIT_MEDIA_CREATE_LINK((source), (source_pad), (sink), (sink_pad)); \
	if (ret) { \
		cam_err("create link " #source "-" #source_pad " <=> " #sink "-" #sink_pad " failed."); \
		goto LINKS_FAIL; \
	} \
} while (0)

/*
 * k1xvi_create_entity_links() - wire up the static media-graph topology.
 * @drvdata: per-device data holding the entity table filled by
 *           k1xvi_create_entities().
 *
 * Creates every possible link between sensors, CSI receivers, the front-end
 * ISP blocks (rawdump/pipe/formatter/DWT) and the DMA in/out video nodes.
 * Returns 0 on success; on failure removes the links of every entity in the
 * table and returns the error from SPACEMIT_MEDIA_CREATE_LINK().
 */
static int k1xvi_create_entity_links(struct k1xvi_platform_data *drvdata)
{
	int ret = 0, i = 0, j = 0, k = 0;

	//SENSOR => CSI_MAIN and CSI_VCDT links
	CREATE_LINK(ENTITY(MIPI, SENSOR, 0), SENSOR_PAD_CSI_MAIN,
		    ENTITY(MIPI, CSI_MAIN, 0), CSI_PAD_IN);
	CREATE_LINK(ENTITY(MIPI, SENSOR, 0), SENSOR_PAD_CSI_VCDT,
		    ENTITY(MIPI, CSI_VCDT, 0), CSI_PAD_IN);
	CREATE_LINK(ENTITY(MIPI, SENSOR, 1), SENSOR_PAD_CSI_MAIN,
		    ENTITY(MIPI, CSI_MAIN, 1), CSI_PAD_IN);
	CREATE_LINK(ENTITY(MIPI, SENSOR, 1), SENSOR_PAD_CSI_VCDT,
		    ENTITY(MIPI, CSI_VCDT, 1), CSI_PAD_IN);
	CREATE_LINK(ENTITY(MIPI, SENSOR, 2), SENSOR_PAD_CSI_MAIN,
		    ENTITY(MIPI, CSI_MAIN, 2), CSI_PAD_IN);
	CREATE_LINK(ENTITY(MIPI, SENSOR, 2), SENSOR_PAD_CSI_VCDT,
		    ENTITY(MIPI, CSI_VCDT, 2), CSI_PAD_IN);

	//CSI_MAIN => RAWDUMP links
	CREATE_LINK(ENTITY(MIPI, CSI_MAIN, 0), CSI_PAD_RAWDUMP0,
		    ENTITY(FE_ISP, RAWDUMP, 0), PAD_IN);
	CREATE_LINK(ENTITY(MIPI, CSI_MAIN, 0), CSI_PAD_RAWDUMP1,
		    ENTITY(FE_ISP, RAWDUMP, 1), PAD_IN);
	CREATE_LINK(ENTITY(MIPI, CSI_MAIN, 1), CSI_PAD_RAWDUMP0,
		    ENTITY(FE_ISP, RAWDUMP, 0), PAD_IN);
	CREATE_LINK(ENTITY(MIPI, CSI_MAIN, 1), CSI_PAD_RAWDUMP1,
		    ENTITY(FE_ISP, RAWDUMP, 1), PAD_IN);
	CREATE_LINK(ENTITY(MIPI, CSI_MAIN, 2), CSI_PAD_RAWDUMP0,
		    ENTITY(FE_ISP, RAWDUMP, 0), PAD_IN);
	CREATE_LINK(ENTITY(MIPI, CSI_MAIN, 2), CSI_PAD_RAWDUMP1,
		    ENTITY(FE_ISP, RAWDUMP, 1), PAD_IN);

	//CSI_MAIN => PIPE links
	CREATE_LINK(ENTITY(MIPI, CSI_MAIN, 0), CSI_PAD_PIPE0,
		    ENTITY(FE_ISP, PIPE, 0), PIPE_PAD_IN);
	CREATE_LINK(ENTITY(MIPI, CSI_MAIN, 0), CSI_PAD_PIPE1,
		    ENTITY(FE_ISP, PIPE, 1), PIPE_PAD_IN);
	CREATE_LINK(ENTITY(MIPI, CSI_MAIN, 1), CSI_PAD_PIPE0,
		    ENTITY(FE_ISP, PIPE, 0), PIPE_PAD_IN);
	CREATE_LINK(ENTITY(MIPI, CSI_MAIN, 1), CSI_PAD_PIPE1,
		    ENTITY(FE_ISP, PIPE, 1), PIPE_PAD_IN);
	CREATE_LINK(ENTITY(MIPI, CSI_MAIN, 2), CSI_PAD_PIPE0,
		    ENTITY(FE_ISP, PIPE, 0), PIPE_PAD_IN);
	CREATE_LINK(ENTITY(MIPI, CSI_MAIN, 2), CSI_PAD_PIPE1,
		    ENTITY(FE_ISP, PIPE, 1), PIPE_PAD_IN);

	//PIPE => FORMATTER links
	CREATE_LINK(ENTITY(FE_ISP, PIPE, 0), PIPE_PAD_F0OUT,
		    ENTITY(FE_ISP, FORMATTER, 0), FMT_PAD_IN);
	CREATE_LINK(ENTITY(FE_ISP, PIPE, 1), PIPE_PAD_F1OUT,
		    ENTITY(FE_ISP, FORMATTER, 1), FMT_PAD_IN);
/*
	//PIPE0 => RAWDUMP0 links
	CREATE_LINK(ENTITY(FE_ISP, PIPE, 0), PIPE_PAD_RAWDUMP0OUT,
		    ENTITY(FE_ISP, RAWDUMP, 0), PAD_IN);
	//PIPE => HDR_COMBINE links
	CREATE_LINK(ENTITY(FE_ISP, PIPE, 0), PIPE_PAD_HDROUT,
		    ENTITY(FE_ISP, HDR_COMBINE, 0), HDR_PAD_P0IN);
	CREATE_LINK(ENTITY(FE_ISP, PIPE, 1), PIPE_PAD_HDROUT,
		    ENTITY(FE_ISP, HDR_COMBINE, 0), HDR_PAD_P1IN);
	//HDR_COMBINE => FORMATTER links
	for (i = 0; i < FORMATTER_NUM; i++) {
		CREATE_LINK(ENTITY(FE_ISP, HDR_COMBINE, 0), HDR_PAD_F0OUT + i,
			    ENTITY(FE_ISP, FORMATTER, i), FMT_PAD_IN);
	}
*/
	//RAWDUMP => AOUT links
	for (i = 0; i < RAWDUMP_NUM; i++) {
		for (j = 0; j < AOUT_NUM; j++) {
			CREATE_LINK(ENTITY(FE_ISP, RAWDUMP, i), PAD_OUT,
				    ENTITY(DMA, AOUT, j), VNODE_PAD_IN);
		}
	}
	//FORMATTER => AOUT links
	for (i = 0; i < FORMATTER_NUM; i++) {
		for (j = 0; j < AOUT_NUM; j++) {
			CREATE_LINK(ENTITY(FE_ISP, FORMATTER, i), FMT_PAD_AOUT,
				    ENTITY(DMA, AOUT, j), VNODE_PAD_IN);
		}
	}
	//FORMATTER => DWT links
	/* DWT layer ids are 1-based: pad D1OUT..D4OUT maps to layer 1..4. */
	for (i = 0; i < FORMATTER_NUM; i++) {
		for (j = DWT0; j <= DWT1; j++) {
			for (k = FMT_PAD_D1OUT; k <= FMT_PAD_D4OUT; k++) {
				CREATE_LINK(ENTITY(FE_ISP, FORMATTER, i), k,
					    ENTITY(FE_ISP, j, 1 + k - FMT_PAD_D1OUT),
					    PAD_IN);
			}
		}
	}
	//DWT => AOUT links
	for (i = DWT0; i <= DWT1; i++) {
		for (j = 1; j <= DWT_LAYER_NUM; j++) {
			for (k = 0; k < AOUT_NUM; k++) {
				CREATE_LINK(ENTITY(FE_ISP, i, j), PAD_OUT,
					    ENTITY(DMA, AOUT, k), VNODE_PAD_IN);
			}
		}
	}
	//AIN => OFFLINE_CHANNEL links
	CREATE_LINK(ENTITY(DMA, AIN, 0), VNODE_PAD_OUT,
		    ENTITY(FE_ISP, OFFLINE_CHANNEL, 0), OFFLINE_CH_PAD_IN);
	CREATE_LINK(ENTITY(DMA, AIN, 1), VNODE_PAD_OUT,
		    ENTITY(FE_ISP, OFFLINE_CHANNEL, 1), OFFLINE_CH_PAD_IN);
	//OFFLINE_CHANNEL => PIPE links
	CREATE_LINK(ENTITY(FE_ISP, OFFLINE_CHANNEL, 0), OFFLINE_CH_PAD_P0OUT,
		    ENTITY(FE_ISP, PIPE, 0), PIPE_PAD_IN);
	CREATE_LINK(ENTITY(FE_ISP, OFFLINE_CHANNEL, 0), OFFLINE_CH_PAD_P1OUT,
		    ENTITY(FE_ISP, PIPE, 1), PIPE_PAD_IN);
	CREATE_LINK(ENTITY(FE_ISP, OFFLINE_CHANNEL, 1), OFFLINE_CH_PAD_P0OUT,
		    ENTITY(FE_ISP, PIPE, 0), PIPE_PAD_IN);
	CREATE_LINK(ENTITY(FE_ISP, OFFLINE_CHANNEL, 1), OFFLINE_CH_PAD_P1OUT,
		    ENTITY(FE_ISP, PIPE, 1), PIPE_PAD_IN);

	return 0;
LINKS_FAIL:
	/* Undo: strip the links of every entity created so far. */
	for (i = 0; i < GRP_MAX; i++) {
		for (j = 0; j < SUB_MAX; j++) {
			for (k = 0; k < ID_MAX; k++) {
				if (drvdata->entities[i][j][k]) {
#ifdef MODULE
					__spm_media_entity_remove_links(drvdata->entities[i][j][k]);
#else
					__media_entity_remove_links(drvdata->entities[i][j][k]);
#endif
				}
			}
		}
	}
	return ret;
}
|
|
+
|
|
+struct platform_device *k1xvi_get_platform_device(void)
|
|
+{
|
|
+ return g_pdev;
|
|
+}
|
|
+
|
|
+extern struct spm_camera_vi_ops vi_ops;
|
|
+
|
|
+int k1xvi_register_isp_firmware(struct isp_firm *isp_firm)
|
|
+{
|
|
+ struct k1xvi_platform_data *drvdata = NULL;
|
|
+
|
|
+ mutex_lock(&g_init_mlock);
|
|
+ if (!g_pdev) {
|
|
+ cam_err("%s g_pdev was null", __func__);
|
|
+ mutex_unlock(&g_init_mlock);
|
|
+ return -1;
|
|
+ }
|
|
+ if (!g_isp_firm) {
|
|
+ g_isp_firm = devm_kzalloc(&g_pdev->dev, sizeof(*g_isp_firm), GFP_KERNEL);
|
|
+ if (!g_isp_firm) {
|
|
+ cam_err("%s no enough mem", __func__);
|
|
+ mutex_unlock(&g_init_mlock);
|
|
+ return -ENOMEM;
|
|
+ }
|
|
+ }
|
|
+ drvdata = platform_get_drvdata(g_pdev);
|
|
+ if (isp_firm) {
|
|
+ *g_isp_firm = *isp_firm;
|
|
+ drvdata->isp_firm = g_isp_firm;
|
|
+ isp_firm->vi_ops = &vi_ops;
|
|
+ } else {
|
|
+ drvdata->isp_firm = NULL;
|
|
+ }
|
|
+ mutex_unlock(&g_init_mlock);
|
|
+
|
|
+ return 0;
|
|
+}
|
|
+
|
|
+int k1xvi_register_sensor_ops(struct spm_camera_sensor_ops *sensor_ops)
|
|
+{
|
|
+ struct k1xvi_platform_data *drvdata = NULL;
|
|
+
|
|
+ mutex_lock(&g_init_mlock);
|
|
+ if (!g_pdev) {
|
|
+ cam_err("%s g_pdev was null", __func__);
|
|
+ mutex_unlock(&g_init_mlock);
|
|
+ return -1;
|
|
+ }
|
|
+ drvdata = platform_get_drvdata(g_pdev);
|
|
+ drvdata->sensor_ops = sensor_ops;
|
|
+ mutex_unlock(&g_init_mlock);
|
|
+
|
|
+ return 0;
|
|
+}
|
|
+
|
|
+int k1xvi_power(int on_off)
|
|
+{
|
|
+ struct k1xvi_platform_data *drvdata = NULL;
|
|
+
|
|
+ if (!g_pdev)
|
|
+ return -1;
|
|
+ drvdata = platform_get_drvdata(g_pdev);
|
|
+ return fe_isp_s_power(drvdata->isp_ctx, on_off);
|
|
+}
|
|
+
|
|
+EXPORT_SYMBOL_GPL(k1xvi_power);
|
|
+
|
|
+static int k1xvi_probe(struct platform_device *pdev)
|
|
+{
|
|
+ struct device *dev = &pdev->dev;
|
|
+ struct k1xvi_platform_data *drvdata = NULL;
|
|
+ int ret = 0;
|
|
+
|
|
+ cam_dbg("%s enter.", __func__);
|
|
+ mutex_lock(&g_init_mlock);
|
|
+ drvdata = devm_kzalloc(dev, sizeof(*drvdata), GFP_KERNEL);
|
|
+ if (!drvdata) {
|
|
+ cam_err("%s not enough mem.", __func__);
|
|
+ mutex_unlock(&g_init_mlock);
|
|
+ return -ENOMEM;
|
|
+ }
|
|
+ g_v4l2_dev = plat_cam_v4l2_device_get();
|
|
+ if (!g_v4l2_dev) {
|
|
+ cam_err("%s get v4l2 device failed", __func__);
|
|
+ mutex_unlock(&g_init_mlock);
|
|
+ return -ENODEV;
|
|
+ }
|
|
+ drvdata->isp_ctx = fe_isp_create_ctx(pdev);
|
|
+ if (!drvdata->isp_ctx) {
|
|
+ cam_err("%s fe_isp_create_ctx failed.", __func__);
|
|
+ mutex_unlock(&g_init_mlock);
|
|
+ return -1;
|
|
+ }
|
|
+#ifndef CONFIG_SPACEMIT_XILINX_ZYNQMP
|
|
+ /* enable runtime pm */
|
|
+ pm_runtime_enable(&pdev->dev);
|
|
+ device_init_wakeup(&pdev->dev, true);
|
|
+#endif
|
|
+
|
|
+ ret = k1xvi_create_entities(pdev, drvdata);
|
|
+ if (ret) {
|
|
+ cam_err("%s k1xvi_create_entities failed ret=%d.", __func__, ret);
|
|
+ goto entities_fail;
|
|
+ }
|
|
+
|
|
+ ret = k1xvi_create_entity_links(drvdata);
|
|
+ if (ret) {
|
|
+ cam_err("%s k1xvi_create_entity_links failed ret=%d.", __func__, ret);
|
|
+ goto entities_fail;
|
|
+ }
|
|
+ drvdata->isp_firm = g_isp_firm;
|
|
+ drvdata->pdev = pdev;
|
|
+ platform_set_drvdata(pdev, drvdata);
|
|
+ g_pdev = pdev;
|
|
+ cam_dbg("%s leave.", __func__);
|
|
+ mutex_unlock(&g_init_mlock);
|
|
+ return 0;
|
|
+entities_fail:
|
|
+ mutex_unlock(&g_init_mlock);
|
|
+ return ret;
|
|
+}
|
|
+
|
|
/*
 * k1xvi_remove() - platform remove; mirror of k1xvi_probe().
 *
 * Tears down in reverse probe order: media entities, ISP context, the
 * global device pointer, the shared v4l2-device reference, then runtime PM.
 * Always returns 0.
 */
static int k1xvi_remove(struct platform_device *pdev)
{
	struct k1xvi_platform_data *drvdata = platform_get_drvdata(pdev);

	cam_dbg("%s enter.", __func__);
	k1xvi_release_entities(drvdata);
	fe_isp_release_ctx(drvdata->isp_ctx);
	/* Clear the globals published by probe before dropping the v4l2 ref. */
	g_pdev = NULL;
	plat_cam_v4l2_device_put(g_v4l2_dev);
	g_v4l2_dev = NULL;
	/* disable runtime pm */
#ifndef CONFIG_SPACEMIT_XILINX_ZYNQMP
	device_init_wakeup(&pdev->dev, false);
	pm_runtime_disable(&pdev->dev);
#endif

	cam_dbg("%s leave.", __func__);
	return 0;
}
|
|
+
|
|
/* Device-tree match table: binds to "spacemit,k1xvi" nodes. */
static const struct of_device_id k1xvi_dt_match[] = {
	{
		.compatible = "spacemit,k1xvi",
	},
	{}
};

MODULE_DEVICE_TABLE(of, k1xvi_dt_match);
|
|
+
|
|
/* Platform driver glue; registered from spmisp_init(). */
static struct platform_driver k1xvi_driver = {
	.probe = k1xvi_probe,
	.remove = k1xvi_remove,
	.driver = {
		.name = "spacemit-k1xvi",
		.of_match_table = k1xvi_dt_match,
	},
};
|
|
+
|
|
+static int __init spmisp_init(void)
|
|
+{
|
|
+ struct platform_device *pdev = NULL;
|
|
+ int ret = 0;
|
|
+
|
|
+ ret = platform_driver_register(&k1xvi_driver);
|
|
+ if (ret) {
|
|
+ cam_err("%s platform_driver_register failed ret=%d.", __func__, ret);
|
|
+ goto driver_fail;
|
|
+ }
|
|
+ return 0;
|
|
+driver_fail:
|
|
+ platform_device_unregister(pdev);
|
|
+ return ret;
|
|
+}
|
|
+
|
|
+module_init(spmisp_init);
|
|
+
|
|
/* Module exit: unregister the platform driver (triggers k1xvi_remove()). */
static void __exit spmisp_exit(void)
{
	platform_driver_unregister(&k1xvi_driver);
}

module_exit(spmisp_exit);
|
|
+
|
|
+MODULE_LICENSE("GPL");
|
|
diff --git a/drivers/media/platform/spacemit/camera/vi/k1xvi/k1xvi.h b/drivers/media/platform/spacemit/camera/vi/k1xvi/k1xvi.h
|
|
new file mode 100644
|
|
index 000000000000..111111111111
|
|
--- /dev/null
|
|
+++ b/drivers/media/platform/spacemit/camera/vi/k1xvi/k1xvi.h
|
|
@@ -0,0 +1,59 @@
|
|
/* SPDX-License-Identifier: GPL-2.0 */
/*
 * k1xvi.h - k1xisp vi platform device driver
 *
 * Copyright(C) 2023 SPACEMIT Micro Limited
 */

#ifndef _SPACEMIT_K1XISP_H_
#define _SPACEMIT_K1XISP_H_
#include <media/media-entity.h>
#include "../cam_block.h"
#include "../mlink.h"

/* Entity group: first index of k1xvi_platform_data.entities[][][]. */
enum {
	DMA = 0,
	MIPI,
	FE_ISP,
	GRP_MAX,
};

/*
 * Entity sub-type within a group: second index of entities[][][].
 * Note the values are group-relative, so they intentionally overlap
 * (e.g. SENSOR == AIN == RAWDUMP == 0); SUB_MAX sizes the array.
 */
enum {
	//for MIPI
	SENSOR = 0,
	CSI_MAIN,
	CSI_VCDT,
	//for DMA
	AIN = 0,
	AOUT,
	//for FE_ISP
	RAWDUMP = 0,
	OFFLINE_CHANNEL,
	PIPE,
	FORMATTER,
	DWT0,
	DWT1,
	HDR_COMBINE,
	IDI,
	SUB_MAX,
};

/* Max instances per (group, sub-type): third index of entities[][][]. */
#define ID_MAX (16)

#define AIN_NUM (2)
#define AOUT_NUM (14)
#define SENSOR_NUM (3)

/* Per-device state attached to the platform device via drvdata. */
struct k1xvi_platform_data {
	/* All media entities, indexed [group][sub-type][instance]. */
	struct media_entity *entities[GRP_MAX][SUB_MAX][ID_MAX];
	struct platform_device *pdev;
	void *isp_ctx;          /* opaque context from fe_isp_create_ctx() */
	struct isp_firm *isp_firm;
	struct spm_camera_sensor_ops *sensor_ops;
};

int k1xvi_register_isp_firmware(struct isp_firm *isp_firm);
int k1xvi_register_sensor_ops(struct spm_camera_sensor_ops *sensor_ops);
int k1xvi_power(int on_off);
struct platform_device *k1xvi_get_platform_device(void);
#endif
|
|
diff --git a/drivers/media/platform/spacemit/camera/vi/mlink.c b/drivers/media/platform/spacemit/camera/vi/mlink.c
|
|
new file mode 100644
|
|
index 000000000000..111111111111
|
|
--- /dev/null
|
|
+++ b/drivers/media/platform/spacemit/camera/vi/mlink.c
|
|
@@ -0,0 +1,831 @@
|
|
+// SPDX-License-Identifier: GPL-2.0
|
|
+/*
|
|
+ * mlink.c - media link functions
|
|
+ *
|
|
+ * Copyright(C) 2023 SPACEMIT Micro Limited
|
|
+ */
|
|
+
|
|
+#include <media/media-entity.h>
|
|
+#define CAM_MODULE_TAG CAM_MDL_VI
|
|
+#include <cam_dbg.h>
|
|
+#include "mlink.h"
|
|
+#include "subdev.h"
|
|
+#include "vdev.h"
|
|
+
|
|
+const char *media_entity_name(struct media_entity *me)
|
|
+{
|
|
+ struct spm_camera_subdev *sc_subdev = NULL;
|
|
+ struct spm_camera_vnode *sc_vnode = NULL;
|
|
+
|
|
+ if (is_subdev(me)) {
|
|
+ sc_subdev = media_entity_to_sc_subdev(me);
|
|
+ return sc_subdev->name;
|
|
+ } else {
|
|
+ sc_vnode = media_entity_to_sc_vnode(me);
|
|
+ return sc_vnode->name;
|
|
+ }
|
|
+
|
|
+ return NULL;
|
|
+}
|
|
+
|
|
/*
 * spm_mlink_reset_pipeline_context() - return a pipeline to its idle state.
 *
 * Broadcasts PIPELINE_ACTION_CLEAN_USR_DATA so listeners drop their per-run
 * data, clears all run-time fields, then empties the blocking notifier chain
 * by writing its head under the chain's own rwsem (there is no public API to
 * unregister all listeners at once, hence the direct rcu_assign_pointer).
 * Caller context: invoked with the media device's graph_mutex held.
 */
static void spm_mlink_reset_pipeline_context(struct spm_camera_pipeline *sc_pipeline)
{
	blocking_notifier_call_chain(&(sc_pipeline->blocking_notify_chain),
				     PIPELINE_ACTION_CLEAN_USR_DATA, sc_pipeline);
	sc_pipeline->state = PIPELINE_ST_IDLE;
	sc_pipeline->is_online_mode = 0;
	sc_pipeline->is_slice_mode = 0;
	sc_pipeline->slice_id = 0;
	sc_pipeline->slice_result = 0;
	atomic_set(&sc_pipeline->slice_info_update, 0);
	INIT_LIST_HEAD(&sc_pipeline->frame_id_list);
	down_write(&sc_pipeline->blocking_notify_chain.rwsem);
	rcu_assign_pointer(sc_pipeline->blocking_notify_chain.head, NULL);
	up_write(&sc_pipeline->blocking_notify_chain.rwsem);
}
|
|
+
|
|
#define MEDIA_ENTITY_MAX_PADS 512
#define MAX_SENSORS (16)
/*
 * spm_mlink_get_pipeline() - acquire a pipeline over the graph containing @me.
 * @pipeline: pipeline to adopt if @me is not yet part of one.
 * @me:       any entity of the graph to walk from.
 *
 * Walks the connected graph, bumping each entity's use_count, attaching the
 * pipeline to pad 0, powering up non-sensor subdevs as they are visited, and
 * validating every enabled sink link. Sensors are collected and powered up
 * last, after all other entities succeeded. On the first power-up of the
 * pipeline (start_count 0 -> 1) the graph walk state is initialized.
 *
 * On error, everything is rolled back: collected sensors are powered down,
 * the walk is replayed to drop use_count / power / pipe pointers up to the
 * failing entity, and if this was the first user the pipeline context and
 * walk state are torn down too.
 *
 * Return: 0 on success, -EPIPE on bad pipeline state, -EBUSY if an entity
 * already belongs to a different pipeline, -ENOLINK on a must-connect pad
 * without an enabled link, or the subdev/validation error code.
 */
int spm_mlink_get_pipeline(struct media_pipeline *pipeline, struct media_entity *me)
{
	int ret = 0;
	struct spm_camera_pipeline *sc_pipeline = NULL;
	struct media_pipeline *pipe = media_entity_pipeline(me);
	struct media_pipeline *pipe_me = NULL;
	struct media_graph *graph = NULL;
	struct media_entity *error_entity = me;
	struct media_device *mdev = me->graph_obj.mdev;
	struct v4l2_subdev *sd = NULL;
	struct media_entity *sensors[MAX_SENSORS] = { NULL };
	int num_sensors = 0;
	int power_loop = 0;
	struct media_link *link = NULL;

	mutex_lock(&mdev->graph_mutex);
	if (NULL == pipe)
		pipe = pipeline;
	sc_pipeline = media_pipeline_to_sc_pipeline(pipe);
	if (sc_pipeline->state > PIPELINE_ST_GET) {
		cam_err("%s pipeline state(%d) error.", __func__, sc_pipeline->state);
		mutex_unlock(&mdev->graph_mutex);
		return -EPIPE;
	}
	graph = &sc_pipeline->graph;

	/* First user of this pipeline: set up the graph walk state. */
	if (!pipe->start_count++) {
		ret = media_graph_walk_init(graph, mdev);
		if (ret)
			goto error_graph_walk_start;
	}
	media_graph_walk_start(graph, me);
	while ((me = media_graph_walk_next(graph))) {
		DECLARE_BITMAP(active, MEDIA_ENTITY_MAX_PADS);
		DECLARE_BITMAP(has_no_links, MEDIA_ENTITY_MAX_PADS);

		me->use_count++;
		pipe_me = media_entity_pipeline(me);
		if (WARN_ON(pipe_me && pipe_me != pipe)) {
			ret = -EBUSY;
			goto error;
		}
		me->pads[0].pipe = pipe;

		/* Already streaming --- no need to check. */
		if (me->use_count > 1)
			continue;

		if (is_subdev(me)) {
			if (!is_sensor(me)) {
				sd = media_entity_to_v4l2_subdev(me);
				ret = v4l2_subdev_call(sd, core, s_power, 1);
				if (ret && ret != -ENOIOCTLCMD) {
					cam_err("(%s) power 1 fail.", media_entity_name(me));
					goto error;
				}
			} else {
				/* Defer sensor power-up until the rest of the graph is up. */
				if (num_sensors >= MAX_SENSORS) {
					cam_err("too many sensors entity.");
					BUG_ON(1);
				}
				sensors[num_sensors++] = me;
			}
		}

		if (!me->ops || !me->ops->link_validate)
			continue;

		bitmap_zero(active, me->num_pads);
		bitmap_fill(has_no_links, me->num_pads);

		list_for_each_entry(link, &me->links, list) {
			struct media_pad *pad = link->sink->entity == me ? link->sink : link->source;

			/* Mark that a pad is connected by a link. */
			bitmap_clear(has_no_links, pad->index, 1);

			/*
			 * Pads that either do not need to connect or
			 * are connected through an enabled link are
			 * fine.
			 */
			if (!(pad->flags & MEDIA_PAD_FL_MUST_CONNECT) ||
			    link->flags & MEDIA_LNK_FL_ENABLED)
				bitmap_set(active, pad->index, 1);

			/*
			 * Link validation will only take place for
			 * sink ends of the link that are enabled.
			 */
			if (link->sink != pad || !(link->flags & MEDIA_LNK_FL_ENABLED))
				continue;

			ret = me->ops->link_validate(link);
			if (ret < 0 && ret != -ENOIOCTLCMD) {
				cam_err("link validation failed for \"%s\":%u -> \"%s\":%u, error %d",
					media_entity_name(link->source->entity),
					link->source->index, media_entity_name(me),
					link->sink->index, ret);
				goto error;
			}
		}

		/* Either no links or validated links are fine. */
		bitmap_or(active, active, has_no_links, me->num_pads);

		if (!bitmap_full(active, me->num_pads)) {
			ret = -ENOLINK;
			cam_err("\"%s\":%u must be connected by an enabled link",
				media_entity_name(me),
				(unsigned)find_first_zero_bit(active, me->num_pads));
			goto error;
		}
	}

	/* All non-sensor entities are up; now power the sensors. */
	for (power_loop = 0; power_loop < num_sensors; power_loop++) {
		sd = media_entity_to_v4l2_subdev(sensors[power_loop]);
		ret = v4l2_subdev_call(sd, core, s_power, 1);
		if (ret && ret != -ENOIOCTLCMD) {
			cam_err("(%s) power 1 failed.", media_entity_name(sensors[power_loop]));
			goto error;
		}
	}

	/* A pipeline containing a sensor runs in online (streaming) mode. */
	if (num_sensors > 0)
		sc_pipeline->is_online_mode = 1;
	else
		sc_pipeline->is_online_mode = 0;
	sc_pipeline->state = PIPELINE_ST_GET;
	mutex_unlock(&mdev->graph_mutex);

	return 0;
error:
	media_graph_walk_start(graph, error_entity);
	/* Power down any sensors powered in the loop above. */
	while (power_loop > 0) {
		sd = media_entity_to_v4l2_subdev(sensors[--power_loop]);
		v4l2_subdev_call(sd, core, s_power, 0);
	}
	while ((error_entity = media_graph_walk_next(graph))) {
		if (!WARN_ON_ONCE(error_entity->use_count <= 0)) {
			error_entity->use_count--;
			if (error_entity->use_count == 0) {
				if (is_subdev(error_entity)) {
					if (!is_sensor(error_entity)) {
						sd = media_entity_to_v4l2_subdev(error_entity);
						v4l2_subdev_call(sd, core, s_power, 0);
					}
				}
				error_entity->pads[0].pipe = NULL;
			}
		}

		/*
		 * We haven't increased use_count further than this
		 * so we quit here.
		 */
		if (error_entity == me)
			break;
	}
error_graph_walk_start:
	if (!--pipe->start_count) {
		spm_mlink_reset_pipeline_context(sc_pipeline);
		media_graph_walk_cleanup(graph);
	}
	mutex_unlock(&mdev->graph_mutex);

	return ret;
}
|
|
+
|
|
/*
 * __spm_media_entity_setup_link_notify() - apply new flags to a link,
 * notifying both endpoint entities via their link_setup ops.
 *
 * If the sink rejects the change, the source is re-notified with the old
 * flags to undo its side. On success the flags are mirrored to the reverse
 * link as well.
 */
static int __spm_media_entity_setup_link_notify(struct media_link *link, u32 flags)
{
	int ret;

	/* Notify both entities. */
	ret = media_entity_call(link->source->entity, link_setup, link->source, link->sink, flags);
	if (ret < 0 && ret != -ENOIOCTLCMD)
		return ret;

	ret = media_entity_call(link->sink->entity, link_setup, link->sink, link->source, flags);
	if (ret < 0 && ret != -ENOIOCTLCMD) {
		/* Roll the source back to the previous flags. */
		media_entity_call(link->source->entity, link_setup,
				  link->source, link->sink, link->flags);
		return ret;
	}

	link->flags = flags;
	link->reverse->flags = link->flags;

	return 0;
}
|
|
+
|
|
/*
 * __spm_media_entity_setup_link() - change a link's ENABLED flag.
 *
 * Local copy of media_entity_setup_link() with the stream_count busy check
 * removed (kept below, commented out) so links can be reconfigured while the
 * pipeline is in use. Only MEDIA_LNK_FL_ENABLED may change; immutable links
 * and attempts to touch other flags are rejected. Wraps the change with the
 * media device's pre/post link_notify callbacks when present.
 * Caller context: graph_mutex must be held.
 */
static int __spm_media_entity_setup_link(struct media_link *link, u32 flags)
{
	const u32 mask = MEDIA_LNK_FL_ENABLED;
	struct media_device *mdev;
	struct media_entity *source, *sink;
	int ret = -EBUSY;

	if (link == NULL)
		return -EINVAL;

	/* The non-modifiable link flags must not be modified. */
	if ((link->flags & ~mask) != (flags & ~mask))
		return -EINVAL;

	if (link->flags & MEDIA_LNK_FL_IMMUTABLE)
		return link->flags == flags ? 0 : -EINVAL;

	if (link->flags == flags)
		return 0;

	source = link->source->entity;
	sink = link->sink->entity;
/*
	if (!(link->flags & MEDIA_LNK_FL_DYNAMIC) &&
	    (source->stream_count || sink->stream_count))
		return -EBUSY;
*/
	mdev = source->graph_obj.mdev;

	if (mdev->ops && mdev->ops->link_notify) {
		ret = mdev->ops->link_notify(link, flags, MEDIA_DEV_NOTIFY_PRE_LINK_CH);
		if (ret < 0)
			return ret;
	}

	ret = __spm_media_entity_setup_link_notify(link, flags);

	if (mdev->ops && mdev->ops->link_notify)
		mdev->ops->link_notify(link, flags, MEDIA_DEV_NOTIFY_POST_LINK_CH);

	return ret;
}
|
|
+
|
|
/*
 * __spm_mlink_put_pipeline() - release one reference on @me's pipeline.
 * @me:                any entity of the pipeline.
 * @auto_disable_link: if non-zero, disable every enabled link of an entity
 *                     when its use_count drops to zero.
 *
 * Reverse of spm_mlink_get_pipeline(): walks the graph dropping use_count,
 * powering subdevs down and detaching the pipe pointer for entities that
 * reach zero. When the pipeline's start_count reaches zero the pipeline
 * context is reset and the walk state freed.
 * Caller context: graph_mutex must be held (see spm_mlink_put_pipeline()).
 *
 * Return: 0 on success or if there is nothing to put; -EPIPE if the
 * pipeline is in a state above PIPELINE_ST_STOPPED (still running).
 */
int __spm_mlink_put_pipeline(struct media_entity *me, int auto_disable_link)
{
	struct media_graph *graph = NULL;
	struct spm_camera_pipeline *sc_pipeline = NULL;
	struct v4l2_subdev *sd = NULL;
	struct media_link *link = NULL;
	struct media_pipeline *pipe = media_entity_pipeline(me);

	cam_dbg("%s enter.", __func__);
	if (pipe) {
		sc_pipeline = container_of(pipe, struct spm_camera_pipeline, media_pipe);
		if (sc_pipeline->state > PIPELINE_ST_STOPPED) {
			cam_err("%s pipeline state(%d) error.", __func__, sc_pipeline->state);
			return -EPIPE;
		} else if (sc_pipeline->state <= PIPELINE_ST_IDLE) {
			/* Never acquired (or already fully released): nothing to do. */
			return 0;
		}
		graph = &sc_pipeline->graph;
	} else {
		return 0;
	}

	media_graph_walk_start(graph, me);
	while ((me = media_graph_walk_next(graph))) {
		if (!WARN_ON_ONCE(me->use_count <= 0)) {
			me->use_count--;
			if (me->use_count == 0) {
				/* Last user: power down and optionally drop its links. */
				if (is_subdev(me)) {
					sd = media_entity_to_v4l2_subdev(me);
					v4l2_subdev_call(sd, core, s_power, 0);
				}
				if (auto_disable_link) {
					list_for_each_entry(link, &me->links, list) {
						if (link->flags & MEDIA_LNK_FL_ENABLED)
							__spm_media_entity_setup_link(link, link->flags & (~MEDIA_LNK_FL_ENABLED));
					}
				}
				me->pads[0].pipe = NULL;
			}
		}
	}
	if (!--pipe->start_count) {
		spm_mlink_reset_pipeline_context(sc_pipeline);
		media_graph_walk_cleanup(graph);
	}
	return 0;
}
|
|
+
|
|
+int spm_mlink_put_pipeline(struct media_entity *me, int auto_disable_link)
|
|
+{
|
|
+ int ret = 0;
|
|
+ struct media_device *mdev = me->graph_obj.mdev;
|
|
+
|
|
+ mutex_lock(&mdev->graph_mutex);
|
|
+ ret = __spm_mlink_put_pipeline(me, auto_disable_link);
|
|
+ mutex_unlock(&mdev->graph_mutex);
|
|
+
|
|
+ return ret;
|
|
+}
|
|
+
|
|
+static int spm_mlink_apply_format_forward(struct media_entity *me)
|
|
+{
|
|
+ struct media_link *link = NULL;
|
|
+ struct media_entity *remote_me = NULL;
|
|
+ struct v4l2_subdev *remote_sd = NULL, *sd = NULL;
|
|
+ int source_pad_id = 0, sink_pad_id = 0;
|
|
+ struct v4l2_subdev_format fmt;
|
|
+ int ret = 0;
|
|
+
|
|
+ if (!is_subdev(me))
|
|
+ return 0;
|
|
+ list_for_each_entry(link, &me->links, list) {
|
|
+ if (!is_link_enabled(link))
|
|
+ continue;
|
|
+ if (is_link_sink(me, link))
|
|
+ continue;
|
|
+ source_pad_id = link->source->index;
|
|
+ sink_pad_id = link->sink->index;
|
|
+ sd = media_entity_to_v4l2_subdev(me);
|
|
+ remote_me = link->sink->entity;
|
|
+ if (!is_subdev(remote_me))
|
|
+ continue;
|
|
+ remote_sd = media_entity_to_v4l2_subdev(remote_me);
|
|
+ fmt.which = V4L2_SUBDEV_FORMAT_ACTIVE;
|
|
+ fmt.pad = source_pad_id;
|
|
+ ret = v4l2_subdev_call(sd, pad, get_fmt, NULL, &fmt);
|
|
+ if (ret && ret != -ENOIOCTLCMD) {
|
|
+ cam_err("%s get pad(%d) fmt from %s failed.", __func__,
|
|
+ source_pad_id, media_entity_name(me));
|
|
+ return ret;
|
|
+ }
|
|
+ fmt.which = V4L2_SUBDEV_FORMAT_ACTIVE;
|
|
+ fmt.pad = sink_pad_id;
|
|
+ cam_dbg("set format(%dx%d mbus_code=0x%08x) to %s pad%d.",
|
|
+ fmt.format.width, fmt.format.height, fmt.format.code,
|
|
+ media_entity_name(remote_me), fmt.pad);
|
|
+ ret = v4l2_subdev_call(remote_sd, pad, set_fmt, NULL, &fmt);
|
|
+ if (ret && ret != -ENOIOCTLCMD) {
|
|
+ cam_err("%s set pad(%d) fmt(%dx%d code:0x%08x) to %s failed.",
|
|
+ __func__, sink_pad_id, fmt.format.width,
|
|
+ fmt.format.height, fmt.format.code,
|
|
+ media_entity_name(remote_me));
|
|
+ return ret;
|
|
+ }
|
|
+ }
|
|
+
|
|
+ return 0;
|
|
+}
|
|
+
|
|
/* Work-queue node used by spm_mlink_apply_pipeline()'s breadth-first walk. */
struct media_ent_list_entry {
	struct list_head list_entry;
	struct media_entity *entity;
};
|
|
+
|
|
+int spm_mlink_apply_pipeline(struct media_entity *me)
|
|
+{
|
|
+ int ret = 0, i = 0, idx_max = 0;
|
|
+ struct media_entity *start_points[MAX_SENSORS];
|
|
+ int num_points = 0;
|
|
+ struct media_graph *graph = NULL;
|
|
+ struct media_device *mdev = me->graph_obj.mdev;
|
|
+ static struct media_ent_list_entry *entities = NULL;
|
|
+ LIST_HEAD(entities_list);
|
|
+ static unsigned long *visited = NULL;
|
|
+ struct media_entity *next = NULL;
|
|
+ struct media_ent_list_entry *media_ent = NULL;
|
|
+ struct media_pad *remote_pad = NULL;
|
|
+ struct spm_camera_pipeline *sc_pipeline = NULL;
|
|
+ struct v4l2_subdev *sd = media_entity_to_v4l2_subdev(me);
|
|
+ struct media_link *link = NULL;
|
|
+ struct media_pipeline *pipe = media_entity_pipeline(me);
|
|
+
|
|
+ mutex_lock(&mdev->graph_mutex);
|
|
+ if (NULL == pipe) {
|
|
+ cam_err("(%s) pipeline is null.", media_entity_name(me));
|
|
+ ret = -EPIPE;
|
|
+ goto apply_error;
|
|
+ } else {
|
|
+ sc_pipeline = container_of(pipe, struct spm_camera_pipeline, media_pipe);
|
|
+ if (sc_pipeline->state == PIPELINE_ST_IDLE ||
|
|
+ sc_pipeline->state >= PIPELINE_ST_STARTED) {
|
|
+ cam_err("%s pipeline state(%d) error.", __func__, sc_pipeline->state);
|
|
+ ret = -EPIPE;
|
|
+ goto apply_error;
|
|
+ }
|
|
+ }
|
|
+ graph = &sc_pipeline->graph;
|
|
+ if (!sd->dev) {
|
|
+ cam_err("%s(%s) dev is null", __func__, media_entity_name(me));
|
|
+ ret = -ENODEV;
|
|
+ goto apply_error;
|
|
+ }
|
|
+ if (!entities) {
|
|
+ cam_dbg("entities idx max %d", graph->ent_enum.idx_max);
|
|
+ entities = devm_kcalloc(sd->dev, graph->ent_enum.idx_max, sizeof(*entities), GFP_KERNEL);
|
|
+ if (!entities) {
|
|
+ cam_err("%s not enough mem for entities(%d max)", __func__, graph->ent_enum.idx_max);
|
|
+ ret = -ENOMEM;
|
|
+ goto apply_error;
|
|
+ }
|
|
+ }
|
|
+ if (!visited) {
|
|
+ idx_max = ALIGN(graph->ent_enum.idx_max, BITS_PER_LONG);
|
|
+ visited = devm_kcalloc(sd->dev, idx_max / BITS_PER_LONG, sizeof(long), GFP_KERNEL);
|
|
+ if (!visited) {
|
|
+ cam_err("%s not enough mem for visited map", __func__);
|
|
+ ret = -ENOMEM;
|
|
+ goto apply_error;
|
|
+ }
|
|
+ }
|
|
+
|
|
+ media_graph_walk_start(graph, me);
|
|
+
|
|
+ while ((me = media_graph_walk_next(graph))) {
|
|
+ if (me->internal_idx >= graph->ent_enum.idx_max) {
|
|
+ cam_err("%s entity id(%d) exceeded max %d.", __func__,
|
|
+ me->internal_idx, graph->ent_enum.idx_max - 1);
|
|
+ ret = -EPIPE;
|
|
+ goto apply_error;
|
|
+ }
|
|
+ entities[me->internal_idx].entity = me;
|
|
+ INIT_LIST_HEAD(&(entities[me->internal_idx].list_entry));
|
|
+ if (is_source_leaf(me)) {
|
|
+ if (num_points >= MAX_SENSORS) {
|
|
+ cam_err("too many start points entity.");
|
|
+ ret = -EPIPE;
|
|
+ goto apply_error;
|
|
+ }
|
|
+ //if source leaf is a vnode
|
|
+ if (!is_subdev(me)) {
|
|
+ BUG_ON(0 == me->num_pads);
|
|
+ remote_pad = media_entity_remote_pad(&me->pads[0]);
|
|
+ if (NULL == remote_pad) {
|
|
+ cam_err("source leaf(%s) has no active links.", media_entity_name(me));
|
|
+ goto apply_error;
|
|
+ }
|
|
+ me = remote_pad->entity;
|
|
+ }
|
|
+ start_points[num_points++] = me;
|
|
+ }
|
|
+ }
|
|
+ if (num_points == 0) {
|
|
+ cam_err("not found start points entity in pipeline.");
|
|
+ goto apply_error;
|
|
+ }
|
|
+ for (i = 0; i < num_points; i++) {
|
|
+ INIT_LIST_HEAD(&entities_list);
|
|
+ bitmap_zero(visited, graph->ent_enum.idx_max);
|
|
+ me = start_points[i];
|
|
+ __set_bit(me->internal_idx, visited);
|
|
+ ret = spm_mlink_apply_format_forward(me);
|
|
+ if (ret)
|
|
+ goto apply_error;
|
|
+ list_add_tail(&entities[me->internal_idx].list_entry, &entities_list);
|
|
+ while (!list_empty(&entities_list)) {
|
|
+ media_ent = list_first_entry(&entities_list, struct media_ent_list_entry, list_entry);
|
|
+ me = media_ent->entity;
|
|
+ list_del_init(&media_ent->list_entry);
|
|
+ list_for_each_entry(link, &me->links, list) {
|
|
+ if (!is_link_enabled(link))
|
|
+ continue;
|
|
+ if (is_link_sink(me, link))
|
|
+ continue;
|
|
+ next = link->sink->entity;
|
|
+ if (__test_and_set_bit(next->internal_idx, visited))
|
|
+ continue;
|
|
+ ret = spm_mlink_apply_format_forward(next);
|
|
+ if (ret)
|
|
+ goto apply_error;
|
|
+ list_add_tail(&entities[next->internal_idx].list_entry, &entities_list);
|
|
+ }
|
|
+ }
|
|
+ }
|
|
+ mutex_unlock(&mdev->graph_mutex);
|
|
+ return 0;
|
|
+apply_error:
|
|
+ mutex_unlock(&mdev->graph_mutex);
|
|
+ return ret;
|
|
+}
|
|
+
|
|
+static int spm_mlink_stream_entity(struct media_entity *me, int stream_on)
|
|
+{
|
|
+ struct v4l2_subdev *sd = media_entity_to_v4l2_subdev(me);
|
|
+
|
|
+ return v4l2_subdev_call(sd, video, s_stream, stream_on);
|
|
+}
|
|
+
|
|
+static int spm_mlink_reset_entity(struct media_entity *me, int reset_stage)
|
|
+{
|
|
+ struct v4l2_subdev *sd = media_entity_to_v4l2_subdev(me);
|
|
+
|
|
+ return v4l2_subdev_call(sd, core, reset, reset_stage);
|
|
+}
|
|
+
|
|
/*
 * spm_mlink_start_pipeline() - stream-on every subdev of @me's pipeline.
 *
 * Walks the graph calling s_stream(1) on non-sensor subdevs first, then on
 * the collected sensors, so the downstream path is ready before the sources
 * start. Idempotent: returns 0 immediately if already started. On failure,
 * the walk is replayed to stream-off the non-sensor subdevs visited so far
 * and any sensors already started, then the error is returned.
 *
 * Return: 0 on success, -EPIPE on missing pipeline or bad state, or the
 * first s_stream error (-ENOIOCTLCMD is treated as success).
 */
int spm_mlink_start_pipeline(struct media_entity *me)
{
	int ret = 0, i = 0;
	struct media_graph *graph = NULL;
	struct media_device *mdev = me->graph_obj.mdev;
	struct spm_camera_pipeline *sc_pipeline = NULL;
	struct media_pipeline *pipe = media_entity_pipeline(me);
	struct media_entity *sensors[MAX_SENSORS], *entity_error = me;
	int num_sensors = 0;

	cam_dbg("%s enter.", __func__);
	mutex_lock(&mdev->graph_mutex);

	if (NULL == pipe) {
		cam_err("(%s) pipeline is null.", media_entity_name(me));
		mutex_unlock(&mdev->graph_mutex);
		return -EPIPE;
	}
	sc_pipeline = container_of(pipe, struct spm_camera_pipeline, media_pipe);
	if (sc_pipeline->state >= PIPELINE_ST_STARTED) {
		/* Already streaming: nothing to do. */
		mutex_unlock(&mdev->graph_mutex);
		return 0;
	} else if (sc_pipeline->state <= PIPELINE_ST_IDLE) {
		cam_err("%s pipeline state(%d) error.", __func__, sc_pipeline->state);
		mutex_unlock(&mdev->graph_mutex);
		return -EPIPE;
	}

	graph = &sc_pipeline->graph;
	media_graph_walk_start(graph, me);

	/* Stage 1: start every non-sensor subdev; remember the sensors. */
	while ((me = media_graph_walk_next(graph))) {
		if (is_subdev(me)) {
			if (!is_sensor(me)) {
				ret = spm_mlink_stream_entity(me, 1);
				if (ret && ret != -ENOIOCTLCMD) {
					cam_err("%s start pipe(%s) failed.",
						__func__, media_entity_name(me));
					goto start_fail;
				}
			} else {
				if (num_sensors >= MAX_SENSORS) {
					cam_err("%s too many sensors.", __func__);
					BUG_ON(1);
				}
				sensors[num_sensors++] = me;
			}
		}
	}

	/* Stage 2: start the sensors last. */
	for (i = 0; i < num_sensors; i++) {
		ret = spm_mlink_stream_entity(sensors[i], 1);
		if (ret && ret != -ENOIOCTLCMD) {
			cam_err("%s start pipe(%s) failed.",
				__func__, media_entity_name(sensors[i]));
			goto start_fail;
		}
	}

	sc_pipeline->state = PIPELINE_ST_STARTED;
	mutex_unlock(&mdev->graph_mutex);
	return 0;
start_fail:
	/* Replay the walk, streaming off up to (and including) the failing entity. */
	media_graph_walk_start(graph, entity_error);

	while ((entity_error = media_graph_walk_next(graph))) {
		if (is_subdev(entity_error) && !is_sensor(entity_error)) {
			spm_mlink_stream_entity(entity_error, 0);
		}
		if (entity_error == me)
			break;
	}
	/* Stop the sensors that were started before the failure. */
	while (i > 0) {
		spm_mlink_stream_entity(sensors[--i], 0);
	}
	mutex_unlock(&mdev->graph_mutex);
	return ret;
}
|
|
+
|
|
/*
 * __spm_mlink_stop_pipeline() - stream-off @me's pipeline.
 *
 * Mirror of spm_mlink_start_pipeline() but in reverse priority: first walk
 * streams off every non-sensor subdev, a second walk (restarted from the
 * saved @me_bak, since the first walk exhausts @me to NULL) streams off the
 * sensors. The state is set to STOPPING for the duration and STOPPED at the
 * end. Idempotent if already stopped.
 * Caller context: graph_mutex must be held (see spm_mlink_stop_pipeline()).
 *
 * Return: 0 on success or if already stopped, -EPIPE if @me has no pipeline.
 */
int __spm_mlink_stop_pipeline(struct media_entity *me)
{
	struct media_graph *graph = NULL;
	struct spm_camera_pipeline *sc_pipeline = NULL;
	struct media_entity *me_bak = me;
	struct media_pipeline *pipe = media_entity_pipeline(me);

	cam_dbg("%s enter.", __func__);

	if (NULL == pipe) {
		cam_err("(%s) pipeline is null.", media_entity_name(me));
		return -EPIPE;
	}
	sc_pipeline = container_of(pipe, struct spm_camera_pipeline, media_pipe);
	if (sc_pipeline->state <= PIPELINE_ST_STOPPED) {
		return 0;
	}

	sc_pipeline->state = PIPELINE_ST_STOPPING;
	graph = &sc_pipeline->graph;
	media_graph_walk_start(graph, me);

	/* Pass 1: stop the non-sensor subdevs. */
	while ((me = media_graph_walk_next(graph))) {
		if (is_subdev(me) && !is_sensor(me))
			spm_mlink_stream_entity(me, 0);
	}

	media_graph_walk_start(graph, me_bak);

	/* Pass 2: stop the sensors. */
	while ((me = media_graph_walk_next(graph))) {
		if (is_sensor(me)) {
			spm_mlink_stream_entity(me, 0);
		}
	}

	sc_pipeline->state = PIPELINE_ST_STOPPED;
	return 0;
}
|
|
+
|
|
+int spm_mlink_stop_pipeline(struct media_entity *me)
|
|
+{
|
|
+ int ret = 0;
|
|
+ struct media_device *mdev = me->graph_obj.mdev;
|
|
+
|
|
+ mutex_lock(&mdev->graph_mutex);
|
|
+ ret = __spm_mlink_stop_pipeline(me);
|
|
+ mutex_unlock(&mdev->graph_mutex);
|
|
+
|
|
+ return ret;
|
|
+}
|
|
+
|
|
/*
 * __spm_mlink_reset_pipeline() - run one stage of a staged pipeline reset.
 * @me:          any entity belonging to the pipeline
 * @reset_stage: index VI_PIPE_RESET_STAGE1..VI_PIPE_RESET_STAGE_CNT-1
 *
 * Caller must hold mdev->graph_mutex.  Entity ordering differs per stage:
 * stage 1 resets sensors first and then the other subdevs; later stages
 * reset the other subdevs first and the sensors last, in reverse discovery
 * order.
 *
 * Returns 0 on success (or when the pipeline is not running), -EPIPE when
 * the entity has no pipeline, -EINVAL for a bad stage index, -1 when an
 * entity-level reset fails.
 */
static int __spm_mlink_reset_pipeline(struct media_entity *me, int reset_stage)
{
	struct media_graph *graph = NULL;
	struct spm_camera_pipeline *sc_pipeline = NULL;
	struct media_pipeline *pipe = media_entity_pipeline(me);
	struct media_entity *me_bak = me, *sensors[MAX_SENSORS];
	int ret = 0, num_sensors = 0;
	/* maps the public VI_PIPE_RESET_STAGEx index onto the entity-level stage id */
	int reset_stages[VI_PIPE_RESET_STAGE_CNT] = {RESET_STAGE1, RESET_STAGE2, RESET_STAGE3};

	cam_dbg("%s enter", __func__);

	if (NULL == pipe) {
		cam_err("%s(%s) pipe was null", __func__, media_entity_name(me));
		return -EPIPE;
	}
	if (reset_stage < VI_PIPE_RESET_STAGE1 || reset_stage >= VI_PIPE_RESET_STAGE_CNT) {
		cam_err("%s(%s) invalid reset_stage(%d)", __func__, media_entity_name(me), reset_stage);
		return -EINVAL;
	}
	sc_pipeline = media_pipeline_to_sc_pipeline(pipe);
	if (sc_pipeline->state <= PIPELINE_ST_STOPPED) {
		/* nothing to reset when the pipeline is not running */
		return 0;
	}

	graph = &sc_pipeline->graph;
	media_graph_walk_start(graph, me);

	if (RESET_STAGE1 == reset_stages[reset_stage]) {
		/* stage 1: sensors first (remembering them), then the other subdevs */
		while ((me = media_graph_walk_next(graph))) {
			if (is_sensor(me)) {
				ret = spm_mlink_reset_entity(me, RESET_STAGE1);
				if (ret) {
					cam_err("%s(%s) reset stage1 failed", __func__, media_entity_name(me));
					return -1;
				}
				if (num_sensors >= MAX_SENSORS) {
					cam_err("%s too many sensors", __func__);
					BUG_ON(1);
				}
				sensors[num_sensors++] = me;
			}
		}
		me = me_bak;
		media_graph_walk_start(graph, me);
		while ((me = media_graph_walk_next(graph))) {
			if (is_subdev(me) && !is_sensor(me)) {
				ret = spm_mlink_reset_entity(me, RESET_STAGE1);
				if (ret) {
					cam_err("%s(%s) reset stage1 failed", __func__, media_entity_name(me));
					return -1;
				}
			}
		}
	} else {
		/* later stages: collect the sensors, reset the other subdevs first */
		while ((me = media_graph_walk_next(graph))) {
			if (is_sensor(me)) {
				if (num_sensors >= MAX_SENSORS) {
					cam_err("%s too many sensors", __func__);
					BUG_ON(1);
				}
				sensors[num_sensors++] = me;
			}
		}
		me = me_bak;
		media_graph_walk_start(graph, me);
		while ((me = media_graph_walk_next(graph))) {
			if (is_subdev(me) && !is_sensor(me)) {
				ret = spm_mlink_reset_entity(me, reset_stages[reset_stage]);
				if (ret) {
					cam_err("%s(%s) reset stage%d failed", __func__, media_entity_name(me), reset_stages[reset_stage] + 1);
					return -1;
				}
			}
		}
		/* sensors last, in reverse discovery order */
		while (num_sensors > 0) {
			ret = spm_mlink_reset_entity(sensors[--num_sensors], reset_stages[reset_stage]);
			if (ret) {
				cam_err("%s(%s) reset stage%d failed", __func__, media_entity_name(sensors[num_sensors]), reset_stages[reset_stage] + 1);
				return -1;
			}
		}
	}

	return 0;
}
|
|
+
|
|
+int spm_mlink_reset_pipeline(struct media_entity *me, int reset_stage)
|
|
+{
|
|
+ int ret = 0;
|
|
+ struct media_device *mdev = me->graph_obj.mdev;
|
|
+
|
|
+ mutex_lock(&mdev->graph_mutex);
|
|
+ ret = __spm_mlink_reset_pipeline(me, reset_stage);
|
|
+ mutex_unlock(&mdev->graph_mutex);
|
|
+
|
|
+ return ret;
|
|
+}
|
|
+
|
|
+int spm_mlink_pipeline_init(struct spm_camera_pipeline *sc_pipeline)
|
|
+{
|
|
+ sc_pipeline->state = PIPELINE_ST_IDLE;
|
|
+ INIT_LIST_HEAD(&sc_pipeline->frame_id_list);
|
|
+ spin_lock_init(&sc_pipeline->slock);
|
|
+ mutex_init(&sc_pipeline->mlock);
|
|
+ BLOCKING_INIT_NOTIFIER_HEAD(&sc_pipeline->blocking_notify_chain);
|
|
+ init_waitqueue_head(&sc_pipeline->slice_waitq);
|
|
+ atomic_set(&sc_pipeline->slice_info_update, 0);
|
|
+ init_completion(&sc_pipeline->slice_done);
|
|
+ return 0;
|
|
+}
|
|
+
|
|
/*
 * spm_mlink_pipeline_release() - undo spm_mlink_pipeline_init().
 * Only the mutex needs explicit destruction; the other members are plain
 * data or self-contained kernel objects.
 */
void spm_mlink_pipeline_release(struct spm_camera_pipeline *sc_pipeline)
{
	mutex_destroy(&sc_pipeline->mlock);
}
|
|
+
|
|
+struct media_entity *spm_mlink_find_sensor(struct media_entity *me)
|
|
+{
|
|
+ struct media_pipeline *pipe = NULL;
|
|
+ struct media_graph *graph = NULL;
|
|
+ struct media_device *mdev = NULL;
|
|
+ struct spm_camera_pipeline *sc_pipeline = NULL;
|
|
+
|
|
+ if (!me)
|
|
+ return NULL;
|
|
+ pipe = media_entity_pipeline(me);
|
|
+ if (!pipe)
|
|
+ return NULL;
|
|
+ mdev = me->graph_obj.mdev;
|
|
+ sc_pipeline = media_pipeline_to_sc_pipeline(pipe);
|
|
+ graph = &sc_pipeline->graph;
|
|
+ mutex_lock(&mdev->graph_mutex);
|
|
+ if (!pipe->start_count) {
|
|
+ mutex_unlock(&mdev->graph_mutex);
|
|
+ return NULL;
|
|
+ }
|
|
+ media_graph_walk_start(graph, me);
|
|
+ while ((me = media_graph_walk_next(graph))) {
|
|
+ if (is_sensor(me)) {
|
|
+ mutex_unlock(&mdev->graph_mutex);
|
|
+ return me;
|
|
+ }
|
|
+ }
|
|
+
|
|
+ mutex_unlock(&mdev->graph_mutex);
|
|
+ return NULL;
|
|
+}
|
|
+
|
|
+struct media_pad *media_entity_remote_pad(const struct media_pad *pad)
|
|
+{
|
|
+ struct media_link *link;
|
|
+
|
|
+ list_for_each_entry(link, &pad->entity->links, list) {
|
|
+ if (!(link->flags & MEDIA_LNK_FL_ENABLED))
|
|
+ continue;
|
|
+
|
|
+ if (link->source == pad)
|
|
+ return link->sink;
|
|
+
|
|
+ if (link->sink == pad)
|
|
+ return link->source;
|
|
+ }
|
|
+
|
|
+ return NULL;
|
|
+}
|
|
diff --git a/drivers/media/platform/spacemit/camera/vi/mlink.h b/drivers/media/platform/spacemit/camera/vi/mlink.h
|
|
new file mode 100644
|
|
index 000000000000..111111111111
|
|
--- /dev/null
|
|
+++ b/drivers/media/platform/spacemit/camera/vi/mlink.h
|
|
@@ -0,0 +1,171 @@
|
|
+/* SPDX-License-Identifier: GPL-2.0 */
|
|
+/*
|
|
+ * mlink.h - media link functions
|
|
+ *
|
|
+ * Copyright(C) 2023 SPACEMIT Micro Limited
|
|
+ */
|
|
+
|
|
+#ifndef _SPACEMIT_MDEV_H_
|
|
+#define _SPACEMIT_MDEV_H_
|
|
+#include <media/v4l2-device.h>
|
|
+#include <media/media-device.h>
|
|
+#include <media/v4l2-ctrls.h>
|
|
+#include <cam_plat.h>
|
|
+
|
|
+enum {
|
|
+ PIPELINE_ST_IDLE = 0,
|
|
+ PIPELINE_ST_GET,
|
|
+ PIPELINE_ST_STOPPED = PIPELINE_ST_GET,
|
|
+ PIPELINE_ST_STOPPING,
|
|
+ PIPELINE_ST_STARTED,
|
|
+};
|
|
+
|
|
+#define sc_ispfirm_call(ispfirm_ops, f, args...) \
|
|
+ ({ \
|
|
+ int __result; \
|
|
+ if (!(ispfirm_ops)) \
|
|
+ __result = -ENODEV; \
|
|
+ else if (!(ispfirm_ops)->f) \
|
|
+ __result = -ENOIOCTLCMD; \
|
|
+ else \
|
|
+ __result = (ispfirm_ops)->f(args); \
|
|
+ __result; \
|
|
+ })
|
|
+
|
|
+#define sc_sensor_call(sensor_ops, f, args...) \
|
|
+ ({ \
|
|
+ int __result; \
|
|
+ if (!(sensor_ops)) \
|
|
+ __result = -ENODEV; \
|
|
+ else if (!(sensor_ops)->f) \
|
|
+ __result = -ENOIOCTLCMD; \
|
|
+ else \
|
|
+ __result = (sensor_ops)->f(args); \
|
|
+ __result; \
|
|
+ })
|
|
+
|
|
+#define MAX_PIPE_DOMAIN (1)
|
|
+#define PIPELINE_TYPE_SHIFT (8)
|
|
+#define PIPELINE_ID_MASK ((1 << PIPELINE_TYPE_SHIFT) - 1)
|
|
+#define MAKE_SC_PIPELINE_ID(type, id) (((type) << PIPELINE_TYPE_SHIFT) | (id))
|
|
+#define PIPELINE_ID(sc_id) ((sc_id) & PIPELINE_ID_MASK)
|
|
+#define PIPELINE_TYPE(sc_id) ((sc_id) >> PIPELINE_TYPE_SHIFT)
|
|
+enum {
|
|
+ PIPELINE_TYPE_SINGLE = 0,
|
|
+ PIPELINE_TYPE_HDR,
|
|
+};
|
|
/*
 * struct spm_camera_pipeline - per-pipeline state wrapping a media_pipeline.
 * Cast back and forth via media_pipeline_to_sc_pipeline(), which relies on
 * media_pipe being the first member.
 */
struct spm_camera_pipeline {
	struct media_pipeline media_pipe;	/* must stay first (cast in media_pipeline_to_sc_pipeline) */
	struct media_graph graph;		/* reusable walk state for graph traversals */
	int id;					/* MAKE_SC_PIPELINE_ID(type, id) encoded id */
	int state;				/* PIPELINE_ST_* life-cycle state */
	int is_online_mode;
	int is_slice_mode;
	int slice_id;
	int total_slice_cnt;
	atomic_t slice_info_update;		/* set when new slice info is pending */
	struct wait_queue_head slice_waitq;	/* waiters for slice_info_update */
	struct completion slice_done;		/* signalled when a slice finishes */
	int slice_result;
	struct list_head frame_id_list;		/* protected by slock */
	spinlock_t slock;
	struct mutex mlock;			/* destroyed in spm_mlink_pipeline_release() */
	unsigned int max_width[MAX_PIPE_DOMAIN];
	unsigned int max_height[MAX_PIPE_DOMAIN];
	unsigned int min_width[MAX_PIPE_DOMAIN];
	unsigned int min_height[MAX_PIPE_DOMAIN];
	struct blocking_notifier_head blocking_notify_chain;	/* pipeline event listeners */
	struct spm_camera_ispfirm_ops *ispfirm_ops;	/* used via sc_ispfirm_call() */
	struct spm_camera_sensor_ops *sensor_ops;	/* used via sc_sensor_call() */
	void *usr_data;
};
|
|
+
|
|
+enum {
|
|
+ SC_PIPE_NOTIFY_PRIO_NORMAL = 0,
|
|
+ SC_PIPE_NOTIFY_PRIO_EMGER,
|
|
+};
|
|
+
|
|
+enum {
|
|
+ PIPELINE_ACTION_SET_ENTITY_USRDATA = 0,
|
|
+ PIPELINE_ACTION_GET_ENTITY_USRDATA,
|
|
+ PIPELINE_ACTION_WAIT_EOF,
|
|
+ PIPELINE_ACTION_CLEAN_USR_DATA,
|
|
+ PIPELINE_ACTION_SENSOR_STREAM_ON,
|
|
+ PIPELINE_ACTION_SENSOR_STREAM_OFF,
|
|
+ PIPELINE_ACTION_SLICE_READY,
|
|
+ PIPELINE_ACTION_CUSTOM_BASE = 1000,
|
|
+};
|
|
+
|
|
+struct entity_usrdata {
|
|
+ unsigned int entity_id;
|
|
+ void *usr_data;
|
|
+};
|
|
+
|
|
+#define media_pipeline_to_sc_pipeline(pipe) ((struct spm_camera_pipeline*)(pipe))
|
|
+
|
|
+#define VNODE_PAD_IN (0)
|
|
+#define VNODE_PAD_OUT (0)
|
|
+#define PAD_IN (0)
|
|
+#define PAD_OUT (1)
|
|
+
|
|
+enum {
|
|
+ RESET_STAGE1 = 0,
|
|
+ RESET_STAGE2,
|
|
+ RESET_STAGE3,
|
|
+};
|
|
+
|
|
+#define SPACEMIT_MEDIA_CREATE_LINK(source, source_pad, sink, sink_pad) \
|
|
+ media_create_pad_link((struct media_entity*)(source), (source_pad), (struct media_entity*)(sink), (sink_pad), 0)
|
|
+
|
|
/* True when the entity is a v4l2 subdev (as opposed to a video devnode). */
static inline int is_subdev(struct media_entity *me)
{
	return (me->obj_type == MEDIA_ENTITY_TYPE_V4L2_SUBDEV);
}

/* True when the entity is a camera sensor subdev. */
static inline int is_sensor(struct media_entity *me)
{
	return (me->obj_type == MEDIA_ENTITY_TYPE_V4L2_SUBDEV
		&& me->function == MEDIA_ENT_F_CAM_SENSOR);
}

/* True when @me drives @link (is the link's source entity). */
static inline int is_link_source(struct media_entity *me, struct media_link *link)
{
	return (me == link->source->entity);
}

/* True when @me consumes @link (is the link's sink entity). */
static inline int is_link_sink(struct media_entity *me, struct media_link *link)
{
	return (me == link->sink->entity);
}

/* True when the link is currently enabled. */
static inline int is_link_enabled(struct media_link *link)
{
	return (link->flags & MEDIA_LNK_FL_ENABLED);
}

/*
 * True when @me has no enabled inbound link, i.e. it sits at the source end
 * of the pipeline and nothing feeds it.
 */
static inline int is_source_leaf(struct media_entity *me)
{
	struct media_link *link = NULL;

	list_for_each_entry(link, &me->links, list) {
		if (is_link_enabled(link) && is_link_sink(me, link))
			return 0;
	}

	return 1;
}
|
|
+
|
|
+int spm_mlink_get_pipeline(struct media_pipeline *pipeline, struct media_entity *me);
|
|
+int __spm_mlink_put_pipeline(struct media_entity *me, int auto_disable_link);
|
|
+int spm_mlink_put_pipeline(struct media_entity *me, int auto_disable_link);
|
|
+int spm_mlink_apply_pipeline(struct media_entity *me);
|
|
+int spm_mlink_start_pipeline(struct media_entity *me);
|
|
+int __spm_mlink_stop_pipeline(struct media_entity *me);
|
|
+int spm_mlink_stop_pipeline(struct media_entity *me);
|
|
+int spm_mlink_reset_pipeline(struct media_entity *me, int reset_stage);
|
|
+int spm_mlink_pipeline_init(struct spm_camera_pipeline *sc_pipeline);
|
|
+void spm_mlink_pipeline_release(struct spm_camera_pipeline *sc_pipeline);
|
|
+const char *media_entity_name(struct media_entity *me);
|
|
+struct media_entity *spm_mlink_find_sensor(struct media_entity *me);
|
|
+struct media_pad *media_entity_remote_pad(const struct media_pad *pad);
|
|
+#endif
|
|
diff --git a/drivers/media/platform/spacemit/camera/vi/spacemit_videobuf2.h b/drivers/media/platform/spacemit/camera/vi/spacemit_videobuf2.h
|
|
new file mode 100644
|
|
index 000000000000..111111111111
|
|
--- /dev/null
|
|
+++ b/drivers/media/platform/spacemit/camera/vi/spacemit_videobuf2.h
|
|
@@ -0,0 +1,41 @@
|
|
+/* SPDX-License-Identifier: GPL-2.0 */
|
|
+/*
|
|
+ * spacemit_videobuf2.h - definition of spacemit video buffer operations
|
|
+ *
|
|
+ * Copyright(C) 2023 SPACEMIT Micro Limited
|
|
+ */
|
|
+
|
|
+#ifndef _SPACEMIT_VIDEOBUF2_H_
|
|
+#define _SPACEMIT_VIDEOBUF2_H_
|
|
+#include <media/videobuf2-core.h>
|
|
+#include <media/videobuf2-dma-contig.h>
|
|
+#include <media/videobuf2-dma-sg.h>
|
|
+#include "vdev.h"
|
|
+
|
|
+//#define spm_vb2_buf_paddr spm_vb2_usrptr_paddr
|
|
+//#define spm_vb2_mem_ops spm_vb2_get_usrptr_mem_ops()
|
|
+//#define spm_vb2_destroy_alloc_ctx(ctx)
|
|
+//#define SPACEMIT_VB2_IO_MODE VB2_USERPTR
|
|
+
|
|
+#ifndef CONFIG_SPACEMIT_K1X_VI_IOMMU
|
|
/*
 * spm_vb2_buf_paddr() - DMA address of one plane of a vb2 buffer.
 *
 * Non-IOMMU build only: the dma-contig plane cookie is the bus address,
 * to which the per-(buffer, plane) offset recorded in the owning vnode is
 * added.  BUG()s when the buffer is not attached to a vnode.
 */
static inline dma_addr_t spm_vb2_buf_paddr(struct vb2_buffer *vb, unsigned int plane_no)
{
	unsigned int offset = 0;
	dma_addr_t paddr = 0;
	struct spm_camera_vbuffer *sc_vb = vb2_buffer_to_spm_camera_vbuffer(vb);
	struct spm_camera_vnode *sc_vnode = sc_vb->sc_vnode;
	/* vb2_dma_contig's cookie is a pointer to the plane's dma_addr_t */
	dma_addr_t *dma_addr = (dma_addr_t *) vb2_plane_cookie(vb, plane_no);

	BUG_ON(!sc_vnode);
	offset = sc_vnode->planes_offset[vb->index][plane_no];
	paddr = *dma_addr + offset;
	return paddr;
}
|
|
+
|
|
+#define spm_vb2_mem_ops (&vb2_dma_contig_memops)
|
|
+#else
|
|
+#define spm_vb2_mem_ops (&vb2_dma_sg_memops)
|
|
+#endif
|
|
+#define spm_vb2_destroy_alloc_ctx(ctx)
|
|
+#define SPACEMIT_VB2_IO_MODE VB2_DMABUF
|
|
+#endif
|
|
diff --git a/drivers/media/platform/spacemit/camera/vi/subdev.c b/drivers/media/platform/spacemit/camera/vi/subdev.c
|
|
new file mode 100644
|
|
index 000000000000..111111111111
|
|
--- /dev/null
|
|
+++ b/drivers/media/platform/spacemit/camera/vi/subdev.c
|
|
@@ -0,0 +1,476 @@
|
|
+// SPDX-License-Identifier: GPL-2.0
|
|
+/*
|
|
+ * subdev.c - subdev functions
|
|
+ *
|
|
+ * Copyright(C) 2023 SPACEMIT Micro Limited
|
|
+ */
|
|
+
|
|
+#include "subdev.h"
|
|
+#include "mlink.h"
|
|
+#include "vdev.h"
|
|
+#include <media/k1x/k1x_plat_cam.h>
|
|
+#include <linux/compat.h>
|
|
+#define CAM_MODULE_TAG CAM_MDL_VI
|
|
+#include <cam_dbg.h>
|
|
+
|
|
+static int spm_subdev_open(struct v4l2_subdev *sd, struct v4l2_subdev_fh *fh)
|
|
+{
|
|
+ struct spm_camera_subdev *sc_subdev = v4l2_subdev_to_sc_subdev(sd);
|
|
+ cam_dbg("%s(%s) enter.", __func__, sc_subdev->name);
|
|
+ return 0;
|
|
+}
|
|
+
|
|
/*
 * Subdev node close hook: if this subdev still belongs to a pipeline, force
 * the pipeline to stop and drop every outstanding pipeline reference, so a
 * crashed or careless userspace cannot leave the graph running.
 */
static int spm_subdev_close(struct v4l2_subdev *sd, struct v4l2_subdev_fh *fh)
{
	struct media_entity *me = &sd->entity;
	struct media_device *mdev = me->graph_obj.mdev;
	struct spm_camera_pipeline *sc_pipeline = NULL;
	struct spm_camera_subdev *sc_subdev = v4l2_subdev_to_sc_subdev(sd);
	struct media_pipeline *pipe = media_entity_pipeline(me);

	cam_dbg("%s(%s) enter.", __func__, sc_subdev->name);
	mutex_lock(&mdev->graph_mutex);
	if (pipe) {
		sc_pipeline = media_pipeline_to_sc_pipeline(pipe);
		/* stop streaming first, using the locked-context variant */
		if (sc_pipeline->state >= PIPELINE_ST_STARTED) {
			__spm_mlink_stop_pipeline(me);
		}
		/* then drop every pipeline reference this entity still holds */
		while (sc_pipeline->state >= PIPELINE_ST_GET) {
			__spm_mlink_put_pipeline(me, 1);
		}
	}
	mutex_unlock(&mdev->graph_mutex);
	return 0;
}
|
|
+
|
|
+static void spm_subdev_notify(struct v4l2_subdev *sd, unsigned int notification, void *arg)
|
|
+{
|
|
+ struct spm_camera_subdev *sc_subdev = v4l2_subdev_to_sc_subdev(sd);
|
|
+
|
|
+ if (sc_subdev && sc_subdev->notify)
|
|
+ sc_subdev->notify(sc_subdev, notification, arg);
|
|
+}
|
|
+
|
|
/* v4l2 open/close hooks installed on every SpaceMIT VI subdev node. */
struct v4l2_subdev_internal_ops spm_subdev_internal_ops = {
	.open = spm_subdev_open,
	.close = spm_subdev_close,
};

/* platform-cam extension ops: routes notifications to the subdev. */
static const struct spm_v4l2_subdev_ops spm_subdev_ops = {
	.notify = spm_subdev_notify,
};
|
|
+
|
|
/*
 * Final release of the refcounted camera block embedded in a subdev: run the
 * subdev's own release hook, tear down its pipeline state and unregister it
 * from the platform camera framework.
 */
static void spm_subdev_block_release(struct spm_camera_block *sc_block)
{
	struct spm_camera_subdev *sc_subdev =
	    container_of(sc_block, struct spm_camera_subdev, sc_block);

	if (sc_subdev->release)
		sc_subdev->release(sc_subdev);
	spm_mlink_pipeline_release(&sc_subdev->sc_pipeline);
	plat_cam_unregister_subdev(&sc_subdev->pcsd);
}

static struct spm_camera_block_ops spm_subdev_block_ops = {
	.release = spm_subdev_block_release,
};
|
|
+
|
|
+int spm_subdev_init(unsigned int grp_id,
|
|
+ const char *name,
|
|
+ int is_sensor,
|
|
+ const struct v4l2_subdev_ops *ops,
|
|
+ unsigned int pads_cnt,
|
|
+ struct media_pad *pads,
|
|
+ void *drvdata, struct spm_camera_subdev *sc_subdev)
|
|
+{
|
|
+ int ret = 0;
|
|
+ struct plat_cam_subdev *pcsd = NULL;
|
|
+
|
|
+ if (!name || !sc_subdev) {
|
|
+ cam_err("%s invalid arguments.", __func__);
|
|
+ return -EINVAL;
|
|
+ }
|
|
+ cam_dbg("%s(%s) enter.", __func__, name);
|
|
+ pcsd = &sc_subdev->pcsd;
|
|
+ spm_mlink_pipeline_init(&sc_subdev->sc_pipeline);
|
|
+ spm_camera_block_init(&sc_subdev->sc_block, &spm_subdev_block_ops);
|
|
+ pcsd->ops = ops;
|
|
+ pcsd->internal_ops = &spm_subdev_internal_ops;
|
|
+ pcsd->spm_ops = &spm_subdev_ops;
|
|
+ pcsd->name = (char *)name;
|
|
+ pcsd->sd_flags = V4L2_SUBDEV_FL_HAS_DEVNODE;
|
|
+ if (is_sensor)
|
|
+ pcsd->ent_function = MEDIA_ENT_F_CAM_SENSOR;
|
|
+ else
|
|
+ pcsd->ent_function = MEDIA_ENT_F_K1X_VI;
|
|
+ pcsd->pads_cnt = pads_cnt;
|
|
+ pcsd->pads = pads;
|
|
+ pcsd->token = drvdata;
|
|
+ ret = plat_cam_register_subdev(pcsd);
|
|
+ if (ret) {
|
|
+ cam_err("%s register plat cam(%s) failed ret=%d ", __func__, name, ret);
|
|
+ return ret;
|
|
+ }
|
|
+ strlcpy(sc_subdev->name, name, V4L2_SUBDEV_NAME_SIZE);
|
|
+ sc_subdev->pcsd.sd.grp_id = grp_id;
|
|
+ return 0;
|
|
+}
|
|
+
|
|
+long spm_subdev_ioctl(struct v4l2_subdev *sd, unsigned int cmd, void *arg)
|
|
+{
|
|
+ long ret = 0;
|
|
+ struct spm_camera_subdev *sc_subdev = v4l2_subdev_to_sc_subdev(sd);
|
|
+ struct v4l2_vi_entity_info *entity_info = NULL;
|
|
+ struct v4l2_subdev_format *format = NULL;
|
|
+
|
|
+ if (sc_subdev->ioctl) {
|
|
+ ret = sc_subdev->ioctl(sd, cmd, arg);
|
|
+ if (ret != -ENOIOCTLCMD)
|
|
+ return ret;
|
|
+ }
|
|
+
|
|
+ switch (cmd) {
|
|
+ case VIDIOC_GET_PIPELINE:
|
|
+ return spm_mlink_get_pipeline(&sc_subdev->sc_pipeline.media_pipe, &sd->entity);
|
|
+ case VIDIOC_PUT_PIPELINE:
|
|
+ return spm_mlink_put_pipeline(&sd->entity, *((int *)arg));
|
|
+ case VIDIOC_APPLY_PIPELINE:
|
|
+ return spm_mlink_apply_pipeline(&sd->entity);
|
|
+ case VIDIOC_START_PIPELINE:
|
|
+ return spm_mlink_start_pipeline(&sd->entity);
|
|
+ case VIDIOC_STOP_PIPELINE:
|
|
+ return spm_mlink_stop_pipeline(&sd->entity);
|
|
+ case VIDIOC_RESET_PIPELINE:
|
|
+ return spm_mlink_reset_pipeline(&sd->entity, *((int *)arg));
|
|
+ case VIDIOC_G_ENTITY_INFO:
|
|
+ entity_info = (struct v4l2_vi_entity_info *)arg;
|
|
+ entity_info->id = media_entity_id(&sd->entity);
|
|
+ strlcpy(entity_info->name, sc_subdev->name, SPACEMIT_VI_ENTITY_NAME_LEN);
|
|
+ return 0;
|
|
+ case VIDIOC_QUERYCAP: {
|
|
+ struct v4l2_capability *cap = (struct v4l2_capability*)arg;
|
|
+ strlcpy(cap->driver, "spacemitisp", 16);
|
|
+ cap->capabilities = V4L2_CAP_DEVICE_CAPS | V4L2_CAP_STREAMING | V4L2_CAP_VIDEO_CAPTURE_MPLANE;
|
|
+ cap->device_caps = V4L2_CAP_STREAMING | V4L2_CAP_VIDEO_CAPTURE_MPLANE;
|
|
+ }
|
|
+ return 0;
|
|
+ case VIDIOC_SUBDEV_S_FMT:
|
|
+ format = (struct v4l2_subdev_format *)arg;
|
|
+ return v4l2_subdev_call(sd, pad, set_fmt, NULL, format);
|
|
+ break;
|
|
+ default:
|
|
+ cam_warn("%s(%s) unknown ioctl(%d)", __func__, sc_subdev->name, cmd);
|
|
+ return -ENOIOCTLCMD;
|
|
+ }
|
|
+ return 0;
|
|
+}
|
|
+
|
|
+//#ifdef CONFIG_COMPAT
|
|
+#if 0
|
|
+
|
|
+static int alloc_userspace(unsigned int size, u32 aux_space, void __user **new_p64)
|
|
+{
|
|
+ *new_p64 = compat_alloc_user_space(size + aux_space);
|
|
+ if (!*new_p64)
|
|
+ return -ENOMEM;
|
|
+ if (clear_user(*new_p64, size))
|
|
+ return -EFAULT;
|
|
+ return 0;
|
|
+}
|
|
+
|
|
+static int video_get_user(void __user *arg, void *parg, unsigned int cmd,
|
|
+ bool *always_copy)
|
|
+{
|
|
+ unsigned int n = _IOC_SIZE(cmd);
|
|
+
|
|
+ if (!(_IOC_DIR(cmd) & _IOC_WRITE)) {
|
|
+ /* read-only ioctl */
|
|
+ memset(parg, 0, n);
|
|
+ return 0;
|
|
+ }
|
|
+
|
|
+ switch (cmd) {
|
|
+ default:
|
|
+
|
|
+ if (copy_from_user(parg, (void __user *)arg, n))
|
|
+ return -EFAULT;
|
|
+
|
|
+ /* zero out anything we don't copy from userspace */
|
|
+ if (n < _IOC_SIZE(cmd))
|
|
+ memset((u8 *) parg + n, 0, _IOC_SIZE(cmd) - n);
|
|
+ break;
|
|
+ }
|
|
+
|
|
+ return 0;
|
|
+}
|
|
+
|
|
+static int video_put_user(void __user *arg, void *parg, unsigned int cmd)
|
|
+{
|
|
+ if (!(_IOC_DIR(cmd) & _IOC_READ))
|
|
+ return 0;
|
|
+
|
|
+ switch (cmd) {
|
|
+ default:
|
|
+ /* Copy results into user buffer */
|
|
+ if (copy_to_user(arg, parg, _IOC_SIZE(cmd)))
|
|
+ return -EFAULT;
|
|
+ break;
|
|
+ }
|
|
+
|
|
+ return 0;
|
|
+}
|
|
+
|
|
+static long spm_subdev_video_usercopy(struct v4l2_subdev *sd, unsigned int cmd, unsigned long arg,
|
|
+ long (*func)(struct v4l2_subdev *sd, unsigned int cmd, void *arg))
|
|
+{
|
|
+ char sbuf[128];
|
|
+ void *mbuf = NULL, *array_buf = NULL;
|
|
+ void *parg = (void *)arg;
|
|
+ long err = -EINVAL;
|
|
+ bool always_copy = false;
|
|
+ const size_t ioc_size = _IOC_SIZE(cmd);
|
|
+
|
|
+ /* Copy arguments into temp kernel buffer */
|
|
+ if (_IOC_DIR(cmd) != _IOC_NONE) {
|
|
+ if (ioc_size <= sizeof(sbuf)) {
|
|
+ parg = sbuf;
|
|
+ } else {
|
|
+ /* too big to allocate from stack */
|
|
+ mbuf = kvmalloc(ioc_size, GFP_KERNEL);
|
|
+ if (NULL == mbuf)
|
|
+ return -ENOMEM;
|
|
+ parg = mbuf;
|
|
+ }
|
|
+
|
|
+ err = video_get_user((void __user *)arg, parg, cmd, &always_copy);
|
|
+ if (err)
|
|
+ goto out;
|
|
+ }
|
|
+
|
|
+ /* Handles IOCTL */
|
|
+ err = func(sd, cmd, parg);
|
|
+ if (err == -ENOTTY || err == -ENOIOCTLCMD) {
|
|
+ err = -ENOTTY;
|
|
+ goto out;
|
|
+ }
|
|
+
|
|
+ /*
|
|
+ * Some ioctls can return an error, but still have valid
|
|
+ * results that must be returned.
|
|
+ */
|
|
+ if (err < 0 && !always_copy)
|
|
+ goto out;
|
|
+
|
|
+ if (video_put_user((void __user *)arg, parg, cmd))
|
|
+ err = -EFAULT;
|
|
+out:
|
|
+ kvfree(array_buf);
|
|
+ kvfree(mbuf);
|
|
+ return err;
|
|
+}
|
|
+long spm_subdev_compat_ioctl32(struct v4l2_subdev *sd, unsigned int cmd, unsigned long arg)
|
|
+{
|
|
+ void __user *p32 = compat_ptr(arg);
|
|
+ void __user *new_p64 = NULL;
|
|
+ //void __user *aux_buf;
|
|
+ //u32 aux_space;
|
|
+ long err = 0;
|
|
+ const size_t ioc_size = _IOC_SIZE(cmd);
|
|
+ //size_t ioc_size64 = 0;
|
|
+
|
|
+ //if (_IOC_TYPE(cmd) == 'V') {
|
|
+ // switch (_IOC_NR(cmd)) {
|
|
+ // //int r
|
|
+ // case _IOC_NR(VIDIOC_G_SLICE_MODE):
|
|
+ // case _IOC_NR(VIDIOC_CPU_Z1):
|
|
+ // //int w
|
|
+ // case _IOC_NR(VIDIOC_PUT_PIPELINE):
|
|
+ // case _IOC_NR(VIDIOC_RESET_PIPELINE):
|
|
+ // ioc_size64 = sizeof(int);
|
|
+ // break;
|
|
+ // //unsigned int
|
|
+ // case _IOC_NR(VIDIOC_G_PIPE_STATUS):
|
|
+ // ioc_size64 = sizeof(int);
|
|
+ // break;
|
|
+ // case _IOC_NR(VIDIOC_S_PORT_CFG):
|
|
+ // ioc_size64 = sizeof(struct v4l2_vi_port_cfg);
|
|
+ // break;
|
|
+ // case _IOC_NR(VIDIOC_DBG_REG_WRITE):
|
|
+ // case _IOC_NR(VIDIOC_DBG_REG_READ):
|
|
+ // ioc_size64 = sizeof(struct v4l2_vi_dbg_reg);
|
|
+ // break;
|
|
+ // case _IOC_NR(VIDIOC_CFG_INPUT_INTF):
|
|
+ // ioc_size64 = sizeof(struct v4l2_vi_input_interface);
|
|
+ // break;
|
|
+ // case _IOC_NR(VIDIOC_SET_SELECTION):
|
|
+ // ioc_size64 = sizeof(struct v4l2_vi_selection);
|
|
+ // break;
|
|
+ // case _IOC_NR(VIDIOC_QUERY_SLICE_READY):
|
|
+ // ioc_size64 = sizeof(struct v4l2_vi_slice_info);
|
|
+ // break;
|
|
+ // case _IOC_NR(VIDIOC_S_BANDWIDTH):
|
|
+ // ioc_size64 = sizeof(struct v4l2_vi_bandwidth_info);
|
|
+ // break;
|
|
+ // case _IOC_NR(VIDIOC_G_ENTITY_INFO):
|
|
+ // ioc_size64 = sizeof(struct v4l2_vi_entity_info);
|
|
+ // break;
|
|
+ // }
|
|
+ // cam_dbg("%s cmd_nr=%d ioc_size32=%u ioc_size64=%u",__func__, _IOC_NR(cmd), ioc_size, ioc_size64);
|
|
+ //}
|
|
+ if (_IOC_DIR(cmd) != _IOC_NONE) {
|
|
+ err = alloc_userspace(ioc_size, 0, &new_p64);
|
|
+ if (err) {
|
|
+ cam_err("%s alloc userspace failed err=%l cmd=%d ioc_size=%u",
|
|
+ __func__, err, _IOC_NR(cmd), ioc_size);
|
|
+ return err;
|
|
+ }
|
|
+ if ((_IOC_DIR(cmd) & _IOC_WRITE)) {
|
|
+ err = copy_in_user(new_p64, p32, ioc_size);
|
|
+ if (err) {
|
|
+ cam_err
|
|
+ ("%s copy in user 1 failed err=%l cmd=%d ioc_size=%u",
|
|
+ __func__, err, _IOC_NR(cmd), ioc_size);
|
|
+ return err;
|
|
+ }
|
|
+ }
|
|
+ }
|
|
+
|
|
+ err = spm_subdev_video_usercopy(sd, cmd, (unsigned long)new_p64, spm_subdev_ioctl);
|
|
+ if (err) {
|
|
+ return err;
|
|
+ }
|
|
+
|
|
+ if ((_IOC_DIR(cmd) & _IOC_READ)) {
|
|
+ err = copy_in_user(p32, new_p64, ioc_size);
|
|
+ if (err) {
|
|
+ cam_err("%s copy in user 2 failed err=%l cmd=%d ioc_size=%u",
|
|
+ __func__, err, _IOC_NR(cmd), ioc_size);
|
|
+ return err;
|
|
+ }
|
|
+ }
|
|
+ //switch (cmd) {
|
|
+ // //int r
|
|
+ // case VIDIOC_G_SLICE_MODE:
|
|
+ // case VIDIOC_CPU_Z1:
|
|
+ // //int w
|
|
+ // case VIDIOC_PUT_PIPELINE:
|
|
+ // case VIDIOC_RESET_PIPELINE:
|
|
+ // err = alloc_userspace(sizeof(int), 0, &new_p64);
|
|
+ // if (!err && assign_in_user((int __user *)new_p64,
|
|
+ // (compat_int_t __user *)p32))
|
|
+ // err = -EFAULT;
|
|
+ // break;
|
|
+ // //unsigned int
|
|
+ // case VIDIOC_G_PIPE_STATUS:
|
|
+ // err = alloc_userspace(sizeof(unsigned int), 0, &new_p64);
|
|
+ // if (!err && assign_in_user((unsigned int __user *)new_p64,
|
|
+ // (compat_uint_t __user *)p32))
|
|
+ // err = -EFAULT;
|
|
+ // break;
|
|
+ // case VIDIOC_S_PORT_CFG:
|
|
+ // err = alloc_userspace(sizeof(struct v4l2_vi_port_cfg), 0, &new_p64);
|
|
+ // if (!err) {
|
|
+ // err = -EFAULT;
|
|
+ // break;
|
|
+ // }
|
|
+ // break;
|
|
+ // case VIDIOC_DBG_REG_WRITE:
|
|
+ // case VIDIOC_DBG_REG_READ:
|
|
+ // break;
|
|
+ // case VIDIOC_CFG_INPUT_INTF:
|
|
+ // break;
|
|
+ // case VIDIOC_SET_SELECTION:
|
|
+ // break;
|
|
+ // case VIDIOC_QUERY_SLICE_READY:
|
|
+ // break;
|
|
+ // case VIDIOC_S_BANDWIDTH:
|
|
+ // break;
|
|
+ // case VIDIOC_G_ENTITY_INFO:
|
|
+ // break;
|
|
+
|
|
+ //}
|
|
+ //if (err)
|
|
+ // return err;
|
|
+ return 0;
|
|
+}
|
|
+#endif
|
|
+
|
|
+static struct v4l2_subdev_format pad_fmts[SUBDEV_MAX_PADS];
|
|
+int spm_subdev_reset(struct v4l2_subdev *sd, u32 val)
|
|
+{
|
|
+ int ret = 0;
|
|
+ unsigned int i = 0;
|
|
+ struct spm_camera_subdev *sc_subdev = v4l2_subdev_to_sc_subdev(sd);
|
|
+ struct v4l2_subdev_format pad_fmt;
|
|
+ struct media_pad *pad = NULL, *remote_pad = NULL;
|
|
+ struct spm_camera_vnode *sc_vnode = NULL;
|
|
+ struct spm_camera_vbuffer *sc_vb = NULL;
|
|
+
|
|
+ sc_subdev->is_resetting = 1;
|
|
+ if (RESET_STAGE1 == val) {
|
|
+ for (i = 0; i < sd->entity.num_pads; i++) {
|
|
+ pad_fmts[i].pad = i;
|
|
+ pad_fmts[i].which = V4L2_SUBDEV_FORMAT_ACTIVE;
|
|
+ v4l2_subdev_call(sd, pad, get_fmt, NULL, &pad_fmts[i]);
|
|
+ }
|
|
+ for (i = 0; i < sd->entity.num_pads; i++) {
|
|
+ v4l2_subdev_call(sd, pad, set_fmt, NULL, &pad_fmts[i]);
|
|
+ }
|
|
+ } else if (RESET_STAGE2 == val) {
|
|
+ for (i = 0; i < sd->entity.num_pads; i++) {
|
|
+ pad = sd->entity.pads + i;
|
|
+ remote_pad = media_entity_remote_pad(pad);
|
|
+ if (remote_pad) {
|
|
+ sc_vnode = media_entity_to_sc_vnode(remote_pad->entity);
|
|
+ if (sc_vnode) {
|
|
+ while (0 == ret) {
|
|
+ ret = spm_vdev_pick_busy_vbuffer(sc_vnode, &sc_vb);
|
|
+ if (0 == ret && !(sc_vb->flags & SC_BUF_FLAG_SPECIAL_USE)) {
|
|
+ ret = spm_vdev_dq_busy_vbuffer(sc_vnode, &sc_vb);
|
|
+ if (0 == ret) {
|
|
+ sc_vb->vb2_v4l2_buf.flags |= V4L2_BUF_FLAG_IGNOR;
|
|
+ spm_vdev_export_camera_vbuffer(sc_vb, 0);
|
|
+ }
|
|
+ } else {
|
|
+ ret = -1;
|
|
+ }
|
|
+ }
|
|
+ }
|
|
+ }
|
|
+ }
|
|
+ ret = v4l2_subdev_call(sd, video, s_stream, 1);
|
|
+ if (ret && ret != -ENOIOCTLCMD) {
|
|
+ cam_err("%s(%s) video stream on failed", __func__, sc_subdev->name);
|
|
+ sc_subdev->is_resetting = 0;
|
|
+ return ret;
|
|
+ }
|
|
+ for (i = 0; i < sd->entity.num_pads; i++) {
|
|
+ if (sc_subdev->pads_stream_enable & (1 << i)) {
|
|
+ pad_fmt.pad = i;
|
|
+ pad_fmt.which = 1;
|
|
+ ret = v4l2_subdev_call(sd, pad, link_validate, NULL, &pad_fmt, &pad_fmt);
|
|
+ if (ret && ret != -ENOIOCTLCMD) {
|
|
+ cam_err("%s(%s) stream on pad%u failed", __func__, sc_subdev->name, pad_fmt.pad);
|
|
+ sc_subdev->is_resetting = 0;
|
|
+ return ret;
|
|
+ }
|
|
+ }
|
|
+ }
|
|
+ }
|
|
+
|
|
+ sc_subdev->is_resetting = 0;
|
|
+ return 0;
|
|
+}
|
|
+
|
|
+int spm_subdev_pad_s_stream(struct spm_camera_subdev *sc_subdev, unsigned int pad, int enable)
|
|
+{
|
|
+ if (pad >= SUBDEV_MAX_PADS)
|
|
+ return -EINVAL;
|
|
+
|
|
+ if (enable)
|
|
+ sc_subdev->pads_stream_enable |= (1 << pad);
|
|
+ else
|
|
+ sc_subdev->pads_stream_enable &= ~(1 << pad);
|
|
+
|
|
+ return 0;
|
|
+}
|
|
diff --git a/drivers/media/platform/spacemit/camera/vi/subdev.h b/drivers/media/platform/spacemit/camera/vi/subdev.h
|
|
new file mode 100644
|
|
index 000000000000..111111111111
|
|
--- /dev/null
|
|
+++ b/drivers/media/platform/spacemit/camera/vi/subdev.h
|
|
@@ -0,0 +1,79 @@
|
|
+/* SPDX-License-Identifier: GPL-2.0 */
|
|
+/*
|
|
+ * subdev.h - subdev functions
|
|
+ *
|
|
+ * Copyright(C) 2023 SPACEMIT Micro Limited
|
|
+ */
|
|
+
|
|
+#ifndef _SPACEMIT_SUBDEV_H_
|
|
+#define _SPACEMIT_SUBDEV_H_
|
|
+#include <media/v4l2-device.h>
|
|
+#include <media/v4l2-subdev.h>
|
|
+#include <media/k1x/k1x_videodev2.h>
|
|
+#include <linux/notifier.h>
|
|
+#include <cam_plat.h>
|
|
+#include "mlink.h"
|
|
+#include "cam_block.h"
|
|
+
|
|
+#define SUBDEV_IDX_MASK (0xff)
|
|
+#define SUBDEV_SUBGRP_MASK (0xff00)
|
|
+#define SUBDEV_SUBGRP_OFFSET (8)
|
|
+#define SUBDEV_GRP_MASK (0xff0000)
|
|
+#define SUBDEV_GRP_OFFSET (16)
|
|
+
|
|
+#define SD_GRP_ID(grp, sub, id) (((grp) << SUBDEV_GRP_OFFSET) | ((sub) << SUBDEV_SUBGRP_OFFSET) | (id))
|
|
+#define SD_IDX(grp_id) ((grp_id) & SUBDEV_IDX_MASK)
|
|
+#define SD_SUB(grp_id) (((grp_id) & SUBDEV_SUBGRP_MASK) >> SUBDEV_SUBGRP_OFFSET)
|
|
+#define SD_GRP(grp_id) (((grp_id) & SUBDEV_GRP_MASK) >> SUBDEV_GRP_OFFSET)
|
|
+
|
|
+#define SUBDEV_MAX_PADS (16)
|
|
+
|
|
+struct spm_camera_subdev {
|
|
+ struct plat_cam_subdev pcsd;
|
|
+ char name[SPACEMIT_VI_ENTITY_NAME_LEN];
|
|
+ struct spm_camera_pipeline sc_pipeline;
|
|
+ struct spm_camera_block sc_block;
|
|
+ struct notifier_block vnode_nb;
|
|
+ uint32_t pads_stream_enable;
|
|
+ int is_resetting;
|
|
+ long (*ioctl)(struct v4l2_subdev *sd, unsigned int cmd, void *arg);
|
|
+ void (*release)(struct spm_camera_subdev *sc_subdev);
|
|
+ void (*notify)(struct spm_camera_subdev *sc_subdev, unsigned int notification,
|
|
+ void *arg);
|
|
+};
|
|
+
|
|
/*
 * Downcast from the embedded v4l2_subdev.
 * NOTE(review): layout-dependent — assumes the v4l2_subdev sits at offset 0
 * of spm_camera_subdev (via plat_cam_subdev); confirm in cam_plat.h.
 */
static inline struct spm_camera_subdev *v4l2_subdev_to_sc_subdev(struct v4l2_subdev *sd)
{
	return (struct spm_camera_subdev *)sd;
}

/* Entity -> wrapping spm_camera_subdev; NULL when @me is not a subdev. */
static inline struct spm_camera_subdev* media_entity_to_sc_subdev(struct media_entity *me)
{
	struct v4l2_subdev *sd = NULL;

	if (!is_subdev(me))
		return NULL;
	sd = media_entity_to_v4l2_subdev(me);
	return v4l2_subdev_to_sc_subdev(sd);
}

/* Driver-private pointer that was passed to spm_subdev_init() as drvdata. */
static inline void *spm_subdev_get_drvdata(struct spm_camera_subdev *sc_subdev)
{
	return sc_subdev->pcsd.token;
}
|
|
+
|
|
+int spm_subdev_init(unsigned int grp_id,
|
|
+ const char *name,
|
|
+ int is_sensor,
|
|
+ const struct v4l2_subdev_ops *ops,
|
|
+ unsigned int pads_cnt,
|
|
+ struct media_pad *pads,
|
|
+ void *drvdata, struct spm_camera_subdev *sc_subdev);
|
|
+long spm_subdev_ioctl(struct v4l2_subdev *sd, unsigned int cmd, void *arg);
|
|
+#ifdef CONFIG_COMPAT
|
|
+long spm_subdev_compat_ioctl32(struct v4l2_subdev *sd, unsigned int cmd,
|
|
+ unsigned long arg);
|
|
+#endif
|
|
+int spm_subdev_reset(struct v4l2_subdev *sd, u32 val);
|
|
+int spm_subdev_pad_s_stream(struct spm_camera_subdev *sc_subdev, unsigned int pad, int enable);
|
|
+#endif
|
|
diff --git a/drivers/media/platform/spacemit/camera/vi/vdev.c b/drivers/media/platform/spacemit/camera/vi/vdev.c
|
|
new file mode 100644
|
|
index 000000000000..111111111111
|
|
--- /dev/null
|
|
+++ b/drivers/media/platform/spacemit/camera/vi/vdev.c
|
|
@@ -0,0 +1,2384 @@
|
|
+// SPDX-License-Identifier: GPL-2.0
|
|
+/*
|
|
+ * vdev.c - video device functions
|
|
+ *
|
|
+ * Copyright(C) 2023 SPACEMIT Micro Limited
|
|
+ */
|
|
+//#include <soc/spm/plat.h>
|
|
+#include <media/v4l2-dev.h>
|
|
+#include <media/media-entity.h>
|
|
+#include <media/media-device.h>
|
|
+#include <media/v4l2-subdev.h>
|
|
+#include <media/videobuf2-v4l2.h>
|
|
+#include <media/v4l2-ioctl.h>
|
|
+#include <linux/media-bus-format.h>
|
|
+#include <linux/compat.h>
|
|
+#include <media/k1x/k1x_media_bus_format.h>
|
|
+#define CAM_MODULE_TAG CAM_MDL_VI
|
|
+#include <cam_dbg.h>
|
|
+#include "vdev.h"
|
|
+#include "mlink.h"
|
|
+#include "spacemit_videobuf2.h"
|
|
+#include "subdev.h"
|
|
+
|
|
/*
 * Sanity-check the link between a video devnode and its remote subdevice.
 *
 * Statement-expression macro that evaluates to 0 on success or a negative
 * error. It deliberately reads AND WRITES locals of the calling function:
 *   in:  sc_vnode  (for log messages), remote_pad (pad linked to the vnode)
 *   out: remote_me (remote entity),    remote_sd  (remote v4l2_subdev)
 * Callers must declare all four before invoking it (see
 * spm_vdev_start_streaming / spm_vdev_stop_streaming).
 *
 * NOTE(review): "SAINT" is presumably a typo for "SANITY"; renaming would
 * touch every call site, so it is only flagged here. A NULL remote_pad is a
 * soft error (-ENODEV), a pad without an entity is treated as a fatal
 * driver bug (BUG_ON).
 */
#define SAINT_CHECK() ({ \
	int r = 0; \
	do { \
		if (NULL == remote_pad) { \
			cam_err("%s(%s) no remote entity linked with this devnode. ", __func__, sc_vnode->name); \
			r = -ENODEV; \
			break; \
		} \
		remote_me = remote_pad->entity; \
		if (NULL == remote_me) { \
			cam_err("%s(%s) remote_pad did not have entity associated with it. ", __func__, sc_vnode->name); \
			BUG_ON(1); \
		} \
		if (!is_subdev(remote_me)) { \
			cam_err("%s remote entity must be a v4l2 subdevice! ", __func__); \
			r = -1; \
			break; \
		} \
		remote_sd = container_of(remote_me, struct v4l2_subdev, entity); \
	} while (0); \
	r; \
	})
|
|
+
|
|
/*
 * Static catalogue mapping V4L2 pixel formats to media-bus codes, plus the
 * per-format layout parameters used to derive stride and image size:
 *   pixelformat        - V4L2 fourcc
 *   mbus_fmtcode       - matching media bus format code
 *   num_planes         - number of memory planes
 *   pixel_width_align  - width alignment in pixels (CAM_ALIGN granularity)
 *   pixel_height_align - height alignment in pixels (0 = unspecified;
 *                        consumers treat 0 as 1)
 *   plane_bytes_align  - per-plane stride alignment in bytes
 *   plane_bpp          - per-plane bits-per-pixel as a num/den fraction
 *                        (fractions express packed raw layouts, e.g. 128/12
 *                        means 128 bits carry 12 pixels)
 *   height_subsampling - per-plane vertical subsampling as num/den
 *                        (e.g. 1/2 for NV12 chroma)
 * The AFBC entry only fills the alignment fields; its sizes are computed
 * specially by spm_vdev_fill_v4l2_format().
 */
static struct {
	__u32 pixelformat;
	__u32 mbus_fmtcode;
	__u8 num_planes;
	__u32 pixel_width_align;
	__u32 pixel_height_align;
	__u32 plane_bytes_align[VIDEO_MAX_PLANES];
	struct {
		__u32 num;
		__u32 den;
	} plane_bpp[VIDEO_MAX_PLANES];
	struct {
		__u32 num;
		__u32 den;
	} height_subsampling[VIDEO_MAX_PLANES];
} spm_camera_formats_table[] = {
	/* bayer raw8 */
	{
		.pixelformat = V4L2_PIX_FMT_SPACEMITGB8P,
		.mbus_fmtcode = MEDIA_BUS_FMT_SRGB8_SPACEMITPACK_1X8,
		.num_planes = 1,
		.pixel_width_align = 16,
		.plane_bytes_align = { [0] = 16, },
		.plane_bpp = { [0] = { .num = 8, .den = 1, }, },
		.height_subsampling = { [0] = { .num = 1, .den = 1, }, },
	},
	{
		.pixelformat = V4L2_PIX_FMT_SBGGR8,
		.mbus_fmtcode = MEDIA_BUS_FMT_SBGGR8_1X8,
		.num_planes = 1,
		.pixel_width_align = 16,
		.plane_bytes_align = { [0] = 16, },
		.plane_bpp = { [0] = { .num = 8, .den = 1, }, },
		.height_subsampling = { [0] = { .num = 1, .den = 1, }, },
	},
	{
		.pixelformat = V4L2_PIX_FMT_SGBRG8,
		.mbus_fmtcode = MEDIA_BUS_FMT_SGBRG8_1X8,
		.num_planes = 1,
		.pixel_width_align = 16,
		.plane_bytes_align = { [0] = 16, },
		.plane_bpp = { [0] = { .num = 8, .den = 1, }, },
		.height_subsampling = { [0] = { .num = 1, .den = 1, }, },
	},
	{
		.pixelformat = V4L2_PIX_FMT_SGRBG8,
		.mbus_fmtcode = MEDIA_BUS_FMT_SGRBG8_1X8,
		.num_planes = 1,
		.pixel_width_align = 16,
		.plane_bytes_align = { [0] = 16, },
		.plane_bpp = { [0] = { .num = 8, .den = 1, }, },
		.height_subsampling = { [0] = { .num = 1, .den = 1, }, },
	},
	{
		.pixelformat = V4L2_PIX_FMT_SRGGB8,
		.mbus_fmtcode = MEDIA_BUS_FMT_SRGGB8_1X8,
		.num_planes = 1,
		.pixel_width_align = 16,
		.plane_bytes_align = { [0] = 16, },
		.plane_bpp = { [0] = { .num = 8, .den = 1, }, },
		.height_subsampling = { [0] = { .num = 1, .den = 1, }, },
	},
	/* bayer raw10 (packed: 128 bits carry 12 pixels) */
	{
		.pixelformat = V4L2_PIX_FMT_SPACEMITGB10P,
		.mbus_fmtcode = MEDIA_BUS_FMT_SRGB10_SPACEMITPACK_1X10,
		.num_planes = 1,
		.pixel_width_align = 12,
		.plane_bytes_align = { [0] = 16, },
		.plane_bpp = { [0] = { .num = 128, .den = 12, }, },
		.height_subsampling = { [0] = { .num = 1, .den = 1, }, },
	},
	{
		.pixelformat = V4L2_PIX_FMT_SBGGR10P,
		.mbus_fmtcode = MEDIA_BUS_FMT_SBGGR10_1X10,
		.num_planes = 1,
		.pixel_width_align = 12,
		.plane_bytes_align = { [0] = 16, },
		.plane_bpp = { [0] = { .num = 128, .den = 12, }, },
		.height_subsampling = { [0] = { .num = 1, .den = 1, }, },
	},
	{
		.pixelformat = V4L2_PIX_FMT_SGBRG10P,
		.mbus_fmtcode = MEDIA_BUS_FMT_SGBRG10_1X10,
		.num_planes = 1,
		.pixel_width_align = 12,
		.plane_bytes_align = { [0] = 16, },
		.plane_bpp = { [0] = { .num = 128, .den = 12, }, },
		.height_subsampling = { [0] = { .num = 1, .den = 1, }, },
	},
	{
		.pixelformat = V4L2_PIX_FMT_SGRBG10P,
		.mbus_fmtcode = MEDIA_BUS_FMT_SGRBG10_1X10,
		.num_planes = 1,
		.pixel_width_align = 12,
		.plane_bytes_align = { [0] = 16, },
		.plane_bpp = { [0] = { .num = 128, .den = 12, }, },
		.height_subsampling = { [0] = { .num = 1, .den = 1, }, },
	},
	{
		.pixelformat = V4L2_PIX_FMT_SRGGB10P,
		.mbus_fmtcode = MEDIA_BUS_FMT_SRGGB10_1X10,
		.num_planes = 1,
		.pixel_width_align = 12,
		.plane_bytes_align = { [0] = 16, },
		.plane_bpp = { [0] = { .num = 128, .den = 12, }, },
		.height_subsampling = { [0] = { .num = 1, .den = 1, }, },
	},
	/* bayer raw12 (packed: 128 bits carry 10 pixels) */
	{
		.pixelformat = V4L2_PIX_FMT_SPACEMITGB12P,
		.mbus_fmtcode = MEDIA_BUS_FMT_SRGB12_SPACEMITPACK_1X12,
		.num_planes = 1,
		.pixel_width_align = 10,
		.plane_bytes_align = { [0] = 16, },
		.plane_bpp = { [0] = { .num = 128, .den = 10, }, },
		.height_subsampling = { [0] = { .num = 1, .den = 1, }, },
	},
	{
		.pixelformat = V4L2_PIX_FMT_SBGGR12P,
		.mbus_fmtcode = MEDIA_BUS_FMT_SBGGR12_1X12,
		.num_planes = 1,
		.pixel_width_align = 10,
		.plane_bytes_align = { [0] = 16, },
		.plane_bpp = { [0] = { .num = 128, .den = 10, }, },
		.height_subsampling = { [0] = { .num = 1, .den = 1, }, },
	},
	{
		.pixelformat = V4L2_PIX_FMT_SGBRG12P,
		.mbus_fmtcode = MEDIA_BUS_FMT_SGBRG12_1X12,
		.num_planes = 1,
		.pixel_width_align = 10,
		.plane_bytes_align = { [0] = 16, },
		.plane_bpp = { [0] = { .num = 128, .den = 10, }, },
		.height_subsampling = { [0] = { .num = 1, .den = 1, }, },
	},
	{
		.pixelformat = V4L2_PIX_FMT_SGRBG12P,
		.mbus_fmtcode = MEDIA_BUS_FMT_SGRBG12_1X12,
		.num_planes = 1,
		.pixel_width_align = 10,
		.plane_bytes_align = { [0] = 16, },
		.plane_bpp = { [0] = { .num = 128, .den = 10, }, },
		.height_subsampling = { [0] = { .num = 1, .den = 1, }, },
	},
	{
		.pixelformat = V4L2_PIX_FMT_SRGGB12P,
		.mbus_fmtcode = MEDIA_BUS_FMT_SRGGB12_1X12,
		.num_planes = 1,
		.pixel_width_align = 10,
		.plane_bytes_align = { [0] = 16, },
		.plane_bpp = { [0] = { .num = 128, .den = 10, }, },
		.height_subsampling = { [0] = { .num = 1, .den = 1, }, },
	},
	/* bayer raw14 (packed: 128 bits carry 8 pixels) */
	{
		.pixelformat = V4L2_PIX_FMT_SPACEMITGB14P,
		.mbus_fmtcode = MEDIA_BUS_FMT_SRGB14_SPACEMITPACK_1X14,
		.num_planes = 1,
		.pixel_width_align = 8,
		.plane_bytes_align = { [0] = 16, },
		.plane_bpp = { [0] = { .num = 128, .den = 8, }, },
		.height_subsampling = { [0] = { .num = 1, .den = 1, }, },
	},
	{
		.pixelformat = V4L2_PIX_FMT_SBGGR14P,
		.mbus_fmtcode = MEDIA_BUS_FMT_SBGGR14_1X14,
		.num_planes = 1,
		.pixel_width_align = 8,
		.plane_bytes_align = { [0] = 16, },
		.plane_bpp = { [0] = { .num = 128, .den = 8, }, },
		.height_subsampling = { [0] = { .num = 1, .den = 1, }, },
	},
	{
		.pixelformat = V4L2_PIX_FMT_SGBRG14P,
		.mbus_fmtcode = MEDIA_BUS_FMT_SGBRG14_1X14,
		.num_planes = 1,
		.pixel_width_align = 8,
		.plane_bytes_align = { [0] = 16, },
		.plane_bpp = { [0] = { .num = 128, .den = 8, }, },
		.height_subsampling = { [0] = { .num = 1, .den = 1, }, },
	},
	{
		.pixelformat = V4L2_PIX_FMT_SGRBG14P,
		.mbus_fmtcode = MEDIA_BUS_FMT_SGRBG14_1X14,
		.num_planes = 1,
		.pixel_width_align = 8,
		.plane_bytes_align = { [0] = 16, },
		.plane_bpp = { [0] = { .num = 128, .den = 8, }, },
		.height_subsampling = { [0] = { .num = 1, .den = 1, }, },
	},
	{
		.pixelformat = V4L2_PIX_FMT_SRGGB14P,
		.mbus_fmtcode = MEDIA_BUS_FMT_SRGGB14_1X14,
		.num_planes = 1,
		.pixel_width_align = 8,
		.plane_bytes_align = { [0] = 16, },
		.plane_bpp = { [0] = { .num = 128, .den = 8, }, },
		.height_subsampling = { [0] = { .num = 1, .den = 1, }, },
	},
	/* rgb */
	{
		.pixelformat = V4L2_PIX_FMT_RGB565,
		.mbus_fmtcode = MEDIA_BUS_FMT_RGB565_1X16,
		.num_planes = 1,
		.pixel_width_align = 2,
		.plane_bytes_align = { [0] = 1, },
		.plane_bpp = { [0] = { .num = 16, .den = 1, }, },
		.height_subsampling = { [0] = { .num = 1, .den = 1, }, },
	},
	{
		.pixelformat = V4L2_PIX_FMT_RGB24,
		.mbus_fmtcode = MEDIA_BUS_FMT_RGB888_1X24,
		.num_planes = 1,
		.pixel_width_align = 2,
		.plane_bytes_align = { [0] = 1, },
		.plane_bpp = { [0] = { .num = 24, .den = 1, }, },
		.height_subsampling = { [0] = { .num = 1, .den = 1, }, },
	},
	/* yuv */
	{
		.pixelformat = V4L2_PIX_FMT_NV12,
		.mbus_fmtcode = MEDIA_BUS_FMT_YUYV8_1_5X8,
		.num_planes = 2,
		.pixel_width_align = 2,
		.plane_bytes_align = { [0] = 1, [1] = 1, },
		.plane_bpp = {
			[0] = { .num = 8, .den = 1, },
			[1] = { .num = 8, .den = 1, },
		},
		.height_subsampling = {
			[0] = { .num = 1, .den = 1, },
			[1] = { .num = 1, .den = 2, },	/* half-height chroma plane */
		},
	},
	{
		/* AFBC compressed NV12: only alignments here, sizes computed
		 * specially in spm_vdev_fill_v4l2_format() */
		.pixelformat = V4L2_PIX_FMT_NV12_AFBC,
		.mbus_fmtcode = MEDIA_BUS_FMT_YUYV8_1_5X8_AFBC,
		.num_planes = 2,
		.pixel_width_align = 32,
		.pixel_height_align = 4,
	},
	{
		.pixelformat = V4L2_PIX_FMT_NV21,
		.mbus_fmtcode = MEDIA_BUS_FMT_YVYU8_1_5X8,
		.num_planes = 2,
		.pixel_width_align = 2,
		.plane_bytes_align = { [0] = 1, [1] = 1, },
		.plane_bpp = {
			[0] = { .num = 8, .den = 1, },
			[1] = { .num = 8, .den = 1, },
		},
		.height_subsampling = {
			[0] = { .num = 1, .den = 1, },
			[1] = { .num = 1, .den = 2, },
		},
	},
	/* YUYV YUV422 */
	{
		.pixelformat = V4L2_PIX_FMT_YUYV,
		.mbus_fmtcode = MEDIA_BUS_FMT_YUYV8_1X16,
		.num_planes = 1,
		.pixel_width_align = 2,
		.plane_bytes_align = { [0] = 1, },
		.plane_bpp = { [0] = { .num = 16, .den = 1, }, },
		.height_subsampling = { [0] = { .num = 1, .den = 1, }, },
	},
	/* YVYU YUV422 */
	{
		.pixelformat = V4L2_PIX_FMT_YVYU,
		.mbus_fmtcode = MEDIA_BUS_FMT_YVYU8_1X16,
		.num_planes = 1,
		.pixel_width_align = 2,
		.plane_bytes_align = { [0] = 1, },
		.plane_bpp = { [0] = { .num = 16, .den = 1, }, },
		.height_subsampling = { [0] = { .num = 1, .den = 1, }, },
	},
	/* Y210 */
	{
		.pixelformat = V4L2_PIX_FMT_Y210,
		.mbus_fmtcode = MEDIA_BUS_FMT_YUYV10_1X20,
		.num_planes = 1,
		.pixel_width_align = 2,
		.plane_bytes_align = { [0] = 1, },
		.plane_bpp = { [0] = { .num = 64, .den = 2, }, },
		.height_subsampling = { [0] = { .num = 1, .den = 1, }, },
	},
	/* P210 */
	{
		.pixelformat = V4L2_PIX_FMT_P210,
		.mbus_fmtcode = MEDIA_BUS_FMT_YUYV10_2X10,
		.num_planes = 2,
		.pixel_width_align = 2,
		.plane_bytes_align = { [0] = 1, [1] = 1, },
		.plane_bpp = {
			[0] = { .num = 32, .den = 2, },
			[1] = { .num = 32, .den = 2, },
		},
		.height_subsampling = {
			[0] = { .num = 1, .den = 1, },
			[1] = { .num = 1, .den = 1, },	/* 4:2:2 -> full-height chroma */
		},
	},
	/* P010 */
	{
		.pixelformat = V4L2_PIX_FMT_P010,
		.mbus_fmtcode = MEDIA_BUS_FMT_YUYV10_1_5X10,
		.num_planes = 2,
		.pixel_width_align = 2,
		.plane_bytes_align = { [0] = 1, [1] = 1, },
		.plane_bpp = {
			[0] = { .num = 32, .den = 2, },
			[1] = { .num = 32, .den = 2, },
		},
		.height_subsampling = {
			[0] = { .num = 1, .den = 1, },
			[1] = { .num = 1, .den = 2, },
		},
	},
	/* D010 layer1 */
	{
		.pixelformat = V4L2_PIX_FMT_D010_1,
		.mbus_fmtcode = MEDIA_BUS_FMT_YUYV10_1_5X10_D1,
		.num_planes = 2,
		.pixel_width_align = 32,
		.pixel_height_align = 16,
		.plane_bytes_align = { [0] = 1, [1] = 1, },
		.plane_bpp = {
			[0] = { .num = 10, .den = 1, },
			[1] = { .num = 10, .den = 1, },
		},
		.height_subsampling = {
			[0] = { .num = 1, .den = 1, },
			[1] = { .num = 1, .den = 2, },
		},
	},
	/* D010 layer2 */
	{
		.pixelformat = V4L2_PIX_FMT_D010_2,
		.mbus_fmtcode = MEDIA_BUS_FMT_YUYV10_1_5X10_D2,
		.num_planes = 2,
		.pixel_width_align = 16,
		.pixel_height_align = 8,
		.plane_bytes_align = { [0] = 1, [1] = 1, },
		.plane_bpp = {
			[0] = { .num = 10, .den = 1, },
			[1] = { .num = 10, .den = 1, },
		},
		.height_subsampling = {
			[0] = { .num = 1, .den = 1, },
			[1] = { .num = 1, .den = 2, },
		},
	},
	/* D010 layer3 */
	{
		.pixelformat = V4L2_PIX_FMT_D010_3,
		.mbus_fmtcode = MEDIA_BUS_FMT_YUYV10_1_5X10_D3,
		.num_planes = 2,
		.pixel_width_align = 8,
		.pixel_height_align = 4,
		.plane_bytes_align = { [0] = 1, [1] = 1, },
		.plane_bpp = {
			[0] = { .num = 10, .den = 1, },
			[1] = { .num = 10, .den = 1, },
		},
		.height_subsampling = {
			[0] = { .num = 1, .den = 1, },
			[1] = { .num = 1, .den = 2, },
		},
	},
	/* D010 layer4 */
	{
		.pixelformat = V4L2_PIX_FMT_D010_4,
		.mbus_fmtcode = MEDIA_BUS_FMT_YUYV10_1_5X10_D4,
		.num_planes = 2,
		.pixel_width_align = 4,
		.pixel_height_align = 2,
		.plane_bytes_align = { [0] = 1, [1] = 1, },
		.plane_bpp = {
			[0] = { .num = 10, .den = 1, },
			[1] = { .num = 10, .den = 1, },
		},
		.height_subsampling = {
			[0] = { .num = 1, .den = 1, },
			[1] = { .num = 1, .den = 2, },
		},
	},
	/* D210 layer1 (note: no pixel_height_align for the D210 family) */
	{
		.pixelformat = V4L2_PIX_FMT_D210_1,
		.mbus_fmtcode = MEDIA_BUS_FMT_YVYU10_1_5X10_D1,
		.num_planes = 2,
		.pixel_width_align = 32,
		.plane_bytes_align = { [0] = 1, [1] = 1, },
		.plane_bpp = {
			[0] = { .num = 10, .den = 1, },
			[1] = { .num = 10, .den = 1, },
		},
		.height_subsampling = {
			[0] = { .num = 1, .den = 1, },
			[1] = { .num = 1, .den = 2, },
		},
	},
	/* D210 layer2 */
	{
		.pixelformat = V4L2_PIX_FMT_D210_2,
		.mbus_fmtcode = MEDIA_BUS_FMT_YVYU10_1_5X10_D2,
		.num_planes = 2,
		.pixel_width_align = 16,
		.plane_bytes_align = { [0] = 1, [1] = 1, },
		.plane_bpp = {
			[0] = { .num = 10, .den = 1, },
			[1] = { .num = 10, .den = 1, },
		},
		.height_subsampling = {
			[0] = { .num = 1, .den = 1, },
			[1] = { .num = 1, .den = 2, },
		},
	},
	/* D210 layer3 */
	{
		.pixelformat = V4L2_PIX_FMT_D210_3,
		.mbus_fmtcode = MEDIA_BUS_FMT_YVYU10_1_5X10_D3,
		.num_planes = 2,
		.pixel_width_align = 8,
		.plane_bytes_align = { [0] = 1, [1] = 1, },
		.plane_bpp = {
			[0] = { .num = 10, .den = 1, },
			[1] = { .num = 10, .den = 1, },
		},
		.height_subsampling = {
			[0] = { .num = 1, .den = 1, },
			[1] = { .num = 1, .den = 2, },
		},
	},
	/* D210 layer4 */
	{
		.pixelformat = V4L2_PIX_FMT_D210_4,
		.mbus_fmtcode = MEDIA_BUS_FMT_YVYU10_1_5X10_D4,
		.num_planes = 2,
		.pixel_width_align = 4,
		.plane_bytes_align = { [0] = 1, [1] = 1, },
		.plane_bpp = {
			[0] = { .num = 10, .den = 1, },
			[1] = { .num = 10, .den = 1, },
		},
		.height_subsampling = {
			[0] = { .num = 1, .den = 1, },
			[1] = { .num = 1, .den = 2, },
		},
	},
};
|
|
+
|
|
+static int spm_vdev_lookup_formats_table(struct v4l2_format *f)
|
|
+{
|
|
+ struct v4l2_pix_format_mplane *pix_fmt = &f->fmt.pix_mp;
|
|
+ int loop = 0;
|
|
+
|
|
+ for (loop = 0; loop < ARRAY_SIZE(spm_camera_formats_table); loop++) {
|
|
+ if (spm_camera_formats_table[loop].pixelformat == pix_fmt->pixelformat)
|
|
+ break;
|
|
+ }
|
|
+ if (loop >= ARRAY_SIZE(spm_camera_formats_table))
|
|
+ return -1;
|
|
+
|
|
+ return 0;
|
|
+}
|
|
+
|
|
+void spm_vdev_fill_subdev_format(struct v4l2_format *f, struct v4l2_subdev_format *sub_f)
|
|
+{
|
|
+ int loop = 0;
|
|
+
|
|
+ for (loop = 0; loop < ARRAY_SIZE(spm_camera_formats_table); loop++) {
|
|
+ if (f->fmt.pix_mp.pixelformat == spm_camera_formats_table[loop].pixelformat) {
|
|
+ sub_f->format.code = spm_camera_formats_table[loop].mbus_fmtcode;
|
|
+ sub_f->format.width = f->fmt.pix_mp.width;
|
|
+ sub_f->format.height = f->fmt.pix_mp.height;
|
|
+ sub_f->format.field = f->fmt.pix_mp.field;
|
|
+ sub_f->format.colorspace = f->fmt.pix_mp.colorspace;
|
|
+ sub_f->format.ycbcr_enc = f->fmt.pix_mp.ycbcr_enc;
|
|
+ sub_f->format.quantization = f->fmt.pix_mp.quantization;
|
|
+ sub_f->format.xfer_func = f->fmt.pix_mp.xfer_func;
|
|
+ break;
|
|
+ }
|
|
+ }
|
|
+}
|
|
+
|
|
+void spm_vdev_fill_v4l2_format(struct v4l2_subdev_format *sub_f, struct v4l2_format *f)
|
|
+{
|
|
+ int loop = 0, plane = 0;
|
|
+ unsigned int width = 0, height = 0, stride = 0;
|
|
+ struct v4l2_plane_pix_format *plane_fmt = NULL;
|
|
+
|
|
+ for (loop = 0; loop < ARRAY_SIZE(spm_camera_formats_table); loop++) {
|
|
+ if (sub_f->format.code == spm_camera_formats_table[loop].mbus_fmtcode) {
|
|
+ f->fmt.pix_mp.pixelformat = spm_camera_formats_table[loop].pixelformat;
|
|
+ f->fmt.pix_mp.width = sub_f->format.width;
|
|
+ f->fmt.pix_mp.height = height = sub_f->format.height;
|
|
+ f->fmt.pix_mp.field = sub_f->format.field;
|
|
+ f->fmt.pix_mp.colorspace = sub_f->format.colorspace;
|
|
+ f->fmt.pix_mp.ycbcr_enc = sub_f->format.ycbcr_enc;
|
|
+ f->fmt.pix_mp.quantization = sub_f->format.quantization;
|
|
+ f->fmt.pix_mp.xfer_func = sub_f->format.xfer_func;
|
|
+ width = CAM_ALIGN(sub_f->format.width, spm_camera_formats_table[loop].pixel_width_align);
|
|
+ if (0 == spm_camera_formats_table[loop].pixel_height_align)
|
|
+ spm_camera_formats_table[loop].pixel_height_align = 1;
|
|
+ height = CAM_ALIGN(sub_f->format.height, spm_camera_formats_table[loop].pixel_height_align);
|
|
+ cam_dbg("%s width=%u, width_align=%u",__func__ ,width, spm_camera_formats_table[loop].pixel_width_align);
|
|
+ f->fmt.pix_mp.num_planes = spm_camera_formats_table[loop].num_planes;
|
|
+ for (plane = 0; plane < f->fmt.pix_mp.num_planes; plane++) {
|
|
+ plane_fmt = &f->fmt.pix_mp.plane_fmt[plane];
|
|
+ if (V4L2_PIX_FMT_NV12_AFBC == spm_camera_formats_table[loop].pixelformat) {
|
|
+ //height = CAM_ALIGN(sub_f->format.height, 4);
|
|
+ plane_fmt->bytesperline = 0x1000;
|
|
+ if (0 == plane) {
|
|
+ //(ceil(width / 32) * ceil(height / 4)) * 8
|
|
+ plane_fmt->sizeimage = (width * height) >> 4;
|
|
+ } else {
|
|
+ //(ceil(width / 32) * ceil(height / 4)) * 192
|
|
+ plane_fmt->sizeimage = (width * height * 3) >> 1;
|
|
+ }
|
|
+ } else {
|
|
+ stride = CAM_ALIGN((width * spm_camera_formats_table[loop].plane_bpp[plane].num) / (spm_camera_formats_table[loop].plane_bpp[plane].den * 8),
|
|
+ spm_camera_formats_table[loop].plane_bytes_align[plane]);
|
|
+ plane_fmt->sizeimage =
|
|
+ height * stride * spm_camera_formats_table[loop].height_subsampling[plane].num / spm_camera_formats_table[loop].height_subsampling[plane].den;
|
|
+ plane_fmt->bytesperline = stride;
|
|
+ }
|
|
+ cam_dbg("plane%d stride=%u", plane, stride);
|
|
+ }
|
|
+ break;
|
|
+ }
|
|
+ }
|
|
+}
|
|
+
|
|
+static int spm_vdev_queue_setup(struct vb2_queue *q,
|
|
+ unsigned int *num_buffers,
|
|
+ unsigned int *num_planes,
|
|
+ unsigned int sizes[],
|
|
+ struct device *alloc_devs[])
|
|
+{
|
|
+ struct spm_camera_vnode *sc_vnode = container_of(q, struct spm_camera_vnode, buf_queue);
|
|
+ int loop = 0;
|
|
+
|
|
+ if (num_buffers && num_planes) {
|
|
+ *num_planes = sc_vnode->cur_fmt.fmt.pix_mp.num_planes;
|
|
+ cam_dbg("%s num_buffers=%d num_planes=%d ", __func__, *num_buffers, *num_planes);
|
|
+ for (loop = 0; loop < *num_planes; loop++) {
|
|
+ sizes[loop] = sc_vnode->cur_fmt.fmt.pix_mp.plane_fmt[loop].sizeimage;
|
|
+ cam_dbg("plane%d size=%u ", loop, sizes[loop]);
|
|
+ }
|
|
+ } else {
|
|
+ cam_err("%s NULL num_buffers or num_planes ", __func__);
|
|
+ return -EINVAL;
|
|
+ }
|
|
+
|
|
+ return 0;
|
|
+}
|
|
+
|
|
/*
 * vb2 .wait_prepare: called just before vb2 sleeps waiting for a buffer.
 * Drop the vnode mutex so other buf/stream ioctls are not blocked while the
 * waiter sleeps; spm_vdev_wait_finish() re-acquires it on wakeup.
 */
static void spm_vdev_wait_prepare(struct vb2_queue *q)
{
	//going to wait sleep, release all locks that may block any vb2 buf/stream functions
	struct spm_camera_vnode *sc_vnode = container_of(q, struct spm_camera_vnode, buf_queue);
	mutex_unlock(&sc_vnode->mlock);
}
|
|
+
|
|
/*
 * vb2 .wait_finish: counterpart of spm_vdev_wait_prepare(); re-acquire the
 * vnode mutex after vb2 wakes from its buffer wait.
 */
static void spm_vdev_wait_finish(struct vb2_queue *q)
{
	//wakeup from wait sleep, reacquire all locks
	struct spm_camera_vnode *sc_vnode = container_of(q, struct spm_camera_vnode, buf_queue);
	mutex_lock(&sc_vnode->mlock);
}
|
|
+
|
|
+static int spm_vdev_buf_init(struct vb2_buffer *vb)
|
|
+{
|
|
+ struct spm_camera_vbuffer *sc_vb = to_camera_vbuffer(vb);
|
|
+
|
|
+ INIT_LIST_HEAD(&sc_vb->list_entry);
|
|
+ sc_vb->reset_flag = 0;
|
|
+ return 0;
|
|
+}
|
|
+
|
|
+static int spm_vdev_buf_prepare(struct vb2_buffer *vb)
|
|
+{
|
|
+ struct spm_camera_vbuffer *sc_vb = to_camera_vbuffer(vb);
|
|
+ struct spm_camera_vnode *sc_vnode = container_of(vb->vb2_queue, struct spm_camera_vnode, buf_queue);
|
|
+
|
|
+ sc_vb->flags = 0;
|
|
+ sc_vb->timestamp_eof = 0;
|
|
+ memset(sc_vb->reserved, 0, SC_BUF_RESERVED_DATA_LEN);
|
|
+ //sc_vb->vb2_v4l2_buf.flags &= ~V4L2_BUF_FLAG_IGNOR;
|
|
+ sc_vb->vb2_v4l2_buf.flags = 0;
|
|
+ sc_vb->sc_vnode = sc_vnode;
|
|
+ blocking_notifier_call_chain(&sc_vnode->notify_chain, SPACEMIT_VNODE_NOTIFY_BUF_PREPARE, sc_vb);
|
|
+ return 0;
|
|
+}
|
|
+
|
|
/* vb2 .buf_finish: intentionally empty — no per-dequeue post-processing. */
static void spm_vdev_buf_finish(struct vb2_buffer *vb)
{
}
|
|
+
|
|
/* vb2 .buf_cleanup: intentionally empty — nothing extra to release. */
static void spm_vdev_buf_cleanup(struct vb2_buffer *vb)
{

}
|
|
+
|
|
/*
 * vb2 .start_streaming: reset frame statistics, notify pipeline listeners,
 * then signal the remote subdev that this devnode is streaming.
 *
 * NOTE(review): the driver repurposes the pad op 'link_validate' as a
 * stream-on/off notification channel, encoding the state in sd_fmt.which
 * (1 = on, 0 = off) — confirm the subdev side expects exactly this.
 * SAINT_CHECK() fills remote_me/remote_sd from remote_pad (see macro).
 */
static int spm_vdev_start_streaming(struct vb2_queue *q, unsigned int count)
{
	struct spm_camera_vnode *sc_vnode = container_of(q, struct spm_camera_vnode, buf_queue);
	struct media_entity *me = &sc_vnode->vnode.entity, *remote_me = NULL;
	struct media_pad *remote_pad = media_entity_remote_pad(&me->pads[0]);
	struct v4l2_subdev *remote_sd = NULL;
	struct spm_camera_subdev *remote_sc_subdev = NULL;
	struct v4l2_subdev_format sd_fmt;
	int ret = 0;

	cam_dbg("%s(%s)", __func__, sc_vnode->name);
	/* reset the per-stream frame counters reported at stream-off */
	sc_vnode->total_frm = 0;
	sc_vnode->sw_err_frm = 0;
	sc_vnode->hw_err_frm = 0;
	sc_vnode->ok_frm = 0;
	ret = blocking_notifier_call_chain(&sc_vnode->notify_chain, SPACEMIT_VNODE_NOTIFY_STREAM_ON, sc_vnode);
	if (ret) {
		/* notifier failures are logged but do not abort stream-on */
		cam_err("%s notifer_call_chain failed. ", __func__);
	}

	ret = SAINT_CHECK();
	if (ret)
		return ret;
	remote_sc_subdev = v4l2_subdev_to_sc_subdev(remote_sd);
	/* use link_validata to notify vnode stream on */
	sd_fmt.which = 1;	//1 means stream on
	sd_fmt.pad = remote_pad->index;
	ret = v4l2_subdev_call(remote_sd, pad, link_validate, NULL, &sd_fmt, &sd_fmt);
	if (ret) {
		cam_err("%s stream on from remote sd(%s) on pad(%d) failed.",
			__func__, remote_sc_subdev->name, remote_pad->index);
	}

	return ret;
}
|
|
+
|
|
/*
 * vb2 .stop_streaming: notify pipeline listeners, signal the remote subdev
 * that streaming stopped (link_validate with which=0, mirroring
 * spm_vdev_start_streaming), then log the frame statistics gathered since
 * stream-on. Errors are logged only; this callback cannot fail.
 */
static void spm_vdev_stop_streaming(struct vb2_queue *q)
{
	struct spm_camera_vnode *sc_vnode = container_of(q, struct spm_camera_vnode, buf_queue);
	struct media_entity *me = &sc_vnode->vnode.entity, *remote_me = NULL;
	struct media_pad *remote_pad = media_entity_remote_pad(&me->pads[0]);
	struct v4l2_subdev *remote_sd = NULL;
	struct spm_camera_subdev *remote_sc_subdev = NULL;
	struct v4l2_subdev_format sd_fmt;
	int ret = 0;

	cam_dbg("%s(%s) enter", __func__, sc_vnode->name);
	ret = blocking_notifier_call_chain(&sc_vnode->notify_chain, SPACEMIT_VNODE_NOTIFY_STREAM_OFF, sc_vnode);
	if (ret) {
		cam_err("%s notifer_call_chain failed. ", __func__);
	}

	/* no remote link: nothing more to tear down (stats are not logged then) */
	ret = SAINT_CHECK();
	if (ret)
		return;
	remote_sc_subdev = v4l2_subdev_to_sc_subdev(remote_sd);
	/* use link_validata to notify vnode stream off */
	sd_fmt.which = 0;	//0 means stream off
	sd_fmt.pad = remote_pad->index;
	ret = v4l2_subdev_call(remote_sd, pad, link_validate, NULL, &sd_fmt, &sd_fmt);
	if (ret) {
		cam_err("%s stream off from remote sd(%s) on pad(%d) failed.",
			__func__, remote_sc_subdev->name, remote_pad->index);
	}
	cam_not("%s total_frm(%u) sw_err_frm(%u) hw_err_frm(%u) ok_frm(%u)",
		sc_vnode->name, sc_vnode->total_frm, sc_vnode->sw_err_frm, sc_vnode->hw_err_frm, sc_vnode->ok_frm);
	cam_dbg("%s(%s) leave", __func__, sc_vnode->name);
}
|
|
+
|
|
/*
 * vb2 .buf_queue: timestamp the buffer, propagate the userspace
 * FORCE_SHADOW request, and append the buffer to the vnode's queued list
 * under the irq-safe spinlock (the list is also walked from irq context).
 */
static void spm_vdev_buf_queue(struct vb2_buffer *vb)
{
	unsigned long flags = 0;
	struct spm_camera_vbuffer *sc_vb = to_camera_vbuffer(vb);
	struct vb2_queue *buf_queue = vb->vb2_queue;
	struct spm_camera_vnode *sc_vnode = container_of(buf_queue, struct spm_camera_vnode, buf_queue);
	/* flags userspace passed at VIDIOC_QBUF, stashed by spm_vdev_vidioc_qbuf() */
	unsigned int v4l2_buf_flags = sc_vnode->v4l2_buf_flags[vb->index];

	/* NOTE(review): 12 and 13 are magic vnode indices — presumably the
	 * only nodes supporting shadow-register updates; confirm and give
	 * them named constants. */
	if (v4l2_buf_flags & V4l2_BUF_FLAG_FORCE_SHADOW
	    && (sc_vnode->idx == 12 || sc_vnode->idx == 13)) {
		sc_vb->flags |= SC_BUF_FLAG_FORCE_SHADOW;
	}
	sc_vb->timestamp_qbuf = ktime_get_boottime_ns();
	spin_lock_irqsave(&sc_vnode->slock, flags);
	atomic_inc(&sc_vnode->queued_buf_cnt);
	list_add_tail(&sc_vb->list_entry, &sc_vnode->queued_list);
	spin_unlock_irqrestore(&sc_vnode->slock, flags);
}
|
|
+
|
|
+static struct vb2_ops spm_camera_vb2_ops = {
|
|
+ .queue_setup = spm_vdev_queue_setup,
|
|
+ .wait_prepare = spm_vdev_wait_prepare,
|
|
+ .wait_finish = spm_vdev_wait_finish,
|
|
+ .buf_init = spm_vdev_buf_init,
|
|
+ .buf_prepare = spm_vdev_buf_prepare,
|
|
+ .buf_finish = spm_vdev_buf_finish,
|
|
+ .buf_cleanup = spm_vdev_buf_cleanup,
|
|
+ .start_streaming = spm_vdev_start_streaming,
|
|
+ .stop_streaming = spm_vdev_stop_streaming,
|
|
+ .buf_queue = spm_vdev_buf_queue,
|
|
+};
|
|
+
|
|
/*
 * Return every in-flight buffer to vb2 with an error state and empty the
 * vnode's bookkeeping lists. Runs entirely under the irq-safe spinlock;
 * vb2_buffer_done() is irq-safe so calling it with the lock held is legal.
 * The pipeline's current buffer (sc_vb) is completed as DONE — unless it is
 * the reserved Z1 buffer, which stays owned by the driver.
 */
static void spm_vdev_cancel_all_buffers(struct spm_camera_vnode *sc_vnode)
{
	unsigned long flags = 0;
	struct spm_camera_vbuffer *pos = NULL, *n = NULL;
	struct vb2_buffer *vb2_buf = NULL;

	spin_lock_irqsave(&sc_vnode->slock, flags);
	/* buffers queued by userspace but not yet given to hardware */
	list_for_each_entry_safe(pos, n, &sc_vnode->queued_list, list_entry) {
		vb2_buf = &(pos->vb2_v4l2_buf.vb2_buf);
		vb2_buffer_done(vb2_buf, VB2_BUF_STATE_ERROR);
		list_del_init(&pos->list_entry);
		atomic_dec(&sc_vnode->queued_buf_cnt);
	}
	/* buffers currently programmed into / owned by hardware */
	list_for_each_entry_safe(pos, n, &sc_vnode->busy_list, list_entry) {
		vb2_buf = &(pos->vb2_v4l2_buf.vb2_buf);
		vb2_buffer_done(vb2_buf, VB2_BUF_STATE_ERROR);
		list_del_init(&pos->list_entry);
		atomic_dec(&sc_vnode->busy_buf_cnt);
	}
	if (sc_vnode->sc_vb && !(sc_vnode->sc_vb->flags & SC_BUF_FLAG_RSVD_Z1)) {
		vb2_buf = &(sc_vnode->sc_vb->vb2_v4l2_buf.vb2_buf);
		vb2_buffer_done(vb2_buf, VB2_BUF_STATE_DONE);
		sc_vnode->sc_vb = NULL;
	}
	spin_unlock_irqrestore(&sc_vnode->slock, flags);
}
|
|
+
|
|
+static int spm_vdev_vidioc_reqbufs(struct file *file, void *fh, struct v4l2_requestbuffers *b)
|
|
+{
|
|
+ struct video_device *vnode = video_devdata(file);
|
|
+ struct spm_camera_vnode *sc_vnode = container_of(vnode, struct spm_camera_vnode, vnode);
|
|
+ int ret = 0;
|
|
+
|
|
+ mutex_lock(&sc_vnode->mlock);
|
|
+ ret = vb2_reqbufs(&sc_vnode->buf_queue, b);
|
|
+ mutex_unlock(&sc_vnode->mlock);
|
|
+ return ret;
|
|
+}
|
|
+
|
|
+static int spm_vdev_vidioc_querybuf(struct file *file, void *fh, struct v4l2_buffer *b)
|
|
+{
|
|
+ struct video_device *vnode = video_devdata(file);
|
|
+ struct spm_camera_vnode *sc_vnode = container_of(vnode, struct spm_camera_vnode, vnode);
|
|
+ int ret = 0;
|
|
+
|
|
+ mutex_lock(&sc_vnode->mlock);
|
|
+ ret = vb2_querybuf(&sc_vnode->buf_queue, b);
|
|
+ mutex_unlock(&sc_vnode->mlock);
|
|
+ return ret;
|
|
+}
|
|
+
|
|
+static int spm_vdev_vidioc_qbuf(struct file *file, void *fh, struct v4l2_buffer *b)
|
|
+{
|
|
+ struct video_device *vnode = video_devdata(file);
|
|
+ struct spm_camera_vnode *sc_vnode = container_of(vnode, struct spm_camera_vnode, vnode);
|
|
+ int ret = 0;
|
|
+ unsigned int i = 0;
|
|
+
|
|
+ sc_vnode->v4l2_buf_flags[b->index] = b->flags;
|
|
+ for (i = 0; i < b->length; i++) {
|
|
+ sc_vnode->planes_offset[b->index][i] = b->m.planes[i].data_offset;
|
|
+ }
|
|
+ mutex_lock(&sc_vnode->mlock);
|
|
+ ret = vb2_qbuf(&sc_vnode->buf_queue, vnode->v4l2_dev->mdev, b);
|
|
+ if (ret == 0) {
|
|
+ blocking_notifier_call_chain(&sc_vnode->notify_chain, SPACEMIT_VNODE_NOTIFY_BUF_QUEUED, sc_vnode);
|
|
+ }
|
|
+ mutex_unlock(&sc_vnode->mlock);
|
|
+ return ret;
|
|
+}
|
|
+
|
|
+static int spm_vdev_vidioc_expbuf(struct file *file, void *fh, struct v4l2_exportbuffer *e)
|
|
+{
|
|
+ struct video_device *vnode = video_devdata(file);
|
|
+ struct spm_camera_vnode *sc_vnode = container_of(vnode, struct spm_camera_vnode, vnode);
|
|
+ int ret = 0;
|
|
+
|
|
+ mutex_lock(&sc_vnode->mlock);
|
|
+#ifndef MODULE
|
|
+ ret = vb2_expbuf(&sc_vnode->buf_queue, e);
|
|
+#endif
|
|
+ mutex_unlock(&sc_vnode->mlock);
|
|
+ return ret;
|
|
+}
|
|
+
|
|
+static int spm_vdev_vidioc_dqbuf(struct file *file, void *fh, struct v4l2_buffer *b)
|
|
+{
|
|
+ struct video_device *vnode = video_devdata(file);
|
|
+ struct spm_camera_vnode *sc_vnode = container_of(vnode, struct spm_camera_vnode, vnode);
|
|
+ int ret = 0;
|
|
+
|
|
+ mutex_lock(&sc_vnode->mlock);
|
|
+ ret = vb2_dqbuf(&sc_vnode->buf_queue, b, file->f_flags & O_NONBLOCK);
|
|
+ mutex_unlock(&sc_vnode->mlock);
|
|
+ return ret;
|
|
+}
|
|
+
|
|
+static int spm_vdev_vidioc_create_bufs(struct file *file, void *fh, struct v4l2_create_buffers *b)
|
|
+{
|
|
+ struct video_device *vnode = video_devdata(file);
|
|
+ struct spm_camera_vnode *sc_vnode = container_of(vnode, struct spm_camera_vnode, vnode);
|
|
+ int ret = 0;
|
|
+
|
|
+ mutex_lock(&sc_vnode->mlock);
|
|
+#ifndef MODULE
|
|
+ ret = vb2_create_bufs(&sc_vnode->buf_queue, b);
|
|
+#endif
|
|
+ mutex_unlock(&sc_vnode->mlock);
|
|
+ return ret;
|
|
+}
|
|
+
|
|
+static int spm_vdev_vidioc_prepare_buf(struct file *file, void *fh, struct v4l2_buffer *b)
|
|
+{
|
|
+ struct video_device *vnode = video_devdata(file);
|
|
+ struct spm_camera_vnode *sc_vnode = container_of(vnode, struct spm_camera_vnode, vnode);
|
|
+ int ret = 0;
|
|
+
|
|
+ mutex_lock(&sc_vnode->mlock);
|
|
+ ret = vb2_prepare_buf(&sc_vnode->buf_queue, vnode->v4l2_dev->mdev, b);
|
|
+ mutex_unlock(&sc_vnode->mlock);
|
|
+ return ret;
|
|
+}
|
|
+
|
|
+static int spm_vdev_vidioc_streamon(struct file *file, void *fn, enum v4l2_buf_type i)
|
|
+{
|
|
+ int ret = 0;
|
|
+ struct video_device *vnode = video_devdata(file);
|
|
+ struct spm_camera_vnode *sc_vnode = container_of(vnode, struct spm_camera_vnode, vnode);
|
|
+ mutex_lock(&sc_vnode->mlock);
|
|
+ ret = vb2_streamon(&sc_vnode->buf_queue, i);
|
|
+ mutex_unlock(&sc_vnode->mlock);
|
|
+ return ret;
|
|
+}
|
|
+
|
|
/*
 * VIDIOC_STREAMOFF handler.
 *
 * The ordering here is deliberate:
 *  1. Under waitq_head.lock, wait for the completion tasklet to drain
 *     (in_tasklet == 0), then raise in_streamoff so the tasklet side can
 *     see a stop is in progress.
 *  2. Under mlock, give every pending buffer back to vb2
 *     (spm_vdev_cancel_all_buffers) and stop the queue.
 *  3. Clear in_streamoff again under waitq_head.lock.
 */
static int spm_vdev_vidioc_streamoff(struct file *file, void *fn, enum v4l2_buf_type i)
{
	int ret = 0;
	struct video_device *vnode = video_devdata(file);
	struct spm_camera_vnode *sc_vnode = container_of(vnode, struct spm_camera_vnode, vnode);
	unsigned long flags = 0;

	cam_dbg("%s(%s) enter", __func__, sc_vnode->name);
	cam_dbg("%s(%s) queued_buf_cnt=%d busy_buf_cnt=%d.", __func__, sc_vnode->name,
		atomic_read(&sc_vnode->queued_buf_cnt),
		atomic_read(&sc_vnode->busy_buf_cnt));
	spin_lock_irqsave(&sc_vnode->waitq_head.lock, flags);
	/* _locked_irq variant: sleeps with waitq_head.lock held/reacquired. */
	wait_event_interruptible_locked_irq(sc_vnode->waitq_head, !sc_vnode->in_tasklet);
	sc_vnode->in_streamoff = 1;
	spin_unlock_irqrestore(&sc_vnode->waitq_head.lock, flags);
	cam_dbg("%s tasklet clean", sc_vnode->name);
	mutex_lock(&sc_vnode->mlock);
	cam_dbg("%s cancel all buffers", sc_vnode->name);
	spm_vdev_cancel_all_buffers(sc_vnode);
	cam_dbg("%s streamoff", sc_vnode->name);
	ret = vb2_streamoff(&sc_vnode->buf_queue, i);
	mutex_unlock(&sc_vnode->mlock);
	spin_lock_irqsave(&sc_vnode->waitq_head.lock, flags);
	sc_vnode->in_streamoff = 0;
	spin_unlock_irqrestore(&sc_vnode->waitq_head.lock, flags);
	cam_dbg("%s(%s) leave", __func__, sc_vnode->name);
	return ret;
}
|
|
+
|
|
+static int spm_vdev_vidioc_querycap(struct file *file, void *fh, struct v4l2_capability *cap)
|
|
+{
|
|
+ strlcpy(cap->driver, "spacemitisp", 16);
|
|
+ cap->capabilities = V4L2_CAP_DEVICE_CAPS | V4L2_CAP_STREAMING | V4L2_CAP_VIDEO_CAPTURE_MPLANE;
|
|
+ cap->device_caps = V4L2_CAP_STREAMING | V4L2_CAP_VIDEO_CAPTURE_MPLANE;
|
|
+ return 0;
|
|
+}
|
|
+
|
|
+/*
|
|
+ * static int spm_vdev_vidioc_enum_fmt_vid_cap_mplane(struct file *file, void *fh, struct v4l2_fmtdesc *f)
|
|
+ * {
|
|
+ * return 0;
|
|
+ * }
|
|
+ */
|
|
+
|
|
/*
 * VIDIOC_G_FMT (capture mplane) handler: return the format cached by the
 * last successful S_FMT.
 *
 * NOTE(review): me/remote_me/remote_pad/remote_sd look unused here, but
 * SAINT_CHECK() is an opaque project macro that presumably inspects (and
 * may assign) them — confirm before removing any of these locals.
 */
static int spm_vdev_vidioc_g_fmt_vid_cap_mplane(struct file *file, void *fh, struct v4l2_format *f)
{
	struct video_device *vnode = video_devdata(file);
	struct spm_camera_vnode *sc_vnode = container_of(vnode, struct spm_camera_vnode, vnode);
	struct media_entity *me = &sc_vnode->vnode.entity, *remote_me = NULL;
	struct media_pad *remote_pad = media_entity_remote_pad(&me->pads[0]);
	struct v4l2_subdev *remote_sd = NULL;
	int ret = 0;

	ret = SAINT_CHECK();
	if (ret)
		return ret;
	cam_dbg("get format fourcc code[0x%08x] (%dx%d)",
		sc_vnode->cur_fmt.fmt.pix_mp.pixelformat,
		sc_vnode->cur_fmt.fmt.pix_mp.width,
		sc_vnode->cur_fmt.fmt.pix_mp.height);
	/* Whole-struct copy of the cached active format. */
	*f = sc_vnode->cur_fmt;
	return 0;
}
|
|
+
|
|
/*
 * Unlocked S_FMT implementation (caller holds sc_vnode->mlock).
 *
 * Flow:
 *  - reject while streaming;
 *  - translate the fourcc via the driver's format table;
 *  - propagate the format to the subdev on the other end of the node's
 *    only pad:
 *      OUT direction: push the requested format with set_fmt;
 *      IN direction : read the remote's current format, require an exact
 *                     width/height/code match, then re-apply it.
 *  - convert the negotiated pad format back into @f and cache it.
 *
 * NOTE(review): remote_sd is declared NULL and then used below;
 * SAINT_CHECK() is an opaque macro that presumably resolves
 * remote_pad/remote_sd (or fails) — confirm, otherwise the
 * v4l2_subdev_call()s below would operate on a NULL subdev.
 */
static int __spm_vdev_vidioc_s_fmt_vid_cap_mplane(struct file *file, void *fh, struct v4l2_format *f)
{
	struct video_device *vnode = video_devdata(file);
	struct spm_camera_vnode *sc_vnode = container_of(vnode, struct spm_camera_vnode, vnode);
	struct media_entity *me = &sc_vnode->vnode.entity, *remote_me = NULL;
	struct media_pad *remote_pad = media_entity_remote_pad(&me->pads[0]);
	struct v4l2_subdev *remote_sd = NULL;
	struct spm_camera_subdev *remote_sc_subdev = NULL;
	struct vb2_queue *vb2_queue = &sc_vnode->buf_queue;
	int ret = 0;
	struct v4l2_subdev_format pad_format = { 0 }, tmp_format = { 0 };

	cam_dbg("set format fourcc code[0x%08x] (%dx%d)",
		f->fmt.pix_mp.pixelformat, f->fmt.pix_mp.width, f->fmt.pix_mp.height);
	if (vb2_is_streaming(vb2_queue)) {
		cam_err("%s set format not allowed while streaming on.", __func__);
		return -EBUSY;
	}
	ret = spm_vdev_lookup_formats_table(f);
	if (ret) {
		cam_err("%s failed to lookup formats table fourcc code[0x%08x]",
			__func__, f->fmt.pix_mp.pixelformat);
		return ret;
	}
	ret = SAINT_CHECK();
	if (ret)
		return ret;
	remote_sc_subdev = v4l2_subdev_to_sc_subdev(remote_sd);
	pad_format.which = V4L2_SUBDEV_FORMAT_ACTIVE;
	pad_format.pad = remote_pad->index;
	if (sc_vnode->direction == SPACEMIT_VNODE_DIR_OUT) {
		/* Output node: we dictate the format to the remote subdev. */
		spm_vdev_fill_subdev_format(f, &pad_format);
		ret = v4l2_subdev_call(remote_sd, pad, set_fmt, NULL, &pad_format);
		if (ret) {
			cam_err("%s set format failed on remote subdev(%s).", __func__,
				remote_sc_subdev->name);
			return ret;
		}
	} else {
		/* Capture node: the remote's active format must already match
		 * the request exactly. */
		ret = v4l2_subdev_call(remote_sd, pad, get_fmt, NULL, &pad_format);
		if (ret) {
			cam_err("%s get format failed on remote subdev(%s).", __func__,
				remote_sc_subdev->name);
			return ret;
		}
		spm_vdev_fill_subdev_format(f, &tmp_format);
		if (tmp_format.format.width != pad_format.format.width
		    || tmp_format.format.height != pad_format.format.height
		    || tmp_format.format.code != pad_format.format.code) {
			cam_err("%s remote subdev(%s) didn't support this format.",
				__func__, remote_sc_subdev->name);
			return -EINVAL;
		}
		ret = v4l2_subdev_call(remote_sd, pad, set_fmt, NULL, &pad_format);
		if (ret) {
			cam_err("%s set format failed on remote subdev(%s).", __func__, remote_sc_subdev->name);
			return ret;
		}
	}
	/* Reflect the negotiated format back to user space and cache it for
	 * later G_FMT calls. */
	spm_vdev_fill_v4l2_format(&pad_format, f);
	sc_vnode->cur_fmt = *f;

	return 0;
}
|
|
+
|
|
+static int spm_vdev_vidioc_s_fmt_vid_cap_mplane(struct file *file, void *fh, struct v4l2_format *f)
|
|
+{
|
|
+ struct video_device *vnode = video_devdata(file);
|
|
+ struct spm_camera_vnode *sc_vnode = container_of(vnode, struct spm_camera_vnode, vnode);
|
|
+ int ret = 0;
|
|
+
|
|
+ mutex_lock(&sc_vnode->mlock);
|
|
+ ret = __spm_vdev_vidioc_s_fmt_vid_cap_mplane(file, fh, f);
|
|
+ mutex_unlock(&sc_vnode->mlock);
|
|
+ return ret;
|
|
+}
|
|
+
|
|
/*
 * VIDIOC_TRY_FMT (capture mplane): accepts any format unmodified.
 * NOTE(review): a compliant TRY_FMT should clamp @f to the nearest
 * supported format — confirm user space does not depend on that here.
 */
static int spm_vdev_vidioc_try_fmt_vid_cap_mplane(struct file *file, void *fh, struct v4l2_format *f)
{
	return 0;
}
|
|
+
|
|
+static void spm_vdev_debug_dump(struct spm_camera_vnode *sc_vnode, int reason)
|
|
+{
|
|
+ __u64 now = 0;
|
|
+ unsigned long flags = 0, t = 0;
|
|
+ struct spm_camera_vbuffer *pos = NULL;
|
|
+
|
|
+ now = ktime_get_boottime_ns();
|
|
+ cam_not("%s(a[%u]) queued_buf_cnt=%d busy_buf_cnt=%d reason=%d", __func__,
|
|
+ sc_vnode->idx, atomic_read(&sc_vnode->queued_buf_cnt),
|
|
+ atomic_read(&sc_vnode->busy_buf_cnt), reason);
|
|
+ spin_lock_irqsave(&sc_vnode->slock, flags);
|
|
+ list_for_each_entry(pos, &sc_vnode->queued_list, list_entry) {
|
|
+ t = (unsigned long)(now - pos->timestamp_qbuf);
|
|
+ t /= 1000;
|
|
+ cam_not("a[%u] queued_list buf_index=%u t=%lu reason=%d", sc_vnode->idx,
|
|
+ pos->vb2_v4l2_buf.vb2_buf.index, t, reason);
|
|
+ }
|
|
+ list_for_each_entry(pos, &sc_vnode->busy_list, list_entry) {
|
|
+ t = (unsigned long)(now - pos->timestamp_qbuf);
|
|
+ t /= 1000;
|
|
+ cam_not("a[%u] busy_list buf_index=%u t=%lu reason=%d", sc_vnode->idx,
|
|
+ pos->vb2_v4l2_buf.vb2_buf.index, t, reason);
|
|
+ }
|
|
+ spin_unlock_irqrestore(&sc_vnode->slock, flags);
|
|
+}
|
|
+
|
|
/*
 * Driver-private ioctl dispatcher (.vidioc_default).
 *
 * Handles the VIDIOC_* extensions this driver defines: entity info query,
 * slice-mode query/handshake, Z1-CPU query and a debug dump trigger.
 *
 * NOTE(review): several paths return bare -1/-2 rather than -errno
 * values; user space evidently treats these as private status codes —
 * confirm before normalizing them.
 */
static long spm_vdev_vidioc_default(struct file *file,
				    void *fh,
				    bool valid_prio, unsigned int cmd, void *arg)
{
	struct video_device *vnode = video_devdata(file);
	struct spm_camera_vnode *sc_vnode = container_of(vnode, struct spm_camera_vnode, vnode);
	struct v4l2_vi_entity_info *entity_info = NULL;
	struct spm_camera_pipeline *sc_pipeline = NULL;
	struct media_pipeline *pipe = media_entity_pipeline(&vnode->entity);
	int *slice_mode = NULL, *is_z1 = NULL;
	struct v4l2_vi_slice_info *slice_info = NULL;
	struct v4l2_vi_debug_dump *debug_dump = NULL;
	int ret = 0;

	switch (cmd) {
	case VIDIOC_G_ENTITY_INFO:
		/* Report this node's media entity id and name. */
		entity_info = (struct v4l2_vi_entity_info *)arg;
		entity_info->id = media_entity_id(&sc_vnode->vnode.entity);
		strlcpy(entity_info->name, sc_vnode->name, SPACEMIT_VI_ENTITY_NAME_LEN);
		break;
	case VIDIOC_G_SLICE_MODE:
		/* Requires the node to be part of a started pipeline. */
		BUG_ON(!pipe);
		sc_pipeline = media_pipeline_to_sc_pipeline(pipe);
		BUG_ON(!sc_pipeline);
		slice_mode = (int *)arg;
		*slice_mode = sc_pipeline->is_slice_mode;
		cam_dbg("VIDIOC_G_SLICE_MODE slice_mode(%d) %s", *slice_mode, sc_vnode->name);
		break;
	case VIDIOC_QUERY_SLICE_READY:
		/* Block (up to the caller-supplied timeout) until the ISP
		 * posts a slice-info update, then hand the slice id/count to
		 * user space. */
		cam_not("query slice info ready");
		BUG_ON(!pipe);
		sc_pipeline = media_pipeline_to_sc_pipeline(pipe);
		BUG_ON(!sc_pipeline);
		if (sc_pipeline->state <= PIPELINE_ST_STOPPING) {
			return -2;
		}
		slice_info = (struct v4l2_vi_slice_info *)arg;
		ret = wait_event_interruptible_timeout(sc_pipeline->slice_waitq,
						       atomic_read(&sc_pipeline->slice_info_update),
						       msecs_to_jiffies(slice_info->timeout));
		/* Re-check: the pipeline may have been stopped while asleep. */
		if (sc_pipeline->state <= PIPELINE_ST_STOPPING) {
			return -2;
		}
		if (ret == 0) {
			/* wait_event_interruptible_timeout: 0 == timed out */
			cam_err("%s wait isp slice info notify timeout(%u)", __func__, slice_info->timeout);
			return -1;
		} else if (ret < 0) {
			/* <0 == interrupted by a signal */
			cam_err("%s wait isp slice info notify interrupted by user app", __func__);
			return -1;
		}
		atomic_set(&sc_pipeline->slice_info_update, 0);
		blocking_notifier_call_chain(&sc_pipeline->blocking_notify_chain,
					     PIPELINE_ACTION_SLICE_READY, sc_pipeline);
		slice_info->slice_id = sc_pipeline->slice_id;
		slice_info->total_slice_cnt = sc_pipeline->total_slice_cnt;
		break;
	case VIDIOC_S_SLICE_DONE:
		/* User space finished processing a slice: record the result
		 * and wake whoever waits on slice_done. */
		BUG_ON(!pipe);
		sc_pipeline = media_pipeline_to_sc_pipeline(pipe);
		BUG_ON(!sc_pipeline);
		atomic_set(&sc_pipeline->slice_info_update, 0);
		sc_pipeline->slice_result = *((int *)arg);
		complete_all(&sc_pipeline->slice_done);
		break;
	case VIDIOC_CPU_Z1:
		/* This build always reports "not Z1". */
		is_z1 = (int *)arg;
		*is_z1 = 0;
		break;
	case VIDIOC_DEBUG_DUMP:
		debug_dump = (struct v4l2_vi_debug_dump *)arg;
		spm_vdev_debug_dump(sc_vnode, debug_dump->reason);
		break;
	default:
		cam_warn("unknown ioctl cmd(%d).", cmd);
		return -ENOIOCTLCMD;
	}

	return 0;
}
|
|
+
|
|
+static struct v4l2_ioctl_ops spm_camera_v4l2_ioctl_ops = {
|
|
+ /* VIDIOC_QUERYCAP handler */
|
|
+ .vidioc_querycap = spm_vdev_vidioc_querycap,
|
|
+ /* VIDIOC_ENUM_FMT handlers */
|
|
+ /* .vidioc_enum_fmt_vid_cap = spm_vdev_vidioc_enum_fmt_vid_cap_mplane, */
|
|
+ /* VIDIOC_G_FMT handlers */
|
|
+ .vidioc_g_fmt_vid_cap_mplane = spm_vdev_vidioc_g_fmt_vid_cap_mplane,
|
|
+ /* VIDIOC_S_FMT handlers */
|
|
+ .vidioc_s_fmt_vid_cap_mplane = spm_vdev_vidioc_s_fmt_vid_cap_mplane,
|
|
+ /* VIDIOC_TRY_FMT handlers */
|
|
+ .vidioc_try_fmt_vid_cap_mplane = spm_vdev_vidioc_try_fmt_vid_cap_mplane,
|
|
+ /* Buffer handlers */
|
|
+ .vidioc_reqbufs = spm_vdev_vidioc_reqbufs,
|
|
+ .vidioc_querybuf = spm_vdev_vidioc_querybuf,
|
|
+ .vidioc_qbuf = spm_vdev_vidioc_qbuf,
|
|
+ .vidioc_expbuf = spm_vdev_vidioc_expbuf,
|
|
+ .vidioc_dqbuf = spm_vdev_vidioc_dqbuf,
|
|
+ .vidioc_create_bufs = spm_vdev_vidioc_create_bufs,
|
|
+ .vidioc_prepare_buf = spm_vdev_vidioc_prepare_buf,
|
|
+ .vidioc_streamon = spm_vdev_vidioc_streamon,
|
|
+ .vidioc_streamoff = spm_vdev_vidioc_streamoff,
|
|
+ .vidioc_default = spm_vdev_vidioc_default,
|
|
+};
|
|
+
|
|
+static int spm_vdev_open(struct file *file)
|
|
+{
|
|
+ struct video_device *vnode = video_devdata(file);
|
|
+ struct spm_camera_vnode *sc_vnode = container_of(vnode, struct spm_camera_vnode, vnode);
|
|
+
|
|
+ if (atomic_inc_return(&sc_vnode->ref_cnt) != 1) {
|
|
+ cam_err("vnode(%s - %s) was already openned.", sc_vnode->name,
|
|
+ video_device_node_name(vnode));
|
|
+ atomic_dec(&sc_vnode->ref_cnt);
|
|
+ return -EBUSY;
|
|
+ }
|
|
+
|
|
+ cam_dbg("open vnode(%s - %s).", sc_vnode->name, video_device_node_name(vnode));
|
|
+ return 0;
|
|
+}
|
|
+
|
|
/*
 * Tear down queue state on last close.
 *
 * Mirrors the streamoff sequence, with one difference: in_streamoff is
 * raised *before* waiting for the tasklet to drain, so the tasklet side
 * observes the shutdown as early as possible.  Afterwards all buffers are
 * cancelled and the vb2 queue is released (not just stopped).
 */
static void __spm_vdev_close(struct spm_camera_vnode *sc_vnode)
{
	unsigned long flags = 0;

	cam_dbg("%s(%s) enter", __func__, sc_vnode->name);
	cam_dbg("%s(%s) queued_buf_cnt=%d busy_buf_cnt=%d.", __func__, sc_vnode->name,
		atomic_read(&sc_vnode->queued_buf_cnt),
		atomic_read(&sc_vnode->busy_buf_cnt));
	spin_lock_irqsave(&sc_vnode->waitq_head.lock, flags);
	sc_vnode->in_streamoff = 1;
	/* _locked_irq variant: sleeps with waitq_head.lock held/reacquired. */
	wait_event_interruptible_locked_irq(sc_vnode->waitq_head, !sc_vnode->in_tasklet);
	spin_unlock_irqrestore(&sc_vnode->waitq_head.lock, flags);
	cam_dbg("%s tasklet clean", sc_vnode->name);
	mutex_lock(&sc_vnode->mlock);
	cam_dbg("%s cancel all buffers", sc_vnode->name);
	spm_vdev_cancel_all_buffers(sc_vnode);
	cam_dbg("%s queue release", sc_vnode->name);
	vb2_queue_release(&sc_vnode->buf_queue);
	/* Drop queue ownership so a future opener can claim it. */
	sc_vnode->buf_queue.owner = NULL;
	mutex_unlock(&sc_vnode->mlock);
	spin_lock_irqsave(&sc_vnode->waitq_head.lock, flags);
	sc_vnode->in_streamoff = 0;
	spin_unlock_irqrestore(&sc_vnode->waitq_head.lock, flags);
	cam_dbg("%s(%s) leave", __func__, sc_vnode->name);
}
|
|
+
|
|
+static int spm_vdev_close(struct file *file)
|
|
+{
|
|
+ struct video_device *vnode = video_devdata(file);
|
|
+ struct spm_camera_vnode *sc_vnode = container_of(vnode, struct spm_camera_vnode, vnode);
|
|
+
|
|
+ if (atomic_dec_and_test(&sc_vnode->ref_cnt)) {
|
|
+ __spm_vdev_close(sc_vnode);
|
|
+ }
|
|
+
|
|
+ return v4l2_fh_release(file);
|
|
+}
|
|
+
|
|
+static __poll_t spm_vdev_poll(struct file *file, struct poll_table_struct *wait)
|
|
+{
|
|
+ __poll_t ret;
|
|
+ struct video_device *vnode = video_devdata(file);
|
|
+ struct spm_camera_vnode *sc_vnode = container_of(vnode, struct spm_camera_vnode, vnode);
|
|
+
|
|
+ ret = vb2_poll(&sc_vnode->buf_queue, file, wait);
|
|
+
|
|
+ return ret;
|
|
+}
|
|
+
|
|
+//#ifdef CONFIG_COMPAT
|
|
+#if 0
|
|
+
|
|
/*
 * Allocate a zeroed scratch area on the compat user stack.
 * Dead code: only referenced from the disabled (#if 0) compat-ioctl path.
 * NOTE(review): compat_alloc_user_space() is gone from recent mainline
 * kernels — this must be reworked before the block can be re-enabled.
 */
static int alloc_userspace(unsigned int size, u32 aux_space, void __user **new_p64)
{
	*new_p64 = compat_alloc_user_space(size + aux_space);
	if (!*new_p64)
		return -ENOMEM;
	if (clear_user(*new_p64, size))
		return -EFAULT;
	return 0;
}
|
|
+
|
|
+long spm_vdev_compat_ioctl32(struct file *file, unsigned int cmd, unsigned long arg)
|
|
+{
|
|
+ void __user *p32 = compat_ptr(arg);
|
|
+ void __user *new_p64 = NULL;
|
|
+ //void __user *aux_buf;
|
|
+ //u32 aux_space;
|
|
+ long err = 0;
|
|
+ const size_t ioc_size = _IOC_SIZE(cmd);
|
|
+ //size_t ioc_size64 = 0;
|
|
+
|
|
+ //if (_IOC_TYPE(cmd) == 'V') {
|
|
+ // switch (_IOC_NR(cmd)) {
|
|
+ // //int r
|
|
+ // case _IOC_NR(VIDIOC_G_SLICE_MODE):
|
|
+ // case _IOC_NR(VIDIOC_CPU_Z1):
|
|
+ // //int w
|
|
+ // case _IOC_NR(VIDIOC_PUT_PIPELINE):
|
|
+ // case _IOC_NR(VIDIOC_RESET_PIPELINE):
|
|
+ // ioc_size64 = sizeof(int);
|
|
+ // break;
|
|
+ // //unsigned int
|
|
+ // case _IOC_NR(VIDIOC_G_PIPE_STATUS):
|
|
+ // ioc_size64 = sizeof(int);
|
|
+ // break;
|
|
+ // case _IOC_NR(VIDIOC_S_PORT_CFG):
|
|
+ // ioc_size64 = sizeof(struct v4l2_vi_port_cfg);
|
|
+ // break;
|
|
+ // case _IOC_NR(VIDIOC_DBG_REG_WRITE):
|
|
+ // case _IOC_NR(VIDIOC_DBG_REG_READ):
|
|
+ // ioc_size64 = sizeof(struct v4l2_vi_dbg_reg);
|
|
+ // break;
|
|
+ // case _IOC_NR(VIDIOC_CFG_INPUT_INTF):
|
|
+ // ioc_size64 = sizeof(struct v4l2_vi_input_interface);
|
|
+ // break;
|
|
+ // case _IOC_NR(VIDIOC_SET_SELECTION):
|
|
+ // ioc_size64 = sizeof(struct v4l2_vi_selection);
|
|
+ // break;
|
|
+ // case _IOC_NR(VIDIOC_QUERY_SLICE_READY):
|
|
+ // ioc_size64 = sizeof(struct v4l2_vi_slice_info);
|
|
+ // break;
|
|
+ // case _IOC_NR(VIDIOC_S_BANDWIDTH):
|
|
+ // ioc_size64 = sizeof(struct v4l2_vi_bandwidth_info);
|
|
+ // break;
|
|
+ // case _IOC_NR(VIDIOC_G_ENTITY_INFO):
|
|
+ // ioc_size64 = sizeof(struct v4l2_vi_entity_info);
|
|
+ // break;
|
|
+ // }
|
|
+ // cam_dbg("%s cmd_nr=%d ioc_size32=%u ioc_size64=%u",__func__, _IOC_NR(cmd), ioc_size, ioc_size64);
|
|
+ //}
|
|
+ if (_IOC_DIR(cmd) != _IOC_NONE) {
|
|
+ err = alloc_userspace(ioc_size, 0, &new_p64);
|
|
+ if (err) {
|
|
+ cam_err("%s alloc userspace failed err=%l cmd=%d ioc_size=%u", __func__, err, _IOC_NR(cmd), ioc_size);
|
|
+ return err;
|
|
+ }
|
|
+ if ((_IOC_DIR(cmd) & _IOC_WRITE)) {
|
|
+ err = copy_in_user(new_p64, p32, ioc_size);
|
|
+ if (err) {
|
|
+ cam_err("%s copy in user 1 failed err=%l cmd=%d ioc_size=%u", __func__, err, _IOC_NR(cmd), ioc_size);
|
|
+ return err;
|
|
+ }
|
|
+ }
|
|
+ }
|
|
+
|
|
+ err = video_ioctl2(file, cmd, (unsigned long)new_p64);
|
|
+ if (err) {
|
|
+ return err;
|
|
+ }
|
|
+
|
|
+ if ((_IOC_DIR(cmd) & _IOC_READ)) {
|
|
+ err = copy_in_user(p32, new_p64, ioc_size);
|
|
+ if (err) {
|
|
+ cam_err("%s copy in user 2 failed err=%l cmd=%d ioc_size=%u", __func__, err, _IOC_NR(cmd), ioc_size);
|
|
+ return err;
|
|
+ }
|
|
+ }
|
|
+ //switch (cmd) {
|
|
+ // //int r
|
|
+ // case VIDIOC_G_SLICE_MODE:
|
|
+ // case VIDIOC_CPU_Z1:
|
|
+ // //int w
|
|
+ // case VIDIOC_PUT_PIPELINE:
|
|
+ // case VIDIOC_RESET_PIPELINE:
|
|
+ // err = alloc_userspace(sizeof(int), 0, &new_p64);
|
|
+ // if (!err && assign_in_user((int __user *)new_p64,
|
|
+ // (compat_int_t __user *)p32))
|
|
+ // err = -EFAULT;
|
|
+ // break;
|
|
+ // //unsigned int
|
|
+ // case VIDIOC_G_PIPE_STATUS:
|
|
+ // err = alloc_userspace(sizeof(unsigned int), 0, &new_p64);
|
|
+ // if (!err && assign_in_user((unsigned int __user *)new_p64,
|
|
+ // (compat_uint_t __user *)p32))
|
|
+ // err = -EFAULT;
|
|
+ // break;
|
|
+ // case VIDIOC_S_PORT_CFG:
|
|
+ // err = alloc_userspace(sizeof(struct v4l2_vi_port_cfg), 0, &new_p64);
|
|
+ // if (!err) {
|
|
+ // err = -EFAULT;
|
|
+ // break;
|
|
+ // }
|
|
+ // break;
|
|
+ // case VIDIOC_DBG_REG_WRITE:
|
|
+ // case VIDIOC_DBG_REG_READ:
|
|
+ // break;
|
|
+ // case VIDIOC_CFG_INPUT_INTF:
|
|
+ // break;
|
|
+ // case VIDIOC_SET_SELECTION:
|
|
+ // break;
|
|
+ // case VIDIOC_QUERY_SLICE_READY:
|
|
+ // break;
|
|
+ // case VIDIOC_S_BANDWIDTH:
|
|
+ // break;
|
|
+ // case VIDIOC_G_ENTITY_INFO:
|
|
+ // break;
|
|
+
|
|
+ //}
|
|
+ //if (err)
|
|
+ // return err;
|
|
+ return 0;
|
|
+}
|
|
+#endif
|
|
+
|
|
+static struct v4l2_file_operations spm_camera_file_operations = {
|
|
+ .owner = THIS_MODULE,
|
|
+ .poll = spm_vdev_poll,
|
|
+ .unlocked_ioctl = video_ioctl2,
|
|
+ .open = spm_vdev_open,
|
|
+ .release = spm_vdev_close,
|
|
+//#ifdef CONFIG_COMPAT
|
|
+#if 0
|
|
+ .compat_ioctl32 = spm_vdev_compat_ioctl32,
|
|
+#endif
|
|
+};
|
|
+
|
|
+static void spm_vdev_release(struct video_device *vdev)
|
|
+{
|
|
+ struct spm_camera_vnode *sc_vnode = container_of(vdev, struct spm_camera_vnode, vnode);
|
|
+
|
|
+ cam_dbg("%s(%s %s) enter.", __func__, sc_vnode->name, video_device_node_name(&sc_vnode->vnode));
|
|
+ media_entity_cleanup(&sc_vnode->vnode.entity);
|
|
+ mutex_destroy(&sc_vnode->mlock);
|
|
+}
|
|
+
|
|
+#ifdef MODULE
|
|
/*
 * Unlink a media graph object from its media_device.
 * NOTE(review): this appears to be a verbatim copy of the unexported
 * media-core helper of the same name, duplicated so the driver can build
 * as a module (it is only compiled under #ifdef MODULE) — confirm it
 * stays in sync with the kernel's own implementation.
 */
void media_gobj_destroy(struct media_gobj *gobj)
{
	/* Do nothing if the object is not linked. */
	if (gobj->mdev == NULL)
		return;

	gobj->mdev->topology_version++;

	/* Remove the object from mdev list */
	list_del(&gobj->list);

	gobj->mdev = NULL;
}
|
|
+
|
|
/*
 * Remove @link from @entity and destroy its reverse link on the remote
 * entity, keeping both entities' link/backlink counters balanced.
 * NOTE(review): appears to be a copy of the unexported media-core helper
 * (module build only) — confirm against drivers/media/mc/mc-entity.c.
 */
static void __media_entity_remove_link(struct media_entity *entity,
				       struct media_link *link)
{
	struct media_link *rlink, *tmp;
	struct media_entity *remote;

	/* The peer is whichever end of the link is not @entity. */
	if (link->source->entity == entity)
		remote = link->sink->entity;
	else
		remote = link->source->entity;

	list_for_each_entry_safe(rlink, tmp, &remote->links, list) {
		if (rlink != link->reverse)
			continue;

		/* The remote held a backlink only if we own the source pad. */
		if (link->source->entity == entity)
			remote->num_backlinks--;

		/* Remove the remote link */
		list_del(&rlink->list);
		media_gobj_destroy(&rlink->graph_obj);
		kfree(rlink);

		if (--remote->num_links == 0)
			break;
	}
	list_del(&link->list);
	media_gobj_destroy(&link->graph_obj);
	kfree(link);
}
|
|
+
|
|
+void __spm_media_entity_remove_links(struct media_entity *entity)
|
|
+{
|
|
+ struct media_link *link, *tmp;
|
|
+
|
|
+ list_for_each_entry_safe(link, tmp, &entity->links, list)
|
|
+ __media_entity_remove_link(entity, link);
|
|
+
|
|
+ entity->num_links = 0;
|
|
+ entity->num_backlinks = 0;
|
|
+}
|
|
+
|
|
+EXPORT_SYMBOL_GPL(__spm_media_entity_remove_links);
|
|
+
|
|
+void spm_media_entity_remove_links(struct media_entity *entity)
|
|
+{
|
|
+ struct media_device *mdev = entity->graph_obj.mdev;
|
|
+
|
|
+ /* Do nothing if the entity is not registered. */
|
|
+ if (mdev == NULL)
|
|
+ return;
|
|
+
|
|
+ mutex_lock(&mdev->graph_mutex);
|
|
+ __spm_media_entity_remove_links(entity);
|
|
+ mutex_unlock(&mdev->graph_mutex);
|
|
+}
|
|
+#endif
|
|
/*
 * spm_camera_block release callback: unwind what spm_vdev_create_vnode()
 * set up — vb2 queue, media links, then the video device itself.
 * The MODULE branch uses the driver's local copy of the unexported
 * media-core link-removal helper.
 */
static void spm_vdev_block_release(struct spm_camera_block *b)
{
	struct spm_camera_vnode *sc_vnode = container_of(b, struct spm_camera_vnode, sc_block);

	cam_dbg("%s(%s %s) enter.", __func__, sc_vnode->name, video_device_node_name(&sc_vnode->vnode));
	vb2_queue_release(&sc_vnode->buf_queue);
#ifdef MODULE
	spm_media_entity_remove_links(&sc_vnode->vnode.entity);
#else
	media_entity_remove_links(&sc_vnode->vnode.entity);
#endif
	video_unregister_device(&sc_vnode->vnode);
}
|
|
+
|
|
/* Release hook invoked when the camera block's reference count drops. */
static struct spm_camera_block_ops sc_block_ops = {
	.release = spm_vdev_block_release,
};
|
|
+
|
|
/*
 * media link_validate callback for links whose sink is a video node.
 *
 * Besides validating that the sink really is one of our vnodes and that a
 * pipeline has been started, this hooks the vnode's notifier block into
 * the pipeline's blocking notifier chain so the node receives
 * pipeline-wide events.
 *
 * NOTE(review): returns bare -1/-2 instead of -errno values, and is
 * called on every pipeline (re)start — confirm the notifier chain copes
 * with the same block being registered repeatedly.
 */
static int spm_vdev_link_validate(struct media_link *link)
{
	struct media_entity *me = link->sink->entity;
	struct spm_camera_vnode *sc_vnode = media_entity_to_sc_vnode(me);
	struct spm_camera_pipeline *sc_pipeline = NULL;
	struct media_pipeline *mpipe = media_entity_pipeline(me);
	int ret = 0;

	if (!sc_vnode) {
		return -1;
	}
	if (!mpipe) {
		return -2;
	}

	sc_pipeline = media_pipeline_to_sc_pipeline(mpipe);
	ret = blocking_notifier_chain_register(&sc_pipeline->blocking_notify_chain, &sc_vnode->pipeline_notify_block);
	if (ret) {
		return ret;
	}
	return 0;
}
|
|
+
|
|
+static struct media_entity_operations spm_camera_media_entity_ops = {
|
|
+ .link_validate = spm_vdev_link_validate,
|
|
+};
|
|
+
|
|
+static int spm_vdev_pipeline_notify_fn(struct notifier_block *nb,
|
|
+ unsigned long action, void *data)
|
|
+{
|
|
+ struct entity_usrdata *entity_usrdata = NULL;
|
|
+ struct spm_camera_vnode *sc_vnode = container_of(nb, struct spm_camera_vnode, pipeline_notify_block);
|
|
+ switch (action) {
|
|
+ case PIPELINE_ACTION_SET_ENTITY_USRDATA:
|
|
+ entity_usrdata = (struct entity_usrdata *)data;
|
|
+ cam_dbg("%s(%s) set entity usrdata(%d:%d)", __func__, sc_vnode->name,
|
|
+ media_entity_id(&sc_vnode->vnode.entity),
|
|
+ entity_usrdata->entity_id);
|
|
+ if (media_entity_id(&sc_vnode->vnode.entity) == entity_usrdata->entity_id) {
|
|
+ sc_vnode->usr_data = entity_usrdata->usr_data;
|
|
+ return NOTIFY_STOP;
|
|
+ }
|
|
+ break;
|
|
+ case PIPELINE_ACTION_GET_ENTITY_USRDATA:
|
|
+ entity_usrdata = (struct entity_usrdata *)data;
|
|
+ cam_dbg("%s(%s) get entity usrdata(%d:%d)", __func__, sc_vnode->name,
|
|
+ media_entity_id(&sc_vnode->vnode.entity),
|
|
+ entity_usrdata->entity_id);
|
|
+ if (media_entity_id(&sc_vnode->vnode.entity) == entity_usrdata->entity_id) {
|
|
+ entity_usrdata->usr_data = sc_vnode->usr_data;
|
|
+ return NOTIFY_STOP;
|
|
+ }
|
|
+ break;
|
|
+ default:
|
|
+ return NOTIFY_DONE;
|
|
+ }
|
|
+ return NOTIFY_DONE;
|
|
+}
|
|
+
|
|
+struct spm_camera_vnode *spm_vdev_create_vnode(const char *name,
|
|
+ int direction,
|
|
+ unsigned int idx,
|
|
+ struct v4l2_device *v4l2_dev,
|
|
+ struct device *alloc_dev,
|
|
+ unsigned int min_buffers_needed)
|
|
+{
|
|
+ int ret = 0;
|
|
+ struct spm_camera_vnode *sc_vnode = NULL;
|
|
+
|
|
+ if (NULL == name || NULL == v4l2_dev || NULL == alloc_dev) {
|
|
+ cam_err("%s invalid arguments.", __func__);
|
|
+ return NULL;
|
|
+ }
|
|
+ if (direction < SPACEMIT_VNODE_DIR_IN || direction > SPACEMIT_VNODE_DIR_OUT) {
|
|
+ cam_err("%s invalid direction.", __func__);
|
|
+ return NULL;
|
|
+ }
|
|
+ sc_vnode = devm_kzalloc(alloc_dev, sizeof(*sc_vnode), GFP_KERNEL);
|
|
+ if (NULL == sc_vnode) {
|
|
+ cam_err("%s failed to alloc mem for spm_camera_vnode(%s).", __func__, name);
|
|
+ return NULL;
|
|
+ }
|
|
+
|
|
+ INIT_LIST_HEAD(&sc_vnode->queued_list);
|
|
+ INIT_LIST_HEAD(&sc_vnode->busy_list);
|
|
+ atomic_set(&sc_vnode->queued_buf_cnt, 0);
|
|
+ atomic_set(&sc_vnode->busy_buf_cnt, 0);
|
|
+ spin_lock_init(&sc_vnode->slock);
|
|
+ mutex_init(&sc_vnode->mlock);
|
|
+ init_waitqueue_head(&sc_vnode->waitq_head);
|
|
+ sc_vnode->direction = direction;
|
|
+ if (direction == SPACEMIT_VNODE_DIR_IN)
|
|
+ sc_vnode->pad.flags = MEDIA_PAD_FL_SOURCE;
|
|
+ else
|
|
+ sc_vnode->pad.flags = MEDIA_PAD_FL_SINK;
|
|
+ sc_vnode->idx = idx;
|
|
+ BLOCKING_INIT_NOTIFIER_HEAD(&sc_vnode->notify_chain);
|
|
+ sc_vnode->buf_queue.timestamp_flags =
|
|
+ V4L2_BUF_FLAG_TIMESTAMP_MONOTONIC | V4L2_BUF_FLAG_TSTAMP_SRC_SOE;
|
|
+ sc_vnode->buf_queue.buf_struct_size = sizeof(struct spm_camera_vbuffer);
|
|
+ sc_vnode->buf_queue.type = V4L2_BUF_TYPE_VIDEO_CAPTURE_MPLANE;
|
|
+ sc_vnode->buf_queue.io_modes = SPACEMIT_VB2_IO_MODE;
|
|
+ sc_vnode->buf_queue.ops = &spm_camera_vb2_ops;
|
|
+ sc_vnode->buf_queue.mem_ops = spm_vb2_mem_ops;
|
|
+ sc_vnode->buf_queue.min_buffers_needed = min_buffers_needed;
|
|
+ sc_vnode->buf_queue.dev = alloc_dev;
|
|
+ ret = vb2_queue_init(&sc_vnode->buf_queue);
|
|
+ if (ret) {
|
|
+ cam_err("%s vb2_queue_init failed for spm_camera_vnode(%s).", __func__, name);
|
|
+ goto queue_init_fail;
|
|
+ }
|
|
+
|
|
+ strlcpy(sc_vnode->vnode.name, name, 32);
|
|
+ strlcpy(sc_vnode->name, name, SPACEMIT_VI_ENTITY_NAME_LEN);
|
|
+ sc_vnode->vnode.queue = &sc_vnode->buf_queue;
|
|
+ sc_vnode->vnode.fops = &spm_camera_file_operations;
|
|
+ sc_vnode->vnode.ioctl_ops = &spm_camera_v4l2_ioctl_ops;
|
|
+ sc_vnode->vnode.release = spm_vdev_release;
|
|
+ sc_vnode->vnode.entity.ops = &spm_camera_media_entity_ops;
|
|
+ sc_vnode->vnode.device_caps = V4L2_CAP_STREAMING | V4L2_CAP_VIDEO_CAPTURE_MPLANE;
|
|
+ ret = media_entity_pads_init(&sc_vnode->vnode.entity, 1, &sc_vnode->pad);
|
|
+ if (ret) {
|
|
+ cam_err("%s media entity pads init failed for spm_camera_vnode(%s).",
|
|
+ __func__, name);
|
|
+ goto entity_pads_init_fail;
|
|
+ }
|
|
+ spm_camera_block_init(&sc_vnode->sc_block, &sc_block_ops);
|
|
+ sc_vnode->vnode.v4l2_dev = v4l2_dev;
|
|
+ ret = __video_register_device(&sc_vnode->vnode, VFL_TYPE_VIDEO, -1, 1, THIS_MODULE);
|
|
+ if (ret) {
|
|
+ cam_err("%s video dev register failed for spm_camera_vnode(%s).",
|
|
+ __func__, name);
|
|
+ goto vdev_register_fail;
|
|
+ }
|
|
+ sc_vnode->vnode.entity.name = video_device_node_name(&sc_vnode->vnode);
|
|
+ sc_vnode->pipeline_notify_block.notifier_call = spm_vdev_pipeline_notify_fn;
|
|
+ sc_vnode->pipeline_notify_block.priority = SC_PIPE_NOTIFY_PRIO_NORMAL;
|
|
+
|
|
+ cam_dbg("create vnode(%s - %s) successfully.", name,
|
|
+ video_device_node_name(&sc_vnode->vnode));
|
|
+ return sc_vnode;
|
|
+vdev_register_fail:
|
|
+entity_pads_init_fail:
|
|
+ vb2_queue_release(&sc_vnode->buf_queue);
|
|
+queue_init_fail:
|
|
+ devm_kfree(alloc_dev, sc_vnode);
|
|
+ return NULL;
|
|
+}
|
|
+
|
|
+int __spm_vdev_dq_idle_vbuffer(struct spm_camera_vnode *sc_vnode, struct spm_camera_vbuffer **sc_vb)
|
|
+{
|
|
+ *sc_vb = list_first_entry_or_null(&sc_vnode->queued_list, struct spm_camera_vbuffer, list_entry);
|
|
+ if (NULL == *sc_vb)
|
|
+ return -1;
|
|
+ list_del_init(&(*sc_vb)->list_entry);
|
|
+ atomic_dec(&sc_vnode->queued_buf_cnt);
|
|
+ return 0;
|
|
+}
|
|
+
|
|
/* Append @sc_vb to the idle (queued) list and bump the counter.  Caller
 * must hold sc_vnode->slock.  Always returns 0. */
int __spm_vdev_q_idle_vbuffer(struct spm_camera_vnode *sc_vnode, struct spm_camera_vbuffer *sc_vb)
{
	list_add_tail(&sc_vb->list_entry, &sc_vnode->queued_list);
	atomic_inc(&sc_vnode->queued_buf_cnt);
	return 0;
}
|
|
+
|
|
+int spm_vdev_dq_idle_vbuffer(struct spm_camera_vnode *sc_vnode, struct spm_camera_vbuffer **sc_vb)
|
|
+{
|
|
+ unsigned long flags = 0;
|
|
+ int ret = 0;
|
|
+
|
|
+ spin_lock_irqsave(&sc_vnode->slock, flags);
|
|
+ ret = __spm_vdev_dq_idle_vbuffer(sc_vnode, sc_vb);
|
|
+ spin_unlock_irqrestore(&sc_vnode->slock, flags);
|
|
+ return ret;
|
|
+}
|
|
+
|
|
+int spm_vdev_q_idle_vbuffer(struct spm_camera_vnode *sc_vnode, struct spm_camera_vbuffer *sc_vb)
|
|
+{
|
|
+ unsigned long flags = 0;
|
|
+ int ret = 0;
|
|
+
|
|
+ spin_lock_irqsave(&sc_vnode->slock, flags);
|
|
+ ret = __spm_vdev_q_idle_vbuffer(sc_vnode, sc_vb);
|
|
+ spin_unlock_irqrestore(&sc_vnode->slock, flags);
|
|
+
|
|
+ return ret;
|
|
+}
|
|
+
|
|
+int spm_vdev_pick_idle_vbuffer(struct spm_camera_vnode *sc_vnode, struct spm_camera_vbuffer **sc_vb)
|
|
+{
|
|
+ unsigned long flags = 0;
|
|
+
|
|
+ spin_lock_irqsave(&sc_vnode->slock, flags);
|
|
+ *sc_vb = list_first_entry_or_null(&sc_vnode->queued_list, struct spm_camera_vbuffer, list_entry);
|
|
+ spin_unlock_irqrestore(&sc_vnode->slock, flags);
|
|
+ if (NULL == *sc_vb) {
|
|
+ return -1;
|
|
+ }
|
|
+ return 0;
|
|
+}
|
|
+
|
|
+int __spm_vdev_pick_idle_vbuffer(struct spm_camera_vnode *sc_vnode, struct spm_camera_vbuffer **sc_vb)
|
|
+{
|
|
+ *sc_vb = list_first_entry_or_null(&sc_vnode->queued_list, struct spm_camera_vbuffer, list_entry);
|
|
+ if (NULL == *sc_vb) {
|
|
+ return -1;
|
|
+ }
|
|
+ return 0;
|
|
+}
|
|
+
|
|
+int __spm_vdev_dq_busy_vbuffer(struct spm_camera_vnode *sc_vnode, struct spm_camera_vbuffer **sc_vb)
|
|
+{
|
|
+ *sc_vb = list_first_entry_or_null(&sc_vnode->busy_list, struct spm_camera_vbuffer, list_entry);
|
|
+ if (NULL == *sc_vb)
|
|
+ return -1;
|
|
+ list_del_init(&(*sc_vb)->list_entry);
|
|
+ atomic_dec(&sc_vnode->busy_buf_cnt);
|
|
+ return 0;
|
|
+}
|
|
+
|
|
+/*
|
|
+int __spm_vdev_dq_busy_vbuffer_by_paddr(struct spm_camera_vnode *sc_vnode, int plane_id, unsigned long plane_paddr, struct spm_camera_vbuffer **sc_vb)
|
|
+{
|
|
+ struct spm_camera_vbuffer *pos = NULL, *n = NULL;
|
|
+ struct vb2_buffer *vb = NULL;
|
|
+ unsigned long paddr = 0;
|
|
+
|
|
+ list_for_each_entry_safe(pos, n, &sc_vnode->busy_list, list_entry) {
|
|
+ vb = &pos->vb2_v4l2_buf.vb2_buf;
|
|
+ paddr = spm_vb2_buf_paddr(vb, plane_id);
|
|
+ if (paddr == plane_paddr) {
|
|
+ *sc_vb = pos;
|
|
+ list_del_init(&pos->list_entry);
|
|
+ atomic_dec(&sc_vnode->busy_buf_cnt);
|
|
+ return 0;
|
|
+ }
|
|
+ }
|
|
+
|
|
+ return -1;
|
|
+}
|
|
+*/
|
|
+int __spm_vdev_q_busy_vbuffer(struct spm_camera_vnode *sc_vnode, struct spm_camera_vbuffer *sc_vb)
|
|
+{
|
|
+ list_add_tail(&sc_vb->list_entry, &sc_vnode->busy_list);
|
|
+ atomic_inc(&sc_vnode->busy_buf_cnt);
|
|
+ return 0;
|
|
+}
|
|
+
|
|
+int spm_vdev_dq_busy_vbuffer(struct spm_camera_vnode *sc_vnode, struct spm_camera_vbuffer **sc_vb)
|
|
+{
|
|
+ unsigned long flags = 0;
|
|
+ int ret = 0;
|
|
+
|
|
+ spin_lock_irqsave(&sc_vnode->slock, flags);
|
|
+ ret = __spm_vdev_dq_busy_vbuffer(sc_vnode, sc_vb);
|
|
+ spin_unlock_irqrestore(&sc_vnode->slock, flags);
|
|
+ return ret;
|
|
+}
|
|
+
|
|
+int spm_vdev_pick_busy_vbuffer(struct spm_camera_vnode *sc_vnode, struct spm_camera_vbuffer **sc_vb)
|
|
+{
|
|
+ unsigned long flags = 0;
|
|
+
|
|
+ spin_lock_irqsave(&sc_vnode->slock, flags);
|
|
+ *sc_vb = list_first_entry_or_null(&sc_vnode->busy_list, struct spm_camera_vbuffer, list_entry);
|
|
+ spin_unlock_irqrestore(&sc_vnode->slock, flags);
|
|
+ if (NULL == *sc_vb)
|
|
+ return -1;
|
|
+
|
|
+ return 0;
|
|
+}
|
|
+
|
|
+int __spm_vdev_pick_busy_vbuffer(struct spm_camera_vnode *sc_vnode, struct spm_camera_vbuffer **sc_vb)
|
|
+{
|
|
+ *sc_vb = list_first_entry_or_null(&sc_vnode->busy_list, struct spm_camera_vbuffer, list_entry);
|
|
+ if (NULL == *sc_vb)
|
|
+ return -1;
|
|
+
|
|
+ return 0;
|
|
+}
|
|
+/*
|
|
+int spm_vdev_dq_busy_vbuffer_by_paddr(struct spm_camera_vnode *sc_vnode, int plane_id, unsigned long plane_paddr, struct spm_camera_vbuffer **sc_vb)
|
|
+{
|
|
+ unsigned long flags = 0;
|
|
+ int ret = 0;
|
|
+
|
|
+ spin_lock_irqsave(&sc_vnode->slock, flags);
|
|
+ ret = __spm_vdev_dq_busy_vbuffer_by_paddr(sc_vnode, plane_id, plane_paddr, sc_vb);
|
|
+ spin_unlock_irqrestore(&sc_vnode->slock, flags);
|
|
+ return ret;
|
|
+}
|
|
+*/
|
|
+int spm_vdev_q_busy_vbuffer(struct spm_camera_vnode *sc_vnode, struct spm_camera_vbuffer *sc_vb)
|
|
+{
|
|
+ unsigned long flags = 0;
|
|
+ int ret = 0;
|
|
+
|
|
+ spin_lock_irqsave(&sc_vnode->slock, flags);
|
|
+ ret = __spm_vdev_q_busy_vbuffer(sc_vnode, sc_vb);
|
|
+ spin_unlock_irqrestore(&sc_vnode->slock, flags);
|
|
+ return ret;
|
|
+}
|
|
+
|
|
+/*
|
|
+int spm_vdev_busy_list_cnt(struct spm_camera_vnode *sc_vnode)
|
|
+{
|
|
+ return atomic_read(&sc_vnode->busy_buf_cnt);
|
|
+}
|
|
+*/
|
|
+int spm_vdev_export_camera_vbuffer(struct spm_camera_vbuffer *sc_vb, int with_error)
|
|
+{
|
|
+ struct vb2_buffer *vb = &sc_vb->vb2_v4l2_buf.vb2_buf;
|
|
+ if (with_error)
|
|
+ vb2_buffer_done(vb, VB2_BUF_STATE_ERROR);
|
|
+ else
|
|
+ vb2_buffer_done(vb, VB2_BUF_STATE_DONE);
|
|
+ return 0;
|
|
+}
|
|
+
|
|
+struct spm_camera_vnode *spm_vdev_remote_vnode(struct media_pad *pad)
|
|
+{
|
|
+ struct media_pad *remote_pad = media_entity_remote_pad(pad);
|
|
+ struct media_entity *me = NULL;
|
|
+
|
|
+ if (!remote_pad)
|
|
+ return NULL;
|
|
+ me = remote_pad->entity;
|
|
+ if (is_subdev(me))
|
|
+ return NULL;
|
|
+ return (struct spm_camera_vnode *)me;
|
|
+}
|
|
+
|
|
/* Subscribe @notifier_block to this vnode's blocking notifier chain
 * (events: SPACEMIT_VNODE_NOTIFY_*).  Returns the notifier-core result. */
int spm_vdev_register_vnode_notify(struct spm_camera_vnode *sc_vnode,
				   struct notifier_block *notifier_block)
{
	return blocking_notifier_chain_register(&sc_vnode->notify_chain, notifier_block);
}
|
|
+
|
|
/* Unsubscribe @notifier_block from this vnode's notifier chain.
 * Counterpart of spm_vdev_register_vnode_notify(). */
int spm_vdev_unregister_vnode_notify(struct spm_camera_vnode *sc_vnode,
				     struct notifier_block *notifier_block)
{
	return blocking_notifier_chain_unregister(&sc_vnode->notify_chain, notifier_block);
}
|
|
+
|
|
/* Lockless check: non-zero if no buffers are in flight.
 * Caller must hold sc_vnode->slock. */
int __spm_vdev_busy_list_empty(struct spm_camera_vnode *sc_vnode)
{
	return list_empty(&sc_vnode->busy_list);
}
|
|
+
|
|
+int spm_vdev_busy_list_empty(struct spm_camera_vnode *sc_vnode)
|
|
+{
|
|
+ unsigned long flags = 0;
|
|
+ int ret = 0;
|
|
+
|
|
+ spin_lock_irqsave(&sc_vnode->slock, flags);
|
|
+ ret = __spm_vdev_busy_list_empty(sc_vnode);
|
|
+ spin_unlock_irqrestore(&sc_vnode->slock, flags);
|
|
+ return ret;
|
|
+}
|
|
+
|
|
/* Lockless check: non-zero if no idle (queued) buffers are available.
 * Caller must hold sc_vnode->slock. */
int __spm_vdev_idle_list_empty(struct spm_camera_vnode *sc_vnode)
{
	return list_empty(&sc_vnode->queued_list);
}
|
|
+
|
|
+int spm_vdev_idle_list_empty(struct spm_camera_vnode *sc_vnode)
|
|
+{
|
|
+ unsigned long flags = 0;
|
|
+ int ret = 0;
|
|
+
|
|
+ spin_lock_irqsave(&sc_vnode->slock, flags);
|
|
+ ret = __spm_vdev_idle_list_empty(sc_vnode);
|
|
+ spin_unlock_irqrestore(&sc_vnode->slock, flags);
|
|
+ return ret;
|
|
+}
|
|
+
|
|
+int spm_vdev_is_raw_vnode(struct spm_camera_vnode *sc_vnode)
|
|
+{
|
|
+ __u32 pixelformat = sc_vnode->cur_fmt.fmt.pix_mp.pixelformat;
|
|
+
|
|
+ switch (pixelformat) {
|
|
+ case V4L2_PIX_FMT_SBGGR8:
|
|
+ case V4L2_PIX_FMT_SGBRG8:
|
|
+ case V4L2_PIX_FMT_SGRBG8:
|
|
+ case V4L2_PIX_FMT_SRGGB8:
|
|
+ case V4L2_PIX_FMT_SBGGR10:
|
|
+ case V4L2_PIX_FMT_SGBRG10:
|
|
+ case V4L2_PIX_FMT_SGRBG10:
|
|
+ case V4L2_PIX_FMT_SRGGB10:
|
|
+ case V4L2_PIX_FMT_SBGGR10P:
|
|
+ case V4L2_PIX_FMT_SGBRG10P:
|
|
+ case V4L2_PIX_FMT_SGRBG10P:
|
|
+ case V4L2_PIX_FMT_SRGGB10P:
|
|
+ case V4L2_PIX_FMT_SBGGR10ALAW8:
|
|
+ case V4L2_PIX_FMT_SGBRG10ALAW8:
|
|
+ case V4L2_PIX_FMT_SGRBG10ALAW8:
|
|
+ case V4L2_PIX_FMT_SRGGB10ALAW8:
|
|
+ case V4L2_PIX_FMT_SBGGR10DPCM8:
|
|
+ case V4L2_PIX_FMT_SGBRG10DPCM8:
|
|
+ case V4L2_PIX_FMT_SGRBG10DPCM8:
|
|
+ case V4L2_PIX_FMT_SRGGB10DPCM8:
|
|
+ case V4L2_PIX_FMT_SBGGR12:
|
|
+ case V4L2_PIX_FMT_SGBRG12:
|
|
+ case V4L2_PIX_FMT_SGRBG12:
|
|
+ case V4L2_PIX_FMT_SRGGB12:
|
|
+ case V4L2_PIX_FMT_SBGGR12P:
|
|
+ case V4L2_PIX_FMT_SGBRG12P:
|
|
+ case V4L2_PIX_FMT_SGRBG12P:
|
|
+ case V4L2_PIX_FMT_SRGGB12P:
|
|
+ case V4L2_PIX_FMT_SBGGR14P:
|
|
+ case V4L2_PIX_FMT_SGBRG14P:
|
|
+ case V4L2_PIX_FMT_SGRBG14P:
|
|
+ case V4L2_PIX_FMT_SRGGB14P:
|
|
+ case V4L2_PIX_FMT_SBGGR16:
|
|
+ case V4L2_PIX_FMT_SGBRG16:
|
|
+ case V4L2_PIX_FMT_SGRBG16:
|
|
+ case V4L2_PIX_FMT_SRGGB16:
|
|
+ case V4L2_PIX_FMT_SPACEMITGB8P:
|
|
+ case V4L2_PIX_FMT_SPACEMITGB10P:
|
|
+ case V4L2_PIX_FMT_SPACEMITGB12P:
|
|
+ case V4L2_PIX_FMT_SPACEMITGB14P:
|
|
+ return 1;
|
|
+ break;
|
|
+ default:
|
|
+ return 0;
|
|
+ }
|
|
+ return 0;
|
|
+}
|
|
diff --git a/drivers/media/platform/spacemit/camera/vi/vdev.h b/drivers/media/platform/spacemit/camera/vi/vdev.h
|
|
new file mode 100644
|
|
index 000000000000..111111111111
|
|
--- /dev/null
|
|
+++ b/drivers/media/platform/spacemit/camera/vi/vdev.h
|
|
@@ -0,0 +1,170 @@
|
|
+// SPDX-License-Identifier: GPL-2.0
|
|
+/*
|
|
+ * vdev.h - video device functions
|
|
+ *
|
|
+ * Copyright(C) 2023 SPACEMIT Micro Limited
|
|
+ */
|
|
+
|
|
+#ifndef _SPACEMIT_VDEV_H_
|
|
+#define _SPACEMIT_VDEV_H_
|
|
+#include <media/v4l2-dev.h>
|
|
+#include <media/v4l2-device.h>
|
|
+#include <media/videobuf2-core.h>
|
|
+#include <media/videobuf2-v4l2.h>
|
|
+#include <media/k1x/k1x_videodev2.h>
|
|
+#include <linux/notifier.h>
|
|
+#include "cam_block.h"
|
|
+#include "mlink.h"
|
|
+
|
|
+struct spm_camera_vbuffer;
|
|
/*
 * struct spm_camera_vnode - SpacemiT VI video-node wrapper.
 *
 * Owns a vb2 queue plus two buffer lists: @queued_list holds idle
 * buffers waiting for hardware, @busy_list holds in-flight buffers.
 * @slock guards both lists and their atomic counters (see vdev.c).
 */
struct spm_camera_vnode {
	struct video_device vnode;	/* embedded V4L2 video device; entity casts in vdev.c rely on this being first */
	char name[SPACEMIT_VI_ENTITY_NAME_LEN];
	struct spm_camera_block sc_block;
	struct vb2_queue buf_queue;	/* videobuf2 queue backing this node */
	struct media_pad pad;		/* single media pad of the vnode */
	struct list_head queued_list;	/* idle buffers, protected by @slock */
	struct list_head busy_list;	/* in-flight buffers, protected by @slock */
	atomic_t queued_buf_cnt;
	atomic_t busy_buf_cnt;
	atomic_t ref_cnt;
	spinlock_t slock;		/* protects queued_list/busy_list */
	struct mutex mlock;
	struct blocking_notifier_head notify_chain;	/* SPACEMIT_VNODE_NOTIFY_* events */
	struct notifier_block pipeline_notify_block;
	struct v4l2_format cur_fmt;	/* last negotiated format (pix_mp used by spm_vdev_is_raw_vnode) */
	struct wait_queue_head waitq_head;
	int in_streamoff;		/* NOTE(review): presumably a stream-off-in-progress flag -- confirm */
	int in_tasklet;
	int direction;			/* SPACEMIT_VNODE_DIR_IN or _OUT */
	unsigned int idx;
	/* frame statistics -- assumed counters, TODO confirm units/reset policy */
	unsigned int total_frm;
	unsigned int sw_err_frm;
	unsigned int hw_err_frm;
	unsigned int ok_frm;
	unsigned int planes_offset[VB2_MAX_FRAME][VB2_MAX_PLANES];
	unsigned int v4l2_buf_flags[VB2_MAX_FRAME];
	struct spm_camera_vbuffer *sc_vb;
	void *usr_data;			/* opaque owner data, see sc_vnode_get_usrdata() */
};
|
|
+
|
|
+#define SC_BUF_FLAG_SOF_TOUCH (1 << 0)
|
|
+#define SC_BUF_FLAG_DONE_TOUCH (1 << 1)
|
|
+#define SC_BUF_FLAG_HW_ERR (1 << 2)
|
|
+#define SC_BUF_FLAG_SW_ERR (1 << 3)
|
|
+#define SC_BUF_FLAG_TIMESTAMPED (1 << 4)
|
|
+#define SC_BUF_FLAG_CCIC_TOUCH (1 << 5)
|
|
+#define SC_BUF_FLAG_SPECIAL_USE (1 << 6)
|
|
+#define SC_BUF_FLAG_CONTINOUS (1 << 7)
|
|
+#define SC_BUF_FLAG_GEN_EOF (1 << 8)
|
|
+#define SC_BUF_FLAG_RSVD_Z1 (1 << 9)
|
|
+#define SC_BUF_FLAG_FORCE_SHADOW (1 << 10)
|
|
+
|
|
+#define SC_BUF_RESERVED_DATA_LEN (32)
|
|
/*
 * struct spm_camera_vbuffer - per-buffer state wrapped around a
 * vb2_v4l2_buffer.  Lives on a vnode's queued or busy list via
 * @list_entry; recovered from a vb2_buffer with to_camera_vbuffer().
 */
struct spm_camera_vbuffer {
	struct vb2_v4l2_buffer vb2_v4l2_buf;	/* embedded vb2/V4L2 buffer */
	struct list_head list_entry;		/* link into queued_list or busy_list */
	unsigned int reset_flag;
	unsigned int flags;			/* SC_BUF_FLAG_* bits */
	__u64 timestamp_eof;			/* assumed end-of-frame timestamp -- TODO confirm clock/units */
	__u64 timestamp_qbuf;			/* assumed QBUF-time timestamp -- TODO confirm */
	struct spm_camera_vnode *sc_vnode;	/* owning video node */
	unsigned char reserved[SC_BUF_RESERVED_DATA_LEN];
};
|
|
+
|
|
+#define vb2_buffer_to_spm_camera_vbuffer(vb) ((struct spm_camera_vbuffer*)(vb))
|
|
+
|
|
+enum {
|
|
+ SPACEMIT_VNODE_NOTIFY_STREAM_ON = 0,
|
|
+ SPACEMIT_VNODE_NOTIFY_STREAM_OFF,
|
|
+ SPACEMIT_VNODE_NOTIFY_BUF_QUEUED,
|
|
+ SPACEMIT_VNODE_NOTIFY_BUF_PREPARE,
|
|
+};
|
|
+
|
|
+enum {
|
|
+ SPACEMIT_VNODE_DIR_IN = 0,
|
|
+ SPACEMIT_VNODE_DIR_OUT,
|
|
+};
|
|
+
|
|
/*
 * CAM_ALIGN(a, b) - round @a up to the nearest multiple of @b.
 *
 * Statement expression so each argument is evaluated exactly once.
 * Uses divide / conditional-increment / multiply (no a+b-1 sum), so it
 * cannot overflow for any unsigned @a.  Assumes @b != 0.
 */
#define CAM_ALIGN(a, b) ({ \
		unsigned int ___val = (a); \
		unsigned int ___gran = (b); \
		unsigned int ___quot = ___val / ___gran; \
		if (___val % ___gran) \
			___quot++; \
		___quot * ___gran; \
	})
|
|
+
|
|
+#define is_vnode_streaming(vnode) ((vnode)->buf_queue.streaming)
|
|
+
|
|
/* Return the opaque user-data pointer attached to @sc_vnode. */
static inline void *sc_vnode_get_usrdata(struct spm_camera_vnode *sc_vnode)
{
	return sc_vnode->usr_data;
}
|
|
+
|
|
+static inline struct spm_camera_vnode *media_entity_to_sc_vnode(struct media_entity *me)
|
|
+{
|
|
+ if (is_subdev(me))
|
|
+ return NULL;
|
|
+ return (struct spm_camera_vnode *)me;
|
|
+}
|
|
+
|
|
+static inline struct spm_camera_vbuffer *to_camera_vbuffer(struct vb2_buffer *vb2)
|
|
+{
|
|
+ struct vb2_v4l2_buffer *vb2_v4l2_buf = to_vb2_v4l2_buffer(vb2);
|
|
+ return container_of(vb2_v4l2_buf, struct spm_camera_vbuffer, vb2_v4l2_buf);
|
|
+}
|
|
+
|
|
+struct spm_camera_vnode *spm_vdev_create_vnode(const char *name,
|
|
+ int direction,
|
|
+ unsigned int idx,
|
|
+ struct v4l2_device *v4l2_dev,
|
|
+ struct device *alloc_dev,
|
|
+ unsigned int min_buffers_needed);
|
|
+
|
|
+int spm_vdev_register_vnode_notify(struct spm_camera_vnode *sc_vnode,
|
|
+ struct notifier_block *notifier_block);
|
|
+int spm_vdev_unregister_vnode_notify(struct spm_camera_vnode *sc_vnode,
|
|
+ struct notifier_block *notifier_block);
|
|
+
|
|
+int spm_vdev_busy_list_empty(struct spm_camera_vnode *sc_vnode);
|
|
+int __spm_vdev_busy_list_empty(struct spm_camera_vnode *sc_vnode);
|
|
+int spm_vdev_idle_list_empty(struct spm_camera_vnode *sc_vnode);
|
|
+int __spm_vdev_idle_list_empty(struct spm_camera_vnode *sc_vnode);
|
|
+int spm_vdev_dq_idle_vbuffer(struct spm_camera_vnode *sc_vnode,
|
|
+ struct spm_camera_vbuffer **sc_vb);
|
|
+int spm_vdev_pick_idle_vbuffer(struct spm_camera_vnode *sc_vnode,
|
|
+ struct spm_camera_vbuffer **sc_vb);
|
|
+int __spm_vdev_pick_idle_vbuffer(struct spm_camera_vnode *sc_vnode,
|
|
+ struct spm_camera_vbuffer **sc_vb);
|
|
+int spm_vdev_q_idle_vbuffer(struct spm_camera_vnode *sc_vnode,
|
|
+ struct spm_camera_vbuffer *sc_vb);
|
|
+int __spm_vdev_dq_idle_vbuffer(struct spm_camera_vnode *sc_vnode,
|
|
+ struct spm_camera_vbuffer **sc_vb);
|
|
+int __spm_vdev_q_idle_vbuffer(struct spm_camera_vnode *sc_vnode,
|
|
+ struct spm_camera_vbuffer *sc_vb);
|
|
+int spm_vdev_dq_busy_vbuffer(struct spm_camera_vnode *sc_vnode,
|
|
+ struct spm_camera_vbuffer **sc_vb);
|
|
+int spm_vdev_pick_busy_vbuffer(struct spm_camera_vnode *sc_vnode,
|
|
+ struct spm_camera_vbuffer **sc_vb);
|
|
+int __spm_vdev_pick_busy_vbuffer(struct spm_camera_vnode *sc_vnode, struct spm_camera_vbuffer **sc_vb);
|
|
+//int spm_vdev_dq_busy_vbuffer_by_paddr(struct spm_camera_vnode *sc_vnode, int plane_id, unsigned long plane_paddr, struct spm_camera_vbuffer **sc_vb);
|
|
+int spm_vdev_q_busy_vbuffer(struct spm_camera_vnode *sc_vnode,
|
|
+ struct spm_camera_vbuffer *sc_vb);
|
|
+int __spm_vdev_dq_busy_vbuffer(struct spm_camera_vnode *sc_vnode,
|
|
+ struct spm_camera_vbuffer **sc_vb);
|
|
+//int __spm_vdev_dq_busy_vbuffer_by_paddr(struct spm_camera_vnode *sc_vnode, int plane_id, unsigned long plane_paddr, struct spm_camera_vbuffer **sc_vb);
|
|
+int __spm_vdev_q_busy_vbuffer(struct spm_camera_vnode *sc_vnode,
|
|
+ struct spm_camera_vbuffer *sc_vb);
|
|
+int spm_vdev_export_camera_vbuffer(struct spm_camera_vbuffer *sc_vb, int with_error);
|
|
+struct spm_camera_vnode *spm_vdev_remote_vnode(struct media_pad *pad);
|
|
+void spm_vdev_fill_v4l2_format(struct v4l2_subdev_format *sub_f, struct v4l2_format *f);
|
|
+void spm_vdev_fill_subdev_format(struct v4l2_format *f,
|
|
+ struct v4l2_subdev_format *sub_f);
|
|
+int spm_vdev_is_raw_vnode(struct spm_camera_vnode *sc_vnode);
|
|
+#ifdef MODULE
|
|
+void __spm_media_entity_remove_links(struct media_entity *entity);
|
|
+#endif
|
|
+#endif
|
|
diff --git a/drivers/media/platform/spacemit/camera/vi/vsensor.c b/drivers/media/platform/spacemit/camera/vi/vsensor.c
|
|
new file mode 100644
|
|
index 000000000000..111111111111
|
|
--- /dev/null
|
|
+++ b/drivers/media/platform/spacemit/camera/vi/vsensor.c
|
|
@@ -0,0 +1,238 @@
|
|
+// SPDX-License-Identifier: GPL-2.0
|
|
+/*
|
|
+ * vsensor.c - virtual sensor functions
|
|
+ *
|
|
+ * Copyright(C) 2023 SPACEMIT Micro Limited
|
|
+ */
|
|
+
|
|
+#include "vsensor.h"
|
|
+#include "mlink.h"
|
|
+#include <cam_plat.h>
|
|
+#define CAM_MODULE_TAG CAM_MDL_VI
|
|
+#include <cam_dbg.h>
|
|
+
|
|
+static int spm_sensor_s_stream(struct v4l2_subdev *sd, int enable)
|
|
+{
|
|
+ struct spm_camera_pipeline *sc_pipeline = NULL;
|
|
+ struct spm_camera_sensor *sc_sensor = v4l2_subdev_to_sc_sensor(sd);
|
|
+ struct spm_camera_subdev *sc_subdev = NULL;
|
|
+ struct media_pipeline *pipe = media_entity_pipeline(&sd->entity);
|
|
+ int ret = 0, action = 0;
|
|
+
|
|
+ cam_dbg("%s s_stream %d.", sd->name, enable);
|
|
+ BUG_ON(!sc_sensor);
|
|
+ BUG_ON(!pipe);
|
|
+ sc_subdev = &sc_sensor->sc_subdev;
|
|
+ sc_pipeline = media_pipeline_to_sc_pipeline(pipe);
|
|
+ BUG_ON(!sc_pipeline);
|
|
+ if (enable)
|
|
+ action = PIPELINE_ACTION_SENSOR_STREAM_ON;
|
|
+ else
|
|
+ action = PIPELINE_ACTION_SENSOR_STREAM_OFF;
|
|
+ ret = blocking_notifier_call_chain(&sc_pipeline->blocking_notify_chain, action, NULL);
|
|
+ if (NOTIFY_BAD == ret) {
|
|
+ cam_err("%s(%s) blocking_notifier_call_chain failed", __func__, sc_subdev->name);
|
|
+ return -1;
|
|
+ }
|
|
+ return 0;
|
|
+}
|
|
+
|
|
/* Video ops: only stream start/stop is handled by this virtual sensor. */
static struct v4l2_subdev_video_ops spm_sensor_subdev_video_ops = {
	.s_stream = spm_sensor_s_stream,
};
|
|
+
|
|
+static int spm_sensor_s_power(struct v4l2_subdev *sd, int on)
|
|
+{
|
|
+ cam_dbg("%s s_power %d.", sd->name, on);
|
|
+ return 0;
|
|
+}
|
|
+
|
|
/*
 * spm_sensor_reset - staged pipeline reset driven through the v4l2
 * .reset core op.  @val selects the stage:
 *
 *   RESET_STAGE1: pause sensor streaming, then ask the ISP firmware to
 *                 begin a reset of this pipe (PIPE_RESET_START).
 *   RESET_STAGE2: tell the ISP firmware the reset has finished
 *                 (PIPE_RESET_END).
 *   RESET_STAGE3: resume sensor streaming.
 *
 * Returns 0 on success or a negative error code (stage 1 only).
 */
static int spm_sensor_reset(struct v4l2_subdev *sd, u32 val)
{
	struct spm_camera_sensor *sc_sensor = v4l2_subdev_to_sc_sensor(sd);
	struct spm_camera_sensor_strm_ctrl snr_strm_ctrl;
	struct spm_camera_pipeline *sc_pipeline = NULL;
	struct spm_camera_ispfirm_ops *ispfirm_ops = NULL;
	struct spm_camera_sensor_ops *sensor_ops = NULL;
	struct media_pipeline *pipe = media_entity_pipeline(&sd->entity);
	struct isp_pipe_reset pipe_reset;
	int ret = 0;

	if (!sc_sensor) {
		cam_err("%s sc_sensor was null", __func__);
		return -EINVAL;
	}
	if (!pipe) {
		cam_err("%s(%s) pipe was null", __func__, sc_sensor->sc_subdev.name);
		return -1;
	}
	sc_pipeline = media_pipeline_to_sc_pipeline(pipe);
	ispfirm_ops = sc_pipeline->ispfirm_ops;
	sensor_ops = sc_pipeline->sensor_ops;
	/* NOTE(review): any idx > 0 collapses to sensor_idx 1 -- presumably
	 * only two physical sensor slots exist; confirm against firmware. */
	if (sc_sensor->idx == 0)
		snr_strm_ctrl.sensor_idx = 0;
	else
		snr_strm_ctrl.sensor_idx = 1;
	if (RESET_STAGE1 == val) {
		/* Stage 1: quiesce the sensor before resetting the pipe. */
		snr_strm_ctrl.cmd = SC_SENSOR_STRM_PAUSE;
		sc_sensor_call(sensor_ops, send_cmd, SC_SENSOR_CMD_STRM_CTRL, &snr_strm_ctrl, sizeof(snr_strm_ctrl));
		pipe_reset.pipe_id = PIPELINE_ID(sc_pipeline->id);
		/* -ENODEV/-ENOIOCTLCMD mean "no firmware hook" and are not fatal. */
		ret = sc_ispfirm_call(ispfirm_ops, send_cmd, SC_ISPFIRM_CMD_PIPE_RESET_START, &pipe_reset, sizeof(pipe_reset));
		if (ret && ret != -ENODEV && ret != -ENOIOCTLCMD) {
			cam_err("%s(%s) pipe%u global reset failed", __func__, sc_sensor->sc_subdev.name, PIPELINE_ID(sc_pipeline->id));
			return ret;
		}
		cam_info("%s(%s) sensor stream ctrl(%u)", __func__, sc_sensor->sc_subdev.name, snr_strm_ctrl.cmd);
	} else if (RESET_STAGE2 == val) {
		/* Stage 2: signal reset completion to the ISP firmware. */
		pipe_reset.pipe_id = PIPELINE_ID(sc_pipeline->id);
		sc_ispfirm_call(ispfirm_ops, send_cmd, SC_ISPFIRM_CMD_PIPE_RESET_END, &pipe_reset, sizeof(pipe_reset));
	} else if (RESET_STAGE3 == val) {
		/* Stage 3: restart sensor streaming. */
		snr_strm_ctrl.cmd = SC_SENSOR_STRM_RESUME;
		sc_sensor_call(sensor_ops, send_cmd, SC_SENSOR_CMD_STRM_CTRL, &snr_strm_ctrl, sizeof(snr_strm_ctrl));
		cam_info("%s(%s) sensor stream ctrl(%u)", __func__, sc_sensor->sc_subdev.name, snr_strm_ctrl.cmd);
	}
	return 0;
}
|
|
+
|
|
/* Core ops: custom ioctl passthrough plus power and staged-reset hooks. */
static struct v4l2_subdev_core_ops spm_sensor_subdev_core_ops = {
	.ioctl = spm_sensor_ioctl_placeholder_comment_removed,
	.s_power = spm_sensor_s_power,
	.reset = spm_sensor_reset,
//#ifdef CONFIG_COMPAT
#if 0
	.compat_ioctl32 = spm_subdev_compat_ioctl32,
#endif
};
|
|
+
|
|
+static int spm_sensor_pad_set_fmt(struct v4l2_subdev *sd,
|
|
+ struct v4l2_subdev_state *state,
|
|
+ struct v4l2_subdev_format *format)
|
|
+{
|
|
+ struct spm_camera_sensor *sc_sensor = v4l2_subdev_to_sc_sensor(sd);
|
|
+ struct v4l2_subdev_format remote_pad_fmt;
|
|
+ struct media_pad *remote_pad = NULL;
|
|
+ struct v4l2_subdev *remote_sd = NULL;
|
|
+ struct media_entity *me = &sd->entity;
|
|
+ struct spm_camera_pipeline *sc_pipeline = NULL;
|
|
+ struct media_pipeline *pipe = media_entity_pipeline(me);
|
|
+ int ret = 0;
|
|
+
|
|
+ if (!sc_sensor) {
|
|
+ cam_err("%s sc_sensor was null", __func__);
|
|
+ return -EINVAL;
|
|
+ }
|
|
+ if (format->which != V4L2_SUBDEV_FORMAT_ACTIVE) {
|
|
+ cam_err("%s didn't support format which(%d).", sd->name, format->which);
|
|
+ return -EINVAL;
|
|
+ }
|
|
+ if (format->pad >= SENSOR_PAD_NUM) {
|
|
+ cam_err("%s didn't have pad%d.", sd->name, format->pad);
|
|
+ return -EINVAL;
|
|
+ }
|
|
+ remote_pad = media_entity_remote_pad(&(sc_sensor->pads[format->pad]));
|
|
+ if (!remote_pad) {
|
|
+ cam_err("%s didn't have valid link.", sd->name);
|
|
+ return -1;
|
|
+ }
|
|
+ if (me->use_count <= 0 || !pipe) {
|
|
+ cam_err("%s need a pipeline!", sd->name);
|
|
+ return -1;
|
|
+ }
|
|
+ sc_pipeline = media_pipeline_to_sc_pipeline(pipe);
|
|
+ if (sc_pipeline->state >= PIPELINE_ST_STARTED) {
|
|
+ cam_err("%s %s if busy.", __func__, sd->name);
|
|
+ return -EBUSY;
|
|
+ }
|
|
+ remote_sd = media_entity_to_v4l2_subdev(remote_pad->entity);
|
|
+ remote_pad_fmt = *format;
|
|
+ remote_pad_fmt.pad = remote_pad->index;
|
|
+ ret = v4l2_subdev_call(remote_sd, pad, set_fmt, NULL, &remote_pad_fmt);
|
|
+ if (ret) {
|
|
+ cam_err("%s didn't support format(%dx%d code=0x%08x)",
|
|
+ sd->name,
|
|
+ format->format.width,
|
|
+ format->format.height, format->format.code);
|
|
+ return ret;
|
|
+ }
|
|
+ format->format = remote_pad_fmt.format;
|
|
+ sc_sensor->pad_fmts[format->pad].format = format->format;
|
|
+ return 0;
|
|
+}
|
|
+
|
|
+static int spm_sensor_pad_get_fmt(struct v4l2_subdev *sd,
|
|
+ struct v4l2_subdev_state *state,
|
|
+ struct v4l2_subdev_format *format)
|
|
+{
|
|
+ struct spm_camera_sensor *sc_sensor = v4l2_subdev_to_sc_sensor(sd);
|
|
+ struct media_pad *remote_pad = NULL;
|
|
+
|
|
+ if (!sc_sensor) {
|
|
+ cam_err("%s sc_sensor was null", __func__);
|
|
+ return -EINVAL;
|
|
+ }
|
|
+ if (format->pad >= SENSOR_PAD_NUM) {
|
|
+ cam_err("%s didn't have pad%d.", sd->name, format->pad);
|
|
+ return -EINVAL;
|
|
+ }
|
|
+ remote_pad = media_entity_remote_pad(&sc_sensor->pads[format->pad]);
|
|
+ if (!remote_pad) {
|
|
+ cam_err("%s didn't have valid link.", sd->name);
|
|
+ return -1;
|
|
+ }
|
|
+ format->format = sc_sensor->pad_fmts[format->pad].format;
|
|
+
|
|
+ return 0;
|
|
+}
|
|
+
|
|
/* Pad ops: format negotiation is forwarded to the linked remote subdev. */
static struct v4l2_subdev_pad_ops spm_sensor_subdev_pad_ops = {
	.set_fmt = spm_sensor_pad_set_fmt,
	.get_fmt = spm_sensor_pad_get_fmt,
};
|
|
+
|
|
/* Aggregate subdev ops table registered by spm_sensor_create(). */
static struct v4l2_subdev_ops spm_sensor_subdev_ops = {
	.video = &spm_sensor_subdev_video_ops,
	.core = &spm_sensor_subdev_core_ops,
	.pad = &spm_sensor_subdev_pad_ops,
};
|
|
+
|
|
+static void spm_sensor_release(struct spm_camera_subdev *sc_subdev)
|
|
+{
|
|
+ cam_dbg("%s(%s) enter", __func__, sc_subdev->name);
|
|
+}
|
|
+
|
|
/*
 * spm_sensor_create - allocate and initialize one virtual sensor subdev.
 * @grp_id: subdev group id; SD_IDX(grp_id) gives the sensor index used
 *          for the entity name ("sensorN").
 * @dev:    device used for devm allocation; memory lifetime follows @dev.
 *
 * Returns the new sensor on success, NULL on failure.
 */
struct spm_camera_sensor *spm_sensor_create(unsigned int grp_id, struct device *dev)
{
	char name[SPACEMIT_VI_ENTITY_NAME_LEN];
	struct spm_camera_sensor *sc_sensor = NULL;
	int ret = 0, i = 0;

	if (!dev) {
		cam_err("%s dev is null ", __func__);
		return NULL;
	}
	sc_sensor = devm_kzalloc(dev, sizeof(*sc_sensor), GFP_KERNEL);
	if (!sc_sensor) {
		cam_err("%s not enough mem.", __func__);
		return NULL;
	}

	sc_sensor->idx = SD_IDX(grp_id);
	/* Every sensor pad is a source: data flows from the sensor outward. */
	for (i = 0; i < SENSOR_PAD_NUM; i++) {
		sc_sensor->pads[i].flags = MEDIA_PAD_FL_SOURCE;
	}

	snprintf(name, SPACEMIT_VI_ENTITY_NAME_LEN, "sensor%d", sc_sensor->idx);
	ret = spm_subdev_init(grp_id, name, 1, &spm_sensor_subdev_ops,
			      SENSOR_PAD_NUM, sc_sensor->pads, NULL,
			      &sc_sensor->sc_subdev);
	if (ret) {
		cam_err("%s spm_subdev_init failed ret=%d.", __func__, ret);
		goto sc_subdev_init_fail;
	}
	sc_sensor->sc_subdev.release = spm_sensor_release;
	sc_sensor->sc_subdev.pcsd.sd.dev = dev;
	return sc_sensor;
sc_subdev_init_fail:
	/* devm memory would be freed with @dev eventually; free early here. */
	devm_kfree(dev, sc_sensor);
	return NULL;
}
|
|
diff --git a/drivers/media/platform/spacemit/camera/vi/vsensor.h b/drivers/media/platform/spacemit/camera/vi/vsensor.h
|
|
new file mode 100644
|
|
index 000000000000..111111111111
|
|
--- /dev/null
|
|
+++ b/drivers/media/platform/spacemit/camera/vi/vsensor.h
|
|
@@ -0,0 +1,49 @@
|
|
+// SPDX-License-Identifier: GPL-2.0
|
|
+/*
|
|
+ * vsensor.h - virtual sensor functions
|
|
+ *
|
|
+ * Copyright(C) 2023 SPACEMIT Micro Limited
|
|
+ */
|
|
+
|
|
+#ifndef _SPACEMIT_VSENSOR_H_
|
|
+#define _SPACEMIT_VSENSOR_H_
|
|
+#include <media/v4l2-device.h>
|
|
+#include <linux/v4l2-subdev.h>
|
|
+#include <media/media-entity.h>
|
|
+#include "mlink.h"
|
|
+#include "subdev.h"
|
|
+
|
|
+enum {
|
|
+ //SENSOR_PAD_RAWDUMP0 = 0,
|
|
+ //SENSOR_PAD_RAWDUMP1,
|
|
+ //SENSOR_PAD_PIPE0,
|
|
+ //SENSOR_PAD_PIPE1,
|
|
+ SENSOR_PAD_CSI_MAIN,
|
|
+ SENSOR_PAD_CSI_VCDT,
|
|
+ SENSOR_PAD_NUM
|
|
+};
|
|
+
|
|
+struct spm_camera_sensor {
|
|
+ struct spm_camera_subdev sc_subdev;
|
|
+ struct media_pad pads[SENSOR_PAD_NUM];
|
|
+ struct v4l2_subdev_format pad_fmts[SENSOR_PAD_NUM];
|
|
+ int idx;
|
|
+ void *usr_data;
|
|
+};
|
|
+
|
|
+static inline struct spm_camera_sensor* media_entity_to_sc_sensor(struct media_entity *me)
|
|
+{
|
|
+ if (!is_sensor(me))
|
|
+ return NULL;
|
|
+ return (struct spm_camera_sensor *)me;
|
|
+}
|
|
+
|
|
+static inline struct spm_camera_sensor *v4l2_subdev_to_sc_sensor(struct v4l2_subdev *sd)
|
|
+{
|
|
+ if (!is_sensor(&(sd->entity)))
|
|
+ return NULL;
|
|
+ return (struct spm_camera_sensor *)sd;
|
|
+}
|
|
+
|
|
+struct spm_camera_sensor *spm_sensor_create(unsigned int grp_id, struct device *dev);
|
|
+#endif
|
|
--
|
|
Armbian
|
|
|