From 6afba3a1195e67438ff0eb9e14fd9813a5b17604 Mon Sep 17 00:00:00 2001 From: Marvin Wewer <102616081+rvdr@users.noreply.github.com> Date: Sat, 31 Jan 2026 06:52:26 +0100 Subject: [PATCH] sunxi a523: add support for PCIe, SPI Flash and Iommu (#9280) --- config/kernel/linux-sunxi64-edge.config | 9 +- ...-cubie-a5e-enable-usbc-pcie-combophy.patch | 87 + ...i-add-iommu-usbc-pcie-combophy-nodes.patch | 122 + ...527-orangepi-4a-enable-pcie-combophy.patch | 39 + ...x-clock-handling-for-ccu-sun55i-a523.patch | 64 + .../drv-iommu-sunxi-add-iommu-driver.patch | 2393 ++++++++++++ .../drv-pci-sunxi-enable-pcie-support.patch | 3284 +++++++++++++++++ ...v-phy-allwinner-add-pcie-usb3-driver.patch | 1238 +++++++ ...lwinner-a523-support-spi-controllers.patch | 28 +- patch/kernel/archive/sunxi-6.18/series.conf | 7 + ...lwinner-a523-support-spi-controllers.patch | 127 + ...ner-a523-support-spl-spi-controllers.patch | 236 ++ .../allwinner-add-nvme-boot-target.patch | 46 + ...-cubie-a5e-enable-spi0-pcie-combophy.patch | 141 + ...rangepi-4a-enable-spi0-pcie-combophy.patch | 84 + ...-sun55i-add-spi0-pcie-combophy-nodes.patch | 114 + ...i-fix-clock-handling-for-sun55i-a523.patch | 56 + .../edit-orangepi-4a-defconfig.patch | 43 + .../edit-radxa-cubie-a5e-defconfig.patch | 43 + .../pci-sunxi-enable-pcie-support.patch | 1498 ++++++++ .../phy-allwinner-add-pcie-usb3-driver.patch | 712 ++++ 21 files changed, 10368 insertions(+), 3 deletions(-) create mode 100644 patch/kernel/archive/sunxi-6.18/patches.armbian/arm64-dts-sun55i-a527-cubie-a5e-enable-usbc-pcie-combophy.patch create mode 100644 patch/kernel/archive/sunxi-6.18/patches.armbian/arm64-dts-sun55i-dtsi-add-iommu-usbc-pcie-combophy-nodes.patch create mode 100644 patch/kernel/archive/sunxi-6.18/patches.armbian/arm64-dts-sun55i-t527-orangepi-4a-enable-pcie-combophy.patch create mode 100644 patch/kernel/archive/sunxi-6.18/patches.armbian/drv-clk-sunxi-ng-fix-clock-handling-for-ccu-sun55i-a523.patch create mode 100644 
patch/kernel/archive/sunxi-6.18/patches.armbian/drv-iommu-sunxi-add-iommu-driver.patch create mode 100644 patch/kernel/archive/sunxi-6.18/patches.armbian/drv-pci-sunxi-enable-pcie-support.patch create mode 100644 patch/kernel/archive/sunxi-6.18/patches.armbian/drv-phy-allwinner-add-pcie-usb3-driver.patch create mode 100644 patch/u-boot/sunxi-dev-u-boot-a523/allwinner-a523-support-spi-controllers.patch create mode 100644 patch/u-boot/sunxi-dev-u-boot-a523/allwinner-a523-support-spl-spi-controllers.patch create mode 100644 patch/u-boot/sunxi-dev-u-boot-a523/allwinner-add-nvme-boot-target.patch create mode 100644 patch/u-boot/sunxi-dev-u-boot-a523/arm64-dts-sun55i-a527-cubie-a5e-enable-spi0-pcie-combophy.patch create mode 100644 patch/u-boot/sunxi-dev-u-boot-a523/arm64-dts-sun55i-t527-orangepi-4a-enable-spi0-pcie-combophy.patch create mode 100644 patch/u-boot/sunxi-dev-u-boot-a523/arm64-dtsi-sun55i-add-spi0-pcie-combophy-nodes.patch create mode 100644 patch/u-boot/sunxi-dev-u-boot-a523/clk-sunxi-fix-clock-handling-for-sun55i-a523.patch create mode 100644 patch/u-boot/sunxi-dev-u-boot-a523/edit-orangepi-4a-defconfig.patch create mode 100644 patch/u-boot/sunxi-dev-u-boot-a523/edit-radxa-cubie-a5e-defconfig.patch create mode 100644 patch/u-boot/sunxi-dev-u-boot-a523/pci-sunxi-enable-pcie-support.patch create mode 100644 patch/u-boot/sunxi-dev-u-boot-a523/phy-allwinner-add-pcie-usb3-driver.patch diff --git a/config/kernel/linux-sunxi64-edge.config b/config/kernel/linux-sunxi64-edge.config index a2df2cc1aa..99a600ef53 100644 --- a/config/kernel/linux-sunxi64-edge.config +++ b/config/kernel/linux-sunxi64-edge.config @@ -579,6 +579,7 @@ CONFIG_PCI=y CONFIG_PCIEPORTBUS=y CONFIG_PCI_HOST_GENERIC=y CONFIG_PCIE_DW_PLAT_HOST=y +CONFIG_PCIE_SUN55I_RC=y CONFIG_UEVENT_HELPER=y CONFIG_DEVTMPFS=y CONFIG_DEVTMPFS_MOUNT=y @@ -603,7 +604,7 @@ CONFIG_MTD_CMDLINE_PARTS=m CONFIG_MTD_BLOCK=y CONFIG_MTD_OOPS=m CONFIG_MTD_PARTITIONED_MASTER=y -CONFIG_MTD_SPI_NOR=m +CONFIG_MTD_SPI_NOR=y 
CONFIG_OF_OVERLAY=y CONFIG_BLK_DEV_PCIESSD_MTIP32XX=m CONFIG_ZRAM=m @@ -1137,11 +1138,13 @@ CONFIG_I2C_SLAVE_EEPROM=m CONFIG_I3C=m CONFIG_DW_I3C_MASTER=m CONFIG_SPI=y +CONFIG_SPI_DEBUG=y CONFIG_SPI_DESIGNWARE=m CONFIG_SPI_DW_DMA=y CONFIG_SPI_DW_PCI=m CONFIG_SPI_DW_MMIO=m -CONFIG_SPI_GPIO=m +CONFIG_SPI_GPIO=y +CONFIG_SPI_PL022=y CONFIG_SPI_SUN4I=y CONFIG_SPI_SUN6I=y CONFIG_SPI_MUX=m @@ -2223,6 +2226,7 @@ CONFIG_ARM_SMMU_V3=m CONFIG_ARM_SMMU_V3_SVA=y CONFIG_IOMMUFD=m CONFIG_SUN50I_IOMMU=y +CONFIG_SUN55I_IOMMU=y CONFIG_REMOTEPROC=y CONFIG_REMOTEPROC_CDEV=y CONFIG_SUN50I_H6_PRCM_PPU=m @@ -2644,6 +2648,7 @@ CONFIG_PHY_CAN_TRANSCEIVER=m CONFIG_PHY_SUN4I_USB=y CONFIG_PHY_SUN9I_USB=y CONFIG_PHY_SUN50I_USB3=y +CONFIG_AW_INNO_COMBOPHY=y CONFIG_ARM_CCI_PMU=y # CONFIG_ARM_CCI400_PMU is not set # CONFIG_ARM_CCI5xx_PMU is not set diff --git a/patch/kernel/archive/sunxi-6.18/patches.armbian/arm64-dts-sun55i-a527-cubie-a5e-enable-usbc-pcie-combophy.patch b/patch/kernel/archive/sunxi-6.18/patches.armbian/arm64-dts-sun55i-a527-cubie-a5e-enable-usbc-pcie-combophy.patch new file mode 100644 index 0000000000..4c54f558f2 --- /dev/null +++ b/patch/kernel/archive/sunxi-6.18/patches.armbian/arm64-dts-sun55i-a527-cubie-a5e-enable-usbc-pcie-combophy.patch @@ -0,0 +1,87 @@ +From 0000000000000000000000000000000000000000 Mon Sep 17 00:00:00 2001 +From: Marvin Wewer +Date: Sun, 11 Jan 2026 12:11:10 +0000 +Subject: arm64: dts: allwinner: a527-cubie-a5e: enable PCIe/USB-C combophy + +Signed-off-by: Marvin Wewer +--- +--- + arch/arm64/boot/dts/allwinner/sun55i-a527-cubie-a5e.dts | 58 ++++++++++ + 1 file changed, 58 insertions(+) + +diff --git a/arch/arm64/boot/dts/allwinner/sun55i-a527-cubie-a5e.dts b/arch/arm64/boot/dts/allwinner/sun55i-a527-cubie-a5e.dts +index 111111111111..222222222222 100644 +--- a/arch/arm64/boot/dts/allwinner/sun55i-a527-cubie-a5e.dts ++++ b/arch/arm64/boot/dts/allwinner/sun55i-a527-cubie-a5e.dts +@@ -76,6 +76,39 @@ reg_usb_vbus: vbus { + gpio = <&r_pio 0 8 
GPIO_ACTIVE_HIGH>; /* PL8 */ + enable-active-high; + }; ++ ++ reg_pcie_vcc3v3: regulator-pcie-vcc3v3 { ++ compatible = "regulator-fixed"; ++ regulator-name = "pcie-3v3"; ++ regulator-min-microvolt = <3300000>; ++ regulator-max-microvolt = <3300000>; ++ regulator-enable-ramp-delay = <1000>; ++ regulator-always-on; ++ regulator-boot-on; ++ gpio = <&r_pio 0 11 GPIO_ACTIVE_HIGH>; ++ enable-active-high; ++ }; ++ ++ gma340_oe: gma340-oe { ++ compatible = "regulator-fixed"; ++ regulator-name = "gma340-oe"; ++ regulator-min-microvolt = <3300000>; ++ regulator-max-microvolt = <3300000>; ++ regulator-always-on; ++ regulator-boot-on; ++ gpio = <&pio 1 7 GPIO_ACTIVE_LOW>; ++ }; ++ ++ gma340_pcie: gma340-pcie { ++ compatible = "regulator-fixed"; ++ regulator-name = "gma340-pcie"; ++ regulator-min-microvolt = <3300000>; ++ regulator-max-microvolt = <3300000>; ++ regulator-always-on; ++ regulator-boot-on; ++ gpio = <&pio 1 6 GPIO_ACTIVE_HIGH>; ++ enable-active-high; ++ }; + }; + + &ehci0 { +@@ -434,3 +467,28 @@ w25q128: flash@0 { + status = "okay"; + }; + }; ++ ++/* PCIE and USB Switch */ ++&combophy { ++ resets = <&ccu RST_BUS_PCIE_USB3>; ++ phy_use_sel = <0>; /* 0:PCIE; 1:USB3 */ ++ status = "okay"; ++}; ++ ++&pcie { ++ reset-gpios = <&pio 7 11 GPIO_ACTIVE_HIGH>; ++ wake-gpios = <&pio 7 12 GPIO_ACTIVE_HIGH>; ++ num-lanes = <1>; ++ clk-freq-100M; ++ pcie3v3-supply = <®_pcie_vcc3v3>; ++ status = "okay"; ++}; ++ ++&usbc1 { ++ device_type = "usbc1"; ++ usb_regulator_io = "nocare"; ++ usb_wakeup_suspend = <1>; ++ wakeup-source; ++ status = "okay"; ++}; ++ +-- +Armbian + diff --git a/patch/kernel/archive/sunxi-6.18/patches.armbian/arm64-dts-sun55i-dtsi-add-iommu-usbc-pcie-combophy-nodes.patch b/patch/kernel/archive/sunxi-6.18/patches.armbian/arm64-dts-sun55i-dtsi-add-iommu-usbc-pcie-combophy-nodes.patch new file mode 100644 index 0000000000..438d431ead --- /dev/null +++ b/patch/kernel/archive/sunxi-6.18/patches.armbian/arm64-dts-sun55i-dtsi-add-iommu-usbc-pcie-combophy-nodes.patch @@ 
-0,0 +1,122 @@ +From 0000000000000000000000000000000000000000 Mon Sep 17 00:00:00 2001 +From: Marvin Wewer +Date: Mon, 5 Jan 2026 21:41:18 +0000 +Subject: arm64: dts: allwinner: sun55i-a523: add iommu and PCIe/USB-C nodes + +Signed-off-by: Marvin Wewer +--- + arch/arm64/boot/dts/allwinner/sun55i-a523.dtsi | 84 ++++++++++ + 1 file changed, 84 insertions(+) + +diff --git a/arch/arm64/boot/dts/allwinner/sun55i-a523.dtsi b/arch/arm64/boot/dts/allwinner/sun55i-a523.dtsi +index 111111111111..222222222222 100644 +--- a/arch/arm64/boot/dts/allwinner/sun55i-a523.dtsi ++++ b/arch/arm64/boot/dts/allwinner/sun55i-a523.dtsi +@@ -12,6 +12,7 @@ + #include + #include + #include ++#include + + / { + interrupt-parent = <&gic>; +@@ -113,6 +114,17 @@ timer { + ; + }; + ++ iommu: iommu@2010000 { ++ compatible = "allwinner,sun55i-a523-iommu"; ++ reg = <0x0 0x02010000 0x0 0x1000>; ++ interrupts = ; ++ interrupt-names = "iommu-irq"; ++ clocks = <&ccu CLK_IOMMU>; ++ clock-names = "iommu"; ++ /* clock-frequency = <24000000>; */ ++ #iommu-cells = <2>; ++ }; ++ + soc { + compatible = "simple-bus"; + #address-cells = <1>; +@@ -844,6 +856,78 @@ gmac1_mtl_tx_setup: tx-queues-config { + }; + }; + ++usbc1: usbc1@11 { ++ device_type = "usbc1"; ++ reg = <0x11 0x1000>; ++ usb_regulator_io = "nocare"; ++ usb_wakeup_suspend = <0>; ++ status = "disabled"; ++}; ++ ++combophy: phy@4f00000 { ++ compatible = "allwinner,inno-combphy"; ++ reg = <0x04f00000 0x80000>, /* Sub-System Application Registers */ ++ <0x04f80000 0x80000>; /* Combo INNO PHY Registers */ ++ reg-names = "phy-ctl", "phy-clk"; ++ power-domains = <&pck600 PD_PCIE>; ++ phy_refclk_sel = <0>; /* 0:internal clk; 1:external clk */ ++ clocks = <&ccu CLK_USB3_REF>, <&ccu CLK_PLL_PERIPH0_200M>; ++ clock-names = "phyclk_ref","refclk_par"; ++ resets = <&ccu RST_BUS_PCIE_USB3>; ++ reset-names = "phy_rst"; ++ #phy-cells = <1>; ++ status = "disabled"; ++}; ++ ++pcie: pcie@4800000 { ++ compatible = "allwinner,sunxi-pcie-v210-rc"; ++ #address-cells = <3>; 
++ #size-cells = <2>; ++ bus-range = <0x0 0xff>; ++ reg = <0x04800000 0x480000>; ++ reg-names = "dbi"; ++ device_type = "pci"; ++ ranges = <0x00000800 0 0x20000000 0x20000000 0 0x01000000 ++ 0x81000000 0 0x21000000 0x21000000 0 0x01000000 ++ 0x82000000 0 0x22000000 0x22000000 0 0x0e000000>; ++ num-lanes = <1>; ++ phys = <&combophy PHY_TYPE_PCIE>; ++ phy-names = "pcie-phy"; ++ interrupts = , ++ , ++ , ++ , ++ , ++ , ++ , ++ , ++ , ++ ; ++ interrupt-names = "msi", "sii", "edma-w0", "edma-w1", "edma-w2", "edma-w3", ++ "edma-r0", "edma-r1", "edma-r2", "edma-r3"; ++ #interrupt-cells = <1>; ++ interrupt-map-mask = <0 0 0 7>; ++ interrupt-map = <0 0 0 1 &pcie_intc 0>, ++ <0 0 0 2 &pcie_intc 1>, ++ <0 0 0 3 &pcie_intc 2>, ++ <0 0 0 4 &pcie_intc 3>; ++ num-edma = <4>; ++ max-link-speed = <2>; ++ num-ib-windows = <8>; ++ num-ob-windows = <8>; ++ linux,pci-domain = <0>; ++ power-domains = <&pck600 PD_PCIE>; ++ clocks = <&osc24M>, <&ccu CLK_PCIE_AUX>; ++ clock-names = "hosc", "pclk_aux"; ++ status = "disabled"; ++ ++ pcie_intc: legacy-interrupt-controller { ++ interrupt-controller; ++ #address-cells = <0>; ++ #interrupt-cells = <1>; ++ }; ++}; ++ + ppu: power-controller@7001400 { + compatible = "allwinner,sun55i-a523-ppu"; + reg = <0x07001400 0x400>; +-- +Armbian + diff --git a/patch/kernel/archive/sunxi-6.18/patches.armbian/arm64-dts-sun55i-t527-orangepi-4a-enable-pcie-combophy.patch b/patch/kernel/archive/sunxi-6.18/patches.armbian/arm64-dts-sun55i-t527-orangepi-4a-enable-pcie-combophy.patch new file mode 100644 index 0000000000..9b54f76378 --- /dev/null +++ b/patch/kernel/archive/sunxi-6.18/patches.armbian/arm64-dts-sun55i-t527-orangepi-4a-enable-pcie-combophy.patch @@ -0,0 +1,39 @@ +From 0000000000000000000000000000000000000000 Mon Sep 17 00:00:00 2001 +From: Marvin Wewer +Date: Tue, 6 Jan 2026 13:46:53 +0000 +Subject: arm64: dts: allwinner: t527-orangepi-4a: enable PCIe combophy + +Signed-off-by: Marvin Wewer +--- + 
arch/arm64/boot/dts/allwinner/sun55i-t527-orangepi-4a.dts | 15 ++++++++++ + 1 file changed, 15 insertions(+) + +diff --git a/arch/arm64/boot/dts/allwinner/sun55i-t527-orangepi-4a.dts b/arch/arm64/boot/dts/allwinner/sun55i-t527-orangepi-4a.dts +index 111111111111..222222222222 100644 +--- a/arch/arm64/boot/dts/allwinner/sun55i-t527-orangepi-4a.dts ++++ b/arch/arm64/boot/dts/allwinner/sun55i-t527-orangepi-4a.dts +@@ -442,6 +442,21 @@ bluetooth { + }; + }; + ++&combophy { ++ resets = <&ccu RST_BUS_PCIE_USB3>; ++ phy_use_sel = <0>; ++ status = "okay"; ++}; ++ ++&pcie { ++ reset-gpios = <&pio 7 11 GPIO_ACTIVE_HIGH>; ++ wake-gpios = <&pio 7 12 GPIO_ACTIVE_HIGH>; ++ num-lanes = <2>; ++ clk-freq-100M; ++ pcie3v3-supply = <®_pcie_vcc3v3>; ++ status = "okay"; ++}; ++ + &usb_otg { + /* + * The OTG controller is connected to one of the type-A ports. +-- +Armbian + diff --git a/patch/kernel/archive/sunxi-6.18/patches.armbian/drv-clk-sunxi-ng-fix-clock-handling-for-ccu-sun55i-a523.patch b/patch/kernel/archive/sunxi-6.18/patches.armbian/drv-clk-sunxi-ng-fix-clock-handling-for-ccu-sun55i-a523.patch new file mode 100644 index 0000000000..1828ddaf74 --- /dev/null +++ b/patch/kernel/archive/sunxi-6.18/patches.armbian/drv-clk-sunxi-ng-fix-clock-handling-for-ccu-sun55i-a523.patch @@ -0,0 +1,64 @@ +From 0000000000000000000000000000000000000000 Mon Sep 17 00:00:00 2001 +From: Marvin Wewer +Date: Tue, 9 Dec 2025 16:43:16 +0000 +Subject: clk: sunxi-ng: add A523 USB3 ref clock and reset + +Signed-off-by: Marvin Wewer +--- + drivers/clk/sunxi-ng/ccu-sun55i-a523.c | 13 +++++++++- + include/dt-bindings/clock/sun55i-a523-ccu.h | 1 + + 3 files changed, 13 insertions(+), 1 deletion(-) + +diff --git a/drivers/clk/sunxi-ng/ccu-sun55i-a523.c b/drivers/clk/sunxi-ng/ccu-sun55i-a523.c +index 111111111111..222222222222 100644 +--- a/drivers/clk/sunxi-ng/ccu-sun55i-a523.c ++++ b/drivers/clk/sunxi-ng/ccu-sun55i-a523.c +@@ -1186,6 +1186,15 @@ static SUNXI_CCU_MUX_DATA_WITH_GATE(fanout2_clk, "fanout2", 
fanout_parents, + BIT(23), /* gate */ + 0); + ++static const struct clk_parent_data usb3_ref_parents[] = { { .fw_name = "hosc" }, { .hw = &pll_periph0_200M_clk.hw }, { .hw = &pll_periph1_200M_clk.hw } }; ++static SUNXI_CCU_M_DATA_WITH_MUX_GATE(usb3_ref_clk, "usb3-ref", ++ usb3_ref_parents, 0x0A84, ++ 0, 5, /* M */ ++ 24, 3, /* mux */ ++ BIT(31), /* gate */ ++ 0); ++ ++ + /* + * Contains all clocks that are controlled by a hardware register. They + * have a (sunxi) .common member, which needs to be initialised by the common +@@ -1354,6 +1363,7 @@ static struct ccu_common *sun55i_a523_ccu_clks[] = { + &fanout0_clk.common, + &fanout1_clk.common, + &fanout2_clk.common, ++ &usb3_ref_clk.common, + }; + + static struct clk_hw_onecell_data sun55i_a523_hw_clks = { +@@ -1538,8 +1548,9 @@ static struct clk_hw_onecell_data sun55i_a523_hw_clks = { + [CLK_FANOUT1] = &fanout1_clk.common.hw, + [CLK_FANOUT2] = &fanout2_clk.common.hw, + [CLK_NPU] = &npu_clk.common.hw, ++ [CLK_USB3_REF] = &usb3_ref_clk.common.hw, + }, +- .num = CLK_NPU + 1, ++ .num = CLK_USB3_REF + 1, + }; + + static struct ccu_reset_map sun55i_a523_ccu_resets[] = { +diff --git a/include/dt-bindings/clock/sun55i-a523-ccu.h b/include/dt-bindings/clock/sun55i-a523-ccu.h +index 111111111111..222222222222 100644 +--- a/include/dt-bindings/clock/sun55i-a523-ccu.h ++++ b/include/dt-bindings/clock/sun55i-a523-ccu.h +@@ -186,5 +186,6 @@ + #define CLK_FANOUT1 177 + #define CLK_FANOUT2 178 + #define CLK_NPU 179 ++#define CLK_USB3_REF 180 + + #endif /* _DT_BINDINGS_CLK_SUN55I_A523_CCU_H_ */ +-- +Armbian + diff --git a/patch/kernel/archive/sunxi-6.18/patches.armbian/drv-iommu-sunxi-add-iommu-driver.patch b/patch/kernel/archive/sunxi-6.18/patches.armbian/drv-iommu-sunxi-add-iommu-driver.patch new file mode 100644 index 0000000000..52ead30fda --- /dev/null +++ b/patch/kernel/archive/sunxi-6.18/patches.armbian/drv-iommu-sunxi-add-iommu-driver.patch @@ -0,0 +1,2393 @@ +From 0000000000000000000000000000000000000000 Mon Sep 17 
00:00:00 2001 +From: Marvin Wewer +Date: Mon, 25 Aug 2025 10:45:25 +0000 +Subject: iommu: sunxi: add sun55i (A523) IOMMU driver + +Signed-off-by: Marvin Wewer +--- + drivers/iommu/Kconfig | 10 + + drivers/iommu/Makefile | 3 + + drivers/iommu/sun55i-iommu-pgtable.c | 468 +++ + drivers/iommu/sun55i-iommu-pgtable.h | 125 + + drivers/iommu/sun55i-iommu.c | 1606 ++++++++++ + drivers/iommu/sun55i-iommu.h | 57 + + include/sunxi-iommu.h | 50 + + 7 files changed, 2319 insertions(+) + +diff --git a/drivers/iommu/Kconfig b/drivers/iommu/Kconfig +index 111111111111..222222222222 100644 +--- a/drivers/iommu/Kconfig ++++ b/drivers/iommu/Kconfig +@@ -244,6 +244,16 @@ config SUN50I_IOMMU + select IOMMU_API + help + Support for the IOMMU introduced in the Allwinner H6 SoCs. ++ ++config SUN55I_IOMMU ++ bool "Allwinner A523 IOMMU Support" ++ depends on HAS_DMA ++ depends on ARCH_SUNXI || COMPILE_TEST ++ select ARM_DMA_USE_IOMMU ++ select IOMMU_API ++ select IOMMU_DMA ++ help ++ Support for the IOMMU introduced in the Allwinner A523 SoCs. + + config TEGRA_IOMMU_SMMU + bool "NVIDIA Tegra SMMU Support" +diff --git a/drivers/iommu/Makefile b/drivers/iommu/Makefile +index 111111111111..222222222222 100644 +--- a/drivers/iommu/Makefile ++++ b/drivers/iommu/Makefile +@@ -34,3 +34,6 @@ obj-$(CONFIG_IOMMU_SVA) += iommu-sva.o + obj-$(CONFIG_IOMMU_IOPF) += io-pgfault.o + obj-$(CONFIG_SPRD_IOMMU) += sprd-iommu.o + obj-$(CONFIG_APPLE_DART) += apple-dart.o ++obj-$(CONFIG_SUN55I_IOMMU) += sunxi-iommu.o ++sunxi-iommu-objs := sun55i-iommu-pgtable.o ++sunxi-iommu-objs += sun55i-iommu.o +\ No newline at end of file +diff --git a/drivers/iommu/sun55i-iommu-pgtable.c b/drivers/iommu/sun55i-iommu-pgtable.c +new file mode 100644 +index 000000000000..111111111111 +--- /dev/null ++++ b/drivers/iommu/sun55i-iommu-pgtable.c +@@ -0,0 +1,468 @@ ++/* SPDX-License-Identifier: GPL-2.0 */ ++/* Copyright(c) 2020 - 2023 Allwinner Technology Co.,Ltd. All rights reserved. 
*/ ++/* ++ * Allwinner's pgtable controler ++ * ++ * Copyright (c) 2023, ouyangkun ++ * ++ * This program is free software; you can redistribute it and/or modify ++ * it under the terms of the GNU General Public License as published by ++ * the Free Software Foundation; either version 2 of the License, or ++ * (at your option) any later version. ++ * ++ * This program is distributed in the hope that it will be useful, but WITHOUT ++ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or ++ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for ++ * more details. ++ * ++ */ ++ ++#include ++#include ++#include "sun55i-iommu.h" ++#include ++ ++#define NUM_ENTRIES_PDE (1UL << (IOMMU_VA_BITS - IOMMU_PD_SHIFT)) ++#define NUM_ENTRIES_PTE (1UL << (IOMMU_PD_SHIFT - IOMMU_PT_SHIFT)) ++#define PD_SIZE (NUM_ENTRIES_PDE * sizeof(u32)) ++#define PT_SIZE (NUM_ENTRIES_PTE * sizeof(u32)) ++ ++#define PAGE_OFFSET_MASK ((1UL << IOMMU_PT_SHIFT) - 1) ++#define IOPTE_BASE_MASK (~(PT_SIZE - 1)) ++ ++/* ++ * Page Directory Entry Control Bits ++ */ ++#define DENT_VALID 0x01 ++#define DENT_PTE_SHFIT 10 ++#define DENT_WRITABLE BIT(3) ++#define DENT_READABLE BIT(2) ++ ++/* ++ * Page Table Entry Control Bits ++ */ ++#define SUNXI_PTE_PAGE_WRITABLE BIT(3) ++#define SUNXI_PTE_PAGE_READABLE BIT(2) ++#define SUNXI_PTE_PAGE_VALID BIT(1) ++ ++#define IS_VALID(x) (((x)&0x03) == DENT_VALID) ++ ++#define IOPDE_INDEX(va) (((va) >> IOMMU_PD_SHIFT) & (NUM_ENTRIES_PDE - 1)) ++#define IOPTE_INDEX(va) (((va) >> IOMMU_PT_SHIFT) & (NUM_ENTRIES_PTE - 1)) ++ ++#define IOPTE_BASE(ent) ((ent)&IOPTE_BASE_MASK) ++ ++#define IOPTE_TO_PFN(ent) ((*ent) & IOMMU_PT_MASK) ++#define IOVA_PAGE_OFT(va) ((va)&PAGE_OFFSET_MASK) ++ ++/* IO virtual address start page frame number */ ++#define IOVA_START_PFN (1) ++#define IOVA_PFN(addr) ((addr) >> PAGE_SHIFT) ++ ++/* TLB Invalid ALIGN */ ++#define IOVA_4M_ALIGN(iova) ((iova) & (~0x3fffff)) ++ ++struct sunxi_pgtable_t { ++ unsigned int 
*pgtable; ++ struct kmem_cache *iopte_cache; ++ struct device *dma_dev; ++} sunxi_pgtable_params; ++ ++/* pointer to l1 table entry */ ++static inline u32 *iopde_offset(u32 *iopd, dma_addr_t iova) ++{ ++ return iopd + IOPDE_INDEX(iova); ++} ++ ++/* pointer to l2 table entry */ ++static inline u32 *iopte_offset(u32 *ent, dma_addr_t iova) ++{ ++ u64 iopte_base = 0; ++ ++ iopte_base = IOPTE_BASE(*ent); ++ iopte_base = iommu_phy_to_cpu_phy(iopte_base); ++ ++ return (u32 *)__va(iopte_base) + IOPTE_INDEX(iova); ++} ++ ++static int sunxi_alloc_iopte(u32 *sent, int prot) ++{ ++ u32 *pent; ++ u32 flags = 0; ++ ++ flags |= (prot & IOMMU_READ) ? DENT_READABLE : 0; ++ flags |= (prot & IOMMU_WRITE) ? DENT_WRITABLE : 0; ++ ++ pent = kmem_cache_zalloc(sunxi_pgtable_params.iopte_cache, GFP_ATOMIC); ++ WARN_ON((unsigned long)pent & (PT_SIZE - 1)); ++ if (!pent) { ++ pr_err("%s, %d, kmalloc failed!\n", __func__, __LINE__); ++ return 0; ++ } ++ dma_sync_single_for_cpu(sunxi_pgtable_params.dma_dev, ++ virt_to_phys(sent), sizeof(*sent), ++ DMA_TO_DEVICE); ++ *sent = cpu_phy_to_iommu_phy(__pa(pent)) | DENT_VALID; ++ dma_sync_single_for_device(sunxi_pgtable_params.dma_dev, ++ virt_to_phys(sent), sizeof(*sent), ++ DMA_TO_DEVICE); ++ ++ return 1; ++} ++ ++static void sunxi_free_iopte(u32 *pent) ++{ ++ kmem_cache_free(sunxi_pgtable_params.iopte_cache, pent); ++} ++ ++static inline u32 sunxi_mk_pte(phys_addr_t page, int prot) ++{ ++ u32 flags = 0; ++ u32 high_addr = 0; ++ ++ flags |= (prot & IOMMU_READ) ? SUNXI_PTE_PAGE_READABLE : 0; ++ flags |= (prot & IOMMU_WRITE) ? 
SUNXI_PTE_PAGE_WRITABLE : 0; ++ page &= IOMMU_PT_MASK; ++ ++ return page | high_addr | flags | SUNXI_PTE_PAGE_VALID; ++} ++ ++int sunxi_pgtable_prepare_l1_tables(unsigned int *pgtable, ++ dma_addr_t iova_start, dma_addr_t iova_end, ++ int prot) ++{ ++ u32 *dent; ++ for (; iova_start <= iova_end; iova_start += SPD_SIZE) { ++ dent = iopde_offset(pgtable, iova_start); ++ if (!IS_VALID(*dent) && !sunxi_alloc_iopte(dent, prot)) { ++ return -ENOMEM; ++ } ++ } ++ return 0; ++} ++ ++int sunxi_pgtable_prepare_l2_tables(unsigned int *pgtable, ++ dma_addr_t iova_start, dma_addr_t iova_end, ++ phys_addr_t paddr, int prot) ++{ ++ size_t paddr_start; ++ u32 *dent, *pent; ++ u32 iova_tail_count, iova_tail_size; ++ u32 pent_val; ++ int i; ++ paddr = cpu_phy_to_iommu_phy(paddr); ++ paddr_start = paddr & IOMMU_PT_MASK; ++ for (; iova_start < iova_end;) { ++ iova_tail_count = NUM_ENTRIES_PTE - IOPTE_INDEX(iova_start); ++ iova_tail_size = iova_tail_count * SPAGE_SIZE; ++ if (iova_start + iova_tail_size > iova_end) { ++ iova_tail_size = iova_end - iova_start; ++ iova_tail_count = iova_tail_size / SPAGE_SIZE; ++ } ++ ++ dent = iopde_offset(pgtable, iova_start); ++ pent = iopte_offset(dent, iova_start); ++ pent_val = sunxi_mk_pte(paddr_start, prot); ++ for (i = 0; i < iova_tail_count; i++) { ++ WARN_ON(*pent); ++ *pent = pent_val + SPAGE_SIZE * i; ++ pent++; ++ } ++ ++ dma_sync_single_for_device( ++ sunxi_pgtable_params.dma_dev, ++ virt_to_phys(iopte_offset(dent, iova_start)), ++ iova_tail_count << 2, DMA_TO_DEVICE); ++ iova_start += iova_tail_size; ++ paddr_start += iova_tail_size; ++ } ++ return 0; ++} ++ ++ ++int sunxi_pgtable_delete_l2_tables(unsigned int *pgtable, dma_addr_t iova_start, ++ dma_addr_t iova_end) ++{ ++ u32 *dent, *pent; ++ u32 iova_tail_count, iova_tail_size; ++ iova_tail_count = NUM_ENTRIES_PTE - IOPTE_INDEX(iova_start); ++ iova_tail_size = iova_tail_count * SPAGE_SIZE; ++ if (iova_start + iova_tail_size > iova_end) { ++ iova_tail_size = iova_end - iova_start; ++ 
iova_tail_count = iova_tail_size / SPAGE_SIZE; ++ } ++ ++ dent = iopde_offset(pgtable, iova_start); ++ if (!IS_VALID(*dent)) ++ return -EINVAL; ++ pent = iopte_offset(dent, iova_start); ++ memset(pent, 0, iova_tail_count * sizeof(u32)); ++ dma_sync_single_for_device(sunxi_pgtable_params.dma_dev, ++ virt_to_phys(iopte_offset(dent, iova_start)), ++ iova_tail_count << 2, DMA_TO_DEVICE); ++ ++ if (iova_tail_size == SPD_SIZE) { ++ *dent = 0; ++ dma_sync_single_for_device(sunxi_pgtable_params.dma_dev, ++ virt_to_phys(dent), sizeof(*dent), ++ DMA_TO_DEVICE); ++ sunxi_free_iopte(pent); ++ } ++ return iova_tail_size; ++} ++ ++ ++phys_addr_t sunxi_pgtable_iova_to_phys(unsigned int *pgtable, dma_addr_t iova) ++{ ++ u32 *dent, *pent; ++ phys_addr_t ret = 0; ++ dent = iopde_offset(pgtable, iova); ++ if (IS_VALID(*dent)) { ++ pent = iopte_offset(dent, iova); ++ if (*pent) { ++ ret = IOPTE_TO_PFN(pent) + IOVA_PAGE_OFT(iova); ++ ret = iommu_phy_to_cpu_phy(ret); ++ } ++ } ++ return ret; ++} ++ ++ ++int sunxi_pgtable_invalid_helper(unsigned int *pgtable, dma_addr_t iova) ++{ ++ u32 *pte_addr, *dte_addr; ++ ++ dte_addr = iopde_offset(pgtable, iova); ++ if ((*dte_addr & 0x3) != 0x1) { ++ pr_err("0x%pad is not mapped!\n", &iova); ++ return 1; ++ } ++ pte_addr = iopte_offset(dte_addr, iova); ++ if ((*pte_addr & 0x2) == 0) { ++ pr_err("0x%pad is not mapped!\n", &iova); ++ return 1; ++ } ++ pr_err("0x%pad is mapped!\n", &iova); ++ ++ return 0; ++} ++ ++ ++void sunxi_pgtable_clear(unsigned int *pgtable) ++{ ++ int i = 0; ++ u32 *dent, *pent; ++ size_t iova; ++ ++ for (i = 0; i < NUM_ENTRIES_PDE; ++i) { ++ dent = pgtable + i; ++ iova = (unsigned long)i << IOMMU_PD_SHIFT; ++ if (IS_VALID(*dent)) { ++ pent = iopte_offset(dent, iova); ++ dma_sync_single_for_cpu(sunxi_pgtable_params.dma_dev, ++ virt_to_phys(pent), PT_SIZE, ++ DMA_TO_DEVICE); ++ memset(pent, 0, PT_SIZE); ++ dma_sync_single_for_device(sunxi_pgtable_params.dma_dev, ++ virt_to_phys(pent), PT_SIZE, ++ DMA_TO_DEVICE); ++ 
dma_sync_single_for_cpu(sunxi_pgtable_params.dma_dev, ++ virt_to_phys(dent), PT_SIZE, ++ DMA_TO_DEVICE); ++ *dent = 0; ++ dma_sync_single_for_device(sunxi_pgtable_params.dma_dev, ++ virt_to_phys(dent), ++ sizeof(*dent), ++ DMA_TO_DEVICE); ++ sunxi_free_iopte(pent); ++ } ++ } ++} ++ ++ ++unsigned int *sunxi_pgtable_alloc(void) ++{ ++ unsigned int *pgtable; ++ pgtable = (unsigned int *)__get_free_pages(GFP_KERNEL, ++ get_order(PD_SIZE)); ++ ++ if (pgtable != NULL) { ++ memset(pgtable, 0, PD_SIZE); ++ } ++ sunxi_pgtable_params.pgtable = pgtable; ++ return pgtable; ++} ++ ++ ++void sunxi_pgtable_free(unsigned int *pgtable) ++{ ++ free_pages((unsigned long)pgtable, get_order(PD_SIZE)); ++ sunxi_pgtable_params.pgtable = NULL; ++} ++ ++ ++static inline bool __region_ended(u32 pent) ++{ ++ return !(pent & SUNXI_PTE_PAGE_VALID); ++} ++ ++static inline bool __access_mask_changed(u32 pent, u32 old_mask) ++{ ++ return old_mask != ++ (pent & (SUNXI_PTE_PAGE_READABLE | SUNXI_PTE_PAGE_WRITABLE)); ++} ++ ++static u32 __print_region(char *buf, size_t buf_len, ssize_t len, ++ struct dump_region *active_region, ++ bool for_sysfs_show) ++{ ++ if (active_region->type == DUMP_REGION_RESERVE) { ++ if (for_sysfs_show) { ++ len += sysfs_emit_at( ++ buf, len, ++ "iova:%pad size:0x%zx\n", ++ &active_region->iova, active_region->size); ++ } else { ++ len += scnprintf( ++ buf + len, buf_len - len, ++ "iova:%pad size:0x%zx\n", ++ &active_region->iova, active_region->size); ++ } ++ } else { ++ if (for_sysfs_show) { ++ len += sysfs_emit_at( ++ buf, len, ++ "iova:%pad phys:%pad %s%s size:0x%zx\n", ++ &active_region->iova, &active_region->phys, ++ active_region->access_mask & ++ SUNXI_PTE_PAGE_READABLE ? ++ "R" : ++ " ", ++ active_region->access_mask & ++ SUNXI_PTE_PAGE_WRITABLE ? 
++ "W" : ++ " ", ++ active_region->size); ++ } else { ++ len += scnprintf( ++ buf + len, buf_len - len, ++ "iova:%pad phys:%pad %s%s size:0x%zx\n", ++ &active_region->iova, &active_region->phys, ++ active_region->access_mask & ++ SUNXI_PTE_PAGE_READABLE ? ++ "R" : ++ " ", ++ active_region->access_mask & ++ SUNXI_PTE_PAGE_WRITABLE ? ++ "W" : ++ " ", ++ active_region->size); ++ } ++ } ++ return len; ++} ++ ++ssize_t sunxi_pgtable_dump(unsigned int *pgtable, ssize_t len, char *buf, ++ size_t buf_len, bool for_sysfs_show) ++{ ++ /* walk and dump */ ++ int i, j; ++ u32 *dent, *pent; ++ struct dump_region active_region; ++ ++ if (for_sysfs_show) { ++ len += sysfs_emit_at(buf, len, "mapped\n"); ++ } else { ++ len += scnprintf(buf + len, buf_len - len, "mapped\n"); ++ } ++ ++ dent = pgtable; ++ active_region.type = DUMP_REGION_MAP; ++ active_region.size = 0; ++ active_region.access_mask = 0; ++ for (i = 0; i < NUM_ENTRIES_PDE; i++) { ++ j = 0; ++ if (!IS_VALID(dent[i])) { ++ /* empty dentry measn ended of region, print it*/ ++ if (active_region.size) { ++ len = __print_region(buf, buf_len, len, ++ &active_region, ++ for_sysfs_show); ++ /* prepare next region */ ++ active_region.size = 0; ++ active_region.access_mask = 0; ++ } ++ continue; ++ } ++ /* iova here use for l1 idx, safe to pass 0 to get entry for 1st page(idx 0)*/ ++ pent = iopte_offset(dent + i, 0); ++ for (; j < NUM_ENTRIES_PTE; j++) { ++ if (active_region.size) { ++ /* looks like we are counting something, check if it need printing */ ++ if (__region_ended(pent[j]) /* not contiguous */ ++ || ++ (active_region.access_mask && ++ __access_mask_changed( ++ pent[j], ++ active_region ++ .access_mask)) /* different access */ ++ ) { ++ len = __print_region(buf, buf_len, len, ++ &active_region, ++ for_sysfs_show); ++ ++ /* prepare next region */ ++ active_region.size = 0; ++ active_region.access_mask = 0; ++ } ++ } ++ ++ if (pent[j] & SUNXI_PTE_PAGE_VALID) { ++ /* no on count region, mark start address */ ++ if 
(active_region.size == 0) { ++ active_region.iova = ++ ((dma_addr_t)i ++ << IOMMU_PD_SHIFT) + ++ ((dma_addr_t)j ++ << IOMMU_PT_SHIFT); ++ active_region.phys = ++ iommu_phy_to_cpu_phy( ++ IOPTE_TO_PFN(&pent[j])); ++ active_region.access_mask = ++ (pent[j] & ++ (SUNXI_PTE_PAGE_READABLE | ++ SUNXI_PTE_PAGE_WRITABLE)); ++ } ++ active_region.size += 1 << IOMMU_PT_SHIFT; ++ } ++ } ++ } ++ //dump last region (if any) ++ if (active_region.size) { ++ len = __print_region(buf, buf_len, len, &active_region, ++ for_sysfs_show); ++ } ++ return len; ++} ++ ++ ++struct kmem_cache *sunxi_pgtable_alloc_pte_cache(void) ++{ ++ struct kmem_cache *cache; ++ cache = kmem_cache_create("sunxi-iopte-cache", PT_SIZE, PT_SIZE, ++ SLAB_HWCACHE_ALIGN, NULL); ++ sunxi_pgtable_params.iopte_cache = cache; ++ return cache; ++} ++ ++ ++void sunxi_pgtable_free_pte_cache(struct kmem_cache *iopte_cache) ++{ ++ kmem_cache_destroy(iopte_cache); ++} ++ ++ ++void sunxi_pgtable_set_dma_dev(struct device *dma_dev) ++{ ++ sunxi_pgtable_params.dma_dev = dma_dev; ++} +diff --git a/drivers/iommu/sun55i-iommu-pgtable.h b/drivers/iommu/sun55i-iommu-pgtable.h +new file mode 100644 +index 000000000000..111111111111 +--- /dev/null ++++ b/drivers/iommu/sun55i-iommu-pgtable.h +@@ -0,0 +1,125 @@ ++/* SPDX-License-Identifier: GPL-2.0 */ ++/* Copyright(c) 2020 - 2023 Allwinner Technology Co.,Ltd. All rights reserved. */ ++/* ++ * Allwinner's pgtable controler ++ * ++ * Copyright (c) 2023, ouyangkun ++ * ++ * This program is free software; you can redistribute it and/or modify ++ * it under the terms of the GNU General Public License as published by ++ * the Free Software Foundation; either version 2 of the License, or ++ * (at your option) any later version. ++ * ++ * This program is distributed in the hope that it will be useful, but WITHOUT ++ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or ++ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for ++ * more details. 
++ * ++ */ ++#ifndef __SUNXI_IOMMU_PGTALBE__ ++#define __SUNXI_IOMMU_PGTALBE__ ++#include ++ ++#define SUNXI_PHYS_OFFSET 0x40000000UL ++ ++#define IOMMU_VA_BITS 32 ++ ++#define IOMMU_PD_SHIFT 20 ++#define IOMMU_PD_MASK (~((1UL << IOMMU_PD_SHIFT) - 1)) ++ ++#define IOMMU_PT_SHIFT 12 ++#define IOMMU_PT_MASK (~((1UL << IOMMU_PT_SHIFT) - 1)) ++ ++#define SPAGE_SIZE (1 << IOMMU_PT_SHIFT) ++#define SPD_SIZE (1 << IOMMU_PD_SHIFT) ++#define SPAGE_ALIGN(addr) ALIGN(addr, SPAGE_SIZE) ++#define SPDE_ALIGN(addr) ALIGN(addr, SPD_SIZE) ++ ++/* ++ * This version Hardware just only support 4KB page. It have ++ * a two level page table structure, where the first level has ++ * 4096 entries, and the second level has 256 entries. And, the ++ * first level is "Page Directory(PG)", every entry include a ++ * Page Table base address and a few of control bits. Second ++ * level is "Page Table(PT)", every entry include a physical ++ * page address and a few of control bits. Each entry is one ++ * 32-bit word. Most of the bits in the second level entry are ++ * used by hardware. 
++ *
++ * Virtual Address Format:
++ *  31              20|19        12|11      0
++ * +-----------------+------------+--------+
++ * |    PDE Index    | PTE Index  | offset |
++ * +-----------------+------------+--------+
++ *
++ * Table Layout:
++ *
++ *      First Level          Second Level
++ *    (Page Directory)       (Page Table)
++ *     ----+---------+0
++ *  ^  |   PDE   | --->  -+--------+----
++ *  |  ----------+1       |  PTE   |  ^
++ *  |  |         |        +--------+  |
++ *     ----------+2       |        |  1K
++ * 16K |         |        +--------+  |
++ *     ----------+3       |        |  v
++ *  |  |         |        +--------+----
++ *  |  ----------
++ *  |  |         |
++ *  v  |         |
++ *  ----+--------+
++ *
++ * IOPDE:
++ *  31                    10|9       0
++ * +------------------------+--------+
++ * |     PTE Base Address   |CTRL BIT|
++ * +------------------------+--------+
++ *
++ * IOPTE:
++ *  31                 12|11        0
++ * +---------------------+-----------+
++ * |  Phy Page Address   | CTRL BIT  |
++ * +---------------------+-----------+
++ *
++ * cpu phy 0x0000 0000 ~ 0x4000 0000 is reserved for IO access,
++ * iommu phy in between 0x0000 0000 ~ 0x4000 0000 should not be used
++ * as cpu phy directly, move this address space beyond iommu
++ * phy max, so iommu phys 0x0000 0000 ~ 0x4000 0000 should be
++ * iommu_phy_max + 0x0000 0000 ~ iommu_phy_max + 0x4000 0000(as
++ * spec said)
++ */
++
++static inline dma_addr_t iommu_phy_to_cpu_phy(dma_addr_t iommu_phy)
++{
++	return iommu_phy < SUNXI_PHYS_OFFSET ?
++		       iommu_phy + (1ULL << IOMMU_VA_BITS) :
++		       iommu_phy;
++}
++
++static inline dma_addr_t cpu_phy_to_iommu_phy(dma_addr_t cpu_phy)
++{
++	return cpu_phy > (1ULL << IOMMU_VA_BITS) ?
++ cpu_phy - (1ULL << IOMMU_VA_BITS) : ++ cpu_phy; ++} ++ ++int sunxi_pgtable_prepare_l1_tables(unsigned int *pgtable, ++ dma_addr_t iova_start, dma_addr_t iova_end, ++ int prot); ++int sunxi_pgtable_prepare_l2_tables(unsigned int *pgtable, ++ dma_addr_t iova_start, dma_addr_t iova_end, ++ phys_addr_t paddr, int prot); ++int sunxi_pgtable_delete_l2_tables(unsigned int *pgtable, dma_addr_t iova_start, ++ dma_addr_t iova_end); ++phys_addr_t sunxi_pgtable_iova_to_phys(unsigned int *pgtable, dma_addr_t iova); ++int sunxi_pgtable_invalid_helper(unsigned int *pgtable, dma_addr_t iova); ++void sunxi_pgtable_clear(unsigned int *pgtable); ++unsigned int *sunxi_pgtable_alloc(void); ++void sunxi_pgtable_free(unsigned int *pgtable); ++ssize_t sunxi_pgtable_dump(unsigned int *pgtable, ssize_t len, char *buf, ++ size_t buf_len, bool for_sysfs_show); ++struct kmem_cache *sunxi_pgtable_alloc_pte_cache(void); ++void sunxi_pgtable_free_pte_cache(struct kmem_cache *iopte_cache); ++void sunxi_pgtable_set_dma_dev(struct device *dma_dev); ++ ++#endif +diff --git a/drivers/iommu/sun55i-iommu.c b/drivers/iommu/sun55i-iommu.c +new file mode 100644 +index 000000000000..111111111111 +--- /dev/null ++++ b/drivers/iommu/sun55i-iommu.c +@@ -0,0 +1,1606 @@ ++/* SPDX-License-Identifier: GPL-2.0-or-later */ ++/* Copyright(c) 2020 - 2023 Allwinner Technology Co.,Ltd. All rights reserved. */ ++/******************************************************************************* ++ * Copyright (C) 2016-2018, Allwinner Technology CO., LTD. ++ * Author: zhuxianbin ++ * ++ * This file is provided under a dual BSD/GPL license. When using or ++ * redistributing this file, you may do so under either license. ++ * ++ * This program is distributed in the hope that it will be useful, ++ * but WITHOUT ANY WARRANTY; without even the implied warranty of ++ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the ++ * GNU General Public License for more details. 
++ ******************************************************************************/ ++#include ++#include ++#include ++#include ++#include ++#include ++#include ++#include ++#include ++#include ++#include ++#include ++#include ++#include ++#include ++#include ++#include ++#include ++ ++#include ++#include "sun55i-iommu.h" ++ ++/* ++ * Register of IOMMU device ++ */ ++#define IOMMU_VERSION_REG 0x0000 ++#define IOMMU_RESET_REG 0x0010 ++#define IOMMU_ENABLE_REG 0x0020 ++#define IOMMU_BYPASS_REG 0x0030 ++#define IOMMU_AUTO_GATING_REG 0x0040 ++#define IOMMU_WBUF_CTRL_REG 0x0044 ++#define IOMMU_OOO_CTRL_REG 0x0048 ++#define IOMMU_4KB_BDY_PRT_CTRL_REG 0x004C ++#define IOMMU_TTB_REG 0x0050 ++#define IOMMU_TLB_ENABLE_REG 0x0060 ++#define IOMMU_TLB_PREFETCH_REG 0x0070 ++#define IOMMU_TLB_FLUSH_ENABLE_REG 0x0080 ++#define IOMMU_TLB_IVLD_MODE_SEL_REG 0x0084 ++#define IOMMU_TLB_IVLD_START_ADDR_REG 0x0088 ++#define IOMMU_TLB_IVLD_END_ADDR_REG 0x008C ++#define IOMMU_TLB_IVLD_ADDR_REG 0x0090 ++#define IOMMU_TLB_IVLD_ADDR_MASK_REG 0x0094 ++#define IOMMU_TLB_IVLD_ENABLE_REG 0x0098 ++#define IOMMU_PC_IVLD_MODE_SEL_REG 0x009C ++#define IOMMU_PC_IVLD_ADDR_REG 0x00A0 ++#define IOMMU_PC_IVLD_START_ADDR_REG 0x00A4 ++#define IOMMU_PC_IVLD_ENABLE_REG 0x00A8 ++#define IOMMU_PC_IVLD_END_ADDR_REG 0x00Ac ++#define IOMMU_DM_AUT_CTRL_REG0 0x00B0 ++#define IOMMU_DM_AUT_CTRL_REG1 0x00B4 ++#define IOMMU_DM_AUT_CTRL_REG2 0x00B8 ++#define IOMMU_DM_AUT_CTRL_REG3 0x00BC ++#define IOMMU_DM_AUT_CTRL_REG4 0x00C0 ++#define IOMMU_DM_AUT_CTRL_REG5 0x00C4 ++#define IOMMU_DM_AUT_CTRL_REG6 0x00C8 ++#define IOMMU_DM_AUT_CTRL_REG7 0x00CC ++#define IOMMU_DM_AUT_OVWT_REG 0x00D0 ++#define IOMMU_INT_ENABLE_REG 0x0100 ++#define IOMMU_INT_CLR_REG 0x0104 ++#define IOMMU_INT_STA_REG 0x0108 ++#define IOMMU_INT_ERR_ADDR_REG0 0x0110 ++ ++#define IOMMU_INT_ERR_ADDR_REG1 0x0114 ++#define IOMMU_INT_ERR_ADDR_REG2 0x0118 ++ ++#define IOMMU_INT_ERR_ADDR_REG3 0x011C ++#define IOMMU_INT_ERR_ADDR_REG4 0x0120 ++#define 
IOMMU_INT_ERR_ADDR_REG5 0x0124 ++ ++#define IOMMU_INT_ERR_ADDR_REG6 0x0128 ++#define IOMMU_INT_ERR_ADDR_REG7 0x0130 ++#define IOMMU_INT_ERR_ADDR_REG8 0x0134 ++ ++#define IOMMU_INT_ERR_DATA_REG0 0x0150 ++#define IOMMU_INT_ERR_DATA_REG1 0x0154 ++#define IOMMU_INT_ERR_DATA_REG2 0x0158 ++#define IOMMU_INT_ERR_DATA_REG3 0x015C ++#define IOMMU_INT_ERR_DATA_REG4 0x0160 ++#define IOMMU_INT_ERR_DATA_REG5 0x0164 ++ ++#define IOMMU_INT_ERR_DATA_REG6 0x0168 ++#define IOMMU_INT_ERR_DATA_REG7 0x0170 ++#define IOMMU_INT_ERR_DATA_REG8 0x0174 ++ ++#define IOMMU_L1PG_INT_REG 0x0180 ++#define IOMMU_L2PG_INT_REG 0x0184 ++#define IOMMU_VA_REG 0x0190 ++#define IOMMU_VA_DATA_REG 0x0194 ++#define IOMMU_VA_CONFIG_REG 0x0198 ++#define IOMMU_PMU_ENABLE_REG 0x0200 ++#define IOMMU_PMU_CLR_REG 0x0210 ++#define IOMMU_PMU_ACCESS_LOW_REG0 0x0230 ++#define IOMMU_PMU_ACCESS_HIGH_REG0 0x0234 ++#define IOMMU_PMU_HIT_LOW_REG0 0x0238 ++#define IOMMU_PMU_HIT_HIGH_REG0 0x023C ++#define IOMMU_PMU_ACCESS_LOW_REG1 0x0240 ++#define IOMMU_PMU_ACCESS_HIGH_REG1 0x0244 ++#define IOMMU_PMU_HIT_LOW_REG1 0x0248 ++#define IOMMU_PMU_HIT_HIGH_REG1 0x024C ++#define IOMMU_PMU_ACCESS_LOW_REG2 0x0250 ++#define IOMMU_PMU_ACCESS_HIGH_REG2 0x0254 ++#define IOMMU_PMU_HIT_LOW_REG2 0x0258 ++#define IOMMU_PMU_HIT_HIGH_REG2 0x025C ++#define IOMMU_PMU_ACCESS_LOW_REG3 0x0260 ++#define IOMMU_PMU_ACCESS_HIGH_REG3 0x0264 ++#define IOMMU_PMU_HIT_LOW_REG3 0x0268 ++#define IOMMU_PMU_HIT_HIGH_REG3 0x026C ++#define IOMMU_PMU_ACCESS_LOW_REG4 0x0270 ++#define IOMMU_PMU_ACCESS_HIGH_REG4 0x0274 ++#define IOMMU_PMU_HIT_LOW_REG4 0x0278 ++#define IOMMU_PMU_HIT_HIGH_REG4 0x027C ++#define IOMMU_PMU_ACCESS_LOW_REG5 0x0280 ++#define IOMMU_PMU_ACCESS_HIGH_REG5 0x0284 ++#define IOMMU_PMU_HIT_LOW_REG5 0x0288 ++#define IOMMU_PMU_HIT_HIGH_REG5 0x028C ++ ++#define IOMMU_PMU_ACCESS_LOW_REG6 0x0290 ++#define IOMMU_PMU_ACCESS_HIGH_REG6 0x0294 ++#define IOMMU_PMU_HIT_LOW_REG6 0x0298 ++#define IOMMU_PMU_HIT_HIGH_REG6 0x029C ++#define IOMMU_PMU_ACCESS_LOW_REG7 
0x02D0 ++#define IOMMU_PMU_ACCESS_HIGH_REG7 0x02D4 ++#define IOMMU_PMU_HIT_LOW_REG7 0x02D8 ++#define IOMMU_PMU_HIT_HIGH_REG7 0x02DC ++#define IOMMU_PMU_ACCESS_LOW_REG8 0x02E0 ++#define IOMMU_PMU_ACCESS_HIGH_REG8 0x02E4 ++#define IOMMU_PMU_HIT_LOW_REG8 0x02E8 ++#define IOMMU_PMU_HIT_HIGH_REG8 0x02EC ++ ++#define IOMMU_PMU_TL_LOW_REG0 0x0300 ++#define IOMMU_PMU_TL_HIGH_REG0 0x0304 ++#define IOMMU_PMU_ML_REG0 0x0308 ++ ++#define IOMMU_PMU_TL_LOW_REG1 0x0310 ++#define IOMMU_PMU_TL_HIGH_REG1 0x0314 ++#define IOMMU_PMU_ML_REG1 0x0318 ++ ++#define IOMMU_PMU_TL_LOW_REG2 0x0320 ++#define IOMMU_PMU_TL_HIGH_REG2 0x0324 ++#define IOMMU_PMU_ML_REG2 0x0328 ++ ++#define IOMMU_PMU_TL_LOW_REG3 0x0330 ++#define IOMMU_PMU_TL_HIGH_REG3 0x0334 ++#define IOMMU_PMU_ML_REG3 0x0338 ++ ++#define IOMMU_PMU_TL_LOW_REG4 0x0340 ++#define IOMMU_PMU_TL_HIGH_REG4 0x0344 ++#define IOMMU_PMU_ML_REG4 0x0348 ++ ++#define IOMMU_PMU_TL_LOW_REG5 0x0350 ++#define IOMMU_PMU_TL_HIGH_REG5 0x0354 ++#define IOMMU_PMU_ML_REG5 0x0358 ++ ++#define IOMMU_PMU_TL_LOW_REG6 0x0360 ++#define IOMMU_PMU_TL_HIGH_REG6 0x0364 ++#define IOMMU_PMU_ML_REG6 0x0368 ++ ++#define IOMMU_RESET_SHIFT 31 ++#define IOMMU_RESET_MASK (1 << IOMMU_RESET_SHIFT) ++#define IOMMU_RESET_SET (0 << 31) ++#define IOMMU_RESET_RELEASE (1 << 31) ++ ++/* ++ * IOMMU enable register field ++ */ ++#define IOMMU_ENABLE 0x1 ++ ++/* ++ * IOMMU interrupt id mask ++ */ ++#define MICRO_TLB0_INVALID_INTER_MASK 0x1 ++#define MICRO_TLB1_INVALID_INTER_MASK 0x2 ++#define MICRO_TLB2_INVALID_INTER_MASK 0x4 ++#define MICRO_TLB3_INVALID_INTER_MASK 0x8 ++#define MICRO_TLB4_INVALID_INTER_MASK 0x10 ++#define MICRO_TLB5_INVALID_INTER_MASK 0x20 ++#define MICRO_TLB6_INVALID_INTER_MASK 0x40 ++ ++#define L1_PAGETABLE_INVALID_INTER_MASK 0x10000 ++#define L2_PAGETABLE_INVALID_INTER_MASK 0x20000 ++ ++/** ++ * sun8iw15p1 ++ * DE : masterID 0 ++ * E_EDMA: masterID 1 ++ * E_FE: masterID 2 ++ * VE: masterID 3 ++ * CSI: masterID 4 ++ * G2D: masterID 5 ++ * E_BE: masterID 6 ++ * ++ * 
sun50iw9p1: ++ * DE : masterID 0 ++ * DI: masterID 1 ++ * VE_R: masterID 2 ++ * VE: masterID 3 ++ * CSI0: masterID 4 ++ * CSI1: masterID 5 ++ * G2D: masterID 6 ++ * sun8iw19p1: ++ * DE :>--->-------masterID 0 ++ * EISE: masterID 1 ++ * AI: masterID 2 ++ * VE:>---->-------masterID 3 ++ * CSI: >-->----masterID 4 ++ * ISP:>-->------ masterID 5 ++ * G2D:>--->-------masterID 6 ++ * sun8iw21: ++ * VE : masterID 0 ++ * CSI: masterID 1 ++ * DE: masterID 2 ++ * G2D: masterID 3 ++ * ISP: masterID 4 ++ * RISCV: masterID 5 ++ * NPU: masterID 6 ++ */ ++#define DEFAULT_BYPASS_VALUE 0x7f ++static const u32 master_id_bitmap[] = {0x1, 0x2, 0x4, 0x8, 0x10, 0x20, 0x40}; ++ ++#define sunxi_wait_when(COND, MS) ({ \ ++ unsigned long timeout__ = jiffies + msecs_to_jiffies(MS) + 1; \ ++ int ret__ = 0; \ ++ while ((COND)) { \ ++ if (time_after(jiffies, timeout__)) { \ ++ ret__ = (!COND) ? 0 : -ETIMEDOUT; \ ++ break; \ ++ } \ ++ udelay(1); \ ++ } \ ++ ret__; \ ++}) ++ ++/* ++ * The format of device tree, and client device how to use it. ++ * ++ * /{ ++ * .... ++ * smmu: iommu@xxxxx { ++ * compatible = "allwinner,iommu"; ++ * reg = ; ++ * interrupts = ; ++ * interrupt-names = "iommu-irq"; ++ * clocks = <&iommu_clk>; ++ * clock-name = "iommu-clk"; ++ * #iommu-cells = <1>; ++ * status = "enabled"; ++ * }; ++ * ++ * de@xxxxx { ++ * ..... ++ * iommus = <&smmu ID>; ++ * }; ++ * ++ * } ++ * ++ * Here, ID number is 0 ~ 5, every client device have a unique id. ++ * Every id represent a micro TLB, also represent a master device. 
++ * ++ */ ++ ++enum sunxi_iommu_version { ++ IOMMU_VERSION_V10 = 0x10, ++ IOMMU_VERSION_V11, ++ IOMMU_VERSION_V12, ++ IOMMU_VERSION_V13, ++ IOMMU_VERSION_V14, ++}; ++ ++struct sunxi_iommu_plat_data { ++ u32 version; ++ u32 tlb_prefetch; ++ u32 tlb_invalid_mode; ++ u32 ptw_invalid_mode; ++ const char *master[8]; ++}; ++ ++struct sunxi_iommu_dev { ++ struct iommu_device iommu; ++ struct device *dev; ++ void __iomem *base; ++ struct clk *clk; ++ int irq; ++ u32 bypass; ++ spinlock_t iommu_lock; ++ struct list_head rsv_list; ++ const struct sunxi_iommu_plat_data *plat_data; ++ struct iommu_domain *identity_domain; ++ struct sunxi_iommu_domain *debug_domain; ++}; ++ ++struct sunxi_iommu_domain { ++ unsigned int *pgtable; /* first page directory, size is 16KB */ ++ u32 *sg_buffer; ++ struct spinlock dt_lock; /* lock for modifying page table @ pgtable */ ++ struct dma_iommu_mapping *mapping; ++ struct iommu_domain domain; ++ /* struct iova_domain iovad; */ ++ /* list of master device, it represent a micro TLB */ ++ struct list_head mdevs; ++ spinlock_t lock; ++}; ++ ++/* ++ * sunxi master device which use iommu. ++ */ ++struct sunxi_mdev { ++ struct list_head node; /* for sunxi_iommu mdevs list */ ++ struct device *dev; /* the master device */ ++ unsigned int tlbid; /* micro TLB id, distinguish device by it */ ++ bool flag; ++}; ++ ++struct sunxi_iommu_owner { ++ unsigned int tlbid; ++ bool flag; ++ struct sunxi_iommu_dev *data; ++ struct device *dev; ++ struct dma_iommu_mapping *mapping; ++}; ++ ++#define _max(x, y) (((u64)(x) > (u64)(y)) ? 
(x) : (y)) ++ ++static struct kmem_cache *iopte_cache; ++static struct sunxi_iommu_dev *global_iommu_dev; ++static bool iommu_hw_init_flag; ++static struct device *dma_dev; ++ ++static sunxi_iommu_fault_cb sunxi_iommu_fault_notify_cbs[7]; ++u32 sunxi_iommu_dump_rsv_list(struct list_head *rsv_list, ssize_t len, ++ char *buf, size_t buf_len, bool for_sysfs_show); ++int sunxi_iommu_check_cmd(struct device *dev, void *data); ++ ++void sun55i_iommu_register_fault_cb(sunxi_iommu_fault_cb cb, unsigned int master_id) ++{ ++ if (master_id >= ARRAY_SIZE(sunxi_iommu_fault_notify_cbs)) ++ return; ++ sunxi_iommu_fault_notify_cbs[master_id] = cb; ++} ++EXPORT_SYMBOL_GPL(sun55i_iommu_register_fault_cb); ++ ++static inline u32 sunxi_iommu_read(struct sunxi_iommu_dev *iommu, ++ u32 offset) ++{ ++ return readl(iommu->base + offset); ++} ++ ++static inline void sunxi_iommu_write(struct sunxi_iommu_dev *iommu, ++ u32 offset, u32 value) ++{ ++ writel(value, iommu->base + offset); ++} ++ ++void sun55i_reset_device_iommu(unsigned int master_id) ++{ ++ unsigned int regval; ++ struct sunxi_iommu_dev *iommu = global_iommu_dev; ++ ++ if (master_id >= 7) ++ return; ++ ++ if (!iommu) ++ return; ++ ++ regval = sunxi_iommu_read(iommu, IOMMU_RESET_REG); ++ sunxi_iommu_write(iommu, IOMMU_RESET_REG, regval & (~(1 << master_id))); ++ regval = sunxi_iommu_read(iommu, IOMMU_RESET_REG); ++ if (!(regval & ((1 << master_id)))) { ++ sunxi_iommu_write(iommu, IOMMU_RESET_REG, regval | ((1 << master_id))); ++ } ++} ++EXPORT_SYMBOL(sun55i_reset_device_iommu); ++ ++void sun55i_enable_device_iommu(struct sunxi_iommu_dev *iommu, unsigned int master_id, bool flag) ++{ ++ unsigned long mflag; ++ ++ if (!iommu) ++ return; ++ ++ if (master_id >= ARRAY_SIZE(master_id_bitmap)) ++ return; ++ ++ spin_lock_irqsave(&iommu->iommu_lock, mflag); ++ if (flag) ++ iommu->bypass &= ~(master_id_bitmap[master_id]); ++ else ++ iommu->bypass |= master_id_bitmap[master_id]; ++ sunxi_iommu_write(iommu, IOMMU_BYPASS_REG, 
iommu->bypass); ++ spin_unlock_irqrestore(&iommu->iommu_lock, mflag); ++} ++EXPORT_SYMBOL(sun55i_enable_device_iommu); ++ ++static int sun55i_tlb_flush(struct sunxi_iommu_dev *iommu) ++{ ++ int ret; ++ ++ /* enable the maximum number(7) of master to fit all platform */ ++ sunxi_iommu_write(iommu, IOMMU_TLB_FLUSH_ENABLE_REG, 0x0003007f); ++ ret = sunxi_wait_when( ++ (sunxi_iommu_read(iommu, IOMMU_TLB_FLUSH_ENABLE_REG)), 2); ++ if (ret) ++ dev_err(iommu->dev, "Enable flush all request timed out\n"); ++ ++ return ret; ++} ++ ++static int sun55i_iommu_hw_init(struct sunxi_iommu_dev *iommu, struct sunxi_iommu_domain *sunxi_domain) ++{ ++ int ret = 0; ++ int iommu_enable = 0; ++ unsigned long mflag; ++ const struct sunxi_iommu_plat_data *plat_data = iommu->plat_data; ++ ++ spin_lock_irqsave(&iommu->iommu_lock, mflag); ++ ++ if (sunxi_domain) { ++ phys_addr_t dte_addr = __pa(sunxi_domain->pgtable); ++ sunxi_iommu_write(iommu, IOMMU_TTB_REG, dte_addr); ++ } ++ ++ /* ++ * set preftech functions, including: ++ * master prefetching and only prefetch valid page to TLB/PTW ++ */ ++ sunxi_iommu_write(iommu, IOMMU_TLB_PREFETCH_REG, plat_data->tlb_prefetch); ++ sunxi_iommu_write(iommu, IOMMU_TLB_IVLD_MODE_SEL_REG, plat_data->tlb_invalid_mode); ++ sunxi_iommu_write(iommu, IOMMU_PC_IVLD_MODE_SEL_REG, plat_data->ptw_invalid_mode); ++ ++ /* disable interrupt of prefetch */ ++ sunxi_iommu_write(iommu, IOMMU_INT_ENABLE_REG, 0x3003f); ++ sunxi_iommu_write(iommu, IOMMU_BYPASS_REG, iommu->bypass); ++ ++ ret = sun55i_tlb_flush(iommu); ++ if (ret) { ++ dev_err(iommu->dev, "Enable flush all request timed out\n"); ++ goto out; ++ } ++ sunxi_iommu_write(iommu, IOMMU_AUTO_GATING_REG, 0x1); ++ sunxi_iommu_write(iommu, IOMMU_ENABLE_REG, IOMMU_ENABLE); ++ iommu_enable = sunxi_iommu_read(iommu, IOMMU_ENABLE_REG); ++ if (iommu_enable != 0x1) { ++ iommu_enable = sunxi_iommu_read(iommu, IOMMU_ENABLE_REG); ++ if (iommu_enable != 0x1) { ++ dev_err(iommu->dev, "iommu enable failed! 
No iommu in bitfile!\n"); ++ ret = -ENODEV; ++ goto out; ++ } ++ } ++ iommu_hw_init_flag = true; ++ ++out: ++ spin_unlock_irqrestore(&iommu->iommu_lock, mflag); ++ ++ return ret; ++} ++ ++static int sun55i_tlb_invalid(dma_addr_t iova, dma_addr_t iova_mask) ++{ ++ struct sunxi_iommu_dev *iommu = global_iommu_dev; ++ dma_addr_t iova_end = iova_mask; ++ int ret = 0; ++ unsigned long mflag; ++ ++ spin_lock_irqsave(&iommu->iommu_lock, mflag); ++ /* new TLB invalid function: use range(start, end) to invalid TLB page */ ++ pr_debug("iommu: TLB invalid:0x%x-0x%x\n", (unsigned int)iova, ++ (unsigned int)iova_end); ++ sunxi_iommu_write(iommu, IOMMU_TLB_IVLD_START_ADDR_REG, iova); ++ sunxi_iommu_write(iommu, IOMMU_TLB_IVLD_END_ADDR_REG, iova_end); ++ sunxi_iommu_write(iommu, IOMMU_TLB_IVLD_ENABLE_REG, 0x1); ++ ++ ret = sunxi_wait_when( ++ (sunxi_iommu_read(iommu, IOMMU_TLB_IVLD_ENABLE_REG)&0x1), 2); ++ if (ret) { ++ dev_err(iommu->dev, "TLB cache invalid timed out\n"); ++ } ++ spin_unlock_irqrestore(&iommu->iommu_lock, mflag); ++ ++ return ret; ++} ++ ++static int sun55i_ptw_cache_invalid(dma_addr_t iova_start, dma_addr_t iova_end) ++{ ++ struct sunxi_iommu_dev *iommu = global_iommu_dev; ++ int ret = 0; ++ unsigned long mflag; ++ ++ spin_lock_irqsave(&iommu->iommu_lock, mflag); ++ /* new PTW invalid function: use range(start, end) to invalid PTW page */ ++ pr_debug("iommu: PTW invalid:0x%x-0x%x\n", (unsigned int)iova_start, ++ (unsigned int)iova_end); ++ WARN_ON(iova_end == 0); ++ sunxi_iommu_write(iommu, IOMMU_PC_IVLD_START_ADDR_REG, iova_start); ++ sunxi_iommu_write(iommu, IOMMU_PC_IVLD_END_ADDR_REG, iova_end); ++ sunxi_iommu_write(iommu, IOMMU_PC_IVLD_ENABLE_REG, 0x1); ++ ++ ret = sunxi_wait_when( ++ (sunxi_iommu_read(iommu, IOMMU_PC_IVLD_ENABLE_REG)&0x1), 2); ++ if (ret) { ++ dev_err(iommu->dev, "PTW cache invalid timed out\n"); ++ goto out; ++ } ++ ++out: ++ spin_unlock_irqrestore(&iommu->iommu_lock, mflag); ++ ++ return ret; ++} ++ ++static void sun55i_zap_tlb(unsigned 
long iova, size_t size) ++{ ++ sun55i_tlb_invalid(iova, iova + 2 * SPAGE_SIZE); ++ sun55i_tlb_invalid(iova + size - SPAGE_SIZE, iova + size + 8 * SPAGE_SIZE); ++ sun55i_ptw_cache_invalid(iova, iova + SPD_SIZE); ++ sun55i_ptw_cache_invalid(iova + size - SPD_SIZE, iova + size); ++ ++ return; ++} ++ ++static int sun55i_iommu_map(struct iommu_domain *domain, unsigned long iova, ++ phys_addr_t paddr, size_t size, size_t count, int prot, ++ gfp_t gfp, size_t *mapped) ++{ ++ struct sunxi_iommu_domain *sunxi_domain; ++ size_t iova_start, iova_end; ++ unsigned long total_size = size * count; ++ int ret; ++ unsigned long flags; ++ ++ sunxi_domain = container_of(domain, struct sunxi_iommu_domain, domain); ++ WARN_ON(sunxi_domain->pgtable == NULL); ++ ++ iova_start = iova & IOMMU_PT_MASK; ++ iova_end = SPAGE_ALIGN(iova + total_size); ++ ++ spin_lock_irqsave(&sunxi_domain->dt_lock, flags); ++ ++ ret = sunxi_pgtable_prepare_l1_tables(sunxi_domain->pgtable, iova_start, ++ iova_end, prot); ++ if (ret) { ++ spin_unlock_irqrestore(&sunxi_domain->dt_lock, flags); ++ if (mapped) ++ *mapped = 0; ++ return -ENOMEM; ++ } ++ ++ sunxi_pgtable_prepare_l2_tables(sunxi_domain->pgtable, ++ iova_start, iova_end, paddr, prot); ++ ++ spin_unlock_irqrestore(&sunxi_domain->dt_lock, flags); ++ ++ if (mapped) ++ *mapped = total_size; ++ ++ return 0; ++} ++ ++static size_t sun55i_iommu_unmap(struct iommu_domain *domain, unsigned long iova, ++ size_t size, size_t count, ++ struct iommu_iotlb_gather *gather) ++{ ++ struct sunxi_iommu_domain *sunxi_domain; ++ const struct sunxi_iommu_plat_data *plat_data; ++ size_t iova_start, iova_end; ++ unsigned long total_size = size * count; ++ int iova_tail_size; ++ unsigned long flags; ++ ++ sunxi_domain = container_of(domain, struct sunxi_iommu_domain, domain); ++ plat_data = global_iommu_dev->plat_data; ++ WARN_ON(sunxi_domain->pgtable == NULL); ++ ++ iova_start = iova & IOMMU_PT_MASK; ++ iova_end = SPAGE_ALIGN(iova + total_size); ++ ++ if (gather) { ++ if 
(gather->start > iova_start) ++ gather->start = iova_start; ++ if (gather->end < iova_end) ++ gather->end = iova_end; ++ } ++ ++ spin_lock_irqsave(&sunxi_domain->dt_lock, flags); ++ ++ sun55i_tlb_invalid(iova_start, iova_end); ++ sun55i_ptw_cache_invalid(iova_start, iova_end); ++ ++ for (; iova_start < iova_end; ) { ++ iova_tail_size = sunxi_pgtable_delete_l2_tables( ++ sunxi_domain->pgtable, iova_start, iova_end); ++ if (iova_tail_size < 0) { ++ spin_unlock_irqrestore(&sunxi_domain->dt_lock, flags); ++ return 0; ++ } ++ if (iova_tail_size == 0) ++ break; ++ ++ sun55i_ptw_cache_invalid(iova_start, iova_start + iova_tail_size); ++ iova_start += iova_tail_size; ++ } ++ spin_unlock_irqrestore(&sunxi_domain->dt_lock, flags); ++ ++ return total_size; ++} ++ ++static int sun55i_iommu_iotlb_sync_map(struct iommu_domain *domain, ++ unsigned long iova, size_t size) ++{ ++ struct sunxi_iommu_domain *sunxi_domain = ++ container_of(domain, struct sunxi_iommu_domain, domain); ++ unsigned long flags; ++ ++ spin_lock_irqsave(&sunxi_domain->dt_lock, flags); ++ sun55i_zap_tlb(iova, size); ++ spin_unlock_irqrestore(&sunxi_domain->dt_lock, flags); ++ ++ return 0; ++} ++ ++static phys_addr_t sun55i_iommu_iova_to_phys(struct iommu_domain *domain, ++ dma_addr_t iova) ++{ ++ struct sunxi_iommu_domain *sunxi_domain = ++ container_of(domain, struct sunxi_iommu_domain, domain); ++ phys_addr_t ret = 0; ++ unsigned long flags; ++ ++ ++ WARN_ON(sunxi_domain->pgtable == NULL); ++ spin_lock_irqsave(&sunxi_domain->dt_lock, flags); ++ ret = sunxi_pgtable_iova_to_phys(sunxi_domain->pgtable, iova); ++ spin_unlock_irqrestore(&sunxi_domain->dt_lock, flags); ++ ++ return ret; ++} ++ ++static struct iommu_domain *sun55i_iommu_domain_alloc_paging(struct device *dev) ++{ ++ struct sunxi_iommu_domain *sunxi_domain; ++ ++ sunxi_domain = kzalloc(sizeof(*sunxi_domain), GFP_KERNEL); ++ if (!sunxi_domain) ++ return NULL; ++ ++ sunxi_domain->pgtable = sunxi_pgtable_alloc(); ++ if (!sunxi_domain->pgtable) { ++ 
pr_err("sunxi domain get pgtable failed\n"); ++ goto err_page; ++ } ++ ++ sunxi_domain->sg_buffer = (unsigned int *)__get_free_pages( ++ GFP_KERNEL, get_order(MAX_SG_TABLE_SIZE)); ++ if (!sunxi_domain->sg_buffer) { ++ pr_err("sunxi domain get sg_buffer failed\n"); ++ goto err_sg_buffer; ++ } ++ ++ sunxi_domain->domain.geometry.aperture_start = 0; ++ sunxi_domain->domain.geometry.aperture_end = (1ULL << 32) - 1; ++ sunxi_domain->domain.geometry.force_aperture = true; ++ spin_lock_init(&sunxi_domain->dt_lock); ++ ++ if (global_iommu_dev) ++ global_iommu_dev->debug_domain = sunxi_domain; ++ ++ if (!iommu_hw_init_flag) { ++ if (sun55i_iommu_hw_init(global_iommu_dev, sunxi_domain)) ++ pr_err("sunxi iommu hardware init failed\n"); ++ } ++ ++ return &sunxi_domain->domain; ++ ++err_sg_buffer: ++ sunxi_pgtable_free(sunxi_domain->pgtable); ++ sunxi_domain->pgtable = NULL; ++err_page: ++ kfree(sunxi_domain); ++ ++ return NULL; ++} ++ ++static void sun55i_iommu_domain_free(struct iommu_domain *domain) ++{ ++ struct sunxi_iommu_domain *sunxi_domain = ++ container_of(domain, struct sunxi_iommu_domain, domain); ++ unsigned long flags; ++ ++ spin_lock_irqsave(&sunxi_domain->dt_lock, flags); ++ sunxi_pgtable_clear(sunxi_domain->pgtable); ++ sun55i_tlb_flush(global_iommu_dev); ++ spin_unlock_irqrestore(&sunxi_domain->dt_lock, flags); ++ sunxi_pgtable_free(sunxi_domain->pgtable); ++ sunxi_domain->pgtable = NULL; ++ free_pages((unsigned long)sunxi_domain->sg_buffer, ++ get_order(MAX_SG_TABLE_SIZE)); ++ sunxi_domain->sg_buffer = NULL; ++ kfree(sunxi_domain); ++} ++ ++static int sun55i_iommu_attach_dev(struct iommu_domain *domain, ++ struct device *dev) ++{ ++ return 0; ++} ++ ++static void sun55i_iommu_probe_device_finalize(struct device *dev) ++{ ++ struct sunxi_iommu_owner *owner = dev_iommu_priv_get(dev); ++ ++ WARN(!dev->dma_mask || *dev->dma_mask == 0, "NULL or 0 dma mask will fail iommu setup\n"); ++ iommu_setup_dma_ops(dev); ++ ++ sun55i_enable_device_iommu(owner->data, 
owner->tlbid, owner->flag); ++} ++ ++static struct iommu_device *sun55i_iommu_probe_device(struct device *dev) ++{ ++ struct sunxi_iommu_owner *owner = dev_iommu_priv_get(dev); ++ ++ if (!owner) /* Not a iommu client device */ ++ return ERR_PTR(-ENODEV); ++ ++ return &owner->data->iommu; ++} ++ ++static void sun55i_iommu_release_device(struct device *dev) ++{ ++ struct sunxi_iommu_owner *owner = dev_iommu_priv_get(dev); ++ ++ if (!owner) ++ return; ++ ++ sun55i_enable_device_iommu(owner->data, owner->tlbid, false); ++ dev->iommu_group = NULL; ++ devm_kfree(dev, dev->dma_parms); ++ dev->dma_parms = NULL; ++ kfree(owner); ++ owner = NULL; ++ dev_iommu_priv_set(dev, NULL); ++} ++ ++static int sun55i_iommu_of_xlate(struct device *dev, ++ const struct of_phandle_args *args) ++{ ++ struct sunxi_iommu_owner *owner = dev_iommu_priv_get(dev); ++ struct platform_device *sysmmu = of_find_device_by_node(args->np); ++ struct sunxi_iommu_dev *data; ++ ++ if (!sysmmu) ++ return -ENODEV; ++ ++ data = platform_get_drvdata(sysmmu); ++ if (data == NULL) ++ return -ENODEV; ++ ++ if (!owner) { ++ owner = kzalloc(sizeof(*owner), GFP_KERNEL); ++ if (!owner) ++ return -ENOMEM; ++ owner->tlbid = args->args[0]; ++ if (args->args_count > 1) ++ owner->flag = args->args[1]; ++ else ++ owner->flag = 0; ++ owner->data = data; ++ owner->dev = dev; ++ dev_iommu_priv_set(dev, owner); ++ } ++ ++ return 0; ++} ++ ++static irqreturn_t sunxi_iommu_irq(int irq, void *dev_id) ++{ ++ ++ u32 inter_status_reg = 0; ++ u32 addr_reg = 0; ++ u32 int_masterid_bitmap = 0; ++ u32 data_reg = 0; ++ u32 l1_pgint_reg = 0; ++ u32 l2_pgint_reg = 0; ++ u32 master_id = 0; ++ unsigned long mflag; ++ struct sunxi_iommu_dev *iommu = dev_id; ++ const struct sunxi_iommu_plat_data *plat_data = iommu->plat_data; ++ ++ spin_lock_irqsave(&iommu->iommu_lock, mflag); ++ inter_status_reg = sunxi_iommu_read(iommu, IOMMU_INT_STA_REG) & 0x3ffff; ++ l1_pgint_reg = sunxi_iommu_read(iommu, IOMMU_L1PG_INT_REG); ++ l2_pgint_reg = 
sunxi_iommu_read(iommu, IOMMU_L2PG_INT_REG); ++ int_masterid_bitmap = inter_status_reg | l1_pgint_reg | l2_pgint_reg; ++ ++ if (inter_status_reg & MICRO_TLB0_INVALID_INTER_MASK) { ++ pr_err("%s Invalid Authority\n", plat_data->master[0]); ++ addr_reg = sunxi_iommu_read(iommu, IOMMU_INT_ERR_ADDR_REG0); ++ data_reg = sunxi_iommu_read(iommu, IOMMU_INT_ERR_DATA_REG0); ++ } else if (inter_status_reg & MICRO_TLB1_INVALID_INTER_MASK) { ++ pr_err("%s Invalid Authority\n", plat_data->master[1]); ++ addr_reg = sunxi_iommu_read(iommu, IOMMU_INT_ERR_ADDR_REG1); ++ data_reg = sunxi_iommu_read(iommu, IOMMU_INT_ERR_DATA_REG1); ++ } else if (inter_status_reg & MICRO_TLB2_INVALID_INTER_MASK) { ++ pr_err("%s Invalid Authority\n", plat_data->master[2]); ++ addr_reg = sunxi_iommu_read(iommu, IOMMU_INT_ERR_ADDR_REG2); ++ data_reg = sunxi_iommu_read(iommu, IOMMU_INT_ERR_DATA_REG2); ++ } else if (inter_status_reg & MICRO_TLB3_INVALID_INTER_MASK) { ++ pr_err("%s Invalid Authority\n", plat_data->master[3]); ++ addr_reg = sunxi_iommu_read(iommu, IOMMU_INT_ERR_ADDR_REG3); ++ data_reg = sunxi_iommu_read(iommu, IOMMU_INT_ERR_DATA_REG3); ++ } else if (inter_status_reg & MICRO_TLB4_INVALID_INTER_MASK) { ++ pr_err("%s Invalid Authority\n", plat_data->master[4]); ++ addr_reg = sunxi_iommu_read(iommu, IOMMU_INT_ERR_ADDR_REG4); ++ data_reg = sunxi_iommu_read(iommu, IOMMU_INT_ERR_DATA_REG4); ++ } else if (inter_status_reg & MICRO_TLB5_INVALID_INTER_MASK) { ++ pr_err("%s Invalid Authority\n", plat_data->master[5]); ++ addr_reg = sunxi_iommu_read(iommu, IOMMU_INT_ERR_ADDR_REG5); ++ data_reg = sunxi_iommu_read(iommu, IOMMU_INT_ERR_DATA_REG5); ++ } else if (inter_status_reg & MICRO_TLB6_INVALID_INTER_MASK) { ++ pr_err("%s Invalid Authority\n", plat_data->master[6]); ++ addr_reg = sunxi_iommu_read(iommu, IOMMU_INT_ERR_ADDR_REG6); ++ data_reg = sunxi_iommu_read(iommu, IOMMU_INT_ERR_DATA_REG6); ++ } else if (inter_status_reg & L1_PAGETABLE_INVALID_INTER_MASK) { ++ /* It's OK to prefetch an invalid page, no 
need to print msg for debug. */ ++ if (!(int_masterid_bitmap & (1U << 31))) ++ pr_err("L1 PageTable Invalid\n"); ++ addr_reg = sunxi_iommu_read(iommu, IOMMU_INT_ERR_ADDR_REG7); ++ data_reg = sunxi_iommu_read(iommu, IOMMU_INT_ERR_DATA_REG7); ++ } else if (inter_status_reg & L2_PAGETABLE_INVALID_INTER_MASK) { ++ if (!(int_masterid_bitmap & (1U << 31))) ++ pr_err("L2 PageTable Invalid\n"); ++ addr_reg = sunxi_iommu_read(iommu, IOMMU_INT_ERR_ADDR_REG8); ++ data_reg = sunxi_iommu_read(iommu, IOMMU_INT_ERR_DATA_REG8); ++ } else ++ pr_err("sunxi iommu int error!!!\n"); ++ ++ if (!(int_masterid_bitmap & (1U << 31))) { ++ int_masterid_bitmap &= 0xffff; ++ ++ if (int_masterid_bitmap) { ++ master_id = __ffs(int_masterid_bitmap); ++ pr_err("Bug is in %s module, invalid address: 0x%x, data:0x%x, id:0x%x\n", ++ plat_data->master[master_id], addr_reg, data_reg, ++ int_masterid_bitmap); ++ ++ if (sunxi_iommu_fault_notify_cbs[master_id]) ++ sunxi_iommu_fault_notify_cbs[master_id](); ++ } else { ++ pr_err("Bug in unknown module (id=0), invalid address: 0x%x, data:0x%x\n", ++ addr_reg, data_reg); ++ } ++ } ++ ++ /* invalid TLB */ ++ sunxi_iommu_write(iommu, IOMMU_TLB_IVLD_START_ADDR_REG, addr_reg); ++ sunxi_iommu_write(iommu, IOMMU_TLB_IVLD_END_ADDR_REG, addr_reg + 4 * SPAGE_SIZE); ++ sunxi_iommu_write(iommu, IOMMU_TLB_IVLD_ENABLE_REG, 0x1); ++ while (sunxi_iommu_read(iommu, IOMMU_TLB_IVLD_ENABLE_REG) & 0x1) ++ ; ++ ++ /* invalid PTW */ ++ sunxi_iommu_write(iommu, IOMMU_PC_IVLD_START_ADDR_REG, addr_reg); ++ sunxi_iommu_write(iommu, IOMMU_PC_IVLD_END_ADDR_REG, addr_reg + 2 * SPD_SIZE); ++ sunxi_iommu_write(iommu, IOMMU_PC_IVLD_ENABLE_REG, 0x1); ++ while (sunxi_iommu_read(iommu, IOMMU_PC_IVLD_ENABLE_REG) & 0x1) ++ ; ++ ++ sunxi_iommu_write(iommu, IOMMU_INT_CLR_REG, inter_status_reg); ++ inter_status_reg |= (l1_pgint_reg | l2_pgint_reg); ++ inter_status_reg &= 0xffff; ++ sunxi_iommu_write(iommu, IOMMU_RESET_REG, ~inter_status_reg); ++ sunxi_iommu_write(iommu, IOMMU_RESET_REG, 
0xffffffff); ++ spin_unlock_irqrestore(&iommu->iommu_lock, mflag); ++ ++ return IRQ_HANDLED; ++} ++ ++static ssize_t sunxi_iommu_enable_show(struct device *dev, ++ struct device_attribute *attr, char *buf) ++{ ++ struct sunxi_iommu_dev *iommu = global_iommu_dev; ++ u32 data; ++ ++ spin_lock(&iommu->iommu_lock); ++ data = sunxi_iommu_read(iommu, IOMMU_PMU_ENABLE_REG); ++ spin_unlock(&iommu->iommu_lock); ++ ++ return scnprintf(buf, PAGE_SIZE, ++ "enable = %d\n", data & 0x1 ? 1 : 0); ++} ++ ++static ssize_t sunxi_iommu_enable_store(struct device *dev, ++ struct device_attribute *attr, ++ const char *buf, size_t count) ++{ ++ struct sunxi_iommu_dev *iommu = global_iommu_dev; ++ unsigned long val; ++ u32 data; ++ int retval; ++ ++ if (kstrtoul(buf, 0, &val)) ++ return -EINVAL; ++ ++ if (val) { ++ spin_lock(&iommu->iommu_lock); ++ data = sunxi_iommu_read(iommu, IOMMU_PMU_ENABLE_REG); ++ sunxi_iommu_write(iommu, IOMMU_PMU_ENABLE_REG, data | 0x1); ++ data = sunxi_iommu_read(iommu, IOMMU_PMU_CLR_REG); ++ sunxi_iommu_write(iommu, IOMMU_PMU_CLR_REG, data | 0x1); ++ retval = sunxi_wait_when((sunxi_iommu_read(iommu, ++ IOMMU_PMU_CLR_REG) & 0x1), 1); ++ if (retval) ++ dev_err(iommu->dev, "Clear PMU Count timed out\n"); ++ spin_unlock(&iommu->iommu_lock); ++ } else { ++ spin_lock(&iommu->iommu_lock); ++ data = sunxi_iommu_read(iommu, IOMMU_PMU_CLR_REG); ++ sunxi_iommu_write(iommu, IOMMU_PMU_CLR_REG, data | 0x1); ++ retval = sunxi_wait_when((sunxi_iommu_read(iommu, ++ IOMMU_PMU_CLR_REG) & 0x1), 1); ++ if (retval) ++ dev_err(iommu->dev, "Clear PMU Count timed out\n"); ++ data = sunxi_iommu_read(iommu, IOMMU_PMU_ENABLE_REG); ++ sunxi_iommu_write(iommu, IOMMU_PMU_ENABLE_REG, data & ~0x1); ++ spin_unlock(&iommu->iommu_lock); ++ } ++ ++ return count; ++} ++ ++static ssize_t sunxi_iommu_profilling_show(struct device *dev, ++ struct device_attribute *attr, ++ char *buf) ++{ ++ struct sunxi_iommu_dev *iommu = global_iommu_dev; ++ const struct sunxi_iommu_plat_data *plat_data = 
iommu->plat_data; ++ struct { ++ u64 macrotlb_access_count; ++ u64 macrotlb_hit_count; ++ u64 ptwcache_access_count; ++ u64 ptwcache_hit_count; ++ struct { ++ u64 access_count; ++ u64 hit_count; ++ u64 latency; ++ u32 max_latency; ++ } micro_tlb[7]; ++ } *iommu_profile; ++ iommu_profile = kmalloc(sizeof(*iommu_profile), GFP_KERNEL); ++ if (!iommu_profile) ++ return 0; ++ int len; ++ spin_lock(&iommu->iommu_lock); ++ ++ iommu_profile->micro_tlb[0].access_count = ++ ((u64)(sunxi_iommu_read(iommu, IOMMU_PMU_ACCESS_HIGH_REG0) & ++ 0x7ff) ++ << 32) | ++ sunxi_iommu_read(iommu, IOMMU_PMU_ACCESS_LOW_REG0); ++ iommu_profile->micro_tlb[0].hit_count = ++ ((u64)(sunxi_iommu_read(iommu, IOMMU_PMU_HIT_HIGH_REG0) & 0x7ff) ++ << 32) | ++ sunxi_iommu_read(iommu, IOMMU_PMU_HIT_LOW_REG0); ++ ++ iommu_profile->micro_tlb[1].access_count = ++ ((u64)(sunxi_iommu_read(iommu, IOMMU_PMU_ACCESS_HIGH_REG1) & ++ 0x7ff) ++ << 32) | ++ sunxi_iommu_read(iommu, IOMMU_PMU_ACCESS_LOW_REG1); ++ iommu_profile->micro_tlb[1].hit_count = ++ ((u64)(sunxi_iommu_read(iommu, IOMMU_PMU_HIT_HIGH_REG1) & 0x7ff) ++ << 32) | ++ sunxi_iommu_read(iommu, IOMMU_PMU_HIT_LOW_REG1); ++ ++ iommu_profile->micro_tlb[2].access_count = ++ ((u64)(sunxi_iommu_read(iommu, IOMMU_PMU_ACCESS_HIGH_REG2) & ++ 0x7ff) ++ << 32) | ++ sunxi_iommu_read(iommu, IOMMU_PMU_ACCESS_LOW_REG2); ++ iommu_profile->micro_tlb[2].hit_count = ++ ((u64)(sunxi_iommu_read(iommu, IOMMU_PMU_HIT_HIGH_REG2) & 0x7ff) ++ << 32) | ++ sunxi_iommu_read(iommu, IOMMU_PMU_HIT_LOW_REG2); ++ ++ iommu_profile->micro_tlb[3].access_count = ++ ((u64)(sunxi_iommu_read(iommu, IOMMU_PMU_ACCESS_HIGH_REG3) & ++ 0x7ff) ++ << 32) | ++ sunxi_iommu_read(iommu, IOMMU_PMU_ACCESS_LOW_REG3); ++ iommu_profile->micro_tlb[3].hit_count = ++ ((u64)(sunxi_iommu_read(iommu, IOMMU_PMU_HIT_HIGH_REG3) & 0x7ff) ++ << 32) | ++ sunxi_iommu_read(iommu, IOMMU_PMU_HIT_LOW_REG3); ++ ++ iommu_profile->micro_tlb[4].access_count = ++ ((u64)(sunxi_iommu_read(iommu, IOMMU_PMU_ACCESS_HIGH_REG4) & ++ 0x7ff) 
++ << 32) | ++ sunxi_iommu_read(iommu, IOMMU_PMU_ACCESS_LOW_REG4); ++ iommu_profile->micro_tlb[4].hit_count = ++ ((u64)(sunxi_iommu_read(iommu, IOMMU_PMU_HIT_HIGH_REG4) & 0x7ff) ++ << 32) | ++ sunxi_iommu_read(iommu, IOMMU_PMU_HIT_LOW_REG4); ++ ++ iommu_profile->micro_tlb[5].access_count = ++ ((u64)(sunxi_iommu_read(iommu, IOMMU_PMU_ACCESS_HIGH_REG5) & ++ 0x7ff) ++ << 32) | ++ sunxi_iommu_read(iommu, IOMMU_PMU_ACCESS_LOW_REG5); ++ iommu_profile->micro_tlb[5].hit_count = ++ ((u64)(sunxi_iommu_read(iommu, IOMMU_PMU_HIT_HIGH_REG5) & 0x7ff) ++ << 32) | ++ sunxi_iommu_read(iommu, IOMMU_PMU_HIT_LOW_REG5); ++ ++ iommu_profile->micro_tlb[6].access_count = ++ ((u64)(sunxi_iommu_read(iommu, IOMMU_PMU_ACCESS_HIGH_REG6) & ++ 0x7ff) ++ << 32) | ++ sunxi_iommu_read(iommu, IOMMU_PMU_ACCESS_LOW_REG6); ++ iommu_profile->micro_tlb[6].hit_count = ++ ((u64)(sunxi_iommu_read(iommu, IOMMU_PMU_HIT_HIGH_REG6) & 0x7ff) ++ << 32) | ++ sunxi_iommu_read(iommu, IOMMU_PMU_HIT_LOW_REG6); ++ ++ iommu_profile->macrotlb_access_count = ++ ((u64)(sunxi_iommu_read(iommu, IOMMU_PMU_ACCESS_HIGH_REG7) & ++ 0x7ff) ++ << 32) | ++ sunxi_iommu_read(iommu, IOMMU_PMU_ACCESS_LOW_REG7); ++ iommu_profile->macrotlb_hit_count = ++ ((u64)(sunxi_iommu_read(iommu, IOMMU_PMU_HIT_HIGH_REG7) & 0x7ff) ++ << 32) | ++ sunxi_iommu_read(iommu, IOMMU_PMU_HIT_LOW_REG7); ++ ++ iommu_profile->ptwcache_access_count = ++ ((u64)(sunxi_iommu_read(iommu, IOMMU_PMU_ACCESS_HIGH_REG8) & ++ 0x7ff) ++ << 32) | ++ sunxi_iommu_read(iommu, IOMMU_PMU_ACCESS_LOW_REG8); ++ iommu_profile->ptwcache_hit_count = ++ ((u64)(sunxi_iommu_read(iommu, IOMMU_PMU_HIT_HIGH_REG8) & 0x7ff) ++ << 32) | ++ sunxi_iommu_read(iommu, IOMMU_PMU_HIT_LOW_REG8); ++ ++ iommu_profile->micro_tlb[0].latency = ++ ((u64)(sunxi_iommu_read(iommu, IOMMU_PMU_TL_HIGH_REG0) & ++ 0x3ffff) ++ << 32) | ++ sunxi_iommu_read(iommu, IOMMU_PMU_TL_LOW_REG0); ++ iommu_profile->micro_tlb[1].latency = ++ ((u64)(sunxi_iommu_read(iommu, IOMMU_PMU_TL_HIGH_REG1) & ++ 0x3ffff) ++ << 32) | ++ 
sunxi_iommu_read(iommu, IOMMU_PMU_TL_LOW_REG1); ++ iommu_profile->micro_tlb[2].latency = ++ ((u64)(sunxi_iommu_read(iommu, IOMMU_PMU_TL_HIGH_REG2) & ++ 0x3ffff) ++ << 32) | ++ sunxi_iommu_read(iommu, IOMMU_PMU_TL_LOW_REG2); ++ iommu_profile->micro_tlb[3].latency = ++ ((u64)(sunxi_iommu_read(iommu, IOMMU_PMU_TL_HIGH_REG3) & ++ 0x3ffff) ++ << 32) | ++ sunxi_iommu_read(iommu, IOMMU_PMU_TL_LOW_REG3); ++ iommu_profile->micro_tlb[4].latency = ++ ((u64)(sunxi_iommu_read(iommu, IOMMU_PMU_TL_HIGH_REG4) & ++ 0x3ffff) ++ << 32) | ++ sunxi_iommu_read(iommu, IOMMU_PMU_TL_LOW_REG4); ++ iommu_profile->micro_tlb[5].latency = ++ ((u64)(sunxi_iommu_read(iommu, IOMMU_PMU_TL_HIGH_REG5) & ++ 0x3ffff) ++ << 32) | ++ sunxi_iommu_read(iommu, IOMMU_PMU_TL_LOW_REG5); ++ ++ iommu_profile->micro_tlb[6].latency = ++ ((u64)(sunxi_iommu_read(iommu, IOMMU_PMU_TL_HIGH_REG6) & ++ 0x3ffff) ++ << 32) | ++ sunxi_iommu_read(iommu, IOMMU_PMU_TL_LOW_REG6); ++ ++ iommu_profile->micro_tlb[0].max_latency = ++ sunxi_iommu_read(iommu, IOMMU_PMU_ML_REG0); ++ iommu_profile->micro_tlb[1].max_latency = ++ sunxi_iommu_read(iommu, IOMMU_PMU_ML_REG1); ++ iommu_profile->micro_tlb[2].max_latency = ++ sunxi_iommu_read(iommu, IOMMU_PMU_ML_REG2); ++ iommu_profile->micro_tlb[3].max_latency = ++ sunxi_iommu_read(iommu, IOMMU_PMU_ML_REG3); ++ iommu_profile->micro_tlb[4].max_latency = ++ sunxi_iommu_read(iommu, IOMMU_PMU_ML_REG4); ++ iommu_profile->micro_tlb[5].max_latency = ++ sunxi_iommu_read(iommu, IOMMU_PMU_ML_REG5); ++ iommu_profile->micro_tlb[6].max_latency = ++ sunxi_iommu_read(iommu, IOMMU_PMU_ML_REG6); ++ ++ spin_unlock(&iommu->iommu_lock); ++err: ++ return 0; ++ ++ len = scnprintf( ++ buf, PAGE_SIZE, ++ "%s_access_count = 0x%llx\n" ++ "%s_hit_count = 0x%llx\n" ++ "%s_access_count = 0x%llx\n" ++ "%s_hit_count = 0x%llx\n" ++ "%s_access_count = 0x%llx\n" ++ "%s_hit_count = 0x%llx\n" ++ "%s_access_count = 0x%llx\n" ++ "%s_hit_count = 0x%llx\n" ++ "%s_access_count = 0x%llx\n" ++ "%s_hit_count = 0x%llx\n" ++ 
"%s_access_count = 0x%llx\n" ++ "%s_hit_count = 0x%llx\n" ++ "%s_access_count = 0x%llx\n" ++ "%s_hit_count = 0x%llx\n" ++ "macrotlb_access_count = 0x%llx\n" ++ "macrotlb_hit_count = 0x%llx\n" ++ "ptwcache_access_count = 0x%llx\n" ++ "ptwcache_hit_count = 0x%llx\n" ++ "%s_total_latency = 0x%llx\n" ++ "%s_total_latency = 0x%llx\n" ++ "%s_total_latency = 0x%llx\n" ++ "%s_total_latency = 0x%llx\n" ++ "%s_total_latency = 0x%llx\n" ++ "%s_total_latency = 0x%llx\n" ++ "%s_total_latency = 0x%llx\n" ++ "%s_max_latency = 0x%x\n" ++ "%s_max_latency = 0x%x\n" ++ "%s_max_latency = 0x%x\n" ++ "%s_max_latency = 0x%x\n" ++ "%s_max_latency = 0x%x\n" ++ "%s_max_latency = 0x%x\n" ++ "%s_max_latency = 0x%x\n", ++ plat_data->master[0], iommu_profile->micro_tlb[0].access_count, ++ plat_data->master[0], iommu_profile->micro_tlb[0].hit_count, ++ plat_data->master[1], iommu_profile->micro_tlb[1].access_count, ++ plat_data->master[1], iommu_profile->micro_tlb[1].hit_count, ++ plat_data->master[2], iommu_profile->micro_tlb[2].access_count, ++ plat_data->master[2], iommu_profile->micro_tlb[2].hit_count, ++ plat_data->master[3], iommu_profile->micro_tlb[3].access_count, ++ plat_data->master[3], iommu_profile->micro_tlb[3].hit_count, ++ plat_data->master[4], iommu_profile->micro_tlb[4].access_count, ++ plat_data->master[4], iommu_profile->micro_tlb[4].hit_count, ++ plat_data->master[5], iommu_profile->micro_tlb[5].access_count, ++ plat_data->master[5], iommu_profile->micro_tlb[5].hit_count, ++ plat_data->master[6], iommu_profile->micro_tlb[6].access_count, ++ plat_data->master[6], iommu_profile->micro_tlb[6].hit_count, ++ iommu_profile->macrotlb_access_count, ++ iommu_profile->macrotlb_hit_count, ++ iommu_profile->ptwcache_access_count, ++ iommu_profile->ptwcache_hit_count, plat_data->master[0], ++ iommu_profile->micro_tlb[0].latency, plat_data->master[1], ++ iommu_profile->micro_tlb[1].latency, plat_data->master[2], ++ iommu_profile->micro_tlb[2].latency, plat_data->master[3], ++ 
iommu_profile->micro_tlb[3].latency, plat_data->master[4], ++ iommu_profile->micro_tlb[4].latency, plat_data->master[5], ++ iommu_profile->micro_tlb[5].latency, plat_data->master[6], ++ iommu_profile->micro_tlb[6].latency, plat_data->master[0], ++ iommu_profile->micro_tlb[0].max_latency, plat_data->master[1], ++ iommu_profile->micro_tlb[1].max_latency, plat_data->master[2], ++ iommu_profile->micro_tlb[2].max_latency, plat_data->master[3], ++ iommu_profile->micro_tlb[3].max_latency, plat_data->master[4], ++ iommu_profile->micro_tlb[4].max_latency, plat_data->master[5], ++ iommu_profile->micro_tlb[5].max_latency, plat_data->master[6], ++ iommu_profile->micro_tlb[6].max_latency); ++ kfree(iommu_profile); ++ return len; ++} ++ ++ ++static u32 __print_rsv_region(char *buf, size_t buf_len, ssize_t len, ++ struct dump_region *active_region, ++ bool for_sysfs_show) ++{ ++ if (active_region->type == DUMP_REGION_RESERVE) { ++ if (for_sysfs_show) { ++ len += sysfs_emit_at( ++ buf, len, ++ "iova:%pad size:0x%zx\n", ++ &active_region->iova, active_region->size); ++ } else { ++ len += scnprintf( ++ buf + len, buf_len - len, ++ "iova:%pad size:0x%zx\n", ++ &active_region->iova, active_region->size); ++ } ++ } ++ return len; ++} ++ ++u32 sunxi_iommu_dump_rsv_list(struct list_head *rsv_list, ssize_t len, ++ char *buf, size_t buf_len, bool for_sysfs_show) ++{ ++ struct iommu_resv_region *resv; ++ struct dump_region active_region; ++ if (for_sysfs_show) { ++ len += sysfs_emit_at(buf, len, "reserved\n"); ++ } else { ++ len += scnprintf(buf + len, buf_len - len, "reserved\n"); ++ } ++ list_for_each_entry(resv, rsv_list, list) { ++ active_region.access_mask = 0; ++ active_region.iova = resv->start; ++ active_region.type = DUMP_REGION_RESERVE; ++ active_region.size = resv->length; ++ len = __print_rsv_region(buf, buf_len, len, &active_region, ++ for_sysfs_show); ++ } ++ return len; ++} ++ ++static ssize_t sun55i_iommu_dump_pgtable(struct sunxi_iommu_dev *iommu, char *buf, size_t buf_len, 
++ bool for_sysfs_show) ++{ ++ struct sunxi_iommu_domain *sunxi_domain = iommu->debug_domain; ++ ssize_t len = 0; ++ ++ len = sunxi_iommu_dump_rsv_list(&iommu->rsv_list, len, buf, ++ buf_len, for_sysfs_show); ++ ++ if (sunxi_domain && sunxi_domain->pgtable) { ++ len = sunxi_pgtable_dump(sunxi_domain->pgtable, len, buf, buf_len, ++ for_sysfs_show); ++ } else { ++ if (for_sysfs_show) { ++ len += sysfs_emit_at(buf, len, "no active domain to dump\n"); ++ } else { ++ len += scnprintf(buf + len, buf_len - len, "no active domain to dump\n"); ++ } ++ } ++ ++ return len; ++} ++ ++static ssize_t sun55i_iommu_map_show(struct device *dev, ++ struct device_attribute *attr, char *buf) ++{ ++ struct sunxi_iommu_dev *iommu = dev_get_drvdata(dev); ++ ++ if (!iommu) ++ return -ENODEV; ++ ++ return sun55i_iommu_dump_pgtable(iommu, buf, PAGE_SIZE, true); ++} ++ ++static struct device_attribute sunxi_iommu_enable_attr = ++ __ATTR(enable, 0644, sunxi_iommu_enable_show, ++ sunxi_iommu_enable_store); ++static struct device_attribute sunxi_iommu_profilling_attr = ++ __ATTR(profilling, 0444, sunxi_iommu_profilling_show, NULL); ++static struct device_attribute sun55i_iommu_map_attr = ++ __ATTR(page_debug, 0444, sun55i_iommu_map_show, NULL); ++ ++static void sun55i_iommu_sysfs_create(struct platform_device *_pdev, ++ struct sunxi_iommu_dev *sunxi_iommu) ++{ ++ device_create_file(&_pdev->dev, &sunxi_iommu_enable_attr); ++ device_create_file(&_pdev->dev, &sunxi_iommu_profilling_attr); ++ device_create_file(&_pdev->dev, &sun55i_iommu_map_attr); ++} ++ ++static void sun55i_iommu_sysfs_remove(struct platform_device *_pdev) ++{ ++ device_remove_file(&_pdev->dev, &sunxi_iommu_enable_attr); ++ device_remove_file(&_pdev->dev, &sunxi_iommu_profilling_attr); ++ device_remove_file(&_pdev->dev, &sun55i_iommu_map_attr); ++} ++ ++ ++int sunxi_iommu_check_cmd(struct device *dev, void *data) ++{ ++ struct iommu_resv_region *region; ++ int prot = IOMMU_WRITE | IOMMU_READ; ++ struct list_head *rsv_list = data; 
++ struct { ++ const char *name; ++ u32 region_type; ++ } supported_region[2] = { { "sunxi-iova-reserve", IOMMU_RESV_RESERVED }, ++ { "sunxi-iova-premap", IOMMU_RESV_DIRECT } }; ++ int i, j; ++#define REGION_CNT_MAX (8) ++ struct { ++ u64 array[REGION_CNT_MAX * 2]; ++ int count; ++ } *tmp_data; ++ ++ tmp_data = kzalloc(sizeof(*tmp_data), GFP_KERNEL); ++ if (!tmp_data) ++ return -ENOMEM; ++ ++ for (i = 0; i < ARRAY_SIZE(supported_region); i++) { ++ /* search all supported argument */ ++ if (!of_find_property(dev->of_node, supported_region[i].name, ++ NULL)) ++ continue; ++ ++ tmp_data->count = of_property_read_variable_u64_array( ++ dev->of_node, supported_region[i].name, tmp_data->array, ++ 0, REGION_CNT_MAX); ++ if (tmp_data->count <= 0) ++ continue; ++ if ((tmp_data->count & 1) != 0) { ++ dev_err(dev, "size %d of array %s should be even\n", ++ tmp_data->count, supported_region[i].name); ++ continue; ++ } ++ ++ /* two u64 describe one region */ ++ tmp_data->count /= 2; ++ ++ /* prepared reserve region data */ ++ for (j = 0; j < tmp_data->count; j++) { ++ region = iommu_alloc_resv_region( ++ tmp_data->array[j * 2], ++ tmp_data->array[j * 2 + 1], prot, ++ supported_region[i].region_type, ++ GFP_KERNEL); ++ if (!region) { ++ dev_err(dev, "no memory for iova rsv region"); ++ } else { ++ struct iommu_resv_region *walk; ++ /* warn on region overlaps */ ++ list_for_each_entry(walk, rsv_list, list) { ++ phys_addr_t walk_end = ++ walk->start + walk->length; ++ phys_addr_t region_end = ++ region->start + region->length; ++ if (!(walk->start > ++ region->start + ++ region->length || ++ walk->start + walk->length < ++ region->start)) { ++ dev_warn( ++ dev, ++ "overlap on iova-reserve %pap~%pap with %pap~%pap", ++ &walk->start, &walk_end, ++ ®ion->start, ++ ®ion_end); ++ } ++ } ++ list_add_tail(®ion->list, rsv_list); ++ } ++ } ++ } ++ kfree(tmp_data); ++#undef REGION_CNT_MAX ++ ++ return 0; ++} ++ ++static int __init_reserve_mem(struct sunxi_iommu_dev *dev) ++{ ++ return 
bus_for_each_dev(&platform_bus_type, NULL, &dev->rsv_list, ++ sunxi_iommu_check_cmd); ++} ++ ++static const struct iommu_ops sunxi_iommu_ops = { ++ .domain_alloc_paging = sun55i_iommu_domain_alloc_paging, ++ .probe_device = sun55i_iommu_probe_device, ++ .probe_finalize = sun55i_iommu_probe_device_finalize, ++ .release_device = sun55i_iommu_release_device, ++ .device_group = generic_device_group, ++ .of_xlate = sun55i_iommu_of_xlate, ++ .owner = THIS_MODULE, ++ .default_domain_ops = &(const struct iommu_domain_ops) { ++ .attach_dev = sun55i_iommu_attach_dev, ++ .map_pages = sun55i_iommu_map, ++ .unmap_pages = sun55i_iommu_unmap, ++ .iotlb_sync_map = sun55i_iommu_iotlb_sync_map, ++ .iova_to_phys = sun55i_iommu_iova_to_phys, ++ .free = sun55i_iommu_domain_free, ++ } ++}; ++ ++static int sun55i_iommu_probe(struct platform_device *pdev) ++{ ++ int ret, irq; ++ struct device *dev = &pdev->dev; ++ struct sunxi_iommu_dev *sunxi_iommu; ++ struct resource *res; ++ ++ iopte_cache = sunxi_pgtable_alloc_pte_cache(); ++ if (!iopte_cache) { ++ pr_err("%s: Failed to create sunx-iopte-cache.\n", __func__); ++ return -ENOMEM; ++ } ++ ++ sunxi_iommu = devm_kzalloc(dev, sizeof(*sunxi_iommu), GFP_KERNEL); ++ if (!sunxi_iommu) { ++ kmem_cache_destroy(iopte_cache); ++ iopte_cache = NULL; ++ return -ENOMEM; ++ } ++ ++ res = platform_get_resource(pdev, IORESOURCE_MEM, 0); ++ if (!res) { ++ dev_dbg(dev, "Unable to find resource region\n"); ++ ret = -ENOENT; ++ goto err_res; ++ } ++ ++ sunxi_iommu->base = devm_ioremap_resource(&pdev->dev, res); ++ if (IS_ERR(sunxi_iommu->base)) { ++ dev_dbg(dev, "Unable to map IOMEM @ PA:%pa\n", &res->start); ++ ret = PTR_ERR(sunxi_iommu->base); ++ goto err_res; ++ } ++ ++ sunxi_iommu->bypass = DEFAULT_BYPASS_VALUE; ++ ++ irq = platform_get_irq(pdev, 0); ++ if (irq <= 0) { ++ dev_dbg(dev, "Unable to find IRQ resource\n"); ++ ret = -ENOENT; ++ goto err_res; ++ } ++ pr_info("sunxi iommu: irq = %d\n", irq); ++ ++ ret = devm_request_irq(dev, irq, 
sunxi_iommu_irq, 0, ++ dev_name(dev), (void *)sunxi_iommu); ++ if (ret < 0) { ++ dev_dbg(dev, "Unabled to register interrupt handler\n"); ++ goto err_res; ++ } ++ ++ sunxi_iommu->irq = irq; ++ ++ sunxi_iommu->clk = of_clk_get_by_name(dev->of_node, "iommu"); ++ if (IS_ERR(sunxi_iommu->clk)) { ++ sunxi_iommu->clk = NULL; ++ dev_dbg(dev, "Unable to find clock\n"); ++ ret = PTR_ERR(sunxi_iommu->clk); ++ goto err_clk; ++ } ++ clk_prepare_enable(sunxi_iommu->clk); ++ ++ platform_set_drvdata(pdev, sunxi_iommu); ++ sunxi_iommu->dev = dev; ++ spin_lock_init(&sunxi_iommu->iommu_lock); ++ global_iommu_dev = sunxi_iommu; ++ sunxi_iommu->plat_data = of_device_get_match_data(dev); ++ ++ if (sunxi_iommu->plat_data->version != ++ sunxi_iommu_read(sunxi_iommu, IOMMU_VERSION_REG)) { ++ dev_err(dev, "iommu version mismatch, please check and reconfigure\n"); ++ ++ clk_disable_unprepare(sunxi_iommu->clk); ++ ret = -EINVAL; ++ goto err_clk; ++ } ++ ++ sun55i_iommu_sysfs_create(pdev, sunxi_iommu); ++ ret = iommu_device_sysfs_add(&sunxi_iommu->iommu, dev, NULL, ++ dev_name(dev)); ++ if (ret) { ++ dev_err(dev, "Failed to register iommu in sysfs\n"); ++ clk_disable_unprepare(sunxi_iommu->clk); ++ goto err_clk; ++ } ++ ++ ret = iommu_device_register(&sunxi_iommu->iommu, &sunxi_iommu_ops, dev); ++ if (ret) { ++ dev_err(dev, "Failed to register iommu\n"); ++ goto err_sysfs_remove; ++ } ++ ++ INIT_LIST_HEAD(&sunxi_iommu->rsv_list); ++ __init_reserve_mem(sunxi_iommu); ++ ++ sunxi_iommu->identity_domain = sun55i_iommu_domain_alloc_paging(&pdev->dev); ++ if (!sunxi_iommu->identity_domain) { ++ dev_err(dev, "Failed to allocate identity domain\n"); ++ ret = -ENOMEM; ++ goto err_iommu_unregister; ++ } ++ ++ if (!list_empty(&sunxi_iommu->rsv_list)) { ++ struct iommu_resv_region *entry; ++ ++ dev_info(dev, "Mapping %zu reserved regions for identity domain\n", ++ list_count_nodes(&sunxi_iommu->rsv_list)); ++ ++ list_for_each_entry(entry, &sunxi_iommu->rsv_list, list) { ++ size_t size = entry->length; ++ 
phys_addr_t phys = entry->start; ++ ++ if (sun55i_iommu_map(sunxi_iommu->identity_domain, phys, phys, size, 1, entry->prot, GFP_KERNEL, NULL)) { ++ dev_err(dev, "Failed to map reserved region %pa [%zx]\n", ++ &phys, size); ++ } ++ } ++ } ++ ++ if (!dma_dev) { ++ dma_dev = &pdev->dev; ++ sunxi_pgtable_set_dma_dev(dma_dev); ++ } ++ ++ return 0; ++ ++err_iommu_unregister: ++ iommu_device_unregister(&sunxi_iommu->iommu); ++err_sysfs_remove: ++ iommu_device_sysfs_remove(&sunxi_iommu->iommu); ++err_clk: ++ clk_disable_unprepare(sunxi_iommu->clk); ++err_res: ++ sunxi_pgtable_free_pte_cache(iopte_cache); ++ dev_err(dev, "Failed to initialize\n"); ++ ++ return ret; ++} ++ ++static void sun55i_iommu_remove(struct platform_device *pdev) ++{ ++ struct sunxi_iommu_dev *sunxi_iommu = platform_get_drvdata(pdev); ++ struct iommu_resv_region *entry, *next; ++ ++ sunxi_pgtable_free_pte_cache(iopte_cache); ++ if (!list_empty(&sunxi_iommu->rsv_list)) { ++ list_for_each_entry_safe (entry, next, &sunxi_iommu->rsv_list, ++ list) ++ kfree(entry); ++ } ++ devm_free_irq(sunxi_iommu->dev, sunxi_iommu->irq, sunxi_iommu); ++ devm_iounmap(sunxi_iommu->dev, sunxi_iommu->base); ++ sun55i_iommu_sysfs_remove(pdev); ++ iommu_device_sysfs_remove(&sunxi_iommu->iommu); ++ iommu_device_unregister(&sunxi_iommu->iommu); ++ global_iommu_dev = NULL; ++ ++ return; ++} ++ ++static int sun55i_iommu_suspend(struct device *dev) ++{ ++ clk_disable_unprepare(global_iommu_dev->clk); ++ ++ return 0; ++} ++ ++static int sun55i_iommu_resume(struct device *dev) ++{ ++ struct sunxi_iommu_dev *iommu = dev_get_drvdata(dev); ++ ++ clk_prepare_enable(iommu->clk); ++ ++ return sun55i_iommu_hw_init(iommu, NULL); ++} ++ ++static const struct dev_pm_ops sunxi_iommu_pm_ops = { ++ .suspend = sun55i_iommu_suspend, ++ .resume = sun55i_iommu_resume, ++}; ++ ++static const struct sunxi_iommu_plat_data iommu_v15_sun55iw3_data = { ++ .version = 0x15, ++ /* disable preftech to test display rcq bug */ ++ .tlb_prefetch = 0x30000, ++ 
.tlb_invalid_mode = 0x1, ++ .ptw_invalid_mode = 0x1, ++ .master = {"ISP", "CSI", "VE0", "VE1", "G2D", "DE", ++ "DI", "DEBUG_MODE"}, ++}; ++ ++static const struct of_device_id sunxi_iommu_dt_ids[] = { ++ { .compatible = "allwinner,sun55i-a523-iommu", .data = &iommu_v15_sun55iw3_data}, ++ { /* sentinel */ }, ++}; ++ ++static struct platform_driver sunxi_iommu_driver = { ++ .probe = sun55i_iommu_probe, ++ .remove = sun55i_iommu_remove, ++ .driver = { ++ .owner = THIS_MODULE, ++ .name = "sunxi-iommu", ++ .pm = &sunxi_iommu_pm_ops, ++ .of_match_table = sunxi_iommu_dt_ids, ++ } ++}; ++ ++static int __init sunxi_iommu_init(void) ++{ ++ return platform_driver_register(&sunxi_iommu_driver); ++} ++ ++static void __exit sunxi_iommu_exit(void) ++{ ++ return platform_driver_unregister(&sunxi_iommu_driver); ++} ++ ++subsys_initcall(sunxi_iommu_init); ++module_exit(sunxi_iommu_exit); ++ ++MODULE_LICENSE("GPL v2"); ++MODULE_VERSION("1.5.1"); ++MODULE_AUTHOR("huangshuosheng"); ++MODULE_AUTHOR("ouayngkun"); +diff --git a/drivers/iommu/sun55i-iommu.h b/drivers/iommu/sun55i-iommu.h +new file mode 100644 +index 000000000000..111111111111 +--- /dev/null ++++ b/drivers/iommu/sun55i-iommu.h +@@ -0,0 +1,57 @@ ++/* SPDX-License-Identifier: GPL-2.0-or-later */ ++/* Copyright(c) 2020 - 2023 Allwinner Technology Co.,Ltd. All rights reserved. */ ++/* ++ * sunxi iommu: main structures ++ * ++ * Copyright (C) 2008-2009 Nokia Corporation ++ * ++ * Written by Hiroshi DOYU ++ * ++ * This program is free software; you can redistribute it and/or modify ++ * it under the terms of the GNU General Public License version 2 as ++ * published by the Free Software Foundation. 
++ */ ++ ++#include ++#include "sun55i-iommu-pgtable.h" ++ ++//iommu domain have seperate ops ++#define SEPERATE_DOMAIN_API ++//dma-iommu is enclosed into iommu-core ++#define DMA_IOMMU_IN_IOMMU ++//not used anywhere since refactoring ++#define GROUP_NOTIFIER_DEPRECATED ++//iommu now have correct probe order ++//no more need bus set op as workaround ++#define BUS_SET_OP_DEPRECATED ++//dma cookie handled by iommu core, not driver ++#define COOKIE_HANDLE_BY_CORE ++//iommu resv region allocation require gfp flags ++#define RESV_REGION_NEED_GFP_FLAG ++ ++#ifdef DMA_IOMMU_IN_IOMMU ++#include ++/* ++ * by design iommu driver should be part of iommu ++ * and get to it by ../../dma-iommu.h ++ * sunxi bsp have seperate root, use different path ++ * to reach dma-iommu.h ++ */ ++#include <../drivers/iommu/dma-iommu.h> ++#else ++#include ++#endif ++ ++#define MAX_SG_SIZE (128 << 20) ++#define MAX_SG_TABLE_SIZE ((MAX_SG_SIZE / SPAGE_SIZE) * sizeof(u32)) ++#define DUMP_REGION_MAP 0 ++#define DUMP_REGION_RESERVE 1 ++struct dump_region { ++ u32 access_mask; ++ size_t size; ++ u32 type; ++ dma_addr_t phys, iova; ++}; ++struct sunxi_iommu_dev; ++void sun55i_reset_device_iommu(unsigned int master_id); ++void sun55i_enable_device_iommu(struct sunxi_iommu_dev *iommu, unsigned int master_id, bool flag); +diff --git a/include/sunxi-iommu.h b/include/sunxi-iommu.h +new file mode 100644 +index 000000000000..111111111111 +--- /dev/null ++++ b/include/sunxi-iommu.h +@@ -0,0 +1,50 @@ ++/* SPDX-License-Identifier: GPL-2.0-or-later */ ++/* Copyright(c) 2020 - 2023 Allwinner Technology Co.,Ltd. All rights reserved. */ ++/* ++ * ++ * Copyright (C) 2015 AllWinnertech Ltd. ++ * ++ * Author: huangshuosheng ++ * ++ * This program is free software; you can redistribute it and/or modify ++ * it under the terms of the GNU General Public License as published by ++ * the Free Software Foundation; either version 2 of the License, or ++ * (at your option) any later version. 
++ * ++ * This program is distributed in the hope that it will be useful, ++ * but WITHOUT ANY WARRANTY; without even the implied warranty of ++ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the ++ * GNU General Public License for more details. ++ */ ++ ++#ifndef __LINUX_SUNXI_IOMMU_H ++#define __LINUX_SUNXI_IOMMU_H ++#include ++#include ++ ++struct sunxi_iommu_dev; ++typedef void (*sunxi_iommu_fault_cb)(void); ++extern void sun55i_iommu_register_fault_cb(sunxi_iommu_fault_cb cb, unsigned int master_id); ++extern void sun55i_enable_device_iommu(struct sunxi_iommu_dev *iommu, unsigned int master_id, bool flag); ++extern void sun55i_reset_device_iommu(unsigned int master_id); ++ ++enum iommu_dma_cookie_type { ++ IOMMU_DMA_IOVA_COOKIE, ++ IOMMU_DMA_MSI_COOKIE, ++}; ++ ++struct iommu_dma_cookie { ++ enum iommu_dma_cookie_type type; ++ union { ++ /* Full allocator for IOMMU_DMA_IOVA_COOKIE */ ++ struct iova_domain iovad; ++ /* Trivial linear page allocator for IOMMU_DMA_MSI_COOKIE */ ++ dma_addr_t msi_iova; ++ }; ++ struct list_head msi_page_list; ++ ++ /* Domain for flush queue callback; NULL if flush queue not in use */ ++ struct iommu_domain *fq_domain; ++}; ++ ++#endif /* __LINUX_SUNXI_IOMMU_H */ +\ No newline at end of file +-- +Armbian diff --git a/patch/kernel/archive/sunxi-6.18/patches.armbian/drv-pci-sunxi-enable-pcie-support.patch b/patch/kernel/archive/sunxi-6.18/patches.armbian/drv-pci-sunxi-enable-pcie-support.patch new file mode 100644 index 0000000000..def89c547c --- /dev/null +++ b/patch/kernel/archive/sunxi-6.18/patches.armbian/drv-pci-sunxi-enable-pcie-support.patch @@ -0,0 +1,3284 @@ +From 0000000000000000000000000000000000000000 Mon Sep 17 00:00:00 2001 +From: Marvin Wewer +Date: Sun, 14 Dec 2025 11:07:45 +0000 +Subject: pci: sunxi: add sun55i PCIe RC and DMA support + +Signed-off-by: Marvin Wewer +--- + drivers/pci/Kconfig | 1 + + drivers/pci/Makefile | 1 + + drivers/pci/pcie-sunxi/Kconfig | 26 + + drivers/pci/pcie-sunxi/Makefile | 8 + 
+ drivers/pci/pcie-sunxi/pcie-sunxi-dma.c | 198 ++ + drivers/pci/pcie-sunxi/pcie-sunxi-dma.h | 279 +++ + drivers/pci/pcie-sunxi/pcie-sunxi-plat.c | 1233 ++++++++++ + drivers/pci/pcie-sunxi/pcie-sunxi-rc.c | 864 +++++++ + drivers/pci/pcie-sunxi/pcie-sunxi.h | 392 +++ + include/sunxi-gpio.h | 188 ++ + 10 files changed, 3190 insertions(+) + +diff --git a/drivers/pci/Kconfig b/drivers/pci/Kconfig +index 111111111111..222222222222 100644 +--- a/drivers/pci/Kconfig ++++ b/drivers/pci/Kconfig +@@ -321,5 +321,6 @@ source "drivers/pci/controller/Kconfig" + source "drivers/pci/endpoint/Kconfig" + source "drivers/pci/switch/Kconfig" + source "drivers/pci/pwrctrl/Kconfig" ++source "drivers/pci/pcie-sunxi/Kconfig" + + endif +diff --git a/drivers/pci/Makefile b/drivers/pci/Makefile +index 111111111111..222222222222 100644 +--- a/drivers/pci/Makefile ++++ b/drivers/pci/Makefile +@@ -43,5 +43,6 @@ obj-$(CONFIG_PCI_ENDPOINT) += endpoint/ + + obj-y += controller/ + obj-y += switch/ ++obj-y += pcie-sunxi/ + + subdir-ccflags-$(CONFIG_PCI_DEBUG) := -DDEBUG +diff --git a/drivers/pci/pcie-sunxi/Kconfig b/drivers/pci/pcie-sunxi/Kconfig +new file mode 100644 +index 000000000000..111111111111 +--- /dev/null ++++ b/drivers/pci/pcie-sunxi/Kconfig +@@ -0,0 +1,26 @@ ++# SPDX-License-Identifier: GPL-2.0-only ++ ++menu "PCIe Drivers" ++ depends on ARCH_SUNXI ++ ++choice ++ prompt "Allwinner PCIe controller" ++ default PCIE_SUN55I_NONE ++ ++config PCIE_SUN55I_RC ++ bool "Sun55i RC controller - Host mode" ++ depends on ARCH_SUNXI ++ help ++ Enables support for the PCIe RC controller in the Allwinner Sun55i SoC. ++ ++config PCIE_SUN55I_NONE ++ bool "None" ++ depends on ARCH_SUNXI ++ help ++ Disable support for the PCIe controller in the Allwinner Sun55i SoC. 
++ ++endchoice ++ ++endmenu ++ ++ccflags-y += -Idrivers/pci/pcie-sunxi/include +diff --git a/drivers/pci/pcie-sunxi/Makefile b/drivers/pci/pcie-sunxi/Makefile +new file mode 100644 +index 000000000000..111111111111 +--- /dev/null ++++ b/drivers/pci/pcie-sunxi/Makefile +@@ -0,0 +1,8 @@ ++# SPDX-License-Identifier: GPL-2.0 ++ccflag-y += -DDYNAMIC_DEBUG_MODULE ++ ++ccflags-y += -I $(srctree)/drivers/pci/ ++pcie_sunxi_host-objs := pcie-sunxi-rc.o pcie-sunxi-dma.o pcie-sunxi-plat.o ++pcie_sunxi_ep-objs := pcie-sunxi-ep.o pcie-sunxi-dma.o pcie-sunxi-plat.o ++obj-$(CONFIG_PCIE_SUN55I_RC) += pcie_sunxi_host.o ++ +diff --git a/drivers/pci/pcie-sunxi/pcie-sunxi-dma.c b/drivers/pci/pcie-sunxi/pcie-sunxi-dma.c +new file mode 100644 +index 000000000000..111111111111 +--- /dev/null ++++ b/drivers/pci/pcie-sunxi/pcie-sunxi-dma.c +@@ -0,0 +1,198 @@ ++// SPDX-License-Identifier: GPL-2.0 ++/* Copyright(c) 2020 - 2023 Allwinner Technology Co.,Ltd. All rights reserved. */ ++/* ++ * Copyright (C) 2022 Allwinner Co., Ltd. 
++ * ++ * The pcie_dma_chnl_request() is used to apply for pcie DMA channels; ++ * The pcie_dma_mem_xxx() is to initiate DMA read and write operations; ++ * ++ */ ++ ++#define SUNXI_MODNAME "pcie-edma" ++#include ++#include ++#include ++#include ++#include ++#include ++#include ++#include ++#include ++#include ++#include ++#include ++#include ++#include ++#include ++#include ++#include ++#include ++#include ++#include ++#include ++#include "pcie-sunxi-dma.h" ++ ++ ++static struct dma_trx_obj *obj_global; ++ ++sunxi_pci_edma_chan_t *sunxi_pcie_dma_chan_request(enum dma_dir dma_trx, void *cb, void *data) ++{ ++ struct sunxi_pcie *pci = dev_get_drvdata(obj_global->dev); ++ sunxi_pci_edma_chan_t *edma_chan = NULL; ++ u32 free_chan; ++ ++ if (dma_trx == PCIE_DMA_WRITE) { ++ free_chan = find_first_zero_bit(pci->wr_edma_map, pci->num_edma); ++ ++ if (free_chan >= pci->num_edma) { ++ dev_err(pci->dev, "No free pcie edma write channel.\n"); ++ return NULL; ++ } ++ ++ set_bit(free_chan, pci->wr_edma_map); ++ ++ edma_chan = &pci->dma_wr_chn[free_chan]; ++ ++ edma_chan->dma_trx = PCIE_DMA_WRITE; ++ edma_chan->chnl_num = free_chan; ++ edma_chan->callback = cb; ++ edma_chan->callback_param = data; ++ ++ return edma_chan; ++ } else if (dma_trx == PCIE_DMA_READ) { ++ free_chan = find_first_zero_bit(pci->rd_edma_map, pci->num_edma); ++ ++ if (free_chan >= pci->num_edma) { ++ dev_err(pci->dev, "No free pcie edma read channel.\n"); ++ return NULL; ++ } ++ ++ set_bit(free_chan, pci->rd_edma_map); ++ ++ edma_chan = &pci->dma_rd_chn[free_chan]; ++ ++ edma_chan->dma_trx = PCIE_DMA_READ; ++ edma_chan->chnl_num = free_chan; ++ edma_chan->callback = cb; ++ edma_chan->callback_param = data; ++ ++ return edma_chan; ++ } else { ++ dev_err(pci->dev, "ERR: unsupported type:%d \n", dma_trx); ++ } ++ ++ return NULL; ++} ++EXPORT_SYMBOL_GPL(sunxi_pcie_dma_chan_request); ++ ++int sunxi_pcie_dma_chan_release(struct sunxi_pci_edma_chan *edma_chan, enum dma_dir dma_trx) ++{ ++ struct sunxi_pcie *pci = 
dev_get_drvdata(obj_global->dev); ++ ++ if (edma_chan->chnl_num >= pci->num_edma) { ++ dev_err(pci->dev, "ERR: the channel num:%d is error\n", edma_chan->chnl_num); ++ return -1; ++ } ++ ++ if (PCIE_DMA_WRITE == dma_trx) { ++ edma_chan->callback = NULL; ++ edma_chan->callback_param = NULL; ++ clear_bit(edma_chan->chnl_num, pci->wr_edma_map); ++ } else if (PCIE_DMA_READ == dma_trx) { ++ edma_chan->callback = NULL; ++ edma_chan->callback_param = NULL; ++ clear_bit(edma_chan->chnl_num, pci->rd_edma_map); ++ } else { ++ dev_err(pci->dev, "ERR: unsupported type:%d \n", dma_trx); ++ } ++ ++ return 0; ++} ++EXPORT_SYMBOL_GPL(sunxi_pcie_dma_chan_release); ++ ++static int sunxi_pcie_init_edma_map(struct sunxi_pcie *pci) ++{ ++ pci->rd_edma_map = devm_bitmap_zalloc(pci->dev, pci->num_edma, GFP_KERNEL); ++ if (!pci->rd_edma_map) ++ return -ENOMEM; ++ ++ pci->wr_edma_map = devm_bitmap_zalloc(pci->dev, pci->num_edma, GFP_KERNEL); ++ if (!pci->wr_edma_map) ++ return -ENOMEM; ++ ++ return 0; ++} ++ ++int sunxi_pcie_dma_get_chan(struct platform_device *pdev) ++{ ++ struct sunxi_pcie *pci = platform_get_drvdata(pdev); ++ sunxi_pci_edma_chan_t *edma_chan = NULL; ++ int ret, i; ++ ++ ret = of_property_read_u32(pdev->dev.of_node, "num-edma", &pci->num_edma); ++ if (ret) { ++ dev_err(&pdev->dev, "Failed to parse the number of edma\n"); ++ return -EINVAL; ++ } else { ++ ret = sunxi_pcie_init_edma_map(pci); ++ if (ret) ++ return -EINVAL; ++ } ++ ++ pci->dma_wr_chn = devm_kcalloc(&pdev->dev, pci->num_edma, sizeof(sunxi_pci_edma_chan_t), GFP_KERNEL); ++ pci->dma_rd_chn = devm_kcalloc(&pdev->dev, pci->num_edma, sizeof(sunxi_pci_edma_chan_t), GFP_KERNEL); ++ if (!pci->dma_wr_chn || !pci->dma_rd_chn) { ++ dev_err(&pdev->dev, "PCIe edma init failed\n"); ++ return -EINVAL; ++ } ++ ++ for (i = 0; i < pci->num_edma; i++) { ++ edma_chan = &pci->dma_wr_chn[i]; ++ spin_lock_init(&edma_chan->lock); ++ } ++ ++ for (i = 0; i < pci->num_edma; i++) { ++ edma_chan = &pci->dma_rd_chn[i]; ++ 
spin_lock_init(&edma_chan->lock); ++ } ++ ++ return 0; ++} ++ ++struct dma_trx_obj *sunxi_pcie_dma_obj_probe(struct device *dev) ++{ ++ struct dma_trx_obj *obj; ++ ++ obj = devm_kzalloc(dev, sizeof(*obj), GFP_KERNEL); ++ if (!obj) ++ return ERR_PTR(-ENOMEM); ++ ++ obj_global = obj; ++ obj->dev = dev; ++ ++ INIT_LIST_HEAD(&obj->dma_list); ++ spin_lock_init(&obj->dma_list_lock); ++ ++ mutex_init(&obj->count_mutex); ++ ++ return obj; ++} ++EXPORT_SYMBOL_GPL(sunxi_pcie_dma_obj_probe); ++ ++int sunxi_pcie_dma_obj_remove(struct device *dev) ++{ ++ struct platform_device *pdev = to_platform_device(dev); ++ struct sunxi_pcie *pci = platform_get_drvdata(pdev); ++ ++ memset(pci->dma_wr_chn, 0, sizeof(sunxi_pci_edma_chan_t) * pci->num_edma); ++ memset(pci->dma_rd_chn, 0, sizeof(sunxi_pci_edma_chan_t) * pci->num_edma); ++ ++ obj_global->dma_list.next = NULL; ++ obj_global->dma_list.prev = NULL; ++ mutex_destroy(&obj_global->count_mutex); ++ ++ obj_global = NULL; ++ ++ return 0; ++} ++EXPORT_SYMBOL_GPL(sunxi_pcie_dma_obj_remove); +\ No newline at end of file +diff --git a/drivers/pci/pcie-sunxi/pcie-sunxi-dma.h b/drivers/pci/pcie-sunxi/pcie-sunxi-dma.h +new file mode 100644 +index 000000000000..111111111111 +--- /dev/null ++++ b/drivers/pci/pcie-sunxi/pcie-sunxi-dma.h +@@ -0,0 +1,279 @@ ++/* SPDX-License-Identifier: GPL-2.0-or-later */ ++/* Copyright(c) 2020 - 2023 Allwinner Technology Co.,Ltd. All rights reserved. */ ++/* ++ * allwinner PCIe dma driver ++ * ++ * Copyright (C) 2022 allwinner Co., Ltd. ++ * ++ * Author: songjundong ++ * ++ * This program is free software; you can redistribute it and/or modify ++ * it under the terms of the GNU General Public License version 2 as ++ * published by the Free Software Foundation. 
++ */ ++#ifndef _PCIE_SUNXI_DMA_H ++#define _PCIE_SUNXI_DMA_H ++ ++#include ++#include ++ ++#include "pcie-sunxi.h" ++ ++#define PCIE_DMA_TABLE_NUM 8 ++#define PCIE_DMA_TRX_TYPE_NUM 3 ++ ++#define PCIE_WEIGHT 0x1f ++/* ++ * MASK_DONE_CNT_xx and MASK_ABORT_CNT_xx used in dma interrupt ++ */ ++#define MASK_DONE_CNT_WR ((2 << (PCIE_DMA_WR_CHN_CNT - 1)) - 1) ++#define MASK_DONE_CNT_RD ((2 << (PCIE_DMA_RD_CHN_CNT - 1)) - 1) ++ ++#define MASK_ABORD_CNT_WR (((2 << (PCIE_DMA_WR_CHN_CNT - 1)) - 1)) ++#define MASK_ABORD_CNT_RD (((2 << (PCIE_DMA_RD_CHN_CNT - 1)) - 1)) ++ ++#define PCIE_DMA_OFFSET 0x380000 ++ ++#define PCIE_DMA_WR_ENB 0xc ++#define PCIE_DMA_WR_CTRL_LO 0x200 ++#define PCIE_DMA_WR_CTRL_HI 0x204 ++#define PCIE_DMA_WR_XFERSIZE 0x208 ++#define PCIE_DMA_WR_SAR_LO 0x20c ++#define PCIE_DMA_WR_SAR_HI 0x210 ++#define PCIE_DMA_WR_DAR_LO 0x214 ++#define PCIE_DMA_WR_DAR_HI 0x218 ++#define PCIE_DMA_WR_WEILO 0x18 ++#define PCIE_DMA_WR_WEIHI 0x1c ++#define PCIE_DMA_WR_DOORBELL 0x10 ++#define PCIE_DMA_WR_INT_STATUS 0x4c ++#define PCIE_DMA_WR_INT_MASK 0x54 ++#define PCIE_DMA_WR_INT_CLEAR 0x58 ++ ++#define PCIE_DMA_RD_ENB 0x2c ++#define PCIE_DMA_RD_CTRL_LO 0x300 ++#define PCIE_DMA_RD_CTRL_HI 0x304 ++#define PCIE_DMA_RD_XFERSIZE 0x308 ++#define PCIE_DMA_RD_SAR_LO 0x30c ++#define PCIE_DMA_RD_SAR_HI 0x310 ++#define PCIE_DMA_RD_DAR_LO 0x314 ++#define PCIE_DMA_RD_DAR_HI 0x318 ++#define PCIE_DMA_RD_WEILO 0x38 ++#define PCIE_DMA_RD_WEIHI 0x3c ++#define PCIE_DMA_RD_DOORBELL 0x30 ++#define PCIE_DMA_RD_INT_STATUS 0xa0 ++#define PCIE_DMA_RD_INT_MASK 0xa8 ++#define PCIE_DMA_RD_INT_CLEAR 0xac ++ ++#define PCIE_DMA_INT_MASK 0xf000f ++ ++enum dma_dir { ++ PCIE_DMA_WRITE = 0, ++ PCIE_DMA_READ, ++}; ++ ++typedef void (*sunxi_pcie_edma_callback)(void *param); ++ ++typedef struct sunxi_pci_edma_chan { ++ u32 chnl_num; ++ spinlock_t lock; ++ bool cookie; ++ phys_addr_t src_addr; ++ phys_addr_t dst_addr; ++ u32 size; ++ enum dma_dir dma_trx; ++ void *callback_param; ++ sunxi_pcie_edma_callback 
callback; ++} sunxi_pci_edma_chan_t; ++ ++/* ++ * The Channel Control Register for read and write. ++ */ ++union chan_ctrl_lo { ++ struct { ++ u32 cb :1; /* 0 bit */ ++ u32 tcb :1; /* 1 */ ++ u32 llp :1; /* 2 */ ++ u32 lie :1; /* 3 */ ++ u32 rie :1; /* 4 */ ++ u32 cs :2; /* 5:6 */ ++ u32 rsvd1 :1; /* 7 */ ++ u32 ccs :1; /* 8 */ ++ u32 llen :1; /* 9 */ ++ u32 b_64s :1; /* 10 */ ++ u32 b_64d :1; /* 11 */ ++ u32 fn :5; /* 12:16 */ ++ u32 rsvd2 :7; /* 17:23 */ ++ u32 ns :1; /* 24 */ ++ u32 ro :1; /* 25 */ ++ u32 td :1; /* 26 */ ++ u32 tc :3; /* 27:29 */ ++ u32 at :2; /* 30:31 */ ++ }; ++ u32 dword; ++}; ++ ++/* ++ * The Channel Control Register high part for read and write. ++ * Note: depend on CX_SRIOV_ENABLE ++ * Note: Need to confirm the difference between PCIe 2.0 with 3.0 ++ */ ++union chan_ctrl_hi { ++ struct { ++ u32 vfenb :1; /* 0 bit */ ++ u32 vfunc :8; /* 1-8 */ ++ u32 rsvd0 :23; /* 9-31 */ ++ }; ++ u32 dword; ++}; ++ ++struct ctx_reg { ++ union chan_ctrl_lo ctrllo; ++ union chan_ctrl_hi ctrlhi; ++ u32 xfersize; ++ u32 sarptrlo; ++ u32 sarptrhi; ++ u32 darptrlo; ++ u32 darptrhi; ++}; ++ ++/* ++ * The Channel Weight Register for read and write. ++ * ++ * weight_lo->weight0 means set channel 0 ++ * weight_hi->weight0 means set channel 4; ++ * ++ * Example: ++ * write channel #0 weight to 32 ++ * write channel #1 weight to 16 ++ * ++ * Then the DMA will issue 32 MRd requests for #0,followed by 16 MRd requests for #1, ++ * followed by the 32 MRd requests for #0 and so on... ++ */ ++union weight { ++ struct { ++ u32 weight0 :5; /* 0:4 bit */ ++ u32 weight1 :5; /* 5:9 */ ++ u32 weight2 :5; /* 10:14 */ ++ u32 weight3 :5; /* 15:19 */ ++ u32 rsvd :12; /* 20:31 */ ++ }; ++ u32 dword; ++}; ++ ++ ++/* ++ * The Doorbell Register for read and write. ++ * if is read db: you need write 0x0 for that channel ++ * if is write db: you need write channel number for that channel. 
++ */ ++union db { ++ struct { ++ u32 chnl :3; /* 0 bit */ ++ u32 rsvd :28; /* 3:30 */ ++ u32 stop :1; /* 31 */ ++ }; ++ u32 dword; ++}; ++ ++/* ++ * The Enable VIEWPORT Register for read and write. ++ */ ++union enb { ++ struct { ++ u32 enb :1; /* 0 bit */ ++ u32 rsvd :31; /* 1:31 */ ++ }; ++ u32 dword; ++}; ++ ++/* ++ * The Interrupt Status Register for read and write. ++ */ ++union int_status { ++ struct { ++ u32 done :8; /* 0:7 bit */ ++ u32 rsvd0 :8; /* 8:15 */ ++ u32 abort :8; /* 16:23 */ ++ u32 rsvd1 :8; /* 24:31 */ ++ }; ++ u32 dword; ++}; ++ ++/* ++ * The Interrupt Status Register for read and write. ++ */ ++union int_clear { ++ struct { ++ u32 doneclr :8; /* 0:7 bit */ ++ u32 rsvd0 :8; /* 8:15 */ ++ u32 abortclr :8; /* 16:23 */ ++ u32 rsvd1 :8; /* 24:31 */ ++ }; ++ u32 dword; ++}; ++ ++/* ++ * The Context Registers for read and write. ++ */ ++struct ctx_regs { ++ union chan_ctrl_lo ctrllo; ++ union chan_ctrl_hi ctrlhi; ++ u32 xfersize; ++ u32 sarptrlo; ++ u32 sarptrhi; ++ u32 darptrlo; ++ u32 darptrhi; ++}; ++ ++struct dma_table { ++ u32 *descs; ++ int chn; ++ phys_addr_t phys_descs; ++ enum dma_dir dir; ++ u32 type; ++ struct list_head dma_tbl; ++ union enb enb; ++ struct ctx_regs ctx_reg; ++ union weight weilo; ++ union weight weihi; ++ union db start; ++ phys_addr_t local; ++ phys_addr_t bus; ++ size_t size; ++}; ++ ++struct dma_trx_obj { ++ struct device *dev; ++ void *mem_base; ++ phys_addr_t mem_start; ++ size_t mem_size; ++ int dma_free; ++ spinlock_t dma_list_lock; /* lock dma table */ ++ struct list_head dma_list; ++ struct work_struct dma_trx_work; ++ wait_queue_head_t event_queue; ++ struct workqueue_struct *dma_trx_wq; ++ struct dma_table *table[PCIE_DMA_TABLE_NUM]; ++ struct task_struct *scan_thread; ++ struct hrtimer scan_timer; ++ void *priv; ++ struct completion done; ++ int ref_count; ++ struct mutex count_mutex; ++ unsigned long irq_num; ++ struct dentry *pcie_root; ++ struct pcie_misc_dev *pcie_dev; ++ void (*start_dma_trx_func)(struct 
dma_table *table, struct dma_trx_obj *obj); ++ int (*config_dma_trx_func)(struct dma_table *table, phys_addr_t sar_addr, phys_addr_t dar_addr, ++ unsigned int size, enum dma_dir dma_trx, sunxi_pci_edma_chan_t *edma_chn); ++}; ++ ++struct dma_trx_obj *sunxi_pcie_dma_obj_probe(struct device *dev); ++int sunxi_pcie_dma_obj_remove(struct device *dev); ++sunxi_pci_edma_chan_t *sunxi_pcie_dma_chan_request(enum dma_dir dma_trx, void *cb, void *data); ++int sunxi_pcie_dma_chan_release(struct sunxi_pci_edma_chan *edma_chan, enum dma_dir dma_trx); ++ ++ ++int sunxi_pcie_dma_get_chan(struct platform_device *pdev); ++ ++#endif +\ No newline at end of file +diff --git a/drivers/pci/pcie-sunxi/pcie-sunxi-plat.c b/drivers/pci/pcie-sunxi/pcie-sunxi-plat.c +new file mode 100644 +index 000000000000..111111111111 +--- /dev/null ++++ b/drivers/pci/pcie-sunxi/pcie-sunxi-plat.c +@@ -0,0 +1,1233 @@ ++// SPDX-License-Identifier: GPL-2.0 ++/* Copyright(c) 2020 - 2023 Allwinner Technology Co.,Ltd. All rights reserved. */ ++/* ++ * PCIe driver for Allwinner Soc ++ * ++ * Copyright (C) 2022 Allwinner Co., Ltd. ++ * ++ * Author: songjundong ++ * ++ * This program is free software; you can redistribute it and/or modify ++ * it under the terms of the GNU General Public License version 2 as ++ * published by the Free Software Foundation. 
++ */ ++ ++#define SUNXI_MODNAME "pcie" ++#include ++#include ++#include ++#include ++#include ++#include ++#include ++#include ++#include ++#include ++#include ++#include ++#include ++#include ++#include ++#include ++#include ++#include ++#include ++ ++#include "pci.h" ++#include "pcie-sunxi-dma.h" ++#include "pcie-sunxi.h" ++ ++#define SUNXI_PCIE_MODULE_VERSION "1.2.4" ++ ++void sunxi_pcie_writel(u32 val, struct sunxi_pcie *pcie, u32 offset) ++{ ++ writel(val, pcie->app_base + offset); ++} ++ ++u32 sunxi_pcie_readl(struct sunxi_pcie *pcie, u32 offset) ++{ ++ return readl(pcie->app_base + offset); ++} ++ ++void sunxi_pcie_writel_dbi(struct sunxi_pcie *pci, u32 reg, u32 val) ++{ ++ sunxi_pcie_write_dbi(pci, reg, 0x4, val); ++} ++ ++u32 sunxi_pcie_readl_dbi(struct sunxi_pcie *pci, u32 reg) ++{ ++ return sunxi_pcie_read_dbi(pci, reg, 0x4); ++} ++ ++void sunxi_pcie_writew_dbi(struct sunxi_pcie *pci, u32 reg, u16 val) ++{ ++ sunxi_pcie_write_dbi(pci, reg, 0x2, val); ++} ++ ++u16 sunxi_pcie_readw_dbi(struct sunxi_pcie *pci, u32 reg) ++{ ++ return sunxi_pcie_read_dbi(pci, reg, 0x2); ++} ++ ++void sunxi_pcie_writeb_dbi(struct sunxi_pcie *pci, u32 reg, u8 val) ++{ ++ sunxi_pcie_write_dbi(pci, reg, 0x1, val); ++} ++ ++u8 sunxi_pcie_readb_dbi(struct sunxi_pcie *pci, u32 reg) ++{ ++ return sunxi_pcie_read_dbi(pci, reg, 0x1); ++} ++ ++void sunxi_pcie_dbi_ro_wr_en(struct sunxi_pcie *pci) ++{ ++ u32 val; ++ ++ val = sunxi_pcie_readl_dbi(pci, PCIE_MISC_CONTROL_1_CFG); ++ val |= (0x1 << 0); ++ sunxi_pcie_writel_dbi(pci, PCIE_MISC_CONTROL_1_CFG, val); ++} ++ ++void sunxi_pcie_dbi_ro_wr_dis(struct sunxi_pcie *pci) ++{ ++ u32 val; ++ ++ val = sunxi_pcie_readl_dbi(pci, PCIE_MISC_CONTROL_1_CFG); ++ val &= ~(0x1 << 0); ++ sunxi_pcie_writel_dbi(pci, PCIE_MISC_CONTROL_1_CFG, val); ++} ++ ++static void sunxi_pcie_plat_set_mode(struct sunxi_pcie *pci) ++{ ++ u32 val; ++ ++ switch (pci->drvdata->mode) { ++ case SUNXI_PCIE_EP_TYPE: ++ val = sunxi_pcie_readl(pci, PCIE_LTSSM_CTRL); ++ val &= 
~DEVICE_TYPE_MASK; ++ sunxi_pcie_writel(val, pci, PCIE_LTSSM_CTRL); ++ break; ++ case SUNXI_PCIE_RC_TYPE: ++ val = sunxi_pcie_readl(pci, PCIE_LTSSM_CTRL); ++ val |= DEVICE_TYPE_RC; ++ sunxi_pcie_writel(val, pci, PCIE_LTSSM_CTRL); ++ break; ++ default: ++ dev_err(pci->dev, "unsupported device type:%d\n", pci->drvdata->mode); ++ break; ++ } ++} ++ ++static u8 __sunxi_pcie_find_next_cap(struct sunxi_pcie *pci, u8 cap_ptr, ++ u8 cap) ++{ ++ u8 cap_id, next_cap_ptr; ++ u16 reg; ++ ++ if (!cap_ptr) ++ return 0; ++ ++ reg = sunxi_pcie_readw_dbi(pci, cap_ptr); ++ cap_id = (reg & CAP_ID_MASK); ++ ++ if (cap_id > PCI_CAP_ID_MAX) ++ return 0; ++ ++ if (cap_id == cap) ++ return cap_ptr; ++ ++ next_cap_ptr = (reg & NEXT_CAP_PTR_MASK) >> 8; ++ return __sunxi_pcie_find_next_cap(pci, next_cap_ptr, cap); ++} ++ ++u8 sunxi_pcie_plat_find_capability(struct sunxi_pcie *pci, u8 cap) ++{ ++ u8 next_cap_ptr; ++ u16 reg; ++ ++ reg = sunxi_pcie_readw_dbi(pci, PCI_CAPABILITY_LIST); ++ next_cap_ptr = (reg & CAP_ID_MASK); ++ ++ return __sunxi_pcie_find_next_cap(pci, next_cap_ptr, cap); ++} ++ ++int sunxi_pcie_cfg_read(void __iomem *addr, int size, u32 *val) ++{ ++ if ((uintptr_t)addr & (size - 1)) { ++ *val = 0; ++ return PCIBIOS_BAD_REGISTER_NUMBER; ++ } ++ ++ if (size == 4) { ++ *val = readl(addr); ++ } else if (size == 2) { ++ *val = readw(addr); ++ } else if (size == 1) { ++ *val = readb(addr); ++ } else { ++ *val = 0; ++ return PCIBIOS_BAD_REGISTER_NUMBER; ++ } ++ ++ return PCIBIOS_SUCCESSFUL; ++} ++EXPORT_SYMBOL_GPL(sunxi_pcie_cfg_read); ++ ++int sunxi_pcie_cfg_write(void __iomem *addr, int size, u32 val) ++{ ++ if ((uintptr_t)addr & (size - 1)) ++ return PCIBIOS_BAD_REGISTER_NUMBER; ++ ++ if (size == 4) ++ writel(val, addr); ++ else if (size == 2) ++ writew(val, addr); ++ else if (size == 1) ++ writeb(val, addr); ++ else ++ return PCIBIOS_BAD_REGISTER_NUMBER; ++ ++ return PCIBIOS_SUCCESSFUL; ++} ++EXPORT_SYMBOL_GPL(sunxi_pcie_cfg_write); ++ ++void sunxi_pcie_write_dbi(struct sunxi_pcie 
*pci, u32 reg, size_t size, u32 val) ++{ ++ int ret; ++ ++ ret = sunxi_pcie_cfg_write(pci->dbi_base + reg, size, val); ++ if (ret) ++ dev_err(pci->dev, "Write DBI address failed\n"); ++} ++EXPORT_SYMBOL_GPL(sunxi_pcie_write_dbi); ++ ++u32 sunxi_pcie_read_dbi(struct sunxi_pcie *pci, u32 reg, size_t size) ++{ ++ int ret; ++ u32 val; ++ ++ ret = sunxi_pcie_cfg_read(pci->dbi_base + reg, size, &val); ++ if (ret) ++ dev_err(pci->dev, "Read DBI address failed\n"); ++ ++ return val; ++} ++EXPORT_SYMBOL_GPL(sunxi_pcie_read_dbi); ++ ++static void sunxi_pcie_plat_set_link_cap(struct sunxi_pcie *pci, u32 link_gen) ++{ ++ u32 cap, ctrl2, link_speed; ++ ++ u8 offset = sunxi_pcie_plat_find_capability(pci, PCI_CAP_ID_EXP); ++ ++ cap = sunxi_pcie_readl_dbi(pci, offset + PCI_EXP_LNKCAP); ++ ctrl2 = sunxi_pcie_readl_dbi(pci, offset + PCI_EXP_LNKCTL2); ++ ctrl2 &= ~PCI_EXP_LNKCTL2_TLS; ++ ++ switch (pcie_link_speed[link_gen]) { ++ case PCIE_SPEED_2_5GT: ++ link_speed = PCI_EXP_LNKCTL2_TLS_2_5GT; ++ break; ++ case PCIE_SPEED_5_0GT: ++ link_speed = PCI_EXP_LNKCTL2_TLS_5_0GT; ++ break; ++ case PCIE_SPEED_8_0GT: ++ link_speed = PCI_EXP_LNKCTL2_TLS_8_0GT; ++ break; ++ case PCIE_SPEED_16_0GT: ++ link_speed = PCI_EXP_LNKCTL2_TLS_16_0GT; ++ break; ++ default: ++ /* Use hardware capability */ ++ link_speed = FIELD_GET(PCI_EXP_LNKCAP_SLS, cap); ++ ctrl2 &= ~PCI_EXP_LNKCTL2_HASD; ++ break; ++ } ++ ++ sunxi_pcie_writel_dbi(pci, offset + PCI_EXP_LNKCTL2, ctrl2 | link_speed); ++ ++ cap &= ~((u32)PCI_EXP_LNKCAP_SLS); ++ sunxi_pcie_writel_dbi(pci, offset + PCI_EXP_LNKCAP, cap | link_speed); ++} ++ ++void sunxi_pcie_plat_set_rate(struct sunxi_pcie *pci) ++{ ++ u32 val; ++ ++ sunxi_pcie_plat_set_link_cap(pci, pci->link_gen); ++ /* set the number of lanes */ ++ val = sunxi_pcie_readl_dbi(pci, PCIE_PORT_LINK_CONTROL); ++ val &= ~PORT_LINK_MODE_MASK; ++ switch (pci->lanes) { ++ case 1: ++ val |= PORT_LINK_MODE_1_LANES; ++ break; ++ case 2: ++ val |= PORT_LINK_MODE_2_LANES; ++ break; ++ case 4: ++ val |= 
PORT_LINK_MODE_4_LANES; ++ break; ++ default: ++ dev_err(pci->dev, "num-lanes %u: invalid value\n", pci->lanes); ++ return; ++ } ++ sunxi_pcie_writel_dbi(pci, PCIE_PORT_LINK_CONTROL, val); ++ ++ /* set link width speed control register */ ++ val = sunxi_pcie_readl_dbi(pci, PCIE_LINK_WIDTH_SPEED_CONTROL); ++ val &= ~PORT_LOGIC_LINK_WIDTH_MASK; ++ switch (pci->lanes) { ++ case 1: ++ val |= PORT_LOGIC_LINK_WIDTH_1_LANES; ++ break; ++ case 2: ++ val |= PORT_LOGIC_LINK_WIDTH_2_LANES; ++ break; ++ case 4: ++ val |= PORT_LOGIC_LINK_WIDTH_4_LANES; ++ break; ++ } ++ sunxi_pcie_writel_dbi(pci, PCIE_LINK_WIDTH_SPEED_CONTROL, val); ++} ++EXPORT_SYMBOL_GPL(sunxi_pcie_plat_set_rate); ++ ++static unsigned int sunxi_pcie_ep_func_conf_select(struct sunxi_pcie_ep *ep, ++ u8 func_no) ++{ ++ struct sunxi_pcie *pcie = to_sunxi_pcie_from_ep(ep); ++ ++ WARN_ON(func_no && !pcie->drvdata->func_offset); ++ return pcie->drvdata->func_offset * func_no; ++} ++ ++static const struct sunxi_pcie_ep_ops sunxi_ep_ops = { ++ .func_conf_select = sunxi_pcie_ep_func_conf_select, ++}; ++ ++static const struct sunxi_pcie_of_data sunxi_pcie_rc_v210_of_data = { ++ .mode = SUNXI_PCIE_RC_TYPE, ++ .cpu_pcie_addr_quirk = true, ++}; ++ ++static const struct sunxi_pcie_of_data sunxi_pcie_rc_v210_v2_of_data = { ++ .mode = SUNXI_PCIE_RC_TYPE, ++ .has_pcie_slv_clk = true, ++ .need_pcie_rst = true, ++}; ++ ++static const struct sunxi_pcie_of_data sunxi_pcie_rc_v210_v3_of_data = { ++ .mode = SUNXI_PCIE_RC_TYPE, ++ .has_pcie_slv_clk = true, ++ .need_pcie_rst = true, ++}; ++ ++static const struct sunxi_pcie_of_data sunxi_pcie_rc_v300_of_data = { ++ .mode = SUNXI_PCIE_RC_TYPE, ++ .has_pcie_slv_clk = true, ++ .need_pcie_rst = true, ++ .pcie_slv_clk_400m = true, ++ .has_pcie_its_clk = true, ++}; ++ ++static const struct sunxi_pcie_of_data sunxi_pcie_ep_v210_of_data = { ++ .mode = SUNXI_PCIE_EP_TYPE, ++ .func_offset = 0x10000, ++ .ops = &sunxi_ep_ops, ++ .has_pcie_slv_clk = true, ++ .need_pcie_rst = true, ++}; ++ ++static 
const struct sunxi_pcie_of_data sunxi_pcie_ep_v300_of_data = { ++ .mode = SUNXI_PCIE_EP_TYPE, ++ .func_offset = 0x10000, ++ .ops = &sunxi_ep_ops, ++}; ++ ++static const struct of_device_id sunxi_pcie_plat_of_match[] = { ++ { ++ .compatible = "allwinner,sunxi-pcie-v210-rc", ++ .data = &sunxi_pcie_rc_v210_of_data, ++ }, ++ { ++ .compatible = "allwinner,sunxi-pcie-v210-v2-rc", ++ .data = &sunxi_pcie_rc_v210_v2_of_data, ++ }, ++ { ++ .compatible = "allwinner,sunxi-pcie-v210-v3-rc", ++ .data = &sunxi_pcie_rc_v210_v3_of_data, ++ }, ++ { ++ .compatible = "allwinner,sunxi-pcie-v210-ep", ++ .data = &sunxi_pcie_ep_v210_of_data, ++ }, ++ { ++ .compatible = "allwinner,sunxi-pcie-v300-rc", ++ .data = &sunxi_pcie_rc_v300_of_data, ++ }, ++ { ++ .compatible = "allwinner,sunxi-pcie-v300-ep", ++ .data = &sunxi_pcie_ep_v300_of_data, ++ }, ++ {}, ++}; ++MODULE_DEVICE_TABLE(of, sunxi_pcie_plat_of_match); ++ ++void sunxi_pcie_plat_ltssm_enable(struct sunxi_pcie *pcie) ++{ ++ u32 val; ++ ++ val = sunxi_pcie_readl(pcie, PCIE_LTSSM_CTRL); ++ val |= PCIE_LINK_TRAINING; ++ sunxi_pcie_writel(val, pcie, PCIE_LTSSM_CTRL); ++} ++EXPORT_SYMBOL_GPL(sunxi_pcie_plat_ltssm_enable); ++ ++void sunxi_pcie_plat_ltssm_disable(struct sunxi_pcie *pcie) ++{ ++ u32 val; ++ ++ val = sunxi_pcie_readl(pcie, PCIE_LTSSM_CTRL); ++ val &= ~PCIE_LINK_TRAINING; ++ sunxi_pcie_writel(val, pcie, PCIE_LTSSM_CTRL); ++} ++EXPORT_SYMBOL_GPL(sunxi_pcie_plat_ltssm_disable); ++ ++static void sunxi_pcie_plat_irqpending(struct sunxi_pcie_port *pp) ++{ ++ struct sunxi_pcie *pcie = to_sunxi_pcie_from_pp(pp); ++ u32 val; ++ ++ val = sunxi_pcie_readl(pcie, PCIE_INT_ENABLE_CLR); ++ val &= ~PCIE_LINK_INT_EN; ++ sunxi_pcie_writel(val, pcie, PCIE_INT_ENABLE_CLR); ++} ++ ++static void sunxi_pcie_plat_set_irqmask(struct sunxi_pcie *pci) ++{ ++ u32 val; ++ ++ val = sunxi_pcie_readl(pci, PCIE_INT_ENABLE_CLR); ++ val |= PCIE_LINK_INT_EN; ++ sunxi_pcie_writel(val, pci, PCIE_INT_ENABLE_CLR); ++} ++ ++static int sunxi_pcie_plat_power_on(struct 
sunxi_pcie *pci) ++{ ++ struct device *dev = pci->dev; ++ int ret = 0; ++ ++ if (!IS_ERR(pci->pcie3v3)) { ++ ret = regulator_enable(pci->pcie3v3); ++ if (ret) ++ dev_err(dev, "failed to enable pcie3v3 regulator\n"); ++ } ++ ++ return ret; ++} ++ ++static void sunxi_pcie_plat_power_off(struct sunxi_pcie *pci) ++{ ++ if (!IS_ERR(pci->pcie3v3)) ++ regulator_disable(pci->pcie3v3); ++} ++ ++static int sunxi_pcie_plat_clk_setup(struct sunxi_pcie *pci) ++{ ++ int ret; ++ ++ if (pci->drvdata->need_pcie_rst) { ++ ret = reset_control_deassert(pci->pcie_rst); ++ if (ret) { ++ dev_err(pci->dev, "cannot reset pcie\n"); ++ return ret; ++ } ++ ++ ret = reset_control_deassert(pci->pwrup_rst); ++ if (ret) { ++ dev_err(pci->dev, "cannot pwrup_reset pcie\n"); ++ goto err0; ++ } ++ } ++ ++ ret = clk_prepare_enable(pci->pcie_aux); ++ if (ret) { ++ dev_err(pci->dev, "cannot prepare/enable aux clock\n"); ++ goto err1; ++ } ++ ++ if (pci->drvdata->has_pcie_slv_clk) { ++ if (pci->drvdata->pcie_slv_clk_400m) { ++ ret = clk_set_rate(pci->pcie_slv, 400000000); ++ if (ret) { ++ dev_err(pci->dev, "cannot set slv clock\n"); ++ goto err2; ++ } ++ } ++ ret = clk_prepare_enable(pci->pcie_slv); ++ if (ret) { ++ dev_err(pci->dev, "cannot prepare/enable slv clock\n"); ++ goto err2; ++ } ++ } ++ ++ if (pci->drvdata->has_pcie_its_clk) { ++ ret = reset_control_deassert(pci->pcie_its_rst); ++ if (ret) { ++ dev_err(pci->dev, "cannot reset pcie its\n"); ++ goto err3; ++ } ++ ++ ret = clk_prepare_enable(pci->pcie_its); ++ if (ret) { ++ dev_err(pci->dev, "cannot prepare/enable its clock\n"); ++ goto err4; ++ } ++ } ++ ++ return 0; ++err4: ++ if (pci->drvdata->has_pcie_its_clk) ++ reset_control_assert(pci->pcie_its_rst); ++err3: ++ if (pci->drvdata->has_pcie_slv_clk) ++ clk_disable_unprepare(pci->pcie_slv); ++err2: ++ clk_disable_unprepare(pci->pcie_aux); ++err1: ++ if (pci->drvdata->need_pcie_rst) ++ reset_control_assert(pci->pwrup_rst); ++err0: ++ if (pci->drvdata->need_pcie_rst) ++ 
reset_control_assert(pci->pcie_rst); ++ ++ return ret; ++} ++ ++static void sunxi_pcie_plat_clk_exit(struct sunxi_pcie *pci) ++{ ++ if (pci->drvdata->has_pcie_its_clk) { ++ clk_disable_unprepare(pci->pcie_its); ++ reset_control_assert(pci->pcie_its_rst); ++ } ++ ++ if (pci->drvdata->has_pcie_slv_clk) ++ clk_disable_unprepare(pci->pcie_slv); ++ ++ clk_disable_unprepare(pci->pcie_aux); ++ ++ if (pci->drvdata->need_pcie_rst) { ++ reset_control_assert(pci->pcie_rst); ++ reset_control_assert(pci->pwrup_rst); ++ } ++} ++ ++static int sunxi_pcie_plat_clk_get(struct platform_device *pdev, struct sunxi_pcie *pci) ++{ ++ pci->pcie_aux = devm_clk_get(&pdev->dev, "pclk_aux"); ++ if (IS_ERR(pci->pcie_aux)) { ++ dev_err(&pdev->dev, "fail to get pclk_aux\n"); ++ return PTR_ERR(pci->pcie_aux); ++ } ++ ++ if (pci->drvdata->has_pcie_slv_clk) { ++ pci->pcie_slv = devm_clk_get(&pdev->dev, "pclk_slv"); ++ if (IS_ERR(pci->pcie_slv)) { ++ dev_err(&pdev->dev, "fail to get pclk_slv\n"); ++ return PTR_ERR(pci->pcie_slv); ++ } ++ } ++ ++ if (pci->drvdata->need_pcie_rst) { ++ pci->pcie_rst = devm_reset_control_get(&pdev->dev, "pclk_rst"); ++ if (IS_ERR(pci->pcie_rst)) { ++ dev_err(&pdev->dev, "fail to get pclk_rst\n"); ++ return PTR_ERR(pci->pcie_rst); ++ } ++ ++ pci->pwrup_rst = devm_reset_control_get(&pdev->dev, "pwrup_rst"); ++ if (IS_ERR(pci->pwrup_rst)) { ++ dev_err(&pdev->dev, "fail to get pwrup_rst\n"); ++ return PTR_ERR(pci->pwrup_rst); ++ } ++ } ++ ++ if (pci->drvdata->has_pcie_its_clk) { ++ pci->pcie_its = devm_clk_get(&pdev->dev, "its"); ++ if (IS_ERR(pci->pcie_its)) { ++ dev_err(&pdev->dev, "fail to get its clk\n"); ++ return PTR_ERR(pci->pcie_its); ++ } ++ ++ pci->pcie_its_rst = devm_reset_control_get(&pdev->dev, "its"); ++ if (IS_ERR(pci->pcie_its_rst)) { ++ dev_err(&pdev->dev, "fail to get its rst\n"); ++ return PTR_ERR(pci->pcie_its_rst); ++ } ++ } ++ return 0; ++} ++ ++static int sunxi_pcie_plat_combo_phy_init(struct sunxi_pcie *pci) ++{ ++ int ret; ++ ++ ret = 
phy_init(pci->phy); ++ if (ret) { ++ dev_err(pci->dev, "fail to init phy, err %d\n", ret); ++ return ret; ++ } ++ ++ return 0; ++} ++ ++static void sunxi_pcie_plat_combo_phy_deinit(struct sunxi_pcie *pci) ++{ ++ phy_exit(pci->phy); ++} ++ ++static void sunxi_pcie_plat_sii_int0_handler(struct sunxi_pcie_port *pp) ++{ ++ struct sunxi_pcie *pci = to_sunxi_pcie_from_pp(pp); ++ u32 mask, stas, irq; ++ ++ mask = sunxi_pcie_readl(pci, SII_INT_MASK0); ++ stas = sunxi_pcie_readl(pci, SII_INT_STAS0); ++ irq = mask & stas; ++ ++ if (irq & INTX_RX_ASSERT_MASK) { ++ unsigned long status = irq & INTX_RX_ASSERT_MASK; ++ u32 bit = INTX_RX_ASSERT_SHIFT; ++ for_each_set_bit_from(bit, &status, PCI_NUM_INTX + INTX_RX_ASSERT_SHIFT) { ++ /* Clear INTx status */ ++ sunxi_pcie_writel(BIT(bit), pci, SII_INT_STAS0); ++ generic_handle_domain_irq(pp->intx_domain, bit - INTX_RX_ASSERT_SHIFT); ++ } ++ } ++} ++ ++static irqreturn_t sunxi_pcie_plat_sii_handler(int irq, void *arg) ++{ ++ struct sunxi_pcie_port *pp = (struct sunxi_pcie_port *)arg; ++ ++ sunxi_pcie_plat_sii_int0_handler(pp); ++ ++ sunxi_pcie_plat_irqpending(pp); ++ ++ return IRQ_HANDLED; ++} ++ ++static void sunxi_pcie_plat_dma_handle_interrupt(struct sunxi_pcie *pci, u32 ch, enum dma_dir dma_trx) ++{ ++ sunxi_pci_edma_chan_t *edma_chan = NULL; ++ sunxi_pcie_edma_callback cb = NULL; ++ void *cb_data = NULL; ++ ++ if (dma_trx == PCIE_DMA_WRITE) { ++ edma_chan = &pci->dma_wr_chn[ch]; ++ cb = edma_chan->callback; ++ cb_data = edma_chan->callback_param; ++ if (cb) ++ cb(cb_data); ++ } else if (dma_trx == PCIE_DMA_READ) { ++ edma_chan = &pci->dma_rd_chn[ch]; ++ cb = edma_chan->callback; ++ cb_data = edma_chan->callback_param; ++ if (cb) ++ cb(cb_data); ++ } else { ++ dev_err(pci->dev, "ERR: unsupported type:%d \n", dma_trx); ++ } ++ ++ if (edma_chan->cookie) ++ sunxi_pcie_dma_chan_release(edma_chan, dma_trx); ++} ++ ++#define SUNXI_PCIE_DMA_IRQ_HANDLER(name, chn, dir) \ ++static irqreturn_t sunxi_pcie_##name##_irq_handler \ ++ (int irq, 
void *arg) \ ++{ \ ++ struct sunxi_pcie *pci = arg; \ ++ union int_status sta = {0}; \ ++ union int_clear clr = {0}; \ ++ \ ++ sta.dword = sunxi_pcie_readl_dbi(pci, PCIE_DMA_OFFSET + \ ++ (dir ? PCIE_DMA_RD_INT_STATUS : PCIE_DMA_WR_INT_STATUS)); \ ++ \ ++ if (sta.done & BIT(chn)) { \ ++ clr.doneclr = BIT(chn); \ ++ sunxi_pcie_writel_dbi(pci, PCIE_DMA_OFFSET + \ ++ (dir ? PCIE_DMA_RD_INT_CLEAR : PCIE_DMA_WR_INT_CLEAR), clr.dword);\ ++ sunxi_pcie_plat_dma_handle_interrupt(pci, chn, dir); \ ++ } \ ++ \ ++ if (sta.abort & BIT(chn)) { \ ++ clr.abortclr = BIT(chn); \ ++ sunxi_pcie_writel_dbi(pci, PCIE_DMA_OFFSET + \ ++ (dir ? PCIE_DMA_RD_INT_CLEAR : PCIE_DMA_WR_INT_CLEAR), clr.dword);\ ++ dev_err(pci->dev, "DMA %s channel %d is abort\n", \ ++ dir ? "read":"write", chn); \ ++ } \ ++ \ ++ return IRQ_HANDLED; \ ++} ++ ++SUNXI_PCIE_DMA_IRQ_HANDLER(dma_w0, 0, PCIE_DMA_WRITE) ++SUNXI_PCIE_DMA_IRQ_HANDLER(dma_w1, 1, PCIE_DMA_WRITE) ++SUNXI_PCIE_DMA_IRQ_HANDLER(dma_w2, 2, PCIE_DMA_WRITE) ++SUNXI_PCIE_DMA_IRQ_HANDLER(dma_w3, 3, PCIE_DMA_WRITE) ++ ++SUNXI_PCIE_DMA_IRQ_HANDLER(dma_r0, 0, PCIE_DMA_READ) ++SUNXI_PCIE_DMA_IRQ_HANDLER(dma_r1, 1, PCIE_DMA_READ) ++SUNXI_PCIE_DMA_IRQ_HANDLER(dma_r2, 2, PCIE_DMA_READ) ++SUNXI_PCIE_DMA_IRQ_HANDLER(dma_r3, 3, PCIE_DMA_READ) ++ ++static void sunxi_pcie_plat_dma_read(struct sunxi_pcie *pci, struct dma_table *table) ++{ ++ int offset = PCIE_DMA_OFFSET + table->start.chnl * 0x200; ++ ++ sunxi_pcie_writel_dbi(pci, PCIE_DMA_OFFSET + PCIE_DMA_RD_ENB, ++ table->enb.dword); ++ sunxi_pcie_writel_dbi(pci, offset + PCIE_DMA_RD_CTRL_LO, ++ table->ctx_reg.ctrllo.dword); ++ sunxi_pcie_writel_dbi(pci, offset + PCIE_DMA_RD_CTRL_HI, ++ table->ctx_reg.ctrlhi.dword); ++ sunxi_pcie_writel_dbi(pci, offset + PCIE_DMA_RD_XFERSIZE, ++ table->ctx_reg.xfersize); ++ sunxi_pcie_writel_dbi(pci, offset + PCIE_DMA_RD_SAR_LO, ++ table->ctx_reg.sarptrlo); ++ sunxi_pcie_writel_dbi(pci, offset + PCIE_DMA_RD_SAR_HI, ++ table->ctx_reg.sarptrhi); ++ sunxi_pcie_writel_dbi(pci, 
offset + PCIE_DMA_RD_DAR_LO, ++ table->ctx_reg.darptrlo); ++ sunxi_pcie_writel_dbi(pci, offset + PCIE_DMA_RD_DAR_HI, ++ table->ctx_reg.darptrhi); ++ sunxi_pcie_writel_dbi(pci, offset + PCIE_DMA_RD_WEILO, ++ table->weilo.dword); ++ sunxi_pcie_writel_dbi(pci, PCIE_DMA_OFFSET + PCIE_DMA_RD_DOORBELL, ++ table->start.dword); ++} ++ ++static void sunxi_pcie_plat_dma_write(struct sunxi_pcie *pci, struct dma_table *table) ++{ ++ int offset = PCIE_DMA_OFFSET + table->start.chnl * 0x200; ++ ++ sunxi_pcie_writel_dbi(pci, PCIE_DMA_OFFSET + PCIE_DMA_WR_ENB, ++ table->enb.dword); ++ sunxi_pcie_writel_dbi(pci, offset + PCIE_DMA_WR_CTRL_LO, ++ table->ctx_reg.ctrllo.dword); ++ sunxi_pcie_writel_dbi(pci, offset + PCIE_DMA_WR_CTRL_HI, ++ table->ctx_reg.ctrlhi.dword); ++ sunxi_pcie_writel_dbi(pci, offset + PCIE_DMA_WR_XFERSIZE, ++ table->ctx_reg.xfersize); ++ sunxi_pcie_writel_dbi(pci, offset + PCIE_DMA_WR_SAR_LO, ++ table->ctx_reg.sarptrlo); ++ sunxi_pcie_writel_dbi(pci, offset + PCIE_DMA_WR_SAR_HI, ++ table->ctx_reg.sarptrhi); ++ sunxi_pcie_writel_dbi(pci, offset + PCIE_DMA_WR_DAR_LO, ++ table->ctx_reg.darptrlo); ++ sunxi_pcie_writel_dbi(pci, offset + PCIE_DMA_WR_DAR_HI, ++ table->ctx_reg.darptrhi); ++ sunxi_pcie_writel_dbi(pci, offset + PCIE_DMA_WR_WEILO, ++ table->weilo.dword); ++ sunxi_pcie_writel_dbi(pci, PCIE_DMA_OFFSET + PCIE_DMA_WR_DOORBELL, ++ table->start.dword); ++} ++ ++/* ++ * DMA controller: I/O and Type 0 or Type 1 configuration DMA ++ * transfers are not supported. 
++ * Transfer size: 1B - 4GB ++ */ ++static void sunxi_pcie_plat_dma_start(struct dma_table *table, struct dma_trx_obj *obj) ++{ ++ struct sunxi_pcie *pci = dev_get_drvdata(obj->dev); ++ ++ if (table->dir == PCIE_DMA_READ) { ++ sunxi_pcie_plat_dma_read(pci, table); ++ } else if (table->dir == PCIE_DMA_WRITE) { ++ sunxi_pcie_plat_dma_write(pci, table); ++ } ++} ++ ++static int sunxi_pcie_plat_dma_config(struct dma_table *table, phys_addr_t src_addr, phys_addr_t dst_addr, ++ unsigned int size, enum dma_dir dma_trx, sunxi_pci_edma_chan_t *edma_chn) ++{ ++ sunxi_pci_edma_chan_t *chn = NULL; ++ ++ table->ctx_reg.ctrllo.lie = 0x1; ++ table->ctx_reg.ctrllo.rie = 0x0; ++ table->ctx_reg.ctrllo.td = 0x1; ++ table->ctx_reg.ctrlhi.dword = 0x0; ++ table->ctx_reg.xfersize = size; ++ table->ctx_reg.sarptrlo = (u32)(src_addr & 0xffffffff); ++ table->ctx_reg.sarptrhi = (u32)(src_addr >> 32); ++ table->ctx_reg.darptrlo = (u32)(dst_addr & 0xffffffff); ++ table->ctx_reg.darptrhi = (u32)(dst_addr >> 32); ++ table->start.stop = 0x0; ++ table->dir = dma_trx; ++ ++ if (!edma_chn) { ++ chn = (sunxi_pci_edma_chan_t *)sunxi_pcie_dma_chan_request(dma_trx, NULL, NULL); ++ if (!chn) { ++ dev_err(NULL, "pcie request %s channel error! \n", (dma_trx ? 
"DMA_READ" : "DMA_WRITE")); ++ return -ENOMEM; ++ } ++ ++ chn->cookie = true; ++ table->start.chnl = chn->chnl_num; ++ table->weilo.dword = (PCIE_WEIGHT << (5 * chn->chnl_num)); ++ } else { ++ table->start.chnl = edma_chn->chnl_num; ++ table->weilo.dword = (PCIE_WEIGHT << (5 * edma_chn->chnl_num)); ++ } ++ ++ table->enb.enb = 0x1; ++ return 0; ++} ++ ++static int sunxi_pcie_plat_request_irq(struct sunxi_pcie *sunxi_pcie, struct platform_device *pdev) ++{ ++ int irq, ret; ++ struct sunxi_pcie *pci = platform_get_drvdata(pdev); ++ struct sunxi_pcie_port *pp = &pci->pp; ++ ++ irq = platform_get_irq_byname(pdev, "sii"); ++ if (irq < 0) ++ return -EINVAL; ++ ++ if (!pp->has_its) { ++ irq = platform_get_irq_byname(pdev, "sii"); ++ if (irq < 0) ++ return -EINVAL; ++ ++ ret = devm_request_irq(&pdev->dev, irq, ++ sunxi_pcie_plat_sii_handler, IRQF_SHARED, "pcie-sii", &sunxi_pcie->pp); ++ if (ret) { ++ dev_err(&pdev->dev, "PCIe failed to request linkup IRQ\n"); ++ return ret; ++ } ++ } ++ ++ ret = sunxi_pcie_dma_get_chan(pdev); ++ if (ret) ++ return -EINVAL; ++ ++ switch (sunxi_pcie->num_edma) { ++ case 4: ++ irq = platform_get_irq_byname(pdev, "edma-w3"); ++ if (irq < 0) ++ return -EINVAL; ++ ++ ret = devm_request_irq(&pdev->dev, irq, sunxi_pcie_dma_w3_irq_handler, ++ IRQF_SHARED, "pcie-dma-w3", sunxi_pcie); ++ if (ret) { ++ dev_err(&pdev->dev, "failed to request PCIe DMA IRQ\n"); ++ return ret; ++ } ++ ++ irq = platform_get_irq_byname(pdev, "edma-r3"); ++ if (irq < 0) ++ return -EINVAL; ++ ++ ret = devm_request_irq(&pdev->dev, irq, sunxi_pcie_dma_r3_irq_handler, ++ IRQF_SHARED, "pcie-dma-r3", sunxi_pcie); ++ if (ret) { ++ dev_err(&pdev->dev, "failed to request PCIe DMA IRQ\n"); ++ return ret; ++ } ++ ++ fallthrough; ++ case 3: ++ irq = platform_get_irq_byname(pdev, "edma-w2"); ++ if (irq < 0) ++ return -EINVAL; ++ ++ ret = devm_request_irq(&pdev->dev, irq, sunxi_pcie_dma_w2_irq_handler, ++ IRQF_SHARED, "pcie-dma-w2", sunxi_pcie); ++ if (ret) { ++ dev_err(&pdev->dev, "failed 
to request PCIe DMA IRQ\n"); ++ return ret; ++ } ++ ++ irq = platform_get_irq_byname(pdev, "edma-r2"); ++ if (irq < 0) ++ return -EINVAL; ++ ++ ret = devm_request_irq(&pdev->dev, irq, sunxi_pcie_dma_r2_irq_handler, ++ IRQF_SHARED, "pcie-dma-r2", sunxi_pcie); ++ if (ret) { ++ dev_err(&pdev->dev, "failed to request PCIe DMA IRQ\n"); ++ return ret; ++ } ++ ++ fallthrough; ++ case 2: ++ irq = platform_get_irq_byname(pdev, "edma-w1"); ++ if (irq < 0) ++ return -EINVAL; ++ ++ ret = devm_request_irq(&pdev->dev, irq, sunxi_pcie_dma_w1_irq_handler, ++ IRQF_SHARED, "pcie-dma-w1", sunxi_pcie); ++ if (ret) { ++ dev_err(&pdev->dev, "failed to request PCIe DMA IRQ\n"); ++ return ret; ++ } ++ ++ irq = platform_get_irq_byname(pdev, "edma-r1"); ++ if (irq < 0) ++ return -EINVAL; ++ ++ ret = devm_request_irq(&pdev->dev, irq, sunxi_pcie_dma_r1_irq_handler, ++ IRQF_SHARED, "pcie-dma-r1", sunxi_pcie); ++ if (ret) { ++ dev_err(&pdev->dev, "failed to request PCIe DMA IRQ\n"); ++ return ret; ++ } ++ ++ fallthrough; ++ case 1: ++ irq = platform_get_irq_byname(pdev, "edma-w0"); ++ if (irq < 0) ++ return -EINVAL; ++ ++ ret = devm_request_irq(&pdev->dev, irq, sunxi_pcie_dma_w0_irq_handler, ++ IRQF_SHARED, "pcie-dma-w0", sunxi_pcie); ++ if (ret) { ++ dev_err(&pdev->dev, "failed to request PCIe DMA IRQ\n"); ++ return ret; ++ } ++ ++ irq = platform_get_irq_byname(pdev, "edma-r0"); ++ if (irq < 0) ++ return -EINVAL; ++ ++ ret = devm_request_irq(&pdev->dev, irq, sunxi_pcie_dma_r0_irq_handler, ++ IRQF_SHARED, "pcie-dma-r0", sunxi_pcie); ++ if (ret) { ++ dev_err(&pdev->dev, "failed to request PCIe DMA IRQ\n"); ++ return ret; ++ } ++ ++ break; ++ default: ++ dev_err(sunxi_pcie->dev, "Not support DMA chan_num[%d], which exceed chan_range [%d-%d]\n", ++ sunxi_pcie->num_edma, 1, 4); ++ return -EINVAL; ++ } ++ ++ return 0; ++} ++ ++static int sunxi_pcie_plat_dma_init(struct sunxi_pcie *pci) ++{ ++ pci->dma_obj = sunxi_pcie_dma_obj_probe(pci->dev); ++ ++ if (IS_ERR(pci->dma_obj)) { ++ dev_err(pci->dev, 
"failed to prepare dma obj probe\n"); ++ return -EINVAL; ++ } ++ ++ sunxi_pcie_writel_dbi(pci, PCIE_DMA_OFFSET + PCIE_DMA_WR_INT_MASK, 0x0); ++ sunxi_pcie_writel_dbi(pci, PCIE_DMA_OFFSET + PCIE_DMA_RD_INT_MASK, 0x0); ++ return 0; ++} ++ ++static void sunxi_pcie_plat_dma_deinit(struct sunxi_pcie *pci) ++{ ++ sunxi_pcie_dma_obj_remove(pci->dev); ++ ++ sunxi_pcie_writel_dbi(pci, PCIE_DMA_OFFSET + PCIE_DMA_WR_INT_MASK, PCIE_DMA_INT_MASK); ++ sunxi_pcie_writel_dbi(pci, PCIE_DMA_OFFSET + PCIE_DMA_RD_INT_MASK, PCIE_DMA_INT_MASK); ++} ++ ++static int sunxi_pcie_plat_parse_dts_res(struct platform_device *pdev, struct sunxi_pcie *pci) ++{ ++ struct sunxi_pcie_port *pp = &pci->pp; ++ struct device_node *np = pp->dev->of_node; ++ struct resource *dbi_res; ++ int ret; ++ ++ dbi_res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "dbi"); ++ if (!dbi_res) { ++ dev_err(&pdev->dev, "get pcie dbi failed\n"); ++ return -ENODEV; ++ } ++ ++ pci->dbi_base = devm_ioremap_resource(&pdev->dev, dbi_res); ++ if (IS_ERR(pci->dbi_base)) { ++ dev_err(&pdev->dev, "ioremap pcie dbi failed\n"); ++ return PTR_ERR(pci->dbi_base); ++ } ++ ++ pp->dbi_base = pci->dbi_base; ++ pci->app_base = pci->dbi_base + PCIE_USER_DEFINED_REGISTER; ++ ++ pci->link_gen = of_pci_get_max_link_speed(pdev->dev.of_node); ++ if (pci->link_gen < 0) { ++ dev_warn(&pdev->dev, "get pcie speed Gen failed\n"); ++ pci->link_gen = 0x1; ++ } ++ ++ pci->rst_gpio = devm_gpiod_get(&pdev->dev, "reset", GPIOD_OUT_HIGH); ++ if (IS_ERR(pci->rst_gpio)) ++ dev_warn(&pdev->dev, "Failed to get \"reset-gpios\"\n"); ++ else ++ gpiod_direction_output(pci->rst_gpio, 1); ++ ++ pci->wake_gpio = devm_gpiod_get(&pdev->dev, "wake", GPIOD_OUT_HIGH); ++ if (IS_ERR(pci->wake_gpio)) ++ dev_warn(&pdev->dev, "Failed to get \"wake-gpios\"\n"); ++ else ++ gpiod_direction_output(pci->wake_gpio, 1); ++ ++ pci->pcie3v3 = devm_regulator_get_optional(&pdev->dev, "pcie3v3"); ++ if (IS_ERR(pci->pcie3v3)) ++ dev_warn(&pdev->dev, "no pcie3v3 regulator found\n"); 
++ ++ ret = of_property_read_u32(np, "num-lanes", &pci->lanes); ++ if (ret) { ++ dev_err(&pdev->dev, "Failed to parse the number of lanes\n"); ++ return -EINVAL; ++ } ++ ++ pp->cpu_pcie_addr_quirk = pci->drvdata->cpu_pcie_addr_quirk; ++ ++ ret = sunxi_pcie_plat_clk_get(pdev, pci); ++ if (ret) { ++ dev_err(&pdev->dev, "pcie get clk init failed\n"); ++ return -ENODEV; ++ } ++ ++ pci->phy = devm_phy_get(pci->dev, "pcie-phy"); ++ if (IS_ERR(pci->phy)) ++ return dev_err_probe(pci->dev, PTR_ERR(pci->phy), "missing PHY\n"); ++ ++ return 0; ++} ++ ++static int sunxi_pcie_plat_hw_init(struct sunxi_pcie *pci) ++{ ++ int ret; ++ ++ ret = sunxi_pcie_plat_power_on(pci); ++ if (ret) ++ return ret; ++ ++ ret = sunxi_pcie_plat_clk_setup(pci); ++ if (ret) ++ goto err0; ++ ++ ret = sunxi_pcie_plat_combo_phy_init(pci); ++ if (ret) ++ goto err1; ++ ++ return 0; ++ ++err1: ++ sunxi_pcie_plat_clk_exit(pci); ++err0: ++ sunxi_pcie_plat_power_off(pci); ++ ++ return ret; ++} ++ ++static void sunxi_pcie_plat_hw_deinit(struct sunxi_pcie *pci) ++{ ++ sunxi_pcie_plat_combo_phy_deinit(pci); ++ sunxi_pcie_plat_power_off(pci); ++ sunxi_pcie_plat_clk_exit(pci); ++} ++ ++static int sunxi_pcie_plat_probe(struct platform_device *pdev) ++{ ++ struct sunxi_pcie *pci; ++ struct sunxi_pcie_port *pp; ++ const struct sunxi_pcie_of_data *data; ++ enum sunxi_pcie_device_mode mode; ++ int ret; ++ ++ data = of_device_get_match_data(&pdev->dev); ++ mode = (enum sunxi_pcie_device_mode)data->mode; ++ ++ pci = devm_kzalloc(&pdev->dev, sizeof(*pci), GFP_KERNEL); ++ if (!pci) ++ return -ENOMEM; ++ ++ pp = &pci->pp; ++ pp->dev = &pdev->dev; ++ pci->dev = &pdev->dev; ++ pci->drvdata = data; ++ ++ ret = sunxi_pcie_plat_parse_dts_res(pdev, pci); ++ if (ret) ++ return ret; ++ ++ ret = sunxi_pcie_plat_hw_init(pci); ++ if (ret) ++ return ret; ++ ++ sunxi_pcie_plat_set_irqmask(pci); ++ platform_set_drvdata(pdev, pci); ++ ++ ret = sunxi_pcie_plat_request_irq(pci, pdev); ++ if (ret) ++ goto err0; ++ ++ 
pm_runtime_enable(&pdev->dev); ++ ret = pm_runtime_get_sync(&pdev->dev); ++ if (ret < 0) { ++ dev_err(&pdev->dev, "pm_runtime_get_sync failed\n"); ++ goto err1; ++ } ++ ++ ret = sunxi_pcie_plat_dma_init(pci); ++ if (ret) ++ goto err2; ++ ++ if (pci->dma_obj) { ++ pci->dma_obj->start_dma_trx_func = sunxi_pcie_plat_dma_start; ++ pci->dma_obj->config_dma_trx_func = sunxi_pcie_plat_dma_config; ++ } ++ ++ switch (pci->drvdata->mode) { ++ case SUNXI_PCIE_RC_TYPE: ++ ret = sunxi_pcie_host_add_port(pci, pdev); ++ break; ++ case SUNXI_PCIE_EP_TYPE: ++ sunxi_pcie_plat_set_mode(pci); ++ pci->ep.ops = &sunxi_ep_ops; ++ ret = sunxi_pcie_ep_init(pci); ++ break; ++ default: ++ dev_err(&pdev->dev, "INVALID device type %d\n", pci->drvdata->mode); ++ ret = -EINVAL; ++ break; ++ } ++ ++ if (ret) ++ goto err3; ++ ++ dev_info(&pdev->dev, "driver version: %s\n", SUNXI_PCIE_MODULE_VERSION); ++ ++ return 0; ++ ++err3: ++ sunxi_pcie_plat_dma_deinit(pci); ++err2: ++ pm_runtime_put(&pdev->dev); ++err1: ++ pm_runtime_disable(&pdev->dev); ++err0: ++ sunxi_pcie_plat_hw_deinit(pci); ++ ++ return ret; ++} ++ ++static void sunxi_pcie_plat_remove(struct platform_device *pdev) ++{ ++ struct sunxi_pcie *pci = platform_get_drvdata(pdev); ++ ++ sunxi_pcie_plat_hw_deinit(pci); ++ ++ pm_runtime_disable(&pdev->dev); ++ ++ pm_runtime_put(&pdev->dev); ++ ++ sunxi_pcie_plat_dma_deinit(pci); ++ ++ switch (pci->drvdata->mode) { ++ case SUNXI_PCIE_RC_TYPE: ++ sunxi_pcie_host_remove_port(pci); ++ break; ++ case SUNXI_PCIE_EP_TYPE: ++ sunxi_pcie_ep_deinit(pci); ++ break; ++ default: ++ dev_err(&pdev->dev, "unspport device type %d\n", pci->drvdata->mode); ++ break; ++ } ++ ++ sunxi_pcie_plat_ltssm_disable(pci); ++ ++} ++ ++#if IS_ENABLED(CONFIG_PM) ++static int sunxi_pcie_plat_suspend(struct device *dev) ++{ ++ struct platform_device *pdev = to_platform_device(dev); ++ struct sunxi_pcie *pci = platform_get_drvdata(pdev); ++ ++ sunxi_pcie_plat_ltssm_disable(pci); ++ ++ usleep_range(200, 300); ++ ++ 
sunxi_pcie_plat_hw_deinit(pci); ++ ++ return 0; ++} ++ ++static int sunxi_pcie_plat_resume(struct device *dev) ++{ ++ struct platform_device *pdev = to_platform_device(dev); ++ struct sunxi_pcie *pci = platform_get_drvdata(pdev); ++ struct sunxi_pcie_port *pp = &pci->pp; ++ int ret; ++ ++ ret = sunxi_pcie_plat_hw_init(pci); ++ if (ret) ++ return -EINVAL; ++ ++ /* TODO */ ++ usleep_range(100, 300); ++ ++ switch (pci->drvdata->mode) { ++ case SUNXI_PCIE_RC_TYPE: ++ sunxi_pcie_plat_ltssm_disable(pci); ++ sunxi_pcie_host_setup_rc(pp); ++ ++ if (IS_ENABLED(CONFIG_PCI_MSI) && !pp->has_its) { ++ phys_addr_t pa = ALIGN_DOWN(virt_to_phys(pp), SZ_4K); ++ sunxi_pcie_host_wr_own_conf(pp, PCIE_MSI_ADDR_LO, 4, lower_32_bits(pa)); ++ sunxi_pcie_host_wr_own_conf(pp, PCIE_MSI_ADDR_HI, 4, upper_32_bits(pa)); ++ } ++ ++ sunxi_pcie_host_establish_link(pci); ++ sunxi_pcie_host_speed_change(pci, pci->link_gen); ++ break; ++ case SUNXI_PCIE_EP_TYPE: ++ /* TODO */ ++ break; ++ default: ++ dev_err(pci->dev, "unsupport device type %d\n", pci->drvdata->mode); ++ break; ++ } ++ ++ return 0; ++} ++ ++static struct dev_pm_ops sunxi_pcie_plat_pm_ops = { ++ .suspend = sunxi_pcie_plat_suspend, ++ .resume = sunxi_pcie_plat_resume, ++}; ++#else ++static struct dev_pm_ops sunxi_pcie_plat_pm_ops; ++#endif /* CONFIG_PM */ ++ ++static struct platform_driver sunxi_pcie_plat_driver = { ++ .driver = { ++ .name = "sunxi-pcie", ++ .owner = THIS_MODULE, ++ .of_match_table = sunxi_pcie_plat_of_match, ++ .pm = &sunxi_pcie_plat_pm_ops, ++ }, ++ .probe = sunxi_pcie_plat_probe, ++ .remove = sunxi_pcie_plat_remove, ++}; ++ ++module_platform_driver(sunxi_pcie_plat_driver); ++ ++MODULE_AUTHOR("songjundong "); ++MODULE_DESCRIPTION("Allwinner PCIe controller platform driver"); ++MODULE_VERSION(SUNXI_PCIE_MODULE_VERSION); ++MODULE_LICENSE("GPL v2"); +\ No newline at end of file +diff --git a/drivers/pci/pcie-sunxi/pcie-sunxi-rc.c b/drivers/pci/pcie-sunxi/pcie-sunxi-rc.c +new file mode 100644 +index 
000000000000..111111111111 +--- /dev/null ++++ b/drivers/pci/pcie-sunxi/pcie-sunxi-rc.c +@@ -0,0 +1,864 @@ ++/* SPDX-License-Identifier: GPL-2.0-or-later */ ++/* Copyright(c) 2020 - 2023 Allwinner Technology Co.,Ltd. All rights reserved. */ ++// SPDX_License-Identifier: GPL-2.0 ++/* ++ * allwinner PCIe host controller driver ++ * ++ * Copyright (c) 2007-2022 Allwinnertech Co., Ltd. ++ * ++ * Author: songjundong ++ * ++ * This software is licensed under the terms of the GNU General Public ++ * License version 2, as published by the Free Software Foundation, and ++ * may be copied, distributed, and modified under those terms. ++ * ++ * This program is distributed in the hope that it will be useful, ++ * but WITHOUT ANY WARRANTY; without even the implied warranty of ++ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the ++ * GNU General Public License for more details. ++ * ++ */ ++ ++/* SPDX-License-Identifier: GPL-2.0-or-later */ ++/* Copyright(c) 2020 - 2023 Allwinner Technology Co.,Ltd. All rights reserved. */ ++// SPDX_License-Identifier: GPL-2.0 ++/* ++ * allwinner PCIe host controller driver ++ * ++ * Copyright (c) 2007-2022 Allwinnertech Co., Ltd. ++ * ++ * Author: songjundong ++ * ++ * This software is licensed under the terms of the GNU General Public ++ * License version 2, as published by the Free Software Foundation, and ++ * may be copied, distributed, and modified under those terms. ++ * ++ * This program is distributed in the hope that it will be useful, ++ * but WITHOUT ANY WARRANTY; without even the implied warranty of ++ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the ++ * GNU General Public License for more details. 
++ * ++ */ ++ ++#define SUNXI_MODNAME "pcie-rc" ++#include ++#include ++#include ++#include ++#include ++#include ++#include ++#include ++#include ++#include ++#include ++#include ++#include ++#include ++#include ++ ++#include "pci.h" ++#include "pcie-sunxi.h" ++#include "pcie-sunxi-dma.h" ++ ++static bool sunxi_pcie_host_is_link_up(struct sunxi_pcie_port *pp) ++{ ++ if (pp->ops->is_link_up) ++ return pp->ops->is_link_up(pp); ++ else ++ return false; ++} ++ ++static int sunxi_pcie_host_rd_own_conf(struct sunxi_pcie_port *pp, int where, int size, u32 *val) ++{ ++ int ret; ++ ++ if (pp->ops->rd_own_conf) ++ ret = pp->ops->rd_own_conf(pp, where, size, val); ++ else ++ ret = sunxi_pcie_cfg_read(pp->dbi_base + where, size, val); ++ ++ return ret; ++} ++ ++int sunxi_pcie_host_wr_own_conf(struct sunxi_pcie_port *pp, int where, int size, u32 val) ++{ ++ int ret; ++ ++ if (pp->ops->wr_own_conf) ++ ret = pp->ops->wr_own_conf(pp, where, size, val); ++ else ++ ret = sunxi_pcie_cfg_write(pp->dbi_base + where, size, val); ++ ++ return ret; ++} ++ ++static void sunxi_msi_top_irq_ack(struct irq_data *d) ++{ ++ /* NULL */ ++} ++ ++static struct irq_chip sunxi_msi_top_chip = { ++ .name = "SUNXI-PCIe-MSI", ++ .irq_ack = sunxi_msi_top_irq_ack, ++ .irq_mask = pci_msi_mask_irq, ++ .irq_unmask = pci_msi_unmask_irq, ++}; ++ ++static int sunxi_msi_set_affinity(struct irq_data *d, const struct cpumask *mask, bool force) ++{ ++ return -EINVAL; ++} ++ ++static void sunxi_compose_msi_msg(struct irq_data *data, struct msi_msg *msg) ++{ ++ struct sunxi_pcie_port *pcie = irq_data_get_irq_chip_data(data); ++ u64 msi_target = (u64)pcie->msi_data; ++ ++ msg->address_lo = lower_32_bits(msi_target); ++ msg->address_hi = upper_32_bits(msi_target); ++ msg->data = data->hwirq; ++ ++ pr_debug("-%s:[DEBUG]: msi#%d address_hi %#x address_lo %#x\n", ++ dev_name(pcie->dev), (int)data->hwirq, msg->address_hi, msg->address_lo); ++} ++ ++/* ++ * whether the following interface needs to be added on the driver: ++ 
* .irq_ack, .irq_mask, .irq_unmask and the xxx_bottom_irq_chip. ++ */ ++static struct irq_chip sunxi_msi_bottom_chip = { ++ .name = "SUNXI MSI", ++ .irq_set_affinity = sunxi_msi_set_affinity, ++ .irq_compose_msi_msg = sunxi_compose_msi_msg, ++}; ++ ++static int sunxi_msi_domain_alloc(struct irq_domain *domain, unsigned int virq, ++ unsigned int nr_irqs, void *args) ++{ ++ struct sunxi_pcie_port *pp = domain->host_data; ++ int hwirq, i; ++ unsigned long flags; ++ ++ raw_spin_lock_irqsave(&pp->lock, flags); ++ ++ hwirq = bitmap_find_free_region(pp->msi_map, INT_PCI_MSI_NR, order_base_2(nr_irqs)); ++ ++ raw_spin_unlock_irqrestore(&pp->lock, flags); ++ ++ if (unlikely(hwirq < 0)) { ++ dev_err(pp->dev, "failed to alloc hwirq\n"); ++ return -ENOSPC; ++ } ++ ++ for (i = 0; i < nr_irqs; i++) ++ irq_domain_set_info(domain, virq + i, hwirq + i, ++ &sunxi_msi_bottom_chip, pp, ++ handle_edge_irq, NULL, NULL); ++ ++ return 0; ++} ++ ++static void sunxi_msi_domain_free(struct irq_domain *domain, unsigned int virq, ++ unsigned int nr_irqs) ++{ ++ struct irq_data *d = irq_domain_get_irq_data(domain, virq); ++ struct sunxi_pcie_port *pp = domain->host_data; ++ unsigned long flags; ++ ++ raw_spin_lock_irqsave(&pp->lock, flags); ++ ++ bitmap_release_region(pp->msi_map, d->hwirq, order_base_2(nr_irqs)); ++ ++ raw_spin_unlock_irqrestore(&pp->lock, flags); ++} ++ ++static const struct irq_domain_ops sunxi_msi_domain_ops = { ++ .alloc = sunxi_msi_domain_alloc, ++ .free = sunxi_msi_domain_free, ++}; ++ ++static struct msi_domain_info sunxi_msi_info = { ++ .flags = (MSI_FLAG_USE_DEF_DOM_OPS | MSI_FLAG_USE_DEF_CHIP_OPS | MSI_FLAG_MULTI_PCI_MSI), ++ .chip = &sunxi_msi_top_chip, ++}; ++ ++static int sunxi_allocate_msi_domains(struct sunxi_pcie_port *pp) ++{ ++ struct fwnode_handle *fwnode = dev_fwnode(pp->dev); ++ ++ pp->irq_domain = irq_domain_create_linear(fwnode, INT_PCI_MSI_NR, ++ &sunxi_msi_domain_ops, pp); ++ if (!pp->irq_domain) { ++ dev_err(pp->dev, "failed to create IRQ domain\n"); 
++ return -ENOMEM; ++ } ++ irq_domain_update_bus_token(pp->irq_domain, DOMAIN_BUS_NEXUS); ++ ++ pp->msi_domain = pci_msi_create_irq_domain(fwnode, &sunxi_msi_info, pp->irq_domain); ++ if (!pp->msi_domain) { ++ dev_err(pp->dev, "failed to create MSI domain\n"); ++ irq_domain_remove(pp->irq_domain); ++ return -ENOMEM; ++ } ++ ++ return 0; ++} ++ ++static void sunxi_free_msi_domains(struct sunxi_pcie_port *pp) ++{ ++ irq_domain_remove(pp->msi_domain); ++ irq_domain_remove(pp->irq_domain); ++} ++ ++static int sunxi_pcie_msi_init(struct sunxi_pcie_port *pp) ++{ ++ u64 msi_target; ++ int ret; ++ ++ ret = dma_set_mask(pp->dev, DMA_BIT_MASK(32)); ++ if (ret) ++ dev_warn(pp->dev, "Failed to set DMA mask to 32-bit. Devices with only 32-bit MSI support may not work properly\n"); ++ ++ pp->msi_data = dma_map_single_attrs(pp->dev, &pp->msi_msg, sizeof(pp->msi_msg), ++ DMA_FROM_DEVICE, DMA_ATTR_SKIP_CPU_SYNC); ++ ret = dma_mapping_error(pp->dev, pp->msi_data); ++ if (ret) { ++ dev_err(pp->dev, "Failed to map MSI data\n"); ++ pp->msi_data = 0; ++ return ret; ++ } ++ ++ msi_target = (u64)pp->msi_data; ++ sunxi_pcie_host_wr_own_conf(pp, PCIE_MSI_ADDR_LO, 4, lower_32_bits(msi_target)); ++ sunxi_pcie_host_wr_own_conf(pp, PCIE_MSI_ADDR_HI, 4, upper_32_bits(msi_target)); ++ ++ return 0; ++} ++ ++static void sunxi_pcie_free_msi(struct sunxi_pcie_port *pp) ++{ ++ if (pp->msi_data) ++ dma_unmap_single_attrs(pp->dev, pp->msi_data, sizeof(pp->msi_msg), ++ DMA_FROM_DEVICE, DMA_ATTR_SKIP_CPU_SYNC); ++} ++ ++static void sunxi_pcie_intx_irq_mask(struct irq_data *data) ++{ ++ struct sunxi_pcie *pcie = irq_data_get_irq_chip_data(data); ++ struct sunxi_pcie_port *pp = &pcie->pp; ++ irq_hw_number_t hwirq = irqd_to_hwirq(data); ++ unsigned long flags; ++ u32 mask, stas; ++ ++ raw_spin_lock_irqsave(&pp->lock, flags); ++ mask = sunxi_pcie_readl(pcie, SII_INT_MASK0); ++ mask &= ~INTX_RX_ASSERT(hwirq); ++ sunxi_pcie_writel(mask, pcie, SII_INT_MASK0); ++ stas = sunxi_pcie_readl(pcie, SII_INT_STAS0); ++ 
stas |= INTX_RX_ASSERT(hwirq); ++ sunxi_pcie_writel(stas, pcie, SII_INT_STAS0); ++ raw_spin_unlock_irqrestore(&pp->lock, flags); ++} ++ ++static void sunxi_pcie_intx_irq_unmask(struct irq_data *data) ++{ ++ struct sunxi_pcie *pcie = irq_data_get_irq_chip_data(data); ++ struct sunxi_pcie_port *pp = &pcie->pp; ++ irq_hw_number_t hwirq = irqd_to_hwirq(data); ++ unsigned long flags; ++ u32 mask, stas; ++ ++ raw_spin_lock_irqsave(&pp->lock, flags); ++ stas = sunxi_pcie_readl(pcie, SII_INT_STAS0); ++ stas |= INTX_RX_ASSERT(hwirq); ++ sunxi_pcie_writel(stas, pcie, SII_INT_STAS0); ++ mask = sunxi_pcie_readl(pcie, SII_INT_MASK0); ++ mask |= INTX_RX_ASSERT(hwirq); ++ sunxi_pcie_writel(mask, pcie, SII_INT_MASK0); ++ raw_spin_unlock_irqrestore(&pp->lock, flags); ++} ++ ++static struct irq_chip sunxi_pcie_sii_intx_chip = { ++ .name = "SUNXI-PCIe-SII-INTx", ++ .irq_enable = sunxi_pcie_intx_irq_unmask, ++ .irq_disable = sunxi_pcie_intx_irq_mask, ++ .irq_mask = sunxi_pcie_intx_irq_mask, ++ .irq_unmask = sunxi_pcie_intx_irq_unmask, ++}; ++ ++static int sunxi_pcie_intx_map(struct irq_domain *domain, unsigned int irq, ++ irq_hw_number_t hwirq) ++{ ++ irq_set_chip_and_handler(irq, &sunxi_pcie_sii_intx_chip, handle_simple_irq); ++ irq_set_chip_data(irq, domain->host_data); ++ ++ return 0; ++} ++ ++static const struct irq_domain_ops intx_domain_ops = { ++ .map = sunxi_pcie_intx_map, ++}; ++ ++static int sunxi_allocate_intx_domains(struct sunxi_pcie_port *pp) ++{ ++ struct sunxi_pcie *pci = to_sunxi_pcie_from_pp(pp); ++ struct device_node *intc_node; ++ u32 val; ++ ++ intc_node = of_get_child_by_name(pp->dev->of_node, "legacy-interrupt-controller"); ++ if (!intc_node) { ++ dev_warn(pp->dev, "failed to found pcie intc node\n"); ++ return -ENODEV; ++ } ++ ++ pp->intx_domain = irq_domain_add_linear(intc_node, PCI_NUM_INTX, ++ &intx_domain_ops, pci); ++ of_node_put(intc_node); ++ if (!pp->intx_domain) { ++ dev_warn(pp->dev, "failed to add intx irq domain\n"); ++ return -ENODEV; ++ } ++ ++ /* 
intx irq enable */ ++ val = sunxi_pcie_readl(pci, SII_INT_MASK0); ++ val |= INTX_RX_ASSERT_MASK; ++ sunxi_pcie_writel(val, pci, SII_INT_MASK0); ++ ++ return 0; ++} ++ ++static void sunxi_free_intx_domains(struct sunxi_pcie_port *pp) ++{ ++ if (pp->intx_domain) ++ irq_domain_remove(pp->intx_domain); ++} ++ ++static void sunxi_pcie_prog_outbound_atu(struct sunxi_pcie_port *pp, int index, int type, ++ u64 cpu_addr, u64 pci_addr, u32 size) ++{ ++ struct sunxi_pcie *pci = to_sunxi_pcie_from_pp(pp); ++ unsigned int retries; ++ int val; ++ ++ sunxi_pcie_writel_dbi(pci, PCIE_ATU_LOWER_BASE_OUTBOUND(index), lower_32_bits(cpu_addr)); ++ sunxi_pcie_writel_dbi(pci, PCIE_ATU_UPPER_BASE_OUTBOUND(index), upper_32_bits(cpu_addr)); ++ sunxi_pcie_writel_dbi(pci, PCIE_ATU_LIMIT_OUTBOUND(index), lower_32_bits(cpu_addr + size - 1)); ++ sunxi_pcie_writel_dbi(pci, PCIE_ATU_LOWER_TARGET_OUTBOUND(index), lower_32_bits(pci_addr)); ++ sunxi_pcie_writel_dbi(pci, PCIE_ATU_UPPER_TARGET_OUTBOUND(index), upper_32_bits(pci_addr)); ++ sunxi_pcie_writel_dbi(pci, PCIE_ATU_CR1_OUTBOUND(index), type); ++ sunxi_pcie_writel_dbi(pci, PCIE_ATU_CR2_OUTBOUND(index), PCIE_ATU_ENABLE); ++ ++ for (retries = 0; retries < LINK_WAIT_MAX_RETRIE; retries++) { ++ val = sunxi_pcie_readl_dbi(pci, PCIE_ATU_CR2_OUTBOUND(index)); ++ ++ if (val & PCIE_ATU_ENABLE) ++ return; ++ ++ mdelay(WAIT_ATU); ++ } ++ dev_warn(pp->dev, "Outbound iATU is not being enabled\n"); ++} ++ ++static int sunxi_pcie_rd_other_conf(struct sunxi_pcie_port *pp, struct pci_bus *bus, ++ u32 devfn, int where, int size, u32 *val) ++{ ++ int ret = PCIBIOS_SUCCESSFUL, type; ++ u64 busdev; ++ ++ busdev = PCIE_ATU_BUS(bus->number) | PCIE_ATU_DEV(PCI_SLOT(devfn)) | ++ PCIE_ATU_FUNC(PCI_FUNC(devfn)); ++ ++ if (pci_is_root_bus(bus->parent)) ++ type = PCIE_ATU_TYPE_CFG0; ++ else ++ type = PCIE_ATU_TYPE_CFG1; ++ ++ sunxi_pcie_prog_outbound_atu(pp, PCIE_ATU_INDEX0, type, pp->cfg0_base, busdev, pp->cfg0_size); ++ ++ ret = sunxi_pcie_cfg_read(pp->va_cfg0_base + 
where, size, val); ++ ++ return ret; ++} ++ ++static int sunxi_pcie_wr_other_conf(struct sunxi_pcie_port *pp, struct pci_bus *bus, ++ u32 devfn, int where, int size, u32 val) ++{ ++ int ret = PCIBIOS_SUCCESSFUL, type; ++ u64 busdev; ++ ++ busdev = PCIE_ATU_BUS(bus->number) | PCIE_ATU_DEV(PCI_SLOT(devfn)) | ++ PCIE_ATU_FUNC(PCI_FUNC(devfn)); ++ ++ if (pci_is_root_bus(bus->parent)) ++ type = PCIE_ATU_TYPE_CFG0; ++ else ++ type = PCIE_ATU_TYPE_CFG1; ++ ++ sunxi_pcie_prog_outbound_atu(pp, PCIE_ATU_INDEX0, type, pp->cfg0_base, busdev, pp->cfg0_size); ++ ++ ret = sunxi_pcie_cfg_write(pp->va_cfg0_base + where, size, val); ++ ++ return ret; ++} ++ ++static int sunxi_pcie_valid_config(struct sunxi_pcie_port *pp, ++ struct pci_bus *bus, int dev) ++{ ++ /* If there is no link, then there is no device */ ++ if (!pci_is_root_bus(bus)) { ++ if (!sunxi_pcie_host_is_link_up(pp)) ++ return 0; ++ } else if (dev > 0) ++ /* Access only one slot on each root port */ ++ return 0; ++ ++ return 1; ++} ++ ++static int sunxi_pcie_rd_conf(struct pci_bus *bus, u32 devfn, int where, ++ int size, u32 *val) ++{ ++ struct sunxi_pcie_port *pp = (bus->sysdata); ++ int ret; ++ ++ if (!pp) ++ BUG(); ++ ++ if (!sunxi_pcie_valid_config(pp, bus, PCI_SLOT(devfn))) { ++ *val = 0xffffffff; ++ return PCIBIOS_DEVICE_NOT_FOUND; ++ } ++ ++ if (!pci_is_root_bus(bus)) ++ ret = sunxi_pcie_rd_other_conf(pp, bus, devfn, ++ where, size, val); ++ else ++ ret = sunxi_pcie_host_rd_own_conf(pp, where, size, val); ++ ++ return ret; ++} ++ ++static int sunxi_pcie_wr_conf(struct pci_bus *bus, u32 devfn, ++ int where, int size, u32 val) ++{ ++ struct sunxi_pcie_port *pp = (bus->sysdata); ++ int ret; ++ ++ if (!pp) ++ BUG(); ++ ++ if (sunxi_pcie_valid_config(pp, bus, PCI_SLOT(devfn)) == 0) ++ return PCIBIOS_DEVICE_NOT_FOUND; ++ ++ if (!pci_is_root_bus(bus)) ++ ret = sunxi_pcie_wr_other_conf(pp, bus, devfn, ++ where, size, val); ++ else ++ ret = sunxi_pcie_host_wr_own_conf(pp, where, size, val); ++ ++ return ret; ++} ++ 
++static struct pci_ops sunxi_pcie_ops = { ++ .read = sunxi_pcie_rd_conf, ++ .write = sunxi_pcie_wr_conf, ++}; ++ ++static int sunxi_pcie_host_init(struct sunxi_pcie_port *pp) ++{ ++ struct device *dev = pp->dev; ++ struct resource_entry *win; ++ struct pci_host_bridge *bridge; ++ int ret; ++ ++ bridge = devm_pci_alloc_host_bridge(dev, 0); ++ if (!bridge) { ++ dev_err(dev, "Failed to alloc host bridge\n"); ++ return -ENOMEM; ++ } ++ ++ pp->bridge = bridge; ++ /* Get the I/O and memory ranges from DT */ ++ resource_list_for_each_entry(win, &bridge->windows) { ++ switch (resource_type(win->res)) { ++ case IORESOURCE_IO: ++ pp->io_size = resource_size(win->res); ++ pp->io_bus_addr = win->res->start - win->offset; ++ pp->io_base = pci_pio_to_address(win->res->start); ++ break; ++ case 0: ++ pp->cfg0_size = resource_size(win->res); ++ pp->cfg0_base = win->res->start; ++ break; ++ } ++ } ++ ++ if (!pp->va_cfg0_base) { ++ pp->va_cfg0_base = devm_pci_remap_cfgspace(dev, ++ pp->cfg0_base, pp->cfg0_size); ++ if (!pp->va_cfg0_base) { ++ dev_err(dev, "Error with ioremap in function\n"); ++ return -ENOMEM; ++ } ++ } ++ ++ if (pp->cpu_pcie_addr_quirk) { ++ pp->cfg0_base -= PCIE_CPU_BASE; ++ pp->io_base -= PCIE_CPU_BASE; ++ } ++ ++ sunxi_allocate_intx_domains(pp); ++ ++ if (pci_msi_enabled() && !pp->has_its) { ++ ret = sunxi_allocate_msi_domains(pp); ++ if (ret) ++ return ret; ++ ++ ret = sunxi_pcie_msi_init(pp); ++ if (ret) ++ return ret; ++ } ++ ++ if (pp->ops->host_init) ++ pp->ops->host_init(pp); ++ ++ bridge->sysdata = pp; ++ bridge->ops = &sunxi_pcie_ops; ++ ++ ret = pci_host_probe(bridge); ++ ++ if (ret) { ++ if (pci_msi_enabled() && !pp->has_its) { ++ sunxi_pcie_free_msi(pp); ++ sunxi_free_msi_domains(pp); ++ } ++ sunxi_free_intx_domains(pp); ++ ++ dev_err(pp->dev, "Failed to probe host bridge\n"); ++ ++ return ret; ++ } ++ ++ return 0; ++} ++ ++void sunxi_pcie_host_setup_rc(struct sunxi_pcie_port *pp) ++{ ++ u32 val, i; ++ int atu_idx = 0; ++ struct resource_entry 
*entry; ++ phys_addr_t mem_base; ++ struct sunxi_pcie *pci = to_sunxi_pcie_from_pp(pp); ++ ++ sunxi_pcie_plat_set_rate(pci); ++ ++ /* setup RC BARs */ ++ sunxi_pcie_writel_dbi(pci, PCI_BASE_ADDRESS_0, 0x4); ++ sunxi_pcie_writel_dbi(pci, PCI_BASE_ADDRESS_1, 0x0); ++ ++ /* setup interrupt pins */ ++ val = sunxi_pcie_readl_dbi(pci, PCI_INTERRUPT_LINE); ++ val &= PCIE_INTERRUPT_LINE_MASK; ++ val |= PCIE_INTERRUPT_LINE_ENABLE; ++ sunxi_pcie_writel_dbi(pci, PCI_INTERRUPT_LINE, val); ++ ++ /* setup bus numbers */ ++ val = sunxi_pcie_readl_dbi(pci, PCI_PRIMARY_BUS); ++ val &= 0xff000000; ++ val |= 0x00ff0100; ++ sunxi_pcie_writel_dbi(pci, PCI_PRIMARY_BUS, val); ++ ++ /* setup command register */ ++ val = sunxi_pcie_readl_dbi(pci, PCI_COMMAND); ++ ++ val &= PCIE_HIGH16_MASK; ++ val |= PCI_COMMAND_IO | PCI_COMMAND_MEMORY | ++ PCI_COMMAND_MASTER | PCI_COMMAND_SERR; ++ ++ sunxi_pcie_writel_dbi(pci, PCI_COMMAND, val); ++ ++ if (pci_msi_enabled() && !pp->has_its) { ++ for (i = 0; i < 8; i++) { ++ sunxi_pcie_host_wr_own_conf(pp, PCIE_MSI_INTR_ENABLE(i), 4, ~0); ++ } ++ } ++ ++ resource_list_for_each_entry(entry, &pp->bridge->windows) { ++ if (resource_type(entry->res) != IORESOURCE_MEM) ++ continue; ++ ++ if (pp->num_ob_windows <= ++atu_idx) ++ break; ++ ++ if (pp->cpu_pcie_addr_quirk) ++ mem_base = entry->res->start - PCIE_CPU_BASE; ++ else ++ mem_base = entry->res->start; ++ ++ sunxi_pcie_prog_outbound_atu(pp, atu_idx, PCIE_ATU_TYPE_MEM, mem_base, ++ entry->res->start - entry->offset, ++ resource_size(entry->res)); ++ } ++ ++ if (pp->io_size) { ++ if (pp->num_ob_windows > ++atu_idx) ++ sunxi_pcie_prog_outbound_atu(pp, atu_idx, PCIE_ATU_TYPE_IO, pp->io_base, ++ pp->io_bus_addr, pp->io_size); ++ else ++ dev_err(pp->dev, "Resources exceed number of ATU entries (%d)", ++ pp->num_ob_windows); ++ } ++ ++ sunxi_pcie_host_wr_own_conf(pp, PCI_BASE_ADDRESS_0, 4, 0); ++ ++ sunxi_pcie_dbi_ro_wr_en(pci); ++ ++ sunxi_pcie_host_wr_own_conf(pp, PCI_CLASS_DEVICE, 2, PCI_CLASS_BRIDGE_PCI); ++ ++ 
sunxi_pcie_dbi_ro_wr_dis(pci); ++ ++ sunxi_pcie_host_rd_own_conf(pp, PCIE_LINK_WIDTH_SPEED_CONTROL, 4, &val); ++ val |= PORT_LOGIC_SPEED_CHANGE; ++ sunxi_pcie_host_wr_own_conf(pp, PCIE_LINK_WIDTH_SPEED_CONTROL, 4, val); ++} ++EXPORT_SYMBOL_GPL(sunxi_pcie_host_setup_rc); ++ ++static int sunxi_pcie_host_wait_for_speed_change(struct sunxi_pcie *pci) ++{ ++ u32 tmp; ++ unsigned int retries; ++ ++ for (retries = 0; retries < LINK_WAIT_MAX_RETRIE; retries++) { ++ tmp = sunxi_pcie_readl_dbi(pci, PCIE_LINK_WIDTH_SPEED_CONTROL); ++ if (!(tmp & PORT_LOGIC_SPEED_CHANGE)) ++ return 0; ++ usleep_range(SPEED_CHANGE_USLEEP_MIN, SPEED_CHANGE_USLEEP_MAX); ++ } ++ ++ dev_err(pci->dev, "Speed change timeout\n"); ++ return -ETIMEDOUT; ++} ++ ++static int sunxi_pcie_host_read_speed(struct sunxi_pcie *pci) ++{ ++ int val, gen; ++ ++ sunxi_pcie_dbi_ro_wr_en(pci); ++ val = sunxi_pcie_readl_dbi(pci, LINK_CONTROL2_LINK_STATUS2); ++ gen = val & 0xf; ++ ++ dev_info(pci->dev, "PCIe speed of Gen%d\n", gen); ++ ++ sunxi_pcie_dbi_ro_wr_dis(pci); ++ return 0; ++} ++ ++int sunxi_pcie_host_speed_change(struct sunxi_pcie *pci, int gen) ++{ ++ u32 val; ++ u32 current_speed; ++ int ret; ++ ++ current_speed = sunxi_pcie_host_read_speed(pci); ++ ++ if (current_speed >= gen) { ++ dev_info(pci->dev, "Link already at Gen%u, skipping retrain.\n", current_speed); ++ return 0; ++ } ++ ++ dev_info(pci->dev, "Current speed Gen%u < target Gen%d. 
Retraining link...\n", ++ current_speed, gen); ++ ++ sunxi_pcie_dbi_ro_wr_en(pci); ++ val = sunxi_pcie_readl_dbi(pci, LINK_CONTROL2_LINK_STATUS2); ++ val &= ~0xf; ++ val |= gen; ++ sunxi_pcie_writel_dbi(pci, LINK_CONTROL2_LINK_STATUS2, val); ++ ++ val = sunxi_pcie_readl_dbi(pci, PCIE_LINK_WIDTH_SPEED_CONTROL); ++ val &= ~PORT_LOGIC_SPEED_CHANGE; ++ sunxi_pcie_writel_dbi(pci, PCIE_LINK_WIDTH_SPEED_CONTROL, val); ++ ++ val = sunxi_pcie_readl_dbi(pci, PCIE_LINK_WIDTH_SPEED_CONTROL); ++ val |= PORT_LOGIC_SPEED_CHANGE; ++ sunxi_pcie_writel_dbi(pci, PCIE_LINK_WIDTH_SPEED_CONTROL, val); ++ ++ ret = sunxi_pcie_host_wait_for_speed_change(pci); ++ if (!ret) { ++ dev_info(pci->dev, "PCIe speed of Gen%d\n", gen); ++ } ++ else ++ dev_info(pci->dev, "PCIe speed of Gen1\n"); ++ ++ sunxi_pcie_dbi_ro_wr_dis(pci); ++ return 0; ++} ++ ++static void __sunxi_pcie_host_init(struct sunxi_pcie_port *pp) ++{ ++ struct sunxi_pcie *pci = to_sunxi_pcie_from_pp(pp); ++ ++ if (!sunxi_pcie_host_is_link_up(pp)) { ++ sunxi_pcie_plat_ltssm_disable(pci); ++ if (!IS_ERR(pci->rst_gpio)) { ++ gpiod_set_raw_value(pci->rst_gpio, 0); ++ msleep(100); ++ gpiod_set_raw_value(pci->rst_gpio, 1); ++ } ++ } else { ++ msleep(100); ++ } ++ ++ sunxi_pcie_host_setup_rc(pp); ++ ++ if (sunxi_pcie_host_is_link_up(pp)) { ++ dev_info(pci->dev, "pcie is already link up\n"); ++ ++ sunxi_pcie_host_read_speed(pci); ++ } else { ++ sunxi_pcie_host_establish_link(pci); ++ ++ sunxi_pcie_host_speed_change(pci, pci->link_gen); ++ } ++} ++ ++static bool sunxi_pcie_host_link_up_status(struct sunxi_pcie_port *pp) ++{ ++ u32 val; ++ int ret; ++ struct sunxi_pcie *pcie = to_sunxi_pcie_from_pp(pp); ++ val = sunxi_pcie_readl(pcie, PCIE_LINK_STAT); ++ ++ if ((val & RDLH_LINK_UP) && (val & SMLH_LINK_UP)) ++ ret = true; ++ else ++ ret = false; ++ ++ return ret; ++} ++ ++static struct sunxi_pcie_host_ops sunxi_pcie_host_ops = { ++ .is_link_up = sunxi_pcie_host_link_up_status, ++ .host_init = __sunxi_pcie_host_init, ++}; ++ ++static int 
sunxi_pcie_host_wait_for_link(struct sunxi_pcie_port *pp) ++{ ++ int retries; ++ ++ for (retries = 0; retries < LINK_WAIT_MAX_RETRIE; retries++) { ++ if (sunxi_pcie_host_is_link_up(pp)) { ++ dev_info(pp->dev, "pcie link up success\n"); ++ return 0; ++ } ++ usleep_range(LINK_WAIT_USLEEP_MIN, LINK_WAIT_USLEEP_MAX); ++ } ++ ++ return -ETIMEDOUT; ++} ++ ++int sunxi_pcie_host_establish_link(struct sunxi_pcie *pci) ++{ ++ struct sunxi_pcie_port *pp = &pci->pp; ++ ++ if (sunxi_pcie_host_is_link_up(pp)) { ++ dev_info(pci->dev, "pcie is already link up\n"); ++ msleep(20); ++ return 0; ++ } ++ ++ sunxi_pcie_plat_ltssm_enable(pci); ++ ++ return sunxi_pcie_host_wait_for_link(pp); ++} ++EXPORT_SYMBOL_GPL(sunxi_pcie_host_establish_link); ++ ++static irqreturn_t sunxi_pcie_host_msi_irq_handler(int irq, void *arg) ++{ ++ struct sunxi_pcie_port *pp = (struct sunxi_pcie_port *)arg; ++ struct sunxi_pcie *pci = to_sunxi_pcie_from_pp(pp); ++ unsigned long val; ++ int i, pos; ++ u32 status; ++ irqreturn_t ret = IRQ_NONE; ++ ++ for (i = 0; i < MAX_MSI_CTRLS; i++) { ++ status = sunxi_pcie_readl_dbi(pci, PCIE_MSI_INTR_STATUS + (i * MSI_REG_CTRL_BLOCK_SIZE)); ++ ++ if (!status) ++ continue; ++ ++ ret = IRQ_HANDLED; ++ pos = 0; ++ val = status; ++ while ((pos = find_next_bit(&val, MAX_MSI_IRQS_PER_CTRL, pos)) != MAX_MSI_IRQS_PER_CTRL) { ++ ++ /* Clear MSI interrupt first here. 
Otherwise some irqs will be lost or timeout */ ++ sunxi_pcie_writel_dbi(pci, ++ PCIE_MSI_INTR_STATUS + (i * MSI_REG_CTRL_BLOCK_SIZE), 1 << pos); ++ ++ generic_handle_domain_irq(pp->irq_domain, (i * MAX_MSI_IRQS_PER_CTRL) + pos); ++ ++ pos++; ++ } ++ } ++ ++ return ret; ++} ++ ++int sunxi_pcie_host_add_port(struct sunxi_pcie *pci, struct platform_device *pdev) ++{ ++ struct sunxi_pcie_port *pp = &pci->pp; ++ int ret; ++ ++ ret = of_property_read_u32(pp->dev->of_node, "num-ob-windows", &pp->num_ob_windows); ++ if (ret) { ++ dev_err(&pdev->dev, "failed to parse num-ob-windows\n"); ++ return -EINVAL; ++ } ++ ++ pp->has_its = device_property_read_bool(&pdev->dev, "msi-map"); ++ ++ if (pci_msi_enabled() && !pp->has_its) { ++ pp->msi_irq = platform_get_irq_byname(pdev, "msi"); ++ if (pp->msi_irq < 0) ++ return pp->msi_irq; ++ ++ ret = devm_request_irq(&pdev->dev, pp->msi_irq, sunxi_pcie_host_msi_irq_handler, ++ IRQF_SHARED, "pcie-msi", pp); ++ if (ret) { ++ dev_err(&pdev->dev, "failed to request MSI IRQ\n"); ++ return ret; ++ } ++ } ++ ++ pp->ops = &sunxi_pcie_host_ops; ++ raw_spin_lock_init(&pp->lock); ++ ++ ret = sunxi_pcie_host_init(pp); ++ if (ret) { ++ dev_err(&pdev->dev, "failed to initialize host\n"); ++ return ret; ++ } ++ ++ return 0; ++} ++EXPORT_SYMBOL_GPL(sunxi_pcie_host_add_port); ++ ++void sunxi_pcie_host_remove_port(struct sunxi_pcie *pci) ++{ ++ struct sunxi_pcie_port *pp = &pci->pp; ++ ++ if (pp->bridge->bus) { ++ pci_stop_root_bus(pp->bridge->bus); ++ pci_remove_root_bus(pp->bridge->bus); ++ } ++ ++ if (pci_msi_enabled() && !pp->has_its) { ++ sunxi_pcie_free_msi(pp); ++ sunxi_free_msi_domains(pp); ++ } ++ sunxi_free_intx_domains(pp); ++} ++EXPORT_SYMBOL_GPL(sunxi_pcie_host_remove_port); +\ No newline at end of file +diff --git a/drivers/pci/pcie-sunxi/pcie-sunxi.h b/drivers/pci/pcie-sunxi/pcie-sunxi.h +new file mode 100644 +index 000000000000..111111111111 +--- /dev/null ++++ b/drivers/pci/pcie-sunxi/pcie-sunxi.h +@@ -0,0 +1,392 @@ ++/* 
SPDX-License-Identifier: GPL-2.0-or-later */ ++/* Copyright(c) 2020 - 2023 Allwinner Technology Co.,Ltd. All rights reserved. */ ++/* ++ * Allwinner PCIe controller driver ++ * ++ * Copyright (C) 2022 allwinner Co., Ltd. ++ * ++ * Author: songjundong ++ * ++ * This program is free software; you can redistribute it and/or modify ++ * it under the terms of the GNU General Public License version 2 as ++ * published by the Free Software Foundation. ++ */ ++#ifndef _PCIE_SUNXI_H ++#define _PCIE_SUNXI_H ++ ++#include ++#include ++#include ++#include ++#include ++#include ++#include ++#include ++#include ++#include ++#include ++ ++#include "pcie-sunxi-dma.h" ++ ++#define PCIE_PORT_LINK_CONTROL 0x710 ++#define PORT_LINK_MODE_MASK (0x3f << 16) ++#define PORT_LINK_MODE_1_LANES (0x1 << 16) ++#define PORT_LINK_MODE_2_LANES (0x3 << 16) ++#define PORT_LINK_MODE_4_LANES (0x7 << 16) ++#define PORT_LINK_LPBK_ENABLE (0x1 << 2) ++ ++#define PCIE_LINK_WIDTH_SPEED_CONTROL 0x80C ++#define PORT_LOGIC_SPEED_CHANGE (0x1 << 17) ++#define PORT_LOGIC_LINK_WIDTH_MASK (0x1ff << 8) ++#define PORT_LOGIC_LINK_WIDTH_1_LANES (0x1 << 8) ++#define PORT_LOGIC_LINK_WIDTH_2_LANES (0x2 << 8) ++#define PORT_LOGIC_LINK_WIDTH_4_LANES (0x4 << 8) ++ ++#define PCIE_ATU_VIEWPORT 0x900 ++#define PCIE_ATU_REGION_INBOUND (0x1 << 31) ++#define PCIE_ATU_REGION_OUTBOUND (0x0 << 31) ++#define PCIE_ATU_REGION_INDEX2 (0x2 << 0) ++#define PCIE_ATU_REGION_INDEX1 (0x1 << 0) ++#define PCIE_ATU_REGION_INDEX0 (0x0 << 0) ++ ++#define PCIE_ATU_INDEX0 0x0 ++#define PCIE_ATU_INDEX1 0x1 ++#define PCIE_ATU_INDEX2 0x2 ++#define PCIE_ATU_INDEX3 0x3 ++#define PCIE_ATU_INDEX4 0x4 ++#define PCIE_ATU_INDEX5 0x5 ++#define PCIE_ATU_INDEX6 0x6 ++#define PCIE_ATU_INDEX7 0x7 ++ ++#define PCIE_EP_REBAR_SIZE_32M 0x200 ++ ++#define PCIE_ATU_CR1_OUTBOUND(reg) (0x300000 + ((reg) * 0x200)) ++#define PCIE_ATU_TYPE_MEM (0x0 << 0) ++#define PCIE_ATU_TYPE_IO (0x2 << 0) ++#define PCIE_ATU_TYPE_CFG0 (0x4 << 0) ++#define PCIE_ATU_TYPE_CFG1 (0x5 << 0) 
++#define PCIE_ATU_CR2_OUTBOUND(reg) (0x300004 + ((reg) * 0x200)) ++#define PCIE_ATU_DMA_BYPASS BIT(27) ++#define PCIE_ATU_BAR_MODE_ENABLE BIT(30) ++#define PCIE_ATU_ENABLE BIT(31) ++ ++#define PCIE_ATU_LOWER_BASE_OUTBOUND(reg) (0x300008 + ((reg) * 0x200)) ++#define PCIE_ATU_UPPER_BASE_OUTBOUND(reg) (0x30000c + ((reg) * 0x200)) ++#define PCIE_ATU_LIMIT_OUTBOUND(reg) (0x300010 + ((reg) * 0x200)) ++#define PCIE_ATU_LOWER_TARGET_OUTBOUND(reg) (0x300014 + ((reg) * 0x200)) ++#define PCIE_ATU_UPPER_TARGET_OUTBOUND(reg) (0x300018 + ((reg) * 0x200)) ++ ++#define PCIE_ATU_FUNC_NUM(pf) ((pf) << 20) ++#define PCIE_ATU_CR2_INBOUND(reg) (0x300104 + ((reg) * 0x200)) ++#define PCIE_ATU_MATCH_MODE BIT(30) ++#define PCIE_ATU_FUNC_NUM_MATCH_EN BIT(19) ++ ++#define PCIE_ATU_LOWER_BASE_INBOUND(reg) (0x300108 + ((reg) * 0x200)) ++#define PCIE_ATU_UPPER_BASE_INBOUND(reg) (0x30010c + ((reg) * 0x200)) ++#define PCIE_ATU_LIMIT_INBOUND(reg) (0x300110 + ((reg) * 0x200)) ++#define PCIE_ATU_LOWER_TARGET_INBOUND(reg) (0x300114 + ((reg) * 0x200)) ++#define PCIE_ATU_UPPER_TARGET_INBOUND(reg) (0x300118 + ((reg) * 0x200)) ++ ++#define PCIE_ATU_BUS(x) (((x) & 0xff) << 24) ++#define PCIE_ATU_DEV(x) (((x) & 0x1f) << 19) ++#define PCIE_ATU_FUNC(x) (((x) & 0x7) << 16) ++ ++#define PCIE_MISC_CONTROL_1_CFG 0x8bc ++#define PCIE_TYPE1_CLASS_CODE_REV_ID_REG 0x08 ++ ++#define PCIE_ADDRESS_ALIGNING (~0x3) ++#define PCIE_HIGH_16 16 ++#define PCIE_BAR_NUM 6 ++#define PCIE_MEM_FLAGS 0x4 ++#define PCIE_IO_FLAGS 0x1 ++#define PCIE_BAR_REG 0x4 ++#define PCIE_HIGH16_MASK 0xffff0000 ++#define PCIE_LOW16_MASK 0x0000ffff ++#define PCIE_INTERRUPT_LINE_MASK 0xffff00ff ++#define PCIE_INTERRUPT_LINE_ENABLE 0x00000100 ++#define PCIE_PRIMARY_BUS_MASK 0xff000000 ++#define PCIE_PRIMARY_BUS_ENABLE 0x00010100 ++#define PCIE_MEMORY_MASK 0xfff00000 ++ ++#define PCIE_CPU_BASE 0x20000000 ++ ++#define PCIE_TYPE0_STATUS_COMMAND_REG 0x4 ++ ++#define PCIE_DBI2_BASE 0x100000 ++#define DBI2_FUNC_OFFSET 0x10000 ++#define BAR_ENABLE 0x1 ++ 
++#define RESBAR_CAP_REG 0x4 /* from PCIe spec4.0 7.8.6 */ ++#define RESBAR_SIZE_MASK 0xfffff0 ++#define RESBAR_CTL_REG 0x8 ++#define RESBAR_NEXT_BAR 0x8 ++#define SIZE_OF_1MB 20 /* 2^20 = 0x100000 */ ++ ++#define PCIE_COMBO_PHY_BGR 0x04 ++#define PHY_ACLK_EN BIT(17) ++#define PHY_HCLK_EN BIT(16) ++#define PHY_TERSTN BIT(1) ++#define PHY_PW_UP_RSTN BIT(0) ++#define PCIE_COMBO_PHY_CTL 0x10 ++#define PHY_USE_SEL BIT(31) /* 0:PCIE; 1:USB3 */ ++#define PHY_CLK_SEL BIT(30) /* 0:internal clk; 1:external clk */ ++#define PHY_BIST_EN BIT(16) ++#define PHY_PIPE_SW BIT(9) ++#define PHY_PIPE_SEL BIT(8) /* 0:PIPE resetn ctrl by PCIE ctrl; 1:PIPE resetn ctrl by PHY_PIPE_SW */ ++#define PHY_PIPE_CLK_INVERT BIT(4) ++#define PHY_FPGA_SYS_RSTN BIT(1) /* for FPGA */ ++#define PHY_RSTN BIT(0) ++ ++#define NEXT_CAP_PTR_MASK 0xff00 ++#define CAP_ID_MASK 0x00ff ++ ++/* ++ * Maximum number of MSI IRQs can be 256 per controller. But keep ++ * it 32 as of now. Probably we will never need more than 32. If needed, ++ * then increment it in multiple of 32. 
++ */ ++#define INT_PCI_MSI_NR 32 ++#define MAX_MSI_IRQS 256 ++#define MAX_MSI_IRQS_PER_CTRL 32 ++#define MAX_MSI_CTRLS (MAX_MSI_IRQS / MAX_MSI_IRQS_PER_CTRL) ++#define MSI_REG_CTRL_BLOCK_SIZE 12 ++ ++#define PCIE_LINK_WIDTH_SPEED_CONTROL 0x80C ++#define PORT_LOGIC_SPEED_CHANGE (0x1 << 17) ++#define LINK_CONTROL2_LINK_STATUS2 0xa0 ++/* Parameters for the waiting for link up routine */ ++#define LINK_WAIT_MAX_RETRIE 20 ++#define LINK_WAIT_USLEEP_MIN 90000 ++#define LINK_WAIT_USLEEP_MAX 100000 ++#define SPEED_CHANGE_USLEEP_MIN 100 ++#define SPEED_CHANGE_USLEEP_MAX 1000 ++#define WAIT_ATU 1 ++ ++#define PCIE_MSI_ADDR_LO 0x820 ++#define PCIE_MSI_ADDR_HI 0x824 ++#define PCIE_MSI_INTR_ENABLE(reg) (0x828 + ((reg) * 0x0c)) ++#define PCIE_MSI_INTR_MASK 0x82C ++#define PCIE_MSI_INTR_STATUS 0x830 ++ ++#define PCIE_CTRL_MGMT_BASE 0x900000 ++ ++#define PCIE_USER_DEFINED_REGISTER 0x400000 ++#define PCIE_VER 0x00 ++#define PCIE_ADDR_PAGE_CFG 0x04 ++#define PCIE_AWMISC_CTRL 0x200 ++#define PCIE_ARMISC_CTRL 0x220 ++#define PCIE_LTSSM_CTRL 0xc00 ++#define PCIE_LINK_TRAINING BIT(0) /* 0:disable; 1:enable */ ++#define DEVICE_TYPE_MASK GENMASK(7, 4) ++#define DEVICE_TYPE_RC BIT(6) ++#define PCIE_INT_ENABLE_CLR 0xE04 /* BIT(1):RDLH_LINK_MASK; BIT(0):SMLH_LINK_MASK */ ++#define PCIE_LINK_STAT 0xE0C /* BIT(1):RDLH_LINK; BIT(0):SMLH_LINK */ ++#define RDLH_LINK_UP BIT(1) ++#define SMLH_LINK_UP BIT(0) ++#define PCIE_LINK_INT_EN (BIT(0) | BIT(1)) ++ ++#define SII_INT_MASK0 0x0e00 ++#define SII_INT_STAS0 0x0e08 ++ #define INTX_TX_DEASSERT_MASK GENMASK(28, 25) ++ #define INTX_TX_DEASSERT_SHIFT 25 ++ #define INTX_TX_DEASSERT(x) BIT((x) + INTX_TX_DEASSERT_SHIFT) ++ #define INTX_TX_ASSERT_MASK GENMASK(24, 21) ++ #define INTX_TX_ASSERT_SHIFT 21 ++ #define INTX_TX_ASSERT(x) BIT((x) + INTX_TX_ASSERT_SHIFT) ++ #define INTX_RX_DEASSERT_MASK GENMASK(12, 9) ++ #define INTX_RX_DEASSERT_SHIFT 9 ++ #define INTX_RX_DEASSERT(x) BIT((x) + INTX_RX_DEASSERT_SHIFT) ++ #define INTX_RX_ASSERT_MASK GENMASK(8, 5) ++ 
#define INTX_RX_ASSERT_SHIFT 5 ++ #define INTX_RX_ASSERT(x) BIT((x) + INTX_RX_ASSERT_SHIFT) ++ ++#define PCIE_PHY_CFG 0x800 ++#define SYS_CLK 0 ++#define PAD_CLK 1 ++#define PCIE_LINK_UP_MASK (0x3<<16) ++ ++#define PCIE_RC_RP_ATS_BASE 0x400000 ++ ++#define SUNXI_PCIE_BAR_CFG_CTRL_DISABLED 0x0 ++#define SUNXI_PCIE_BAR_CFG_CTRL_IO_32BITS 0x1 ++#define SUNXI_PCIE_BAR_CFG_CTRL_MEM_32BITS 0x4 ++#define SUNXI_PCIE_BAR_CFG_CTRL_PREFETCH_MEM_32BITS 0x5 ++#define SUNXI_PCIE_BAR_CFG_CTRL_MEM_64BITS 0x6 ++#define SUNXI_PCIE_BAR_CFG_CTRL_PREFETCH_MEM_64BITS 0x7 ++ ++#define SUNXI_PCIE_EP_MSI_CTRL_REG 0x90 ++#define SUNXI_PCIE_EP_MSI_CTRL_MMC_OFFSET 17 ++#define SUNXI_PCIE_EP_MSI_CTRL_MMC_MASK GENMASK(19, 17) ++#define SUNXI_PCIE_EP_MSI_CTRL_MME_OFFSET 20 ++#define SUNXI_PCIE_EP_MSI_CTRL_MME_MASK GENMASK(22, 20) ++#define SUNXI_PCIE_EP_MSI_CTRL_ME BIT(16) ++#define SUNXI_PCIE_EP_MSI_CTRL_MASK_MSI_CAP BIT(24) ++#define SUNXI_PCIE_EP_DUMMY_IRQ_ADDR 0x1 ++ ++#define PCIE_PHY_FUNC_CFG (PCIE_CTRL_MGMT_BASE + 0x2c0) ++#define PCIE_RC_BAR_CONF (PCIE_CTRL_MGMT_BASE + 0x300) ++ ++enum sunxi_pcie_device_mode { ++ SUNXI_PCIE_EP_TYPE, ++ SUNXI_PCIE_RC_TYPE, ++}; ++ ++struct sunxi_pcie_of_data { ++ const struct sunxi_pcie_ep_ops *ops; ++ enum sunxi_pcie_device_mode mode; ++ u32 func_offset; ++ bool cpu_pcie_addr_quirk; ++ bool has_pcie_slv_clk; ++ bool need_pcie_rst; ++ bool pcie_slv_clk_400m; ++ bool has_pcie_its_clk; ++}; ++ ++struct sunxi_pcie_ep_func { ++ struct list_head list; ++ u8 func_no; ++ u8 msi_cap; ++ u8 msix_cap; ++}; ++ ++struct sunxi_pcie_ep { ++ struct pci_epc *epc; ++ struct list_head func_list; ++ const struct sunxi_pcie_ep_ops *ops; ++ phys_addr_t phys_base; ++ size_t addr_size; ++ size_t page_size; ++ u8 bar_to_atu[PCI_STD_NUM_BARS]; ++ phys_addr_t *outbound_addr; ++ u32 num_ib_windows; ++ u32 num_ob_windows; ++ unsigned long *ib_window_map; ++ unsigned long *ob_window_map; ++ void __iomem *msi_mem; ++ phys_addr_t msi_mem_phys; ++ struct pci_epf_bar 
*epf_bar[PCI_STD_NUM_BARS]; ++}; ++ ++struct sunxi_pcie_ep_ops { ++ void (*ep_init)(struct sunxi_pcie_ep *ep); ++ int (*raise_irq)(struct sunxi_pcie_ep *ep, u8 func_no, ++ unsigned int type, u16 interrupt_num); ++ const struct pci_epc_features *(*get_features)(struct sunxi_pcie_ep *ep); ++ unsigned int (*func_conf_select)(struct sunxi_pcie_ep *ep, u8 func_no); ++}; ++ ++struct sunxi_pcie_port { ++ struct device *dev; ++ void __iomem *dbi_base; ++ u64 cfg0_base; ++ void __iomem *va_cfg0_base; ++ u32 cfg0_size; ++ resource_size_t io_base; ++ phys_addr_t io_bus_addr; ++ u32 io_size; ++ u32 num_ob_windows; ++ struct sunxi_pcie_host_ops *ops; ++ int msi_irq; ++ struct irq_domain *intx_domain; ++ struct irq_domain *irq_domain; ++ struct irq_domain *msi_domain; ++ u16 msi_msg; ++ dma_addr_t msi_data; ++ struct pci_host_bridge *bridge; ++ raw_spinlock_t lock; ++ unsigned long msi_map[BITS_TO_LONGS(INT_PCI_MSI_NR)]; ++ bool has_its; ++ bool cpu_pcie_addr_quirk; ++}; ++ ++struct sunxi_pci_edma_chan; ++ ++struct sunxi_pcie { ++ struct device *dev; ++ void __iomem *dbi_base; ++ void __iomem *app_base; ++ int link_gen; ++ struct sunxi_pcie_port pp; ++ struct sunxi_pcie_ep ep; ++ struct clk *pcie_aux; ++ struct clk *pcie_slv; ++ struct clk *pcie_its; ++ struct reset_control *pcie_rst; ++ struct reset_control *pwrup_rst; ++ struct reset_control *pcie_its_rst; ++ struct phy *phy; ++ struct dma_trx_obj *dma_obj; ++ const struct sunxi_pcie_of_data *drvdata; ++ struct gpio_desc *rst_gpio; ++ struct gpio_desc *wake_gpio; ++ u32 lanes; ++ u32 num_edma; ++ unsigned long *rd_edma_map; ++ unsigned long *wr_edma_map; ++ struct sunxi_pci_edma_chan *dma_wr_chn; ++ struct sunxi_pci_edma_chan *dma_rd_chn; ++ struct regulator *pcie3v3; ++}; ++ ++#define to_sunxi_pcie_from_pp(x) \ ++ container_of((x), struct sunxi_pcie, pp) ++ ++#define to_sunxi_pcie_from_ep(endpoint) \ ++ container_of((endpoint), struct sunxi_pcie, ep) ++ ++struct sunxi_pcie_host_ops { ++ void (*readl_rc)(struct sunxi_pcie_port 
*pp, void __iomem *dbi_base, u32 *val); ++ void (*writel_rc)(struct sunxi_pcie_port *pp, u32 val, void __iomem *dbi_base); ++ int (*rd_own_conf)(struct sunxi_pcie_port *pp, int where, int size, u32 *val); ++ int (*wr_own_conf)(struct sunxi_pcie_port *pp, int where, int size, u32 val); ++ bool (*is_link_up)(struct sunxi_pcie_port *pp); ++ void (*host_init)(struct sunxi_pcie_port *pp); ++ void (*scan_bus)(struct sunxi_pcie_port *pp); ++}; ++ ++void sunxi_pcie_plat_set_rate(struct sunxi_pcie *pci); ++void sunxi_pcie_write_dbi(struct sunxi_pcie *pci, u32 reg, size_t size, u32 val); ++u32 sunxi_pcie_read_dbi(struct sunxi_pcie *pci, u32 reg, size_t size); ++void sunxi_pcie_plat_ltssm_enable(struct sunxi_pcie *pci); ++void sunxi_pcie_plat_ltssm_disable(struct sunxi_pcie *pci); ++int sunxi_pcie_cfg_write(void __iomem *addr, int size, u32 val); ++int sunxi_pcie_cfg_read(void __iomem *addr, int size, u32 *val); ++ ++#if IS_ENABLED(CONFIG_PCIE_SUN55I_RC) ++int sunxi_pcie_host_add_port(struct sunxi_pcie *pci, struct platform_device *pdev); ++void sunxi_pcie_host_remove_port(struct sunxi_pcie *pci); ++int sunxi_pcie_host_speed_change(struct sunxi_pcie *pci, int gen); ++int sunxi_pcie_host_wr_own_conf(struct sunxi_pcie_port *pp, int where, int size, u32 val); ++int sunxi_pcie_host_establish_link(struct sunxi_pcie *pci); ++void sunxi_pcie_host_setup_rc(struct sunxi_pcie_port *pp); ++#else ++static inline int sunxi_pcie_host_add_port(struct sunxi_pcie *pci, struct platform_device *pdev) {return 0; } ++static inline void sunxi_pcie_host_remove_port(struct sunxi_pcie *pci) {} ++static inline int sunxi_pcie_host_speed_change(struct sunxi_pcie *pci, int gen) {return 0; } ++static inline int sunxi_pcie_host_wr_own_conf(struct sunxi_pcie_port *pp, int where, int size, u32 val) {return 0; } ++static inline int sunxi_pcie_host_establish_link(struct sunxi_pcie *pci) {return 0; } ++static inline void sunxi_pcie_host_setup_rc(struct sunxi_pcie_port *pp) {} ++#endif ++ ++#if 
IS_ENABLED(CONFIG_AW_PCIE_EP) ++int sunxi_pcie_ep_init(struct sunxi_pcie *pci); ++void sunxi_pcie_ep_deinit(struct sunxi_pcie *pci); ++#else ++static inline int sunxi_pcie_ep_init(struct sunxi_pcie *pci) {return 0; } ++static inline void sunxi_pcie_ep_deinit(struct sunxi_pcie *pci) {} ++#endif ++ ++void sunxi_pcie_writel(u32 val, struct sunxi_pcie *pcie, u32 offset); ++u32 sunxi_pcie_readl(struct sunxi_pcie *pcie, u32 offset); ++void sunxi_pcie_writel_dbi(struct sunxi_pcie *pci, u32 reg, u32 val); ++u32 sunxi_pcie_readl_dbi(struct sunxi_pcie *pci, u32 reg); ++void sunxi_pcie_writew_dbi(struct sunxi_pcie *pci, u32 reg, u16 val); ++u16 sunxi_pcie_readw_dbi(struct sunxi_pcie *pci, u32 reg); ++void sunxi_pcie_writeb_dbi(struct sunxi_pcie *pci, u32 reg, u8 val); ++u8 sunxi_pcie_readb_dbi(struct sunxi_pcie *pci, u32 reg); ++void sunxi_pcie_dbi_ro_wr_en(struct sunxi_pcie *pci); ++void sunxi_pcie_dbi_ro_wr_dis(struct sunxi_pcie *pci); ++u8 sunxi_pcie_plat_find_capability(struct sunxi_pcie *pci, u8 cap); ++int sunxi_cleanup_uboot_msi_config(struct sunxi_pcie_port *pp); ++ ++#endif /* _PCIE_SUNXI_H */ +\ No newline at end of file +diff --git a/include/sunxi-gpio.h b/include/sunxi-gpio.h +new file mode 100644 +index 000000000000..111111111111 +--- /dev/null ++++ b/include/sunxi-gpio.h +@@ -0,0 +1,188 @@ ++/* SPDX-License-Identifier: GPL-2.0-or-later */ ++/* Copyright(c) 2020 - 2023 Allwinner Technology Co.,Ltd. All rights reserved. */ ++/* ++ * (C) Copyright 2015-2020 ++ * Allwinner Technology Co., Ltd. ++ * Wim Hwang ++ * ++ * sunxi gpio utils ++ * ++ * This program is free software; you can redistribute it and/or ++ * modify it under the terms of the GNU General Public License as ++ * published by the Free Software Foundation; either version 2 of ++ * the License, or (at your option) any later version. 
++ */ ++ ++#ifndef __SW_GPIO_H ++#define __SW_GPIO_H ++ ++#define SUNXI_PINCTRL "pio" ++#define SUNXI_R_PINCTRL "r_pio" ++#include ++ ++/* pin group base number name space, ++ * the max pin number : 26*32=832. ++ */ ++#define SUNXI_BANK_SIZE 32 ++#define SUNXI_PA_BASE 0 ++#define SUNXI_PB_BASE 32 ++#define SUNXI_PC_BASE 64 ++#define SUNXI_PD_BASE 96 ++#define SUNXI_PE_BASE 128 ++#define SUNXI_PF_BASE 160 ++#define SUNXI_PG_BASE 192 ++#define SUNXI_PH_BASE 224 ++#define SUNXI_PI_BASE 256 ++#define SUNXI_PJ_BASE 288 ++#define SUNXI_PK_BASE 320 ++#define SUNXI_PL_BASE 352 ++#define SUNXI_PM_BASE 384 ++#define SUNXI_PN_BASE 416 ++#define SUNXI_PO_BASE 448 ++#define AXP_PIN_BASE 1024 ++ ++#define SUNXI_PIN_NAME_MAX_LEN 8 ++ ++/* sunxi gpio name space */ ++#define GPIOA(n) (SUNXI_PA_BASE + (n)) ++#define GPIOB(n) (SUNXI_PB_BASE + (n)) ++#define GPIOC(n) (SUNXI_PC_BASE + (n)) ++#define GPIOD(n) (SUNXI_PD_BASE + (n)) ++#define GPIOE(n) (SUNXI_PE_BASE + (n)) ++#define GPIOF(n) (SUNXI_PF_BASE + (n)) ++#define GPIOG(n) (SUNXI_PG_BASE + (n)) ++#define GPIOH(n) (SUNXI_PH_BASE + (n)) ++#define GPIOI(n) (SUNXI_PI_BASE + (n)) ++#define GPIOJ(n) (SUNXI_PJ_BASE + (n)) ++#define GPIOK(n) (SUNXI_PK_BASE + (n)) ++#define GPIOL(n) (SUNXI_PL_BASE + (n)) ++#define GPIOM(n) (SUNXI_PM_BASE + (n)) ++#define GPION(n) (SUNXI_PN_BASE + (n)) ++#define GPIOO(n) (SUNXI_PO_BASE + (n)) ++#define GPIO_AXP(n) (AXP_PIN_BASE + (n)) ++ ++/* sunxi specific input/output/eint functions */ ++#define SUNXI_PIN_INPUT_FUNC (0) ++#define SUNXI_PIN_OUTPUT_FUNC (1) ++#define SUNXI_PIN_EINT_FUNC (6) ++#define SUNXI_PIN_IO_DISABLE (7) ++ ++/* axp group base number name space, ++ * axp pinctrl number space coherent to sunxi-pinctrl. 
++ */ ++#define AXP_PINCTRL "axp-pinctrl" ++#define AXP_CFG_GRP (0xFFFF) ++#define AXP_PIN_INPUT_FUNC (0) ++#define AXP_PIN_OUTPUT_FUNC (1) ++#define IS_AXP_PIN(pin) (pin >= AXP_PIN_BASE) ++ ++/* sunxi specific pull up/down */ ++enum sunxi_pull_up_down { ++ SUNXI_PULL_DISABLE = 0, ++ SUNXI_PULL_UP, ++ SUNXI_PULL_DOWN, ++}; ++ ++/* sunxi specific data types */ ++enum sunxi_data_type { ++ SUNXI_DATA_LOW = 0, ++ SUNXI_DATA_HIGH = 0, ++}; ++ ++/* sunxi specific pull status */ ++enum sunxi_pin_pull { ++ SUNXI_PIN_PULL_DISABLE = 0x00, ++ SUNXI_PIN_PULL_UP = 0x01, ++ SUNXI_PIN_PULL_DOWN = 0x02, ++ SUNXI_PIN_PULL_RESERVED = 0x03, ++}; ++ ++/* sunxi specific driver levels */ ++enum sunxi_pin_drv_level { ++ SUNXI_DRV_LEVEL0 = 10, ++ SUNXI_DRV_LEVEL1 = 20, ++ SUNXI_DRV_LEVEL2 = 30, ++ SUNXI_DRV_LEVEL3 = 40, ++}; ++ ++/* sunxi specific data bit status */ ++enum sunxi_pin_data_status { ++ SUNXI_PIN_DATA_LOW = 0x00, ++ SUNXI_PIN_DATA_HIGH = 0x01, ++}; ++ ++/* sunxi pin interrupt trigger mode */ ++enum sunxi_pin_int_trigger_mode { ++ SUNXI_PIN_EINT_POSITIVE_EDGE = 0x0, ++ SUNXI_PIN_EINT_NEGATIVE_EDGE = 0x1, ++ SUNXI_PIN_EINT_HIGN_LEVEL = 0x2, ++ SUNXI_PIN_EINT_LOW_LEVEL = 0x3, ++ SUNXI_PIN_EINT_DOUBLE_EDGE = 0x4 ++}; ++ ++/* the source clock of pin int */ ++enum sunxi_pin_int_source_clk { ++ SUNXI_PIN_INT_SRC_CLK_32K = 0x0, ++ SUNXI_PIN_INT_SRC_CLK_24M = 0x1 ++}; ++ ++/* ++ * pin configuration (pull up/down and drive strength) type and its value are ++ * packed together into a 32-bits. The lower 8-bits represent the configuration ++ * type and the upper 24-bits hold the value of the configuration type. 
++ */ ++#define SUNXI_PINCFG_PACK(type, value) (((value) << 8) | (type & 0xFF)) ++#define SUNXI_PINCFG_UNPACK_TYPE(cfg) ((cfg) & 0xFF) ++#define SUNXI_PINCFG_UNPACK_VALUE(cfg) (((cfg) & 0xFFFFFF00) >> 8) ++ ++static inline int sunxi_gpio_to_name(int gpio, char *name) ++{ ++ int bank, index; ++ ++ if (!name) ++ return -EINVAL; ++ ++ if (IS_AXP_PIN(gpio)) { ++ /* axp gpio name like this : GPIO0/GPIO1/.. */ ++ index = gpio - AXP_PIN_BASE; ++ sprintf(name, "GPIO%d", index); ++ } else { ++ /* sunxi gpio name like this : PA0/PA1/PB0 */ ++ bank = gpio / SUNXI_BANK_SIZE; ++ index = gpio % SUNXI_BANK_SIZE; ++ sprintf(name, "P%c%d", ('A' + bank), index); ++ } ++ ++ return 0; ++} ++ ++/* pio end, invalid macro */ ++#define GPIO_INDEX_INVALID (0xFFFFFFF0) ++#define GPIO_CFG_INVALID (0xEEEEEEEE) ++#define GPIO_PULL_INVALID (0xDDDDDDDD) ++#define GPIO_DRVLVL_INVALID (0xCCCCCCCC) ++#define IRQ_NUM_INVALID (0xFFFFFFFF) ++#define AXP_PORT_VAL (0x0000FFFF) ++ ++/* pio default macro */ ++#define GPIO_PULL_DEFAULT ((u32)-1) ++#define GPIO_DRVLVL_DEFAULT ((u32)-1) ++#define GPIO_DATA_DEFAULT ((u32)-1) ++ ++/* ++ * struct gpio_config - gpio config info ++ * @gpio: gpio global index, must be unique ++ * @mul_sel: multi sel val: 0 - input, 1 - output. 
++ * @pull: pull val: 0 - pull up/down disable, 1 - pull up ++ * @drv_level: driver level val: 0 - level 0, 1 - level 1 ++ * @data: data val: 0 - low, 1 - high, only valid when mul_sel is input/output ++ */ ++struct gpio_config { ++ u32 data; ++ u32 gpio; ++ u32 mul_sel; ++ u32 pull; ++ u32 drv_level; ++}; ++ ++#endif +-- +Armbian diff --git a/patch/kernel/archive/sunxi-6.18/patches.armbian/drv-phy-allwinner-add-pcie-usb3-driver.patch b/patch/kernel/archive/sunxi-6.18/patches.armbian/drv-phy-allwinner-add-pcie-usb3-driver.patch new file mode 100644 index 0000000000..2acc104fbc --- /dev/null +++ b/patch/kernel/archive/sunxi-6.18/patches.armbian/drv-phy-allwinner-add-pcie-usb3-driver.patch @@ -0,0 +1,1238 @@ +From 0000000000000000000000000000000000000000 Mon Sep 17 00:00:00 2001 +From: Marvin Wewer +Date: Fri, 27 Jun 2025 12:40:09 +0200 +Subject: phy: allwinner: add INNO PCIe/USB3 combo PHY driver + +Signed-off-by: Marvin Wewer +--- + drivers/phy/allwinner/Kconfig | 8 + + drivers/phy/allwinner/Makefile | 1 + + drivers/phy/allwinner/sunxi-inno-combophy.c | 1193 ++++++++++ + 3 files changed, 1202 insertions(+) + +diff --git a/drivers/phy/allwinner/Kconfig b/drivers/phy/allwinner/Kconfig +index 111111111111..222222222222 100644 +--- a/drivers/phy/allwinner/Kconfig ++++ b/drivers/phy/allwinner/Kconfig +@@ -67,3 +67,11 @@ config AC200_PHY_CTL + Enable this to support the Ethernet PHY operation of the AC200 + mixed signal chip. This driver just enables and configures the + PHY, the PHY itself is supported by a standard driver. ++ ++config AW_INNO_COMBOPHY ++ tristate "Allwinner INNO COMBO PHY Driver" ++ depends on ARCH_SUNXI && OF ++ select GENERIC_PHY ++ help ++ Enable this to support the Allwinner PCIe/USB3.0 combo PHY ++ with INNOSILICON IP block. 
+diff --git a/drivers/phy/allwinner/Makefile b/drivers/phy/allwinner/Makefile +index 111111111111..222222222222 100644 +--- a/drivers/phy/allwinner/Makefile ++++ b/drivers/phy/allwinner/Makefile +@@ -4,3 +4,4 @@ obj-$(CONFIG_PHY_SUN6I_MIPI_DPHY) += phy-sun6i-mipi-dphy.o + obj-$(CONFIG_PHY_SUN9I_USB) += phy-sun9i-usb.o + obj-$(CONFIG_PHY_SUN50I_USB3) += phy-sun50i-usb3.o + obj-$(CONFIG_AC200_PHY_CTL) += ac200-ephy-ctl.o ++obj-$(CONFIG_AW_INNO_COMBOPHY) += sunxi-inno-combophy.o +diff --git a/drivers/phy/allwinner/sunxi-inno-combophy.c b/drivers/phy/allwinner/sunxi-inno-combophy.c +new file mode 100644 +index 000000000000..111111111111 +--- /dev/null ++++ b/drivers/phy/allwinner/sunxi-inno-combophy.c +@@ -0,0 +1,1193 @@ ++// SPDX-License-Identifier: GPL-2.0 ++/* Copyright(c) 2020 - 2023 Allwinner Technology Co.,Ltd. All rights reserved. */ ++/* ++ * Allwinner PIPE USB3.0 PCIE Combo Phy driver ++ * ++ * Copyright (C) 2022 Allwinner Electronics Co., Ltd. ++ */ ++ ++#include ++#include ++#include ++#include ++#include ++#include ++#include ++#include ++#include ++#include ++#include ++#include ++#include ++ ++ ++/* PCIE USB3 Sub-System Registers */ ++/* Sub-System Version Reset Register */ ++#define PCIE_USB3_SYS_VER 0x00 ++ ++/* Sub-System PCIE Bus Gating Reset Register */ ++#define PCIE_COMBO_PHY_BGR 0x04 ++#define PCIE_SLV_ACLK_EN BIT(18) ++#define PCIE_ACLK_EN BIT(17) ++#define PCIE_HCLK_EN BIT(16) ++#define PCIE_PERSTN BIT(1) ++#define PCIE_PW_UP_RSTN BIT(0) ++ ++/* Sub-System USB3 Bus Gating Reset Register */ ++#define USB3_COMBO_PHY_BGR 0x08 ++#define USB3_ACLK_EN BIT(17) ++#define USB3_HCLK_EN BIT(16) ++#define USB3_U2_PHY_RSTN BIT(4) ++#define USB3_U2_PHY_MUX_EN BIT(3) ++#define USB3_U2_PHY_MUX_SEL BIT(0) ++#define USB3_RESETN BIT(0) ++ ++/* Sub-System PCIE PHY Control Register */ ++#define PCIE_COMBO_PHY_CTL 0x10 ++#define PHY_USE_SEL BIT(31) /* 0:PCIE; 1:USB3 */ ++#define PHY_CLK_SEL BIT(30) /* 0:internal clk; 1:external clk */ ++#define PHY_BIST_EN BIT(16) 
++#define PHY_PIPE_SW BIT(9) ++#define PHY_PIPE_SEL BIT(8) /* 0:rstn by PCIE or USB3; 1:rstn by PHY_PIPE_SW */ ++#define PHY_PIPE_CLK_INVERT BIT(4) ++#define PHY_FPGA_SYS_RSTN BIT(1) /* for FPGA */ ++#define PHY_RSTN BIT(0) ++ ++/* Registers */ ++#define COMBO_REG_SYSVER(comb_base_addr) ((comb_base_addr) \ ++ + PCIE_USB3_SYS_VER) ++#define COMBO_REG_PCIEBGR(comb_base_addr) ((comb_base_addr) \ ++ + PCIE_COMBO_PHY_BGR) ++#define COMBO_REG_USB3BGR(comb_base_addr) ((comb_base_addr) \ ++ + USB3_COMBO_PHY_BGR) ++#define COMBO_REG_PHYCTRL(comb_base_addr) ((comb_base_addr) \ ++ + PCIE_COMBO_PHY_CTL) ++/* Sub-System Version Number */ ++#define COMBO_VERSION_01 (0x10000) ++#define COMBO_VERSION_ANY (0x0) ++ ++#define KEY_PHY_USE_SEL "phy_use_sel" ++#define KEY_PHY_REFCLK_SEL "phy_refclk_sel" ++ ++enum phy_use_sel { ++ PHY_USE_BY_PCIE = 0, /* PHY used by PCIE */ ++ PHY_USE_BY_USB3, /* PHY used by USB3 */ ++ PHY_USE_BY_PCIE_USB3_U2,/* PHY used by PCIE & USB3_U2 */ ++}; ++ ++enum phy_refclk_sel { ++ INTER_SIG_REF_CLK = 0, /* PHY use internal single end reference clock */ ++ EXTER_DIF_REF_CLK, /* PHY use external single end reference clock */ ++}; ++ ++extern struct atomic_notifier_head inno_subsys_notifier_list; ++ ++struct sunxi_combophy_of_data { ++ bool has_cfg_clk; ++ bool has_slv_clk; ++ bool has_phy_mbus_clk; ++ bool has_phy_ahb_clk; ++ bool has_pcie_axi_clk; ++ bool has_u2_phy_mux; ++ bool need_noppu_rst; ++ bool has_u3_phy_data_quirk; ++ bool need_optimize_jitter; ++}; ++ ++struct sunxi_combphy { ++ struct device *dev; ++ struct phy *phy; ++ void __iomem *phy_ctl; /* parse dts, control the phy mode, reset and power */ ++ void __iomem *phy_clk; /* parse dts, set the phy clock */ ++ struct reset_control *reset; ++ struct reset_control *noppu_reset; ++ ++ struct clk *phyclk_ref; ++ struct clk *refclk_par; ++ struct clk *phyclk_cfg; ++ struct clk *cfgclk_par; ++ struct clk *phy_mclk; ++ struct clk *phy_hclk; ++ struct clk *phy_axi; ++ struct clk *phy_axi_par; ++ __u8 mode; 
++ __u32 vernum; /* version number */ ++ enum phy_use_sel user; ++ enum phy_refclk_sel ref; ++ struct notifier_block pwr_nb; ++ const struct sunxi_combophy_of_data *drvdata; ++ ++ struct regulator *select3v3_supply; ++ bool initialized; ++}; ++ ++ATOMIC_NOTIFIER_HEAD(inno_subsys_notifier_list); ++EXPORT_SYMBOL(inno_subsys_notifier_list); ++ ++/* PCIE USB3 Sub-system Application */ ++static void combo_pcie_clk_set(struct sunxi_combphy *combphy, bool enable) ++{ ++ u32 val, tmp = 0; ++ ++ val = readl(COMBO_REG_PCIEBGR(combphy->phy_ctl)); ++ if (combphy->drvdata->has_slv_clk) ++ tmp = PCIE_SLV_ACLK_EN | PCIE_ACLK_EN | PCIE_HCLK_EN | PCIE_PERSTN | PCIE_PW_UP_RSTN; ++ else ++ tmp = PCIE_ACLK_EN | PCIE_HCLK_EN | PCIE_PERSTN | PCIE_PW_UP_RSTN; ++ if (enable) ++ val |= tmp; ++ else ++ val &= ~tmp; ++ writel(val, COMBO_REG_PCIEBGR(combphy->phy_ctl)); ++} ++ ++static void combo_usb3_clk_set(struct sunxi_combphy *combphy, bool enable) ++{ ++ u32 val, tmp = 0; ++ ++ val = readl(COMBO_REG_USB3BGR(combphy->phy_ctl)); ++ if (combphy->drvdata->has_u2_phy_mux) ++ tmp = USB3_ACLK_EN | USB3_HCLK_EN | USB3_U2_PHY_MUX_SEL | USB3_U2_PHY_RSTN | USB3_U2_PHY_MUX_EN; ++ else ++ tmp = USB3_ACLK_EN | USB3_HCLK_EN | USB3_RESETN; ++ if (enable) ++ val |= tmp; ++ else ++ val &= ~tmp; ++ writel(val, COMBO_REG_USB3BGR(combphy->phy_ctl)); ++} ++ ++static void combo_phy_mode_set(struct sunxi_combphy *combphy, bool enable) ++{ ++ u32 val; ++ ++ val = readl(COMBO_REG_PHYCTRL(combphy->phy_ctl)); ++ ++ if (combphy->user == PHY_USE_BY_PCIE || combphy->user == PHY_USE_BY_PCIE_USB3_U2) ++ val &= ~PHY_USE_SEL; ++ else if (combphy->user == PHY_USE_BY_USB3) ++ val |= PHY_USE_SEL; ++ ++ if (combphy->ref == INTER_SIG_REF_CLK) ++ val &= ~PHY_CLK_SEL; ++ else if (combphy->ref == EXTER_DIF_REF_CLK) ++ val |= PHY_CLK_SEL; ++ ++ if (enable) ++ val |= PHY_RSTN; ++ else ++ val &= ~PHY_RSTN; ++ ++ writel(val, COMBO_REG_PHYCTRL(combphy->phy_ctl)); ++} ++ ++static u32 combo_sysver_get(struct sunxi_combphy *combphy) ++{ 
++ u32 reg; ++ ++ reg = readl(COMBO_REG_SYSVER(combphy->phy_ctl)); ++ ++ return reg; ++} ++ ++static void pcie_usb3_sub_system_enable(struct sunxi_combphy *combphy) ++{ ++ combo_phy_mode_set(combphy, true); ++ ++ if (combphy->user == PHY_USE_BY_PCIE) ++ combo_pcie_clk_set(combphy, true); ++ else if (combphy->user == PHY_USE_BY_USB3) ++ combo_usb3_clk_set(combphy, true); ++ else if (combphy->user == PHY_USE_BY_PCIE_USB3_U2) { ++ combo_pcie_clk_set(combphy, true); ++ combo_usb3_clk_set(combphy, true); ++ } ++ ++ combphy->vernum = combo_sysver_get(combphy); ++} ++ ++static void pcie_usb3_sub_system_disable(struct sunxi_combphy *combphy) ++{ ++ combo_phy_mode_set(combphy, false); ++ ++ if (combphy->user == PHY_USE_BY_PCIE) ++ combo_pcie_clk_set(combphy, false); ++ else if (combphy->user == PHY_USE_BY_USB3) ++ combo_usb3_clk_set(combphy, false); ++ else if (combphy->user == PHY_USE_BY_PCIE_USB3_U2) { ++ combo_pcie_clk_set(combphy, false); ++ combo_usb3_clk_set(combphy, false); ++ } ++} ++ ++static int sunxi_combphy_enable_clocks(struct sunxi_combphy *combphy) ++{ ++ int ret; ++ struct device *dev = combphy->dev; ++ ++ if (!IS_ERR_OR_NULL(combphy->phyclk_ref)) { ++ ret = clk_prepare_enable(combphy->phyclk_ref); ++ if (ret) return ret; ++ } ++ ++ if (combphy->drvdata->has_cfg_clk && !IS_ERR_OR_NULL(combphy->phyclk_cfg)) { ++ ret = clk_prepare_enable(combphy->phyclk_cfg); ++ if (ret) goto err_cfg; ++ } ++ ++ if (combphy->drvdata->has_phy_ahb_clk && !IS_ERR_OR_NULL(combphy->phy_hclk)) { ++ ret = clk_prepare_enable(combphy->phy_hclk); ++ if (ret) { ++ dev_err(dev, "cannot enable phy_hclk\n"); ++ goto err_hclk; ++ } ++ } ++ ++ if (combphy->drvdata->has_pcie_axi_clk && !IS_ERR_OR_NULL(combphy->phy_axi)) { ++ ret = clk_prepare_enable(combphy->phy_axi); ++ if (ret) { ++ dev_err(dev, "cannot enable phy_axi\n"); ++ goto err_axi; ++ } ++ } ++ ++ if (combphy->drvdata->has_phy_mbus_clk && !IS_ERR_OR_NULL(combphy->phy_mclk)) { ++ ret = clk_prepare_enable(combphy->phy_mclk); ++ if 
(ret) { ++ dev_err(dev, "cannot enable phy_mclk\n"); ++ goto err_mclk; ++ } ++ } ++ ++ return 0; ++ ++err_mclk: ++ clk_disable_unprepare(combphy->phy_axi); ++err_axi: ++ clk_disable_unprepare(combphy->phy_hclk); ++err_hclk: ++ clk_disable_unprepare(combphy->phyclk_cfg); ++err_cfg: ++ clk_disable_unprepare(combphy->phyclk_ref); ++ return ret; ++} ++ ++static int sunxi_combphy_reset_deassert(struct sunxi_combphy *combphy) ++{ ++ int ret; ++ ++ if (!IS_ERR_OR_NULL(combphy->reset)) { ++ ret = reset_control_deassert(combphy->reset); ++ if (ret) ++ return ret; ++ } ++ ++ if (combphy->drvdata->need_noppu_rst && !IS_ERR_OR_NULL(combphy->noppu_reset)) { ++ ret = reset_control_deassert(combphy->noppu_reset); ++ if (ret) { ++ if (!IS_ERR_OR_NULL(combphy->reset)) ++ reset_control_assert(combphy->reset); ++ return ret; ++ } ++ } ++ ++ return 0; ++} ++ ++static int pcie_usb3_sub_system_init(struct platform_device *pdev) ++{ ++ struct device *dev = &pdev->dev; ++ struct sunxi_combphy *combphy = platform_get_drvdata(pdev); ++ bool already_enabled = false; ++ int ret; ++ ++ if (!combphy || combphy->initialized) ++ return 0; ++ ++ ++ if (combphy->phy_ctl) { ++ if (readl(combphy->phy_ctl + PCIE_COMBO_PHY_CTL) & PHY_RSTN) { ++ dev_info(dev, "PHY already enabled by Bootloader.\n"); ++ already_enabled = true; ++ } ++ } ++ ++ ++ if (!IS_ERR_OR_NULL(combphy->select3v3_supply)) { ++ ret = regulator_enable(combphy->select3v3_supply); ++ if (ret) return ret; ++ } ++ ++ ++ if (!already_enabled) { ++ ++ ret = sunxi_combphy_reset_deassert(combphy); ++ if (ret) goto err_regulator; ++ ++ ++ if (!IS_ERR_OR_NULL(combphy->phyclk_ref)) ++ clk_set_rate(combphy->phyclk_ref, 100000000); ++ ++ if (combphy->drvdata->has_cfg_clk && !IS_ERR_OR_NULL(combphy->phyclk_cfg)) ++ clk_set_rate(combphy->phyclk_cfg, 200000000); ++ } ++ ++ ++ ret = sunxi_combphy_enable_clocks(combphy); ++ if (ret) goto err_regulator; ++ ++ ++ if (!already_enabled) ++ pcie_usb3_sub_system_enable(combphy); ++ ++ combphy->initialized = 
true; ++ return 0; ++ ++err_regulator: ++ if (!IS_ERR_OR_NULL(combphy->select3v3_supply)) ++ regulator_disable(combphy->select3v3_supply); ++ return ret; ++} ++ ++static int pcie_usb3_sub_system_exit(struct platform_device *pdev) ++{ ++ struct sunxi_combphy *combphy = platform_get_drvdata(pdev); ++ ++ if (!combphy || !combphy->initialized) ++ return 0; ++ ++ ++ pcie_usb3_sub_system_disable(combphy); ++ ++ ++ if (combphy->drvdata->has_phy_mbus_clk && !IS_ERR_OR_NULL(combphy->phy_mclk)) ++ clk_disable_unprepare(combphy->phy_mclk); ++ ++ if (combphy->drvdata->has_pcie_axi_clk && !IS_ERR_OR_NULL(combphy->phy_axi)) ++ clk_disable_unprepare(combphy->phy_axi); ++ ++ if (combphy->drvdata->has_phy_ahb_clk && !IS_ERR_OR_NULL(combphy->phy_hclk)) ++ clk_disable_unprepare(combphy->phy_hclk); ++ ++ if (combphy->drvdata->has_cfg_clk && !IS_ERR_OR_NULL(combphy->phyclk_cfg)) ++ clk_disable_unprepare(combphy->phyclk_cfg); ++ ++ if (!IS_ERR_OR_NULL(combphy->phyclk_ref)) ++ clk_disable_unprepare(combphy->phyclk_ref); ++ ++ ++ if (combphy->drvdata->need_noppu_rst && !IS_ERR_OR_NULL(combphy->noppu_reset)) ++ reset_control_assert(combphy->noppu_reset); ++ ++ if (!IS_ERR_OR_NULL(combphy->reset)) ++ reset_control_assert(combphy->reset); ++ ++ ++ if (!IS_ERR_OR_NULL(combphy->select3v3_supply)) ++ regulator_disable(combphy->select3v3_supply); ++ ++ combphy->initialized = false; ++ return 0; ++} ++ ++static int sunxi_inno_combophy_power_event(struct notifier_block *nb, unsigned long event, void *p) ++{ ++ struct sunxi_combphy *combphy = container_of(nb, struct sunxi_combphy, pwr_nb); ++ struct platform_device *pdev = to_platform_device(combphy->dev); ++ ++ dev_dbg(combphy->dev, "event %s\n", event ? 
"on" : "off"); ++ if (event) { ++ if (combphy->initialized) { ++ pcie_usb3_sub_system_exit(pdev); ++ } ++ pcie_usb3_sub_system_init(pdev); ++ } ++ else ++ pcie_usb3_sub_system_exit(pdev); ++ ++ return NOTIFY_DONE; ++} ++ ++static void sunxi_combphy_pcie_phy_enable(struct sunxi_combphy *combphy) ++{ ++ u32 val; ++ ++ /* Enable clocks and power for PCIe */ ++ val = readl(combphy->phy_ctl + PCIE_COMBO_PHY_BGR); ++ val &= ~((0x03 << 0) | (0x07 << 16)); /* Clear power/reset and clock bits */ ++ val |= (0x03 << 0); /* Set PCIE_PERSTN and PCIE_PW_UP_RSTN */ ++ if (combphy->drvdata->has_slv_clk) ++ val |= (0x07 << 16); /* Enable all clocks: SLV_ACLK, ACLK, HCLK */ ++ else ++ val |= (0x03 << 16); /* Enable ACLK, HCLK */ ++ writel(val, combphy->phy_ctl + PCIE_COMBO_PHY_BGR); ++ ++ /* Assert PHY reset */ ++ val = readl(combphy->phy_ctl + PCIE_COMBO_PHY_CTL); ++ val &= ~PHY_USE_SEL; /* Select PCIe mode */ ++ val &= ~(0x03 << 8); /* Clear PHY_PIPE_SEL and PHY_PIPE_SW */ ++ val &= ~PHY_RSTN; /* Assert PHY reset */ ++ writel(val, combphy->phy_ctl + PCIE_COMBO_PHY_CTL); ++ ++ /* Wait for reset to propagate */ ++ udelay(10); ++ ++ /* De-assert PHY reset */ ++ val = readl(combphy->phy_ctl + PCIE_COMBO_PHY_CTL); ++ val &= ~PHY_CLK_SEL; /* Select internal clock */ ++ val |= PHY_RSTN; /* De-assert PHY reset */ ++ writel(val, combphy->phy_ctl + PCIE_COMBO_PHY_CTL); ++} ++ ++static void sunxi_combphy_usb3_phy_set(struct sunxi_combphy *combphy, bool enable) ++{ ++ u32 val, tmp = 0; ++ ++ val = readl(combphy->phy_clk + 0x1418); ++ tmp = GENMASK(17, 16); ++ if (enable) { ++ val &= ~tmp; ++ val |= BIT(25); ++ } else { ++ val |= tmp; ++ val &= ~BIT(25); ++ } ++ writel(val, combphy->phy_clk + 0x1418); ++ ++ /* reg_rx_eq_bypass[3]=1, rx_ctle_res_cal_bypass */ ++ val = readl(combphy->phy_clk + 0x0674); ++ if (enable) ++ val |= BIT(3); ++ else ++ val &= ~BIT(3); ++ writel(val, combphy->phy_clk + 0x0674); ++ ++ /* rx_ctle_res_cal=0xf, 0x4->0xf */ ++ val = readl(combphy->phy_clk + 0x0704); ++ tmp = 
GENMASK(9, 8) | BIT(11); ++ if (enable) ++ val |= tmp; ++ else ++ val &= ~tmp; ++ writel(val, combphy->phy_clk + 0x0704); ++ ++ /* CDR_div_fin_gain1 */ ++ val = readl(combphy->phy_clk + 0x0400); ++ if (enable) ++ val |= BIT(4); ++ else ++ val &= ~BIT(4); ++ writel(val, combphy->phy_clk + 0x0400); ++ ++ /* CDR_div1_fin_gain1 */ ++ val = readl(combphy->phy_clk + 0x0404); ++ tmp = GENMASK(3, 0) | BIT(5); ++ if (enable) ++ val |= tmp; ++ else ++ val &= ~tmp; ++ writel(val, combphy->phy_clk + 0x0404); ++ ++ /* CDR_div3_fin_gain1 */ ++ val = readl(combphy->phy_clk + 0x0408); ++ if (enable) ++ val |= BIT(5); ++ else ++ val &= ~BIT(5); ++ writel(val, combphy->phy_clk + 0x0408); ++ ++ val = readl(combphy->phy_clk + 0x109c); ++ if (enable) ++ val |= BIT(1); ++ else ++ val &= ~BIT(1); ++ writel(val, combphy->phy_clk + 0x109c); ++ ++ /* balance parm configure */ ++ if (combphy->drvdata->has_u3_phy_data_quirk) { ++ val = readl(combphy->phy_clk + 0x0804); ++ if (enable) ++ val |= (0x6<<4); ++ else ++ val &= ~(0xf<<4); ++ writel(val, combphy->phy_clk + 0x0804); ++ } ++ ++ /* SSC configure */ ++ val = readl(combphy->phy_clk + 0x107c); ++ tmp = 0x3f << 12; ++ val = val & (~tmp); ++ val |= ((0x1 << 12) & tmp); /* div_N */ ++ writel(val, combphy->phy_clk + 0x107c); ++ ++ val = readl(combphy->phy_clk + 0x1020); ++ tmp = 0x1f << 0; ++ val = val & (~tmp); ++ val |= ((0x6 << 0) & tmp); /* modulation freq div */ ++ writel(val, combphy->phy_clk + 0x1020); ++ ++ val = readl(combphy->phy_clk + 0x1034); ++ tmp = 0x7f << 16; ++ val = val & (~tmp); ++ val |= ((0x9 << 16) & tmp); /* spread[6:0], 400*9=4410ppm ssc */ ++ writel(val, combphy->phy_clk + 0x1034); ++ ++ val = readl(combphy->phy_clk + 0x101c); ++ tmp = 0x1 << 27; ++ val = val & (~tmp); ++ val |= ((0x1 << 27) & tmp); /* choose downspread */ ++ ++ tmp = 0x1 << 28; ++ val = val & (~tmp); ++ if (enable) ++ val |= ((0x0 << 28) & tmp); /* don't disable ssc = 0 */ ++ else ++ val |= ((0x1 << 28) & tmp); /* don't enable ssc = 1 */ ++ 
writel(val, combphy->phy_clk + 0x101c); ++ ++#ifdef SUNXI_INNO_COMMBOPHY_DEBUG ++ /* TX Eye configure bypass_en */ ++ val = readl(combphy->phy_clk + 0x0ddc); ++ if (enable) ++ val |= BIT(4); /* 0x0ddc[4]=1 */ ++ else ++ val &= ~BIT(4); ++ writel(val, combphy->phy_clk + 0x0ddc); ++ ++ /* Leg_cur[6:0] - 7'd84 */ ++ val = readl(combphy->phy_clk + 0x0ddc); ++ val |= ((0x54 & BIT(6)) >> 3); /* 0x0ddc[3] */ ++ writel(val, combphy->phy_clk + 0x0ddc); ++ ++ val = readl(combphy->phy_clk + 0x0de0); ++ val |= ((0x54 & GENMASK(5, 0)) << 2); /* 0x0de0[7:2] */ ++ writel(val, combphy->phy_clk + 0x0de0); ++ ++ /* Leg_curb[5:0] - 6'd18 */ ++ val = readl(combphy->phy_clk + 0x0de4); ++ val |= ((0x12 & GENMASK(5, 1)) >> 1); /* 0x0de4[4:0] */ ++ writel(val, combphy->phy_clk + 0x0de4); ++ ++ val = readl(combphy->phy_clk + 0x0de8); ++ val |= ((0x12 & BIT(0)) << 7); /* 0x0de8[7] */ ++ writel(val, combphy->phy_clk + 0x0de8); ++ ++ /* Exswing_isel */ ++ val = readl(combphy->phy_clk + 0x0028); ++ val |= (0x4 << 28); /* 0x28[30:28] */ ++ writel(val, combphy->phy_clk + 0x0028); ++ ++ /* Exswing_en */ ++ val = readl(combphy->phy_clk + 0x0028); ++ if (enable) ++ val |= BIT(31); /* 0x28[31]=1 */ ++ else ++ val &= ~BIT(31); ++ writel(val, combphy->phy_clk + 0x0028); ++#endif ++} ++ ++static void sunxi_combphy_usb3_power_set(struct sunxi_combphy *combphy, bool enable) ++{ ++ u32 val; ++ ++ dev_dbg(combphy->dev, "set power %s\n", enable ? 
"on" : "off"); ++ val = readl(combphy->phy_clk + 0x14); ++ if (enable) ++ val &= ~BIT(26); ++ else ++ val |= BIT(26); ++ writel(val, combphy->phy_clk + 0x14); ++ ++ val = readl(combphy->phy_clk + 0x0); ++ if (enable) ++ val &= ~BIT(10); ++ else ++ val |= BIT(10); ++ writel(val, combphy->phy_clk + 0x0); ++} ++ ++static void sunxi_combphy_pcie_phy_100M(struct sunxi_combphy *combphy) ++{ ++ u32 val; ++ ++ val = readl(combphy->phy_clk + 0x1004); ++ val &= ~(0x3<<3); ++ val &= ~(0x1<<0); ++ val |= (0x1<<0); ++ val |= (0x1<<2); ++ val |= (0x1<<4); ++ writel(val, combphy->phy_clk + 0x1004); ++ ++ val = readl(combphy->phy_clk + 0x1018); ++ val &= ~(0x3<<4); ++ val |= (0x3<<4); ++ writel(val, combphy->phy_clk + 0x1018); ++ ++ val = readl(combphy->phy_clk + 0x101c); ++ val &= ~(0x0fffffff); ++ writel(val, combphy->phy_clk + 0x101c); ++ ++ /* if need optimize jitter parm*/ ++ if (combphy->drvdata->need_optimize_jitter) { ++ val = readl(combphy->phy_clk + 0x107c); ++ val &= ~(0x3ffff); ++ val |= (0x4<<12); ++ val |= 0x64; ++ writel(val, combphy->phy_clk + 0x107c); ++ ++ val = readl(combphy->phy_clk + 0x1030); ++ val &= ~(0x3<<20); ++ writel(val, combphy->phy_clk + 0x1030); ++ ++ val = readl(combphy->phy_clk + 0x1050); ++ val &= ~(0x7<<0); ++ val &= ~(0x7<<5); ++ val &= ~(0x3<<3); ++ val |= (0x3<<3); ++ writel(val, combphy->phy_clk + 0x1050); ++ } else { ++ val = readl(combphy->phy_clk + 0x107c); ++ val &= ~(0x3ffff); ++ val |= (0x2<<12); ++ val |= 0x32; ++ writel(val, combphy->phy_clk + 0x107c); ++ ++ val = readl(combphy->phy_clk + 0x1030); ++ val &= ~(0x3<<20); ++ writel(val, combphy->phy_clk + 0x1030); ++ ++ val = readl(combphy->phy_clk + 0x1050); ++ val &= ~(0x7<<5); ++ val |= (0x1<<5); ++ writel(val, combphy->phy_clk + 0x1050); ++ } ++ ++ val = readl(combphy->phy_clk + 0x1054); ++ val &= ~(0x7<<5); ++ val |= (0x1<<5); ++ writel(val, combphy->phy_clk + 0x1054); ++ ++ val = readl(combphy->phy_clk + 0x0804); ++ val &= ~(0xf<<4); ++ val |= (0xc<<4); ++ writel(val, 
combphy->phy_clk + 0x0804); ++ ++ val = readl(combphy->phy_clk + 0x109c); ++ val &= ~(0x3<<8); ++ val |= (0x1<<1); ++ writel(val, combphy->phy_clk + 0x109c); ++ ++ writel(0x80540a0a, combphy->phy_clk + 0x1418); ++} ++ ++static int sunxi_combphy_pcie_init(struct sunxi_combphy *combphy) ++{ ++ sunxi_combphy_pcie_phy_100M(combphy); ++ ++ sunxi_combphy_pcie_phy_enable(combphy); ++ ++ return 0; ++} ++ ++static int sunxi_combphy_pcie_exit(struct sunxi_combphy *combphy) ++{ ++ u32 val; ++ ++ /* set the phy: ++ * bit(17): aclk enable ++ * bit(16): hclk enbale ++ * bit(1) : pcie_presetn ++ * bit(0) : pcie_power_up_rstn ++ */ ++ val = readl(combphy->phy_ctl + PCIE_COMBO_PHY_BGR); ++ val &= (~(0x03<<0)); ++ val &= (~(0x03<<16)); ++ writel(val, combphy->phy_ctl + PCIE_COMBO_PHY_BGR); ++ ++ /* Assert the phy */ ++ val = readl(combphy->phy_ctl + PCIE_COMBO_PHY_CTL); ++ val &= (~PHY_USE_SEL); ++ val &= (~(0x03<<8)); ++ val &= (~PHY_RSTN); ++ writel(val, combphy->phy_ctl + PCIE_COMBO_PHY_CTL); ++ ++ return 0; ++} ++ ++static int sunxi_combphy_usb3_init(struct sunxi_combphy *combphy) ++{ ++ sunxi_combphy_usb3_phy_set(combphy, true); ++ ++ return 0; ++} ++ ++static int sunxi_combphy_usb3_exit(struct sunxi_combphy *combphy) ++{ ++ sunxi_combphy_usb3_phy_set(combphy, false); ++ ++ return 0; ++} ++ ++static int sunxi_combphy_usb3_power_on(struct sunxi_combphy *combphy) ++{ ++ int ret; ++ ++ sunxi_combphy_usb3_power_set(combphy, true); ++ ++ if (combphy->select3v3_supply) { ++ ret = regulator_set_voltage(combphy->select3v3_supply, 3300000, 3300000); ++ if (ret) { ++ dev_err(combphy->dev, "set select3v3-supply failed\n"); ++ goto err0; ++ } ++ ++ ret = regulator_enable(combphy->select3v3_supply); ++ if (ret) { ++ dev_err(combphy->dev, "enable select3v3-supply failed\n"); ++ goto err0; ++ } ++ } ++ ++ return 0; ++err0: ++ sunxi_combphy_usb3_power_set(combphy, false); ++ ++ return ret; ++} ++ ++static int sunxi_combphy_usb3_power_off(struct sunxi_combphy *combphy) ++{ ++ 
sunxi_combphy_usb3_power_set(combphy, false); ++ ++ if (combphy->select3v3_supply) ++ regulator_disable(combphy->select3v3_supply); ++ ++ return 0; ++} ++ ++static int sunxi_combphy_set_mode(struct sunxi_combphy *combphy) ++{ ++ switch (combphy->mode) { ++ case PHY_TYPE_PCIE: ++ sunxi_combphy_pcie_init(combphy); ++ break; ++ case PHY_TYPE_USB3: ++ if (combphy->user == PHY_USE_BY_PCIE_USB3_U2) { ++ sunxi_combphy_pcie_init(combphy); ++ } else if (combphy->user == PHY_USE_BY_USB3) { ++ sunxi_combphy_usb3_init(combphy); ++ } ++ break; ++ default: ++ dev_err(combphy->dev, "incompatible PHY type\n"); ++ return -EINVAL; ++ } ++ ++ return 0; ++} ++ ++static int sunxi_combphy_init(struct phy *phy) ++{ ++ struct sunxi_combphy *combphy = phy_get_drvdata(phy); ++ int ret; ++ ++ ret = sunxi_combphy_set_mode(combphy); ++ if (ret) { ++ dev_err(combphy->dev, "invalid number of arguments\n"); ++ return ret; ++ } ++ ++ return ret; ++} ++ ++static int sunxi_combphy_exit(struct phy *phy) ++{ ++ struct sunxi_combphy *combphy = phy_get_drvdata(phy); ++ ++ switch (combphy->mode) { ++ case PHY_TYPE_PCIE: ++ sunxi_combphy_pcie_exit(combphy); ++ break; ++ case PHY_TYPE_USB3: ++ sunxi_combphy_usb3_exit(combphy); ++ break; ++ default: ++ dev_err(combphy->dev, "incompatible PHY type\n"); ++ return -EINVAL; ++ } ++ ++ return 0; ++} ++ ++static int sunxi_combphy_power_on(struct phy *phy) ++{ ++ struct sunxi_combphy *combphy = phy_get_drvdata(phy); ++ int ret; ++ switch (combphy->mode) { ++ case PHY_TYPE_PCIE: ++ break; ++ case PHY_TYPE_USB3: ++ ret = sunxi_combphy_usb3_power_on(combphy); ++ if (ret) ++ return ret; ++ break; ++ default: ++ dev_err(combphy->dev, "incompatible PHY type\n"); ++ return -EINVAL; ++ } ++ ++ return 0; ++} ++ ++static int sunxi_combphy_power_off(struct phy *phy) ++{ ++ struct sunxi_combphy *combphy = phy_get_drvdata(phy); ++ int ret; ++ ++ switch (combphy->mode) { ++ case PHY_TYPE_PCIE: ++ break; ++ case PHY_TYPE_USB3: ++ ret = sunxi_combphy_usb3_power_off(combphy); ++ 
if (ret) ++ return ret; ++ break; ++ default: ++ dev_err(combphy->dev, "incompatible PHY type\n"); ++ return -EINVAL; ++ } ++ ++ return 0; ++} ++ ++static const struct phy_ops sunxi_combphy_ops = { ++ .init = sunxi_combphy_init, ++ .exit = sunxi_combphy_exit, ++ .power_on = sunxi_combphy_power_on, ++ .power_off = sunxi_combphy_power_off, ++ .owner = THIS_MODULE, ++}; ++ ++static struct phy *sunxi_combphy_xlate(struct device *dev, ++ const struct of_phandle_args *args) ++{ ++ struct sunxi_combphy *combphy = dev_get_drvdata(dev); ++ ++ if (args->args_count != 1) { ++ dev_err(dev, "invalid number of arguments\n"); ++ return ERR_PTR(-EINVAL); ++ } ++ ++ if (combphy->mode != PHY_NONE && combphy->mode != args->args[0]) ++ dev_warn(dev, "phy type select %d overwriting type %d\n", ++ args->args[0], combphy->mode); ++ ++ combphy->mode = args->args[0]; ++ ++ return combphy->phy; ++} ++ ++static int sunxi_combphy_parse_dt(struct platform_device *pdev, ++ struct sunxi_combphy *combphy) ++{ ++ struct device *dev = &pdev->dev; ++ struct device_node *np = dev->of_node; ++ int ret = -1; ++ struct resource *res_ctl; ++ struct resource *res_clk; ++ ++ /* combo phy use sel */ ++ ret = of_property_read_u32(np, KEY_PHY_USE_SEL, &combphy->user); ++ if (ret) ++ dev_err(dev, "get phy_use_sel is fail, %d\n", ret); ++ ++ combphy->phyclk_ref = devm_clk_get(&pdev->dev, "phyclk_ref"); ++ if (IS_ERR(combphy->phyclk_ref)) ++ dev_dbg(dev, "failed to get phyclk_ref\n"); ++ ++ combphy->reset = devm_reset_control_get(dev, "phy_rst"); ++ if (IS_ERR(combphy->reset)) { ++ dev_err(dev, "failed to get reset control\n"); ++ return PTR_ERR(combphy->reset); ++ } ++ ++ if (combphy->drvdata->need_noppu_rst) { ++ combphy->noppu_reset = devm_reset_control_get(dev, "noppu_rst"); ++ if (IS_ERR(combphy->noppu_reset)) { ++ dev_err(dev, "failed to get noppu_reset control\n"); ++ return PTR_ERR(combphy->noppu_reset); ++ } ++ } ++ if (combphy->user == PHY_USE_BY_PCIE || combphy->user == PHY_USE_BY_PCIE_USB3_U2) { ++ 
++ combphy->refclk_par = devm_clk_get(&pdev->dev, "refclk_par"); ++ if (IS_ERR(combphy->refclk_par)) ++ dev_dbg(dev, "failed to get refclk_par\n"); ++ ++ if (IS_ERR(combphy->phyclk_ref) || IS_ERR(combphy->refclk_par)) { ++ dev_err(dev, "failed to get required clocks for ref\n"); ++ return -EINVAL; ++ } ++ ++ ret = clk_set_parent(combphy->phyclk_ref, combphy->refclk_par); ++ if (ret) { ++ dev_err(dev, "failed to set refclk parent\n"); ++ return -EINVAL; ++ } ++ } ++ ++ if (combphy->drvdata->has_cfg_clk) { ++ combphy->phyclk_cfg = devm_clk_get(&pdev->dev, "phyclk_cfg"); ++ if (IS_ERR(combphy->phyclk_cfg)) ++ dev_dbg(dev, "failed to get phyclk_cfg\n"); ++ ++ combphy->cfgclk_par = devm_clk_get(&pdev->dev, "cfgclk_par"); ++ if (IS_ERR(combphy->cfgclk_par)) ++ dev_dbg(dev, "failed to get cfgclk_par\n"); ++ ++ if (IS_ERR(combphy->phyclk_cfg) || IS_ERR(combphy->cfgclk_par)) { ++ dev_err(dev, "failed to get required clocks for cfg\n"); ++ return -EINVAL; ++ } ++ ++ ret = clk_set_parent(combphy->phyclk_cfg, combphy->cfgclk_par); ++ if (ret) { ++ dev_err(dev, "failed to set cfgclk parent\n"); ++ return -EINVAL; ++ } ++ } ++ ++ if (combphy->drvdata->has_pcie_axi_clk) { ++ combphy->phy_axi = devm_clk_get(&pdev->dev, "pclk_axi"); ++ if (IS_ERR(combphy->phy_axi)) { ++ dev_err(dev, "failed to get pclk_axi\n"); ++ return PTR_ERR(combphy->phy_axi); ++ } ++ ++ combphy->phy_axi_par = devm_clk_get(&pdev->dev, "pclk_axi_par"); ++ if (IS_ERR(combphy->phy_axi_par)) { ++ dev_err(dev, "failed to get pcie_axi_par\n"); ++ return PTR_ERR(combphy->phy_axi_par); ++ } ++ ++ ret = clk_set_parent(combphy->phy_axi, combphy->phy_axi_par); ++ if (ret) { ++ dev_err(dev, "failed to set parent\n"); ++ return -EINVAL; ++ } ++ } ++ ++ if (combphy->drvdata->has_phy_mbus_clk) { ++ combphy->phy_mclk = devm_clk_get(&pdev->dev, "phy_mclk"); ++ if (IS_ERR(combphy->phy_mclk)) { ++ dev_err(dev, "fail to get phy_mclk\n"); ++ return PTR_ERR(combphy->phy_mclk); ++ } ++ } ++ ++ if (combphy->drvdata->has_phy_ahb_clk) { 
++ combphy->phy_hclk = devm_clk_get(&pdev->dev, "phy_hclk"); ++ if (IS_ERR(combphy->phy_hclk)) { ++ dev_err(dev, "fail to get phy_hclk\n"); ++ return PTR_ERR(combphy->phy_hclk); ++ } ++ } ++ ++ res_ctl = platform_get_resource_byname(pdev, IORESOURCE_MEM, "phy-ctl"); ++ if (!res_ctl) { ++ dev_err(&pdev->dev, "get phy-ctl failed\n"); ++ return -ENODEV; ++ } ++ ++ combphy->phy_ctl = devm_ioremap_resource(&pdev->dev, res_ctl); ++ if (IS_ERR(combphy->phy_ctl)) { ++ dev_err(&pdev->dev, "ioremap phy-ctl failed\n"); ++ return PTR_ERR(combphy->phy_ctl); ++ } ++ ++ res_clk = platform_get_resource_byname(pdev, IORESOURCE_MEM, "phy-clk"); ++ if (!res_clk) { ++ dev_err(&pdev->dev, "get phy-clk failed\n"); ++ return -ENODEV; ++ } ++ ++ combphy->phy_clk = devm_ioremap_resource(&pdev->dev, res_clk); ++ if (IS_ERR(combphy->phy_clk)) { ++ dev_err(&pdev->dev, "ioremap phy-clk failed\n"); ++ return PTR_ERR(combphy->phy_clk); ++ } ++ ++ /* combo phy refclk sel */ ++ ret = of_property_read_u32(np, KEY_PHY_REFCLK_SEL, &combphy->ref); ++ if (ret) ++ dev_err(dev, "get phy_refclk_sel is fail, %d\n", ret); ++ ++ /* select ic supply */ ++ combphy->select3v3_supply = devm_regulator_get_optional(&pdev->dev, "select3v3"); ++ if (IS_ERR(combphy->select3v3_supply)) { ++ if (PTR_ERR(combphy->select3v3_supply) == -EPROBE_DEFER) ++ return PTR_ERR(combphy->select3v3_supply); ++ combphy->select3v3_supply = NULL; ++ } ++ ++ return 0; ++} ++ ++static int sunxi_combphy_probe(struct platform_device *pdev) ++{ ++ struct phy_provider *phy_provider; ++ struct device *dev = &pdev->dev; ++ struct sunxi_combphy *combphy; ++ const struct sunxi_combophy_of_data *data; ++ int ret; ++ ++ data = of_device_get_match_data(&pdev->dev); ++ ++ combphy = devm_kzalloc(dev, sizeof(*combphy), GFP_KERNEL); ++ if (!combphy) ++ return -ENOMEM; ++ ++ combphy->dev = dev; ++ combphy->mode = PHY_NONE; ++ combphy->drvdata = data; ++ ++ ret = sunxi_combphy_parse_dt(pdev, combphy); ++ if (ret) { ++ dev_err(dev, "failed to parse dts of 
combphy\n"); ++ return ret; ++ } ++ ++ combphy->phy = devm_phy_create(dev, NULL, &sunxi_combphy_ops); ++ if (IS_ERR(combphy->phy)) { ++ dev_err(dev, "failed to create combphy\n"); ++ return PTR_ERR(combphy->phy); ++ } ++ ++ platform_set_drvdata(pdev, combphy); ++ ++ ret = pcie_usb3_sub_system_init(pdev); ++ if (ret) { ++ dev_err(dev, "failed to init sub system\n"); ++ return ret; ++ } ++ ++ dev_info(dev, "Sub System Version: 0x%x\n", combphy->vernum); ++ ++ phy_set_drvdata(combphy->phy, combphy); ++ ++ phy_provider = devm_of_phy_provider_register(dev, sunxi_combphy_xlate); ++ ++ combphy->pwr_nb.notifier_call = sunxi_inno_combophy_power_event; ++ /* register inno power notifier */ ++ atomic_notifier_chain_register(&inno_subsys_notifier_list, &combphy->pwr_nb); ++ ++ pm_runtime_set_active(dev); ++ pm_runtime_enable(dev); ++ pm_runtime_get_sync(dev); ++ ++ return PTR_ERR_OR_ZERO(phy_provider); ++} ++ ++static void sunxi_combphy_remove(struct platform_device *pdev) ++{ ++ struct device *dev = &pdev->dev; ++ struct sunxi_combphy *combphy = platform_get_drvdata(pdev); ++ int ret; ++ ++ ret = pcie_usb3_sub_system_exit(pdev); ++ if (ret) { ++ dev_err(dev, "failed to exit sub system\n"); ++ ++ } ++ ++ /* unregister inno power notifier */ ++ atomic_notifier_chain_unregister(&inno_subsys_notifier_list, &combphy->pwr_nb); ++ ++ pm_runtime_disable(dev); ++ pm_runtime_put_noidle(dev); ++ pm_runtime_set_suspended(dev); ++ ++ ++} ++ ++static int __maybe_unused sunxi_combo_suspend(struct device *dev) ++{ ++ struct platform_device *pdev = to_platform_device(dev); ++ int ret; ++ ++ ret = pcie_usb3_sub_system_exit(pdev); ++ ++ if (ret) { ++ dev_err(dev, "failed to suspend sub system\n"); ++ return ret; ++ } ++ ++ return 0; ++} ++ ++static int __maybe_unused sunxi_combo_resume(struct device *dev) ++{ ++ struct platform_device *pdev = to_platform_device(dev); ++ int ret; ++ ++ ret = pcie_usb3_sub_system_init(pdev); ++ if (ret) { ++ dev_err(dev, "failed to resume sub system\n"); ++ 
return ret; ++ } ++ ++ return 0; ++} ++ ++static struct dev_pm_ops sunxi_combo_pm_ops = { ++ SET_SYSTEM_SLEEP_PM_OPS(sunxi_combo_suspend, sunxi_combo_resume) ++}; ++ ++/* ++ * inno-combphy: innosilicon combo phy ++ */ ++static const struct sunxi_combophy_of_data sunxi_inno_v1_of_data = { ++ .has_cfg_clk = false, ++}; ++ ++static const struct sunxi_combophy_of_data sunxi_inno_v2_of_data = { ++ .has_cfg_clk = true, ++ .has_slv_clk = true, ++ .has_phy_mbus_clk = true, ++ .has_phy_ahb_clk = true, ++ .has_pcie_axi_clk = true, ++ .has_u2_phy_mux = true, ++ .need_noppu_rst = true, ++ .has_u3_phy_data_quirk = true, ++ .need_optimize_jitter = true, ++}; ++ ++static const struct of_device_id sunxi_combphy_of_match[] = { ++ { ++ .compatible = "allwinner,inno-combphy", ++ .data = &sunxi_inno_v1_of_data, ++ }, ++ { ++ .compatible = "allwinner,inno-v2-combphy", ++ .data = &sunxi_inno_v2_of_data, ++ }, ++ { }, ++}; ++MODULE_DEVICE_TABLE(of, sunxi_combphy_of_match); ++ ++static struct platform_driver sunxi_combphy_driver = { ++ .probe = sunxi_combphy_probe, ++ .remove = sunxi_combphy_remove, ++ .driver = { ++ .name = "inno-combphy", ++ .of_match_table = sunxi_combphy_of_match, ++ .pm = &sunxi_combo_pm_ops, ++ }, ++}; ++module_platform_driver(sunxi_combphy_driver); ++ ++MODULE_DESCRIPTION("Allwinner INNO COMBOPHY driver"); ++MODULE_AUTHOR("songjundong@allwinnertech.com"); ++MODULE_VERSION("0.0.20"); ++MODULE_LICENSE("GPL v2"); +-- +Armbian diff --git a/patch/kernel/archive/sunxi-6.18/patches.backports/30-allwinner-a523-support-spi-controllers.patch b/patch/kernel/archive/sunxi-6.18/patches.backports/30-allwinner-a523-support-spi-controllers.patch index 18398c8c4a..6ae2983229 100644 --- a/patch/kernel/archive/sunxi-6.18/patches.backports/30-allwinner-a523-support-spi-controllers.patch +++ b/patch/kernel/archive/sunxi-6.18/patches.backports/30-allwinner-a523-support-spi-controllers.patch @@ -2,10 +2,14 @@ From 0000000000000000000000000000000000000000 Mon Sep 17 00:00:00 2001 From: 
Chen-Yu Tsai Date: Sun, 21 Dec 2025 11:05:52 +0000 Subject: arm64: allwinner: a523: Support SPI controllers + +Original patch by Chen-Yu Tsai. +Modified by [Marvin Wewer ]: Added SPI support for Radxa Cubie A5E Documentation/devicetree/bindings/spi/allwinner,sun6i-a31-spi.yaml arch/arm64/boot/dts/allwinner/sun55i-a523.dtsi arch/arm64/boot/dts/allwinner/sun55i-t527-orangepi-4a.dts drivers/spi/spi-sun6i.c + arch/arm64/boot/dts/allwinner/sun55i-a527-cubie-a5e.dts https://patchwork.kernel.org/project/linux-arm-kernel/cover/20251221110513.1850535-1-wens@kernel.org/ @@ -13,9 +17,10 @@ Signed-off-by: Chen-Yu Tsai --- Documentation/devicetree/bindings/spi/allwinner,sun6i-a31-spi.yaml | 4 + arch/arm64/boot/dts/allwinner/sun55i-a523.dtsi | 94 ++++++++++ + arch/arm64/boot/dts/allwinner/sun55i-a527-cubie-a5e.dts | 13 ++ arch/arm64/boot/dts/allwinner/sun55i-t527-orangepi-4a.dts | 15 ++ drivers/spi/spi-sun6i.c | 11 +- - 4 files changed, 120 insertions(+), 4 deletions(-) + 5 files changed, 133 insertions(+), 4 deletions(-) diff --git a/Documentation/devicetree/bindings/spi/allwinner,sun6i-a31-spi.yaml b/Documentation/devicetree/bindings/spi/allwinner,sun6i-a31-spi.yaml index 111111111111..222222222222 100644 @@ -158,6 +163,27 @@ index 111111111111..222222222222 100644 mcu_ccu: clock-controller@7102000 { compatible = "allwinner,sun55i-a523-mcu-ccu"; reg = <0x7102000 0x200>; +diff --git a/arch/arm64/boot/dts/allwinner/sun55i-a527-cubie-a5e.dts b/arch/arm64/boot/dts/allwinner/sun55i-a527-cubie-a5e.dts +index 111111111111..222222222222 100644 +--- a/arch/arm64/boot/dts/allwinner/sun55i-a527-cubie-a5e.dts ++++ b/arch/arm64/boot/dts/allwinner/sun55i-a527-cubie-a5e.dts +@@ -421,3 +421,16 @@ &usbphy { + usb1_vbus-supply = <®_usb_vbus>; + status = "okay"; + }; ++ ++&spi0 { ++ pinctrl-names = "default"; ++ pinctrl-0 = <&spi0_pc_pins>, <&spi0_cs0_pc_pin>; ++ status = "okay"; ++ w25q128: flash@0 { ++ compatible = "winbond,w25q128fw", "jedec,spi-nor"; ++ reg = <0>; ++ spi-max-frequency = 
<24000000>; ++ vcc-supply = <®_cldo1>; ++ status = "okay"; ++ }; ++}; diff --git a/arch/arm64/boot/dts/allwinner/sun55i-t527-orangepi-4a.dts b/arch/arm64/boot/dts/allwinner/sun55i-t527-orangepi-4a.dts index 111111111111..222222222222 100644 --- a/arch/arm64/boot/dts/allwinner/sun55i-t527-orangepi-4a.dts diff --git a/patch/kernel/archive/sunxi-6.18/series.conf b/patch/kernel/archive/sunxi-6.18/series.conf index 781f59d595..cfaa4848a0 100644 --- a/patch/kernel/archive/sunxi-6.18/series.conf +++ b/patch/kernel/archive/sunxi-6.18/series.conf @@ -550,3 +550,10 @@ patches.armbian/drv-usb-gadget-composite-rename-serial-manufacturer.patch patches.armbian/drv-video-st7796s-fb-tft-driver.patch patches.armbian/include-uapi-drm_fourcc-add-ARM-tiled-format-modifier.patch + patches.armbian/drv-clk-sunxi-ng-fix-clock-handling-for-ccu-sun55i-a523.patch + patches.armbian/drv-pci-sunxi-enable-pcie-support.patch + patches.armbian/drv-phy-allwinner-add-pcie-usb3-driver.patch + patches.armbian/drv-iommu-sunxi-add-iommu-driver.patch + patches.armbian/arm64-dts-sun55i-t527-orangepi-4a-enable-pcie-combophy.patch + patches.armbian/arm64-dts-sun55i-a527-cubie-a5e-enable-usbc-pcie-combophy.patch + patches.armbian/arm64-dts-sun55i-dtsi-add-iommu-usbc-pcie-combophy-nodes.patch diff --git a/patch/u-boot/sunxi-dev-u-boot-a523/allwinner-a523-support-spi-controllers.patch b/patch/u-boot/sunxi-dev-u-boot-a523/allwinner-a523-support-spi-controllers.patch new file mode 100644 index 0000000000..265df25515 --- /dev/null +++ b/patch/u-boot/sunxi-dev-u-boot-a523/allwinner-a523-support-spi-controllers.patch @@ -0,0 +1,127 @@ +From 0000000000000000000000000000000000000000 Mon Sep 17 00:00:00 2001 +From: Marvin Wewer +Date: Thu, 23 Oct 2025 13:30:29 +0000 +Subject: spi: sunxi: Add support for Allwinner A523 SPI controllers + +Signed-off-by: Marvin Wewer +--- + drivers/spi/spi-sunxi.c | 44 ++++++++++ + 1 file changed, 44 insertions(+) + +diff --git a/drivers/spi/spi-sunxi.c b/drivers/spi/spi-sunxi.c +index 
111111111111..222222222222 100644 +--- a/drivers/spi/spi-sunxi.c ++++ b/drivers/spi/spi-sunxi.c +@@ -82,10 +82,12 @@ DECLARE_GLOBAL_DATA_PTR; + #endif + #define SUN4I_SPI_MIN_RATE 3000 + #define SUN4I_SPI_DEFAULT_RATE 1000000 + #define SUN4I_SPI_TIMEOUT_MS 1000 + ++#define SUN55I_BUF_STA_REG 0x400 ++ + #define SPI_REG(priv, reg) ((priv)->base + \ + (priv)->variant->regs[reg]) + #define SPI_BIT(priv, bit) ((priv)->variant->bits[bit]) + #define SPI_CS(priv, cs) (((cs) << SPI_BIT(priv, SPI_TCR_CS_SEL)) & \ + SPI_BIT(priv, SPI_TCR_CS_MASK)) +@@ -128,10 +130,11 @@ struct sun4i_spi_variant { + const u32 *bits; + u32 fifo_depth; + bool has_soft_reset; + bool has_burst_ctl; + bool has_clk_ctl; ++ bool has_bsr; + }; + + struct sun4i_spi_plat { + struct sun4i_spi_variant *variant; + u32 base; +@@ -364,10 +367,25 @@ static int sun4i_spi_xfer(struct udevice *dev, unsigned int bitlen, + + /* Reset FIFOs */ + setbits_le32(SPI_REG(priv, SPI_FCR), SPI_BIT(priv, SPI_FCR_RF_RST) | + SPI_BIT(priv, SPI_FCR_TF_RST)); + ++ if (priv->variant->has_bsr) { ++ u32 reg; ++ int ret; ++ ++ ret = readl_poll_timeout(SPI_REG(priv, SPI_FCR), reg, ++ !(reg & (SPI_BIT(priv, SPI_FCR_RF_RST) | ++ SPI_BIT(priv, SPI_FCR_TF_RST))), ++ SUN4I_SPI_TIMEOUT_MS * 1000); ++ if (ret) { ++ printf("ERROR: sun4i_spi: FIFO reset timeout\n"); ++ sun4i_spi_set_cs(bus, slave_plat->cs[0], false); ++ return ret; ++ } ++ } ++ + while (len) { + /* Setup the transfer now... 
*/ + nbytes = min(len, (priv->variant->fifo_depth - 1)); + + /* Setup the counters */ +@@ -517,10 +535,23 @@ static const unsigned long sun6i_spi_regs[] = { + [SPI_BCTL] = SUN6I_BURST_CTL_REG, + [SPI_TXD] = SUN6I_TXDATA_REG, + [SPI_RXD] = SUN6I_RXDATA_REG, + }; + ++static const unsigned long sun55i_spi_regs[] = { ++ [SPI_GCR] = SUN6I_GBL_CTL_REG, ++ [SPI_TCR] = SUN6I_TFR_CTL_REG, ++ [SPI_FCR] = SUN6I_FIFO_CTL_REG, ++ [SPI_FSR] = SUN55I_BUF_STA_REG, ++ [SPI_CCR] = SUN6I_CLK_CTL_REG, ++ [SPI_BC] = SUN6I_BURST_CNT_REG, ++ [SPI_TC] = SUN6I_XMIT_CNT_REG, ++ [SPI_BCTL] = SUN6I_BURST_CTL_REG, ++ [SPI_TXD] = SUN6I_TXDATA_REG, ++ [SPI_RXD] = SUN6I_RXDATA_REG, ++}; ++ + static const u32 sun6i_spi_bits[] = { + [SPI_GCR_TP] = BIT(7), + [SPI_GCR_SRST] = BIT(31), + [SPI_TCR_CPHA] = BIT(0), + [SPI_TCR_CPOL] = BIT(1), +@@ -568,10 +599,19 @@ static const struct sun4i_spi_variant sun50i_r329_spi_variant = { + .fifo_depth = 64, + .has_soft_reset = true, + .has_burst_ctl = true, + }; + ++static const struct sun4i_spi_variant sun55i_a523_spi_variant = { ++ .regs = sun55i_spi_regs, ++ .bits = sun6i_spi_bits, ++ .fifo_depth = 64, ++ .has_soft_reset = true, ++ .has_burst_ctl = true, ++ .has_bsr = true, ++}; ++ + static const struct udevice_id sun4i_spi_ids[] = { + { + .compatible = "allwinner,sun4i-a10-spi", + .data = (ulong)&sun4i_a10_spi_variant, + }, +@@ -585,10 +625,14 @@ static const struct udevice_id sun4i_spi_ids[] = { + }, + { + .compatible = "allwinner,sun50i-r329-spi", + .data = (ulong)&sun50i_r329_spi_variant, + }, ++ { ++ .compatible = "allwinner,sun55i-a523-spi", ++ .data = (ulong)&sun55i_a523_spi_variant, ++ }, + { /* sentinel */ } + }; + + U_BOOT_DRIVER(sun4i_spi) = { + .name = "sun4i_spi", +-- +Armbian + diff --git a/patch/u-boot/sunxi-dev-u-boot-a523/allwinner-a523-support-spl-spi-controllers.patch b/patch/u-boot/sunxi-dev-u-boot-a523/allwinner-a523-support-spl-spi-controllers.patch new file mode 100644 index 0000000000..e86209d0de --- /dev/null +++ 
b/patch/u-boot/sunxi-dev-u-boot-a523/allwinner-a523-support-spl-spi-controllers.patch @@ -0,0 +1,236 @@ +From 0000000000000000000000000000000000000000 Mon Sep 17 00:00:00 2001 +From: Marvin Wewer +Date: Mon, 10 Nov 2025 22:10:36 +0000 +Subject: Add Allwinner A523 support for SPL SPI controllers + +Signed-off-by: Marvin Wewer +--- + arch/arm/mach-sunxi/spl_spi_sunxi.c | 107 ++++++---- + 1 file changed, 68 insertions(+), 39 deletions(-) + +diff --git a/arch/arm/mach-sunxi/spl_spi_sunxi.c b/arch/arm/mach-sunxi/spl_spi_sunxi.c +index 111111111111..222222222222 100644 +--- a/arch/arm/mach-sunxi/spl_spi_sunxi.c ++++ b/arch/arm/mach-sunxi/spl_spi_sunxi.c +@@ -99,35 +99,44 @@ + + #define SPI0_CLK_DIV_BY_2 0x1000 + #define SPI0_CLK_DIV_BY_4 0x1001 + #define SPI0_CLK_DIV_BY_32 0x100f + ++#define SUN55I_BUF_STA_REG 0x400 ++ + /*****************************************************************************/ + + /* + * Allwinner A10/A20 SoCs were using pins PC0,PC1,PC2,PC23 for booting + * from SPI Flash, everything else is using pins PC0,PC1,PC2,PC3. + * The H6 uses PC0, PC2, PC3, PC5, the H616 PC0, PC2, PC3, PC4. + */ + static void spi0_pinmux_setup(unsigned int pin_function) + { ++ if (IS_ENABLED(CONFIG_MACH_SUN55I_A523)) { ++ sunxi_gpio_set_cfgpin(SUNXI_GPC(12), pin_function); ++ } ++ + /* All chips use PC2. And all chips use PC0, except R528/T113 */ +- if (!IS_ENABLED(CONFIG_MACH_SUN8I_R528)) ++ if (!IS_ENABLED(CONFIG_MACH_SUN8I_R528) && ++ !IS_ENABLED(CONFIG_MACH_SUN55I_A523)) + sunxi_gpio_set_cfgpin(SUNXI_GPC(0), pin_function); + + sunxi_gpio_set_cfgpin(SUNXI_GPC(2), pin_function); + + /* All chips except H6/H616/R528/T113 use PC1. 
*/ + if (!IS_ENABLED(CONFIG_SUN50I_GEN_H6) && +- !IS_ENABLED(CONFIG_MACH_SUN8I_R528)) ++ !IS_ENABLED(CONFIG_MACH_SUN8I_R528) && ++ !IS_ENABLED(CONFIG_MACH_SUN55I_A523)) + sunxi_gpio_set_cfgpin(SUNXI_GPC(1), pin_function); + + if (IS_ENABLED(CONFIG_MACH_SUN50I_H6) || + IS_ENABLED(CONFIG_MACH_SUN8I_R528)) + sunxi_gpio_set_cfgpin(SUNXI_GPC(5), pin_function); + if (IS_ENABLED(CONFIG_MACH_SUN50I_H616) || +- IS_ENABLED(CONFIG_MACH_SUN8I_R528)) ++ IS_ENABLED(CONFIG_MACH_SUN8I_R528) || ++ IS_ENABLED(CONFIG_MACH_SUN55I_A523)) + sunxi_gpio_set_cfgpin(SUNXI_GPC(4), pin_function); + + /* Older generations use PC23 for CS, newer ones use PC3. */ + if (IS_ENABLED(CONFIG_MACH_SUN4I) || IS_ENABLED(CONFIG_MACH_SUN7I) || + IS_ENABLED(CONFIG_MACH_SUN8I_R40)) +@@ -142,10 +151,15 @@ static bool is_sun6i_gen_spi(void) + IS_ENABLED(CONFIG_SUN50I_GEN_H6) || + IS_ENABLED(CONFIG_SUNXI_GEN_NCAT2) || + IS_ENABLED(CONFIG_MACH_SUN8I_V3S); + } + ++static bool is_sun55i_gen_spi(void) ++{ ++ return IS_ENABLED(CONFIG_MACH_SUN55I_A523); ++} ++ + static uintptr_t spi0_base_address(void) + { + if (IS_ENABLED(CONFIG_MACH_SUN8I_R40)) + return 0x01C05000; + +@@ -225,11 +239,11 @@ static void spi0_enable_clock(void) + static void spi0_disable_clock(void) + { + uintptr_t base = spi0_base_address(); + + /* Disable the SPI0 controller */ +- if (is_sun6i_gen_spi()) ++ if (is_sun6i_gen_spi() || is_sun55i_gen_spi()) + clrbits_le32(base + SUN6I_SPI0_GCR, SUN6I_CTL_MASTER | + SUN6I_CTL_ENABLE); + else + clrbits_le32(base + SUN4I_SPI0_CTL, SUN4I_CTL_MASTER | + SUN4I_CTL_ENABLE); +@@ -255,11 +269,12 @@ static void spi0_disable_clock(void) + static void spi0_init(void) + { + unsigned int pin_function = SUNXI_GPC_SPI0; + + if (IS_ENABLED(CONFIG_MACH_SUN50I) || +- IS_ENABLED(CONFIG_SUN50I_GEN_H6)) ++ IS_ENABLED(CONFIG_SUN50I_GEN_H6) || ++ IS_ENABLED(CONFIG_MACH_SUN55I_A523)) + pin_function = SUN50I_GPC_SPI0; + else if (IS_ENABLED(CONFIG_MACH_SUNIV) || + IS_ENABLED(CONFIG_MACH_SUN8I_R528)) + pin_function = 
SUNIV_GPC_SPI0; + +@@ -270,11 +285,12 @@ static void spi0_init(void) + static void spi0_deinit(void) + { + /* New SoCs can disable pins, older could only set them as input */ + unsigned int pin_function = SUNXI_GPIO_INPUT; + +- if (is_sun6i_gen_spi()) ++ if (is_sun6i_gen_spi() || ++ is_sun55i_gen_spi()) + pin_function = SUNXI_GPIO_DISABLE; + + spi0_disable_clock(); + spi0_pinmux_setup(pin_function); + } +@@ -282,46 +298,49 @@ static void spi0_deinit(void) + /*****************************************************************************/ + + #define SPI_READ_MAX_SIZE 60 /* FIFO size, minus 4 bytes of the header */ + + static void sunxi_spi0_read_data(u8 *buf, u32 addr, u32 bufsize, +- ulong spi_ctl_reg, +- ulong spi_ctl_xch_bitmask, +- ulong spi_fifo_reg, +- ulong spi_tx_reg, +- ulong spi_rx_reg, +- ulong spi_bc_reg, +- ulong spi_tc_reg, +- ulong spi_bcc_reg) ++ ulong spi_ctl_reg, ++ ulong spi_ctl_xch_bitmask, ++ ulong spi_fifo_reg, ++ ulong spi_tx_reg, ++ ulong spi_rx_reg, ++ ulong spi_bc_reg, ++ ulong spi_tc_reg, ++ ulong spi_bcc_reg) + { +- writel(4 + bufsize, spi_bc_reg); /* Burst counter (total bytes) */ +- writel(4, spi_tc_reg); /* Transfer counter (bytes to send) */ +- if (spi_bcc_reg) +- writel(4, spi_bcc_reg); /* SUN6I also needs this */ +- +- /* Send the Read Data Bytes (03h) command header */ +- writeb(0x03, spi_tx_reg); +- writeb((u8)(addr >> 16), spi_tx_reg); +- writeb((u8)(addr >> 8), spi_tx_reg); +- writeb((u8)(addr), spi_tx_reg); +- +- /* Start the data transfer */ +- setbits_le32(spi_ctl_reg, spi_ctl_xch_bitmask); +- +- /* Wait until everything is received in the RX FIFO */ +- while ((readl(spi_fifo_reg) & 0x7F) < 4 + bufsize) +- ; ++ writel(4 + bufsize, spi_bc_reg); /* Burst counter (total bytes) */ ++ writel(4, spi_tc_reg); /* Transfer counter (bytes to send) */ ++ if (spi_bcc_reg) ++ writel(4, spi_bcc_reg); /* SUN6I also needs this */ ++ ++ /* Send the Read Data Bytes (03h) command header */ ++ writeb(0x03, spi_tx_reg); ++ writeb((u8)(addr >> 16), 
spi_tx_reg); ++ writeb((u8)(addr >> 8), spi_tx_reg); ++ writeb((u8)(addr), spi_tx_reg); ++ ++ /* Start the data transfer */ ++ setbits_le32(spi_ctl_reg, spi_ctl_xch_bitmask); ++ ++ /* Wait until everything is received in the RX FIFO */ ++#if IS_ENABLED(CONFIG_MACH_SUN55I_A523) ++ while ((readl(spi_fifo_reg) & 0xFF) < 4 + bufsize); ++#else ++ while ((readl(spi_fifo_reg) & 0x7F) < 4 + bufsize); ++#endif + +- /* Skip 4 bytes */ +- readl(spi_rx_reg); ++ /* Skip 4 bytes */ ++ readl(spi_rx_reg); + +- /* Read the data */ +- while (bufsize-- > 0) +- *buf++ = readb(spi_rx_reg); ++ /* Read the data */ ++ while (bufsize-- > 0) ++ *buf++ = readb(spi_rx_reg); + +- /* tSHSL time is up to 100 ns in various SPI flash datasheets */ +- udelay(1); ++ /* tSHSL time is up to 100 ns in various SPI flash datasheets */ ++ udelay(1); + } + + static void spi0_read_data(void *buf, u32 addr, u32 len) + { + u8 *buf8 = buf; +@@ -331,20 +350,30 @@ static void spi0_read_data(void *buf, u32 addr, u32 len) + while (len > 0) { + chunk_len = len; + if (chunk_len > SPI_READ_MAX_SIZE) + chunk_len = SPI_READ_MAX_SIZE; + +- if (is_sun6i_gen_spi()) { ++ if (is_sun6i_gen_spi() && !is_sun55i_gen_spi()) { + sunxi_spi0_read_data(buf8, addr, chunk_len, + base + SUN6I_SPI0_TCR, + SUN6I_TCR_XCH, + base + SUN6I_SPI0_FIFO_STA, + base + SUN6I_SPI0_TXD, + base + SUN6I_SPI0_RXD, + base + SUN6I_SPI0_MBC, + base + SUN6I_SPI0_MTC, + base + SUN6I_SPI0_BCC); ++ } else if (is_sun55i_gen_spi()) { ++ sunxi_spi0_read_data(buf8, addr, chunk_len, ++ base + SUN6I_SPI0_TCR, ++ SUN6I_TCR_XCH, ++ base + SUN55I_BUF_STA_REG, ++ base + SUN6I_SPI0_TXD, ++ base + SUN6I_SPI0_RXD, ++ base + SUN6I_SPI0_MBC, ++ base + SUN6I_SPI0_MTC, ++ base + SUN6I_SPI0_BCC); + } else { + sunxi_spi0_read_data(buf8, addr, chunk_len, + base + SUN4I_SPI0_CTL, + SUN4I_CTL_XCH, + base + SUN4I_SPI0_FIFO_STA, +-- +Armbian + diff --git a/patch/u-boot/sunxi-dev-u-boot-a523/allwinner-add-nvme-boot-target.patch 
b/patch/u-boot/sunxi-dev-u-boot-a523/allwinner-add-nvme-boot-target.patch new file mode 100644 index 0000000000..fdfbcc91de --- /dev/null +++ b/patch/u-boot/sunxi-dev-u-boot-a523/allwinner-add-nvme-boot-target.patch @@ -0,0 +1,46 @@ +From 0000000000000000000000000000000000000000 Mon Sep 17 00:00:00 2001 +From: Marvin Wewer +Date: Sun, 16 Nov 2025 13:44:00 +0000 +Subject: Add NVME boot target support to sunxi-common.h + +Signed-off-by: Marvin Wewer +--- + include/configs/sunxi-common.h | 7 +++++++ + 1 file changed, 7 insertions(+) + +diff --git a/include/configs/sunxi-common.h b/include/configs/sunxi-common.h +index 111111111111..222222222222 100644 +--- a/include/configs/sunxi-common.h ++++ b/include/configs/sunxi-common.h +@@ -206,10 +206,16 @@ + #define BOOT_TARGET_DEVICES_DHCP(func) func(DHCP, dhcp, na) + #else + #define BOOT_TARGET_DEVICES_DHCP(func) + #endif + ++#ifdef CONFIG_CMD_NVME ++#define BOOT_TARGET_DEVICES_NVME(func) func(NVME, nvme, 0) ++#else ++#define BOOT_TARGET_DEVICES_NVME(func) ++#endif ++ + /* FEL boot support, auto-execute boot.scr if a script address was provided */ + #define BOOTENV_DEV_FEL(devtypeu, devtypel, instance) \ + "bootcmd_fel=" \ + "if test -n ${fel_booted} && test -n ${fel_scriptaddr}; then " \ + "echo '(FEL boot)'; " \ +@@ -218,10 +224,11 @@ + #define BOOTENV_DEV_NAME_FEL(devtypeu, devtypel, instance) \ + "fel " + + #define BOOT_TARGET_DEVICES(func) \ + func(FEL, fel, na) \ ++ BOOT_TARGET_DEVICES_NVME(func) \ + BOOT_TARGET_DEVICES_MMC(func) \ + BOOT_TARGET_DEVICES_SCSI(func) \ + BOOT_TARGET_DEVICES_USB(func) \ + BOOT_TARGET_DEVICES_PXE(func) \ + BOOT_TARGET_DEVICES_DHCP(func) +-- +Armbian + diff --git a/patch/u-boot/sunxi-dev-u-boot-a523/arm64-dts-sun55i-a527-cubie-a5e-enable-spi0-pcie-combophy.patch b/patch/u-boot/sunxi-dev-u-boot-a523/arm64-dts-sun55i-a527-cubie-a5e-enable-spi0-pcie-combophy.patch new file mode 100644 index 0000000000..a193a6dd3f --- /dev/null +++ 
b/patch/u-boot/sunxi-dev-u-boot-a523/arm64-dts-sun55i-a527-cubie-a5e-enable-spi0-pcie-combophy.patch @@ -0,0 +1,141 @@ +From 0000000000000000000000000000000000000000 Mon Sep 17 00:00:00 2001 +From: Marvin Wewer +Date: Sat, 25 Oct 2025 16:50:43 +0000 +Subject: arm64: dts: allwinner: sun55i-a527-cubie-a5e: Enable SPI0 and PCIe with ComboPHY + +Signed-off-by: Marvin Wewer +--- + dts/upstream/src/arm64/allwinner/sun55i-a527-cubie-a5e.dts | 84 ++++++++++ + 1 file changed, 84 insertions(+) + +diff --git a/dts/upstream/src/arm64/allwinner/sun55i-a527-cubie-a5e.dts b/dts/upstream/src/arm64/allwinner/sun55i-a527-cubie-a5e.dts +index 111111111111..222222222222 100644 +--- a/dts/upstream/src/arm64/allwinner/sun55i-a527-cubie-a5e.dts ++++ b/dts/upstream/src/arm64/allwinner/sun55i-a527-cubie-a5e.dts +@@ -43,10 +43,34 @@ + regulator-max-microvolt = <5000000>; + vin-supply = <®_vcc5v>; + gpio = <&r_pio 0 8 GPIO_ACTIVE_HIGH>; /* PL8 */ + enable-active-high; + }; ++ ++ reg_pcie_vcc3v3: regulator-pcie-vcc3v3 { ++ compatible = "regulator-fixed"; ++ regulator-name = "pcie-pwren"; ++ regulator-min-microvolt = <3300000>; ++ regulator-max-microvolt = <3300000>; ++ gpio = <&r_pio 0 11 GPIO_ACTIVE_HIGH>; ++ enable-active-high; ++ regulator-always-on; ++ regulator-boot-on; ++ pinctrl-names = "default"; ++ pinctrl-0 = <&pcie_pwren_pins>; ++ }; ++ ++ reg_vcc_3v3: vcc-3v3 { ++ compatible = "regulator-fixed"; ++ regulator-name = "vcc-3v3"; ++ regulator-min-microvolt = <3300000>; ++ regulator-max-microvolt = <3300000>; ++ gpio = <&r_pio 0 7 GPIO_ACTIVE_HIGH>; ++ regulator-always-on; ++ regulator-boot-on; ++ enable-active-high; ++ }; + }; + + &ehci0 { + status = "okay"; + }; +@@ -102,10 +126,34 @@ + vcc-pg-supply = <®_bldo1>; + vcc-ph-supply = <®_cldo3>; /* via VCC-IO */ + vcc-pi-supply = <®_cldo3>; + vcc-pj-supply = <®_cldo4>; + vcc-pk-supply = <®_cldo1>; ++ ++ pcie0_pins_ph: pcie0-ph { ++ pins = "PH19"; ++ function = "pcie0"; ++ allwinner,pinmux = <6>; ++ drive-strength = <20>; ++ bias-pull-up; 
++ power-source = <3300>; ++ }; ++ ++ spi0_pins: spi0-pins { ++ pins = "PC2","PC4", "PC12"; ++ allwinner,pinmux = <4>; ++ function = "spi0"; ++ drive-strength = <10>; ++ }; ++ ++ spi0_cs_pin: spi0-cs0-pin { ++ pins = "PC3"; ++ allwinner,pinmux = <4>; ++ function = "spi0"; ++ drive-strength = <10>; ++ bias-pull-up; /* cs, hold, wp should be pulled up */ ++ }; + }; + + &r_i2c0 { + status = "okay"; + +@@ -280,18 +328,53 @@ + * Specifying the supply would create a circular dependency. + * + * vcc-pl-supply = <®_aldo3>; + */ + vcc-pm-supply = <®_aldo3>; ++ ++ pcie_pwren_pins: pcie-pwren-pins { ++ allwinner,pins = "PL11"; ++ function = "gpio_out"; ++ allwinner,pinmux = <1>; ++ drive-strength = <10>; ++ bias-disable; //bias-pull-up; or bias-pull-down; ++ }; + }; + + &uart0 { + pinctrl-names = "default"; + pinctrl-0 = <&uart0_pb_pins>; + status = "okay"; + }; + ++&combophy { ++ select3v3-supply = <®_vcc_3v3>; ++ status = "okay"; ++}; ++ ++&spi0 { ++ pinctrl-0 = <&spi0_pins>, <&spi0_cs_pin>; ++ pinctrl-names = "default"; ++ status = "okay"; ++ w25q128: flash@0 { ++ compatible = "winbond,w25q128fw", "jedec,spi-nor"; ++ reg = <0>; ++ spi-max-frequency = <24000000>; ++ vcc-supply = <®_cldo1>; ++ status = "okay"; ++ }; ++}; ++ ++&pcie { ++ reset-gpios = <&pio 7 11 GPIO_ACTIVE_HIGH>; ++ wake-gpios = <&pio 7 12 GPIO_ACTIVE_HIGH>; ++ num-lanes = <1>; ++ slot-3v3-supply = <®_pcie_vcc3v3>; ++ switch-sel-gpios = <&pio 1 6 GPIO_ACTIVE_HIGH>; ++ status = "okay"; ++}; ++ + &usb_otg { + /* + * The USB-C port is the primary power supply, so in this configuration + * relies on the other end of the USB cable to supply the VBUS power. + * So use this port in peripheral mode. 
+-- +Armbian + diff --git a/patch/u-boot/sunxi-dev-u-boot-a523/arm64-dts-sun55i-t527-orangepi-4a-enable-spi0-pcie-combophy.patch b/patch/u-boot/sunxi-dev-u-boot-a523/arm64-dts-sun55i-t527-orangepi-4a-enable-spi0-pcie-combophy.patch new file mode 100644 index 0000000000..ba337019d1 --- /dev/null +++ b/patch/u-boot/sunxi-dev-u-boot-a523/arm64-dts-sun55i-t527-orangepi-4a-enable-spi0-pcie-combophy.patch @@ -0,0 +1,84 @@ +From 0000000000000000000000000000000000000000 Mon Sep 17 00:00:00 2001 +From: Marvin Wewer +Date: Sun, 4 Jan 2026 21:16:16 +0000 +Subject: arm64: dts: allwinner: sun55i-t527-orangepi-4a: Enable SPI0 and PCIe with ComboPHY + +Signed-off-by: Marvin Wewer +--- + dts/upstream/src/arm64/allwinner/sun55i-t527-orangepi-4a.dts | 38 ++++++++-- + 1 file changed, 38 insertions(+) + +diff --git a/dts/upstream/src/arm64/allwinner/sun55i-t527-orangepi-4a.dts b/dts/upstream/src/arm64/allwinner/sun55i-t527-orangepi-4a.dts +index 111111111111..222222222222 100644 +--- a/dts/upstream/src/arm64/allwinner/sun55i-t527-orangepi-4a.dts ++++ b/dts/upstream/src/arm64/allwinner/sun55i-t527-orangepi-4a.dts +@@ -152,10 +152,22 @@ + vcc-pg-supply = <®_bldo1>; + vcc-ph-supply = <®_cldo3>; /* via VCC-IO */ + vcc-pi-supply = <®_cldo3>; + vcc-pj-supply = <®_cldo1>; + vcc-pk-supply = <®_cldo1>; ++ ++ spi0_pc_pins: spi0-pc-pins { ++ pins = "PC2", "PC4", "PC12"; ++ function = "spi0"; ++ allwinner,pinmux = <4>; ++ }; ++ ++ spi0_cs0_pc_pin: spi0-cs0-pc-pin { ++ pins = "PC3"; ++ function = "spi0"; ++ allwinner,pinmux = <4>; ++ }; + }; + + &r_i2c0 { + status = "okay"; + +@@ -368,10 +390,36 @@ + host-wakeup-gpios = <&r_pio 1 4 GPIO_ACTIVE_HIGH>; /* PM4 */ + shutdown-gpios = <&r_pio 1 2 GPIO_ACTIVE_HIGH>; /* PM2 */ + }; + }; + ++&combophy { ++ select3v3-supply = <®_cldo3>; ++ status = "okay"; ++}; ++ ++&spi0 { ++ pinctrl-0 = <&spi0_pc_pins>, <&spi0_cs0_pc_pin>; ++ pinctrl-names = "default"; ++ status = "okay"; ++ xm25qu128c: flash@0 { ++ compatible = "xmc,xm25qu128c", "jedec,spi-nor"; ++ reg = 
<0>; ++ spi-max-frequency = <20000000>; ++ vcc-supply = <®_cldo1>; ++ status = "okay"; ++ }; ++}; ++ ++&pcie { ++ reset-gpios = <&pio 7 11 GPIO_ACTIVE_HIGH>; ++ wake-gpios = <&pio 7 12 GPIO_ACTIVE_HIGH>; ++ num-lanes = <2>; ++ slot-3v3-supply = <®_pcie_vcc3v3>; ++ status = "okay"; ++}; ++ + &usb_otg { + /* + * The OTG controller is connected to one of the type-A ports. + * There is a regulator, controlled by a GPIO, to provide VBUS power + * to the port, and a VBUSDET GPIO, to detect externally provided +@@ -386,5 +414,6 @@ + usb0_vbus-supply = <®_otg_vbus>; + usb0_vbus_det-gpios = <&r_pio 0 7 GPIO_ACTIVE_HIGH>; /* PL7 */ + usb1_vbus-supply = <®_usb_vbus>; + status = "okay"; + }; ++ +-- +Armbian + diff --git a/patch/u-boot/sunxi-dev-u-boot-a523/arm64-dtsi-sun55i-add-spi0-pcie-combophy-nodes.patch b/patch/u-boot/sunxi-dev-u-boot-a523/arm64-dtsi-sun55i-add-spi0-pcie-combophy-nodes.patch new file mode 100644 index 0000000000..22d6b3bf92 --- /dev/null +++ b/patch/u-boot/sunxi-dev-u-boot-a523/arm64-dtsi-sun55i-add-spi0-pcie-combophy-nodes.patch @@ -0,0 +1,114 @@ +From 0000000000000000000000000000000000000000 Mon Sep 17 00:00:00 2001 +From: Marvin Wewer +Date: Thu, 23 Oct 2025 09:10:08 +0000 +Subject: arm64: dts: allwinner: sun55i-a523: Add SPI0, PCIe and Combophy nodes + +Signed-off-by: Marvin Wewer +--- + dts/upstream/src/arm64/allwinner/sun55i-a523.dtsi | 66 ++++++++++ + 1 file changed, 66 insertions(+) + +diff --git a/dts/upstream/src/arm64/allwinner/sun55i-a523.dtsi b/dts/upstream/src/arm64/allwinner/sun55i-a523.dtsi +index 111111111111..222222222222 100644 +--- a/dts/upstream/src/arm64/allwinner/sun55i-a523.dtsi ++++ b/dts/upstream/src/arm64/allwinner/sun55i-a523.dtsi +@@ -1,9 +1,10 @@ + // SPDX-License-Identifier: (GPL-2.0-only OR MIT) + // Copyright (C) 2023-2024 Arm Ltd. 
+ + #include ++#include + #include + #include + #include + #include + #include +@@ -607,10 +608,38 @@ + clocks = <&r_ccu CLK_BUS_R_PPU1>; + resets = <&r_ccu RST_BUS_R_PPU1>; + #power-domain-cells = <1>; + }; + ++ dma:dma-controller@3002000 { ++ compatible = "allwinner,sun50i-h6-dma"; ++ reg = <0x03002000 0x1000>; ++ interrupts = ; ++ clocks = <&ccu CLK_BUS_DMA>, <&ccu CLK_MBUS_DMA>; ++ clock-names = "bus", "mbus"; ++ dma-channels = <8>; ++ dma-requests = <54>; ++ resets = <&ccu RST_BUS_DMA>; ++ #dma-cells = <1>; ++ status = "okay"; ++ }; ++ ++ spi0: spi@4025000 { ++ #address-cells = <1>; ++ #size-cells = <0>; ++ compatible = "allwinner,sun55i-a523-spi"; ++ device_type = "spi0"; ++ reg = <0x04025000 0x1000>; ++ interrupts = ; ++ clocks = <&ccu CLK_BUS_SPI0>, <&ccu CLK_SPI0>; ++ clock-names = "ahb", "mod"; ++ resets = <&ccu RST_BUS_SPI0>; ++ dmas = <&dma 22>, <&dma 22>; ++ dma-names = "rx", "tx"; ++ status = "disabled"; ++ }; ++ + r_ccu: clock-controller@7010000 { + compatible = "allwinner,sun55i-a523-r-ccu"; + reg = <0x7010000 0x250>; + clocks = <&osc24M>, + <&rtc CLK_OSC32K>, +@@ -624,10 +653,46 @@ + "pll-audio"; + #clock-cells = <1>; + #reset-cells = <1>; + }; + ++ combophy: phy@4f00000 { ++ compatible = "allwinner,inno-combphy"; ++ reg = <0x04f00000 0x80000>, /* Sub-System Application Registers */ ++ <0x04f80000 0x80000>; /* Combo INNO PHY Registers */ ++ reg-names = "phy-ctl", "phy-clk"; ++ #phy-cells = <1>; ++ clocks = <&ccu CLK_USB3_REF>, <&ccu CLK_PLL_PERIPH0_200M>; ++ clock-names = "phyclk_ref","refclk_par"; ++ resets = <&ccu RST_BUS_PCIE_USB3>; ++ reset-names = "phy_rst"; ++ }; ++ ++ pcie: pcie@4800000 { ++ compatible = "allwinner,sun55i-pcie-v210-rc"; ++ #address-cells = <3>; ++ #size-cells = <2>; ++ bus-range = <0x0 0xff>; ++ reg = <0x04800000 0x480000>; ++ reg-names = "dbi"; ++ device_type = "pci"; ++ ranges = <0x00000800 0 0x20000000 0x20000000 0 0x01000000 ++ 0x81000000 0 0x21000000 0x21000000 0 0x01000000 ++ 0x82000000 0 0x22000000 0x22000000 0 
0x0e000000>; ++ phys = <&combophy PHY_TYPE_PCIE>; ++ phy-names = "pcie-phy"; ++ #interrupt-cells = <1>; ++ num-edma = <4>; ++ max-link-speed = <2>; ++ num-ib-windows = <8>; ++ num-ob-windows = <8>; ++ linux,pci-domain = <0>; ++ clocks = <&osc24M>, <&ccu CLK_PCIE_AUX>; ++ clock-names = "hosc", "pclk_aux"; ++ power-domains = <&pck600 PD_PCIE>; ++ }; ++ + nmi_intc: interrupt-controller@7010320 { + compatible = "allwinner,sun55i-a523-nmi"; + reg = <0x07010320 0xc>; + interrupt-controller; + #interrupt-cells = <2>; +-- +Armbian + diff --git a/patch/u-boot/sunxi-dev-u-boot-a523/clk-sunxi-fix-clock-handling-for-sun55i-a523.patch b/patch/u-boot/sunxi-dev-u-boot-a523/clk-sunxi-fix-clock-handling-for-sun55i-a523.patch new file mode 100644 index 0000000000..eba7186822 --- /dev/null +++ b/patch/u-boot/sunxi-dev-u-boot-a523/clk-sunxi-fix-clock-handling-for-sun55i-a523.patch @@ -0,0 +1,56 @@ +From 0000000000000000000000000000000000000000 Mon Sep 17 00:00:00 2001 +From: Marvin Wewer +Date: Sat, 25 Oct 2025 10:54:53 +0000 +Subject: clk: sunxi: Add USB3 and PCIe clock gates and reset for A523 + +Signed-off-by: Marvin Wewer +--- + drivers/clk/sunxi/clk_a523.c | 3 +++ + dts/upstream/include/dt-bindings/clock/sun55i-a523-ccu.h | 1 + + 2 files changed, 4 insertions(+) + +diff --git a/drivers/clk/sunxi/clk_a523.c b/drivers/clk/sunxi/clk_a523.c +index 111111111111..222222222222 100644 +--- a/drivers/clk/sunxi/clk_a523.c ++++ b/drivers/clk/sunxi/clk_a523.c +@@ -44,10 +44,12 @@ static struct ccu_clk_gate a523_gates[] = { + [CLK_BUS_OHCI0] = GATE(0xa8c, BIT(0)), + [CLK_BUS_OHCI1] = GATE(0xa8c, BIT(1)), + [CLK_BUS_EHCI0] = GATE(0xa8c, BIT(4)), + [CLK_BUS_EHCI1] = GATE(0xa8c, BIT(5)), + [CLK_BUS_OTG] = GATE(0xa8c, BIT(8)), ++ [CLK_USB3_REF] = GATE(0x0A84, BIT(31)), ++ [CLK_PCIE_AUX] = GATE(0xaa0, BIT(31)), + }; + + static struct ccu_reset a523_resets[] = { + [RST_BUS_MMC0] = RESET(0x84c, BIT(16)), + [RST_BUS_MMC1] = RESET(0x84c, BIT(17)), +@@ -73,10 +75,11 @@ static struct ccu_reset 
a523_resets[] = { + [RST_BUS_OHCI0] = RESET(0xa8c, BIT(16)), + [RST_BUS_OHCI1] = RESET(0xa8c, BIT(17)), + [RST_BUS_EHCI0] = RESET(0xa8c, BIT(20)), + [RST_BUS_EHCI1] = RESET(0xa8c, BIT(21)), + [RST_BUS_OTG] = RESET(0xa8c, BIT(24)), ++ [RST_BUS_PCIE_USB3] = RESET(0xaac, BIT(24)), + }; + + const struct ccu_desc a523_ccu_desc = { + .gates = a523_gates, + .resets = a523_resets, +diff --git a/dts/upstream/include/dt-bindings/clock/sun55i-a523-ccu.h b/dts/upstream/include/dt-bindings/clock/sun55i-a523-ccu.h +index 111111111111..222222222222 100644 +--- a/dts/upstream/include/dt-bindings/clock/sun55i-a523-ccu.h ++++ b/dts/upstream/include/dt-bindings/clock/sun55i-a523-ccu.h +@@ -183,7 +183,8 @@ + #define CLK_FANOUT_27M 174 + #define CLK_FANOUT_PCLK 175 + #define CLK_FANOUT0 176 + #define CLK_FANOUT1 177 + #define CLK_FANOUT2 178 ++#define CLK_USB3_REF 179 + + #endif /* _DT_BINDINGS_CLK_SUN55I_A523_CCU_H_ */ +-- +Armbian + diff --git a/patch/u-boot/sunxi-dev-u-boot-a523/edit-orangepi-4a-defconfig.patch b/patch/u-boot/sunxi-dev-u-boot-a523/edit-orangepi-4a-defconfig.patch new file mode 100644 index 0000000000..dba510ce64 --- /dev/null +++ b/patch/u-boot/sunxi-dev-u-boot-a523/edit-orangepi-4a-defconfig.patch @@ -0,0 +1,43 @@ +From 0000000000000000000000000000000000000000 Mon Sep 17 00:00:00 2001 +From: Marvin Wewer +Date: Mon, 12 Jan 2026 13:45:24 +0000 +Subject: Enable PCIe, NVMe, SPI and additional commands in orangepi_4a_defconfig + +Signed-off-by: Marvin Wewer +--- + configs/orangepi_4a_defconfig | 20 ++++++++++ + 1 file changed, 20 insertions(+) + +diff --git a/configs/orangepi_4a_defconfig b/configs/orangepi_4a_defconfig +index 111111111111..222222222222 100644 +--- a/configs/orangepi_4a_defconfig ++++ b/configs/orangepi_4a_defconfig +@@ -28,5 +28,25 @@ CONFIG_AXP717_POWER=y + CONFIG_AXP_I2C_ADDRESS=0x35 + CONFIG_AXP_DCDC2_VOLT=920 + CONFIG_AXP_DCDC3_VOLT=1160 + CONFIG_USB_EHCI_HCD=y + CONFIG_USB_OHCI_HCD=y ++CONFIG_PHY_SUN55I_PCIE_USB3=y ++CONFIG_PCI=y 
++CONFIG_PCIE_SUN55I_RC=y ++CONFIG_DM_PCI_COMPAT=y ++CONFIG_NVME=y ++CONFIG_NVME_PCI=y ++CONFIG_SPI_SUNXI=y ++CONFIG_CMD_PCI=y ++CONFIG_CMD_PART=y ++CONFIG_CMD_GPIO=y ++CONFIG_CMD_LSBLK=y ++CONFIG_CMD_CAT=y ++CONFIG_BLKMAP=y ++CONFIG_MTD=y ++CONFIG_SPI=y ++CONFIG_CMD_SF=y ++CONFIG_SPI_FLASH_WINBOND=y ++CONFIG_BOOTDEV_SPI_FLASH=y ++CONFIG_CMD_BOOTDEV=y ++CONFIG_SPL_SPI_SUNXI=y +-- +Armbian + diff --git a/patch/u-boot/sunxi-dev-u-boot-a523/edit-radxa-cubie-a5e-defconfig.patch b/patch/u-boot/sunxi-dev-u-boot-a523/edit-radxa-cubie-a5e-defconfig.patch new file mode 100644 index 0000000000..b655b8d4bd --- /dev/null +++ b/patch/u-boot/sunxi-dev-u-boot-a523/edit-radxa-cubie-a5e-defconfig.patch @@ -0,0 +1,43 @@ +From 0000000000000000000000000000000000000000 Mon Sep 17 00:00:00 2001 +From: Marvin Wewer +Date: Sun, 19 Oct 2025 11:39:07 +0200 +Subject: Enable PCIe, NVMe, SPI and GPIO support in radxa-cubie-a5e_defconfig + +Signed-off-by: Marvin Wewer +--- + configs/radxa-cubie-a5e_defconfig | 55 ++++++++++ + 1 file changed, 55 insertions(+) + +diff --git a/configs/radxa-cubie-a5e_defconfig b/configs/radxa-cubie-a5e_defconfig +index 111111111111..222222222222 100644 +--- a/configs/radxa-cubie-a5e_defconfig ++++ b/configs/radxa-cubie-a5e_defconfig +@@ -28,5 +28,25 @@ CONFIG_REGULATOR_AXP=y + CONFIG_AXP717_POWER=y + CONFIG_AXP_DCDC2_VOLT=920 + CONFIG_AXP_DCDC3_VOLT=1100 + CONFIG_USB_EHCI_HCD=y + CONFIG_USB_OHCI_HCD=y ++CONFIG_PHY_SUN55I_PCIE_USB3=y ++CONFIG_PCI=y ++CONFIG_PCIE_SUN55I_RC=y ++CONFIG_DM_PCI_COMPAT=y ++CONFIG_NVME=y ++CONFIG_NVME_PCI=y ++CONFIG_SPI_SUNXI=y ++CONFIG_CMD_PCI=y ++CONFIG_CMD_PART=y ++CONFIG_CMD_GPIO=y ++CONFIG_CMD_LSBLK=y ++CONFIG_CMD_CAT=y ++CONFIG_BLKMAP=y ++CONFIG_MTD=y ++CONFIG_SPI=y ++CONFIG_CMD_SF=y ++CONFIG_SPI_FLASH_WINBOND=y ++CONFIG_BOOTDEV_SPI_FLASH=y ++CONFIG_CMD_BOOTDEV=y ++CONFIG_SPL_SPI_SUNXI=y +-- +Armbian + diff --git a/patch/u-boot/sunxi-dev-u-boot-a523/pci-sunxi-enable-pcie-support.patch 
b/patch/u-boot/sunxi-dev-u-boot-a523/pci-sunxi-enable-pcie-support.patch new file mode 100644 index 0000000000..f0099d8d76 --- /dev/null +++ b/patch/u-boot/sunxi-dev-u-boot-a523/pci-sunxi-enable-pcie-support.patch @@ -0,0 +1,1498 @@ +From 0000000000000000000000000000000000000000 Mon Sep 17 00:00:00 2001 +From: Marvin Wewer +Date: Thu, 30 Oct 2025 22:33:03 +0000 +Subject: PCIe: Add support for Allwinner SUN55I DesignWare PCIe controller + +Signed-off-by: Marvin Wewer +--- + drivers/pci/Kconfig | 8 + + drivers/pci/Makefile | 1 + + drivers/pci/pcie-sun55i-plat.c | 444 +++++++++ + drivers/pci/pcie-sun55i.c | 498 ++++++++++ + drivers/pci/pcie-sun55i.h | 490 +++++++++ + 5 files changed, 1441 insertions(+) + +diff --git a/drivers/pci/Kconfig b/drivers/pci/Kconfig +index 111111111111..222222222222 100644 +--- a/drivers/pci/Kconfig ++++ b/drivers/pci/Kconfig +@@ -462,6 +462,14 @@ config PCIE_DW_IMX + select SYSCON + help + Say Y here if you want to enable DW PCIe controller support on + iMX SoCs. + ++ ++config PCIE_SUN55I_RC ++ bool "Allwinner SUN55I DesignWare PCIe controller" ++ default n ++ depends on ARCH_SUNXI ++ help ++ Enables support for the DW PCIe controller in the Allwinner Sun55i SoC. 
++ + endif +diff --git a/drivers/pci/Makefile b/drivers/pci/Makefile +index 111111111111..222222222222 100644 +--- a/drivers/pci/Makefile ++++ b/drivers/pci/Makefile +@@ -55,5 +55,6 @@ obj-$(CONFIG_PCIE_DW_SIFIVE) += pcie_dw_sifive.o + obj-$(CONFIG_PCIE_UNIPHIER) += pcie_uniphier.o + obj-$(CONFIG_PCIE_XILINX_NWL) += pcie-xilinx-nwl.o + obj-$(CONFIG_PCIE_PLDA_COMMON) += pcie_plda_common.o + obj-$(CONFIG_PCIE_STARFIVE_JH7110) += pcie_starfive_jh7110.o + obj-$(CONFIG_PCIE_DW_IMX) += pcie_dw_imx.o ++obj-$(CONFIG_PCIE_SUN55I_RC) += pcie-sun55i-plat.o pcie-sun55i.o +\ No newline at end of file +diff --git a/drivers/pci/pcie-sun55i-plat.c b/drivers/pci/pcie-sun55i-plat.c +new file mode 100644 +index 000000000000..111111111111 +--- /dev/null ++++ b/drivers/pci/pcie-sun55i-plat.c +@@ -0,0 +1,444 @@ ++// SPDX-License-Identifier: GPL-2.0 ++/* ++ * Allwinner PCI Express plat driver ++ * ++ * Copyright(c) 2020 - 2024 Allwinner Technology Co.,Ltd. All rights reserved. ++ * ++ * pcie-sun55i-plat.c: chenhuaqiang ++ */ ++ ++ ++#include "pci.h" ++#include "pcie-sun55i.h" ++#include ++#include ++#include ++#include ++#include ++ ++/* Indexed by PCI_EXP_LNKCAP_SLS, PCI_EXP_LNKSTA_CLS */ ++const unsigned char pcie_link_speed[] = { ++ PCI_SPEED_UNKNOWN, /* 0 */ ++ PCIE_SPEED_2_5GT, /* 1 */ ++ PCIE_SPEED_5_0GT, /* 2 */ ++ PCIE_SPEED_8_0GT, /* 3 */ ++ PCIE_SPEED_16_0GT, /* 4 */ ++ PCIE_SPEED_32_0GT, /* 5 */ ++}; ++ ++int sun55i_pcie_cfg_write(void __iomem *addr, int size, ulong val) ++{ ++ if ((uintptr_t)addr & (size - 1)) ++ return PCIBIOS_BAD_REGISTER_NUMBER; ++ ++ if (size == 4) ++ writel(val, addr); ++ else if (size == 2) ++ writew(val, addr); ++ else if (size == 1) ++ writeb(val, addr); ++ else ++ return PCIBIOS_BAD_REGISTER_NUMBER; ++ ++ return PCIBIOS_SUCCESSFUL; ++} ++ ++int sun55i_pcie_cfg_read(void __iomem *addr, int size, ulong *val) ++{ ++ ++ if ((uintptr_t)addr & (size - 1)) { ++ *val = 0; ++ printf("CFG_READ: Bad alignment\n"); ++ return PCIBIOS_BAD_REGISTER_NUMBER; ++ } ++ 
++ if (size == 4) { ++ *val = readl(addr); ++ } else if (size == 2) { ++ *val = readw(addr); ++ } else if (size == 1) { ++ *val = readb(addr); ++ } else { ++ *val = 0; ++ printf("CFG_READ: Bad size\n"); ++ return PCIBIOS_BAD_REGISTER_NUMBER; ++ } ++ ++ return PCIBIOS_SUCCESSFUL; ++} ++ ++void sun55i_pcie_writel(u32 val, struct sun55i_pcie *pcie, u32 offset) ++{ ++ writel(val, pcie->app_base + offset); ++} ++ ++u32 sun55i_pcie_readl(struct sun55i_pcie *pcie, u32 offset) ++{ ++ return readl(pcie->app_base + offset); ++} ++ ++static void sun55i_pcie_write_dbi(struct sun55i_pcie *pci, u32 reg, size_t size, u32 val) ++{ ++ int ret; ++ ++ ret = sun55i_pcie_cfg_write(pci->dbi_base + reg, size, val); ++ if (ret) ++ printf("Write DBI address failed\n"); ++} ++ ++static ulong sun55i_pcie_read_dbi(struct sun55i_pcie *pci, u32 reg, size_t size) ++{ ++ int ret; ++ ulong val; ++ ++ ret = sun55i_pcie_cfg_read(pci->dbi_base + reg, size, &val); ++ if (ret) ++ printf("Read DBI address failed\n"); ++ ++ return val; ++} ++ ++void sun55i_pcie_writel_dbi(struct sun55i_pcie *pci, u32 reg, u32 val) ++{ ++ sun55i_pcie_write_dbi(pci, reg, 0x4, val); ++} ++ ++u32 sun55i_pcie_readl_dbi(struct sun55i_pcie *pci, u32 reg) ++{ ++ return sun55i_pcie_read_dbi(pci, reg, 0x4); ++} ++ ++void sun55i_pcie_writew_dbi(struct sun55i_pcie *pci, u32 reg, u16 val) ++{ ++ sun55i_pcie_write_dbi(pci, reg, 0x2, val); ++} ++ ++u16 sun55i_pcie_readw_dbi(struct sun55i_pcie *pci, u32 reg) ++{ ++ return sun55i_pcie_read_dbi(pci, reg, 0x2); ++} ++ ++void sun55i_pcie_writeb_dbi(struct sun55i_pcie *pci, u32 reg, u8 val) ++{ ++ sun55i_pcie_write_dbi(pci, reg, 0x1, val); ++} ++ ++u8 sun55i_pcie_readb_dbi(struct sun55i_pcie *pci, u32 reg) ++{ ++ return sun55i_pcie_read_dbi(pci, reg, 0x1); ++} ++ ++void sun55i_pcie_dbi_ro_wr_en(struct sun55i_pcie *pci) ++{ ++ u32 val; ++ ++ val = sun55i_pcie_readl_dbi(pci, PCIE_MISC_CONTROL_1_CFG); ++ val |= (0x1 << 0); ++ sun55i_pcie_writel_dbi(pci, PCIE_MISC_CONTROL_1_CFG, val); ++} ++ 
++void sun55i_pcie_dbi_ro_wr_dis(struct sun55i_pcie *pci) ++{ ++ u32 val; ++ ++ val = sun55i_pcie_readl_dbi(pci, PCIE_MISC_CONTROL_1_CFG); ++ val &= ~(0x1 << 0); ++ sun55i_pcie_writel_dbi(pci, PCIE_MISC_CONTROL_1_CFG, val); ++} ++ ++void sun55i_pcie_plat_ltssm_enable(struct sun55i_pcie *pcie) ++{ ++ u32 val; ++ ++ val = sun55i_pcie_readl(pcie, PCIE_LTSSM_CTRL); ++ val |= PCIE_LINK_TRAINING; ++ sun55i_pcie_writel(val, pcie, PCIE_LTSSM_CTRL); ++} ++ ++void sun55i_pcie_plat_ltssm_disable(struct sun55i_pcie *pcie) ++{ ++ u32 val; ++ ++ val = sun55i_pcie_readl(pcie, PCIE_LTSSM_CTRL); ++ val &= ~PCIE_LINK_TRAINING; ++ sun55i_pcie_writel(val, pcie, PCIE_LTSSM_CTRL); ++} ++ ++static u8 __sun55i_pcie_find_next_cap(struct sun55i_pcie *pci, u8 cap_ptr, ++ u8 cap) ++{ ++ u8 cap_id, next_cap_ptr; ++ u16 reg; ++ ++ if (!cap_ptr) ++ return 0; ++ ++ reg = sun55i_pcie_readw_dbi(pci, cap_ptr); ++ cap_id = (reg & CAP_ID_MASK); ++ ++ if (cap_id > PCI_CAP_ID_MAX) ++ return 0; ++ ++ if (cap_id == cap) ++ return cap_ptr; ++ ++ next_cap_ptr = (reg & NEXT_CAP_PTR_MASK) >> 8; ++ return __sun55i_pcie_find_next_cap(pci, next_cap_ptr, cap); ++} ++ ++u8 sun55i_pcie_plat_find_capability(struct sun55i_pcie *pci, u8 cap) ++{ ++ u8 next_cap_ptr; ++ u16 reg; ++ ++ reg = sun55i_pcie_readw_dbi(pci, PCI_CAPABILITY_LIST); ++ next_cap_ptr = (reg & CAP_ID_MASK); ++ ++ return __sun55i_pcie_find_next_cap(pci, next_cap_ptr, cap); ++} ++ ++static void sun55i_pcie_plat_set_link_cap(struct sun55i_pcie *pci, u32 link_gen) ++{ ++ u32 cap, ctrl2, link_speed = 0; ++ ++ u8 offset = sun55i_pcie_plat_find_capability(pci, PCI_CAP_ID_EXP); ++ ++ cap = sun55i_pcie_readl_dbi(pci, offset + PCI_EXP_LNKCAP); ++ ctrl2 = sun55i_pcie_readl_dbi(pci, offset + PCI_EXP_LNKCTL2); ++ ctrl2 &= ~PCI_EXP_LNKCTL2_TLS; ++ ++ switch (pcie_link_speed[link_gen]) { ++ case PCIE_SPEED_2_5GT: ++ link_speed = PCI_EXP_LNKCTL2_TLS_2_5GT; ++ break; ++ case PCIE_SPEED_5_0GT: ++ link_speed = PCI_EXP_LNKCTL2_TLS_5_0GT; ++ break; ++ case 
PCIE_SPEED_8_0GT: ++ link_speed = PCI_EXP_LNKCTL2_TLS_8_0GT; ++ break; ++ case PCIE_SPEED_16_0GT: ++ link_speed = PCI_EXP_LNKCTL2_TLS_16_0GT; ++ break; ++ default: ++ /* Use hardware capability */ ++ // link_speed = FIELD_GET(PCI_EXP_LNKCAP_SLS, cap); ++ // ctrl2 &= ~PCI_EXP_LNKCTL2_HASD; ++ break; ++ } ++ ++ sun55i_pcie_writel_dbi(pci, offset + PCI_EXP_LNKCTL2, ctrl2 | link_speed); ++ ++ cap &= ~((u32)PCI_EXP_LNKCAP_SLS); ++ sun55i_pcie_writel_dbi(pci, offset + PCI_EXP_LNKCAP, cap | link_speed); ++} ++ ++void sun55i_pcie_plat_set_rate(struct sun55i_pcie *pci) ++{ ++ u32 val; ++ ++ sun55i_pcie_plat_set_link_cap(pci, pci->link_gen); ++ /* set the number of lanes */ ++ val = sun55i_pcie_readl_dbi(pci, PCIE_PORT_LINK_CONTROL); ++ val &= ~PORT_LINK_MODE_MASK; ++ switch (pci->lanes) { ++ case 1: ++ val |= PORT_LINK_MODE_1_LANES; ++ break; ++ case 2: ++ val |= PORT_LINK_MODE_2_LANES; ++ break; ++ case 4: ++ val |= PORT_LINK_MODE_4_LANES; ++ break; ++ default: ++ printf("num-lanes %u: invalid value\n", pci->lanes); ++ return; ++ } ++ sun55i_pcie_writel_dbi(pci, PCIE_PORT_LINK_CONTROL, val); ++ ++ /* set link width speed control register */ ++ val = sun55i_pcie_readl_dbi(pci, PCIE_LINK_WIDTH_SPEED_CONTROL); ++ val &= ~PORT_LOGIC_LINK_WIDTH_MASK; ++ switch (pci->lanes) { ++ case 1: ++ val |= PORT_LOGIC_LINK_WIDTH_1_LANES; ++ break; ++ case 2: ++ val |= PORT_LOGIC_LINK_WIDTH_2_LANES; ++ break; ++ case 4: ++ val |= PORT_LOGIC_LINK_WIDTH_4_LANES; ++ break; ++ } ++ sun55i_pcie_writel_dbi(pci, PCIE_LINK_WIDTH_SPEED_CONTROL, val); ++} ++ ++static int sun55i_pcie_plat_init_port(struct udevice *dev) ++{ ++ struct sun55i_pcie *pci = dev_get_priv(dev); ++ int ret; ++ ++ if (dm_gpio_is_valid(&pci->wake_gpio)) { ++ ret = dm_gpio_set_value(&pci->wake_gpio, 1); ++ if (ret) { ++ printf("PCIe: Failed to set wake GPIO: %d\n", ret); ++ return ret; ++ } ++ } ++ ++ ret = dm_gpio_set_value(&pci->switch_gpio, 1); ++ if (ret) { ++ printf("PCIe: Failed to set switch GPIO: %d\n", ret); ++ return 
ret; ++ } ++ ++ ret = regulator_set_enable(pci->slot_3v3, true); ++ if (ret && ret != -EALREADY) { ++ printf("PCIe: Failed to enable 3.3V slot supply: %d\n", ret); ++ return ret; ++ } ++ ++ mdelay(50); ++ ++ ret = clk_enable(&pci->pcie_aux); ++ if (ret) { ++ printf("PCIe: Failed to enable bus clock: %d\n", ret); ++ goto err_disable_slot_supply; ++ } ++ ++ if (pci->drvdata && pci->drvdata->need_pcie_rst) { ++ ret = reset_deassert(&pci->pcie_rst); ++ if (ret) { ++ printf("PCIe: Failed to deassert internal reset: %d\n", ret); ++ goto err_disable_clk; ++ } ++ } ++ ++ ret = generic_phy_init(&pci->phy); ++ if (ret) { ++ printf("PCIe: Failed to init phy: %d\n", ret); ++ goto err_assert_reset; ++ } ++ ret = generic_phy_power_on(&pci->phy); ++ if (ret) { ++ printf("PCIe: Failed to power on phy: %d\n", ret); ++ goto err_assert_reset; ++ } ++ ++ printf("PCIe: Toggling external device reset (PERST#)...\n"); ++ ret = dm_gpio_set_value(&pci->rst_gpio, 0); ++ if (ret) { ++ printf("PCIe: Failed to assert external reset: %d\n", ret); ++ goto err_power_off_phy; ++ } ++ ++ mdelay(100); ++ ++ ret = dm_gpio_set_value(&pci->rst_gpio, 1); ++ if (ret) { ++ printf("PCIe: Failed to deassert external reset: %d\n", ret); ++ goto err_power_off_phy; ++ } ++ ++ mdelay(40); ++ ++ printf("PCIe: Hardware power-on sequence successful.\n"); ++ return 0; ++ ++err_power_off_phy: ++ generic_phy_power_off(&pci->phy); ++err_assert_reset: ++ if (pci->drvdata && pci->drvdata->need_pcie_rst) ++ reset_assert(&pci->pcie_rst); ++err_disable_clk: ++ clk_disable(&pci->pcie_aux); ++err_disable_slot_supply: ++ regulator_set_enable(pci->slot_3v3, false); ++ ++ return ret; ++} ++ ++int sun55i_pcie_plat_hw_init(struct udevice *dev) ++{ ++ struct sun55i_pcie *pci = dev_get_priv(dev); ++ int ret; ++ ++ printf("PCIe: Acquiring resources...\n"); ++ ++ ret = dev_read_u32(dev, "num-lanes", &pci->lanes); ++ if (ret) { ++ printf("PCIe: Failed to parse num-lanes, using default: 1\n"); ++ pci->lanes = 1; ++ } ++ ++ ret = 
dev_read_u32(dev, "max-link-speed", &pci->link_gen); ++ if (ret) { ++ printf("PCIe: Couldn't parse max-link-speed, using default link speed: Gen2\n"); ++ pci->link_gen = 2; ++ } ++ ++ if (pci->lanes != 1 && pci->lanes != 2 && pci->lanes != 4) { ++ printf("PCIe: Invalid num-lanes %d, using 1\n", pci->lanes); ++ pci->lanes = 1; ++ } ++ ++ if (pci->link_gen < 1 || pci->link_gen > 3) { ++ printf("PCIe: Invalid max-link-speed %d, using 2\n", pci->link_gen); ++ pci->link_gen = 2; ++ } ++ ++ ret = gpio_request_by_name(dev, "switch-sel-gpios", 0, &pci->switch_gpio, GPIOD_IS_OUT); ++ if (ret) { ++ printf("PCIe: Failed to get switch-sel GPIO: %d\n", ret); ++ return ret; ++ } ++ ++ ret = gpio_request_by_name(dev, "reset-gpios", 0, &pci->rst_gpio, ++ GPIOD_IS_OUT); ++ if (ret) { ++ printf("PCIe: Failed to get reset-gpios: %d\n", ret); ++ return ret; ++ } ++ ++ ret = gpio_request_by_name(dev, "wake-gpios", 0, &pci->wake_gpio, ++ GPIOD_IS_OUT); ++ if (ret) { ++ printf("PCIe: Warning: Failed to get wake-gpios: %d\n", ret); ++ } ++ ++ ret = device_get_supply_regulator(dev, "slot-3v3-supply", &pci->slot_3v3); ++ if (ret) { ++ printf("PCIe: Failed to get 3.3V slot supply: %d\n", ret); ++ return ret; ++ } ++ ++ ret = clk_get_by_name(dev, "pclk_aux", &pci->pcie_aux); ++ if (ret) { ++ printf("PCIe: Failed to get bus clock: %d\n", ret); ++ return ret; ++ } ++ ++ pci->drvdata = (const struct sun55i_pcie_of_data *)dev_get_driver_data(dev); ++ if (pci->drvdata && pci->drvdata->need_pcie_rst) { ++ ret = reset_get_by_index(dev, 0, &pci->pcie_rst); ++ if (ret) { ++ printf("PCIe: Failed to get reset controller: %d\n", ret); ++ return ret; ++ } ++ } ++ ++ ret = generic_phy_get_by_index(dev, 0, &pci->phy); ++ if (ret) { ++ printf("PCIe: Failed to get phy: %d\n", ret); ++ return ret; ++ } ++ ++ printf("PCIe: All resources acquired. 
Starting power-on sequence...\n"); ++ ++ return sun55i_pcie_plat_init_port(dev); ++ ++} ++ ++ ++ +diff --git a/drivers/pci/pcie-sun55i.c b/drivers/pci/pcie-sun55i.c +new file mode 100644 +index 000000000000..111111111111 +--- /dev/null ++++ b/drivers/pci/pcie-sun55i.c +@@ -0,0 +1,498 @@ ++// SPDX-License-Identifier: GPL-2.0+ ++/* ++ * sun55i DesignWare based PCIe host controller driver ++ * ++ * Copyright (c) 2021 sun55i, Inc. ++ */ ++ ++ ++#include ++#include ++#include ++#include ++#include ++#include ++#include "pcie-sun55i.h" ++ ++DECLARE_GLOBAL_DATA_PTR; ++ ++#define sun55i_pcie_DBG 0 ++ ++#define __pcie_dev_print_emit(fmt, ...) \ ++({ \ ++ printf(fmt, ##__VA_ARGS__); \ ++}) ++ ++#ifdef dev_err ++#undef dev_err ++#define dev_err(dev, fmt, ...) \ ++({ \ ++ if (dev) \ ++ __pcie_dev_print_emit("%s: " fmt, dev->name, \ ++ ##__VA_ARGS__); \ ++}) ++#endif ++ ++#ifdef dev_info ++#undef dev_info ++#define dev_info dev_err ++#endif ++ ++#ifdef DEBUG ++#define dev_dbg dev_err ++#else ++#define dev_dbg(dev, fmt, ...) 
\ ++({ \ ++ if (0) \ ++ __dev_printk(7, dev, fmt, ##__VA_ARGS__); \ ++}) ++#endif ++ ++ ++ ++static int sun55i_pcie_addr_valid(pci_dev_t d, int first_busno) ++{ ++ if ((PCI_BUS(d) == first_busno) && (PCI_DEV(d) > 0)) ++ return 0; ++ if ((PCI_BUS(d) == first_busno + 1) && (PCI_DEV(d) > 0)) ++ return 0; ++ ++ return 1; ++} ++ ++static void sun55i_pcie_prog_outbound_atu(struct sun55i_pcie_port *pp, int index, int type, ++ u64 cpu_addr, u64 pci_addr, u32 size) ++{ ++ struct sun55i_pcie *pci = to_sun55i_pcie_from_pp(pp); ++ ++ ++ sun55i_pcie_writel_dbi(pci, PCIE_ATU_LOWER_BASE_OUTBOUND(index), lower_32_bits(cpu_addr)); ++ sun55i_pcie_writel_dbi(pci, PCIE_ATU_UPPER_BASE_OUTBOUND(index), upper_32_bits(cpu_addr)); ++ sun55i_pcie_writel_dbi(pci, PCIE_ATU_LIMIT_OUTBOUND(index), lower_32_bits(cpu_addr + size - 1)); ++ sun55i_pcie_writel_dbi(pci, PCIE_ATU_LOWER_TARGET_OUTBOUND(index), lower_32_bits(pci_addr)); ++ sun55i_pcie_writel_dbi(pci, PCIE_ATU_UPPER_TARGET_OUTBOUND(index), upper_32_bits(pci_addr)); ++ sun55i_pcie_writel_dbi(pci, PCIE_ATU_CR1_OUTBOUND(index), type); ++ sun55i_pcie_writel_dbi(pci, PCIE_ATU_CR2_OUTBOUND(index), PCIE_ATU_ENABLE); ++} ++ ++static int sun55i_pcie_rd_other_conf(struct sun55i_pcie_port *pp, pci_dev_t d, int where, int size, ulong *val) ++{ ++ int ret = PCIBIOS_SUCCESSFUL, type; ++ u64 busdev; ++ u64 atu_cpu_addr = pp->cfg0_base; ++ ++ if (pp->cpu_pcie_addr_quirk) ++ atu_cpu_addr -= PCIE_CPU_BASE; ++ ++ busdev = PCIE_ATU_BUS(PCI_BUS(d)) | PCIE_ATU_DEV(PCI_DEV(d)) | PCIE_ATU_FUNC(PCI_FUNC(d)); ++ ++ if (PCI_BUS(d) != 0) ++ type = PCIE_ATU_TYPE_CFG0; ++ else ++ type = PCIE_ATU_TYPE_CFG1; ++ ++ sun55i_pcie_prog_outbound_atu(pp, PCIE_ATU_INDEX0, type, atu_cpu_addr, busdev, pp->cfg0_size); ++ ++ ret = sun55i_pcie_cfg_read(pp->va_cfg0_base + where, size, val); ++ ++ return ret; ++} ++ ++static int sun55i_pcie_wr_other_conf(struct sun55i_pcie_port *pp, pci_dev_t d, int where, int size, ulong val) ++{ ++ int ret = PCIBIOS_SUCCESSFUL, type; ++ u64 busdev; 
++ u64 atu_cpu_addr = pp->cfg0_base; ++ ++ if (pp->cpu_pcie_addr_quirk) ++ atu_cpu_addr -= PCIE_CPU_BASE; ++ ++ busdev = PCIE_ATU_BUS(PCI_BUS(d)) | PCIE_ATU_DEV(PCI_DEV(d)) | PCIE_ATU_FUNC(PCI_FUNC(d)); ++ ++ if (PCI_BUS(d) != 0) ++ type = PCIE_ATU_TYPE_CFG0; ++ else ++ type = PCIE_ATU_TYPE_CFG1; ++ ++ sun55i_pcie_prog_outbound_atu(pp, PCIE_ATU_INDEX0, type, atu_cpu_addr, busdev, pp->cfg0_size); ++ ++ ret = sun55i_pcie_cfg_write(pp->va_cfg0_base + where, size, val); ++ ++ return ret; ++} ++ ++static int sun55i_pcie_host_rd_own_conf(struct sun55i_pcie_port *pp, int where, int size, ulong *val) ++{ ++ int ret; ++ ++ ret = sun55i_pcie_cfg_read(pp->dbi_base + where, size, val); ++ ++ return ret; ++} ++ ++static int sun55i_pcie_host_wr_own_conf(struct sun55i_pcie_port *pp, int where, int size, ulong val) ++{ ++ int ret; ++ ++ ret = sun55i_pcie_cfg_write(pp->dbi_base + where, size, val); ++ ++ return ret; ++} ++ ++static int sun55i_pcie_read_config(const struct udevice *bus, pci_dev_t bdf, ++ uint offset, ulong *value, ++ enum pci_size_t size) ++{ ++ struct sun55i_pcie *pcie = dev_get_priv(bus); ++ int ret, size_len = 4; ++ ++ if (!sun55i_pcie_addr_valid(bdf, pcie->first_busno)) { ++ debug("- out of range\n"); ++ *value = pci_get_ff(size); ++ return 0; ++ } ++ ++ if (size == PCI_SIZE_8) ++ size_len = 1; ++ else if (size == PCI_SIZE_16) ++ size_len = 2; ++ else if (size == PCI_SIZE_32) ++ size_len = 4; ++ ++ if (PCI_BUS(bdf) != pcie->first_busno) ++ ret = sun55i_pcie_rd_other_conf(&pcie->pcie_port, bdf, offset, size_len, value); ++ else ++ ret = sun55i_pcie_host_rd_own_conf(&pcie->pcie_port, offset, size_len, value); ++ ++ return ret; ++} ++ ++static int sun55i_pcie_write_config(struct udevice *bus, pci_dev_t bdf, ++ uint offset, ulong value, ++ enum pci_size_t size) ++{ ++ struct sun55i_pcie *pcie = dev_get_priv(bus); ++ int ret, size_len = 4; ++ ++ if (!sun55i_pcie_addr_valid(bdf, pcie->first_busno)) { ++ debug("- out of range\n"); ++ return 0; ++ } ++ ++ if (size == 
PCI_SIZE_8) ++ size_len = 1; ++ else if (size == PCI_SIZE_16) ++ size_len = 2; ++ else if (size == PCI_SIZE_32) ++ size_len = 4; ++ ++ if (PCI_BUS(bdf) != 0) ++ ret = sun55i_pcie_wr_other_conf(&pcie->pcie_port, bdf, offset, size_len, value); ++ else ++ ret = sun55i_pcie_host_wr_own_conf(&pcie->pcie_port, offset, size_len, value); ++ ++ return ret; ++} ++ ++static void sun55i_pcie_host_setup_rc(struct sun55i_pcie_port *pp) ++{ ++ ulong val, i; ++ phys_addr_t mem_base; ++ phys_addr_t io_base; ++ struct sun55i_pcie *pci = to_sun55i_pcie_from_pp(pp); ++ ++ sun55i_pcie_plat_set_rate(pci); ++ ++ sun55i_pcie_writel_dbi(pci, PCI_BASE_ADDRESS_0, 0x4); ++ sun55i_pcie_writel_dbi(pci, PCI_BASE_ADDRESS_1, 0x0); ++ ++ val = sun55i_pcie_readl_dbi(pci, PCI_INTERRUPT_LINE); ++ val &= PCIE_INTERRUPT_LINE_MASK; ++ val |= PCIE_INTERRUPT_LINE_ENABLE; ++ sun55i_pcie_writel_dbi(pci, PCI_INTERRUPT_LINE, val); ++ ++ val = sun55i_pcie_readl_dbi(pci, PCI_PRIMARY_BUS); ++ val &= 0xff000000; ++ val |= 0x00ff0100; ++ sun55i_pcie_writel_dbi(pci, PCI_PRIMARY_BUS, val); ++ ++ val = sun55i_pcie_readl_dbi(pci, PCI_COMMAND); ++ ++ val &= PCIE_HIGH16_MASK; ++ val |= PCI_COMMAND_IO | PCI_COMMAND_MEMORY | ++ PCI_COMMAND_MASTER | PCI_COMMAND_SERR; ++ ++ sun55i_pcie_writel_dbi(pci, PCI_COMMAND, val); ++ ++ if (IS_ENABLED(CONFIG_PCI_MSI) && !pp->has_its) { ++ for (i = 0; i < 8; i++) { ++ sun55i_pcie_host_wr_own_conf(pp, PCIE_MSI_INTR_ENABLE(i), 4, ~0); ++ } ++ } ++ ++ if (pp->cpu_pcie_addr_quirk) { ++ mem_base = pp->mem_base - PCIE_CPU_BASE; ++ io_base = pp->io_base - PCIE_CPU_BASE; ++ } else { ++ mem_base = pp->mem_base; ++ io_base = pp->io_base; ++ } ++ ++ sun55i_pcie_prog_outbound_atu(pp, PCIE_ATU_INDEX1, PCIE_ATU_TYPE_MEM, ++ mem_base, pp->mem_bus_addr, pp->mem_size); ++ ++ sun55i_pcie_prog_outbound_atu(pp, PCIE_ATU_INDEX2, PCIE_ATU_TYPE_IO, ++ io_base, pp->io_bus_addr, pp->io_size); ++ ++ sun55i_pcie_host_wr_own_conf(pp, PCI_BASE_ADDRESS_0, 4, 0); ++ ++ sun55i_pcie_dbi_ro_wr_en(pci); ++ ++ 
sun55i_pcie_host_wr_own_conf(pp, PCI_CLASS_DEVICE, 2, PCI_CLASS_BRIDGE_PCI); ++ ++ sun55i_pcie_dbi_ro_wr_dis(pci); ++ ++ sun55i_pcie_host_rd_own_conf(pp, PCIE_LINK_WIDTH_SPEED_CONTROL, 4, &val); ++ val |= PORT_LOGIC_SPEED_CHANGE; ++ sun55i_pcie_host_wr_own_conf(pp, PCIE_LINK_WIDTH_SPEED_CONTROL, 4, val); ++} ++ ++static int sun55i_pcie_host_link_up_status(struct sun55i_pcie_port *pp) ++{ ++ u32 val; ++ int ret; ++ struct sun55i_pcie *pcie = to_sun55i_pcie_from_pp(pp); ++ ++ val = sun55i_pcie_readl(pcie, PCIE_LINK_STAT); ++ ++ if ((val & RDLH_LINK_UP) && (val & SMLH_LINK_UP)) ++ ret = 1; ++ else ++ ret = 0; ++ ++ printf(" Link Status: 0x%08x\n", val); ++ printf(" RDLH_LINK_UP: %d\n", !!(val & RDLH_LINK_UP)); ++ printf(" SMLH_LINK_UP: %d\n", !!(val & SMLH_LINK_UP)); ++ printf(" LINK_SPEED: %d\n", (val >> 16) & 0xF); ++ printf(" LINK_WIDTH: %d\n", (val >> 20) & 0x3F); ++ ++ return ret; ++} ++ ++static int sun55i_pcie_host_link_up(struct sun55i_pcie_port *pp) ++{ ++ return sun55i_pcie_host_link_up_status(pp); ++} ++ ++static int sun55i_pcie_host_wait_for_link(struct sun55i_pcie_port *pp) ++{ ++ int retries; ++ ++ for (retries = 0; retries < LINK_WAIT_MAX_RETRIE; retries++) { ++ if (sun55i_pcie_host_link_up(pp)) { ++ printf("pcie link up success\n"); ++ return 0; ++ } ++ mdelay(1); ++ } ++ ++ return -ETIMEDOUT; ++} ++ ++static int sun55i_pcie_host_establish_link(struct sun55i_pcie *pci) ++{ ++ struct sun55i_pcie_port *pp = &pci->pcie_port; ++ ++ if (sun55i_pcie_host_link_up(pp)) { ++ printf("pcie is already link up\n"); ++ return 0; ++ } ++ ++ sun55i_pcie_plat_ltssm_enable(pci); ++ ++ return sun55i_pcie_host_wait_for_link(pp); ++} ++ ++static int sun55i_pcie_host_wait_for_speed_change(struct sun55i_pcie *pci) ++{ ++ u32 tmp; ++ unsigned int retries; ++ ++ for (retries = 0; retries < LINK_WAIT_MAX_RETRIE; retries++) { ++ tmp = sun55i_pcie_readl_dbi(pci, PCIE_LINK_WIDTH_SPEED_CONTROL); ++ if (!(tmp & PORT_LOGIC_SPEED_CHANGE)) ++ return 0; ++ mdelay(1); ++ } ++ ++ 
printf("Speed change timeout\n"); ++ return -ETIMEDOUT; ++} ++ ++static int sun55i_pcie_host_speed_change(struct sun55i_pcie *pci, int gen) ++{ ++ u32 val; ++ int ret; ++ u8 offset; ++ ++ sun55i_pcie_dbi_ro_wr_en(pci); ++ ++ offset = sun55i_pcie_plat_find_capability(pci, PCI_CAP_ID_EXP); ++ if (!offset) { ++ printf("PCIe: Cannot find PCI Express capability\n"); ++ sun55i_pcie_dbi_ro_wr_dis(pci); ++ return -EINVAL; ++ } ++ ++ val = sun55i_pcie_readl_dbi(pci, LINK_CONTROL2_LINK_STATUS2); ++ val &= ~PCI_EXP_LNKCTL2_TLS; ++ val |= gen; ++ sun55i_pcie_writel_dbi(pci, LINK_CONTROL2_LINK_STATUS2, val); ++ ++ val = sun55i_pcie_readl_dbi(pci, PCIE_LINK_WIDTH_SPEED_CONTROL); ++ val &= ~PORT_LOGIC_SPEED_CHANGE; ++ sun55i_pcie_writel_dbi(pci, PCIE_LINK_WIDTH_SPEED_CONTROL, val); ++ ++ val = sun55i_pcie_readl_dbi(pci, PCIE_LINK_WIDTH_SPEED_CONTROL); ++ val |= PORT_LOGIC_SPEED_CHANGE; ++ sun55i_pcie_writel_dbi(pci, PCIE_LINK_WIDTH_SPEED_CONTROL, val); ++ ++ ret = sun55i_pcie_host_wait_for_speed_change(pci); ++ if (!ret) ++ printf("PCIe: Link active at Gen%d\n", gen); ++ else ++ printf("PCIe: Link active, but speed change failed (remains Gen1)\n"); ++ ++ sun55i_pcie_dbi_ro_wr_dis(pci); ++ ++ return 0; ++} ++ ++static void sun55i_pcie_host_init(struct udevice *dev) ++{ ++ struct sun55i_pcie *pci = dev_get_priv(dev); ++ ++ sun55i_pcie_plat_ltssm_disable(pci); ++ ++ sun55i_pcie_host_setup_rc(&pci->pcie_port); ++ ++ sun55i_pcie_host_establish_link(pci); ++ ++ sun55i_pcie_host_speed_change(pci, pci->link_gen); ++} ++ ++static int sun55i_pcie_probe(struct udevice *dev) ++{ ++ struct sun55i_pcie *pci = dev_get_priv(dev); ++ struct udevice *ctlr = pci_get_controller(dev); ++ struct pci_controller *hose = dev_get_uclass_priv(ctlr); ++ const struct sun55i_pcie_of_data *data; ++ int ret; ++ ++ data = (const struct sun55i_pcie_of_data *)dev_get_driver_data(dev); ++ if (!data) { ++ printf("PCIe: No platform data found\n"); ++ return -EINVAL; ++ } ++ ++ ret = sun55i_pcie_plat_hw_init(dev); ++ 
if (ret) { ++ printf("PCIe: Hardware init failed with error %d\n", ret); ++ return ret; ++ } ++ ++ pci->first_busno = dev->seq_; ++ pci->dev = dev; ++ ++ pci->dbi_base = (void __iomem *)phys_to_virt((phys_addr_t)data->dbi_addr); ++ pci->app_base = (void __iomem *)((char *)pci->dbi_base + PCIE_USER_DEFINED_REGISTER); ++ ++ printf("PCIe: Disabling DBI write protection...\n"); ++ sun55i_pcie_dbi_ro_wr_en(pci); ++ ++ pci_set_region(&hose->regions[0], ++ data->io_addr, ++ data->io_addr, ++ data->io_size, ++ PCI_REGION_IO); ++ ++ pci_set_region(&hose->regions[1], ++ data->mem_addr, ++ data->mem_addr, ++ data->mem_size, ++ PCI_REGION_MEM); ++ ++ hose->region_count = 2; ++ ++ pci->pcie_port.dbi_base = (void __iomem *)phys_to_virt((phys_addr_t)data->dbi_addr); ++ pci->pcie_port.cfg0_base = data->cfg_addr; ++ pci->pcie_port.cfg0_size = data->cfg_size; ++ pci->pcie_port.io_base = data->io_addr; ++ pci->pcie_port.io_size = data->io_size; ++ pci->pcie_port.mem_base = data->mem_addr; ++ pci->pcie_port.mem_size = data->mem_size; ++ ++ pci->pcie_port.io_bus_addr = data->io_addr; ++ pci->pcie_port.mem_bus_addr = data->mem_addr; ++ ++ if (!pci->lanes) ++ pci->lanes = data->num_lanes; ++ if (!pci->link_gen) ++ pci->link_gen = data->max_link_speed; ++ ++ pci->pcie_port.cpu_pcie_addr_quirk = true; ++ ++ pci->pcie_port.va_cfg0_base = phys_to_virt(pci->pcie_port.cfg0_base); ++ ++ printf("PCIe: DBI region: 0x%08x-0x%08x\n", data->dbi_addr, data->dbi_addr + data->dbi_size); ++ printf("PCIe: IO region: 0x%08x-0x%08x\n", data->io_addr, data->io_addr + data->io_size); ++ printf("PCIe: MEM region: 0x%08x-0x%08x\n", data->mem_addr, data->mem_addr + data->mem_size); ++ printf("PCIe: CFG region: 0x%08x-0x%08x\n", data->cfg_addr, data->cfg_addr + data->cfg_size); ++ printf("PCIe: Lanes: %d, Max Speed: Gen%d\n", data->num_lanes, data->max_link_speed); ++ ++ sun55i_pcie_host_init(dev); ++ ++ sun55i_pcie_dbi_ro_wr_dis(pci); ++ ++ return 0; ++} ++ ++static const struct dm_pci_ops sun55i_pcie_ops = { 
++ .read_config = sun55i_pcie_read_config, ++ .write_config = sun55i_pcie_write_config, ++}; ++ ++static const struct sun55i_pcie_of_data sun55i_pcie_rc_v210_of_data = { ++ .mode = SUN55I_PCIE_RC_TYPE, ++ .cpu_pcie_addr_quirk = true, ++ ++ .dbi_addr = 0x04800000, ++ .dbi_size = 0x480000, ++ .io_addr = 0x21000000, ++ .io_size = 0x01000000, ++ .mem_addr = 0x22000000, ++ .mem_size = 0x0e000000, ++ .cfg_addr = 0x20000000, ++ .cfg_size = 0x01000000, ++ .num_lanes = 1, /* Default */ ++ .max_link_speed = 2, ++ .num_ib_windows = 8, ++ .num_ob_windows = 8, ++}; ++ ++static const struct udevice_id sun55i_pcie_ids[] = { ++ { ++ .compatible = "allwinner,sun55i-pcie-v210-rc", ++ .data = (ulong)&sun55i_pcie_rc_v210_of_data, ++ }, ++ { } ++}; ++ ++U_BOOT_DRIVER(sun55i_pcie) = { ++ .name = "pcie_dw_sun55i", ++ .id = UCLASS_PCI, ++ .of_match = sun55i_pcie_ids, ++ .ops = &sun55i_pcie_ops, ++ .probe = sun55i_pcie_probe, ++ .priv_auto = sizeof(struct sun55i_pcie), ++}; +diff --git a/drivers/pci/pcie-sun55i.h b/drivers/pci/pcie-sun55i.h +new file mode 100644 +index 000000000000..111111111111 +--- /dev/null ++++ b/drivers/pci/pcie-sun55i.h +@@ -0,0 +1,490 @@ ++/* SPDX-License-Identifier: GPL-2.0-or-later */ ++/* Copyright(c) 2020 - 2023 Allwinner Technology Co.,Ltd. All rights reserved. */ ++/* ++ * Allwinner PCIe controller driver ++ * ++ * Copyright (C) 2022 allwinner Co., Ltd. ++ * ++ * Author: songjundong ++ * ++ * This program is free software; you can redistribute it and/or modify ++ * it under the terms of the GNU General Public License version 2 as ++ * published by the Free Software Foundation. 
++ */ ++#ifndef _PCIE_SUN55I_H ++#define _PCIE_SUN55I_H ++ ++#include ++#include ++#include ++#include ++#include ++#include ++#include ++#include ++#include ++#include ++#include ++ ++#define SUN55I_PCIE_MODULE_VERSION "1.0.0" ++ ++#define SUN55I_PCIE_CFG_SIZE 0x01000000 ++ ++#define SUN55I_CFG_RW_SIZE 0x04 ++ ++#define PCIE_PORT_LINK_CONTROL 0x710 ++#define PORT_LINK_MODE_MASK (0x3f << 16) ++#define PORT_LINK_MODE_1_LANES (0x1 << 16) ++#define PORT_LINK_MODE_2_LANES (0x3 << 16) ++#define PORT_LINK_MODE_4_LANES (0x7 << 16) ++#define PORT_LINK_LPBK_ENABLE (0x1 << 2) ++ ++#define PCIE_LINK_WIDTH_SPEED_CONTROL 0x80C ++#define PORT_LOGIC_SPEED_CHANGE (0x1 << 17) ++#define PORT_LOGIC_LINK_WIDTH_MASK (0x1ff << 8) ++#define PORT_LOGIC_LINK_WIDTH_1_LANES (0x1 << 8) ++#define PORT_LOGIC_LINK_WIDTH_2_LANES (0x2 << 8) ++#define PORT_LOGIC_LINK_WIDTH_4_LANES (0x4 << 8) ++ ++#define PCIE_ATU_VIEWPORT 0x900 ++#define PCIE_ATU_REGION_INBOUND (0x1 << 31) ++#define PCIE_ATU_REGION_OUTBOUND (0x0 << 31) ++#define PCIE_ATU_REGION_INDEX2 (0x2 << 0) ++#define PCIE_ATU_REGION_INDEX1 (0x1 << 0) ++#define PCIE_ATU_REGION_INDEX0 (0x0 << 0) ++ ++#define PCIE_ATU_INDEX0 0x0 ++#define PCIE_ATU_INDEX1 0x1 ++#define PCIE_ATU_INDEX2 0x2 ++#define PCIE_ATU_INDEX3 0x3 ++#define PCIE_ATU_INDEX4 0x4 ++#define PCIE_ATU_INDEX5 0x5 ++#define PCIE_ATU_INDEX6 0x6 ++#define PCIE_ATU_INDEX7 0x7 ++ ++#define PCIE_EP_REBAR_SIZE_32M 0x200 ++ ++#define PCIE_ATU_CR1_OUTBOUND(reg) (0x300000 + ((reg) * 0x200)) ++#define PCIE_ATU_TYPE_MEM (0x0 << 0) ++#define PCIE_ATU_TYPE_IO (0x2 << 0) ++#define PCIE_ATU_TYPE_CFG0 (0x4 << 0) ++#define PCIE_ATU_TYPE_CFG1 (0x5 << 0) ++#define PCIE_ATU_CR2_OUTBOUND(reg) (0x300004 + ((reg) * 0x200)) ++#define PCIE_ATU_DMA_BYPASS BIT(27) ++#define PCIE_ATU_BAR_MODE_ENABLE BIT(30) ++#define PCIE_ATU_ENABLE BIT(31) ++ ++#define PCIE_ATU_LOWER_BASE_OUTBOUND(reg) (0x300008 + ((reg) * 0x200)) ++#define PCIE_ATU_UPPER_BASE_OUTBOUND(reg) (0x30000c + ((reg) * 0x200)) ++#define 
PCIE_ATU_LIMIT_OUTBOUND(reg) (0x300010 + ((reg) * 0x200)) ++#define PCIE_ATU_LOWER_TARGET_OUTBOUND(reg) (0x300014 + ((reg) * 0x200)) ++#define PCIE_ATU_UPPER_TARGET_OUTBOUND(reg) (0x300018 + ((reg) * 0x200)) ++ ++#define PCIE_ATU_CR1_INBOUND(reg) (0x300100 + ((reg) * 0x200)) ++#define PCIE_ATU_TYPE_MEM (0x0 << 0) ++#define PCIE_ATU_TYPE_IO (0x2 << 0) ++#define PCIE_ATU_TYPE_CFG0 (0x4 << 0) ++#define PCIE_ATU_TYPE_CFG1 (0x5 << 0) ++#define PCIE_ATU_FUNC_NUM(pf) ((pf) << 20) ++#define PCIE_ATU_CR2_INBOUND(reg) (0x300104 + ((reg) * 0x200)) ++#define PCIE_ATU_MATCH_MODE BIT(30) ++#define PCIE_ATU_FUNC_NUM_MATCH_EN BIT(19) ++#define PCIE_ATU_FUNC_NUM(pf) ((pf) << 20) ++ ++#define PCIE_ATU_LOWER_BASE_INBOUND(reg) (0x300108 + ((reg) * 0x200)) ++#define PCIE_ATU_UPPER_BASE_INBOUND(reg) (0x30010c + ((reg) * 0x200)) ++#define PCIE_ATU_LIMIT_INBOUND(reg) (0x300110 + ((reg) * 0x200)) ++#define PCIE_ATU_LOWER_TARGET_INBOUND(reg) (0x300114 + ((reg) * 0x200)) ++#define PCIE_ATU_UPPER_TARGET_INBOUND(reg) (0x300118 + ((reg) * 0x200)) ++ ++#define PCIE_ATU_BUS(x) (((x) & 0xff) << 24) ++#define PCIE_ATU_DEV(x) (((x) & 0x1f) << 19) ++#define PCIE_ATU_FUNC(x) (((x) & 0x7) << 16) ++ ++#define PCIE_MISC_CONTROL_1_CFG 0x8bc ++#define PCIE_TYPE1_CLASS_CODE_REV_ID_REG 0x08 ++ ++#define PCIE_ADDRESS_ALIGNING (~0x3) ++#define PCIE_HIGH_16 16 ++#define PCIE_BAR_NUM 6 ++#define PCIE_MEM_FLAGS 0x4 ++#define PCIE_IO_FLAGS 0x1 ++#define PCIE_BAR_REG 0x4 ++#define PCIE_HIGH16_MASK 0xffff0000 ++#define PCIE_LOW16_MASK 0x0000ffff ++#define PCIE_INTERRUPT_LINE_MASK 0xffff00ff ++#define PCIE_INTERRUPT_LINE_ENABLE 0x00000100 ++#define PCIE_PRIMARY_BUS_MASK 0xff000000 ++#define PCIE_PRIMARY_BUS_ENABLE 0x00010100 ++#define PCIE_MEMORY_MASK 0xfff00000 ++ ++#define PCIE_CPU_BASE 0x20000000 ++ ++#define PCIE_TYPE0_STATUS_COMMAND_REG 0x4 ++ ++#define PCIE_DBI2_BASE 0x100000 ++#define DBI2_FUNC_OFFSET 0x10000 ++#define BAR_ENABLE 0x1 ++ ++#define RESBAR_CAP_REG 0x4 /* from PCIe spec4.0 7.8.6 */ ++#define 
RESBAR_SIZE_MASK 0xfffff0 ++#define RESBAR_CTL_REG 0x8 ++#define RESBAR_NEXT_BAR 0x8 ++#define SIZE_OF_1MB 20 /* 2^20 = 0x100000 */ ++ ++#define PCIE_COMBO_PHY_BGR 0x04 ++#define PHY_ACLK_EN BIT(17) ++#define PHY_HCLK_EN BIT(16) ++#define PHY_TERSTN BIT(1) ++#define PHY_PW_UP_RSTN BIT(0) ++#define PCIE_COMBO_PHY_CTL 0x10 ++#define PHY_USE_SEL BIT(31) /* 0:PCIE; 1:USB3 */ ++#define PHY_CLK_SEL BIT(30) /* 0:internal clk; 1:external clk */ ++#define PHY_BIST_EN BIT(16) ++#define PHY_PIPE_SW BIT(9) ++#define PHY_PIPE_SEL BIT(8) /* 0:PIPE resetn ctrl by PCIE ctrl; 1:PIPE resetn ctrl by PHY_PIPE_SW */ ++#define PHY_PIPE_CLK_INVERT BIT(4) ++#define PHY_FPGA_SYS_RSTN BIT(1) /* for FPGA */ ++#define PHY_RSTN BIT(0) ++ ++#define NEXT_CAP_PTR_MASK 0xff00 ++#define CAP_ID_MASK 0x00ff ++ ++/* Error values that may be returned by PCI functions */ ++#define PCIBIOS_SUCCESSFUL 0x00 ++#define PCIBIOS_FUNC_NOT_SUPPORTED 0x81 ++#define PCIBIOS_BAD_VENDOR_ID 0x83 ++#define PCIBIOS_DEVICE_NOT_FOUND 0x86 ++#define PCIBIOS_BAD_REGISTER_NUMBER 0x87 ++#define PCIBIOS_SET_FAILED 0x88 ++#define PCIBIOS_BUFFER_TOO_SMALL 0x89 ++ ++/* ++ * Maximum number of MSI IRQs can be 256 per controller. But keep ++ * it 32 as of now. Probably we will never need more than 32. If needed, ++ * then increment it in multiple of 32.
++ */ ++#define INT_PCI_MSI_NR 32 ++#define MAX_MSI_IRQS 256 ++#define MAX_MSI_IRQS_PER_CTRL 32 ++#define MAX_MSI_CTRLS (MAX_MSI_IRQS / MAX_MSI_IRQS_PER_CTRL) ++#define MSI_REG_CTRL_BLOCK_SIZE 12 ++ ++/* #define MAX_MSI_IRQS 32 */ ++/* #define MAX_MSI_CTRLS (MAX_MSI_IRQS / 32) */ ++#define PCIE_LINK_WIDTH_SPEED_CONTROL 0x80C ++#define PORT_LOGIC_SPEED_CHANGE (0x1 << 17) ++#define LINK_CONTROL2_LINK_STATUS2 0xa0 ++/* Parameters for the waiting for link up routine */ ++#define LINK_WAIT_MAX_RETRIE 20 ++#define LINK_WAIT_USLEEP_MIN 90000 ++#define LINK_WAIT_USLEEP_MAX 100000 ++#define SPEED_CHANGE_USLEEP_MIN 100 ++#define SPEED_CHANGE_USLEEP_MAX 1000 ++ ++#define PCIE_MSI_ADDR_LO 0x820 ++#define PCIE_MSI_ADDR_HI 0x824 ++#define PCIE_MSI_INTR_ENABLE(reg) (0x828 + ((reg) * 0x0c)) ++/* #define PCIE_MSI_INTR_MASK(reg) (0x82C + ((reg) * 0x0c)) */ ++/* #define PCIE_MSI_INTR_STATUS(reg) (0x830 + ((reg) * 0x0c)) */ ++/* #define PCIE_MSI_INTR_ENABLE 0x828 */ ++#define PCIE_MSI_INTR_MASK 0x82C ++#define PCIE_MSI_INTR_STATUS 0x830 ++ ++#define PCIE_CTRL_MGMT_BASE 0x900000 ++ ++#define PCIE_USER_DEFINED_REGISTER 0x400000 ++#define PCIE_VER 0x00 ++#define PCIE_ADDR_PAGE_CFG 0x04 ++#define PCIE_AWMISC_CTRL 0x200 ++#define PCIE_ARMISC_CTRL 0x220 ++#define PCIE_LTSSM_CTRL 0xc00 ++#define PCIE_LINK_TRAINING BIT(0) /* 0:disable; 1:enable */ ++#define DEVICE_TYPE_MASK GENMASK(7, 4) ++#define DEVICE_TYPE_RC BIT(6) ++#define PCIE_INT_ENABLE_CLR 0xE04 /* BIT(1):RDLH_LINK_MASK; BIT(0):SMLH_LINK_MASK */ ++#define PCIE_LINK_STAT 0xE0C /* BIT(1):RDLH_LINK; BIT(0):SMLH_LINK */ ++#define RDLH_LINK_UP BIT(1) ++#define SMLH_LINK_UP BIT(0) ++#define PCIE_LINK_INT_EN (BIT(0) | BIT(1)) ++ ++#define PCIE_PHY_CFG 0x800 ++#define SYS_CLK 0 ++#define PAD_CLK 1 ++#define PCIE_LINK_UP_MASK (0x3<<16) ++ ++#define PCIE_RC_RP_ATS_BASE 0x400000 ++ ++#define SUN55I_PCIE_BAR_CFG_CTRL_DISABLED 0x0 ++#define SUN55I_PCIE_BAR_CFG_CTRL_IO_32BITS 0x1 ++#define SUN55I_PCIE_BAR_CFG_CTRL_MEM_32BITS 0x4 ++#define 
SUN55I_PCIE_BAR_CFG_CTRL_PREFETCH_MEM_32BITS 0x5 ++#define SUN55I_PCIE_BAR_CFG_CTRL_MEM_64BITS 0x6 ++#define SUN55I_PCIE_BAR_CFG_CTRL_PREFETCH_MEM_64BITS 0x7 ++ ++#define SUN55I_PCIE_EP_MSI_CTRL_REG 0x90 ++#define SUN55I_PCIE_EP_MSI_CTRL_MMC_OFFSET 17 ++#define SUN55I_PCIE_EP_MSI_CTRL_MMC_MASK GENMASK(19, 17) ++#define SUN55I_PCIE_EP_MSI_CTRL_MME_OFFSET 20 ++#define SUN55I_PCIE_EP_MSI_CTRL_MME_MASK GENMASK(22, 20) ++#define SUN55I_PCIE_EP_MSI_CTRL_ME BIT(16) ++#define SUN55I_PCIE_EP_MSI_CTRL_MASK_MSI_CAP BIT(24) ++#define SUN55I_PCIE_EP_DUMMY_IRQ_ADDR 0x1 ++ ++#define PCIE_PHY_FUNC_CFG (PCIE_CTRL_MGMT_BASE + 0x2c0) ++#define PCIE_RC_BAR_CONF (PCIE_CTRL_MGMT_BASE + 0x300) ++ ++//ECC ++#define PCIE_RASDP_ERR_PROT_CTRL_OFF 0X1F0 ++#define PCIE_RASDP_ERR_INJ_CTRL_OFF 0X204 ++#define PCIE_RASDP_UNCORR_COUNTER_CTRL_OFF 0X1FC ++#define PCIE_RASDP_UNCORR_COUNTER_REPORT_OFF 0X200 ++#define PCIE_RASDP_UNCORR_ERROR_LOCATION_OFF 0X20C ++#define PCIE_RASDP_ERROR_MODR_CLEAR_OFF 0X214 ++ ++#define PCIE_RASDP_CORR_COUNTER_CTRL_OFF 0X1F4 ++#define PCIE_RASDP_CORR_COUNTER_REPORT_OFF 0X1F8 ++#define PCIE_RASDP_CORR_ERROR_LOCATION_OFF 0X208 ++ ++#define PCIE_SII_INT_MASK_RES2 0XE10 ++#define PCIE_SII_INT_RES2 0XE18 ++#define APP_PARITY_ERRS2_MASK BIT(12) ++#define APP_PARITY_ERRS1_MASK BIT(11) ++#define APP_PARITY_ERRS0_MASK BIT(10) ++#define SLV_RASDP_ERR_MODE_MASK BIT(9) ++#define MATR_RASDP_ERR_MODE_MASK BIT(8) ++#define RASDP_ERR_PENDING (BIT(8) | BIT(9) | BIT(10) | BIT(11) | BIT(12)) ++#define PCIE_SII_INT_RES2_ECC_MASK GENMASK(12, 8) ++ ++#define PCI_EXP_LNKCTL2_TLS 0x000f ++#define PCI_EXP_LNKCTL2_TLS_2_5GT 0x0001 /* Supported Speed 2.5GT/s */ ++#define PCI_EXP_LNKCTL2_TLS_5_0GT 0x0002 /* Supported Speed 5GT/s */ ++#define PCI_EXP_LNKCTL2_TLS_8_0GT 0x0003 /* Supported Speed 8GT/s */ ++#define PCI_EXP_LNKCTL2_TLS_16_0GT 0x0004 /* Supported Speed 16GT/s */ ++#define PCI_EXP_LNKCTL2_TLS_32_0GT 0x0005 /* Supported Speed 32GT/s */ ++#define PCI_EXP_LNKCTL2_HASD 0x0020 /* HW 
Autonomous Speed Disable */ ++ ++#define PCI_EXP_LNKCAP 12 /* Link Capabilities */ ++#define PCI_EXP_LNKCAP_SLS 0x0000000f /* Supported Link Speeds */ ++#define PCI_EXP_LNKCAP_SLS_2_5GB 0x00000001 /* LNKCAP2 SLS Vector bit 0 */ ++#define PCI_EXP_LNKCAP_SLS_5_0GB 0x00000002 /* LNKCAP2 SLS Vector bit 1 */ ++#define PCI_EXP_LNKCAP_MLW 0x000003f0 /* Maximum Link Width */ ++#define PCI_EXP_LNKCAP_ASPMS 0x00000c00 /* ASPM Support */ ++#define PCI_EXP_LNKCAP_L0SEL 0x00007000 /* L0s Exit Latency */ ++#define PCI_EXP_LNKCAP_L1EL 0x00038000 /* L1 Exit Latency */ ++#define PCI_EXP_LNKCAP_CLKPM 0x00040000 /* Clock Power Management */ ++#define PCI_EXP_LNKCAP_SDERC 0x00080000 /* Surprise Down Error Reporting Capable */ ++#define PCI_EXP_LNKCAP_DLLLARC 0x00100000 /* Data Link Layer Link Active Reporting Capable */ ++#define PCI_EXP_LNKCAP_LBNC 0x00200000 /* Link Bandwidth Notification Capability */ ++#define PCI_EXP_LNKCAP_PN 0xff000000 /* Port Number */ ++ ++#define PCI_EXP_LNKCTL2 48 /* Link Control 2 */ ++#define PCI_EXP_LNKSTA2 50 /* Link Status 2 */ ++#define PCI_EXP_SLTCAP2 52 /* Slot Capabilities 2 */ ++#define PCI_EXP_SLTCTL2 56 /* Slot Control 2 */ ++#define PCI_EXP_SLTSTA2 58 /* Slot Status 2 */ ++ ++ ++ ++/* Resizable BARs */ ++#define PCI_REBAR_CAP 4 /* capability register */ ++#define PCI_REBAR_CAP_SIZES 0x00FFFFF0 /* supported BAR sizes */ ++#define PCI_REBAR_CTRL 8 /* control register */ ++#define PCI_REBAR_CTRL_BAR_IDX 0x00000007 /* BAR index */ ++#define PCI_REBAR_CTRL_NBAR_MASK 0x000000E0 /* # of resizable BARs */ ++#define PCI_REBAR_CTRL_NBAR_SHIFT 5 /* shift for # of BARs */ ++#define PCI_REBAR_CTRL_BAR_SIZE 0x00001F00 /* BAR size */ ++#define PCI_REBAR_CTRL_BAR_SHIFT 8 /* shift for BAR size */ ++ ++#define PCI_HEADER_TYPE_MASK 0x7f ++ ++ ++#define PCI_STD_NUM_BARS 6 /* Number of standard BARs */ ++enum sun55i_pcie_device_mode { ++ SUN55I_PCIE_EP_TYPE, ++ SUN55I_PCIE_RC_TYPE, ++}; ++ ++/* See matching string table in pci_speed_string() */ ++enum pci_bus_speed 
{ ++ PCI_SPEED_33MHz = 0x00, ++ PCI_SPEED_66MHz = 0x01, ++ PCI_SPEED_66MHz_PCIX = 0x02, ++ PCI_SPEED_100MHz_PCIX = 0x03, ++ PCI_SPEED_133MHz_PCIX = 0x04, ++ PCI_SPEED_66MHz_PCIX_ECC = 0x05, ++ PCI_SPEED_100MHz_PCIX_ECC = 0x06, ++ PCI_SPEED_133MHz_PCIX_ECC = 0x07, ++ PCI_SPEED_66MHz_PCIX_266 = 0x09, ++ PCI_SPEED_100MHz_PCIX_266 = 0x0a, ++ PCI_SPEED_133MHz_PCIX_266 = 0x0b, ++ AGP_UNKNOWN = 0x0c, ++ AGP_1X = 0x0d, ++ AGP_2X = 0x0e, ++ AGP_4X = 0x0f, ++ AGP_8X = 0x10, ++ PCI_SPEED_66MHz_PCIX_533 = 0x11, ++ PCI_SPEED_100MHz_PCIX_533 = 0x12, ++ PCI_SPEED_133MHz_PCIX_533 = 0x13, ++ PCIE_SPEED_2_5GT = 0x14, ++ PCIE_SPEED_5_0GT = 0x15, ++ PCIE_SPEED_8_0GT = 0x16, ++ PCIE_SPEED_16_0GT = 0x17, ++ PCIE_SPEED_32_0GT = 0x18, ++ PCI_SPEED_UNKNOWN = 0xff, ++}; ++ ++struct sun55i_pcie_of_data { ++ const struct sun55i_pcie_ep_ops *ops; ++ enum sun55i_pcie_device_mode mode; ++ u32 func_offset; ++ bool has_pcie_slv_clk; ++ bool need_pcie_rst; ++ bool pcie_slv_clk_400m; ++ bool has_pcie_its_clk; ++ bool has_pcie_ecc; ++ bool cpu_pcie_addr_quirk; ++ ++ u32 dbi_addr; ++ u32 dbi_size; ++ u32 io_addr; ++ u32 io_size; ++ u32 mem_addr; ++ u32 mem_size; ++ u32 cfg_addr; ++ u32 cfg_size; ++ u32 num_lanes; ++ u32 max_link_speed; ++ u32 num_ib_windows; ++ u32 num_ob_windows; ++}; ++ ++struct sun55i_pcie_ep_func { ++ struct list_head list; ++ u8 func_no; ++ u8 msi_cap; ++ u8 msix_cap; ++}; ++ ++struct sun55i_pcie_ep { ++ struct pci_epc *epc; ++ struct list_head func_list; ++ const struct sun55i_pcie_ep_ops *ops; ++ phys_addr_t phys_base; ++ size_t addr_size; ++ size_t page_size; ++ u8 bar_to_atu[PCI_STD_NUM_BARS]; ++ phys_addr_t *outbound_addr; ++ u32 num_ib_windows; ++ u32 num_ob_windows; ++ unsigned long *ib_window_map; ++ unsigned long *ob_window_map; ++ u8 max_functions; ++ void __iomem *msi_mem; ++ phys_addr_t msi_mem_phys; ++ struct pci_bar *epf_bar[PCI_STD_NUM_BARS]; ++}; ++ ++struct sun55i_pcie_ep_ops { ++ void (*ep_init)(struct sun55i_pcie_ep *ep); ++ // int (*raise_irq)(struct 
sun55i_pcie_ep *ep, u8 func_no, ++ // enum pci_epc_irq_type type, u16 interrupt_num); ++ const struct pci_epc_features *(*get_features)(struct sun55i_pcie_ep *ep); ++ unsigned int (*func_conf_select)(struct sun55i_pcie_ep *ep, u8 func_no); ++}; ++ ++struct sun55i_pcie_port { ++ struct device *dev; ++ void __iomem *dbi_base; ++ u32 cfg0_base; ++ void __iomem *va_cfg0_base; ++ fdt_size_t cfg0_size; ++ resource_size_t io_base; ++ phys_addr_t io_bus_addr; ++ u32 io_size; ++ phys_addr_t mem_base; ++ phys_addr_t mem_bus_addr; ++ u32 mem_size; ++ u32 num_ob_windows; ++ struct sun55i_pcie_host_ops *ops; ++ struct sun55i_pcie *pcie; ++ int msi_irq; ++ struct irq_domain *irq_domain; ++ struct irq_domain *msi_domain; ++ struct pci_host_bridge *bridge; ++ u8 root_bus_nr; ++ unsigned long msi_map[BITS_TO_LONGS(INT_PCI_MSI_NR)]; ++ bool has_its; ++ bool cpu_pcie_addr_quirk; ++}; ++ ++struct sun55i_pci_edma_chan; ++ ++struct sun55i_pcie { ++ struct udevice *dev; ++ void *dbi_base; ++ void *app_base; ++ int link_gen; ++ int first_busno; ++ struct sun55i_pcie_port pcie_port; ++ struct sun55i_pcie_ep ep; ++ ++ struct clk pcie_aux; ++ struct reset_ctl pcie_rst; ++ struct phy phy; ++ struct udevice *slot_3v3; ++ ++ struct clk pcie_slv; ++ struct clk pcie_its; ++ struct reset_ctl pwrup_rst; ++ struct reset_ctl pcie_its_rst; ++ struct dma_trx_obj *dma_obj; ++ const struct sun55i_pcie_of_data *drvdata; ++ struct gpio_desc rst_gpio; ++ struct gpio_desc wake_gpio; ++ struct gpio_desc switch_gpio; ++ u32 lanes; ++ u32 num_edma; ++ unsigned long *rd_edma_map; ++ unsigned long *wr_edma_map; ++ struct sun55i_pci_edma_chan *dma_wr_chn; ++ struct sun55i_pci_edma_chan *dma_rd_chn; ++ ++ struct udevice *pcie3v3; ++}; ++ ++#define to_sun55i_pcie_from_pp(x) \ ++ container_of((x), struct sun55i_pcie, pcie_port) ++ ++#define to_sun55i_pcie_from_ep(endpoint) \ ++ container_of((endpoint), struct sun55i_pcie, ep) ++ ++int sun55i_pcie_plat_hw_init(struct udevice *dev); ++ ++void 
sun55i_pcie_plat_set_rate(struct sun55i_pcie *pci); ++void sun55i_pcie_plat_set_mode(struct sun55i_pcie *pci); ++void sun55i_pcie_plat_ltssm_enable(struct sun55i_pcie *pci); ++void sun55i_pcie_plat_ltssm_disable(struct sun55i_pcie *pci); ++ ++int sun55i_pcie_cfg_write(void __iomem *addr, int size, ulong val); ++int sun55i_pcie_cfg_read(void __iomem *addr, int size, ulong *val); ++ ++u32 sun55i_pcie_readl(struct sun55i_pcie *pcie, u32 offset); ++void sun55i_pcie_writel(u32 val, struct sun55i_pcie *pcie, u32 offset); ++ ++void sun55i_pcie_writel_dbi(struct sun55i_pcie *pci, u32 reg, u32 val); ++u32 sun55i_pcie_readl_dbi(struct sun55i_pcie *pci, u32 reg); ++void sun55i_pcie_writew_dbi(struct sun55i_pcie *pci, u32 reg, u16 val); ++u16 sun55i_pcie_readw_dbi(struct sun55i_pcie *pci, u32 reg); ++void sun55i_pcie_writeb_dbi(struct sun55i_pcie *pci, u32 reg, u8 val); ++u8 sun55i_pcie_readb_dbi(struct sun55i_pcie *pci, u32 reg); ++ ++void sun55i_pcie_dbi_ro_wr_en(struct sun55i_pcie *pci); ++void sun55i_pcie_dbi_ro_wr_dis(struct sun55i_pcie *pci); ++ ++u8 sun55i_pcie_plat_find_capability(struct sun55i_pcie *pci, u8 cap); ++ ++#define SUN55I_PCIE_PHY_BASE 0x04f00000 ++#define SUN55I_PCIE_PORT0_CONF_OFFSET 0x1000 ++ ++#endif /* _PCIE_SUN55I_H */ +-- +Armbian + diff --git a/patch/u-boot/sunxi-dev-u-boot-a523/phy-allwinner-add-pcie-usb3-driver.patch b/patch/u-boot/sunxi-dev-u-boot-a523/phy-allwinner-add-pcie-usb3-driver.patch new file mode 100644 index 0000000000..a4bb064ece --- /dev/null +++ b/patch/u-boot/sunxi-dev-u-boot-a523/phy-allwinner-add-pcie-usb3-driver.patch @@ -0,0 +1,712 @@ +From 0000000000000000000000000000000000000000 Mon Sep 17 00:00:00 2001 +From: Marvin Wewer +Date: Sat, 25 Oct 2025 20:32:35 +0000 +Subject: phy: allwinner: Add SUN55I INNO combo PHY driver for PCIe/USB3 + +Signed-off-by: Marvin Wewer +--- + drivers/phy/allwinner/Kconfig | 8 + + drivers/phy/allwinner/Makefile | 1 + + drivers/phy/allwinner/phy-sun55i-pcie-usb3.c | 661 ++++++++++ + 3 files changed, 
670 insertions(+) + +diff --git a/drivers/phy/allwinner/Kconfig b/drivers/phy/allwinner/Kconfig +index 111111111111..222222222222 100644 +--- a/drivers/phy/allwinner/Kconfig ++++ b/drivers/phy/allwinner/Kconfig +@@ -31,5 +31,13 @@ config PHY_SUN50I_USB3 + depends on ARCH_SUNXI + select PHY + help + Enable this to support the USB3 transceiver that is part of + Allwinner sun50i SoCs. ++ ++config PHY_SUN55I_PCIE_USB3 ++ tristate "Allwinner SUN55I INNO COMBO PHY Driver" ++ depends on ARCH_SUNXI ++ select PHY ++ help ++ Enable this to support the Allwinner sun55i PCIe/USB3.0 combo PHY ++ with INNOSILICON IP block. +diff --git a/drivers/phy/allwinner/Makefile b/drivers/phy/allwinner/Makefile +index 111111111111..222222222222 100644 +--- a/drivers/phy/allwinner/Makefile ++++ b/drivers/phy/allwinner/Makefile +@@ -3,5 +3,6 @@ + # Copyright (C) 2016 Amarula Solutions + # + + obj-$(CONFIG_PHY_SUN4I_USB) += phy-sun4i-usb.o + obj-$(CONFIG_PHY_SUN50I_USB3) += phy-sun50i-usb3.o ++obj-$(CONFIG_PHY_SUN55I_PCIE_USB3) += phy-sun55i-pcie-usb3.o +\ No newline at end of file +diff --git a/drivers/phy/allwinner/phy-sun55i-pcie-usb3.c b/drivers/phy/allwinner/phy-sun55i-pcie-usb3.c +new file mode 100644 +index 000000000000..111111111111 +--- /dev/null ++++ b/drivers/phy/allwinner/phy-sun55i-pcie-usb3.c +@@ -0,0 +1,661 @@ ++// SPDX-License-Identifier: GPL-2.0 ++/* ++ * Allwinner PIPE USB3.0 PCIE Combo Phy driver ++ * ++ * Copyright(c) 2020 - 2024 Allwinner Technology Co.,Ltd. All rights reserved. 
++ * ++ * sun55i-inno-combophy.c: chenhuaqiang ++ */ ++ ++ ++#include ++#include ++#include ++#include ++#include ++#include ++#include ++#include ++#include ++#include ++#include ++#include ++#include ++#include ++#include ++#include ++#include ++ ++ ++#define sun55i_COMBOPHY_CTL_BASE 0x04f00000 ++#define sun55i_COMBOPHY_CLK_BASE 0x04f80000 ++ ++/* PCIE USB3 Sub-System Registers */ ++/* Sub-System Version Reset Register */ ++#define PCIE_USB3_SYS_VER 0x00 ++ ++/* PHY CLK Gating Control Register */ ++#define PCIE_REF_CLK_GATING 31 ++ ++/* CCMU Base Address */ ++#define SUNXI_CCMU_BASE 0x02001000 ++#define SUNXI_CCM_BASE (SUNXI_CCMU_BASE) ++ ++/* Sub-System PCIE Bus Gating Reset Register */ ++#define PCIE_BRG_REG_RST 16 ++#define PCIE_BGR_REG (SUNXI_CCM_BASE + 0x0aac) ++ ++/* Sub-System PCIE Bus Gating Reset Register */ ++#define PCIE_COMBO_PHY_BGR 0x04 ++#define PCIE_SLV_ACLK_EN BIT(18) ++#define PCIE_ACLK_EN BIT(17) ++#define PCIE_HCLK_EN BIT(16) ++#define PCIE_PERSTN BIT(1) ++#define PCIE_PW_UP_RSTN BIT(0) ++ ++/* Sub-System USB3 Bus Gating Reset Register */ ++#define USB3_COMBO_PHY_BGR 0x08 ++#define USB3_ACLK_EN BIT(17) ++#define USB3_HCLK_EN BIT(16) ++#define USB3_U2_PHY_RSTN BIT(4) ++#define USB3_U2_PHY_MUX_EN BIT(3) ++#define USB3_U2_PHY_MUX_SEL BIT(0) ++#define USB3_RESETN BIT(0) ++ ++/* Sub-System PCIE PHY Control Register */ ++#define PCIE_COMBO_PHY_CTL 0x10 ++#define PHY_USE_SEL BIT(31) /* 0:PCIE; 1:USB3 */ ++#define PHY_CLK_SEL BIT(30) /* 0:internal clk; 1:external clk */ ++#define PHY_BIST_EN BIT(16) ++#define PHY_PIPE_SW BIT(9) ++#define PHY_PIPE_SEL BIT(8) /* 0:rstn by PCIE or USB3; 1:rstn by PHY_PIPE_SW */ ++#define PHY_PIPE_CLK_INVERT BIT(4) ++#define PHY_FPGA_SYS_RSTN BIT(1) /* for FPGA */ ++#define PHY_RSTN BIT(0) ++ ++/* PHY CLK Registers (offset from sun55i_COMBOPHY_CLK_BASE) */ ++#define PCIE_REF_CLK_REG_PCIE_REF_CLK_GATING_CLEAR_MASK 0x80000000 ++ ++/* Registers */ ++#define COMBO_REG_SYSVER(comb_base_addr) ((comb_base_addr) \ ++ + 
PCIE_USB3_SYS_VER) ++#define COMBO_REG_PCIEBGR(comb_base_addr) ((comb_base_addr) \ ++ + PCIE_COMBO_PHY_BGR) ++#define COMBO_REG_USB3BGR(comb_base_addr) ((comb_base_addr) \ ++ + USB3_COMBO_PHY_BGR) ++#define COMBO_REG_PHYCTRL(comb_base_addr) ((comb_base_addr) \ ++ + PCIE_COMBO_PHY_CTL) ++ ++/* Sub-System Version Number */ ++#define COMBO_VERSION_01 (0x10000) ++#define COMBO_VERSION_ANY (0x0) ++ ++enum phy_use_sel { ++ PHY_USE_BY_PCIE = 0, /* PHY used by PCIE */ ++ PHY_USE_BY_USB3, /* PHY used by USB3 */ ++ PHY_USE_BY_PCIE_USB3_U2,/* PHY used by PCIE & USB3_U2 */ ++}; ++ ++enum phy_refclk_sel { ++ INTER_SIG_REF_CLK = 0, /* PHY use internal single end reference clock */ ++ EXTER_DIF_REF_CLK, /* PHY use external single end reference clock */ ++}; ++ ++struct sun55i_combophy_of_data { ++ bool has_cfg_clk; ++ bool has_slv_clk; ++ bool has_phy_mbus_clk; ++ bool has_phy_ahb_clk; ++ bool has_pcie_axi_clk; ++ bool has_u2_phy_mux; ++ bool need_noppu_rst; ++ bool has_u3_phy_data_quirk; ++ bool need_optimize_jitter; ++}; ++ ++struct sun55i_combphy { ++ struct device *dev; ++ struct phy *phy; ++ void __iomem *phy_ctl; /* parse dts, control the phy mode, reset and power */ ++ void __iomem *phy_clk; /* parse dts, set the phy clock */ ++ ++ struct reset_ctl reset; ++ struct reset_ctl noppu_reset; ++ ++ struct clk *phyclk_ref; ++ struct clk *refclk_par; ++ struct clk *phyclk_cfg; ++ struct clk *cfgclk_par; ++ struct clk *phy_mclk; ++ struct clk *phy_hclk; ++ struct clk *phy_axi; ++ struct clk *phy_axi_par; ++ __u8 mode; ++ __u32 vernum; /* version number */ ++ enum phy_use_sel user; ++ enum phy_refclk_sel ref; ++ const struct sun55i_combophy_of_data *drvdata; ++ ++ struct udevice *select3v3_supply; ++ bool initialized; ++}; ++ ++static void sun55i_combphy_usb3_phy_set(struct sun55i_combphy *combphy, bool enable) ++{ ++ u32 val, tmp = 0; ++ ++ val = readl(combphy->phy_clk + 0x1418); ++ tmp = GENMASK(17, 16); ++ if (enable) { ++ val &= ~tmp; ++ val |= BIT(25); ++ } else { ++ val |= 
tmp; ++ val &= ~BIT(25); ++ } ++ writel(val, combphy->phy_clk + 0x1418); ++ ++ /* reg_rx_eq_bypass[3]=1, rx_ctle_res_cal_bypass */ ++ val = readl(combphy->phy_clk + 0x0674); ++ if (enable) ++ val |= BIT(3); ++ else ++ val &= ~BIT(3); ++ writel(val, combphy->phy_clk + 0x0674); ++ ++ /* rx_ctle_res_cal=0xf, 0x4->0xf */ ++ val = readl(combphy->phy_clk + 0x0704); ++ tmp = GENMASK(9, 8) | BIT(11); ++ if (enable) ++ val |= tmp; ++ else ++ val &= ~tmp; ++ writel(val, combphy->phy_clk + 0x0704); ++ ++ /* CDR_div_fin_gain1 */ ++ val = readl(combphy->phy_clk + 0x0400); ++ if (enable) ++ val |= BIT(4); ++ else ++ val &= ~BIT(4); ++ writel(val, combphy->phy_clk + 0x0400); ++ ++ /* CDR_div1_fin_gain1 */ ++ val = readl(combphy->phy_clk + 0x0404); ++ tmp = GENMASK(3, 0) | BIT(5); ++ if (enable) ++ val |= tmp; ++ else ++ val &= ~tmp; ++ writel(val, combphy->phy_clk + 0x0404); ++ ++ /* CDR_div3_fin_gain1 */ ++ val = readl(combphy->phy_clk + 0x0408); ++ if (enable) ++ val |= BIT(5); ++ else ++ val &= ~BIT(5); ++ writel(val, combphy->phy_clk + 0x0408); ++ ++ val = readl(combphy->phy_clk + 0x109c); ++ if (enable) ++ val |= BIT(1); ++ else ++ val &= ~BIT(1); ++ writel(val, combphy->phy_clk + 0x109c); ++ ++ /* balance parm configure */ ++ if (combphy->drvdata->has_u3_phy_data_quirk) { ++ val = readl(combphy->phy_clk + 0x0804); ++ if (enable) ++ val |= (0x6<<4); ++ else ++ val &= ~(0xf<<4); ++ writel(val, combphy->phy_clk + 0x0804); ++ } ++ ++ /* SSC configure */ ++ val = readl(combphy->phy_clk + 0x107c); ++ tmp = 0x3f << 12; ++ val = val & (~tmp); ++ val |= ((0x1 << 12) & tmp); /* div_N */ ++ writel(val, combphy->phy_clk + 0x107c); ++ ++ val = readl(combphy->phy_clk + 0x1020); ++ tmp = 0x1f << 0; ++ val = val & (~tmp); ++ val |= ((0x6 << 0) & tmp); /* modulation freq div */ ++ writel(val, combphy->phy_clk + 0x1020); ++ ++ val = readl(combphy->phy_clk + 0x1034); ++ tmp = 0x7f << 16; ++ val = val & (~tmp); ++ val |= ((0x9 << 16) & tmp); /* spread[6:0], 400*9=4410ppm ssc */ ++ writel(val, 
combphy->phy_clk + 0x1034); ++ ++ val = readl(combphy->phy_clk + 0x101c); ++ tmp = 0x1 << 27; ++ val = val & (~tmp); ++ val |= ((0x1 << 27) & tmp); /* choose downspread */ ++ ++ tmp = 0x1 << 28; ++ val = val & (~tmp); ++ if (enable) ++ val |= ((0x0 << 28) & tmp); /* don't disable ssc = 0 */ ++ else ++ val |= ((0x1 << 28) & tmp); /* don't enable ssc = 1 */ ++ writel(val, combphy->phy_clk + 0x101c); ++ ++#ifdef SUN55i_INNO_COMMBOPHY_DEBUG ++ /* TX Eye configure bypass_en */ ++ val = readl(combphy->phy_clk + 0x0ddc); ++ if (enable) ++ val |= BIT(4); /* 0x0ddc[4]=1 */ ++ else ++ val &= ~BIT(4); ++ writel(val, combphy->phy_clk + 0x0ddc); ++ ++ /* Leg_cur[6:0] - 7'd84 */ ++ val = readl(combphy->phy_clk + 0x0ddc); ++ val |= ((0x54 & BIT(6)) >> 3); /* 0x0ddc[3] */ ++ writel(val, combphy->phy_clk + 0x0ddc); ++ ++ val = readl(combphy->phy_clk + 0x0de0); ++ val |= ((0x54 & GENMASK(5, 0)) << 2); /* 0x0de0[7:2] */ ++ writel(val, combphy->phy_clk + 0x0de0); ++ ++ /* Leg_curb[5:0] - 6'd18 */ ++ val = readl(combphy->phy_clk + 0x0de4); ++ val |= ((0x12 & GENMASK(5, 1)) >> 1); /* 0x0de4[4:0] */ ++ writel(val, combphy->phy_clk + 0x0de4); ++ ++ val = readl(combphy->phy_clk + 0x0de8); ++ val |= ((0x12 & BIT(0)) << 7); /* 0x0de8[7] */ ++ writel(val, combphy->phy_clk + 0x0de8); ++ ++ /* Exswing_isel */ ++ val = readl(combphy->phy_clk + 0x0028); ++ val |= (0x4 << 28); /* 0x28[30:28] */ ++ writel(val, combphy->phy_clk + 0x0028); ++ ++ /* Exswing_en */ ++ val = readl(combphy->phy_clk + 0x0028); ++ if (enable) ++ val |= BIT(31); /* 0x28[31]=1 */ ++ else ++ val &= ~BIT(31); ++ writel(val, combphy->phy_clk + 0x0028); ++#endif ++} ++ ++static int sun55i_combphy_usb3_init(struct sun55i_combphy *combphy) ++{ ++ sun55i_combphy_usb3_phy_set(combphy, true); ++ ++ return 0; ++} ++ ++// static int sun55i_combphy_usb3_exit(struct sun55i_combphy *combphy) ++// { ++// sun55i_combphy_usb3_phy_set(combphy, false); ++ ++// return 0; ++// } ++ ++static void sun55i_combphy_pcie_phy_enable(struct 
sun55i_combphy *combphy) ++{ ++ u32 val; ++ ++ /* set the phy: ++ * bit(18): slv aclk enable ++ * bit(17): aclk enable ++ * bit(16): hclk enbale ++ * bit(1) : pcie_presetn ++ * bit(0) : pcie_power_up_rstn ++ */ ++ val = readl(combphy->phy_ctl + PCIE_COMBO_PHY_BGR); ++ val &= (~(0x03<<0)); ++ val &= (~(0x03<<16)); ++ val |= (0x03<<0); ++ if (combphy->drvdata->has_slv_clk) ++ val |= (0x07<<16); ++ else ++ val |= (0x03<<16); ++ writel(val, combphy->phy_ctl + PCIE_COMBO_PHY_BGR); ++ ++ ++ /* select phy mode, phy assert */ ++ val = readl(combphy->phy_ctl + PCIE_COMBO_PHY_CTL); ++ val &= (~PHY_USE_SEL); ++ val &= (~(0x03<<8)); ++ val &= (~PHY_FPGA_SYS_RSTN); ++ val &= (~PHY_RSTN); ++ writel(val, combphy->phy_ctl + PCIE_COMBO_PHY_CTL); ++ ++ /* phy De-assert */ ++ val = readl(combphy->phy_ctl + PCIE_COMBO_PHY_CTL); ++ val &= (~PHY_CLK_SEL); ++ val &= (~(0x03<<8)); ++ val &= (~PHY_FPGA_SYS_RSTN); ++ val &= (~PHY_RSTN); ++ val |= PHY_RSTN; ++ writel(val, combphy->phy_ctl + PCIE_COMBO_PHY_CTL); ++ ++ val = readl(combphy->phy_ctl + PCIE_COMBO_PHY_CTL); ++ val &= (~PHY_CLK_SEL); ++ val &= (~(0x03<<8)); ++ val &= (~PHY_FPGA_SYS_RSTN); ++ val &= (~PHY_RSTN); ++ val |= PHY_RSTN; ++ val |= (PHY_FPGA_SYS_RSTN); ++ writel(val, combphy->phy_ctl + PCIE_COMBO_PHY_CTL); ++ ++} ++ ++static void sun55i_combphy_pcie_phy_100M(struct sun55i_combphy *combphy) ++{ ++ u32 val; ++ ++ val = readl(combphy->phy_clk + 0x1004); ++ val &= ~(0x3<<3); ++ val &= ~(0x1<<0); ++ val |= (0x1<<0); ++ val |= (0x1<<2); ++ val |= (0x1<<4); ++ writel(val, combphy->phy_clk + 0x1004); ++ ++ val = readl(combphy->phy_clk + 0x1018); ++ val &= ~(0x3<<4); ++ val |= (0x3<<4); ++ writel(val, combphy->phy_clk + 0x1018); ++ ++ val = readl(combphy->phy_clk + 0x101c); ++ val &= ~(0x0fffffff); ++ writel(val, combphy->phy_clk + 0x101c); ++ ++ /* if need optimize jitter parm*/ ++ if (combphy->drvdata->need_optimize_jitter) { ++ val = readl(combphy->phy_clk + 0x107c); ++ val &= ~(0x3ffff); ++ val |= (0x4<<12); ++ val |= 0x64; ++ 
writel(val, combphy->phy_clk + 0x107c); ++ ++ val = readl(combphy->phy_clk + 0x1030); ++ val &= ~(0x3<<20); ++ writel(val, combphy->phy_clk + 0x1030); ++ ++ val = readl(combphy->phy_clk + 0x1050); ++ val &= ~(0x7<<0); ++ val &= ~(0x7<<5); ++ val &= ~(0x3<<3); ++ val |= (0x3<<3); ++ writel(val, combphy->phy_clk + 0x1050); ++ } else { ++ val = readl(combphy->phy_clk + 0x107c); ++ val &= ~(0x3ffff); ++ val |= (0x2<<12); ++ val |= 0x32; ++ writel(val, combphy->phy_clk + 0x107c); ++ ++ val = readl(combphy->phy_clk + 0x1030); ++ val &= ~(0x3<<20); ++ writel(val, combphy->phy_clk + 0x1030); ++ ++ val = readl(combphy->phy_clk + 0x1050); ++ val &= ~(0x7<<5); ++ val |= (0x1<<5); ++ writel(val, combphy->phy_clk + 0x1050); ++ } ++ ++ val = readl(combphy->phy_clk + 0x1054); ++ val &= ~(0x7<<5); ++ val |= (0x1<<5); ++ writel(val, combphy->phy_clk + 0x1054); ++ ++ val = readl(combphy->phy_clk + 0x0804); ++ val &= ~(0xf<<4); ++ val |= (0xc<<4); ++ writel(val, combphy->phy_clk + 0x0804); ++ ++ val = readl(combphy->phy_clk + 0x109c); ++ val &= ~(0x3<<8); ++ val |= (0x1<<1); ++ writel(val, combphy->phy_clk + 0x109c); ++ ++ writel(0x80540a0a, combphy->phy_clk + 0x1418); ++} ++ ++static int sun55i_combphy_pcie_init(struct sun55i_combphy *combphy) ++{ ++ sun55i_combphy_pcie_phy_100M(combphy); ++ ++ sun55i_combphy_pcie_phy_enable(combphy); ++ ++ return 0; ++} ++ ++static int sun55i_combphy_set_mode(struct sun55i_combphy *combphy) ++{ ++ switch (combphy->mode) { ++ case PHY_TYPE_PCIE: ++ sun55i_combphy_pcie_init(combphy); ++ break; ++ case PHY_TYPE_USB3: ++ if (combphy->user == PHY_USE_BY_PCIE_USB3_U2) { ++ sun55i_combphy_pcie_init(combphy); ++ } else if (combphy->user == PHY_USE_BY_USB3) { ++ sun55i_combphy_usb3_init(combphy); ++ } ++ break; ++ default: ++ printf("incompatible PHY type\n"); ++ return -EINVAL; ++ } ++ ++ return 0; ++} ++ ++static void combo_phy_mode_set(struct sun55i_combphy *combphy, bool enable) ++{ ++ u32 val; ++ ++ val = readl(COMBO_REG_PHYCTRL(combphy->phy_ctl)); ++ 
++ /* Set PHY_USE_SEL based on user (default: clear for PCIE/PCIE_USB3_U2, set for USB3) */ ++ val &= ~PHY_USE_SEL; /* Default: clear (PCIE mode) */ ++ if (combphy->user == PHY_USE_BY_USB3) ++ val |= PHY_USE_SEL; ++ ++ /* Set PHY_CLK_SEL based on ref (default: clear for internal, set for external) */ ++ val &= ~PHY_CLK_SEL; /* Default: clear (internal clock) */ ++ if (combphy->ref == EXTER_DIF_REF_CLK) ++ val |= PHY_CLK_SEL; ++ ++ /* Set or clear PHY_RSTN based on enable */ ++ if (enable) ++ val |= PHY_RSTN; ++ else ++ val &= ~PHY_RSTN; ++ ++ writel(val, COMBO_REG_PHYCTRL(combphy->phy_ctl)); ++} ++ ++/* PCIE USB3 Sub-system Application */ ++static void combo_pcie_clk_set(struct sun55i_combphy *combphy, bool enable) ++{ ++ u32 val, tmp = 0; ++ ++ val = readl(COMBO_REG_PCIEBGR(combphy->phy_ctl)); ++ if (combphy->drvdata->has_slv_clk) ++ tmp = PCIE_SLV_ACLK_EN | PCIE_ACLK_EN | PCIE_HCLK_EN | PCIE_PERSTN | PCIE_PW_UP_RSTN; ++ else ++ tmp = PCIE_ACLK_EN | PCIE_HCLK_EN | PCIE_PERSTN | PCIE_PW_UP_RSTN; ++ if (enable) ++ val |= tmp; ++ else ++ val &= ~tmp; ++ writel(val, COMBO_REG_PCIEBGR(combphy->phy_ctl)); ++} ++ ++static void combo_usb3_clk_set(struct sun55i_combphy *combphy, bool enable) ++{ ++ u32 val, tmp = 0; ++ ++ val = readl(COMBO_REG_USB3BGR(combphy->phy_ctl)); ++ if (combphy->drvdata->has_u2_phy_mux) ++ tmp = USB3_ACLK_EN | USB3_HCLK_EN | USB3_U2_PHY_MUX_SEL | USB3_U2_PHY_RSTN | USB3_U2_PHY_MUX_EN; ++ else ++ tmp = USB3_ACLK_EN | USB3_HCLK_EN | USB3_RESETN; ++ if (enable) ++ val |= tmp; ++ else ++ val &= ~tmp; ++ writel(val, COMBO_REG_USB3BGR(combphy->phy_ctl)); ++} ++ ++static u32 combo_sysver_get(struct sun55i_combphy *combphy) ++{ ++ u32 reg; ++ ++ reg = readl(COMBO_REG_SYSVER(combphy->phy_ctl)); ++ ++ return reg; ++} ++ ++static void pcie_usb3_sub_system_enable(struct sun55i_combphy *combphy) ++{ ++ combo_phy_mode_set(combphy, true); ++ ++ if (combphy->user == PHY_USE_BY_PCIE) ++ combo_pcie_clk_set(combphy, true); ++ else if (combphy->user == 
PHY_USE_BY_USB3) ++ combo_usb3_clk_set(combphy, true); ++ else if (combphy->user == PHY_USE_BY_PCIE_USB3_U2) { ++ combo_pcie_clk_set(combphy, true); ++ combo_usb3_clk_set(combphy, true); ++ } ++ ++ combphy->vernum = combo_sysver_get(combphy); ++} ++ ++static int pcie_usb3_sub_system_init(void *phy) ++{ ++ struct sun55i_combphy *combphy = (struct sun55i_combphy *)(phy); ++ unsigned long reg_value = 0; ++ ++ if (combphy->initialized) ++ return 0; ++ ++ reg_value = readl(PCIE_BGR_REG); ++ reg_value |= (1 << PCIE_BRG_REG_RST); ++ writel(reg_value, PCIE_BGR_REG); ++ ++ reg_value = readl(0x2001a84); ++ reg_value |= (1 << PCIE_REF_CLK_GATING); ++ //reg_value = 0x81000001; ++ writel(reg_value, 0x2001a84); ++ ++ pcie_usb3_sub_system_enable(combphy); ++ ++ combphy->initialized = true; ++ ++ return 0; ++} ++ ++int sun55i_combphy_init(struct phy *phy) ++{ ++ struct sun55i_combphy *combphy = dev_get_priv(phy->dev); ++ int ret; ++ ++ if (!combphy) { ++ printf("PHY drvdata not set!\n"); ++ return -EIO; ++ } ++ ++ if (combphy->select3v3_supply) { ++ ret = regulator_set_enable_if_allowed(combphy->select3v3_supply, true); ++ if (ret && ret != -EALREADY) { ++ printf("PHY: Failed to enable 3.3V supply: %d\n", ret); ++ return ret; ++ } ++ } ++ ++ ret = reset_deassert(&combphy->reset); ++ if (ret) { ++ printf("PHY: Failed to deassert reset: %d\n", ret); ++ regulator_set_enable(combphy->select3v3_supply, false); ++ return ret; ++ } ++ ++ ret = pcie_usb3_sub_system_init(combphy); ++ if (ret) { ++ printf("PHY: failed to init sub system\n"); ++ reset_assert(&combphy->reset); ++ regulator_set_enable(combphy->select3v3_supply, false); ++ return ret; ++ } ++ ++ ret = sun55i_combphy_set_mode(combphy); ++ if (ret) { ++ printf("PHY: invalid number of arguments\n"); ++ reset_assert(&combphy->reset); ++ regulator_set_enable(combphy->select3v3_supply, false); ++ return ret; ++ } ++ ++ return 0; ++} ++ ++static const struct sun55i_combophy_of_data sun55i_inno_v1_of_data = { ++ .has_cfg_clk = false, 
++}; ++ ++static int sun55i_inno_phy_probe(struct udevice *dev) ++{ ++ struct sun55i_combphy *combphy = dev_get_priv(dev); ++ int ret; ++ ++ combphy->phy_ctl = dev_read_addr_name_ptr(dev, "phy-ctl"); ++ if (!combphy->phy_ctl) ++ return -ENODEV; ++ ++ combphy->phy_clk = dev_read_addr_name_ptr(dev, "phy-clk"); ++ if (!combphy->phy_clk) ++ return -ENODEV; ++ ++ ret = reset_get_by_index(dev, 0, &combphy->reset); ++ if (ret) ++ return ret; ++ ++ ret = device_get_supply_regulator(dev, "select3v3-supply", &combphy->select3v3_supply); ++ if (ret) ++ return ret; ++ ++ combphy->drvdata = (const struct sun55i_combophy_of_data *)dev_get_driver_data(dev); ++ combphy->user = PHY_USE_BY_PCIE; ++ combphy->mode = PHY_TYPE_PCIE; ++ combphy->ref = EXTER_DIF_REF_CLK; ++ ++ return 0; ++} ++ ++static const struct phy_ops sun55i_inno_phy_ops = { ++ .init = sun55i_combphy_init, ++}; ++ ++static const struct udevice_id sun55i_inno_phy_of_match_table[] = { ++ { ++ .compatible = "allwinner,inno-combphy", ++ .data = (ulong)&sun55i_inno_v1_of_data, ++ }, ++}; ++ ++U_BOOT_DRIVER(sun55i_inno_combophy) = { ++ .name = "sun55i_inno_combophy", ++ .id = UCLASS_PHY, ++ .of_match = sun55i_inno_phy_of_match_table, ++ .ops = &sun55i_inno_phy_ops, ++ .probe = sun55i_inno_phy_probe, ++ .priv_auto = sizeof(struct sun55i_combphy), ++}; +-- +Armbian +