diff --git a/target/linux/ipq40xx/config-4.9 b/target/linux/ipq40xx/config-4.9 new file mode 100644 index 000000000..376ffb85c --- /dev/null +++ b/target/linux/ipq40xx/config-4.9 @@ -0,0 +1,497 @@ +CONFIG_ALIGNMENT_TRAP=y +# CONFIG_AMBA_PL08X is not set +CONFIG_APQ_GCC_8084=y +CONFIG_APQ_MMCC_8084=y +CONFIG_AR40XX_PHY=y +CONFIG_AR8216_PHY=y +CONFIG_ARCH_CLOCKSOURCE_DATA=y +CONFIG_ARCH_HAS_ELF_RANDOMIZE=y +CONFIG_ARCH_HAS_GCOV_PROFILE_ALL=y +CONFIG_ARCH_HAS_SG_CHAIN=y +CONFIG_ARCH_HAS_TICK_BROADCAST=y +CONFIG_ARCH_HAVE_CUSTOM_GPIO_H=y +CONFIG_ARCH_HIBERNATION_POSSIBLE=y +CONFIG_ARCH_IPQ40XX=y +# CONFIG_ARCH_MDM9615 is not set +CONFIG_ARCH_MIGHT_HAVE_PC_PARPORT=y +CONFIG_ARCH_MSM8960=y +CONFIG_ARCH_MSM8974=y +CONFIG_ARCH_MSM8X60=y +CONFIG_ARCH_MULTIPLATFORM=y +# CONFIG_ARCH_MULTI_CPU_AUTO is not set +CONFIG_ARCH_MULTI_V6_V7=y +CONFIG_ARCH_MULTI_V7=y +CONFIG_ARCH_NR_GPIO=0 +CONFIG_ARCH_QCOM=y +# CONFIG_ARCH_SELECT_MEMORY_MODEL is not set +# CONFIG_ARCH_SPARSEMEM_DEFAULT is not set +CONFIG_ARCH_SUPPORTS_ATOMIC_RMW=y +CONFIG_ARCH_SUPPORTS_BIG_ENDIAN=y +CONFIG_ARCH_SUPPORTS_UPROBES=y +CONFIG_ARCH_SUSPEND_POSSIBLE=y +CONFIG_ARCH_USE_BUILTIN_BSWAP=y +CONFIG_ARCH_USE_CMPXCHG_LOCKREF=y +CONFIG_ARCH_WANT_GENERAL_HUGETLB=y +CONFIG_ARCH_WANT_IPC_PARSE_VERSION=y +CONFIG_ARM=y +CONFIG_ARM_AMBA=y +CONFIG_ARM_APPENDED_DTB=y +CONFIG_ARM_ARCH_TIMER=y +CONFIG_ARM_ARCH_TIMER_EVTSTREAM=y +CONFIG_ARM_ATAG_DTB_COMPAT=y +# CONFIG_ARM_ATAG_DTB_COMPAT_CMDLINE_EXTEND is not set +# CONFIG_ARM_ATAG_DTB_COMPAT_CMDLINE_FROM_BOOTLOADER is not set +CONFIG_ARM_ATAG_DTB_COMPAT_CMDLINE_MANGLE=y +CONFIG_ARM_CPUIDLE=y +CONFIG_ARM_CPU_SUSPEND=y +# CONFIG_ARM_CPU_TOPOLOGY is not set +CONFIG_ARM_GIC=y +CONFIG_ARM_HAS_SG_CHAIN=y +CONFIG_ARM_L1_CACHE_SHIFT=6 +CONFIG_ARM_L1_CACHE_SHIFT_6=y +# CONFIG_ARM_LPAE is not set +CONFIG_ARM_PATCH_IDIV=y +CONFIG_ARM_PATCH_PHYS_VIRT=y +CONFIG_ARM_QCOM_CPUFREQ=y +CONFIG_ARM_QCOM_CPUIDLE=y +# CONFIG_ARM_SMMU is not set +# CONFIG_ARM_SP805_WATCHDOG is not set 
+CONFIG_ARM_THUMB=y +# CONFIG_ARM_THUMBEE is not set +CONFIG_ARM_UNWIND=y +CONFIG_ARM_VIRT_EXT=y +CONFIG_AT803X_PHY=y +# CONFIG_BINFMT_FLAT is not set +CONFIG_BLK_DEV_LOOP=y +CONFIG_BLK_MQ_PCI=y +# CONFIG_BOOTPARAM_SOFTLOCKUP_PANIC is not set +CONFIG_BOOTPARAM_SOFTLOCKUP_PANIC_VALUE=0 +CONFIG_BOUNCE=y +CONFIG_BUS_TOPOLOGY_ADHOC=y +# CONFIG_CACHE_L2X0 is not set +CONFIG_CLKDEV_LOOKUP=y +CONFIG_CLKSRC_OF=y +CONFIG_CLKSRC_PROBE=y +CONFIG_CLKSRC_QCOM=y +CONFIG_CLONE_BACKWARDS=y +CONFIG_COMMON_CLK=y +CONFIG_COMMON_CLK_QCOM=y +CONFIG_CPUFREQ_DT=y +CONFIG_CPUFREQ_DT_PLATDEV=y +CONFIG_CPU_32v6K=y +CONFIG_CPU_32v7=y +CONFIG_CPU_ABRT_EV7=y +# CONFIG_CPU_BIG_ENDIAN is not set +# CONFIG_CPU_BPREDICT_DISABLE is not set +CONFIG_CPU_CACHE_V7=y +CONFIG_CPU_CACHE_VIPT=y +CONFIG_CPU_COPY_V6=y +CONFIG_CPU_CP15=y +CONFIG_CPU_CP15_MMU=y +CONFIG_CPU_FREQ=y +CONFIG_CPU_FREQ_DEFAULT_GOV_ONDEMAND=y +# CONFIG_CPU_FREQ_DEFAULT_GOV_PERFORMANCE is not set +CONFIG_CPU_FREQ_GOV_ATTR_SET=y +CONFIG_CPU_FREQ_GOV_COMMON=y +# CONFIG_CPU_FREQ_GOV_CONSERVATIVE is not set +CONFIG_CPU_FREQ_GOV_ONDEMAND=y +CONFIG_CPU_FREQ_GOV_PERFORMANCE=y +# CONFIG_CPU_FREQ_GOV_POWERSAVE is not set +# CONFIG_CPU_FREQ_GOV_USERSPACE is not set +CONFIG_CPU_FREQ_STAT=y +CONFIG_CPU_HAS_ASID=y +# CONFIG_CPU_ICACHE_DISABLE is not set +CONFIG_CPU_IDLE=y +CONFIG_CPU_IDLE_GOV_LADDER=y +CONFIG_CPU_IDLE_GOV_MENU=y +CONFIG_CPU_PABRT_V7=y +CONFIG_CPU_PM=y +CONFIG_CPU_RMAP=y +CONFIG_CPU_THERMAL=y +CONFIG_CPU_TLB_V7=y +CONFIG_CPU_V7=y +CONFIG_CRC16=y +# CONFIG_CRC32_SARWATE is not set +CONFIG_CRC32_SLICEBY8=y +CONFIG_CRYPTO_AEAD=y +CONFIG_CRYPTO_AEAD2=y +CONFIG_CRYPTO_CBC=y +CONFIG_CRYPTO_CTR=y +CONFIG_CRYPTO_DEFLATE=y +CONFIG_CRYPTO_DES=y +CONFIG_CRYPTO_DEV_QCE=y +CONFIG_CRYPTO_DRBG=y +CONFIG_CRYPTO_DRBG_HMAC=y +CONFIG_CRYPTO_DRBG_MENU=y +CONFIG_CRYPTO_ECB=y +CONFIG_CRYPTO_GF128MUL=y +CONFIG_CRYPTO_HASH=y +CONFIG_CRYPTO_HASH2=y +CONFIG_CRYPTO_HMAC=y +CONFIG_CRYPTO_HW=y +CONFIG_CRYPTO_JITTERENTROPY=y +CONFIG_CRYPTO_LZO=y 
+CONFIG_CRYPTO_MANAGER=y +CONFIG_CRYPTO_MANAGER2=y +CONFIG_CRYPTO_NULL=y +CONFIG_CRYPTO_NULL2=y +CONFIG_CRYPTO_RNG=y +CONFIG_CRYPTO_RNG2=y +CONFIG_CRYPTO_RNG_DEFAULT=y +CONFIG_CRYPTO_SEQIV=y +CONFIG_CRYPTO_SHA256=y +CONFIG_CRYPTO_WORKQUEUE=y +CONFIG_CRYPTO_XTS=y +CONFIG_DCACHE_WORD_ACCESS=y +CONFIG_DEBUG_GPIO=y +CONFIG_DEBUG_UNCOMPRESS=y +# CONFIG_DEBUG_USER is not set +CONFIG_DMADEVICES=y +CONFIG_DMA_ENGINE=y +CONFIG_DMA_OF=y +CONFIG_DMA_VIRTUAL_CHANNELS=y +CONFIG_DTC=y +CONFIG_DT_IDLE_STATES=y +# CONFIG_DWMAC_GENERIC is not set +CONFIG_DWMAC_IPQ806X=y +CONFIG_DYNAMIC_DEBUG=y +CONFIG_EARLY_PRINTK=y +CONFIG_EDAC_ATOMIC_SCRUB=y +CONFIG_EDAC_SUPPORT=y +CONFIG_ESSEDMA=y +CONFIG_ETHERNET_PACKET_MANGLE=y +CONFIG_FIXED_PHY=y +CONFIG_FIX_EARLYCON_MEM=y +CONFIG_GENERIC_ALLOCATOR=y +CONFIG_GENERIC_BUG=y +CONFIG_GENERIC_CLOCKEVENTS=y +CONFIG_GENERIC_CLOCKEVENTS_BROADCAST=y +CONFIG_GENERIC_EARLY_IOREMAP=y +CONFIG_GENERIC_IDLE_POLL_SETUP=y +CONFIG_GENERIC_IO=y +CONFIG_GENERIC_IRQ_SHOW=y +CONFIG_GENERIC_IRQ_SHOW_LEVEL=y +CONFIG_GENERIC_MSI_IRQ=y +CONFIG_GENERIC_MSI_IRQ_DOMAIN=y +CONFIG_GENERIC_PCI_IOMAP=y +CONFIG_GENERIC_PHY=y +CONFIG_GENERIC_PINCONF=y +CONFIG_GENERIC_SCHED_CLOCK=y +CONFIG_GENERIC_SMP_IDLE_THREAD=y +CONFIG_GENERIC_STRNCPY_FROM_USER=y +CONFIG_GENERIC_STRNLEN_USER=y +CONFIG_GENERIC_TIME_VSYSCALL=y +CONFIG_GPIOLIB=y +CONFIG_GPIOLIB_IRQCHIP=y +CONFIG_GPIO_SYSFS=y +CONFIG_HANDLE_DOMAIN_IRQ=y +CONFIG_HARDIRQS_SW_RESEND=y +CONFIG_HAS_DMA=y +CONFIG_HAS_IOMEM=y +CONFIG_HAS_IOPORT_MAP=y +# CONFIG_HAVE_64BIT_ALIGNED_ACCESS is not set +CONFIG_HAVE_ARCH_AUDITSYSCALL=y +CONFIG_HAVE_ARCH_BITREVERSE=y +CONFIG_HAVE_ARCH_JUMP_LABEL=y +CONFIG_HAVE_ARCH_KGDB=y +CONFIG_HAVE_ARCH_PFN_VALID=y +CONFIG_HAVE_ARCH_SECCOMP_FILTER=y +CONFIG_HAVE_ARCH_TRACEHOOK=y +CONFIG_HAVE_ARM_ARCH_TIMER=y +CONFIG_HAVE_ARM_SMCCC=y +# CONFIG_HAVE_BOOTMEM_INFO_NODE is not set +CONFIG_HAVE_CBPF_JIT=y +CONFIG_HAVE_CC_STACKPROTECTOR=y +CONFIG_HAVE_CLK=y +CONFIG_HAVE_CLK_PREPARE=y 
+CONFIG_HAVE_CONTEXT_TRACKING=y +CONFIG_HAVE_C_RECORDMCOUNT=y +CONFIG_HAVE_DEBUG_KMEMLEAK=y +CONFIG_HAVE_DMA_API_DEBUG=y +CONFIG_HAVE_DMA_CONTIGUOUS=y +CONFIG_HAVE_DYNAMIC_FTRACE=y +CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS=y +CONFIG_HAVE_FTRACE_MCOUNT_RECORD=y +CONFIG_HAVE_FUNCTION_GRAPH_TRACER=y +CONFIG_HAVE_FUNCTION_TRACER=y +CONFIG_HAVE_GENERIC_DMA_COHERENT=y +CONFIG_HAVE_IDE=y +CONFIG_HAVE_IRQ_TIME_ACCOUNTING=y +CONFIG_HAVE_MEMBLOCK=y +CONFIG_HAVE_MOD_ARCH_SPECIFIC=y +CONFIG_HAVE_NET_DSA=y +CONFIG_HAVE_OPROFILE=y +CONFIG_HAVE_OPTPROBES=y +CONFIG_HAVE_PERF_EVENTS=y +CONFIG_HAVE_PERF_REGS=y +CONFIG_HAVE_PERF_USER_STACK_DUMP=y +CONFIG_HAVE_PROC_CPU=y +CONFIG_HAVE_REGS_AND_STACK_ACCESS_API=y +CONFIG_HAVE_SMP=y +CONFIG_HAVE_SYSCALL_TRACEPOINTS=y +CONFIG_HAVE_UID16=y +CONFIG_HAVE_VIRT_CPU_ACCOUNTING_GEN=y +CONFIG_HIGHMEM=y +# CONFIG_HIGHPTE is not set +CONFIG_HWMON=y +CONFIG_HWSPINLOCK=y +CONFIG_HWSPINLOCK_QCOM=y +CONFIG_HW_RANDOM=y +CONFIG_HW_RANDOM_MSM=y +CONFIG_HZ_FIXED=0 +CONFIG_I2C=y +CONFIG_I2C_BOARDINFO=y +CONFIG_I2C_CHARDEV=y +CONFIG_I2C_HELPER_AUTO=y +CONFIG_I2C_QUP=y +CONFIG_INITRAMFS_SOURCE="" +CONFIG_IOMMU_HELPER=y +# CONFIG_IOMMU_IO_PGTABLE_ARMV7S is not set +# CONFIG_IOMMU_IO_PGTABLE_LPAE is not set +CONFIG_IOMMU_SUPPORT=y +CONFIG_IPQ_GCC_4019=y +CONFIG_IPQ_GCC_806X=y +# CONFIG_IPQ_LCC_806X is not set +CONFIG_IRQCHIP=y +CONFIG_IRQ_DOMAIN=y +CONFIG_IRQ_DOMAIN_HIERARCHY=y +CONFIG_IRQ_FORCED_THREADING=y +CONFIG_IRQ_WORK=y +CONFIG_KPSS_XCC=y +CONFIG_KRAITCC=y +CONFIG_KRAIT_CLOCKS=y +CONFIG_KRAIT_L2_ACCESSORS=y +CONFIG_LIBFDT=y +CONFIG_LOCKUP_DETECTOR=y +CONFIG_LOCK_SPIN_ON_OWNER=y +CONFIG_LZO_COMPRESS=y +CONFIG_LZO_DECOMPRESS=y +CONFIG_MDIO_BITBANG=y +CONFIG_MDIO_BOARDINFO=y +CONFIG_MDIO_GPIO=y +CONFIG_MDIO_IPQ40XX=y +# CONFIG_MDM_GCC_9615 is not set +# CONFIG_MDM_LCC_9615 is not set +# CONFIG_MFD_MAX77620 is not set +CONFIG_MFD_QCOM_RPM=y +# CONFIG_MFD_SPMI_PMIC is not set +CONFIG_MFD_SYSCON=y +CONFIG_MIGHT_HAVE_CACHE_L2X0=y +CONFIG_MIGHT_HAVE_PCI=y 
+CONFIG_MMC=y +CONFIG_MMC_ARMMMCI=y +CONFIG_MMC_BLOCK=y +CONFIG_MMC_BLOCK_MINORS=16 +CONFIG_MMC_QCOM_DML=y +CONFIG_MMC_SDHCI=y +CONFIG_MMC_SDHCI_MSM=y +# CONFIG_MMC_SDHCI_PCI is not set +CONFIG_MMC_SDHCI_PLTFM=y +# CONFIG_MMC_TIFM_SD is not set +CONFIG_MODULES_USE_ELF_REL=y +CONFIG_MSM_BUS_SCALING=y +CONFIG_MSM_GCC_8660=y +# CONFIG_MSM_GCC_8916 is not set +CONFIG_MSM_GCC_8960=y +CONFIG_MSM_GCC_8974=y +# CONFIG_MSM_GCC_8996 is not set +# CONFIG_MSM_IOMMU is not set +# CONFIG_MSM_LCC_8960 is not set +CONFIG_MSM_MMCC_8960=y +CONFIG_MSM_MMCC_8974=y +# CONFIG_MSM_MMCC_8996 is not set +CONFIG_MTD_CMDLINE_PARTS=y +CONFIG_MTD_M25P80=y +CONFIG_MTD_NAND=y +CONFIG_MTD_NAND_ECC=y +CONFIG_MTD_NAND_QCOM=y +# CONFIG_MTD_PHYSMAP_OF_VERSATILE is not set +CONFIG_MTD_QCOM_SMEM_PARTS=y +CONFIG_MTD_SPI_NOR=y +CONFIG_MTD_SPINAND_MT29F=y +CONFIG_MTD_SPINAND_ONDIEECC=y +CONFIG_MTD_SPLIT_FIRMWARE=y +CONFIG_MTD_SPLIT_FIT_FW=y +CONFIG_MTD_UBI=y +CONFIG_MTD_UBI_BEB_LIMIT=20 +CONFIG_MTD_UBI_BLOCK=y +# CONFIG_MTD_UBI_FASTMAP is not set +# CONFIG_MTD_UBI_GLUEBI is not set +CONFIG_MTD_UBI_WL_THRESHOLD=4096 +CONFIG_MULTI_IRQ_HANDLER=y +CONFIG_MUTEX_SPIN_ON_OWNER=y +CONFIG_NEED_DMA_MAP_STATE=y +CONFIG_NEON=y +CONFIG_NET_DSA=y +CONFIG_NET_DSA_HWMON=y +CONFIG_NET_DSA_QCA8K=y +CONFIG_NET_DSA_TAG_QCA=y +CONFIG_NET_FLOW_LIMIT=y +CONFIG_NET_PTP_CLASSIFY=y +CONFIG_NET_SWITCHDEV=y +CONFIG_NLS=y +CONFIG_NO_BOOTMEM=y +CONFIG_NO_HZ=y +CONFIG_NO_HZ_COMMON=y +CONFIG_NO_HZ_IDLE=y +CONFIG_NR_CPUS=4 +CONFIG_NVMEM=y +CONFIG_OF=y +CONFIG_OF_ADDRESS=y +CONFIG_OF_ADDRESS_PCI=y +CONFIG_OF_EARLY_FLATTREE=y +CONFIG_OF_FLATTREE=y +CONFIG_OF_GPIO=y +CONFIG_OF_IRQ=y +CONFIG_OF_MDIO=y +CONFIG_OF_NET=y +CONFIG_OF_PCI=y +CONFIG_OF_PCI_IRQ=y +CONFIG_OF_RESERVED_MEM=y +CONFIG_OLD_SIGACTION=y +CONFIG_OLD_SIGSUSPEND3=y +CONFIG_PADATA=y +CONFIG_PAGE_OFFSET=0xC0000000 +CONFIG_PCI=y +CONFIG_PCIEAER=y +CONFIG_PCIEPORTBUS=y +CONFIG_PCIE_DW=y +CONFIG_PCIE_QCOM=y +CONFIG_PCI_DEBUG=y +CONFIG_PCI_DISABLE_COMMON_QUIRKS=y 
+CONFIG_PCI_DOMAINS=y +CONFIG_PCI_DOMAINS_GENERIC=y +CONFIG_PCI_MSI=y +CONFIG_PCI_MSI_IRQ_DOMAIN=y +CONFIG_PERF_USE_VMALLOC=y +CONFIG_PGTABLE_LEVELS=2 +CONFIG_PHYLIB=y +# CONFIG_PHY_QCOM_APQ8064_SATA is not set +CONFIG_PHY_QCOM_IPQ806X_SATA=y +# CONFIG_PHY_QCOM_UFS is not set +CONFIG_PINCTRL=y +CONFIG_PINCTRL_APQ8064=y +# CONFIG_PINCTRL_APQ8084 is not set +CONFIG_PINCTRL_IPQ4019=y +CONFIG_PINCTRL_IPQ8064=y +# CONFIG_PINCTRL_MDM9615 is not set +CONFIG_PINCTRL_MSM=y +# CONFIG_PINCTRL_MSM8660 is not set +# CONFIG_PINCTRL_MSM8916 is not set +# CONFIG_PINCTRL_MSM8960 is not set +# CONFIG_PINCTRL_MSM8996 is not set +CONFIG_PINCTRL_MSM8X74=y +# CONFIG_PINCTRL_QCOM_SPMI_PMIC is not set +# CONFIG_PINCTRL_QCOM_SSBI_PMIC is not set +# CONFIG_PL330_DMA is not set +CONFIG_PM_OPP=y +CONFIG_POWER_RESET=y +# CONFIG_POWER_RESET_BRCMKONA is not set +CONFIG_POWER_RESET_MSM=y +CONFIG_POWER_SUPPLY=y +CONFIG_PPS=y +CONFIG_PRINTK_TIME=y +CONFIG_PTP_1588_CLOCK=y +CONFIG_QCOM_ADM=y +CONFIG_QCOM_BAM_DMA=y +CONFIG_QCOM_CLK_RPM=y +# CONFIG_QCOM_EBI2 is not set +CONFIG_QCOM_GDSC=y +CONFIG_QCOM_GSBI=y +CONFIG_QCOM_HFPLL=y +CONFIG_QCOM_PM=y +# CONFIG_QCOM_Q6V5_PIL is not set +CONFIG_QCOM_QFPROM=y +CONFIG_QCOM_RPMCC=y +CONFIG_QCOM_SCM=y +CONFIG_QCOM_SCM_32=y +# CONFIG_QCOM_SMD is not set +CONFIG_QCOM_SMEM=y +# CONFIG_QCOM_SMP2P is not set +# CONFIG_QCOM_SMSM is not set +CONFIG_QCOM_TCSR=y +CONFIG_QCOM_TSENS=y +# CONFIG_QCOM_WCNSS_PIL is not set +CONFIG_QCOM_WDT=y +# CONFIG_QRTR is not set +CONFIG_RAS=y +CONFIG_RATIONAL=y +CONFIG_RCU_CPU_STALL_TIMEOUT=21 +CONFIG_RCU_STALL_COMMON=y +CONFIG_REGMAP=y +CONFIG_REGMAP_I2C=y +CONFIG_REGMAP_MMIO=y +CONFIG_REGMAP_SPI=y +CONFIG_REGULATOR=y +CONFIG_REGULATOR_FIXED_VOLTAGE=y +CONFIG_REGULATOR_QCOM_RPM=y +# CONFIG_REGULATOR_QCOM_SPMI is not set +CONFIG_RESET_CONTROLLER=y +CONFIG_RFS_ACCEL=y +# CONFIG_RPMSG_QCOM_SMD is not set +CONFIG_RPS=y +CONFIG_RTC_CLASS=y +# CONFIG_RTC_DRV_CMOS is not set +CONFIG_RTC_I2C_AND_SPI=y +CONFIG_RWSEM_SPIN_ON_OWNER=y 
+CONFIG_RWSEM_XCHGADD_ALGORITHM=y +# CONFIG_SCHED_INFO is not set +# CONFIG_SCSI_DMA is not set +CONFIG_SERIAL_8250_FSL=y +# CONFIG_SERIAL_AMBA_PL010 is not set +# CONFIG_SERIAL_AMBA_PL011 is not set +CONFIG_SERIAL_MSM=y +CONFIG_SERIAL_MSM_CONSOLE=y +CONFIG_SMP=y +CONFIG_SMP_ON_UP=y +CONFIG_SPARSE_IRQ=y +CONFIG_SPI=y +# CONFIG_SPI_CADENCE_QUADSPI is not set +CONFIG_SPI_MASTER=y +CONFIG_SPI_QUP=y +CONFIG_SPMI=y +CONFIG_SPMI_MSM_PMIC_ARB=y +CONFIG_SRCU=y +CONFIG_STMMAC_ETH=y +CONFIG_STMMAC_PLATFORM=y +CONFIG_SWCONFIG=y +CONFIG_SWCONFIG_LEDS=y +CONFIG_SWIOTLB=y +CONFIG_SWPHY=y +CONFIG_SWP_EMULATE=y +CONFIG_SYS_SUPPORTS_APM_EMULATION=y +CONFIG_THERMAL=y +CONFIG_THERMAL_DEFAULT_GOV_STEP_WISE=y +CONFIG_THERMAL_GOV_STEP_WISE=y +CONFIG_THERMAL_HWMON=y +CONFIG_THERMAL_OF=y +# CONFIG_THUMB2_KERNEL is not set +CONFIG_TICK_CPU_ACCOUNTING=y +CONFIG_TREE_RCU=y +CONFIG_UBIFS_FS=y +CONFIG_UBIFS_FS_ADVANCED_COMPR=y +CONFIG_UBIFS_FS_LZO=y +CONFIG_UBIFS_FS_ZLIB=y +CONFIG_UEVENT_HELPER_PATH="" +CONFIG_UNCOMPRESS_INCLUDE="debug/uncompress.h" +CONFIG_USB=y +CONFIG_USB_COMMON=y +# CONFIG_USB_EHCI_HCD is not set +CONFIG_USB_IPQ4019_PHY=y +CONFIG_USB_PHY=y +CONFIG_USB_SUPPORT=y +# CONFIG_USB_UHCI_HCD is not set +CONFIG_USE_OF=y +CONFIG_VDSO=y +CONFIG_VECTORS_BASE=0xffff0000 +CONFIG_VFP=y +CONFIG_VFPv3=y +CONFIG_WATCHDOG_CORE=y +CONFIG_XPS=y +CONFIG_XZ_DEC_ARM=y +CONFIG_XZ_DEC_BCJ=y +CONFIG_ZBOOT_ROM_BSS=0 +CONFIG_ZBOOT_ROM_TEXT=0 +CONFIG_ZLIB_DEFLATE=y +CONFIG_ZLIB_INFLATE=y +CONFIG_MSM_PCIE=y \ No newline at end of file diff --git a/target/linux/ipq40xx/files-4.9/arch/arm/boot/dts/qcom-ipq4019-a42.dts b/target/linux/ipq40xx/files-4.9/arch/arm/boot/dts/qcom-ipq4019-a42.dts new file mode 100644 index 000000000..887be993e --- /dev/null +++ b/target/linux/ipq40xx/files-4.9/arch/arm/boot/dts/qcom-ipq4019-a42.dts @@ -0,0 +1,244 @@ +/* Copyright (c) 2015, The Linux Foundation. All rights reserved. 
+ * Copyright (c) 2017, Sven Eckelmann + * + * Permission to use, copy, modify, and/or distribute this software for any + * purpose with or without fee is hereby granted, provided that the above + * copyright notice and this permission notice appear in all copies. + * + * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES + * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF + * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR + * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES + * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN + * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF + * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. + * + */ + +#include "qcom-ipq4019.dtsi" +#include "qcom-ipq4019-bus.dtsi" +#include +#include +#include + +/ { + model = "OpenMesh A42"; + compatible = "openmesh,a42", "qcom,ipq4019"; + + reserved-memory { + #address-cells = <0x1>; + #size-cells = <0x1>; + ranges; + + rsvd1@87000000 { + reg = <0x87000000 0x500000>; + no-map; + }; + + wifi_dump@87500000 { + reg = <0x87500000 0x600000>; + no-map; + }; + + rsvd2@87B00000 { + reg = <0x87b00000 0x500000>; + no-map; + }; + }; + + soc { + tcsr@194b000 { + /* select hostmode */ + compatible = "qcom,tcsr"; + reg = <0x194b000 0x100>; + qcom,usb-hsphy-mode-select = ; + status = "ok"; + }; + + ess_tcsr@1953000 { + compatible = "qcom,tcsr"; + reg = <0x1953000 0x1000>; + qcom,ess-interface-select = ; + }; + + tcsr@1949000 { + compatible = "qcom,tcsr"; + reg = <0x1949000 0x100>; + qcom,wifi_glb_cfg = ; + }; + + tcsr@1957000 { + compatible = "qcom,tcsr"; + reg = <0x1957000 0x100>; + qcom,wifi_noc_memtype_m0_m2 = ; + }; + + pinctrl@1000000 { + serial_pins: serial_pinmux { + mux { + pins = "gpio60", "gpio61"; + function = "blsp_uart0"; + bias-disable; + }; + }; + + spi_0_pins: spi_0_pinmux { + pinmux { + function = "blsp_spi0"; + pins = "gpio55", "gpio56", 
"gpio57"; + }; + pinmux_cs { + function = "gpio"; + pins = "gpio54"; + }; + pinconf { + pins = "gpio55", "gpio56", "gpio57"; + drive-strength = <12>; + bias-disable; + }; + pinconf_cs { + pins = "gpio54"; + drive-strength = <2>; + bias-disable; + output-high; + }; + }; + }; + + blsp_dma: dma@7884000 { + status = "ok"; + }; + + spi_0: spi@78b5000 { + pinctrl-0 = <&spi_0_pins>; + pinctrl-names = "default"; + status = "ok"; + cs-gpios = <&tlmm 54 0>; + + m25p80@0 { + #address-cells = <1>; + #size-cells = <1>; + compatible = "jedec,spi-nor"; + reg = <0>; + spi-max-frequency = <24000000>; + + /* partitions are passed via bootloader */ + }; + }; + + serial@78af000 { + pinctrl-0 = <&serial_pins>; + pinctrl-names = "default"; + status = "ok"; + }; + + cryptobam: dma@8e04000 { + status = "ok"; + }; + + crypto@8e3a000 { + status = "ok"; + }; + + watchdog@b017000 { + status = "ok"; + }; + + usb2_hs_phy: hsphy@a8000 { + status = "ok"; + }; + + usb2: usb2@60f8800 { + status = "ok"; + }; + + mdio@90000 { + status = "okay"; + }; + + ess-switch@c000000 { + status = "okay"; + }; + + ess-psgmii@98000 { + status = "okay"; + }; + + edma@c080000 { + status = "okay"; + }; + + wifi@a000000 { + status = "okay"; + qcom,ath10k-calibration-variant = "OM-A42"; + }; + + wifi@a800000 { + status = "okay"; + qcom,ath10k-calibration-variant = "OM-A42"; + }; + }; + + gpio-keys { + compatible = "gpio-keys"; + + reset { + label = "reset"; + gpios = <&tlmm 59 GPIO_ACTIVE_LOW>; + linux,code = ; + }; + }; + + aliases { + led-boot = &power; + led-failsafe = &power; + led-running = &power; + led-upgrade = &power; + }; + + gpio-leds { + compatible = "gpio-leds"; + + red { + label = "a42:red:status"; + gpios = <&tlmm 0 GPIO_ACTIVE_HIGH>; + linux,default-trigger = "default-off"; + }; + + power: green { + label = "a42:green:status"; + gpios = <&tlmm 1 GPIO_ACTIVE_HIGH>; + }; + + blue { + label = "a42:blue:status"; + gpios = <&tlmm 2 GPIO_ACTIVE_HIGH>; + linux,default-trigger = "default-off"; + }; + }; + + 
watchdog { + compatible = "linux,wdt-gpio"; + gpios = <&tlmm 5 GPIO_ACTIVE_LOW>; + hw_algo = "toggle"; + /* hw_margin_ms is actually 300s but driver limits it to 60s */ + hw_margin_ms = <60000>; + always-running; + }; +}; + +&gmac0 { + qcom,phy_mdio_addr = <4>; + qcom,poll_required = <1>; + qcom,forced_speed = <1000>; + qcom,forced_duplex = <1>; + vlan_tag = <2 0x20>; +}; + +&gmac1 { + qcom,phy_mdio_addr = <3>; + qcom,poll_required = <1>; + qcom,forced_speed = <1000>; + qcom,forced_duplex = <1>; + vlan_tag = <1 0x10>; +}; diff --git a/target/linux/ipq40xx/files-4.9/arch/arm/boot/dts/qcom-ipq4019-bus.dtsi b/target/linux/ipq40xx/files-4.9/arch/arm/boot/dts/qcom-ipq4019-bus.dtsi new file mode 100644 index 000000000..169505973 --- /dev/null +++ b/target/linux/ipq40xx/files-4.9/arch/arm/boot/dts/qcom-ipq4019-bus.dtsi @@ -0,0 +1,1142 @@ +/* Copyright (c) 2015, The Linux Foundation. All rights reserved. + * + * Permission to use, copy, modify, and/or distribute this software for any + * purpose with or without fee is hereby granted, provided that the above + * copyright notice and this permission notice appear in all copies. + * + * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES + * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF + * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR + * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES + * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN + * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF + * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. 
+ * + */ + +#include + +/ { + +soc { + ad_hoc_bus: ad-hoc-bus { + compatible = "qcom,msm-bus-device"; + reg = <0x580000 0x14000>, + <0x500000 0x11000>; + reg-names = "snoc-base", "pcnoc-base"; + + /*Buses*/ + + fab_pcnoc: fab-pcnoc { + cell-id = ; + label = "fab-pcnoc"; + qcom,fab-dev; + qcom,base-name = "pcnoc-base"; + qcom,bypass-qos-prg; + qcom,bus-type = <1>; + qcom,qos-off = <0x1000>; + qcom,base-offset = <0x0>; + clocks = <>; + }; + + fab_snoc: fab-snoc { + cell-id = ; + label = "fab-snoc"; + qcom,fab-dev; + qcom,base-name = "snoc-base"; + qcom,bypass-qos-prg; + qcom,bus-type = <1>; + qcom,qos-off = <0x80>; + qcom,base-offset = <0x0>; + clocks = <>; + }; + + /*Masters*/ + + mas_blsp_bam: mas-blsp-bam { + cell-id = ; + label = "mas-blsp-bam"; + qcom,buswidth = <4>; + qcom,ap-owned; + qcom,connections = <&pcnoc_m_0>; + qcom,bus-dev = <&fab_pcnoc>; + qcom,mas-rpm-id = ; + qcom,blacklist = <&slv_tcsr &slv_mdio &slv_adss_cfg + &slv_fephy_cfg &slv_wss1_apu_cfg &slv_ddrc_mpu1_cfg + &slv_ddrc_mpu0_cfg &slv_qpic_cfg &slv_ddrc_cfg + &slv_pcnoc_cfg &slv_ess_apu_cfg &slv_imem_cfg + &slv_srif &slv_prng &slv_qdss_cfg + &slv_wss0_apu_cfg &slv_ddrc_apu_cfg &slv_gcnt + &slv_tlmm &slv_wss0_vmidmt_cfg &slv_prng_apu_cfg + &slv_boot_rom &slv_security &slv_spdm + &slv_pcnoc_mpu_cfg &slv_ddrc_mpu2_cfg &slv_ess_vmidmt_cfg + &slv_qhss_apu_cfg &slv_adss_vmidmt_cfg &slv_clk_ctl + &slv_adss_apu &slv_blsp_cfg &slv_usb2_cfg + &slv_srvc_pcnoc &slv_snoc_mpu_cfg &slv_wss1_vmidmt_cfg + &slv_sdcc_cfg &slv_snoc_cfg>; + }; + + mas_usb2_bam: mas-usb2-bam { + cell-id = ; + label = "mas-usb2-bam"; + qcom,buswidth = <8>; + qcom,ap-owned; + qcom,qport = <15>; + qcom,qos-mode = "fixed"; + qcom,connections = <&slv_pcnoc_snoc>; + qcom,prio1 = <1>; + qcom,prio0 = <1>; + qcom,bus-dev = <&fab_pcnoc>; + qcom,mas-rpm-id = ; + qcom,blacklist = <&slv_tcsr &slv_mdio &slv_adss_cfg + &slv_fephy_cfg &slv_wss1_apu_cfg &slv_ddrc_mpu1_cfg + &slv_ddrc_mpu0_cfg &slv_qpic_cfg &slv_ddrc_cfg + &slv_pcnoc_cfg 
&slv_ess_apu_cfg &slv_imem_cfg + &slv_srif &slv_prng &slv_qdss_cfg + &slv_wss0_apu_cfg &slv_ddrc_apu_cfg &slv_gcnt + &slv_tlmm &slv_wss0_vmidmt_cfg &slv_prng_apu_cfg + &slv_boot_rom &slv_security &slv_spdm + &slv_pcnoc_mpu_cfg &slv_ddrc_mpu2_cfg &slv_ess_vmidmt_cfg + &slv_qhss_apu_cfg &slv_adss_vmidmt_cfg &slv_clk_ctl + &slv_adss_apu &slv_blsp_cfg &slv_usb2_cfg + &slv_srvc_pcnoc &slv_snoc_mpu_cfg &slv_wss1_vmidmt_cfg + &slv_sdcc_cfg &slv_snoc_cfg>; + }; + + mas_adss_dma0: mas-adss-dma0 { + cell-id = ; + label = "mas-adss-dma0"; + qcom,buswidth = <4>; + qcom,ap-owned; + qcom,connections = <&pcnoc_m_1>; + qcom,bus-dev = <&fab_pcnoc>; + qcom,mas-rpm-id = ; + qcom,blacklist = <&slv_tcsr &slv_mdio &slv_adss_cfg + &slv_fephy_cfg &slv_wss1_apu_cfg &slv_ddrc_mpu1_cfg + &slv_ddrc_mpu0_cfg &slv_qpic_cfg &slv_ddrc_cfg + &slv_pcnoc_cfg &slv_ess_apu_cfg &slv_imem_cfg + &slv_srif &slv_prng &slv_qdss_cfg + &slv_wss0_apu_cfg &slv_ddrc_apu_cfg &slv_gcnt + &slv_tlmm &slv_wss0_vmidmt_cfg &slv_prng_apu_cfg + &slv_boot_rom &slv_security &slv_spdm + &slv_pcnoc_mpu_cfg &slv_ddrc_mpu2_cfg &slv_ess_vmidmt_cfg + &slv_qhss_apu_cfg &slv_adss_vmidmt_cfg &slv_clk_ctl + &slv_adss_apu &slv_blsp_cfg &slv_usb2_cfg + &slv_srvc_pcnoc &slv_snoc_mpu_cfg &slv_wss1_vmidmt_cfg + &slv_sdcc_cfg &slv_snoc_cfg>; + }; + + mas_adss_dma1: mas-adss-dma1 { + cell-id = ; + label = "mas-adss-dma1"; + qcom,buswidth = <4>; + qcom,ap-owned; + qcom,connections = <&pcnoc_m_1>; + qcom,bus-dev = <&fab_pcnoc>; + qcom,mas-rpm-id = ; + qcom,blacklist = <&slv_tcsr &slv_mdio &slv_adss_cfg + &slv_fephy_cfg &slv_wss1_apu_cfg &slv_ddrc_mpu1_cfg + &slv_ddrc_mpu0_cfg &slv_qpic_cfg &slv_ddrc_cfg + &slv_pcnoc_cfg &slv_ess_apu_cfg &slv_imem_cfg + &slv_srif &slv_prng &slv_qdss_cfg + &slv_wss0_apu_cfg &slv_ddrc_apu_cfg &slv_gcnt + &slv_tlmm &slv_wss0_vmidmt_cfg &slv_prng_apu_cfg + &slv_boot_rom &slv_security &slv_spdm + &slv_pcnoc_mpu_cfg &slv_ddrc_mpu2_cfg &slv_ess_vmidmt_cfg + &slv_qhss_apu_cfg &slv_adss_vmidmt_cfg &slv_clk_ctl + 
&slv_adss_apu &slv_blsp_cfg &slv_usb2_cfg + &slv_srvc_pcnoc &slv_snoc_mpu_cfg &slv_wss1_vmidmt_cfg + &slv_sdcc_cfg &slv_snoc_cfg>; + }; + + mas_adss_dma2: mas-adss-dma2 { + cell-id = ; + label = "mas-adss-dma2"; + qcom,buswidth = <4>; + qcom,ap-owned; + qcom,connections = <&pcnoc_m_1>; + qcom,bus-dev = <&fab_pcnoc>; + qcom,mas-rpm-id = ; + qcom,blacklist = <&slv_tcsr &slv_mdio &slv_adss_cfg + &slv_fephy_cfg &slv_wss1_apu_cfg &slv_ddrc_mpu1_cfg + &slv_ddrc_mpu0_cfg &slv_qpic_cfg &slv_ddrc_cfg + &slv_pcnoc_cfg &slv_ess_apu_cfg &slv_imem_cfg + &slv_srif &slv_prng &slv_qdss_cfg + &slv_wss0_apu_cfg &slv_ddrc_apu_cfg &slv_gcnt + &slv_tlmm &slv_wss0_vmidmt_cfg &slv_prng_apu_cfg + &slv_boot_rom &slv_security &slv_spdm + &slv_pcnoc_mpu_cfg &slv_ddrc_mpu2_cfg &slv_ess_vmidmt_cfg + &slv_qhss_apu_cfg &slv_adss_vmidmt_cfg &slv_clk_ctl + &slv_adss_apu &slv_blsp_cfg &slv_usb2_cfg + &slv_srvc_pcnoc &slv_snoc_mpu_cfg &slv_wss1_vmidmt_cfg + &slv_sdcc_cfg &slv_snoc_cfg>; + }; + + mas_adss_dma3: mas-adss-dma3 { + cell-id = ; + label = "mas-adss-dma3"; + qcom,buswidth = <4>; + qcom,ap-owned; + qcom,connections = <&pcnoc_m_1>; + qcom,bus-dev = <&fab_pcnoc>; + qcom,mas-rpm-id = ; + qcom,blacklist = <&slv_tcsr &slv_mdio &slv_adss_cfg + &slv_fephy_cfg &slv_wss1_apu_cfg &slv_ddrc_mpu1_cfg + &slv_ddrc_mpu0_cfg &slv_qpic_cfg &slv_ddrc_cfg + &slv_pcnoc_cfg &slv_ess_apu_cfg &slv_imem_cfg + &slv_srif &slv_prng &slv_qdss_cfg + &slv_wss0_apu_cfg &slv_ddrc_apu_cfg &slv_gcnt + &slv_tlmm &slv_wss0_vmidmt_cfg &slv_prng_apu_cfg + &slv_boot_rom &slv_security &slv_spdm + &slv_pcnoc_mpu_cfg &slv_ddrc_mpu2_cfg &slv_ess_vmidmt_cfg + &slv_qhss_apu_cfg &slv_adss_vmidmt_cfg &slv_clk_ctl + &slv_adss_apu &slv_blsp_cfg &slv_usb2_cfg + &slv_srvc_pcnoc &slv_snoc_mpu_cfg &slv_wss1_vmidmt_cfg + &slv_sdcc_cfg &slv_snoc_cfg>; + }; + + mas_qpic_bam: mas-qpic-bam { + cell-id = ; + label = "mas-qpic-bam"; + qcom,buswidth = <4>; + qcom,ap-owned; + qcom,connections = <&pcnoc_m_0>; + qcom,bus-dev = <&fab_pcnoc>; + 
qcom,mas-rpm-id = ; + qcom,blacklist = <&slv_tcsr &slv_mdio &slv_adss_cfg + &slv_fephy_cfg &slv_wss1_apu_cfg &slv_ddrc_mpu1_cfg + &slv_ddrc_mpu0_cfg &slv_qpic_cfg &slv_ddrc_cfg + &slv_pcnoc_cfg &slv_ess_apu_cfg &slv_imem_cfg + &slv_srif &slv_prng &slv_qdss_cfg + &slv_wss0_apu_cfg &slv_ddrc_apu_cfg &slv_gcnt + &slv_tlmm &slv_wss0_vmidmt_cfg &slv_prng_apu_cfg + &slv_boot_rom &slv_security &slv_spdm + &slv_pcnoc_mpu_cfg &slv_ddrc_mpu2_cfg &slv_ess_vmidmt_cfg + &slv_qhss_apu_cfg &slv_adss_vmidmt_cfg &slv_clk_ctl + &slv_adss_apu &slv_blsp_cfg &slv_usb2_cfg + &slv_srvc_pcnoc &slv_snoc_mpu_cfg &slv_wss1_vmidmt_cfg + &slv_sdcc_cfg &slv_snoc_cfg>; + }; + + mas_spdm: mas-spdm { + cell-id = ; + label = "mas-spdm"; + qcom,buswidth = <4>; + qcom,ap-owned; + qcom,connections = <&pcnoc_m_0>; + qcom,bus-dev = <&fab_pcnoc>; + qcom,mas-rpm-id = ; + qcom,blacklist = <&slv_tcsr &slv_mdio &slv_adss_cfg + &slv_fephy_cfg &slv_wss1_apu_cfg &slv_ddrc_mpu1_cfg + &slv_ddrc_mpu0_cfg &slv_qpic_cfg &slv_ddrc_cfg + &slv_pcnoc_cfg &slv_ess_apu_cfg &slv_imem_cfg + &slv_srif &slv_prng &slv_qdss_cfg + &slv_wss0_apu_cfg &slv_ddrc_apu_cfg &slv_gcnt + &slv_tlmm &slv_wss0_vmidmt_cfg &slv_prng_apu_cfg + &slv_boot_rom &slv_security &slv_spdm + &slv_pcnoc_mpu_cfg &slv_ddrc_mpu2_cfg &slv_ess_vmidmt_cfg + &slv_qhss_apu_cfg &slv_adss_vmidmt_cfg &slv_clk_ctl + &slv_adss_apu &slv_blsp_cfg &slv_usb2_cfg + &slv_srvc_pcnoc &slv_snoc_mpu_cfg &slv_wss1_vmidmt_cfg + &slv_sdcc_cfg &slv_snoc_cfg>; + }; + + mas_pcnoc_cfg: mas-pcnoc-cfg { + cell-id = ; + label = "mas-pcnoc-cfg"; + qcom,buswidth = <4>; + qcom,ap-owned; + qcom,connections = <&slv_srvc_pcnoc>; + qcom,bus-dev = <&fab_pcnoc>; + qcom,mas-rpm-id = ; + }; + + mas_tic: mas-tic { + cell-id = ; + label = "mas-tic"; + qcom,buswidth = <4>; + qcom,ap-owned; + qcom,connections = <&pcnoc_int_0 &slv_pcnoc_snoc>; + qcom,bus-dev = <&fab_pcnoc>; + qcom,mas-rpm-id = ; + }; + + mas_sdcc_bam: mas-sdcc-bam { + cell-id = ; + label = "mas-sdcc-bam"; + qcom,buswidth = <8>; + 
qcom,ap-owned; + qcom,qport = <14>; + qcom,qos-mode = "fixed"; + qcom,connections = <&slv_pcnoc_snoc>; + qcom,prio1 = <0>; + qcom,prio0 = <0>; + qcom,bus-dev = <&fab_pcnoc>; + qcom,mas-rpm-id = ; + qcom,blacklist = <&slv_tcsr &slv_mdio &slv_adss_cfg + &slv_fephy_cfg &slv_wss1_apu_cfg &slv_ddrc_mpu1_cfg + &slv_ddrc_mpu0_cfg &slv_qpic_cfg &slv_ddrc_cfg + &slv_pcnoc_cfg &slv_ess_apu_cfg &slv_imem_cfg + &slv_srif &slv_prng &slv_qdss_cfg + &slv_wss0_apu_cfg &slv_ddrc_apu_cfg &slv_gcnt + &slv_tlmm &slv_wss0_vmidmt_cfg &slv_prng_apu_cfg + &slv_boot_rom &slv_security &slv_spdm + &slv_pcnoc_mpu_cfg &slv_ddrc_mpu2_cfg &slv_ess_vmidmt_cfg + &slv_qhss_apu_cfg &slv_adss_vmidmt_cfg &slv_clk_ctl + &slv_adss_apu &slv_blsp_cfg &slv_usb2_cfg + &slv_srvc_pcnoc &slv_snoc_mpu_cfg &slv_wss1_vmidmt_cfg + &slv_sdcc_cfg &slv_snoc_cfg>; + }; + + mas_snoc_pcnoc: mas-snoc-pcnoc { + cell-id = ; + label = "mas-snoc-pcnoc"; + qcom,buswidth = <4>; + qcom,ap-owned; + qcom,qport = <16>; + qcom,qos-mode = "fixed"; + qcom,connections = <&pcnoc_int_0>; + qcom,prio1 = <0>; + qcom,prio0 = <0>; + qcom,bus-dev = <&fab_pcnoc>; + qcom,mas-rpm-id = ; + }; + + mas_qdss_dap: mas-qdss-dap { + cell-id = ; + label = "mas-qdss-dap"; + qcom,buswidth = <4>; + qcom,ap-owned; + qcom,connections = <&pcnoc_int_0 &slv_pcnoc_snoc>; + qcom,bus-dev = <&fab_pcnoc>; + qcom,mas-rpm-id = ; + }; + + mas_ddrc_snoc: mas-ddrc-snoc { + cell-id = ; + label = "mas-ddrc-snoc"; + qcom,buswidth = <16>; + qcom,ap-owned; + qcom,connections = <&snoc_int_0 &snoc_int_1 + &slv_pcie>; + qcom,bus-dev = <&fab_snoc>; + qcom,mas-rpm-id = ; + qcom,blacklist = <&slv_snoc_ddrc_m1 &slv_srvc_snoc>; + }; + + mas_wss_0: mas-wss-0 { + cell-id = ; + label = "mas-wss-0"; + qcom,buswidth = <4>; + qcom,ap-owned; + qcom,qport = <26>; + qcom,qos-mode = "fixed"; + qcom,connections = <&snoc_int_0 &slv_snoc_ddrc_m1>; + qcom,prio1 = <0>; + qcom,prio0 = <0>; + qcom,bus-dev = <&fab_snoc>; + qcom,mas-rpm-id = ; + qcom,blacklist = <&slv_usb3_cfg &slv_ess_cfg &slv_pcie + 
&slv_wss1_cfg &slv_wss0_cfg &slv_crypto_cfg + &slv_srvc_snoc>; + }; + + mas_wss_1: mas-wss-1 { + cell-id = ; + label = "mas-wss-1"; + qcom,buswidth = <4>; + qcom,ap-owned; + qcom,qport = <27>; + qcom,qos-mode = "fixed"; + qcom,connections = <&snoc_int_0 &slv_snoc_ddrc_m1>; + qcom,prio1 = <0>; + qcom,prio0 = <0>; + qcom,bus-dev = <&fab_snoc>; + qcom,mas-rpm-id = ; + qcom,blacklist = <&slv_usb3_cfg &slv_ess_cfg &slv_pcie + &slv_wss1_cfg &slv_wss0_cfg &slv_crypto_cfg + &slv_srvc_snoc>; + }; + + mas_crypto: mas-crypto { + cell-id = ; + label = "mas-crypto"; + qcom,buswidth = <8>; + qcom,ap-owned; + qcom,qport = <5>; + qcom,qos-mode = "fixed"; + qcom,connections = <&snoc_int_0 &snoc_int_1 + &slv_snoc_ddrc_m1>; + qcom,prio1 = <0>; + qcom,prio0 = <0>; + qcom,bus-dev = <&fab_snoc>; + qcom,mas-rpm-id = ; + qcom,blacklist = <&slv_usb3_cfg &slv_ess_cfg &slv_a7ss + &slv_pcie &slv_qdss_stm &slv_crypto_cfg + &slv_srvc_snoc>; + }; + + mas_ess: mas-ess { + cell-id = ; + label = "mas-ess"; + qcom,buswidth = <8>; + qcom,ap-owned; + qcom,qport = <44>; + qcom,qos-mode = "fixed"; + qcom,connections = <&snoc_int_0 &slv_snoc_ddrc_m1>; + qcom,prio1 = <0>; + qcom,prio0 = <0>; + qcom,bus-dev = <&fab_snoc>; + qcom,mas-rpm-id = ; + qcom,blacklist = <&slv_usb3_cfg &slv_ess_cfg &slv_a7ss + &slv_pcie &slv_qdss_stm &slv_wss1_cfg + &slv_wss0_cfg &slv_crypto_cfg &slv_srvc_snoc>; + }; + + mas_pcie: mas-pcie { + cell-id = ; + label = "mas-pcie"; + qcom,buswidth = <8>; + qcom,ap-owned; + qcom,qport = <6>; + qcom,qos-mode = "fixed"; + qcom,connections = <&snoc_int_0 &slv_snoc_ddrc_m1>; + qcom,prio1 = <0>; + qcom,prio0 = <0>; + qcom,bus-dev = <&fab_snoc>; + qcom,mas-rpm-id = ; + qcom,blacklist = <&slv_usb3_cfg &slv_ess_cfg &slv_pcie + &slv_qdss_stm &slv_wss1_cfg &slv_wss0_cfg + &slv_crypto_cfg &slv_srvc_snoc>; + }; + + mas_usb3: mas-usb3 { + cell-id = ; + label = "mas-usb3"; + qcom,buswidth = <8>; + qcom,ap-owned; + qcom,qport = <7>; + qcom,qos-mode = "fixed"; + qcom,connections = <&snoc_int_0 
&slv_snoc_ddrc_m1>; + qcom,prio1 = <0>; + qcom,prio0 = <0>; + qcom,bus-dev = <&fab_snoc>; + qcom,mas-rpm-id = ; + qcom,blacklist = <&slv_usb3_cfg &slv_ess_cfg &slv_a7ss + &slv_pcie &slv_qdss_stm &slv_wss1_cfg + &slv_wss0_cfg &slv_crypto_cfg &slv_srvc_snoc>; + }; + + mas_qdss_etr: mas-qdss-etr { + cell-id = ; + label = "mas-qdss-etr"; + qcom,buswidth = <8>; + qcom,ap-owned; + qcom,qport = <544>; + qcom,qos-mode = "fixed"; + qcom,connections = <&qdss_int>; + qcom,prio1 = <0>; + qcom,prio0 = <0>; + qcom,bus-dev = <&fab_snoc>; + qcom,mas-rpm-id = ; + qcom,blacklist = <&slv_usb3_cfg &slv_ess_cfg &slv_a7ss + &slv_pcie &slv_qdss_stm &slv_wss1_cfg + &slv_wss0_cfg &slv_crypto_cfg &slv_srvc_snoc>; + }; + + mas_qdss_bamndp: mas-qdss-bamndp { + cell-id = ; + label = "mas-qdss-bamndp"; + qcom,buswidth = <4>; + qcom,ap-owned; + qcom,qport = <576>; + qcom,qos-mode = "fixed"; + qcom,connections = <&qdss_int>; + qcom,prio1 = <0>; + qcom,prio0 = <0>; + qcom,bus-dev = <&fab_snoc>; + qcom,mas-rpm-id = ; + qcom,blacklist = <&slv_usb3_cfg &slv_ess_cfg &slv_a7ss + &slv_pcie &slv_qdss_stm &slv_wss1_cfg + &slv_wss0_cfg &slv_crypto_cfg &slv_srvc_snoc>; + }; + + mas_pcnoc_snoc: mas-pcnoc-snoc { + cell-id = ; + label = "mas-pcnoc-snoc"; + qcom,buswidth = <4>; + qcom,ap-owned; + qcom,qport = <384>; + qcom,qos-mode = "fixed"; + qcom,connections = <&snoc_int_0 &snoc_int_1 + &slv_snoc_ddrc_m1>; + qcom,prio1 = <0>; + qcom,prio0 = <0>; + qcom,bus-dev = <&fab_snoc>; + qcom,mas-rpm-id = ; + qcom,blacklist = <&slv_srvc_snoc>; + }; + + mas_snoc_cfg: mas-snoc-cfg { + cell-id = ; + label = "mas-snoc-cfg"; + qcom,buswidth = <4>; + qcom,ap-owned; + qcom,connections = <&slv_srvc_snoc>; + qcom,bus-dev = <&fab_snoc>; + qcom,mas-rpm-id = ; + }; + + /*Internal nodes*/ + + + pcnoc_m_0: pcnoc-m-0 { + cell-id = ; + label = "pcnoc-m-0"; + qcom,buswidth = <4>; + qcom,ap-owned; + qcom,qport = <12>; + qcom,qos-mode = "fixed"; + qcom,connections = <&slv_pcnoc_snoc>; + qcom,prio1 = <1>; + qcom,prio0 = <1>; + 
qcom,bus-dev = <&fab_pcnoc>; + qcom,mas-rpm-id = ; + qcom,slv-rpm-id = ; + }; + + pcnoc_m_1: pcnoc-m-1 { + cell-id = ; + label = "pcnoc-m-1"; + qcom,buswidth = <4>; + qcom,ap-owned; + qcom,qport = <13>; + qcom,qos-mode = "fixed"; + qcom,connections = <&slv_pcnoc_snoc>; + qcom,prio1 = <1>; + qcom,prio0 = <1>; + qcom,bus-dev = <&fab_pcnoc>; + qcom,mas-rpm-id = ; + qcom,slv-rpm-id = ; + }; + + pcnoc_int_0: pcnoc-int-0 { + cell-id = ; + label = "pcnoc-int-0"; + qcom,buswidth = <8>; + qcom,ap-owned; + qcom,connections = < &pcnoc_s_1 &pcnoc_s_2 &pcnoc_s_0 + &pcnoc_s_4 &pcnoc_s_5 + &pcnoc_s_6 &pcnoc_s_7 + &pcnoc_s_8 &pcnoc_s_9 + &pcnoc_s_3>; + qcom,bus-dev = <&fab_pcnoc>; + qcom,mas-rpm-id = ; + qcom,slv-rpm-id = ; + }; + + pcnoc_s_0: pcnoc-s-0 { + cell-id = ; + label = "pcnoc-s-0"; + qcom,buswidth = <4>; + qcom,ap-owned; + qcom,connections = <&slv_clk_ctl &slv_tcsr &slv_security + &slv_tlmm>; + qcom,bus-dev = <&fab_pcnoc>; + qcom,mas-rpm-id = ; + qcom,slv-rpm-id = ; + }; + + pcnoc_s_1: pcnoc-s-1 { + cell-id = ; + label = "pcnoc-s-1"; + qcom,buswidth = <4>; + qcom,ap-owned; + qcom,connections = < &slv_prng_apu_cfg &slv_prng&slv_imem_cfg>; + qcom,bus-dev = <&fab_pcnoc>; + qcom,mas-rpm-id = ; + qcom,slv-rpm-id = ; + }; + + pcnoc_s_2: pcnoc-s-2 { + cell-id = ; + label = "pcnoc-s-2"; + qcom,buswidth = <4>; + qcom,ap-owned; + qcom,connections = < &slv_spdm &slv_pcnoc_mpu_cfg &slv_pcnoc_cfg + &slv_boot_rom>; + qcom,bus-dev = <&fab_pcnoc>; + qcom,mas-rpm-id = ; + qcom,slv-rpm-id = ; + }; + + pcnoc_s_3: pcnoc-s-3 { + cell-id = ; + label = "pcnoc-s-3"; + qcom,buswidth = <4>; + qcom,ap-owned; + qcom,connections = < &slv_qdss_cfg&slv_gcnt &slv_snoc_cfg + &slv_snoc_mpu_cfg>; + qcom,bus-dev = <&fab_pcnoc>; + qcom,mas-rpm-id = ; + qcom,slv-rpm-id = ; + }; + + pcnoc_s_4: pcnoc-s-4 { + cell-id = ; + label = "pcnoc-s-4"; + qcom,buswidth = <4>; + qcom,ap-owned; + qcom,connections = <&slv_adss_cfg &slv_adss_vmidmt_cfg &slv_adss_apu>; + qcom,bus-dev = <&fab_pcnoc>; + qcom,mas-rpm-id = ; + 
qcom,slv-rpm-id = ; + }; + + pcnoc_s_5: pcnoc-s-5 { + cell-id = ; + label = "pcnoc-s-5"; + qcom,buswidth = <4>; + qcom,ap-owned; + qcom,connections = <&slv_qhss_apu_cfg &slv_fephy_cfg &slv_mdio + &slv_srif>; + qcom,bus-dev = <&fab_pcnoc>; + qcom,mas-rpm-id = ; + qcom,slv-rpm-id = ; + }; + + pcnoc_s_6: pcnoc-s-6 { + cell-id = ; + label = "pcnoc-s-6"; + qcom,buswidth = <4>; + qcom,ap-owned; + qcom,connections = < &slv_ddrc_mpu0_cfg &slv_ddrc_apu_cfg &slv_ddrc_mpu2_cfg + &slv_ddrc_cfg &slv_ddrc_mpu1_cfg>; + qcom,bus-dev = <&fab_pcnoc>; + qcom,mas-rpm-id = ; + qcom,slv-rpm-id = ; + }; + + pcnoc_s_7: pcnoc-s-7 { + cell-id = ; + label = "pcnoc-s-7"; + qcom,buswidth = <4>; + qcom,ap-owned; + qcom,connections = < &slv_ess_apu_cfg &slv_usb2_cfg&slv_ess_vmidmt_cfg>; + qcom,bus-dev = <&fab_pcnoc>; + qcom,mas-rpm-id = ; + qcom,slv-rpm-id = ; + }; + + pcnoc_s_8: pcnoc-s-8 { + cell-id = ; + label = "pcnoc-s-8"; + qcom,buswidth = <4>; + qcom,ap-owned; + qcom,connections = < &slv_sdcc_cfg &slv_qpic_cfg&slv_blsp_cfg>; + qcom,bus-dev = <&fab_pcnoc>; + qcom,mas-rpm-id = ; + qcom,slv-rpm-id = ; + }; + + pcnoc_s_9: pcnoc-s-9 { + cell-id = ; + label = "pcnoc-s-9"; + qcom,buswidth = <4>; + qcom,ap-owned; + qcom,connections = < &slv_wss1_apu_cfg &slv_wss1_vmidmt_cfg&slv_wss0_vmidmt_cfg + &slv_wss0_apu_cfg>; + qcom,bus-dev = <&fab_pcnoc>; + qcom,mas-rpm-id = ; + qcom,slv-rpm-id = ; + }; + + snoc_int_0: snoc-int-0 { + cell-id = ; + label = "snoc-int-0"; + qcom,buswidth = <8>; + qcom,ap-owned; + qcom,connections = < &slv_ocimem&slv_qdss_stm>; + qcom,bus-dev = <&fab_snoc>; + qcom,mas-rpm-id = ; + qcom,slv-rpm-id = ; + }; + + snoc_int_1: snoc-int-1 { + cell-id = ; + label = "snoc-int-1"; + qcom,buswidth = <8>; + qcom,ap-owned; + qcom,connections = < &slv_crypto_cfg &slv_a7ss &slv_ess_cfg + &slv_usb3_cfg &slv_wss1_cfg + &slv_wss0_cfg>; + qcom,bus-dev = <&fab_snoc>; + qcom,mas-rpm-id = ; + qcom,slv-rpm-id = ; + }; + + qdss_int: qdss-int { + cell-id = ; + label = "qdss-int"; + qcom,buswidth = 
<8>; + qcom,ap-owned; + qcom,connections = <&snoc_int_0 &slv_snoc_ddrc_m1>; + qcom,bus-dev = <&fab_snoc>; + qcom,mas-rpm-id = ; + qcom,slv-rpm-id = ; + }; + /*Slaves*/ + + slv_clk_ctl:slv-clk-ctl { + cell-id = ; + label = "slv-clk-ctl"; + qcom,buswidth = <4>; + qcom,ap-owned; + qcom,bus-dev = <&fab_pcnoc>; + qcom,slv-rpm-id = ; + }; + + slv_security:slv-security { + cell-id = ; + label = "slv-security"; + qcom,buswidth = <4>; + qcom,ap-owned; + qcom,bus-dev = <&fab_pcnoc>; + qcom,slv-rpm-id = ; + }; + + slv_tcsr:slv-tcsr { + cell-id = ; + label = "slv-tcsr"; + qcom,buswidth = <4>; + qcom,ap-owned; + qcom,bus-dev = <&fab_pcnoc>; + qcom,slv-rpm-id = ; + }; + + slv_tlmm:slv-tlmm { + cell-id = ; + label = "slv-tlmm"; + qcom,buswidth = <4>; + qcom,ap-owned; + qcom,bus-dev = <&fab_pcnoc>; + qcom,slv-rpm-id = ; + }; + + slv_imem_cfg:slv-imem-cfg { + cell-id = ; + label = "slv-imem-cfg"; + qcom,buswidth = <4>; + qcom,ap-owned; + qcom,bus-dev = <&fab_pcnoc>; + qcom,slv-rpm-id = ; + }; + + slv_prng:slv-prng { + cell-id = ; + label = "slv-prng"; + qcom,buswidth = <4>; + qcom,ap-owned; + qcom,bus-dev = <&fab_pcnoc>; + qcom,slv-rpm-id = ; + }; + + slv_prng_apu_cfg:slv-prng-apu-cfg { + cell-id = ; + label = "slv-prng-apu-cfg"; + qcom,buswidth = <4>; + qcom,ap-owned; + qcom,bus-dev = <&fab_pcnoc>; + qcom,slv-rpm-id = ; + }; + + slv_boot_rom:slv-boot-rom { + cell-id = ; + label = "slv-boot-rom"; + qcom,buswidth = <4>; + qcom,ap-owned; + qcom,bus-dev = <&fab_pcnoc>; + qcom,slv-rpm-id = ; + }; + + slv_spdm:slv-spdm { + cell-id = ; + label = "slv-spdm"; + qcom,buswidth = <4>; + qcom,ap-owned; + qcom,bus-dev = <&fab_pcnoc>; + qcom,slv-rpm-id = ; + }; + + slv_pcnoc_cfg:slv-pcnoc-cfg { + cell-id = ; + label = "slv-pcnoc-cfg"; + qcom,buswidth = <4>; + qcom,ap-owned; + qcom,bus-dev = <&fab_pcnoc>; + qcom,slv-rpm-id = ; + }; + + slv_pcnoc_mpu_cfg:slv-pcnoc-mpu-cfg { + cell-id = ; + label = "slv-pcnoc-mpu-cfg"; + qcom,buswidth = <4>; + qcom,ap-owned; + qcom,bus-dev = <&fab_pcnoc>; + 
qcom,slv-rpm-id = ; + }; + + slv_gcnt:slv-gcnt { + cell-id = ; + label = "slv-gcnt"; + qcom,buswidth = <4>; + qcom,ap-owned; + qcom,bus-dev = <&fab_pcnoc>; + qcom,slv-rpm-id = ; + }; + + slv_qdss_cfg:slv-qdss-cfg { + cell-id = ; + label = "slv-qdss-cfg"; + qcom,buswidth = <4>; + qcom,ap-owned; + qcom,bus-dev = <&fab_pcnoc>; + qcom,slv-rpm-id = ; + }; + + slv_snoc_cfg:slv-snoc-cfg { + cell-id = ; + label = "slv-snoc-cfg"; + qcom,buswidth = <4>; + qcom,ap-owned; + qcom,bus-dev = <&fab_pcnoc>; + qcom,slv-rpm-id = ; + }; + + slv_snoc_mpu_cfg:slv-snoc-mpu-cfg { + cell-id = ; + label = "slv-snoc-mpu-cfg"; + qcom,buswidth = <4>; + qcom,ap-owned; + qcom,bus-dev = <&fab_pcnoc>; + qcom,slv-rpm-id = ; + }; + + slv_adss_cfg:slv-adss-cfg { + cell-id = ; + label = "slv-adss-cfg"; + qcom,buswidth = <4>; + qcom,ap-owned; + qcom,bus-dev = <&fab_pcnoc>; + qcom,slv-rpm-id = ; + }; + + slv_adss_apu:slv-adss-apu { + cell-id = ; + label = "slv-adss-apu"; + qcom,buswidth = <4>; + qcom,ap-owned; + qcom,bus-dev = <&fab_pcnoc>; + qcom,slv-rpm-id = ; + }; + + slv_adss_vmidmt_cfg:slv-adss-vmidmt-cfg { + cell-id = ; + label = "slv-adss-vmidmt-cfg"; + qcom,buswidth = <4>; + qcom,ap-owned; + qcom,bus-dev = <&fab_pcnoc>; + qcom,slv-rpm-id = ; + }; + + slv_qhss_apu_cfg:slv-qhss-apu-cfg { + cell-id = ; + label = "slv-qhss-apu-cfg"; + qcom,buswidth = <4>; + qcom,ap-owned; + qcom,bus-dev = <&fab_pcnoc>; + qcom,slv-rpm-id = ; + }; + + slv_mdio:slv-mdio { + cell-id = ; + label = "slv-mdio"; + qcom,buswidth = <4>; + qcom,ap-owned; + qcom,bus-dev = <&fab_pcnoc>; + qcom,slv-rpm-id = ; + }; + + slv_fephy_cfg:slv-fephy-cfg { + cell-id = ; + label = "slv-fephy-cfg"; + qcom,buswidth = <4>; + qcom,ap-owned; + qcom,bus-dev = <&fab_pcnoc>; + qcom,slv-rpm-id = ; + }; + + slv_srif:slv-srif { + cell-id = ; + label = "slv-srif"; + qcom,buswidth = <4>; + qcom,ap-owned; + qcom,bus-dev = <&fab_pcnoc>; + qcom,slv-rpm-id = ; + }; + + slv_ddrc_cfg:slv-ddrc-cfg { + cell-id = ; + label = "slv-ddrc-cfg"; + qcom,buswidth = 
<4>; + qcom,ap-owned; + qcom,bus-dev = <&fab_pcnoc>; + qcom,slv-rpm-id = ; + }; + + slv_ddrc_apu_cfg:slv-ddrc-apu-cfg { + cell-id = ; + label = "slv-ddrc-apu-cfg"; + qcom,buswidth = <4>; + qcom,ap-owned; + qcom,bus-dev = <&fab_pcnoc>; + qcom,slv-rpm-id = ; + }; + + slv_ddrc_mpu0_cfg:slv-ddrc-mpu0-cfg { + cell-id = ; + label = "slv-ddrc-mpu0-cfg"; + qcom,buswidth = <4>; + qcom,ap-owned; + qcom,bus-dev = <&fab_pcnoc>; + qcom,slv-rpm-id = ; + }; + + slv_ddrc_mpu1_cfg:slv-ddrc-mpu1-cfg { + cell-id = ; + label = "slv-ddrc-mpu1-cfg"; + qcom,buswidth = <4>; + qcom,ap-owned; + qcom,bus-dev = <&fab_pcnoc>; + qcom,slv-rpm-id = ; + }; + + slv_ddrc_mpu2_cfg:slv-ddrc-mpu2-cfg { + cell-id = ; + label = "slv-ddrc-mpu2-cfg"; + qcom,buswidth = <4>; + qcom,ap-owned; + qcom,bus-dev = <&fab_pcnoc>; + qcom,slv-rpm-id = ; + }; + + slv_ess_vmidmt_cfg:slv-ess-vmidmt-cfg { + cell-id = ; + label = "slv-ess-vmidmt-cfg"; + qcom,buswidth = <4>; + qcom,ap-owned; + qcom,bus-dev = <&fab_pcnoc>; + qcom,slv-rpm-id = ; + }; + + slv_ess_apu_cfg:slv-ess-apu-cfg { + cell-id = ; + label = "slv-ess-apu-cfg"; + qcom,buswidth = <4>; + qcom,ap-owned; + qcom,bus-dev = <&fab_pcnoc>; + qcom,slv-rpm-id = ; + }; + + slv_usb2_cfg:slv-usb2-cfg { + cell-id = ; + label = "slv-usb2-cfg"; + qcom,buswidth = <4>; + qcom,ap-owned; + qcom,bus-dev = <&fab_pcnoc>; + qcom,slv-rpm-id = ; + }; + + slv_blsp_cfg:slv-blsp-cfg { + cell-id = ; + label = "slv-blsp-cfg"; + qcom,buswidth = <4>; + qcom,ap-owned; + qcom,bus-dev = <&fab_pcnoc>; + qcom,slv-rpm-id = ; + }; + + slv_qpic_cfg:slv-qpic-cfg { + cell-id = ; + label = "slv-qpic-cfg"; + qcom,buswidth = <4>; + qcom,ap-owned; + qcom,bus-dev = <&fab_pcnoc>; + qcom,slv-rpm-id = ; + }; + + slv_sdcc_cfg:slv-sdcc-cfg { + cell-id = ; + label = "slv-sdcc-cfg"; + qcom,buswidth = <4>; + qcom,ap-owned; + qcom,bus-dev = <&fab_pcnoc>; + qcom,slv-rpm-id = ; + }; + + slv_wss0_vmidmt_cfg:slv-wss0-vmidmt-cfg { + cell-id = ; + label = "slv-wss0-vmidmt-cfg"; + qcom,buswidth = <4>; + qcom,ap-owned; + 
qcom,bus-dev = <&fab_pcnoc>; + qcom,slv-rpm-id = ; + }; + + slv_wss0_apu_cfg:slv-wss0-apu-cfg { + cell-id = ; + label = "slv-wss0-apu-cfg"; + qcom,buswidth = <4>; + qcom,ap-owned; + qcom,bus-dev = <&fab_pcnoc>; + qcom,slv-rpm-id = ; + }; + + slv_wss1_vmidmt_cfg:slv-wss1-vmidmt-cfg { + cell-id = ; + label = "slv-wss1-vmidmt-cfg"; + qcom,buswidth = <4>; + qcom,ap-owned; + qcom,bus-dev = <&fab_pcnoc>; + qcom,slv-rpm-id = ; + }; + + slv_wss1_apu_cfg:slv-wss1-apu-cfg { + cell-id = ; + label = "slv-wss1-apu-cfg"; + qcom,buswidth = <4>; + qcom,ap-owned; + qcom,bus-dev = <&fab_pcnoc>; + qcom,slv-rpm-id = ; + }; + + slv_pcnoc_snoc:slv-pcnoc-snoc { + cell-id = ; + label = "slv-pcnoc-snoc"; + qcom,buswidth = <4>; + qcom,ap-owned; + qcom,bus-dev = <&fab_pcnoc>; + qcom,slv-rpm-id = ; + }; + + slv_srvc_pcnoc:slv-srvc-pcnoc { + cell-id = ; + label = "slv-srvc-pcnoc"; + qcom,buswidth = <4>; + qcom,ap-owned; + qcom,bus-dev = <&fab_pcnoc>; + qcom,slv-rpm-id = ; + }; + + slv_snoc_ddrc_m1:slv-snoc-ddrc-m1 { + cell-id = ; + label = "slv-snoc-ddrc-m1"; + qcom,buswidth = <8>; + qcom,ap-owned; + qcom,bus-dev = <&fab_snoc>; + qcom,slv-rpm-id = ; + }; + + slv_a7ss:slv-a7ss { + cell-id = ; + label = "slv-a7ss"; + qcom,buswidth = <4>; + qcom,ap-owned; + qcom,bus-dev = <&fab_snoc>; + qcom,slv-rpm-id = ; + }; + + slv_ocimem:slv-ocimem { + cell-id = ; + label = "slv-ocimem"; + qcom,buswidth = <8>; + qcom,ap-owned; + qcom,bus-dev = <&fab_snoc>; + qcom,slv-rpm-id = ; + }; + + slv_wss0_cfg:slv-wss0-cfg { + cell-id = ; + label = "slv-wss0-cfg"; + qcom,buswidth = <4>; + qcom,ap-owned; + qcom,bus-dev = <&fab_snoc>; + qcom,slv-rpm-id = ; + }; + + slv_wss1_cfg:slv-wss1-cfg { + cell-id = ; + label = "slv-wss1-cfg"; + qcom,buswidth = <4>; + qcom,ap-owned; + qcom,bus-dev = <&fab_snoc>; + qcom,slv-rpm-id = ; + }; + + slv_pcie:slv-pcie { + cell-id = ; + label = "slv-pcie"; + qcom,buswidth = <8>; + qcom,ap-owned; + qcom,bus-dev = <&fab_snoc>; + qcom,slv-rpm-id = ; + }; + + slv_usb3_cfg:slv-usb3-cfg { + 
cell-id = ; + label = "slv-usb3-cfg"; + qcom,buswidth = <4>; + qcom,ap-owned; + qcom,bus-dev = <&fab_snoc>; + qcom,slv-rpm-id = ; + }; + + slv_crypto_cfg:slv-crypto-cfg { + cell-id = ; + label = "slv-crypto-cfg"; + qcom,buswidth = <4>; + qcom,ap-owned; + qcom,bus-dev = <&fab_snoc>; + qcom,slv-rpm-id = ; + }; + + slv_ess_cfg:slv-ess-cfg { + cell-id = ; + label = "slv-ess-cfg"; + qcom,buswidth = <4>; + qcom,ap-owned; + qcom,bus-dev = <&fab_snoc>; + qcom,slv-rpm-id = ; + }; + + slv_qdss_stm:slv-qdss-stm { + cell-id = ; + label = "slv-qdss-stm"; + qcom,buswidth = <4>; + qcom,ap-owned; + qcom,bus-dev = <&fab_snoc>; + qcom,slv-rpm-id = ; + }; + + slv_srvc_snoc:slv-srvc-snoc { + cell-id = ; + label = "slv-srvc-snoc"; + qcom,buswidth = <8>; + qcom,ap-owned; + qcom,bus-dev = <&fab_snoc>; + qcom,slv-rpm-id = ; + }; + }; +}; + +}; diff --git a/target/linux/ipq40xx/files-4.9/arch/arm/boot/dts/qcom-ipq4019-fritz4040.dts b/target/linux/ipq40xx/files-4.9/arch/arm/boot/dts/qcom-ipq4019-fritz4040.dts new file mode 100644 index 000000000..f5ca3d5c5 --- /dev/null +++ b/target/linux/ipq40xx/files-4.9/arch/arm/boot/dts/qcom-ipq4019-fritz4040.dts @@ -0,0 +1,322 @@ +/* Copyright (c) 2015, The Linux Foundation. All rights reserved. + * + * Permission to use, copy, modify, and/or distribute this software for any + * purpose with or without fee is hereby granted, provided that the above + * copyright notice and this permission notice appear in all copies. + * + * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES + * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF + * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR + * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES + * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN + * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF + * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. 
+ * + */ + +#include "qcom-ipq4019.dtsi" +#include "qcom-ipq4019-bus.dtsi" +#include +#include +#include + +/ { + model = "AVM FRITZ!Box 4040"; + compatible = "avm,fritzbox-4040", "qcom,ipq4019"; + + aliases { + led-boot = &power; + led-failsafe = &flash; + led-running = &power; + led-upgrade = &flash; + }; + + reserved-memory { + #address-cells = <0x1>; + #size-cells = <0x1>; + ranges; + + tz_apps@87b80000 { + reg = <0x87b80000 0x280000>; + reusable; + }; + + smem@87e00000 { + reg = <0x87e00000 0x080000>; + no-map; + }; + + tz@87e80000 { + reg = <0x87e80000 0x180000>; + no-map; + }; + }; + + soc { + mdio@90000 { + status = "okay"; + }; + + ess-psgmii@98000 { + status = "okay"; + }; + + tcsr@1949000 { + compatible = "qcom,tcsr"; + reg = <0x1949000 0x100>; + qcom,wifi_glb_cfg = ; + }; + + tcsr@194b000 { + compatible = "qcom,tcsr"; + reg = <0x194b000 0x100>; + qcom,usb-hsphy-mode-select = ; + }; + + ess_tcsr@1953000 { + compatible = "qcom,tcsr"; + reg = <0x1953000 0x1000>; + qcom,ess-interface-select = ; + }; + + tcsr@1957000 { + compatible = "qcom,tcsr"; + reg = <0x1957000 0x100>; + qcom,wifi_noc_memtype_m0_m2 = ; + }; + + usb2@60f8800 { + status = "ok"; + }; + + serial@78af000 { + pinctrl-0 = <&serial_pins>; + pinctrl-names = "default"; + status = "ok"; + }; + + usb3@8af8800 { + status = "ok"; + }; + + crypto@8e3a000 { + status = "ok"; + }; + + wifi@a000000 { + status = "okay"; + }; + + wifi@a800000 { + status = "okay"; + }; + + watchdog@b017000 { + status = "ok"; + }; + + qca8075: ess-switch@c000000 { + status = "okay"; + + #gpio-cells = <2>; + gpio-controller; + + enable-usb-power { + gpio-hog; + line-name = "enable USB3 power"; + gpios = <7 GPIO_ACTIVE_HIGH>; + output-high; + }; + }; + + edma@c080000 { + status = "okay"; + }; + }; + + gpio-keys { + compatible = "gpio-keys"; + + wlan { + label = "wlan"; + gpios = <&tlmm 58 GPIO_ACTIVE_LOW>; + linux,code = ; + }; + + wps { + label = "wps"; + gpios = <&tlmm 63 GPIO_ACTIVE_LOW>; + linux,code = ; + }; + }; + + 
gpio-leds { + compatible = "gpio-leds"; + + wlan { + label = "fritz4040:green:wlan"; + gpios = <&qca8075 1 GPIO_ACTIVE_HIGH>; + }; + + panic: info_red { + label = "fritz4040:red:info"; + gpios = <&qca8075 3 GPIO_ACTIVE_HIGH>; + panic-indicator; + }; + + wan { + label = "fritz4040:green:wan"; + gpios = <&qca8075 5 GPIO_ACTIVE_HIGH>; + }; + + power: power { + label = "fritz4040:green:power"; + gpios = <&qca8075 11 GPIO_ACTIVE_HIGH>; + }; + + lan { + label = "fritz4040:green:lan"; + gpios = <&qca8075 13 GPIO_ACTIVE_HIGH>; + }; + + flash: info_amber { + label = "fritz4040:amber:info"; + gpios = <&qca8075 15 GPIO_ACTIVE_HIGH>; + }; + }; +}; + +&tlmm { + serial_pins: serial_pinmux { + mux { + pins = "gpio60", "gpio61"; + function = "blsp_uart0"; + bias-disable; + }; + }; + + spi_0_pins: spi_0_pinmux { + mux { + function = "blsp_spi0"; + pins = "gpio55", "gpio56", "gpio57"; + drive-strength = <12>; + bias-disable; + }; + + mux_cs { + function = "gpio"; + pins = "gpio54"; + drive-strength = <2>; + bias-disable; + output-high; + }; + }; +}; + +&cryptobam { + status = "ok"; +}; + +&blsp_dma { + status = "ok"; +}; + +&spi_0 { /* BLSP1 QUP1 */ + pinctrl-0 = <&spi_0_pins>; + pinctrl-names = "default"; + status = "ok"; + cs-gpios = <&tlmm 54 0>; + + mx25l25635f@0 { + compatible = "jedec,spi-nor"; + #address-cells = <1>; + #size-cells = <0>; + reg = <0>; + spi-max-frequency = <24000000>; + status = "ok"; + m25p,fast-read; + + partitions { + compatible = "fixed-partitions"; + #address-cells = <1>; + #size-cells = <1>; + + partition0@0 { + label = "SBL1"; + reg = <0x00000000 0x00040000>; + read-only; + }; + partition1@40000 { + label = "MIBIB"; + reg = <0x00040000 0x00020000>; + read-only; + }; + partition2@60000 { + label = "QSEE"; + reg = <0x00060000 0x00060000>; + read-only; + }; + partition3@c0000 { + label = "CDT"; + reg = <0x000c0000 0x00010000>; + read-only; + }; + partition4@d0000 { + label = "DDRPARAMS"; + reg = <0x000d0000 0x00010000>; + read-only; + }; + partition5@e0000 
{ + label = "APPSBLENV"; /* uboot env - empty */ + reg = <0x000e0000 0x00010000>; + read-only; + }; + partition6@f0000 { + label = "urlader"; /* APPSBL */ + reg = <0x000f0000 0x0002dc000>; + read-only; + }; + partition7@11dc00 { + /* make a backup of this partition! */ + label = "urlader_config"; + reg = <0x0011dc00 0x00002400>; + read-only; + }; + partition8@120000 { + label = "tffs1"; + reg = <0x00120000 0x00080000>; + read-only; + }; + partition9@1a0000 { + label = "tffs2"; + reg = <0x001a0000 0x00080000>; + read-only; + }; + partition10@220000 { + label = "uboot"; + reg = <0x00220000 0x00080000>; + read-only; + }; + partition11@2A0000 { + label = "firmware"; + reg = <0x002a0000 0x01c60000>; + }; + partition12@1f00000 { + label = "jffs2"; + reg = <0x01f00000 0x00100000>; + }; + }; + }; +}; + +&usb3_ss_phy { + status = "ok"; +}; + +&usb3_hs_phy { + status = "ok"; +}; + +&usb2_hs_phy { + status = "ok"; +}; diff --git a/target/linux/ipq40xx/files-4.9/arch/arm/boot/dts/qcom-ipq4019-rt-ac58u.dts b/target/linux/ipq40xx/files-4.9/arch/arm/boot/dts/qcom-ipq4019-rt-ac58u.dts new file mode 100644 index 000000000..04f48ae9d --- /dev/null +++ b/target/linux/ipq40xx/files-4.9/arch/arm/boot/dts/qcom-ipq4019-rt-ac58u.dts @@ -0,0 +1,197 @@ +/* Copyright (c) 2015, The Linux Foundation. All rights reserved. + * + * Permission to use, copy, modify, and/or distribute this software for any + * purpose with or without fee is hereby granted, provided that the above + * copyright notice and this permission notice appear in all copies. + * + * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES + * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF + * MERCHANTABILITY AND FITNESS. 
IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR + * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES + * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN + * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF + * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. + * + */ + +#include "qcom-ipq4019-ap.dk01.1.dtsi" +#include "qcom-ipq4019-bus.dtsi" +#include +#include + +/ { + model = "ASUS RT-AC58U"; + compatible = "asus,rt-ac58u", "qcom,ipq4019"; + + memory { + device_type = "memory"; + reg = <0x80000000 0x8000000>; + }; + + aliases { + led-boot = &power; + led-failsafe = &power; + led-running = &power; + led-upgrade = &power; + }; + + reserved-memory { + #address-cells = <0x1>; + #size-cells = <0x1>; + ranges; + + rsvd1@87E00000 { + reg = <0x87e00000 0x200000>; + no-map; + }; + }; + + gpio-keys { + compatible = "gpio-keys"; + + reset { + label = "reset"; + gpios = <&tlmm 4 GPIO_ACTIVE_LOW>; + linux,code = ; + }; + + wps { + label = "wps"; + gpios = <&tlmm 63 GPIO_ACTIVE_LOW>; + linux,code = ; + }; + }; + + gpio-leds { + compatible = "gpio-leds"; + + power: status { + label = "rt-ac58u:blue:status"; + gpios = <&tlmm 3 GPIO_ACTIVE_HIGH>; + }; + + wan { + label = "rt-ac58u:blue:wan"; + gpios = <&tlmm 1 GPIO_ACTIVE_HIGH>; + }; + + wlan2G { + label = "rt-ac58u:blue:wlan2G"; + gpios = <&tlmm 58 GPIO_ACTIVE_HIGH>; + }; + + wan5G { + label = "rt-ac58u:blue:wlan5G"; + gpios = <&tlmm 5 GPIO_ACTIVE_HIGH>; + }; + + usb { + label = "rt-ac58u:blue:usb"; + gpios = <&tlmm 0 GPIO_ACTIVE_HIGH>; + }; + + lan { + label = "rt-ac58u:blue:lan"; + gpios = <&tlmm 2 GPIO_ACTIVE_HIGH>; + }; + }; +}; + +&spi_0_pins { + pinmux_cs { + function = "gpio"; + pins = "gpio54", "gpio59"; + }; + pinconf_cs { + pins = "gpio54", "gpio59"; + drive-strength = <2>; + bias-disable; + output-high; + }; +}; + +&spi_0 { /* BLSP1 QUP1 */ + cs-gpios = <&tlmm 54 0>, + <&tlmm 59 0>; + + m25p80@0 { + #address-cells = <1>; + #size-cells = <0>; + 
/* + * U-boot looks for "n25q128a11" node, + * if we don't have it, it will spit out the following warning: + * "ipq: fdt fixup unable to find compatible node". + */ + compatible = "mx25l1606e", "n25q128a11", "jedec,spi-nor"; + reg = <0>; + linux,modalias = "m25p80", "mx25l1606e", "n25q128a11"; + spi-max-frequency = <24000000>; + + partitions { + compatible = "fixed-partitions"; + #address-cells = <1>; + #size-cells = <1>; + + partition0@0 { + label = "SBL1"; + reg = <0x00000000 0x00040000>; + read-only; + }; + partition1@40000 { + label = "MIBIB"; + reg = <0x00040000 0x00020000>; + read-only; + }; + partition2@60000 { + label = "QSEE"; + reg = <0x00060000 0x00060000>; + read-only; + }; + partition3@c0000 { + label = "CDT"; + reg = <0x000c0000 0x00010000>; + read-only; + }; + partition4@d0000 { + label = "DDRPARAMS"; + reg = <0x000d0000 0x00010000>; + read-only; + }; + partition5@e0000 { + label = "APPSBLENV"; /* uboot env*/ + reg = <0x000e0000 0x00010000>; + read-only; + }; + partition5@f0000 { + label = "APPSBL"; /* uboot */ + reg = <0x000f0000 0x00080000>; + read-only; + }; + partition5@170000 { + label = "ART"; + reg = <0x00170000 0x00010000>; + read-only; + }; + /* 0x00180000 - 0x00200000 unused */ + }; + }; + + mt29f@1 { + #address-cells = <1>; + #size-cells = <0>; + compatible = "spinand,mt29f", "w25n01gv"; + reg = <1>; + spi-max-frequency = <24000000>; + + partitions { + compatible = "fixed-partitions"; + #address-cells = <1>; + #size-cells = <1>; + + partition0@0 { + label = "ubi"; + reg = <0x00000000 0x08000000>; + }; + }; + }; +}; diff --git a/target/linux/ipq40xx/files-4.9/arch/arm/boot/dts/qcom-ipq4019-rt-acrh17.dts b/target/linux/ipq40xx/files-4.9/arch/arm/boot/dts/qcom-ipq4019-rt-acrh17.dts new file mode 100644 index 000000000..beace8ecf --- /dev/null +++ b/target/linux/ipq40xx/files-4.9/arch/arm/boot/dts/qcom-ipq4019-rt-acrh17.dts @@ -0,0 +1,291 @@ +/* Copyright (c) 2015, The Linux Foundation. All rights reserved. 
+ * + * Permission to use, copy, modify, and/or distribute this software for any + * purpose with or without fee is hereby granted, provided that the above + * copyright notice and this permission notice appear in all copies. + * + * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES + * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF + * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR + * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES + * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN + * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF + * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. + * + */ + +#include "qcom-ipq4019-ap.dk04.1.dtsi" +#include "qcom-ipq4019-bus.dtsi" +#include +#include +#include + +/ { + model = "ASUS RT-ACRH17"; + compatible = "asus,rt-acrh17", "qcom,ipq4019"; + + memory { + device_type = "memory"; + reg = <0x80000000 0x10000000>; + }; + + aliases { + led-boot = &power; + led-failsafe = &power; + led-running = &power; + led-upgrade = &power; + }; + + reserved-memory { + #address-cells = <0x1>; + #size-cells = <0x1>; + ranges; + + rsvd1@87E00000 { + reg = <0x87e00000 0x200000>; + no-map; + }; + }; + + soc { + spi_0: spi@78b5000 { + status = "disabled"; + }; + + pcie0: qcom,pcie@80000 { + compatible = "qcom,msm_pcie"; + cell-index = <0>; + qcom,ctrl-amt = <1>; + + reg = <0x80000 0x2000>, + <0x99000 0x800>, + <0x40000000 0xf1d>, + <0x40000f20 0xa8>, + <0x40100000 0x1000>, + <0x40200000 0x100000>, + <0x40300000 0xd00000>; + reg-names = "parf", "phy", "dm_core", "elbi", + "conf", "io", "bars"; + + #address-cells = <0>; + interrupt-parent = <&pcie0>; + interrupts = <0 1 2 3 4 5 6 7 8 9 10 11 12>; + #interrupt-cells = <1>; + interrupt-map-mask = <0xffffffff>; + interrupt-map = <0 &intc 0 141 0 + 1 &intc 0 142 0 + 2 &intc 0 143 0 + 3 &intc 0 144 0 + 4 &intc 0 145 0 + 5 &intc 0 146 0 + 6 &intc 0 147 0 + 7 
&intc 0 148 0 + 8 &intc 0 149 0 + 9 &intc 0 150 0 + 10 &intc 0 151 0 + 11 &intc 0 152 0 >; + + interrupt-names = "int_msi", "int_a", "int_b", "int_c", "int_d", + "int_pls_pme", "int_pme_legacy", "int_pls_err", + "int_aer_legacy", "int_pls_link_up", + "int_pls_link_down", "int_bridge_flush_n","int_wake"; + + qcom,ep-latency = <10>; + + clocks = <&gcc GCC_PCIE_AHB_CLK>, + <&gcc GCC_PCIE_AXI_M_CLK>, + <&gcc GCC_PCIE_AXI_S_CLK>; + + clock-names = "pcie_0_cfg_ahb_clk", + "pcie_0_mstr_axi_clk", + "pcie_0_slv_axi_clk"; + + max-clock-frequency-hz = <0>, <0>, <0>; + + resets = <&gcc PCIE_AXI_M_ARES>, + <&gcc PCIE_AXI_S_ARES>, + <&gcc PCIE_PIPE_ARES>, + <&gcc PCIE_AXI_M_VMIDMT_ARES>, + <&gcc PCIE_AXI_S_XPU_ARES>, + <&gcc PCIE_PARF_XPU_ARES>, + <&gcc PCIE_PHY_ARES>, + <&gcc PCIE_AXI_M_STICKY_ARES>, + <&gcc PCIE_PIPE_STICKY_ARES>, + <&gcc PCIE_PWR_ARES>, + <&gcc PCIE_AHB_ARES>, + <&gcc PCIE_PHY_AHB_ARES>; + + reset-names = "pcie_rst_axi_m_ares", + "pcie_rst_axi_s_ares", + "pcie_rst_pipe_ares", + "pcie_rst_axi_m_vmidmt_ares", + "pcie_rst_axi_s_xpu_ares", + "pcie_rst_parf_xpu_ares", + "pcie_rst_phy_ares", + "pcie_rst_axi_m_sticky_ares", + "pcie_rst_pipe_sticky_ares", + "pcie_rst_pwr_ares", + "pcie_rst_ahb_res", + "pcie_rst_phy_ahb_ares"; + + status = "ok"; + perst-gpio = <&tlmm 38 0>; + wake-gpio = <&tlmm 50 0>; + clkreq-gpio = <&tlmm 39 0>; + }; + + tcsr@194b000 { + /* select hostmode */ + compatible = "qcom,tcsr"; + reg = <0x194b000 0x100>; + qcom,usb-hsphy-mode-select = ; + status = "ok"; + }; + + ess_tcsr@1953000 { + compatible = "qcom,tcsr"; + reg = <0x1953000 0x1000>; + qcom,ess-interface-select = ; + }; + + tcsr@1949000 { + compatible = "qcom,tcsr"; + reg = <0x1949000 0x100>; + qcom,wifi_glb_cfg = ; + }; + + tcsr@1957000 { + compatible = "qcom,tcsr"; + reg = <0x1957000 0x100>; + qcom,wifi_noc_memtype_m0_m2 = ; + }; + + mdio@90000 { + status = "okay"; + }; + + ess-switch@c000000 { + status = "okay"; + }; + + ess-psgmii@98000 { + status = "okay"; + }; + + edma@c080000 { + 
status = "okay"; + }; + + wifi0: wifi@a000000 { + status = "ok"; + core-id = <0x0>; + qca,msi_addr = <0x0b006040>; + qca,msi_base = <0x40>; + wifi_led_num = <2>; /* Wifi 2G */ + wifi_led_source = <0>; /* source id 0 */ + qcom,mtd-name = "0:ART"; + qcom,cal-offset = <0x1000>; + qcom,cal-len = <12064>; + }; + + wifi1: wifi@a800000 { + status = "disabled"; + }; + }; + + gpio-keys { + compatible = "gpio-keys"; + + reset { + label = "reset"; + gpios = <&tlmm 18 GPIO_ACTIVE_LOW>; + linux,code = ; + }; + + wps { + label = "wps"; + gpios = <&tlmm 11 GPIO_ACTIVE_LOW>; + linux,code = ; + }; + }; + + gpio-leds { + compatible = "gpio-leds"; + + power: status { + label = "rt-acrh17:blue:status"; + gpios = <&tlmm 40 GPIO_ACTIVE_LOW>; + }; + + lan1 { + label = "rt-acrh17:blue:lan1"; + gpios = <&tlmm 45 GPIO_ACTIVE_LOW>; + }; + + lan2 { + label = "rt-acrh17:blue:lan2"; + gpios = <&tlmm 43 GPIO_ACTIVE_LOW>; + }; + + lan3 { + label = "rt-acrh17:blue:lan3"; + gpios = <&tlmm 42 GPIO_ACTIVE_LOW>; + }; + + lan4 { + label = "rt-acrh17:blue:lan4"; + gpios = <&tlmm 49 GPIO_ACTIVE_LOW>; + }; + + wan_blue { + label = "rt-acrh17:blue:wan"; + gpios = <&tlmm 61 GPIO_ACTIVE_HIGH>; + }; + + wan_red { + label = "rt-acrh17:red:wan"; + gpios = <&tlmm 68 GPIO_ACTIVE_HIGH>; + }; + + wlan2g { + label = "rt-acrh17:blue:wlan2g"; + gpios = <&tlmm 52 GPIO_ACTIVE_LOW>; + }; + + wlan5g { + label = "rt-acrh17:blue:wlan5g"; + gpios = <&tlmm 54 GPIO_ACTIVE_LOW>; + }; + }; +}; + +&nand_pins { + pullups { + pins = "gpio53", "gpio58", + "gpio59"; + function = "qpic"; + bias-pull-up; + }; + + pulldowns { + pins = "gpio55", "gpio56", + "gpio57", "gpio60", + "gpio62", "gpio63", "gpio64", + "gpio65", "gpio66", "gpio67", + "gpio69"; + function = "qpic"; + bias-pull-down; + }; +}; + +&i2c_0_pins { + pinmux { + function = "blsp_i2c0"; + pins = "gpio10"; + }; + pinconf { + pins = "gpio10"; + drive-strength = <16>; + bias-disable; + }; +}; diff --git a/target/linux/ipq40xx/files-4.9/arch/arm/boot/dts/qcom-ipq8064-ap148.dts 
b/target/linux/ipq40xx/files-4.9/arch/arm/boot/dts/qcom-ipq8064-ap148.dts new file mode 100644 index 000000000..39a0d9656 --- /dev/null +++ b/target/linux/ipq40xx/files-4.9/arch/arm/boot/dts/qcom-ipq8064-ap148.dts @@ -0,0 +1,246 @@ +#include "qcom-ipq8064-v1.0.dtsi" + +/ { + model = "Qualcomm IPQ8064/AP148"; + compatible = "qcom,ipq8064-ap148", "qcom,ipq8064"; + + memory@0 { + reg = <0x42000000 0x1e000000>; + device_type = "memory"; + }; + + reserved-memory { + #address-cells = <1>; + #size-cells = <1>; + ranges; + rsvd@41200000 { + reg = <0x41200000 0x300000>; + no-map; + }; + }; + + aliases { + serial0 = &gsbi4_serial; + mdio-gpio0 = &mdio0; + }; + + chosen { + linux,stdout-path = "serial0:115200n8"; + }; + + soc { + pinmux@800000 { + i2c4_pins: i2c4_pinmux { + pins = "gpio12", "gpio13"; + function = "gsbi4"; + bias-disable; + }; + + spi_pins: spi_pins { + mux { + pins = "gpio18", "gpio19", "gpio21"; + function = "gsbi5"; + drive-strength = <10>; + bias-none; + }; + }; + nand_pins: nand_pins { + mux { + pins = "gpio34", "gpio35", "gpio36", + "gpio37", "gpio38", "gpio39", + "gpio40", "gpio41", "gpio42", + "gpio43", "gpio44", "gpio45", + "gpio46", "gpio47"; + function = "nand"; + drive-strength = <10>; + bias-disable; + }; + pullups { + pins = "gpio39"; + bias-pull-up; + }; + hold { + pins = "gpio40", "gpio41", "gpio42", + "gpio43", "gpio44", "gpio45", + "gpio46", "gpio47"; + bias-bus-hold; + }; + }; + + mdio0_pins: mdio0_pins { + mux { + pins = "gpio0", "gpio1"; + function = "gpio"; + drive-strength = <8>; + bias-disable; + }; + }; + + rgmii2_pins: rgmii2_pins { + mux { + pins = "gpio27", "gpio28", "gpio29", "gpio30", "gpio31", "gpio32", + "gpio51", "gpio52", "gpio59", "gpio60", "gpio61", "gpio62" ; + function = "rgmii2"; + drive-strength = <8>; + bias-disable; + }; + }; + }; + + gsbi@16300000 { + qcom,mode = ; + status = "ok"; + serial@16340000 { + status = "ok"; + }; + + /* + * The i2c device on gsbi4 should not be enabled. 
+ * On ipq806x designs gsbi4 i2c is meant for exclusive + * RPM usage. Turning this on in kernel manifests as + * i2c failure for the RPM. + */ + }; + + gsbi5: gsbi@1a200000 { + qcom,mode = ; + status = "ok"; + + spi4: spi@1a280000 { + status = "ok"; + spi-max-frequency = <50000000>; + + pinctrl-0 = <&spi_pins>; + pinctrl-names = "default"; + + cs-gpios = <&qcom_pinmux 20 0>; + + flash: m25p80@0 { + compatible = "s25fl256s1"; + #address-cells = <1>; + #size-cells = <1>; + spi-max-frequency = <50000000>; + reg = <0>; + + linux,part-probe = "qcom-smem"; + }; + }; + }; + + sata-phy@1b400000 { + status = "ok"; + }; + + sata@29000000 { + status = "ok"; + }; + + phy@100f8800 { /* USB3 port 1 HS phy */ + status = "ok"; + }; + + phy@100f8830 { /* USB3 port 1 SS phy */ + status = "ok"; + }; + + phy@110f8800 { /* USB3 port 0 HS phy */ + status = "ok"; + }; + + phy@110f8830 { /* USB3 port 0 SS phy */ + status = "ok"; + }; + + usb30@0 { + status = "ok"; + }; + + usb30@1 { + status = "ok"; + }; + + pcie0: pci@1b500000 { + status = "ok"; + }; + + pcie1: pci@1b700000 { + status = "ok"; + force_gen1 = <1>; + }; + + nand@1ac00000 { + status = "ok"; + + pinctrl-0 = <&nand_pins>; + pinctrl-names = "default"; + + cs0 { + reg = <0>; + compatible = "qcom,nandcs"; + + nand-ecc-strength = <4>; + nand-bus-width = <8>; + nand-ecc-step-size = <512>; + + linux,part-probe = "qcom-smem"; + }; + }; + + mdio0: mdio { + compatible = "virtual,mdio-gpio"; + #address-cells = <1>; + #size-cells = <0>; + gpios = <&qcom_pinmux 1 0 &qcom_pinmux 0 0>; + pinctrl-0 = <&mdio0_pins>; + pinctrl-names = "default"; + + phy0: ethernet-phy@0 { + device_type = "ethernet-phy"; + reg = <0>; + qca,ar8327-initvals = < + 0x00004 0x7600000 /* PAD0_MODE */ + 0x00008 0x1000000 /* PAD5_MODE */ + 0x0000c 0x80 /* PAD6_MODE */ + 0x000e4 0x6a545 /* MAC_POWER_SEL */ + 0x000e0 0xc74164de /* SGMII_CTRL */ + 0x0007c 0x4e /* PORT0_STATUS */ + 0x00094 0x4e /* PORT6_STATUS */ + >; + }; + + phy4: ethernet-phy@4 { + device_type = 
"ethernet-phy"; + reg = <4>; + }; + }; + + gmac1: ethernet@37200000 { + status = "ok"; + phy-mode = "rgmii"; + qcom,id = <1>; + + pinctrl-0 = <&rgmii2_pins>; + pinctrl-names = "default"; + + fixed-link { + speed = <1000>; + full-duplex; + }; + }; + + gmac2: ethernet@37400000 { + status = "ok"; + phy-mode = "sgmii"; + qcom,id = <2>; + + fixed-link { + speed = <1000>; + full-duplex; + }; + }; + }; +}; + +&adm_dma { + status = "ok"; +}; diff --git a/target/linux/ipq40xx/files-4.9/arch/arm/boot/dts/qcom-ipq8064-c2600.dts b/target/linux/ipq40xx/files-4.9/arch/arm/boot/dts/qcom-ipq8064-c2600.dts new file mode 100644 index 000000000..a4fd13429 --- /dev/null +++ b/target/linux/ipq40xx/files-4.9/arch/arm/boot/dts/qcom-ipq8064-c2600.dts @@ -0,0 +1,500 @@ +#include "qcom-ipq8064-v1.0.dtsi" + +#include + +/ { + model = "TP-Link Archer C2600"; + compatible = "tplink,c2600", "qcom,ipq8064"; + + memory@0 { + reg = <0x42000000 0x1e000000>; + device_type = "memory"; + }; + + reserved-memory { + #address-cells = <1>; + #size-cells = <1>; + ranges; + rsvd@41200000 { + reg = <0x41200000 0x300000>; + no-map; + }; + }; + + aliases { + serial0 = &gsbi4_serial; + mdio-gpio0 = &mdio0; + + led-boot = &power; + led-failsafe = &general; + led-running = &power; + led-upgrade = &general; + }; + + chosen { + linux,stdout-path = "serial0:115200n8"; + }; + + soc { + pinmux@800000 { + button_pins: button_pins { + mux { + pins = "gpio16", "gpio54", "gpio65"; + function = "gpio"; + drive-strength = <2>; + bias-pull-up; + }; + }; + + i2c4_pins: i2c4_pinmux { + mux { + pins = "gpio12", "gpio13"; + function = "gsbi4"; + drive-strength = <12>; + bias-disable; + }; + }; + + led_pins: led_pins { + mux { + pins = "gpio6", "gpio7", "gpio8", "gpio9", "gpio26", "gpio33", + "gpio53", "gpio66"; + function = "gpio"; + drive-strength = <2>; + bias-pull-up; + }; + }; + + spi_pins: spi_pins { + mux { + pins = "gpio18", "gpio19", "gpio21"; + function = "gsbi5"; + bias-pull-down; + }; + + data { + pins = "gpio18", 
"gpio19"; + drive-strength = <10>; + }; + + cs { + pins = "gpio20"; + function = "gpio"; + drive-strength = <10>; + bias-pull-up; + }; + + clk { + pins = "gpio21"; + drive-strength = <12>; + }; + }; + + mdio0_pins: mdio0_pins { + mux { + pins = "gpio0", "gpio1"; + function = "gpio"; + drive-strength = <8>; + bias-disable; + }; + }; + + rgmii2_pins: rgmii2_pins { + mux { + pins = "gpio27", "gpio28", "gpio29", "gpio30", "gpio31", "gpio32", + "gpio51", "gpio52", "gpio59", "gpio60", "gpio61", "gpio62" ; + function = "rgmii2"; + drive-strength = <8>; + bias-disable; + }; + }; + + usb0_pwr_en_pin: usb0_pwr_en_pin { + mux { + pins = "gpio25"; + function = "gpio"; + drive-strength = <10>; + bias-pull-up; + output-high; + }; + }; + + usb1_pwr_en_pin: usb1_pwr_en_pin { + mux { + pins = "gpio23"; + function = "gpio"; + drive-strength = <10>; + bias-pull-up; + output-high; + }; + }; + }; + + gsbi@16300000 { + qcom,mode = ; + status = "ok"; + serial@16340000 { + status = "ok"; + }; + /* + * The i2c device on gsbi4 should not be enabled. + * On ipq806x designs gsbi4 i2c is meant for exclusive + * RPM usage. Turning this on in kernel manifests as + * i2c failure for the RPM. 
+ */ + }; + + gsbi5: gsbi@1a200000 { + qcom,mode = ; + status = "ok"; + + spi5: spi@1a280000 { + status = "ok"; + + pinctrl-0 = <&spi_pins>; + pinctrl-names = "default"; + + cs-gpios = <&qcom_pinmux 20 GPIO_ACTIVE_HIGH>; + + flash: m25p80@0 { + compatible = "jedec,spi-nor"; + #address-cells = <1>; + #size-cells = <1>; + spi-max-frequency = <50000000>; + reg = <0>; + + SBL1@0 { + label = "SBL1"; + reg = <0x0 0x20000>; + read-only; + }; + + MIBIB@20000 { + label = "MIBIB"; + reg = <0x20000 0x20000>; + read-only; + }; + + SBL2@40000 { + label = "SBL2"; + reg = <0x40000 0x20000>; + read-only; + }; + + SBL3@60000 { + label = "SBL3"; + reg = <0x60000 0x30000>; + read-only; + }; + + DDRCONFIG@90000 { + label = "DDRCONFIG"; + reg = <0x90000 0x10000>; + read-only; + }; + + SSD@a0000 { + label = "SSD"; + reg = <0xa0000 0x10000>; + read-only; + }; + + TZ@b0000 { + label = "TZ"; + reg = <0xb0000 0x30000>; + read-only; + }; + + RPM@e0000 { + label = "RPM"; + reg = <0xe0000 0x20000>; + read-only; + }; + + fs-uboot@100000 { + label = "fs-uboot"; + reg = <0x100000 0x70000>; + read-only; + }; + + uboot-env@170000 { + label = "uboot-env"; + reg = <0x170000 0x40000>; + read-only; + }; + + radio@1b0000 { + label = "radio"; + reg = <0x1b0000 0x40000>; + read-only; + }; + + os-image@1f0000 { + label = "os-image"; + reg = <0x1f0000 0x200000>; + }; + + rootfs@3f0000 { + label = "rootfs"; + reg = <0x3f0000 0x1b00000>; + }; + + defaultmac: default-mac@1ef0000 { + label = "default-mac"; + reg = <0x1ef0000 0x00200>; + read-only; + }; + + pin@1ef0200 { + label = "pin"; + reg = <0x1ef0200 0x00200>; + read-only; + }; + + product-info@1ef0400 { + label = "product-info"; + reg = <0x1ef0400 0x0fc00>; + read-only; + }; + + partition-table@1f00000 { + label = "partition-table"; + reg = <0x1f00000 0x10000>; + read-only; + }; + + soft-version@1f10000 { + label = "soft-version"; + reg = <0x1f10000 0x10000>; + read-only; + }; + + support-list@1f20000 { + label = "support-list"; + reg = <0x1f20000 
0x10000>; + read-only; + }; + + profile@1f30000 { + label = "profile"; + reg = <0x1f30000 0x10000>; + read-only; + }; + + default-config@1f40000 { + label = "default-config"; + reg = <0x1f40000 0x10000>; + read-only; + }; + + user-config@1f50000 { + label = "user-config"; + reg = <0x1f50000 0x40000>; + read-only; + }; + + qos-db@1f90000 { + label = "qos-db"; + reg = <0x1f90000 0x40000>; + read-only; + }; + + usb-config@1fd0000 { + label = "usb-config"; + reg = <0x1fd0000 0x10000>; + read-only; + }; + + log@1fe0000 { + label = "log"; + reg = <0x1fe0000 0x20000>; + read-only; + }; + }; + }; + }; + + phy@100f8800 { /* USB3 port 1 HS phy */ + status = "ok"; + }; + + phy@100f8830 { /* USB3 port 1 SS phy */ + status = "ok"; + }; + + phy@110f8800 { /* USB3 port 0 HS phy */ + status = "ok"; + }; + + phy@110f8830 { /* USB3 port 0 SS phy */ + status = "ok"; + }; + + usb30@0 { + status = "ok"; + + pinctrl-0 = <&usb0_pwr_en_pin>; + pinctrl-names = "default"; + }; + + usb30@1 { + status = "ok"; + + pinctrl-0 = <&usb1_pwr_en_pin>; + pinctrl-names = "default"; + }; + + pcie0: pci@1b500000 { + status = "ok"; + }; + + pcie1: pci@1b700000 { + status = "ok"; + force_gen1 = <1>; + }; + + mdio0: mdio { + compatible = "virtual,mdio-gpio"; + #address-cells = <1>; + #size-cells = <0>; + gpios = <&qcom_pinmux 1 GPIO_ACTIVE_HIGH &qcom_pinmux 0 GPIO_ACTIVE_HIGH>; + pinctrl-0 = <&mdio0_pins>; + pinctrl-names = "default"; + + phy0: ethernet-phy@0 { + device_type = "ethernet-phy"; + reg = <0>; + qca,ar8327-initvals = < + 0x00004 0x7600000 /* PAD0_MODE */ + 0x00008 0x1000000 /* PAD5_MODE */ + 0x0000c 0x80 /* PAD6_MODE */ + 0x000e4 0x6a545 /* MAC_POWER_SEL */ + 0x000e0 0xc74164de /* SGMII_CTRL */ + 0x0007c 0x4e /* PORT0_STATUS */ + 0x00094 0x4e /* PORT6_STATUS */ + >; + }; + + phy4: ethernet-phy@4 { + device_type = "ethernet-phy"; + reg = <4>; + }; + }; + + gmac1: ethernet@37200000 { + status = "ok"; + phy-mode = "rgmii"; + qcom,id = <1>; + + pinctrl-0 = <&rgmii2_pins>; + pinctrl-names = 
"default"; + + mtd-mac-address = <&defaultmac 0x8>; + mtd-mac-address-increment = <1>; + + fixed-link { + speed = <1000>; + full-duplex; + }; + }; + + gmac2: ethernet@37400000 { + status = "ok"; + phy-mode = "sgmii"; + qcom,id = <2>; + + mtd-mac-address = <&defaultmac 0x8>; + + fixed-link { + speed = <1000>; + full-duplex; + }; + }; + + rpm@108000 { + pinctrl-0 = <&i2c4_pins>; + pinctrl-names = "default"; + }; + }; + + gpio-keys { + compatible = "gpio-keys"; + pinctrl-0 = <&button_pins>; + pinctrl-names = "default"; + + wifi { + label = "wifi"; + gpios = <&qcom_pinmux 49 GPIO_ACTIVE_LOW>; + linux,code = ; + }; + + reset { + label = "reset"; + gpios = <&qcom_pinmux 64 GPIO_ACTIVE_LOW>; + linux,code = ; + }; + + wps { + label = "wps"; + gpios = <&qcom_pinmux 65 GPIO_ACTIVE_LOW>; + linux,code = ; + }; + + ledswitch { + label = "ledswitch"; + gpios = <&qcom_pinmux 16 GPIO_ACTIVE_LOW>; + linux,code = ; + }; + }; + + gpio-leds { + compatible = "gpio-leds"; + pinctrl-0 = <&led_pins>; + pinctrl-names = "default"; + + lan { + label = "c2600:white:lan"; + gpios = <&qcom_pinmux 6 GPIO_ACTIVE_HIGH>; + }; + + usb4 { + label = "c2600:white:usb_4"; + gpios = <&qcom_pinmux 7 GPIO_ACTIVE_HIGH>; + }; + + usb2 { + label = "c2600:white:usb_2"; + gpios = <&qcom_pinmux 8 GPIO_ACTIVE_HIGH>; + }; + + wps { + label = "c2600:white:wps"; + gpios = <&qcom_pinmux 9 GPIO_ACTIVE_HIGH>; + }; + + wan_amber { + label = "c2600:amber:wan"; + gpios = <&qcom_pinmux 26 GPIO_ACTIVE_LOW>; + }; + + wan_white { + label = "c2600:white:wan"; + gpios = <&qcom_pinmux 33 GPIO_ACTIVE_LOW>; + }; + + power: power { + label = "c2600:white:power"; + gpios = <&qcom_pinmux 53 GPIO_ACTIVE_HIGH>; + default-state = "keep"; + }; + + general: general { + label = "c2600:white:general"; + gpios = <&qcom_pinmux 66 GPIO_ACTIVE_HIGH>; + }; + }; +}; + +&adm_dma { + status = "ok"; +}; diff --git a/target/linux/ipq40xx/files-4.9/arch/arm/boot/dts/qcom-ipq8064-d7800.dts 
b/target/linux/ipq40xx/files-4.9/arch/arm/boot/dts/qcom-ipq8064-d7800.dts new file mode 100644 index 000000000..b7c49cc81 --- /dev/null +++ b/target/linux/ipq40xx/files-4.9/arch/arm/boot/dts/qcom-ipq8064-d7800.dts @@ -0,0 +1,419 @@ +#include "qcom-ipq8064-v1.0.dtsi" + +#include + +/ { + model = "Netgear Nighthawk X4 D7800"; + compatible = "netgear,d7800", "qcom,ipq8064"; + + memory@0 { + reg = <0x42000000 0xe000000>; + device_type = "memory"; + }; + + reserved-memory { + #address-cells = <1>; + #size-cells = <1>; + ranges; + rsvd@41200000 { + reg = <0x41200000 0x300000>; + no-map; + }; + }; + + aliases { + serial0 = &gsbi4_serial; + mdio-gpio0 = &mdio0; + + led-boot = &power_white; + led-failsafe = &power_amber; + led-running = &power_white; + led-upgrade = &power_amber; + }; + + chosen { + bootargs = "rootfstype=squashfs noinitrd"; + linux,stdout-path = "serial0:115200n8"; + }; + + soc { + pinmux@800000 { + button_pins: button_pins { + mux { + pins = "gpio6", "gpio54", "gpio65"; + function = "gpio"; + drive-strength = <2>; + bias-pull-up; + }; + }; + + i2c4_pins: i2c4_pinmux { + mux { + pins = "gpio12", "gpio13"; + function = "gsbi4"; + drive-strength = <12>; + bias-disable; + }; + }; + + led_pins: led_pins { + mux { + pins = "gpio7", "gpio8", "gpio9", "gpio22", "gpio23", + "gpio24","gpio26", "gpio53", "gpio64"; + function = "gpio"; + drive-strength = <2>; + bias-pull-up; + }; + }; + + mdio0_pins: mdio0_pins { + mux { + pins = "gpio0", "gpio1"; + function = "gpio"; + drive-strength = <8>; + bias-disable; + }; + }; + + nand_pins: nand_pins { + mux { + pins = "gpio34", "gpio35", "gpio36", + "gpio37", "gpio38", "gpio39", + "gpio40", "gpio41", "gpio42", + "gpio43", "gpio44", "gpio45", + "gpio46", "gpio47"; + function = "nand"; + drive-strength = <10>; + bias-disable; + }; + pullups { + pins = "gpio39"; + bias-pull-up; + }; + hold { + pins = "gpio40", "gpio41", "gpio42", + "gpio43", "gpio44", "gpio45", + "gpio46", "gpio47"; + bias-bus-hold; + }; + }; + + rgmii2_pins: 
rgmii2_pins { + mux { + pins = "gpio27", "gpio28", "gpio29", "gpio30", "gpio31", "gpio32", + "gpio51", "gpio52", "gpio59", "gpio60", "gpio61", "gpio62" ; + function = "rgmii2"; + drive-strength = <8>; + bias-disable; + }; + }; + + usb0_pwr_en_pins: usb0_pwr_en_pins { + mux { + pins = "gpio15"; + function = "gpio"; + drive-strength = <12>; + bias-pull-down; + output-high; + }; + }; + + usb1_pwr_en_pins: usb1_pwr_en_pins { + mux { + pins = "gpio16", "gpio68"; + function = "gpio"; + drive-strength = <12>; + bias-pull-down; + output-high; + }; + }; + }; + + gsbi@16300000 { + qcom,mode = ; + status = "ok"; + serial@16340000 { + status = "ok"; + }; + /* + * The i2c device on gsbi4 should not be enabled. + * On ipq806x designs gsbi4 i2c is meant for exclusive + * RPM usage. Turning this on in kernel manifests as + * i2c failure for the RPM. + */ + }; + + sata-phy@1b400000 { + status = "ok"; + }; + + sata@29000000 { + status = "ok"; + }; + + phy@100f8800 { /* USB3 port 1 HS phy */ + status = "ok"; + }; + + phy@100f8830 { /* USB3 port 1 SS phy */ + status = "ok"; + }; + + phy@110f8800 { /* USB3 port 0 HS phy */ + status = "ok"; + }; + + phy@110f8830 { /* USB3 port 0 SS phy */ + status = "ok"; + }; + + usb30@0 { + status = "ok"; + + pinctrl-0 = <&usb0_pwr_en_pins>; + pinctrl-names = "default"; + }; + + usb30@1 { + status = "ok"; + + pinctrl-0 = <&usb1_pwr_en_pins>; + pinctrl-names = "default"; + }; + + pcie0: pci@1b500000 { + status = "ok"; + reset-gpio = <&qcom_pinmux 3 GPIO_ACTIVE_HIGH>; + pinctrl-0 = <&pcie0_pins>; + pinctrl-names = "default"; + }; + + pcie1: pci@1b700000 { + status = "ok"; + reset-gpio = <&qcom_pinmux 48 GPIO_ACTIVE_HIGH>; + pinctrl-0 = <&pcie1_pins>; + pinctrl-names = "default"; + force_gen1 = <1>; + }; + + nand@1ac00000 { + status = "ok"; + + pinctrl-0 = <&nand_pins>; + pinctrl-names = "default"; + + #address-cells = <1>; + #size-cells = <1>; + + cs0 { + reg = <0>; + compatible = "qcom,nandcs"; + + nand-ecc-strength = <4>; + nand-bus-width = <8>; + 
nand-ecc-step-size = <512>; + + partitions { + compatible = "fixed-partitions"; + #address-cells = <1>; + #size-cells = <1>; + + qcadata@0 { + label = "qcadata"; + reg = <0x0000000 0x0c80000>; + read-only; + }; + + APPSBL@c80000 { + label = "APPSBL"; + reg = <0x0c80000 0x0500000>; + read-only; + }; + + APPSBLENV@1180000 { + label = "APPSBLENV"; + reg = <0x1180000 0x0080000>; + read-only; + }; + + art: art@1200000 { + label = "art"; + reg = <0x1200000 0x0140000>; + read-only; + }; + + artbak: art@1340000 { + label = "artbak"; + reg = <0x1340000 0x0140000>; + read-only; + }; + + kernel@1480000 { + label = "kernel"; + reg = <0x1480000 0x0200000>; + }; + + ubi@1680000 { + label = "ubi"; + reg = <0x1680000 0x1E00000>; + }; + + netgear@3480000 { + label = "netgear"; + reg = <0x3480000 0x4480000>; + read-only; + }; + + reserve@7900000 { + label = "reserve"; + reg = <0x7900000 0x0700000>; + read-only; + }; + + firmware@1480000 { + label = "firmware"; + reg = <0x1480000 0x2000000>; + }; + }; + }; + }; + + mdio0: mdio { + compatible = "virtual,mdio-gpio"; + #address-cells = <1>; + #size-cells = <0>; + gpios = <&qcom_pinmux 1 GPIO_ACTIVE_HIGH &qcom_pinmux 0 GPIO_ACTIVE_HIGH>; + pinctrl-0 = <&mdio0_pins>; + pinctrl-names = "default"; + + phy0: ethernet-phy@0 { + device_type = "ethernet-phy"; + reg = <0>; + qca,ar8327-initvals = < + 0x00004 0x7600000 /* PAD0_MODE */ + 0x00008 0x1000000 /* PAD5_MODE */ + 0x0000c 0x80 /* PAD6_MODE */ + 0x000e4 0x6a545 /* MAC_POWER_SEL */ + 0x000e0 0xc74164de /* SGMII_CTRL */ + 0x0007c 0x4e /* PORT0_STATUS */ + 0x00094 0x4e /* PORT6_STATUS */ + >; + }; + + phy4: ethernet-phy@4 { + device_type = "ethernet-phy"; + reg = <4>; + }; + }; + + gmac1: ethernet@37200000 { + status = "ok"; + phy-mode = "rgmii"; + phy-handle = <&phy4>; + qcom,id = <1>; + + pinctrl-0 = <&rgmii2_pins>; + pinctrl-names = "default"; + + mtd-mac-address = <&art 6>; + }; + + gmac2: ethernet@37400000 { + status = "ok"; + phy-mode = "sgmii"; + qcom,id = <2>; + + mtd-mac-address = 
<&art 0>; + + fixed-link { + speed = <1000>; + full-duplex; + }; + }; + + rpm@108000 { + pinctrl-0 = <&i2c4_pins>; + pinctrl-names = "default"; + }; + }; + + gpio-keys { + compatible = "gpio-keys"; + pinctrl-0 = <&button_pins>; + pinctrl-names = "default"; + + wifi { + label = "wifi"; + gpios = <&qcom_pinmux 6 GPIO_ACTIVE_LOW>; + linux,code = ; + }; + + reset { + label = "reset"; + gpios = <&qcom_pinmux 54 GPIO_ACTIVE_LOW>; + linux,code = ; + }; + + wps { + label = "wps"; + gpios = <&qcom_pinmux 65 GPIO_ACTIVE_LOW>; + linux,code = ; + }; + }; + + gpio-leds { + compatible = "gpio-leds"; + pinctrl-0 = <&led_pins>; + pinctrl-names = "default"; + + usb1 { + label = "d7800:white:usb1"; + gpios = <&qcom_pinmux 7 GPIO_ACTIVE_HIGH>; + }; + + usb2 { + label = "d7800:white:usb2"; + gpios = <&qcom_pinmux 8 GPIO_ACTIVE_HIGH>; + }; + + power_amber: power_amber { + label = "d7800:amber:power"; + gpios = <&qcom_pinmux 9 GPIO_ACTIVE_HIGH>; + }; + + wan_white { + label = "d7800:white:wan"; + gpios = <&qcom_pinmux 22 GPIO_ACTIVE_HIGH>; + }; + + wan_amber { + label = "d7800:amber:wan"; + gpios = <&qcom_pinmux 23 GPIO_ACTIVE_HIGH>; + }; + + wps { + label = "d7800:white:wps"; + gpios = <&qcom_pinmux 24 GPIO_ACTIVE_HIGH>; + }; + + esata { + label = "d7800:white:esata"; + gpios = <&qcom_pinmux 26 GPIO_ACTIVE_HIGH>; + }; + + power_white: power_white { + label = "d7800:white:power"; + gpios = <&qcom_pinmux 53 GPIO_ACTIVE_HIGH>; + default-state = "keep"; + }; + + wifi { + label = "d7800:white:wifi"; + gpios = <&qcom_pinmux 64 GPIO_ACTIVE_HIGH>; + }; + }; +}; + +&adm_dma { + status = "ok"; +}; diff --git a/target/linux/ipq40xx/files-4.9/arch/arm/boot/dts/qcom-ipq8064-db149.dts b/target/linux/ipq40xx/files-4.9/arch/arm/boot/dts/qcom-ipq8064-db149.dts new file mode 100644 index 000000000..4c5686607 --- /dev/null +++ b/target/linux/ipq40xx/files-4.9/arch/arm/boot/dts/qcom-ipq8064-db149.dts @@ -0,0 +1,236 @@ +#include "qcom-ipq8064-v1.0.dtsi" + +/ { + model = "Qualcomm IPQ8064/DB149"; + 
compatible = "qcom,ipq8064-db149", "qcom,ipq8064"; + + reserved-memory { + #address-cells = <1>; + #size-cells = <1>; + ranges; + rsvd@41200000 { + reg = <0x41200000 0x300000>; + no-map; + }; + }; + + alias { + serial0 = &uart2; + mdio-gpio0 = &mdio0; + }; + + chosen { + linux,stdout-path = "serial0:115200n8"; + }; + + soc { + pinmux@800000 { + i2c4_pins: i2c4_pinmux { + pins = "gpio12", "gpio13"; + function = "gsbi4"; + bias-disable; + }; + + spi_pins: spi_pins { + mux { + pins = "gpio18", "gpio19", "gpio21"; + function = "gsbi5"; + drive-strength = <10>; + bias-none; + }; + }; + + mdio0_pins: mdio0_pins { + mux { + pins = "gpio0", "gpio1"; + function = "gpio"; + drive-strength = <8>; + bias-disable; + }; + }; + + rgmii0_pins: rgmii0_pins { + mux { + pins = "gpio2", "gpio66"; + drive-strength = <8>; + bias-disable; + }; + }; + }; + + gsbi2: gsbi@12480000 { + qcom,mode = ; + status = "ok"; + uart2: serial@12490000 { + status = "ok"; + }; + }; + + gsbi5: gsbi@1a200000 { + qcom,mode = ; + status = "ok"; + + spi4: spi@1a280000 { + status = "ok"; + spi-max-frequency = <50000000>; + + pinctrl-0 = <&spi_pins>; + pinctrl-names = "default"; + + cs-gpios = <&qcom_pinmux 20 0>; + + flash: m25p80@0 { + compatible = "s25fl256s1"; + #address-cells = <1>; + #size-cells = <1>; + spi-max-frequency = <50000000>; + reg = <0>; + m25p,fast-read; + + partition@0 { + label = "lowlevel_init"; + reg = <0x0 0x1b0000>; + }; + + partition@1 { + label = "u-boot"; + reg = <0x1b0000 0x80000>; + }; + + partition@2 { + label = "u-boot-env"; + reg = <0x230000 0x40000>; + }; + + partition@3 { + label = "caldata"; + reg = <0x270000 0x40000>; + }; + + partition@4 { + label = "firmware"; + reg = <0x2b0000 0x1d50000>; + }; + }; + }; + }; + + sata-phy@1b400000 { + status = "ok"; + }; + + sata@29000000 { + status = "ok"; + }; + + phy@100f8800 { /* USB3 port 1 HS phy */ + status = "ok"; + }; + + phy@100f8830 { /* USB3 port 1 SS phy */ + status = "ok"; + }; + + phy@110f8800 { /* USB3 port 0 HS phy */ + 
status = "ok"; + }; + + phy@110f8830 { /* USB3 port 0 SS phy */ + status = "ok"; + }; + + usb30@0 { + status = "ok"; + }; + + usb30@1 { + status = "ok"; + }; + + pcie0: pci@1b500000 { + status = "ok"; + }; + + pcie1: pci@1b700000 { + status = "ok"; + }; + + pcie2: pci@1b900000 { + status = "ok"; + }; + + mdio0: mdio { + compatible = "virtual,mdio-gpio"; + #address-cells = <1>; + #size-cells = <0>; + gpios = <&qcom_pinmux 1 0 &qcom_pinmux 0 0>; + + pinctrl-0 = <&mdio0_pins>; + pinctrl-names = "default"; + + phy0: ethernet-phy@0 { + device_type = "ethernet-phy"; + reg = <0>; + qca,ar8327-initvals = < + 0x00004 0x7600000 /* PAD0_MODE */ + 0x00008 0x1000000 /* PAD5_MODE */ + 0x0000c 0x80 /* PAD6_MODE */ + 0x000e4 0x6a545 /* MAC_POWER_SEL */ + 0x000e0 0xc74164de /* SGMII_CTRL */ + 0x0007c 0x4e /* PORT0_STATUS */ + 0x00094 0x4e /* PORT6_STATUS */ + >; + }; + + phy4: ethernet-phy@4 { + device_type = "ethernet-phy"; + reg = <4>; + }; + + phy6: ethernet-phy@6 { + device_type = "ethernet-phy"; + reg = <6>; + }; + + phy7: ethernet-phy@7 { + device_type = "ethernet-phy"; + reg = <7>; + }; + }; + + gmac0: ethernet@37000000 { + status = "ok"; + phy-mode = "rgmii"; + qcom,id = <0>; + phy-handle = <&phy4>; + + pinctrl-0 = <&rgmii0_pins>; + pinctrl-names = "default"; + }; + + gmac1: ethernet@37200000 { + status = "ok"; + phy-mode = "sgmii"; + qcom,id = <1>; + + fixed-link { + speed = <1000>; + full-duplex; + }; + }; + + gmac2: ethernet@37400000 { + status = "ok"; + phy-mode = "sgmii"; + qcom,id = <2>; + phy-handle = <&phy6>; + }; + + gmac3: ethernet@37600000 { + status = "ok"; + phy-mode = "sgmii"; + qcom,id = <3>; + phy-handle = <&phy7>; + }; + }; +}; diff --git a/target/linux/ipq40xx/files-4.9/arch/arm/boot/dts/qcom-ipq8064-ea8500.dts b/target/linux/ipq40xx/files-4.9/arch/arm/boot/dts/qcom-ipq8064-ea8500.dts new file mode 100644 index 000000000..a8628ff93 --- /dev/null +++ b/target/linux/ipq40xx/files-4.9/arch/arm/boot/dts/qcom-ipq8064-ea8500.dts @@ -0,0 +1,406 @@ +#include 
"qcom-ipq8064-v1.0.dtsi" + +#include + +/ { + model = "Linksys EA8500 WiFi Router"; + compatible = "linksys,ea8500", "qcom,ipq8064"; + + memory@0 { + reg = <0x42000000 0x1e000000>; + device_type = "memory"; + }; + + reserved-memory { + #address-cells = <1>; + #size-cells = <1>; + ranges; + rsvd@41200000 { + reg = <0x41200000 0x300000>; + no-map; + }; + }; + + aliases { + serial0 = &gsbi4_serial; + mdio-gpio0 = &mdio0; + + led-boot = &power; + led-failsafe = &power; + led-running = &power; + led-upgrade = &power; + }; + + chosen { + bootargs = "console=ttyMSM0,115200n8"; + linux,stdout-path = "serial0:115200n8"; + append-rootblock = "ubi.mtd="; /* append to bootargs adding the root deviceblock nbr from bootloader */ + }; + + soc { + pinmux@800000 { + button_pins: button_pins { + mux { + pins = "gpio65", "gpio67", "gpio68"; + function = "gpio"; + drive-strength = <2>; + bias-pull-up; + }; + }; + + i2c4_pins: i2c4_pinmux { + mux { + pins = "gpio12", "gpio13"; + function = "gsbi4"; + drive-strength = <12>; + bias-disable; + }; + }; + + led_pins: led_pins { + mux { + pins = "gpio6", "gpio53", "gpio54"; + function = "gpio"; + drive-strength = <2>; + bias-pull-up; + }; + }; + + mdio0_pins: mdio0_pins { + mux { + pins = "gpio0", "gpio1"; + function = "gpio"; + drive-strength = <8>; + bias-disable; + }; + }; + + nand_pins: nand_pins { + mux { + pins = "gpio34", "gpio35", "gpio36", + "gpio37", "gpio38", "gpio39", + "gpio40", "gpio41", "gpio42", + "gpio43", "gpio44", "gpio45", + "gpio46", "gpio47"; + function = "nand"; + drive-strength = <10>; + bias-disable; + }; + pullups { + pins = "gpio39"; + bias-pull-up; + }; + hold { + pins = "gpio40", "gpio41", "gpio42", + "gpio43", "gpio44", "gpio45", + "gpio46", "gpio47"; + bias-bus-hold; + }; + }; + + rgmii2_pins: rgmii2_pins { + mux { + pins = "gpio27", "gpio28", "gpio29", "gpio30", "gpio31", "gpio32", + "gpio51", "gpio52", "gpio59", "gpio60", "gpio61", "gpio62" ; + function = "rgmii2"; + drive-strength = <8>; + bias-disable; + }; 
+ }; + }; + + gsbi@16300000 { + qcom,mode = ; + status = "ok"; + serial@16340000 { + status = "ok"; + }; + /* + * The i2c device on gsbi4 should not be enabled. + * On ipq806x designs gsbi4 i2c is meant for exclusive + * RPM usage. Turning this on in kernel manifests as + * i2c failure for the RPM. + */ + }; + + sata-phy@1b400000 { + status = "ok"; + }; + + sata@29000000 { + status = "ok"; + }; + + phy@100f8800 { /* USB3 port 1 HS phy */ + status = "ok"; + }; + + phy@100f8830 { /* USB3 port 1 SS phy */ + status = "ok"; + }; + + phy@110f8800 { /* USB3 port 0 HS phy */ + status = "ok"; + }; + + phy@110f8830 { /* USB3 port 0 SS phy */ + status = "ok"; + }; + + usb30@0 { + status = "ok"; + }; + + usb30@1 { + status = "ok"; + }; + + pcie0: pci@1b500000 { + status = "ok"; + force_gen1 = <1>; + }; + + pcie1: pci@1b700000 { + status = "ok"; + }; + + pcie2: pci@1b900000 { + status = "ok"; + }; + + nand@1ac00000 { + status = "ok"; + + pinctrl-0 = <&nand_pins>; + pinctrl-names = "default"; + + cs0 { + reg = <0>; + compatible = "qcom,nandcs"; + + nand-ecc-strength = <4>; + nand-bus-width = <8>; + nand-ecc-step-size = <512>; + + partitions { + compatible = "fixed-partitions"; + #address-cells = <1>; + #size-cells = <1>; + + SBL1@0 { + label = "SBL1"; + reg = <0x0000000 0x0040000>; + read-only; + }; + + MIBIB@40000 { + label = "MIBIB"; + reg = <0x0040000 0x0140000>; + read-only; + }; + + SBL2@180000 { + label = "SBL2"; + reg = <0x0180000 0x0140000>; + read-only; + }; + + SBL3@2c0000 { + label = "SBL3"; + reg = <0x02c0000 0x0280000>; + read-only; + }; + + DDRCONFIG@540000 { + label = "DDRCONFIG"; + reg = <0x0540000 0x0120000>; + read-only; + }; + + SSD@660000 { + label = "SSD"; + reg = <0x0660000 0x0120000>; + read-only; + }; + + TZ@780000 { + label = "TZ"; + reg = <0x0780000 0x0280000>; + read-only; + }; + + RPM@a00000 { + label = "RPM"; + reg = <0x0a00000 0x0280000>; + read-only; + }; + + art: art@c80000 { + label = "art"; + reg = <0x0c80000 0x0140000>; + read-only; + }; + + 
APPSBL@dc0000 { + label = "APPSBL"; + reg = <0x0dc0000 0x0100000>; + read-only; + }; + + u_env@ec0000 { + label = "u_env"; + reg = <0x0ec0000 0x0040000>; + }; + + s_env@f00000 { + label = "s_env"; + reg = <0x0f00000 0x0040000>; + }; + + devinfo@f40000 { + label = "devinfo"; + reg = <0x0f40000 0x0040000>; + }; + + linux@f80000 { + label = "kernel1"; + reg = <0x0f80000 0x2800000>; /* 3 MB spill to rootfs*/ + }; + + rootfs@1280000 { + label = "rootfs1"; + reg = <0x1280000 0x2500000>; + }; + + linux2@3780000 { + label = "kernel2"; + reg = <0x3780000 0x2800000>; + }; + + rootfs2@3a80000 { + label = "rootfs2"; + reg = <0x3a80000 0x2500000>; + }; + + syscfg@5f80000 { + label = "syscfg"; + reg = <0x5f80000 0x2080000>; + }; + }; + }; + }; + + mdio0: mdio { + compatible = "virtual,mdio-gpio"; + #address-cells = <1>; + #size-cells = <0>; + gpios = <&qcom_pinmux 1 GPIO_ACTIVE_HIGH &qcom_pinmux 0 GPIO_ACTIVE_HIGH>; + pinctrl-0 = <&mdio0_pins>; + pinctrl-names = "default"; + + phy0: ethernet-phy@0 { + device_type = "ethernet-phy"; + reg = <0>; + qca,ar8327-initvals = < + 0x00004 0x7600000 /* PAD0_MODE */ + 0x00008 0x1000000 /* PAD5_MODE */ + 0x0000c 0x80 /* PAD6_MODE */ + 0x000e4 0x6a545 /* MAC_POWER_SEL */ + 0x000e0 0xc74164de /* SGMII_CTRL */ + 0x0007c 0x4e /* PORT0_STATUS */ + 0x00094 0x4e /* PORT6_STATUS */ + >; + }; + + phy4: ethernet-phy@4 { + device_type = "ethernet-phy"; + reg = <4>; + }; + }; + + gmac1: ethernet@37200000 { + status = "ok"; + phy-mode = "rgmii"; + qcom,id = <1>; + qcom,phy_mdio_addr = <4>; + qcom,poll_required = <1>; + qcom,rgmii_delay = <0>; + qcom,emulation = <0>; + pinctrl-0 = <&rgmii2_pins>; + pinctrl-names = "default"; + fixed-link { + speed = <1000>; + full-duplex; + }; + }; + //lan + gmac2: ethernet@37400000 { + status = "ok"; + phy-mode = "sgmii"; + qcom,id = <2>; + qcom,phy_mdio_addr = <0>; /* none */ + qcom,poll_required = <0>; /* no polling */ + qcom,rgmii_delay = <0>; + qcom,emulation = <0>; + fixed-link { + speed = <1000>; + full-duplex; + 
}; + }; + + rpm@108000 { + pinctrl-0 = <&i2c4_pins>; + pinctrl-names = "default"; + }; + + adm_dma: dma@18300000 { + status = "ok"; + }; + }; + + gpio-keys { + compatible = "gpio-keys"; + pinctrl-0 = <&button_pins>; + pinctrl-names = "default"; + + wifi { + label = "wifi"; + gpios = <&qcom_pinmux 67 GPIO_ACTIVE_LOW>; + linux,code = ; + }; + + reset { + label = "reset"; + gpios = <&qcom_pinmux 68 GPIO_ACTIVE_LOW>; + linux,code = ; + }; + + wps { + label = "wps"; + gpios = <&qcom_pinmux 65 GPIO_ACTIVE_LOW>; + linux,code = ; + }; + }; + + gpio-leds { + compatible = "gpio-leds"; + pinctrl-0 = <&led_pins>; + pinctrl-names = "default"; + + wps { + label = "ea8500:green:wps"; + gpios = <&qcom_pinmux 53 GPIO_ACTIVE_HIGH>; + }; + + power: power { + label = "ea8500:white:power"; + gpios = <&qcom_pinmux 6 GPIO_ACTIVE_LOW>; + default-state = "keep"; + }; + + wifi { + label = "ea8500:green:wifi"; + gpios = <&qcom_pinmux 54 GPIO_ACTIVE_HIGH>; + }; + }; +}; diff --git a/target/linux/ipq40xx/files-4.9/arch/arm/boot/dts/qcom-ipq8064-r7500.dts b/target/linux/ipq40xx/files-4.9/arch/arm/boot/dts/qcom-ipq8064-r7500.dts new file mode 100644 index 000000000..3445a7925 --- /dev/null +++ b/target/linux/ipq40xx/files-4.9/arch/arm/boot/dts/qcom-ipq8064-r7500.dts @@ -0,0 +1,394 @@ +#include "qcom-ipq8064-v1.0.dtsi" + +#include +#include + +/ { + model = "Netgear Nighthawk X4 R7500"; + compatible = "netgear,r7500", "qcom,ipq8064"; + + memory@0 { + reg = <0x42000000 0xe000000>; + device_type = "memory"; + }; + + reserved-memory { + #address-cells = <1>; + #size-cells = <1>; + ranges; + rsvd@41200000 { + reg = <0x41200000 0x300000>; + no-map; + }; + }; + + aliases { + serial0 = &gsbi4_serial; + mdio-gpio0 = &mdio0; + + led-boot = &power_white; + led-failsafe = &power_amber; + led-running = &power_white; + led-upgrade = &power_amber; + }; + + chosen { + bootargs = "rootfstype=squashfs noinitrd"; + linux,stdout-path = "serial0:115200n8"; + }; + + soc { + pinmux@800000 { + button_pins: button_pins 
{ + mux { + pins = "gpio6", "gpio54", "gpio65"; + function = "gpio"; + drive-strength = <2>; + bias-pull-up; + }; + }; + + i2c4_pins: i2c4_pinmux { + mux { + pins = "gpio12", "gpio13"; + function = "gsbi4"; + drive-strength = <12>; + bias-disable; + }; + }; + + led_pins: led_pins { + mux { + pins = "gpio7", "gpio8", "gpio9", "gpio22", "gpio23", + "gpio24","gpio26", "gpio53", "gpio64"; + function = "gpio"; + drive-strength = <2>; + bias-pull-up; + }; + }; + + mdio0_pins: mdio0_pins { + mux { + pins = "gpio0", "gpio1"; + function = "gpio"; + drive-strength = <8>; + bias-disable; + }; + }; + + nand_pins: nand_pins { + mux { + pins = "gpio34", "gpio35", "gpio36", + "gpio37", "gpio38", "gpio39", + "gpio40", "gpio41", "gpio42", + "gpio43", "gpio44", "gpio45", + "gpio46", "gpio47"; + function = "nand"; + drive-strength = <10>; + bias-disable; + }; + pullups { + pins = "gpio39"; + bias-pull-up; + }; + hold { + pins = "gpio40", "gpio41", "gpio42", + "gpio43", "gpio44", "gpio45", + "gpio46", "gpio47"; + bias-bus-hold; + }; + }; + + rgmii2_pins: rgmii2_pins { + mux { + pins = "gpio27", "gpio28", "gpio29", "gpio30", "gpio31", "gpio32", + "gpio51", "gpio52", "gpio59", "gpio60", "gpio61", "gpio62" ; + function = "rgmii2"; + drive-strength = <8>; + bias-disable; + }; + }; + }; + + gsbi@16300000 { + qcom,mode = ; + status = "ok"; + serial@16340000 { + status = "ok"; + }; + /* + * The i2c device on gsbi4 should not be enabled. + * On ipq806x designs gsbi4 i2c is meant for exclusive + * RPM usage. Turning this on in kernel manifests as + * i2c failure for the RPM. 
+ */ + }; + + sata-phy@1b400000 { + status = "ok"; + }; + + sata@29000000 { + status = "ok"; + }; + + phy@100f8800 { /* USB3 port 1 HS phy */ + clocks = <&gcc USB30_0_UTMI_CLK>; + status = "ok"; + }; + + phy@100f8830 { /* USB3 port 1 SS phy */ + clocks = <&gcc USB30_0_MASTER_CLK>; + status = "ok"; + }; + + phy@110f8800 { /* USB3 port 0 HS phy */ + clocks = <&gcc USB30_1_UTMI_CLK>; + status = "ok"; + }; + + phy@110f8830 { /* USB3 port 0 SS phy */ + clocks = <&gcc USB30_1_MASTER_CLK>; + status = "ok"; + }; + + usb30@0 { + clocks = <&gcc USB30_1_MASTER_CLK>; + status = "ok"; + }; + + usb30@1 { + clocks = <&gcc USB30_0_MASTER_CLK>; + status = "ok"; + }; + + pcie0: pci@1b500000 { + status = "ok"; + }; + + pcie1: pci@1b700000 { + status = "ok"; + force_gen1 = <1>; + }; + + nand@1ac00000 { + status = "ok"; + + pinctrl-0 = <&nand_pins>; + pinctrl-names = "default"; + + cs0 { + reg = <0>; + compatible = "qcom,nandcs"; + + nand-ecc-strength = <4>; + nand-bus-width = <8>; + nand-ecc-step-size = <512>; + + partitions { + compatible = "fixed-partitions"; + #address-cells = <1>; + #size-cells = <1>; + + qcadata@0 { + label = "qcadata"; + reg = <0x0000000 0x0c80000>; + read-only; + }; + + APPSBL@c80000 { + label = "APPSBL"; + reg = <0x0c80000 0x0500000>; + read-only; + }; + + APPSBLENV@1180000 { + label = "APPSBLENV"; + reg = <0x1180000 0x0080000>; + read-only; + }; + + art: art@1200000 { + label = "art"; + reg = <0x1200000 0x0140000>; + read-only; + }; + + kernel@1340000 { + label = "kernel"; + reg = <0x1340000 0x0200000>; + }; + + ubi@1540000 { + label = "ubi"; + reg = <0x1540000 0x1800000>; + }; + + netgear@2d40000 { + label = "netgear"; + reg = <0x2d40000 0x0c00000>; + read-only; + }; + + reserve@3940000 { + label = "reserve"; + reg = <0x3940000 0x46c0000>; + read-only; + }; + + firmware@1340000 { + label = "firmware"; + reg = <0x1340000 0x1a00000>; + }; + }; + }; + }; + + mdio0: mdio { + compatible = "virtual,mdio-gpio"; + #address-cells = <1>; + #size-cells = <0>; + gpios = 
<&qcom_pinmux 1 GPIO_ACTIVE_HIGH &qcom_pinmux 0 GPIO_ACTIVE_HIGH>; + pinctrl-0 = <&mdio0_pins>; + pinctrl-names = "default"; + + phy0: ethernet-phy@0 { + device_type = "ethernet-phy"; + reg = <0>; + qca,ar8327-initvals = < + 0x00004 0x7600000 /* PAD0_MODE */ + 0x00008 0x1000000 /* PAD5_MODE */ + 0x0000c 0x80 /* PAD6_MODE */ + 0x000e4 0x6a545 /* MAC_POWER_SEL */ + 0x000e0 0xc74164de /* SGMII_CTRL */ + 0x0007c 0x4e /* PORT0_STATUS */ + 0x00094 0x4e /* PORT6_STATUS */ + >; + }; + + phy4: ethernet-phy@4 { + device_type = "ethernet-phy"; + reg = <4>; + }; + }; + + gmac1: ethernet@37200000 { + status = "ok"; + phy-mode = "rgmii"; + qcom,id = <1>; + + pinctrl-0 = <&rgmii2_pins>; + pinctrl-names = "default"; + + mtd-mac-address = <&art 6>; + + fixed-link { + speed = <1000>; + full-duplex; + }; + }; + + gmac2: ethernet@37400000 { + status = "ok"; + phy-mode = "sgmii"; + qcom,id = <2>; + + mtd-mac-address = <&art 0>; + + fixed-link { + speed = <1000>; + full-duplex; + }; + }; + + rpm@108000 { + pinctrl-0 = <&i2c4_pins>; + pinctrl-names = "default"; + }; + }; + + gpio-keys { + compatible = "gpio-keys"; + pinctrl-0 = <&button_pins>; + pinctrl-names = "default"; + + wifi { + label = "wifi"; + gpios = <&qcom_pinmux 6 GPIO_ACTIVE_LOW>; + linux,code = ; + }; + + reset { + label = "reset"; + gpios = <&qcom_pinmux 54 GPIO_ACTIVE_LOW>; + linux,code = ; + }; + + wps { + label = "wps"; + gpios = <&qcom_pinmux 65 GPIO_ACTIVE_LOW>; + linux,code = ; + }; + }; + + gpio-leds { + compatible = "gpio-leds"; + pinctrl-0 = <&led_pins>; + pinctrl-names = "default"; + + usb1 { + label = "r7500:white:usb1"; + gpios = <&qcom_pinmux 7 GPIO_ACTIVE_HIGH>; + }; + + usb2 { + label = "r7500:white:usb2"; + gpios = <&qcom_pinmux 8 GPIO_ACTIVE_HIGH>; + }; + + power_amber: power_amber { + label = "r7500:amber:power"; + gpios = <&qcom_pinmux 9 GPIO_ACTIVE_HIGH>; + }; + + wan_white { + label = "r7500:white:wan"; + gpios = <&qcom_pinmux 22 GPIO_ACTIVE_HIGH>; + }; + + wan_amber { + label = "r7500:amber:wan"; + 
gpios = <&qcom_pinmux 23 GPIO_ACTIVE_HIGH>; + }; + + wps { + label = "r7500:white:wps"; + gpios = <&qcom_pinmux 24 GPIO_ACTIVE_HIGH>; + }; + + esata { + label = "r7500:white:esata"; + gpios = <&qcom_pinmux 26 GPIO_ACTIVE_HIGH>; + }; + + power_white: power_white { + label = "r7500:white:power"; + gpios = <&qcom_pinmux 53 GPIO_ACTIVE_HIGH>; + default-state = "keep"; + }; + + wifi { + label = "r7500:white:wifi"; + gpios = <&qcom_pinmux 64 GPIO_ACTIVE_HIGH>; + }; + }; +}; + +&tcsr { + qcom,usb-ctrl-select = ; + compatible = "qcom,tcsr"; +}; + +&adm_dma { + status = "ok"; +}; diff --git a/target/linux/ipq40xx/files-4.9/arch/arm/boot/dts/qcom-ipq8064-r7500v2.dts b/target/linux/ipq40xx/files-4.9/arch/arm/boot/dts/qcom-ipq8064-r7500v2.dts new file mode 100644 index 000000000..c4b0c4b5a --- /dev/null +++ b/target/linux/ipq40xx/files-4.9/arch/arm/boot/dts/qcom-ipq8064-r7500v2.dts @@ -0,0 +1,425 @@ +#include "qcom-ipq8064-v1.0.dtsi" + +#include + +/ { + model = "Netgear Nighthawk X4 R7500v2"; + compatible = "netgear,r7500v2", "qcom,ipq8064"; + + memory@0 { + reg = <0x42000000 0x1e000000>; + device_type = "memory"; + }; + + reserved-memory { + #address-cells = <1>; + #size-cells = <1>; + ranges; + rsvd@41200000 { + reg = <0x41200000 0x300000>; + no-map; + }; + + rsvd@5fe00000 { + reg = <0x5fe00000 0x200000>; + reusable; + }; + }; + + aliases { + serial0 = &gsbi4_serial; + mdio-gpio0 = &mdio0; + + led-boot = &power; + led-failsafe = &power; + led-running = &power; + led-upgrade = &power; + }; + + chosen { + bootargs = "rootfstype=squashfs noinitrd"; + linux,stdout-path = "serial0:115200n8"; + }; + + soc { + pinmux@800000 { + button_pins: button_pins { + mux { + pins = "gpio6", "gpio54", "gpio65"; + function = "gpio"; + drive-strength = <2>; + bias-pull-up; + }; + }; + + i2c4_pins: i2c4_pinmux { + mux { + pins = "gpio12", "gpio13"; + function = "gsbi4"; + drive-strength = <12>; + bias-disable; + }; + }; + + led_pins: led_pins { + mux { + pins = "gpio7", "gpio8", "gpio9", 
"gpio22", "gpio23", + "gpio24","gpio26", "gpio53", "gpio64"; + function = "gpio"; + drive-strength = <2>; + bias-pull-up; + }; + }; + + mdio0_pins: mdio0_pins { + mux { + pins = "gpio0", "gpio1"; + function = "gpio"; + drive-strength = <8>; + bias-disable; + }; + }; + + nand_pins: nand_pins { + mux { + pins = "gpio34", "gpio35", "gpio36", + "gpio37", "gpio38", "gpio39", + "gpio40", "gpio41", "gpio42", + "gpio43", "gpio44", "gpio45", + "gpio46", "gpio47"; + function = "nand"; + drive-strength = <10>; + bias-disable; + }; + pullups { + pins = "gpio39"; + bias-pull-up; + }; + hold { + pins = "gpio40", "gpio41", "gpio42", + "gpio43", "gpio44", "gpio45", + "gpio46", "gpio47"; + bias-bus-hold; + }; + }; + + rgmii2_pins: rgmii2_pins { + mux { + pins = "gpio27", "gpio28", "gpio29", "gpio30", "gpio31", "gpio32", + "gpio51", "gpio52", "gpio59", "gpio60", "gpio61", "gpio62" ; + function = "rgmii2"; + drive-strength = <8>; + bias-disable; + }; + }; + + usb0_pwr_en_pins: usb0_pwr_en_pins { + mux { + pins = "gpio15"; + function = "gpio"; + drive-strength = <12>; + bias-pull-down; + output-high; + }; + }; + + usb1_pwr_en_pins: usb1_pwr_en_pins { + mux { + pins = "gpio16", "gpio68"; + function = "gpio"; + drive-strength = <12>; + bias-pull-down; + output-high; + }; + }; + }; + + gsbi@16300000 { + qcom,mode = ; + status = "ok"; + serial@16340000 { + status = "ok"; + }; + /* + * The i2c device on gsbi4 should not be enabled. + * On ipq806x designs gsbi4 i2c is meant for exclusive + * RPM usage. Turning this on in kernel manifests as + * i2c failure for the RPM. 
+ */ + }; + + sata-phy@1b400000 { + status = "ok"; + }; + + sata@29000000 { + status = "ok"; + }; + + phy@100f8800 { /* USB3 port 1 HS phy */ + status = "ok"; + }; + + phy@100f8830 { /* USB3 port 1 SS phy */ + status = "ok"; + }; + + phy@110f8800 { /* USB3 port 0 HS phy */ + status = "ok"; + }; + + phy@110f8830 { /* USB3 port 0 SS phy */ + status = "ok"; + }; + + usb30@0 { + status = "ok"; + + pinctrl-0 = <&usb0_pwr_en_pins>; + pinctrl-names = "default"; + }; + + usb30@1 { + status = "ok"; + + pinctrl-0 = <&usb1_pwr_en_pins>; + pinctrl-names = "default"; + }; + + pcie0: pci@1b500000 { + status = "ok"; + reset-gpio = <&qcom_pinmux 3 GPIO_ACTIVE_LOW>; + pinctrl-0 = <&pcie0_pins>; + pinctrl-names = "default"; + }; + + pcie1: pci@1b700000 { + status = "ok"; + reset-gpio = <&qcom_pinmux 48 GPIO_ACTIVE_LOW>; + pinctrl-0 = <&pcie1_pins>; + pinctrl-names = "default"; + force_gen1 = <1>; + }; + + nand@1ac00000 { + status = "ok"; + + pinctrl-0 = <&nand_pins>; + pinctrl-names = "default"; + + cs0 { + reg = <0>; + compatible = "qcom,nandcs"; + + nand-ecc-strength = <4>; + nand-bus-width = <8>; + nand-ecc-step-size = <512>; + + partitions { + compatible = "fixed-partitions"; + #address-cells = <1>; + #size-cells = <1>; + + qcadata@0 { + label = "qcadata"; + reg = <0x0000000 0x0c80000>; + read-only; + }; + + APPSBL@c80000 { + label = "APPSBL"; + reg = <0x0c80000 0x0500000>; + read-only; + }; + + APPSBLENV@1180000 { + label = "APPSBLENV"; + reg = <0x1180000 0x0080000>; + read-only; + }; + + art: art@1200000 { + label = "art"; + reg = <0x1200000 0x0140000>; + read-only; + }; + + artbak: art@1340000 { + label = "artbak"; + reg = <0x1340000 0x0140000>; + read-only; + }; + + kernel@1480000 { + label = "kernel"; + reg = <0x1480000 0x0200000>; + }; + + ubi@1680000 { + label = "ubi"; + reg = <0x1680000 0x1E00000>; + }; + + netgear@3480000 { + label = "netgear"; + reg = <0x3480000 0x4480000>; + read-only; + }; + + reserve@7900000 { + label = "reserve"; + reg = <0x7900000 0x0700000>; + 
read-only; + }; + + firmware@1480000 { + label = "firmware"; + reg = <0x1480000 0x2000000>; + }; + }; + }; + }; + + mdio0: mdio { + compatible = "virtual,mdio-gpio"; + #address-cells = <1>; + #size-cells = <0>; + gpios = <&qcom_pinmux 1 GPIO_ACTIVE_HIGH &qcom_pinmux 0 GPIO_ACTIVE_HIGH>; + pinctrl-0 = <&mdio0_pins>; + pinctrl-names = "default"; + + phy0: ethernet-phy@0 { + device_type = "ethernet-phy"; + reg = <0>; + qca,ar8327-initvals = < + 0x00004 0x7600000 /* PAD0_MODE */ + 0x00008 0x1000000 /* PAD5_MODE */ + 0x0000c 0x80 /* PAD6_MODE */ + 0x000e4 0xaa545 /* MAC_POWER_SEL */ + 0x000e0 0xc74164de /* SGMII_CTRL */ + 0x0007c 0x4e /* PORT0_STATUS */ + 0x00094 0x4e /* PORT6_STATUS */ + >; + }; + + phy4: ethernet-phy@4 { + device_type = "ethernet-phy"; + reg = <4>; + }; + }; + + gmac1: ethernet@37200000 { + status = "ok"; + phy-mode = "rgmii"; + qcom,id = <1>; + + pinctrl-0 = <&rgmii2_pins>; + pinctrl-names = "default"; + + mtd-mac-address = <&art 6>; + + fixed-link { + speed = <1000>; + full-duplex; + }; + }; + + gmac2: ethernet@37400000 { + status = "ok"; + phy-mode = "sgmii"; + qcom,id = <2>; + + mtd-mac-address = <&art 0>; + + fixed-link { + speed = <1000>; + full-duplex; + }; + }; + + rpm@108000 { + pinctrl-0 = <&i2c4_pins>; + pinctrl-names = "default"; + }; + }; + + gpio-keys { + compatible = "gpio-keys"; + pinctrl-0 = <&button_pins>; + pinctrl-names = "default"; + + wifi { + label = "wifi"; + gpios = <&qcom_pinmux 6 GPIO_ACTIVE_LOW>; + linux,code = ; + }; + + reset { + label = "reset"; + gpios = <&qcom_pinmux 54 GPIO_ACTIVE_LOW>; + linux,code = ; + }; + + wps { + label = "wps"; + gpios = <&qcom_pinmux 65 GPIO_ACTIVE_LOW>; + linux,code = ; + }; + }; + + gpio-leds { + compatible = "gpio-leds"; + pinctrl-0 = <&led_pins>; + pinctrl-names = "default"; + + usb1 { + label = "r7500v2:amber:usb1"; + gpios = <&qcom_pinmux 7 GPIO_ACTIVE_HIGH>; + }; + + usb3 { + label = "r7500v2:amber:usb3"; + gpios = <&qcom_pinmux 8 GPIO_ACTIVE_HIGH>; + }; + + status { + label = 
"r7500v2:amber:status"; + gpios = <&qcom_pinmux 9 GPIO_ACTIVE_HIGH>; + }; + + internet { + label = "r7500v2:white:internet"; + gpios = <&qcom_pinmux 22 GPIO_ACTIVE_HIGH>; + }; + + wan { + label = "r7500v2:white:wan"; + gpios = <&qcom_pinmux 23 GPIO_ACTIVE_HIGH>; + }; + + wps { + label = "r7500v2:white:wps"; + gpios = <&qcom_pinmux 24 GPIO_ACTIVE_HIGH>; + }; + + esata { + label = "r7500v2:white:esata"; + gpios = <&qcom_pinmux 26 GPIO_ACTIVE_HIGH>; + }; + + power: power { + label = "r7500v2:white:power"; + gpios = <&qcom_pinmux 53 GPIO_ACTIVE_HIGH>; + default-state = "keep"; + }; + + wifi { + label = "r7500v2:white:wifi"; + gpios = <&qcom_pinmux 64 GPIO_ACTIVE_HIGH>; + }; + }; +}; + +&adm_dma { + status = "ok"; +}; diff --git a/target/linux/ipq40xx/files-4.9/arch/arm/boot/dts/qcom-ipq8064-vr2600v.dts b/target/linux/ipq40xx/files-4.9/arch/arm/boot/dts/qcom-ipq8064-vr2600v.dts new file mode 100644 index 000000000..561c49aaa --- /dev/null +++ b/target/linux/ipq40xx/files-4.9/arch/arm/boot/dts/qcom-ipq8064-vr2600v.dts @@ -0,0 +1,424 @@ +#include "qcom-ipq8064-v1.0.dtsi" + +#include + +/ { + model = "TP-Link Archer VR2600v"; + compatible = "tplink,vr2600v", "qcom,ipq8064"; + + memory@0 { + reg = <0x42000000 0x1e000000>; + device_type = "memory"; + }; + + reserved-memory { + #address-cells = <1>; + #size-cells = <1>; + ranges; + rsvd@41200000 { + reg = <0x41200000 0x300000>; + no-map; + }; + }; + + aliases { + serial0 = &gsbi4_serial; + mdio-gpio0 = &mdio0; + + led-boot = &power; + led-failsafe = &general; + led-running = &power; + led-upgrade = &general; + }; + + chosen { + linux,stdout-path = "serial0:115200n8"; + }; + + soc { + pinmux@800000 { + led_pins: led_pins { + mux { + pins = "gpio7", "gpio8", "gpio9", "gpio16", "gpio17", + "gpio26", "gpio53", "gpio56", "gpio66"; + function = "gpio"; + drive-strength = <2>; + bias-pull-up; + }; + }; + + i2c4_pins: i2c4_pinmux { + mux { + pins = "gpio12", "gpio13"; + function = "gsbi4"; + drive-strength = <12>; + bias-disable; + 
}; + }; + + button_pins: button_pins { + mux { + pins = "gpio54", "gpio64", "gpio65", "gpio67", "gpio68"; + function = "gpio"; + drive-strength = <2>; + bias-pull-up; + }; + }; + + spi_pins: spi_pins { + mux { + pins = "gpio18", "gpio19", "gpio21"; + function = "gsbi5"; + bias-pull-down; + }; + + data { + pins = "gpio18", "gpio19"; + drive-strength = <10>; + }; + + cs { + pins = "gpio20"; + drive-strength = <10>; + bias-pull-up; + }; + + clk { + pins = "gpio21"; + drive-strength = <12>; + }; + }; + + mdio0_pins: mdio0_pins { + mux { + pins = "gpio0", "gpio1"; + function = "gpio"; + drive-strength = <8>; + bias-disable; + }; + }; + + rgmii2_pins: rgmii2_pins { + mux { + pins = "gpio27", "gpio28", "gpio29", "gpio30", "gpio31", "gpio32", + "gpio51", "gpio52", "gpio59", "gpio60", "gpio61", "gpio62" ; + function = "rgmii2"; + drive-strength = <8>; + bias-disable; + }; + }; + }; + + gsbi@16300000 { + qcom,mode = ; + status = "ok"; + serial@16340000 { + status = "ok"; + }; + /* + * The i2c device on gsbi4 should not be enabled. + * On ipq806x designs gsbi4 i2c is meant for exclusive + * RPM usage. Turning this on in kernel manifests as + * i2c failure for the RPM. 
+ */ + }; + + gsbi5: gsbi@1a200000 { + qcom,mode = ; + status = "ok"; + + spi4: spi@1a280000 { + status = "ok"; + + pinctrl-0 = <&spi_pins>; + pinctrl-names = "default"; + + cs-gpios = <&qcom_pinmux 20 GPIO_ACTIVE_HIGH>; + + flash: W25Q128@0 { + compatible = "jedec,spi-nor"; + #address-cells = <1>; + #size-cells = <1>; + spi-max-frequency = <50000000>; + reg = <0>; + + SBL1@0 { + label = "SBL1"; + reg = <0x0 0x20000>; + read-only; + }; + + MIBIB@20000 { + label = "MIBIB"; + reg = <0x20000 0x20000>; + read-only; + }; + + SBL2@40000 { + label = "SBL2"; + reg = <0x40000 0x40000>; + read-only; + }; + + SBL3@80000 { + label = "SBL3"; + reg = <0x80000 0x80000>; + read-only; + }; + + DDRCONFIG@100000 { + label = "DDRCONFIG"; + reg = <0x100000 0x10000>; + read-only; + }; + + SSD@110000 { + label = "SSD"; + reg = <0x110000 0x10000>; + read-only; + }; + + TZ@120000 { + label = "TZ"; + reg = <0x120000 0x80000>; + read-only; + }; + + RPM@1a0000 { + label = "RPM"; + reg = <0x1a0000 0x80000>; + read-only; + }; + + APPSBL@220000 { + label = "APPSBL"; + reg = <0x220000 0x80000>; + read-only; + }; + + APPSBLENV@2a0000 { + label = "APPSBLENV"; + reg = <0x2a0000 0x40000>; + read-only; + }; + + OLDART@2e0000 { + label = "OLDART"; + reg = <0x2e0000 0x40000>; + read-only; + }; + + kernel@320000 { + label = "kernel"; + reg = <0x320000 0x200000>; + }; + + rootfs@520000 { + label = "rootfs"; + reg = <0x520000 0xa60000>; + }; + + defaultmac: default-mac@0xfaf100 { + label = "default-mac"; + reg = <0xfaf100 0x00200>; + read-only; + }; + + ART@fc0000 { + label = "ART"; + reg = <0xfc0000 0x40000>; + read-only; + }; + }; + }; + }; + + phy@100f8800 { /* USB3 port 1 HS phy */ + status = "ok"; + }; + + phy@100f8830 { /* USB3 port 1 SS phy */ + status = "ok"; + }; + + phy@110f8800 { /* USB3 port 0 HS phy */ + status = "ok"; + }; + + phy@110f8830 { /* USB3 port 0 SS phy */ + status = "ok"; + }; + + usb30@0 { + status = "ok"; + }; + + usb30@1 { + status = "ok"; + }; + + pcie0: pci@1b500000 { + status 
= "ok"; + }; + + pcie1: pci@1b700000 { + status = "ok"; + force_gen1 = <1>; + }; + + mdio0: mdio { + compatible = "virtual,mdio-gpio"; + #address-cells = <1>; + #size-cells = <0>; + gpios = <&qcom_pinmux 1 GPIO_ACTIVE_HIGH &qcom_pinmux 0 GPIO_ACTIVE_HIGH>; + pinctrl-0 = <&mdio0_pins>; + pinctrl-names = "default"; + + phy0: ethernet-phy@0 { + device_type = "ethernet-phy"; + reg = <0>; + qca,ar8327-initvals = < + 0x00004 0x7600000 /* PAD0_MODE */ + 0x00008 0x1000000 /* PAD5_MODE */ + 0x0000c 0x80 /* PAD6_MODE */ + 0x000e4 0x6a545 /* MAC_POWER_SEL */ + 0x000e0 0xc74164de /* SGMII_CTRL */ + 0x0007c 0x4e /* PORT0_STATUS */ + 0x00094 0x4e /* PORT6_STATUS */ + >; + }; + + phy4: ethernet-phy@4 { + device_type = "ethernet-phy"; + reg = <4>; + }; + }; + + gmac1: ethernet@37200000 { + status = "ok"; + phy-mode = "rgmii"; + qcom,id = <1>; + + pinctrl-0 = <&rgmii2_pins>; + pinctrl-names = "default"; + + mtd-mac-address = <&defaultmac 0>; + mtd-mac-address-increment = <1>; + + fixed-link { + speed = <1000>; + full-duplex; + }; + }; + + gmac2: ethernet@37400000 { + status = "ok"; + phy-mode = "sgmii"; + qcom,id = <2>; + + mtd-mac-address = <&defaultmac 0>; + + fixed-link { + speed = <1000>; + full-duplex; + }; + }; + + rpm@108000 { + pinctrl-0 = <&i2c4_pins>; + pinctrl-names = "default"; + }; + }; + + gpio-keys { + compatible = "gpio-keys"; + pinctrl-0 = <&button_pins>; + pinctrl-names = "default"; + + wifi { + label = "wifi"; + gpios = <&qcom_pinmux 54 GPIO_ACTIVE_LOW>; + linux,code = ; + }; + + reset { + label = "reset"; + gpios = <&qcom_pinmux 64 GPIO_ACTIVE_LOW>; + linux,code = ; + }; + + wps { + label = "wps"; + gpios = <&qcom_pinmux 65 GPIO_ACTIVE_LOW>; + linux,code = ; + }; + + dect { + label = "dect"; + gpios = <&qcom_pinmux 67 GPIO_ACTIVE_LOW>; + linux,code = ; + }; + + ledswitch { + label = "ledswitch"; + gpios = <&qcom_pinmux 68 GPIO_ACTIVE_LOW>; + linux,code = ; + }; + }; + + gpio-leds { + compatible = "gpio-leds"; + pinctrl-0 = <&led_pins>; + pinctrl-names = 
"default"; + + dsl { + label = "vr2600v:white:dsl"; + gpios = <&qcom_pinmux 7 GPIO_ACTIVE_HIGH>; + }; + + usb { + label = "vr2600v:white:usb"; + gpios = <&qcom_pinmux 8 GPIO_ACTIVE_HIGH>; + }; + + lan { + label = "vr2600v:white:lan"; + gpios = <&qcom_pinmux 9 GPIO_ACTIVE_HIGH>; + }; + + wlan2g { + label = "vr2600v:white:wlan2g"; + gpios = <&qcom_pinmux 16 GPIO_ACTIVE_HIGH>; + }; + + wlan5g { + label = "vr2600v:white:wlan5g"; + gpios = <&qcom_pinmux 17 GPIO_ACTIVE_HIGH>; + }; + + power: power { + label = "vr2600v:white:power"; + gpios = <&qcom_pinmux 26 GPIO_ACTIVE_HIGH>; + default-state = "keep"; + }; + + phone { + label = "vr2600v:white:phone"; + gpios = <&qcom_pinmux 53 GPIO_ACTIVE_HIGH>; + }; + + wan { + label = "vr2600v:white:wan"; + gpios = <&qcom_pinmux 56 GPIO_ACTIVE_HIGH>; + }; + + general: general { + label = "vr2600v:white:general"; + gpios = <&qcom_pinmux 66 GPIO_ACTIVE_HIGH>; + }; + }; +}; + +&adm_dma { + status = "ok"; +}; diff --git a/target/linux/ipq40xx/files-4.9/arch/arm/boot/dts/qcom-ipq8064.dtsi b/target/linux/ipq40xx/files-4.9/arch/arm/boot/dts/qcom-ipq8064.dtsi new file mode 100644 index 000000000..90f7a1018 --- /dev/null +++ b/target/linux/ipq40xx/files-4.9/arch/arm/boot/dts/qcom-ipq8064.dtsi @@ -0,0 +1,1333 @@ +/dts-v1/; + +#include "skeleton.dtsi" +#include +#include +#include +#include +#include +#include +#include +#include + +/ { + model = "Qualcomm IPQ8064"; + compatible = "qcom,ipq8064"; + interrupt-parent = <&intc>; + + cpus { + #address-cells = <1>; + #size-cells = <0>; + + cpu0: cpu@0 { + compatible = "qcom,krait"; + enable-method = "qcom,kpss-acc-v1"; + device_type = "cpu"; + reg = <0>; + next-level-cache = <&L2>; + qcom,acc = <&acc0>; + qcom,saw = <&saw0>; + clocks = <&kraitcc 0>, <&kraitcc 4>; + clock-names = "cpu", "l2"; + clock-latency = <100000>; + cpu-supply = <&smb208_s2a>; + voltage-tolerance = <5>; + cooling-min-state = <0>; + cooling-max-state = <10>; + #cooling-cells = <2>; + cpu-idle-states = <&CPU_SPC>; + }; + + cpu1: 
cpu@1 { + compatible = "qcom,krait"; + enable-method = "qcom,kpss-acc-v1"; + device_type = "cpu"; + reg = <1>; + next-level-cache = <&L2>; + qcom,acc = <&acc1>; + qcom,saw = <&saw1>; + clocks = <&kraitcc 1>, <&kraitcc 4>; + clock-names = "cpu", "l2"; + clock-latency = <100000>; + cpu-supply = <&smb208_s2b>; + cooling-min-state = <0>; + cooling-max-state = <10>; + #cooling-cells = <2>; + cpu-idle-states = <&CPU_SPC>; + }; + + L2: l2-cache { + compatible = "cache"; + cache-level = <2>; + qcom,saw = <&saw_l2>; + }; + + qcom,l2 { + qcom,l2-rates = <384000000 1000000000 1200000000>; + }; + + idle-states { + CPU_SPC: spc { + compatible = "qcom,idle-state-spc", + "arm,idle-state"; + entry-latency-us = <400>; + exit-latency-us = <900>; + min-residency-us = <3000>; + }; + }; + }; + + thermal-zones { + tsens_tz_sensor0 { + polling-delay-passive = <0>; + polling-delay = <0>; + thermal-sensors = <&tsens 0>; + + trips { + cpu-critical-hi { + temperature = <125000>; + hysteresis = <2000>; + type = "critical_high"; + }; + + cpu-config-hi { + temperature = <105000>; + hysteresis = <2000>; + type = "configurable_hi"; + }; + + cpu-config-lo { + temperature = <95000>; + hysteresis = <2000>; + type = "configurable_lo"; + }; + + cpu-critical-low { + temperature = <0>; + hysteresis = <2000>; + type = "critical_low"; + }; + }; + }; + + tsens_tz_sensor1 { + polling-delay-passive = <0>; + polling-delay = <0>; + thermal-sensors = <&tsens 1>; + + trips { + cpu-critical-hi { + temperature = <125000>; + hysteresis = <2000>; + type = "critical_high"; + }; + + cpu-config-hi { + temperature = <105000>; + hysteresis = <2000>; + type = "configurable_hi"; + }; + + cpu-config-lo { + temperature = <95000>; + hysteresis = <2000>; + type = "configurable_lo"; + }; + + cpu-critical-low { + temperature = <0>; + hysteresis = <2000>; + type = "critical_low"; + }; + }; + }; + + tsens_tz_sensor2 { + polling-delay-passive = <0>; + polling-delay = <0>; + thermal-sensors = <&tsens 2>; + + trips { + 
cpu-critical-hi { + temperature = <125000>; + hysteresis = <2000>; + type = "critical_high"; + }; + + cpu-config-hi { + temperature = <105000>; + hysteresis = <2000>; + type = "configurable_hi"; + }; + + cpu-config-lo { + temperature = <95000>; + hysteresis = <2000>; + type = "configurable_lo"; + }; + + cpu-critical-low { + temperature = <0>; + hysteresis = <2000>; + type = "critical_low"; + }; + }; + }; + + tsens_tz_sensor3 { + polling-delay-passive = <0>; + polling-delay = <0>; + thermal-sensors = <&tsens 3>; + + trips { + cpu-critical-hi { + temperature = <125000>; + hysteresis = <2000>; + type = "critical_high"; + }; + + cpu-config-hi { + temperature = <105000>; + hysteresis = <2000>; + type = "configurable_hi"; + }; + + cpu-config-lo { + temperature = <95000>; + hysteresis = <2000>; + type = "configurable_lo"; + }; + + cpu-critical-low { + temperature = <0>; + hysteresis = <2000>; + type = "critical_low"; + }; + }; + }; + + tsens_tz_sensor4 { + polling-delay-passive = <0>; + polling-delay = <0>; + thermal-sensors = <&tsens 4>; + + trips { + cpu-critical-hi { + temperature = <125000>; + hysteresis = <2000>; + type = "critical_high"; + }; + + cpu-config-hi { + temperature = <105000>; + hysteresis = <2000>; + type = "configurable_hi"; + }; + + cpu-config-lo { + temperature = <95000>; + hysteresis = <2000>; + type = "configurable_lo"; + }; + + cpu-critical-low { + temperature = <0>; + hysteresis = <2000>; + type = "critical_low"; + }; + }; + }; + + tsens_tz_sensor5 { + polling-delay-passive = <0>; + polling-delay = <0>; + thermal-sensors = <&tsens 5>; + + trips { + cpu-critical-hi { + temperature = <125000>; + hysteresis = <2000>; + type = "critical_high"; + }; + + cpu-config-hi { + temperature = <105000>; + hysteresis = <2000>; + type = "configurable_hi"; + }; + + cpu-config-lo { + temperature = <95000>; + hysteresis = <2000>; + type = "configurable_lo"; + }; + + cpu-critical-low { + temperature = <0>; + hysteresis = <2000>; + type = "critical_low"; + }; + }; + 
}; + + tsens_tz_sensor6 { + polling-delay-passive = <0>; + polling-delay = <0>; + thermal-sensors = <&tsens 6>; + + trips { + cpu-critical-hi { + temperature = <125000>; + hysteresis = <2000>; + type = "critical_high"; + }; + + cpu-config-hi { + temperature = <105000>; + hysteresis = <2000>; + type = "configurable_hi"; + }; + + cpu-config-lo { + temperature = <95000>; + hysteresis = <2000>; + type = "configurable_lo"; + }; + + cpu-critical-low { + temperature = <0>; + hysteresis = <2000>; + type = "critical_low"; + }; + }; + }; + + tsens_tz_sensor7 { + polling-delay-passive = <0>; + polling-delay = <0>; + thermal-sensors = <&tsens 7>; + + trips { + cpu-critical-hi { + temperature = <125000>; + hysteresis = <2000>; + type = "critical_high"; + }; + + cpu-config-hi { + temperature = <105000>; + hysteresis = <2000>; + type = "configurable_hi"; + }; + + cpu-config-lo { + temperature = <95000>; + hysteresis = <2000>; + type = "configurable_lo"; + }; + + cpu-critical-low { + temperature = <0>; + hysteresis = <2000>; + type = "critical_low"; + }; + }; + }; + + tsens_tz_sensor8 { + polling-delay-passive = <0>; + polling-delay = <0>; + thermal-sensors = <&tsens 8>; + + trips { + cpu-critical-hi { + temperature = <125000>; + hysteresis = <2000>; + type = "critical_high"; + }; + + cpu-config-hi { + temperature = <105000>; + hysteresis = <2000>; + type = "configurable_hi"; + }; + + cpu-config-lo { + temperature = <95000>; + hysteresis = <2000>; + type = "configurable_lo"; + }; + + cpu-critical-low { + temperature = <0>; + hysteresis = <2000>; + type = "critical_low"; + }; + }; + }; + + tsens_tz_sensor9 { + polling-delay-passive = <0>; + polling-delay = <0>; + thermal-sensors = <&tsens 9>; + + trips { + cpu-critical-hi { + temperature = <125000>; + hysteresis = <2000>; + type = "critical_high"; + }; + + cpu-config-hi { + temperature = <105000>; + hysteresis = <2000>; + type = "configurable_hi"; + }; + + cpu-config-lo { + temperature = <95000>; + hysteresis = <2000>; + type = 
"configurable_lo"; + }; + + cpu-critical-low { + temperature = <0>; + hysteresis = <2000>; + type = "critical_low"; + }; + }; + }; + + tsens_tz_sensor10 { + polling-delay-passive = <0>; + polling-delay = <0>; + thermal-sensors = <&tsens 10>; + + trips { + cpu-critical-hi { + temperature = <125000>; + hysteresis = <2000>; + type = "critical_high"; + }; + + cpu-config-hi { + temperature = <105000>; + hysteresis = <2000>; + type = "configurable_hi"; + }; + + cpu-config-lo { + temperature = <95000>; + hysteresis = <2000>; + type = "configurable_lo"; + }; + + cpu-critical-low { + temperature = <0>; + hysteresis = <2000>; + type = "critical_low"; + }; + }; + }; + }; + + cpu-pmu { + compatible = "qcom,krait-pmu"; + interrupts = <1 10 0x304>; + }; + + reserved-memory { + #address-cells = <1>; + #size-cells = <1>; + ranges; + + nss@40000000 { + reg = <0x40000000 0x1000000>; + no-map; + }; + + smem: smem@41000000 { + reg = <0x41000000 0x200000>; + no-map; + }; + }; + + clocks { + cxo_board { + compatible = "fixed-clock"; + #clock-cells = <0>; + clock-frequency = <25000000>; + }; + + pxo_board { + compatible = "fixed-clock"; + #clock-cells = <0>; + clock-frequency = <25000000>; + }; + + sleep_clk: sleep_clk { + compatible = "fixed-clock"; + clock-frequency = <32768>; + #clock-cells = <0>; + }; + }; + + firmware { + scm { + compatible = "qcom,scm-ipq806x"; + }; + }; + + kraitcc: clock-controller { + compatible = "qcom,krait-cc-v1"; + #clock-cells = <1>; + }; + + qcom,pvs { + qcom,pvs-format-a; + qcom,speed0-pvs0-bin-v0 = + < 1400000000 1250000 >, + < 1200000000 1200000 >, + < 1000000000 1150000 >, + < 800000000 1100000 >, + < 600000000 1050000 >, + < 384000000 1000000 >; + + qcom,speed0-pvs1-bin-v0 = + < 1400000000 1175000 >, + < 1200000000 1125000 >, + < 1000000000 1075000 >, + < 800000000 1025000 >, + < 600000000 975000 >, + < 384000000 925000 >; + + qcom,speed0-pvs2-bin-v0 = + < 1400000000 1125000 >, + < 1200000000 1075000 >, + < 1000000000 1025000 >, + < 800000000 995000 
>, + < 600000000 925000 >, + < 384000000 875000 >; + + qcom,speed0-pvs3-bin-v0 = + < 1400000000 1050000 >, + < 1200000000 1000000 >, + < 1000000000 950000 >, + < 800000000 900000 >, + < 600000000 850000 >, + < 384000000 800000 >; + }; + + soc: soc { + #address-cells = <1>; + #size-cells = <1>; + ranges; + compatible = "simple-bus"; + + lpass@28100000 { + compatible = "qcom,lpass-cpu"; + status = "disabled"; + clocks = <&lcc AHBIX_CLK>, + <&lcc MI2S_OSR_CLK>, + <&lcc MI2S_BIT_CLK>; + clock-names = "ahbix-clk", + "mi2s-osr-clk", + "mi2s-bit-clk"; + interrupts = <0 85 1>; + interrupt-names = "lpass-irq-lpaif"; + reg = <0x28100000 0x10000>; + reg-names = "lpass-lpaif"; + }; + + qfprom: qfprom@700000 { + compatible = "qcom,qfprom", "syscon"; + reg = <0x700000 0x1000>; + #address-cells = <1>; + #size-cells = <1>; + status = "okay"; + tsens_calib: calib@400 { + reg = <0x400 0x10>; + }; + tsens_backup: backup@410 { + reg = <0x410 0x10>; + }; + }; + + rpm@108000 { + compatible = "qcom,rpm-ipq8064"; + reg = <0x108000 0x1000>; + qcom,ipc = <&l2cc 0x8 2>; + + interrupts = <0 19 0>, + <0 21 0>, + <0 22 0>; + interrupt-names = "ack", + "err", + "wakeup"; + + clocks = <&gcc RPM_MSG_RAM_H_CLK>; + clock-names = "ram"; + + #address-cells = <1>; + #size-cells = <0>; + + rpmcc: clock-controller { + compatible = "qcom,rpmcc-ipq806x", "qcom,rpmcc"; + #clock-cells = <1>; + }; + + regulators { + compatible = "qcom,rpm-smb208-regulators"; + + smb208_s1a: s1a { + regulator-min-microvolt = <1050000>; + regulator-max-microvolt = <1150000>; + + qcom,switch-mode-frequency = <1200000>; + + }; + + smb208_s1b: s1b { + regulator-min-microvolt = <1050000>; + regulator-max-microvolt = <1150000>; + + qcom,switch-mode-frequency = <1200000>; + }; + + smb208_s2a: s2a { + regulator-min-microvolt = < 800000>; + regulator-max-microvolt = <1250000>; + + qcom,switch-mode-frequency = <1200000>; + }; + + smb208_s2b: s2b { + regulator-min-microvolt = < 800000>; + regulator-max-microvolt = <1250000>; + + 
qcom,switch-mode-frequency = <1200000>; + }; + }; + }; + + rng@1a500000 { + compatible = "qcom,prng"; + reg = <0x1a500000 0x200>; + clocks = <&gcc PRNG_CLK>; + clock-names = "core"; + }; + + qcom_pinmux: pinmux@800000 { + compatible = "qcom,ipq8064-pinctrl"; + reg = <0x800000 0x4000>; + + gpio-controller; + #gpio-cells = <2>; + interrupt-controller; + #interrupt-cells = <2>; + interrupts = <0 16 0x4>; + + pcie0_pins: pcie0_pinmux { + mux { + pins = "gpio3"; + function = "pcie1_rst"; + drive-strength = <2>; + bias-disable; + }; + }; + + pcie1_pins: pcie1_pinmux { + mux { + pins = "gpio48"; + function = "pcie2_rst"; + drive-strength = <2>; + bias-disable; + }; + }; + + pcie2_pins: pcie2_pinmux { + mux { + pins = "gpio63"; + function = "pcie3_rst"; + drive-strength = <2>; + bias-disable; + output-low; + }; + }; + }; + + intc: interrupt-controller@2000000 { + compatible = "qcom,msm-qgic2"; + interrupt-controller; + #interrupt-cells = <3>; + reg = <0x02000000 0x1000>, + <0x02002000 0x1000>; + }; + + timer@200a000 { + compatible = "qcom,kpss-timer", "qcom,msm-timer"; + interrupts = <1 1 0x301>, + <1 2 0x301>, + <1 3 0x301>, + <1 4 0x301>, + <1 5 0x301>; + reg = <0x0200a000 0x100>; + clock-frequency = <25000000>, + <32768>; + clocks = <&sleep_clk>; + clock-names = "sleep"; + cpu-offset = <0x80000>; + }; + + acc0: clock-controller@2088000 { + compatible = "qcom,kpss-acc-v1"; + reg = <0x02088000 0x1000>, <0x02008000 0x1000>; + clock-output-names = "acpu0_aux"; + }; + + acc1: clock-controller@2098000 { + compatible = "qcom,kpss-acc-v1"; + reg = <0x02098000 0x1000>, <0x02008000 0x1000>; + clock-output-names = "acpu1_aux"; + }; + + l2cc: clock-controller@2011000 { + compatible = "qcom,kpss-gcc", "syscon"; + reg = <0x2011000 0x1000>; + clock-output-names = "acpu_l2_aux"; + }; + + saw0: regulator@2089000 { + compatible = "qcom,saw2", "syscon"; + reg = <0x02089000 0x1000>, <0x02009000 0x1000>; + regulator; + }; + + saw1: regulator@2099000 { + compatible = "qcom,saw2", "syscon"; + 
reg = <0x02099000 0x1000>, <0x02009000 0x1000>; + regulator; + }; + + saw_l2: regulator@02012000 { + compatible = "qcom,saw2", "syscon"; + reg = <0x02012000 0x1000>; + regulator; + }; + + sic_non_secure: sic-non-secure@12100000 { + compatible = "syscon"; + reg = <0x12100000 0x10000>; + }; + + gsbi2: gsbi@12480000 { + compatible = "qcom,gsbi-v1.0.0"; + cell-index = <2>; + reg = <0x12480000 0x100>; + clocks = <&gcc GSBI2_H_CLK>; + clock-names = "iface"; + #address-cells = <1>; + #size-cells = <1>; + ranges; + status = "disabled"; + + syscon-tcsr = <&tcsr>; + + uart2: serial@12490000 { + compatible = "qcom,msm-uartdm-v1.3", "qcom,msm-uartdm"; + reg = <0x12490000 0x1000>, + <0x12480000 0x1000>; + interrupts = <0 195 0x0>; + clocks = <&gcc GSBI2_UART_CLK>, <&gcc GSBI2_H_CLK>; + clock-names = "core", "iface"; + status = "disabled"; + }; + + i2c@124a0000 { + compatible = "qcom,i2c-qup-v1.1.1"; + reg = <0x124a0000 0x1000>; + interrupts = <0 196 0>; + + clocks = <&gcc GSBI2_QUP_CLK>, <&gcc GSBI2_H_CLK>; + clock-names = "core", "iface"; + status = "disabled"; + + #address-cells = <1>; + #size-cells = <0>; + }; + + }; + + gsbi4: gsbi@16300000 { + compatible = "qcom,gsbi-v1.0.0"; + cell-index = <4>; + reg = <0x16300000 0x100>; + clocks = <&gcc GSBI4_H_CLK>; + clock-names = "iface"; + #address-cells = <1>; + #size-cells = <1>; + ranges; + status = "disabled"; + + syscon-tcsr = <&tcsr>; + + gsbi4_serial: serial@16340000 { + compatible = "qcom,msm-uartdm-v1.3", "qcom,msm-uartdm"; + reg = <0x16340000 0x1000>, + <0x16300000 0x1000>; + interrupts = <0 152 0x0>; + clocks = <&gcc GSBI4_UART_CLK>, <&gcc GSBI4_H_CLK>; + clock-names = "core", "iface"; + status = "disabled"; + }; + + i2c@16380000 { + compatible = "qcom,i2c-qup-v1.1.1"; + reg = <0x16380000 0x1000>; + interrupts = <0 153 0>; + + clocks = <&gcc GSBI4_QUP_CLK>, <&gcc GSBI4_H_CLK>; + clock-names = "core", "iface"; + status = "disabled"; + + #address-cells = <1>; + #size-cells = <0>; + }; + }; + + gsbi5: gsbi@1a200000 { + 
compatible = "qcom,gsbi-v1.0.0"; + cell-index = <5>; + reg = <0x1a200000 0x100>; + clocks = <&gcc GSBI5_H_CLK>; + clock-names = "iface"; + #address-cells = <1>; + #size-cells = <1>; + ranges; + status = "disabled"; + + syscon-tcsr = <&tcsr>; + + uart5: serial@1a240000 { + compatible = "qcom,msm-uartdm-v1.3", "qcom,msm-uartdm"; + reg = <0x1a240000 0x1000>, + <0x1a200000 0x1000>; + interrupts = <0 154 0x0>; + clocks = <&gcc GSBI5_UART_CLK>, <&gcc GSBI5_H_CLK>; + clock-names = "core", "iface"; + status = "disabled"; + }; + + i2c@1a280000 { + compatible = "qcom,i2c-qup-v1.1.1"; + reg = <0x1a280000 0x1000>; + interrupts = <0 155 0>; + + clocks = <&gcc GSBI5_QUP_CLK>, <&gcc GSBI5_H_CLK>; + clock-names = "core", "iface"; + status = "disabled"; + + #address-cells = <1>; + #size-cells = <0>; + }; + + spi@1a280000 { + compatible = "qcom,spi-qup-v1.1.1"; + reg = <0x1a280000 0x1000>; + interrupts = <0 155 0>; + + clocks = <&gcc GSBI5_QUP_CLK>, <&gcc GSBI5_H_CLK>; + clock-names = "core", "iface"; + status = "disabled"; + + #address-cells = <1>; + #size-cells = <0>; + }; + }; + + sata_phy: sata-phy@1b400000 { + compatible = "qcom,ipq806x-sata-phy"; + reg = <0x1b400000 0x200>; + + clocks = <&gcc SATA_PHY_CFG_CLK>; + clock-names = "cfg"; + + #phy-cells = <0>; + status = "disabled"; + }; + + sata@29000000 { + compatible = "qcom,ipq806x-ahci", "generic-ahci"; + reg = <0x29000000 0x180>; + + interrupts = <0 209 0x0>; + + clocks = <&gcc SFAB_SATA_S_H_CLK>, + <&gcc SATA_H_CLK>, + <&gcc SATA_A_CLK>, + <&gcc SATA_RXOOB_CLK>, + <&gcc SATA_PMALIVE_CLK>; + clock-names = "slave_face", "iface", "core", + "rxoob", "pmalive"; + + assigned-clocks = <&gcc SATA_RXOOB_CLK>, <&gcc SATA_PMALIVE_CLK>; + assigned-clock-rates = <100000000>, <100000000>; + + phys = <&sata_phy>; + phy-names = "sata-phy"; + status = "disabled"; + }; + + qcom,ssbi@500000 { + compatible = "qcom,ssbi"; + reg = <0x00500000 0x1000>; + qcom,controller-type = "pmic-arbiter"; + }; + + gcc: clock-controller@900000 { + compatible = 
"qcom,gcc-ipq8064"; + reg = <0x00900000 0x4000>; + #clock-cells = <1>; + #reset-cells = <1>; + #power-domain-cells = <1>; + }; + + tsens: thermal-sensor@900000 { + compatible = "qcom,ipq8064-tsens"; + reg = <0x900000 0x3680>; + nvmem-cells = <&tsens_calib>, <&tsens_backup>; + nvmem-cell-names = "calib", "calib_backup"; + interrupts = <0 178 0>; + #thermal-sensor-cells = <1>; + }; + + tcsr: syscon@1a400000 { + compatible = "qcom,tcsr-ipq8064", "syscon"; + reg = <0x1a400000 0x100>; + }; + + lcc: clock-controller@28000000 { + compatible = "qcom,lcc-ipq8064"; + reg = <0x28000000 0x1000>; + #clock-cells = <1>; + #reset-cells = <1>; + }; + + sfpb_mutex_block: syscon@1200600 { + compatible = "syscon"; + reg = <0x01200600 0x100>; + }; + + hs_phy_1: phy@100f8800 { + compatible = "qcom,dwc3-hs-usb-phy"; + reg = <0x100f8800 0x30>; + clocks = <&gcc USB30_1_UTMI_CLK>; + clock-names = "ref"; + #phy-cells = <0>; + + status = "disabled"; + }; + + ss_phy_1: phy@100f8830 { + compatible = "qcom,dwc3-ss-usb-phy"; + reg = <0x100f8830 0x30>; + clocks = <&gcc USB30_1_MASTER_CLK>; + clock-names = "ref"; + #phy-cells = <0>; + + status = "disabled"; + }; + + hs_phy_0: phy@110f8800 { + compatible = "qcom,dwc3-hs-usb-phy"; + reg = <0x110f8800 0x30>; + clocks = <&gcc USB30_0_UTMI_CLK>; + clock-names = "ref"; + #phy-cells = <0>; + + status = "disabled"; + }; + + ss_phy_0: phy@110f8830 { + compatible = "qcom,dwc3-ss-usb-phy"; + reg = <0x110f8830 0x30>; + clocks = <&gcc USB30_0_MASTER_CLK>; + clock-names = "ref"; + #phy-cells = <0>; + + status = "disabled"; + }; + + usb3_0: usb30@0 { + compatible = "qcom,dwc3"; + #address-cells = <1>; + #size-cells = <1>; + clocks = <&gcc USB30_0_MASTER_CLK>; + clock-names = "core"; + + ranges; + + resets = <&gcc USB30_0_MASTER_RESET>; + reset-names = "usb30_0_mstr_rst"; + + status = "disabled"; + + dwc3@11000000 { + compatible = "snps,dwc3"; + reg = <0x11000000 0xcd00>; + interrupts = <0 110 0x4>; + phys = <&hs_phy_0>, <&ss_phy_0>; + phy-names = "usb2-phy", 
"usb3-phy"; + dr_mode = "host"; + snps,dis_u3_susphy_quirk; + }; + }; + + usb3_1: usb30@1 { + compatible = "qcom,dwc3"; + #address-cells = <1>; + #size-cells = <1>; + clocks = <&gcc USB30_1_MASTER_CLK>; + clock-names = "core"; + + ranges; + + resets = <&gcc USB30_1_MASTER_RESET>; + reset-names = "usb30_1_mstr_rst"; + + status = "disabled"; + + dwc3@10000000 { + compatible = "snps,dwc3"; + reg = <0x10000000 0xcd00>; + interrupts = <0 205 0x4>; + phys = <&hs_phy_1>, <&ss_phy_1>; + phy-names = "usb2-phy", "usb3-phy"; + dr_mode = "host"; + snps,dis_u3_susphy_quirk; + }; + }; + + pcie0: pci@1b500000 { + compatible = "qcom,pcie-ipq8064"; + reg = <0x1b500000 0x1000 + 0x1b502000 0x80 + 0x1b600000 0x100 + 0x0ff00000 0x100000>; + reg-names = "dbi", "elbi", "parf", "config"; + device_type = "pci"; + linux,pci-domain = <0>; + bus-range = <0x00 0xff>; + num-lanes = <1>; + #address-cells = <3>; + #size-cells = <2>; + + ranges = <0x81000000 0 0x0fe00000 0x0fe00000 0 0x00100000 /* downstream I/O */ + 0x82000000 0 0x08000000 0x08000000 0 0x07e00000>; /* non-prefetchable memory */ + + interrupts = ; + interrupt-names = "msi"; + #interrupt-cells = <1>; + interrupt-map-mask = <0 0 0 0x7>; + interrupt-map = <0 0 0 1 &intc 0 36 IRQ_TYPE_LEVEL_HIGH>, /* int_a */ + <0 0 0 2 &intc 0 37 IRQ_TYPE_LEVEL_HIGH>, /* int_b */ + <0 0 0 3 &intc 0 38 IRQ_TYPE_LEVEL_HIGH>, /* int_c */ + <0 0 0 4 &intc 0 39 IRQ_TYPE_LEVEL_HIGH>; /* int_d */ + + clocks = <&gcc PCIE_A_CLK>, + <&gcc PCIE_H_CLK>, + <&gcc PCIE_PHY_CLK>, + <&gcc PCIE_AUX_CLK>, + <&gcc PCIE_ALT_REF_CLK>; + clock-names = "core", "iface", "phy", "aux", "ref"; + + assigned-clocks = <&gcc PCIE_ALT_REF_CLK>; + assigned-clock-rates = <100000000>; + + resets = <&gcc PCIE_ACLK_RESET>, + <&gcc PCIE_HCLK_RESET>, + <&gcc PCIE_POR_RESET>, + <&gcc PCIE_PCI_RESET>, + <&gcc PCIE_PHY_RESET>, + <&gcc PCIE_EXT_RESET>; + reset-names = "axi", "ahb", "por", "pci", "phy", "ext"; + + pinctrl-0 = <&pcie0_pins>; + pinctrl-names = "default"; + + perst-gpios = 
<&qcom_pinmux 3 GPIO_ACTIVE_LOW>; + + phy-tx0-term-offset = <7>; + + status = "disabled"; + }; + + pcie1: pci@1b700000 { + compatible = "qcom,pcie-ipq8064"; + reg = <0x1b700000 0x1000 + 0x1b702000 0x80 + 0x1b800000 0x100 + 0x31f00000 0x100000>; + reg-names = "dbi", "elbi", "parf", "config"; + device_type = "pci"; + linux,pci-domain = <1>; + bus-range = <0x00 0xff>; + num-lanes = <1>; + #address-cells = <3>; + #size-cells = <2>; + + ranges = <0x81000000 0 0x31e00000 0x31e00000 0 0x00100000 /* downstream I/O */ + 0x82000000 0 0x2e000000 0x2e000000 0 0x03e00000>; /* non-prefetchable memory */ + + interrupts = ; + interrupt-names = "msi"; + #interrupt-cells = <1>; + interrupt-map-mask = <0 0 0 0x7>; + interrupt-map = <0 0 0 1 &intc 0 58 IRQ_TYPE_LEVEL_HIGH>, /* int_a */ + <0 0 0 2 &intc 0 59 IRQ_TYPE_LEVEL_HIGH>, /* int_b */ + <0 0 0 3 &intc 0 60 IRQ_TYPE_LEVEL_HIGH>, /* int_c */ + <0 0 0 4 &intc 0 61 IRQ_TYPE_LEVEL_HIGH>; /* int_d */ + + clocks = <&gcc PCIE_1_A_CLK>, + <&gcc PCIE_1_H_CLK>, + <&gcc PCIE_1_PHY_CLK>, + <&gcc PCIE_1_AUX_CLK>, + <&gcc PCIE_1_ALT_REF_CLK>; + clock-names = "core", "iface", "phy", "aux", "ref"; + + assigned-clocks = <&gcc PCIE_1_ALT_REF_CLK>; + assigned-clock-rates = <100000000>; + + resets = <&gcc PCIE_1_ACLK_RESET>, + <&gcc PCIE_1_HCLK_RESET>, + <&gcc PCIE_1_POR_RESET>, + <&gcc PCIE_1_PCI_RESET>, + <&gcc PCIE_1_PHY_RESET>, + <&gcc PCIE_1_EXT_RESET>; + reset-names = "axi", "ahb", "por", "pci", "phy", "ext"; + + pinctrl-0 = <&pcie1_pins>; + pinctrl-names = "default"; + + perst-gpios = <&qcom_pinmux 48 GPIO_ACTIVE_LOW>; + + phy-tx0-term-offset = <7>; + + status = "disabled"; + }; + + pcie2: pci@1b900000 { + compatible = "qcom,pcie-ipq8064"; + reg = <0x1b900000 0x1000 + 0x1b902000 0x80 + 0x1ba00000 0x100 + 0x35f00000 0x100000>; + reg-names = "dbi", "elbi", "parf", "config"; + device_type = "pci"; + linux,pci-domain = <2>; + bus-range = <0x00 0xff>; + num-lanes = <1>; + #address-cells = <3>; + #size-cells = <2>; + + ranges = <0x81000000 0 
0x35e00000 0x35e00000 0 0x00100000 /* downstream I/O */ + 0x82000000 0 0x32000000 0x32000000 0 0x03e00000>; /* non-prefetchable memory */ + + interrupts = ; + interrupt-names = "msi"; + #interrupt-cells = <1>; + interrupt-map-mask = <0 0 0 0x7>; + interrupt-map = <0 0 0 1 &intc 0 72 IRQ_TYPE_LEVEL_HIGH>, /* int_a */ + <0 0 0 2 &intc 0 73 IRQ_TYPE_LEVEL_HIGH>, /* int_b */ + <0 0 0 3 &intc 0 74 IRQ_TYPE_LEVEL_HIGH>, /* int_c */ + <0 0 0 4 &intc 0 75 IRQ_TYPE_LEVEL_HIGH>; /* int_d */ + + clocks = <&gcc PCIE_2_A_CLK>, + <&gcc PCIE_2_H_CLK>, + <&gcc PCIE_2_PHY_CLK>, + <&gcc PCIE_2_AUX_CLK>, + <&gcc PCIE_2_ALT_REF_CLK>; + clock-names = "core", "iface", "phy", "aux", "ref"; + + assigned-clocks = <&gcc PCIE_2_ALT_REF_CLK>; + assigned-clock-rates = <100000000>; + + resets = <&gcc PCIE_2_ACLK_RESET>, + <&gcc PCIE_2_HCLK_RESET>, + <&gcc PCIE_2_POR_RESET>, + <&gcc PCIE_2_PCI_RESET>, + <&gcc PCIE_2_PHY_RESET>, + <&gcc PCIE_2_EXT_RESET>; + reset-names = "axi", "ahb", "por", "pci", "phy", "ext"; + + pinctrl-0 = <&pcie2_pins>; + pinctrl-names = "default"; + + perst-gpios = <&qcom_pinmux 63 GPIO_ACTIVE_LOW>; + + phy-tx0-term-offset = <7>; + + status = "disabled"; + }; + + adm_dma: dma@18300000 { + compatible = "qcom,adm"; + reg = <0x18300000 0x100000>; + interrupts = <0 170 0>; + #dma-cells = <1>; + + clocks = <&gcc ADM0_CLK>, <&gcc ADM0_PBUS_CLK>; + clock-names = "core", "iface"; + + resets = <&gcc ADM0_RESET>, + <&gcc ADM0_PBUS_RESET>, + <&gcc ADM0_C0_RESET>, + <&gcc ADM0_C1_RESET>, + <&gcc ADM0_C2_RESET>; + reset-names = "clk", "pbus", "c0", "c1", "c2"; + qcom,ee = <0>; + + status = "disabled"; + }; + + nand@1ac00000 { + compatible = "qcom,ipq806x-nand"; + reg = <0x1ac00000 0x800>; + + clocks = <&gcc EBI2_CLK>, + <&gcc EBI2_AON_CLK>; + clock-names = "core", "aon"; + + dmas = <&adm_dma 3>; + dma-names = "rxtx"; + qcom,cmd-crci = <15>; + qcom,data-crci = <3>; + + status = "disabled"; + + #address-cells = <1>; + #size-cells = <0>; + }; + + nss_common: syscon@03000000 { + compatible 
= "syscon"; + reg = <0x03000000 0x0000FFFF>; + }; + + qsgmii_csr: syscon@1bb00000 { + compatible = "syscon"; + reg = <0x1bb00000 0x000001FF>; + }; + + stmmac_axi_setup: stmmac-axi-config { + snps,wr_osr_lmt = <7>; + snps,rd_osr_lmt = <7>; + snps,blen = <16 0 0 0 0 0 0>; + }; + + gmac0: ethernet@37000000 { + device_type = "network"; + compatible = "qcom,ipq806x-gmac"; + reg = <0x37000000 0x200000>; + interrupts = ; + interrupt-names = "macirq"; + + snps,axi-config = <&stmmac_axi_setup>; + snps,pbl = <32>; + snps,aal = <1>; + + qcom,nss-common = <&nss_common>; + qcom,qsgmii-csr = <&qsgmii_csr>; + + clocks = <&gcc GMAC_CORE1_CLK>; + clock-names = "stmmaceth"; + + resets = <&gcc GMAC_CORE1_RESET>; + reset-names = "stmmaceth"; + + status = "disabled"; + }; + + gmac1: ethernet@37200000 { + device_type = "network"; + compatible = "qcom,ipq806x-gmac"; + reg = <0x37200000 0x200000>; + interrupts = ; + interrupt-names = "macirq"; + + snps,axi-config = <&stmmac_axi_setup>; + snps,pbl = <32>; + snps,aal = <1>; + + qcom,nss-common = <&nss_common>; + qcom,qsgmii-csr = <&qsgmii_csr>; + + clocks = <&gcc GMAC_CORE2_CLK>; + clock-names = "stmmaceth"; + + resets = <&gcc GMAC_CORE2_RESET>; + reset-names = "stmmaceth"; + + status = "disabled"; + }; + + gmac2: ethernet@37400000 { + device_type = "network"; + compatible = "qcom,ipq806x-gmac"; + reg = <0x37400000 0x200000>; + interrupts = ; + interrupt-names = "macirq"; + + snps,axi-config = <&stmmac_axi_setup>; + snps,pbl = <32>; + snps,aal = <1>; + + qcom,nss-common = <&nss_common>; + qcom,qsgmii-csr = <&qsgmii_csr>; + + clocks = <&gcc GMAC_CORE3_CLK>; + clock-names = "stmmaceth"; + + resets = <&gcc GMAC_CORE3_RESET>; + reset-names = "stmmaceth"; + + status = "disabled"; + }; + + gmac3: ethernet@37600000 { + device_type = "network"; + compatible = "qcom,ipq806x-gmac"; + reg = <0x37600000 0x200000>; + interrupts = ; + interrupt-names = "macirq"; + + snps,axi-config = <&stmmac_axi_setup>; + snps,pbl = <32>; + snps,aal = <1>; + + 
qcom,nss-common = <&nss_common>; + qcom,qsgmii-csr = <&qsgmii_csr>; + + clocks = <&gcc GMAC_CORE4_CLK>; + clock-names = "stmmaceth"; + + resets = <&gcc GMAC_CORE4_RESET>; + reset-names = "stmmaceth"; + + status = "disabled"; + }; + }; + + sfpb_mutex: sfpb-mutex { + compatible = "qcom,sfpb-mutex"; + syscon = <&sfpb_mutex_block 4 4>; + + #hwlock-cells = <1>; + }; + + smem { + compatible = "qcom,smem"; + memory-region = <&smem>; + hwlocks = <&sfpb_mutex 3>; + }; +}; diff --git a/target/linux/ipq40xx/files-4.9/arch/arm/boot/dts/qcom-ipq8065-nbg6817.dts b/target/linux/ipq40xx/files-4.9/arch/arm/boot/dts/qcom-ipq8065-nbg6817.dts new file mode 100644 index 000000000..987ee852c --- /dev/null +++ b/target/linux/ipq40xx/files-4.9/arch/arm/boot/dts/qcom-ipq8065-nbg6817.dts @@ -0,0 +1,391 @@ +#include "qcom-ipq8065-v1.0.dtsi" + +#include + +/ { + model = "ZyXEL NBG6817"; + compatible = "zyxel,nbg6817", "qcom,ipq8065"; + + memory@0 { + reg = <0x42000000 0x1e000000>; + device_type = "memory"; + }; + + reserved-memory { + #address-cells = <1>; + #size-cells = <1>; + ranges; + rsvd@41200000 { + reg = <0x41200000 0x300000>; + no-map; + }; + }; + + aliases { + serial0 = &gsbi4_serial; + mdio-gpio0 = &mdio0; + sdcc1 = &sdcc1; + + led-boot = &power; + led-failsafe = &power; + led-running = &power; + led-upgrade = &power; + }; + + chosen { + bootargs = "rootfstype=squashfs,ext4 rootwait noinitrd"; + linux,stdout-path = "serial0:115200n8"; + append-rootblock = "root=/dev/mmcblk0p"; + }; + + soc { + pinmux@800000 { + button_pins: button_pins { + mux { + pins = "gpio53", "gpio54", "gpio65"; + function = "gpio"; + drive-strength = <2>; + bias-pull-up; + }; + }; + + i2c4_pins: i2c4_pinmux { + mux { + pins = "gpio12", "gpio13"; + function = "gsbi4"; + drive-strength = <12>; + bias-disable; + }; + }; + + led_pins: led_pins { + mux { + pins = "gpio9", "gpio26", "gpio33", "gpio64"; + function = "gpio"; + drive-strength = <2>; + bias-pull-down; + }; + }; + + mdio0_pins: mdio0_pins { + mux { + 
pins = "gpio0", "gpio1"; + function = "gpio"; + drive-strength = <8>; + bias-disable; + }; + + clk { + pins = "gpio1"; + input-disable; + }; + }; + + rgmii2_pins: rgmii2_pins { + mux { + pins = "gpio27", "gpio28", "gpio29", "gpio30", "gpio31", "gpio32", + "gpio51", "gpio52", "gpio59", "gpio60", "gpio61", "gpio62" ; + function = "rgmii2"; + drive-strength = <8>; + bias-disable; + }; + + tx { + pins = "gpio27", "gpio28", "gpio29", "gpio30", "gpio31", "gpio32" ; + input-disable; + }; + }; + + spi_pins: spi_pins { + mux { + pins = "gpio18", "gpio19", "gpio21"; + function = "gsbi5"; + drive-strength = <10>; + bias-none; + }; + + cs { + pins = "gpio20"; + drive-strength = <12>; + }; + }; + + usb0_pwr_en_pins: usb0_pwr_en_pins { + mux { + pins = "gpio16", "gpio17"; + function = "gpio"; + drive-strength = <12>; + }; + + pwr { + pins = "gpio17"; + bias-pull-down; + output-high; + }; + + ovc { + pins = "gpio16"; + bias-pull-up; + }; + }; + + usb1_pwr_en_pins: usb1_pwr_en_pins { + mux { + pins = "gpio14", "gpio15"; + function = "gpio"; + drive-strength = <12>; + }; + + pwr { + pins = "gpio14"; + bias-pull-down; + output-high; + }; + + ovc { + pins = "gpio15"; + bias-pull-up; + }; + }; + }; + + gsbi@16300000 { + qcom,mode = ; + status = "ok"; + serial@16340000 { + status = "ok"; + }; + /* + * The i2c device on gsbi4 should not be enabled. + * On ipq806x designs gsbi4 i2c is meant for exclusive + * RPM usage. Turning this on in kernel manifests as + * i2c failure for the RPM. 
+ */ + }; + + gsbi5: gsbi@1a200000 { + qcom,mode = ; + status = "ok"; + + spi4: spi@1a280000 { + status = "ok"; + + pinctrl-0 = <&spi_pins>; + pinctrl-names = "default"; + + cs-gpios = <&qcom_pinmux 20 GPIO_ACTIVE_HIGH>; + + flash: m25p80@0 { + compatible = "jedec,spi-nor"; + #address-cells = <1>; + #size-cells = <1>; + spi-max-frequency = <51200000>; + reg = <0>; + + linux,part-probe = "qcom-smem"; + }; + }; + }; + + phy@100f8800 { /* USB3 port 1 HS phy */ + status = "ok"; + }; + + phy@100f8830 { /* USB3 port 1 SS phy */ + status = "ok"; + }; + + phy@110f8800 { /* USB3 port 0 HS phy */ + status = "ok"; + }; + + phy@110f8830 { /* USB3 port 0 SS phy */ + status = "ok"; + }; + + usb30@0 { + status = "ok"; + + pinctrl-0 = <&usb0_pwr_en_pins>; + pinctrl-names = "default"; + }; + + usb30@1 { + status = "ok"; + + pinctrl-0 = <&usb1_pwr_en_pins>; + pinctrl-names = "default"; + }; + + pcie0: pci@1b500000 { + status = "ok"; + reset-gpio = <&qcom_pinmux 3 GPIO_ACTIVE_LOW>; + pinctrl-0 = <&pcie0_pins>; + pinctrl-names = "default"; + }; + + pcie1: pci@1b700000 { + status = "ok"; + reset-gpio = <&qcom_pinmux 48 GPIO_ACTIVE_LOW>; + pinctrl-0 = <&pcie1_pins>; + pinctrl-names = "default"; + force_gen1 = <1>; + }; + + mdio0: mdio { + compatible = "virtual,mdio-gpio"; + #address-cells = <1>; + #size-cells = <0>; + gpios = <&qcom_pinmux 1 GPIO_ACTIVE_HIGH &qcom_pinmux 0 GPIO_ACTIVE_HIGH>; + pinctrl-0 = <&mdio0_pins>; + pinctrl-names = "default"; + + phy0: ethernet-phy@0 { + device_type = "ethernet-phy"; + reg = <0>; + qca,ar8327-initvals = < + 0x00004 0x7600000 /* PAD0_MODE */ + 0x00008 0x1000000 /* PAD5_MODE */ + 0x0000c 0x80 /* PAD6_MODE */ + 0x000e4 0xaa545 /* MAC_POWER_SEL */ + 0x000e0 0xc74164de /* SGMII_CTRL */ + 0x0007c 0x4e /* PORT0_STATUS */ + 0x00094 0x4e /* PORT6_STATUS */ + 0x00970 0x1e864443 /* QM_PORT0_CTRL0 */ + 0x00974 0x000001c6 /* QM_PORT0_CTRL1 */ + 0x00978 0x19008643 /* QM_PORT1_CTRL0 */ + 0x0097c 0x000001c6 /* QM_PORT1_CTRL1 */ + 0x00980 0x19008643 /* 
QM_PORT2_CTRL0 */ + 0x00984 0x000001c6 /* QM_PORT2_CTRL1 */ + 0x00988 0x19008643 /* QM_PORT3_CTRL0 */ + 0x0098c 0x000001c6 /* QM_PORT3_CTRL1 */ + 0x00990 0x19008643 /* QM_PORT4_CTRL0 */ + 0x00994 0x000001c6 /* QM_PORT4_CTRL1 */ + 0x00998 0x1e864443 /* QM_PORT5_CTRL0 */ + 0x0099c 0x000001c6 /* QM_PORT5_CTRL1 */ + 0x009a0 0x1e864443 /* QM_PORT6_CTRL0 */ + 0x009a4 0x000001c6 /* QM_PORT6_CTRL1 */ + >; + }; + + phy4: ethernet-phy@4 { + device_type = "ethernet-phy"; + reg = <4>; + qca,ar8327-initvals = < + 0x000e4 0x6a545 /* MAC_POWER_SEL */ + 0x0000c 0x80 /* PAD6_MODE */ + >; + }; + }; + + gmac1: ethernet@37200000 { + status = "ok"; + phy-mode = "rgmii"; + qcom,id = <1>; + qcom,phy_mdio_addr = <4>; + qcom,poll_required = <0>; + qcom,rgmii_delay = <1>; + qcom,phy_mii_type = <0>; + qcom,emulation = <0>; + qcom,irq = <255>; + mdiobus = <&mdio0>; + + pinctrl-0 = <&rgmii2_pins>; + pinctrl-names = "default"; + + fixed-link { + speed = <1000>; + full-duplex; + }; + }; + + gmac2: ethernet@37400000 { + status = "ok"; + phy-mode = "sgmii"; + qcom,id = <2>; + qcom,phy_mdio_addr = <0>; /* none */ + qcom,poll_required = <0>; /* no polling */ + qcom,rgmii_delay = <0>; + qcom,phy_mii_type = <1>; + qcom,emulation = <0>; + qcom,irq = <258>; + mdiobus = <&mdio0>; + + fixed-link { + speed = <1000>; + full-duplex; + }; + }; + + rpm@108000 { + pinctrl-0 = <&i2c4_pins>; + pinctrl-names = "default"; + }; + + amba { + sdcc1: sdcc@12400000 { + status = "okay"; + }; + }; + }; + + gpio-keys { + compatible = "gpio-keys"; + pinctrl-0 = <&button_pins>; + pinctrl-names = "default"; + + wifi { + label = "wifi"; + gpios = <&qcom_pinmux 53 GPIO_ACTIVE_LOW>; + linux,code = ; + linux,input-type = ; + }; + + reset { + label = "reset"; + gpios = <&qcom_pinmux 54 GPIO_ACTIVE_LOW>; + linux,code = ; + }; + + wps { + label = "wps"; + gpios = <&qcom_pinmux 65 GPIO_ACTIVE_LOW>; + linux,code = ; + }; + }; + + gpio-leds { + compatible = "gpio-leds"; + pinctrl-0 = <&led_pins>; + pinctrl-names = "default"; + + 
internet { + label = "nbg6817:white:internet"; + gpios = <&qcom_pinmux 64 GPIO_ACTIVE_HIGH>; + }; + + power: power { + label = "nbg6817:white:power"; + gpios = <&qcom_pinmux 9 GPIO_ACTIVE_HIGH>; + default-state = "keep"; + }; + + wifi2g { + label = "nbg6817:amber:wifi2g"; + gpios = <&qcom_pinmux 33 GPIO_ACTIVE_HIGH>; + }; + + /* wifi2g amber from the manual is missing */ + + wifi5g { + label = "nbg6817:amber:wifi5g"; + gpios = <&qcom_pinmux 26 GPIO_ACTIVE_HIGH>; + }; + + /* wifi5g amber from the manual is missing */ + }; +}; + +&adm_dma { + status = "ok"; +}; diff --git a/target/linux/ipq40xx/files-4.9/arch/arm/boot/dts/qcom-ipq8065-r7800.dts b/target/linux/ipq40xx/files-4.9/arch/arm/boot/dts/qcom-ipq8065-r7800.dts new file mode 100644 index 000000000..4c89dcf76 --- /dev/null +++ b/target/linux/ipq40xx/files-4.9/arch/arm/boot/dts/qcom-ipq8065-r7800.dts @@ -0,0 +1,573 @@ +#include "qcom-ipq8065-v1.0.dtsi" + +#include + +/ { + model = "Netgear Nighthawk X4S R7800"; + compatible = "netgear,r7800", "qcom,ipq8065", "qcom,ipq8064"; + + memory@0 { + reg = <0x42000000 0x1e000000>; + device_type = "memory"; + }; + + reserved-memory { + #address-cells = <1>; + #size-cells = <1>; + ranges; + rsvd@41200000 { + reg = <0x41200000 0x300000>; + no-map; + }; + + rsvd@5fe00000 { + reg = <0x5fe00000 0x200000>; + reusable; + }; + }; + + aliases { + serial0 = &gsbi4_serial; + mdio-gpio0 = &mdio0; + + led-boot = &power_white; + led-failsafe = &power_amber; + led-running = &power_white; + led-upgrade = &power_amber; + }; + + chosen { + linux,stdout-path = "serial0:115200n8"; + }; + + soc { + pinmux@800000 { + button_pins: button_pins { + mux { + pins = "gpio6", "gpio54", "gpio65"; + function = "gpio"; + drive-strength = <2>; + bias-pull-up; + }; + }; + + i2c4_pins: i2c4_pinmux { + mux { + pins = "gpio12", "gpio13"; + function = "gsbi4"; + drive-strength = <12>; + bias-disable; + }; + }; + + led_pins: led_pins { + pins = "gpio7", "gpio8", "gpio9", "gpio22", "gpio23", + "gpio24","gpio26", 
"gpio53", "gpio64"; + function = "gpio"; + drive-strength = <2>; + bias-pull-down; + }; + + nand_pins: nand_pins { + mux { + pins = "gpio34", "gpio35", "gpio36", + "gpio37", "gpio38", "gpio39", + "gpio40", "gpio41", "gpio42", + "gpio43", "gpio44", "gpio45", + "gpio46", "gpio47"; + function = "nand"; + drive-strength = <10>; + bias-disable; + }; + pullups { + pins = "gpio39"; + bias-pull-up; + }; + hold { + pins = "gpio40", "gpio41", "gpio42", + "gpio43", "gpio44", "gpio45", + "gpio46", "gpio47"; + bias-bus-hold; + }; + }; + + mdio0_pins: mdio0_pins { + mux { + pins = "gpio0", "gpio1"; + function = "gpio"; + drive-strength = <8>; + bias-disable; + }; + + clk { + pins = "gpio1"; + input-disable; + }; + }; + + rgmii2_pins: rgmii2_pins { + mux { + pins = "gpio27", "gpio28", "gpio29", "gpio30", "gpio31", "gpio32", + "gpio51", "gpio52", "gpio59", "gpio60", "gpio61", "gpio62" ; + function = "rgmii2"; + drive-strength = <8>; + bias-disable; + }; + + tx { + pins = "gpio27", "gpio28", "gpio29", "gpio30", "gpio31", "gpio32" ; + input-disable; + }; + }; + + spi_pins: spi_pins { + mux { + pins = "gpio18", "gpio19", "gpio21"; + function = "gsbi5"; + bias-pull-down; + }; + + data { + pins = "gpio18", "gpio19"; + drive-strength = <10>; + }; + + cs { + pins = "gpio20"; + drive-strength = <10>; + bias-pull-up; + }; + + clk { + pins = "gpio21"; + drive-strength = <12>; + }; + }; + + spi6_pins: spi6_pins { + mux { + pins = "gpio55", "gpio56", "gpio58"; + function = "gsbi6"; + bias-pull-down; + }; + + mosi { + pins = "gpio55"; + drive-strength = <12>; + }; + + miso { + pins = "gpio56"; + drive-strength = <14>; + }; + + cs { + pins = "gpio57"; + drive-strength = <12>; + bias-pull-up; + }; + + clk { + pins = "gpio58"; + drive-strength = <12>; + }; + + reset { + pins = "gpio33"; + drive-strength = <10>; + bias-pull-down; + output-high; + }; + }; + + usb0_pwr_en_pins: usb0_pwr_en_pins { + mux { + pins = "gpio15"; + function = "gpio"; + drive-strength = <12>; + bias-pull-down; + 
output-high; + }; + }; + + usb1_pwr_en_pins: usb1_pwr_en_pins { + mux { + pins = "gpio16", "gpio68"; + function = "gpio"; + drive-strength = <12>; + bias-pull-down; + output-high; + }; + }; + }; + + gsbi@16300000 { + qcom,mode = ; + status = "ok"; + serial@16340000 { + status = "ok"; + }; + /* + * The i2c device on gsbi4 should not be enabled. + * On ipq806x designs gsbi4 i2c is meant for exclusive + * RPM usage. Turning this on in kernel manifests as + * i2c failure for the RPM. + */ + }; + + gsbi5: gsbi@1a200000 { + qcom,mode = ; + status = "ok"; + + spi5: spi@1a280000 { + status = "ok"; + + pinctrl-0 = <&spi_pins>; + pinctrl-names = "default"; + + cs-gpios = <&qcom_pinmux 20 GPIO_ACTIVE_HIGH>; + + flash: m25p80@0 { + compatible = "jedec,spi-nor"; + #address-cells = <1>; + #size-cells = <1>; + spi-max-frequency = <50000000>; + reg = <0>; + + linux,part-probe = "qcom-smem"; + }; + }; + }; + + gsbi6: gsbi@16500000 { + qcom,mode = ; + status = "ok"; + spi6: spi@16580000 { + status = "ok"; + + pinctrl-0 = <&spi6_pins>; + pinctrl-names = "default"; + + cs-gpios = <&qcom_pinmux 57 GPIO_ACTIVE_HIGH>; + + spi-nor@0 { + compatible = "jedec,spi-nor"; + reg = <0>; + spi-max-frequency = <6000000>; + }; + }; + }; + + sata-phy@1b400000 { + status = "ok"; + }; + + sata@29000000 { + ports-implemented = <0x1>; + status = "ok"; + }; + + phy@100f8800 { /* USB3 port 1 HS phy */ + status = "ok"; + }; + + phy@100f8830 { /* USB3 port 1 SS phy */ + status = "ok"; + }; + + phy@110f8800 { /* USB3 port 0 HS phy */ + status = "ok"; + }; + + phy@110f8830 { /* USB3 port 0 SS phy */ + status = "ok"; + }; + + usb30@0 { + status = "ok"; + + pinctrl-0 = <&usb0_pwr_en_pins>; + pinctrl-names = "default"; + }; + + usb30@1 { + status = "ok"; + + pinctrl-0 = <&usb1_pwr_en_pins>; + pinctrl-names = "default"; + }; + + pcie0: pci@1b500000 { + status = "ok"; + }; + + pcie1: pci@1b700000 { + status = "ok"; + force_gen1 = <1>; + }; + + nand@1ac00000 { + status = "ok"; + + pinctrl-0 = <&nand_pins>; + 
pinctrl-names = "default"; + + cs0 { + reg = <0>; + compatible = "qcom,nandcs"; + + nand-ecc-strength = <4>; + nand-bus-width = <8>; + nand-ecc-step-size = <512>; + + partitions { + compatible = "fixed-partitions"; + #address-cells = <1>; + #size-cells = <1>; + + qcadata@0 { + label = "qcadata"; + reg = <0x0000000 0x0c80000>; + read-only; + }; + + APPSBL@c80000 { + label = "APPSBL"; + reg = <0x0c80000 0x0500000>; + read-only; + }; + + APPSBLENV@1180000 { + label = "APPSBLENV"; + reg = <0x1180000 0x0080000>; + read-only; + }; + + art: art@1200000 { + label = "art"; + reg = <0x1200000 0x0140000>; + read-only; + }; + + artbak: art@1340000 { + label = "artbak"; + reg = <0x1340000 0x0140000>; + read-only; + }; + + kernel@1480000 { + label = "kernel"; + reg = <0x1480000 0x0200000>; + }; + + ubi@1680000 { + label = "ubi"; + reg = <0x1680000 0x1E00000>; + }; + + netgear@3480000 { + label = "netgear"; + reg = <0x3480000 0x4480000>; + read-only; + }; + + reserve@7900000 { + label = "reserve"; + reg = <0x7900000 0x0700000>; + read-only; + }; + + firmware@1480000 { + label = "firmware"; + reg = <0x1480000 0x2000000>; + }; + }; + }; + }; + + mdio0: mdio { + compatible = "virtual,mdio-gpio"; + #address-cells = <1>; + #size-cells = <0>; + gpios = <&qcom_pinmux 1 GPIO_ACTIVE_HIGH &qcom_pinmux 0 GPIO_ACTIVE_HIGH>; + pinctrl-0 = <&mdio0_pins>; + pinctrl-names = "default"; + + + phy0: ethernet-phy@0 { + device_type = "ethernet-phy"; + reg = <0>; + qca,ar8327-initvals = < + 0x00004 0x7600000 /* PAD0_MODE */ + 0x00008 0x1000000 /* PAD5_MODE */ + 0x0000c 0x80 /* PAD6_MODE */ + 0x000e4 0xaa545 /* MAC_POWER_SEL */ + 0x000e0 0xc74164de /* SGMII_CTRL */ + 0x0007c 0x4e /* PORT0_STATUS */ + 0x00094 0x4e /* PORT6_STATUS */ + 0x00970 0x1e864443 /* QM_PORT0_CTRL0 */ + 0x00974 0x000001c6 /* QM_PORT0_CTRL1 */ + 0x00978 0x19008643 /* QM_PORT1_CTRL0 */ + 0x0097c 0x000001c6 /* QM_PORT1_CTRL1 */ + 0x00980 0x19008643 /* QM_PORT2_CTRL0 */ + 0x00984 0x000001c6 /* QM_PORT2_CTRL1 */ + 0x00988 0x19008643 /* 
QM_PORT3_CTRL0 */ + 0x0098c 0x000001c6 /* QM_PORT3_CTRL1 */ + 0x00990 0x19008643 /* QM_PORT4_CTRL0 */ + 0x00994 0x000001c6 /* QM_PORT4_CTRL1 */ + 0x00998 0x1e864443 /* QM_PORT5_CTRL0 */ + 0x0099c 0x000001c6 /* QM_PORT5_CTRL1 */ + 0x009a0 0x1e864443 /* QM_PORT6_CTRL0 */ + 0x009a4 0x000001c6 /* QM_PORT6_CTRL1 */ + >; + qca,ar8327-vlans = < + 0x1 0x5e /* VLAN1 Ports 1/2/3/4/6 */ + 0x2 0x21 /* VLAN2 Ports 0/5 */ + >; + }; + + phy4: ethernet-phy@4 { + device_type = "ethernet-phy"; + reg = <4>; + qca,ar8327-initvals = < + 0x000e4 0x6a545 /* MAC_POWER_SEL */ + 0x0000c 0x80 /* PAD6_MODE */ + >; + }; + }; + + gmac1: ethernet@37200000 { + status = "ok"; + phy-mode = "rgmii"; + qcom,id = <1>; + qcom,phy_mdio_addr = <4>; + qcom,poll_required = <0>; + qcom,rgmii_delay = <1>; + qcom,phy_mii_type = <0>; + qcom,emulation = <0>; + qcom,irq = <255>; + mdiobus = <&mdio0>; + + pinctrl-0 = <&rgmii2_pins>; + pinctrl-names = "default"; + + mtd-mac-address = <&art 6>; + + fixed-link { + speed = <1000>; + full-duplex; + }; + }; + + gmac2: ethernet@37400000 { + status = "ok"; + phy-mode = "sgmii"; + qcom,id = <2>; + qcom,phy_mdio_addr = <0>; /* none */ + qcom,poll_required = <0>; /* no polling */ + qcom,rgmii_delay = <0>; + qcom,phy_mii_type = <1>; + qcom,emulation = <0>; + qcom,irq = <258>; + mdiobus = <&mdio0>; + + mtd-mac-address = <&art 0>; + + fixed-link { + speed = <1000>; + full-duplex; + }; + }; + + rpm@108000 { + pinctrl-0 = <&i2c4_pins>; + pinctrl-names = "default"; + }; + }; + + gpio-keys { + compatible = "gpio-keys"; + pinctrl-0 = <&button_pins>; + pinctrl-names = "default"; + + wifi { + label = "wifi"; + gpios = <&qcom_pinmux 6 GPIO_ACTIVE_LOW>; + linux,code = ; + debounce-interval = <60>; + wakeup-source; + }; + + reset { + label = "reset"; + gpios = <&qcom_pinmux 54 GPIO_ACTIVE_LOW>; + linux,code = ; + debounce-interval = <60>; + wakeup-source; + }; + + wps { + label = "wps"; + gpios = <&qcom_pinmux 65 GPIO_ACTIVE_LOW>; + linux,code = ; + debounce-interval = <60>; + 
wakeup-source; + }; + }; + + gpio-leds { + compatible = "gpio-leds"; + pinctrl-0 = <&led_pins>; + pinctrl-names = "default"; + + power_white: power_white { + label = "r7800:white:power"; + gpios = <&qcom_pinmux 53 GPIO_ACTIVE_HIGH>; + default-state = "keep"; + }; + + power_amber: power_amber { + label = "r7800:amber:power"; + gpios = <&qcom_pinmux 9 GPIO_ACTIVE_HIGH>; + }; + + wan_white { + label = "r7800:white:wan"; + gpios = <&qcom_pinmux 22 GPIO_ACTIVE_HIGH>; + }; + + wan_amber { + label = "r7800:amber:wan"; + gpios = <&qcom_pinmux 23 GPIO_ACTIVE_HIGH>; + }; + + usb1 { + label = "r7800:white:usb1"; + gpios = <&qcom_pinmux 7 GPIO_ACTIVE_HIGH>; + }; + + usb2 { + label = "r7800:white:usb2"; + gpios = <&qcom_pinmux 8 GPIO_ACTIVE_HIGH>; + }; + + esata { + label = "r7800:white:esata"; + gpios = <&qcom_pinmux 26 GPIO_ACTIVE_HIGH>; + }; + + wifi { + label = "r7800:white:wifi"; + gpios = <&qcom_pinmux 64 GPIO_ACTIVE_HIGH>; + }; + + wps { + label = "r7800:white:wps"; + gpios = <&qcom_pinmux 24 GPIO_ACTIVE_HIGH>; + }; + }; +}; + +&adm_dma { + status = "ok"; +}; diff --git a/target/linux/ipq40xx/files-4.9/arch/arm/boot/dts/qcom-ipq8065-v1.0.dtsi b/target/linux/ipq40xx/files-4.9/arch/arm/boot/dts/qcom-ipq8065-v1.0.dtsi new file mode 100644 index 000000000..22b5338cc --- /dev/null +++ b/target/linux/ipq40xx/files-4.9/arch/arm/boot/dts/qcom-ipq8065-v1.0.dtsi @@ -0,0 +1 @@ +#include "qcom-ipq8065.dtsi" diff --git a/target/linux/ipq40xx/files-4.9/arch/arm/boot/dts/qcom-ipq8065.dtsi b/target/linux/ipq40xx/files-4.9/arch/arm/boot/dts/qcom-ipq8065.dtsi new file mode 100644 index 000000000..15f3b95f8 --- /dev/null +++ b/target/linux/ipq40xx/files-4.9/arch/arm/boot/dts/qcom-ipq8065.dtsi @@ -0,0 +1,164 @@ +#include "qcom-ipq8064.dtsi" + +/ { + model = "Qualcomm IPQ8065"; + compatible = "qcom,ipq8065", "qcom,ipq8064"; + + qcom,pvs { + qcom,pvs-format-a; + qcom,speed0-pvs0-bin-v0 = + < 1725000000 1262500 >, + < 1400000000 1175000 >, + < 1000000000 1100000 >, + < 800000000 1050000 >, + < 
600000000 1000000 >, + < 384000000 975000 >; + qcom,speed0-pvs1-bin-v0 = + < 1725000000 1225000 >, + < 1400000000 1150000 >, + < 1000000000 1075000 >, + < 800000000 1025000 >, + < 600000000 975000 >, + < 384000000 950000 >; + qcom,speed0-pvs2-bin-v0 = + < 1725000000 1200000 >, + < 1400000000 1125000 >, + < 1000000000 1050000 >, + < 800000000 1000000 >, + < 600000000 950000 >, + < 384000000 925000 >; + qcom,speed0-pvs3-bin-v0 = + < 1725000000 1175000 >, + < 1400000000 1100000 >, + < 1000000000 1025000 >, + < 800000000 975000 >, + < 600000000 925000 >, + < 384000000 900000 >; + qcom,speed0-pvs4-bin-v0 = + < 1725000000 1150000 >, + < 1400000000 1075000 >, + < 1000000000 1000000 >, + < 800000000 950000 >, + < 600000000 900000 >, + < 384000000 875000 >; + qcom,speed0-pvs5-bin-v0 = + < 1725000000 1100000 >, + < 1400000000 1025000 >, + < 1000000000 950000 >, + < 800000000 900000 >, + < 600000000 850000 >, + < 384000000 825000 >; + qcom,speed0-pvs6-bin-v0 = + < 1725000000 1050000 >, + < 1400000000 975000 >, + < 1000000000 900000 >, + < 800000000 850000 >, + < 600000000 800000 >, + < 384000000 775000 >; + }; + + soc: soc { + + rpm@108000 { + + regulators { + + smb208_s2a: s2a { + regulator-min-microvolt = <775000>; + regulator-max-microvolt = <1275000>; + }; + + smb208_s2b: s2b { + regulator-min-microvolt = <775000>; + regulator-max-microvolt = <1275000>; + }; + }; + }; + + ss_phy_0: phy@110f8830 { + rx_eq = <2>; + tx_deamp_3_5db = <32>; + mpll = <0xa0>; + }; + ss_phy_1: phy@100f8830 { + rx_eq = <2>; + tx_deamp_3_5db = <32>; + mpll = <0xa0>; + }; + + /* Temporary fixed regulator */ + vsdcc_fixed: vsdcc-regulator { + compatible = "regulator-fixed"; + regulator-name = "SDCC Power"; + regulator-min-microvolt = <3300000>; + regulator-max-microvolt = <3300000>; + regulator-always-on; + }; + + sdcc1bam:dma@12402000 { + compatible = "qcom,bam-v1.3.0"; + reg = <0x12402000 0x8000>; + interrupts = <0 98 0>; + clocks = <&gcc SDC1_H_CLK>; + clock-names = "bam_clk"; + #dma-cells = <1>; 
+ qcom,ee = <0>; + }; + + sdcc3bam:dma@12182000 { + compatible = "qcom,bam-v1.3.0"; + reg = <0x12182000 0x8000>; + interrupts = <0 96 0>; + clocks = <&gcc SDC3_H_CLK>; + clock-names = "bam_clk"; + #dma-cells = <1>; + qcom,ee = <0>; + }; + + amba { + compatible = "arm,amba-bus"; + #address-cells = <1>; + #size-cells = <1>; + ranges; + sdcc1: sdcc@12400000 { + status = "disabled"; + compatible = "arm,pl18x", "arm,primecell"; + arm,primecell-periphid = <0x00051180>; + reg = <0x12400000 0x2000>; + interrupts = ; + interrupt-names = "cmd_irq"; + clocks = <&gcc SDC1_CLK>, <&gcc SDC1_H_CLK>; + clock-names = "mclk", "apb_pclk"; + bus-width = <8>; + max-frequency = <96000000>; + non-removable; + cap-sd-highspeed; + cap-mmc-highspeed; + vmmc-supply = <&vsdcc_fixed>; + dmas = <&sdcc1bam 2>, <&sdcc1bam 1>; + dma-names = "tx", "rx"; + }; + + sdcc3: sdcc@12180000 { + compatible = "arm,pl18x", "arm,primecell"; + arm,primecell-periphid = <0x00051180>; + status = "disabled"; + reg = <0x12180000 0x2000>; + interrupts = ; + interrupt-names = "cmd_irq"; + clocks = <&gcc SDC3_CLK>, <&gcc SDC3_H_CLK>; + clock-names = "mclk", "apb_pclk"; + bus-width = <8>; + cap-sd-highspeed; + cap-mmc-highspeed; + max-frequency = <192000000>; + #mmc-ddr-1_8v; + sd-uhs-sdr104; + sd-uhs-ddr50; + vqmmc-supply = <&vsdcc_fixed>; + dmas = <&sdcc3bam 2>, <&sdcc3bam 1>; + dma-names = "tx", "rx"; + }; + }; + }; +}; diff --git a/target/linux/ipq40xx/patches-4.9/0001-dtbindings-qcom_adm-Fix-channel-specifiers.patch b/target/linux/ipq40xx/patches-4.9/0001-dtbindings-qcom_adm-Fix-channel-specifiers.patch new file mode 100644 index 000000000..2a36a85b8 --- /dev/null +++ b/target/linux/ipq40xx/patches-4.9/0001-dtbindings-qcom_adm-Fix-channel-specifiers.patch @@ -0,0 +1,71 @@ +From 28d0ed88f536dd639adf1b0c7c08e04be3c8f294 Mon Sep 17 00:00:00 2001 +From: Thomas Pedersen +Date: Mon, 16 May 2016 17:58:50 -0700 +Subject: [PATCH 01/69] dtbindings: qcom_adm: Fix channel specifiers + +Original patch from Andy Gross. 
+ +This patch removes the crci information from the dma +channel property. At least one client device requires +using more than one CRCI value for a channel. This does +not match the current binding and the crci information +needs to be removed. + +Instead, the client device will provide this information +via other means. + +Signed-off-by: Andy Gross +Signed-off-by: Thomas Pedersen +--- + Documentation/devicetree/bindings/dma/qcom_adm.txt | 16 ++++++---------- + 1 file changed, 6 insertions(+), 10 deletions(-) + +--- a/Documentation/devicetree/bindings/dma/qcom_adm.txt ++++ b/Documentation/devicetree/bindings/dma/qcom_adm.txt +@@ -4,8 +4,7 @@ Required properties: + - compatible: must contain "qcom,adm" for IPQ/APQ8064 and MSM8960 + - reg: Address range for DMA registers + - interrupts: Should contain one interrupt shared by all channels +-- #dma-cells: must be <2>. First cell denotes the channel number. Second cell +- denotes CRCI (client rate control interface) flow control assignment. ++- #dma-cells: must be <1>. First cell denotes the channel number. + - clocks: Should contain the core clock and interface clock. + - clock-names: Must contain "core" for the core clock and "iface" for the + interface clock. +@@ -22,7 +21,7 @@ Example: + compatible = "qcom,adm"; + reg = <0x18300000 0x100000>; + interrupts = <0 170 0>; +- #dma-cells = <2>; ++ #dma-cells = <1>; + + clocks = <&gcc ADM0_CLK>, <&gcc ADM0_PBUS_CLK>; + clock-names = "core", "iface"; +@@ -35,15 +34,12 @@ Example: + qcom,ee = <0>; + }; + +-DMA clients must use the format descripted in the dma.txt file, using a three ++DMA clients must use the format descripted in the dma.txt file, using a two + cell specifier for each channel. + +-Each dmas request consists of 3 cells: ++Each dmas request consists of two cells: + 1. phandle pointing to the DMA controller + 2. channel number +- 3. CRCI assignment, if applicable. If no CRCI flow control is required, use 0. +- The CRCI is used for flow control. 
It identifies the peripheral device that +- is the source/destination for the transferred data. + + Example: + +@@ -56,7 +52,7 @@ Example: + + cs-gpios = <&qcom_pinmux 20 0>; + +- dmas = <&adm_dma 6 9>, +- <&adm_dma 5 10>; ++ dmas = <&adm_dma 6>, ++ <&adm_dma 5>; + dma-names = "rx", "tx"; + }; diff --git a/target/linux/ipq40xx/patches-4.9/0002-dmaengine-Add-ADM-driver.patch b/target/linux/ipq40xx/patches-4.9/0002-dmaengine-Add-ADM-driver.patch new file mode 100644 index 000000000..4ad025c9c --- /dev/null +++ b/target/linux/ipq40xx/patches-4.9/0002-dmaengine-Add-ADM-driver.patch @@ -0,0 +1,966 @@ +From 563fa24db4e529c5a3311928d73a8a90531ee527 Mon Sep 17 00:00:00 2001 +From: Thomas Pedersen +Date: Mon, 16 May 2016 17:58:51 -0700 +Subject: [PATCH 02/69] dmaengine: Add ADM driver + +Original patch by Andy Gross. + +Add the DMA engine driver for the QCOM Application Data Mover (ADM) DMA +controller found in the MSM8x60 and IPQ/APQ8064 platforms. + +The ADM supports both memory to memory transactions and memory +to/from peripheral device transactions. The controller also provides flow +control capabilities for transactions to/from peripheral devices. + +The initial release of this driver supports slave transfers to/from peripherals +and also incorporates CRCI (client rate control interface) flow control. + +Signed-off-by: Andy Gross +Signed-off-by: Thomas Pedersen +--- + drivers/dma/qcom/Kconfig | 10 + + drivers/dma/qcom/Makefile | 1 + + drivers/dma/qcom/qcom_adm.c | 900 ++++++++++++++++++++++++++++++++++++++++++++ + 3 files changed, 911 insertions(+) + create mode 100644 drivers/dma/qcom/qcom_adm.c + +--- a/drivers/dma/qcom/Kconfig ++++ b/drivers/dma/qcom/Kconfig +@@ -27,3 +27,13 @@ config QCOM_HIDMA + (user to kernel, kernel to kernel, etc.). It only supports + memcpy interface. The core is not intended for general + purpose slave DMA. 
++ ++config QCOM_ADM ++ tristate "Qualcomm ADM support" ++ depends on ARCH_QCOM || (COMPILE_TEST && OF && ARM) ++ select DMA_ENGINE ++ select DMA_VIRTUAL_CHANNELS ++ ---help--- ++ Enable support for the Qualcomm ADM DMA controller. This controller ++ provides DMA capabilities for both general purpose and on-chip ++ peripheral devices. +--- a/drivers/dma/qcom/Makefile ++++ b/drivers/dma/qcom/Makefile +@@ -3,3 +3,4 @@ obj-$(CONFIG_QCOM_HIDMA_MGMT) += hdma_mg + hdma_mgmt-objs := hidma_mgmt.o hidma_mgmt_sys.o + obj-$(CONFIG_QCOM_HIDMA) += hdma.o + hdma-objs := hidma_ll.o hidma.o hidma_dbg.o ++obj-$(CONFIG_QCOM_ADM) += qcom_adm.o +--- /dev/null ++++ b/drivers/dma/qcom/qcom_adm.c +@@ -0,0 +1,914 @@ ++/* ++ * Copyright (c) 2013-2015, The Linux Foundation. All rights reserved. ++ * ++ * This program is free software; you can redistribute it and/or modify ++ * it under the terms of the GNU General Public License version 2 and ++ * only version 2 as published by the Free Software Foundation. ++ * ++ * This program is distributed in the hope that it will be useful, ++ * but WITHOUT ANY WARRANTY; without even the implied warranty of ++ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the ++ * GNU General Public License for more details. 
++ * ++ */ ++ ++#include ++#include ++#include ++#include ++#include ++#include ++#include ++#include ++#include ++#include ++#include ++#include ++#include ++#include ++#include ++#include ++#include ++ ++#include "../dmaengine.h" ++#include "../virt-dma.h" ++ ++/* ADM registers - calculated from channel number and security domain */ ++#define ADM_CHAN_MULTI 0x4 ++#define ADM_CI_MULTI 0x4 ++#define ADM_CRCI_MULTI 0x4 ++#define ADM_EE_MULTI 0x800 ++#define ADM_CHAN_OFFS(chan) (ADM_CHAN_MULTI * chan) ++#define ADM_EE_OFFS(ee) (ADM_EE_MULTI * ee) ++#define ADM_CHAN_EE_OFFS(chan, ee) (ADM_CHAN_OFFS(chan) + ADM_EE_OFFS(ee)) ++#define ADM_CHAN_OFFS(chan) (ADM_CHAN_MULTI * chan) ++#define ADM_CI_OFFS(ci) (ADM_CHAN_OFF(ci)) ++#define ADM_CH_CMD_PTR(chan, ee) (ADM_CHAN_EE_OFFS(chan, ee)) ++#define ADM_CH_RSLT(chan, ee) (0x40 + ADM_CHAN_EE_OFFS(chan, ee)) ++#define ADM_CH_FLUSH_STATE0(chan, ee) (0x80 + ADM_CHAN_EE_OFFS(chan, ee)) ++#define ADM_CH_STATUS_SD(chan, ee) (0x200 + ADM_CHAN_EE_OFFS(chan, ee)) ++#define ADM_CH_CONF(chan) (0x240 + ADM_CHAN_OFFS(chan)) ++#define ADM_CH_RSLT_CONF(chan, ee) (0x300 + ADM_CHAN_EE_OFFS(chan, ee)) ++#define ADM_SEC_DOMAIN_IRQ_STATUS(ee) (0x380 + ADM_EE_OFFS(ee)) ++#define ADM_CI_CONF(ci) (0x390 + ci * ADM_CI_MULTI) ++#define ADM_GP_CTL 0x3d8 ++#define ADM_CRCI_CTL(crci, ee) (0x400 + crci * ADM_CRCI_MULTI + \ ++ ADM_EE_OFFS(ee)) ++ ++/* channel status */ ++#define ADM_CH_STATUS_VALID BIT(1) ++ ++/* channel result */ ++#define ADM_CH_RSLT_VALID BIT(31) ++#define ADM_CH_RSLT_ERR BIT(3) ++#define ADM_CH_RSLT_FLUSH BIT(2) ++#define ADM_CH_RSLT_TPD BIT(1) ++ ++/* channel conf */ ++#define ADM_CH_CONF_SHADOW_EN BIT(12) ++#define ADM_CH_CONF_MPU_DISABLE BIT(11) ++#define ADM_CH_CONF_PERM_MPU_CONF BIT(9) ++#define ADM_CH_CONF_FORCE_RSLT_EN BIT(7) ++#define ADM_CH_CONF_SEC_DOMAIN(ee) (((ee & 0x3) << 4) | ((ee & 0x4) << 11)) ++ ++/* channel result conf */ ++#define ADM_CH_RSLT_CONF_FLUSH_EN BIT(1) ++#define ADM_CH_RSLT_CONF_IRQ_EN BIT(0) ++ ++/* CRCI 
CTL */ ++#define ADM_CRCI_CTL_MUX_SEL BIT(18) ++#define ADM_CRCI_CTL_RST BIT(17) ++ ++/* CI configuration */ ++#define ADM_CI_RANGE_END(x) (x << 24) ++#define ADM_CI_RANGE_START(x) (x << 16) ++#define ADM_CI_BURST_4_WORDS BIT(2) ++#define ADM_CI_BURST_8_WORDS BIT(3) ++ ++/* GP CTL */ ++#define ADM_GP_CTL_LP_EN BIT(12) ++#define ADM_GP_CTL_LP_CNT(x) (x << 8) ++ ++/* Command pointer list entry */ ++#define ADM_CPLE_LP BIT(31) ++#define ADM_CPLE_CMD_PTR_LIST BIT(29) ++ ++/* Command list entry */ ++#define ADM_CMD_LC BIT(31) ++#define ADM_CMD_DST_CRCI(n) (((n) & 0xf) << 7) ++#define ADM_CMD_SRC_CRCI(n) (((n) & 0xf) << 3) ++ ++#define ADM_CMD_TYPE_SINGLE 0x0 ++#define ADM_CMD_TYPE_BOX 0x3 ++ ++#define ADM_CRCI_MUX_SEL BIT(4) ++#define ADM_DESC_ALIGN 8 ++#define ADM_MAX_XFER (SZ_64K-1) ++#define ADM_MAX_ROWS (SZ_64K-1) ++#define ADM_MAX_CHANNELS 16 ++ ++struct adm_desc_hw_box { ++ u32 cmd; ++ u32 src_addr; ++ u32 dst_addr; ++ u32 row_len; ++ u32 num_rows; ++ u32 row_offset; ++}; ++ ++struct adm_desc_hw_single { ++ u32 cmd; ++ u32 src_addr; ++ u32 dst_addr; ++ u32 len; ++}; ++ ++struct adm_async_desc { ++ struct virt_dma_desc vd; ++ struct adm_device *adev; ++ ++ size_t length; ++ enum dma_transfer_direction dir; ++ dma_addr_t dma_addr; ++ size_t dma_len; ++ ++ void *cpl; ++ dma_addr_t cp_addr; ++ u32 crci; ++ u32 mux; ++ u32 blk_size; ++}; ++ ++struct adm_chan { ++ struct virt_dma_chan vc; ++ struct adm_device *adev; ++ ++ /* parsed from DT */ ++ u32 id; /* channel id */ ++ ++ struct adm_async_desc *curr_txd; ++ struct dma_slave_config slave; ++ struct list_head node; ++ ++ int error; ++ int initialized; ++}; ++ ++static inline struct adm_chan *to_adm_chan(struct dma_chan *common) ++{ ++ return container_of(common, struct adm_chan, vc.chan); ++} ++ ++struct adm_device { ++ void __iomem *regs; ++ struct device *dev; ++ struct dma_device common; ++ struct device_dma_parameters dma_parms; ++ struct adm_chan *channels; ++ ++ u32 ee; ++ ++ struct clk *core_clk; ++ struct clk 
*iface_clk; ++ ++ struct reset_control *clk_reset; ++ struct reset_control *c0_reset; ++ struct reset_control *c1_reset; ++ struct reset_control *c2_reset; ++ int irq; ++}; ++ ++/** ++ * adm_free_chan - Frees dma resources associated with the specific channel ++ * ++ * Free all allocated descriptors associated with this channel ++ * ++ */ ++static void adm_free_chan(struct dma_chan *chan) ++{ ++ /* free all queued descriptors */ ++ vchan_free_chan_resources(to_virt_chan(chan)); ++} ++ ++/** ++ * adm_get_blksize - Get block size from burst value ++ * ++ */ ++static int adm_get_blksize(unsigned int burst) ++{ ++ int ret; ++ ++ switch (burst) { ++ case 16: ++ case 32: ++ case 64: ++ case 128: ++ ret = ffs(burst>>4) - 1; ++ break; ++ case 192: ++ ret = 4; ++ break; ++ case 256: ++ ret = 5; ++ break; ++ default: ++ ret = -EINVAL; ++ break; ++ } ++ ++ return ret; ++} ++ ++/** ++ * adm_process_fc_descriptors - Process descriptors for flow controlled xfers ++ * ++ * @achan: ADM channel ++ * @desc: Descriptor memory pointer ++ * @sg: Scatterlist entry ++ * @crci: CRCI value ++ * @burst: Burst size of transaction ++ * @direction: DMA transfer direction ++ */ ++static void *adm_process_fc_descriptors(struct adm_chan *achan, ++ void *desc, struct scatterlist *sg, u32 crci, u32 burst, ++ enum dma_transfer_direction direction) ++{ ++ struct adm_desc_hw_box *box_desc = NULL; ++ struct adm_desc_hw_single *single_desc; ++ u32 remainder = sg_dma_len(sg); ++ u32 rows, row_offset, crci_cmd; ++ u32 mem_addr = sg_dma_address(sg); ++ u32 *incr_addr = &mem_addr; ++ u32 *src, *dst; ++ ++ if (direction == DMA_DEV_TO_MEM) { ++ crci_cmd = ADM_CMD_SRC_CRCI(crci); ++ row_offset = burst; ++ src = &achan->slave.src_addr; ++ dst = &mem_addr; ++ } else { ++ crci_cmd = ADM_CMD_DST_CRCI(crci); ++ row_offset = burst << 16; ++ src = &mem_addr; ++ dst = &achan->slave.dst_addr; ++ } ++ ++ while (remainder >= burst) { ++ box_desc = desc; ++ box_desc->cmd = ADM_CMD_TYPE_BOX | crci_cmd; ++ 
box_desc->row_offset = row_offset; ++ box_desc->src_addr = *src; ++ box_desc->dst_addr = *dst; ++ ++ rows = remainder / burst; ++ rows = min_t(u32, rows, ADM_MAX_ROWS); ++ box_desc->num_rows = rows << 16 | rows; ++ box_desc->row_len = burst << 16 | burst; ++ ++ *incr_addr += burst * rows; ++ remainder -= burst * rows; ++ desc += sizeof(*box_desc); ++ } ++ ++ /* if leftover bytes, do one single descriptor */ ++ if (remainder) { ++ single_desc = desc; ++ single_desc->cmd = ADM_CMD_TYPE_SINGLE | crci_cmd; ++ single_desc->len = remainder; ++ single_desc->src_addr = *src; ++ single_desc->dst_addr = *dst; ++ desc += sizeof(*single_desc); ++ ++ if (sg_is_last(sg)) ++ single_desc->cmd |= ADM_CMD_LC; ++ } else { ++ if (box_desc && sg_is_last(sg)) ++ box_desc->cmd |= ADM_CMD_LC; ++ } ++ ++ return desc; ++} ++ ++/** ++ * adm_process_non_fc_descriptors - Process descriptors for non-fc xfers ++ * ++ * @achan: ADM channel ++ * @desc: Descriptor memory pointer ++ * @sg: Scatterlist entry ++ * @direction: DMA transfer direction ++ */ ++static void *adm_process_non_fc_descriptors(struct adm_chan *achan, ++ void *desc, struct scatterlist *sg, ++ enum dma_transfer_direction direction) ++{ ++ struct adm_desc_hw_single *single_desc; ++ u32 remainder = sg_dma_len(sg); ++ u32 mem_addr = sg_dma_address(sg); ++ u32 *incr_addr = &mem_addr; ++ u32 *src, *dst; ++ ++ if (direction == DMA_DEV_TO_MEM) { ++ src = &achan->slave.src_addr; ++ dst = &mem_addr; ++ } else { ++ src = &mem_addr; ++ dst = &achan->slave.dst_addr; ++ } ++ ++ do { ++ single_desc = desc; ++ single_desc->cmd = ADM_CMD_TYPE_SINGLE; ++ single_desc->src_addr = *src; ++ single_desc->dst_addr = *dst; ++ single_desc->len = (remainder > ADM_MAX_XFER) ? 
++ ADM_MAX_XFER : remainder; ++ ++ remainder -= single_desc->len; ++ *incr_addr += single_desc->len; ++ desc += sizeof(*single_desc); ++ } while (remainder); ++ ++ /* set last command if this is the end of the whole transaction */ ++ if (sg_is_last(sg)) ++ single_desc->cmd |= ADM_CMD_LC; ++ ++ return desc; ++} ++ ++/** ++ * adm_prep_slave_sg - Prep slave sg transaction ++ * ++ * @chan: dma channel ++ * @sgl: scatter gather list ++ * @sg_len: length of sg ++ * @direction: DMA transfer direction ++ * @flags: DMA flags ++ * @context: transfer context (unused) ++ */ ++static struct dma_async_tx_descriptor *adm_prep_slave_sg(struct dma_chan *chan, ++ struct scatterlist *sgl, unsigned int sg_len, ++ enum dma_transfer_direction direction, unsigned long flags, ++ void *context) ++{ ++ struct adm_chan *achan = to_adm_chan(chan); ++ struct adm_device *adev = achan->adev; ++ struct adm_async_desc *async_desc; ++ struct scatterlist *sg; ++ dma_addr_t cple_addr; ++ u32 i, burst; ++ u32 single_count = 0, box_count = 0, crci = 0; ++ void *desc; ++ u32 *cple; ++ int blk_size = 0; ++ ++ if (!is_slave_direction(direction)) { ++ dev_err(adev->dev, "invalid dma direction\n"); ++ return NULL; ++ } ++ ++ /* ++ * get burst value from slave configuration ++ */ ++ burst = (direction == DMA_MEM_TO_DEV) ? 
++ achan->slave.dst_maxburst : ++ achan->slave.src_maxburst; ++ ++ /* if using flow control, validate burst and crci values */ ++ if (achan->slave.device_fc) { ++ ++ blk_size = adm_get_blksize(burst); ++ if (blk_size < 0) { ++ dev_err(adev->dev, "invalid burst value: %d\n", ++ burst); ++ return ERR_PTR(-EINVAL); ++ } ++ ++ crci = achan->slave.slave_id & 0xf; ++ if (!crci || achan->slave.slave_id > 0x1f) { ++ dev_err(adev->dev, "invalid crci value\n"); ++ return ERR_PTR(-EINVAL); ++ } ++ } ++ ++ /* iterate through sgs and compute allocation size of structures */ ++ for_each_sg(sgl, sg, sg_len, i) { ++ if (achan->slave.device_fc) { ++ box_count += DIV_ROUND_UP(sg_dma_len(sg) / burst, ++ ADM_MAX_ROWS); ++ if (sg_dma_len(sg) % burst) ++ single_count++; ++ } else { ++ single_count += DIV_ROUND_UP(sg_dma_len(sg), ++ ADM_MAX_XFER); ++ } ++ } ++ ++ async_desc = kzalloc(sizeof(*async_desc), GFP_ATOMIC); ++ if (!async_desc) ++ return ERR_PTR(-ENOMEM); ++ ++ if (crci) ++ async_desc->mux = achan->slave.slave_id & ADM_CRCI_MUX_SEL ? 
++ ADM_CRCI_CTL_MUX_SEL : 0; ++ async_desc->crci = crci; ++ async_desc->blk_size = blk_size; ++ async_desc->dma_len = single_count * sizeof(struct adm_desc_hw_single) + ++ box_count * sizeof(struct adm_desc_hw_box) + ++ sizeof(*cple) + 2 * ADM_DESC_ALIGN; ++ ++ async_desc->cpl = kzalloc(async_desc->dma_len, GFP_ATOMIC); ++ if (!async_desc->cpl) ++ goto free; ++ ++ async_desc->adev = adev; ++ ++ /* both command list entry and descriptors must be 8 byte aligned */ ++ cple = PTR_ALIGN(async_desc->cpl, ADM_DESC_ALIGN); ++ desc = PTR_ALIGN(cple + 1, ADM_DESC_ALIGN); ++ ++ for_each_sg(sgl, sg, sg_len, i) { ++ async_desc->length += sg_dma_len(sg); ++ ++ if (achan->slave.device_fc) ++ desc = adm_process_fc_descriptors(achan, desc, sg, crci, ++ burst, direction); ++ else ++ desc = adm_process_non_fc_descriptors(achan, desc, sg, ++ direction); ++ } ++ ++ async_desc->dma_addr = dma_map_single(adev->dev, async_desc->cpl, ++ async_desc->dma_len, ++ DMA_TO_DEVICE); ++ if (dma_mapping_error(adev->dev, async_desc->dma_addr)) ++ goto free; ++ ++ cple_addr = async_desc->dma_addr + ((void *)cple - async_desc->cpl); ++ ++ /* init cmd list */ ++ dma_sync_single_for_cpu(adev->dev, cple_addr, sizeof(*cple), ++ DMA_TO_DEVICE); ++ *cple = ADM_CPLE_LP; ++ *cple |= (async_desc->dma_addr + ADM_DESC_ALIGN) >> 3; ++ dma_sync_single_for_device(adev->dev, cple_addr, sizeof(*cple), ++ DMA_TO_DEVICE); ++ ++ return vchan_tx_prep(&achan->vc, &async_desc->vd, flags); ++ ++free: ++ kfree(async_desc); ++ return ERR_PTR(-ENOMEM); ++} ++ ++/** ++ * adm_terminate_all - terminate all transactions on a channel ++ * @achan: adm dma channel ++ * ++ * Dequeues and frees all transactions, aborts current transaction ++ * No callbacks are done ++ * ++ */ ++static int adm_terminate_all(struct dma_chan *chan) ++{ ++ struct adm_chan *achan = to_adm_chan(chan); ++ struct adm_device *adev = achan->adev; ++ unsigned long flags; ++ LIST_HEAD(head); ++ ++ spin_lock_irqsave(&achan->vc.lock, flags); ++ 
vchan_get_all_descriptors(&achan->vc, &head); ++ ++ /* send flush command to terminate current transaction */ ++ writel_relaxed(0x0, ++ adev->regs + ADM_CH_FLUSH_STATE0(achan->id, adev->ee)); ++ ++ spin_unlock_irqrestore(&achan->vc.lock, flags); ++ ++ vchan_dma_desc_free_list(&achan->vc, &head); ++ ++ return 0; ++} ++ ++static int adm_slave_config(struct dma_chan *chan, struct dma_slave_config *cfg) ++{ ++ struct adm_chan *achan = to_adm_chan(chan); ++ unsigned long flag; ++ ++ spin_lock_irqsave(&achan->vc.lock, flag); ++ memcpy(&achan->slave, cfg, sizeof(struct dma_slave_config)); ++ spin_unlock_irqrestore(&achan->vc.lock, flag); ++ ++ return 0; ++} ++ ++/** ++ * adm_start_dma - start next transaction ++ * @achan - ADM dma channel ++ */ ++static void adm_start_dma(struct adm_chan *achan) ++{ ++ struct virt_dma_desc *vd = vchan_next_desc(&achan->vc); ++ struct adm_device *adev = achan->adev; ++ struct adm_async_desc *async_desc; ++ ++ lockdep_assert_held(&achan->vc.lock); ++ ++ if (!vd) ++ return; ++ ++ list_del(&vd->node); ++ ++ /* write next command list out to the CMD FIFO */ ++ async_desc = container_of(vd, struct adm_async_desc, vd); ++ achan->curr_txd = async_desc; ++ ++ /* reset channel error */ ++ achan->error = 0; ++ ++ if (!achan->initialized) { ++ /* enable interrupts */ ++ writel(ADM_CH_CONF_SHADOW_EN | ++ ADM_CH_CONF_PERM_MPU_CONF | ++ ADM_CH_CONF_MPU_DISABLE | ++ ADM_CH_CONF_SEC_DOMAIN(adev->ee), ++ adev->regs + ADM_CH_CONF(achan->id)); ++ ++ writel(ADM_CH_RSLT_CONF_IRQ_EN | ADM_CH_RSLT_CONF_FLUSH_EN, ++ adev->regs + ADM_CH_RSLT_CONF(achan->id, adev->ee)); ++ ++ achan->initialized = 1; ++ } ++ ++ /* set the crci block size if this transaction requires CRCI */ ++ if (async_desc->crci) { ++ writel(async_desc->mux | async_desc->blk_size, ++ adev->regs + ADM_CRCI_CTL(async_desc->crci, adev->ee)); ++ } ++ ++ /* make sure IRQ enable doesn't get reordered */ ++ wmb(); ++ ++ /* write next command list out to the CMD FIFO */ ++ 
writel(ALIGN(async_desc->dma_addr, ADM_DESC_ALIGN) >> 3, ++ adev->regs + ADM_CH_CMD_PTR(achan->id, adev->ee)); ++} ++ ++/** ++ * adm_dma_irq - irq handler for ADM controller ++ * @irq: IRQ of interrupt ++ * @data: callback data ++ * ++ * IRQ handler for the bam controller ++ */ ++static irqreturn_t adm_dma_irq(int irq, void *data) ++{ ++ struct adm_device *adev = data; ++ u32 srcs, i; ++ struct adm_async_desc *async_desc; ++ unsigned long flags; ++ ++ srcs = readl_relaxed(adev->regs + ++ ADM_SEC_DOMAIN_IRQ_STATUS(adev->ee)); ++ ++ for (i = 0; i < ADM_MAX_CHANNELS; i++) { ++ struct adm_chan *achan = &adev->channels[i]; ++ u32 status, result; ++ ++ if (srcs & BIT(i)) { ++ status = readl_relaxed(adev->regs + ++ ADM_CH_STATUS_SD(i, adev->ee)); ++ ++ /* if no result present, skip */ ++ if (!(status & ADM_CH_STATUS_VALID)) ++ continue; ++ ++ result = readl_relaxed(adev->regs + ++ ADM_CH_RSLT(i, adev->ee)); ++ ++ /* no valid results, skip */ ++ if (!(result & ADM_CH_RSLT_VALID)) ++ continue; ++ ++ /* flag error if transaction was flushed or failed */ ++ if (result & (ADM_CH_RSLT_ERR | ADM_CH_RSLT_FLUSH)) ++ achan->error = 1; ++ ++ spin_lock_irqsave(&achan->vc.lock, flags); ++ async_desc = achan->curr_txd; ++ ++ achan->curr_txd = NULL; ++ ++ if (async_desc) { ++ vchan_cookie_complete(&async_desc->vd); ++ ++ /* kick off next DMA */ ++ adm_start_dma(achan); ++ } ++ ++ spin_unlock_irqrestore(&achan->vc.lock, flags); ++ } ++ } ++ ++ return IRQ_HANDLED; ++} ++ ++/** ++ * adm_tx_status - returns status of transaction ++ * @chan: dma channel ++ * @cookie: transaction cookie ++ * @txstate: DMA transaction state ++ * ++ * Return status of dma transaction ++ */ ++static enum dma_status adm_tx_status(struct dma_chan *chan, dma_cookie_t cookie, ++ struct dma_tx_state *txstate) ++{ ++ struct adm_chan *achan = to_adm_chan(chan); ++ struct virt_dma_desc *vd; ++ enum dma_status ret; ++ unsigned long flags; ++ size_t residue = 0; ++ ++ ret = dma_cookie_status(chan, cookie, txstate); ++ if 
(ret == DMA_COMPLETE || !txstate) ++ return ret; ++ ++ spin_lock_irqsave(&achan->vc.lock, flags); ++ ++ vd = vchan_find_desc(&achan->vc, cookie); ++ if (vd) ++ residue = container_of(vd, struct adm_async_desc, vd)->length; ++ ++ spin_unlock_irqrestore(&achan->vc.lock, flags); ++ ++ /* ++ * residue is either the full length if it is in the issued list, or 0 ++ * if it is in progress. We have no reliable way of determining ++ * anything inbetween ++ */ ++ dma_set_residue(txstate, residue); ++ ++ if (achan->error) ++ return DMA_ERROR; ++ ++ return ret; ++} ++ ++/** ++ * adm_issue_pending - starts pending transactions ++ * @chan: dma channel ++ * ++ * Issues all pending transactions and starts DMA ++ */ ++static void adm_issue_pending(struct dma_chan *chan) ++{ ++ struct adm_chan *achan = to_adm_chan(chan); ++ unsigned long flags; ++ ++ spin_lock_irqsave(&achan->vc.lock, flags); ++ ++ if (vchan_issue_pending(&achan->vc) && !achan->curr_txd) ++ adm_start_dma(achan); ++ spin_unlock_irqrestore(&achan->vc.lock, flags); ++} ++ ++/** ++ * adm_dma_free_desc - free descriptor memory ++ * @vd: virtual descriptor ++ * ++ */ ++static void adm_dma_free_desc(struct virt_dma_desc *vd) ++{ ++ struct adm_async_desc *async_desc = container_of(vd, ++ struct adm_async_desc, vd); ++ ++ dma_unmap_single(async_desc->adev->dev, async_desc->dma_addr, ++ async_desc->dma_len, DMA_TO_DEVICE); ++ kfree(async_desc->cpl); ++ kfree(async_desc); ++} ++ ++static void adm_channel_init(struct adm_device *adev, struct adm_chan *achan, ++ u32 index) ++{ ++ achan->id = index; ++ achan->adev = adev; ++ ++ vchan_init(&achan->vc, &adev->common); ++ achan->vc.desc_free = adm_dma_free_desc; ++} ++ ++static int adm_dma_probe(struct platform_device *pdev) ++{ ++ struct adm_device *adev; ++ struct resource *iores; ++ int ret; ++ u32 i; ++ ++ adev = devm_kzalloc(&pdev->dev, sizeof(*adev), GFP_KERNEL); ++ if (!adev) ++ return -ENOMEM; ++ ++ adev->dev = &pdev->dev; ++ ++ iores = platform_get_resource(pdev, 
IORESOURCE_MEM, 0); ++ adev->regs = devm_ioremap_resource(&pdev->dev, iores); ++ if (IS_ERR(adev->regs)) ++ return PTR_ERR(adev->regs); ++ ++ adev->irq = platform_get_irq(pdev, 0); ++ if (adev->irq < 0) ++ return adev->irq; ++ ++ ret = of_property_read_u32(pdev->dev.of_node, "qcom,ee", &adev->ee); ++ if (ret) { ++ dev_err(adev->dev, "Execution environment unspecified\n"); ++ return ret; ++ } ++ ++ adev->core_clk = devm_clk_get(adev->dev, "core"); ++ if (IS_ERR(adev->core_clk)) ++ return PTR_ERR(adev->core_clk); ++ ++ ret = clk_prepare_enable(adev->core_clk); ++ if (ret) { ++ dev_err(adev->dev, "failed to prepare/enable core clock\n"); ++ return ret; ++ } ++ ++ adev->iface_clk = devm_clk_get(adev->dev, "iface"); ++ if (IS_ERR(adev->iface_clk)) { ++ ret = PTR_ERR(adev->iface_clk); ++ goto err_disable_core_clk; ++ } ++ ++ ret = clk_prepare_enable(adev->iface_clk); ++ if (ret) { ++ dev_err(adev->dev, "failed to prepare/enable iface clock\n"); ++ goto err_disable_core_clk; ++ } ++ ++ adev->clk_reset = devm_reset_control_get(&pdev->dev, "clk"); ++ if (IS_ERR(adev->clk_reset)) { ++ dev_err(adev->dev, "failed to get ADM0 reset\n"); ++ ret = PTR_ERR(adev->clk_reset); ++ goto err_disable_clks; ++ } ++ ++ adev->c0_reset = devm_reset_control_get(&pdev->dev, "c0"); ++ if (IS_ERR(adev->c0_reset)) { ++ dev_err(adev->dev, "failed to get ADM0 C0 reset\n"); ++ ret = PTR_ERR(adev->c0_reset); ++ goto err_disable_clks; ++ } ++ ++ adev->c1_reset = devm_reset_control_get(&pdev->dev, "c1"); ++ if (IS_ERR(adev->c1_reset)) { ++ dev_err(adev->dev, "failed to get ADM0 C1 reset\n"); ++ ret = PTR_ERR(adev->c1_reset); ++ goto err_disable_clks; ++ } ++ ++ adev->c2_reset = devm_reset_control_get(&pdev->dev, "c2"); ++ if (IS_ERR(adev->c2_reset)) { ++ dev_err(adev->dev, "failed to get ADM0 C2 reset\n"); ++ ret = PTR_ERR(adev->c2_reset); ++ goto err_disable_clks; ++ } ++ ++ reset_control_assert(adev->clk_reset); ++ reset_control_assert(adev->c0_reset); ++ reset_control_assert(adev->c1_reset); ++ 
reset_control_assert(adev->c2_reset); ++ ++ reset_control_deassert(adev->clk_reset); ++ reset_control_deassert(adev->c0_reset); ++ reset_control_deassert(adev->c1_reset); ++ reset_control_deassert(adev->c2_reset); ++ ++ adev->channels = devm_kcalloc(adev->dev, ADM_MAX_CHANNELS, ++ sizeof(*adev->channels), GFP_KERNEL); ++ ++ if (!adev->channels) { ++ ret = -ENOMEM; ++ goto err_disable_clks; ++ } ++ ++ /* allocate and initialize channels */ ++ INIT_LIST_HEAD(&adev->common.channels); ++ ++ for (i = 0; i < ADM_MAX_CHANNELS; i++) ++ adm_channel_init(adev, &adev->channels[i], i); ++ ++ /* reset CRCIs */ ++ for (i = 0; i < 16; i++) ++ writel(ADM_CRCI_CTL_RST, adev->regs + ++ ADM_CRCI_CTL(i, adev->ee)); ++ ++ /* configure client interfaces */ ++ writel(ADM_CI_RANGE_START(0x40) | ADM_CI_RANGE_END(0xb0) | ++ ADM_CI_BURST_8_WORDS, adev->regs + ADM_CI_CONF(0)); ++ writel(ADM_CI_RANGE_START(0x2a) | ADM_CI_RANGE_END(0x2c) | ++ ADM_CI_BURST_8_WORDS, adev->regs + ADM_CI_CONF(1)); ++ writel(ADM_CI_RANGE_START(0x12) | ADM_CI_RANGE_END(0x28) | ++ ADM_CI_BURST_8_WORDS, adev->regs + ADM_CI_CONF(2)); ++ writel(ADM_GP_CTL_LP_EN | ADM_GP_CTL_LP_CNT(0xf), ++ adev->regs + ADM_GP_CTL); ++ ++ ret = devm_request_irq(adev->dev, adev->irq, adm_dma_irq, ++ 0, "adm_dma", adev); ++ if (ret) ++ goto err_disable_clks; ++ ++ platform_set_drvdata(pdev, adev); ++ ++ adev->common.dev = adev->dev; ++ adev->common.dev->dma_parms = &adev->dma_parms; ++ ++ /* set capabilities */ ++ dma_cap_zero(adev->common.cap_mask); ++ dma_cap_set(DMA_SLAVE, adev->common.cap_mask); ++ dma_cap_set(DMA_PRIVATE, adev->common.cap_mask); ++ ++ /* initialize dmaengine apis */ ++ adev->common.directions = BIT(DMA_DEV_TO_MEM | DMA_MEM_TO_DEV); ++ adev->common.residue_granularity = DMA_RESIDUE_GRANULARITY_DESCRIPTOR; ++ adev->common.src_addr_widths = DMA_SLAVE_BUSWIDTH_4_BYTES; ++ adev->common.dst_addr_widths = DMA_SLAVE_BUSWIDTH_4_BYTES; ++ adev->common.device_free_chan_resources = adm_free_chan; ++ 
adev->common.device_prep_slave_sg = adm_prep_slave_sg; ++ adev->common.device_issue_pending = adm_issue_pending; ++ adev->common.device_tx_status = adm_tx_status; ++ adev->common.device_terminate_all = adm_terminate_all; ++ adev->common.device_config = adm_slave_config; ++ ++ ret = dma_async_device_register(&adev->common); ++ if (ret) { ++ dev_err(adev->dev, "failed to register dma async device\n"); ++ goto err_disable_clks; ++ } ++ ++ ret = of_dma_controller_register(pdev->dev.of_node, ++ of_dma_xlate_by_chan_id, ++ &adev->common); ++ if (ret) ++ goto err_unregister_dma; ++ ++ return 0; ++ ++err_unregister_dma: ++ dma_async_device_unregister(&adev->common); ++err_disable_clks: ++ clk_disable_unprepare(adev->iface_clk); ++err_disable_core_clk: ++ clk_disable_unprepare(adev->core_clk); ++ ++ return ret; ++} ++ ++static int adm_dma_remove(struct platform_device *pdev) ++{ ++ struct adm_device *adev = platform_get_drvdata(pdev); ++ struct adm_chan *achan; ++ u32 i; ++ ++ of_dma_controller_free(pdev->dev.of_node); ++ dma_async_device_unregister(&adev->common); ++ ++ for (i = 0; i < ADM_MAX_CHANNELS; i++) { ++ achan = &adev->channels[i]; ++ ++ /* mask IRQs for this channel/EE pair */ ++ writel(0, adev->regs + ADM_CH_RSLT_CONF(achan->id, adev->ee)); ++ ++ adm_terminate_all(&adev->channels[i].vc.chan); ++ } ++ ++ devm_free_irq(adev->dev, adev->irq, adev); ++ ++ clk_disable_unprepare(adev->core_clk); ++ clk_disable_unprepare(adev->iface_clk); ++ ++ return 0; ++} ++ ++static const struct of_device_id adm_of_match[] = { ++ { .compatible = "qcom,adm", }, ++ {} ++}; ++MODULE_DEVICE_TABLE(of, adm_of_match); ++ ++static struct platform_driver adm_dma_driver = { ++ .probe = adm_dma_probe, ++ .remove = adm_dma_remove, ++ .driver = { ++ .name = "adm-dma-engine", ++ .of_match_table = adm_of_match, ++ }, ++}; ++ ++module_platform_driver(adm_dma_driver); ++ ++MODULE_AUTHOR("Andy Gross "); ++MODULE_DESCRIPTION("QCOM ADM DMA engine driver"); ++MODULE_LICENSE("GPL v2"); diff --git 
a/target/linux/ipq40xx/patches-4.9/0003-spi-qup-Make-sure-mode-is-only-determined-once.patch b/target/linux/ipq40xx/patches-4.9/0003-spi-qup-Make-sure-mode-is-only-determined-once.patch new file mode 100644 index 000000000..fd9df4448 --- /dev/null +++ b/target/linux/ipq40xx/patches-4.9/0003-spi-qup-Make-sure-mode-is-only-determined-once.patch @@ -0,0 +1,208 @@ +From 57c4d2626bcb990a2e677b4f769a88c3d8e0911d Mon Sep 17 00:00:00 2001 +From: Andy Gross +Date: Tue, 12 Apr 2016 09:11:47 -0500 +Subject: [PATCH 03/69] spi: qup: Make sure mode is only determined once + +This patch calculates the mode once. All decisions on the current +transaction +is made using the mode instead of use_dma + +Signed-off-by: Andy Gross +--- + drivers/spi/spi-qup.c | 87 ++++++++++++++++++++++----------------------------- + 1 file changed, 37 insertions(+), 50 deletions(-) + +--- a/drivers/spi/spi-qup.c ++++ b/drivers/spi/spi-qup.c +@@ -149,12 +149,20 @@ struct spi_qup { + int rx_bytes; + int qup_v1; + +- int use_dma; ++ int mode; + struct dma_slave_config rx_conf; + struct dma_slave_config tx_conf; + }; + + ++static inline bool spi_qup_is_dma_xfer(int mode) ++{ ++ if (mode == QUP_IO_M_MODE_DMOV || mode == QUP_IO_M_MODE_BAM) ++ return true; ++ ++ return false; ++} ++ + static inline bool spi_qup_is_valid_state(struct spi_qup *controller) + { + u32 opstate = readl_relaxed(controller->base + QUP_STATE); +@@ -424,7 +432,7 @@ static irqreturn_t spi_qup_qup_irq(int i + error = -EIO; + } + +- if (!controller->use_dma) { ++ if (!spi_qup_is_dma_xfer(controller->mode)) { + if (opflags & QUP_OP_IN_SERVICE_FLAG) + spi_qup_fifo_read(controller, xfer); + +@@ -443,34 +451,11 @@ static irqreturn_t spi_qup_qup_irq(int i + return IRQ_HANDLED; + } + +-static u32 +-spi_qup_get_mode(struct spi_master *master, struct spi_transfer *xfer) +-{ +- struct spi_qup *qup = spi_master_get_devdata(master); +- u32 mode; +- +- qup->w_size = 4; +- +- if (xfer->bits_per_word <= 8) +- qup->w_size = 1; +- else if 
(xfer->bits_per_word <= 16) +- qup->w_size = 2; +- +- qup->n_words = xfer->len / qup->w_size; +- +- if (qup->n_words <= (qup->in_fifo_sz / sizeof(u32))) +- mode = QUP_IO_M_MODE_FIFO; +- else +- mode = QUP_IO_M_MODE_BLOCK; +- +- return mode; +-} +- + /* set clock freq ... bits per word */ + static int spi_qup_io_config(struct spi_device *spi, struct spi_transfer *xfer) + { + struct spi_qup *controller = spi_master_get_devdata(spi->master); +- u32 config, iomode, mode, control; ++ u32 config, iomode, control; + int ret, n_words; + + if (spi->mode & SPI_LOOP && xfer->len > controller->in_fifo_sz) { +@@ -491,23 +476,22 @@ static int spi_qup_io_config(struct spi_ + return -EIO; + } + +- mode = spi_qup_get_mode(spi->master, xfer); ++ controller->w_size = DIV_ROUND_UP(xfer->bits_per_word, 8); ++ controller->n_words = xfer->len / controller->w_size; + n_words = controller->n_words; + +- if (mode == QUP_IO_M_MODE_FIFO) { ++ if (n_words <= (controller->in_fifo_sz / sizeof(u32))) { ++ controller->mode = QUP_IO_M_MODE_FIFO; + writel_relaxed(n_words, controller->base + QUP_MX_READ_CNT); + writel_relaxed(n_words, controller->base + QUP_MX_WRITE_CNT); + /* must be zero for FIFO */ + writel_relaxed(0, controller->base + QUP_MX_INPUT_CNT); + writel_relaxed(0, controller->base + QUP_MX_OUTPUT_CNT); +- } else if (!controller->use_dma) { +- writel_relaxed(n_words, controller->base + QUP_MX_INPUT_CNT); +- writel_relaxed(n_words, controller->base + QUP_MX_OUTPUT_CNT); +- /* must be zero for BLOCK and BAM */ +- writel_relaxed(0, controller->base + QUP_MX_READ_CNT); +- writel_relaxed(0, controller->base + QUP_MX_WRITE_CNT); +- } else { +- mode = QUP_IO_M_MODE_BAM; ++ ++ } else if (spi->master->can_dma && ++ spi->master->can_dma(spi->master, spi, xfer) && ++ spi->master->cur_msg_mapped) { ++ controller->mode = QUP_IO_M_MODE_BAM; + writel_relaxed(0, controller->base + QUP_MX_READ_CNT); + writel_relaxed(0, controller->base + QUP_MX_WRITE_CNT); + +@@ -528,19 +512,26 @@ static int 
spi_qup_io_config(struct spi_ + + writel_relaxed(0, controller->base + QUP_MX_OUTPUT_CNT); + } ++ } else { ++ controller->mode = QUP_IO_M_MODE_BLOCK; ++ writel_relaxed(n_words, controller->base + QUP_MX_INPUT_CNT); ++ writel_relaxed(n_words, controller->base + QUP_MX_OUTPUT_CNT); ++ /* must be zero for BLOCK and BAM */ ++ writel_relaxed(0, controller->base + QUP_MX_READ_CNT); ++ writel_relaxed(0, controller->base + QUP_MX_WRITE_CNT); + } + + iomode = readl_relaxed(controller->base + QUP_IO_M_MODES); + /* Set input and output transfer mode */ + iomode &= ~(QUP_IO_M_INPUT_MODE_MASK | QUP_IO_M_OUTPUT_MODE_MASK); + +- if (!controller->use_dma) ++ if (!spi_qup_is_dma_xfer(controller->mode)) + iomode &= ~(QUP_IO_M_PACK_EN | QUP_IO_M_UNPACK_EN); + else + iomode |= QUP_IO_M_PACK_EN | QUP_IO_M_UNPACK_EN; + +- iomode |= (mode << QUP_IO_M_OUTPUT_MODE_MASK_SHIFT); +- iomode |= (mode << QUP_IO_M_INPUT_MODE_MASK_SHIFT); ++ iomode |= (controller->mode << QUP_IO_M_OUTPUT_MODE_MASK_SHIFT); ++ iomode |= (controller->mode << QUP_IO_M_INPUT_MODE_MASK_SHIFT); + + writel_relaxed(iomode, controller->base + QUP_IO_M_MODES); + +@@ -581,7 +572,7 @@ static int spi_qup_io_config(struct spi_ + config |= xfer->bits_per_word - 1; + config |= QUP_CONFIG_SPI_MODE; + +- if (controller->use_dma) { ++ if (spi_qup_is_dma_xfer(controller->mode)) { + if (!xfer->tx_buf) + config |= QUP_CONFIG_NO_OUTPUT; + if (!xfer->rx_buf) +@@ -599,7 +590,7 @@ static int spi_qup_io_config(struct spi_ + * status change in BAM mode + */ + +- if (mode == QUP_IO_M_MODE_BAM) ++ if (spi_qup_is_dma_xfer(controller->mode)) + mask = QUP_OP_IN_SERVICE_FLAG | QUP_OP_OUT_SERVICE_FLAG; + + writel_relaxed(mask, controller->base + QUP_OPERATIONAL_MASK); +@@ -633,7 +624,7 @@ static int spi_qup_transfer_one(struct s + controller->tx_bytes = 0; + spin_unlock_irqrestore(&controller->lock, flags); + +- if (controller->use_dma) ++ if (spi_qup_is_dma_xfer(controller->mode)) + ret = spi_qup_do_dma(master, xfer); + else + ret = 
spi_qup_do_pio(master, xfer); +@@ -657,7 +648,7 @@ exit: + ret = controller->error; + spin_unlock_irqrestore(&controller->lock, flags); + +- if (ret && controller->use_dma) ++ if (ret && spi_qup_is_dma_xfer(controller->mode)) + spi_qup_dma_terminate(master, xfer); + + return ret; +@@ -668,9 +659,7 @@ static bool spi_qup_can_dma(struct spi_m + { + struct spi_qup *qup = spi_master_get_devdata(master); + size_t dma_align = dma_get_cache_alignment(); +- u32 mode; +- +- qup->use_dma = 0; ++ int n_words; + + if (xfer->rx_buf && (xfer->len % qup->in_blk_sz || + IS_ERR_OR_NULL(master->dma_rx) || +@@ -682,12 +671,10 @@ static bool spi_qup_can_dma(struct spi_m + !IS_ALIGNED((size_t)xfer->tx_buf, dma_align))) + return false; + +- mode = spi_qup_get_mode(master, xfer); +- if (mode == QUP_IO_M_MODE_FIFO) ++ n_words = xfer->len / DIV_ROUND_UP(xfer->bits_per_word, 8); ++ if (n_words <= (qup->in_fifo_sz / sizeof(u32))) + return false; + +- qup->use_dma = 1; +- + return true; + } + diff --git a/target/linux/ipq40xx/patches-4.9/0004-spi-qup-Fix-transaction-done-signaling.patch b/target/linux/ipq40xx/patches-4.9/0004-spi-qup-Fix-transaction-done-signaling.patch new file mode 100644 index 000000000..5881ffa8b --- /dev/null +++ b/target/linux/ipq40xx/patches-4.9/0004-spi-qup-Fix-transaction-done-signaling.patch @@ -0,0 +1,29 @@ +From fbdf80d138f8c7fda8e598287109fb90446d557d Mon Sep 17 00:00:00 2001 +From: Andy Gross +Date: Fri, 29 Jan 2016 22:06:50 -0600 +Subject: [PATCH 04/69] spi: qup: Fix transaction done signaling + +Wait to signal done until we get all of the interrupts we are expecting +to get for a transaction. If we don't wait for the input done flag, we +can be inbetween transactions when the done flag comes in and this can +mess up the next transaction. 
+ +CC: Grant Grundler +CC: Sarthak Kukreti +Signed-off-by: Andy Gross +--- + drivers/spi/spi-qup.c | 3 ++- + 1 file changed, 2 insertions(+), 1 deletion(-) + +--- a/drivers/spi/spi-qup.c ++++ b/drivers/spi/spi-qup.c +@@ -445,7 +445,8 @@ static irqreturn_t spi_qup_qup_irq(int i + controller->xfer = xfer; + spin_unlock_irqrestore(&controller->lock, flags); + +- if (controller->rx_bytes == xfer->len || error) ++ if ((controller->rx_bytes == xfer->len && ++ (opflags & QUP_OP_MAX_INPUT_DONE_FLAG)) || error) + complete(&controller->done); + + return IRQ_HANDLED; diff --git a/target/linux/ipq40xx/patches-4.9/0005-spi-qup-Fix-DMA-mode-to-work-correctly.patch b/target/linux/ipq40xx/patches-4.9/0005-spi-qup-Fix-DMA-mode-to-work-correctly.patch new file mode 100644 index 000000000..20ab5c180 --- /dev/null +++ b/target/linux/ipq40xx/patches-4.9/0005-spi-qup-Fix-DMA-mode-to-work-correctly.patch @@ -0,0 +1,219 @@ +From 1204ea49f3f7ded898d1ee202776093715a9ecf6 Mon Sep 17 00:00:00 2001 +From: Andy Gross +Date: Tue, 2 Feb 2016 17:00:53 -0600 +Subject: [PATCH 05/69] spi: qup: Fix DMA mode to work correctly + +This patch fixes a few issues with the DMA mode. The QUP needs to be +placed in the run mode before the DMA transactions are executed. The +conditions for being able to DMA vary between revisions of the QUP. +This is due to v1.1.1 using ADM DMA and later revisions using BAM. 
+ +Signed-off-by: Andy Gross +--- + drivers/spi/spi-qup.c | 94 +++++++++++++++++++++++++++++++++------------------ + 1 file changed, 62 insertions(+), 32 deletions(-) + +--- a/drivers/spi/spi-qup.c ++++ b/drivers/spi/spi-qup.c +@@ -142,6 +142,7 @@ struct spi_qup { + + struct spi_transfer *xfer; + struct completion done; ++ struct completion dma_tx_done; + int error; + int w_size; /* bytes per SPI word */ + int n_words; +@@ -284,16 +285,16 @@ static void spi_qup_fifo_write(struct sp + + static void spi_qup_dma_done(void *data) + { +- struct spi_qup *qup = data; ++ struct completion *done = data; + +- complete(&qup->done); ++ complete(done); + } + + static int spi_qup_prep_sg(struct spi_master *master, struct spi_transfer *xfer, + enum dma_transfer_direction dir, +- dma_async_tx_callback callback) ++ dma_async_tx_callback callback, ++ void *data) + { +- struct spi_qup *qup = spi_master_get_devdata(master); + unsigned long flags = DMA_PREP_INTERRUPT | DMA_PREP_FENCE; + struct dma_async_tx_descriptor *desc; + struct scatterlist *sgl; +@@ -312,11 +313,11 @@ static int spi_qup_prep_sg(struct spi_ma + } + + desc = dmaengine_prep_slave_sg(chan, sgl, nents, dir, flags); +- if (!desc) +- return -EINVAL; ++ if (IS_ERR_OR_NULL(desc)) ++ return desc ? 
PTR_ERR(desc) : -EINVAL; + + desc->callback = callback; +- desc->callback_param = qup; ++ desc->callback_param = data; + + cookie = dmaengine_submit(desc); + +@@ -332,18 +333,29 @@ static void spi_qup_dma_terminate(struct + dmaengine_terminate_all(master->dma_rx); + } + +-static int spi_qup_do_dma(struct spi_master *master, struct spi_transfer *xfer) ++static int spi_qup_do_dma(struct spi_master *master, struct spi_transfer *xfer, ++unsigned long timeout) + { ++ struct spi_qup *qup = spi_master_get_devdata(master); + dma_async_tx_callback rx_done = NULL, tx_done = NULL; + int ret; + ++ /* before issuing the descriptors, set the QUP to run */ ++ ret = spi_qup_set_state(qup, QUP_STATE_RUN); ++ if (ret) { ++ dev_warn(qup->dev, "cannot set RUN state\n"); ++ return ret; ++ } ++ + if (xfer->rx_buf) + rx_done = spi_qup_dma_done; +- else if (xfer->tx_buf) ++ ++ if (xfer->tx_buf) + tx_done = spi_qup_dma_done; + + if (xfer->rx_buf) { +- ret = spi_qup_prep_sg(master, xfer, DMA_DEV_TO_MEM, rx_done); ++ ret = spi_qup_prep_sg(master, xfer, DMA_DEV_TO_MEM, rx_done, ++ &qup->done); + if (ret) + return ret; + +@@ -351,17 +363,25 @@ static int spi_qup_do_dma(struct spi_mas + } + + if (xfer->tx_buf) { +- ret = spi_qup_prep_sg(master, xfer, DMA_MEM_TO_DEV, tx_done); ++ ret = spi_qup_prep_sg(master, xfer, DMA_MEM_TO_DEV, tx_done, ++ &qup->dma_tx_done); + if (ret) + return ret; + + dma_async_issue_pending(master->dma_tx); + } + +- return 0; ++ if (xfer->rx_buf && !wait_for_completion_timeout(&qup->done, timeout)) ++ return -ETIMEDOUT; ++ ++ if (xfer->tx_buf && !wait_for_completion_timeout(&qup->dma_tx_done, timeout)) ++ ret = -ETIMEDOUT; ++ ++ return ret; + } + +-static int spi_qup_do_pio(struct spi_master *master, struct spi_transfer *xfer) ++static int spi_qup_do_pio(struct spi_master *master, struct spi_transfer *xfer, ++ unsigned long timeout) + { + struct spi_qup *qup = spi_master_get_devdata(master); + int ret; +@@ -380,6 +400,15 @@ static int spi_qup_do_pio(struct spi_mas + + 
spi_qup_fifo_write(qup, xfer); + ++ ret = spi_qup_set_state(qup, QUP_STATE_RUN); ++ if (ret) { ++ dev_warn(qup->dev, "cannot set RUN state\n"); ++ return ret; ++ } ++ ++ if (!wait_for_completion_timeout(&qup->done, timeout)) ++ return -ETIMEDOUT; ++ + return 0; + } + +@@ -428,7 +457,6 @@ static irqreturn_t spi_qup_qup_irq(int i + dev_warn(controller->dev, "CLK_OVER_RUN\n"); + if (spi_err & SPI_ERROR_CLK_UNDER_RUN) + dev_warn(controller->dev, "CLK_UNDER_RUN\n"); +- + error = -EIO; + } + +@@ -617,6 +645,7 @@ static int spi_qup_transfer_one(struct s + timeout = 100 * msecs_to_jiffies(timeout); + + reinit_completion(&controller->done); ++ reinit_completion(&controller->dma_tx_done); + + spin_lock_irqsave(&controller->lock, flags); + controller->xfer = xfer; +@@ -626,21 +655,13 @@ static int spi_qup_transfer_one(struct s + spin_unlock_irqrestore(&controller->lock, flags); + + if (spi_qup_is_dma_xfer(controller->mode)) +- ret = spi_qup_do_dma(master, xfer); ++ ret = spi_qup_do_dma(master, xfer, timeout); + else +- ret = spi_qup_do_pio(master, xfer); ++ ret = spi_qup_do_pio(master, xfer, timeout); + + if (ret) + goto exit; + +- if (spi_qup_set_state(controller, QUP_STATE_RUN)) { +- dev_warn(controller->dev, "cannot set EXECUTE state\n"); +- goto exit; +- } +- +- if (!wait_for_completion_timeout(&controller->done, timeout)) +- ret = -ETIMEDOUT; +- + exit: + spi_qup_set_state(controller, QUP_STATE_RESET); + spin_lock_irqsave(&controller->lock, flags); +@@ -662,15 +683,23 @@ static bool spi_qup_can_dma(struct spi_m + size_t dma_align = dma_get_cache_alignment(); + int n_words; + +- if (xfer->rx_buf && (xfer->len % qup->in_blk_sz || +- IS_ERR_OR_NULL(master->dma_rx) || +- !IS_ALIGNED((size_t)xfer->rx_buf, dma_align))) +- return false; ++ if (xfer->rx_buf) { ++ if (!IS_ALIGNED((size_t)xfer->rx_buf, dma_align) || ++ IS_ERR_OR_NULL(master->dma_rx)) ++ return false; + +- if (xfer->tx_buf && (xfer->len % qup->out_blk_sz || +- IS_ERR_OR_NULL(master->dma_tx) || +- 
!IS_ALIGNED((size_t)xfer->tx_buf, dma_align))) +- return false; ++ if (qup->qup_v1 && (xfer->len % qup->in_blk_sz)) ++ return false; ++ } ++ ++ if (xfer->tx_buf) { ++ if (!IS_ALIGNED((size_t)xfer->tx_buf, dma_align) || ++ IS_ERR_OR_NULL(master->dma_tx)) ++ return false; ++ ++ if (qup->qup_v1 && (xfer->len % qup->out_blk_sz)) ++ return false; ++ } + + n_words = xfer->len / DIV_ROUND_UP(xfer->bits_per_word, 8); + if (n_words <= (qup->in_fifo_sz / sizeof(u32))) +@@ -836,6 +865,7 @@ static int spi_qup_probe(struct platform + + spin_lock_init(&controller->lock); + init_completion(&controller->done); ++ init_completion(&controller->dma_tx_done); + + iomode = readl_relaxed(base + QUP_IO_M_MODES); + diff --git a/target/linux/ipq40xx/patches-4.9/0006-spi-qup-Fix-block-mode-to-work-correctly.patch b/target/linux/ipq40xx/patches-4.9/0006-spi-qup-Fix-block-mode-to-work-correctly.patch new file mode 100644 index 000000000..e7d0d656b --- /dev/null +++ b/target/linux/ipq40xx/patches-4.9/0006-spi-qup-Fix-block-mode-to-work-correctly.patch @@ -0,0 +1,310 @@ +From b56c1e35cc550fd014fa601ca56b964d88fd44a9 Mon Sep 17 00:00:00 2001 +From: Andy Gross +Date: Sun, 31 Jan 2016 21:28:13 -0600 +Subject: [PATCH 06/69] spi: qup: Fix block mode to work correctly + +This patch corrects the behavior of the BLOCK transactions. During block +transactions, the controller must be read/written to in block size transactions. 
+ +Signed-off-by: Andy Gross +--- + drivers/spi/spi-qup.c | 182 +++++++++++++++++++++++++++++++++++++++----------- + 1 file changed, 142 insertions(+), 40 deletions(-) + +--- a/drivers/spi/spi-qup.c ++++ b/drivers/spi/spi-qup.c +@@ -82,6 +82,8 @@ + #define QUP_IO_M_MODE_BAM 3 + + /* QUP_OPERATIONAL fields */ ++#define QUP_OP_IN_BLOCK_READ_REQ BIT(13) ++#define QUP_OP_OUT_BLOCK_WRITE_REQ BIT(12) + #define QUP_OP_MAX_INPUT_DONE_FLAG BIT(11) + #define QUP_OP_MAX_OUTPUT_DONE_FLAG BIT(10) + #define QUP_OP_IN_SERVICE_FLAG BIT(9) +@@ -155,6 +157,12 @@ struct spi_qup { + struct dma_slave_config tx_conf; + }; + ++static inline bool spi_qup_is_flag_set(struct spi_qup *controller, u32 flag) ++{ ++ u32 opflag = readl_relaxed(controller->base + QUP_OPERATIONAL); ++ ++ return opflag & flag; ++} + + static inline bool spi_qup_is_dma_xfer(int mode) + { +@@ -216,29 +224,26 @@ static int spi_qup_set_state(struct spi_ + return 0; + } + +-static void spi_qup_fifo_read(struct spi_qup *controller, +- struct spi_transfer *xfer) ++static void spi_qup_read_from_fifo(struct spi_qup *controller, ++ struct spi_transfer *xfer, u32 num_words) + { + u8 *rx_buf = xfer->rx_buf; +- u32 word, state; +- int idx, shift, w_size; +- +- w_size = controller->w_size; ++ int i, shift, num_bytes; ++ u32 word; + +- while (controller->rx_bytes < xfer->len) { +- +- state = readl_relaxed(controller->base + QUP_OPERATIONAL); +- if (0 == (state & QUP_OP_IN_FIFO_NOT_EMPTY)) +- break; ++ for (; num_words; num_words--) { + + word = readl_relaxed(controller->base + QUP_INPUT_FIFO); + ++ num_bytes = min_t(int, xfer->len - controller->rx_bytes, ++ controller->w_size); ++ + if (!rx_buf) { +- controller->rx_bytes += w_size; ++ controller->rx_bytes += num_bytes; + continue; + } + +- for (idx = 0; idx < w_size; idx++, controller->rx_bytes++) { ++ for (i = 0; i < num_bytes; i++, controller->rx_bytes++) { + /* + * The data format depends on bytes per SPI word: + * 4 bytes: 0x12345678 +@@ -246,38 +251,80 @@ static void 
spi_qup_fifo_read(struct spi + * 1 byte : 0x00000012 + */ + shift = BITS_PER_BYTE; +- shift *= (w_size - idx - 1); ++ shift *= (controller->w_size - i - 1); + rx_buf[controller->rx_bytes] = word >> shift; + } + } + } + +-static void spi_qup_fifo_write(struct spi_qup *controller, ++static void spi_qup_read(struct spi_qup *controller, + struct spi_transfer *xfer) + { +- const u8 *tx_buf = xfer->tx_buf; +- u32 word, state, data; +- int idx, w_size; ++ u32 remainder, words_per_block, num_words; ++ bool is_block_mode = controller->mode == QUP_IO_M_MODE_BLOCK; ++ ++ remainder = DIV_ROUND_UP(xfer->len - controller->rx_bytes, ++ controller->w_size); ++ words_per_block = controller->in_blk_sz >> 2; ++ ++ do { ++ /* ACK by clearing service flag */ ++ writel_relaxed(QUP_OP_IN_SERVICE_FLAG, ++ controller->base + QUP_OPERATIONAL); ++ ++ if (is_block_mode) { ++ num_words = (remainder > words_per_block) ? ++ words_per_block : remainder; ++ } else { ++ if (!spi_qup_is_flag_set(controller, ++ QUP_OP_IN_FIFO_NOT_EMPTY)) ++ break; + +- w_size = controller->w_size; ++ num_words = 1; ++ } ++ ++ /* read up to the maximum transfer size available */ ++ spi_qup_read_from_fifo(controller, xfer, num_words); + +- while (controller->tx_bytes < xfer->len) { ++ remainder -= num_words; + +- state = readl_relaxed(controller->base + QUP_OPERATIONAL); +- if (state & QUP_OP_OUT_FIFO_FULL) ++ /* if block mode, check to see if next block is available */ ++ if (is_block_mode && !spi_qup_is_flag_set(controller, ++ QUP_OP_IN_BLOCK_READ_REQ)) + break; + ++ } while (remainder); ++ ++ /* ++ * Due to extra stickiness of the QUP_OP_IN_SERVICE_FLAG during block ++ * mode reads, it has to be cleared again at the very end ++ */ ++ if (is_block_mode && spi_qup_is_flag_set(controller, ++ QUP_OP_MAX_INPUT_DONE_FLAG)) ++ writel_relaxed(QUP_OP_IN_SERVICE_FLAG, ++ controller->base + QUP_OPERATIONAL); ++ ++} ++ ++static void spi_qup_write_to_fifo(struct spi_qup *controller, ++ struct spi_transfer *xfer, u32 num_words) 
++{ ++ const u8 *tx_buf = xfer->tx_buf; ++ int i, num_bytes; ++ u32 word, data; ++ ++ for (; num_words; num_words--) { + word = 0; +- for (idx = 0; idx < w_size; idx++, controller->tx_bytes++) { + +- if (!tx_buf) { +- controller->tx_bytes += w_size; +- break; ++ num_bytes = min_t(int, xfer->len - controller->tx_bytes, ++ controller->w_size); ++ if (tx_buf) ++ for (i = 0; i < num_bytes; i++) { ++ data = tx_buf[controller->tx_bytes + i]; ++ word |= data << (BITS_PER_BYTE * (3 - i)); + } + +- data = tx_buf[controller->tx_bytes]; +- word |= data << (BITS_PER_BYTE * (3 - idx)); +- } ++ controller->tx_bytes += num_bytes; + + writel_relaxed(word, controller->base + QUP_OUTPUT_FIFO); + } +@@ -290,6 +337,44 @@ static void spi_qup_dma_done(void *data) + complete(done); + } + ++static void spi_qup_write(struct spi_qup *controller, ++ struct spi_transfer *xfer) ++{ ++ bool is_block_mode = controller->mode == QUP_IO_M_MODE_BLOCK; ++ u32 remainder, words_per_block, num_words; ++ ++ remainder = DIV_ROUND_UP(xfer->len - controller->tx_bytes, ++ controller->w_size); ++ words_per_block = controller->out_blk_sz >> 2; ++ ++ do { ++ /* ACK by clearing service flag */ ++ writel_relaxed(QUP_OP_OUT_SERVICE_FLAG, ++ controller->base + QUP_OPERATIONAL); ++ ++ if (is_block_mode) { ++ num_words = (remainder > words_per_block) ? 
++ words_per_block : remainder; ++ } else { ++ if (spi_qup_is_flag_set(controller, ++ QUP_OP_OUT_FIFO_FULL)) ++ break; ++ ++ num_words = 1; ++ } ++ ++ spi_qup_write_to_fifo(controller, xfer, num_words); ++ ++ remainder -= num_words; ++ ++ /* if block mode, check to see if next block is available */ ++ if (is_block_mode && !spi_qup_is_flag_set(controller, ++ QUP_OP_OUT_BLOCK_WRITE_REQ)) ++ break; ++ ++ } while (remainder); ++} ++ + static int spi_qup_prep_sg(struct spi_master *master, struct spi_transfer *xfer, + enum dma_transfer_direction dir, + dma_async_tx_callback callback, +@@ -347,11 +432,13 @@ unsigned long timeout) + return ret; + } + +- if (xfer->rx_buf) +- rx_done = spi_qup_dma_done; ++ if (!qup->qup_v1) { ++ if (xfer->rx_buf) ++ rx_done = spi_qup_dma_done; + +- if (xfer->tx_buf) +- tx_done = spi_qup_dma_done; ++ if (xfer->tx_buf) ++ tx_done = spi_qup_dma_done; ++ } + + if (xfer->rx_buf) { + ret = spi_qup_prep_sg(master, xfer, DMA_DEV_TO_MEM, rx_done, +@@ -398,7 +485,8 @@ static int spi_qup_do_pio(struct spi_mas + return ret; + } + +- spi_qup_fifo_write(qup, xfer); ++ if (qup->mode == QUP_IO_M_MODE_FIFO) ++ spi_qup_write(qup, xfer); + + ret = spi_qup_set_state(qup, QUP_STATE_RUN); + if (ret) { +@@ -431,10 +519,11 @@ static irqreturn_t spi_qup_qup_irq(int i + + writel_relaxed(qup_err, controller->base + QUP_ERROR_FLAGS); + writel_relaxed(spi_err, controller->base + SPI_ERROR_FLAGS); +- writel_relaxed(opflags, controller->base + QUP_OPERATIONAL); + + if (!xfer) { +- dev_err_ratelimited(controller->dev, "unexpected irq %08x %08x %08x\n", ++ writel_relaxed(opflags, controller->base + QUP_OPERATIONAL); ++ dev_err_ratelimited(controller->dev, ++ "unexpected irq %08x %08x %08x\n", + qup_err, spi_err, opflags); + return IRQ_HANDLED; + } +@@ -460,12 +549,20 @@ static irqreturn_t spi_qup_qup_irq(int i + error = -EIO; + } + +- if (!spi_qup_is_dma_xfer(controller->mode)) { ++ if (spi_qup_is_dma_xfer(controller->mode)) { ++ writel_relaxed(opflags, controller->base + 
QUP_OPERATIONAL); ++ if (opflags & QUP_OP_IN_SERVICE_FLAG && ++ opflags & QUP_OP_MAX_INPUT_DONE_FLAG) ++ complete(&controller->done); ++ if (opflags & QUP_OP_OUT_SERVICE_FLAG && ++ opflags & QUP_OP_MAX_OUTPUT_DONE_FLAG) ++ complete(&controller->dma_tx_done); ++ } else { + if (opflags & QUP_OP_IN_SERVICE_FLAG) +- spi_qup_fifo_read(controller, xfer); ++ spi_qup_read(controller, xfer); + + if (opflags & QUP_OP_OUT_SERVICE_FLAG) +- spi_qup_fifo_write(controller, xfer); ++ spi_qup_write(controller, xfer); + } + + spin_lock_irqsave(&controller->lock, flags); +@@ -473,6 +570,9 @@ static irqreturn_t spi_qup_qup_irq(int i + controller->xfer = xfer; + spin_unlock_irqrestore(&controller->lock, flags); + ++ /* re-read opflags as flags may have changed due to actions above */ ++ opflags = readl_relaxed(controller->base + QUP_OPERATIONAL); ++ + if ((controller->rx_bytes == xfer->len && + (opflags & QUP_OP_MAX_INPUT_DONE_FLAG)) || error) + complete(&controller->done); +@@ -516,11 +616,13 @@ static int spi_qup_io_config(struct spi_ + /* must be zero for FIFO */ + writel_relaxed(0, controller->base + QUP_MX_INPUT_CNT); + writel_relaxed(0, controller->base + QUP_MX_OUTPUT_CNT); +- + } else if (spi->master->can_dma && + spi->master->can_dma(spi->master, spi, xfer) && + spi->master->cur_msg_mapped) { + controller->mode = QUP_IO_M_MODE_BAM; ++ writel_relaxed(n_words, controller->base + QUP_MX_INPUT_CNT); ++ writel_relaxed(n_words, controller->base + QUP_MX_OUTPUT_CNT); ++ /* must be zero for BLOCK and BAM */ + writel_relaxed(0, controller->base + QUP_MX_READ_CNT); + writel_relaxed(0, controller->base + QUP_MX_WRITE_CNT); + diff --git a/target/linux/ipq40xx/patches-4.9/0007-spi-qup-properly-detect-extra-interrupts.patch b/target/linux/ipq40xx/patches-4.9/0007-spi-qup-properly-detect-extra-interrupts.patch new file mode 100644 index 000000000..8a3b02744 --- /dev/null +++ b/target/linux/ipq40xx/patches-4.9/0007-spi-qup-properly-detect-extra-interrupts.patch @@ -0,0 +1,61 @@ +From 
0f32f976ebaa7d8643fcd9419f12bc801ba14407 Mon Sep 17 00:00:00 2001 +From: Matthew McClintock +Date: Thu, 10 Mar 2016 16:44:55 -0600 +Subject: [PATCH 07/69] spi: qup: properly detect extra interrupts + +It's possible for a SPI transaction to complete and get another +interrupt and have it processed on the same spi_transfer before the +transfer_one can set it to NULL. + +This masks unexpected interrupts, so let's set the spi_transfer to +NULL in the interrupt once the transaction is done. So we can +properly detect these bad interrupts and print warning messages. + +Signed-off-by: Matthew McClintock +--- + drivers/spi/spi-qup.c | 15 +++++++++------ + 1 file changed, 9 insertions(+), 6 deletions(-) + +--- a/drivers/spi/spi-qup.c ++++ b/drivers/spi/spi-qup.c +@@ -507,6 +507,7 @@ static irqreturn_t spi_qup_qup_irq(int i + u32 opflags, qup_err, spi_err; + unsigned long flags; + int error = 0; ++ bool done = 0; + + spin_lock_irqsave(&controller->lock, flags); + xfer = controller->xfer; +@@ -565,16 +566,19 @@ static irqreturn_t spi_qup_qup_irq(int i + spi_qup_write(controller, xfer); + } + +- spin_lock_irqsave(&controller->lock, flags); +- controller->error = error; +- controller->xfer = xfer; +- spin_unlock_irqrestore(&controller->lock, flags); +- + /* re-read opflags as flags may have changed due to actions above */ + opflags = readl_relaxed(controller->base + QUP_OPERATIONAL); + + if ((controller->rx_bytes == xfer->len && + (opflags & QUP_OP_MAX_INPUT_DONE_FLAG)) || error) ++ done = true; ++ ++ spin_lock_irqsave(&controller->lock, flags); ++ controller->error = error; ++ controller->xfer = done ? 
NULL : xfer; ++ spin_unlock_irqrestore(&controller->lock, flags); ++ ++ if (done) + complete(&controller->done); + + return IRQ_HANDLED; +@@ -767,7 +771,6 @@ static int spi_qup_transfer_one(struct s + exit: + spi_qup_set_state(controller, QUP_STATE_RESET); + spin_lock_irqsave(&controller->lock, flags); +- controller->xfer = NULL; + if (!ret) + ret = controller->error; + spin_unlock_irqrestore(&controller->lock, flags); diff --git a/target/linux/ipq40xx/patches-4.9/0008-spi-qup-don-t-re-read-opflags-to-see-if-transaction-.patch b/target/linux/ipq40xx/patches-4.9/0008-spi-qup-don-t-re-read-opflags-to-see-if-transaction-.patch new file mode 100644 index 000000000..989359698 --- /dev/null +++ b/target/linux/ipq40xx/patches-4.9/0008-spi-qup-don-t-re-read-opflags-to-see-if-transaction-.patch @@ -0,0 +1,26 @@ +From 9864f39695aefe0831b3c6e86c0dff30489ad580 Mon Sep 17 00:00:00 2001 +From: Matthew McClintock +Date: Thu, 10 Mar 2016 16:48:27 -0600 +Subject: [PATCH 08/69] spi: qup: don't re-read opflags to see if transaction + is done for reads + +For reads, we will get another interrupt so we need to handle things +then. For writes, we can finish up now. 
+ +Signed-off-by: Matthew McClintock +--- + drivers/spi/spi-qup.c | 3 ++- + 1 file changed, 2 insertions(+), 1 deletion(-) + +--- a/drivers/spi/spi-qup.c ++++ b/drivers/spi/spi-qup.c +@@ -567,7 +567,8 @@ static irqreturn_t spi_qup_qup_irq(int i + } + + /* re-read opflags as flags may have changed due to actions above */ +- opflags = readl_relaxed(controller->base + QUP_OPERATIONAL); ++ if (opflags & QUP_OP_OUT_SERVICE_FLAG) ++ opflags = readl_relaxed(controller->base + QUP_OPERATIONAL); + + if ((controller->rx_bytes == xfer->len && + (opflags & QUP_OP_MAX_INPUT_DONE_FLAG)) || error) diff --git a/target/linux/ipq40xx/patches-4.9/0009-spi-qup-refactor-spi_qup_io_config-in-two-functions.patch b/target/linux/ipq40xx/patches-4.9/0009-spi-qup-refactor-spi_qup_io_config-in-two-functions.patch new file mode 100644 index 000000000..36c225bad --- /dev/null +++ b/target/linux/ipq40xx/patches-4.9/0009-spi-qup-refactor-spi_qup_io_config-in-two-functions.patch @@ -0,0 +1,202 @@ +From e06f04d55752e460d8f332f28317aebc27ab1b17 Mon Sep 17 00:00:00 2001 +From: Matthew McClintock +Date: Tue, 26 Apr 2016 12:57:46 -0500 +Subject: [PATCH 09/69] spi: qup: refactor spi_qup_io_config in two functions + +This is preparation for handling transactions larger than 64K-1 bytes in +block mode which is currently unsupported quietly fails. + +We need to break these into two functions 1) prep is called once per +spi_message and 2) io_config is calle once per spi-qup bus transaction + +This is just refactoring, there should be no functional change + +Signed-off-by: Matthew McClintock +--- + drivers/spi/spi-qup.c | 141 ++++++++++++++++++++++++++++++-------------------- + 1 file changed, 86 insertions(+), 55 deletions(-) + +--- a/drivers/spi/spi-qup.c ++++ b/drivers/spi/spi-qup.c +@@ -585,12 +585,11 @@ static irqreturn_t spi_qup_qup_irq(int i + return IRQ_HANDLED; + } + +-/* set clock freq ... 
bits per word */ +-static int spi_qup_io_config(struct spi_device *spi, struct spi_transfer *xfer) ++/* set clock freq ... bits per word, determine mode */ ++static int spi_qup_io_prep(struct spi_device *spi, struct spi_transfer *xfer) + { + struct spi_qup *controller = spi_master_get_devdata(spi->master); +- u32 config, iomode, control; +- int ret, n_words; ++ int ret; + + if (spi->mode & SPI_LOOP && xfer->len > controller->in_fifo_sz) { + dev_err(controller->dev, "too big size for loopback %d > %d\n", +@@ -605,56 +604,94 @@ static int spi_qup_io_config(struct spi_ + return -EIO; + } + +- if (spi_qup_set_state(controller, QUP_STATE_RESET)) { +- dev_err(controller->dev, "cannot set RESET state\n"); +- return -EIO; +- } +- + controller->w_size = DIV_ROUND_UP(xfer->bits_per_word, 8); + controller->n_words = xfer->len / controller->w_size; +- n_words = controller->n_words; + +- if (n_words <= (controller->in_fifo_sz / sizeof(u32))) { ++ if (controller->n_words <= (controller->in_fifo_sz / sizeof(u32))) + controller->mode = QUP_IO_M_MODE_FIFO; +- writel_relaxed(n_words, controller->base + QUP_MX_READ_CNT); +- writel_relaxed(n_words, controller->base + QUP_MX_WRITE_CNT); +- /* must be zero for FIFO */ +- writel_relaxed(0, controller->base + QUP_MX_INPUT_CNT); +- writel_relaxed(0, controller->base + QUP_MX_OUTPUT_CNT); +- } else if (spi->master->can_dma && +- spi->master->can_dma(spi->master, spi, xfer) && +- spi->master->cur_msg_mapped) { ++ else if (spi->master->can_dma && ++ spi->master->can_dma(spi->master, spi, xfer) && ++ spi->master->cur_msg_mapped) + controller->mode = QUP_IO_M_MODE_BAM; +- writel_relaxed(n_words, controller->base + QUP_MX_INPUT_CNT); +- writel_relaxed(n_words, controller->base + QUP_MX_OUTPUT_CNT); +- /* must be zero for BLOCK and BAM */ +- writel_relaxed(0, controller->base + QUP_MX_READ_CNT); +- writel_relaxed(0, controller->base + QUP_MX_WRITE_CNT); +- +- if (!controller->qup_v1) { +- void __iomem *input_cnt; +- +- input_cnt = 
controller->base + QUP_MX_INPUT_CNT; +- /* +- * for DMA transfers, both QUP_MX_INPUT_CNT and +- * QUP_MX_OUTPUT_CNT must be zero to all cases but one. +- * That case is a non-balanced transfer when there is +- * only a rx_buf. +- */ +- if (xfer->tx_buf) +- writel_relaxed(0, input_cnt); +- else +- writel_relaxed(n_words, input_cnt); ++ else ++ controller->mode = QUP_IO_M_MODE_BLOCK; ++ ++ return 0; ++} + ++/* prep qup for another spi transaction of specific type */ ++static int spi_qup_io_config(struct spi_device *spi, struct spi_transfer *xfer) ++{ ++ struct spi_qup *controller = spi_master_get_devdata(spi->master); ++ u32 config, iomode, control; ++ unsigned long flags; ++ ++ reinit_completion(&controller->done); ++ reinit_completion(&controller->dma_tx_done); ++ ++ spin_lock_irqsave(&controller->lock, flags); ++ controller->xfer = xfer; ++ controller->error = 0; ++ controller->rx_bytes = 0; ++ controller->tx_bytes = 0; ++ spin_unlock_irqrestore(&controller->lock, flags); ++ ++ ++ if (spi_qup_set_state(controller, QUP_STATE_RESET)) { ++ dev_err(controller->dev, "cannot set RESET state\n"); ++ return -EIO; ++ } ++ ++ switch (controller->mode) { ++ case QUP_IO_M_MODE_FIFO: ++ writel_relaxed(controller->n_words, ++ controller->base + QUP_MX_READ_CNT); ++ writel_relaxed(controller->n_words, ++ controller->base + QUP_MX_WRITE_CNT); ++ /* must be zero for FIFO */ ++ writel_relaxed(0, controller->base + QUP_MX_INPUT_CNT); + writel_relaxed(0, controller->base + QUP_MX_OUTPUT_CNT); +- } +- } else { +- controller->mode = QUP_IO_M_MODE_BLOCK; +- writel_relaxed(n_words, controller->base + QUP_MX_INPUT_CNT); +- writel_relaxed(n_words, controller->base + QUP_MX_OUTPUT_CNT); +- /* must be zero for BLOCK and BAM */ +- writel_relaxed(0, controller->base + QUP_MX_READ_CNT); +- writel_relaxed(0, controller->base + QUP_MX_WRITE_CNT); ++ break; ++ case QUP_IO_M_MODE_BAM: ++ writel_relaxed(controller->n_words, ++ controller->base + QUP_MX_INPUT_CNT); ++ 
writel_relaxed(controller->n_words, ++ controller->base + QUP_MX_OUTPUT_CNT); ++ /* must be zero for BLOCK and BAM */ ++ writel_relaxed(0, controller->base + QUP_MX_READ_CNT); ++ writel_relaxed(0, controller->base + QUP_MX_WRITE_CNT); ++ if (!controller->qup_v1) { ++ void __iomem *input_cnt; ++ ++ input_cnt = controller->base + QUP_MX_INPUT_CNT; ++ /* ++ * for DMA transfers, both QUP_MX_INPUT_CNT and ++ * QUP_MX_OUTPUT_CNT must be zero to all cases ++ * but one. That case is a non-balanced ++ * transfer when there is only a rx_buf. ++ */ ++ if (xfer->tx_buf) ++ writel_relaxed(0, input_cnt); ++ else ++ writel_relaxed(controller->n_words, ++ input_cnt); ++ ++ writel_relaxed(0, controller->base + QUP_MX_OUTPUT_CNT); ++ } ++ break; ++ case QUP_IO_M_MODE_BLOCK: ++ writel_relaxed(controller->n_words, ++ controller->base + QUP_MX_INPUT_CNT); ++ writel_relaxed(controller->n_words, ++ controller->base + QUP_MX_OUTPUT_CNT); ++ /* must be zero for BLOCK and BAM */ ++ writel_relaxed(0, controller->base + QUP_MX_READ_CNT); ++ writel_relaxed(0, controller->base + QUP_MX_WRITE_CNT); ++ break; ++ default: ++ dev_err(controller->dev, "unknown mode = %d\n", ++ controller->mode); ++ return -EIO; + } + + iomode = readl_relaxed(controller->base + QUP_IO_M_MODES); +@@ -743,6 +780,10 @@ static int spi_qup_transfer_one(struct s + unsigned long timeout, flags; + int ret = -EIO; + ++ ret = spi_qup_io_prep(spi, xfer); ++ if (ret) ++ return ret; ++ + ret = spi_qup_io_config(spi, xfer); + if (ret) + return ret; +@@ -751,16 +792,6 @@ static int spi_qup_transfer_one(struct s + timeout = DIV_ROUND_UP(xfer->len * 8, timeout); + timeout = 100 * msecs_to_jiffies(timeout); + +- reinit_completion(&controller->done); +- reinit_completion(&controller->dma_tx_done); +- +- spin_lock_irqsave(&controller->lock, flags); +- controller->xfer = xfer; +- controller->error = 0; +- controller->rx_bytes = 0; +- controller->tx_bytes = 0; +- spin_unlock_irqrestore(&controller->lock, flags); +- + if 
(spi_qup_is_dma_xfer(controller->mode)) + ret = spi_qup_do_dma(master, xfer, timeout); + else diff --git a/target/linux/ipq40xx/patches-4.9/0010-spi-qup-call-io_config-in-mode-specific-function.patch b/target/linux/ipq40xx/patches-4.9/0010-spi-qup-call-io_config-in-mode-specific-function.patch new file mode 100644 index 000000000..dfbed620f --- /dev/null +++ b/target/linux/ipq40xx/patches-4.9/0010-spi-qup-call-io_config-in-mode-specific-function.patch @@ -0,0 +1,391 @@ +From afe108e638a2dd441b11cd2c7b1e0658bb47b5e8 Mon Sep 17 00:00:00 2001 +From: Matthew McClintock +Date: Tue, 26 Apr 2016 13:14:45 -0500 +Subject: [PATCH 10/69] spi: qup: call io_config in mode specific function + +DMA transactions should only only need to call io_config only once, but +block mode might call it several times to setup several transactions so +it can handle reads/writes larger than the max size per transaction, so +we move the call to the do_ functions. + +This is just refactoring, there should be no functional change + +Signed-off-by: Matthew McClintock +--- + drivers/spi/spi-qup.c | 327 +++++++++++++++++++++++++------------------------- + 1 file changed, 166 insertions(+), 161 deletions(-) + +--- a/drivers/spi/spi-qup.c ++++ b/drivers/spi/spi-qup.c +@@ -418,13 +418,170 @@ static void spi_qup_dma_terminate(struct + dmaengine_terminate_all(master->dma_rx); + } + +-static int spi_qup_do_dma(struct spi_master *master, struct spi_transfer *xfer, ++/* prep qup for another spi transaction of specific type */ ++static int spi_qup_io_config(struct spi_device *spi, struct spi_transfer *xfer) ++{ ++ struct spi_qup *controller = spi_master_get_devdata(spi->master); ++ u32 config, iomode, control; ++ unsigned long flags; ++ ++ reinit_completion(&controller->done); ++ reinit_completion(&controller->dma_tx_done); ++ ++ spin_lock_irqsave(&controller->lock, flags); ++ controller->xfer = xfer; ++ controller->error = 0; ++ controller->rx_bytes = 0; ++ controller->tx_bytes = 0; ++ 
spin_unlock_irqrestore(&controller->lock, flags); ++ ++ if (spi_qup_set_state(controller, QUP_STATE_RESET)) { ++ dev_err(controller->dev, "cannot set RESET state\n"); ++ return -EIO; ++ } ++ ++ switch (controller->mode) { ++ case QUP_IO_M_MODE_FIFO: ++ writel_relaxed(controller->n_words, ++ controller->base + QUP_MX_READ_CNT); ++ writel_relaxed(controller->n_words, ++ controller->base + QUP_MX_WRITE_CNT); ++ /* must be zero for FIFO */ ++ writel_relaxed(0, controller->base + QUP_MX_INPUT_CNT); ++ writel_relaxed(0, controller->base + QUP_MX_OUTPUT_CNT); ++ break; ++ case QUP_IO_M_MODE_BAM: ++ writel_relaxed(controller->n_words, ++ controller->base + QUP_MX_INPUT_CNT); ++ writel_relaxed(controller->n_words, ++ controller->base + QUP_MX_OUTPUT_CNT); ++ /* must be zero for BLOCK and BAM */ ++ writel_relaxed(0, controller->base + QUP_MX_READ_CNT); ++ writel_relaxed(0, controller->base + QUP_MX_WRITE_CNT); ++ if (!controller->qup_v1) { ++ void __iomem *input_cnt; ++ ++ input_cnt = controller->base + QUP_MX_INPUT_CNT; ++ /* ++ * for DMA transfers, both QUP_MX_INPUT_CNT and ++ * QUP_MX_OUTPUT_CNT must be zero to all cases ++ * but one. That case is a non-balanced ++ * transfer when there is only a rx_buf. 
++ */ ++ if (xfer->tx_buf) ++ writel_relaxed(0, input_cnt); ++ else ++ writel_relaxed(controller->n_words, ++ input_cnt); ++ ++ writel_relaxed(0, controller->base + QUP_MX_OUTPUT_CNT); ++ } ++ break; ++ case QUP_IO_M_MODE_BLOCK: ++ writel_relaxed(controller->n_words, ++ controller->base + QUP_MX_INPUT_CNT); ++ writel_relaxed(controller->n_words, ++ controller->base + QUP_MX_OUTPUT_CNT); ++ /* must be zero for BLOCK and BAM */ ++ writel_relaxed(0, controller->base + QUP_MX_READ_CNT); ++ writel_relaxed(0, controller->base + QUP_MX_WRITE_CNT); ++ break; ++ default: ++ dev_err(controller->dev, "unknown mode = %d\n", ++ controller->mode); ++ return -EIO; ++ } ++ ++ iomode = readl_relaxed(controller->base + QUP_IO_M_MODES); ++ /* Set input and output transfer mode */ ++ iomode &= ~(QUP_IO_M_INPUT_MODE_MASK | QUP_IO_M_OUTPUT_MODE_MASK); ++ ++ if (!spi_qup_is_dma_xfer(controller->mode)) ++ iomode &= ~(QUP_IO_M_PACK_EN | QUP_IO_M_UNPACK_EN); ++ else ++ iomode |= QUP_IO_M_PACK_EN | QUP_IO_M_UNPACK_EN; ++ ++ iomode |= (controller->mode << QUP_IO_M_OUTPUT_MODE_MASK_SHIFT); ++ iomode |= (controller->mode << QUP_IO_M_INPUT_MODE_MASK_SHIFT); ++ ++ writel_relaxed(iomode, controller->base + QUP_IO_M_MODES); ++ ++ control = readl_relaxed(controller->base + SPI_IO_CONTROL); ++ ++ if (spi->mode & SPI_CPOL) ++ control |= SPI_IO_C_CLK_IDLE_HIGH; ++ else ++ control &= ~SPI_IO_C_CLK_IDLE_HIGH; ++ ++ writel_relaxed(control, controller->base + SPI_IO_CONTROL); ++ ++ config = readl_relaxed(controller->base + SPI_CONFIG); ++ ++ if (spi->mode & SPI_LOOP) ++ config |= SPI_CONFIG_LOOPBACK; ++ else ++ config &= ~SPI_CONFIG_LOOPBACK; ++ ++ if (spi->mode & SPI_CPHA) ++ config &= ~SPI_CONFIG_INPUT_FIRST; ++ else ++ config |= SPI_CONFIG_INPUT_FIRST; ++ ++ /* ++ * HS_MODE improves signal stability for spi-clk high rates, ++ * but is invalid in loop back mode. 
++ */ ++ if ((xfer->speed_hz >= SPI_HS_MIN_RATE) && !(spi->mode & SPI_LOOP)) ++ config |= SPI_CONFIG_HS_MODE; ++ else ++ config &= ~SPI_CONFIG_HS_MODE; ++ ++ writel_relaxed(config, controller->base + SPI_CONFIG); ++ ++ config = readl_relaxed(controller->base + QUP_CONFIG); ++ config &= ~(QUP_CONFIG_NO_INPUT | QUP_CONFIG_NO_OUTPUT | QUP_CONFIG_N); ++ config |= xfer->bits_per_word - 1; ++ config |= QUP_CONFIG_SPI_MODE; ++ ++ if (spi_qup_is_dma_xfer(controller->mode)) { ++ if (!xfer->tx_buf) ++ config |= QUP_CONFIG_NO_OUTPUT; ++ if (!xfer->rx_buf) ++ config |= QUP_CONFIG_NO_INPUT; ++ } ++ ++ writel_relaxed(config, controller->base + QUP_CONFIG); ++ ++ /* only write to OPERATIONAL_MASK when register is present */ ++ if (!controller->qup_v1) { ++ u32 mask = 0; ++ ++ /* ++ * mask INPUT and OUTPUT service flags to prevent IRQs on FIFO ++ * status change in BAM mode ++ */ ++ ++ if (spi_qup_is_dma_xfer(controller->mode)) ++ mask = QUP_OP_IN_SERVICE_FLAG | QUP_OP_OUT_SERVICE_FLAG; ++ ++ writel_relaxed(mask, controller->base + QUP_OPERATIONAL_MASK); ++ } ++ ++ return 0; ++} ++ ++static int spi_qup_do_dma(struct spi_device *spi, struct spi_transfer *xfer, + unsigned long timeout) + { ++ struct spi_master *master = spi->master; + struct spi_qup *qup = spi_master_get_devdata(master); + dma_async_tx_callback rx_done = NULL, tx_done = NULL; + int ret; + ++ ret = spi_qup_io_config(spi, xfer); ++ if (ret) ++ return ret; ++ + /* before issuing the descriptors, set the QUP to run */ + ret = spi_qup_set_state(qup, QUP_STATE_RUN); + if (ret) { +@@ -467,12 +624,17 @@ unsigned long timeout) + return ret; + } + +-static int spi_qup_do_pio(struct spi_master *master, struct spi_transfer *xfer, ++static int spi_qup_do_pio(struct spi_device *spi, struct spi_transfer *xfer, + unsigned long timeout) + { ++ struct spi_master *master = spi->master; + struct spi_qup *qup = spi_master_get_devdata(master); + int ret; + ++ ret = spi_qup_io_config(spi, xfer); ++ if (ret) ++ return ret; ++ + ret = 
spi_qup_set_state(qup, QUP_STATE_RUN); + if (ret) { + dev_warn(qup->dev, "cannot set RUN state\n"); +@@ -619,159 +781,6 @@ static int spi_qup_io_prep(struct spi_de + return 0; + } + +-/* prep qup for another spi transaction of specific type */ +-static int spi_qup_io_config(struct spi_device *spi, struct spi_transfer *xfer) +-{ +- struct spi_qup *controller = spi_master_get_devdata(spi->master); +- u32 config, iomode, control; +- unsigned long flags; +- +- reinit_completion(&controller->done); +- reinit_completion(&controller->dma_tx_done); +- +- spin_lock_irqsave(&controller->lock, flags); +- controller->xfer = xfer; +- controller->error = 0; +- controller->rx_bytes = 0; +- controller->tx_bytes = 0; +- spin_unlock_irqrestore(&controller->lock, flags); +- +- +- if (spi_qup_set_state(controller, QUP_STATE_RESET)) { +- dev_err(controller->dev, "cannot set RESET state\n"); +- return -EIO; +- } +- +- switch (controller->mode) { +- case QUP_IO_M_MODE_FIFO: +- writel_relaxed(controller->n_words, +- controller->base + QUP_MX_READ_CNT); +- writel_relaxed(controller->n_words, +- controller->base + QUP_MX_WRITE_CNT); +- /* must be zero for FIFO */ +- writel_relaxed(0, controller->base + QUP_MX_INPUT_CNT); +- writel_relaxed(0, controller->base + QUP_MX_OUTPUT_CNT); +- break; +- case QUP_IO_M_MODE_BAM: +- writel_relaxed(controller->n_words, +- controller->base + QUP_MX_INPUT_CNT); +- writel_relaxed(controller->n_words, +- controller->base + QUP_MX_OUTPUT_CNT); +- /* must be zero for BLOCK and BAM */ +- writel_relaxed(0, controller->base + QUP_MX_READ_CNT); +- writel_relaxed(0, controller->base + QUP_MX_WRITE_CNT); +- if (!controller->qup_v1) { +- void __iomem *input_cnt; +- +- input_cnt = controller->base + QUP_MX_INPUT_CNT; +- /* +- * for DMA transfers, both QUP_MX_INPUT_CNT and +- * QUP_MX_OUTPUT_CNT must be zero to all cases +- * but one. That case is a non-balanced +- * transfer when there is only a rx_buf. 
+- */ +- if (xfer->tx_buf) +- writel_relaxed(0, input_cnt); +- else +- writel_relaxed(controller->n_words, +- input_cnt); +- +- writel_relaxed(0, controller->base + QUP_MX_OUTPUT_CNT); +- } +- break; +- case QUP_IO_M_MODE_BLOCK: +- writel_relaxed(controller->n_words, +- controller->base + QUP_MX_INPUT_CNT); +- writel_relaxed(controller->n_words, +- controller->base + QUP_MX_OUTPUT_CNT); +- /* must be zero for BLOCK and BAM */ +- writel_relaxed(0, controller->base + QUP_MX_READ_CNT); +- writel_relaxed(0, controller->base + QUP_MX_WRITE_CNT); +- break; +- default: +- dev_err(controller->dev, "unknown mode = %d\n", +- controller->mode); +- return -EIO; +- } +- +- iomode = readl_relaxed(controller->base + QUP_IO_M_MODES); +- /* Set input and output transfer mode */ +- iomode &= ~(QUP_IO_M_INPUT_MODE_MASK | QUP_IO_M_OUTPUT_MODE_MASK); +- +- if (!spi_qup_is_dma_xfer(controller->mode)) +- iomode &= ~(QUP_IO_M_PACK_EN | QUP_IO_M_UNPACK_EN); +- else +- iomode |= QUP_IO_M_PACK_EN | QUP_IO_M_UNPACK_EN; +- +- iomode |= (controller->mode << QUP_IO_M_OUTPUT_MODE_MASK_SHIFT); +- iomode |= (controller->mode << QUP_IO_M_INPUT_MODE_MASK_SHIFT); +- +- writel_relaxed(iomode, controller->base + QUP_IO_M_MODES); +- +- control = readl_relaxed(controller->base + SPI_IO_CONTROL); +- +- if (spi->mode & SPI_CPOL) +- control |= SPI_IO_C_CLK_IDLE_HIGH; +- else +- control &= ~SPI_IO_C_CLK_IDLE_HIGH; +- +- writel_relaxed(control, controller->base + SPI_IO_CONTROL); +- +- config = readl_relaxed(controller->base + SPI_CONFIG); +- +- if (spi->mode & SPI_LOOP) +- config |= SPI_CONFIG_LOOPBACK; +- else +- config &= ~SPI_CONFIG_LOOPBACK; +- +- if (spi->mode & SPI_CPHA) +- config &= ~SPI_CONFIG_INPUT_FIRST; +- else +- config |= SPI_CONFIG_INPUT_FIRST; +- +- /* +- * HS_MODE improves signal stability for spi-clk high rates, +- * but is invalid in loop back mode. 
+- */ +- if ((xfer->speed_hz >= SPI_HS_MIN_RATE) && !(spi->mode & SPI_LOOP)) +- config |= SPI_CONFIG_HS_MODE; +- else +- config &= ~SPI_CONFIG_HS_MODE; +- +- writel_relaxed(config, controller->base + SPI_CONFIG); +- +- config = readl_relaxed(controller->base + QUP_CONFIG); +- config &= ~(QUP_CONFIG_NO_INPUT | QUP_CONFIG_NO_OUTPUT | QUP_CONFIG_N); +- config |= xfer->bits_per_word - 1; +- config |= QUP_CONFIG_SPI_MODE; +- +- if (spi_qup_is_dma_xfer(controller->mode)) { +- if (!xfer->tx_buf) +- config |= QUP_CONFIG_NO_OUTPUT; +- if (!xfer->rx_buf) +- config |= QUP_CONFIG_NO_INPUT; +- } +- +- writel_relaxed(config, controller->base + QUP_CONFIG); +- +- /* only write to OPERATIONAL_MASK when register is present */ +- if (!controller->qup_v1) { +- u32 mask = 0; +- +- /* +- * mask INPUT and OUTPUT service flags to prevent IRQs on FIFO +- * status change in BAM mode +- */ +- +- if (spi_qup_is_dma_xfer(controller->mode)) +- mask = QUP_OP_IN_SERVICE_FLAG | QUP_OP_OUT_SERVICE_FLAG; +- +- writel_relaxed(mask, controller->base + QUP_OPERATIONAL_MASK); +- } +- +- return 0; +-} +- + static int spi_qup_transfer_one(struct spi_master *master, + struct spi_device *spi, + struct spi_transfer *xfer) +@@ -784,18 +793,14 @@ static int spi_qup_transfer_one(struct s + if (ret) + return ret; + +- ret = spi_qup_io_config(spi, xfer); +- if (ret) +- return ret; +- + timeout = DIV_ROUND_UP(xfer->speed_hz, MSEC_PER_SEC); + timeout = DIV_ROUND_UP(xfer->len * 8, timeout); + timeout = 100 * msecs_to_jiffies(timeout); + + if (spi_qup_is_dma_xfer(controller->mode)) +- ret = spi_qup_do_dma(master, xfer, timeout); ++ ret = spi_qup_do_dma(spi, xfer, timeout); + else +- ret = spi_qup_do_pio(master, xfer, timeout); ++ ret = spi_qup_do_pio(spi, xfer, timeout); + + if (ret) + goto exit; diff --git a/target/linux/ipq40xx/patches-4.9/0011-spi-qup-allow-block-mode-to-generate-multiple-transa.patch b/target/linux/ipq40xx/patches-4.9/0011-spi-qup-allow-block-mode-to-generate-multiple-transa.patch new file mode 
100644 index 000000000..39a1aecab --- /dev/null +++ b/target/linux/ipq40xx/patches-4.9/0011-spi-qup-allow-block-mode-to-generate-multiple-transa.patch @@ -0,0 +1,268 @@ +From 6858a6a75f1ed364764afba938d77bbb57f80559 Mon Sep 17 00:00:00 2001 +From: Matthew McClintock +Date: Tue, 26 Apr 2016 15:46:24 -0500 +Subject: [PATCH 11/69] spi: qup: allow block mode to generate multiple + transactions + +This let's you write more to the SPI bus than 64K-1 which is important +if the block size of a SPI device is >= 64K or some other device wants +to something larger. + +This has the benefit of completly removing spi_message from the spi-qup +transactions + +Signed-off-by: Matthew McClintock +--- + drivers/spi/spi-qup.c | 120 +++++++++++++++++++++++++++++++------------------- + 1 file changed, 75 insertions(+), 45 deletions(-) + +--- a/drivers/spi/spi-qup.c ++++ b/drivers/spi/spi-qup.c +@@ -120,7 +120,7 @@ + + #define SPI_NUM_CHIPSELECTS 4 + +-#define SPI_MAX_DMA_XFER (SZ_64K - 64) ++#define SPI_MAX_XFER (SZ_64K - 64) + + /* high speed mode is when bus rate is greater then 26MHz */ + #define SPI_HS_MIN_RATE 26000000 +@@ -150,6 +150,8 @@ struct spi_qup { + int n_words; + int tx_bytes; + int rx_bytes; ++ const u8 *tx_buf; ++ u8 *rx_buf; + int qup_v1; + + int mode; +@@ -172,6 +174,12 @@ static inline bool spi_qup_is_dma_xfer(i + return false; + } + ++/* get's the transaction size length */ ++static inline unsigned spi_qup_len(struct spi_qup *controller) ++{ ++ return controller->n_words * controller->w_size; ++} ++ + static inline bool spi_qup_is_valid_state(struct spi_qup *controller) + { + u32 opstate = readl_relaxed(controller->base + QUP_STATE); +@@ -224,10 +232,9 @@ static int spi_qup_set_state(struct spi_ + return 0; + } + +-static void spi_qup_read_from_fifo(struct spi_qup *controller, +- struct spi_transfer *xfer, u32 num_words) ++static void spi_qup_read_from_fifo(struct spi_qup *controller, u32 num_words) + { +- u8 *rx_buf = xfer->rx_buf; ++ u8 *rx_buf = 
controller->rx_buf; + int i, shift, num_bytes; + u32 word; + +@@ -235,7 +242,7 @@ static void spi_qup_read_from_fifo(struc + + word = readl_relaxed(controller->base + QUP_INPUT_FIFO); + +- num_bytes = min_t(int, xfer->len - controller->rx_bytes, ++ num_bytes = min_t(int, spi_qup_len(controller) - controller->rx_bytes, + controller->w_size); + + if (!rx_buf) { +@@ -257,13 +264,12 @@ static void spi_qup_read_from_fifo(struc + } + } + +-static void spi_qup_read(struct spi_qup *controller, +- struct spi_transfer *xfer) ++static void spi_qup_read(struct spi_qup *controller) + { + u32 remainder, words_per_block, num_words; + bool is_block_mode = controller->mode == QUP_IO_M_MODE_BLOCK; + +- remainder = DIV_ROUND_UP(xfer->len - controller->rx_bytes, ++ remainder = DIV_ROUND_UP(spi_qup_len(controller) - controller->rx_bytes, + controller->w_size); + words_per_block = controller->in_blk_sz >> 2; + +@@ -284,7 +290,7 @@ static void spi_qup_read(struct spi_qup + } + + /* read up to the maximum transfer size available */ +- spi_qup_read_from_fifo(controller, xfer, num_words); ++ spi_qup_read_from_fifo(controller, num_words); + + remainder -= num_words; + +@@ -306,17 +312,16 @@ static void spi_qup_read(struct spi_qup + + } + +-static void spi_qup_write_to_fifo(struct spi_qup *controller, +- struct spi_transfer *xfer, u32 num_words) ++static void spi_qup_write_to_fifo(struct spi_qup *controller, u32 num_words) + { +- const u8 *tx_buf = xfer->tx_buf; ++ const u8 *tx_buf = controller->tx_buf; + int i, num_bytes; + u32 word, data; + + for (; num_words; num_words--) { + word = 0; + +- num_bytes = min_t(int, xfer->len - controller->tx_bytes, ++ num_bytes = min_t(int, spi_qup_len(controller) - controller->tx_bytes, + controller->w_size); + if (tx_buf) + for (i = 0; i < num_bytes; i++) { +@@ -337,13 +342,12 @@ static void spi_qup_dma_done(void *data) + complete(done); + } + +-static void spi_qup_write(struct spi_qup *controller, +- struct spi_transfer *xfer) ++static void 
spi_qup_write(struct spi_qup *controller) + { + bool is_block_mode = controller->mode == QUP_IO_M_MODE_BLOCK; + u32 remainder, words_per_block, num_words; + +- remainder = DIV_ROUND_UP(xfer->len - controller->tx_bytes, ++ remainder = DIV_ROUND_UP(spi_qup_len(controller) - controller->tx_bytes, + controller->w_size); + words_per_block = controller->out_blk_sz >> 2; + +@@ -363,7 +367,7 @@ static void spi_qup_write(struct spi_qup + num_words = 1; + } + +- spi_qup_write_to_fifo(controller, xfer, num_words); ++ spi_qup_write_to_fifo(controller, num_words); + + remainder -= num_words; + +@@ -629,35 +633,61 @@ static int spi_qup_do_pio(struct spi_dev + { + struct spi_master *master = spi->master; + struct spi_qup *qup = spi_master_get_devdata(master); +- int ret; ++ int ret, n_words, iterations, offset = 0; + +- ret = spi_qup_io_config(spi, xfer); +- if (ret) +- return ret; ++ n_words = qup->n_words; ++ iterations = n_words / SPI_MAX_XFER; /* round down */ + +- ret = spi_qup_set_state(qup, QUP_STATE_RUN); +- if (ret) { +- dev_warn(qup->dev, "cannot set RUN state\n"); +- return ret; +- } ++ qup->rx_buf = xfer->rx_buf; ++ qup->tx_buf = xfer->tx_buf; + +- ret = spi_qup_set_state(qup, QUP_STATE_PAUSE); +- if (ret) { +- dev_warn(qup->dev, "cannot set PAUSE state\n"); +- return ret; +- } ++ do { ++ if (iterations) ++ qup->n_words = SPI_MAX_XFER; ++ else ++ qup->n_words = n_words % SPI_MAX_XFER; ++ ++ if (qup->tx_buf && offset) ++ qup->tx_buf = xfer->tx_buf + offset * SPI_MAX_XFER; ++ ++ if (qup->rx_buf && offset) ++ qup->rx_buf = xfer->rx_buf + offset * SPI_MAX_XFER; ++ ++ /* if the transaction is small enough, we need ++ * to fallback to FIFO mode */ ++ if (qup->n_words <= (qup->in_fifo_sz / sizeof(u32))) ++ qup->mode = QUP_IO_M_MODE_FIFO; + +- if (qup->mode == QUP_IO_M_MODE_FIFO) +- spi_qup_write(qup, xfer); ++ ret = spi_qup_io_config(spi, xfer); ++ if (ret) ++ return ret; + +- ret = spi_qup_set_state(qup, QUP_STATE_RUN); +- if (ret) { +- dev_warn(qup->dev, "cannot set RUN 
state\n"); +- return ret; +- } ++ ret = spi_qup_set_state(qup, QUP_STATE_RUN); ++ if (ret) { ++ dev_warn(qup->dev, "cannot set RUN state\n"); ++ return ret; ++ } + +- if (!wait_for_completion_timeout(&qup->done, timeout)) +- return -ETIMEDOUT; ++ ret = spi_qup_set_state(qup, QUP_STATE_PAUSE); ++ if (ret) { ++ dev_warn(qup->dev, "cannot set PAUSE state\n"); ++ return ret; ++ } ++ ++ if (qup->mode == QUP_IO_M_MODE_FIFO) ++ spi_qup_write(qup); ++ ++ ret = spi_qup_set_state(qup, QUP_STATE_RUN); ++ if (ret) { ++ dev_warn(qup->dev, "cannot set RUN state\n"); ++ return ret; ++ } ++ ++ if (!wait_for_completion_timeout(&qup->done, timeout)) ++ return -ETIMEDOUT; ++ ++ offset++; ++ } while (iterations--); + + return 0; + } +@@ -722,17 +752,17 @@ static irqreturn_t spi_qup_qup_irq(int i + complete(&controller->dma_tx_done); + } else { + if (opflags & QUP_OP_IN_SERVICE_FLAG) +- spi_qup_read(controller, xfer); ++ spi_qup_read(controller); + + if (opflags & QUP_OP_OUT_SERVICE_FLAG) +- spi_qup_write(controller, xfer); ++ spi_qup_write(controller); + } + + /* re-read opflags as flags may have changed due to actions above */ + if (opflags & QUP_OP_OUT_SERVICE_FLAG) + opflags = readl_relaxed(controller->base + QUP_OPERATIONAL); + +- if ((controller->rx_bytes == xfer->len && ++ if ((controller->rx_bytes == spi_qup_len(controller) && + (opflags & QUP_OP_MAX_INPUT_DONE_FLAG)) || error) + done = true; + +@@ -794,7 +824,7 @@ static int spi_qup_transfer_one(struct s + return ret; + + timeout = DIV_ROUND_UP(xfer->speed_hz, MSEC_PER_SEC); +- timeout = DIV_ROUND_UP(xfer->len * 8, timeout); ++ timeout = DIV_ROUND_UP(min_t(unsigned long, SPI_MAX_XFER, xfer->len) * 8, timeout); + timeout = 100 * msecs_to_jiffies(timeout); + + if (spi_qup_is_dma_xfer(controller->mode)) +@@ -983,7 +1013,7 @@ static int spi_qup_probe(struct platform + master->dev.of_node = pdev->dev.of_node; + master->auto_runtime_pm = true; + master->dma_alignment = dma_get_cache_alignment(); +- master->max_dma_len = 
SPI_MAX_DMA_XFER; ++ master->max_dma_len = SPI_MAX_XFER; + + platform_set_drvdata(pdev, master); + diff --git a/target/linux/ipq40xx/patches-4.9/0012-spi-qup-refactor-spi_qup_prep_sg-to-be-more-take-spe.patch b/target/linux/ipq40xx/patches-4.9/0012-spi-qup-refactor-spi_qup_prep_sg-to-be-more-take-spe.patch new file mode 100644 index 000000000..990cccd8a --- /dev/null +++ b/target/linux/ipq40xx/patches-4.9/0012-spi-qup-refactor-spi_qup_prep_sg-to-be-more-take-spe.patch @@ -0,0 +1,73 @@ +From fca27bd516d30e33b9373a8c61ca4431077e479e Mon Sep 17 00:00:00 2001 +From: Matthew McClintock +Date: Wed, 4 May 2016 16:33:42 -0500 +Subject: [PATCH 12/69] spi: qup: refactor spi_qup_prep_sg to be more take + specific sgl and nent + +This is in preparation for splitting DMA into multiple transacations, +this contains no code changes just refactoring. + +Signed-off-by: Matthew McClintock +--- + drivers/spi/spi-qup.c | 28 +++++++++++----------------- + 1 file changed, 11 insertions(+), 17 deletions(-) + +--- a/drivers/spi/spi-qup.c ++++ b/drivers/spi/spi-qup.c +@@ -379,27 +379,19 @@ static void spi_qup_write(struct spi_qup + } while (remainder); + } + +-static int spi_qup_prep_sg(struct spi_master *master, struct spi_transfer *xfer, +- enum dma_transfer_direction dir, +- dma_async_tx_callback callback, +- void *data) ++static int spi_qup_prep_sg(struct spi_master *master, struct scatterlist *sgl, ++ unsigned int nents, enum dma_transfer_direction dir, ++ dma_async_tx_callback callback, void *data) + { + unsigned long flags = DMA_PREP_INTERRUPT | DMA_PREP_FENCE; + struct dma_async_tx_descriptor *desc; +- struct scatterlist *sgl; + struct dma_chan *chan; + dma_cookie_t cookie; +- unsigned int nents; + +- if (dir == DMA_MEM_TO_DEV) { ++ if (dir == DMA_MEM_TO_DEV) + chan = master->dma_tx; +- nents = xfer->tx_sg.nents; +- sgl = xfer->tx_sg.sgl; +- } else { ++ else + chan = master->dma_rx; +- nents = xfer->rx_sg.nents; +- sgl = xfer->rx_sg.sgl; +- } + + desc = 
dmaengine_prep_slave_sg(chan, sgl, nents, dir, flags); + if (IS_ERR_OR_NULL(desc)) +@@ -602,8 +594,9 @@ unsigned long timeout) + } + + if (xfer->rx_buf) { +- ret = spi_qup_prep_sg(master, xfer, DMA_DEV_TO_MEM, rx_done, +- &qup->done); ++ ret = spi_qup_prep_sg(master, xfer->rx_sg.sgl, ++ xfer->rx_sg.nents, DMA_DEV_TO_MEM, ++ rx_done, &qup->done); + if (ret) + return ret; + +@@ -611,8 +604,9 @@ unsigned long timeout) + } + + if (xfer->tx_buf) { +- ret = spi_qup_prep_sg(master, xfer, DMA_MEM_TO_DEV, tx_done, +- &qup->dma_tx_done); ++ ret = spi_qup_prep_sg(master, xfer->tx_sg.sgl, ++ xfer->tx_sg.nents, DMA_MEM_TO_DEV, ++ tx_done, &qup->dma_tx_done); + if (ret) + return ret; + diff --git a/target/linux/ipq40xx/patches-4.9/0013-spi-qup-allow-mulitple-DMA-transactions-per-spi-xfer.patch b/target/linux/ipq40xx/patches-4.9/0013-spi-qup-allow-mulitple-DMA-transactions-per-spi-xfer.patch new file mode 100644 index 000000000..13e199c52 --- /dev/null +++ b/target/linux/ipq40xx/patches-4.9/0013-spi-qup-allow-mulitple-DMA-transactions-per-spi-xfer.patch @@ -0,0 +1,166 @@ +From 028f915b20ec343dda88f1bcc99f07f6b428b4aa Mon Sep 17 00:00:00 2001 +From: Matthew McClintock +Date: Thu, 5 May 2016 10:07:11 -0500 +Subject: [PATCH 13/69] spi: qup: allow mulitple DMA transactions per spi xfer + +Much like the block mode changes, we are breaking up DMA transactions +into 64K chunks so we can reset the QUP engine. 
+ +Signed-off-by: Matthew McClintock +--- + drivers/spi/spi-qup.c | 120 ++++++++++++++++++++++++++++++++++++-------------- + 1 file changed, 86 insertions(+), 34 deletions(-) + +--- a/drivers/spi/spi-qup.c ++++ b/drivers/spi/spi-qup.c +@@ -566,6 +566,21 @@ static int spi_qup_io_config(struct spi_ + return 0; + } + ++static unsigned int spi_qup_sgl_get_size(struct scatterlist *sgl, unsigned int nents) ++{ ++ struct scatterlist *sg; ++ int i; ++ unsigned int length = 0; ++ ++ if (!nents) ++ return 0; ++ ++ for_each_sg(sgl, sg, nents, i) ++ length += sg_dma_len(sg); ++ ++ return length; ++} ++ + static int spi_qup_do_dma(struct spi_device *spi, struct spi_transfer *xfer, + unsigned long timeout) + { +@@ -573,53 +588,90 @@ unsigned long timeout) + struct spi_qup *qup = spi_master_get_devdata(master); + dma_async_tx_callback rx_done = NULL, tx_done = NULL; + int ret; ++ struct scatterlist *tx_sgl, *rx_sgl; + +- ret = spi_qup_io_config(spi, xfer); +- if (ret) +- return ret; +- +- /* before issuing the descriptors, set the QUP to run */ +- ret = spi_qup_set_state(qup, QUP_STATE_RUN); +- if (ret) { +- dev_warn(qup->dev, "cannot set RUN state\n"); +- return ret; +- } +- +- if (!qup->qup_v1) { +- if (xfer->rx_buf) +- rx_done = spi_qup_dma_done; +- +- if (xfer->tx_buf) +- tx_done = spi_qup_dma_done; +- } +- +- if (xfer->rx_buf) { +- ret = spi_qup_prep_sg(master, xfer->rx_sg.sgl, +- xfer->rx_sg.nents, DMA_DEV_TO_MEM, +- rx_done, &qup->done); +- if (ret) +- return ret; ++ rx_sgl = xfer->rx_sg.sgl; ++ tx_sgl = xfer->tx_sg.sgl; + +- dma_async_issue_pending(master->dma_rx); +- } ++ do { ++ int rx_nents = 0, tx_nents = 0; + +- if (xfer->tx_buf) { +- ret = spi_qup_prep_sg(master, xfer->tx_sg.sgl, +- xfer->tx_sg.nents, DMA_MEM_TO_DEV, +- tx_done, &qup->dma_tx_done); ++ if (rx_sgl) { ++ rx_nents = sg_nents_for_len(rx_sgl, SPI_MAX_XFER); ++ if (rx_nents < 0) ++ rx_nents = sg_nents(rx_sgl); ++ ++ qup->n_words = spi_qup_sgl_get_size(rx_sgl, rx_nents) / ++ qup->w_size; ++ } ++ ++ if 
(tx_sgl) { ++ tx_nents = sg_nents_for_len(tx_sgl, SPI_MAX_XFER); ++ if (tx_nents < 0) ++ tx_nents = sg_nents(tx_sgl); ++ ++ qup->n_words = spi_qup_sgl_get_size(tx_sgl, tx_nents) / ++ qup->w_size; ++ } ++ ++ ++ ret = spi_qup_io_config(spi, xfer); + if (ret) + return ret; + +- dma_async_issue_pending(master->dma_tx); +- } ++ /* before issuing the descriptors, set the QUP to run */ ++ ret = spi_qup_set_state(qup, QUP_STATE_RUN); ++ if (ret) { ++ dev_warn(qup->dev, "cannot set RUN state\n"); ++ return ret; ++ } ++ ++ if (!qup->qup_v1) { ++ if (rx_sgl) { ++ rx_done = spi_qup_dma_done; ++ } ++ ++ if (tx_sgl) { ++ tx_done = spi_qup_dma_done; ++ } ++ } ++ ++ if (rx_sgl) { ++ ret = spi_qup_prep_sg(master, rx_sgl, rx_nents, ++ DMA_DEV_TO_MEM, rx_done, ++ &qup->done); ++ if (ret) ++ return ret; ++ ++ dma_async_issue_pending(master->dma_rx); ++ } ++ ++ if (tx_sgl) { ++ ret = spi_qup_prep_sg(master, tx_sgl, tx_nents, ++ DMA_MEM_TO_DEV, tx_done, ++ &qup->dma_tx_done); ++ if (ret) ++ return ret; ++ ++ dma_async_issue_pending(master->dma_tx); ++ } ++ ++ if (rx_sgl && !wait_for_completion_timeout(&qup->done, timeout)) { ++ pr_emerg(" rx timed out"); ++ return -ETIMEDOUT; ++ } ++ ++ if (tx_sgl && !wait_for_completion_timeout(&qup->dma_tx_done, timeout)) { ++ pr_emerg(" tx timed out\n"); ++ return -ETIMEDOUT; ++ } + +- if (xfer->rx_buf && !wait_for_completion_timeout(&qup->done, timeout)) +- return -ETIMEDOUT; ++ for (; rx_sgl && rx_nents--; rx_sgl = sg_next(rx_sgl)); ++ for (; tx_sgl && tx_nents--; tx_sgl = sg_next(tx_sgl)); + +- if (xfer->tx_buf && !wait_for_completion_timeout(&qup->dma_tx_done, timeout)) +- ret = -ETIMEDOUT; ++ } while (rx_sgl || tx_sgl); + +- return ret; ++ return 0; + } + + static int spi_qup_do_pio(struct spi_device *spi, struct spi_transfer *xfer, diff --git a/target/linux/ipq40xx/patches-4.9/0014-spi-qup-Fix-sg-nents-calculation.patch b/target/linux/ipq40xx/patches-4.9/0014-spi-qup-Fix-sg-nents-calculation.patch new file mode 100644 index 000000000..2d321f1d2 
--- /dev/null +++ b/target/linux/ipq40xx/patches-4.9/0014-spi-qup-Fix-sg-nents-calculation.patch @@ -0,0 +1,86 @@ +From f5913e137c3dac4972ac0ddd5f248924d02d3dcb Mon Sep 17 00:00:00 2001 +From: Varadarajan Narayanan +Date: Wed, 25 May 2016 13:40:03 +0530 +Subject: [PATCH 14/69] spi: qup: Fix sg nents calculation + +lib/scatterlist.c:sg_nents_for_len() returns the number of SG +entries that total up to greater than or equal to the given +length. However, the spi-qup driver assumed that the returned +nents is for a total less than or equal to the given length. The +spi-qup driver requests nents for SPI_MAX_XFER, however the API +returns nents for SPI_MAX_XFER+delta (actually SZ_64K). + +Based on this, spi_qup_do_dma() calculates n_words and programs +that into QUP_MX_{IN|OUT}PUT_CNT register. The calculated +n_words value is more than the maximum value that can fit in the +the 16-bit COUNT field of the QUP_MX_{IN|OUT}PUT_CNT register. +And, the field gets programmed to zero. Since the COUNT field is +zero, the i/o doesn't start eventually resulting in the i/o +timing out. 
+ +Signed-off-by: Varadarajan Narayanan +--- + drivers/spi/spi-qup.c | 38 ++++++++++++++++++++++++++++++++++++-- + 1 file changed, 36 insertions(+), 2 deletions(-) + +--- a/drivers/spi/spi-qup.c ++++ b/drivers/spi/spi-qup.c +@@ -581,6 +581,38 @@ static unsigned int spi_qup_sgl_get_size + return length; + } + ++/** ++ * spi_qup_sg_nents_for_len - return total count of entries in scatterlist ++ * needed to satisfy the supplied length ++ * @sg: The scatterlist ++ * @len: The total required length ++ * ++ * Description: ++ * Determines the number of entries in sg that sum upto a maximum of ++ * the supplied length, taking into acount chaining as well ++ * ++ * Returns: ++ * the number of sg entries needed, negative error on failure ++ * ++ **/ ++int spi_qup_sg_nents_for_len(struct scatterlist *sg, u64 len) ++{ ++ int nents; ++ u64 total; ++ ++ if (!len) ++ return 0; ++ ++ for (nents = 0, total = 0; sg; sg = sg_next(sg)) { ++ nents++; ++ total += sg_dma_len(sg); ++ if (total > len) ++ return (nents - 1); ++ } ++ ++ return -EINVAL; ++} ++ + static int spi_qup_do_dma(struct spi_device *spi, struct spi_transfer *xfer, + unsigned long timeout) + { +@@ -597,7 +629,8 @@ unsigned long timeout) + int rx_nents = 0, tx_nents = 0; + + if (rx_sgl) { +- rx_nents = sg_nents_for_len(rx_sgl, SPI_MAX_XFER); ++ rx_nents = spi_qup_sg_nents_for_len(rx_sgl, ++ SPI_MAX_XFER); + if (rx_nents < 0) + rx_nents = sg_nents(rx_sgl); + +@@ -606,7 +639,8 @@ unsigned long timeout) + } + + if (tx_sgl) { +- tx_nents = sg_nents_for_len(tx_sgl, SPI_MAX_XFER); ++ tx_nents = spi_qup_sg_nents_for_len(tx_sgl, ++ SPI_MAX_XFER); + if (tx_nents < 0) + tx_nents = sg_nents(tx_sgl); + diff --git a/target/linux/ipq40xx/patches-4.9/0015-cpufreq-dt-qcom-ipq4019-Add-compat-for-qcom-ipq4019.patch b/target/linux/ipq40xx/patches-4.9/0015-cpufreq-dt-qcom-ipq4019-Add-compat-for-qcom-ipq4019.patch new file mode 100644 index 000000000..06b61a07e --- /dev/null +++ 
b/target/linux/ipq40xx/patches-4.9/0015-cpufreq-dt-qcom-ipq4019-Add-compat-for-qcom-ipq4019.patch @@ -0,0 +1,27 @@ +From 5543e93f51d5e23f9b3a7fe11a722c91fc410485 Mon Sep 17 00:00:00 2001 +From: Matthew McClintock +Date: Wed, 13 Apr 2016 14:03:14 -0500 +Subject: [PATCH 15/69] cpufreq: dt: qcom: ipq4019: Add compat for qcom ipq4019 + +Instantiate cpufreq-dt-platdev driver for ipq4019 to support changing +CPU frequencies. + +This depends on Viresh Kumar's patches in this series: +http://comments.gmane.org/gmane.linux.power-management.general/73887 + +Signed-off-by: Matthew McClintock +--- + drivers/cpufreq/cpufreq-dt-platdev.c | 2 ++ + 1 file changed, 2 insertions(+) + +--- a/drivers/cpufreq/cpufreq-dt-platdev.c ++++ b/drivers/cpufreq/cpufreq-dt-platdev.c +@@ -35,6 +35,8 @@ static const struct of_device_id machine + + { .compatible = "marvell,berlin", }, + ++ { .compatible = "qcom,ipq4019", }, ++ + { .compatible = "samsung,exynos3250", }, + { .compatible = "samsung,exynos4210", }, + { .compatible = "samsung,exynos4212", }, diff --git a/target/linux/ipq40xx/patches-4.9/0016-clk-ipq4019-report-accurate-fixed-clock-rates.patch b/target/linux/ipq40xx/patches-4.9/0016-clk-ipq4019-report-accurate-fixed-clock-rates.patch new file mode 100644 index 000000000..be9c4eb1b --- /dev/null +++ b/target/linux/ipq40xx/patches-4.9/0016-clk-ipq4019-report-accurate-fixed-clock-rates.patch @@ -0,0 +1,33 @@ +From 5e2df5f44e35d79fff2ab8bbb8a726ad5de78a3e Mon Sep 17 00:00:00 2001 +From: Matthew McClintock +Date: Thu, 28 Apr 2016 12:55:08 -0500 +Subject: [PATCH 16/69] clk: ipq4019: report accurate fixed clock rates + +This looks like a copy-and-paste gone wrong, but update all +the fixed clock rates to report the correct values. 
+ +Signed-off-by: Matthew McClintock +--- + drivers/clk/qcom/gcc-ipq4019.c | 10 +++++----- + 1 file changed, 5 insertions(+), 5 deletions(-) + +--- a/drivers/clk/qcom/gcc-ipq4019.c ++++ b/drivers/clk/qcom/gcc-ipq4019.c +@@ -1327,12 +1327,12 @@ static int gcc_ipq4019_probe(struct plat + { + struct device *dev = &pdev->dev; + +- clk_register_fixed_rate(dev, "fepll125", "xo", 0, 200000000); +- clk_register_fixed_rate(dev, "fepll125dly", "xo", 0, 200000000); +- clk_register_fixed_rate(dev, "fepllwcss2g", "xo", 0, 200000000); +- clk_register_fixed_rate(dev, "fepllwcss5g", "xo", 0, 200000000); ++ clk_register_fixed_rate(dev, "fepll125", "xo", 0, 125000000); ++ clk_register_fixed_rate(dev, "fepll125dly", "xo", 0, 125000000); ++ clk_register_fixed_rate(dev, "fepllwcss2g", "xo", 0, 250000000); ++ clk_register_fixed_rate(dev, "fepllwcss5g", "xo", 0, 250000000); + clk_register_fixed_rate(dev, "fepll200", "xo", 0, 200000000); +- clk_register_fixed_rate(dev, "fepll500", "xo", 0, 200000000); ++ clk_register_fixed_rate(dev, "fepll500", "xo", 0, 500000000); + clk_register_fixed_rate(dev, "ddrpllapss", "xo", 0, 666000000); + + return qcom_cc_probe(pdev, &gcc_ipq4019_desc); diff --git a/target/linux/ipq40xx/patches-4.9/0017-qcom-ipq4019-add-cpu-operating-points-for-cpufreq-su.patch b/target/linux/ipq40xx/patches-4.9/0017-qcom-ipq4019-add-cpu-operating-points-for-cpufreq-su.patch new file mode 100644 index 000000000..7cbd6a455 --- /dev/null +++ b/target/linux/ipq40xx/patches-4.9/0017-qcom-ipq4019-add-cpu-operating-points-for-cpufreq-su.patch @@ -0,0 +1,77 @@ +From 18c3b42575a154343831aec0637aab00e19440e1 Mon Sep 17 00:00:00 2001 +From: Matthew McClintock +Date: Thu, 17 Mar 2016 15:01:09 -0500 +Subject: [PATCH 17/69] qcom: ipq4019: add cpu operating points for cpufreq + support + +This adds some operating points for cpu frequeny scaling + +Signed-off-by: Matthew McClintock +--- + arch/arm/boot/dts/qcom-ipq4019.dtsi | 34 ++++++++++++++++++++++++++-------- + 1 file changed, 26 
insertions(+), 8 deletions(-) + +--- a/arch/arm/boot/dts/qcom-ipq4019.dtsi ++++ b/arch/arm/boot/dts/qcom-ipq4019.dtsi +@@ -40,14 +40,7 @@ + reg = <0x0>; + clocks = <&gcc GCC_APPS_CLK_SRC>; + clock-frequency = <0>; +- operating-points = < +- /* kHz uV (fixed) */ +- 48000 1100000 +- 200000 1100000 +- 500000 1100000 +- 666000 1100000 +- >; +- clock-latency = <256000>; ++ operating-points-v2 = <&cpu0_opp_table>; + }; + + cpu@1 { +@@ -59,6 +52,7 @@ + reg = <0x1>; + clocks = <&gcc GCC_APPS_CLK_SRC>; + clock-frequency = <0>; ++ operating-points-v2 = <&cpu0_opp_table>; + }; + + cpu@2 { +@@ -70,6 +64,7 @@ + reg = <0x2>; + clocks = <&gcc GCC_APPS_CLK_SRC>; + clock-frequency = <0>; ++ operating-points-v2 = <&cpu0_opp_table>; + }; + + cpu@3 { +@@ -81,6 +76,29 @@ + reg = <0x3>; + clocks = <&gcc GCC_APPS_CLK_SRC>; + clock-frequency = <0>; ++ operating-points-v2 = <&cpu0_opp_table>; ++ }; ++ }; ++ ++ cpu0_opp_table: opp_table0 { ++ compatible = "operating-points-v2"; ++ opp-shared; ++ ++ opp@48000000 { ++ opp-hz = /bits/ 64 <48000000>; ++ clock-latency-ns = <256000>; ++ }; ++ opp@200000000 { ++ opp-hz = /bits/ 64 <200000000>; ++ clock-latency-ns = <256000>; ++ }; ++ opp@500000000 { ++ opp-hz = /bits/ 64 <500000000>; ++ clock-latency-ns = <256000>; ++ }; ++ opp@666000000 { ++ opp-hz = /bits/ 64 <666000000>; ++ clock-latency-ns = <256000>; + }; + }; + diff --git a/target/linux/ipq40xx/patches-4.9/0018-qcom-ipq4019-turn-on-DMA-for-i2c.patch b/target/linux/ipq40xx/patches-4.9/0018-qcom-ipq4019-turn-on-DMA-for-i2c.patch new file mode 100644 index 000000000..42bb4a6e3 --- /dev/null +++ b/target/linux/ipq40xx/patches-4.9/0018-qcom-ipq4019-turn-on-DMA-for-i2c.patch @@ -0,0 +1,23 @@ +From 71f82049dca86bc89b9da07e051e4ed492820233 Mon Sep 17 00:00:00 2001 +From: Matthew McClintock +Date: Mon, 28 Mar 2016 11:16:51 -0500 +Subject: [PATCH 18/69] qcom: ipq4019: turn on DMA for i2c + +These are the required nodes for i2c-qup to use DMA + +Signed-off-by: Matthew McClintock +--- + 
arch/arm/boot/dts/qcom-ipq4019.dtsi | 2 ++ + 1 file changed, 2 insertions(+) + +--- a/arch/arm/boot/dts/qcom-ipq4019.dtsi ++++ b/arch/arm/boot/dts/qcom-ipq4019.dtsi +@@ -179,6 +179,8 @@ + clock-names = "iface", "core"; + #address-cells = <1>; + #size-cells = <0>; ++ dmas = <&blsp_dma 9>, <&blsp_dma 8>; ++ dma-names = "rx", "tx"; + status = "disabled"; + }; + diff --git a/target/linux/ipq40xx/patches-4.9/0019-qcom-ipq4019-use-correct-clock-for-i2c-bus-0.patch b/target/linux/ipq40xx/patches-4.9/0019-qcom-ipq4019-use-correct-clock-for-i2c-bus-0.patch new file mode 100644 index 000000000..54ee571cb --- /dev/null +++ b/target/linux/ipq40xx/patches-4.9/0019-qcom-ipq4019-use-correct-clock-for-i2c-bus-0.patch @@ -0,0 +1,28 @@ +From 7292bf171cdf2fb48607058f12ddd0d812a87428 Mon Sep 17 00:00:00 2001 +From: Matthew McClintock +Date: Fri, 29 Apr 2016 12:48:02 -0500 +Subject: [PATCH 19/69] qcom: ipq4019: use correct clock for i2c bus 0 + +For the record the mapping is as follows: + +QUP0 = SPI QUP1 +QUP1 = SPI QUP2 +QUP2 = I2C QUP1 +QUP3 = I2C QUP2 + +Signed-off-by: Matthew McClintock +--- + arch/arm/boot/dts/qcom-ipq4019.dtsi | 2 +- + 1 file changed, 1 insertion(+), 1 deletion(-) + +--- a/arch/arm/boot/dts/qcom-ipq4019.dtsi ++++ b/arch/arm/boot/dts/qcom-ipq4019.dtsi +@@ -175,7 +175,7 @@ + reg = <0x78b7000 0x6000>; + interrupts = ; + clocks = <&gcc GCC_BLSP1_AHB_CLK>, +- <&gcc GCC_BLSP1_QUP2_I2C_APPS_CLK>; ++ <&gcc GCC_BLSP1_QUP1_I2C_APPS_CLK>; + clock-names = "iface", "core"; + #address-cells = <1>; + #size-cells = <0>; diff --git a/target/linux/ipq40xx/patches-4.9/0020-qcom-ipq4019-enable-DMA-for-spi.patch b/target/linux/ipq40xx/patches-4.9/0020-qcom-ipq4019-enable-DMA-for-spi.patch new file mode 100644 index 000000000..c1fa5c729 --- /dev/null +++ b/target/linux/ipq40xx/patches-4.9/0020-qcom-ipq4019-enable-DMA-for-spi.patch @@ -0,0 +1,23 @@ +From 4593e768393b9589f0a8987eaf57316c214865fe Mon Sep 17 00:00:00 2001 +From: Matthew McClintock +Date: Mon, 11 Apr 2016 14:49:12 -0500 
+Subject: [PATCH 20/69] qcom: ipq4019: enable DMA for spi + +These are the required nodes for spi-qup to use DMA + +Signed-off-by: Matthew McClintock +--- + arch/arm/boot/dts/qcom-ipq4019.dtsi | 2 ++ + 1 file changed, 2 insertions(+) + +--- a/arch/arm/boot/dts/qcom-ipq4019.dtsi ++++ b/arch/arm/boot/dts/qcom-ipq4019.dtsi +@@ -167,6 +167,8 @@ + clock-names = "core", "iface"; + #address-cells = <1>; + #size-cells = <0>; ++ dmas = <&blsp_dma 5>, <&blsp_dma 4>; ++ dma-names = "rx", "tx"; + status = "disabled"; + }; + diff --git a/target/linux/ipq40xx/patches-4.9/0026-dts-ipq4019-Add-support-for-IPQ4019-DK04-board.patch b/target/linux/ipq40xx/patches-4.9/0026-dts-ipq4019-Add-support-for-IPQ4019-DK04-board.patch new file mode 100644 index 000000000..8cd3e205d --- /dev/null +++ b/target/linux/ipq40xx/patches-4.9/0026-dts-ipq4019-Add-support-for-IPQ4019-DK04-board.patch @@ -0,0 +1,225 @@ +From ec3e465ecf3f7dd26f2e22170e4c5f4b9979df5d Mon Sep 17 00:00:00 2001 +From: Matthew McClintock +Date: Mon, 21 Mar 2016 15:55:21 -0500 +Subject: [PATCH 26/69] dts: ipq4019: Add support for IPQ4019 DK04 board + +This is pretty similiar to a DK01 but has a bit more IO. 
Some notable +differences are listed below however they are not in the device tree yet +as we continue adding more support + +- second serial port +- PCIe +- NAND +- SD/EMMC + +Signed-off-by: Matthew McClintock +--- + arch/arm/boot/dts/Makefile | 1 + + arch/arm/boot/dts/qcom-ipq4019-ap.dk01.1.dtsi | 12 +- + arch/arm/boot/dts/qcom-ipq4019-ap.dk04.1-c1.dts | 21 +++ + arch/arm/boot/dts/qcom-ipq4019-ap.dk04.1.dtsi | 163 ++++++++++++++++++++++++ + 4 files changed, 189 insertions(+), 8 deletions(-) + create mode 100644 arch/arm/boot/dts/qcom-ipq4019-ap.dk04.1-c1.dts + create mode 100644 arch/arm/boot/dts/qcom-ipq4019-ap.dk04.1.dtsi + +--- a/arch/arm/boot/dts/Makefile ++++ b/arch/arm/boot/dts/Makefile +@@ -617,6 +617,7 @@ dtb-$(CONFIG_ARCH_QCOM) += \ + qcom-apq8084-ifc6540.dtb \ + qcom-apq8084-mtp.dtb \ + qcom-ipq4019-ap.dk01.1-c1.dtb \ ++ qcom-ipq4019-ap.dk04.1-c1.dtb \ + qcom-ipq8064-ap148.dtb \ + qcom-msm8660-surf.dtb \ + qcom-msm8960-cdp.dtb \ +--- /dev/null ++++ b/arch/arm/boot/dts/qcom-ipq4019-ap.dk04.1-c1.dts +@@ -0,0 +1,22 @@ ++/* Copyright (c) 2015, The Linux Foundation. All rights reserved. ++ * ++ * Permission to use, copy, modify, and/or distribute this software for any ++ * purpose with or without fee is hereby granted, provided that the above ++ * copyright notice and this permission notice appear in all copies. ++ * ++ * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES ++ * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF ++ * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ++ * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES ++ * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN ++ * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF ++ * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. ++ * ++ */ ++ ++#include "qcom-ipq4019-ap.dk04.1.dtsi" ++ ++/ { ++ model = "Qualcomm Technologies, Inc. 
IPQ40xx/AP-DK04.1-C1"; ++ compatible = "qcom,ap-dk04.1-c1", "qcom,ipq4019"; ++}; +--- /dev/null ++++ b/arch/arm/boot/dts/qcom-ipq4019-ap.dk04.1.dtsi +@@ -0,0 +1,163 @@ ++/* Copyright (c) 2015, The Linux Foundation. All rights reserved. ++ * ++ * Permission to use, copy, modify, and/or distribute this software for any ++ * purpose with or without fee is hereby granted, provided that the above ++ * copyright notice and this permission notice appear in all copies. ++ * ++ * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES ++ * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF ++ * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ++ * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES ++ * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN ++ * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF ++ * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. ++ * ++ */ ++ ++#include "qcom-ipq4019.dtsi" ++ ++/ { ++ model = "Qualcomm Technologies, Inc. 
IPQ4019/AP-DK04.1"; ++ compatible = "qcom,ipq4019"; ++ ++ clocks { ++ xo: xo { ++ compatible = "fixed-clock"; ++ clock-frequency = <48000000>; ++ #clock-cells = <0>; ++ }; ++ }; ++ ++ soc { ++ timer { ++ compatible = "arm,armv7-timer"; ++ interrupts = <1 2 0xf08>, ++ <1 3 0xf08>, ++ <1 4 0xf08>, ++ <1 1 0xf08>; ++ clock-frequency = <48000000>; ++ }; ++ ++ pinctrl@0x01000000 { ++ serial_0_pins: serial_pinmux { ++ mux { ++ pins = "gpio16", "gpio17"; ++ function = "blsp_uart0"; ++ bias-disable; ++ }; ++ }; ++ ++ serial_1_pins: serial1_pinmux { ++ mux { ++ pins = "gpio8", "gpio9"; ++ function = "blsp_uart1"; ++ bias-disable; ++ }; ++ }; ++ ++ spi_0_pins: spi_0_pinmux { ++ pinmux { ++ function = "blsp_spi0"; ++ pins = "gpio13", "gpio14", "gpio15"; ++ }; ++ pinmux_cs { ++ function = "gpio"; ++ pins = "gpio12"; ++ }; ++ pinconf { ++ pins = "gpio13", "gpio14", "gpio15"; ++ drive-strength = <12>; ++ bias-disable; ++ }; ++ pinconf_cs { ++ pins = "gpio12"; ++ drive-strength = <2>; ++ bias-disable; ++ output-high; ++ }; ++ }; ++ ++ i2c_0_pins: i2c_0_pinmux { ++ pinmux { ++ function = "blsp_i2c0"; ++ pins = "gpio10", "gpio11"; ++ }; ++ pinconf { ++ pins = "gpio10", "gpio11"; ++ drive-strength = <16>; ++ bias-disable; ++ }; ++ }; ++ }; ++ ++ blsp_dma: dma@7884000 { ++ status = "ok"; ++ }; ++ ++ spi_0: spi@78b5000 { ++ pinctrl-0 = <&spi_0_pins>; ++ pinctrl-names = "default"; ++ status = "ok"; ++ cs-gpios = <&tlmm 12 0>; ++ ++ mx25l25635e@0 { ++ #address-cells = <1>; ++ #size-cells = <1>; ++ reg = <0>; ++ compatible = "mx25l25635e"; ++ spi-max-frequency = <24000000>; ++ }; ++ }; ++ ++ i2c_0: i2c@78b7000 { /* BLSP1 QUP2 */ ++ pinctrl-0 = <&i2c_0_pins>; ++ pinctrl-names = "default"; ++ ++ status = "ok"; ++ }; ++ ++ serial@78af000 { ++ pinctrl-0 = <&serial_0_pins>; ++ pinctrl-names = "default"; ++ status = "ok"; ++ }; ++ ++ serial@78b0000 { ++ pinctrl-0 = <&serial_1_pins>; ++ pinctrl-names = "default"; ++ status = "ok"; ++ }; ++ ++ usb3_ss_phy: ssphy@9a000 { ++ status = "ok"; ++ }; 
++ ++ usb3_hs_phy: hsphy@a6000 { ++ status = "ok"; ++ }; ++ ++ usb3: usb3@8af8800 { ++ status = "ok"; ++ }; ++ ++ usb2_hs_phy: hsphy@a8000 { ++ status = "ok"; ++ }; ++ ++ usb2: usb2@60f8800 { ++ status = "ok"; ++ }; ++ ++ cryptobam: dma@8e04000 { ++ status = "ok"; ++ }; ++ ++ crypto@8e3a000 { ++ status = "ok"; ++ }; ++ ++ watchdog@b017000 { ++ status = "ok"; ++ }; ++ }; ++}; diff --git a/target/linux/ipq40xx/patches-4.9/0027-clk-qcom-Add-support-for-SMD-RPM-Clocks.patch b/target/linux/ipq40xx/patches-4.9/0027-clk-qcom-Add-support-for-SMD-RPM-Clocks.patch new file mode 100644 index 000000000..f7ff1da8d --- /dev/null +++ b/target/linux/ipq40xx/patches-4.9/0027-clk-qcom-Add-support-for-SMD-RPM-Clocks.patch @@ -0,0 +1,731 @@ +From 41ee71bae788e1858c0a387d010c342e6bb3f4b0 Mon Sep 17 00:00:00 2001 +From: Georgi Djakov +Date: Wed, 2 Nov 2016 17:56:56 +0200 +Subject: [PATCH 27/69] clk: qcom: Add support for SMD-RPM Clocks + +This adds initial support for clocks controlled by the Resource +Power Manager (RPM) processor on some Qualcomm SoCs, which use +the qcom_smd_rpm driver to communicate with RPM. +Such platforms are msm8916, apq8084 and msm8974. + +The RPM is a dedicated hardware engine for managing the shared +SoC resources in order to keep the lowest power profile. It +communicates with other hardware subsystems via shared memory +and accepts clock requests, aggregates the requests and turns +the clocks on/off or scales them on demand. 
+ +This driver is based on the codeaurora.org driver: +https://www.codeaurora.org/cgit/quic/la/kernel/msm-3.10/tree/drivers/clk/qcom/clock-rpm.c + +Signed-off-by: Georgi Djakov +--- + .../devicetree/bindings/clock/qcom,rpmcc.txt | 36 ++ + drivers/clk/qcom/Kconfig | 16 + + drivers/clk/qcom/Makefile | 1 + + drivers/clk/qcom/clk-smd-rpm.c | 571 +++++++++++++++++++++ + include/dt-bindings/clock/qcom,rpmcc.h | 45 ++ + 5 files changed, 669 insertions(+) + create mode 100644 Documentation/devicetree/bindings/clock/qcom,rpmcc.txt + create mode 100644 drivers/clk/qcom/clk-smd-rpm.c + create mode 100644 include/dt-bindings/clock/qcom,rpmcc.h + +--- /dev/null ++++ b/Documentation/devicetree/bindings/clock/qcom,rpmcc.txt +@@ -0,0 +1,36 @@ ++Qualcomm RPM Clock Controller Binding ++------------------------------------------------ ++The RPM is a dedicated hardware engine for managing the shared ++SoC resources in order to keep the lowest power profile. It ++communicates with other hardware subsystems via shared memory ++and accepts clock requests, aggregates the requests and turns ++the clocks on/off or scales them on demand. ++ ++Required properties : ++- compatible : shall contain only one of the following. The generic ++ compatible "qcom,rpmcc" should be also included. 
++ ++ "qcom,rpmcc-msm8916", "qcom,rpmcc" ++ ++- #clock-cells : shall contain 1 ++ ++Example: ++ smd { ++ compatible = "qcom,smd"; ++ ++ rpm { ++ interrupts = <0 168 1>; ++ qcom,ipc = <&apcs 8 0>; ++ qcom,smd-edge = <15>; ++ ++ rpm_requests { ++ compatible = "qcom,rpm-msm8916"; ++ qcom,smd-channels = "rpm_requests"; ++ ++ rpmcc: clock-controller { ++ compatible = "qcom,rpmcc-msm8916", "qcom,rpmcc"; ++ #clock-cells = <1>; ++ }; ++ }; ++ }; ++ }; +--- a/drivers/clk/qcom/Kconfig ++++ b/drivers/clk/qcom/Kconfig +@@ -2,6 +2,9 @@ config QCOM_GDSC + bool + select PM_GENERIC_DOMAINS if PM + ++config QCOM_RPMCC ++ bool ++ + config COMMON_CLK_QCOM + tristate "Support for Qualcomm's clock controllers" + depends on OF +@@ -9,6 +12,19 @@ config COMMON_CLK_QCOM + select REGMAP_MMIO + select RESET_CONTROLLER + ++config QCOM_CLK_SMD_RPM ++ tristate "RPM over SMD based Clock Controller" ++ depends on COMMON_CLK_QCOM && QCOM_SMD_RPM ++ select QCOM_RPMCC ++ help ++ The RPM (Resource Power Manager) is a dedicated hardware engine for ++ managing the shared SoC resources in order to keep the lowest power ++ profile. It communicates with other hardware subsystems via shared ++ memory and accepts clock requests, aggregates the requests and turns ++ the clocks on/off or scales them on demand. ++ Say Y if you want to support the clocks exposed by the RPM on ++ platforms such as apq8016, apq8084, msm8974 etc. ++ + config APQ_GCC_8084 + tristate "APQ8084 Global Clock Controller" + select QCOM_GDSC +--- a/drivers/clk/qcom/Makefile ++++ b/drivers/clk/qcom/Makefile +@@ -29,3 +29,4 @@ obj-$(CONFIG_MSM_LCC_8960) += lcc-msm896 + obj-$(CONFIG_MSM_MMCC_8960) += mmcc-msm8960.o + obj-$(CONFIG_MSM_MMCC_8974) += mmcc-msm8974.o + obj-$(CONFIG_MSM_MMCC_8996) += mmcc-msm8996.o ++obj-$(CONFIG_QCOM_CLK_SMD_RPM) += clk-smd-rpm.o +--- /dev/null ++++ b/drivers/clk/qcom/clk-smd-rpm.c +@@ -0,0 +1,571 @@ ++/* ++ * Copyright (c) 2016, Linaro Limited ++ * Copyright (c) 2014, The Linux Foundation. All rights reserved. 
++ * ++ * This software is licensed under the terms of the GNU General Public ++ * License version 2, as published by the Free Software Foundation, and ++ * may be copied, distributed, and modified under those terms. ++ * ++ * This program is distributed in the hope that it will be useful, ++ * but WITHOUT ANY WARRANTY; without even the implied warranty of ++ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the ++ * GNU General Public License for more details. ++ */ ++ ++#include ++#include ++#include ++#include ++#include ++#include ++#include ++#include ++#include ++#include ++#include ++ ++#include ++#include ++ ++#define QCOM_RPM_KEY_SOFTWARE_ENABLE 0x6e657773 ++#define QCOM_RPM_KEY_PIN_CTRL_CLK_BUFFER_ENABLE_KEY 0x62636370 ++#define QCOM_RPM_SMD_KEY_RATE 0x007a484b ++#define QCOM_RPM_SMD_KEY_ENABLE 0x62616e45 ++#define QCOM_RPM_SMD_KEY_STATE 0x54415453 ++#define QCOM_RPM_SCALING_ENABLE_ID 0x2 ++ ++#define __DEFINE_CLK_SMD_RPM(_platform, _name, _active, type, r_id, stat_id, \ ++ key) \ ++ static struct clk_smd_rpm _platform##_##_active; \ ++ static struct clk_smd_rpm _platform##_##_name = { \ ++ .rpm_res_type = (type), \ ++ .rpm_clk_id = (r_id), \ ++ .rpm_status_id = (stat_id), \ ++ .rpm_key = (key), \ ++ .peer = &_platform##_##_active, \ ++ .rate = INT_MAX, \ ++ .hw.init = &(struct clk_init_data){ \ ++ .ops = &clk_smd_rpm_ops, \ ++ .name = #_name, \ ++ .parent_names = (const char *[]){ "xo_board" }, \ ++ .num_parents = 1, \ ++ }, \ ++ }; \ ++ static struct clk_smd_rpm _platform##_##_active = { \ ++ .rpm_res_type = (type), \ ++ .rpm_clk_id = (r_id), \ ++ .rpm_status_id = (stat_id), \ ++ .active_only = true, \ ++ .rpm_key = (key), \ ++ .peer = &_platform##_##_name, \ ++ .rate = INT_MAX, \ ++ .hw.init = &(struct clk_init_data){ \ ++ .ops = &clk_smd_rpm_ops, \ ++ .name = #_active, \ ++ .parent_names = (const char *[]){ "xo_board" }, \ ++ .num_parents = 1, \ ++ }, \ ++ } ++ ++#define __DEFINE_CLK_SMD_RPM_BRANCH(_platform, _name, _active, type, r_id, \ ++ 
stat_id, r, key) \ ++ static struct clk_smd_rpm _platform##_##_active; \ ++ static struct clk_smd_rpm _platform##_##_name = { \ ++ .rpm_res_type = (type), \ ++ .rpm_clk_id = (r_id), \ ++ .rpm_status_id = (stat_id), \ ++ .rpm_key = (key), \ ++ .branch = true, \ ++ .peer = &_platform##_##_active, \ ++ .rate = (r), \ ++ .hw.init = &(struct clk_init_data){ \ ++ .ops = &clk_smd_rpm_branch_ops, \ ++ .name = #_name, \ ++ .parent_names = (const char *[]){ "xo_board" }, \ ++ .num_parents = 1, \ ++ }, \ ++ }; \ ++ static struct clk_smd_rpm _platform##_##_active = { \ ++ .rpm_res_type = (type), \ ++ .rpm_clk_id = (r_id), \ ++ .rpm_status_id = (stat_id), \ ++ .active_only = true, \ ++ .rpm_key = (key), \ ++ .branch = true, \ ++ .peer = &_platform##_##_name, \ ++ .rate = (r), \ ++ .hw.init = &(struct clk_init_data){ \ ++ .ops = &clk_smd_rpm_branch_ops, \ ++ .name = #_active, \ ++ .parent_names = (const char *[]){ "xo_board" }, \ ++ .num_parents = 1, \ ++ }, \ ++ } ++ ++#define DEFINE_CLK_SMD_RPM(_platform, _name, _active, type, r_id) \ ++ __DEFINE_CLK_SMD_RPM(_platform, _name, _active, type, r_id, \ ++ 0, QCOM_RPM_SMD_KEY_RATE) ++ ++#define DEFINE_CLK_SMD_RPM_BRANCH(_platform, _name, _active, type, r_id, r) \ ++ __DEFINE_CLK_SMD_RPM_BRANCH(_platform, _name, _active, type, \ ++ r_id, 0, r, QCOM_RPM_SMD_KEY_ENABLE) ++ ++#define DEFINE_CLK_SMD_RPM_QDSS(_platform, _name, _active, type, r_id) \ ++ __DEFINE_CLK_SMD_RPM(_platform, _name, _active, type, r_id, \ ++ 0, QCOM_RPM_SMD_KEY_STATE) ++ ++#define DEFINE_CLK_SMD_RPM_XO_BUFFER(_platform, _name, _active, r_id) \ ++ __DEFINE_CLK_SMD_RPM_BRANCH(_platform, _name, _active, \ ++ QCOM_SMD_RPM_CLK_BUF_A, r_id, 0, 1000, \ ++ QCOM_RPM_KEY_SOFTWARE_ENABLE) ++ ++#define DEFINE_CLK_SMD_RPM_XO_BUFFER_PINCTRL(_platform, _name, _active, r_id) \ ++ __DEFINE_CLK_SMD_RPM_BRANCH(_platform, _name, _active, \ ++ QCOM_SMD_RPM_CLK_BUF_A, r_id, 0, 1000, \ ++ QCOM_RPM_KEY_PIN_CTRL_CLK_BUFFER_ENABLE_KEY) ++ ++#define to_clk_smd_rpm(_hw) container_of(_hw, 
struct clk_smd_rpm, hw) ++ ++struct clk_smd_rpm { ++ const int rpm_res_type; ++ const int rpm_key; ++ const int rpm_clk_id; ++ const int rpm_status_id; ++ const bool active_only; ++ bool enabled; ++ bool branch; ++ struct clk_smd_rpm *peer; ++ struct clk_hw hw; ++ unsigned long rate; ++ struct qcom_smd_rpm *rpm; ++}; ++ ++struct clk_smd_rpm_req { ++ __le32 key; ++ __le32 nbytes; ++ __le32 value; ++}; ++ ++struct rpm_cc { ++ struct qcom_rpm *rpm; ++ struct clk_hw_onecell_data data; ++ struct clk_hw *hws[]; ++}; ++ ++struct rpm_smd_clk_desc { ++ struct clk_smd_rpm **clks; ++ size_t num_clks; ++}; ++ ++static DEFINE_MUTEX(rpm_smd_clk_lock); ++ ++static int clk_smd_rpm_handoff(struct clk_smd_rpm *r) ++{ ++ int ret; ++ struct clk_smd_rpm_req req = { ++ .key = cpu_to_le32(r->rpm_key), ++ .nbytes = cpu_to_le32(sizeof(u32)), ++ .value = cpu_to_le32(INT_MAX), ++ }; ++ ++ ret = qcom_rpm_smd_write(r->rpm, QCOM_SMD_RPM_ACTIVE_STATE, ++ r->rpm_res_type, r->rpm_clk_id, &req, ++ sizeof(req)); ++ if (ret) ++ return ret; ++ ret = qcom_rpm_smd_write(r->rpm, QCOM_SMD_RPM_SLEEP_STATE, ++ r->rpm_res_type, r->rpm_clk_id, &req, ++ sizeof(req)); ++ if (ret) ++ return ret; ++ ++ return 0; ++} ++ ++static int clk_smd_rpm_set_rate_active(struct clk_smd_rpm *r, ++ unsigned long rate) ++{ ++ struct clk_smd_rpm_req req = { ++ .key = cpu_to_le32(r->rpm_key), ++ .nbytes = cpu_to_le32(sizeof(u32)), ++ .value = cpu_to_le32(DIV_ROUND_UP(rate, 1000)), /* to kHz */ ++ }; ++ ++ return qcom_rpm_smd_write(r->rpm, QCOM_SMD_RPM_ACTIVE_STATE, ++ r->rpm_res_type, r->rpm_clk_id, &req, ++ sizeof(req)); ++} ++ ++static int clk_smd_rpm_set_rate_sleep(struct clk_smd_rpm *r, ++ unsigned long rate) ++{ ++ struct clk_smd_rpm_req req = { ++ .key = cpu_to_le32(r->rpm_key), ++ .nbytes = cpu_to_le32(sizeof(u32)), ++ .value = cpu_to_le32(DIV_ROUND_UP(rate, 1000)), /* to kHz */ ++ }; ++ ++ return qcom_rpm_smd_write(r->rpm, QCOM_SMD_RPM_SLEEP_STATE, ++ r->rpm_res_type, r->rpm_clk_id, &req, ++ sizeof(req)); ++} ++ ++static 
void to_active_sleep(struct clk_smd_rpm *r, unsigned long rate, ++ unsigned long *active, unsigned long *sleep) ++{ ++ *active = rate; ++ ++ /* ++ * Active-only clocks don't care what the rate is during sleep. So, ++ * they vote for zero. ++ */ ++ if (r->active_only) ++ *sleep = 0; ++ else ++ *sleep = *active; ++} ++ ++static int clk_smd_rpm_prepare(struct clk_hw *hw) ++{ ++ struct clk_smd_rpm *r = to_clk_smd_rpm(hw); ++ struct clk_smd_rpm *peer = r->peer; ++ unsigned long this_rate = 0, this_sleep_rate = 0; ++ unsigned long peer_rate = 0, peer_sleep_rate = 0; ++ unsigned long active_rate, sleep_rate; ++ int ret = 0; ++ ++ mutex_lock(&rpm_smd_clk_lock); ++ ++ /* Don't send requests to the RPM if the rate has not been set. */ ++ if (!r->rate) ++ goto out; ++ ++ to_active_sleep(r, r->rate, &this_rate, &this_sleep_rate); ++ ++ /* Take peer clock's rate into account only if it's enabled. */ ++ if (peer->enabled) ++ to_active_sleep(peer, peer->rate, ++ &peer_rate, &peer_sleep_rate); ++ ++ active_rate = max(this_rate, peer_rate); ++ ++ if (r->branch) ++ active_rate = !!active_rate; ++ ++ ret = clk_smd_rpm_set_rate_active(r, active_rate); ++ if (ret) ++ goto out; ++ ++ sleep_rate = max(this_sleep_rate, peer_sleep_rate); ++ if (r->branch) ++ sleep_rate = !!sleep_rate; ++ ++ ret = clk_smd_rpm_set_rate_sleep(r, sleep_rate); ++ if (ret) ++ /* Undo the active set vote and restore it */ ++ ret = clk_smd_rpm_set_rate_active(r, peer_rate); ++ ++out: ++ if (!ret) ++ r->enabled = true; ++ ++ mutex_unlock(&rpm_smd_clk_lock); ++ ++ return ret; ++} ++ ++static void clk_smd_rpm_unprepare(struct clk_hw *hw) ++{ ++ struct clk_smd_rpm *r = to_clk_smd_rpm(hw); ++ struct clk_smd_rpm *peer = r->peer; ++ unsigned long peer_rate = 0, peer_sleep_rate = 0; ++ unsigned long active_rate, sleep_rate; ++ int ret; ++ ++ mutex_lock(&rpm_smd_clk_lock); ++ ++ if (!r->rate) ++ goto out; ++ ++ /* Take peer clock's rate into account only if it's enabled. 
*/ ++ if (peer->enabled) ++ to_active_sleep(peer, peer->rate, &peer_rate, ++ &peer_sleep_rate); ++ ++ active_rate = r->branch ? !!peer_rate : peer_rate; ++ ret = clk_smd_rpm_set_rate_active(r, active_rate); ++ if (ret) ++ goto out; ++ ++ sleep_rate = r->branch ? !!peer_sleep_rate : peer_sleep_rate; ++ ret = clk_smd_rpm_set_rate_sleep(r, sleep_rate); ++ if (ret) ++ goto out; ++ ++ r->enabled = false; ++ ++out: ++ mutex_unlock(&rpm_smd_clk_lock); ++} ++ ++static int clk_smd_rpm_set_rate(struct clk_hw *hw, unsigned long rate, ++ unsigned long parent_rate) ++{ ++ struct clk_smd_rpm *r = to_clk_smd_rpm(hw); ++ struct clk_smd_rpm *peer = r->peer; ++ unsigned long active_rate, sleep_rate; ++ unsigned long this_rate = 0, this_sleep_rate = 0; ++ unsigned long peer_rate = 0, peer_sleep_rate = 0; ++ int ret = 0; ++ ++ mutex_lock(&rpm_smd_clk_lock); ++ ++ if (!r->enabled) ++ goto out; ++ ++ to_active_sleep(r, rate, &this_rate, &this_sleep_rate); ++ ++ /* Take peer clock's rate into account only if it's enabled. */ ++ if (peer->enabled) ++ to_active_sleep(peer, peer->rate, ++ &peer_rate, &peer_sleep_rate); ++ ++ active_rate = max(this_rate, peer_rate); ++ ret = clk_smd_rpm_set_rate_active(r, active_rate); ++ if (ret) ++ goto out; ++ ++ sleep_rate = max(this_sleep_rate, peer_sleep_rate); ++ ret = clk_smd_rpm_set_rate_sleep(r, sleep_rate); ++ if (ret) ++ goto out; ++ ++ r->rate = rate; ++ ++out: ++ mutex_unlock(&rpm_smd_clk_lock); ++ ++ return ret; ++} ++ ++static long clk_smd_rpm_round_rate(struct clk_hw *hw, unsigned long rate, ++ unsigned long *parent_rate) ++{ ++ /* ++ * RPM handles rate rounding and we don't have a way to ++ * know what the rate will be, so just return whatever ++ * rate is requested. 
++ */ ++ return rate; ++} ++ ++static unsigned long clk_smd_rpm_recalc_rate(struct clk_hw *hw, ++ unsigned long parent_rate) ++{ ++ struct clk_smd_rpm *r = to_clk_smd_rpm(hw); ++ ++ /* ++ * RPM handles rate rounding and we don't have a way to ++ * know what the rate will be, so just return whatever ++ * rate was set. ++ */ ++ return r->rate; ++} ++ ++static int clk_smd_rpm_enable_scaling(struct qcom_smd_rpm *rpm) ++{ ++ int ret; ++ struct clk_smd_rpm_req req = { ++ .key = cpu_to_le32(QCOM_RPM_SMD_KEY_ENABLE), ++ .nbytes = cpu_to_le32(sizeof(u32)), ++ .value = cpu_to_le32(1), ++ }; ++ ++ ret = qcom_rpm_smd_write(rpm, QCOM_SMD_RPM_SLEEP_STATE, ++ QCOM_SMD_RPM_MISC_CLK, ++ QCOM_RPM_SCALING_ENABLE_ID, &req, sizeof(req)); ++ if (ret) { ++ pr_err("RPM clock scaling (sleep set) not enabled!\n"); ++ return ret; ++ } ++ ++ ret = qcom_rpm_smd_write(rpm, QCOM_SMD_RPM_ACTIVE_STATE, ++ QCOM_SMD_RPM_MISC_CLK, ++ QCOM_RPM_SCALING_ENABLE_ID, &req, sizeof(req)); ++ if (ret) { ++ pr_err("RPM clock scaling (active set) not enabled!\n"); ++ return ret; ++ } ++ ++ pr_debug("%s: RPM clock scaling is enabled\n", __func__); ++ return 0; ++} ++ ++static const struct clk_ops clk_smd_rpm_ops = { ++ .prepare = clk_smd_rpm_prepare, ++ .unprepare = clk_smd_rpm_unprepare, ++ .set_rate = clk_smd_rpm_set_rate, ++ .round_rate = clk_smd_rpm_round_rate, ++ .recalc_rate = clk_smd_rpm_recalc_rate, ++}; ++ ++static const struct clk_ops clk_smd_rpm_branch_ops = { ++ .prepare = clk_smd_rpm_prepare, ++ .unprepare = clk_smd_rpm_unprepare, ++ .round_rate = clk_smd_rpm_round_rate, ++ .recalc_rate = clk_smd_rpm_recalc_rate, ++}; ++ ++/* msm8916 */ ++DEFINE_CLK_SMD_RPM(msm8916, pcnoc_clk, pcnoc_a_clk, QCOM_SMD_RPM_BUS_CLK, 0); ++DEFINE_CLK_SMD_RPM(msm8916, snoc_clk, snoc_a_clk, QCOM_SMD_RPM_BUS_CLK, 1); ++DEFINE_CLK_SMD_RPM(msm8916, bimc_clk, bimc_a_clk, QCOM_SMD_RPM_MEM_CLK, 0); ++DEFINE_CLK_SMD_RPM_QDSS(msm8916, qdss_clk, qdss_a_clk, QCOM_SMD_RPM_MISC_CLK, 1); ++DEFINE_CLK_SMD_RPM_XO_BUFFER(msm8916, bb_clk1, 
bb_clk1_a, 1); ++DEFINE_CLK_SMD_RPM_XO_BUFFER(msm8916, bb_clk2, bb_clk2_a, 2); ++DEFINE_CLK_SMD_RPM_XO_BUFFER(msm8916, rf_clk1, rf_clk1_a, 4); ++DEFINE_CLK_SMD_RPM_XO_BUFFER(msm8916, rf_clk2, rf_clk2_a, 5); ++DEFINE_CLK_SMD_RPM_XO_BUFFER_PINCTRL(msm8916, bb_clk1_pin, bb_clk1_a_pin, 1); ++DEFINE_CLK_SMD_RPM_XO_BUFFER_PINCTRL(msm8916, bb_clk2_pin, bb_clk2_a_pin, 2); ++DEFINE_CLK_SMD_RPM_XO_BUFFER_PINCTRL(msm8916, rf_clk1_pin, rf_clk1_a_pin, 4); ++DEFINE_CLK_SMD_RPM_XO_BUFFER_PINCTRL(msm8916, rf_clk2_pin, rf_clk2_a_pin, 5); ++ ++static struct clk_smd_rpm *msm8916_clks[] = { ++ [RPM_SMD_PCNOC_CLK] = &msm8916_pcnoc_clk, ++ [RPM_SMD_PCNOC_A_CLK] = &msm8916_pcnoc_a_clk, ++ [RPM_SMD_SNOC_CLK] = &msm8916_snoc_clk, ++ [RPM_SMD_SNOC_A_CLK] = &msm8916_snoc_a_clk, ++ [RPM_SMD_BIMC_CLK] = &msm8916_bimc_clk, ++ [RPM_SMD_BIMC_A_CLK] = &msm8916_bimc_a_clk, ++ [RPM_SMD_QDSS_CLK] = &msm8916_qdss_clk, ++ [RPM_SMD_QDSS_A_CLK] = &msm8916_qdss_a_clk, ++ [RPM_SMD_BB_CLK1] = &msm8916_bb_clk1, ++ [RPM_SMD_BB_CLK1_A] = &msm8916_bb_clk1_a, ++ [RPM_SMD_BB_CLK2] = &msm8916_bb_clk2, ++ [RPM_SMD_BB_CLK2_A] = &msm8916_bb_clk2_a, ++ [RPM_SMD_RF_CLK1] = &msm8916_rf_clk1, ++ [RPM_SMD_RF_CLK1_A] = &msm8916_rf_clk1_a, ++ [RPM_SMD_RF_CLK2] = &msm8916_rf_clk2, ++ [RPM_SMD_RF_CLK2_A] = &msm8916_rf_clk2_a, ++ [RPM_SMD_BB_CLK1_PIN] = &msm8916_bb_clk1_pin, ++ [RPM_SMD_BB_CLK1_A_PIN] = &msm8916_bb_clk1_a_pin, ++ [RPM_SMD_BB_CLK2_PIN] = &msm8916_bb_clk2_pin, ++ [RPM_SMD_BB_CLK2_A_PIN] = &msm8916_bb_clk2_a_pin, ++ [RPM_SMD_RF_CLK1_PIN] = &msm8916_rf_clk1_pin, ++ [RPM_SMD_RF_CLK1_A_PIN] = &msm8916_rf_clk1_a_pin, ++ [RPM_SMD_RF_CLK2_PIN] = &msm8916_rf_clk2_pin, ++ [RPM_SMD_RF_CLK2_A_PIN] = &msm8916_rf_clk2_a_pin, ++}; ++ ++static const struct rpm_smd_clk_desc rpm_clk_msm8916 = { ++ .clks = msm8916_clks, ++ .num_clks = ARRAY_SIZE(msm8916_clks), ++}; ++ ++static const struct of_device_id rpm_smd_clk_match_table[] = { ++ { .compatible = "qcom,rpmcc-msm8916", .data = &rpm_clk_msm8916 }, ++ { } ++}; 
++MODULE_DEVICE_TABLE(of, rpm_smd_clk_match_table); ++ ++static int rpm_smd_clk_probe(struct platform_device *pdev) ++{ ++ struct clk_hw **hws; ++ struct rpm_cc *rcc; ++ struct clk_hw_onecell_data *data; ++ int ret; ++ size_t num_clks, i; ++ struct qcom_smd_rpm *rpm; ++ struct clk_smd_rpm **rpm_smd_clks; ++ const struct rpm_smd_clk_desc *desc; ++ ++ rpm = dev_get_drvdata(pdev->dev.parent); ++ if (!rpm) { ++ dev_err(&pdev->dev, "Unable to retrieve handle to RPM\n"); ++ return -ENODEV; ++ } ++ ++ desc = of_device_get_match_data(&pdev->dev); ++ if (!desc) ++ return -EINVAL; ++ ++ rpm_smd_clks = desc->clks; ++ num_clks = desc->num_clks; ++ ++ rcc = devm_kzalloc(&pdev->dev, sizeof(*rcc) + sizeof(*hws) * num_clks, ++ GFP_KERNEL); ++ if (!rcc) ++ return -ENOMEM; ++ ++ hws = rcc->hws; ++ data = &rcc->data; ++ data->num = num_clks; ++ ++ for (i = 0; i < num_clks; i++) { ++ if (!rpm_smd_clks[i]) { ++ continue; ++ } ++ ++ rpm_smd_clks[i]->rpm = rpm; ++ ++ ret = clk_smd_rpm_handoff(rpm_smd_clks[i]); ++ if (ret) ++ goto err; ++ } ++ ++ ret = clk_smd_rpm_enable_scaling(rpm); ++ if (ret) ++ goto err; ++ ++ for (i = 0; i < num_clks; i++) { ++ if (!rpm_smd_clks[i]) { ++ data->hws[i] = ERR_PTR(-ENOENT); ++ continue; ++ } ++ ++ ret = devm_clk_hw_register(&pdev->dev, &rpm_smd_clks[i]->hw); ++ if (ret) ++ goto err; ++ } ++ ++ ret = of_clk_add_hw_provider(pdev->dev.of_node, of_clk_hw_onecell_get, ++ data); ++ if (ret) ++ goto err; ++ ++ return 0; ++err: ++ dev_err(&pdev->dev, "Error registering SMD clock driver (%d)\n", ret); ++ return ret; ++} ++ ++static int rpm_smd_clk_remove(struct platform_device *pdev) ++{ ++ of_clk_del_provider(pdev->dev.of_node); ++ return 0; ++} ++ ++static struct platform_driver rpm_smd_clk_driver = { ++ .driver = { ++ .name = "qcom-clk-smd-rpm", ++ .of_match_table = rpm_smd_clk_match_table, ++ }, ++ .probe = rpm_smd_clk_probe, ++ .remove = rpm_smd_clk_remove, ++}; ++ ++static int __init rpm_smd_clk_init(void) ++{ ++ return 
platform_driver_register(&rpm_smd_clk_driver); ++} ++core_initcall(rpm_smd_clk_init); ++ ++static void __exit rpm_smd_clk_exit(void) ++{ ++ platform_driver_unregister(&rpm_smd_clk_driver); ++} ++module_exit(rpm_smd_clk_exit); ++ ++MODULE_DESCRIPTION("Qualcomm RPM over SMD Clock Controller Driver"); ++MODULE_LICENSE("GPL v2"); ++MODULE_ALIAS("platform:qcom-clk-smd-rpm"); +--- /dev/null ++++ b/include/dt-bindings/clock/qcom,rpmcc.h +@@ -0,0 +1,45 @@ ++/* ++ * Copyright 2015 Linaro Limited ++ * ++ * This software is licensed under the terms of the GNU General Public ++ * License version 2, as published by the Free Software Foundation, and ++ * may be copied, distributed, and modified under those terms. ++ * ++ * This program is distributed in the hope that it will be useful, ++ * but WITHOUT ANY WARRANTY; without even the implied warranty of ++ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the ++ * GNU General Public License for more details. ++ */ ++ ++#ifndef _DT_BINDINGS_CLK_MSM_RPMCC_H ++#define _DT_BINDINGS_CLK_MSM_RPMCC_H ++ ++/* msm8916 */ ++#define RPM_SMD_XO_CLK_SRC 0 ++#define RPM_SMD_XO_A_CLK_SRC 1 ++#define RPM_SMD_PCNOC_CLK 2 ++#define RPM_SMD_PCNOC_A_CLK 3 ++#define RPM_SMD_SNOC_CLK 4 ++#define RPM_SMD_SNOC_A_CLK 5 ++#define RPM_SMD_BIMC_CLK 6 ++#define RPM_SMD_BIMC_A_CLK 7 ++#define RPM_SMD_QDSS_CLK 8 ++#define RPM_SMD_QDSS_A_CLK 9 ++#define RPM_SMD_BB_CLK1 10 ++#define RPM_SMD_BB_CLK1_A 11 ++#define RPM_SMD_BB_CLK2 12 ++#define RPM_SMD_BB_CLK2_A 13 ++#define RPM_SMD_RF_CLK1 14 ++#define RPM_SMD_RF_CLK1_A 15 ++#define RPM_SMD_RF_CLK2 16 ++#define RPM_SMD_RF_CLK2_A 17 ++#define RPM_SMD_BB_CLK1_PIN 18 ++#define RPM_SMD_BB_CLK1_A_PIN 19 ++#define RPM_SMD_BB_CLK2_PIN 20 ++#define RPM_SMD_BB_CLK2_A_PIN 21 ++#define RPM_SMD_RF_CLK1_PIN 22 ++#define RPM_SMD_RF_CLK1_A_PIN 23 ++#define RPM_SMD_RF_CLK2_PIN 24 ++#define RPM_SMD_RF_CLK2_A_PIN 25 ++ ++#endif diff --git a/target/linux/ipq40xx/patches-4.9/0028-clk-qcom-Add-support-for-RPM-Clocks.patch 
b/target/linux/ipq40xx/patches-4.9/0028-clk-qcom-Add-support-for-RPM-Clocks.patch new file mode 100644 index 000000000..72b392dc1 --- /dev/null +++ b/target/linux/ipq40xx/patches-4.9/0028-clk-qcom-Add-support-for-RPM-Clocks.patch @@ -0,0 +1,587 @@ +From 21e7116c9d639f3283d4cec286fed1e703832b43 Mon Sep 17 00:00:00 2001 +From: Georgi Djakov +Date: Wed, 2 Nov 2016 17:56:57 +0200 +Subject: [PATCH 28/69] clk: qcom: Add support for RPM Clocks + +This adds initial support for clocks controlled by the Resource +Power Manager (RPM) processor on some Qualcomm SoCs, which use +the qcom_rpm driver to communicate with RPM. +Such platforms are apq8064 and msm8960. + +Signed-off-by: Georgi Djakov +Acked-by: Rob Herring +Signed-off-by: Stephen Boyd +--- + .../devicetree/bindings/clock/qcom,rpmcc.txt | 1 + + drivers/clk/qcom/Kconfig | 13 + + drivers/clk/qcom/Makefile | 1 + + drivers/clk/qcom/clk-rpm.c | 489 +++++++++++++++++++++ + include/dt-bindings/clock/qcom,rpmcc.h | 24 + + 5 files changed, 528 insertions(+) + create mode 100644 drivers/clk/qcom/clk-rpm.c + +--- a/Documentation/devicetree/bindings/clock/qcom,rpmcc.txt ++++ b/Documentation/devicetree/bindings/clock/qcom,rpmcc.txt +@@ -11,6 +11,7 @@ Required properties : + compatible "qcom,rpmcc" should be also included. + + "qcom,rpmcc-msm8916", "qcom,rpmcc" ++ "qcom,rpmcc-apq8064", "qcom,rpmcc" + + - #clock-cells : shall contain 1 + +--- a/drivers/clk/qcom/Kconfig ++++ b/drivers/clk/qcom/Kconfig +@@ -12,6 +12,19 @@ config COMMON_CLK_QCOM + select REGMAP_MMIO + select RESET_CONTROLLER + ++config QCOM_CLK_RPM ++ tristate "RPM based Clock Controller" ++ depends on COMMON_CLK_QCOM && MFD_QCOM_RPM ++ select QCOM_RPMCC ++ help ++ The RPM (Resource Power Manager) is a dedicated hardware engine for ++ managing the shared SoC resources in order to keep the lowest power ++ profile. 
It communicates with other hardware subsystems via shared ++ memory and accepts clock requests, aggregates the requests and turns ++ the clocks on/off or scales them on demand. ++ Say Y if you want to support the clocks exposed by the RPM on ++ platforms such as ipq806x, msm8660, msm8960 etc. ++ + config QCOM_CLK_SMD_RPM + tristate "RPM over SMD based Clock Controller" + depends on COMMON_CLK_QCOM && QCOM_SMD_RPM +--- a/drivers/clk/qcom/Makefile ++++ b/drivers/clk/qcom/Makefile +@@ -29,4 +29,5 @@ obj-$(CONFIG_MSM_LCC_8960) += lcc-msm896 + obj-$(CONFIG_MSM_MMCC_8960) += mmcc-msm8960.o + obj-$(CONFIG_MSM_MMCC_8974) += mmcc-msm8974.o + obj-$(CONFIG_MSM_MMCC_8996) += mmcc-msm8996.o ++obj-$(CONFIG_QCOM_CLK_RPM) += clk-rpm.o + obj-$(CONFIG_QCOM_CLK_SMD_RPM) += clk-smd-rpm.o +--- /dev/null ++++ b/drivers/clk/qcom/clk-rpm.c +@@ -0,0 +1,489 @@ ++/* ++ * Copyright (c) 2016, Linaro Limited ++ * Copyright (c) 2014, The Linux Foundation. All rights reserved. ++ * ++ * This software is licensed under the terms of the GNU General Public ++ * License version 2, as published by the Free Software Foundation, and ++ * may be copied, distributed, and modified under those terms. ++ * ++ * This program is distributed in the hope that it will be useful, ++ * but WITHOUT ANY WARRANTY; without even the implied warranty of ++ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the ++ * GNU General Public License for more details. 
++ */ ++ ++#include ++#include ++#include ++#include ++#include ++#include ++#include ++#include ++#include ++#include ++#include ++ ++#include ++#include ++ ++#define QCOM_RPM_MISC_CLK_TYPE 0x306b6c63 ++#define QCOM_RPM_SCALING_ENABLE_ID 0x2 ++ ++#define DEFINE_CLK_RPM(_platform, _name, _active, r_id) \ ++ static struct clk_rpm _platform##_##_active; \ ++ static struct clk_rpm _platform##_##_name = { \ ++ .rpm_clk_id = (r_id), \ ++ .peer = &_platform##_##_active, \ ++ .rate = INT_MAX, \ ++ .hw.init = &(struct clk_init_data){ \ ++ .ops = &clk_rpm_ops, \ ++ .name = #_name, \ ++ .parent_names = (const char *[]){ "pxo_board" }, \ ++ .num_parents = 1, \ ++ }, \ ++ }; \ ++ static struct clk_rpm _platform##_##_active = { \ ++ .rpm_clk_id = (r_id), \ ++ .peer = &_platform##_##_name, \ ++ .active_only = true, \ ++ .rate = INT_MAX, \ ++ .hw.init = &(struct clk_init_data){ \ ++ .ops = &clk_rpm_ops, \ ++ .name = #_active, \ ++ .parent_names = (const char *[]){ "pxo_board" }, \ ++ .num_parents = 1, \ ++ }, \ ++ } ++ ++#define DEFINE_CLK_RPM_PXO_BRANCH(_platform, _name, _active, r_id, r) \ ++ static struct clk_rpm _platform##_##_active; \ ++ static struct clk_rpm _platform##_##_name = { \ ++ .rpm_clk_id = (r_id), \ ++ .active_only = true, \ ++ .peer = &_platform##_##_active, \ ++ .rate = (r), \ ++ .branch = true, \ ++ .hw.init = &(struct clk_init_data){ \ ++ .ops = &clk_rpm_branch_ops, \ ++ .name = #_name, \ ++ .parent_names = (const char *[]){ "pxo_board" }, \ ++ .num_parents = 1, \ ++ }, \ ++ }; \ ++ static struct clk_rpm _platform##_##_active = { \ ++ .rpm_clk_id = (r_id), \ ++ .peer = &_platform##_##_name, \ ++ .rate = (r), \ ++ .branch = true, \ ++ .hw.init = &(struct clk_init_data){ \ ++ .ops = &clk_rpm_branch_ops, \ ++ .name = #_active, \ ++ .parent_names = (const char *[]){ "pxo_board" }, \ ++ .num_parents = 1, \ ++ }, \ ++ } ++ ++#define DEFINE_CLK_RPM_CXO_BRANCH(_platform, _name, _active, r_id, r) \ ++ static struct clk_rpm _platform##_##_active; \ ++ static struct 
clk_rpm _platform##_##_name = { \ ++ .rpm_clk_id = (r_id), \ ++ .peer = &_platform##_##_active, \ ++ .rate = (r), \ ++ .branch = true, \ ++ .hw.init = &(struct clk_init_data){ \ ++ .ops = &clk_rpm_branch_ops, \ ++ .name = #_name, \ ++ .parent_names = (const char *[]){ "cxo_board" }, \ ++ .num_parents = 1, \ ++ }, \ ++ }; \ ++ static struct clk_rpm _platform##_##_active = { \ ++ .rpm_clk_id = (r_id), \ ++ .active_only = true, \ ++ .peer = &_platform##_##_name, \ ++ .rate = (r), \ ++ .branch = true, \ ++ .hw.init = &(struct clk_init_data){ \ ++ .ops = &clk_rpm_branch_ops, \ ++ .name = #_active, \ ++ .parent_names = (const char *[]){ "cxo_board" }, \ ++ .num_parents = 1, \ ++ }, \ ++ } ++ ++#define to_clk_rpm(_hw) container_of(_hw, struct clk_rpm, hw) ++ ++struct clk_rpm { ++ const int rpm_clk_id; ++ const bool active_only; ++ unsigned long rate; ++ bool enabled; ++ bool branch; ++ struct clk_rpm *peer; ++ struct clk_hw hw; ++ struct qcom_rpm *rpm; ++}; ++ ++struct rpm_cc { ++ struct qcom_rpm *rpm; ++ struct clk_hw_onecell_data data; ++ struct clk_hw *hws[]; ++}; ++ ++struct rpm_clk_desc { ++ struct clk_rpm **clks; ++ size_t num_clks; ++}; ++ ++static DEFINE_MUTEX(rpm_clk_lock); ++ ++static int clk_rpm_handoff(struct clk_rpm *r) ++{ ++ int ret; ++ u32 value = INT_MAX; ++ ++ ret = qcom_rpm_write(r->rpm, QCOM_RPM_ACTIVE_STATE, ++ r->rpm_clk_id, &value, 1); ++ if (ret) ++ return ret; ++ ret = qcom_rpm_write(r->rpm, QCOM_RPM_SLEEP_STATE, ++ r->rpm_clk_id, &value, 1); ++ if (ret) ++ return ret; ++ ++ return 0; ++} ++ ++static int clk_rpm_set_rate_active(struct clk_rpm *r, unsigned long rate) ++{ ++ u32 value = DIV_ROUND_UP(rate, 1000); /* to kHz */ ++ ++ return qcom_rpm_write(r->rpm, QCOM_RPM_ACTIVE_STATE, ++ r->rpm_clk_id, &value, 1); ++} ++ ++static int clk_rpm_set_rate_sleep(struct clk_rpm *r, unsigned long rate) ++{ ++ u32 value = DIV_ROUND_UP(rate, 1000); /* to kHz */ ++ ++ return qcom_rpm_write(r->rpm, QCOM_RPM_SLEEP_STATE, ++ r->rpm_clk_id, &value, 1); ++} ++ 
++static void to_active_sleep(struct clk_rpm *r, unsigned long rate, ++ unsigned long *active, unsigned long *sleep) ++{ ++ *active = rate; ++ ++ /* ++ * Active-only clocks don't care what the rate is during sleep. So, ++ * they vote for zero. ++ */ ++ if (r->active_only) ++ *sleep = 0; ++ else ++ *sleep = *active; ++} ++ ++static int clk_rpm_prepare(struct clk_hw *hw) ++{ ++ struct clk_rpm *r = to_clk_rpm(hw); ++ struct clk_rpm *peer = r->peer; ++ unsigned long this_rate = 0, this_sleep_rate = 0; ++ unsigned long peer_rate = 0, peer_sleep_rate = 0; ++ unsigned long active_rate, sleep_rate; ++ int ret = 0; ++ ++ mutex_lock(&rpm_clk_lock); ++ ++ /* Don't send requests to the RPM if the rate has not been set. */ ++ if (!r->rate) ++ goto out; ++ ++ to_active_sleep(r, r->rate, &this_rate, &this_sleep_rate); ++ ++ /* Take peer clock's rate into account only if it's enabled. */ ++ if (peer->enabled) ++ to_active_sleep(peer, peer->rate, ++ &peer_rate, &peer_sleep_rate); ++ ++ active_rate = max(this_rate, peer_rate); ++ ++ if (r->branch) ++ active_rate = !!active_rate; ++ ++ ret = clk_rpm_set_rate_active(r, active_rate); ++ if (ret) ++ goto out; ++ ++ sleep_rate = max(this_sleep_rate, peer_sleep_rate); ++ if (r->branch) ++ sleep_rate = !!sleep_rate; ++ ++ ret = clk_rpm_set_rate_sleep(r, sleep_rate); ++ if (ret) ++ /* Undo the active set vote and restore it */ ++ ret = clk_rpm_set_rate_active(r, peer_rate); ++ ++out: ++ if (!ret) ++ r->enabled = true; ++ ++ mutex_unlock(&rpm_clk_lock); ++ ++ return ret; ++} ++ ++static void clk_rpm_unprepare(struct clk_hw *hw) ++{ ++ struct clk_rpm *r = to_clk_rpm(hw); ++ struct clk_rpm *peer = r->peer; ++ unsigned long peer_rate = 0, peer_sleep_rate = 0; ++ unsigned long active_rate, sleep_rate; ++ int ret; ++ ++ mutex_lock(&rpm_clk_lock); ++ ++ if (!r->rate) ++ goto out; ++ ++ /* Take peer clock's rate into account only if it's enabled. 
*/ ++ if (peer->enabled) ++ to_active_sleep(peer, peer->rate, &peer_rate, ++ &peer_sleep_rate); ++ ++ active_rate = r->branch ? !!peer_rate : peer_rate; ++ ret = clk_rpm_set_rate_active(r, active_rate); ++ if (ret) ++ goto out; ++ ++ sleep_rate = r->branch ? !!peer_sleep_rate : peer_sleep_rate; ++ ret = clk_rpm_set_rate_sleep(r, sleep_rate); ++ if (ret) ++ goto out; ++ ++ r->enabled = false; ++ ++out: ++ mutex_unlock(&rpm_clk_lock); ++} ++ ++static int clk_rpm_set_rate(struct clk_hw *hw, ++ unsigned long rate, unsigned long parent_rate) ++{ ++ struct clk_rpm *r = to_clk_rpm(hw); ++ struct clk_rpm *peer = r->peer; ++ unsigned long active_rate, sleep_rate; ++ unsigned long this_rate = 0, this_sleep_rate = 0; ++ unsigned long peer_rate = 0, peer_sleep_rate = 0; ++ int ret = 0; ++ ++ mutex_lock(&rpm_clk_lock); ++ ++ if (!r->enabled) ++ goto out; ++ ++ to_active_sleep(r, rate, &this_rate, &this_sleep_rate); ++ ++ /* Take peer clock's rate into account only if it's enabled. */ ++ if (peer->enabled) ++ to_active_sleep(peer, peer->rate, ++ &peer_rate, &peer_sleep_rate); ++ ++ active_rate = max(this_rate, peer_rate); ++ ret = clk_rpm_set_rate_active(r, active_rate); ++ if (ret) ++ goto out; ++ ++ sleep_rate = max(this_sleep_rate, peer_sleep_rate); ++ ret = clk_rpm_set_rate_sleep(r, sleep_rate); ++ if (ret) ++ goto out; ++ ++ r->rate = rate; ++ ++out: ++ mutex_unlock(&rpm_clk_lock); ++ ++ return ret; ++} ++ ++static long clk_rpm_round_rate(struct clk_hw *hw, unsigned long rate, ++ unsigned long *parent_rate) ++{ ++ /* ++ * RPM handles rate rounding and we don't have a way to ++ * know what the rate will be, so just return whatever ++ * rate is requested. ++ */ ++ return rate; ++} ++ ++static unsigned long clk_rpm_recalc_rate(struct clk_hw *hw, ++ unsigned long parent_rate) ++{ ++ struct clk_rpm *r = to_clk_rpm(hw); ++ ++ /* ++ * RPM handles rate rounding and we don't have a way to ++ * know what the rate will be, so just return whatever ++ * rate was set. 
++ */ ++ return r->rate; ++} ++ ++static const struct clk_ops clk_rpm_ops = { ++ .prepare = clk_rpm_prepare, ++ .unprepare = clk_rpm_unprepare, ++ .set_rate = clk_rpm_set_rate, ++ .round_rate = clk_rpm_round_rate, ++ .recalc_rate = clk_rpm_recalc_rate, ++}; ++ ++static const struct clk_ops clk_rpm_branch_ops = { ++ .prepare = clk_rpm_prepare, ++ .unprepare = clk_rpm_unprepare, ++ .round_rate = clk_rpm_round_rate, ++ .recalc_rate = clk_rpm_recalc_rate, ++}; ++ ++/* apq8064 */ ++DEFINE_CLK_RPM(apq8064, afab_clk, afab_a_clk, QCOM_RPM_APPS_FABRIC_CLK); ++DEFINE_CLK_RPM(apq8064, cfpb_clk, cfpb_a_clk, QCOM_RPM_CFPB_CLK); ++DEFINE_CLK_RPM(apq8064, daytona_clk, daytona_a_clk, QCOM_RPM_DAYTONA_FABRIC_CLK); ++DEFINE_CLK_RPM(apq8064, ebi1_clk, ebi1_a_clk, QCOM_RPM_EBI1_CLK); ++DEFINE_CLK_RPM(apq8064, mmfab_clk, mmfab_a_clk, QCOM_RPM_MM_FABRIC_CLK); ++DEFINE_CLK_RPM(apq8064, mmfpb_clk, mmfpb_a_clk, QCOM_RPM_MMFPB_CLK); ++DEFINE_CLK_RPM(apq8064, sfab_clk, sfab_a_clk, QCOM_RPM_SYS_FABRIC_CLK); ++DEFINE_CLK_RPM(apq8064, sfpb_clk, sfpb_a_clk, QCOM_RPM_SFPB_CLK); ++DEFINE_CLK_RPM(apq8064, qdss_clk, qdss_a_clk, QCOM_RPM_QDSS_CLK); ++ ++static struct clk_rpm *apq8064_clks[] = { ++ [RPM_APPS_FABRIC_CLK] = &apq8064_afab_clk, ++ [RPM_APPS_FABRIC_A_CLK] = &apq8064_afab_a_clk, ++ [RPM_CFPB_CLK] = &apq8064_cfpb_clk, ++ [RPM_CFPB_A_CLK] = &apq8064_cfpb_a_clk, ++ [RPM_DAYTONA_FABRIC_CLK] = &apq8064_daytona_clk, ++ [RPM_DAYTONA_FABRIC_A_CLK] = &apq8064_daytona_a_clk, ++ [RPM_EBI1_CLK] = &apq8064_ebi1_clk, ++ [RPM_EBI1_A_CLK] = &apq8064_ebi1_a_clk, ++ [RPM_MM_FABRIC_CLK] = &apq8064_mmfab_clk, ++ [RPM_MM_FABRIC_A_CLK] = &apq8064_mmfab_a_clk, ++ [RPM_MMFPB_CLK] = &apq8064_mmfpb_clk, ++ [RPM_MMFPB_A_CLK] = &apq8064_mmfpb_a_clk, ++ [RPM_SYS_FABRIC_CLK] = &apq8064_sfab_clk, ++ [RPM_SYS_FABRIC_A_CLK] = &apq8064_sfab_a_clk, ++ [RPM_SFPB_CLK] = &apq8064_sfpb_clk, ++ [RPM_SFPB_A_CLK] = &apq8064_sfpb_a_clk, ++ [RPM_QDSS_CLK] = &apq8064_qdss_clk, ++ [RPM_QDSS_A_CLK] = &apq8064_qdss_a_clk, ++}; ++ 
++static const struct rpm_clk_desc rpm_clk_apq8064 = { ++ .clks = apq8064_clks, ++ .num_clks = ARRAY_SIZE(apq8064_clks), ++}; ++ ++static const struct of_device_id rpm_clk_match_table[] = { ++ { .compatible = "qcom,rpmcc-apq8064", .data = &rpm_clk_apq8064 }, ++ { } ++}; ++MODULE_DEVICE_TABLE(of, rpm_clk_match_table); ++ ++static int rpm_clk_probe(struct platform_device *pdev) ++{ ++ struct clk_hw **hws; ++ struct rpm_cc *rcc; ++ struct clk_hw_onecell_data *data; ++ int ret; ++ size_t num_clks, i; ++ struct qcom_rpm *rpm; ++ struct clk_rpm **rpm_clks; ++ const struct rpm_clk_desc *desc; ++ ++ rpm = dev_get_drvdata(pdev->dev.parent); ++ if (!rpm) { ++ dev_err(&pdev->dev, "Unable to retrieve handle to RPM\n"); ++ return -ENODEV; ++ } ++ ++ desc = of_device_get_match_data(&pdev->dev); ++ if (!desc) ++ return -EINVAL; ++ ++ rpm_clks = desc->clks; ++ num_clks = desc->num_clks; ++ ++ rcc = devm_kzalloc(&pdev->dev, sizeof(*rcc) + sizeof(*hws) * num_clks, ++ GFP_KERNEL); ++ if (!rcc) ++ return -ENOMEM; ++ ++ hws = rcc->hws; ++ data = &rcc->data; ++ data->num = num_clks; ++ ++ for (i = 0; i < num_clks; i++) { ++ if (!rpm_clks[i]) ++ continue; ++ ++ rpm_clks[i]->rpm = rpm; ++ ++ ret = clk_rpm_handoff(rpm_clks[i]); ++ if (ret) ++ goto err; ++ } ++ ++ for (i = 0; i < num_clks; i++) { ++ if (!rpm_clks[i]) { ++ data->hws[i] = ERR_PTR(-ENOENT); ++ continue; ++ } ++ ++ ret = devm_clk_hw_register(&pdev->dev, &rpm_clks[i]->hw); ++ if (ret) ++ goto err; ++ } ++ ++ ret = of_clk_add_hw_provider(pdev->dev.of_node, of_clk_hw_onecell_get, ++ data); ++ if (ret) ++ goto err; ++ ++ return 0; ++err: ++ dev_err(&pdev->dev, "Error registering RPM Clock driver (%d)\n", ret); ++ return ret; ++} ++ ++static int rpm_clk_remove(struct platform_device *pdev) ++{ ++ of_clk_del_provider(pdev->dev.of_node); ++ return 0; ++} ++ ++static struct platform_driver rpm_clk_driver = { ++ .driver = { ++ .name = "qcom-clk-rpm", ++ .of_match_table = rpm_clk_match_table, ++ }, ++ .probe = rpm_clk_probe, ++ .remove = 
rpm_clk_remove, ++}; ++ ++static int __init rpm_clk_init(void) ++{ ++ return platform_driver_register(&rpm_clk_driver); ++} ++core_initcall(rpm_clk_init); ++ ++static void __exit rpm_clk_exit(void) ++{ ++ platform_driver_unregister(&rpm_clk_driver); ++} ++module_exit(rpm_clk_exit); ++ ++MODULE_DESCRIPTION("Qualcomm RPM Clock Controller Driver"); ++MODULE_LICENSE("GPL v2"); ++MODULE_ALIAS("platform:qcom-clk-rpm"); +--- a/include/dt-bindings/clock/qcom,rpmcc.h ++++ b/include/dt-bindings/clock/qcom,rpmcc.h +@@ -14,6 +14,30 @@ + #ifndef _DT_BINDINGS_CLK_MSM_RPMCC_H + #define _DT_BINDINGS_CLK_MSM_RPMCC_H + ++/* apq8064 */ ++#define RPM_PXO_CLK 0 ++#define RPM_PXO_A_CLK 1 ++#define RPM_CXO_CLK 2 ++#define RPM_CXO_A_CLK 3 ++#define RPM_APPS_FABRIC_CLK 4 ++#define RPM_APPS_FABRIC_A_CLK 5 ++#define RPM_CFPB_CLK 6 ++#define RPM_CFPB_A_CLK 7 ++#define RPM_QDSS_CLK 8 ++#define RPM_QDSS_A_CLK 9 ++#define RPM_DAYTONA_FABRIC_CLK 10 ++#define RPM_DAYTONA_FABRIC_A_CLK 11 ++#define RPM_EBI1_CLK 12 ++#define RPM_EBI1_A_CLK 13 ++#define RPM_MM_FABRIC_CLK 14 ++#define RPM_MM_FABRIC_A_CLK 15 ++#define RPM_MMFPB_CLK 16 ++#define RPM_MMFPB_A_CLK 17 ++#define RPM_SYS_FABRIC_CLK 18 ++#define RPM_SYS_FABRIC_A_CLK 19 ++#define RPM_SFPB_CLK 20 ++#define RPM_SFPB_A_CLK 21 ++ + /* msm8916 */ + #define RPM_SMD_XO_CLK_SRC 0 + #define RPM_SMD_XO_A_CLK_SRC 1 diff --git a/target/linux/ipq40xx/patches-4.9/0029-clk-qcom-clk-rpm-Fix-clk_hw-references.patch b/target/linux/ipq40xx/patches-4.9/0029-clk-qcom-clk-rpm-Fix-clk_hw-references.patch new file mode 100644 index 000000000..1b1b558dd --- /dev/null +++ b/target/linux/ipq40xx/patches-4.9/0029-clk-qcom-clk-rpm-Fix-clk_hw-references.patch @@ -0,0 +1,94 @@ +From 7028c21deb2c6205bb896cc3719414de3d6d6a6e Mon Sep 17 00:00:00 2001 +From: Georgi Djakov +Date: Wed, 23 Nov 2016 16:52:49 +0200 +Subject: [PATCH 29/69] clk: qcom: clk-rpm: Fix clk_hw references + +Fix the clk_hw references to the actual clocks and add a xlate function +to return the hw pointers from 
the already existing static array. + +Reported-by: Michael Scott +Signed-off-by: Georgi Djakov +Signed-off-by: Stephen Boyd +--- + drivers/clk/qcom/clk-rpm.c | 36 ++++++++++++++++++++++-------------- + 1 file changed, 22 insertions(+), 14 deletions(-) + +--- a/drivers/clk/qcom/clk-rpm.c ++++ b/drivers/clk/qcom/clk-rpm.c +@@ -127,8 +127,8 @@ struct clk_rpm { + + struct rpm_cc { + struct qcom_rpm *rpm; +- struct clk_hw_onecell_data data; +- struct clk_hw *hws[]; ++ struct clk_rpm **clks; ++ size_t num_clks; + }; + + struct rpm_clk_desc { +@@ -391,11 +391,23 @@ static const struct of_device_id rpm_clk + }; + MODULE_DEVICE_TABLE(of, rpm_clk_match_table); + ++static struct clk_hw *qcom_rpm_clk_hw_get(struct of_phandle_args *clkspec, ++ void *data) ++{ ++ struct rpm_cc *rcc = data; ++ unsigned int idx = clkspec->args[0]; ++ ++ if (idx >= rcc->num_clks) { ++ pr_err("%s: invalid index %u\n", __func__, idx); ++ return ERR_PTR(-EINVAL); ++ } ++ ++ return rcc->clks[idx] ? &rcc->clks[idx]->hw : ERR_PTR(-ENOENT); ++} ++ + static int rpm_clk_probe(struct platform_device *pdev) + { +- struct clk_hw **hws; + struct rpm_cc *rcc; +- struct clk_hw_onecell_data *data; + int ret; + size_t num_clks, i; + struct qcom_rpm *rpm; +@@ -415,14 +427,12 @@ static int rpm_clk_probe(struct platform + rpm_clks = desc->clks; + num_clks = desc->num_clks; + +- rcc = devm_kzalloc(&pdev->dev, sizeof(*rcc) + sizeof(*hws) * num_clks, +- GFP_KERNEL); ++ rcc = devm_kzalloc(&pdev->dev, sizeof(*rcc), GFP_KERNEL); + if (!rcc) + return -ENOMEM; + +- hws = rcc->hws; +- data = &rcc->data; +- data->num = num_clks; ++ rcc->clks = rpm_clks; ++ rcc->num_clks = num_clks; + + for (i = 0; i < num_clks; i++) { + if (!rpm_clks[i]) +@@ -436,18 +446,16 @@ static int rpm_clk_probe(struct platform + } + + for (i = 0; i < num_clks; i++) { +- if (!rpm_clks[i]) { +- data->hws[i] = ERR_PTR(-ENOENT); ++ if (!rpm_clks[i]) + continue; +- } + + ret = devm_clk_hw_register(&pdev->dev, &rpm_clks[i]->hw); + if (ret) + goto err; + } + +- 
ret = of_clk_add_hw_provider(pdev->dev.of_node, of_clk_hw_onecell_get, +- data); ++ ret = of_clk_add_hw_provider(pdev->dev.of_node, qcom_rpm_clk_hw_get, ++ rcc); + if (ret) + goto err; + diff --git a/target/linux/ipq40xx/patches-4.9/0030-clk-Disable-i2c-device-on-gsbi4.patch b/target/linux/ipq40xx/patches-4.9/0030-clk-Disable-i2c-device-on-gsbi4.patch new file mode 100644 index 000000000..b2a6afe0d --- /dev/null +++ b/target/linux/ipq40xx/patches-4.9/0030-clk-Disable-i2c-device-on-gsbi4.patch @@ -0,0 +1,40 @@ +From 0c974b87829e007dc4fae94e20d488204e20e662 Mon Sep 17 00:00:00 2001 +From: John Crispin +Date: Thu, 9 Mar 2017 08:16:10 +0100 +Subject: [PATCH 30/69] clk: Disable i2c device on gsbi4 + +This patch was not annotated and comes from the v4.4 tree. + +Signed-off-by: John Crispin +--- + drivers/clk/qcom/gcc-ipq806x.c | 5 +++-- + 1 file changed, 3 insertions(+), 2 deletions(-) + +--- a/drivers/clk/qcom/gcc-ipq806x.c ++++ b/drivers/clk/qcom/gcc-ipq806x.c +@@ -294,7 +294,7 @@ static struct clk_rcg gsbi1_uart_src = { + .parent_names = gcc_pxo_pll8, + .num_parents = 2, + .ops = &clk_rcg_ops, +- .flags = CLK_SET_PARENT_GATE, ++ .flags = CLK_SET_PARENT_GATE | CLK_IGNORE_UNUSED, + }, + }, + }; +@@ -312,7 +312,7 @@ static struct clk_branch gsbi1_uart_clk + }, + .num_parents = 1, + .ops = &clk_branch_ops, +- .flags = CLK_SET_RATE_PARENT, ++ .flags = CLK_SET_RATE_PARENT | CLK_IGNORE_UNUSED, + }, + }, + }; +@@ -890,6 +890,7 @@ static struct clk_branch gsbi1_h_clk = { + .hw.init = &(struct clk_init_data){ + .name = "gsbi1_h_clk", + .ops = &clk_branch_ops, ++ .flags = CLK_IGNORE_UNUSED, + }, + }, + }; diff --git a/target/linux/ipq40xx/patches-4.9/0031-mtd-add-SMEM-parser-for-QCOM-platforms.patch b/target/linux/ipq40xx/patches-4.9/0031-mtd-add-SMEM-parser-for-QCOM-platforms.patch new file mode 100644 index 000000000..ad0b09b11 --- /dev/null +++ b/target/linux/ipq40xx/patches-4.9/0031-mtd-add-SMEM-parser-for-QCOM-platforms.patch @@ -0,0 +1,275 @@ +From 
d8eeb4de90e968ba32d956728c866f20752cf2c3 Mon Sep 17 00:00:00 2001 +From: Mathieu Olivari +Date: Thu, 9 Mar 2017 08:18:08 +0100 +Subject: [PATCH 31/69] mtd: add SMEM parser for QCOM platforms + +On QCOM platforms using MTD devices storage (such as IPQ806x), SMEM is +used to store partition layout. This new parser can now be used to read +SMEM and use it to register an MTD layout according to its content. + +Signed-off-by: Mathieu Olivari +Signed-off-by: Ram Chandra Jangir +--- + drivers/mtd/Kconfig | 7 ++ + drivers/mtd/Makefile | 1 + + drivers/mtd/qcom_smem_part.c | 228 +++++++++++++++++++++++++++++++++++++++++++ + 3 files changed, 236 insertions(+) + create mode 100644 drivers/mtd/qcom_smem_part.c + +--- a/drivers/mtd/Kconfig ++++ b/drivers/mtd/Kconfig +@@ -194,6 +194,13 @@ config MTD_MYLOADER_PARTS + You will still need the parsing functions to be called by the driver + for your particular device. It won't happen automatically. + ++config MTD_QCOM_SMEM_PARTS ++ tristate "QCOM SMEM partitioning support" ++ depends on QCOM_SMEM ++ help ++ This provides partitions parser for QCOM devices using SMEM ++ such as IPQ806x. ++ + comment "User Modules And Translation Layers" + + # +--- /dev/null ++++ b/drivers/mtd/qcom_smem_part.c +@@ -0,0 +1,228 @@ ++/* ++ * Copyright (c) 2015, The Linux Foundation. All rights reserved. ++ * ++ * This program is free software; you can redistribute it and/or modify ++ * it under the terms of the GNU General Public License version 2 and ++ * only version 2 as published by the Free Software Foundation. ++ * ++ * This program is distributed in the hope that it will be useful, ++ * but WITHOUT ANY WARRANTY; without even the implied warranty of ++ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the ++ * GNU General Public License for more details. 
++ */ ++ ++#include ++#include ++#include ++ ++#include ++#include ++#include ++#include ++ ++#include ++ ++/* Processor/host identifier for the application processor */ ++#define SMEM_HOST_APPS 0 ++ ++/* SMEM items index */ ++#define SMEM_AARM_PARTITION_TABLE 9 ++#define SMEM_BOOT_FLASH_TYPE 421 ++#define SMEM_BOOT_FLASH_BLOCK_SIZE 424 ++ ++/* SMEM Flash types */ ++#define SMEM_FLASH_NAND 2 ++#define SMEM_FLASH_SPI 6 ++ ++#define SMEM_PART_NAME_SZ 16 ++#define SMEM_PARTS_MAX 32 ++ ++struct smem_partition { ++ char name[SMEM_PART_NAME_SZ]; ++ __le32 start; ++ __le32 size; ++ __le32 attr; ++}; ++ ++struct smem_partition_table { ++ u8 magic[8]; ++ __le32 version; ++ __le32 len; ++ struct smem_partition parts[SMEM_PARTS_MAX]; ++}; ++ ++/* SMEM Magic values in partition table */ ++static const u8 SMEM_PTABLE_MAGIC[] = { ++ 0xaa, 0x73, 0xee, 0x55, ++ 0xdb, 0xbd, 0x5e, 0xe3, ++}; ++ ++static int qcom_smem_get_flash_blksz(u64 **smem_blksz) ++{ ++ size_t size; ++ ++ *smem_blksz = qcom_smem_get(SMEM_HOST_APPS, SMEM_BOOT_FLASH_BLOCK_SIZE, ++ &size); ++ ++ if (IS_ERR(*smem_blksz)) { ++ pr_err("Unable to read flash blksz from SMEM\n"); ++ return -ENOENT; ++ } ++ ++ if (size != sizeof(**smem_blksz)) { ++ pr_err("Invalid flash blksz size in SMEM\n"); ++ return -EINVAL; ++ } ++ ++ return 0; ++} ++ ++static int qcom_smem_get_flash_type(u64 **smem_flash_type) ++{ ++ size_t size; ++ ++ *smem_flash_type = qcom_smem_get(SMEM_HOST_APPS, SMEM_BOOT_FLASH_TYPE, ++ &size); ++ ++ if (IS_ERR(*smem_flash_type)) { ++ pr_err("Unable to read flash type from SMEM\n"); ++ return -ENOENT; ++ } ++ ++ if (size != sizeof(**smem_flash_type)) { ++ pr_err("Invalid flash type size in SMEM\n"); ++ return -EINVAL; ++ } ++ ++ return 0; ++} ++ ++static int qcom_smem_get_flash_partitions(struct smem_partition_table **pparts) ++{ ++ size_t size; ++ ++ *pparts = qcom_smem_get(SMEM_HOST_APPS, SMEM_AARM_PARTITION_TABLE, ++ &size); ++ ++ if (IS_ERR(*pparts)) { ++ pr_err("Unable to read partition table from 
SMEM\n"); ++ return -ENOENT; ++ } ++ ++ return 0; ++} ++ ++static int of_dev_node_match(struct device *dev, void *data) ++{ ++ return dev->of_node == data; ++} ++ ++static bool is_spi_device(struct device_node *np) ++{ ++ struct device *dev; ++ ++ dev = bus_find_device(&spi_bus_type, NULL, np, of_dev_node_match); ++ if (!dev) ++ return false; ++ ++ put_device(dev); ++ return true; ++} ++ ++static int parse_qcom_smem_partitions(struct mtd_info *master, ++ const struct mtd_partition **pparts, ++ struct mtd_part_parser_data *data) ++{ ++ struct smem_partition_table *smem_parts; ++ u64 *smem_flash_type, *smem_blksz; ++ struct mtd_partition *mtd_parts; ++ struct device_node *of_node = master->dev.of_node; ++ int i, ret; ++ ++ /* ++ * SMEM will only store the partition table of the boot device. ++ * If this is not the boot device, do not return any partition. ++ */ ++ ret = qcom_smem_get_flash_type(&smem_flash_type); ++ if (ret < 0) ++ return ret; ++ ++ if ((*smem_flash_type == SMEM_FLASH_NAND && !mtd_type_is_nand(master)) ++ || (*smem_flash_type == SMEM_FLASH_SPI && !is_spi_device(of_node))) ++ return 0; ++ ++ /* ++ * Just for sanity purpose, make sure the block size in SMEM matches the ++ * block size of the MTD device ++ */ ++ ret = qcom_smem_get_flash_blksz(&smem_blksz); ++ if (ret < 0) ++ return ret; ++ ++ if (*smem_blksz != master->erasesize) { ++ pr_err("SMEM block size differs from MTD block size\n"); ++ return -EINVAL; ++ } ++ ++ /* Get partition pointer from SMEM */ ++ ret = qcom_smem_get_flash_partitions(&smem_parts); ++ if (ret < 0) ++ return ret; ++ ++ if (memcmp(SMEM_PTABLE_MAGIC, smem_parts->magic, ++ sizeof(SMEM_PTABLE_MAGIC))) { ++ pr_err("SMEM partition magic invalid\n"); ++ return -EINVAL; ++ } ++ ++ /* Allocate and populate the mtd structures */ ++ mtd_parts = kcalloc(le32_to_cpu(smem_parts->len), sizeof(*mtd_parts), ++ GFP_KERNEL); ++ if (!mtd_parts) ++ return -ENOMEM; ++ ++ for (i = 0; i < smem_parts->len; i++) { ++ struct smem_partition *s_part = 
&smem_parts->parts[i]; ++ struct mtd_partition *m_part = &mtd_parts[i]; ++ ++ m_part->name = s_part->name; ++ m_part->size = le32_to_cpu(s_part->size) * (*smem_blksz); ++ m_part->offset = le32_to_cpu(s_part->start) * (*smem_blksz); ++ ++ /* ++ * The last SMEM partition may have its size marked as ++ * something like 0xffffffff, which means "until the end of the ++ * flash device". In this case, truncate it. ++ */ ++ if (m_part->offset + m_part->size > master->size) ++ m_part->size = master->size - m_part->offset; ++ } ++ ++ *pparts = mtd_parts; ++ ++ return smem_parts->len; ++} ++ ++static struct mtd_part_parser qcom_smem_parser = { ++ .owner = THIS_MODULE, ++ .parse_fn = parse_qcom_smem_partitions, ++ .name = "qcom-smem", ++}; ++ ++static int __init qcom_smem_parser_init(void) ++{ ++ register_mtd_parser(&qcom_smem_parser); ++ return 0; ++} ++ ++static void __exit qcom_smem_parser_exit(void) ++{ ++ deregister_mtd_parser(&qcom_smem_parser); ++} ++ ++module_init(qcom_smem_parser_init); ++module_exit(qcom_smem_parser_exit); ++ ++MODULE_LICENSE("GPL"); ++MODULE_AUTHOR("Mathieu Olivari "); ++MODULE_DESCRIPTION("Parsing code for SMEM based partition tables"); +--- a/drivers/mtd/Makefile ++++ b/drivers/mtd/Makefile +@@ -16,6 +16,7 @@ obj-$(CONFIG_MTD_AR7_PARTS) += ar7part.o + obj-$(CONFIG_MTD_BCM63XX_PARTS) += bcm63xxpart.o + obj-$(CONFIG_MTD_BCM47XX_PARTS) += bcm47xxpart.o + obj-$(CONFIG_MTD_MYLOADER_PARTS) += myloader.o ++obj-$(CONFIG_MTD_QCOM_SMEM_PARTS) += qcom_smem_part.o + obj-y += parsers/ + + # 'Users' - code which presents functionality to userspace. 
diff --git a/target/linux/ipq40xx/patches-4.9/0032-phy-add-qcom-dwc3-phy.patch b/target/linux/ipq40xx/patches-4.9/0032-phy-add-qcom-dwc3-phy.patch new file mode 100644 index 000000000..bd777858e --- /dev/null +++ b/target/linux/ipq40xx/patches-4.9/0032-phy-add-qcom-dwc3-phy.patch @@ -0,0 +1,617 @@ +From b9004f4fd23e4c614d71c972f3a9311665480e29 Mon Sep 17 00:00:00 2001 +From: Andy Gross +Date: Thu, 9 Mar 2017 08:19:18 +0100 +Subject: [PATCH 32/69] phy: add qcom dwc3 phy + +Signed-off-by: Andy Gross +--- + drivers/phy/Kconfig | 12 + + drivers/phy/Makefile | 1 + + drivers/phy/phy-qcom-dwc3.c | 575 ++++++++++++++++++++++++++++++++++++++++++++ + 3 files changed, 588 insertions(+) + create mode 100644 drivers/phy/phy-qcom-dwc3.c + +--- a/drivers/phy/Kconfig ++++ b/drivers/phy/Kconfig +@@ -490,4 +490,16 @@ config PHY_NS2_PCIE + help + Enable this to support the Broadcom Northstar2 PCIe PHY. + If unsure, say N. ++ ++config PHY_QCOM_DWC3 ++ tristate "QCOM DWC3 USB PHY support" ++ depends on ARCH_QCOM ++ depends on HAS_IOMEM ++ depends on OF ++ select GENERIC_PHY ++ help ++ This option enables support for the Synopsis PHYs present inside the ++ Qualcomm USB3.0 DWC3 controller. This driver supports both HS and SS ++ PHY controllers. ++ + endmenu +--- a/drivers/phy/Makefile ++++ b/drivers/phy/Makefile +@@ -60,3 +60,4 @@ obj-$(CONFIG_PHY_PISTACHIO_USB) += phy-pistachio-usb.o + obj-$(CONFIG_PHY_CYGNUS_PCIE) += phy-bcm-cygnus-pcie.o + obj-$(CONFIG_ARCH_TEGRA) += tegra/ + obj-$(CONFIG_PHY_NS2_PCIE) += phy-bcm-ns2-pcie.o ++obj-$(CONFIG_PHY_QCOM_DWC3) += phy-qcom-dwc3.o +--- /dev/null ++++ b/drivers/phy/phy-qcom-dwc3.c +@@ -0,0 +1,575 @@ ++/* Copyright (c) 2014-2015, Code Aurora Forum. All rights reserved. ++ * ++ * This program is free software; you can redistribute it and/or modify ++ * it under the terms of the GNU General Public License version 2 and ++ * only version 2 as published by the Free Software Foundation. 
++ * ++* This program is distributed in the hope that it will be useful, ++* but WITHOUT ANY WARRANTY; without even the implied warranty of ++* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the ++* GNU General Public License for more details. ++*/ ++ ++#include ++#include ++#include ++#include ++#include ++#include ++#include ++#include ++ ++/** ++ * USB QSCRATCH Hardware registers ++ */ ++#define QSCRATCH_GENERAL_CFG (0x08) ++#define HSUSB_PHY_CTRL_REG (0x10) ++ ++/* PHY_CTRL_REG */ ++#define HSUSB_CTRL_DMSEHV_CLAMP BIT(24) ++#define HSUSB_CTRL_USB2_SUSPEND BIT(23) ++#define HSUSB_CTRL_UTMI_CLK_EN BIT(21) ++#define HSUSB_CTRL_UTMI_OTG_VBUS_VALID BIT(20) ++#define HSUSB_CTRL_USE_CLKCORE BIT(18) ++#define HSUSB_CTRL_DPSEHV_CLAMP BIT(17) ++#define HSUSB_CTRL_COMMONONN BIT(11) ++#define HSUSB_CTRL_ID_HV_CLAMP BIT(9) ++#define HSUSB_CTRL_OTGSESSVLD_CLAMP BIT(8) ++#define HSUSB_CTRL_CLAMP_EN BIT(7) ++#define HSUSB_CTRL_RETENABLEN BIT(1) ++#define HSUSB_CTRL_POR BIT(0) ++ ++/* QSCRATCH_GENERAL_CFG */ ++#define HSUSB_GCFG_XHCI_REV BIT(2) ++ ++/** ++ * USB QSCRATCH Hardware registers ++ */ ++#define SSUSB_PHY_CTRL_REG (0x00) ++#define SSUSB_PHY_PARAM_CTRL_1 (0x04) ++#define SSUSB_PHY_PARAM_CTRL_2 (0x08) ++#define CR_PROTOCOL_DATA_IN_REG (0x0c) ++#define CR_PROTOCOL_DATA_OUT_REG (0x10) ++#define CR_PROTOCOL_CAP_ADDR_REG (0x14) ++#define CR_PROTOCOL_CAP_DATA_REG (0x18) ++#define CR_PROTOCOL_READ_REG (0x1c) ++#define CR_PROTOCOL_WRITE_REG (0x20) ++ ++/* PHY_CTRL_REG */ ++#define SSUSB_CTRL_REF_USE_PAD BIT(28) ++#define SSUSB_CTRL_TEST_POWERDOWN BIT(27) ++#define SSUSB_CTRL_LANE0_PWR_PRESENT BIT(24) ++#define SSUSB_CTRL_SS_PHY_EN BIT(8) ++#define SSUSB_CTRL_SS_PHY_RESET BIT(7) ++ ++/* SSPHY control registers */ ++#define SSPHY_CTRL_RX_OVRD_IN_HI(lane) (0x1006 + 0x100 * lane) ++#define SSPHY_CTRL_TX_OVRD_DRV_LO(lane) (0x1002 + 0x100 * lane) ++ ++/* SSPHY SoC version specific values */ ++#define SSPHY_RX_EQ_VALUE 4 /* Override value for rx_eq */ ++#define 
SSPHY_TX_DEEMPH_3_5DB 23 /* Override value for transmit ++ preemphasis */ ++#define SSPHY_MPLL_VALUE 0 /* Override value for mpll */ ++ ++/* QSCRATCH PHY_PARAM_CTRL1 fields */ ++#define PHY_PARAM_CTRL1_TX_FULL_SWING_MASK 0x07f00000u ++#define PHY_PARAM_CTRL1_TX_DEEMPH_6DB_MASK 0x000fc000u ++#define PHY_PARAM_CTRL1_TX_DEEMPH_3_5DB_MASK 0x00003f00u ++#define PHY_PARAM_CTRL1_LOS_BIAS_MASK 0x000000f8u ++ ++#define PHY_PARAM_CTRL1_MASK \ ++ (PHY_PARAM_CTRL1_TX_FULL_SWING_MASK | \ ++ PHY_PARAM_CTRL1_TX_DEEMPH_6DB_MASK | \ ++ PHY_PARAM_CTRL1_TX_DEEMPH_3_5DB_MASK | \ ++ PHY_PARAM_CTRL1_LOS_BIAS_MASK) ++ ++#define PHY_PARAM_CTRL1_TX_FULL_SWING(x) \ ++ (((x) << 20) & PHY_PARAM_CTRL1_TX_FULL_SWING_MASK) ++#define PHY_PARAM_CTRL1_TX_DEEMPH_6DB(x) \ ++ (((x) << 14) & PHY_PARAM_CTRL1_TX_DEEMPH_6DB_MASK) ++#define PHY_PARAM_CTRL1_TX_DEEMPH_3_5DB(x) \ ++ (((x) << 8) & PHY_PARAM_CTRL1_TX_DEEMPH_3_5DB_MASK) ++#define PHY_PARAM_CTRL1_LOS_BIAS(x) \ ++ (((x) << 3) & PHY_PARAM_CTRL1_LOS_BIAS_MASK) ++ ++/* RX OVRD IN HI bits */ ++#define RX_OVRD_IN_HI_RX_RESET_OVRD BIT(13) ++#define RX_OVRD_IN_HI_RX_RX_RESET BIT(12) ++#define RX_OVRD_IN_HI_RX_EQ_OVRD BIT(11) ++#define RX_OVRD_IN_HI_RX_EQ_MASK 0x0700 ++#define RX_OVRD_IN_HI_RX_EQ_SHIFT 8 ++#define RX_OVRD_IN_HI_RX_EQ_EN_OVRD BIT(7) ++#define RX_OVRD_IN_HI_RX_EQ_EN BIT(6) ++#define RX_OVRD_IN_HI_RX_LOS_FILTER_OVRD BIT(5) ++#define RX_OVRD_IN_HI_RX_LOS_FILTER_MASK 0x0018 ++#define RX_OVRD_IN_HI_RX_RATE_OVRD BIT(2) ++#define RX_OVRD_IN_HI_RX_RATE_MASK 0x0003 ++ ++/* TX OVRD DRV LO register bits */ ++#define TX_OVRD_DRV_LO_AMPLITUDE_MASK 0x007F ++#define TX_OVRD_DRV_LO_PREEMPH_MASK 0x3F80 ++#define TX_OVRD_DRV_LO_PREEMPH_SHIFT 7 ++#define TX_OVRD_DRV_LO_EN BIT(14) ++ ++/* SS CAP register bits */ ++#define SS_CR_CAP_ADDR_REG BIT(0) ++#define SS_CR_CAP_DATA_REG BIT(0) ++#define SS_CR_READ_REG BIT(0) ++#define SS_CR_WRITE_REG BIT(0) ++ ++struct qcom_dwc3_usb_phy { ++ void __iomem *base; ++ struct device *dev; ++ struct clk *xo_clk; ++ struct clk 
*ref_clk; ++ u32 rx_eq; ++ u32 tx_deamp_3_5db; ++ u32 mpll; ++}; ++ ++struct qcom_dwc3_phy_drvdata { ++ struct phy_ops ops; ++ u32 clk_rate; ++}; ++ ++/** ++ * Write register and read back masked value to confirm it is written ++ * ++ * @base - QCOM DWC3 PHY base virtual address. ++ * @offset - register offset. ++ * @mask - register bitmask specifying what should be updated ++ * @val - value to write. ++ */ ++static inline void qcom_dwc3_phy_write_readback( ++ struct qcom_dwc3_usb_phy *phy_dwc3, u32 offset, ++ const u32 mask, u32 val) ++{ ++ u32 write_val, tmp = readl(phy_dwc3->base + offset); ++ ++ tmp &= ~mask; /* retain other bits */ ++ write_val = tmp | val; ++ ++ writel(write_val, phy_dwc3->base + offset); ++ ++ /* Read back to see if val was written */ ++ tmp = readl(phy_dwc3->base + offset); ++ tmp &= mask; /* clear other bits */ ++ ++ if (tmp != val) ++ dev_err(phy_dwc3->dev, "write: %x to QSCRATCH: %x FAILED\n", ++ val, offset); ++} ++ ++static int wait_for_latch(void __iomem *addr) ++{ ++ u32 retry = 10; ++ ++ while (true) { ++ if (!readl(addr)) ++ break; ++ ++ if (--retry == 0) ++ return -ETIMEDOUT; ++ ++ usleep_range(10, 20); ++ } ++ ++ return 0; ++} ++ ++/** ++ * Write SSPHY register ++ * ++ * @base - QCOM DWC3 PHY base virtual address. ++ * @addr - SSPHY address to write. ++ * @val - value to write. 
++ */ ++static int qcom_dwc3_ss_write_phycreg(struct qcom_dwc3_usb_phy *phy_dwc3, ++ u32 addr, u32 val) ++{ ++ int ret; ++ ++ writel(addr, phy_dwc3->base + CR_PROTOCOL_DATA_IN_REG); ++ writel(SS_CR_CAP_ADDR_REG, phy_dwc3->base + CR_PROTOCOL_CAP_ADDR_REG); ++ ++ ret = wait_for_latch(phy_dwc3->base + CR_PROTOCOL_CAP_ADDR_REG); ++ if (ret) ++ goto err_wait; ++ ++ writel(val, phy_dwc3->base + CR_PROTOCOL_DATA_IN_REG); ++ writel(SS_CR_CAP_DATA_REG, phy_dwc3->base + CR_PROTOCOL_CAP_DATA_REG); ++ ++ ret = wait_for_latch(phy_dwc3->base + CR_PROTOCOL_CAP_DATA_REG); ++ if (ret) ++ goto err_wait; ++ ++ writel(SS_CR_WRITE_REG, phy_dwc3->base + CR_PROTOCOL_WRITE_REG); ++ ++ ret = wait_for_latch(phy_dwc3->base + CR_PROTOCOL_WRITE_REG); ++ ++err_wait: ++ if (ret) ++ dev_err(phy_dwc3->dev, "timeout waiting for latch\n"); ++ return ret; ++} ++ ++/** ++ * Read SSPHY register. ++ * ++ * @base - QCOM DWC3 PHY base virtual address. ++ * @addr - SSPHY address to read. ++ */ ++static int qcom_dwc3_ss_read_phycreg(void __iomem *base, u32 addr, u32 *val) ++{ ++ int ret; ++ ++ writel(addr, base + CR_PROTOCOL_DATA_IN_REG); ++ writel(SS_CR_CAP_ADDR_REG, base + CR_PROTOCOL_CAP_ADDR_REG); ++ ++ ret = wait_for_latch(base + CR_PROTOCOL_CAP_ADDR_REG); ++ if (ret) ++ goto err_wait; ++ ++ /* ++ * Due to hardware bug, first read of SSPHY register might be ++ * incorrect. Hence as workaround, SW should perform SSPHY register ++ * read twice, but use only second read and ignore first read. 
++ */ ++ writel(SS_CR_READ_REG, base + CR_PROTOCOL_READ_REG); ++ ++ ret = wait_for_latch(base + CR_PROTOCOL_READ_REG); ++ if (ret) ++ goto err_wait; ++ ++ /* throwaway read */ ++ readl(base + CR_PROTOCOL_DATA_OUT_REG); ++ ++ writel(SS_CR_READ_REG, base + CR_PROTOCOL_READ_REG); ++ ++ ret = wait_for_latch(base + CR_PROTOCOL_READ_REG); ++ if (ret) ++ goto err_wait; ++ ++ *val = readl(base + CR_PROTOCOL_DATA_OUT_REG); ++ ++err_wait: ++ return ret; ++} ++ ++static int qcom_dwc3_hs_phy_init(struct phy *phy) ++{ ++ struct qcom_dwc3_usb_phy *phy_dwc3 = phy_get_drvdata(phy); ++ int ret; ++ u32 val; ++ ++ ret = clk_prepare_enable(phy_dwc3->xo_clk); ++ if (ret) ++ return ret; ++ ++ ret = clk_prepare_enable(phy_dwc3->ref_clk); ++ if (ret) { ++ clk_disable_unprepare(phy_dwc3->xo_clk); ++ return ret; ++ } ++ ++ /* ++ * HSPHY Initialization: Enable UTMI clock, select 19.2MHz fsel ++ * enable clamping, and disable RETENTION (power-on default is ENABLED) ++ */ ++ val = HSUSB_CTRL_DPSEHV_CLAMP | HSUSB_CTRL_DMSEHV_CLAMP | ++ HSUSB_CTRL_RETENABLEN | HSUSB_CTRL_COMMONONN | ++ HSUSB_CTRL_OTGSESSVLD_CLAMP | HSUSB_CTRL_ID_HV_CLAMP | ++ HSUSB_CTRL_DPSEHV_CLAMP | HSUSB_CTRL_UTMI_OTG_VBUS_VALID | ++ HSUSB_CTRL_UTMI_CLK_EN | HSUSB_CTRL_CLAMP_EN | 0x70; ++ ++ /* use core clock if external reference is not present */ ++ if (!phy_dwc3->xo_clk) ++ val |= HSUSB_CTRL_USE_CLKCORE; ++ ++ writel(val, phy_dwc3->base + HSUSB_PHY_CTRL_REG); ++ usleep_range(2000, 2200); ++ ++ /* Disable (bypass) VBUS and ID filters */ ++ writel(HSUSB_GCFG_XHCI_REV, phy_dwc3->base + QSCRATCH_GENERAL_CFG); ++ ++ return 0; ++} ++ ++static int qcom_dwc3_hs_phy_exit(struct phy *phy) ++{ ++ struct qcom_dwc3_usb_phy *phy_dwc3 = phy_get_drvdata(phy); ++ ++ clk_disable_unprepare(phy_dwc3->ref_clk); ++ clk_disable_unprepare(phy_dwc3->xo_clk); ++ ++ return 0; ++} ++ ++static int qcom_dwc3_ss_phy_init(struct phy *phy) ++{ ++ struct qcom_dwc3_usb_phy *phy_dwc3 = phy_get_drvdata(phy); ++ int ret; ++ u32 data = 0; ++ ++ ret = 
clk_prepare_enable(phy_dwc3->xo_clk); ++ if (ret) ++ return ret; ++ ++ ret = clk_prepare_enable(phy_dwc3->ref_clk); ++ if (ret) { ++ clk_disable_unprepare(phy_dwc3->xo_clk); ++ return ret; ++ } ++ ++ /* reset phy */ ++ data = readl(phy_dwc3->base + SSUSB_PHY_CTRL_REG); ++ writel(data | SSUSB_CTRL_SS_PHY_RESET, ++ phy_dwc3->base + SSUSB_PHY_CTRL_REG); ++ usleep_range(2000, 2200); ++ writel(data, phy_dwc3->base + SSUSB_PHY_CTRL_REG); ++ ++ /* clear REF_PAD if we don't have XO clk */ ++ if (!phy_dwc3->xo_clk) ++ data &= ~SSUSB_CTRL_REF_USE_PAD; ++ else ++ data |= SSUSB_CTRL_REF_USE_PAD; ++ ++ writel(data, phy_dwc3->base + SSUSB_PHY_CTRL_REG); ++ ++ /* wait for ref clk to become stable, this can take up to 30ms */ ++ msleep(30); ++ ++ data |= SSUSB_CTRL_SS_PHY_EN | SSUSB_CTRL_LANE0_PWR_PRESENT; ++ writel(data, phy_dwc3->base + SSUSB_PHY_CTRL_REG); ++ ++ /* ++ * WORKAROUND: There is SSPHY suspend bug due to which USB enumerates ++ * in HS mode instead of SS mode. Workaround it by asserting ++ * LANE0.TX_ALT_BLOCK.EN_ALT_BUS to enable TX to use alt bus mode ++ */ ++ ret = qcom_dwc3_ss_read_phycreg(phy_dwc3->base, 0x102D, &data); ++ if (ret) ++ goto err_phy_trans; ++ ++ data |= (1 << 7); ++ ret = qcom_dwc3_ss_write_phycreg(phy_dwc3, 0x102D, data); ++ if (ret) ++ goto err_phy_trans; ++ ++ ret = qcom_dwc3_ss_read_phycreg(phy_dwc3->base, 0x1010, &data); ++ if (ret) ++ goto err_phy_trans; ++ ++ data &= ~0xff0; ++ data |= 0x20; ++ ret = qcom_dwc3_ss_write_phycreg(phy_dwc3, 0x1010, data); ++ if (ret) ++ goto err_phy_trans; ++ ++ /* ++ * Fix RX Equalization setting as follows ++ * LANE0.RX_OVRD_IN_HI. 
RX_EQ_EN set to 0 ++ * LANE0.RX_OVRD_IN_HI.RX_EQ_EN_OVRD set to 1 ++ * LANE0.RX_OVRD_IN_HI.RX_EQ set based on SoC version ++ * LANE0.RX_OVRD_IN_HI.RX_EQ_OVRD set to 1 ++ */ ++ ret = qcom_dwc3_ss_read_phycreg(phy_dwc3->base, ++ SSPHY_CTRL_RX_OVRD_IN_HI(0), &data); ++ if (ret) ++ goto err_phy_trans; ++ ++ data &= ~RX_OVRD_IN_HI_RX_EQ_EN; ++ data |= RX_OVRD_IN_HI_RX_EQ_EN_OVRD; ++ data &= ~RX_OVRD_IN_HI_RX_EQ_MASK; ++ data |= phy_dwc3->rx_eq << RX_OVRD_IN_HI_RX_EQ_SHIFT; ++ data |= RX_OVRD_IN_HI_RX_EQ_OVRD; ++ ret = qcom_dwc3_ss_write_phycreg(phy_dwc3, ++ SSPHY_CTRL_RX_OVRD_IN_HI(0), data); ++ if (ret) ++ goto err_phy_trans; ++ ++ /* ++ * Set EQ and TX launch amplitudes as follows ++ * LANE0.TX_OVRD_DRV_LO.PREEMPH set based on SoC version ++ * LANE0.TX_OVRD_DRV_LO.AMPLITUDE set to 110 ++ * LANE0.TX_OVRD_DRV_LO.EN set to 1. ++ */ ++ ret = qcom_dwc3_ss_read_phycreg(phy_dwc3->base, ++ SSPHY_CTRL_TX_OVRD_DRV_LO(0), &data); ++ if (ret) ++ goto err_phy_trans; ++ ++ data &= ~TX_OVRD_DRV_LO_PREEMPH_MASK; ++ data |= phy_dwc3->tx_deamp_3_5db << TX_OVRD_DRV_LO_PREEMPH_SHIFT; ++ data &= ~TX_OVRD_DRV_LO_AMPLITUDE_MASK; ++ data |= 0x6E; ++ data |= TX_OVRD_DRV_LO_EN; ++ ret = qcom_dwc3_ss_write_phycreg(phy_dwc3, ++ SSPHY_CTRL_TX_OVRD_DRV_LO(0), data); ++ if (ret) ++ goto err_phy_trans; ++ ++ qcom_dwc3_ss_write_phycreg(phy_dwc3, 0x30, phy_dwc3->mpll); ++ ++ /* ++ * Set the QSCRATCH PHY_PARAM_CTRL1 parameters as follows ++ * TX_FULL_SWING [26:20] amplitude to 110 ++ * TX_DEEMPH_6DB [19:14] to 32 ++ * TX_DEEMPH_3_5DB [13:8] set based on SoC version ++ * LOS_BIAS [7:3] to 9 ++ */ ++ data = readl(phy_dwc3->base + SSUSB_PHY_PARAM_CTRL_1); ++ ++ data &= ~PHY_PARAM_CTRL1_MASK; ++ ++ data |= PHY_PARAM_CTRL1_TX_FULL_SWING(0x6e) | ++ PHY_PARAM_CTRL1_TX_DEEMPH_6DB(0x20) | ++ PHY_PARAM_CTRL1_TX_DEEMPH_3_5DB(phy_dwc3->tx_deamp_3_5db) | ++ PHY_PARAM_CTRL1_LOS_BIAS(0x9); ++ ++ qcom_dwc3_phy_write_readback(phy_dwc3, SSUSB_PHY_PARAM_CTRL_1, ++ PHY_PARAM_CTRL1_MASK, data); ++ ++err_phy_trans: ++ 
return ret; ++} ++ ++static int qcom_dwc3_ss_phy_exit(struct phy *phy) ++{ ++ struct qcom_dwc3_usb_phy *phy_dwc3 = phy_get_drvdata(phy); ++ ++ /* Sequence to put SSPHY in low power state: ++ * 1. Clear REF_PHY_EN in PHY_CTRL_REG ++ * 2. Clear REF_USE_PAD in PHY_CTRL_REG ++ * 3. Set TEST_POWERED_DOWN in PHY_CTRL_REG to enable PHY retention ++ */ ++ qcom_dwc3_phy_write_readback(phy_dwc3, SSUSB_PHY_CTRL_REG, ++ SSUSB_CTRL_SS_PHY_EN, 0x0); ++ qcom_dwc3_phy_write_readback(phy_dwc3, SSUSB_PHY_CTRL_REG, ++ SSUSB_CTRL_REF_USE_PAD, 0x0); ++ qcom_dwc3_phy_write_readback(phy_dwc3, SSUSB_PHY_CTRL_REG, ++ SSUSB_CTRL_TEST_POWERDOWN, 0x0); ++ ++ clk_disable_unprepare(phy_dwc3->ref_clk); ++ clk_disable_unprepare(phy_dwc3->xo_clk); ++ ++ return 0; ++} ++ ++static const struct qcom_dwc3_phy_drvdata qcom_dwc3_hs_drvdata = { ++ .ops = { ++ .init = qcom_dwc3_hs_phy_init, ++ .exit = qcom_dwc3_hs_phy_exit, ++ .owner = THIS_MODULE, ++ }, ++ .clk_rate = 60000000, ++}; ++ ++static const struct qcom_dwc3_phy_drvdata qcom_dwc3_ss_drvdata = { ++ .ops = { ++ .init = qcom_dwc3_ss_phy_init, ++ .exit = qcom_dwc3_ss_phy_exit, ++ .owner = THIS_MODULE, ++ }, ++ .clk_rate = 125000000, ++}; ++ ++static const struct of_device_id qcom_dwc3_phy_table[] = { ++ { .compatible = "qcom,dwc3-hs-usb-phy", .data = &qcom_dwc3_hs_drvdata }, ++ { .compatible = "qcom,dwc3-ss-usb-phy", .data = &qcom_dwc3_ss_drvdata }, ++ { /* Sentinel */ } ++}; ++MODULE_DEVICE_TABLE(of, qcom_dwc3_phy_table); ++ ++static int qcom_dwc3_phy_probe(struct platform_device *pdev) ++{ ++ struct qcom_dwc3_usb_phy *phy_dwc3; ++ struct phy_provider *phy_provider; ++ struct phy *generic_phy; ++ struct resource *res; ++ const struct of_device_id *match; ++ const struct qcom_dwc3_phy_drvdata *data; ++ struct device_node *np; ++ ++ phy_dwc3 = devm_kzalloc(&pdev->dev, sizeof(*phy_dwc3), GFP_KERNEL); ++ if (!phy_dwc3) ++ return -ENOMEM; ++ ++ match = of_match_node(qcom_dwc3_phy_table, pdev->dev.of_node); ++ data = match->data; ++ ++ phy_dwc3->dev = 
&pdev->dev; ++ ++ res = platform_get_resource(pdev, IORESOURCE_MEM, 0); ++ phy_dwc3->base = devm_ioremap_resource(phy_dwc3->dev, res); ++ if (IS_ERR(phy_dwc3->base)) ++ return PTR_ERR(phy_dwc3->base); ++ ++ phy_dwc3->ref_clk = devm_clk_get(phy_dwc3->dev, "ref"); ++ if (IS_ERR(phy_dwc3->ref_clk)) { ++ dev_dbg(phy_dwc3->dev, "cannot get reference clock\n"); ++ return PTR_ERR(phy_dwc3->ref_clk); ++ } ++ ++ clk_set_rate(phy_dwc3->ref_clk, data->clk_rate); ++ ++ phy_dwc3->xo_clk = devm_clk_get(phy_dwc3->dev, "xo"); ++ if (IS_ERR(phy_dwc3->xo_clk)) { ++ dev_dbg(phy_dwc3->dev, "cannot get TCXO clock\n"); ++ phy_dwc3->xo_clk = NULL; ++ } ++ ++ /* Parse device node to probe HSIO settings */ ++ np = of_node_get(pdev->dev.of_node); ++ if (!of_compat_cmp(match->compatible, "qcom,dwc3-ss-usb-phy", ++ strlen(match->compatible))) { ++ ++ if (of_property_read_u32(np, "rx_eq", &phy_dwc3->rx_eq) || ++ of_property_read_u32(np, "tx_deamp_3_5db", ++ &phy_dwc3->tx_deamp_3_5db) || ++ of_property_read_u32(np, "mpll", &phy_dwc3->mpll)) { ++ ++ dev_err(phy_dwc3->dev, "cannot get HSIO settings from device node, using default values\n"); ++ ++ /* Default HSIO settings */ ++ phy_dwc3->rx_eq = SSPHY_RX_EQ_VALUE; ++ phy_dwc3->tx_deamp_3_5db = SSPHY_TX_DEEMPH_3_5DB; ++ phy_dwc3->mpll = SSPHY_MPLL_VALUE; ++ } ++ } ++ ++ generic_phy = devm_phy_create(phy_dwc3->dev, pdev->dev.of_node, ++ &data->ops); ++ ++ if (IS_ERR(generic_phy)) ++ return PTR_ERR(generic_phy); ++ ++ phy_set_drvdata(generic_phy, phy_dwc3); ++ platform_set_drvdata(pdev, phy_dwc3); ++ ++ phy_provider = devm_of_phy_provider_register(phy_dwc3->dev, ++ of_phy_simple_xlate); ++ ++ if (IS_ERR(phy_provider)) ++ return PTR_ERR(phy_provider); ++ ++ return 0; ++} ++ ++static struct platform_driver qcom_dwc3_phy_driver = { ++ .probe = qcom_dwc3_phy_probe, ++ .driver = { ++ .name = "qcom-dwc3-usb-phy", ++ .owner = THIS_MODULE, ++ .of_match_table = qcom_dwc3_phy_table, ++ }, ++}; ++ ++module_platform_driver(qcom_dwc3_phy_driver); ++ 
++MODULE_ALIAS("platform:phy-qcom-dwc3"); ++MODULE_LICENSE("GPL v2"); ++MODULE_AUTHOR("Andy Gross "); ++MODULE_AUTHOR("Ivan T. Ivanov "); ++MODULE_DESCRIPTION("DesignWare USB3 QCOM PHY driver"); diff --git a/target/linux/ipq40xx/patches-4.9/0033-ARM-qcom-automatically-select-PCI_DOMAINS-if-PCI-is-.patch b/target/linux/ipq40xx/patches-4.9/0033-ARM-qcom-automatically-select-PCI_DOMAINS-if-PCI-is-.patch new file mode 100644 index 000000000..a6c7953aa --- /dev/null +++ b/target/linux/ipq40xx/patches-4.9/0033-ARM-qcom-automatically-select-PCI_DOMAINS-if-PCI-is-.patch @@ -0,0 +1,29 @@ +From 48051ece78136e4235a2415a52797db56f8a4478 Mon Sep 17 00:00:00 2001 +From: Mathieu Olivari +Date: Tue, 21 Apr 2015 19:09:07 -0700 +Subject: [PATCH 33/69] ARM: qcom: automatically select PCI_DOMAINS if PCI is + enabled + +If multiple PCIe devices are present in the system, the kernel will +panic at boot time when trying to scan the PCI buses. This happens on +IPQ806x based platforms, which has 3 PCIe ports. + +Enabling this option allows the kernel to assign the pci-domains +according to the device-tree content. This allows multiple PCIe +controllers to coexist in the system. + +Signed-off-by: Mathieu Olivari +--- + arch/arm/mach-qcom/Kconfig | 1 + + 1 file changed, 1 insertion(+) + +--- a/arch/arm/mach-qcom/Kconfig ++++ b/arch/arm/mach-qcom/Kconfig +@@ -6,6 +6,7 @@ menuconfig ARCH_QCOM + select ARM_AMBA + select PINCTRL + select QCOM_SCM if SMP ++ select PCI_DOMAINS if PCI + help + Support for Qualcomm's devicetree based systems. 
+ diff --git a/target/linux/ipq40xx/patches-4.9/0034-ARM-Add-Krait-L2-register-accessor-functions.patch b/target/linux/ipq40xx/patches-4.9/0034-ARM-Add-Krait-L2-register-accessor-functions.patch new file mode 100644 index 000000000..019170b6d --- /dev/null +++ b/target/linux/ipq40xx/patches-4.9/0034-ARM-Add-Krait-L2-register-accessor-functions.patch @@ -0,0 +1,147 @@ +From patchwork Fri Dec 8 09:42:19 2017 +Content-Type: text/plain; charset="utf-8" +MIME-Version: 1.0 +Content-Transfer-Encoding: 7bit +Subject: [v4,01/12] ARM: Add Krait L2 register accessor functions +From: Sricharan R +X-Patchwork-Id: 10102101 +Message-Id: <1512726150-7204-2-git-send-email-sricharan@codeaurora.org> +To: mturquette@baylibre.com, sboyd@codeaurora.org, + devicetree@vger.kernel.org, linux-pm@vger.kernel.org, + linux-arm-msm@vger.kernel.org, linux-kernel@vger.kernel.org, + viresh.kumar@linaro.org, linux-arm-kernel@lists.infradead.org +Cc: sricharan@codeaurora.org, Mark Rutland , + Russell King , + Courtney Cavin +Date: Fri, 8 Dec 2017 15:12:19 +0530 + +From: Stephen Boyd + +Krait CPUs have a handful of L2 cache controller registers that +live behind a cp15 based indirection register. First you program +the indirection register (l2cpselr) to point the L2 'window' +register (l2cpdr) at what you want to read/write. Then you +read/write the 'window' register to do what you want. The +l2cpselr register is not banked per-cpu so we must lock around +accesses to it to prevent other CPUs from re-pointing l2cpdr +underneath us. 
+ +Cc: Mark Rutland +Cc: Russell King +Cc: Courtney Cavin +Signed-off-by: Stephen Boyd +--- + arch/arm/common/Kconfig | 3 ++ + arch/arm/common/Makefile | 1 + + arch/arm/common/krait-l2-accessors.c | 58 +++++++++++++++++++++++++++++++ + arch/arm/include/asm/krait-l2-accessors.h | 20 +++++++++++ + 4 files changed, 82 insertions(+) + create mode 100644 arch/arm/common/krait-l2-accessors.c + create mode 100644 arch/arm/include/asm/krait-l2-accessors.h + +--- a/arch/arm/common/Kconfig ++++ b/arch/arm/common/Kconfig +@@ -9,6 +9,9 @@ config DMABOUNCE + bool + select ZONE_DMA + ++config KRAIT_L2_ACCESSORS ++ bool ++ + config SHARP_LOCOMO + bool + +--- a/arch/arm/common/Makefile ++++ b/arch/arm/common/Makefile +@@ -7,6 +7,7 @@ obj-y += firmware.o + obj-$(CONFIG_ICST) += icst.o + obj-$(CONFIG_SA1111) += sa1111.o + obj-$(CONFIG_DMABOUNCE) += dmabounce.o ++obj-$(CONFIG_KRAIT_L2_ACCESSORS) += krait-l2-accessors.o + obj-$(CONFIG_SHARP_LOCOMO) += locomo.o + obj-$(CONFIG_SHARP_PARAM) += sharpsl_param.o + obj-$(CONFIG_SHARP_SCOOP) += scoop.o +--- /dev/null ++++ b/arch/arm/common/krait-l2-accessors.c +@@ -0,0 +1,58 @@ ++/* ++ * Copyright (c) 2011-2013, The Linux Foundation. All rights reserved. ++ * ++ * This program is free software; you can redistribute it and/or modify ++ * it under the terms of the GNU General Public License version 2 and ++ * only version 2 as published by the Free Software Foundation. ++ * ++ * This program is distributed in the hope that it will be useful, ++ * but WITHOUT ANY WARRANTY; without even the implied warranty of ++ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the ++ * GNU General Public License for more details. 
++ */ ++ ++#include ++#include ++ ++#include ++#include ++ ++static DEFINE_RAW_SPINLOCK(krait_l2_lock); ++ ++void krait_set_l2_indirect_reg(u32 addr, u32 val) ++{ ++ unsigned long flags; ++ ++ raw_spin_lock_irqsave(&krait_l2_lock, flags); ++ /* ++ * Select the L2 window by poking l2cpselr, then write to the window ++ * via l2cpdr. ++ */ ++ asm volatile ("mcr p15, 3, %0, c15, c0, 6 @ l2cpselr" : : "r" (addr)); ++ isb(); ++ asm volatile ("mcr p15, 3, %0, c15, c0, 7 @ l2cpdr" : : "r" (val)); ++ isb(); ++ ++ raw_spin_unlock_irqrestore(&krait_l2_lock, flags); ++} ++EXPORT_SYMBOL(krait_set_l2_indirect_reg); ++ ++u32 krait_get_l2_indirect_reg(u32 addr) ++{ ++ u32 val; ++ unsigned long flags; ++ ++ raw_spin_lock_irqsave(&krait_l2_lock, flags); ++ /* ++ * Select the L2 window by poking l2cpselr, then read from the window ++ * via l2cpdr. ++ */ ++ asm volatile ("mcr p15, 3, %0, c15, c0, 6 @ l2cpselr" : : "r" (addr)); ++ isb(); ++ asm volatile ("mrc p15, 3, %0, c15, c0, 7 @ l2cpdr" : "=r" (val)); ++ ++ raw_spin_unlock_irqrestore(&krait_l2_lock, flags); ++ ++ return val; ++} ++EXPORT_SYMBOL(krait_get_l2_indirect_reg); +--- /dev/null ++++ b/arch/arm/include/asm/krait-l2-accessors.h +@@ -0,0 +1,20 @@ ++/* ++ * Copyright (c) 2011-2013, The Linux Foundation. All rights reserved. ++ * ++ * This program is free software; you can redistribute it and/or modify ++ * it under the terms of the GNU General Public License version 2 and ++ * only version 2 as published by the Free Software Foundation. ++ * ++ * This program is distributed in the hope that it will be useful, ++ * but WITHOUT ANY WARRANTY; without even the implied warranty of ++ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the ++ * GNU General Public License for more details. 
++ */ ++ ++#ifndef __ASMARM_KRAIT_L2_ACCESSORS_H ++#define __ASMARM_KRAIT_L2_ACCESSORS_H ++ ++extern void krait_set_l2_indirect_reg(u32 addr, u32 val); ++extern u32 krait_get_l2_indirect_reg(u32 addr); ++ ++#endif diff --git a/target/linux/ipq40xx/patches-4.9/0035-clk-mux-Split-out-register-accessors-for-reuse.patch b/target/linux/ipq40xx/patches-4.9/0035-clk-mux-Split-out-register-accessors-for-reuse.patch new file mode 100644 index 000000000..5a64e6c56 --- /dev/null +++ b/target/linux/ipq40xx/patches-4.9/0035-clk-mux-Split-out-register-accessors-for-reuse.patch @@ -0,0 +1,195 @@ +From patchwork Fri Dec 8 09:42:20 2017 +Content-Type: text/plain; charset="utf-8" +MIME-Version: 1.0 +Content-Transfer-Encoding: 7bit +Subject: [v4,02/12] clk: mux: Split out register accessors for reuse +From: Sricharan R +X-Patchwork-Id: 10102103 +Message-Id: <1512726150-7204-3-git-send-email-sricharan@codeaurora.org> +To: mturquette@baylibre.com, sboyd@codeaurora.org, + devicetree@vger.kernel.org, linux-pm@vger.kernel.org, + linux-arm-msm@vger.kernel.org, linux-kernel@vger.kernel.org, + viresh.kumar@linaro.org, linux-arm-kernel@lists.infradead.org +Cc: sricharan@codeaurora.org +Date: Fri, 8 Dec 2017 15:12:20 +0530 + +From: Stephen Boyd + +We want to reuse the logic in clk-mux.c for other clock drivers +that don't use readl as register accessors. Fortunately, there +really isn't much to the mux code besides the table indirection +and quirk flags if you assume any bit shifting and masking has +been done already. Pull that logic out into reusable functions +that operate on an optional table and some flags so that other +drivers can use the same logic. 
+ +Signed-off-by: Stephen Boyd +--- + drivers/clk/clk-mux.c | 75 +++++++++++++++++++++++++++----------------- + include/linux/clk-provider.h | 9 ++++-- + 2 files changed, 54 insertions(+), 30 deletions(-) + +--- a/drivers/clk/clk-mux.c ++++ b/drivers/clk/clk-mux.c +@@ -26,35 +26,24 @@ + * parent - parent is adjustable through clk_set_parent + */ + +-static u8 clk_mux_get_parent(struct clk_hw *hw) ++unsigned int clk_mux_get_parent(struct clk_hw *hw, unsigned int val, ++ unsigned int *table, unsigned long flags) + { +- struct clk_mux *mux = to_clk_mux(hw); + int num_parents = clk_hw_get_num_parents(hw); +- u32 val; +- +- /* +- * FIXME need a mux-specific flag to determine if val is bitwise or numeric +- * e.g. sys_clkin_ck's clksel field is 3 bits wide, but ranges from 0x1 +- * to 0x7 (index starts at one) +- * OTOH, pmd_trace_clk_mux_ck uses a separate bit for each clock, so +- * val = 0x4 really means "bit 2, index starts at bit 0" +- */ +- val = clk_readl(mux->reg) >> mux->shift; +- val &= mux->mask; + +- if (mux->table) { ++ if (table) { + int i; + + for (i = 0; i < num_parents; i++) +- if (mux->table[i] == val) ++ if (table[i] == val) + return i; + return -EINVAL; + } + +- if (val && (mux->flags & CLK_MUX_INDEX_BIT)) ++ if (val && (flags & CLK_MUX_INDEX_BIT)) + val = ffs(val) - 1; + +- if (val && (mux->flags & CLK_MUX_INDEX_ONE)) ++ if (val && (flags & CLK_MUX_INDEX_ONE)) + val--; + + if (val >= num_parents) +@@ -62,23 +51,53 @@ static u8 clk_mux_get_parent(struct clk_ + + return val; + } ++EXPORT_SYMBOL_GPL(clk_mux_get_parent); + +-static int clk_mux_set_parent(struct clk_hw *hw, u8 index) ++static u8 _clk_mux_get_parent(struct clk_hw *hw) + { + struct clk_mux *mux = to_clk_mux(hw); + u32 val; +- unsigned long flags = 0; + +- if (mux->table) { +- index = mux->table[index]; ++ /* ++ * FIXME need a mux-specific flag to determine if val is bitwise or ++ * numeric e.g. 
sys_clkin_ck's clksel field is 3 bits wide, ++ * but ranges from 0x1 to 0x7 (index starts at one) ++ * OTOH, pmd_trace_clk_mux_ck uses a separate bit for each clock, so ++ * val = 0x4 really means "bit 2, index starts at bit 0" ++ */ ++ val = clk_readl(mux->reg) >> mux->shift; ++ val &= mux->mask; ++ ++ return clk_mux_get_parent(hw, val, mux->table, mux->flags); ++} ++ ++unsigned int clk_mux_reindex(u8 index, unsigned int *table, ++ unsigned long flags) ++{ ++ unsigned int val = index; ++ ++ if (table) { ++ val = table[val]; + } else { +- if (mux->flags & CLK_MUX_INDEX_BIT) +- index = 1 << index; ++ if (flags & CLK_MUX_INDEX_BIT) ++ val = 1 << index; + +- if (mux->flags & CLK_MUX_INDEX_ONE) +- index++; ++ if (flags & CLK_MUX_INDEX_ONE) ++ val++; + } + ++ return val; ++} ++EXPORT_SYMBOL_GPL(clk_mux_reindex); ++ ++static int clk_mux_set_parent(struct clk_hw *hw, u8 index) ++{ ++ struct clk_mux *mux = to_clk_mux(hw); ++ u32 val; ++ unsigned long flags = 0; ++ ++ index = clk_mux_reindex(index, mux->table, mux->flags); ++ + if (mux->lock) + spin_lock_irqsave(mux->lock, flags); + else +@@ -102,14 +121,14 @@ static int clk_mux_set_parent(struct clk + } + + const struct clk_ops clk_mux_ops = { +- .get_parent = clk_mux_get_parent, ++ .get_parent = _clk_mux_get_parent, + .set_parent = clk_mux_set_parent, + .determine_rate = __clk_mux_determine_rate, + }; + EXPORT_SYMBOL_GPL(clk_mux_ops); + + const struct clk_ops clk_mux_ro_ops = { +- .get_parent = clk_mux_get_parent, ++ .get_parent = _clk_mux_get_parent, + }; + EXPORT_SYMBOL_GPL(clk_mux_ro_ops); + +@@ -117,7 +136,7 @@ struct clk_hw *clk_hw_register_mux_table + const char * const *parent_names, u8 num_parents, + unsigned long flags, + void __iomem *reg, u8 shift, u32 mask, +- u8 clk_mux_flags, u32 *table, spinlock_t *lock) ++ u8 clk_mux_flags, unsigned int *table, spinlock_t *lock) + { + struct clk_mux *mux; + struct clk_hw *hw; +--- a/include/linux/clk-provider.h ++++ b/include/linux/clk-provider.h +@@ -466,7 +466,7 @@ void 
clk_hw_unregister_divider(struct cl + struct clk_mux { + struct clk_hw hw; + void __iomem *reg; +- u32 *table; ++ unsigned int *table; + u32 mask; + u8 shift; + u8 flags; +@@ -484,6 +484,11 @@ struct clk_mux { + extern const struct clk_ops clk_mux_ops; + extern const struct clk_ops clk_mux_ro_ops; + ++unsigned int clk_mux_get_parent(struct clk_hw *hw, unsigned int val, ++ unsigned int *table, unsigned long flags); ++unsigned int clk_mux_reindex(u8 index, unsigned int *table, ++ unsigned long flags); ++ + struct clk *clk_register_mux(struct device *dev, const char *name, + const char * const *parent_names, u8 num_parents, + unsigned long flags, +@@ -504,7 +509,7 @@ struct clk_hw *clk_hw_register_mux_table + const char * const *parent_names, u8 num_parents, + unsigned long flags, + void __iomem *reg, u8 shift, u32 mask, +- u8 clk_mux_flags, u32 *table, spinlock_t *lock); ++ u8 clk_mux_flags, unsigned int *table, spinlock_t *lock); + + void clk_unregister_mux(struct clk *clk); + void clk_hw_unregister_mux(struct clk_hw *hw); diff --git a/target/linux/ipq40xx/patches-4.9/0038-clk-qcom-Add-support-for-High-Frequency-PLLs-HFPLLs.patch b/target/linux/ipq40xx/patches-4.9/0038-clk-qcom-Add-support-for-High-Frequency-PLLs-HFPLLs.patch new file mode 100644 index 000000000..70926143e --- /dev/null +++ b/target/linux/ipq40xx/patches-4.9/0038-clk-qcom-Add-support-for-High-Frequency-PLLs-HFPLLs.patch @@ -0,0 +1,352 @@ +From patchwork Fri Dec 8 09:42:21 2017 +Content-Type: text/plain; charset="utf-8" +MIME-Version: 1.0 +Content-Transfer-Encoding: 7bit +Subject: [v4,03/12] clk: qcom: Add support for High-Frequency PLLs (HFPLLs) +From: Sricharan R +X-Patchwork-Id: 10102083 +Message-Id: <1512726150-7204-4-git-send-email-sricharan@codeaurora.org> +To: mturquette@baylibre.com, sboyd@codeaurora.org, + devicetree@vger.kernel.org, linux-pm@vger.kernel.org, + linux-arm-msm@vger.kernel.org, linux-kernel@vger.kernel.org, + viresh.kumar@linaro.org, linux-arm-kernel@lists.infradead.org +Cc: 
sricharan@codeaurora.org +Date: Fri, 8 Dec 2017 15:12:21 +0530 + +From: Stephen Boyd + +HFPLLs are the main frequency source for Krait CPU clocks. Add +support for changing the rate of these PLLs. + +Signed-off-by: Stephen Boyd +--- + drivers/clk/qcom/Makefile | 1 + + drivers/clk/qcom/clk-hfpll.c | 253 +++++++++++++++++++++++++++++++++++++++++++ + drivers/clk/qcom/clk-hfpll.h | 54 +++++++++ + 3 files changed, 308 insertions(+) + create mode 100644 drivers/clk/qcom/clk-hfpll.c + create mode 100644 drivers/clk/qcom/clk-hfpll.h + +--- a/drivers/clk/qcom/Makefile ++++ b/drivers/clk/qcom/Makefile +@@ -9,6 +9,7 @@ clk-qcom-y += clk-rcg2.o + clk-qcom-y += clk-branch.o + clk-qcom-y += clk-regmap-divider.o + clk-qcom-y += clk-regmap-mux.o ++clk-qcom-y += clk-hfpll.o + clk-qcom-y += reset.o + clk-qcom-$(CONFIG_QCOM_GDSC) += gdsc.o + +--- /dev/null ++++ b/drivers/clk/qcom/clk-hfpll.c +@@ -0,0 +1,253 @@ ++/* ++ * Copyright (c) 2013-2014, The Linux Foundation. All rights reserved. ++ * ++ * This program is free software; you can redistribute it and/or modify ++ * it under the terms of the GNU General Public License version 2 and ++ * only version 2 as published by the Free Software Foundation. ++ * ++ * This program is distributed in the hope that it will be useful, ++ * but WITHOUT ANY WARRANTY; without even the implied warranty of ++ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the ++ * GNU General Public License for more details. ++ */ ++#include ++#include ++#include ++#include ++#include ++#include ++#include ++ ++#include "clk-regmap.h" ++#include "clk-hfpll.h" ++ ++#define PLL_OUTCTRL BIT(0) ++#define PLL_BYPASSNL BIT(1) ++#define PLL_RESET_N BIT(2) ++ ++/* Initialize a HFPLL at a given rate and enable it. 
*/ ++static void __clk_hfpll_init_once(struct clk_hw *hw) ++{ ++ struct clk_hfpll *h = to_clk_hfpll(hw); ++ struct hfpll_data const *hd = h->d; ++ struct regmap *regmap = h->clkr.regmap; ++ ++ if (likely(h->init_done)) ++ return; ++ ++ /* Configure PLL parameters for integer mode. */ ++ if (hd->config_val) ++ regmap_write(regmap, hd->config_reg, hd->config_val); ++ regmap_write(regmap, hd->m_reg, 0); ++ regmap_write(regmap, hd->n_reg, 1); ++ ++ if (hd->user_reg) { ++ u32 regval = hd->user_val; ++ unsigned long rate; ++ ++ rate = clk_hw_get_rate(hw); ++ ++ /* Pick the right VCO. */ ++ if (hd->user_vco_mask && rate > hd->low_vco_max_rate) ++ regval |= hd->user_vco_mask; ++ regmap_write(regmap, hd->user_reg, regval); ++ } ++ ++ if (hd->droop_reg) ++ regmap_write(regmap, hd->droop_reg, hd->droop_val); ++ ++ h->init_done = true; ++} ++ ++static void __clk_hfpll_enable(struct clk_hw *hw) ++{ ++ struct clk_hfpll *h = to_clk_hfpll(hw); ++ struct hfpll_data const *hd = h->d; ++ struct regmap *regmap = h->clkr.regmap; ++ u32 val; ++ ++ __clk_hfpll_init_once(hw); ++ ++ /* Disable PLL bypass mode. */ ++ regmap_update_bits(regmap, hd->mode_reg, PLL_BYPASSNL, PLL_BYPASSNL); ++ ++ /* ++ * H/W requires a 5us delay between disabling the bypass and ++ * de-asserting the reset. Delay 10us just to be safe. ++ */ ++ usleep_range(10, 100); ++ ++ /* De-assert active-low PLL reset. */ ++ regmap_update_bits(regmap, hd->mode_reg, PLL_RESET_N, PLL_RESET_N); ++ ++ /* Wait for PLL to lock. */ ++ if (hd->status_reg) { ++ do { ++ regmap_read(regmap, hd->status_reg, &val); ++ } while (!(val & BIT(hd->lock_bit))); ++ } else { ++ usleep_range(60, 100); ++ } ++ ++ /* Enable PLL output. */ ++ regmap_update_bits(regmap, hd->mode_reg, PLL_OUTCTRL, PLL_OUTCTRL); ++} ++ ++/* Enable an already-configured HFPLL. 
*/ ++static int clk_hfpll_enable(struct clk_hw *hw) ++{ ++ unsigned long flags; ++ struct clk_hfpll *h = to_clk_hfpll(hw); ++ struct hfpll_data const *hd = h->d; ++ struct regmap *regmap = h->clkr.regmap; ++ u32 mode; ++ ++ spin_lock_irqsave(&h->lock, flags); ++ regmap_read(regmap, hd->mode_reg, &mode); ++ if (!(mode & (PLL_BYPASSNL | PLL_RESET_N | PLL_OUTCTRL))) ++ __clk_hfpll_enable(hw); ++ spin_unlock_irqrestore(&h->lock, flags); ++ ++ return 0; ++} ++ ++static void __clk_hfpll_disable(struct clk_hfpll *h) ++{ ++ struct hfpll_data const *hd = h->d; ++ struct regmap *regmap = h->clkr.regmap; ++ ++ /* ++ * Disable the PLL output, disable test mode, enable the bypass mode, ++ * and assert the reset. ++ */ ++ regmap_update_bits(regmap, hd->mode_reg, ++ PLL_BYPASSNL | PLL_RESET_N | PLL_OUTCTRL, 0); ++} ++ ++static void clk_hfpll_disable(struct clk_hw *hw) ++{ ++ struct clk_hfpll *h = to_clk_hfpll(hw); ++ unsigned long flags; ++ ++ spin_lock_irqsave(&h->lock, flags); ++ __clk_hfpll_disable(h); ++ spin_unlock_irqrestore(&h->lock, flags); ++} ++ ++static long clk_hfpll_round_rate(struct clk_hw *hw, unsigned long rate, ++ unsigned long *parent_rate) ++{ ++ struct clk_hfpll *h = to_clk_hfpll(hw); ++ struct hfpll_data const *hd = h->d; ++ unsigned long rrate; ++ ++ rate = clamp(rate, hd->min_rate, hd->max_rate); ++ ++ rrate = DIV_ROUND_UP(rate, *parent_rate) * *parent_rate; ++ if (rrate > hd->max_rate) ++ rrate -= *parent_rate; ++ ++ return rrate; ++} ++ ++/* ++ * For optimization reasons, assumes no downstream clocks are actively using ++ * it. 
++ */ ++static int clk_hfpll_set_rate(struct clk_hw *hw, unsigned long rate, ++ unsigned long parent_rate) ++{ ++ struct clk_hfpll *h = to_clk_hfpll(hw); ++ struct hfpll_data const *hd = h->d; ++ struct regmap *regmap = h->clkr.regmap; ++ unsigned long flags; ++ u32 l_val, val; ++ bool enabled; ++ ++ l_val = rate / parent_rate; ++ ++ spin_lock_irqsave(&h->lock, flags); ++ ++ enabled = __clk_is_enabled(hw->clk); ++ if (enabled) ++ __clk_hfpll_disable(h); ++ ++ /* Pick the right VCO. */ ++ if (hd->user_reg && hd->user_vco_mask) { ++ regmap_read(regmap, hd->user_reg, &val); ++ if (rate <= hd->low_vco_max_rate) ++ val &= ~hd->user_vco_mask; ++ else ++ val |= hd->user_vco_mask; ++ regmap_write(regmap, hd->user_reg, val); ++ } ++ ++ regmap_write(regmap, hd->l_reg, l_val); ++ ++ if (enabled) ++ __clk_hfpll_enable(hw); ++ ++ spin_unlock_irqrestore(&h->lock, flags); ++ ++ return 0; ++} ++ ++static unsigned long clk_hfpll_recalc_rate(struct clk_hw *hw, ++ unsigned long parent_rate) ++{ ++ struct clk_hfpll *h = to_clk_hfpll(hw); ++ struct hfpll_data const *hd = h->d; ++ struct regmap *regmap = h->clkr.regmap; ++ u32 l_val; ++ ++ regmap_read(regmap, hd->l_reg, &l_val); ++ ++ return l_val * parent_rate; ++} ++ ++static void clk_hfpll_init(struct clk_hw *hw) ++{ ++ struct clk_hfpll *h = to_clk_hfpll(hw); ++ struct hfpll_data const *hd = h->d; ++ struct regmap *regmap = h->clkr.regmap; ++ u32 mode, status; ++ ++ regmap_read(regmap, hd->mode_reg, &mode); ++ if (mode != (PLL_BYPASSNL | PLL_RESET_N | PLL_OUTCTRL)) { ++ __clk_hfpll_init_once(hw); ++ return; ++ } ++ ++ if (hd->status_reg) { ++ regmap_read(regmap, hd->status_reg, &status); ++ if (!(status & BIT(hd->lock_bit))) { ++ WARN(1, "HFPLL %s is ON, but not locked!\n", ++ __clk_get_name(hw->clk)); ++ clk_hfpll_disable(hw); ++ __clk_hfpll_init_once(hw); ++ } ++ } ++} ++ ++static int hfpll_is_enabled(struct clk_hw *hw) ++{ ++ struct clk_hfpll *h = to_clk_hfpll(hw); ++ struct hfpll_data const *hd = h->d; ++ struct regmap *regmap = 
h->clkr.regmap; ++ u32 mode; ++ ++ regmap_read(regmap, hd->mode_reg, &mode); ++ mode &= 0x7; ++ return mode == (PLL_BYPASSNL | PLL_RESET_N | PLL_OUTCTRL); ++} ++ ++const struct clk_ops clk_ops_hfpll = { ++ .enable = clk_hfpll_enable, ++ .disable = clk_hfpll_disable, ++ .is_enabled = hfpll_is_enabled, ++ .round_rate = clk_hfpll_round_rate, ++ .set_rate = clk_hfpll_set_rate, ++ .recalc_rate = clk_hfpll_recalc_rate, ++ .init = clk_hfpll_init, ++}; ++EXPORT_SYMBOL_GPL(clk_ops_hfpll); +--- /dev/null ++++ b/drivers/clk/qcom/clk-hfpll.h +@@ -0,0 +1,54 @@ ++/* ++ * Copyright (c) 2013-2014, The Linux Foundation. All rights reserved. ++ * ++ * This program is free software; you can redistribute it and/or modify ++ * it under the terms of the GNU General Public License version 2 and ++ * only version 2 as published by the Free Software Foundation. ++ * ++ * This program is distributed in the hope that it will be useful, ++ * but WITHOUT ANY WARRANTY; without even the implied warranty of ++ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the ++ * GNU General Public License for more details. 
++ */ ++#ifndef __QCOM_CLK_HFPLL_H__ ++#define __QCOM_CLK_HFPLL_H__ ++ ++#include ++#include ++#include "clk-regmap.h" ++ ++struct hfpll_data { ++ u32 mode_reg; ++ u32 l_reg; ++ u32 m_reg; ++ u32 n_reg; ++ u32 user_reg; ++ u32 droop_reg; ++ u32 config_reg; ++ u32 status_reg; ++ u8 lock_bit; ++ ++ u32 droop_val; ++ u32 config_val; ++ u32 user_val; ++ u32 user_vco_mask; ++ unsigned long low_vco_max_rate; ++ ++ unsigned long min_rate; ++ unsigned long max_rate; ++}; ++ ++struct clk_hfpll { ++ struct hfpll_data const *d; ++ int init_done; ++ ++ struct clk_regmap clkr; ++ spinlock_t lock; ++}; ++ ++#define to_clk_hfpll(_hw) \ ++ container_of(to_clk_regmap(_hw), struct clk_hfpll, clkr) ++ ++extern const struct clk_ops clk_ops_hfpll; ++ ++#endif diff --git a/target/linux/ipq40xx/patches-4.9/0039-clk-qcom-Add-HFPLL-driver.patch b/target/linux/ipq40xx/patches-4.9/0039-clk-qcom-Add-HFPLL-driver.patch new file mode 100644 index 000000000..d9ad391d0 --- /dev/null +++ b/target/linux/ipq40xx/patches-4.9/0039-clk-qcom-Add-HFPLL-driver.patch @@ -0,0 +1,206 @@ +From patchwork Fri Dec 8 09:42:22 2017 +Content-Type: text/plain; charset="utf-8" +MIME-Version: 1.0 +Content-Transfer-Encoding: 7bit +Subject: [v4,04/12] clk: qcom: Add HFPLL driver +From: Sricharan R +X-Patchwork-Id: 10102079 +Message-Id: <1512726150-7204-5-git-send-email-sricharan@codeaurora.org> +To: mturquette@baylibre.com, sboyd@codeaurora.org, + devicetree@vger.kernel.org, linux-pm@vger.kernel.org, + linux-arm-msm@vger.kernel.org, linux-kernel@vger.kernel.org, + viresh.kumar@linaro.org, linux-arm-kernel@lists.infradead.org +Cc: sricharan@codeaurora.org +Date: Fri, 8 Dec 2017 15:12:22 +0530 + +From: Stephen Boyd + +On some devices (MSM8974 for example), the HFPLLs are +instantiated within the Krait processor subsystem as separate +register regions. Add a driver for these PLLs so that we can +provide HFPLL clocks for use by the system. 
+ +Cc: +Signed-off-by: Stephen Boyd +--- + .../devicetree/bindings/clock/qcom,hfpll.txt | 40 ++++++++ + drivers/clk/qcom/Kconfig | 8 ++ + drivers/clk/qcom/Makefile | 1 + + drivers/clk/qcom/hfpll.c | 106 +++++++++++++++++++++ + 4 files changed, 155 insertions(+) + create mode 100644 Documentation/devicetree/bindings/clock/qcom,hfpll.txt + create mode 100644 drivers/clk/qcom/hfpll.c + +--- /dev/null ++++ b/Documentation/devicetree/bindings/clock/qcom,hfpll.txt +@@ -0,0 +1,40 @@ ++High-Frequency PLL (HFPLL) ++ ++PROPERTIES ++ ++- compatible: ++ Usage: required ++ Value type: ++ Definition: must be "qcom,hfpll" ++ ++- reg: ++ Usage: required ++ Value type: ++ Definition: address and size of HPLL registers. An optional second ++ element specifies the address and size of the alias ++ register region. ++ ++- clock-output-names: ++ Usage: required ++ Value type: ++ Definition: Name of the PLL. Typically hfpllX where X is a CPU number ++ starting at 0. Otherwise hfpll_Y where Y is more specific ++ such as "l2". ++ ++Example: ++ ++1) An HFPLL for the L2 cache. ++ ++ clock-controller@f9016000 { ++ compatible = "qcom,hfpll"; ++ reg = <0xf9016000 0x30>; ++ clock-output-names = "hfpll_l2"; ++ }; ++ ++2) An HFPLL for CPU0. This HFPLL has the alias register region. ++ ++ clock-controller@f908a000 { ++ compatible = "qcom,hfpll"; ++ reg = <0xf908a000 0x30>, <0xf900a000 0x30>; ++ clock-output-names = "hfpll0"; ++ }; +--- a/drivers/clk/qcom/Kconfig ++++ b/drivers/clk/qcom/Kconfig +@@ -179,3 +179,11 @@ config MSM_MMCC_8996 + Support for the multimedia clock controller on msm8996 devices. + Say Y if you want to support multimedia devices such as display, + graphics, video encode/decode, camera, etc. ++ ++config QCOM_HFPLL ++ tristate "High-Frequency PLL (HFPLL) Clock Controller" ++ depends on COMMON_CLK_QCOM ++ help ++ Support for the high-frequency PLLs present on Qualcomm devices. ++ Say Y if you want to support CPU frequency scaling on devices ++ such as MSM8974, APQ8084, etc. 
+--- a/drivers/clk/qcom/Makefile ++++ b/drivers/clk/qcom/Makefile +@@ -32,3 +32,4 @@ obj-$(CONFIG_MSM_MMCC_8974) += mmcc-msm8 + obj-$(CONFIG_MSM_MMCC_8996) += mmcc-msm8996.o + obj-$(CONFIG_QCOM_CLK_RPM) += clk-rpm.o + obj-$(CONFIG_QCOM_CLK_SMD_RPM) += clk-smd-rpm.o ++obj-$(CONFIG_QCOM_HFPLL) += hfpll.o +--- /dev/null ++++ b/drivers/clk/qcom/hfpll.c +@@ -0,0 +1,106 @@ ++/* ++ * Copyright (c) 2013-2014, The Linux Foundation. All rights reserved. ++ * ++ * This program is free software; you can redistribute it and/or modify ++ * it under the terms of the GNU General Public License version 2 and ++ * only version 2 as published by the Free Software Foundation. ++ * ++ * This program is distributed in the hope that it will be useful, ++ * but WITHOUT ANY WARRANTY; without even the implied warranty of ++ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the ++ * GNU General Public License for more details. ++ */ ++ ++#include ++#include ++#include ++#include ++#include ++#include ++#include ++#include ++ ++#include "clk-regmap.h" ++#include "clk-hfpll.h" ++ ++static const struct hfpll_data hdata = { ++ .mode_reg = 0x00, ++ .l_reg = 0x04, ++ .m_reg = 0x08, ++ .n_reg = 0x0c, ++ .user_reg = 0x10, ++ .config_reg = 0x14, ++ .config_val = 0x430405d, ++ .status_reg = 0x1c, ++ .lock_bit = 16, ++ ++ .user_val = 0x8, ++ .user_vco_mask = 0x100000, ++ .low_vco_max_rate = 1248000000, ++ .min_rate = 537600000UL, ++ .max_rate = 2900000000UL, ++}; ++ ++static const struct of_device_id qcom_hfpll_match_table[] = { ++ { .compatible = "qcom,hfpll" }, ++ { } ++}; ++MODULE_DEVICE_TABLE(of, qcom_hfpll_match_table); ++ ++static const struct regmap_config hfpll_regmap_config = { ++ .reg_bits = 32, ++ .reg_stride = 4, ++ .val_bits = 32, ++ .max_register = 0x30, ++ .fast_io = true, ++}; ++ ++static int qcom_hfpll_probe(struct platform_device *pdev) ++{ ++ struct resource *res; ++ struct device *dev = &pdev->dev; ++ void __iomem *base; ++ struct regmap *regmap; ++ struct clk_hfpll *h; ++ 
struct clk_init_data init = { ++ .parent_names = (const char *[]){ "xo" }, ++ .num_parents = 1, ++ .ops = &clk_ops_hfpll, ++ }; ++ ++ h = devm_kzalloc(dev, sizeof(*h), GFP_KERNEL); ++ if (!h) ++ return -ENOMEM; ++ ++ res = platform_get_resource(pdev, IORESOURCE_MEM, 0); ++ base = devm_ioremap_resource(dev, res); ++ if (IS_ERR(base)) ++ return PTR_ERR(base); ++ ++ regmap = devm_regmap_init_mmio(&pdev->dev, base, &hfpll_regmap_config); ++ if (IS_ERR(regmap)) ++ return PTR_ERR(regmap); ++ ++ if (of_property_read_string_index(dev->of_node, "clock-output-names", ++ 0, &init.name)) ++ return -ENODEV; ++ ++ h->d = &hdata; ++ h->clkr.hw.init = &init; ++ spin_lock_init(&h->lock); ++ ++ return devm_clk_register_regmap(&pdev->dev, &h->clkr); ++} ++ ++static struct platform_driver qcom_hfpll_driver = { ++ .probe = qcom_hfpll_probe, ++ .driver = { ++ .name = "qcom-hfpll", ++ .of_match_table = qcom_hfpll_match_table, ++ }, ++}; ++module_platform_driver(qcom_hfpll_driver); ++ ++MODULE_DESCRIPTION("QCOM HFPLL Clock Driver"); ++MODULE_LICENSE("GPL v2"); ++MODULE_ALIAS("platform:qcom-hfpll"); diff --git a/target/linux/ipq40xx/patches-4.9/0040-clk-qcom-Add-IPQ806X-s-HFPLLs.patch b/target/linux/ipq40xx/patches-4.9/0040-clk-qcom-Add-IPQ806X-s-HFPLLs.patch new file mode 100644 index 000000000..c3af0fa55 --- /dev/null +++ b/target/linux/ipq40xx/patches-4.9/0040-clk-qcom-Add-IPQ806X-s-HFPLLs.patch @@ -0,0 +1,129 @@ +From patchwork Fri Dec 8 09:42:24 2017 +Content-Type: text/plain; charset="utf-8" +MIME-Version: 1.0 +Content-Transfer-Encoding: 7bit +Subject: [v4,06/12] clk: qcom: Add IPQ806X's HFPLLs +From: Sricharan R +X-Patchwork-Id: 10102047 +Message-Id: <1512726150-7204-7-git-send-email-sricharan@codeaurora.org> +To: mturquette@baylibre.com, sboyd@codeaurora.org, + devicetree@vger.kernel.org, linux-pm@vger.kernel.org, + linux-arm-msm@vger.kernel.org, linux-kernel@vger.kernel.org, + viresh.kumar@linaro.org, linux-arm-kernel@lists.infradead.org +Cc: sricharan@codeaurora.org +Date: Fri, 8 
Dec 2017 15:12:24 +0530 + +From: Stephen Boyd + +Describe the HFPLLs present on IPQ806X devices. + +Signed-off-by: Stephen Boyd +--- + drivers/clk/qcom/gcc-ipq806x.c | 82 ++++++++++++++++++++++++++++++++++++++++++ + 1 file changed, 82 insertions(+) + +--- a/drivers/clk/qcom/gcc-ipq806x.c ++++ b/drivers/clk/qcom/gcc-ipq806x.c +@@ -30,6 +30,7 @@ + #include "clk-pll.h" + #include "clk-rcg.h" + #include "clk-branch.h" ++#include "clk-hfpll.h" + #include "reset.h" + + static struct clk_pll pll0 = { +@@ -113,6 +114,84 @@ static struct clk_regmap pll8_vote = { + }, + }; + ++static struct hfpll_data hfpll0_data = { ++ .mode_reg = 0x3200, ++ .l_reg = 0x3208, ++ .m_reg = 0x320c, ++ .n_reg = 0x3210, ++ .config_reg = 0x3204, ++ .status_reg = 0x321c, ++ .config_val = 0x7845c665, ++ .droop_reg = 0x3214, ++ .droop_val = 0x0108c000, ++ .min_rate = 600000000UL, ++ .max_rate = 1800000000UL, ++}; ++ ++static struct clk_hfpll hfpll0 = { ++ .d = &hfpll0_data, ++ .clkr.hw.init = &(struct clk_init_data){ ++ .parent_names = (const char *[]){ "pxo" }, ++ .num_parents = 1, ++ .name = "hfpll0", ++ .ops = &clk_ops_hfpll, ++ .flags = CLK_IGNORE_UNUSED, ++ }, ++ .lock = __SPIN_LOCK_UNLOCKED(hfpll0.lock), ++}; ++ ++static struct hfpll_data hfpll1_data = { ++ .mode_reg = 0x3240, ++ .l_reg = 0x3248, ++ .m_reg = 0x324c, ++ .n_reg = 0x3250, ++ .config_reg = 0x3244, ++ .status_reg = 0x325c, ++ .config_val = 0x7845c665, ++ .droop_reg = 0x3314, ++ .droop_val = 0x0108c000, ++ .min_rate = 600000000UL, ++ .max_rate = 1800000000UL, ++}; ++ ++static struct clk_hfpll hfpll1 = { ++ .d = &hfpll1_data, ++ .clkr.hw.init = &(struct clk_init_data){ ++ .parent_names = (const char *[]){ "pxo" }, ++ .num_parents = 1, ++ .name = "hfpll1", ++ .ops = &clk_ops_hfpll, ++ .flags = CLK_IGNORE_UNUSED, ++ }, ++ .lock = __SPIN_LOCK_UNLOCKED(hfpll1.lock), ++}; ++ ++static struct hfpll_data hfpll_l2_data = { ++ .mode_reg = 0x3300, ++ .l_reg = 0x3308, ++ .m_reg = 0x330c, ++ .n_reg = 0x3310, ++ .config_reg = 0x3304, ++ .status_reg 
= 0x331c, ++ .config_val = 0x7845c665, ++ .droop_reg = 0x3314, ++ .droop_val = 0x0108c000, ++ .min_rate = 600000000UL, ++ .max_rate = 1800000000UL, ++}; ++ ++static struct clk_hfpll hfpll_l2 = { ++ .d = &hfpll_l2_data, ++ .clkr.hw.init = &(struct clk_init_data){ ++ .parent_names = (const char *[]){ "pxo" }, ++ .num_parents = 1, ++ .name = "hfpll_l2", ++ .ops = &clk_ops_hfpll, ++ .flags = CLK_IGNORE_UNUSED, ++ }, ++ .lock = __SPIN_LOCK_UNLOCKED(hfpll_l2.lock), ++}; ++ + static struct clk_pll pll14 = { + .l_reg = 0x31c4, + .m_reg = 0x31c8, +@@ -2801,6 +2880,9 @@ static struct clk_regmap *gcc_ipq806x_cl + [UBI32_CORE2_CLK_SRC] = &ubi32_core2_src_clk.clkr, + [NSSTCM_CLK_SRC] = &nss_tcm_src.clkr, + [NSSTCM_CLK] = &nss_tcm_clk.clkr, ++ [PLL9] = &hfpll0.clkr, ++ [PLL10] = &hfpll1.clkr, ++ [PLL12] = &hfpll_l2.clkr, + }; + + static const struct qcom_reset_map gcc_ipq806x_resets[] = { diff --git a/target/linux/ipq40xx/patches-4.9/0041-clk-qcom-Add-support-for-Krait-clocks.patch b/target/linux/ipq40xx/patches-4.9/0041-clk-qcom-Add-support-for-Krait-clocks.patch new file mode 100644 index 000000000..a6d0f0d7a --- /dev/null +++ b/target/linux/ipq40xx/patches-4.9/0041-clk-qcom-Add-support-for-Krait-clocks.patch @@ -0,0 +1,241 @@ +From patchwork Fri Dec 8 09:42:25 2017 +Content-Type: text/plain; charset="utf-8" +MIME-Version: 1.0 +Content-Transfer-Encoding: 7bit +Subject: [v4,07/12] clk: qcom: Add support for Krait clocks +From: Sricharan R +X-Patchwork-Id: 10102051 +Message-Id: <1512726150-7204-8-git-send-email-sricharan@codeaurora.org> +To: mturquette@baylibre.com, sboyd@codeaurora.org, + devicetree@vger.kernel.org, linux-pm@vger.kernel.org, + linux-arm-msm@vger.kernel.org, linux-kernel@vger.kernel.org, + viresh.kumar@linaro.org, linux-arm-kernel@lists.infradead.org +Cc: sricharan@codeaurora.org +Date: Fri, 8 Dec 2017 15:12:25 +0530 + +From: Stephen Boyd + +The Krait clocks are made up of a series of muxes and a divider +that choose between a fixed rate clock and dedicated 
HFPLLs for +each CPU. Instead of using mmio accesses to remux parents, the +Krait implementation exposes the remux control via cp15 +registers. Support these clocks. + +Signed-off-by: Stephen Boyd +--- + drivers/clk/qcom/Kconfig | 4 ++ + drivers/clk/qcom/Makefile | 1 + + drivers/clk/qcom/clk-krait.c | 134 +++++++++++++++++++++++++++++++++++++++++++ + drivers/clk/qcom/clk-krait.h | 48 ++++++++++++++++ + 4 files changed, 187 insertions(+) + create mode 100644 drivers/clk/qcom/clk-krait.c + create mode 100644 drivers/clk/qcom/clk-krait.h + +--- a/drivers/clk/qcom/Kconfig ++++ b/drivers/clk/qcom/Kconfig +@@ -187,3 +187,7 @@ config QCOM_HFPLL + Support for the high-frequency PLLs present on Qualcomm devices. + Say Y if you want to support CPU frequency scaling on devices + such as MSM8974, APQ8084, etc. ++ ++config KRAIT_CLOCKS ++ bool ++ select KRAIT_L2_ACCESSORS +--- a/drivers/clk/qcom/Makefile ++++ b/drivers/clk/qcom/Makefile +@@ -9,6 +9,7 @@ clk-qcom-y += clk-rcg2.o + clk-qcom-y += clk-branch.o + clk-qcom-y += clk-regmap-divider.o + clk-qcom-y += clk-regmap-mux.o ++clk-qcom-$(CONFIG_KRAIT_CLOCKS) += clk-krait.o + clk-qcom-y += clk-hfpll.o + clk-qcom-y += reset.o + clk-qcom-$(CONFIG_QCOM_GDSC) += gdsc.o +--- /dev/null ++++ b/drivers/clk/qcom/clk-krait.c +@@ -0,0 +1,134 @@ ++/* ++ * Copyright (c) 2013-2014, The Linux Foundation. All rights reserved. ++ * ++ * This program is free software; you can redistribute it and/or modify ++ * it under the terms of the GNU General Public License version 2 and ++ * only version 2 as published by the Free Software Foundation. ++ * ++ * This program is distributed in the hope that it will be useful, ++ * but WITHOUT ANY WARRANTY; without even the implied warranty of ++ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the ++ * GNU General Public License for more details. 
++ */ ++ ++#include ++#include ++#include ++#include ++#include ++#include ++#include ++#include ++ ++#include ++ ++#include "clk-krait.h" ++ ++/* Secondary and primary muxes share the same cp15 register */ ++static DEFINE_SPINLOCK(krait_clock_reg_lock); ++ ++#define LPL_SHIFT 8 ++static void __krait_mux_set_sel(struct krait_mux_clk *mux, int sel) ++{ ++ unsigned long flags; ++ u32 regval; ++ ++ spin_lock_irqsave(&krait_clock_reg_lock, flags); ++ regval = krait_get_l2_indirect_reg(mux->offset); ++ regval &= ~(mux->mask << mux->shift); ++ regval |= (sel & mux->mask) << mux->shift; ++ if (mux->lpl) { ++ regval &= ~(mux->mask << (mux->shift + LPL_SHIFT)); ++ regval |= (sel & mux->mask) << (mux->shift + LPL_SHIFT); ++ } ++ krait_set_l2_indirect_reg(mux->offset, regval); ++ spin_unlock_irqrestore(&krait_clock_reg_lock, flags); ++ ++ /* Wait for switch to complete. */ ++ mb(); ++ udelay(1); ++} ++ ++static int krait_mux_set_parent(struct clk_hw *hw, u8 index) ++{ ++ struct krait_mux_clk *mux = to_krait_mux_clk(hw); ++ u32 sel; ++ ++ sel = clk_mux_reindex(index, mux->parent_map, 0); ++ mux->en_mask = sel; ++ /* Don't touch mux if CPU is off as it won't work */ ++ if (__clk_is_enabled(hw->clk)) ++ __krait_mux_set_sel(mux, sel); ++ ++ return 0; ++} ++ ++static u8 krait_mux_get_parent(struct clk_hw *hw) ++{ ++ struct krait_mux_clk *mux = to_krait_mux_clk(hw); ++ u32 sel; ++ ++ sel = krait_get_l2_indirect_reg(mux->offset); ++ sel >>= mux->shift; ++ sel &= mux->mask; ++ mux->en_mask = sel; ++ ++ return clk_mux_get_parent(hw, sel, mux->parent_map, 0); ++} ++ ++const struct clk_ops krait_mux_clk_ops = { ++ .set_parent = krait_mux_set_parent, ++ .get_parent = krait_mux_get_parent, ++ .determine_rate = __clk_mux_determine_rate_closest, ++}; ++EXPORT_SYMBOL_GPL(krait_mux_clk_ops); ++ ++/* The divider can divide by 2, 4, 6 and 8. But we only really need div-2. 
*/ ++static long krait_div2_round_rate(struct clk_hw *hw, unsigned long rate, ++ unsigned long *parent_rate) ++{ ++ *parent_rate = clk_hw_round_rate(clk_hw_get_parent(hw), rate * 2); ++ return DIV_ROUND_UP(*parent_rate, 2); ++} ++ ++static int krait_div2_set_rate(struct clk_hw *hw, unsigned long rate, ++ unsigned long parent_rate) ++{ ++ struct krait_div2_clk *d = to_krait_div2_clk(hw); ++ unsigned long flags; ++ u32 val; ++ u32 mask = BIT(d->width) - 1; ++ ++ if (d->lpl) ++ mask = mask << (d->shift + LPL_SHIFT) | mask << d->shift; ++ ++ spin_lock_irqsave(&krait_clock_reg_lock, flags); ++ val = krait_get_l2_indirect_reg(d->offset); ++ val &= ~mask; ++ krait_set_l2_indirect_reg(d->offset, val); ++ spin_unlock_irqrestore(&krait_clock_reg_lock, flags); ++ ++ return 0; ++} ++ ++static unsigned long ++krait_div2_recalc_rate(struct clk_hw *hw, unsigned long parent_rate) ++{ ++ struct krait_div2_clk *d = to_krait_div2_clk(hw); ++ u32 mask = BIT(d->width) - 1; ++ u32 div; ++ ++ div = krait_get_l2_indirect_reg(d->offset); ++ div >>= d->shift; ++ div &= mask; ++ div = (div + 1) * 2; ++ ++ return DIV_ROUND_UP(parent_rate, div); ++} ++ ++const struct clk_ops krait_div2_clk_ops = { ++ .round_rate = krait_div2_round_rate, ++ .set_rate = krait_div2_set_rate, ++ .recalc_rate = krait_div2_recalc_rate, ++}; ++EXPORT_SYMBOL_GPL(krait_div2_clk_ops); +--- /dev/null ++++ b/drivers/clk/qcom/clk-krait.h +@@ -0,0 +1,48 @@ ++/* ++ * Copyright (c) 2013, The Linux Foundation. All rights reserved. ++ * ++ * This program is free software; you can redistribute it and/or modify ++ * it under the terms of the GNU General Public License version 2 and ++ * only version 2 as published by the Free Software Foundation. ++ * ++ * This program is distributed in the hope that it will be useful, ++ * but WITHOUT ANY WARRANTY; without even the implied warranty of ++ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the ++ * GNU General Public License for more details. 
++ */ ++ ++#ifndef __QCOM_CLK_KRAIT_H ++#define __QCOM_CLK_KRAIT_H ++ ++#include ++ ++struct krait_mux_clk { ++ unsigned int *parent_map; ++ u32 offset; ++ u32 mask; ++ u32 shift; ++ u32 en_mask; ++ bool lpl; ++ ++ struct clk_hw hw; ++ struct notifier_block clk_nb; ++}; ++ ++#define to_krait_mux_clk(_hw) container_of(_hw, struct krait_mux_clk, hw) ++ ++extern const struct clk_ops krait_mux_clk_ops; ++ ++struct krait_div2_clk { ++ u32 offset; ++ u8 width; ++ u32 shift; ++ bool lpl; ++ ++ struct clk_hw hw; ++}; ++ ++#define to_krait_div2_clk(_hw) container_of(_hw, struct krait_div2_clk, hw) ++ ++extern const struct clk_ops krait_div2_clk_ops; ++ ++#endif diff --git a/target/linux/ipq40xx/patches-4.9/0042-clk-qcom-Add-KPSS-ACC-GCC-driver.patch b/target/linux/ipq40xx/patches-4.9/0042-clk-qcom-Add-KPSS-ACC-GCC-driver.patch new file mode 100644 index 000000000..5a76fbc8b --- /dev/null +++ b/target/linux/ipq40xx/patches-4.9/0042-clk-qcom-Add-KPSS-ACC-GCC-driver.patch @@ -0,0 +1,209 @@ +From patchwork Fri Dec 8 09:42:26 2017 +Content-Type: text/plain; charset="utf-8" +MIME-Version: 1.0 +Content-Transfer-Encoding: 7bit +Subject: [v4,08/12] clk: qcom: Add KPSS ACC/GCC driver +From: Sricharan R +X-Patchwork-Id: 10102023 +Message-Id: <1512726150-7204-9-git-send-email-sricharan@codeaurora.org> +To: mturquette@baylibre.com, sboyd@codeaurora.org, + devicetree@vger.kernel.org, linux-pm@vger.kernel.org, + linux-arm-msm@vger.kernel.org, linux-kernel@vger.kernel.org, + viresh.kumar@linaro.org, linux-arm-kernel@lists.infradead.org +Cc: sricharan@codeaurora.org +Date: Fri, 8 Dec 2017 15:12:26 +0530 + +From: Stephen Boyd + +The ACC and GCC regions present in KPSSv1 contain registers to +control clocks and power to each Krait CPU and L2. For CPUfreq +purposes probe these devices and expose a mux clock that chooses +between PXO and PLL8. 
+ +Cc: +Signed-off-by: Stephen Boyd +--- + .../devicetree/bindings/arm/msm/qcom,kpss-acc.txt | 7 ++ + .../devicetree/bindings/arm/msm/qcom,kpss-gcc.txt | 28 +++++++ + drivers/clk/qcom/Kconfig | 8 ++ + drivers/clk/qcom/Makefile | 1 + + drivers/clk/qcom/kpss-xcc.c | 96 ++++++++++++++++++++++ + 5 files changed, 140 insertions(+) + create mode 100644 Documentation/devicetree/bindings/arm/msm/qcom,kpss-gcc.txt + create mode 100644 drivers/clk/qcom/kpss-xcc.c + +--- a/Documentation/devicetree/bindings/arm/msm/qcom,kpss-acc.txt ++++ b/Documentation/devicetree/bindings/arm/msm/qcom,kpss-acc.txt +@@ -21,10 +21,17 @@ PROPERTIES + the register region. An optional second element specifies + the base address and size of the alias register region. + ++- clock-output-names: ++ Usage: optional ++ Value type: ++ Definition: Name of the output clock. Typically acpuX_aux where X is a ++ CPU number starting at 0. ++ + Example: + + clock-controller@2088000 { + compatible = "qcom,kpss-acc-v2"; + reg = <0x02088000 0x1000>, + <0x02008000 0x1000>; ++ clock-output-names = "acpu0_aux"; + }; +--- /dev/null ++++ b/Documentation/devicetree/bindings/arm/msm/qcom,kpss-gcc.txt +@@ -0,0 +1,28 @@ ++Krait Processor Sub-system (KPSS) Global Clock Controller (GCC) ++ ++PROPERTIES ++ ++- compatible: ++ Usage: required ++ Value type: ++ Definition: should be one of: ++ "qcom,kpss-gcc" ++ ++- reg: ++ Usage: required ++ Value type: ++ Definition: base address and size of the register region ++ ++- clock-output-names: ++ Usage: required ++ Value type: ++ Definition: Name of the output clock. Typically acpu_l2_aux indicating ++ an L2 cache auxiliary clock. 
++ ++Example: ++ ++ l2cc: clock-controller@2011000 { ++ compatible = "qcom,kpss-gcc"; ++ reg = <0x2011000 0x1000>; ++ clock-output-names = "acpu_l2_aux"; ++ }; +--- a/drivers/clk/qcom/Kconfig ++++ b/drivers/clk/qcom/Kconfig +@@ -188,6 +188,14 @@ config QCOM_HFPLL + Say Y if you want to support CPU frequency scaling on devices + such as MSM8974, APQ8084, etc. + ++config KPSS_XCC ++ tristate "KPSS Clock Controller" ++ depends on COMMON_CLK_QCOM ++ help ++ Support for the Krait ACC and GCC clock controllers. Say Y ++ if you want to support CPU frequency scaling on devices such ++ as MSM8960, APQ8064, etc. ++ + config KRAIT_CLOCKS + bool + select KRAIT_L2_ACCESSORS +--- a/drivers/clk/qcom/Makefile ++++ b/drivers/clk/qcom/Makefile +@@ -33,4 +33,5 @@ obj-$(CONFIG_MSM_MMCC_8974) += mmcc-msm8 + obj-$(CONFIG_MSM_MMCC_8996) += mmcc-msm8996.o + obj-$(CONFIG_QCOM_CLK_RPM) += clk-rpm.o + obj-$(CONFIG_QCOM_CLK_SMD_RPM) += clk-smd-rpm.o ++obj-$(CONFIG_KPSS_XCC) += kpss-xcc.o + obj-$(CONFIG_QCOM_HFPLL) += hfpll.o +--- /dev/null ++++ b/drivers/clk/qcom/kpss-xcc.c +@@ -0,0 +1,96 @@ ++/* Copyright (c) 2014-2015, The Linux Foundation. All rights reserved. ++ * ++ * This program is free software; you can redistribute it and/or modify ++ * it under the terms of the GNU General Public License version 2 and ++ * only version 2 as published by the Free Software Foundation. ++ * ++ * This program is distributed in the hope that it will be useful, ++ * but WITHOUT ANY WARRANTY; without even the implied warranty of ++ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the ++ * GNU General Public License for more details. 
++ */ ++ ++#include ++#include ++#include ++#include ++#include ++#include ++#include ++#include ++#include ++#include ++ ++static const char *aux_parents[] = { ++ "pll8_vote", ++ "pxo", ++}; ++ ++static unsigned int aux_parent_map[] = { ++ 3, ++ 0, ++}; ++ ++static const struct of_device_id kpss_xcc_match_table[] = { ++ { .compatible = "qcom,kpss-acc-v1", .data = (void *)1UL }, ++ { .compatible = "qcom,kpss-gcc" }, ++ {} ++}; ++MODULE_DEVICE_TABLE(of, kpss_xcc_match_table); ++ ++static int kpss_xcc_driver_probe(struct platform_device *pdev) ++{ ++ const struct of_device_id *id; ++ struct clk *clk; ++ struct resource *res; ++ void __iomem *base; ++ const char *name; ++ ++ id = of_match_device(kpss_xcc_match_table, &pdev->dev); ++ if (!id) ++ return -ENODEV; ++ ++ res = platform_get_resource(pdev, IORESOURCE_MEM, 0); ++ base = devm_ioremap_resource(&pdev->dev, res); ++ if (IS_ERR(base)) ++ return PTR_ERR(base); ++ ++ if (id->data) { ++ if (of_property_read_string_index(pdev->dev.of_node, ++ "clock-output-names", ++ 0, &name)) ++ return -ENODEV; ++ base += 0x14; ++ } else { ++ name = "acpu_l2_aux"; ++ base += 0x28; ++ } ++ ++ clk = clk_register_mux_table(&pdev->dev, name, aux_parents, ++ ARRAY_SIZE(aux_parents), 0, base, 0, 0x3, ++ 0, aux_parent_map, NULL); ++ ++ platform_set_drvdata(pdev, clk); ++ ++ return PTR_ERR_OR_ZERO(clk); ++} ++ ++static int kpss_xcc_driver_remove(struct platform_device *pdev) ++{ ++ clk_unregister_mux(platform_get_drvdata(pdev)); ++ return 0; ++} ++ ++static struct platform_driver kpss_xcc_driver = { ++ .probe = kpss_xcc_driver_probe, ++ .remove = kpss_xcc_driver_remove, ++ .driver = { ++ .name = "kpss-xcc", ++ .of_match_table = kpss_xcc_match_table, ++ }, ++}; ++module_platform_driver(kpss_xcc_driver); ++ ++MODULE_DESCRIPTION("Krait Processor Sub System (KPSS) Clock Driver"); ++MODULE_LICENSE("GPL v2"); ++MODULE_ALIAS("platform:kpss-xcc"); diff --git a/target/linux/ipq40xx/patches-4.9/0043-clk-qcom-Add-Krait-clock-controller-driver.patch 
b/target/linux/ipq40xx/patches-4.9/0043-clk-qcom-Add-Krait-clock-controller-driver.patch new file mode 100644 index 000000000..1d32b2b05 --- /dev/null +++ b/target/linux/ipq40xx/patches-4.9/0043-clk-qcom-Add-Krait-clock-controller-driver.patch @@ -0,0 +1,436 @@ +From patchwork Fri Dec 8 09:42:27 2017 +Content-Type: text/plain; charset="utf-8" +MIME-Version: 1.0 +Content-Transfer-Encoding: 7bit +Subject: [v4,09/12] clk: qcom: Add Krait clock controller driver +From: Sricharan R +X-Patchwork-Id: 10102061 +Message-Id: <1512726150-7204-10-git-send-email-sricharan@codeaurora.org> +To: mturquette@baylibre.com, sboyd@codeaurora.org, + devicetree@vger.kernel.org, linux-pm@vger.kernel.org, + linux-arm-msm@vger.kernel.org, linux-kernel@vger.kernel.org, + viresh.kumar@linaro.org, linux-arm-kernel@lists.infradead.org +Cc: sricharan@codeaurora.org +Date: Fri, 8 Dec 2017 15:12:27 +0530 + +From: Stephen Boyd + +The Krait CPU clocks are made up of a primary mux and secondary +mux for each CPU and the L2, controlled via cp15 accessors. For +Kraits within KPSSv1 each secondary mux accepts a different aux +source, but on KPSSv2 each secondary mux accepts the same aux +source. 
+ +Cc: +Signed-off-by: Stephen Boyd +--- + .../devicetree/bindings/clock/qcom,krait-cc.txt | 22 ++ + drivers/clk/qcom/Kconfig | 8 + + drivers/clk/qcom/Makefile | 1 + + drivers/clk/qcom/krait-cc.c | 350 +++++++++++++++++++++ + 4 files changed, 381 insertions(+) + create mode 100644 Documentation/devicetree/bindings/clock/qcom,krait-cc.txt + create mode 100644 drivers/clk/qcom/krait-cc.c + +--- /dev/null ++++ b/Documentation/devicetree/bindings/clock/qcom,krait-cc.txt +@@ -0,0 +1,22 @@ ++Krait Clock Controller ++ ++PROPERTIES ++ ++- compatible: ++ Usage: required ++ Value type: ++ Definition: must be one of: ++ "qcom,krait-cc-v1" ++ "qcom,krait-cc-v2" ++ ++- #clock-cells: ++ Usage: required ++ Value type: ++ Definition: must be 1 ++ ++Example: ++ ++ kraitcc: clock-controller { ++ compatible = "qcom,krait-cc-v1"; ++ #clock-cells = <1>; ++ }; +--- a/drivers/clk/qcom/Kconfig ++++ b/drivers/clk/qcom/Kconfig +@@ -196,6 +196,14 @@ config KPSS_XCC + if you want to support CPU frequency scaling on devices such + as MSM8960, APQ8064, etc. + ++config KRAITCC ++ tristate "Krait Clock Controller" ++ depends on COMMON_CLK_QCOM && ARM ++ select KRAIT_CLOCKS ++ help ++ Support for the Krait CPU clocks on Qualcomm devices. ++ Say Y if you want to support CPU frequency scaling. ++ + config KRAIT_CLOCKS + bool + select KRAIT_L2_ACCESSORS +--- a/drivers/clk/qcom/Makefile ++++ b/drivers/clk/qcom/Makefile +@@ -35,3 +35,4 @@ obj-$(CONFIG_QCOM_CLK_RPM) += clk-rpm.o + obj-$(CONFIG_QCOM_CLK_SMD_RPM) += clk-smd-rpm.o + obj-$(CONFIG_KPSS_XCC) += kpss-xcc.o + obj-$(CONFIG_QCOM_HFPLL) += hfpll.o ++obj-$(CONFIG_KRAITCC) += krait-cc.o +--- /dev/null ++++ b/drivers/clk/qcom/krait-cc.c +@@ -0,0 +1,350 @@ ++/* Copyright (c) 2013-2015, The Linux Foundation. All rights reserved. ++ * ++ * This program is free software; you can redistribute it and/or modify ++ * it under the terms of the GNU General Public License version 2 and ++ * only version 2 as published by the Free Software Foundation. 
++ * ++ * This program is distributed in the hope that it will be useful, ++ * but WITHOUT ANY WARRANTY; without even the implied warranty of ++ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the ++ * GNU General Public License for more details. ++ */ ++ ++#include ++#include ++#include ++#include ++#include ++#include ++#include ++#include ++#include ++#include ++#include ++ ++#include "clk-krait.h" ++ ++static unsigned int sec_mux_map[] = { ++ 2, ++ 0, ++}; ++ ++static unsigned int pri_mux_map[] = { ++ 1, ++ 2, ++ 0, ++}; ++ ++static int ++krait_add_div(struct device *dev, int id, const char *s, unsigned int offset) ++{ ++ struct krait_div2_clk *div; ++ struct clk_init_data init = { ++ .num_parents = 1, ++ .ops = &krait_div2_clk_ops, ++ .flags = CLK_SET_RATE_PARENT, ++ }; ++ const char *p_names[1]; ++ struct clk *clk; ++ ++ div = devm_kzalloc(dev, sizeof(*div), GFP_KERNEL); ++ if (!div) ++ return -ENOMEM; ++ ++ div->width = 2; ++ div->shift = 6; ++ div->lpl = id >= 0; ++ div->offset = offset; ++ div->hw.init = &init; ++ ++ init.name = kasprintf(GFP_KERNEL, "hfpll%s_div", s); ++ if (!init.name) ++ return -ENOMEM; ++ ++ init.parent_names = p_names; ++ p_names[0] = kasprintf(GFP_KERNEL, "hfpll%s", s); ++ if (!p_names[0]) { ++ kfree(init.name); ++ return -ENOMEM; ++ } ++ ++ clk = devm_clk_register(dev, &div->hw); ++ kfree(p_names[0]); ++ kfree(init.name); ++ ++ return PTR_ERR_OR_ZERO(clk); ++} ++ ++static int ++krait_add_sec_mux(struct device *dev, int id, const char *s, ++ unsigned int offset, bool unique_aux) ++{ ++ struct krait_mux_clk *mux; ++ static const char *sec_mux_list[] = { ++ "acpu_aux", ++ "qsb", ++ }; ++ struct clk_init_data init = { ++ .parent_names = sec_mux_list, ++ .num_parents = ARRAY_SIZE(sec_mux_list), ++ .ops = &krait_mux_clk_ops, ++ .flags = CLK_SET_RATE_PARENT, ++ }; ++ struct clk *clk; ++ ++ mux = devm_kzalloc(dev, sizeof(*mux), GFP_KERNEL); ++ if (!mux) ++ return -ENOMEM; ++ ++ mux->offset = offset; ++ mux->lpl = id >= 0; ++ 
mux->mask = 0x3; ++ mux->shift = 2; ++ mux->parent_map = sec_mux_map; ++ mux->hw.init = &init; ++ ++ init.name = kasprintf(GFP_KERNEL, "krait%s_sec_mux", s); ++ if (!init.name) ++ return -ENOMEM; ++ ++ if (unique_aux) { ++ sec_mux_list[0] = kasprintf(GFP_KERNEL, "acpu%s_aux", s); ++ if (!sec_mux_list[0]) { ++ clk = ERR_PTR(-ENOMEM); ++ goto err_aux; ++ } ++ } ++ ++ clk = devm_clk_register(dev, &mux->hw); ++ ++ if (unique_aux) ++ kfree(sec_mux_list[0]); ++err_aux: ++ kfree(init.name); ++ return PTR_ERR_OR_ZERO(clk); ++} ++ ++static struct clk * ++krait_add_pri_mux(struct device *dev, int id, const char *s, ++ unsigned int offset) ++{ ++ struct krait_mux_clk *mux; ++ const char *p_names[3]; ++ struct clk_init_data init = { ++ .parent_names = p_names, ++ .num_parents = ARRAY_SIZE(p_names), ++ .ops = &krait_mux_clk_ops, ++ .flags = CLK_SET_RATE_PARENT, ++ }; ++ struct clk *clk; ++ ++ mux = devm_kzalloc(dev, sizeof(*mux), GFP_KERNEL); ++ if (!mux) ++ return ERR_PTR(-ENOMEM); ++ ++ mux->mask = 0x3; ++ mux->shift = 0; ++ mux->offset = offset; ++ mux->lpl = id >= 0; ++ mux->parent_map = pri_mux_map; ++ mux->hw.init = &init; ++ ++ init.name = kasprintf(GFP_KERNEL, "krait%s_pri_mux", s); ++ if (!init.name) ++ return ERR_PTR(-ENOMEM); ++ ++ p_names[0] = kasprintf(GFP_KERNEL, "hfpll%s", s); ++ if (!p_names[0]) { ++ clk = ERR_PTR(-ENOMEM); ++ goto err_p0; ++ } ++ ++ p_names[1] = kasprintf(GFP_KERNEL, "hfpll%s_div", s); ++ if (!p_names[1]) { ++ clk = ERR_PTR(-ENOMEM); ++ goto err_p1; ++ } ++ ++ p_names[2] = kasprintf(GFP_KERNEL, "krait%s_sec_mux", s); ++ if (!p_names[2]) { ++ clk = ERR_PTR(-ENOMEM); ++ goto err_p2; ++ } ++ ++ clk = devm_clk_register(dev, &mux->hw); ++ ++ kfree(p_names[2]); ++err_p2: ++ kfree(p_names[1]); ++err_p1: ++ kfree(p_names[0]); ++err_p0: ++ kfree(init.name); ++ return clk; ++} ++ ++/* id < 0 for L2, otherwise id == physical CPU number */ ++static struct clk *krait_add_clks(struct device *dev, int id, bool unique_aux) ++{ ++ int ret; ++ unsigned int 
offset; ++ void *p = NULL; ++ const char *s; ++ struct clk *clk; ++ ++ if (id >= 0) { ++ offset = 0x4501 + (0x1000 * id); ++ s = p = kasprintf(GFP_KERNEL, "%d", id); ++ if (!s) ++ return ERR_PTR(-ENOMEM); ++ } else { ++ offset = 0x500; ++ s = "_l2"; ++ } ++ ++ ret = krait_add_div(dev, id, s, offset); ++ if (ret) { ++ clk = ERR_PTR(ret); ++ goto err; ++ } ++ ++ ret = krait_add_sec_mux(dev, id, s, offset, unique_aux); ++ if (ret) { ++ clk = ERR_PTR(ret); ++ goto err; ++ } ++ ++ clk = krait_add_pri_mux(dev, id, s, offset); ++err: ++ kfree(p); ++ return clk; ++} ++ ++static struct clk *krait_of_get(struct of_phandle_args *clkspec, void *data) ++{ ++ unsigned int idx = clkspec->args[0]; ++ struct clk **clks = data; ++ ++ if (idx >= 5) { ++ pr_err("%s: invalid clock index %d\n", __func__, idx); ++ return ERR_PTR(-EINVAL); ++ } ++ ++ return clks[idx] ? : ERR_PTR(-ENODEV); ++} ++ ++static const struct of_device_id krait_cc_match_table[] = { ++ { .compatible = "qcom,krait-cc-v1", (void *)1UL }, ++ { .compatible = "qcom,krait-cc-v2" }, ++ {} ++}; ++MODULE_DEVICE_TABLE(of, krait_cc_match_table); ++ ++static int krait_cc_probe(struct platform_device *pdev) ++{ ++ struct device *dev = &pdev->dev; ++ const struct of_device_id *id; ++ unsigned long cur_rate, aux_rate; ++ int cpu; ++ struct clk *clk; ++ struct clk **clks; ++ struct clk *l2_pri_mux_clk; ++ ++ id = of_match_device(krait_cc_match_table, dev); ++ if (!id) ++ return -ENODEV; ++ ++ /* Rate is 1 because 0 causes problems for __clk_mux_determine_rate */ ++ clk = clk_register_fixed_rate(dev, "qsb", NULL, 0, 1); ++ if (IS_ERR(clk)) ++ return PTR_ERR(clk); ++ ++ if (!id->data) { ++ clk = clk_register_fixed_factor(dev, "acpu_aux", ++ "gpll0_vote", 0, 1, 2); ++ if (IS_ERR(clk)) ++ return PTR_ERR(clk); ++ } ++ ++ /* Krait configurations have at most 4 CPUs and one L2 */ ++ clks = devm_kcalloc(dev, 5, sizeof(*clks), GFP_KERNEL); ++ if (!clks) ++ return -ENOMEM; ++ ++ for_each_possible_cpu(cpu) { ++ clk = krait_add_clks(dev, cpu, 
id->data); ++ if (IS_ERR(clk)) ++ return PTR_ERR(clk); ++ clks[cpu] = clk; ++ } ++ ++ l2_pri_mux_clk = krait_add_clks(dev, -1, id->data); ++ if (IS_ERR(l2_pri_mux_clk)) ++ return PTR_ERR(l2_pri_mux_clk); ++ clks[4] = l2_pri_mux_clk; ++ ++ /* ++ * We don't want the CPU or L2 clocks to be turned off at late init ++ * if CPUFREQ or HOTPLUG configs are disabled. So, bump up the ++ * refcount of these clocks. Any cpufreq/hotplug manager can assume ++ * that the clocks have already been prepared and enabled by the time ++ * they take over. ++ */ ++ for_each_online_cpu(cpu) { ++ clk_prepare_enable(l2_pri_mux_clk); ++ WARN(clk_prepare_enable(clks[cpu]), ++ "Unable to turn on CPU%d clock", cpu); ++ } ++ ++ /* ++ * Force reinit of HFPLLs and muxes to overwrite any potential ++ * incorrect configuration of HFPLLs and muxes by the bootloader. ++ * While at it, also make sure the cores are running at known rates ++ * and print the current rate. ++ * ++ * The clocks are set to aux clock rate first to make sure the ++ * secondary mux is not sourcing off of QSB. The rate is then set to ++ * two different rates to force a HFPLL reinit under all ++ * circumstances. ++ */ ++ cur_rate = clk_get_rate(l2_pri_mux_clk); ++ aux_rate = 384000000; ++ if (cur_rate == 1) { ++ pr_info("L2 @ QSB rate. Forcing new rate.\n"); ++ cur_rate = aux_rate; ++ } ++ clk_set_rate(l2_pri_mux_clk, aux_rate); ++ clk_set_rate(l2_pri_mux_clk, 2); ++ clk_set_rate(l2_pri_mux_clk, cur_rate); ++ pr_info("L2 @ %lu KHz\n", clk_get_rate(l2_pri_mux_clk) / 1000); ++ for_each_possible_cpu(cpu) { ++ clk = clks[cpu]; ++ cur_rate = clk_get_rate(clk); ++ if (cur_rate == 1) { ++ pr_info("CPU%d @ QSB rate. 
Forcing new rate.\n", cpu); ++ cur_rate = aux_rate; ++ } ++ ++ clk_set_rate(clk, aux_rate); ++ clk_set_rate(clk, 2); ++ clk_set_rate(clk, cur_rate); ++ pr_info("CPU%d @ %lu KHz\n", cpu, clk_get_rate(clk) / 1000); ++ } ++ ++ of_clk_add_provider(dev->of_node, krait_of_get, clks); ++ ++ return 0; ++} ++ ++static struct platform_driver krait_cc_driver = { ++ .probe = krait_cc_probe, ++ .driver = { ++ .name = "krait-cc", ++ .of_match_table = krait_cc_match_table, ++ }, ++}; ++module_platform_driver(krait_cc_driver); ++ ++MODULE_DESCRIPTION("Krait CPU Clock Driver"); ++MODULE_LICENSE("GPL v2"); ++MODULE_ALIAS("platform:krait-cc"); diff --git a/target/linux/ipq40xx/patches-4.9/0044-clk-Add-safe-switch-hook.patch b/target/linux/ipq40xx/patches-4.9/0044-clk-Add-safe-switch-hook.patch new file mode 100644 index 000000000..d0eddc64c --- /dev/null +++ b/target/linux/ipq40xx/patches-4.9/0044-clk-Add-safe-switch-hook.patch @@ -0,0 +1,160 @@ +From patchwork Fri Dec 8 09:42:28 2017 +Content-Type: text/plain; charset="utf-8" +MIME-Version: 1.0 +Content-Transfer-Encoding: 7bit +Subject: [v4,10/12] clk: qcom: Add safe switch hook for krait mux clocks +From: Sricharan R +X-Patchwork-Id: 10102057 +Message-Id: <1512726150-7204-11-git-send-email-sricharan@codeaurora.org> +To: mturquette@baylibre.com, sboyd@codeaurora.org, + devicetree@vger.kernel.org, linux-pm@vger.kernel.org, + linux-arm-msm@vger.kernel.org, linux-kernel@vger.kernel.org, + viresh.kumar@linaro.org, linux-arm-kernel@lists.infradead.org +Cc: sricharan@codeaurora.org +Date: Fri, 8 Dec 2017 15:12:28 +0530 + +When the Hfplls are reprogrammed during the rate change, +the primary muxes which are sourced from the same hfpll +for higher frequencies, needs to be switched to the 'safe +secondary mux' as the parent for that small window. This +is done by registering a clk notifier for the muxes and +switching to the safe parent in the PRE_RATE_CHANGE notifier +and back to the original parent in the POST_RATE_CHANGE notifier. 
+ +Signed-off-by: Sricharan R +--- + drivers/clk/qcom/clk-krait.c | 2 ++ + drivers/clk/qcom/clk-krait.h | 3 +++ + drivers/clk/qcom/krait-cc.c | 56 ++++++++++++++++++++++++++++++++++++++++++++ + 3 files changed, 61 insertions(+) + +--- a/drivers/clk/qcom/clk-krait.c ++++ b/drivers/clk/qcom/clk-krait.c +@@ -60,6 +60,8 @@ static int krait_mux_set_parent(struct c + if (__clk_is_enabled(hw->clk)) + __krait_mux_set_sel(mux, sel); + ++ mux->reparent = true; ++ + return 0; + } + +--- a/drivers/clk/qcom/clk-krait.h ++++ b/drivers/clk/qcom/clk-krait.h +@@ -23,6 +23,9 @@ struct krait_mux_clk { + u32 shift; + u32 en_mask; + bool lpl; ++ u8 safe_sel; ++ u8 old_index; ++ bool reparent; + + struct clk_hw hw; + struct notifier_block clk_nb; +--- a/drivers/clk/qcom/krait-cc.c ++++ b/drivers/clk/qcom/krait-cc.c +@@ -35,6 +35,49 @@ static unsigned int pri_mux_map[] = { + 0, + }; + ++/* ++ * Notifier function for switching the muxes to safe parent ++ * while the hfpll is getting reprogrammed. ++ */ ++static int krait_notifier_cb(struct notifier_block *nb, ++ unsigned long event, ++ void *data) ++{ ++ int ret = 0; ++ struct krait_mux_clk *mux = container_of(nb, struct krait_mux_clk, ++ clk_nb); ++ /* Switch to safe parent */ ++ if (event == PRE_RATE_CHANGE) { ++ mux->old_index = krait_mux_clk_ops.get_parent(&mux->hw); ++ ret = krait_mux_clk_ops.set_parent(&mux->hw, mux->safe_sel); ++ mux->reparent = false; ++ /* ++ * By the time POST_RATE_CHANGE notifier is called, ++ * clk framework itself would have changed the parent for the new rate. ++ * Only otherwise, put back to the old parent. 
++ */ ++ } else if (event == POST_RATE_CHANGE) { ++ if (!mux->reparent) ++ ret = krait_mux_clk_ops.set_parent(&mux->hw, ++ mux->old_index); ++ } ++ ++ return notifier_from_errno(ret); ++} ++ ++static int krait_notifier_register(struct device *dev, struct clk *clk, ++ struct krait_mux_clk *mux) ++{ ++ int ret = 0; ++ ++ mux->clk_nb.notifier_call = krait_notifier_cb; ++ ret = clk_notifier_register(clk, &mux->clk_nb); ++ if (ret) ++ dev_err(dev, "failed to register clock notifier: %d\n", ret); ++ ++ return ret; ++} ++ + static int + krait_add_div(struct device *dev, int id, const char *s, unsigned int offset) + { +@@ -79,6 +122,7 @@ static int + krait_add_sec_mux(struct device *dev, int id, const char *s, + unsigned int offset, bool unique_aux) + { ++ int ret; + struct krait_mux_clk *mux; + static const char *sec_mux_list[] = { + "acpu_aux", +@@ -102,6 +146,7 @@ krait_add_sec_mux(struct device *dev, in + mux->shift = 2; + mux->parent_map = sec_mux_map; + mux->hw.init = &init; ++ mux->safe_sel = 0; + + init.name = kasprintf(GFP_KERNEL, "krait%s_sec_mux", s); + if (!init.name) +@@ -117,6 +162,11 @@ krait_add_sec_mux(struct device *dev, in + + clk = devm_clk_register(dev, &mux->hw); + ++ ret = krait_notifier_register(dev, clk, mux); ++ if (ret) ++ goto unique_aux; ++ ++unique_aux: + if (unique_aux) + kfree(sec_mux_list[0]); + err_aux: +@@ -128,6 +178,7 @@ static struct clk * + krait_add_pri_mux(struct device *dev, int id, const char *s, + unsigned int offset) + { ++ int ret; + struct krait_mux_clk *mux; + const char *p_names[3]; + struct clk_init_data init = { +@@ -148,6 +199,7 @@ krait_add_pri_mux(struct device *dev, in + mux->lpl = id >= 0; + mux->parent_map = pri_mux_map; + mux->hw.init = &init; ++ mux->safe_sel = 2; + + init.name = kasprintf(GFP_KERNEL, "krait%s_pri_mux", s); + if (!init.name) +@@ -173,6 +225,10 @@ krait_add_pri_mux(struct device *dev, in + + clk = devm_clk_register(dev, &mux->hw); + ++ ret = krait_notifier_register(dev, clk, mux); ++ if (ret) ++ 
goto err_p3; ++err_p3: + kfree(p_names[2]); + err_p2: + kfree(p_names[1]); diff --git a/target/linux/ipq40xx/patches-4.9/0045-cpufreq-Add-module-to-register-cpufreq-on-Krait-CPUs.patch b/target/linux/ipq40xx/patches-4.9/0045-cpufreq-Add-module-to-register-cpufreq-on-Krait-CPUs.patch new file mode 100644 index 000000000..820d13f3c --- /dev/null +++ b/target/linux/ipq40xx/patches-4.9/0045-cpufreq-Add-module-to-register-cpufreq-on-Krait-CPUs.patch @@ -0,0 +1,307 @@ +From patchwork Fri Dec 8 09:42:29 2017 +Content-Type: text/plain; charset="utf-8" +MIME-Version: 1.0 +Content-Transfer-Encoding: 7bit +Subject: [v4,11/12] cpufreq: Add module to register cpufreq on Krait CPUs +From: Sricharan R +X-Patchwork-Id: 10102075 +Message-Id: <1512726150-7204-12-git-send-email-sricharan@codeaurora.org> +To: mturquette@baylibre.com, sboyd@codeaurora.org, + devicetree@vger.kernel.org, linux-pm@vger.kernel.org, + linux-arm-msm@vger.kernel.org, linux-kernel@vger.kernel.org, + viresh.kumar@linaro.org, linux-arm-kernel@lists.infradead.org +Cc: sricharan@codeaurora.org +Date: Fri, 8 Dec 2017 15:12:29 +0530 + +From: Stephen Boyd + +Register a cpufreq-generic device whenever we detect that a +"qcom,krait" compatible CPU is present in DT. + +Cc: +Signed-off-by: Stephen Boyd +--- + .../devicetree/bindings/arm/msm/qcom,pvs.txt | 38 ++++ + drivers/cpufreq/Kconfig.arm | 9 + + drivers/cpufreq/Makefile | 1 + + drivers/cpufreq/qcom-cpufreq.c | 204 +++++++++++++++++++++ + 4 files changed, 252 insertions(+) + create mode 100644 Documentation/devicetree/bindings/arm/msm/qcom,pvs.txt + create mode 100644 drivers/cpufreq/qcom-cpufreq.c + +--- /dev/null ++++ b/Documentation/devicetree/bindings/arm/msm/qcom,pvs.txt +@@ -0,0 +1,38 @@ ++Qualcomm Process Voltage Scaling Tables ++ ++The node name is required to be "qcom,pvs". There shall only be one ++such node present in the root of the tree. 
++ ++PROPERTIES ++ ++- qcom,pvs-format-a or qcom,pvs-format-b: ++ Usage: required ++ Value type: ++ Definition: Indicates the format of qcom,speedX-pvsY-bin-vZ properties. ++ If qcom,pvs-format-a is used the table is two columns ++ (frequency and voltage in that order). If qcom,pvs-format-b is used the table is three columns (frequency, voltage, ++ and current in that order). ++ ++- qcom,speedX-pvsY-bin-vZ: ++ Usage: required ++ Value type: ++ Definition: The PVS table corresponding to the speed bin X, pvs bin Y, ++ and version Z. ++Example: ++ ++ qcom,pvs { ++ qcom,pvs-format-a; ++ qcom,speed0-pvs0-bin-v0 = ++ < 384000000 950000 >, ++ < 486000000 975000 >, ++ < 594000000 1000000 >, ++ < 702000000 1025000 >, ++ < 810000000 1075000 >, ++ < 918000000 1100000 >, ++ < 1026000000 1125000 >, ++ < 1134000000 1175000 >, ++ < 1242000000 1200000 >, ++ < 1350000000 1225000 >, ++ < 1458000000 1237500 >, ++ < 1512000000 1250000 >; ++ }; +--- a/drivers/cpufreq/Kconfig.arm ++++ b/drivers/cpufreq/Kconfig.arm +@@ -88,6 +88,15 @@ config ARM_OMAP2PLUS_CPUFREQ + depends on ARCH_OMAP2PLUS + default ARCH_OMAP2PLUS + ++config ARM_QCOM_CPUFREQ ++ tristate "Qualcomm based" ++ depends on ARCH_QCOM ++ select PM_OPP ++ help ++ This adds the CPUFreq driver for Qualcomm SoC based boards. ++ ++ If in doubt, say N. ++ + config ARM_S3C_CPUFREQ + bool + help +--- a/drivers/cpufreq/Makefile ++++ b/drivers/cpufreq/Makefile +@@ -62,6 +62,7 @@ obj-$(CONFIG_ARM_MT8173_CPUFREQ) += mt81 + obj-$(CONFIG_ARM_OMAP2PLUS_CPUFREQ) += omap-cpufreq.o + obj-$(CONFIG_ARM_PXA2xx_CPUFREQ) += pxa2xx-cpufreq.o + obj-$(CONFIG_PXA3xx) += pxa3xx-cpufreq.o ++obj-$(CONFIG_ARM_QCOM_CPUFREQ) += qcom-cpufreq.o + obj-$(CONFIG_ARM_S3C24XX_CPUFREQ) += s3c24xx-cpufreq.o + obj-$(CONFIG_ARM_S3C24XX_CPUFREQ_DEBUGFS) += s3c24xx-cpufreq-debugfs.o + obj-$(CONFIG_ARM_S3C2410_CPUFREQ) += s3c2410-cpufreq.o +--- /dev/null ++++ b/drivers/cpufreq/qcom-cpufreq.c +@@ -0,0 +1,204 @@ ++/* Copyright (c) 2014, The Linux Foundation. 
All rights reserved. ++ * ++ * This program is free software; you can redistribute it and/or modify ++ * it under the terms of the GNU General Public License version 2 and ++ * only version 2 as published by the Free Software Foundation. ++ * ++ * This program is distributed in the hope that it will be useful, ++ * but WITHOUT ANY WARRANTY; without even the implied warranty of ++ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the ++ * GNU General Public License for more details. ++ */ ++ ++#include ++#include ++#include ++#include ++#include ++#include ++#include ++#include ++#include ++#include ++#include "cpufreq-dt.h" ++ ++static void __init get_krait_bin_format_a(int *speed, int *pvs, int *pvs_ver) ++{ ++ void __iomem *base; ++ u32 pte_efuse; ++ ++ *speed = *pvs = *pvs_ver = 0; ++ ++ base = ioremap(0x007000c0, 4); ++ if (!base) { ++ pr_warn("Unable to read efuse data. Defaulting to 0!\n"); ++ return; ++ } ++ ++ pte_efuse = readl_relaxed(base); ++ iounmap(base); ++ ++ *speed = pte_efuse & 0xf; ++ if (*speed == 0xf) ++ *speed = (pte_efuse >> 4) & 0xf; ++ ++ if (*speed == 0xf) { ++ *speed = 0; ++ pr_warn("Speed bin: Defaulting to %d\n", *speed); ++ } else { ++ pr_info("Speed bin: %d\n", *speed); ++ } ++ ++ *pvs = (pte_efuse >> 10) & 0x7; ++ if (*pvs == 0x7) ++ *pvs = (pte_efuse >> 13) & 0x7; ++ ++ if (*pvs == 0x7) { ++ *pvs = 0; ++ pr_warn("PVS bin: Defaulting to %d\n", *pvs); ++ } else { ++ pr_info("PVS bin: %d\n", *pvs); ++ } ++} ++ ++static void __init get_krait_bin_format_b(int *speed, int *pvs, int *pvs_ver) ++{ ++ u32 pte_efuse, redundant_sel; ++ void __iomem *base; ++ ++ *speed = 0; ++ *pvs = 0; ++ *pvs_ver = 0; ++ ++ base = ioremap(0xfc4b80b0, 8); ++ if (!base) { ++ pr_warn("Unable to read efuse data. Defaulting to 0!\n"); ++ return; ++ } ++ ++ pte_efuse = readl_relaxed(base); ++ redundant_sel = (pte_efuse >> 24) & 0x7; ++ *speed = pte_efuse & 0x7; ++ /* 4 bits of PVS are in efuse register bits 31, 8-6. 
*/ ++ *pvs = ((pte_efuse >> 28) & 0x8) | ((pte_efuse >> 6) & 0x7); ++ *pvs_ver = (pte_efuse >> 4) & 0x3; ++ ++ switch (redundant_sel) { ++ case 1: ++ *speed = (pte_efuse >> 27) & 0xf; ++ break; ++ case 2: ++ *pvs = (pte_efuse >> 27) & 0xf; ++ break; ++ } ++ ++ /* Check SPEED_BIN_BLOW_STATUS */ ++ if (pte_efuse & BIT(3)) { ++ pr_info("Speed bin: %d\n", *speed); ++ } else { ++ pr_warn("Speed bin not set. Defaulting to 0!\n"); ++ *speed = 0; ++ } ++ ++ /* Check PVS_BLOW_STATUS */ ++ pte_efuse = readl_relaxed(base + 0x4) & BIT(21); ++ if (pte_efuse) { ++ pr_info("PVS bin: %d\n", *pvs); ++ } else { ++ pr_warn("PVS bin not set. Defaulting to 0!\n"); ++ *pvs = 0; ++ } ++ ++ pr_info("PVS version: %d\n", *pvs_ver); ++ iounmap(base); ++} ++ ++static int __init qcom_cpufreq_populate_opps(void) ++{ ++ int len, rows, cols, i, k, speed, pvs, pvs_ver; ++ char table_name[] = "qcom,speedXX-pvsXX-bin-vXX"; ++ struct device_node *np; ++ struct device *dev; ++ int cpu = 0; ++ ++ np = of_find_node_by_name(NULL, "qcom,pvs"); ++ if (!np) ++ return -ENODEV; ++ ++ if (of_property_read_bool(np, "qcom,pvs-format-a")) { ++ get_krait_bin_format_a(&speed, &pvs, &pvs_ver); ++ cols = 2; ++ } else if (of_property_read_bool(np, "qcom,pvs-format-b")) { ++ get_krait_bin_format_b(&speed, &pvs, &pvs_ver); ++ cols = 3; ++ } else { ++ return -ENODEV; ++ } ++ ++ snprintf(table_name, sizeof(table_name), ++ "qcom,speed%d-pvs%d-bin-v%d", speed, pvs, pvs_ver); ++ ++ if (!of_find_property(np, table_name, &len)) ++ return -EINVAL; ++ ++ len /= sizeof(u32); ++ if (len % cols || len == 0) ++ return -EINVAL; ++ ++ rows = len / cols; ++ ++ for (i = 0, k = 0; i < rows; i++) { ++ u32 freq, volt; ++ ++ of_property_read_u32_index(np, table_name, k++, &freq); ++ of_property_read_u32_index(np, table_name, k++, &volt); ++ while (k % cols) ++ k++; /* Skip uA entries if present */ ++ for (cpu = 0; cpu < num_possible_cpus(); cpu++) { ++ dev = get_cpu_device(cpu); ++ if (!dev) ++ return -ENODEV; ++ if (dev_pm_opp_add(dev, 
freq, volt)) ++ pr_warn("failed to add OPP %u\n", freq); ++ } ++ } ++ ++ return 0; ++} ++ ++static int __init qcom_cpufreq_driver_init(void) ++{ ++ struct cpufreq_dt_platform_data pdata = { .independent_clocks = true }; ++ struct platform_device_info devinfo = { ++ .name = "cpufreq-dt", ++ .data = &pdata, ++ .size_data = sizeof(pdata), ++ }; ++ struct device *cpu_dev; ++ struct device_node *np; ++ int ret; ++ ++ cpu_dev = get_cpu_device(0); ++ if (!cpu_dev) ++ return -ENODEV; ++ ++ np = of_node_get(cpu_dev->of_node); ++ if (!np) ++ return -ENOENT; ++ ++ if (!of_device_is_compatible(np, "qcom,krait")) { ++ of_node_put(np); ++ return -ENODEV; ++ } ++ of_node_put(np); ++ ++ ret = qcom_cpufreq_populate_opps(); ++ if (ret) ++ return ret; ++ ++ return PTR_ERR_OR_ZERO(platform_device_register_full(&devinfo)); ++} ++module_init(qcom_cpufreq_driver_init); ++ ++MODULE_DESCRIPTION("Qualcomm CPUfreq driver"); ++MODULE_LICENSE("GPL v2"); diff --git a/target/linux/ipq40xx/patches-4.9/0046-cpufreq-qcom-independent-core-clocks.patch b/target/linux/ipq40xx/patches-4.9/0046-cpufreq-qcom-independent-core-clocks.patch new file mode 100644 index 000000000..d767dbf5f --- /dev/null +++ b/target/linux/ipq40xx/patches-4.9/0046-cpufreq-qcom-independent-core-clocks.patch @@ -0,0 +1,66 @@ +From patchwork Fri Dec 8 09:42:30 2017 +Content-Type: text/plain; charset="utf-8" +MIME-Version: 1.0 +Content-Transfer-Encoding: 7bit +Subject: [v4,12/12] cpufreq: dt: Reintroduce independent_clocks platform data +From: Sricharan R +X-Patchwork-Id: 10102073 +Message-Id: <1512726150-7204-13-git-send-email-sricharan@codeaurora.org> +To: mturquette@baylibre.com, sboyd@codeaurora.org, + devicetree@vger.kernel.org, linux-pm@vger.kernel.org, + linux-arm-msm@vger.kernel.org, linux-kernel@vger.kernel.org, + viresh.kumar@linaro.org, linux-arm-kernel@lists.infradead.org +Cc: sricharan@codeaurora.org +Date: Fri, 8 Dec 2017 15:12:30 +0530 + +The Platform data was removed earlier by, +'commit eb96924acddc ("cpufreq: dt: 
Kill platform-data")' +since there were no users at that time. +Now this is required when the each of the cpu clocks +can be scaled independently, which is the case +for krait cores. So reintroduce it. + +Signed-off-by: Sricharan R +--- + drivers/cpufreq/cpufreq-dt.c | 7 ++++++- + drivers/cpufreq/cpufreq-dt.h | 6 ++++++ + 2 files changed, 12 insertions(+), 1 deletion(-) + +--- a/drivers/cpufreq/cpufreq-dt.c ++++ b/drivers/cpufreq/cpufreq-dt.c +@@ -221,7 +221,10 @@ static int cpufreq_init(struct cpufreq_p + } + + if (fallback) { +- cpumask_setall(policy->cpus); ++ struct cpufreq_dt_platform_data *pd = cpufreq_get_driver_data(); ++ ++ if (!pd || !pd->independent_clocks) ++ cpumask_setall(policy->cpus); + + /* + * OPP tables are initialized only for policy->cpu, do it for +@@ -376,6 +379,8 @@ static int dt_cpufreq_probe(struct platf + if (data && data->have_governor_per_policy) + dt_cpufreq_driver.flags |= CPUFREQ_HAVE_GOVERNOR_PER_POLICY; + ++ dt_cpufreq_driver.driver_data = data; ++ + ret = cpufreq_register_driver(&dt_cpufreq_driver); + if (ret) + dev_err(&pdev->dev, "failed register driver: %d\n", ret); +--- a/drivers/cpufreq/cpufreq-dt.h ++++ b/drivers/cpufreq/cpufreq-dt.h +@@ -13,6 +13,12 @@ + #include + + struct cpufreq_dt_platform_data { ++ /* ++ * True when each CPU has its own clock to control its ++ * frequency, false when all CPUs are controlled by a single ++ * clock. 
++ */ ++ bool independent_clocks; + bool have_governor_per_policy; + }; + diff --git a/target/linux/ipq40xx/patches-4.9/0047-mtd-nand-Create-a-BBT-flag-to-access-bad-block-marke.patch b/target/linux/ipq40xx/patches-4.9/0047-mtd-nand-Create-a-BBT-flag-to-access-bad-block-marke.patch new file mode 100644 index 000000000..be4a210f0 --- /dev/null +++ b/target/linux/ipq40xx/patches-4.9/0047-mtd-nand-Create-a-BBT-flag-to-access-bad-block-marke.patch @@ -0,0 +1,72 @@ +From c7c6a0f50f9ac3620c611ce06ba1f9fafea0444e Mon Sep 17 00:00:00 2001 +From: Archit Taneja +Date: Mon, 3 Aug 2015 10:38:14 +0530 +Subject: [PATCH 47/69] mtd: nand: Create a BBT flag to access bad block + markers in raw mode + +Some controllers can access the factory bad block marker from OOB only +when they read it in raw mode. When ECC is enabled, these controllers +discard reading/writing bad block markers, preventing access to them +altogether. + +The bbt driver assumes MTD_OPS_PLACE_OOB when scanning for bad blocks. +This results in the nand driver's ecc->read_oob() op to be called, which +works with ECC enabled. + +Create a new BBT option flag that tells nand_bbt to force the mode to +MTD_OPS_RAW. This would result in the correct op being called for the +underlying nand controller driver. 
+ +Reviewed-by: Andy Gross +Signed-off-by: Archit Taneja +--- + drivers/mtd/nand/nand_base.c | 6 +++++- + drivers/mtd/nand/nand_bbt.c | 6 +++++- + include/linux/mtd/bbm.h | 6 ++++++ + 3 files changed, 16 insertions(+), 2 deletions(-) + +--- a/drivers/mtd/nand/nand_base.c ++++ b/drivers/mtd/nand/nand_base.c +@@ -488,7 +488,11 @@ static int nand_default_block_markbad(st + } else { + ops.len = ops.ooblen = 1; + } +- ops.mode = MTD_OPS_PLACE_OOB; ++ ++ if (unlikely(chip->bbt_options & NAND_BBT_ACCESS_BBM_RAW)) ++ ops.mode = MTD_OPS_RAW; ++ else ++ ops.mode = MTD_OPS_PLACE_OOB; + + /* Write to first/last page(s) if necessary */ + if (chip->bbt_options & NAND_BBT_SCANLASTPAGE) +--- a/drivers/mtd/nand/nand_bbt.c ++++ b/drivers/mtd/nand/nand_bbt.c +@@ -420,7 +420,11 @@ static int scan_block_fast(struct mtd_in + ops.oobbuf = buf; + ops.ooboffs = 0; + ops.datbuf = NULL; +- ops.mode = MTD_OPS_PLACE_OOB; ++ ++ if (unlikely(bd->options & NAND_BBT_ACCESS_BBM_RAW)) ++ ops.mode = MTD_OPS_RAW; ++ else ++ ops.mode = MTD_OPS_PLACE_OOB; + + for (j = 0; j < numpages; j++) { + /* +--- a/include/linux/mtd/bbm.h ++++ b/include/linux/mtd/bbm.h +@@ -116,6 +116,12 @@ struct nand_bbt_descr { + #define NAND_BBT_NO_OOB_BBM 0x00080000 + + /* ++ * Force MTD_OPS_RAW mode when trying to access bad block markes from OOB. To ++ * be used by controllers which can access BBM only when ECC is disabled, i.e, ++ * when in RAW access mode ++ */ ++#define NAND_BBT_ACCESS_BBM_RAW 0x00100000 ++/* + * Flag set by nand_create_default_bbt_descr(), marking that the nand_bbt_descr + * was allocated dynamicaly and must be freed in nand_release(). Has no meaning + * in nand_chip.bbt_options. 
diff --git a/target/linux/ipq40xx/patches-4.9/0048-PM-OPP-HACK-Allow-to-set-regulator-without-opp_list.patch b/target/linux/ipq40xx/patches-4.9/0048-PM-OPP-HACK-Allow-to-set-regulator-without-opp_list.patch new file mode 100644 index 000000000..da9c0dbef --- /dev/null +++ b/target/linux/ipq40xx/patches-4.9/0048-PM-OPP-HACK-Allow-to-set-regulator-without-opp_list.patch @@ -0,0 +1,27 @@ +From 5c294df1715d673f94f3b0a6e1ea3a426ca35e6e Mon Sep 17 00:00:00 2001 +From: Georgi Djakov +Date: Thu, 28 Apr 2016 16:20:12 +0300 +Subject: [PATCH 48/69] PM / OPP: HACK: Allow to set regulator without opp_list + +Signed-off-by: Georgi Djakov +--- + drivers/base/power/opp/core.c | 3 ++- + 1 file changed, 2 insertions(+), 1 deletion(-) + +--- a/drivers/base/power/opp/core.c ++++ b/drivers/base/power/opp/core.c +@@ -1339,12 +1339,13 @@ struct opp_table *dev_pm_opp_set_regulat + ret = -ENOMEM; + goto unlock; + } +- ++#if 0 + /* This should be called before OPPs are initialized */ + if (WARN_ON(!list_empty(&opp_table->opp_list))) { + ret = -EBUSY; + goto err; + } ++#endif + + /* Already have a regulator set */ + if (WARN_ON(!IS_ERR(opp_table->regulator))) { diff --git a/target/linux/ipq40xx/patches-4.9/0049-PM-OPP-Support-adjusting-OPP-voltages-at-runtime.patch b/target/linux/ipq40xx/patches-4.9/0049-PM-OPP-Support-adjusting-OPP-voltages-at-runtime.patch new file mode 100644 index 000000000..e2a4eede8 --- /dev/null +++ b/target/linux/ipq40xx/patches-4.9/0049-PM-OPP-Support-adjusting-OPP-voltages-at-runtime.patch @@ -0,0 +1,147 @@ +From c949f08cf20fe82971fbdb4015daa38210da492e Mon Sep 17 00:00:00 2001 +From: Stephen Boyd +Date: Fri, 18 Sep 2015 17:52:06 -0700 +Subject: [PATCH 49/69] PM / OPP: Support adjusting OPP voltages at runtime + +On some SoCs the Adaptive Voltage Scaling (AVS) technique is +employed to optimize the operating voltage of a device. 
At a +given frequency, the hardware monitors dynamic factors and either +makes a suggestion for how much to adjust a voltage for the +current frequency, or it automatically adjusts the voltage +without software intervention. Add an API to the OPP library for +the former case, so that AVS type devices can update the voltages +for an OPP when the hardware determines the voltage should +change. The assumption is that drivers like CPUfreq or devfreq +will register for the OPP notifiers and adjust the voltage +according to suggestions that AVS makes. + +Cc: Nishanth Menon +Acked-by: Viresh Kumar +Signed-off-by: Stephen Boyd +Acked-by: Viresh Kumar +Signed-off-by: Georgi Djakov +--- + drivers/base/power/opp/core.c | 77 +++++++++++++++++++++++++++++++++++++++++++ + include/linux/pm_opp.h | 11 +++++++ + 2 files changed, 88 insertions(+) + +--- a/drivers/base/power/opp/core.c ++++ b/drivers/base/power/opp/core.c +@@ -1521,6 +1521,83 @@ unlock: + } + + /** ++ * dev_pm_opp_adjust_voltage() - helper to change the voltage of an OPP ++ * @dev: device for which we do this operation ++ * @freq: OPP frequency to adjust voltage of ++ * @u_volt: new OPP voltage ++ * ++ * Change the voltage of an OPP with an RCU operation. ++ * ++ * Return: -EINVAL for bad pointers, -ENOMEM if no memory available for the ++ * copy operation, returns 0 if no modifcation was done OR modification was ++ * successful. ++ * ++ * Locking: The internal device_opp and opp structures are RCU protected. ++ * Hence this function internally uses RCU updater strategy with mutex locks to ++ * keep the integrity of the internal data structures. Callers should ensure ++ * that this function is *NOT* called under RCU protection or in contexts where ++ * mutex locking or synchronize_rcu() blocking calls cannot be used. 
++ */ ++int dev_pm_opp_adjust_voltage(struct device *dev, unsigned long freq, ++ unsigned long u_volt) ++{ ++ struct opp_table *opp_table; ++ struct dev_pm_opp *new_opp, *tmp_opp, *opp = ERR_PTR(-ENODEV); ++ int r = 0; ++ ++ /* keep the node allocated */ ++ new_opp = kmalloc(sizeof(*new_opp), GFP_KERNEL); ++ if (!new_opp) ++ return -ENOMEM; ++ ++ mutex_lock(&opp_table_lock); ++ ++ /* Find the opp_table */ ++ opp_table = _find_opp_table(dev); ++ if (IS_ERR(opp_table)) { ++ r = PTR_ERR(opp_table); ++ dev_warn(dev, "%s: Device OPP not found (%d)\n", __func__, r); ++ goto unlock; ++ } ++ ++ /* Do we have the frequency? */ ++ list_for_each_entry(tmp_opp, &opp_table->opp_list, node) { ++ if (tmp_opp->rate == freq) { ++ opp = tmp_opp; ++ break; ++ } ++ } ++ if (IS_ERR(opp)) { ++ r = PTR_ERR(opp); ++ goto unlock; ++ } ++ ++ /* Is update really needed? */ ++ if (opp->u_volt == u_volt) ++ goto unlock; ++ /* copy the old data over */ ++ *new_opp = *opp; ++ ++ /* plug in new node */ ++ new_opp->u_volt = u_volt; ++ ++ list_replace_rcu(&opp->node, &new_opp->node); ++ mutex_unlock(&opp_table_lock); ++ call_srcu(&opp_table->srcu_head.srcu, &opp->rcu_head, _kfree_opp_rcu); ++ ++ /* Notify the change of the OPP */ ++ srcu_notifier_call_chain(&opp_table->srcu_head, OPP_EVENT_ADJUST_VOLTAGE, ++ new_opp); ++ ++ return 0; ++ ++unlock: ++ mutex_unlock(&opp_table_lock); ++ kfree(new_opp); ++ return r; ++} ++ ++/** + * dev_pm_opp_enable() - Enable a specific OPP + * @dev: device for which we do this operation + * @freq: OPP frequency to enable +--- a/include/linux/pm_opp.h ++++ b/include/linux/pm_opp.h +@@ -23,6 +23,7 @@ struct opp_table; + + enum dev_pm_opp_event { + OPP_EVENT_ADD, OPP_EVENT_REMOVE, OPP_EVENT_ENABLE, OPP_EVENT_DISABLE, ++ OPP_EVENT_ADJUST_VOLTAGE, + }; + + #if defined(CONFIG_PM_OPP) +@@ -53,6 +54,9 @@ int dev_pm_opp_add(struct device *dev, u + unsigned long u_volt); + void dev_pm_opp_remove(struct device *dev, unsigned long freq); + ++int dev_pm_opp_adjust_voltage(struct 
device *dev, unsigned long freq, ++ unsigned long u_volt); ++ + int dev_pm_opp_enable(struct device *dev, unsigned long freq); + + int dev_pm_opp_disable(struct device *dev, unsigned long freq); +@@ -139,6 +143,13 @@ static inline void dev_pm_opp_remove(str + { + } + ++static inline int ++dev_pm_opp_adjust_voltage(struct device *dev, unsigned long freq, ++ unsigned long u_volt) ++{ ++ return 0; ++} ++ + static inline int dev_pm_opp_enable(struct device *dev, unsigned long freq) + { + return 0; diff --git a/target/linux/ipq40xx/patches-4.9/0050-OPP-Allow-notifiers-to-call-dev_pm_opp_get_-voltage-.patch b/target/linux/ipq40xx/patches-4.9/0050-OPP-Allow-notifiers-to-call-dev_pm_opp_get_-voltage-.patch new file mode 100644 index 000000000..7b41157fd --- /dev/null +++ b/target/linux/ipq40xx/patches-4.9/0050-OPP-Allow-notifiers-to-call-dev_pm_opp_get_-voltage-.patch @@ -0,0 +1,107 @@ +From 4a17bbfcf72c94b37079e39a7c1e1e8653f7fe92 Mon Sep 17 00:00:00 2001 +From: Stephen Boyd +Date: Fri, 18 Sep 2015 17:52:07 -0700 +Subject: [PATCH 50/69] OPP: Allow notifiers to call dev_pm_opp_get_{voltage, + freq} RCU-free + +We pass the dev_pm_opp structure to OPP notifiers but the users +of the notifier need to surround calls to dev_pm_opp_get_*() with +RCU read locks to avoid lockdep warnings. The notifier is already +called with the dev_opp's srcu lock held, so it should be safe to +assume the devm_pm_opp structure is already protected inside the +notifier. Update the lockdep check for this. 
+ +Cc: Krzysztof Kozlowski +Signed-off-by: Stephen Boyd +Acked-by: Viresh Kumar +Signed-off-by: Georgi Djakov +--- + drivers/base/power/opp/core.c | 19 ++++++++++--------- + 1 file changed, 10 insertions(+), 9 deletions(-) + +--- a/drivers/base/power/opp/core.c ++++ b/drivers/base/power/opp/core.c +@@ -32,9 +32,10 @@ LIST_HEAD(opp_tables); + /* Lock to allow exclusive modification to the device and opp lists */ + DEFINE_MUTEX(opp_table_lock); + +-#define opp_rcu_lockdep_assert() \ ++#define opp_rcu_lockdep_assert(s) \ + do { \ + RCU_LOCKDEP_WARN(!rcu_read_lock_held() && \ ++ !(s && srcu_read_lock_held(s)) && \ + !lockdep_is_held(&opp_table_lock), \ + "Missing rcu_read_lock() or " \ + "opp_table_lock protection"); \ +@@ -72,7 +73,7 @@ struct opp_table *_find_opp_table(struct + { + struct opp_table *opp_table; + +- opp_rcu_lockdep_assert(); ++ opp_rcu_lockdep_assert(NULL); + + if (IS_ERR_OR_NULL(dev)) { + pr_err("%s: Invalid parameters\n", __func__); +@@ -106,7 +107,7 @@ unsigned long dev_pm_opp_get_voltage(str + struct dev_pm_opp *tmp_opp; + unsigned long v = 0; + +- opp_rcu_lockdep_assert(); ++ opp_rcu_lockdep_assert(NULL); + + tmp_opp = rcu_dereference(opp); + if (IS_ERR_OR_NULL(tmp_opp)) +@@ -138,7 +139,7 @@ unsigned long dev_pm_opp_get_freq(struct + struct dev_pm_opp *tmp_opp; + unsigned long f = 0; + +- opp_rcu_lockdep_assert(); ++ opp_rcu_lockdep_assert(NULL); + + tmp_opp = rcu_dereference(opp); + if (IS_ERR_OR_NULL(tmp_opp) || !tmp_opp->available) +@@ -172,7 +173,7 @@ bool dev_pm_opp_is_turbo(struct dev_pm_o + { + struct dev_pm_opp *tmp_opp; + +- opp_rcu_lockdep_assert(); ++ opp_rcu_lockdep_assert(NULL); + + tmp_opp = rcu_dereference(opp); + if (IS_ERR_OR_NULL(tmp_opp) || !tmp_opp->available) { +@@ -300,7 +301,7 @@ struct dev_pm_opp *dev_pm_opp_get_suspen + { + struct opp_table *opp_table; + +- opp_rcu_lockdep_assert(); ++ opp_rcu_lockdep_assert(NULL); + + opp_table = _find_opp_table(dev); + if (IS_ERR(opp_table) || !opp_table->suspend_opp || +@@ -380,7 
+381,7 @@ struct dev_pm_opp *dev_pm_opp_find_freq_ + struct opp_table *opp_table; + struct dev_pm_opp *temp_opp, *opp = ERR_PTR(-ERANGE); + +- opp_rcu_lockdep_assert(); ++ opp_rcu_lockdep_assert(NULL); + + opp_table = _find_opp_table(dev); + if (IS_ERR(opp_table)) { +@@ -444,7 +445,7 @@ struct dev_pm_opp *dev_pm_opp_find_freq_ + { + struct opp_table *opp_table; + +- opp_rcu_lockdep_assert(); ++ opp_rcu_lockdep_assert(NULL); + + if (!dev || !freq) { + dev_err(dev, "%s: Invalid argument freq=%p\n", __func__, freq); +@@ -486,7 +487,7 @@ struct dev_pm_opp *dev_pm_opp_find_freq_ + struct opp_table *opp_table; + struct dev_pm_opp *temp_opp, *opp = ERR_PTR(-ERANGE); + +- opp_rcu_lockdep_assert(); ++ opp_rcu_lockdep_assert(NULL); + + if (!dev || !freq) { + dev_err(dev, "%s: Invalid argument freq=%p\n", __func__, freq); diff --git a/target/linux/ipq40xx/patches-4.9/0051-PM-OPP-Add-a-helper-to-get-an-opp-regulator-for-devi.patch b/target/linux/ipq40xx/patches-4.9/0051-PM-OPP-Add-a-helper-to-get-an-opp-regulator-for-devi.patch new file mode 100644 index 000000000..fc1a36efc --- /dev/null +++ b/target/linux/ipq40xx/patches-4.9/0051-PM-OPP-Add-a-helper-to-get-an-opp-regulator-for-devi.patch @@ -0,0 +1,52 @@ +From d06ca5e7a3cf726f5be5ffd96e93ccd798b8c09a Mon Sep 17 00:00:00 2001 +From: Georgi Djakov +Date: Thu, 12 May 2016 14:41:33 +0300 +Subject: [PATCH 51/69] PM / OPP: Add a helper to get an opp regulator for + device + +Signed-off-by: Georgi Djakov +--- + drivers/base/power/opp/core.c | 21 +++++++++++++++++++++ + include/linux/pm_opp.h | 1 + + 2 files changed, 22 insertions(+) + +--- a/drivers/base/power/opp/core.c ++++ b/drivers/base/power/opp/core.c +@@ -151,6 +151,27 @@ unsigned long dev_pm_opp_get_freq(struct + } + EXPORT_SYMBOL_GPL(dev_pm_opp_get_freq); + ++struct regulator *dev_pm_opp_get_regulator(struct device *dev) ++{ ++ struct opp_table *opp_table; ++ struct regulator *reg; ++ ++ rcu_read_lock(); ++ ++ opp_table = _find_opp_table(dev); ++ if (IS_ERR(opp_table)) { 
++ rcu_read_unlock(); ++ return ERR_CAST(opp_table); ++ } ++ ++ reg = opp_table->regulator; ++ ++ rcu_read_unlock(); ++ ++ return reg; ++} ++EXPORT_SYMBOL_GPL(dev_pm_opp_get_regulator); ++ + /** + * dev_pm_opp_is_turbo() - Returns if opp is turbo OPP or not + * @opp: opp for which turbo mode is being verified +--- a/include/linux/pm_opp.h ++++ b/include/linux/pm_opp.h +@@ -31,6 +31,7 @@ enum dev_pm_opp_event { + unsigned long dev_pm_opp_get_voltage(struct dev_pm_opp *opp); + + unsigned long dev_pm_opp_get_freq(struct dev_pm_opp *opp); ++struct regulator *dev_pm_opp_get_regulator(struct device *dev); + + bool dev_pm_opp_is_turbo(struct dev_pm_opp *opp); + diff --git a/target/linux/ipq40xx/patches-4.9/0052-PM-OPP-Update-the-voltage-tolerance-when-adjusting-t.patch b/target/linux/ipq40xx/patches-4.9/0052-PM-OPP-Update-the-voltage-tolerance-when-adjusting-t.patch new file mode 100644 index 000000000..9065911d5 --- /dev/null +++ b/target/linux/ipq40xx/patches-4.9/0052-PM-OPP-Update-the-voltage-tolerance-when-adjusting-t.patch @@ -0,0 +1,38 @@ +From 4533c285c2aedce6d4434d7b877066de3b1ecb33 Mon Sep 17 00:00:00 2001 +From: Georgi Djakov +Date: Thu, 25 Aug 2016 18:43:35 +0300 +Subject: [PATCH 52/69] PM / OPP: Update the voltage tolerance when adjusting + the OPP + +When the voltage is adjusted, the voltage tolerance is not updated. +This can lead to situations where the voltage min value is greater +than the voltage max value. The final result is triggering a BUG() +in the regulator core. +Fix this by updating the voltage tolerance values too. 
+ +Signed-off-by: Georgi Djakov +--- + drivers/base/power/opp/core.c | 5 +++++ + 1 file changed, 5 insertions(+) + +--- a/drivers/base/power/opp/core.c ++++ b/drivers/base/power/opp/core.c +@@ -1566,6 +1566,7 @@ int dev_pm_opp_adjust_voltage(struct dev + struct opp_table *opp_table; + struct dev_pm_opp *new_opp, *tmp_opp, *opp = ERR_PTR(-ENODEV); + int r = 0; ++ unsigned long tol; + + /* keep the node allocated */ + new_opp = kmalloc(sizeof(*new_opp), GFP_KERNEL); +@@ -1602,6 +1603,10 @@ int dev_pm_opp_adjust_voltage(struct dev + + /* plug in new node */ + new_opp->u_volt = u_volt; ++ tol = u_volt * opp_table->voltage_tolerance_v1 / 100; ++ new_opp->u_volt = u_volt; ++ new_opp->u_volt_min = u_volt - tol; ++ new_opp->u_volt_max = u_volt + tol; + + list_replace_rcu(&opp->node, &new_opp->node); + mutex_unlock(&opp_table_lock); diff --git a/target/linux/ipq40xx/patches-4.9/0053-regulator-add-smb208-support.patch b/target/linux/ipq40xx/patches-4.9/0053-regulator-add-smb208-support.patch new file mode 100644 index 000000000..0d2862c60 --- /dev/null +++ b/target/linux/ipq40xx/patches-4.9/0053-regulator-add-smb208-support.patch @@ -0,0 +1,55 @@ +From ef10381ca4d01848ebedb4afb2c78feb8052f103 Mon Sep 17 00:00:00 2001 +From: Adrian Panella +Date: Thu, 9 Mar 2017 08:26:54 +0100 +Subject: [PATCH 53/69] regulator: add smb208 support + +Signed-off-by: Adrian Panella +--- + Documentation/devicetree/bindings/mfd/qcom-rpm.txt | 4 ++++ + drivers/regulator/qcom_rpm-regulator.c | 9 +++++++++ + 2 files changed, 13 insertions(+) + +--- a/Documentation/devicetree/bindings/mfd/qcom-rpm.txt ++++ b/Documentation/devicetree/bindings/mfd/qcom-rpm.txt +@@ -61,6 +61,7 @@ Regulator nodes are identified by their + "qcom,rpm-pm8901-regulators" + "qcom,rpm-pm8921-regulators" + "qcom,rpm-pm8018-regulators" ++ "qcom,rpm-smb208-regulators" + + - vdd_l0_l1_lvs-supply: + - vdd_l2_l11_l12-supply: +@@ -171,6 +172,9 @@ pm8018: + s1, s2, s3, s4, s5, , l1, l2, l3, l4, l5, l6, l7, l8, l9, l10, l11, + l12, l14, 
lvs1 + ++smb208: ++ s1a, s1b, s2a, s2b ++ + The content of each sub-node is defined by the standard binding for regulators - + see regulator.txt - with additional custom properties described below: + +--- a/drivers/regulator/qcom_rpm-regulator.c ++++ b/drivers/regulator/qcom_rpm-regulator.c +@@ -933,12 +933,21 @@ static const struct rpm_regulator_data r + { } + }; + ++static const struct rpm_regulator_data rpm_smb208_regulators[] = { ++ { "s1a", QCOM_RPM_SMB208_S1a, &smb208_smps, "vin_s1a" }, ++ { "s1b", QCOM_RPM_SMB208_S1b, &smb208_smps, "vin_s1b" }, ++ { "s2a", QCOM_RPM_SMB208_S2a, &smb208_smps, "vin_s2a" }, ++ { "s2b", QCOM_RPM_SMB208_S2b, &smb208_smps, "vin_s2b" }, ++ { } ++}; ++ + static const struct of_device_id rpm_of_match[] = { + { .compatible = "qcom,rpm-pm8018-regulators", + .data = &rpm_pm8018_regulators }, + { .compatible = "qcom,rpm-pm8058-regulators", .data = &rpm_pm8058_regulators }, + { .compatible = "qcom,rpm-pm8901-regulators", .data = &rpm_pm8901_regulators }, + { .compatible = "qcom,rpm-pm8921-regulators", .data = &rpm_pm8921_regulators }, ++ { .compatible = "qcom,rpm-smb208-regulators", .data = &rpm_smb208_regulators }, + { } + }; + MODULE_DEVICE_TABLE(of, rpm_of_match); diff --git a/target/linux/ipq40xx/patches-4.9/0054-cpufreq-dt-Handle-OPP-voltage-adjust-events.patch b/target/linux/ipq40xx/patches-4.9/0054-cpufreq-dt-Handle-OPP-voltage-adjust-events.patch new file mode 100644 index 000000000..7cd6c6b5a --- /dev/null +++ b/target/linux/ipq40xx/patches-4.9/0054-cpufreq-dt-Handle-OPP-voltage-adjust-events.patch @@ -0,0 +1,144 @@ +From 10577f74c35bd395951d1b2382c8d821089b5745 Mon Sep 17 00:00:00 2001 +From: Stephen Boyd +Date: Fri, 18 Sep 2015 17:52:08 -0700 +Subject: [PATCH 54/69] cpufreq-dt: Handle OPP voltage adjust events + +On some SoCs the Adaptive Voltage Scaling (AVS) technique is +employed to optimize the operating voltage of a device. 
At a +given frequency, the hardware monitors dynamic factors and either +makes a suggestion for how much to adjust a voltage for the +current frequency, or it automatically adjusts the voltage +without software intervention. + +In the former case, an AVS driver will call +dev_pm_opp_modify_voltage() and update the voltage for the +particular OPP the CPUs are using. Add an OPP notifier to +cpufreq-dt so that we can adjust the voltage of the CPU when AVS +updates the OPP. + +Signed-off-by: Stephen Boyd +Acked-by: Viresh Kumar +Signed-off-by: Georgi Djakov +--- + drivers/cpufreq/cpufreq-dt.c | 68 ++++++++++++++++++++++++++++++++++++++++++-- + 1 file changed, 65 insertions(+), 3 deletions(-) + +--- a/drivers/cpufreq/cpufreq-dt.c ++++ b/drivers/cpufreq/cpufreq-dt.c +@@ -32,6 +32,9 @@ struct private_data { + struct device *cpu_dev; + struct thermal_cooling_device *cdev; + const char *reg_name; ++ struct notifier_block opp_nb; ++ struct mutex lock; ++ unsigned long opp_freq; + }; + + static struct freq_attr *cpufreq_dt_attr[] = { +@@ -43,9 +46,16 @@ static struct freq_attr *cpufreq_dt_attr + static int set_target(struct cpufreq_policy *policy, unsigned int index) + { + struct private_data *priv = policy->driver_data; ++ int ret; ++ unsigned long target_freq = policy->freq_table[index].frequency * 1000; ++ ++ mutex_lock(&priv->lock); ++ ret = dev_pm_opp_set_rate(priv->cpu_dev, target_freq); ++ if (!ret) ++ priv->opp_freq = target_freq; ++ mutex_unlock(&priv->lock); + +- return dev_pm_opp_set_rate(priv->cpu_dev, +- policy->freq_table[index].frequency * 1000); ++ return ret; + } + + /* +@@ -86,6 +96,39 @@ node_put: + return name; + } + ++static int opp_notifier(struct notifier_block *nb, unsigned long event, ++ void *data) ++{ ++ struct dev_pm_opp *opp = data; ++ struct private_data *priv = container_of(nb, struct private_data, ++ opp_nb); ++ struct device *cpu_dev = priv->cpu_dev; ++ struct regulator *cpu_reg; ++ unsigned long volt, freq; ++ int ret = 0; ++ ++ if (event == 
OPP_EVENT_ADJUST_VOLTAGE) { ++ cpu_reg = dev_pm_opp_get_regulator(cpu_dev); ++ if (IS_ERR(cpu_reg)) { ++ ret = PTR_ERR(cpu_reg); ++ goto out; ++ } ++ volt = dev_pm_opp_get_voltage(opp); ++ freq = dev_pm_opp_get_freq(opp); ++ ++ mutex_lock(&priv->lock); ++ if (freq == priv->opp_freq) { ++ ret = regulator_set_voltage_triplet(cpu_reg, volt, volt, volt); ++ } ++ mutex_unlock(&priv->lock); ++ if (ret) ++ dev_err(cpu_dev, "failed to scale voltage: %d\n", ret); ++ } ++ ++out: ++ return notifier_from_errno(ret); ++} ++ + static int resources_available(void) + { + struct device *cpu_dev; +@@ -153,6 +196,7 @@ static int cpufreq_init(struct cpufreq_p + bool fallback = false; + const char *name; + int ret; ++ struct srcu_notifier_head *opp_srcu_head; + + cpu_dev = get_cpu_device(policy->cpu); + if (!cpu_dev) { +@@ -242,13 +286,29 @@ static int cpufreq_init(struct cpufreq_p + goto out_free_opp; + } + ++ mutex_init(&priv->lock); ++ ++ rcu_read_lock(); ++ opp_srcu_head = dev_pm_opp_get_notifier(cpu_dev); ++ if (IS_ERR(opp_srcu_head)) { ++ ret = PTR_ERR(opp_srcu_head); ++ rcu_read_unlock(); ++ goto out_free_priv; ++ } ++ ++ priv->opp_nb.notifier_call = opp_notifier; ++ ret = srcu_notifier_chain_register(opp_srcu_head, &priv->opp_nb); ++ rcu_read_unlock(); ++ if (ret) ++ goto out_free_priv; ++ + priv->reg_name = name; + priv->opp_table = opp_table; + + ret = dev_pm_opp_init_cpufreq_table(cpu_dev, &freq_table); + if (ret) { + dev_err(cpu_dev, "failed to init cpufreq table: %d\n", ret); +- goto out_free_priv; ++ goto out_unregister_nb; + } + + priv->cpu_dev = cpu_dev; +@@ -287,6 +347,8 @@ static int cpufreq_init(struct cpufreq_p + + out_free_cpufreq_table: + dev_pm_opp_free_cpufreq_table(cpu_dev, &freq_table); ++out_unregister_nb: ++ srcu_notifier_chain_unregister(opp_srcu_head, &priv->opp_nb); + out_free_priv: + kfree(priv); + out_free_opp: diff --git a/target/linux/ipq40xx/patches-4.9/0055-cpufreq-dt-Add-L2-frequency-scaling-support.patch 
b/target/linux/ipq40xx/patches-4.9/0055-cpufreq-dt-Add-L2-frequency-scaling-support.patch new file mode 100644 index 000000000..e0ae1f058 --- /dev/null +++ b/target/linux/ipq40xx/patches-4.9/0055-cpufreq-dt-Add-L2-frequency-scaling-support.patch @@ -0,0 +1,90 @@ +From 0759cdff49f1cf361bf503c13f7bcb33da43ab95 Mon Sep 17 00:00:00 2001 +From: Georgi Djakov +Date: Tue, 8 Sep 2015 11:24:41 +0300 +Subject: [PATCH 55/69] cpufreq-dt: Add L2 frequency scaling support + +Signed-off-by: Georgi Djakov +--- + drivers/cpufreq/cpufreq-dt.c | 41 ++++++++++++++++++++++++++++++++++++++++- + include/linux/cpufreq.h | 2 ++ + 2 files changed, 42 insertions(+), 1 deletion(-) + +--- a/drivers/cpufreq/cpufreq-dt.c ++++ b/drivers/cpufreq/cpufreq-dt.c +@@ -48,11 +48,41 @@ static int set_target(struct cpufreq_pol + struct private_data *priv = policy->driver_data; + int ret; + unsigned long target_freq = policy->freq_table[index].frequency * 1000; ++ struct clk *l2_clk = policy->l2_clk; ++ unsigned int l2_freq; ++ unsigned long new_l2_freq = 0; + + mutex_lock(&priv->lock); + ret = dev_pm_opp_set_rate(priv->cpu_dev, target_freq); +- if (!ret) ++ ++ if (!ret) { ++ if (!IS_ERR(l2_clk) && policy->l2_rate[0] && policy->l2_rate[1] && ++ policy->l2_rate[2]) { ++ static unsigned long krait_l2[CONFIG_NR_CPUS] = { }; ++ int cpu, ret = 0; ++ ++ if (target_freq >= policy->l2_rate[2]) ++ new_l2_freq = policy->l2_rate[2]; ++ else if (target_freq >= policy->l2_rate[1]) ++ new_l2_freq = policy->l2_rate[1]; ++ else ++ new_l2_freq = policy->l2_rate[0]; ++ ++ krait_l2[policy->cpu] = new_l2_freq; ++ for_each_present_cpu(cpu) ++ new_l2_freq = max(new_l2_freq, krait_l2[cpu]); ++ ++ l2_freq = clk_get_rate(l2_clk); ++ ++ if (l2_freq != new_l2_freq) { ++ /* scale l2 with the core */ ++ ret = clk_set_rate(l2_clk, new_l2_freq); ++ } ++ } ++ + priv->opp_freq = target_freq; ++ } ++ + mutex_unlock(&priv->lock); + + return ret; +@@ -197,6 +227,8 @@ static int cpufreq_init(struct cpufreq_p + const char *name; + int ret; + 
struct srcu_notifier_head *opp_srcu_head; ++ struct device_node *l2_np; ++ struct clk *l2_clk = NULL; + + cpu_dev = get_cpu_device(policy->cpu); + if (!cpu_dev) { +@@ -321,6 +353,13 @@ static int cpufreq_init(struct cpufreq_p + policy->suspend_freq = dev_pm_opp_get_freq(suspend_opp) / 1000; + rcu_read_unlock(); + ++ l2_clk = clk_get(cpu_dev, "l2"); ++ if (!IS_ERR(l2_clk)) ++ policy->l2_clk = l2_clk; ++ l2_np = of_find_node_by_name(NULL, "qcom,l2"); ++ if (l2_np) ++ of_property_read_u32_array(l2_np, "qcom,l2-rates", policy->l2_rate, 3); ++ + ret = cpufreq_table_validate_and_show(policy, freq_table); + if (ret) { + dev_err(cpu_dev, "%s: invalid frequency table: %d\n", __func__, +--- a/include/linux/cpufreq.h ++++ b/include/linux/cpufreq.h +@@ -73,6 +73,8 @@ struct cpufreq_policy { + unsigned int cpu; /* cpu managing this policy, must be online */ + + struct clk *clk; ++ struct clk *l2_clk; /* L2 clock */ ++ unsigned int l2_rate[3]; /* L2 bus clock rate thresholds */ + struct cpufreq_cpuinfo cpuinfo;/* see above */ + + unsigned int min; /* in kHz */ diff --git a/target/linux/ipq40xx/patches-4.9/0056-cpufreq-dt-Add-missing-rcu-locks.patch b/target/linux/ipq40xx/patches-4.9/0056-cpufreq-dt-Add-missing-rcu-locks.patch new file mode 100644 index 000000000..c0eb2eb3c --- /dev/null +++ b/target/linux/ipq40xx/patches-4.9/0056-cpufreq-dt-Add-missing-rcu-locks.patch @@ -0,0 +1,23 @@ +From 001a8dcb56ced58c518aaa10a4f0ba5e878705b6 Mon Sep 17 00:00:00 2001 +From: Georgi Djakov +Date: Tue, 17 May 2016 16:15:43 +0300 +Subject: [PATCH 56/69] cpufreq-dt: Add missing rcu locks + +Signed-off-by: Georgi Djakov +--- + drivers/cpufreq/cpufreq-dt.c | 2 ++ + 1 file changed, 2 insertions(+) + +--- a/drivers/cpufreq/cpufreq-dt.c ++++ b/drivers/cpufreq/cpufreq-dt.c +@@ -143,8 +143,10 @@ static int opp_notifier(struct notifier_ + ret = PTR_ERR(cpu_reg); + goto out; + } ++ rcu_read_lock(); + volt = dev_pm_opp_get_voltage(opp); + freq = dev_pm_opp_get_freq(opp); ++ rcu_read_unlock(); + + 
mutex_lock(&priv->lock); + if (freq == priv->opp_freq) { diff --git a/target/linux/ipq40xx/patches-4.9/0058-clk-qcom-Always-add-factor-clock-for-xo-clocks.patch b/target/linux/ipq40xx/patches-4.9/0058-clk-qcom-Always-add-factor-clock-for-xo-clocks.patch new file mode 100644 index 000000000..f679cf740 --- /dev/null +++ b/target/linux/ipq40xx/patches-4.9/0058-clk-qcom-Always-add-factor-clock-for-xo-clocks.patch @@ -0,0 +1,38 @@ +From 6081776c1eef63e3083387bb9ec2bf7edf92428b Mon Sep 17 00:00:00 2001 +From: Georgi Djakov +Date: Wed, 2 Nov 2016 17:56:58 +0200 +Subject: [PATCH 58/69] clk: qcom: Always add factor clock for xo clocks + +Currently the RPM/RPM-SMD clock drivers do not register the xo clocks, +so we should always add factor clock. When we later add xo clocks support +into the drivers, we should update this function to skip registration. +By doing so we avoid any DT dependencies. + +Signed-off-by: Georgi Djakov +--- + drivers/clk/qcom/common.c | 15 ++++++--------- + 1 file changed, 6 insertions(+), 9 deletions(-) + +--- a/drivers/clk/qcom/common.c ++++ b/drivers/clk/qcom/common.c +@@ -153,15 +153,12 @@ int qcom_cc_register_board_clk(struct de + const char *name, unsigned long rate) + { + bool add_factor = true; +- struct device_node *node; + +- /* The RPM clock driver will add the factor clock if present */ +- if (IS_ENABLED(CONFIG_QCOM_RPMCC)) { +- node = of_find_compatible_node(NULL, NULL, "qcom,rpmcc"); +- if (of_device_is_available(node)) +- add_factor = false; +- of_node_put(node); +- } ++ /* ++ * TODO: The RPM clock driver currently does not support the xo clock. ++ * When xo is added to the RPM clock driver, we should change this ++ * function to skip registration of xo factor clocks. 
++ */ + + return _qcom_cc_register_board_clk(dev, path, name, rate, add_factor); + } diff --git a/target/linux/ipq40xx/patches-4.9/0059-ARM-cpuidle-Add-cpuidle-support-for-QCOM-cpus.patch b/target/linux/ipq40xx/patches-4.9/0059-ARM-cpuidle-Add-cpuidle-support-for-QCOM-cpus.patch new file mode 100644 index 000000000..b7349863f --- /dev/null +++ b/target/linux/ipq40xx/patches-4.9/0059-ARM-cpuidle-Add-cpuidle-support-for-QCOM-cpus.patch @@ -0,0 +1,29 @@ +From 04ca10340f1b4d92e849724d322a7ca225d11539 Mon Sep 17 00:00:00 2001 +From: Lina Iyer +Date: Wed, 25 Mar 2015 14:25:29 -0600 +Subject: [PATCH 59/69] ARM: cpuidle: Add cpuidle support for QCOM cpus + +Define ARM_QCOM_CPUIDLE config item to enable cpuidle support. + +Cc: Stephen Boyd +Cc: Arnd Bergmann +Cc: Kevin Hilman +Cc: Daniel Lezcano +Signed-off-by: Lina Iyer +--- + drivers/cpuidle/Kconfig.arm | 7 +++++++ + 1 file changed, 7 insertions(+) + +--- a/drivers/cpuidle/Kconfig.arm ++++ b/drivers/cpuidle/Kconfig.arm +@@ -74,3 +74,10 @@ config ARM_MVEBU_V7_CPUIDLE + depends on ARCH_MVEBU && !ARM64 + help + Select this to enable cpuidle on Armada 370, 38x and XP processors. ++ ++config ARM_QCOM_CPUIDLE ++ bool "CPU Idle Driver for QCOM processors" ++ depends on ARCH_QCOM ++ select ARM_CPUIDLE ++ help ++ Select this to enable cpuidle on QCOM processors. diff --git a/target/linux/ipq40xx/patches-4.9/0060-HACK-arch-arm-force-ZRELADDR-on-arch-qcom.patch b/target/linux/ipq40xx/patches-4.9/0060-HACK-arch-arm-force-ZRELADDR-on-arch-qcom.patch new file mode 100644 index 000000000..c188d0da5 --- /dev/null +++ b/target/linux/ipq40xx/patches-4.9/0060-HACK-arch-arm-force-ZRELADDR-on-arch-qcom.patch @@ -0,0 +1,62 @@ +From fa71139b55e114aa8c3c4823ff8ee7d49ee810d4 Mon Sep 17 00:00:00 2001 +From: Mathieu Olivari +Date: Wed, 29 Apr 2015 15:21:46 -0700 +Subject: [PATCH 60/69] HACK: arch: arm: force ZRELADDR on arch-qcom + +ARCH_QCOM is using the ARCH_MULTIPLATFORM option, as now recommended +on most ARM architectures. 
This automatically calculate ZRELADDR by +masking PHYS_OFFSET with 0xf8000000. + +However, on IPQ806x, the first ~20MB of RAM is reserved for the hardware +network accelerators, and the bootloader removes this section from the +layout passed from the ATAGS (when used). + +For newer bootloader, when DT is used, this is not a problem, we just +reserve this memory in the device tree. But if the bootloader doesn't +have DT support, then ATAGS have to be used. In this case, the ARM +decompressor will position the kernel in this low mem, which will not be +in the RAM section mapped by the bootloader, which means the kernel will +freeze in the middle of the boot process trying to map the memory. + +As a work around, this patch allows disabling AUTO_ZRELADDR when +ARCH_QCOM is selected. It makes the zImage usage possible on bootloaders +which don't support device-tree, which is the case on certain early +IPQ806x based designs. + +Signed-off-by: Mathieu Olivari +--- + arch/arm/Kconfig | 2 +- + arch/arm/Makefile | 2 ++ + arch/arm/mach-qcom/Makefile.boot | 1 + + 3 files changed, 4 insertions(+), 1 deletion(-) + create mode 100644 arch/arm/mach-qcom/Makefile.boot + +--- a/arch/arm/Kconfig ++++ b/arch/arm/Kconfig +@@ -331,7 +331,7 @@ config ARCH_MULTIPLATFORM + depends on MMU + select ARM_HAS_SG_CHAIN + select ARM_PATCH_PHYS_VIRT +- select AUTO_ZRELADDR ++ select AUTO_ZRELADDR if !ARCH_QCOM + select CLKSRC_OF + select COMMON_CLK + select GENERIC_CLOCKEVENTS +--- a/arch/arm/Makefile ++++ b/arch/arm/Makefile +@@ -251,9 +251,11 @@ MACHINE := arch/arm/mach-$(word 1,$(mac + else + MACHINE := + endif ++ifeq ($(CONFIG_ARCH_QCOM),) + ifeq ($(CONFIG_ARCH_MULTIPLATFORM),y) + MACHINE := + endif ++endif + + machdirs := $(patsubst %,arch/arm/mach-%/,$(machine-y)) + platdirs := $(patsubst %,arch/arm/plat-%/,$(sort $(plat-y))) +--- /dev/null ++++ b/arch/arm/mach-qcom/Makefile.boot +@@ -0,0 +1 @@ ++zreladdr-y+= 0x42208000 diff --git 
a/target/linux/ipq40xx/patches-4.9/0061-mtd-rootfs-conflicts-with-OpenWrt-auto-mounting.patch b/target/linux/ipq40xx/patches-4.9/0061-mtd-rootfs-conflicts-with-OpenWrt-auto-mounting.patch new file mode 100644 index 000000000..a4a957545 --- /dev/null +++ b/target/linux/ipq40xx/patches-4.9/0061-mtd-rootfs-conflicts-with-OpenWrt-auto-mounting.patch @@ -0,0 +1,23 @@ +From 5001f2e1a325b68dbf225bd17f69a4d3d975cca5 Mon Sep 17 00:00:00 2001 +From: John Crispin +Date: Thu, 9 Mar 2017 09:31:44 +0100 +Subject: [PATCH 61/69] mtd: "rootfs" conflicts with OpenWrt auto mounting + +Signed-off-by: John Crispin +--- + drivers/mtd/qcom_smem_part.c | 4 ++++ + 1 file changed, 4 insertions(+) + +--- a/drivers/mtd/qcom_smem_part.c ++++ b/drivers/mtd/qcom_smem_part.c +@@ -189,6 +189,10 @@ static int parse_qcom_smem_partitions(st + m_part->size = le32_to_cpu(s_part->size) * (*smem_blksz); + m_part->offset = le32_to_cpu(s_part->start) * (*smem_blksz); + ++ /* "rootfs" conflicts with OpenWrt auto mounting */ ++ if (mtd_type_is_nand(master) && !strcmp(m_part->name, "rootfs")) ++ m_part->name = "ubi"; ++ + /* + * The last SMEM partition may have its size marked as + * something like 0xffffffff, which means "until the end of the diff --git a/target/linux/ipq40xx/patches-4.9/0062-ipq806x-gcc-Added-the-enable-regs-and-mask-for-PRNG.patch b/target/linux/ipq40xx/patches-4.9/0062-ipq806x-gcc-Added-the-enable-regs-and-mask-for-PRNG.patch new file mode 100644 index 000000000..717934315 --- /dev/null +++ b/target/linux/ipq40xx/patches-4.9/0062-ipq806x-gcc-Added-the-enable-regs-and-mask-for-PRNG.patch @@ -0,0 +1,25 @@ +From a16fcf911a020e46439a3bb3e702463fc3159831 Mon Sep 17 00:00:00 2001 +From: Abhishek Sahu +Date: Wed, 18 Nov 2015 12:38:56 +0530 +Subject: [PATCH 62/69] ipq806x: gcc: Added the enable regs and mask for PRNG + +kernel got hanged while reading from /dev/hwrng at the +time of PRNG clock enable + +Change-Id: I89856c7e19e6639508e6a2774304583a3ec91172 +Signed-off-by: Abhishek Sahu +--- + 
drivers/clk/qcom/gcc-ipq806x.c | 2 ++ + 1 file changed, 2 insertions(+) + +--- a/drivers/clk/qcom/gcc-ipq806x.c ++++ b/drivers/clk/qcom/gcc-ipq806x.c +@@ -1233,6 +1233,8 @@ static struct clk_rcg prng_src = { + .parent_map = gcc_pxo_pll8_map, + }, + .clkr = { ++ .enable_reg = 0x2e80, ++ .enable_mask = BIT(11), + .hw.init = &(struct clk_init_data){ + .name = "prng_src", + .parent_names = gcc_pxo_pll8, diff --git a/target/linux/ipq40xx/patches-4.9/0063-1-ipq806x-tsens-driver.patch b/target/linux/ipq40xx/patches-4.9/0063-1-ipq806x-tsens-driver.patch new file mode 100644 index 000000000..685b0c3ce --- /dev/null +++ b/target/linux/ipq40xx/patches-4.9/0063-1-ipq806x-tsens-driver.patch @@ -0,0 +1,627 @@ +From 3302e1e1a3cfa4e67fda2a61d6f0c42205d40932 Mon Sep 17 00:00:00 2001 +From: Rajith Cherian +Date: Tue, 14 Feb 2017 18:30:43 +0530 +Subject: [PATCH] ipq8064: tsens: Base tsens driver for IPQ8064 + +Add TSENS driver template to support IPQ8064. +This is a base file copied from tsens-8960.c + +Change-Id: I47c573fdfa2d898243c6a6ba952d1632f91391f7 +Signed-off-by: Rajith Cherian + +ipq8064: tsens: TSENS driver support for IPQ8064 + +Support for IPQ8064 tsens driver. The driver works +with the thermal framework. The driver overrides the +following fucntionalities: + +1. Get current temperature. +2. Get/Set trip temperatures. +3. Enabled/Disable trip points. +4. ISR for threshold generated interrupt. +5. Notify userspace when trip points are hit. 
+ +Change-Id: I8bc7204fd627d10875ab13fc1de8cb6c2ed7a918 +Signed-off-by: Rajith Cherian +--- + .../devicetree/bindings/thermal/qcom-tsens.txt | 1 + + drivers/thermal/qcom/Makefile | 3 +- + drivers/thermal/qcom/tsens-ipq8064.c | 551 +++++++++++++++++++++ + drivers/thermal/qcom/tsens.c | 3 + + drivers/thermal/qcom/tsens.h | 2 +- + 5 files changed, 558 insertions(+), 2 deletions(-) + create mode 100644 drivers/thermal/qcom/tsens-ipq8064.c + +--- a/Documentation/devicetree/bindings/thermal/qcom-tsens.txt ++++ b/Documentation/devicetree/bindings/thermal/qcom-tsens.txt +@@ -5,6 +5,7 @@ Required properties: + - "qcom,msm8916-tsens" : For 8916 Family of SoCs + - "qcom,msm8974-tsens" : For 8974 Family of SoCs + - "qcom,msm8996-tsens" : For 8996 Family of SoCs ++ - "qcom,ipq8064-tsens" : For IPQ8064 + + - reg: Address range of the thermal registers + - #thermal-sensor-cells : Should be 1. See ./thermal.txt for a description. +--- a/drivers/thermal/qcom/Makefile ++++ b/drivers/thermal/qcom/Makefile +@@ -1,2 +1,3 @@ + obj-$(CONFIG_QCOM_TSENS) += qcom_tsens.o +-qcom_tsens-y += tsens.o tsens-common.o tsens-8916.o tsens-8974.o tsens-8960.o tsens-8996.o ++qcom_tsens-y += tsens.o tsens-common.o tsens-8916.o tsens-8974.o tsens-8960.o tsens-8996.o \ ++ tsens-ipq8064.o +--- /dev/null ++++ b/drivers/thermal/qcom/tsens-ipq8064.c +@@ -0,0 +1,551 @@ ++/* ++ * Copyright (c) 2015, The Linux Foundation. All rights reserved. ++ * ++ * This program is free software; you can redistribute it and/or modify ++ * it under the terms of the GNU General Public License version 2 and ++ * only version 2 as published by the Free Software Foundation. ++ * ++ * This program is distributed in the hope that it will be useful, ++ * but WITHOUT ANY WARRANTY; without even the implied warranty of ++ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the ++ * GNU General Public License for more details. 
++ * ++ */ ++ ++#include ++#include ++#include ++#include ++#include ++#include ++#include ++#include ++#include "tsens.h" ++ ++#define CAL_MDEGC 30000 ++ ++#define CONFIG_ADDR 0x3640 ++/* CONFIG_ADDR bitmasks */ ++#define CONFIG 0x9b ++#define CONFIG_MASK 0xf ++#define CONFIG_SHIFT 0 ++ ++#define STATUS_CNTL_8064 0x3660 ++#define CNTL_ADDR 0x3620 ++/* CNTL_ADDR bitmasks */ ++#define EN BIT(0) ++#define SW_RST BIT(1) ++#define SENSOR0_EN BIT(3) ++#define SLP_CLK_ENA BIT(26) ++#define MEASURE_PERIOD 1 ++#define SENSOR0_SHIFT 3 ++ ++/* INT_STATUS_ADDR bitmasks */ ++#define MIN_STATUS_MASK BIT(0) ++#define LOWER_STATUS_CLR BIT(1) ++#define UPPER_STATUS_CLR BIT(2) ++#define MAX_STATUS_MASK BIT(3) ++ ++#define THRESHOLD_ADDR 0x3624 ++/* THRESHOLD_ADDR bitmasks */ ++#define THRESHOLD_MAX_CODE 0x20000 ++#define THRESHOLD_MIN_CODE 0 ++#define THRESHOLD_MAX_LIMIT_SHIFT 24 ++#define THRESHOLD_MIN_LIMIT_SHIFT 16 ++#define THRESHOLD_UPPER_LIMIT_SHIFT 8 ++#define THRESHOLD_LOWER_LIMIT_SHIFT 0 ++#define THRESHOLD_MAX_LIMIT_MASK (THRESHOLD_MAX_CODE << \ ++ THRESHOLD_MAX_LIMIT_SHIFT) ++#define THRESHOLD_MIN_LIMIT_MASK (THRESHOLD_MAX_CODE << \ ++ THRESHOLD_MIN_LIMIT_SHIFT) ++#define THRESHOLD_UPPER_LIMIT_MASK (THRESHOLD_MAX_CODE << \ ++ THRESHOLD_UPPER_LIMIT_SHIFT) ++#define THRESHOLD_LOWER_LIMIT_MASK (THRESHOLD_MAX_CODE << \ ++ THRESHOLD_LOWER_LIMIT_SHIFT) ++ ++/* Initial temperature threshold values */ ++#define LOWER_LIMIT_TH 0x9d /* 95C */ ++#define UPPER_LIMIT_TH 0xa6 /* 105C */ ++#define MIN_LIMIT_TH 0x0 ++#define MAX_LIMIT_TH 0xff ++ ++#define S0_STATUS_ADDR 0x3628 ++#define STATUS_ADDR_OFFSET 2 ++#define SENSOR_STATUS_SIZE 4 ++#define INT_STATUS_ADDR 0x363c ++#define TRDY_MASK BIT(7) ++#define TIMEOUT_US 100 ++ ++#define TSENS_EN BIT(0) ++#define TSENS_SW_RST BIT(1) ++#define TSENS_ADC_CLK_SEL BIT(2) ++#define SENSOR0_EN BIT(3) ++#define SENSOR1_EN BIT(4) ++#define SENSOR2_EN BIT(5) ++#define SENSOR3_EN BIT(6) ++#define SENSOR4_EN BIT(7) ++#define SENSORS_EN (SENSOR0_EN | 
SENSOR1_EN | \ ++ SENSOR2_EN | SENSOR3_EN | SENSOR4_EN) ++#define TSENS_8064_SENSOR5_EN BIT(8) ++#define TSENS_8064_SENSOR6_EN BIT(9) ++#define TSENS_8064_SENSOR7_EN BIT(10) ++#define TSENS_8064_SENSOR8_EN BIT(11) ++#define TSENS_8064_SENSOR9_EN BIT(12) ++#define TSENS_8064_SENSOR10_EN BIT(13) ++#define TSENS_8064_SENSORS_EN (SENSORS_EN | \ ++ TSENS_8064_SENSOR5_EN | \ ++ TSENS_8064_SENSOR6_EN | \ ++ TSENS_8064_SENSOR7_EN | \ ++ TSENS_8064_SENSOR8_EN | \ ++ TSENS_8064_SENSOR9_EN | \ ++ TSENS_8064_SENSOR10_EN) ++ ++#define TSENS_8064_SEQ_SENSORS 5 ++#define TSENS_8064_S4_S5_OFFSET 40 ++#define TSENS_FACTOR 1 ++ ++/* Trips: from very hot to very cold */ ++enum tsens_trip_type { ++ TSENS_TRIP_STAGE3 = 0, ++ TSENS_TRIP_STAGE2, ++ TSENS_TRIP_STAGE1, ++ TSENS_TRIP_STAGE0, ++ TSENS_TRIP_NUM, ++}; ++ ++u32 tsens_8064_slope[] = { ++ 1176, 1176, 1154, 1176, ++ 1111, 1132, 1132, 1199, ++ 1132, 1199, 1132 ++ }; ++ ++/* Temperature on y axis and ADC-code on x-axis */ ++static inline int code_to_degC(u32 adc_code, const struct tsens_sensor *s) ++{ ++ int degcbeforefactor, degc; ++ ++ degcbeforefactor = (adc_code * s->slope) + s->offset; ++ ++ if (degcbeforefactor == 0) ++ degc = degcbeforefactor; ++ else if (degcbeforefactor > 0) ++ degc = (degcbeforefactor + TSENS_FACTOR/2) ++ / TSENS_FACTOR; ++ else ++ degc = (degcbeforefactor - TSENS_FACTOR/2) ++ / TSENS_FACTOR; ++ ++ return degc; ++} ++ ++static int degC_to_code(int degC, const struct tsens_sensor *s) ++{ ++ int code = ((degC * TSENS_FACTOR - s->offset) + (s->slope/2)) ++ / s->slope; ++ ++ if (code > THRESHOLD_MAX_CODE) ++ code = THRESHOLD_MAX_CODE; ++ else if (code < THRESHOLD_MIN_CODE) ++ code = THRESHOLD_MIN_CODE; ++ return code; ++} ++ ++static int suspend_ipq8064(struct tsens_device *tmdev) ++{ ++ int ret; ++ unsigned int mask; ++ struct regmap *map = tmdev->map; ++ ++ ret = regmap_read(map, THRESHOLD_ADDR, &tmdev->ctx.threshold); ++ if (ret) ++ return ret; ++ ++ ret = regmap_read(map, CNTL_ADDR, &tmdev->ctx.control); 
++ if (ret) ++ return ret; ++ ++ mask = SLP_CLK_ENA | EN; ++ ++ ret = regmap_update_bits(map, CNTL_ADDR, mask, 0); ++ if (ret) ++ return ret; ++ ++ return 0; ++} ++ ++static int resume_ipq8064(struct tsens_device *tmdev) ++{ ++ int ret; ++ struct regmap *map = tmdev->map; ++ ++ ret = regmap_update_bits(map, CNTL_ADDR, SW_RST, SW_RST); ++ if (ret) ++ return ret; ++ ++ ret = regmap_update_bits(map, CONFIG_ADDR, CONFIG_MASK, CONFIG); ++ if (ret) ++ return ret; ++ ++ ret = regmap_write(map, THRESHOLD_ADDR, tmdev->ctx.threshold); ++ if (ret) ++ return ret; ++ ++ ret = regmap_write(map, CNTL_ADDR, tmdev->ctx.control); ++ if (ret) ++ return ret; ++ ++ return 0; ++} ++ ++static void notify_uspace_tsens_fn(struct work_struct *work) ++{ ++ struct tsens_sensor *s = container_of(work, struct tsens_sensor, ++ notify_work); ++ ++ sysfs_notify(&s->tzd->device.kobj, NULL, "type"); ++} ++ ++static void tsens_scheduler_fn(struct work_struct *work) ++{ ++ struct tsens_device *tmdev = container_of(work, struct tsens_device, ++ tsens_work); ++ unsigned int threshold, threshold_low, code, reg, sensor, mask; ++ unsigned int sensor_addr; ++ bool upper_th_x, lower_th_x; ++ int adc_code, ret; ++ ++ ret = regmap_read(tmdev->map, STATUS_CNTL_8064, ®); ++ if (ret) ++ return; ++ reg = reg | LOWER_STATUS_CLR | UPPER_STATUS_CLR; ++ ret = regmap_write(tmdev->map, STATUS_CNTL_8064, reg); ++ if (ret) ++ return; ++ ++ mask = ~(LOWER_STATUS_CLR | UPPER_STATUS_CLR); ++ ret = regmap_read(tmdev->map, THRESHOLD_ADDR, &threshold); ++ if (ret) ++ return; ++ threshold_low = (threshold & THRESHOLD_LOWER_LIMIT_MASK) ++ >> THRESHOLD_LOWER_LIMIT_SHIFT; ++ threshold = (threshold & THRESHOLD_UPPER_LIMIT_MASK) ++ >> THRESHOLD_UPPER_LIMIT_SHIFT; ++ ++ ret = regmap_read(tmdev->map, STATUS_CNTL_8064, ®); ++ if (ret) ++ return; ++ ++ ret = regmap_read(tmdev->map, CNTL_ADDR, &sensor); ++ if (ret) ++ return; ++ sensor &= (uint32_t) TSENS_8064_SENSORS_EN; ++ sensor >>= SENSOR0_SHIFT; ++ ++ /* Constraint: There is only 1 
interrupt control register for all ++ * 11 temperature sensor. So monitoring more than 1 sensor based ++ * on interrupts will yield inconsistent result. To overcome this ++ * issue we will monitor only sensor 0 which is the master sensor. ++ */ ++ ++ /* Skip if the sensor is disabled */ ++ if (sensor & 1) { ++ ret = regmap_read(tmdev->map, tmdev->sensor[0].status, &code); ++ if (ret) ++ return; ++ upper_th_x = code >= threshold; ++ lower_th_x = code <= threshold_low; ++ if (upper_th_x) ++ mask |= UPPER_STATUS_CLR; ++ if (lower_th_x) ++ mask |= LOWER_STATUS_CLR; ++ if (upper_th_x || lower_th_x) { ++ /* Notify user space */ ++ schedule_work(&tmdev->sensor[0].notify_work); ++ regmap_read(tmdev->map, sensor_addr, &adc_code); ++ pr_debug("Trigger (%d degrees) for sensor %d\n", ++ code_to_degC(adc_code, &tmdev->sensor[0]), 0); ++ } ++ } ++ regmap_write(tmdev->map, STATUS_CNTL_8064, reg & mask); ++ ++ /* force memory to sync */ ++ mb(); ++} ++ ++static irqreturn_t tsens_isr(int irq, void *data) ++{ ++ struct tsens_device *tmdev = data; ++ ++ schedule_work(&tmdev->tsens_work); ++ return IRQ_HANDLED; ++} ++ ++static void hw_init(struct tsens_device *tmdev) ++{ ++ int ret; ++ unsigned int reg_cntl = 0, reg_cfg = 0, reg_thr = 0; ++ unsigned int reg_status_cntl = 0; ++ ++ regmap_read(tmdev->map, CNTL_ADDR, ®_cntl); ++ regmap_write(tmdev->map, CNTL_ADDR, reg_cntl | TSENS_SW_RST); ++ ++ reg_cntl |= SLP_CLK_ENA | (MEASURE_PERIOD << 18) ++ | (((1 << tmdev->num_sensors) - 1) << SENSOR0_SHIFT); ++ regmap_write(tmdev->map, CNTL_ADDR, reg_cntl); ++ regmap_read(tmdev->map, STATUS_CNTL_8064, ®_status_cntl); ++ reg_status_cntl |= LOWER_STATUS_CLR | UPPER_STATUS_CLR ++ | MIN_STATUS_MASK | MAX_STATUS_MASK; ++ regmap_write(tmdev->map, STATUS_CNTL_8064, reg_status_cntl); ++ reg_cntl |= TSENS_EN; ++ regmap_write(tmdev->map, CNTL_ADDR, reg_cntl); ++ ++ regmap_read(tmdev->map, CONFIG_ADDR, ®_cfg); ++ reg_cfg = (reg_cfg & ~CONFIG_MASK) | (CONFIG << CONFIG_SHIFT); ++ regmap_write(tmdev->map, 
CONFIG_ADDR, reg_cfg); ++ ++ reg_thr |= (LOWER_LIMIT_TH << THRESHOLD_LOWER_LIMIT_SHIFT) ++ | (UPPER_LIMIT_TH << THRESHOLD_UPPER_LIMIT_SHIFT) ++ | (MIN_LIMIT_TH << THRESHOLD_MIN_LIMIT_SHIFT) ++ | (MAX_LIMIT_TH << THRESHOLD_MAX_LIMIT_SHIFT); ++ ++ regmap_write(tmdev->map, THRESHOLD_ADDR, reg_thr); ++ ++ ret = devm_request_irq(tmdev->dev, tmdev->tsens_irq, tsens_isr, ++ IRQF_TRIGGER_RISING, "tsens_interrupt", tmdev); ++ if (ret < 0) { ++ pr_err("%s: request_irq FAIL: %d\n", __func__, ret); ++ return; ++ } ++ ++ INIT_WORK(&tmdev->tsens_work, tsens_scheduler_fn); ++} ++ ++static int init_ipq8064(struct tsens_device *tmdev) ++{ ++ int ret, i; ++ u32 reg_cntl, offset = 0; ++ ++ init_common(tmdev); ++ if (!tmdev->map) ++ return -ENODEV; ++ ++ /* ++ * The status registers for each sensor are discontiguous ++ * because some SoCs have 5 sensors while others have more ++ * but the control registers stay in the same place, i.e ++ * directly after the first 5 status registers. ++ */ ++ for (i = 0; i < tmdev->num_sensors; i++) { ++ if (i >= TSENS_8064_SEQ_SENSORS) ++ offset = TSENS_8064_S4_S5_OFFSET; ++ ++ tmdev->sensor[i].status = S0_STATUS_ADDR + offset ++ + (i << STATUS_ADDR_OFFSET); ++ tmdev->sensor[i].slope = tsens_8064_slope[i]; ++ INIT_WORK(&tmdev->sensor[i].notify_work, ++ notify_uspace_tsens_fn); ++ } ++ ++ reg_cntl = SW_RST; ++ ret = regmap_update_bits(tmdev->map, CNTL_ADDR, SW_RST, reg_cntl); ++ if (ret) ++ return ret; ++ ++ reg_cntl |= SLP_CLK_ENA | (MEASURE_PERIOD << 18); ++ reg_cntl &= ~SW_RST; ++ ret = regmap_update_bits(tmdev->map, CONFIG_ADDR, ++ CONFIG_MASK, CONFIG); ++ ++ reg_cntl |= GENMASK(tmdev->num_sensors - 1, 0) << SENSOR0_SHIFT; ++ ret = regmap_write(tmdev->map, CNTL_ADDR, reg_cntl); ++ if (ret) ++ return ret; ++ ++ reg_cntl |= EN; ++ ret = regmap_write(tmdev->map, CNTL_ADDR, reg_cntl); ++ if (ret) ++ return ret; ++ ++ return 0; ++} ++ ++static int calibrate_ipq8064(struct tsens_device *tmdev) ++{ ++ int i; ++ char *data, *data_backup; ++ ++ ssize_t 
num_read = tmdev->num_sensors; ++ struct tsens_sensor *s = tmdev->sensor; ++ ++ data = qfprom_read(tmdev->dev, "calib"); ++ if (IS_ERR(data)) { ++ pr_err("Calibration not found.\n"); ++ return PTR_ERR(data); ++ } ++ ++ data_backup = qfprom_read(tmdev->dev, "calib_backup"); ++ if (IS_ERR(data_backup)) { ++ pr_err("Backup calibration not found.\n"); ++ return PTR_ERR(data_backup); ++ } ++ ++ for (i = 0; i < num_read; i++) { ++ s[i].calib_data = readb_relaxed(data + i); ++ s[i].calib_data_backup = readb_relaxed(data_backup + i); ++ ++ if (s[i].calib_data_backup) ++ s[i].calib_data = s[i].calib_data_backup; ++ if (!s[i].calib_data) { ++ pr_err("QFPROM TSENS calibration data not present\n"); ++ return -ENODEV; ++ } ++ s[i].slope = tsens_8064_slope[i]; ++ s[i].offset = CAL_MDEGC - (s[i].calib_data * s[i].slope); ++ } ++ ++ hw_init(tmdev); ++ ++ return 0; ++} ++ ++static int get_temp_ipq8064(struct tsens_device *tmdev, int id, int *temp) ++{ ++ int ret; ++ u32 code, trdy; ++ const struct tsens_sensor *s = &tmdev->sensor[id]; ++ unsigned long timeout; ++ ++ timeout = jiffies + usecs_to_jiffies(TIMEOUT_US); ++ do { ++ ret = regmap_read(tmdev->map, INT_STATUS_ADDR, &trdy); ++ if (ret) ++ return ret; ++ if (!(trdy & TRDY_MASK)) ++ continue; ++ ret = regmap_read(tmdev->map, s->status, &code); ++ if (ret) ++ return ret; ++ *temp = code_to_degC(code, s); ++ return 0; ++ } while (time_before(jiffies, timeout)); ++ ++ return -ETIMEDOUT; ++} ++ ++static int set_trip_temp_ipq8064(void *data, int trip, int temp) ++{ ++ unsigned int reg_th, reg_cntl; ++ int ret, code, code_chk, hi_code, lo_code; ++ const struct tsens_sensor *s = data; ++ struct tsens_device *tmdev = s->tmdev; ++ ++ code_chk = code = degC_to_code(temp, s); ++ ++ if (code < THRESHOLD_MIN_CODE || code > THRESHOLD_MAX_CODE) ++ return -EINVAL; ++ ++ ret = regmap_read(tmdev->map, STATUS_CNTL_8064, ®_cntl); ++ if (ret) ++ return ret; ++ ++ ret = regmap_read(tmdev->map, THRESHOLD_ADDR, ®_th); ++ if (ret) ++ return ret; ++ ++ 
hi_code = (reg_th & THRESHOLD_UPPER_LIMIT_MASK) ++ >> THRESHOLD_UPPER_LIMIT_SHIFT; ++ lo_code = (reg_th & THRESHOLD_LOWER_LIMIT_MASK) ++ >> THRESHOLD_LOWER_LIMIT_SHIFT; ++ ++ switch (trip) { ++ case TSENS_TRIP_STAGE3: ++ code <<= THRESHOLD_MAX_LIMIT_SHIFT; ++ reg_th &= ~THRESHOLD_MAX_LIMIT_MASK; ++ break; ++ case TSENS_TRIP_STAGE2: ++ if (code_chk <= lo_code) ++ return -EINVAL; ++ code <<= THRESHOLD_UPPER_LIMIT_SHIFT; ++ reg_th &= ~THRESHOLD_UPPER_LIMIT_MASK; ++ break; ++ case TSENS_TRIP_STAGE1: ++ if (code_chk >= hi_code) ++ return -EINVAL; ++ code <<= THRESHOLD_LOWER_LIMIT_SHIFT; ++ reg_th &= ~THRESHOLD_LOWER_LIMIT_MASK; ++ break; ++ case TSENS_TRIP_STAGE0: ++ code <<= THRESHOLD_MIN_LIMIT_SHIFT; ++ reg_th &= ~THRESHOLD_MIN_LIMIT_MASK; ++ break; ++ default: ++ return -EINVAL; ++ } ++ ++ ret = regmap_write(tmdev->map, THRESHOLD_ADDR, reg_th | code); ++ if (ret) ++ return ret; ++ ++ return 0; ++} ++ ++static int set_trip_activate_ipq8064(void *data, int trip, ++ enum thermal_trip_activation_mode mode) ++{ ++ unsigned int reg_cntl, mask, val; ++ const struct tsens_sensor *s = data; ++ struct tsens_device *tmdev = s->tmdev; ++ int ret; ++ ++ if (!tmdev || trip < 0) ++ return -EINVAL; ++ ++ ret = regmap_read(tmdev->map, STATUS_CNTL_8064, ®_cntl); ++ if (ret) ++ return ret; ++ ++ switch (trip) { ++ case TSENS_TRIP_STAGE3: ++ mask = MAX_STATUS_MASK; ++ break; ++ case TSENS_TRIP_STAGE2: ++ mask = UPPER_STATUS_CLR; ++ break; ++ case TSENS_TRIP_STAGE1: ++ mask = LOWER_STATUS_CLR; ++ break; ++ case TSENS_TRIP_STAGE0: ++ mask = MIN_STATUS_MASK; ++ break; ++ default: ++ return -EINVAL; ++ } ++ ++ if (mode == THERMAL_TRIP_ACTIVATION_DISABLED) ++ val = reg_cntl | mask; ++ else ++ val = reg_cntl & ~mask; ++ ++ ret = regmap_write(tmdev->map, STATUS_CNTL_8064, val); ++ if (ret) ++ return ret; ++ ++ /* force memory to sync */ ++ mb(); ++ return 0; ++} ++ ++const struct tsens_ops ops_ipq8064 = { ++ .init = init_ipq8064, ++ .calibrate = calibrate_ipq8064, ++ .get_temp = 
get_temp_ipq8064, ++ .suspend = suspend_ipq8064, ++ .resume = resume_ipq8064, ++ .set_trip_temp = set_trip_temp_ipq8064, ++ .set_trip_activate = set_trip_activate_ipq8064, ++}; ++ ++const struct tsens_data data_ipq8064 = { ++ .num_sensors = 11, ++ .ops = &ops_ipq8064, ++}; +--- a/drivers/thermal/qcom/tsens.c ++++ b/drivers/thermal/qcom/tsens.c +@@ -72,6 +72,9 @@ static const struct of_device_id tsens_t + }, { + .compatible = "qcom,msm8996-tsens", + .data = &data_8996, ++ }, { ++ .compatible = "qcom,ipq8064-tsens", ++ .data = &data_ipq8064, + }, + {} + }; +--- a/drivers/thermal/qcom/tsens.h ++++ b/drivers/thermal/qcom/tsens.h +@@ -89,6 +89,6 @@ void compute_intercept_slope(struct tsen + int init_common(struct tsens_device *); + int get_temp_common(struct tsens_device *, int, int *); + +-extern const struct tsens_data data_8916, data_8974, data_8960, data_8996; ++extern const struct tsens_data data_8916, data_8974, data_8960, data_8996, data_ipq8064; + + #endif /* __QCOM_TSENS_H__ */ diff --git a/target/linux/ipq40xx/patches-4.9/0063-2-tsens-support-configurable-interrupts.patch b/target/linux/ipq40xx/patches-4.9/0063-2-tsens-support-configurable-interrupts.patch new file mode 100644 index 000000000..ca9897049 --- /dev/null +++ b/target/linux/ipq40xx/patches-4.9/0063-2-tsens-support-configurable-interrupts.patch @@ -0,0 +1,462 @@ +From 4e87400732c77765afae2ea89ed43837457aa604 Mon Sep 17 00:00:00 2001 +From: Rajith Cherian +Date: Wed, 1 Feb 2017 19:00:26 +0530 +Subject: [PATCH] ipq8064: tsens: Support for configurable interrupts + +Provide support for adding configurable high and +configurable low trip temperatures. An interrupts is +also triggerred when these trip points are hit. The +interrupts can be activated or deactivated from sysfs. +This functionality is made available only if +CONFIG_THERMAL_WRITABLE_TRIPS is defined. 
+ +Change-Id: Ib73f3f9459de4fffce7bb985a0312a88291f4934 +Signed-off-by: Rajith Cherian +--- + .../devicetree/bindings/thermal/qcom-tsens.txt | 4 ++ + drivers/thermal/of-thermal.c | 63 ++++++++++++++++++---- + drivers/thermal/qcom/tsens.c | 43 ++++++++++++--- + drivers/thermal/qcom/tsens.h | 11 ++++ + drivers/thermal/thermal_core.c | 44 ++++++++++++++- + include/linux/thermal.h | 14 +++++ + 6 files changed, 162 insertions(+), 17 deletions(-) + +--- a/Documentation/devicetree/bindings/thermal/qcom-tsens.txt ++++ b/Documentation/devicetree/bindings/thermal/qcom-tsens.txt +@@ -12,11 +12,15 @@ Required properties: + - Refer to Documentation/devicetree/bindings/nvmem/nvmem.txt to know how to specify + nvmem cells + ++Optional properties: ++- interrupts: Interrupt which gets triggered when threshold is hit ++ + Example: + tsens: thermal-sensor@900000 { + compatible = "qcom,msm8916-tsens"; + reg = <0x4a8000 0x2000>; + nvmem-cells = <&tsens_caldata>, <&tsens_calsel>; + nvmem-cell-names = "caldata", "calsel"; ++ interrupts = <0 178 0>; + #thermal-sensor-cells = <1>; + }; +--- a/drivers/thermal/of-thermal.c ++++ b/drivers/thermal/of-thermal.c +@@ -95,7 +95,7 @@ static int of_thermal_get_temp(struct th + { + struct __thermal_zone *data = tz->devdata; + +- if (!data->ops->get_temp) ++ if (!data->ops->get_temp || (data->mode == THERMAL_DEVICE_DISABLED)) + return -EINVAL; + + return data->ops->get_temp(data->sensor_data, temp); +@@ -106,7 +106,8 @@ static int of_thermal_set_trips(struct t + { + struct __thermal_zone *data = tz->devdata; + +- if (!data->ops || !data->ops->set_trips) ++ if (!data->ops || !data->ops->set_trips ++ || (data->mode == THERMAL_DEVICE_DISABLED)) + return -EINVAL; + + return data->ops->set_trips(data->sensor_data, low, high); +@@ -192,6 +193,9 @@ static int of_thermal_set_emul_temp(stru + { + struct __thermal_zone *data = tz->devdata; + ++ if (data->mode == THERMAL_DEVICE_DISABLED) ++ return -EINVAL; ++ + return data->ops->set_emul_temp(data->sensor_data, 
temp); + } + +@@ -200,7 +204,7 @@ static int of_thermal_get_trend(struct t + { + struct __thermal_zone *data = tz->devdata; + +- if (!data->ops->get_trend) ++ if (!data->ops->get_trend || (data->mode == THERMAL_DEVICE_DISABLED)) + return -EINVAL; + + return data->ops->get_trend(data->sensor_data, trip, trend); +@@ -286,7 +290,9 @@ static int of_thermal_set_mode(struct th + mutex_unlock(&tz->lock); + + data->mode = mode; +- thermal_zone_device_update(tz, THERMAL_EVENT_UNSPECIFIED); ++ ++ if (mode == THERMAL_DEVICE_ENABLED) ++ thermal_zone_device_update(tz, THERMAL_EVENT_UNSPECIFIED); + + return 0; + } +@@ -296,7 +302,8 @@ static int of_thermal_get_trip_type(stru + { + struct __thermal_zone *data = tz->devdata; + +- if (trip >= data->ntrips || trip < 0) ++ if (trip >= data->ntrips || trip < 0 ++ || (data->mode == THERMAL_DEVICE_DISABLED)) + return -EDOM; + + *type = data->trips[trip].type; +@@ -304,12 +311,39 @@ static int of_thermal_get_trip_type(stru + return 0; + } + ++static int of_thermal_activate_trip_type(struct thermal_zone_device *tz, ++ int trip, enum thermal_trip_activation_mode mode) ++{ ++ struct __thermal_zone *data = tz->devdata; ++ ++ if (trip >= data->ntrips || trip < 0 ++ || (data->mode == THERMAL_DEVICE_DISABLED)) ++ return -EDOM; ++ ++ /* ++ * The configurable_hi and configurable_lo trip points can be ++ * activated and deactivated. 
++ */ ++ ++ if (data->ops->set_trip_activate) { ++ int ret; ++ ++ ret = data->ops->set_trip_activate(data->sensor_data, ++ trip, mode); ++ if (ret) ++ return ret; ++ } ++ ++ return 0; ++} ++ + static int of_thermal_get_trip_temp(struct thermal_zone_device *tz, int trip, + int *temp) + { + struct __thermal_zone *data = tz->devdata; + +- if (trip >= data->ntrips || trip < 0) ++ if (trip >= data->ntrips || trip < 0 ++ || (data->mode == THERMAL_DEVICE_DISABLED)) + return -EDOM; + + *temp = data->trips[trip].temperature; +@@ -322,7 +356,8 @@ static int of_thermal_set_trip_temp(stru + { + struct __thermal_zone *data = tz->devdata; + +- if (trip >= data->ntrips || trip < 0) ++ if (trip >= data->ntrips || trip < 0 ++ || (data->mode == THERMAL_DEVICE_DISABLED)) + return -EDOM; + + if (data->ops->set_trip_temp) { +@@ -344,7 +379,8 @@ static int of_thermal_get_trip_hyst(stru + { + struct __thermal_zone *data = tz->devdata; + +- if (trip >= data->ntrips || trip < 0) ++ if (trip >= data->ntrips || trip < 0 ++ || (data->mode == THERMAL_DEVICE_DISABLED)) + return -EDOM; + + *hyst = data->trips[trip].hysteresis; +@@ -357,7 +393,8 @@ static int of_thermal_set_trip_hyst(stru + { + struct __thermal_zone *data = tz->devdata; + +- if (trip >= data->ntrips || trip < 0) ++ if (trip >= data->ntrips || trip < 0 ++ || (data->mode == THERMAL_DEVICE_DISABLED)) + return -EDOM; + + /* thermal framework should take care of data->mask & (1 << trip) */ +@@ -432,6 +469,9 @@ thermal_zone_of_add_sensor(struct device + if (ops->set_emul_temp) + tzd->ops->set_emul_temp = of_thermal_set_emul_temp; + ++ if (ops->set_trip_activate) ++ tzd->ops->set_trip_activate = of_thermal_activate_trip_type; ++ + mutex_unlock(&tzd->lock); + + return tzd; +@@ -726,7 +766,10 @@ static const char * const trip_types[] = + [THERMAL_TRIP_ACTIVE] = "active", + [THERMAL_TRIP_PASSIVE] = "passive", + [THERMAL_TRIP_HOT] = "hot", +- [THERMAL_TRIP_CRITICAL] = "critical", ++ [THERMAL_TRIP_CRITICAL] = "critical_high", ++ 
[THERMAL_TRIP_CONFIGURABLE_HI] = "configurable_hi", ++ [THERMAL_TRIP_CONFIGURABLE_LOW] = "configurable_lo", ++ [THERMAL_TRIP_CRITICAL_LOW] = "critical_low", + }; + + /** +--- a/drivers/thermal/qcom/tsens.c ++++ b/drivers/thermal/qcom/tsens.c +@@ -31,7 +31,7 @@ static int tsens_get_temp(void *data, in + + static int tsens_get_trend(void *p, int trip, enum thermal_trend *trend) + { +- const struct tsens_sensor *s = p; ++ struct tsens_sensor *s = p; + struct tsens_device *tmdev = s->tmdev; + + if (tmdev->ops->get_trend) +@@ -40,9 +40,10 @@ static int tsens_get_trend(void *p, int + return -ENOTSUPP; + } + +-static int __maybe_unused tsens_suspend(struct device *dev) ++static int __maybe_unused tsens_suspend(void *data) + { +- struct tsens_device *tmdev = dev_get_drvdata(dev); ++ struct tsens_sensor *s = data; ++ struct tsens_device *tmdev = s->tmdev; + + if (tmdev->ops && tmdev->ops->suspend) + return tmdev->ops->suspend(tmdev); +@@ -50,9 +51,10 @@ static int __maybe_unused tsens_suspend + return 0; + } + +-static int __maybe_unused tsens_resume(struct device *dev) ++static int __maybe_unused tsens_resume(void *data) + { +- struct tsens_device *tmdev = dev_get_drvdata(dev); ++ struct tsens_sensor *s = data; ++ struct tsens_device *tmdev = s->tmdev; + + if (tmdev->ops && tmdev->ops->resume) + return tmdev->ops->resume(tmdev); +@@ -60,6 +62,30 @@ static int __maybe_unused tsens_resume(s + return 0; + } + ++static int __maybe_unused tsens_set_trip_temp(void *data, int trip, int temp) ++{ ++ struct tsens_sensor *s = data; ++ struct tsens_device *tmdev = s->tmdev; ++ ++ if (tmdev->ops && tmdev->ops->set_trip_temp) ++ return tmdev->ops->set_trip_temp(s, trip, temp); ++ ++ return 0; ++} ++ ++static int __maybe_unused tsens_activate_trip_type(void *data, int trip, ++ enum thermal_trip_activation_mode mode) ++{ ++ struct tsens_sensor *s = data; ++ struct tsens_device *tmdev = s->tmdev; ++ ++ if (tmdev->ops && tmdev->ops->set_trip_activate) ++ return 
tmdev->ops->set_trip_activate(s, trip, mode); ++ ++ return 0; ++} ++ ++ + static SIMPLE_DEV_PM_OPS(tsens_pm_ops, tsens_suspend, tsens_resume); + + static const struct of_device_id tsens_table[] = { +@@ -83,6 +109,8 @@ MODULE_DEVICE_TABLE(of, tsens_table); + static const struct thermal_zone_of_device_ops tsens_of_ops = { + .get_temp = tsens_get_temp, + .get_trend = tsens_get_trend, ++ .set_trip_temp = tsens_set_trip_temp, ++ .set_trip_activate = tsens_activate_trip_type, + }; + + static int tsens_register(struct tsens_device *tmdev) +@@ -131,7 +159,7 @@ static int tsens_probe(struct platform_d + if (id) + data = id->data; + else +- data = &data_8960; ++ return -EINVAL; + + if (data->num_sensors <= 0) { + dev_err(dev, "invalid number of sensors\n"); +@@ -146,6 +174,9 @@ static int tsens_probe(struct platform_d + tmdev->dev = dev; + tmdev->num_sensors = data->num_sensors; + tmdev->ops = data->ops; ++ ++ tmdev->tsens_irq = platform_get_irq(pdev, 0); ++ + for (i = 0; i < tmdev->num_sensors; i++) { + if (data->hw_ids) + tmdev->sensor[i].hw_id = data->hw_ids[i]; +--- a/drivers/thermal/qcom/tsens.h ++++ b/drivers/thermal/qcom/tsens.h +@@ -24,9 +24,12 @@ struct tsens_device; + struct tsens_sensor { + struct tsens_device *tmdev; + struct thermal_zone_device *tzd; ++ struct work_struct notify_work; + int offset; + int id; + int hw_id; ++ int calib_data; ++ int calib_data_backup; + int slope; + u32 status; + }; +@@ -41,6 +44,9 @@ struct tsens_sensor { + * @suspend: Function to suspend the tsens device + * @resume: Function to resume the tsens device + * @get_trend: Function to get the thermal/temp trend ++ * @set_trip_temp: Function to set trip temp ++ * @get_trip_temp: Function to get trip temp ++ * @set_trip_activate: Function to activate trip points + */ + struct tsens_ops { + /* mandatory callbacks */ +@@ -53,6 +59,9 @@ struct tsens_ops { + int (*suspend)(struct tsens_device *); + int (*resume)(struct tsens_device *); + int (*get_trend)(struct tsens_device *, int, enum 
thermal_trend *); ++ int (*set_trip_temp)(void *, int, int); ++ int (*set_trip_activate)(void *, int, ++ enum thermal_trip_activation_mode); + }; + + /** +@@ -76,11 +85,13 @@ struct tsens_context { + struct tsens_device { + struct device *dev; + u32 num_sensors; ++ u32 tsens_irq; + struct regmap *map; + struct regmap_field *status_field; + struct tsens_context ctx; + bool trdy; + const struct tsens_ops *ops; ++ struct work_struct tsens_work; + struct tsens_sensor sensor[0]; + }; + +--- a/drivers/thermal/thermal_core.c ++++ b/drivers/thermal/thermal_core.c +@@ -732,12 +732,48 @@ trip_point_type_show(struct device *dev, + return sprintf(buf, "passive\n"); + case THERMAL_TRIP_ACTIVE: + return sprintf(buf, "active\n"); ++ case THERMAL_TRIP_CONFIGURABLE_HI: ++ return sprintf(buf, "configurable_hi\n"); ++ case THERMAL_TRIP_CONFIGURABLE_LOW: ++ return sprintf(buf, "configurable_low\n"); ++ case THERMAL_TRIP_CRITICAL_LOW: ++ return sprintf(buf, "critical_low\n"); + default: + return sprintf(buf, "unknown\n"); + } + } + + static ssize_t ++trip_point_type_activate(struct device *dev, struct device_attribute *attr, ++ const char *buf, size_t count) ++{ ++ struct thermal_zone_device *tz = to_thermal_zone(dev); ++ int trip, ret; ++ char *enabled = "enabled"; ++ char *disabled = "disabled"; ++ ++ if (!tz->ops->set_trip_activate) ++ return -EPERM; ++ ++ if (!sscanf(attr->attr.name, "trip_point_%d_type", &trip)) ++ return -EINVAL; ++ ++ if (!strncmp(buf, enabled, strlen(enabled))) ++ ret = tz->ops->set_trip_activate(tz, trip, ++ THERMAL_TRIP_ACTIVATION_ENABLED); ++ else if (!strncmp(buf, disabled, strlen(disabled))) ++ ret = tz->ops->set_trip_activate(tz, trip, ++ THERMAL_TRIP_ACTIVATION_DISABLED); ++ else ++ ret = -EINVAL; ++ ++ if (ret) ++ return ret; ++ ++ return count; ++} ++ ++static ssize_t + trip_point_temp_store(struct device *dev, struct device_attribute *attr, + const char *buf, size_t count) + { +@@ -1321,7 +1357,7 @@ thermal_cooling_device_weight_store(stru + */ + int 
thermal_zone_bind_cooling_device(struct thermal_zone_device *tz, + int trip, +- struct thermal_cooling_device *cdev, ++ struct thermal_cooling_device *cdev, + unsigned long upper, unsigned long lower, + unsigned int weight) + { +@@ -1772,6 +1808,12 @@ static int create_trip_attrs(struct ther + tz->trip_type_attrs[indx].attr.attr.mode = S_IRUGO; + tz->trip_type_attrs[indx].attr.show = trip_point_type_show; + ++ if (IS_ENABLED(CONFIG_THERMAL_WRITABLE_TRIPS)) { ++ tz->trip_type_attrs[indx].attr.store ++ = trip_point_type_activate; ++ tz->trip_type_attrs[indx].attr.attr.mode |= S_IWUSR; ++ } ++ + device_create_file(&tz->device, + &tz->trip_type_attrs[indx].attr); + +--- a/include/linux/thermal.h ++++ b/include/linux/thermal.h +@@ -77,11 +77,19 @@ enum thermal_device_mode { + THERMAL_DEVICE_ENABLED, + }; + ++enum thermal_trip_activation_mode { ++ THERMAL_TRIP_ACTIVATION_DISABLED = 0, ++ THERMAL_TRIP_ACTIVATION_ENABLED, ++}; ++ + enum thermal_trip_type { + THERMAL_TRIP_ACTIVE = 0, + THERMAL_TRIP_PASSIVE, + THERMAL_TRIP_HOT, + THERMAL_TRIP_CRITICAL, ++ THERMAL_TRIP_CONFIGURABLE_HI, ++ THERMAL_TRIP_CONFIGURABLE_LOW, ++ THERMAL_TRIP_CRITICAL_LOW, + }; + + enum thermal_trend { +@@ -118,6 +126,8 @@ struct thermal_zone_device_ops { + enum thermal_trip_type *); + int (*get_trip_temp) (struct thermal_zone_device *, int, int *); + int (*set_trip_temp) (struct thermal_zone_device *, int, int); ++ int (*set_trip_activate) (struct thermal_zone_device *, int, ++ enum thermal_trip_activation_mode); + int (*get_trip_hyst) (struct thermal_zone_device *, int, int *); + int (*set_trip_hyst) (struct thermal_zone_device *, int, int); + int (*get_crit_temp) (struct thermal_zone_device *, int *); +@@ -360,6 +370,8 @@ struct thermal_genl_event { + * temperature. + * @set_trip_temp: a pointer to a function that sets the trip temperature on + * hardware. 
++ * @activate_trip_type: a pointer to a function to enable/disable trip ++ * temperature interrupts + */ + struct thermal_zone_of_device_ops { + int (*get_temp)(void *, int *); +@@ -367,6 +379,8 @@ struct thermal_zone_of_device_ops { + int (*set_trips)(void *, int, int); + int (*set_emul_temp)(void *, int); + int (*set_trip_temp)(void *, int, int); ++ int (*set_trip_activate)(void *, int, ++ enum thermal_trip_activation_mode); + }; + + /** diff --git a/target/linux/ipq40xx/patches-4.9/0064-clk-clk-rpm-fixes.patch b/target/linux/ipq40xx/patches-4.9/0064-clk-clk-rpm-fixes.patch new file mode 100644 index 000000000..b803488a1 --- /dev/null +++ b/target/linux/ipq40xx/patches-4.9/0064-clk-clk-rpm-fixes.patch @@ -0,0 +1,93 @@ +From d30840e2b1cf79d90392e6051b0c0b6006d29d8b Mon Sep 17 00:00:00 2001 +From: John Crispin +Date: Thu, 9 Mar 2017 09:32:40 +0100 +Subject: [PATCH 64/69] clk: clk-rpm fixes + +Signed-off-by: John Crispin +--- + .../devicetree/bindings/clock/qcom,rpmcc.txt | 1 + + drivers/clk/qcom/clk-rpm.c | 35 ++++++++++++++++++++++ + include/dt-bindings/clock/qcom,rpmcc.h | 4 +++ + 3 files changed, 40 insertions(+) + +--- a/Documentation/devicetree/bindings/clock/qcom,rpmcc.txt ++++ b/Documentation/devicetree/bindings/clock/qcom,rpmcc.txt +@@ -12,6 +12,7 @@ Required properties : + + "qcom,rpmcc-msm8916", "qcom,rpmcc" + "qcom,rpmcc-apq8064", "qcom,rpmcc" ++ "qcom,rpmcc-ipq806x", "qcom,rpmcc" + + - #clock-cells : shall contain 1 + +--- a/drivers/clk/qcom/clk-rpm.c ++++ b/drivers/clk/qcom/clk-rpm.c +@@ -359,6 +359,16 @@ DEFINE_CLK_RPM(apq8064, sfab_clk, sfab_a + DEFINE_CLK_RPM(apq8064, sfpb_clk, sfpb_a_clk, QCOM_RPM_SFPB_CLK); + DEFINE_CLK_RPM(apq8064, qdss_clk, qdss_a_clk, QCOM_RPM_QDSS_CLK); + ++/* ipq806x */ ++DEFINE_CLK_RPM(ipq806x, afab_clk, afab_a_clk, QCOM_RPM_APPS_FABRIC_CLK); ++DEFINE_CLK_RPM(ipq806x, cfpb_clk, cfpb_a_clk, QCOM_RPM_CFPB_CLK); ++DEFINE_CLK_RPM(ipq806x, daytona_clk, daytona_a_clk, QCOM_RPM_DAYTONA_FABRIC_CLK); ++DEFINE_CLK_RPM(ipq806x, 
ebi1_clk, ebi1_a_clk, QCOM_RPM_EBI1_CLK); ++DEFINE_CLK_RPM(ipq806x, sfab_clk, sfab_a_clk, QCOM_RPM_SYS_FABRIC_CLK); ++DEFINE_CLK_RPM(ipq806x, sfpb_clk, sfpb_a_clk, QCOM_RPM_SFPB_CLK); ++DEFINE_CLK_RPM(ipq806x, nss_fabric_0_clk, nss_fabric_0_a_clk, QCOM_RPM_NSS_FABRIC_0_CLK); ++DEFINE_CLK_RPM(ipq806x, nss_fabric_1_clk, nss_fabric_1_a_clk, QCOM_RPM_NSS_FABRIC_1_CLK); ++ + static struct clk_rpm *apq8064_clks[] = { + [RPM_APPS_FABRIC_CLK] = &apq8064_afab_clk, + [RPM_APPS_FABRIC_A_CLK] = &apq8064_afab_a_clk, +@@ -380,13 +390,38 @@ static struct clk_rpm *apq8064_clks[] = + [RPM_QDSS_A_CLK] = &apq8064_qdss_a_clk, + }; + ++static struct clk_rpm *ipq806x_clks[] = { ++ [RPM_APPS_FABRIC_CLK] = &ipq806x_afab_clk, ++ [RPM_APPS_FABRIC_A_CLK] = &ipq806x_afab_a_clk, ++ [RPM_CFPB_CLK] = &ipq806x_cfpb_clk, ++ [RPM_CFPB_A_CLK] = &ipq806x_cfpb_a_clk, ++ [RPM_DAYTONA_FABRIC_CLK] = &ipq806x_daytona_clk, ++ [RPM_DAYTONA_FABRIC_A_CLK] = &ipq806x_daytona_a_clk, ++ [RPM_EBI1_CLK] = &ipq806x_ebi1_clk, ++ [RPM_EBI1_A_CLK] = &ipq806x_ebi1_a_clk, ++ [RPM_SYS_FABRIC_CLK] = &ipq806x_sfab_clk, ++ [RPM_SYS_FABRIC_A_CLK] = &ipq806x_sfab_a_clk, ++ [RPM_SFPB_CLK] = &ipq806x_sfpb_clk, ++ [RPM_SFPB_A_CLK] = &ipq806x_sfpb_a_clk, ++ [RPM_NSS_FABRIC_0_CLK] = &ipq806x_nss_fabric_0_clk, ++ [RPM_NSS_FABRIC_0_A_CLK] = &ipq806x_nss_fabric_0_a_clk, ++ [RPM_NSS_FABRIC_1_CLK] = &ipq806x_nss_fabric_1_clk, ++ [RPM_NSS_FABRIC_1_A_CLK] = &ipq806x_nss_fabric_1_a_clk, ++}; ++ + static const struct rpm_clk_desc rpm_clk_apq8064 = { + .clks = apq8064_clks, + .num_clks = ARRAY_SIZE(apq8064_clks), + }; + ++static const struct rpm_clk_desc rpm_clk_ipq806x = { ++ .clks = ipq806x_clks, ++ .num_clks = ARRAY_SIZE(ipq806x_clks), ++}; ++ + static const struct of_device_id rpm_clk_match_table[] = { + { .compatible = "qcom,rpmcc-apq8064", .data = &rpm_clk_apq8064 }, ++ { .compatible = "qcom,rpmcc-ipq806x", .data = &rpm_clk_ipq806x }, + { } + }; + MODULE_DEVICE_TABLE(of, rpm_clk_match_table); +--- 
a/include/dt-bindings/clock/qcom,rpmcc.h ++++ b/include/dt-bindings/clock/qcom,rpmcc.h +@@ -37,6 +37,10 @@ + #define RPM_SYS_FABRIC_A_CLK 19 + #define RPM_SFPB_CLK 20 + #define RPM_SFPB_A_CLK 21 ++#define RPM_NSS_FABRIC_0_CLK 22 ++#define RPM_NSS_FABRIC_0_A_CLK 23 ++#define RPM_NSS_FABRIC_1_CLK 24 ++#define RPM_NSS_FABRIC_1_A_CLK 25 + + /* msm8916 */ + #define RPM_SMD_XO_CLK_SRC 0 diff --git a/target/linux/ipq40xx/patches-4.9/0065-arm-override-compiler-flags.patch b/target/linux/ipq40xx/patches-4.9/0065-arm-override-compiler-flags.patch new file mode 100644 index 000000000..e5af7ffa2 --- /dev/null +++ b/target/linux/ipq40xx/patches-4.9/0065-arm-override-compiler-flags.patch @@ -0,0 +1,21 @@ +From 4d8e29642661397a339ac3485f212c6360445421 Mon Sep 17 00:00:00 2001 +From: John Crispin +Date: Thu, 9 Mar 2017 09:33:32 +0100 +Subject: [PATCH 65/69] arm: override compiler flags + +Signed-off-by: John Crispin +--- + arch/arm/Makefile | 2 +- + 1 file changed, 1 insertion(+), 1 deletion(-) + +--- a/arch/arm/Makefile ++++ b/arch/arm/Makefile +@@ -65,7 +65,7 @@ KBUILD_CFLAGS += $(call cc-option,-fno-i + # macro, but instead defines a whole series of macros which makes + # testing for a specific architecture or later rather impossible. + arch-$(CONFIG_CPU_32v7M) =-D__LINUX_ARM_ARCH__=7 -march=armv7-m -Wa,-march=armv7-m +-arch-$(CONFIG_CPU_32v7) =-D__LINUX_ARM_ARCH__=7 $(call cc-option,-march=armv7-a,-march=armv5t -Wa$(comma)-march=armv7-a) ++arch-$(CONFIG_CPU_32v7) =-D__LINUX_ARM_ARCH__=7 -mcpu=cortex-a15 + arch-$(CONFIG_CPU_32v6) =-D__LINUX_ARM_ARCH__=6 $(call cc-option,-march=armv6,-march=armv5t -Wa$(comma)-march=armv6) + # Only override the compiler option if ARMv6. 
The ARMv6K extensions are + # always available in ARMv7 diff --git a/target/linux/ipq40xx/patches-4.9/0066-GPIO-add-named-gpio-exports.patch b/target/linux/ipq40xx/patches-4.9/0066-GPIO-add-named-gpio-exports.patch new file mode 100644 index 000000000..4aa80007d --- /dev/null +++ b/target/linux/ipq40xx/patches-4.9/0066-GPIO-add-named-gpio-exports.patch @@ -0,0 +1,166 @@ +From a37b0c9113647b2120cf1a18cfc46afdb3f1fccc Mon Sep 17 00:00:00 2001 +From: John Crispin +Date: Tue, 12 Aug 2014 20:49:27 +0200 +Subject: [PATCH 66/69] GPIO: add named gpio exports + +Signed-off-by: John Crispin +--- + drivers/gpio/gpiolib-of.c | 68 +++++++++++++++++++++++++++++++++++++++++++ + drivers/gpio/gpiolib-sysfs.c | 10 ++++++- + include/asm-generic/gpio.h | 6 ++++ + include/linux/gpio/consumer.h | 8 +++++ + 4 files changed, 91 insertions(+), 1 deletion(-) + +--- a/drivers/gpio/gpiolib-of.c ++++ b/drivers/gpio/gpiolib-of.c +@@ -23,6 +23,8 @@ + #include + #include + #include ++#include ++#include + + #include "gpiolib.h" + +@@ -538,3 +540,69 @@ void of_gpiochip_remove(struct gpio_chip + gpiochip_remove_pin_ranges(chip); + of_node_put(chip->of_node); + } ++ ++static struct of_device_id gpio_export_ids[] = { ++ { .compatible = "gpio-export" }, ++ { /* sentinel */ } ++}; ++ ++static int __init of_gpio_export_probe(struct platform_device *pdev) ++{ ++ struct device_node *np = pdev->dev.of_node; ++ struct device_node *cnp; ++ u32 val; ++ int nb = 0; ++ ++ for_each_child_of_node(np, cnp) { ++ const char *name = NULL; ++ int gpio; ++ bool dmc; ++ int max_gpio = 1; ++ int i; ++ ++ of_property_read_string(cnp, "gpio-export,name", &name); ++ ++ if (!name) ++ max_gpio = of_gpio_count(cnp); ++ ++ for (i = 0; i < max_gpio; i++) { ++ unsigned flags = 0; ++ enum of_gpio_flags of_flags; ++ ++ gpio = of_get_gpio_flags(cnp, i, &of_flags); ++ ++ if (of_flags == OF_GPIO_ACTIVE_LOW) ++ flags |= GPIOF_ACTIVE_LOW; ++ ++ if (!of_property_read_u32(cnp, "gpio-export,output", &val)) ++ flags |= val ? 
GPIOF_OUT_INIT_HIGH : GPIOF_OUT_INIT_LOW; ++ else ++ flags |= GPIOF_IN; ++ ++ if (devm_gpio_request_one(&pdev->dev, gpio, flags, name ? name : of_node_full_name(np))) ++ continue; ++ ++ dmc = of_property_read_bool(cnp, "gpio-export,direction_may_change"); ++ gpio_export_with_name(gpio, dmc, name); ++ nb++; ++ } ++ } ++ ++ dev_info(&pdev->dev, "%d gpio(s) exported\n", nb); ++ ++ return 0; ++} ++ ++static struct platform_driver gpio_export_driver = { ++ .driver = { ++ .name = "gpio-export", ++ .owner = THIS_MODULE, ++ .of_match_table = of_match_ptr(gpio_export_ids), ++ }, ++}; ++ ++static int __init of_gpio_export_init(void) ++{ ++ return platform_driver_probe(&gpio_export_driver, of_gpio_export_probe); ++} ++device_initcall(of_gpio_export_init); +--- a/drivers/gpio/gpiolib-sysfs.c ++++ b/drivers/gpio/gpiolib-sysfs.c +@@ -544,7 +544,7 @@ static struct class gpio_class = { + * + * Returns zero on success, else an error. + */ +-int gpiod_export(struct gpio_desc *desc, bool direction_may_change) ++int __gpiod_export(struct gpio_desc *desc, bool direction_may_change, const char *name) + { + struct gpio_chip *chip; + struct gpio_device *gdev; +@@ -606,6 +606,8 @@ int gpiod_export(struct gpio_desc *desc, + offset = gpio_chip_hwgpio(desc); + if (chip->names && chip->names[offset]) + ioname = chip->names[offset]; ++ if (name) ++ ioname = name; + + dev = device_create_with_groups(&gpio_class, &gdev->dev, + MKDEV(0, 0), data, gpio_groups, +@@ -627,6 +629,12 @@ err_unlock: + gpiod_dbg(desc, "%s: status %d\n", __func__, status); + return status; + } ++EXPORT_SYMBOL_GPL(__gpiod_export); ++ ++int gpiod_export(struct gpio_desc *desc, bool direction_may_change) ++{ ++ return __gpiod_export(desc, direction_may_change, NULL); ++} + EXPORT_SYMBOL_GPL(gpiod_export); + + static int match_export(struct device *dev, const void *desc) +--- a/include/asm-generic/gpio.h ++++ b/include/asm-generic/gpio.h +@@ -126,6 +126,12 @@ static inline int gpio_export(unsigned g + return 
gpiod_export(gpio_to_desc(gpio), direction_may_change); + } + ++int __gpiod_export(struct gpio_desc *desc, bool direction_may_change, const char *name); ++static inline int gpio_export_with_name(unsigned gpio, bool direction_may_change, const char *name) ++{ ++ return __gpiod_export(gpio_to_desc(gpio), direction_may_change, name); ++} ++ + static inline int gpio_export_link(struct device *dev, const char *name, + unsigned gpio) + { +--- a/include/linux/gpio/consumer.h ++++ b/include/linux/gpio/consumer.h +@@ -427,6 +427,7 @@ static inline struct gpio_desc *devm_get + + #if IS_ENABLED(CONFIG_GPIOLIB) && IS_ENABLED(CONFIG_GPIO_SYSFS) + ++int _gpiod_export(struct gpio_desc *desc, bool direction_may_change, const char *name); + int gpiod_export(struct gpio_desc *desc, bool direction_may_change); + int gpiod_export_link(struct device *dev, const char *name, + struct gpio_desc *desc); +@@ -434,6 +435,13 @@ void gpiod_unexport(struct gpio_desc *de + + #else /* CONFIG_GPIOLIB && CONFIG_GPIO_SYSFS */ + ++static inline int _gpiod_export(struct gpio_desc *desc, ++ bool direction_may_change, ++ const char *name) ++{ ++ return -ENOSYS; ++} ++ + static inline int gpiod_export(struct gpio_desc *desc, + bool direction_may_change) + { diff --git a/target/linux/ipq40xx/patches-4.9/0067-generic-Mangle-bootloader-s-kernel-arguments.patch b/target/linux/ipq40xx/patches-4.9/0067-generic-Mangle-bootloader-s-kernel-arguments.patch new file mode 100644 index 000000000..478dad786 --- /dev/null +++ b/target/linux/ipq40xx/patches-4.9/0067-generic-Mangle-bootloader-s-kernel-arguments.patch @@ -0,0 +1,189 @@ +From 71270226b14733a4b1f2cde58ea9265caa50b38d Mon Sep 17 00:00:00 2001 +From: Adrian Panella +Date: Thu, 9 Mar 2017 09:37:17 +0100 +Subject: [PATCH 67/69] generic: Mangle bootloader's kernel arguments + +The command-line arguments provided by the boot loader will be +appended to a new device tree property: bootloader-args. 
+If there is a property "append-rootblock" in DT under /chosen +and a root= option in bootloaders command line it will be parsed +and added to DT bootargs with the form: XX. +Only command line ATAG will be processed, the rest of the ATAGs +sent by bootloader will be ignored. +This is useful in dual boot systems, to get the current root partition +without affecting the rest of the system. + +Signed-off-by: Adrian Panella +--- + arch/arm/Kconfig | 11 +++++ + arch/arm/boot/compressed/atags_to_fdt.c | 72 ++++++++++++++++++++++++++++++++- + init/main.c | 16 ++++++++ + 3 files changed, 98 insertions(+), 1 deletion(-) + +--- a/arch/arm/Kconfig ++++ b/arch/arm/Kconfig +@@ -1948,6 +1948,17 @@ config ARM_ATAG_DTB_COMPAT_CMDLINE_EXTEN + The command-line arguments provided by the boot loader will be + appended to the the device tree bootargs property. + ++config ARM_ATAG_DTB_COMPAT_CMDLINE_MANGLE ++ bool "Append rootblock parsing bootloader's kernel arguments" ++ help ++ The command-line arguments provided by the boot loader will be ++ appended to a new device tree property: bootloader-args. ++ If there is a property "append-rootblock" in DT under /chosen ++ and a root= option in bootloaders command line it will be parsed ++ and added to DT bootargs with the form: XX. ++ Only command line ATAG will be processed, the rest of the ATAGs ++ sent by bootloader will be ignored. 
++ + endchoice + + config CMDLINE +--- a/arch/arm/boot/compressed/atags_to_fdt.c ++++ b/arch/arm/boot/compressed/atags_to_fdt.c +@@ -3,6 +3,8 @@ + + #if defined(CONFIG_ARM_ATAG_DTB_COMPAT_CMDLINE_EXTEND) + #define do_extend_cmdline 1 ++#elif defined(CONFIG_ARM_ATAG_DTB_COMPAT_CMDLINE_MANGLE) ++#define do_extend_cmdline 1 + #else + #define do_extend_cmdline 0 + #endif +@@ -66,6 +68,59 @@ static uint32_t get_cell_size(const void + return cell_size; + } + ++#if defined(CONFIG_ARM_ATAG_DTB_COMPAT_CMDLINE_MANGLE) ++ ++static char *append_rootblock(char *dest, const char *str, int len, void *fdt) ++{ ++ char *ptr, *end; ++ char *root="root="; ++ int i, l; ++ const char *rootblock; ++ ++ //ARM doesn't have __HAVE_ARCH_STRSTR, so search manually ++ ptr = str - 1; ++ ++ do { ++ //first find an 'r' at the beginning or after a space ++ do { ++ ptr++; ++ ptr = strchr(ptr, 'r'); ++ if(!ptr) return dest; ++ ++ } while (ptr != str && *(ptr-1) != ' '); ++ ++ //then check for the rest ++ for(i = 1; i <= 4; i++) ++ if(*(ptr+i) != *(root+i)) break; ++ ++ } while (i != 5); ++ ++ end = strchr(ptr, ' '); ++ end = end ? 
(end - 1) : (strchr(ptr, 0) - 1); ++ ++ //find partition number (assumes format root=/dev/mtdXX | /dev/mtdblockXX | yy:XX ) ++ for( i = 0; end >= ptr && *end >= '0' && *end <= '9'; end--, i++); ++ ptr = end + 1; ++ ++ /* if append-rootblock property is set use it to append to command line */ ++ rootblock = getprop(fdt, "/chosen", "append-rootblock", &l); ++ if(rootblock != NULL) { ++ if(*dest != ' ') { ++ *dest = ' '; ++ dest++; ++ len++; ++ } ++ if (len + l + i <= COMMAND_LINE_SIZE) { ++ memcpy(dest, rootblock, l); ++ dest += l - 1; ++ memcpy(dest, ptr, i); ++ dest += i; ++ } ++ } ++ return dest; ++} ++#endif ++ + static void merge_fdt_bootargs(void *fdt, const char *fdt_cmdline) + { + char cmdline[COMMAND_LINE_SIZE]; +@@ -85,12 +140,21 @@ static void merge_fdt_bootargs(void *fdt + + /* and append the ATAG_CMDLINE */ + if (fdt_cmdline) { ++ ++#if defined(CONFIG_ARM_ATAG_DTB_COMPAT_CMDLINE_MANGLE) ++ //save original bootloader args ++ //and append ubi.mtd with root partition number to current cmdline ++ setprop_string(fdt, "/chosen", "bootloader-args", fdt_cmdline); ++ ptr = append_rootblock(ptr, fdt_cmdline, len, fdt); ++ ++#else + len = strlen(fdt_cmdline); + if (ptr - cmdline + len + 2 < COMMAND_LINE_SIZE) { + *ptr++ = ' '; + memcpy(ptr, fdt_cmdline, len); + ptr += len; + } ++#endif + } + *ptr = '\0'; + +@@ -147,7 +211,9 @@ int atags_to_fdt(void *atag_list, void * + else + setprop_string(fdt, "/chosen", "bootargs", + atag->u.cmdline.cmdline); +- } else if (atag->hdr.tag == ATAG_MEM) { ++ } ++#ifndef CONFIG_ARM_ATAG_DTB_COMPAT_CMDLINE_MANGLE ++ else if (atag->hdr.tag == ATAG_MEM) { + if (memcount >= sizeof(mem_reg_property)/4) + continue; + if (!atag->u.mem.size) +@@ -186,6 +252,10 @@ int atags_to_fdt(void *atag_list, void * + setprop(fdt, "/memory", "reg", mem_reg_property, + 4 * memcount * memsize); + } ++#else ++ ++ } ++#endif + + return fdt_pack(fdt); + } +--- a/init/main.c ++++ b/init/main.c +@@ -88,6 +88,10 @@ + #include + #include + ++#if 
defined(CONFIG_ARM_ATAG_DTB_COMPAT_CMDLINE_MANGLE) ++#include ++#endif ++ + static int kernel_init(void *); + + extern void init_IRQ(void); +@@ -539,6 +543,18 @@ asmlinkage __visible void __init start_k + page_alloc_init(); + + pr_notice("Kernel command line: %s\n", boot_command_line); ++ ++#if defined(CONFIG_ARM_ATAG_DTB_COMPAT_CMDLINE_MANGLE) ++ //Show bootloader's original command line for reference ++ if(of_chosen) { ++ const char *prop = of_get_property(of_chosen, "bootloader-args", NULL); ++ if(prop) ++ pr_notice("Bootloader command line (ignored): %s\n", prop); ++ else ++ pr_notice("Bootloader command line not present\n"); ++ } ++#endif ++ + parse_early_param(); + after_dashes = parse_args("Booting kernel", + static_command_line, __start___param, diff --git a/target/linux/ipq40xx/patches-4.9/0068-spi-add-gpio-cs-support.patch b/target/linux/ipq40xx/patches-4.9/0068-spi-add-gpio-cs-support.patch new file mode 100644 index 000000000..0c03bc9f7 --- /dev/null +++ b/target/linux/ipq40xx/patches-4.9/0068-spi-add-gpio-cs-support.patch @@ -0,0 +1,71 @@ +From b9c998eb7735df8000cf48d77f9271823e8e73da Mon Sep 17 00:00:00 2001 +From: Ram Chandra Jangir +Date: Thu, 9 Mar 2017 09:39:05 +0100 +Subject: [PATCH 68/69] spi: add gpio cs support + +Signed-off-by: John Crispin +--- + drivers/spi/spi-qup.c | 38 ++++++++++++++++++++++++++++++++++++++ + 1 file changed, 38 insertions(+) + +--- a/drivers/spi/spi-qup.c ++++ b/drivers/spi/spi-qup.c +@@ -24,6 +24,7 @@ + #include + #include + #include ++#include + + #define QUP_CONFIG 0x0000 + #define QUP_STATE 0x0004 +@@ -1019,6 +1020,38 @@ err_tx: + return ret; + } + ++static void spi_qup_set_cs(struct spi_device *spi, bool val) ++{ ++ struct spi_qup *controller; ++ u32 spi_ioc; ++ u32 spi_ioc_orig; ++ ++ controller = spi_master_get_devdata(spi->master); ++ spi_ioc = readl_relaxed(controller->base + SPI_IO_CONTROL); ++ spi_ioc_orig = spi_ioc; ++ if (!val) ++ spi_ioc |= SPI_IO_C_FORCE_CS; ++ else ++ spi_ioc &= ~SPI_IO_C_FORCE_CS; ++ ++ 
if (spi_ioc != spi_ioc_orig) ++ writel_relaxed(spi_ioc, controller->base + SPI_IO_CONTROL); ++} ++ ++static int spi_qup_setup(struct spi_device *spi) ++{ ++ if (spi->cs_gpio >= 0) { ++ if (spi->mode & SPI_CS_HIGH) ++ gpio_set_value(spi->cs_gpio, 0); ++ else ++ gpio_set_value(spi->cs_gpio, 1); ++ ++ udelay(10); ++ } ++ ++ return 0; ++} ++ + static int spi_qup_probe(struct platform_device *pdev) + { + struct spi_master *master; +@@ -1115,6 +1148,11 @@ static int spi_qup_probe(struct platform + if (of_device_is_compatible(dev->of_node, "qcom,spi-qup-v1.1.1")) + controller->qup_v1 = 1; + ++ if (!controller->qup_v1) ++ master->set_cs = spi_qup_set_cs; ++ else ++ master->setup = spi_qup_setup; ++ + spin_lock_init(&controller->lock); + init_completion(&controller->done); + init_completion(&controller->dma_tx_done); diff --git a/target/linux/ipq40xx/patches-4.9/0069-arm-boot-add-dts-files.patch b/target/linux/ipq40xx/patches-4.9/0069-arm-boot-add-dts-files.patch new file mode 100644 index 000000000..3e7c91cf3 --- /dev/null +++ b/target/linux/ipq40xx/patches-4.9/0069-arm-boot-add-dts-files.patch @@ -0,0 +1,32 @@ +From 8f68331e14dff9a101f2d0e1d6bec84a031f27ee Mon Sep 17 00:00:00 2001 +From: John Crispin +Date: Thu, 9 Mar 2017 11:03:18 +0100 +Subject: [PATCH 69/69] arm: boot: add dts files + +Signed-off-by: John Crispin +--- + arch/arm/boot/dts/Makefile | 8 ++++++++ + 1 file changed, 8 insertions(+) + +--- a/arch/arm/boot/dts/Makefile ++++ b/arch/arm/boot/dts/Makefile +@@ -618,7 +618,19 @@ dtb-$(CONFIG_ARCH_QCOM) += \ + qcom-apq8084-mtp.dtb \ + qcom-ipq4019-ap.dk01.1-c1.dtb \ + qcom-ipq4019-ap.dk04.1-c1.dtb \ ++ qcom-ipq4019-fritz4040.dtb \ ++ qcom-ipq4019-a42.dtb \ ++ qcom-ipq4019-rt-ac58u.dtb \ ++ qcom-ipq4019-rt-acrh17.dtb \ + qcom-ipq8064-ap148.dtb \ ++ qcom-ipq8064-c2600.dtb \ ++ qcom-ipq8064-d7800.dtb \ ++ qcom-ipq8064-db149.dtb \ ++ qcom-ipq8064-ea8500.dtb \ ++ qcom-ipq8064-r7500.dtb \ ++ qcom-ipq8064-r7500v2.dtb \ ++ qcom-ipq8065-nbg6817.dtb \ ++ 
qcom-ipq8065-r7800.dtb \ + qcom-msm8660-surf.dtb \ + qcom-msm8960-cdp.dtb \ + qcom-msm8974-lge-nexus5-hammerhead.dtb \ diff --git a/target/linux/ipq40xx/patches-4.9/0070-qcom-spm-fix-probe-order.patch b/target/linux/ipq40xx/patches-4.9/0070-qcom-spm-fix-probe-order.patch new file mode 100644 index 000000000..b7e375dfb --- /dev/null +++ b/target/linux/ipq40xx/patches-4.9/0070-qcom-spm-fix-probe-order.patch @@ -0,0 +1,16 @@ +Check for SCM availability before attempting to use SPM + +Signed-off-by: Felix Fietkau + +--- a/drivers/soc/qcom/spm.c ++++ b/drivers/soc/qcom/spm.c +@@ -219,6 +219,9 @@ static int __init qcom_cpuidle_init(stru + cpumask_t mask; + bool use_scm_power_down = false; + ++ if (!qcom_scm_is_available()) ++ return -EPROBE_DEFER; ++ + for (i = 0; ; i++) { + state_node = of_parse_phandle(cpu_node, "cpu-idle-states", i); + if (!state_node) diff --git a/target/linux/ipq40xx/patches-4.9/0071-1-PCI-qcom-Fixed-IPQ806x-specific-clocks.patch b/target/linux/ipq40xx/patches-4.9/0071-1-PCI-qcom-Fixed-IPQ806x-specific-clocks.patch new file mode 100644 index 000000000..7a315627f --- /dev/null +++ b/target/linux/ipq40xx/patches-4.9/0071-1-PCI-qcom-Fixed-IPQ806x-specific-clocks.patch @@ -0,0 +1,95 @@ +From 86655aa14304ca88a8ce8847276147dbc1a83238 Mon Sep 17 00:00:00 2001 +From: Sham Muthayyan +Date: Tue, 19 Jul 2016 18:44:49 +0530 +Subject: PCI: qcom: Fixed IPQ806x specific clocks + +Change-Id: I488e1bc707d6a22b37a338f41935e3922009ba5e +Signed-off-by: Sham Muthayyan +--- + drivers/pci/host/pcie-qcom.c | 38 +++++++++++++++++++++++++++++++++----- + 1 file changed, 33 insertions(+), 5 deletions(-) + +--- a/drivers/pci/host/pcie-qcom.c ++++ b/drivers/pci/host/pcie-qcom.c +@@ -53,6 +53,8 @@ struct qcom_pcie_resources_v0 { + struct clk *iface_clk; + struct clk *core_clk; + struct clk *phy_clk; ++ struct clk *aux_clk; ++ struct clk *ref_clk; + struct reset_control *pci_reset; + struct reset_control *axi_reset; + struct reset_control *ahb_reset; +@@ -160,6 +162,14 @@ static 
int qcom_pcie_get_resources_v0(st + if (IS_ERR(res->phy_clk)) + return PTR_ERR(res->phy_clk); + ++ res->aux_clk = devm_clk_get(dev, "aux"); ++ if (IS_ERR(res->aux_clk)) ++ return PTR_ERR(res->aux_clk); ++ ++ res->ref_clk = devm_clk_get(dev, "ref"); ++ if (IS_ERR(res->ref_clk)) ++ return PTR_ERR(res->ref_clk); ++ + res->pci_reset = devm_reset_control_get(dev, "pci"); + if (IS_ERR(res->pci_reset)) + return PTR_ERR(res->pci_reset); +@@ -227,6 +237,8 @@ static void qcom_pcie_deinit_v0(struct q + clk_disable_unprepare(res->iface_clk); + clk_disable_unprepare(res->core_clk); + clk_disable_unprepare(res->phy_clk); ++ clk_disable_unprepare(res->aux_clk); ++ clk_disable_unprepare(res->ref_clk); + regulator_disable(res->vdda); + regulator_disable(res->vdda_phy); + regulator_disable(res->vdda_refclk); +@@ -269,16 +281,28 @@ static int qcom_pcie_init_v0(struct qcom + goto err_assert_ahb; + } + ++ ret = clk_prepare_enable(res->core_clk); ++ if (ret) { ++ dev_err(dev, "cannot prepare/enable core clock\n"); ++ goto err_clk_core; ++ } ++ + ret = clk_prepare_enable(res->phy_clk); + if (ret) { + dev_err(dev, "cannot prepare/enable phy clock\n"); + goto err_clk_phy; + } + +- ret = clk_prepare_enable(res->core_clk); ++ ret = clk_prepare_enable(res->aux_clk); + if (ret) { +- dev_err(dev, "cannot prepare/enable core clock\n"); +- goto err_clk_core; ++ dev_err(dev, "cannot prepare/enable aux clock\n"); ++ goto err_clk_aux; ++ } ++ ++ ret = clk_prepare_enable(res->ref_clk); ++ if (ret) { ++ dev_err(dev, "cannot prepare/enable ref clock\n"); ++ goto err_clk_ref; + } + + ret = reset_control_deassert(res->ahb_reset); +@@ -327,10 +351,14 @@ static int qcom_pcie_init_v0(struct qcom + return 0; + + err_deassert_ahb: +- clk_disable_unprepare(res->core_clk); +-err_clk_core: ++ clk_disable_unprepare(res->ref_clk); ++err_clk_ref: ++ clk_disable_unprepare(res->aux_clk); ++err_clk_aux: + clk_disable_unprepare(res->phy_clk); + err_clk_phy: ++ clk_disable_unprepare(res->core_clk); ++err_clk_core: + 
clk_disable_unprepare(res->iface_clk); + err_assert_ahb: + regulator_disable(res->vdda_phy); diff --git a/target/linux/ipq40xx/patches-4.9/0071-2-PCI-qcom-Fixed-IPQ806x-PCIE-reset-changes.patch b/target/linux/ipq40xx/patches-4.9/0071-2-PCI-qcom-Fixed-IPQ806x-PCIE-reset-changes.patch new file mode 100644 index 000000000..aab4f364e --- /dev/null +++ b/target/linux/ipq40xx/patches-4.9/0071-2-PCI-qcom-Fixed-IPQ806x-PCIE-reset-changes.patch @@ -0,0 +1,85 @@ +From 490d103232287eb51c92c49a4ef8865fd0a9d59e Mon Sep 17 00:00:00 2001 +From: Sham Muthayyan +Date: Tue, 19 Jul 2016 18:58:18 +0530 +Subject: PCI: qcom: Fixed IPQ806x PCIE reset changes + +Change-Id: Ia6590e960b9754b1e8b7a51f318788cd63e9e321 +Signed-off-by: Sham Muthayyan +--- + drivers/pci/host/pcie-qcom.c | 24 +++++++++++++++++++----- + 1 file changed, 19 insertions(+), 5 deletions(-) + +--- a/drivers/pci/host/pcie-qcom.c ++++ b/drivers/pci/host/pcie-qcom.c +@@ -60,6 +60,7 @@ struct qcom_pcie_resources_v0 { + struct reset_control *ahb_reset; + struct reset_control *por_reset; + struct reset_control *phy_reset; ++ struct reset_control *ext_reset; + struct regulator *vdda; + struct regulator *vdda_phy; + struct regulator *vdda_refclk; +@@ -190,6 +191,10 @@ static int qcom_pcie_get_resources_v0(st + if (IS_ERR(res->phy_reset)) + return PTR_ERR(res->phy_reset); + ++ res->ext_reset = devm_reset_control_get(dev, "ext"); ++ if (IS_ERR(res->ext_reset)) ++ return PTR_ERR(res->ext_reset); ++ + return 0; + } + +@@ -234,6 +239,7 @@ static void qcom_pcie_deinit_v0(struct q + reset_control_assert(res->ahb_reset); + reset_control_assert(res->por_reset); + reset_control_assert(res->pci_reset); ++ reset_control_assert(res->ext_reset); + clk_disable_unprepare(res->iface_clk); + clk_disable_unprepare(res->core_clk); + clk_disable_unprepare(res->phy_clk); +@@ -251,6 +257,12 @@ static int qcom_pcie_init_v0(struct qcom + u32 val; + int ret; + ++ ret = reset_control_assert(res->ahb_reset); ++ if (ret) { ++ dev_err(dev, "cannot assert 
ahb reset\n"); ++ return ret; ++ } ++ + ret = regulator_enable(res->vdda); + if (ret) { + dev_err(dev, "cannot enable vdda regulator\n"); +@@ -269,16 +281,16 @@ static int qcom_pcie_init_v0(struct qcom + goto err_vdda_phy; + } + +- ret = reset_control_assert(res->ahb_reset); ++ ret = reset_control_deassert(res->ext_reset); + if (ret) { +- dev_err(dev, "cannot assert ahb reset\n"); +- goto err_assert_ahb; ++ dev_err(dev, "cannot deassert ext reset\n"); ++ goto err_reset_ext; + } + + ret = clk_prepare_enable(res->iface_clk); + if (ret) { + dev_err(dev, "cannot prepare/enable iface clock\n"); +- goto err_assert_ahb; ++ goto err_iface; + } + + ret = clk_prepare_enable(res->core_clk); +@@ -360,7 +372,9 @@ err_clk_phy: + clk_disable_unprepare(res->core_clk); + err_clk_core: + clk_disable_unprepare(res->iface_clk); +-err_assert_ahb: ++err_iface: ++ reset_control_assert(res->ext_reset); ++err_reset_ext: + regulator_disable(res->vdda_phy); + err_vdda_phy: + regulator_disable(res->vdda_refclk); diff --git a/target/linux/ipq40xx/patches-4.9/0071-3-PCI-qcom-Fixed-IPQ806x-PCIE-init-changes.patch b/target/linux/ipq40xx/patches-4.9/0071-3-PCI-qcom-Fixed-IPQ806x-PCIE-init-changes.patch new file mode 100644 index 000000000..6c68edfe9 --- /dev/null +++ b/target/linux/ipq40xx/patches-4.9/0071-3-PCI-qcom-Fixed-IPQ806x-PCIE-init-changes.patch @@ -0,0 +1,127 @@ +From eddd13215d0f2b549ebc5f0e8796d5b1231f90a0 Mon Sep 17 00:00:00 2001 +From: Sham Muthayyan +Date: Tue, 19 Jul 2016 19:58:22 +0530 +Subject: PCI: qcom: Fixed IPQ806x PCIE init changes + +Change-Id: Ic319b1aec27a47809284759f8fcb6a8815b7cf7e +Signed-off-by: Sham Muthayyan +--- + drivers/pci/host/pcie-qcom.c | 62 +++++++++++++++++++++++++++++++++++++------- + 1 file changed, 53 insertions(+), 9 deletions(-) + +--- a/drivers/pci/host/pcie-qcom.c ++++ b/drivers/pci/host/pcie-qcom.c +@@ -37,7 +37,13 @@ + #include "pcie-designware.h" + + #define PCIE20_PARF_PHY_CTRL 0x40 ++#define PHY_CTRL_PHY_TX0_TERM_OFFSET_MASK (0x1f << 16) ++#define 
PHY_CTRL_PHY_TX0_TERM_OFFSET(x) (x << 16) ++ + #define PCIE20_PARF_PHY_REFCLK 0x4C ++#define REF_SSP_EN BIT(16) ++#define REF_USE_PAD BIT(12) ++ + #define PCIE20_PARF_DBI_BASE_ADDR 0x168 + #define PCIE20_PARF_SLV_ADDR_SPACE_SIZE 0x16c + #define PCIE20_PARF_AXI_MSTR_WR_ADDR_HALT 0x178 +@@ -48,6 +54,18 @@ + #define PCIE20_CAP 0x70 + + #define PERST_DELAY_US 1000 ++/* PARF registers */ ++#define PCIE20_PARF_PCS_DEEMPH 0x34 ++#define PCS_DEEMPH_TX_DEEMPH_GEN1(x) (x << 16) ++#define PCS_DEEMPH_TX_DEEMPH_GEN2_3_5DB(x) (x << 8) ++#define PCS_DEEMPH_TX_DEEMPH_GEN2_6DB(x) (x << 0) ++ ++#define PCIE20_PARF_PCS_SWING 0x38 ++#define PCS_SWING_TX_SWING_FULL(x) (x << 8) ++#define PCS_SWING_TX_SWING_LOW(x) (x << 0) ++ ++#define PCIE20_PARF_CONFIG_BITS 0x50 ++#define PHY_RX0_EQ(x) (x << 24) + + struct qcom_pcie_resources_v0 { + struct clk *iface_clk; +@@ -64,6 +82,7 @@ struct qcom_pcie_resources_v0 { + struct regulator *vdda; + struct regulator *vdda_phy; + struct regulator *vdda_refclk; ++ uint8_t phy_tx0_term_offset; + }; + + struct qcom_pcie_resources_v1 { +@@ -100,6 +119,16 @@ struct qcom_pcie { + + #define to_qcom_pcie(x) container_of(x, struct qcom_pcie, pp) + ++static inline void ++writel_masked(void __iomem *addr, u32 clear_mask, u32 set_mask) ++{ ++ u32 val = readl(addr); ++ ++ val &= ~clear_mask; ++ val |= set_mask; ++ writel(val, addr); ++} ++ + static void qcom_ep_reset_assert(struct qcom_pcie *pcie) + { + gpiod_set_value(pcie->reset, 1); +@@ -195,6 +224,10 @@ static int qcom_pcie_get_resources_v0(st + if (IS_ERR(res->ext_reset)) + return PTR_ERR(res->ext_reset); + ++ if (of_property_read_u8(dev->of_node, "phy-tx0-term-offset", ++ &res->phy_tx0_term_offset)) ++ res->phy_tx0_term_offset = 0; ++ + return 0; + } + +@@ -254,7 +287,6 @@ static int qcom_pcie_init_v0(struct qcom + { + struct qcom_pcie_resources_v0 *res = &pcie->res.v0; + struct device *dev = pcie->pp.dev; +- u32 val; + int ret; + + ret = reset_control_assert(res->ahb_reset); +@@ -323,15 +355,27 @@ static int 
qcom_pcie_init_v0(struct qcom + goto err_deassert_ahb; + } + +- /* enable PCIe clocks and resets */ +- val = readl(pcie->parf + PCIE20_PARF_PHY_CTRL); +- val &= ~BIT(0); +- writel(val, pcie->parf + PCIE20_PARF_PHY_CTRL); +- +- /* enable external reference clock */ +- val = readl(pcie->parf + PCIE20_PARF_PHY_REFCLK); +- val |= BIT(16); +- writel(val, pcie->parf + PCIE20_PARF_PHY_REFCLK); ++ writel_masked(pcie->parf + PCIE20_PARF_PHY_CTRL, BIT(0), 0); ++ ++ /* Set Tx termination offset */ ++ writel_masked(pcie->parf + PCIE20_PARF_PHY_CTRL, ++ PHY_CTRL_PHY_TX0_TERM_OFFSET_MASK, ++ PHY_CTRL_PHY_TX0_TERM_OFFSET(res->phy_tx0_term_offset)); ++ ++ /* PARF programming */ ++ writel(PCS_DEEMPH_TX_DEEMPH_GEN1(0x18) | ++ PCS_DEEMPH_TX_DEEMPH_GEN2_3_5DB(0x18) | ++ PCS_DEEMPH_TX_DEEMPH_GEN2_6DB(0x22), ++ pcie->parf + PCIE20_PARF_PCS_DEEMPH); ++ writel(PCS_SWING_TX_SWING_FULL(0x78) | ++ PCS_SWING_TX_SWING_LOW(0x78), ++ pcie->parf + PCIE20_PARF_PCS_SWING); ++ writel(PHY_RX0_EQ(0x4), pcie->parf + PCIE20_PARF_CONFIG_BITS); ++ ++ /* Enable reference clock */ ++ writel_masked(pcie->parf + PCIE20_PARF_PHY_REFCLK, ++ REF_USE_PAD, REF_SSP_EN); ++ + + ret = reset_control_deassert(res->phy_reset); + if (ret) { diff --git a/target/linux/ipq40xx/patches-4.9/0071-4-PCIE-designware-Fixed-PCI-host-init.patch b/target/linux/ipq40xx/patches-4.9/0071-4-PCIE-designware-Fixed-PCI-host-init.patch new file mode 100644 index 000000000..af9e121f8 --- /dev/null +++ b/target/linux/ipq40xx/patches-4.9/0071-4-PCIE-designware-Fixed-PCI-host-init.patch @@ -0,0 +1,68 @@ +From e833cdb5c792912d459773cc23153e5d78875d34 Mon Sep 17 00:00:00 2001 +From: Sham Muthayyan +Date: Tue, 19 Jul 2016 20:05:25 +0530 +Subject: PCIE: designware: Fixed PCI host init + +Change-Id: I949b302d77199fc09342acf26b7bb45a7ec467ee +Signed-off-by: Sham Muthayyan +--- + drivers/pci/host/pcie-designware.c | 9 +++++++-- + drivers/pci/host/pcie-designware.h | 2 +- + drivers/pci/host/pcie-qcom.c | 5 +++-- + 3 files changed, 11 insertions(+), 5 
deletions(-) + +--- a/drivers/pci/host/pcie-designware.c ++++ b/drivers/pci/host/pcie-designware.c +@@ -637,8 +637,13 @@ int dw_pcie_host_init(struct pcie_port * + } + } + +- if (pp->ops->host_init) +- pp->ops->host_init(pp); ++ if (pp->ops->host_init) { ++ ret = pp->ops->host_init(pp); ++ if (ret) { ++ dev_err(pp->dev, "hostinit failed\n"); ++ return 0; ++ } ++ } + + pp->root_bus_nr = pp->busn->start; + if (IS_ENABLED(CONFIG_PCI_MSI)) { +--- a/drivers/pci/host/pcie-designware.h ++++ b/drivers/pci/host/pcie-designware.h +@@ -63,7 +63,7 @@ struct pcie_host_ops { + int (*wr_other_conf)(struct pcie_port *pp, struct pci_bus *bus, + unsigned int devfn, int where, int size, u32 val); + int (*link_up)(struct pcie_port *pp); +- void (*host_init)(struct pcie_port *pp); ++ int (*host_init)(struct pcie_port *pp); + void (*msi_set_irq)(struct pcie_port *pp, int irq); + void (*msi_clear_irq)(struct pcie_port *pp, int irq); + phys_addr_t (*get_msi_addr)(struct pcie_port *pp); +--- a/drivers/pci/host/pcie-qcom.c ++++ b/drivers/pci/host/pcie-qcom.c +@@ -515,7 +515,7 @@ static int qcom_pcie_link_up(struct pcie + return !!(val & PCI_EXP_LNKSTA_DLLLA); + } + +-static void qcom_pcie_host_init(struct pcie_port *pp) ++static int qcom_pcie_host_init(struct pcie_port *pp) + { + struct qcom_pcie *pcie = to_qcom_pcie(pp); + int ret; +@@ -541,12 +541,13 @@ static void qcom_pcie_host_init(struct p + if (ret) + goto err; + +- return; ++ return 0; + err: + qcom_ep_reset_assert(pcie); + phy_power_off(pcie->phy); + err_deinit: + pcie->ops->deinit(pcie); ++ return ret; + } + + static int qcom_pcie_rd_own_conf(struct pcie_port *pp, int where, int size, diff --git a/target/linux/ipq40xx/patches-4.9/0071-5-PCI-qcom-Programming-the-PCIE-iATU-for-IPQ806x.patch b/target/linux/ipq40xx/patches-4.9/0071-5-PCI-qcom-Programming-the-PCIE-iATU-for-IPQ806x.patch new file mode 100644 index 000000000..98e2b54e3 --- /dev/null +++ 
b/target/linux/ipq40xx/patches-4.9/0071-5-PCI-qcom-Programming-the-PCIE-iATU-for-IPQ806x.patch @@ -0,0 +1,113 @@ +From d27c303e828d7e42f339a459d2abfe30c51698e9 Mon Sep 17 00:00:00 2001 +From: Sham Muthayyan +Date: Tue, 26 Jul 2016 12:28:31 +0530 +Subject: PCI: qcom: Programming the PCIE iATU for IPQ806x + +Resolved PCIE EP detection errors caused due to missing iATU programming. + +Change-Id: Ie95c0f8cb940abc0192a8a3c4e825ddba54b72fe +Signed-off-by: Sham Muthayyan +--- + drivers/pci/host/pcie-qcom.c | 77 ++++++++++++++++++++++++++++++++++++++++++++ + 1 file changed, 77 insertions(+) + +--- a/drivers/pci/host/pcie-qcom.c ++++ b/drivers/pci/host/pcie-qcom.c +@@ -52,6 +52,29 @@ + #define PCIE20_ELBI_SYS_CTRL_LT_ENABLE BIT(0) + + #define PCIE20_CAP 0x70 ++#define PCIE20_CAP_LINKCTRLSTATUS (PCIE20_CAP + 0x10) ++ ++#define PCIE20_AXI_MSTR_RESP_COMP_CTRL0 0x818 ++#define PCIE20_AXI_MSTR_RESP_COMP_CTRL1 0x81c ++ ++#define PCIE20_PLR_IATU_VIEWPORT 0x900 ++#define PCIE20_PLR_IATU_REGION_OUTBOUND (0x0 << 31) ++#define PCIE20_PLR_IATU_REGION_INDEX(x) (x << 0) ++ ++#define PCIE20_PLR_IATU_CTRL1 0x904 ++#define PCIE20_PLR_IATU_TYPE_CFG0 (0x4 << 0) ++#define PCIE20_PLR_IATU_TYPE_MEM (0x0 << 0) ++ ++#define PCIE20_PLR_IATU_CTRL2 0x908 ++#define PCIE20_PLR_IATU_ENABLE BIT(31) ++ ++#define PCIE20_PLR_IATU_LBAR 0x90C ++#define PCIE20_PLR_IATU_UBAR 0x910 ++#define PCIE20_PLR_IATU_LAR 0x914 ++#define PCIE20_PLR_IATU_LTAR 0x918 ++#define PCIE20_PLR_IATU_UTAR 0x91c ++ ++#define MSM_PCIE_DEV_CFG_ADDR 0x01000000 + + #define PERST_DELAY_US 1000 + /* PARF registers */ +@@ -163,6 +186,57 @@ static int qcom_pcie_establish_link(stru + return dw_pcie_wait_for_link(&pcie->pp); + } + ++static void qcom_pcie_prog_viewport_cfg0(struct qcom_pcie *pcie, u32 busdev) ++{ ++ struct pcie_port *pp = &pcie->pp; ++ ++ /* ++ * program and enable address translation region 0 (device config ++ * address space); region type config; ++ * axi config address range to device config address range ++ */ ++ 
writel(PCIE20_PLR_IATU_REGION_OUTBOUND | ++ PCIE20_PLR_IATU_REGION_INDEX(0), ++ pcie->pp.dbi_base + PCIE20_PLR_IATU_VIEWPORT); ++ ++ writel(PCIE20_PLR_IATU_TYPE_CFG0, pcie->pp.dbi_base + PCIE20_PLR_IATU_CTRL1); ++ writel(PCIE20_PLR_IATU_ENABLE, pcie->pp.dbi_base + PCIE20_PLR_IATU_CTRL2); ++ writel(pp->cfg0_base, pcie->pp.dbi_base + PCIE20_PLR_IATU_LBAR); ++ writel((pp->cfg0_base >> 32), pcie->pp.dbi_base + PCIE20_PLR_IATU_UBAR); ++ writel((pp->cfg0_base + pp->cfg0_size - 1), ++ pcie->pp.dbi_base + PCIE20_PLR_IATU_LAR); ++ writel(busdev, pcie->pp.dbi_base + PCIE20_PLR_IATU_LTAR); ++ writel(0, pcie->pp.dbi_base + PCIE20_PLR_IATU_UTAR); ++} ++ ++static void qcom_pcie_prog_viewport_mem2_outbound(struct qcom_pcie *pcie) ++{ ++ struct pcie_port *pp = &pcie->pp; ++ ++ /* ++ * program and enable address translation region 2 (device resource ++ * address space); region type memory; ++ * axi device bar address range to device bar address range ++ */ ++ writel(PCIE20_PLR_IATU_REGION_OUTBOUND | ++ PCIE20_PLR_IATU_REGION_INDEX(2), ++ pcie->pp.dbi_base + PCIE20_PLR_IATU_VIEWPORT); ++ ++ writel(PCIE20_PLR_IATU_TYPE_MEM, pcie->pp.dbi_base + PCIE20_PLR_IATU_CTRL1); ++ writel(PCIE20_PLR_IATU_ENABLE, pcie->pp.dbi_base + PCIE20_PLR_IATU_CTRL2); ++ writel(pp->mem_base, pcie->pp.dbi_base + PCIE20_PLR_IATU_LBAR); ++ writel((pp->mem_base >> 32), pcie->pp.dbi_base + PCIE20_PLR_IATU_UBAR); ++ writel(pp->mem_base + pp->mem_size - 1, ++ pcie->pp.dbi_base + PCIE20_PLR_IATU_LAR); ++ writel(pp->mem_bus_addr, pcie->pp.dbi_base + PCIE20_PLR_IATU_LTAR); ++ writel(upper_32_bits(pp->mem_bus_addr), ++ pcie->pp.dbi_base + PCIE20_PLR_IATU_UTAR); ++ ++ /* 256B PCIE buffer setting */ ++ writel(0x1, pcie->pp.dbi_base + PCIE20_AXI_MSTR_RESP_COMP_CTRL0); ++ writel(0x1, pcie->pp.dbi_base + PCIE20_AXI_MSTR_RESP_COMP_CTRL1); ++} ++ + static int qcom_pcie_get_resources_v0(struct qcom_pcie *pcie) + { + struct qcom_pcie_resources_v0 *res = &pcie->res.v0; +@@ -404,6 +478,9 @@ static int qcom_pcie_init_v0(struct 
qcom + /* wait for clock acquisition */ + usleep_range(1000, 1500); + ++ qcom_pcie_prog_viewport_cfg0(pcie, MSM_PCIE_DEV_CFG_ADDR); ++ qcom_pcie_prog_viewport_mem2_outbound(pcie); ++ + return 0; + + err_deassert_ahb: diff --git a/target/linux/ipq40xx/patches-4.9/0071-6-PCI-qcom-Force-GEN1-support.patch b/target/linux/ipq40xx/patches-4.9/0071-6-PCI-qcom-Force-GEN1-support.patch new file mode 100644 index 000000000..91891a5b2 --- /dev/null +++ b/target/linux/ipq40xx/patches-4.9/0071-6-PCI-qcom-Force-GEN1-support.patch @@ -0,0 +1,61 @@ +From 4910cfd150342ec7b038892262923c725a9c4001 Mon Sep 17 00:00:00 2001 +From: Sham Muthayyan +Date: Wed, 7 Sep 2016 16:44:28 +0530 +Subject: PCI: qcom: Force GEN1 support + +Change-Id: Ica54ddb737d7b851469deab1745f54bf431bd3f0 +Signed-off-by: Sham Muthayyan +--- + drivers/pci/host/pcie-qcom.c | 13 +++++++++++++ + 1 file changed, 13 insertions(+) + +--- a/drivers/pci/host/pcie-qcom.c ++++ b/drivers/pci/host/pcie-qcom.c +@@ -90,6 +90,8 @@ + #define PCIE20_PARF_CONFIG_BITS 0x50 + #define PHY_RX0_EQ(x) (x << 24) + ++#define PCIE20_LNK_CONTROL2_LINK_STATUS2 0xA0 ++ + struct qcom_pcie_resources_v0 { + struct clk *iface_clk; + struct clk *core_clk; +@@ -138,6 +140,7 @@ struct qcom_pcie { + struct phy *phy; + struct gpio_desc *reset; + struct qcom_pcie_ops *ops; ++ uint32_t force_gen1; + }; + + #define to_qcom_pcie(x) container_of(x, struct qcom_pcie, pp) +@@ -477,6 +480,11 @@ static int qcom_pcie_init_v0(struct qcom + + /* wait for clock acquisition */ + usleep_range(1000, 1500); ++ if (pcie->force_gen1) { ++ writel_relaxed((readl_relaxed( ++ pcie->pp.dbi_base + PCIE20_LNK_CONTROL2_LINK_STATUS2) | 1), ++ pcie->pp.dbi_base + PCIE20_LNK_CONTROL2_LINK_STATUS2); ++ } + + qcom_pcie_prog_viewport_cfg0(pcie, MSM_PCIE_DEV_CFG_ADDR); + qcom_pcie_prog_viewport_mem2_outbound(pcie); +@@ -666,6 +674,8 @@ static int qcom_pcie_probe(struct platfo + struct qcom_pcie *pcie; + struct pcie_port *pp; + int ret; ++ uint32_t force_gen1 = 0; ++ struct device_node 
*np = pdev->dev.of_node; + + pcie = devm_kzalloc(dev, sizeof(*pcie), GFP_KERNEL); + if (!pcie) +@@ -678,6 +688,9 @@ static int qcom_pcie_probe(struct platfo + if (IS_ERR(pcie->reset)) + return PTR_ERR(pcie->reset); + ++ of_property_read_u32(np, "force_gen1", &force_gen1); ++ pcie->force_gen1 = force_gen1; ++ + res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "parf"); + pcie->parf = devm_ioremap_resource(dev, res); + if (IS_ERR(pcie->parf)) diff --git a/target/linux/ipq40xx/patches-4.9/0071-7-pcie-Set-PCIE-MRRS-and-MPS-to-256B.patch b/target/linux/ipq40xx/patches-4.9/0071-7-pcie-Set-PCIE-MRRS-and-MPS-to-256B.patch new file mode 100644 index 000000000..157386458 --- /dev/null +++ b/target/linux/ipq40xx/patches-4.9/0071-7-pcie-Set-PCIE-MRRS-and-MPS-to-256B.patch @@ -0,0 +1,69 @@ +From edff8f777c6321ca89bb950a382f409c4a126e28 Mon Sep 17 00:00:00 2001 +From: Gokul Sriram Palanisamy +Date: Thu, 15 Dec 2016 17:38:18 +0530 +Subject: pcie: Set PCIE MRRS and MPS to 256B + +Set Max Read Request Size and Max Payload Size to 256 bytes, +per chip team recommendation. 
+ +Change-Id: I097004be2ced1b3096ffc10c318aae0b2bb155e8 +Signed-off-by: Gokul Sriram Palanisamy +--- + drivers/pci/host/pcie-qcom.c | 37 +++++++++++++++++++++++++++++++++++++ + 1 file changed, 37 insertions(+) + +(limited to 'drivers/pci/host/pcie-qcom.c') + +--- a/drivers/pci/host/pcie-qcom.c ++++ b/drivers/pci/host/pcie-qcom.c +@@ -92,6 +92,14 @@ + + #define PCIE20_LNK_CONTROL2_LINK_STATUS2 0xA0 + ++#define __set(v, a, b) (((v) << (b)) & GENMASK(a, b)) ++#define __mask(a, b) (((1 << ((a) + 1)) - 1) & ~((1 << (b)) - 1)) ++#define PCIE20_DEV_CAS 0x78 ++#define PCIE20_MRRS_MASK __mask(14, 12) ++#define PCIE20_MRRS(x) __set(x, 14, 12) ++#define PCIE20_MPS_MASK __mask(7, 5) ++#define PCIE20_MPS(x) __set(x, 7, 5) ++ + struct qcom_pcie_resources_v0 { + struct clk *iface_clk; + struct clk *core_clk; +@@ -745,6 +753,35 @@ static int qcom_pcie_probe(struct platfo + return 0; + } + ++static void qcom_pcie_fixup_final(struct pci_dev *dev) ++{ ++ int cap, err; ++ u16 ctl, reg_val; ++ ++ cap = pci_pcie_cap(dev); ++ if (!cap) ++ return; ++ ++ err = pci_read_config_word(dev, cap + PCI_EXP_DEVCTL, &ctl); ++ ++ if (err) ++ return; ++ ++ reg_val = ctl; ++ ++ if (((reg_val & PCIE20_MRRS_MASK) >> 12) > 1) ++ reg_val = (reg_val & ~(PCIE20_MRRS_MASK)) | PCIE20_MRRS(0x1); ++ ++ if (((ctl & PCIE20_MPS_MASK) >> 5) > 1) ++ reg_val = (reg_val & ~(PCIE20_MPS_MASK)) | PCIE20_MPS(0x1); ++ ++ err = pci_write_config_word(dev, cap + PCI_EXP_DEVCTL, reg_val); ++ ++ if (err) ++ pr_err("pcie config write failed %d\n", err); ++} ++DECLARE_PCI_FIXUP_FINAL(PCI_ANY_ID, PCI_ANY_ID, qcom_pcie_fixup_final); ++ + static const struct of_device_id qcom_pcie_match[] = { + { .compatible = "qcom,pcie-ipq8064", .data = &ops_v0 }, + { .compatible = "qcom,pcie-apq8064", .data = &ops_v0 }, diff --git a/target/linux/ipq40xx/patches-4.9/0071-8-pcie-qcom-Fixed-pcie_phy_clk-branch-issue.patch b/target/linux/ipq40xx/patches-4.9/0071-8-pcie-qcom-Fixed-pcie_phy_clk-branch-issue.patch new file mode 100644 index 
000000000..0f5050826 --- /dev/null +++ b/target/linux/ipq40xx/patches-4.9/0071-8-pcie-qcom-Fixed-pcie_phy_clk-branch-issue.patch @@ -0,0 +1,91 @@ +From b74bab6186131eea09459eedf5d737645a3559c9 Mon Sep 17 00:00:00 2001 +From: Abhishek Sahu +Date: Thu, 22 Dec 2016 11:18:45 +0530 +Subject: pcie: qcom: Fixed pcie_phy_clk branch issue + +Following backtraces are observed in PCIe deinit operation. + + Hardware name: Qualcomm (Flattened Device Tree) + (unwind_backtrace) from [] (show_stack+0x10/0x14) + (show_stack) from [] (dump_stack+0x84/0x98) + (dump_stack) from [] (warn_slowpath_common+0x9c/0xb8) + (warn_slowpath_common) from [] (warn_slowpath_fmt+0x30/0x40) + (warn_slowpath_fmt) from [] (clk_branch_wait+0x114/0x120) + (clk_branch_wait) from [] (clk_core_disable+0xd0/0x1f4) + (clk_core_disable) from [] (clk_disable+0x24/0x30) + (clk_disable) from [] (qcom_pcie_deinit_v0+0x6c/0xb8) + (qcom_pcie_deinit_v0) from [] (qcom_pcie_host_init+0xe0/0xe8) + (qcom_pcie_host_init) from [] (dw_pcie_host_init+0x3b0/0x538) + (dw_pcie_host_init) from [] (qcom_pcie_probe+0x20c/0x2e4) + +pcie_phy_clk is generated for PCIe controller itself and the +GCC controls its branch operation. This error is coming since +the assert operations turn off the parent clock before branch +clock. Now this patch moves clk_disable_unprepare before assert +operations. + +Similarly, during probe function, the clock branch operation +should be done after dessert operation. Currently, it does not +generate any error since bootloader enables the pcie_phy_clk +but the same error is coming during probe, if bootloader +disables pcie_phy_clk. 
+ +Change-Id: Ib29c154d10eb64363d9cc982ce5fd8107af5627d +Signed-off-by: Abhishek Sahu +--- + drivers/pci/host/pcie-qcom.c | 16 +++++++--------- + 1 file changed, 7 insertions(+), 9 deletions(-) + +--- a/drivers/pci/host/pcie-qcom.c ++++ b/drivers/pci/host/pcie-qcom.c +@@ -352,6 +352,7 @@ static void qcom_pcie_deinit_v0(struct q + { + struct qcom_pcie_resources_v0 *res = &pcie->res.v0; + ++ clk_disable_unprepare(res->phy_clk); + reset_control_assert(res->pci_reset); + reset_control_assert(res->axi_reset); + reset_control_assert(res->ahb_reset); +@@ -360,7 +361,6 @@ static void qcom_pcie_deinit_v0(struct q + reset_control_assert(res->ext_reset); + clk_disable_unprepare(res->iface_clk); + clk_disable_unprepare(res->core_clk); +- clk_disable_unprepare(res->phy_clk); + clk_disable_unprepare(res->aux_clk); + clk_disable_unprepare(res->ref_clk); + regulator_disable(res->vdda); +@@ -416,12 +416,6 @@ static int qcom_pcie_init_v0(struct qcom + goto err_clk_core; + } + +- ret = clk_prepare_enable(res->phy_clk); +- if (ret) { +- dev_err(dev, "cannot prepare/enable phy clock\n"); +- goto err_clk_phy; +- } +- + ret = clk_prepare_enable(res->aux_clk); + if (ret) { + dev_err(dev, "cannot prepare/enable aux clock\n"); +@@ -486,6 +480,12 @@ static int qcom_pcie_init_v0(struct qcom + return ret; + } + ++ ret = clk_prepare_enable(res->phy_clk); ++ if (ret) { ++ dev_err(dev, "cannot prepare/enable phy clock\n"); ++ goto err_deassert_ahb; ++ } ++ + /* wait for clock acquisition */ + usleep_range(1000, 1500); + if (pcie->force_gen1) { +@@ -504,8 +504,6 @@ err_deassert_ahb: + err_clk_ref: + clk_disable_unprepare(res->aux_clk); + err_clk_aux: +- clk_disable_unprepare(res->phy_clk); +-err_clk_phy: + clk_disable_unprepare(res->core_clk); + err_clk_core: + clk_disable_unprepare(res->iface_clk); diff --git a/target/linux/ipq40xx/patches-4.9/0071-9-pcie-qcom-change-duplicate-pci-reset-to-phy-reset.patch 
b/target/linux/ipq40xx/patches-4.9/0071-9-pcie-qcom-change-duplicate-pci-reset-to-phy-reset.patch new file mode 100644 index 000000000..a60add93c --- /dev/null +++ b/target/linux/ipq40xx/patches-4.9/0071-9-pcie-qcom-change-duplicate-pci-reset-to-phy-reset.patch @@ -0,0 +1,25 @@ +From 1a9c48123bd09f75562b6a2ee0f0a7b2d533cd45 Mon Sep 17 00:00:00 2001 +From: Abhishek Sahu +Date: Thu, 22 Dec 2016 11:50:49 +0530 +Subject: pcie: qcom: change duplicate pci reset to phy reset + +The deinit issues reset_control_assert for pci twice and +does not contain phy reset. + +Change-Id: Iba849963c7e5f9a2a1063f0e2e89635df70b8a99 +Signed-off-by: Abhishek Sahu +--- + drivers/pci/host/pcie-qcom.c | 2 +- + 1 file changed, 1 insertion(+), 1 deletion(-) + +--- a/drivers/pci/host/pcie-qcom.c ++++ b/drivers/pci/host/pcie-qcom.c +@@ -353,7 +353,7 @@ static void qcom_pcie_deinit_v0(struct q + struct qcom_pcie_resources_v0 *res = &pcie->res.v0; + + clk_disable_unprepare(res->phy_clk); +- reset_control_assert(res->pci_reset); ++ reset_control_assert(res->phy_reset); + reset_control_assert(res->axi_reset); + reset_control_assert(res->ahb_reset); + reset_control_assert(res->por_reset); diff --git a/target/linux/ipq40xx/patches-4.9/0072-ipq-scm-TZ-don-t-need-clock-to-be-enabled-disabled-for-ipq.patch b/target/linux/ipq40xx/patches-4.9/0072-ipq-scm-TZ-don-t-need-clock-to-be-enabled-disabled-for-ipq.patch new file mode 100644 index 000000000..4f0e544ab --- /dev/null +++ b/target/linux/ipq40xx/patches-4.9/0072-ipq-scm-TZ-don-t-need-clock-to-be-enabled-disabled-for-ipq.patch @@ -0,0 +1,166 @@ +From 0fb08a02baf5114fd3bdbc5aa92d6a6cd6d5ef3f Mon Sep 17 00:00:00 2001 +From: Manoharan Vijaya Raghavan +Date: Tue, 24 Jan 2017 20:58:46 +0530 +Subject: ipq: scm: TZ don't need clock to be enabled/disabled for ipq + +When SCM was made as a platform driver, clock management was +addedfor firmware calls. This is not required for IPQ. 
+ +Change-Id: I3d29fafe0266e51f708f2718bab03907078b0f4d +Signed-off-by: Manoharan Vijaya Raghavan +--- + drivers/firmware/qcom_scm.c | 87 +++++++++++++++++++++++++++++---------------- + 1 file changed, 57 insertions(+), 30 deletions(-) + +(limited to 'drivers/firmware/qcom_scm.c') + +--- a/drivers/firmware/qcom_scm.c ++++ b/drivers/firmware/qcom_scm.c +@@ -28,12 +28,15 @@ + + #include "qcom_scm.h" + ++#define SCM_NOCLK 1 ++ + struct qcom_scm { + struct device *dev; + struct clk *core_clk; + struct clk *iface_clk; + struct clk *bus_clk; + struct reset_controller_dev reset; ++ int is_clkdisabled; + }; + + static struct qcom_scm *__scm; +@@ -42,6 +45,9 @@ static int qcom_scm_clk_enable(void) + { + int ret; + ++ if (__scm->is_clkdisabled) ++ return 0; ++ + ret = clk_prepare_enable(__scm->core_clk); + if (ret) + goto bail; +@@ -66,6 +72,9 @@ bail: + + static void qcom_scm_clk_disable(void) + { ++ if (__scm->is_clkdisabled) ++ return; ++ + clk_disable_unprepare(__scm->core_clk); + clk_disable_unprepare(__scm->iface_clk); + clk_disable_unprepare(__scm->bus_clk); +@@ -320,37 +329,61 @@ bool qcom_scm_is_available(void) + } + EXPORT_SYMBOL(qcom_scm_is_available); + ++static const struct of_device_id qcom_scm_dt_match[] = { ++ { .compatible = "qcom,scm-apq8064",}, ++ { .compatible = "qcom,scm-msm8660",}, ++ { .compatible = "qcom,scm-msm8960",}, ++ { .compatible = "qcom,scm-ipq807x", .data = (void *)SCM_NOCLK }, ++ { .compatible = "qcom,scm-ipq806x", .data = (void *)SCM_NOCLK }, ++ { .compatible = "qcom,scm-ipq40xx", .data = (void *)SCM_NOCLK }, ++ { .compatible = "qcom,scm-msm8960",}, ++ { .compatible = "qcom,scm-msm8960",}, ++ { .compatible = "qcom,scm",}, ++ {} ++}; ++ + static int qcom_scm_probe(struct platform_device *pdev) + { + struct qcom_scm *scm; ++ const struct of_device_id *id; + int ret; + + scm = devm_kzalloc(&pdev->dev, sizeof(*scm), GFP_KERNEL); + if (!scm) + return -ENOMEM; + +- scm->core_clk = devm_clk_get(&pdev->dev, "core"); +- if (IS_ERR(scm->core_clk)) { 
+- if (PTR_ERR(scm->core_clk) == -EPROBE_DEFER) +- return PTR_ERR(scm->core_clk); ++ id = of_match_device(qcom_scm_dt_match, &pdev->dev); ++ if (id) ++ scm->is_clkdisabled = (unsigned int)id->data; ++ else ++ scm->is_clkdisabled = 0; ++ ++ if (!(scm->is_clkdisabled)) { ++ ++ scm->core_clk = devm_clk_get(&pdev->dev, "core"); ++ if (IS_ERR(scm->core_clk)) { ++ if (PTR_ERR(scm->core_clk) == -EPROBE_DEFER) ++ return PTR_ERR(scm->core_clk); + +- scm->core_clk = NULL; +- } +- +- if (of_device_is_compatible(pdev->dev.of_node, "qcom,scm")) { +- scm->iface_clk = devm_clk_get(&pdev->dev, "iface"); +- if (IS_ERR(scm->iface_clk)) { +- if (PTR_ERR(scm->iface_clk) != -EPROBE_DEFER) +- dev_err(&pdev->dev, "failed to acquire iface clk\n"); +- return PTR_ERR(scm->iface_clk); ++ scm->core_clk = NULL; + } + +- scm->bus_clk = devm_clk_get(&pdev->dev, "bus"); +- if (IS_ERR(scm->bus_clk)) { +- if (PTR_ERR(scm->bus_clk) != -EPROBE_DEFER) +- dev_err(&pdev->dev, "failed to acquire bus clk\n"); +- return PTR_ERR(scm->bus_clk); ++ if (of_device_is_compatible(pdev->dev.of_node, "qcom,scm")) { ++ scm->iface_clk = devm_clk_get(&pdev->dev, "iface"); ++ if (IS_ERR(scm->iface_clk)) { ++ if (PTR_ERR(scm->iface_clk) != -EPROBE_DEFER) ++ dev_err(&pdev->dev, "failed to acquire iface clk\n"); ++ return PTR_ERR(scm->iface_clk); ++ } ++ ++ scm->bus_clk = devm_clk_get(&pdev->dev, "bus"); ++ if (IS_ERR(scm->bus_clk)) { ++ if (PTR_ERR(scm->bus_clk) != -EPROBE_DEFER) ++ dev_err(&pdev->dev, "failed to acquire bus clk\n"); ++ return PTR_ERR(scm->bus_clk); ++ } + } ++ + } + + scm->reset.ops = &qcom_scm_pas_reset_ops; +@@ -358,10 +391,12 @@ static int qcom_scm_probe(struct platfor + scm->reset.of_node = pdev->dev.of_node; + reset_controller_register(&scm->reset); + +- /* vote for max clk rate for highest performance */ +- ret = clk_set_rate(scm->core_clk, INT_MAX); +- if (ret) +- return ret; ++ if (!(scm->is_clkdisabled)) { ++ /* vote for max clk rate for highest performance */ ++ ret = 
clk_set_rate(scm->core_clk, INT_MAX); ++ if (ret) ++ return ret; ++ } + + __scm = scm; + __scm->dev = &pdev->dev; +@@ -371,14 +406,6 @@ static int qcom_scm_probe(struct platfor + return 0; + } + +-static const struct of_device_id qcom_scm_dt_match[] = { +- { .compatible = "qcom,scm-apq8064",}, +- { .compatible = "qcom,scm-msm8660",}, +- { .compatible = "qcom,scm-msm8960",}, +- { .compatible = "qcom,scm",}, +- {} +-}; +- + static struct platform_driver qcom_scm_driver = { + .driver = { + .name = "qcom_scm", diff --git a/target/linux/ipq40xx/patches-4.9/0073-pinctrl-qom-use-scm_call-to-route-GPIO-irq-to-Apps.patch b/target/linux/ipq40xx/patches-4.9/0073-pinctrl-qom-use-scm_call-to-route-GPIO-irq-to-Apps.patch new file mode 100644 index 000000000..c6a0470ea --- /dev/null +++ b/target/linux/ipq40xx/patches-4.9/0073-pinctrl-qom-use-scm_call-to-route-GPIO-irq-to-Apps.patch @@ -0,0 +1,166 @@ +From 2034addc7e193dc81d7ca60d8884832751b76758 Mon Sep 17 00:00:00 2001 +From: Ajay Kishore +Date: Tue, 24 Jan 2017 14:14:16 +0530 +Subject: pinctrl: qcom: use scm_call to route GPIO irq to Apps + +For IPQ806x targets, TZ protects the registers that are used to +configure the routing of interrupts to a target processor. +To resolve this, this patch uses scm call to route GPIO interrupts +to application processor. Also the scm call interface is changed. + +Change-Id: Ib6c06829d04bc8c20483c36e63da92e26cdef9ce +Signed-off-by: Ajay Kishore +--- + drivers/firmware/qcom_scm-32.c | 17 +++++++++++++++++ + drivers/firmware/qcom_scm-64.c | 9 +++++++++ + drivers/firmware/qcom_scm.c | 13 +++++++++++++ + drivers/firmware/qcom_scm.h | 8 ++++++++ + drivers/pinctrl/qcom/pinctrl-msm.c | 34 ++++++++++++++++++++++++++++------ + include/linux/qcom_scm.h | 3 ++- + 6 files changed, 77 insertions(+), 7 deletions(-) + +--- a/drivers/firmware/qcom_scm-32.c ++++ b/drivers/firmware/qcom_scm-32.c +@@ -560,3 +560,21 @@ int __qcom_scm_pas_mss_reset(struct devi + + return ret ? 
: le32_to_cpu(out); + } ++ ++int __qcom_scm_pinmux_read(u32 svc_id, u32 cmd_id, u32 arg1) ++{ ++ s32 ret; ++ ++ ret = qcom_scm_call_atomic1(svc_id, cmd_id, arg1); ++ ++ return ret; ++} ++ ++int __qcom_scm_pinmux_write(u32 svc_id, u32 cmd_id, u32 arg1, u32 arg2) ++{ ++ s32 ret; ++ ++ ret = qcom_scm_call_atomic2(svc_id, cmd_id, arg1, arg2); ++ ++ return ret; ++ } +--- a/drivers/firmware/qcom_scm-64.c ++++ b/drivers/firmware/qcom_scm-64.c +@@ -365,3 +365,12 @@ int __qcom_scm_pas_mss_reset(struct devi + + return ret ? : res.a1; + } ++int __qcom_scm_pinmux_read(u32 svc_id, u32 cmd_id, u32 arg1) ++{ ++ return -ENOTSUPP; ++} ++ ++int __qcom_scm_pinmux_write(u32 svc_id, u32 cmd_id, u32 arg1, u32 arg2) ++{ ++ return -ENOTSUPP; ++} +--- a/drivers/firmware/qcom_scm.c ++++ b/drivers/firmware/qcom_scm.c +@@ -443,3 +443,16 @@ static int __init qcom_scm_init(void) + return platform_driver_register(&qcom_scm_driver); + } + subsys_initcall(qcom_scm_init); ++ ++int qcom_scm_pinmux_read(u32 arg1) ++{ ++ return __qcom_scm_pinmux_read(SCM_SVC_IO_ACCESS, SCM_IO_READ, arg1); ++} ++EXPORT_SYMBOL(qcom_scm_pinmux_read); ++ ++int qcom_scm_pinmux_write(u32 arg1, u32 arg2) ++{ ++ return __qcom_scm_pinmux_write(SCM_SVC_IO_ACCESS, SCM_IO_WRITE, ++ arg1, arg2); ++} ++EXPORT_SYMBOL(qcom_scm_pinmux_write); +--- a/drivers/firmware/qcom_scm.h ++++ b/drivers/firmware/qcom_scm.h +@@ -56,6 +56,13 @@ extern int __qcom_scm_pas_auth_and_rese + extern int __qcom_scm_pas_shutdown(struct device *dev, u32 peripheral); + extern int __qcom_scm_pas_mss_reset(struct device *dev, bool reset); + ++#define SCM_IO_READ 1 ++#define SCM_IO_WRITE 2 ++#define SCM_SVC_IO_ACCESS 0x5 ++ ++s32 __qcom_scm_pinmux_read(u32 svc_id, u32 cmd_id, u32 arg1); ++s32 __qcom_scm_pinmux_write(u32 svc_id, u32 cmd_id, u32 arg1, u32 arg2); ++ + /* common error codes */ + #define QCOM_SCM_V2_EBUSY -12 + #define QCOM_SCM_ENOMEM -5 +--- a/drivers/pinctrl/qcom/pinctrl-msm.c ++++ b/drivers/pinctrl/qcom/pinctrl-msm.c +@@ -30,7 +30,8 @@ + #include 
+ #include + #include +- ++#include ++#include + #include "../core.h" + #include "../pinconf.h" + #include "pinctrl-msm.h" +@@ -635,6 +636,9 @@ static int msm_gpio_irq_set_type(struct + const struct msm_pingroup *g; + unsigned long flags; + u32 val; ++ u32 addr; ++ int ret; ++ const __be32 *reg; + + g = &pctrl->soc->groups[d->hwirq]; + +@@ -648,11 +652,30 @@ static int msm_gpio_irq_set_type(struct + else + clear_bit(d->hwirq, pctrl->dual_edge_irqs); + ++ ret = of_device_is_compatible(pctrl->dev->of_node, ++ "qcom,ipq8064-pinctrl"); + /* Route interrupts to application cpu */ +- val = readl(pctrl->regs + g->intr_target_reg); +- val &= ~(7 << g->intr_target_bit); +- val |= g->intr_target_kpss_val << g->intr_target_bit; +- writel(val, pctrl->regs + g->intr_target_reg); ++ if (!ret) { ++ val = readl(pctrl->regs + g->intr_target_reg); ++ val &= ~(7 << g->intr_target_bit); ++ val |= g->intr_target_kpss_val << g->intr_target_bit; ++ writel(val, pctrl->regs + g->intr_target_reg); ++ } else { ++ reg = of_get_property(pctrl->dev->of_node, "reg", NULL); ++ if (reg) { ++ addr = be32_to_cpup(reg) + g->intr_target_reg; ++ val = qcom_scm_pinmux_read(addr); ++ __iormb(); ++ ++ val &= ~(7 << g->intr_target_bit); ++ val |= g->intr_target_kpss_val << g->intr_target_bit; ++ ++ __iowmb(); ++ ret = qcom_scm_pinmux_write(addr, val); ++ if (ret) ++ pr_err("\n Routing interrupts to Apps proc failed"); ++ } ++ } + + /* Update configuration for gpio. + * RAW_STATUS_EN is left on for all gpio irqs. 
Due to the +@@ -926,4 +949,3 @@ int msm_pinctrl_remove(struct platform_d + return 0; + } + EXPORT_SYMBOL(msm_pinctrl_remove); +- +--- a/include/linux/qcom_scm.h ++++ b/include/linux/qcom_scm.h +@@ -46,4 +46,6 @@ extern void qcom_scm_cpu_power_down(u32 + + extern u32 qcom_scm_get_version(void); + ++extern s32 qcom_scm_pinmux_read(u32 arg1); ++extern s32 qcom_scm_pinmux_write(u32 arg1, u32 arg2); + #endif diff --git a/target/linux/ipq40xx/patches-4.9/0074-ipq806x-usb-Control-USB-master-reset.patch b/target/linux/ipq40xx/patches-4.9/0074-ipq806x-usb-Control-USB-master-reset.patch new file mode 100644 index 000000000..d5ff86829 --- /dev/null +++ b/target/linux/ipq40xx/patches-4.9/0074-ipq806x-usb-Control-USB-master-reset.patch @@ -0,0 +1,75 @@ +From a86bda9f8a7965f0cedd347a9c04800eb9f41ea3 Mon Sep 17 00:00:00 2001 +From: Vasudevan Murugesan +Date: Tue, 21 Jul 2015 10:22:38 +0530 +Subject: ipq806x: usb: Control USB master reset + +During removal of the glue layer(dwc3-of-simple), +USB master reset is set to active and during insertion +it is de-activated. 
+ +Change-Id: I537dc810f6cb2a46664ee674840145066432b957 +Signed-off-by: Vasudevan Murugesan +(cherry picked from commit 4611e13580a216812f85f0801b95442d02eeb836) +--- + drivers/usb/dwc3/dwc3-of-simple.c | 22 ++++++++++++++++++++++ + 1 file changed, 12 insertions(+) + +(limited to 'drivers/usb/dwc3/dwc3-of-simple.c') + +diff --git a/drivers/usb/dwc3/dwc3-of-simple.c b/drivers/usb/dwc3/dwc3-of-simple.c +index f9e92ef..49bf556 100644 +--- a/drivers/usb/dwc3/dwc3-of-simple.c ++++ b/drivers/usb/dwc3/dwc3-of-simple.c +@@ -26,6 +26,7 @@ + #include + #include + #include ++#include + #include + #include + #include +@@ -34,6 +35,8 @@ struct dwc3_of_simple { + struct device *dev; + struct clk **clks; + int num_clocks; ++ struct reset_control *mstr_rst_30_0; ++ struct reset_control *mstr_rst_30_1; + }; + + static int dwc3_of_simple_probe(struct platform_device *pdev) +@@ -89,6 +92,20 @@ static int dwc3_of_simple_probe(struct platform_device *pdev) + simple->clks[i] = clk; + } + ++ simple->mstr_rst_30_0 = devm_reset_control_get(dev, "usb30_0_mstr_rst"); ++ ++ if (!IS_ERR(simple->mstr_rst_30_0)) ++ reset_control_deassert(simple->mstr_rst_30_0); ++ else ++ dev_dbg(simple->dev, "cannot get handle for USB PHY 0 master reset control\n"); ++ ++ simple->mstr_rst_30_1 = devm_reset_control_get(dev, "usb30_1_mstr_rst"); ++ ++ if (!IS_ERR(simple->mstr_rst_30_1)) ++ reset_control_deassert(simple->mstr_rst_30_1); ++ else ++ dev_dbg(simple->dev, "cannot get handle for USB PHY 1 master reset control\n"); ++ + ret = of_platform_populate(np, NULL, NULL, dev); + if (ret) { + for (i = 0; i < simple->num_clocks; i++) { +@@ -117,6 +134,12 @@ static int dwc3_of_simple_remove(struct platform_device *pdev) + clk_put(simple->clks[i]); + } + ++ if (!IS_ERR(simple->mstr_rst_30_0)) ++ reset_control_assert(simple->mstr_rst_30_0); ++ ++ if (!IS_ERR(simple->mstr_rst_30_1)) ++ reset_control_assert(simple->mstr_rst_30_1); ++ + of_platform_depopulate(dev); + + pm_runtime_put_sync(dev); +-- +cgit v1.1 diff --git 
a/target/linux/ipq40xx/patches-4.9/104-mtd-nand-add-Winbond-manufacturer-and-chip.patch b/target/linux/ipq40xx/patches-4.9/104-mtd-nand-add-Winbond-manufacturer-and-chip.patch new file mode 100644 index 000000000..d2b58a466 --- /dev/null +++ b/target/linux/ipq40xx/patches-4.9/104-mtd-nand-add-Winbond-manufacturer-and-chip.patch @@ -0,0 +1,38 @@ +From 07b6d0cdbbda8c917480eceaec668f09e4cf24a5 Mon Sep 17 00:00:00 2001 +From: Christian Lamparter +Date: Mon, 14 Nov 2016 23:49:22 +0100 +Subject: [PATCH] mtd: nand: add Winbond manufacturer and chip + +This patch adds the W25N01GV NAND to the table of +known devices. Without this patch the device gets detected: + +nand: device found, Manufacturer ID: 0xef, Chip ID: 0xaa +nand: Unknown NAND 256MiB 1,8V 8-bit +nand: 256 MiB, SLC, erase size: 64 KiB, page size: 1024, OOB size : 16 + +Whereas the u-boot identifies it as: +spi_nand: spi_nand_flash_probe SF NAND ID 00:ef:aa:21 +SF: Detected W25N01GV with page size 2 KiB, total 128 MiB + +Due to the page size discrepancy, it's impossible to attach +ubi volumes on the device. 
+ +Signed-off-by: Christian Lamparter +--- + drivers/mtd/nand/nand_ids.c | 4 ++++ + include/linux/mtd/nand.h | 1 + + 2 files changed, 5 insertions(+) + +--- a/drivers/mtd/nand/nand_ids.c ++++ b/drivers/mtd/nand/nand_ids.c +@@ -52,6 +52,10 @@ struct nand_flash_dev nand_flash_ids[] = + { .id = {0xad, 0xde, 0x94, 0xda, 0x74, 0xc4} }, + SZ_8K, SZ_8K, SZ_2M, NAND_NEED_SCRAMBLING, 6, 640, + NAND_ECC_INFO(40, SZ_1K), 4 }, ++ {"W25N01GV 1G 3.3V 8-bit", ++ { .id = {0xef, 0xaa} }, ++ SZ_2K, SZ_128, SZ_128K, NAND_NO_SUBPAGE_WRITE, ++ 2, 64, NAND_ECC_INFO(1, SZ_512) }, + + LEGACY_ID_NAND("NAND 4MiB 5V 8-bit", 0x6B, 4, SZ_8K, SP_OPTIONS), + LEGACY_ID_NAND("NAND 4MiB 3,3V 8-bit", 0xE3, 4, SZ_8K, SP_OPTIONS), diff --git a/target/linux/ipq40xx/patches-4.9/105-mtd-nor-add-mx25l25635f.patch b/target/linux/ipq40xx/patches-4.9/105-mtd-nor-add-mx25l25635f.patch new file mode 100644 index 000000000..edfafda48 --- /dev/null +++ b/target/linux/ipq40xx/patches-4.9/105-mtd-nor-add-mx25l25635f.patch @@ -0,0 +1,22 @@ +Subject: mtd: spi-nor: add mx25l25635f with SECT_4K + +This patch fixes an issue with the creation of the +ubi volume on the AVM FRITZ!Box 4040. The mx25l25635f +and mx25l25635e support SECT_4K which will set the +erase size to 4K. This is used by ubi to calculate +VID header offsets. Without this, uboot and linux +disagrees about the layout and refuse to attach +the ubi volume created by the other. 
+ +--- +--- a/drivers/mtd/spi-nor/spi-nor.c ++++ b/drivers/mtd/spi-nor/spi-nor.c +@@ -1018,7 +1018,7 @@ static const struct flash_info spi_nor_i + { "mx25u6435f", INFO(0xc22537, 0, 64 * 1024, 128, SECT_4K) }, + { "mx25l12805d", INFO(0xc22018, 0, 64 * 1024, 256, 0) }, + { "mx25l12855e", INFO(0xc22618, 0, 64 * 1024, 256, 0) }, +- { "mx25l25635e", INFO(0xc22019, 0, 64 * 1024, 512, 0) }, ++ { "mx25l25635f", INFO(0xc22019, 0, 64 * 1024, 512, SECT_4K) }, + { "mx25u25635f", INFO(0xc22539, 0, 64 * 1024, 512, SECT_4K | SPI_NOR_4B_OPCODES) }, + { "mx25l25655e", INFO(0xc22619, 0, 64 * 1024, 512, 0) }, + { "mx66l51235l", INFO(0xc2201a, 0, 64 * 1024, 1024, SPI_NOR_QUAD_READ) }, diff --git a/target/linux/ipq40xx/patches-4.9/305-qcom-ipq4019-use-v2-of-the-kpss-bringup-mechanism.patch b/target/linux/ipq40xx/patches-4.9/305-qcom-ipq4019-use-v2-of-the-kpss-bringup-mechanism.patch new file mode 100644 index 000000000..40f5e407b --- /dev/null +++ b/target/linux/ipq40xx/patches-4.9/305-qcom-ipq4019-use-v2-of-the-kpss-bringup-mechanism.patch @@ -0,0 +1,109 @@ +From 6a6c067b7ce2b3de4efbafddc134afbea3ddc1a3 Mon Sep 17 00:00:00 2001 +From: Matthew McClintock +Date: Fri, 8 Apr 2016 15:26:10 -0500 +Subject: [PATCH] qcom: ipq4019: use v2 of the kpss bringup mechanism + +v1 was the incorrect choice here and sometimes the board +would not come up properly. 
+ +Signed-off-by: Matthew McClintock +Signed-off-by: Christian Lamparter +--- +Changes: + - moved L2-Cache to be a subnode of cpu0 +--- + arch/arm/boot/dts/qcom-ipq4019.dtsi | 32 ++++++++++++++++++++++++-------- + 1 file changed, 24 insertions(+), 8 deletions(-) + +--- a/arch/arm/boot/dts/qcom-ipq4019.dtsi ++++ b/arch/arm/boot/dts/qcom-ipq4019.dtsi +@@ -34,19 +34,27 @@ + cpu@0 { + device_type = "cpu"; + compatible = "arm,cortex-a7"; +- enable-method = "qcom,kpss-acc-v1"; ++ enable-method = "qcom,kpss-acc-v2"; ++ next-level-cache = <&L2>; + qcom,acc = <&acc0>; + qcom,saw = <&saw0>; + reg = <0x0>; + clocks = <&gcc GCC_APPS_CLK_SRC>; + clock-frequency = <0>; + operating-points-v2 = <&cpu0_opp_table>; ++ ++ L2: l2-cache { ++ compatible = "qcom,arch-cache"; ++ cache-level = <2>; ++ qcom,saw = <&saw_l2>; ++ }; + }; + + cpu@1 { + device_type = "cpu"; + compatible = "arm,cortex-a7"; +- enable-method = "qcom,kpss-acc-v1"; ++ enable-method = "qcom,kpss-acc-v2"; ++ next-level-cache = <&L2>; + qcom,acc = <&acc1>; + qcom,saw = <&saw1>; + reg = <0x1>; +@@ -58,7 +66,8 @@ + cpu@2 { + device_type = "cpu"; + compatible = "arm,cortex-a7"; +- enable-method = "qcom,kpss-acc-v1"; ++ enable-method = "qcom,kpss-acc-v2"; ++ next-level-cache = <&L2>; + qcom,acc = <&acc2>; + qcom,saw = <&saw2>; + reg = <0x2>; +@@ -70,7 +79,8 @@ + cpu@3 { + device_type = "cpu"; + compatible = "arm,cortex-a7"; +- enable-method = "qcom,kpss-acc-v1"; ++ enable-method = "qcom,kpss-acc-v2"; ++ next-level-cache = <&L2>; + qcom,acc = <&acc3>; + qcom,saw = <&saw3>; + reg = <0x3>; +@@ -212,22 +222,22 @@ + }; + + acc0: clock-controller@b088000 { +- compatible = "qcom,kpss-acc-v1"; ++ compatible = "qcom,kpss-acc-v2"; + reg = <0x0b088000 0x1000>, <0xb008000 0x1000>; + }; + + acc1: clock-controller@b098000 { +- compatible = "qcom,kpss-acc-v1"; ++ compatible = "qcom,kpss-acc-v2"; + reg = <0x0b098000 0x1000>, <0xb008000 0x1000>; + }; + + acc2: clock-controller@b0a8000 { +- compatible = "qcom,kpss-acc-v1"; ++ compatible = 
"qcom,kpss-acc-v2"; + reg = <0x0b0a8000 0x1000>, <0xb008000 0x1000>; + }; + + acc3: clock-controller@b0b8000 { +- compatible = "qcom,kpss-acc-v1"; ++ compatible = "qcom,kpss-acc-v2"; + reg = <0x0b0b8000 0x1000>, <0xb008000 0x1000>; + }; + +@@ -255,6 +265,12 @@ + regulator; + }; + ++ saw_l2: regulator@b012000 { ++ compatible = "qcom,saw2"; ++ reg = <0xb012000 0x1000>; ++ regulator; ++ }; ++ + serial@78af000 { + compatible = "qcom,msm-uartdm-v1.4", "qcom,msm-uartdm"; + reg = <0x78af000 0x200>; diff --git a/target/linux/ipq40xx/patches-4.9/306-qcom-ipq4019-add-USB-nodes-to-ipq4019-SoC-device-tre.patch b/target/linux/ipq40xx/patches-4.9/306-qcom-ipq4019-add-USB-nodes-to-ipq4019-SoC-device-tre.patch new file mode 100644 index 000000000..3c3fc98a2 --- /dev/null +++ b/target/linux/ipq40xx/patches-4.9/306-qcom-ipq4019-add-USB-nodes-to-ipq4019-SoC-device-tre.patch @@ -0,0 +1,130 @@ +From ea5f4d6f4716f3a0bb4fc3614b7a0e8c0df1cb81 Mon Sep 17 00:00:00 2001 +From: Matthew McClintock +Date: Thu, 17 Mar 2016 16:22:28 -0500 +Subject: [PATCH] qcom: ipq4019: add USB nodes to ipq4019 SoC device tree + +This adds the SoC nodes to the ipq4019 device tree and +enable it for the DK01.1 board. 
+ +Signed-off-by: Matthew McClintock +Signed-off-by: Christian Lamparter +--- +Changes: + - replaced space with tab + - added sleep and mock_utmi clocks + - added registers for usb2 and usb3 parent node + - changed compatible to qca,ipa4019-dwc3 + - updated usb2 and usb3 names + (included the reg - in case they become necessary later) +--- + arch/arm/boot/dts/qcom-ipq4019-ap.dk01.1.dtsi | 20 ++++++++ + arch/arm/boot/dts/qcom-ipq4019.dtsi | 71 +++++++++++++++++++++++++++ + 2 files changed, 91 insertions(+) + +--- a/arch/arm/boot/dts/qcom-ipq4019-ap.dk01.1.dtsi ++++ b/arch/arm/boot/dts/qcom-ipq4019-ap.dk01.1.dtsi +@@ -108,5 +108,25 @@ + watchdog@b017000 { + status = "ok"; + }; ++ ++ usb3_ss_phy: ssphy@9a000 { ++ status = "ok"; ++ }; ++ ++ usb3_hs_phy: hsphy@a6000 { ++ status = "ok"; ++ }; ++ ++ usb3: usb3@8af8800 { ++ status = "ok"; ++ }; ++ ++ usb2_hs_phy: hsphy@a8000 { ++ status = "ok"; ++ }; ++ ++ usb2: usb2@60f8800 { ++ status = "ok"; ++ }; + }; + }; +--- a/arch/arm/boot/dts/qcom-ipq4019.dtsi ++++ b/arch/arm/boot/dts/qcom-ipq4019.dtsi +@@ -307,5 +307,76 @@ + compatible = "qcom,pshold"; + reg = <0x4ab000 0x4>; + }; ++ ++ usb3_ss_phy: ssphy@9a000 { ++ compatible = "qca,uni-ssphy"; ++ reg = <0x9a000 0x800>; ++ reg-names = "phy_base"; ++ resets = <&gcc USB3_UNIPHY_PHY_ARES>; ++ reset-names = "por_rst"; ++ status = "disabled"; ++ }; ++ ++ usb3_hs_phy: hsphy@a6000 { ++ compatible = "qca,baldur-usb3-hsphy"; ++ reg = <0xa6000 0x40>; ++ reg-names = "phy_base"; ++ resets = <&gcc USB3_HSPHY_POR_ARES>, <&gcc USB3_HSPHY_S_ARES>; ++ reset-names = "por_rst", "srif_rst"; ++ status = "disabled"; ++ }; ++ ++ usb3@8af8800 { ++ compatible = "qca,ipq4019-dwc3"; ++ reg = <0x8af8800 0x100>; ++ #address-cells = <1>; ++ #size-cells = <1>; ++ clocks = <&gcc GCC_USB3_MASTER_CLK>, ++ <&gcc GCC_USB3_SLEEP_CLK>, ++ <&gcc GCC_USB3_MOCK_UTMI_CLK>; ++ clock-names = "master", "sleep", "mock_utmi"; ++ ranges; ++ status = "disabled"; ++ ++ dwc3@8a00000 { ++ compatible = "snps,dwc3"; ++ reg = 
<0x8a00000 0xf8000>; ++ interrupts = <0 132 0>; ++ usb-phy = <&usb3_hs_phy>, <&usb3_ss_phy>; ++ phy-names = "usb2-phy", "usb3-phy"; ++ dr_mode = "host"; ++ }; ++ }; ++ ++ usb2_hs_phy: hsphy@a8000 { ++ compatible = "qca,baldur-usb2-hsphy"; ++ reg = <0xa8000 0x40>; ++ reg-names = "phy_base"; ++ resets = <&gcc USB2_HSPHY_POR_ARES>, <&gcc USB2_HSPHY_S_ARES>; ++ reset-names = "por_rst", "srif_rst"; ++ status = "disabled"; ++ }; ++ ++ usb2@60f8800 { ++ compatible = "qca,ipq4019-dwc3"; ++ reg = <0x60f8800 0x100>; ++ #address-cells = <1>; ++ #size-cells = <1>; ++ clocks = <&gcc GCC_USB2_MASTER_CLK>, ++ <&gcc GCC_USB2_SLEEP_CLK>, ++ <&gcc GCC_USB2_MOCK_UTMI_CLK>; ++ clock-names = "master", "sleep", "mock_utmi"; ++ ranges; ++ status = "disabled"; ++ ++ dwc3@6000000 { ++ compatible = "snps,dwc3"; ++ reg = <0x6000000 0xf8000>; ++ interrupts = <0 136 0>; ++ usb-phy = <&usb2_hs_phy>; ++ phy-names = "usb2-phy"; ++ dr_mode = "host"; ++ }; ++ }; + }; + }; diff --git a/target/linux/ipq40xx/patches-4.9/307-ARM-qcom-Add-IPQ4019-SoC-support.patch b/target/linux/ipq40xx/patches-4.9/307-ARM-qcom-Add-IPQ4019-SoC-support.patch new file mode 100644 index 000000000..4cbcc0aea --- /dev/null +++ b/target/linux/ipq40xx/patches-4.9/307-ARM-qcom-Add-IPQ4019-SoC-support.patch @@ -0,0 +1,35 @@ +From e7748d641ae37081e2034869491f1629461ae13c Mon Sep 17 00:00:00 2001 +From: Christian Lamparter +Date: Sat, 19 Nov 2016 00:58:18 +0100 +Subject: [PATCH] ARM: qcom: Add IPQ4019 SoC support + +Add support for the Qualcomm Atheros IPQ4019 SoC. 
+ +Signed-off-by: Christian Lamparter +--- + arch/arm/Makefile | 1 + + arch/arm/mach-qcom/Kconfig | 5 +++++ + 2 files changed, 6 insertions(+) + +--- a/arch/arm/Makefile ++++ b/arch/arm/Makefile +@@ -147,6 +147,7 @@ textofs-$(CONFIG_SA1111) := 0x00208000 + endif + textofs-$(CONFIG_ARCH_MSM8X60) := 0x00208000 + textofs-$(CONFIG_ARCH_MSM8960) := 0x00208000 ++textofs-$(CONFIG_ARCH_IPQ40XX) := 0x00208000 + textofs-$(CONFIG_ARCH_AXXIA) := 0x00308000 + + # Machine directory name. This list is sorted alphanumerically +--- a/arch/arm/mach-qcom/Kconfig ++++ b/arch/arm/mach-qcom/Kconfig +@@ -28,4 +28,9 @@ config ARCH_MDM9615 + bool "Enable support for MDM9615" + select CLKSRC_QCOM + ++config ARCH_IPQ40XX ++ bool "Enable support for IPQ40XX" ++ select CLKSRC_QCOM ++ select HAVE_ARM_ARCH_TIMER ++ + endif diff --git a/target/linux/ipq40xx/patches-4.9/308-dts-ipq4019-add-both-IPQ4019-wifi-block-definitions.patch b/target/linux/ipq40xx/patches-4.9/308-dts-ipq4019-add-both-IPQ4019-wifi-block-definitions.patch new file mode 100644 index 000000000..d12ca4bef --- /dev/null +++ b/target/linux/ipq40xx/patches-4.9/308-dts-ipq4019-add-both-IPQ4019-wifi-block-definitions.patch @@ -0,0 +1,105 @@ +From 6091a49b0b06bf838fed80498c4f5f40d0fbd447 Mon Sep 17 00:00:00 2001 +From: Christian Lamparter +Date: Sat, 19 Nov 2016 01:22:46 +0100 +Subject: [PATCH] dts: ipq4019: add both IPQ4019 wifi block definitions + +The IPQ4019 has two ath10k blocks on the AHB. Both wifi's +are already supported by ath10k. 
+ +Signed-off-by: Christian Lamparter +--- + arch/arm/boot/dts/qcom-ipq4019.dtsi | 84 +++++++++++++++++++++++++++++++++++++ + 1 file changed, 84 insertions(+) + +--- a/arch/arm/boot/dts/qcom-ipq4019.dtsi ++++ b/arch/arm/boot/dts/qcom-ipq4019.dtsi +@@ -378,5 +378,89 @@ + dr_mode = "host"; + }; + }; ++ ++ wifi0: wifi@a000000 { ++ compatible = "qcom,ipq4019-wifi"; ++ reg = <0xa000000 0x200000>; ++ resets = <&gcc WIFI0_CPU_INIT_RESET ++ &gcc WIFI0_RADIO_SRIF_RESET ++ &gcc WIFI0_RADIO_WARM_RESET ++ &gcc WIFI0_RADIO_COLD_RESET ++ &gcc WIFI0_CORE_WARM_RESET ++ &gcc WIFI0_CORE_COLD_RESET>; ++ reset-names = "wifi_cpu_init", "wifi_radio_srif", ++ "wifi_radio_warm", "wifi_radio_cold", ++ "wifi_core_warm", "wifi_core_cold"; ++ clocks = <&gcc GCC_WCSS2G_CLK ++ &gcc GCC_WCSS2G_REF_CLK ++ &gcc GCC_WCSS2G_RTC_CLK>; ++ clock-names = "wifi_wcss_cmd", "wifi_wcss_ref", ++ "wifi_wcss_rtc"; ++ interrupts = <0 32 IRQ_TYPE_EDGE_RISING ++ 0 33 IRQ_TYPE_EDGE_RISING ++ 0 34 IRQ_TYPE_EDGE_RISING ++ 0 35 IRQ_TYPE_EDGE_RISING ++ 0 36 IRQ_TYPE_EDGE_RISING ++ 0 37 IRQ_TYPE_EDGE_RISING ++ 0 38 IRQ_TYPE_EDGE_RISING ++ 0 39 IRQ_TYPE_EDGE_RISING ++ 0 40 IRQ_TYPE_EDGE_RISING ++ 0 41 IRQ_TYPE_EDGE_RISING ++ 0 42 IRQ_TYPE_EDGE_RISING ++ 0 43 IRQ_TYPE_EDGE_RISING ++ 0 44 IRQ_TYPE_EDGE_RISING ++ 0 45 IRQ_TYPE_EDGE_RISING ++ 0 46 IRQ_TYPE_EDGE_RISING ++ 0 47 IRQ_TYPE_EDGE_RISING ++ 0 168 IRQ_TYPE_NONE>; ++ interrupt-names = "msi0", "msi1", "msi2", "msi3", ++ "msi4", "msi5", "msi6", "msi7", ++ "msi8", "msi9", "msi10", "msi11", ++ "msi12", "msi13", "msi14", "msi15", ++ "legacy"; ++ status = "disabled"; ++ }; ++ ++ wifi1: wifi@a800000 { ++ compatible = "qcom,ipq4019-wifi"; ++ reg = <0xa800000 0x200000>; ++ resets = <&gcc WIFI1_CPU_INIT_RESET ++ &gcc WIFI1_RADIO_SRIF_RESET ++ &gcc WIFI1_RADIO_WARM_RESET ++ &gcc WIFI1_RADIO_COLD_RESET ++ &gcc WIFI1_CORE_WARM_RESET ++ &gcc WIFI1_CORE_COLD_RESET>; ++ reset-names = "wifi_cpu_init", "wifi_radio_srif", ++ "wifi_radio_warm", "wifi_radio_cold", ++ "wifi_core_warm", 
"wifi_core_cold"; ++ clocks = <&gcc GCC_WCSS5G_CLK ++ &gcc GCC_WCSS5G_REF_CLK ++ &gcc GCC_WCSS5G_RTC_CLK>; ++ clock-names = "wifi_wcss_cmd", "wifi_wcss_ref", ++ "wifi_wcss_rtc"; ++ interrupts = <0 48 IRQ_TYPE_EDGE_RISING ++ 0 49 IRQ_TYPE_EDGE_RISING ++ 0 50 IRQ_TYPE_EDGE_RISING ++ 0 51 IRQ_TYPE_EDGE_RISING ++ 0 52 IRQ_TYPE_EDGE_RISING ++ 0 53 IRQ_TYPE_EDGE_RISING ++ 0 54 IRQ_TYPE_EDGE_RISING ++ 0 55 IRQ_TYPE_EDGE_RISING ++ 0 56 IRQ_TYPE_EDGE_RISING ++ 0 57 IRQ_TYPE_EDGE_RISING ++ 0 58 IRQ_TYPE_EDGE_RISING ++ 0 59 IRQ_TYPE_EDGE_RISING ++ 0 60 IRQ_TYPE_EDGE_RISING ++ 0 61 IRQ_TYPE_EDGE_RISING ++ 0 62 IRQ_TYPE_EDGE_RISING ++ 0 63 IRQ_TYPE_EDGE_RISING ++ 0 169 IRQ_TYPE_NONE>; ++ interrupt-names = "msi0", "msi1", "msi2", "msi3", ++ "msi4", "msi5", "msi6", "msi7", ++ "msi8", "msi9", "msi10", "msi11", ++ "msi12", "msi13", "msi14", "msi15", ++ "legacy"; ++ status = "disabled"; ++ }; + }; + }; diff --git a/target/linux/ipq40xx/patches-4.9/309-dts-ipq4019-add-pseudo-random-number-generator.patch b/target/linux/ipq40xx/patches-4.9/309-dts-ipq4019-add-pseudo-random-number-generator.patch new file mode 100644 index 000000000..abcbb6ee5 --- /dev/null +++ b/target/linux/ipq40xx/patches-4.9/309-dts-ipq4019-add-pseudo-random-number-generator.patch @@ -0,0 +1,29 @@ +From 26fa6fdc627b523277c7a79907233596b2f8a3ef Mon Sep 17 00:00:00 2001 +From: Christian Lamparter +Date: Sat, 19 Nov 2016 03:29:04 +0100 +Subject: [PATCH] dts: ipq4019: add pseudo random number generator + +This architecture has a pseudo random number generator +supported by the existing "qcom,prng" binding. 
+ +Signed-off-by: Christian Lamparter +--- + arch/arm/boot/dts/qcom-ipq4019.dtsi | 7 +++++++ + 1 file changed, 7 insertions(+) + +--- a/arch/arm/boot/dts/qcom-ipq4019.dtsi ++++ b/arch/arm/boot/dts/qcom-ipq4019.dtsi +@@ -271,6 +271,13 @@ + regulator; + }; + ++ rng@22000 { ++ compatible = "qcom,prng"; ++ reg = <0x22000 0x140>; ++ clocks = <&gcc GCC_PRNG_AHB_CLK>; ++ clock-names = "core"; ++ }; ++ + serial@78af000 { + compatible = "qcom,msm-uartdm-v1.4", "qcom,msm-uartdm"; + reg = <0x78af000 0x200>; diff --git a/target/linux/ipq40xx/patches-4.9/310-msm-adhoc-bus-support.patch b/target/linux/ipq40xx/patches-4.9/310-msm-adhoc-bus-support.patch new file mode 100644 index 000000000..ff0e1e0fe --- /dev/null +++ b/target/linux/ipq40xx/patches-4.9/310-msm-adhoc-bus-support.patch @@ -0,0 +1,11026 @@ +From: Christian Lamparter +Subject: BUS: add MSM_BUS +--- a/drivers/bus/Makefile ++++ b/drivers/bus/Makefile +@@ -10,6 +10,7 @@ obj-$(CONFIG_BRCMSTB_GISB_ARB) += brcmst + obj-$(CONFIG_IMX_WEIM) += imx-weim.o + obj-$(CONFIG_MIPS_CDMM) += mips_cdmm.o + obj-$(CONFIG_MVEBU_MBUS) += mvebu-mbus.o ++obj-$(CONFIG_BUS_TOPOLOGY_ADHOC)+= msm_bus/ + + # Interconnect bus driver for OMAP SoCs. + obj-$(CONFIG_OMAP_INTERCONNECT) += omap_l3_smx.o omap_l3_noc.o +--- a/drivers/bus/Kconfig ++++ b/drivers/bus/Kconfig +@@ -92,6 +92,8 @@ config MVEBU_MBUS + Driver needed for the MBus configuration on Marvell EBU SoCs + (Kirkwood, Dove, Orion5x, MV78XX0 and Armada 370/XP). + ++source "drivers/bus/msm_bus/Kconfig" ++ + config OMAP_INTERCONNECT + tristate "OMAP INTERCONNECT DRIVER" + depends on ARCH_OMAP2PLUS +--- /dev/null ++++ b/include/dt-bindings/msm/msm-bus-ids.h +@@ -0,0 +1,869 @@ ++/* Copyright (c) 2014, The Linux Foundation. All rights reserved. ++ * ++ * This program is free software; you can redistribute it and/or modify ++ * it under the terms of the GNU General Public License version 2 and ++ * only version 2 as published by the Free Software Foundation. 
++ * ++ * This program is distributed in the hope that it will be useful, ++ * but WITHOUT ANY WARRANTY; without even the implied warranty of ++ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the ++ * GNU General Public License for more details. ++ */ ++ ++#ifndef __MSM_BUS_IDS_H ++#define __MSM_BUS_IDS_H ++ ++/* Topology related enums */ ++#define MSM_BUS_FAB_DEFAULT 0 ++#define MSM_BUS_FAB_APPSS 0 ++#define MSM_BUS_FAB_SYSTEM 1024 ++#define MSM_BUS_FAB_MMSS 2048 ++#define MSM_BUS_FAB_SYSTEM_FPB 3072 ++#define MSM_BUS_FAB_CPSS_FPB 4096 ++ ++#define MSM_BUS_FAB_BIMC 0 ++#define MSM_BUS_FAB_SYS_NOC 1024 ++#define MSM_BUS_FAB_MMSS_NOC 2048 ++#define MSM_BUS_FAB_OCMEM_NOC 3072 ++#define MSM_BUS_FAB_PERIPH_NOC 4096 ++#define MSM_BUS_FAB_CONFIG_NOC 5120 ++#define MSM_BUS_FAB_OCMEM_VNOC 6144 ++#define MSM_BUS_FAB_MMSS_AHB 2049 ++#define MSM_BUS_FAB_A0_NOC 6145 ++#define MSM_BUS_FAB_A1_NOC 6146 ++#define MSM_BUS_FAB_A2_NOC 6147 ++ ++#define MSM_BUS_MASTER_FIRST 1 ++#define MSM_BUS_MASTER_AMPSS_M0 1 ++#define MSM_BUS_MASTER_AMPSS_M1 2 ++#define MSM_BUS_APPSS_MASTER_FAB_MMSS 3 ++#define MSM_BUS_APPSS_MASTER_FAB_SYSTEM 4 ++#define MSM_BUS_SYSTEM_MASTER_FAB_APPSS 5 ++#define MSM_BUS_MASTER_SPS 6 ++#define MSM_BUS_MASTER_ADM_PORT0 7 ++#define MSM_BUS_MASTER_ADM_PORT1 8 ++#define MSM_BUS_SYSTEM_MASTER_ADM1_PORT0 9 ++#define MSM_BUS_MASTER_ADM1_PORT1 10 ++#define MSM_BUS_MASTER_LPASS_PROC 11 ++#define MSM_BUS_MASTER_MSS_PROCI 12 ++#define MSM_BUS_MASTER_MSS_PROCD 13 ++#define MSM_BUS_MASTER_MSS_MDM_PORT0 14 ++#define MSM_BUS_MASTER_LPASS 15 ++#define MSM_BUS_SYSTEM_MASTER_CPSS_FPB 16 ++#define MSM_BUS_SYSTEM_MASTER_SYSTEM_FPB 17 ++#define MSM_BUS_SYSTEM_MASTER_MMSS_FPB 18 ++#define MSM_BUS_MASTER_ADM1_CI 19 ++#define MSM_BUS_MASTER_ADM0_CI 20 ++#define MSM_BUS_MASTER_MSS_MDM_PORT1 21 ++#define MSM_BUS_MASTER_MDP_PORT0 22 ++#define MSM_BUS_MASTER_MDP_PORT1 23 ++#define MSM_BUS_MMSS_MASTER_ADM1_PORT0 24 ++#define MSM_BUS_MASTER_ROTATOR 25 ++#define 
MSM_BUS_MASTER_GRAPHICS_3D 26 ++#define MSM_BUS_MASTER_JPEG_DEC 27 ++#define MSM_BUS_MASTER_GRAPHICS_2D_CORE0 28 ++#define MSM_BUS_MASTER_VFE 29 ++#define MSM_BUS_MASTER_VPE 30 ++#define MSM_BUS_MASTER_JPEG_ENC 31 ++#define MSM_BUS_MASTER_GRAPHICS_2D_CORE1 32 ++#define MSM_BUS_MMSS_MASTER_APPS_FAB 33 ++#define MSM_BUS_MASTER_HD_CODEC_PORT0 34 ++#define MSM_BUS_MASTER_HD_CODEC_PORT1 35 ++#define MSM_BUS_MASTER_SPDM 36 ++#define MSM_BUS_MASTER_RPM 37 ++#define MSM_BUS_MASTER_MSS 38 ++#define MSM_BUS_MASTER_RIVA 39 ++#define MSM_BUS_MASTER_SNOC_VMEM 40 ++#define MSM_BUS_MASTER_MSS_SW_PROC 41 ++#define MSM_BUS_MASTER_MSS_FW_PROC 42 ++#define MSM_BUS_MASTER_HMSS 43 ++#define MSM_BUS_MASTER_GSS_NAV 44 ++#define MSM_BUS_MASTER_PCIE 45 ++#define MSM_BUS_MASTER_SATA 46 ++#define MSM_BUS_MASTER_CRYPTO 47 ++#define MSM_BUS_MASTER_VIDEO_CAP 48 ++#define MSM_BUS_MASTER_GRAPHICS_3D_PORT1 49 ++#define MSM_BUS_MASTER_VIDEO_ENC 50 ++#define MSM_BUS_MASTER_VIDEO_DEC 51 ++#define MSM_BUS_MASTER_LPASS_AHB 52 ++#define MSM_BUS_MASTER_QDSS_BAM 53 ++#define MSM_BUS_MASTER_SNOC_CFG 54 ++#define MSM_BUS_MASTER_CRYPTO_CORE0 55 ++#define MSM_BUS_MASTER_CRYPTO_CORE1 56 ++#define MSM_BUS_MASTER_MSS_NAV 57 ++#define MSM_BUS_MASTER_OCMEM_DMA 58 ++#define MSM_BUS_MASTER_WCSS 59 ++#define MSM_BUS_MASTER_QDSS_ETR 60 ++#define MSM_BUS_MASTER_USB3 61 ++#define MSM_BUS_MASTER_JPEG 62 ++#define MSM_BUS_MASTER_VIDEO_P0 63 ++#define MSM_BUS_MASTER_VIDEO_P1 64 ++#define MSM_BUS_MASTER_MSS_PROC 65 ++#define MSM_BUS_MASTER_JPEG_OCMEM 66 ++#define MSM_BUS_MASTER_MDP_OCMEM 67 ++#define MSM_BUS_MASTER_VIDEO_P0_OCMEM 68 ++#define MSM_BUS_MASTER_VIDEO_P1_OCMEM 69 ++#define MSM_BUS_MASTER_VFE_OCMEM 70 ++#define MSM_BUS_MASTER_CNOC_ONOC_CFG 71 ++#define MSM_BUS_MASTER_RPM_INST 72 ++#define MSM_BUS_MASTER_RPM_DATA 73 ++#define MSM_BUS_MASTER_RPM_SYS 74 ++#define MSM_BUS_MASTER_DEHR 75 ++#define MSM_BUS_MASTER_QDSS_DAP 76 ++#define MSM_BUS_MASTER_TIC 77 ++#define MSM_BUS_MASTER_SDCC_1 78 ++#define 
MSM_BUS_MASTER_SDCC_3 79 ++#define MSM_BUS_MASTER_SDCC_4 80 ++#define MSM_BUS_MASTER_SDCC_2 81 ++#define MSM_BUS_MASTER_TSIF 82 ++#define MSM_BUS_MASTER_BAM_DMA 83 ++#define MSM_BUS_MASTER_BLSP_2 84 ++#define MSM_BUS_MASTER_USB_HSIC 85 ++#define MSM_BUS_MASTER_BLSP_1 86 ++#define MSM_BUS_MASTER_USB_HS 87 ++#define MSM_BUS_MASTER_PNOC_CFG 88 ++#define MSM_BUS_MASTER_V_OCMEM_GFX3D 89 ++#define MSM_BUS_MASTER_IPA 90 ++#define MSM_BUS_MASTER_QPIC 91 ++#define MSM_BUS_MASTER_MDPE 92 ++#define MSM_BUS_MASTER_USB_HS2 93 ++#define MSM_BUS_MASTER_VPU 94 ++#define MSM_BUS_MASTER_UFS 95 ++#define MSM_BUS_MASTER_BCAST 96 ++#define MSM_BUS_MASTER_CRYPTO_CORE2 97 ++#define MSM_BUS_MASTER_EMAC 98 ++#define MSM_BUS_MASTER_VPU_1 99 ++#define MSM_BUS_MASTER_PCIE_1 100 ++#define MSM_BUS_MASTER_USB3_1 101 ++#define MSM_BUS_MASTER_CNOC_MNOC_MMSS_CFG 102 ++#define MSM_BUS_MASTER_CNOC_MNOC_CFG 103 ++#define MSM_BUS_MASTER_TCU_0 104 ++#define MSM_BUS_MASTER_TCU_1 105 ++#define MSM_BUS_MASTER_CPP 106 ++#define MSM_BUS_MASTER_AUDIO 107 ++#define MSM_BUS_MASTER_PCIE_2 108 ++#define MSM_BUS_MASTER_BLSP_BAM 109 ++#define MSM_BUS_MASTER_USB2_BAM 110 ++#define MSM_BUS_MASTER_ADDS_DMA0 111 ++#define MSM_BUS_MASTER_ADDS_DMA1 112 ++#define MSM_BUS_MASTER_ADDS_DMA2 113 ++#define MSM_BUS_MASTER_ADDS_DMA3 114 ++#define MSM_BUS_MASTER_QPIC_BAM 115 ++#define MSM_BUS_MASTER_SDCC_BAM 116 ++#define MSM_BUS_MASTER_DDRC_SNOC 117 ++#define MSM_BUS_MASTER_WSS_0 118 ++#define MSM_BUS_MASTER_WSS_1 119 ++#define MSM_BUS_MASTER_ESS 120 ++#define MSM_BUS_MASTER_QDSS_BAMNDP 121 ++#define MSM_BUS_MASTER_QDSS_SNOC_CFG 122 ++#define MSM_BUS_MASTER_LAST 130 ++ ++#define MSM_BUS_SYSTEM_FPB_MASTER_SYSTEM MSM_BUS_SYSTEM_MASTER_SYSTEM_FPB ++#define MSM_BUS_CPSS_FPB_MASTER_SYSTEM MSM_BUS_SYSTEM_MASTER_CPSS_FPB ++ ++#define MSM_BUS_SNOC_MM_INT_0 10000 ++#define MSM_BUS_SNOC_MM_INT_1 10001 ++#define MSM_BUS_SNOC_MM_INT_2 10002 ++#define MSM_BUS_SNOC_MM_INT_BIMC 10003 ++#define MSM_BUS_SNOC_INT_0 10004 ++#define 
MSM_BUS_SNOC_INT_1 10005 ++#define MSM_BUS_SNOC_INT_BIMC 10006 ++#define MSM_BUS_SNOC_BIMC_0_MAS 10007 ++#define MSM_BUS_SNOC_BIMC_1_MAS 10008 ++#define MSM_BUS_SNOC_QDSS_INT 10009 ++#define MSM_BUS_PNOC_SNOC_MAS 10010 ++#define MSM_BUS_PNOC_SNOC_SLV 10011 ++#define MSM_BUS_PNOC_INT_0 10012 ++#define MSM_BUS_PNOC_INT_1 10013 ++#define MSM_BUS_PNOC_M_0 10014 ++#define MSM_BUS_PNOC_M_1 10015 ++#define MSM_BUS_BIMC_SNOC_MAS 10016 ++#define MSM_BUS_BIMC_SNOC_SLV 10017 ++#define MSM_BUS_PNOC_SLV_0 10018 ++#define MSM_BUS_PNOC_SLV_1 10019 ++#define MSM_BUS_PNOC_SLV_2 10020 ++#define MSM_BUS_PNOC_SLV_3 10021 ++#define MSM_BUS_PNOC_SLV_4 10022 ++#define MSM_BUS_PNOC_SLV_8 10023 ++#define MSM_BUS_PNOC_SLV_9 10024 ++#define MSM_BUS_SNOC_BIMC_0_SLV 10025 ++#define MSM_BUS_SNOC_BIMC_1_SLV 10026 ++#define MSM_BUS_MNOC_BIMC_MAS 10027 ++#define MSM_BUS_MNOC_BIMC_SLV 10028 ++#define MSM_BUS_BIMC_MNOC_MAS 10029 ++#define MSM_BUS_BIMC_MNOC_SLV 10030 ++#define MSM_BUS_SNOC_BIMC_MAS 10031 ++#define MSM_BUS_SNOC_BIMC_SLV 10032 ++#define MSM_BUS_CNOC_SNOC_MAS 10033 ++#define MSM_BUS_CNOC_SNOC_SLV 10034 ++#define MSM_BUS_SNOC_CNOC_MAS 10035 ++#define MSM_BUS_SNOC_CNOC_SLV 10036 ++#define MSM_BUS_OVNOC_SNOC_MAS 10037 ++#define MSM_BUS_OVNOC_SNOC_SLV 10038 ++#define MSM_BUS_SNOC_OVNOC_MAS 10039 ++#define MSM_BUS_SNOC_OVNOC_SLV 10040 ++#define MSM_BUS_SNOC_PNOC_MAS 10041 ++#define MSM_BUS_SNOC_PNOC_SLV 10042 ++#define MSM_BUS_BIMC_INT_APPS_EBI 10043 ++#define MSM_BUS_BIMC_INT_APPS_SNOC 10044 ++#define MSM_BUS_SNOC_BIMC_2_MAS 10045 ++#define MSM_BUS_SNOC_BIMC_2_SLV 10046 ++#define MSM_BUS_PNOC_SLV_5 10047 ++#define MSM_BUS_PNOC_SLV_6 10048 ++#define MSM_BUS_PNOC_INT_2 10049 ++#define MSM_BUS_PNOC_INT_3 10050 ++#define MSM_BUS_PNOC_INT_4 10051 ++#define MSM_BUS_PNOC_INT_5 10052 ++#define MSM_BUS_PNOC_INT_6 10053 ++#define MSM_BUS_PNOC_INT_7 10054 ++#define MSM_BUS_BIMC_SNOC_1_MAS 10055 ++#define MSM_BUS_BIMC_SNOC_1_SLV 10056 ++#define MSM_BUS_PNOC_A1NOC_MAS 10057 ++#define 
MSM_BUS_PNOC_A1NOC_SLV 10058 ++#define MSM_BUS_CNOC_A1NOC_MAS 10059 ++#define MSM_BUS_A0NOC_SNOC_MAS 10060 ++#define MSM_BUS_A0NOC_SNOC_SLV 10061 ++#define MSM_BUS_A1NOC_SNOC_SLV 10062 ++#define MSM_BUS_A1NOC_SNOC_MAS 10063 ++#define MSM_BUS_A2NOC_SNOC_MAS 10064 ++#define MSM_BUS_A2NOC_SNOC_SLV 10065 ++#define MSM_BUS_PNOC_SLV_7 10066 ++#define MSM_BUS_INT_LAST 10067 ++ ++#define MSM_BUS_SLAVE_FIRST 512 ++#define MSM_BUS_SLAVE_EBI_CH0 512 ++#define MSM_BUS_SLAVE_EBI_CH1 513 ++#define MSM_BUS_SLAVE_AMPSS_L2 514 ++#define MSM_BUS_APPSS_SLAVE_FAB_MMSS 515 ++#define MSM_BUS_APPSS_SLAVE_FAB_SYSTEM 516 ++#define MSM_BUS_SYSTEM_SLAVE_FAB_APPS 517 ++#define MSM_BUS_SLAVE_SPS 518 ++#define MSM_BUS_SLAVE_SYSTEM_IMEM 519 ++#define MSM_BUS_SLAVE_AMPSS 520 ++#define MSM_BUS_SLAVE_MSS 521 ++#define MSM_BUS_SLAVE_LPASS 522 ++#define MSM_BUS_SYSTEM_SLAVE_CPSS_FPB 523 ++#define MSM_BUS_SYSTEM_SLAVE_SYSTEM_FPB 524 ++#define MSM_BUS_SYSTEM_SLAVE_MMSS_FPB 525 ++#define MSM_BUS_SLAVE_CORESIGHT 526 ++#define MSM_BUS_SLAVE_RIVA 527 ++#define MSM_BUS_SLAVE_SMI 528 ++#define MSM_BUS_MMSS_SLAVE_FAB_APPS 529 ++#define MSM_BUS_MMSS_SLAVE_FAB_APPS_1 530 ++#define MSM_BUS_SLAVE_MM_IMEM 531 ++#define MSM_BUS_SLAVE_CRYPTO 532 ++#define MSM_BUS_SLAVE_SPDM 533 ++#define MSM_BUS_SLAVE_RPM 534 ++#define MSM_BUS_SLAVE_RPM_MSG_RAM 535 ++#define MSM_BUS_SLAVE_MPM 536 ++#define MSM_BUS_SLAVE_PMIC1_SSBI1_A 537 ++#define MSM_BUS_SLAVE_PMIC1_SSBI1_B 538 ++#define MSM_BUS_SLAVE_PMIC1_SSBI1_C 539 ++#define MSM_BUS_SLAVE_PMIC2_SSBI2_A 540 ++#define MSM_BUS_SLAVE_PMIC2_SSBI2_B 541 ++#define MSM_BUS_SLAVE_GSBI1_UART 542 ++#define MSM_BUS_SLAVE_GSBI2_UART 543 ++#define MSM_BUS_SLAVE_GSBI3_UART 544 ++#define MSM_BUS_SLAVE_GSBI4_UART 545 ++#define MSM_BUS_SLAVE_GSBI5_UART 546 ++#define MSM_BUS_SLAVE_GSBI6_UART 547 ++#define MSM_BUS_SLAVE_GSBI7_UART 548 ++#define MSM_BUS_SLAVE_GSBI8_UART 549 ++#define MSM_BUS_SLAVE_GSBI9_UART 550 ++#define MSM_BUS_SLAVE_GSBI10_UART 551 ++#define MSM_BUS_SLAVE_GSBI11_UART 552 
++#define MSM_BUS_SLAVE_GSBI12_UART 553 ++#define MSM_BUS_SLAVE_GSBI1_QUP 554 ++#define MSM_BUS_SLAVE_GSBI2_QUP 555 ++#define MSM_BUS_SLAVE_GSBI3_QUP 556 ++#define MSM_BUS_SLAVE_GSBI4_QUP 557 ++#define MSM_BUS_SLAVE_GSBI5_QUP 558 ++#define MSM_BUS_SLAVE_GSBI6_QUP 559 ++#define MSM_BUS_SLAVE_GSBI7_QUP 560 ++#define MSM_BUS_SLAVE_GSBI8_QUP 561 ++#define MSM_BUS_SLAVE_GSBI9_QUP 562 ++#define MSM_BUS_SLAVE_GSBI10_QUP 563 ++#define MSM_BUS_SLAVE_GSBI11_QUP 564 ++#define MSM_BUS_SLAVE_GSBI12_QUP 565 ++#define MSM_BUS_SLAVE_EBI2_NAND 566 ++#define MSM_BUS_SLAVE_EBI2_CS0 567 ++#define MSM_BUS_SLAVE_EBI2_CS1 568 ++#define MSM_BUS_SLAVE_EBI2_CS2 569 ++#define MSM_BUS_SLAVE_EBI2_CS3 570 ++#define MSM_BUS_SLAVE_EBI2_CS4 571 ++#define MSM_BUS_SLAVE_EBI2_CS5 572 ++#define MSM_BUS_SLAVE_USB_FS1 573 ++#define MSM_BUS_SLAVE_USB_FS2 574 ++#define MSM_BUS_SLAVE_TSIF 575 ++#define MSM_BUS_SLAVE_MSM_TSSC 576 ++#define MSM_BUS_SLAVE_MSM_PDM 577 ++#define MSM_BUS_SLAVE_MSM_DIMEM 578 ++#define MSM_BUS_SLAVE_MSM_TCSR 579 ++#define MSM_BUS_SLAVE_MSM_PRNG 580 ++#define MSM_BUS_SLAVE_GSS 581 ++#define MSM_BUS_SLAVE_SATA 582 ++#define MSM_BUS_SLAVE_USB3 583 ++#define MSM_BUS_SLAVE_WCSS 584 ++#define MSM_BUS_SLAVE_OCIMEM 585 ++#define MSM_BUS_SLAVE_SNOC_OCMEM 586 ++#define MSM_BUS_SLAVE_SERVICE_SNOC 587 ++#define MSM_BUS_SLAVE_QDSS_STM 588 ++#define MSM_BUS_SLAVE_CAMERA_CFG 589 ++#define MSM_BUS_SLAVE_DISPLAY_CFG 590 ++#define MSM_BUS_SLAVE_OCMEM_CFG 591 ++#define MSM_BUS_SLAVE_CPR_CFG 592 ++#define MSM_BUS_SLAVE_CPR_XPU_CFG 593 ++#define MSM_BUS_SLAVE_MISC_CFG 594 ++#define MSM_BUS_SLAVE_MISC_XPU_CFG 595 ++#define MSM_BUS_SLAVE_VENUS_CFG 596 ++#define MSM_BUS_SLAVE_MISC_VENUS_CFG 597 ++#define MSM_BUS_SLAVE_GRAPHICS_3D_CFG 598 ++#define MSM_BUS_SLAVE_MMSS_CLK_CFG 599 ++#define MSM_BUS_SLAVE_MMSS_CLK_XPU_CFG 600 ++#define MSM_BUS_SLAVE_MNOC_MPU_CFG 601 ++#define MSM_BUS_SLAVE_ONOC_MPU_CFG 602 ++#define MSM_BUS_SLAVE_SERVICE_MNOC 603 ++#define MSM_BUS_SLAVE_OCMEM 604 ++#define 
MSM_BUS_SLAVE_SERVICE_ONOC 605 ++#define MSM_BUS_SLAVE_SDCC_1 606 ++#define MSM_BUS_SLAVE_SDCC_3 607 ++#define MSM_BUS_SLAVE_SDCC_2 608 ++#define MSM_BUS_SLAVE_SDCC_4 609 ++#define MSM_BUS_SLAVE_BAM_DMA 610 ++#define MSM_BUS_SLAVE_BLSP_2 611 ++#define MSM_BUS_SLAVE_USB_HSIC 612 ++#define MSM_BUS_SLAVE_BLSP_1 613 ++#define MSM_BUS_SLAVE_USB_HS 614 ++#define MSM_BUS_SLAVE_PDM 615 ++#define MSM_BUS_SLAVE_PERIPH_APU_CFG 616 ++#define MSM_BUS_SLAVE_PNOC_MPU_CFG 617 ++#define MSM_BUS_SLAVE_PRNG 618 ++#define MSM_BUS_SLAVE_SERVICE_PNOC 619 ++#define MSM_BUS_SLAVE_CLK_CTL 620 ++#define MSM_BUS_SLAVE_CNOC_MSS 621 ++#define MSM_BUS_SLAVE_SECURITY 622 ++#define MSM_BUS_SLAVE_TCSR 623 ++#define MSM_BUS_SLAVE_TLMM 624 ++#define MSM_BUS_SLAVE_CRYPTO_0_CFG 625 ++#define MSM_BUS_SLAVE_CRYPTO_1_CFG 626 ++#define MSM_BUS_SLAVE_IMEM_CFG 627 ++#define MSM_BUS_SLAVE_MESSAGE_RAM 628 ++#define MSM_BUS_SLAVE_BIMC_CFG 629 ++#define MSM_BUS_SLAVE_BOOT_ROM 630 ++#define MSM_BUS_SLAVE_CNOC_MNOC_MMSS_CFG 631 ++#define MSM_BUS_SLAVE_PMIC_ARB 632 ++#define MSM_BUS_SLAVE_SPDM_WRAPPER 633 ++#define MSM_BUS_SLAVE_DEHR_CFG 634 ++#define MSM_BUS_SLAVE_QDSS_CFG 635 ++#define MSM_BUS_SLAVE_RBCPR_CFG 636 ++#define MSM_BUS_SLAVE_RBCPR_QDSS_APU_CFG 637 ++#define MSM_BUS_SLAVE_SNOC_MPU_CFG 638 ++#define MSM_BUS_SLAVE_CNOC_ONOC_CFG 639 ++#define MSM_BUS_SLAVE_CNOC_MNOC_CFG 640 ++#define MSM_BUS_SLAVE_PNOC_CFG 641 ++#define MSM_BUS_SLAVE_SNOC_CFG 642 ++#define MSM_BUS_SLAVE_EBI1_DLL_CFG 643 ++#define MSM_BUS_SLAVE_PHY_APU_CFG 644 ++#define MSM_BUS_SLAVE_EBI1_PHY_CFG 645 ++#define MSM_BUS_SLAVE_SERVICE_CNOC 646 ++#define MSM_BUS_SLAVE_IPS_CFG 647 ++#define MSM_BUS_SLAVE_QPIC 648 ++#define MSM_BUS_SLAVE_DSI_CFG 649 ++#define MSM_BUS_SLAVE_UFS_CFG 650 ++#define MSM_BUS_SLAVE_RBCPR_CX_CFG 651 ++#define MSM_BUS_SLAVE_RBCPR_MX_CFG 652 ++#define MSM_BUS_SLAVE_PCIE_CFG 653 ++#define MSM_BUS_SLAVE_USB_PHYS_CFG 654 ++#define MSM_BUS_SLAVE_VIDEO_CAP_CFG 655 ++#define MSM_BUS_SLAVE_AVSYNC_CFG 656 ++#define 
MSM_BUS_SLAVE_CRYPTO_2_CFG 657 ++#define MSM_BUS_SLAVE_VPU_CFG 658 ++#define MSM_BUS_SLAVE_BCAST_CFG 659 ++#define MSM_BUS_SLAVE_KLM_CFG 660 ++#define MSM_BUS_SLAVE_GENI_IR_CFG 661 ++#define MSM_BUS_SLAVE_OCMEM_GFX 662 ++#define MSM_BUS_SLAVE_CATS_128 663 ++#define MSM_BUS_SLAVE_OCMEM_64 664 ++#define MSM_BUS_SLAVE_PCIE_0 665 ++#define MSM_BUS_SLAVE_PCIE_1 666 ++#define MSM_BUS_SLAVE_PCIE_0_CFG 667 ++#define MSM_BUS_SLAVE_PCIE_1_CFG 668 ++#define MSM_BUS_SLAVE_SRVC_MNOC 669 ++#define MSM_BUS_SLAVE_USB_HS2 670 ++#define MSM_BUS_SLAVE_AUDIO 671 ++#define MSM_BUS_SLAVE_TCU 672 ++#define MSM_BUS_SLAVE_APPSS 673 ++#define MSM_BUS_SLAVE_PCIE_PARF 674 ++#define MSM_BUS_SLAVE_USB3_PHY_CFG 675 ++#define MSM_BUS_SLAVE_IPA_CFG 676 ++#define MSM_BUS_SLAVE_A0NOC_SNOC 677 ++#define MSM_BUS_SLAVE_A1NOC_SNOC 678 ++#define MSM_BUS_SLAVE_A2NOC_SNOC 679 ++#define MSM_BUS_SLAVE_HMSS_L3 680 ++#define MSM_BUS_SLAVE_PIMEM_CFG 681 ++#define MSM_BUS_SLAVE_DCC_CFG 682 ++#define MSM_BUS_SLAVE_QDSS_RBCPR_APU_CFG 683 ++#define MSM_BUS_SLAVE_PCIE_2_CFG 684 ++#define MSM_BUS_SLAVE_PCIE20_AHB2PHY 685 ++#define MSM_BUS_SLAVE_A0NOC_CFG 686 ++#define MSM_BUS_SLAVE_A1NOC_CFG 687 ++#define MSM_BUS_SLAVE_A2NOC_CFG 688 ++#define MSM_BUS_SLAVE_A1NOC_MPU_CFG 689 ++#define MSM_BUS_SLAVE_A2NOC_MPU_CFG 690 ++#define MSM_BUS_SLAVE_A0NOC_SMMU_CFG 691 ++#define MSM_BUS_SLAVE_A1NOC_SMMU_CFG 692 ++#define MSM_BUS_SLAVE_A2NOC_SMMU_CFG 693 ++#define MSM_BUS_SLAVE_LPASS_SMMU_CFG 694 ++#define MSM_BUS_SLAVE_MMAGIC_CFG 695 ++#define MSM_BUS_SLAVE_VENUS_THROTTLE_CFG 696 ++#define MSM_BUS_SLAVE_SSC_CFG 697 ++#define MSM_BUS_SLAVE_DSA_CFG 698 ++#define MSM_BUS_SLAVE_DSA_MPU_CFG 699 ++#define MSM_BUS_SLAVE_DISPLAY_THROTTLE_CFG 700 ++#define MSM_BUS_SLAVE_SMMU_CPP_CFG 701 ++#define MSM_BUS_SLAVE_SMMU_JPEG_CFG 702 ++#define MSM_BUS_SLAVE_SMMU_MDP_CFG 703 ++#define MSM_BUS_SLAVE_SMMU_ROTATOR_CFG 704 ++#define MSM_BUS_SLAVE_SMMU_VENUS_CFG 705 ++#define MSM_BUS_SLAVE_SMMU_VFE_CFG 706 ++#define MSM_BUS_SLAVE_A0NOC_MPU_CFG 707 
++#define MSM_BUS_SLAVE_VMEM_CFG 708 ++#define MSM_BUS_SLAVE_CAMERA_THROTTLE_CFG 700 ++#define MSM_BUS_SLAVE_VMEM 709 ++#define MSM_BUS_SLAVE_AHB2PHY 710 ++#define MSM_BUS_SLAVE_PIMEM 711 ++#define MSM_BUS_SLAVE_SNOC_VMEM 712 ++#define MSM_BUS_SLAVE_PCIE_2 713 ++#define MSM_BUS_SLAVE_RBCPR_MX 714 ++#define MSM_BUS_SLAVE_RBCPR_CX 715 ++#define MSM_BUS_SLAVE_PRNG_APU_CFG 716 ++#define MSM_BUS_SLAVE_PERIPH_MPU_CFG 717 ++#define MSM_BUS_SLAVE_GCNT 718 ++#define MSM_BUS_SLAVE_ADSS_CFG 719 ++#define MSM_BUS_SLAVE_ADSS_VMIDMT_CFG 720 ++#define MSM_BUS_SLAVE_QHSS_APU_CFG 721 ++#define MSM_BUS_SLAVE_MDIO 722 ++#define MSM_BUS_SLAVE_FEPHY_CFG 723 ++#define MSM_BUS_SLAVE_SRIF 724 ++#define MSM_BUS_SLAVE_LAST 730 ++#define MSM_BUS_SLAVE_DDRC_CFG 731 ++#define MSM_BUS_SLAVE_DDRC_APU_CFG 732 ++#define MSM_BUS_SLAVE_MPU0_CFG 733 ++#define MSM_BUS_SLAVE_MPU1_CFG 734 ++#define MSM_BUS_SLAVE_MPU2_CFG 734 ++#define MSM_BUS_SLAVE_ESS_VMIDMT_CFG 735 ++#define MSM_BUS_SLAVE_ESS_APU_CFG 736 ++#define MSM_BUS_SLAVE_USB2_CFG 737 ++#define MSM_BUS_SLAVE_BLSP_CFG 738 ++#define MSM_BUS_SLAVE_QPIC_CFG 739 ++#define MSM_BUS_SLAVE_SDCC_CFG 740 ++#define MSM_BUS_SLAVE_WSS0_VMIDMT_CFG 741 ++#define MSM_BUS_SLAVE_WSS0_APU_CFG 742 ++#define MSM_BUS_SLAVE_WSS1_VMIDMT_CFG 743 ++#define MSM_BUS_SLAVE_WSS1_APU_CFG 744 ++#define MSM_BUS_SLAVE_SRVC_PCNOC 745 ++#define MSM_BUS_SLAVE_SNOC_DDRC 746 ++#define MSM_BUS_SLAVE_A7SS 747 ++#define MSM_BUS_SLAVE_WSS0_CFG 748 ++#define MSM_BUS_SLAVE_WSS1_CFG 749 ++#define MSM_BUS_SLAVE_PCIE 750 ++#define MSM_BUS_SLAVE_USB3_CFG 751 ++#define MSM_BUS_SLAVE_CRYPTO_CFG 752 ++#define MSM_BUS_SLAVE_ESS_CFG 753 ++#define MSM_BUS_SLAVE_SRVC_SNOC 754 ++ ++#define MSM_BUS_SYSTEM_FPB_SLAVE_SYSTEM MSM_BUS_SYSTEM_SLAVE_SYSTEM_FPB ++#define MSM_BUS_CPSS_FPB_SLAVE_SYSTEM MSM_BUS_SYSTEM_SLAVE_CPSS_FPB ++ ++/* ++ * ID's used in RPM messages ++ */ ++#define ICBID_MASTER_APPSS_PROC 0 ++#define ICBID_MASTER_MSS_PROC 1 ++#define ICBID_MASTER_MNOC_BIMC 2 ++#define ICBID_MASTER_SNOC_BIMC 3 
++#define ICBID_MASTER_SNOC_BIMC_0 ICBID_MASTER_SNOC_BIMC ++#define ICBID_MASTER_CNOC_MNOC_MMSS_CFG 4 ++#define ICBID_MASTER_CNOC_MNOC_CFG 5 ++#define ICBID_MASTER_GFX3D 6 ++#define ICBID_MASTER_JPEG 7 ++#define ICBID_MASTER_MDP 8 ++#define ICBID_MASTER_MDP0 ICBID_MASTER_MDP ++#define ICBID_MASTER_MDPS ICBID_MASTER_MDP ++#define ICBID_MASTER_VIDEO 9 ++#define ICBID_MASTER_VIDEO_P0 ICBID_MASTER_VIDEO ++#define ICBID_MASTER_VIDEO_P1 10 ++#define ICBID_MASTER_VFE 11 ++#define ICBID_MASTER_CNOC_ONOC_CFG 12 ++#define ICBID_MASTER_JPEG_OCMEM 13 ++#define ICBID_MASTER_MDP_OCMEM 14 ++#define ICBID_MASTER_VIDEO_P0_OCMEM 15 ++#define ICBID_MASTER_VIDEO_P1_OCMEM 16 ++#define ICBID_MASTER_VFE_OCMEM 17 ++#define ICBID_MASTER_LPASS_AHB 18 ++#define ICBID_MASTER_QDSS_BAM 19 ++#define ICBID_MASTER_SNOC_CFG 20 ++#define ICBID_MASTER_BIMC_SNOC 21 ++#define ICBID_MASTER_CNOC_SNOC 22 ++#define ICBID_MASTER_CRYPTO 23 ++#define ICBID_MASTER_CRYPTO_CORE0 ICBID_MASTER_CRYPTO ++#define ICBID_MASTER_CRYPTO_CORE1 24 ++#define ICBID_MASTER_LPASS_PROC 25 ++#define ICBID_MASTER_MSS 26 ++#define ICBID_MASTER_MSS_NAV 27 ++#define ICBID_MASTER_OCMEM_DMA 28 ++#define ICBID_MASTER_PNOC_SNOC 29 ++#define ICBID_MASTER_WCSS 30 ++#define ICBID_MASTER_QDSS_ETR 31 ++#define ICBID_MASTER_USB3 32 ++#define ICBID_MASTER_USB3_0 ICBID_MASTER_USB3 ++#define ICBID_MASTER_SDCC_1 33 ++#define ICBID_MASTER_SDCC_3 34 ++#define ICBID_MASTER_SDCC_2 35 ++#define ICBID_MASTER_SDCC_4 36 ++#define ICBID_MASTER_TSIF 37 ++#define ICBID_MASTER_BAM_DMA 38 ++#define ICBID_MASTER_BLSP_2 39 ++#define ICBID_MASTER_USB_HSIC 40 ++#define ICBID_MASTER_BLSP_1 41 ++#define ICBID_MASTER_USB_HS 42 ++#define ICBID_MASTER_USB_HS1 ICBID_MASTER_USB_HS ++#define ICBID_MASTER_PNOC_CFG 43 ++#define ICBID_MASTER_SNOC_PNOC 44 ++#define ICBID_MASTER_RPM_INST 45 ++#define ICBID_MASTER_RPM_DATA 46 ++#define ICBID_MASTER_RPM_SYS 47 ++#define ICBID_MASTER_DEHR 48 ++#define ICBID_MASTER_QDSS_DAP 49 ++#define ICBID_MASTER_SPDM 50 ++#define 
ICBID_MASTER_TIC 51 ++#define ICBID_MASTER_SNOC_CNOC 52 ++#define ICBID_MASTER_GFX3D_OCMEM 53 ++#define ICBID_MASTER_GFX3D_GMEM ICBID_MASTER_GFX3D_OCMEM ++#define ICBID_MASTER_OVIRT_SNOC 54 ++#define ICBID_MASTER_SNOC_OVIRT 55 ++#define ICBID_MASTER_SNOC_GVIRT ICBID_MASTER_SNOC_OVIRT ++#define ICBID_MASTER_ONOC_OVIRT 56 ++#define ICBID_MASTER_USB_HS2 57 ++#define ICBID_MASTER_QPIC 58 ++#define ICBID_MASTER_IPA 59 ++#define ICBID_MASTER_DSI 60 ++#define ICBID_MASTER_MDP1 61 ++#define ICBID_MASTER_MDPE ICBID_MASTER_MDP1 ++#define ICBID_MASTER_VPU_PROC 62 ++#define ICBID_MASTER_VPU 63 ++#define ICBID_MASTER_VPU0 ICBID_MASTER_VPU ++#define ICBID_MASTER_CRYPTO_CORE2 64 ++#define ICBID_MASTER_PCIE_0 65 ++#define ICBID_MASTER_PCIE_1 66 ++#define ICBID_MASTER_SATA 67 ++#define ICBID_MASTER_UFS 68 ++#define ICBID_MASTER_USB3_1 69 ++#define ICBID_MASTER_VIDEO_OCMEM 70 ++#define ICBID_MASTER_VPU1 71 ++#define ICBID_MASTER_VCAP 72 ++#define ICBID_MASTER_EMAC 73 ++#define ICBID_MASTER_BCAST 74 ++#define ICBID_MASTER_MMSS_PROC 75 ++#define ICBID_MASTER_SNOC_BIMC_1 76 ++#define ICBID_MASTER_SNOC_PCNOC 77 ++#define ICBID_MASTER_AUDIO 78 ++#define ICBID_MASTER_MM_INT_0 79 ++#define ICBID_MASTER_MM_INT_1 80 ++#define ICBID_MASTER_MM_INT_2 81 ++#define ICBID_MASTER_MM_INT_BIMC 82 ++#define ICBID_MASTER_MSS_INT 83 ++#define ICBID_MASTER_PCNOC_CFG 84 ++#define ICBID_MASTER_PCNOC_INT_0 85 ++#define ICBID_MASTER_PCNOC_INT_1 86 ++#define ICBID_MASTER_PCNOC_M_0 87 ++#define ICBID_MASTER_PCNOC_M_1 88 ++#define ICBID_MASTER_PCNOC_S_0 89 ++#define ICBID_MASTER_PCNOC_S_1 90 ++#define ICBID_MASTER_PCNOC_S_2 91 ++#define ICBID_MASTER_PCNOC_S_3 92 ++#define ICBID_MASTER_PCNOC_S_4 93 ++#define ICBID_MASTER_PCNOC_S_6 94 ++#define ICBID_MASTER_PCNOC_S_7 95 ++#define ICBID_MASTER_PCNOC_S_8 96 ++#define ICBID_MASTER_PCNOC_S_9 97 ++#define ICBID_MASTER_QDSS_INT 98 ++#define ICBID_MASTER_SNOC_INT_0 99 ++#define ICBID_MASTER_SNOC_INT_1 100 ++#define ICBID_MASTER_SNOC_INT_BIMC 101 ++#define 
ICBID_MASTER_TCU_0 102 ++#define ICBID_MASTER_TCU_1 103 ++#define ICBID_MASTER_BIMC_INT_0 104 ++#define ICBID_MASTER_BIMC_INT_1 105 ++#define ICBID_MASTER_CAMERA 106 ++#define ICBID_MASTER_RICA 107 ++#define ICBID_MASTER_PCNOC_S_5 129 ++#define ICBID_MASTER_PCNOC_INT_2 124 ++#define ICBID_MASTER_PCNOC_INT_3 125 ++#define ICBID_MASTER_PCNOC_INT_4 126 ++#define ICBID_MASTER_PCNOC_INT_5 127 ++#define ICBID_MASTER_PCNOC_INT_6 128 ++#define ICBID_MASTER_PCIE_2 119 ++#define ICBID_MASTER_MASTER_CNOC_A1NOC 116 ++#define ICBID_MASTER_A0NOC_SNOC 110 ++#define ICBID_MASTER_A1NOC_SNOC 111 ++#define ICBID_MASTER_A2NOC_SNOC 112 ++#define ICBID_MASTER_PNOC_A1NOC 117 ++#define ICBID_MASTER_ROTATOR 120 ++#define ICBID_MASTER_SNOC_VMEM 114 ++#define ICBID_MASTER_VENUS_VMEM 121 ++#define ICBID_MASTER_HMSS 118 ++#define ICBID_MASTER_BIMC_SNOC_1 109 ++#define ICBID_MASTER_CNOC_A1NOC 116 ++#define ICBID_MASTER_CPP 115 ++#define ICBID_MASTER_BLSP_BAM 130 ++#define ICBID_MASTER_USB2_BAM 131 ++#define ICBID_MASTER_ADSS_DMA0 132 ++#define ICBID_MASTER_ADSS_DMA1 133 ++#define ICBID_MASTER_ADSS_DMA2 134 ++#define ICBID_MASTER_ADSS_DMA3 135 ++#define ICBID_MASTER_QPIC_BAM 136 ++#define ICBID_MASTER_SDCC_BAM 137 ++#define ICBID_MASTER_DDRC_SNOC 138 ++#define ICBID_MASTER_WSS_0 139 ++#define ICBID_MASTER_WSS_1 140 ++#define ICBID_MASTER_ESS 141 ++#define ICBID_MASTER_PCIE 142 ++#define ICBID_MASTER_QDSS_BAMNDP 143 ++#define ICBID_MASTER_QDSS_SNOC_CFG 144 ++ ++#define ICBID_SLAVE_EBI1 0 ++#define ICBID_SLAVE_APPSS_L2 1 ++#define ICBID_SLAVE_BIMC_SNOC 2 ++#define ICBID_SLAVE_CAMERA_CFG 3 ++#define ICBID_SLAVE_DISPLAY_CFG 4 ++#define ICBID_SLAVE_OCMEM_CFG 5 ++#define ICBID_SLAVE_CPR_CFG 6 ++#define ICBID_SLAVE_CPR_XPU_CFG 7 ++#define ICBID_SLAVE_MISC_CFG 8 ++#define ICBID_SLAVE_MISC_XPU_CFG 9 ++#define ICBID_SLAVE_VENUS_CFG 10 ++#define ICBID_SLAVE_GFX3D_CFG 11 ++#define ICBID_SLAVE_MMSS_CLK_CFG 12 ++#define ICBID_SLAVE_MMSS_CLK_XPU_CFG 13 ++#define ICBID_SLAVE_MNOC_MPU_CFG 14 ++#define 
ICBID_SLAVE_ONOC_MPU_CFG 15 ++#define ICBID_SLAVE_MNOC_BIMC 16 ++#define ICBID_SLAVE_SERVICE_MNOC 17 ++#define ICBID_SLAVE_OCMEM 18 ++#define ICBID_SLAVE_GMEM ICBID_SLAVE_OCMEM ++#define ICBID_SLAVE_SERVICE_ONOC 19 ++#define ICBID_SLAVE_APPSS 20 ++#define ICBID_SLAVE_LPASS 21 ++#define ICBID_SLAVE_USB3 22 ++#define ICBID_SLAVE_USB3_0 ICBID_SLAVE_USB3 ++#define ICBID_SLAVE_WCSS 23 ++#define ICBID_SLAVE_SNOC_BIMC 24 ++#define ICBID_SLAVE_SNOC_BIMC_0 ICBID_SLAVE_SNOC_BIMC ++#define ICBID_SLAVE_SNOC_CNOC 25 ++#define ICBID_SLAVE_IMEM 26 ++#define ICBID_SLAVE_OCIMEM ICBID_SLAVE_IMEM ++#define ICBID_SLAVE_SNOC_OVIRT 27 ++#define ICBID_SLAVE_SNOC_GVIRT ICBID_SLAVE_SNOC_OVIRT ++#define ICBID_SLAVE_SNOC_PNOC 28 ++#define ICBID_SLAVE_SNOC_PCNOC ICBID_SLAVE_SNOC_PNOC ++#define ICBID_SLAVE_SERVICE_SNOC 29 ++#define ICBID_SLAVE_QDSS_STM 30 ++#define ICBID_SLAVE_SDCC_1 31 ++#define ICBID_SLAVE_SDCC_3 32 ++#define ICBID_SLAVE_SDCC_2 33 ++#define ICBID_SLAVE_SDCC_4 34 ++#define ICBID_SLAVE_TSIF 35 ++#define ICBID_SLAVE_BAM_DMA 36 ++#define ICBID_SLAVE_BLSP_2 37 ++#define ICBID_SLAVE_USB_HSIC 38 ++#define ICBID_SLAVE_BLSP_1 39 ++#define ICBID_SLAVE_USB_HS 40 ++#define ICBID_SLAVE_USB_HS1 ICBID_SLAVE_USB_HS ++#define ICBID_SLAVE_PDM 41 ++#define ICBID_SLAVE_PERIPH_APU_CFG 42 ++#define ICBID_SLAVE_PNOC_MPU_CFG 43 ++#define ICBID_SLAVE_PRNG 44 ++#define ICBID_SLAVE_PNOC_SNOC 45 ++#define ICBID_SLAVE_PCNOC_SNOC ICBID_SLAVE_PNOC_SNOC ++#define ICBID_SLAVE_SERVICE_PNOC 46 ++#define ICBID_SLAVE_CLK_CTL 47 ++#define ICBID_SLAVE_CNOC_MSS 48 ++#define ICBID_SLAVE_PCNOC_MSS ICBID_SLAVE_CNOC_MSS ++#define ICBID_SLAVE_SECURITY 49 ++#define ICBID_SLAVE_TCSR 50 ++#define ICBID_SLAVE_TLMM 51 ++#define ICBID_SLAVE_CRYPTO_0_CFG 52 ++#define ICBID_SLAVE_CRYPTO_1_CFG 53 ++#define ICBID_SLAVE_IMEM_CFG 54 ++#define ICBID_SLAVE_MESSAGE_RAM 55 ++#define ICBID_SLAVE_BIMC_CFG 56 ++#define ICBID_SLAVE_BOOT_ROM 57 ++#define ICBID_SLAVE_CNOC_MNOC_MMSS_CFG 58 ++#define ICBID_SLAVE_PMIC_ARB 59 ++#define 
ICBID_SLAVE_SPDM_WRAPPER 60 ++#define ICBID_SLAVE_DEHR_CFG 61 ++#define ICBID_SLAVE_MPM 62 ++#define ICBID_SLAVE_QDSS_CFG 63 ++#define ICBID_SLAVE_RBCPR_CFG 64 ++#define ICBID_SLAVE_RBCPR_CX_CFG ICBID_SLAVE_RBCPR_CFG ++#define ICBID_SLAVE_RBCPR_QDSS_APU_CFG 65 ++#define ICBID_SLAVE_CNOC_MNOC_CFG 66 ++#define ICBID_SLAVE_SNOC_MPU_CFG 67 ++#define ICBID_SLAVE_CNOC_ONOC_CFG 68 ++#define ICBID_SLAVE_PNOC_CFG 69 ++#define ICBID_SLAVE_SNOC_CFG 70 ++#define ICBID_SLAVE_EBI1_DLL_CFG 71 ++#define ICBID_SLAVE_PHY_APU_CFG 72 ++#define ICBID_SLAVE_EBI1_PHY_CFG 73 ++#define ICBID_SLAVE_RPM 74 ++#define ICBID_SLAVE_CNOC_SNOC 75 ++#define ICBID_SLAVE_SERVICE_CNOC 76 ++#define ICBID_SLAVE_OVIRT_SNOC 77 ++#define ICBID_SLAVE_OVIRT_OCMEM 78 ++#define ICBID_SLAVE_USB_HS2 79 ++#define ICBID_SLAVE_QPIC 80 ++#define ICBID_SLAVE_IPS_CFG 81 ++#define ICBID_SLAVE_DSI_CFG 82 ++#define ICBID_SLAVE_USB3_1 83 ++#define ICBID_SLAVE_PCIE_0 84 ++#define ICBID_SLAVE_PCIE_1 85 ++#define ICBID_SLAVE_PSS_SMMU_CFG 86 ++#define ICBID_SLAVE_CRYPTO_2_CFG 87 ++#define ICBID_SLAVE_PCIE_0_CFG 88 ++#define ICBID_SLAVE_PCIE_1_CFG 89 ++#define ICBID_SLAVE_SATA_CFG 90 ++#define ICBID_SLAVE_SPSS_GENI_IR 91 ++#define ICBID_SLAVE_UFS_CFG 92 ++#define ICBID_SLAVE_AVSYNC_CFG 93 ++#define ICBID_SLAVE_VPU_CFG 94 ++#define ICBID_SLAVE_USB_PHY_CFG 95 ++#define ICBID_SLAVE_RBCPR_MX_CFG 96 ++#define ICBID_SLAVE_PCIE_PARF 97 ++#define ICBID_SLAVE_VCAP_CFG 98 ++#define ICBID_SLAVE_EMAC_CFG 99 ++#define ICBID_SLAVE_BCAST_CFG 100 ++#define ICBID_SLAVE_KLM_CFG 101 ++#define ICBID_SLAVE_DISPLAY_PWM 102 ++#define ICBID_SLAVE_GENI 103 ++#define ICBID_SLAVE_SNOC_BIMC_1 104 ++#define ICBID_SLAVE_AUDIO 105 ++#define ICBID_SLAVE_CATS_0 106 ++#define ICBID_SLAVE_CATS_1 107 ++#define ICBID_SLAVE_MM_INT_0 108 ++#define ICBID_SLAVE_MM_INT_1 109 ++#define ICBID_SLAVE_MM_INT_2 110 ++#define ICBID_SLAVE_MM_INT_BIMC 111 ++#define ICBID_SLAVE_MMU_MODEM_XPU_CFG 112 ++#define ICBID_SLAVE_MSS_INT 113 ++#define ICBID_SLAVE_PCNOC_INT_0 114 
++#define ICBID_SLAVE_PCNOC_INT_1 115 ++#define ICBID_SLAVE_PCNOC_M_0 116 ++#define ICBID_SLAVE_PCNOC_M_1 117 ++#define ICBID_SLAVE_PCNOC_S_0 118 ++#define ICBID_SLAVE_PCNOC_S_1 119 ++#define ICBID_SLAVE_PCNOC_S_2 120 ++#define ICBID_SLAVE_PCNOC_S_3 121 ++#define ICBID_SLAVE_PCNOC_S_4 122 ++#define ICBID_SLAVE_PCNOC_S_6 123 ++#define ICBID_SLAVE_PCNOC_S_7 124 ++#define ICBID_SLAVE_PCNOC_S_8 125 ++#define ICBID_SLAVE_PCNOC_S_9 126 ++#define ICBID_SLAVE_PRNG_XPU_CFG 127 ++#define ICBID_SLAVE_QDSS_INT 128 ++#define ICBID_SLAVE_RPM_XPU_CFG 129 ++#define ICBID_SLAVE_SNOC_INT_0 130 ++#define ICBID_SLAVE_SNOC_INT_1 131 ++#define ICBID_SLAVE_SNOC_INT_BIMC 132 ++#define ICBID_SLAVE_TCU 133 ++#define ICBID_SLAVE_BIMC_INT_0 134 ++#define ICBID_SLAVE_BIMC_INT_1 135 ++#define ICBID_SLAVE_RICA_CFG 136 ++#define ICBID_SLAVE_PCNOC_S_5 189 ++#define ICBID_SLAVE_PCNOC_S_7 124 ++#define ICBID_SLAVE_PCNOC_INT_2 184 ++#define ICBID_SLAVE_PCNOC_INT_3 185 ++#define ICBID_SLAVE_PCNOC_INT_4 186 ++#define ICBID_SLAVE_PCNOC_INT_5 187 ++#define ICBID_SLAVE_PCNOC_INT_6 188 ++#define ICBID_SLAVE_USB3_PHY_CFG 182 ++#define ICBID_SLAVE_IPA_CFG 183 ++ ++#define ICBID_SLAVE_A0NOC_SNOC 141 ++#define ICBID_SLAVE_A1NOC_SNOC 142 ++#define ICBID_SLAVE_A2NOC_SNOC 143 ++#define ICBID_SLAVE_BIMC_SNOC_1 138 ++#define ICBID_SLAVE_PIMEM 167 ++#define ICBID_SLAVE_PIMEM_CFG 168 ++#define ICBID_SLAVE_DCC_CFG 155 ++#define ICBID_SLAVE_QDSS_RBCPR_APU_CFG 168 ++#define ICBID_SLAVE_A0NOC_CFG 144 ++#define ICBID_SLAVE_PCIE_2_CFG 165 ++#define ICBID_SLAVE_PCIE20_AHB2PHY 163 ++#define ICBID_SLAVE_PCIE_2 164 ++#define ICBID_SLAVE_A1NOC_CFG 147 ++#define ICBID_SLAVE_A1NOC_MPU_CFG 148 ++#define ICBID_SLAVE_A1NOC_SMMU_CFG 149 ++#define ICBID_SLAVE_A2NOC_CFG 150 ++#define ICBID_SLAVE_A2NOC_MPU_CFG 151 ++#define ICBID_SLAVE_A2NOC_SMMU_CFG 152 ++#define ICBID_SLAVE_AHB2PHY 153 ++#define ICBID_SLAVE_HMSS_L3 161 ++#define ICBID_SLAVE_LPASS_SMMU_CFG 161 ++#define ICBID_SLAVE_MMAGIC_CFG 162 ++#define ICBID_SLAVE_SSC_CFG 177 
++#define ICBID_SLAVE_VENUS_THROTTLE_CFG 178 ++#define ICBID_SLAVE_DISPLAY_THROTTLE_CFG 156 ++#define ICBID_SLAVE_CAMERA_THROTTLE_CFG 154 ++#define ICBID_SLAVE_DSA_CFG 157 ++#define ICBID_SLAVE_DSA_MPU_CFG 158 ++#define ICBID_SLAVE_SMMU_CPP_CFG 171 ++#define ICBID_SLAVE_SMMU_JPEG_CFG 172 ++#define ICBID_SLAVE_SMMU_MDP_CFG 173 ++#define ICBID_SLAVE_SMMU_ROTATOR_CFG 174 ++#define ICBID_SLAVE_SMMU_VENUS_CFG 175 ++#define ICBID_SLAVE_SMMU_VFE_CFG 176 ++#define ICBID_SLAVE_A0NOC_MPU_CFG 145 ++#define ICBID_SLAVE_A0NOC_SMMU_CFG 146 ++#define ICBID_SLAVE_VMEM_CFG 180 ++#define ICBID_SLAVE_VMEM 179 ++#define ICBID_SLAVE_PNOC_A1NOC 139 ++#define ICBID_SLAVE_SNOC_VMEM 140 ++#define ICBID_SLAVE_RBCPR_MX 170 ++#define ICBID_SLAVE_RBCPR_CX 169 ++#define ICBID_SLAVE_PRNG_APU_CFG 190 ++#define ICBID_SLAVE_PERIPH_MPU_CFG 191 ++#define ICBID_SLAVE_GCNT 192 ++#define ICBID_SLAVE_ADSS_CFG 193 ++#define ICBID_SLAVE_ADSS_APU 194 ++#define ICBID_SLAVE_ADSS_VMIDMT_CFG 195 ++#define ICBID_SLAVE_QHSS_APU_CFG 196 ++#define ICBID_SLAVE_MDIO 197 ++#define ICBID_SLAVE_FEPHY_CFG 198 ++#define ICBID_SLAVE_SRIF 199 ++#define ICBID_SLAVE_DDRC_CFG 200 ++#define ICBID_SLAVE_DDRC_APU_CFG 201 ++#define ICBID_SLAVE_DDRC_MPU0_CFG 202 ++#define ICBID_SLAVE_DDRC_MPU1_CFG 203 ++#define ICBID_SLAVE_DDRC_MPU2_CFG 210 ++#define ICBID_SLAVE_ESS_VMIDMT_CFG 211 ++#define ICBID_SLAVE_ESS_APU_CFG 212 ++#define ICBID_SLAVE_USB2_CFG 213 ++#define ICBID_SLAVE_BLSP_CFG 214 ++#define ICBID_SLAVE_QPIC_CFG 215 ++#define ICBID_SLAVE_SDCC_CFG 216 ++#define ICBID_SLAVE_WSS0_VMIDMT_CFG 217 ++#define ICBID_SLAVE_WSS0_APU_CFG 218 ++#define ICBID_SLAVE_WSS1_VMIDMT_CFG 219 ++#define ICBID_SLAVE_WSS1_APU_CFG 220 ++#define ICBID_SLAVE_SRVC_PCNOC 221 ++#define ICBID_SLAVE_SNOC_DDRC 222 ++#define ICBID_SLAVE_A7SS 223 ++#define ICBID_SLAVE_WSS0_CFG 224 ++#define ICBID_SLAVE_WSS1_CFG 225 ++#define ICBID_SLAVE_PCIE 226 ++#define ICBID_SLAVE_USB3_CFG 227 ++#define ICBID_SLAVE_CRYPTO_CFG 228 ++#define ICBID_SLAVE_ESS_CFG 229 ++#define 
ICBID_SLAVE_SRVC_SNOC 230 ++#endif +--- /dev/null ++++ b/include/dt-bindings/msm/msm-bus-rule-ops.h +@@ -0,0 +1,32 @@ ++/* Copyright (c) 2014, The Linux Foundation. All rights reserved. ++ * ++ * This program is free software; you can redistribute it and/or modify ++ * it under the terms of the GNU General Public License version 2 and ++ * only version 2 as published by the Free Software Foundation. ++ * ++ * This program is distributed in the hope that it will be useful, ++ * but WITHOUT ANY WARRANTY; without even the implied warranty of ++ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the ++ * GNU General Public License for more details. ++ */ ++ ++#ifndef __MSM_BUS_RULE_OPS_H ++#define __MSM_BUS_RULE_OPS_H ++ ++#define FLD_IB 0 ++#define FLD_AB 1 ++#define FLD_CLK 2 ++ ++#define OP_LE 0 ++#define OP_LT 1 ++#define OP_GE 2 ++#define OP_GT 3 ++#define OP_NOOP 4 ++ ++#define RULE_STATE_NOT_APPLIED 0 ++#define RULE_STATE_APPLIED 1 ++ ++#define THROTTLE_ON 0 ++#define THROTTLE_OFF 1 ++ ++#endif +--- /dev/null ++++ b/drivers/bus/msm_bus/Kconfig +@@ -0,0 +1,19 @@ ++config BUS_TOPOLOGY_ADHOC ++ bool "ad-hoc bus scaling topology" ++ depends on ARCH_QCOM ++ default n ++ help ++ This option enables a driver that can handle adhoc bus topologies. ++ Adhoc bus topology driver allows one to many connections and maintains ++ directionality of connections by explicitly listing device connections ++ thus avoiding illegal routes. ++ ++config MSM_BUS_SCALING ++ bool "Bus scaling driver" ++ depends on BUS_TOPOLOGY_ADHOC ++ default n ++ help ++ This option enables bus scaling on MSM devices. Bus scaling ++ allows devices to request the clocks be set to rates sufficient ++ for the active devices needs without keeping the clocks at max ++ frequency when a slower speed is sufficient. 
+--- /dev/null ++++ b/drivers/bus/msm_bus/Makefile +@@ -0,0 +1,12 @@ ++# ++# Makefile for msm-bus driver specific files ++# ++obj-y += msm_bus_bimc.o msm_bus_noc.o msm_bus_core.o msm_bus_client_api.o \ ++ msm_bus_id.o ++obj-$(CONFIG_OF) += msm_bus_of.o ++ ++obj-y += msm_bus_fabric_adhoc.o msm_bus_arb_adhoc.o msm_bus_rules.o ++obj-$(CONFIG_OF) += msm_bus_of_adhoc.o ++obj-$(CONFIG_CORESIGHT) += msm_buspm_coresight_adhoc.o ++ ++obj-$(CONFIG_DEBUG_FS) += msm_bus_dbg.o +--- /dev/null ++++ b/drivers/bus/msm_bus/msm-bus-board.h +@@ -0,0 +1,198 @@ ++/* Copyright (c) 2010-2014, The Linux Foundation. All rights reserved. ++ * ++ * This program is free software; you can redistribute it and/or modify ++ * it under the terms of the GNU General Public License version 2 and ++ * only version 2 as published by the Free Software Foundation. ++ * ++ * This program is distributed in the hope that it will be useful, ++ * but WITHOUT ANY WARRANTY; without even the implied warranty of ++ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the ++ * GNU General Public License for more details. 
++ */ ++ ++#ifndef __ASM_ARCH_MSM_BUS_BOARD_H ++#define __ASM_ARCH_MSM_BUS_BOARD_H ++ ++#include ++#include ++ ++enum context { ++ DUAL_CTX, ++ ACTIVE_CTX, ++ NUM_CTX ++}; ++ ++struct msm_bus_fabric_registration { ++ unsigned int id; ++ const char *name; ++ struct msm_bus_node_info *info; ++ unsigned int len; ++ int ahb; ++ const char *fabclk[NUM_CTX]; ++ const char *iface_clk; ++ unsigned int offset; ++ unsigned int haltid; ++ unsigned int rpm_enabled; ++ unsigned int nmasters; ++ unsigned int nslaves; ++ unsigned int ntieredslaves; ++ bool il_flag; ++ const struct msm_bus_board_algorithm *board_algo; ++ int hw_sel; ++ void *hw_data; ++ uint32_t qos_freq; ++ uint32_t qos_baseoffset; ++ u64 nr_lim_thresh; ++ uint32_t eff_fact; ++ uint32_t qos_delta; ++ bool virt; ++}; ++ ++struct msm_bus_device_node_registration { ++ struct msm_bus_node_device_type *info; ++ unsigned int num_devices; ++ bool virt; ++}; ++ ++enum msm_bus_bw_tier_type { ++ MSM_BUS_BW_TIER1 = 1, ++ MSM_BUS_BW_TIER2, ++ MSM_BUS_BW_COUNT, ++ MSM_BUS_BW_SIZE = 0x7FFFFFFF, ++}; ++ ++struct msm_bus_halt_vector { ++ uint32_t haltval; ++ uint32_t haltmask; ++}; ++ ++extern struct msm_bus_fabric_registration msm_bus_apps_fabric_pdata; ++extern struct msm_bus_fabric_registration msm_bus_sys_fabric_pdata; ++extern struct msm_bus_fabric_registration msm_bus_mm_fabric_pdata; ++extern struct msm_bus_fabric_registration msm_bus_sys_fpb_pdata; ++extern struct msm_bus_fabric_registration msm_bus_cpss_fpb_pdata; ++extern struct msm_bus_fabric_registration msm_bus_def_fab_pdata; ++ ++extern struct msm_bus_fabric_registration msm_bus_8960_apps_fabric_pdata; ++extern struct msm_bus_fabric_registration msm_bus_8960_sys_fabric_pdata; ++extern struct msm_bus_fabric_registration msm_bus_8960_mm_fabric_pdata; ++extern struct msm_bus_fabric_registration msm_bus_8960_sg_mm_fabric_pdata; ++extern struct msm_bus_fabric_registration msm_bus_8960_sys_fpb_pdata; ++extern struct msm_bus_fabric_registration 
msm_bus_8960_cpss_fpb_pdata; ++ ++extern struct msm_bus_fabric_registration msm_bus_8064_apps_fabric_pdata; ++extern struct msm_bus_fabric_registration msm_bus_8064_sys_fabric_pdata; ++extern struct msm_bus_fabric_registration msm_bus_8064_mm_fabric_pdata; ++extern struct msm_bus_fabric_registration msm_bus_8064_sys_fpb_pdata; ++extern struct msm_bus_fabric_registration msm_bus_8064_cpss_fpb_pdata; ++ ++extern struct msm_bus_fabric_registration msm_bus_9615_sys_fabric_pdata; ++extern struct msm_bus_fabric_registration msm_bus_9615_def_fab_pdata; ++ ++extern struct msm_bus_fabric_registration msm_bus_8930_apps_fabric_pdata; ++extern struct msm_bus_fabric_registration msm_bus_8930_sys_fabric_pdata; ++extern struct msm_bus_fabric_registration msm_bus_8930_mm_fabric_pdata; ++extern struct msm_bus_fabric_registration msm_bus_8930_sys_fpb_pdata; ++extern struct msm_bus_fabric_registration msm_bus_8930_cpss_fpb_pdata; ++ ++extern struct msm_bus_fabric_registration msm_bus_8974_sys_noc_pdata; ++extern struct msm_bus_fabric_registration msm_bus_8974_mmss_noc_pdata; ++extern struct msm_bus_fabric_registration msm_bus_8974_bimc_pdata; ++extern struct msm_bus_fabric_registration msm_bus_8974_ocmem_noc_pdata; ++extern struct msm_bus_fabric_registration msm_bus_8974_periph_noc_pdata; ++extern struct msm_bus_fabric_registration msm_bus_8974_config_noc_pdata; ++extern struct msm_bus_fabric_registration msm_bus_8974_ocmem_vnoc_pdata; ++ ++extern struct msm_bus_fabric_registration msm_bus_9625_sys_noc_pdata; ++extern struct msm_bus_fabric_registration msm_bus_9625_bimc_pdata; ++extern struct msm_bus_fabric_registration msm_bus_9625_periph_noc_pdata; ++extern struct msm_bus_fabric_registration msm_bus_9625_config_noc_pdata; ++ ++extern int msm_bus_device_match_adhoc(struct device *dev, void *id); ++ ++void msm_bus_rpm_set_mt_mask(void); ++int msm_bus_board_rpm_get_il_ids(uint16_t *id); ++int msm_bus_board_get_iid(int id); ++ ++#define NFAB_MSM8226 6 ++#define NFAB_MSM8610 5 ++ ++/* 
++ * These macros specify the convention followed for allocating ++ * ids to fabrics, masters and slaves for 8x60. ++ * ++ * A node can be identified as a master/slave/fabric by using ++ * these ids. ++ */ ++#define FABRIC_ID_KEY 1024 ++#define SLAVE_ID_KEY ((FABRIC_ID_KEY) >> 1) ++#define MAX_FAB_KEY 7168 /* OR(All fabric ids) */ ++#define INT_NODE_START 10000 ++ ++#define GET_FABID(id) ((id) & MAX_FAB_KEY) ++ ++#define NODE_ID(id) ((id) & (FABRIC_ID_KEY - 1)) ++#define IS_SLAVE(id) ((NODE_ID(id)) >= SLAVE_ID_KEY ? 1 : 0) ++#define CHECK_ID(iid, id) (((iid & id) != id) ? -ENXIO : iid) ++ ++/* ++ * The following macros are used to format the data for port halt ++ * and unhalt requests. ++ */ ++#define MSM_BUS_CLK_HALT 0x1 ++#define MSM_BUS_CLK_HALT_MASK 0x1 ++#define MSM_BUS_CLK_HALT_FIELDSIZE 0x1 ++#define MSM_BUS_CLK_UNHALT 0x0 ++ ++#define MSM_BUS_MASTER_SHIFT(master, fieldsize) \ ++ ((master) * (fieldsize)) ++ ++#define MSM_BUS_SET_BITFIELD(word, fieldmask, fieldvalue) \ ++ { \ ++ (word) &= ~(fieldmask); \ ++ (word) |= (fieldvalue); \ ++ } ++ ++ ++#define MSM_BUS_MASTER_HALT(u32haltmask, u32haltval, master) \ ++ MSM_BUS_SET_BITFIELD(u32haltmask, \ ++ MSM_BUS_CLK_HALT_MASK<<MSM_BUS_MASTER_SHIFT((master), \ ++ MSM_BUS_CLK_HALT_FIELDSIZE), \ ++ MSM_BUS_CLK_HALT_MASK<<MSM_BUS_MASTER_SHIFT((master), \ ++ MSM_BUS_CLK_HALT_FIELDSIZE)) \ ++ MSM_BUS_SET_BITFIELD(u32haltval, \ ++ MSM_BUS_CLK_HALT_MASK<<MSM_BUS_MASTER_SHIFT((master), \ ++ MSM_BUS_CLK_HALT_FIELDSIZE), \ ++ MSM_BUS_CLK_HALT<<MSM_BUS_MASTER_SHIFT((master), \ ++ MSM_BUS_CLK_HALT_FIELDSIZE)) ++ ++#define MSM_BUS_MASTER_UNHALT(u32haltmask, u32haltval, master) \ ++ MSM_BUS_SET_BITFIELD(u32haltmask, \ ++ MSM_BUS_CLK_HALT_MASK<<MSM_BUS_MASTER_SHIFT((master), \ ++ MSM_BUS_CLK_HALT_FIELDSIZE), \ ++ MSM_BUS_CLK_HALT_MASK<<MSM_BUS_MASTER_SHIFT((master), \ ++ MSM_BUS_CLK_HALT_FIELDSIZE)) \ ++ MSM_BUS_SET_BITFIELD(u32haltval, \ ++ MSM_BUS_CLK_HALT_MASK<<MSM_BUS_MASTER_SHIFT((master), \ ++ MSM_BUS_CLK_HALT_FIELDSIZE), \ ++ MSM_BUS_CLK_UNHALT<<MSM_BUS_MASTER_SHIFT((master), \ ++ MSM_BUS_CLK_HALT_FIELDSIZE)) ++ ++ ++#endif /*__ASM_ARCH_MSM_BUS_BOARD_H */ +--- /dev/null ++++ b/drivers/bus/msm_bus/msm-bus.h +@@ -0,0 +1,139 @@ ++/* Copyright (c) 2010-2014, The Linux Foundation. All rights reserved. ++ * ++ * This program is free software; you can redistribute it and/or modify ++ * it under the terms of the GNU General Public License version 2 and ++ * only version 2 as published by the Free Software Foundation. ++ * ++ * This program is distributed in the hope that it will be useful, ++ * but WITHOUT ANY WARRANTY; without even the implied warranty of ++ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the ++ * GNU General Public License for more details.
++ */ ++ ++#ifndef _ARCH_ARM_MACH_MSM_BUS_H ++#define _ARCH_ARM_MACH_MSM_BUS_H ++ ++#include ++#include ++#include ++ ++/* ++ * Macros for clients to convert their data to ib and ab ++ * Ws : Time window over which to transfer the data in SECONDS ++ * Bs : Size of the data block in bytes ++ * Per : Recurrence period ++ * Tb : Throughput bandwidth to prevent stalling ++ * R : Ratio of actual bandwidth used to Tb ++ * Ib : Instantaneous bandwidth ++ * Ab : Arbitrated bandwidth ++ * ++ * IB_RECURRBLOCK and AB_RECURRBLOCK: ++ * These are used if the requirement is to transfer a ++ * recurring block of data over a known time window. ++ * ++ * IB_THROUGHPUTBW and AB_THROUGHPUTBW: ++ * These are used for CPU style masters. Here the requirement ++ * is to have minimum throughput bandwidth available to avoid ++ * stalling. ++ */ ++#define IB_RECURRBLOCK(Ws, Bs) ((Ws) == 0 ? 0 : ((Bs)/(Ws))) ++#define AB_RECURRBLOCK(Ws, Per) ((Ws) == 0 ? 0 : ((Bs)/(Per))) ++#define IB_THROUGHPUTBW(Tb) (Tb) ++#define AB_THROUGHPUTBW(Tb, R) ((Tb) * (R)) ++ ++struct msm_bus_vectors { ++ int src; /* Master */ ++ int dst; /* Slave */ ++ uint64_t ab; /* Arbitrated bandwidth */ ++ uint64_t ib; /* Instantaneous bandwidth */ ++}; ++ ++struct msm_bus_paths { ++ int num_paths; ++ struct msm_bus_vectors *vectors; ++}; ++ ++struct msm_bus_scale_pdata { ++ struct msm_bus_paths *usecase; ++ int num_usecases; ++ const char *name; ++ /* ++ * If the active_only flag is set to 1, the BW request is applied ++ * only when at least one CPU is active (powered on). If the flag ++ * is set to 0, then the BW request is always applied irrespective ++ * of the CPU state. ++ */ ++ unsigned int active_only; ++}; ++ ++/* Scaling APIs */ ++ ++/* ++ * This function returns a handle to the client. This should be used to ++ * call msm_bus_scale_client_update_request. 
++ * The function returns 0 if bus driver is unable to register a client ++ */ ++ ++#if (defined(CONFIG_MSM_BUS_SCALING) || defined(CONFIG_BUS_TOPOLOGY_ADHOC)) ++int __init msm_bus_fabric_init_driver(void); ++uint32_t msm_bus_scale_register_client(struct msm_bus_scale_pdata *pdata); ++int msm_bus_scale_client_update_request(uint32_t cl, unsigned int index); ++void msm_bus_scale_unregister_client(uint32_t cl); ++/* AXI Port configuration APIs */ ++int msm_bus_axi_porthalt(int master_port); ++int msm_bus_axi_portunhalt(int master_port); ++ ++#else ++static inline int __init msm_bus_fabric_init_driver(void) { return 0; } ++ ++static inline uint32_t ++msm_bus_scale_register_client(struct msm_bus_scale_pdata *pdata) ++{ ++ return 1; ++} ++ ++static inline int ++msm_bus_scale_client_update_request(uint32_t cl, unsigned int index) ++{ ++ return 0; ++} ++ ++static inline void ++msm_bus_scale_unregister_client(uint32_t cl) ++{ ++} ++ ++static inline int msm_bus_axi_porthalt(int master_port) ++{ ++ return 0; ++} ++ ++static inline int msm_bus_axi_portunhalt(int master_port) ++{ ++ return 0; ++} ++#endif ++ ++#if defined(CONFIG_OF) && defined(CONFIG_MSM_BUS_SCALING) ++struct msm_bus_scale_pdata *msm_bus_pdata_from_node( ++ struct platform_device *pdev, struct device_node *of_node); ++struct msm_bus_scale_pdata *msm_bus_cl_get_pdata(struct platform_device *pdev); ++void msm_bus_cl_clear_pdata(struct msm_bus_scale_pdata *pdata); ++#else ++static inline struct msm_bus_scale_pdata ++*msm_bus_cl_get_pdata(struct platform_device *pdev) ++{ ++ return NULL; ++} ++ ++static inline struct msm_bus_scale_pdata *msm_bus_pdata_from_node( ++ struct platform_device *pdev, struct device_node *of_node) ++{ ++ return NULL; ++} ++ ++static inline void msm_bus_cl_clear_pdata(struct msm_bus_scale_pdata *pdata) ++{ ++} ++#endif ++#endif /*_ARCH_ARM_MACH_MSM_BUS_H*/ +--- /dev/null ++++ b/drivers/bus/msm_bus/msm_bus_adhoc.h +@@ -0,0 +1,141 @@ ++/* Copyright (c) 2014, The Linux Foundation. 
All rights reserved. ++ * ++ * This program is free software; you can redistribute it and/or modify ++ * it under the terms of the GNU General Public License version 2 and ++ * only version 2 as published by the Free Software Foundation. ++ * ++ * This program is distributed in the hope that it will be useful, ++ * but WITHOUT ANY WARRANTY; without even the implied warranty of ++ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the ++ * GNU General Public License for more details. ++ */ ++ ++#ifndef _ARCH_ARM_MACH_MSM_BUS_ADHOC_H ++#define _ARCH_ARM_MACH_MSM_BUS_ADHOC_H ++ ++#include ++#include ++#include "msm-bus-board.h" ++#include "msm-bus.h" ++#include "msm_bus_rules.h" ++#include "msm_bus_core.h" ++ ++struct msm_bus_node_device_type; ++struct link_node { ++ uint64_t lnode_ib[NUM_CTX]; ++ uint64_t lnode_ab[NUM_CTX]; ++ int next; ++ struct device *next_dev; ++ struct list_head link; ++ uint32_t in_use; ++}; ++ ++/* New types introduced for adhoc topology */ ++struct msm_bus_noc_ops { ++ int (*qos_init)(struct msm_bus_node_device_type *dev, ++ void __iomem *qos_base, uint32_t qos_off, ++ uint32_t qos_delta, uint32_t qos_freq); ++ int (*set_bw)(struct msm_bus_node_device_type *dev, ++ void __iomem *qos_base, uint32_t qos_off, ++ uint32_t qos_delta, uint32_t qos_freq); ++ int (*limit_mport)(struct msm_bus_node_device_type *dev, ++ void __iomem *qos_base, uint32_t qos_off, ++ uint32_t qos_delta, uint32_t qos_freq, bool enable_lim, ++ uint64_t lim_bw); ++ bool (*update_bw_reg)(int mode); ++}; ++ ++struct nodebw { ++ uint64_t ab[NUM_CTX]; ++ bool dirty; ++}; ++ ++struct msm_bus_fab_device_type { ++ void __iomem *qos_base; ++ phys_addr_t pqos_base; ++ size_t qos_range; ++ uint32_t base_offset; ++ uint32_t qos_freq; ++ uint32_t qos_off; ++ uint32_t util_fact; ++ uint32_t vrail_comp; ++ struct msm_bus_noc_ops noc_ops; ++ enum msm_bus_hw_sel bus_type; ++ bool bypass_qos_prg; ++}; ++ ++struct qos_params_type { ++ int mode; ++ unsigned int prio_lvl; ++ unsigned 
int prio_rd; ++ unsigned int prio_wr; ++ unsigned int prio1; ++ unsigned int prio0; ++ unsigned int gp; ++ unsigned int thmp; ++ unsigned int ws; ++ int cur_mode; ++ u64 bw_buffer; ++}; ++ ++struct msm_bus_node_info_type { ++ const char *name; ++ unsigned int id; ++ int mas_rpm_id; ++ int slv_rpm_id; ++ int num_ports; ++ int num_qports; ++ int *qport; ++ struct qos_params_type qos_params; ++ unsigned int num_connections; ++ unsigned int num_blist; ++ bool is_fab_dev; ++ bool virt_dev; ++ bool is_traversed; ++ unsigned int *connections; ++ unsigned int *black_listed_connections; ++ struct device **dev_connections; ++ struct device **black_connections; ++ unsigned int bus_device_id; ++ struct device *bus_device; ++ unsigned int buswidth; ++ struct rule_update_path_info rule; ++ uint64_t lim_bw; ++ uint32_t util_fact; ++ uint32_t vrail_comp; ++}; ++ ++struct msm_bus_node_device_type { ++ struct msm_bus_node_info_type *node_info; ++ struct msm_bus_fab_device_type *fabdev; ++ int num_lnodes; ++ struct link_node *lnode_list; ++ uint64_t cur_clk_hz[NUM_CTX]; ++ struct nodebw node_ab; ++ struct list_head link; ++ unsigned int ap_owned; ++ struct nodeclk clk[NUM_CTX]; ++ struct nodeclk qos_clk; ++}; ++ ++int msm_bus_enable_limiter(struct msm_bus_node_device_type *nodedev, ++ bool throttle_en, uint64_t lim_bw); ++int msm_bus_update_clks(struct msm_bus_node_device_type *nodedev, ++ int ctx, int **dirty_nodes, int *num_dirty); ++int msm_bus_commit_data(int *dirty_nodes, int ctx, int num_dirty); ++int msm_bus_update_bw(struct msm_bus_node_device_type *nodedev, int ctx, ++ int64_t add_bw, int **dirty_nodes, int *num_dirty); ++void *msm_bus_realloc_devmem(struct device *dev, void *p, size_t old_size, ++ size_t new_size, gfp_t flags); ++ ++extern struct msm_bus_device_node_registration ++ *msm_bus_of_to_pdata(struct platform_device *pdev); ++extern void msm_bus_arb_setops_adhoc(struct msm_bus_arb_ops *arb_ops); ++extern int msm_bus_bimc_set_ops(struct msm_bus_node_device_type 
*bus_dev); ++extern int msm_bus_noc_set_ops(struct msm_bus_node_device_type *bus_dev); ++extern int msm_bus_of_get_static_rules(struct platform_device *pdev, ++ struct bus_rule_type **static_rule); ++extern int msm_rules_update_path(struct list_head *input_list, ++ struct list_head *output_list); ++extern void print_all_rules(void); ++#endif /* _ARCH_ARM_MACH_MSM_BUS_ADHOC_H */ +--- /dev/null ++++ b/drivers/bus/msm_bus/msm_bus_arb_adhoc.c +@@ -0,0 +1,998 @@ ++/* Copyright (c) 2014, The Linux Foundation. All rights reserved. ++ * ++ * This program is free software; you can redistribute it and/or modify ++ * it under the terms of the GNU General Public License version 2 and ++ * only version 2 as published by the Free Software Foundation. ++ * ++ * This program is distributed in the hope that it will be useful, ++ * but WITHOUT ANY WARRANTY; without even the implied warranty of ++ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the ++ * GNU General Public License for more details.
++ */ ++#include ++#include ++#include ++#include ++#include ++#include ++#include ++#include "msm-bus.h" ++#include "msm_bus_core.h" ++#include "msm_bus_adhoc.h" ++ ++#define NUM_CL_HANDLES 50 ++#define NUM_LNODES 3 ++ ++struct bus_search_type { ++ struct list_head link; ++ struct list_head node_list; ++}; ++ ++struct handle_type { ++ int num_entries; ++ struct msm_bus_client **cl_list; ++}; ++ ++static struct handle_type handle_list; ++struct list_head input_list; ++struct list_head apply_list; ++ ++DEFINE_MUTEX(msm_bus_adhoc_lock); ++ ++static bool chk_bl_list(struct list_head *black_list, unsigned int id) ++{ ++ struct msm_bus_node_device_type *bus_node = NULL; ++ ++ list_for_each_entry(bus_node, black_list, link) { ++ if (bus_node->node_info->id == id) ++ return true; ++ } ++ return false; ++} ++ ++static void copy_remaining_nodes(struct list_head *edge_list, struct list_head ++ *traverse_list, struct list_head *route_list) ++{ ++ struct bus_search_type *search_node; ++ ++ if (list_empty(edge_list) && list_empty(traverse_list)) ++ return; ++ ++ search_node = kzalloc(sizeof(struct bus_search_type), GFP_KERNEL); ++ INIT_LIST_HEAD(&search_node->node_list); ++ list_splice_init(edge_list, traverse_list); ++ list_splice_init(traverse_list, &search_node->node_list); ++ list_add_tail(&search_node->link, route_list); ++} ++ ++/* ++ * Duplicate instantiaion from msm_bus_arb.c. Todo there needs to be a ++ * "util" file for these common func/macros. 
++ * ++ * */ ++uint64_t msm_bus_div64(unsigned int w, uint64_t bw) ++{ ++ uint64_t *b = &bw; ++ ++ if ((bw > 0) && (bw < w)) ++ return 1; ++ ++ switch (w) { ++ case 0: ++ WARN(1, "AXI: Divide by 0 attempted\n"); ++ case 1: return bw; ++ case 2: return (bw >> 1); ++ case 4: return (bw >> 2); ++ case 8: return (bw >> 3); ++ case 16: return (bw >> 4); ++ case 32: return (bw >> 5); ++ } ++ ++ do_div(*b, w); ++ return *b; ++} ++ ++int msm_bus_device_match_adhoc(struct device *dev, void *id) ++{ ++ int ret = 0; ++ struct msm_bus_node_device_type *bnode = dev->platform_data; ++ ++ if (bnode) ++ ret = (bnode->node_info->id == *(unsigned int *)id); ++ else ++ ret = 0; ++ ++ return ret; ++} ++ ++static int gen_lnode(struct device *dev, ++ int next_hop, int prev_idx) ++{ ++ struct link_node *lnode; ++ struct msm_bus_node_device_type *cur_dev = NULL; ++ int lnode_idx = -1; ++ ++ if (!dev) ++ goto exit_gen_lnode; ++ ++ cur_dev = dev->platform_data; ++ if (!cur_dev) { ++ MSM_BUS_ERR("%s: Null device ptr", __func__); ++ goto exit_gen_lnode; ++ } ++ ++ if (!cur_dev->num_lnodes) { ++ cur_dev->lnode_list = devm_kzalloc(dev, ++ sizeof(struct link_node) * NUM_LNODES, ++ GFP_KERNEL); ++ if (!cur_dev->lnode_list) ++ goto exit_gen_lnode; ++ ++ lnode = cur_dev->lnode_list; ++ cur_dev->num_lnodes = NUM_LNODES; ++ lnode_idx = 0; ++ } else { ++ int i; ++ for (i = 0; i < cur_dev->num_lnodes; i++) { ++ if (!cur_dev->lnode_list[i].in_use) ++ break; ++ } ++ ++ if (i < cur_dev->num_lnodes) { ++ lnode = &cur_dev->lnode_list[i]; ++ lnode_idx = i; ++ } else { ++ struct link_node *realloc_list; ++ size_t cur_size = sizeof(struct link_node) * ++ cur_dev->num_lnodes; ++ ++ cur_dev->num_lnodes += NUM_LNODES; ++ realloc_list = msm_bus_realloc_devmem( ++ dev, ++ cur_dev->lnode_list, ++ cur_size, ++ sizeof(struct link_node) * ++ cur_dev->num_lnodes, GFP_KERNEL); ++ ++ if (!realloc_list) ++ goto exit_gen_lnode; ++ ++ cur_dev->lnode_list = realloc_list; ++ lnode = &cur_dev->lnode_list[i]; ++ lnode_idx = i; 
++ } ++ } ++ ++ lnode->in_use = 1; ++ if (next_hop == cur_dev->node_info->id) { ++ lnode->next = -1; ++ lnode->next_dev = NULL; ++ } else { ++ lnode->next = prev_idx; ++ lnode->next_dev = bus_find_device(&msm_bus_type, NULL, ++ (void *) &next_hop, ++ msm_bus_device_match_adhoc); ++ } ++ ++ memset(lnode->lnode_ib, 0, sizeof(uint64_t) * NUM_CTX); ++ memset(lnode->lnode_ab, 0, sizeof(uint64_t) * NUM_CTX); ++ ++exit_gen_lnode: ++ return lnode_idx; ++} ++ ++static int remove_lnode(struct msm_bus_node_device_type *cur_dev, ++ int lnode_idx) ++{ ++ int ret = 0; ++ ++ if (!cur_dev) { ++ MSM_BUS_ERR("%s: Null device ptr", __func__); ++ ret = -ENODEV; ++ goto exit_remove_lnode; ++ } ++ ++ if (lnode_idx != -1) { ++ if (!cur_dev->num_lnodes || ++ (lnode_idx > (cur_dev->num_lnodes - 1))) { ++ MSM_BUS_ERR("%s: Invalid Idx %d, num_lnodes %d", ++ __func__, lnode_idx, cur_dev->num_lnodes); ++ ret = -ENODEV; ++ goto exit_remove_lnode; ++ } ++ ++ cur_dev->lnode_list[lnode_idx].next = -1; ++ cur_dev->lnode_list[lnode_idx].next_dev = NULL; ++ cur_dev->lnode_list[lnode_idx].in_use = 0; ++ } ++ ++exit_remove_lnode: ++ return ret; ++} ++ ++static int prune_path(struct list_head *route_list, int dest, int src, ++ struct list_head *black_list, int found) ++{ ++ struct bus_search_type *search_node, *temp_search_node; ++ struct msm_bus_node_device_type *bus_node; ++ struct list_head *bl_list; ++ struct list_head *temp_bl_list; ++ int search_dev_id = dest; ++ struct device *dest_dev = bus_find_device(&msm_bus_type, NULL, ++ (void *) &dest, ++ msm_bus_device_match_adhoc); ++ int lnode_hop = -1; ++ ++ if (!found) ++ goto reset_links; ++ ++ if (!dest_dev) { ++ MSM_BUS_ERR("%s: Can't find dest dev %d", __func__, dest); ++ goto exit_prune_path; ++ } ++ ++ lnode_hop = gen_lnode(dest_dev, search_dev_id, lnode_hop); ++ ++ list_for_each_entry_reverse(search_node, route_list, link) { ++ list_for_each_entry(bus_node, &search_node->node_list, link) { ++ unsigned int i; ++ for (i = 0; i < 
bus_node->node_info->num_connections; ++ i++) { ++ if (bus_node->node_info->connections[i] == ++ search_dev_id) { ++ dest_dev = bus_find_device( ++ &msm_bus_type, ++ NULL, ++ (void *) ++ &bus_node->node_info-> ++ id, ++ msm_bus_device_match_adhoc); ++ ++ if (!dest_dev) { ++ lnode_hop = -1; ++ goto reset_links; ++ } ++ ++ lnode_hop = gen_lnode(dest_dev, ++ search_dev_id, ++ lnode_hop); ++ search_dev_id = ++ bus_node->node_info->id; ++ break; ++ } ++ } ++ } ++ } ++reset_links: ++ list_for_each_entry_safe(search_node, temp_search_node, route_list, ++ link) { ++ list_for_each_entry(bus_node, &search_node->node_list, ++ link) ++ bus_node->node_info->is_traversed = false; ++ ++ list_del(&search_node->link); ++ kfree(search_node); ++ } ++ ++ list_for_each_safe(bl_list, temp_bl_list, black_list) ++ list_del(bl_list); ++ ++exit_prune_path: ++ return lnode_hop; ++} ++ ++static void setup_bl_list(struct msm_bus_node_device_type *node, ++ struct list_head *black_list) ++{ ++ unsigned int i; ++ ++ for (i = 0; i < node->node_info->num_blist; i++) { ++ struct msm_bus_node_device_type *bdev; ++ bdev = node->node_info->black_connections[i]->platform_data; ++ list_add_tail(&bdev->link, black_list); ++ } ++} ++ ++static int getpath(int src, int dest) ++{ ++ struct list_head traverse_list; ++ struct list_head edge_list; ++ struct list_head route_list; ++ struct list_head black_list; ++ struct device *src_dev = bus_find_device(&msm_bus_type, NULL, ++ (void *) &src, ++ msm_bus_device_match_adhoc); ++ struct msm_bus_node_device_type *src_node; ++ struct bus_search_type *search_node; ++ int found = 0; ++ int depth_index = 0; ++ int first_hop = -1; ++ ++ INIT_LIST_HEAD(&traverse_list); ++ INIT_LIST_HEAD(&edge_list); ++ INIT_LIST_HEAD(&route_list); ++ INIT_LIST_HEAD(&black_list); ++ ++ if (!src_dev) { ++ MSM_BUS_ERR("%s: Cannot locate src dev %d", __func__, src); ++ goto exit_getpath; ++ } ++ ++ src_node = src_dev->platform_data; ++ if (!src_node) { ++ MSM_BUS_ERR("%s:Fatal, Source dev %d 
not found", __func__, src); ++ goto exit_getpath; ++ } ++ list_add_tail(&src_node->link, &traverse_list); ++ ++ while ((!found && !list_empty(&traverse_list))) { ++ struct msm_bus_node_device_type *bus_node = NULL; ++ /* Locate dest_id in the traverse list */ ++ list_for_each_entry(bus_node, &traverse_list, link) { ++ if (bus_node->node_info->id == dest) { ++ found = 1; ++ break; ++ } ++ } ++ ++ if (!found) { ++ unsigned int i; ++ /* Setup the new edge list */ ++ list_for_each_entry(bus_node, &traverse_list, link) { ++ /* Setup list of black-listed nodes */ ++ setup_bl_list(bus_node, &black_list); ++ ++ for (i = 0; i < bus_node->node_info-> ++ num_connections; i++) { ++ bool skip; ++ struct msm_bus_node_device_type ++ *node_conn; ++ node_conn = bus_node->node_info-> ++ dev_connections[i]-> ++ platform_data; ++ if (node_conn->node_info-> ++ is_traversed) { ++ MSM_BUS_ERR("Circ Path %d\n", ++ node_conn->node_info->id); ++ goto reset_traversed; ++ } ++ skip = chk_bl_list(&black_list, ++ bus_node->node_info-> ++ connections[i]); ++ if (!skip) { ++ list_add_tail(&node_conn->link, ++ &edge_list); ++ node_conn->node_info-> ++ is_traversed = true; ++ } ++ } ++ } ++ ++ /* Keep tabs of the previous search list */ ++ search_node = kzalloc(sizeof(struct bus_search_type), ++ GFP_KERNEL); ++ INIT_LIST_HEAD(&search_node->node_list); ++ list_splice_init(&traverse_list, ++ &search_node->node_list); ++ /* Add the previous search list to a route list */ ++ list_add_tail(&search_node->link, &route_list); ++ /* Advancing the list depth */ ++ depth_index++; ++ list_splice_init(&edge_list, &traverse_list); ++ } ++ } ++reset_traversed: ++ copy_remaining_nodes(&edge_list, &traverse_list, &route_list); ++ first_hop = prune_path(&route_list, dest, src, &black_list, found); ++ ++exit_getpath: ++ return first_hop; ++} ++ ++static uint64_t arbitrate_bus_req(struct msm_bus_node_device_type *bus_dev, ++ int ctx) ++{ ++ int i; ++ uint64_t max_ib = 0; ++ uint64_t sum_ab = 0; ++ uint64_t bw_max_hz; 
++ struct msm_bus_node_device_type *fab_dev = NULL; ++ uint32_t util_fact = 0; ++ uint32_t vrail_comp = 0; ++ ++ /* Find max ib */ ++ for (i = 0; i < bus_dev->num_lnodes; i++) { ++ max_ib = max(max_ib, bus_dev->lnode_list[i].lnode_ib[ctx]); ++ sum_ab += bus_dev->lnode_list[i].lnode_ab[ctx]; ++ } ++ /* ++ * Account for Util factor and vrail comp. The new aggregation ++ * formula is: ++ * Freq_hz = max((sum(ab) * util_fact)/num_chan, max(ib)/vrail_comp) ++ * / bus-width ++ * util_fact and vrail comp are obtained from fabric/Node's dts ++ * properties. ++ * They default to 100 if absent. ++ */ ++ fab_dev = bus_dev->node_info->bus_device->platform_data; ++ /* Don't do this for virtual fabrics */ ++ if (fab_dev && fab_dev->fabdev) { ++ util_fact = bus_dev->node_info->util_fact ? ++ bus_dev->node_info->util_fact : ++ fab_dev->fabdev->util_fact; ++ vrail_comp = bus_dev->node_info->vrail_comp ? ++ bus_dev->node_info->vrail_comp : ++ fab_dev->fabdev->vrail_comp; ++ sum_ab *= util_fact; ++ sum_ab = msm_bus_div64(100, sum_ab); ++ max_ib *= 100; ++ max_ib = msm_bus_div64(vrail_comp, max_ib); ++ } ++ ++ /* Account for multiple channels if any */ ++ if (bus_dev->node_info->num_qports > 1) ++ sum_ab = msm_bus_div64(bus_dev->node_info->num_qports, ++ sum_ab); ++ ++ if (!bus_dev->node_info->buswidth) { ++ MSM_BUS_WARN("No bus width found for %d. 
Using default\n", ++ bus_dev->node_info->id); ++ bus_dev->node_info->buswidth = 8; ++ } ++ ++ bw_max_hz = max(max_ib, sum_ab); ++ bw_max_hz = msm_bus_div64(bus_dev->node_info->buswidth, ++ bw_max_hz); ++ ++ return bw_max_hz; ++} ++ ++static void del_inp_list(struct list_head *list) ++{ ++ struct rule_update_path_info *rule_node; ++ struct rule_update_path_info *rule_node_tmp; ++ ++ list_for_each_entry_safe(rule_node, rule_node_tmp, list, link) ++ list_del(&rule_node->link); ++} ++ ++static void del_op_list(struct list_head *list) ++{ ++ struct rule_apply_rcm_info *rule; ++ struct rule_apply_rcm_info *rule_tmp; ++ ++ list_for_each_entry_safe(rule, rule_tmp, list, link) ++ list_del(&rule->link); ++} ++ ++static int msm_bus_apply_rules(struct list_head *list, bool after_clk_commit) ++{ ++ struct rule_apply_rcm_info *rule; ++ struct device *dev = NULL; ++ struct msm_bus_node_device_type *dev_info = NULL; ++ int ret = 0; ++ bool throttle_en = false; ++ ++ list_for_each_entry(rule, list, link) { ++ if (!rule) ++ break; ++ ++ if (rule && (rule->after_clk_commit != after_clk_commit)) ++ continue; ++ ++ dev = bus_find_device(&msm_bus_type, NULL, ++ (void *) &rule->id, ++ msm_bus_device_match_adhoc); ++ ++ if (!dev) { ++ MSM_BUS_ERR("Can't find dev node for %d", rule->id); ++ continue; ++ } ++ dev_info = dev->platform_data; ++ ++ throttle_en = ((rule->throttle == THROTTLE_ON) ? 
true : false); ++ ret = msm_bus_enable_limiter(dev_info, throttle_en, ++ rule->lim_bw); ++ if (ret) ++ MSM_BUS_ERR("Failed to set limiter for %d", rule->id); ++ } ++ ++ return ret; ++} ++ ++static uint64_t get_node_aggab(struct msm_bus_node_device_type *bus_dev) ++{ ++ int i; ++ int ctx; ++ uint64_t max_agg_ab = 0; ++ uint64_t agg_ab = 0; ++ ++ for (ctx = 0; ctx < NUM_CTX; ctx++) { ++ for (i = 0; i < bus_dev->num_lnodes; i++) ++ agg_ab += bus_dev->lnode_list[i].lnode_ab[ctx]; ++ ++ if (bus_dev->node_info->num_qports > 1) ++ agg_ab = msm_bus_div64(bus_dev->node_info->num_qports, ++ agg_ab); ++ ++ max_agg_ab = max(max_agg_ab, agg_ab); ++ } ++ ++ return max_agg_ab; ++} ++ ++static uint64_t get_node_ib(struct msm_bus_node_device_type *bus_dev) ++{ ++ int i; ++ int ctx; ++ uint64_t max_ib = 0; ++ ++ for (ctx = 0; ctx < NUM_CTX; ctx++) { ++ for (i = 0; i < bus_dev->num_lnodes; i++) ++ max_ib = max(max_ib, ++ bus_dev->lnode_list[i].lnode_ib[ctx]); ++ } ++ return max_ib; ++} ++ ++static int update_path(int src, int dest, uint64_t req_ib, uint64_t req_bw, ++ uint64_t cur_ib, uint64_t cur_bw, int src_idx, int ctx) ++{ ++ struct device *src_dev = NULL; ++ struct device *next_dev = NULL; ++ struct link_node *lnode = NULL; ++ struct msm_bus_node_device_type *dev_info = NULL; ++ int curr_idx; ++ int ret = 0; ++ int *dirty_nodes = NULL; ++ int num_dirty = 0; ++ struct rule_update_path_info *rule_node; ++ bool rules_registered = msm_rule_are_rules_registered(); ++ ++ src_dev = bus_find_device(&msm_bus_type, NULL, ++ (void *) &src, ++ msm_bus_device_match_adhoc); ++ ++ if (!src_dev) { ++ MSM_BUS_ERR("%s: Can't find source device %d", __func__, src); ++ ret = -ENODEV; ++ goto exit_update_path; ++ } ++ ++ next_dev = src_dev; ++ ++ if (src_idx < 0) { ++ MSM_BUS_ERR("%s: Invalid lnode idx %d", __func__, src_idx); ++ ret = -ENXIO; ++ goto exit_update_path; ++ } ++ curr_idx = src_idx; ++ ++ INIT_LIST_HEAD(&input_list); ++ INIT_LIST_HEAD(&apply_list); ++ ++ while (next_dev) { ++ dev_info 
= next_dev->platform_data; ++ ++ if (curr_idx >= dev_info->num_lnodes) { ++ MSM_BUS_ERR("%s: Invalid lnode Idx %d num lnodes %d", ++ __func__, curr_idx, dev_info->num_lnodes); ++ ret = -ENXIO; ++ goto exit_update_path; ++ } ++ ++ lnode = &dev_info->lnode_list[curr_idx]; ++ lnode->lnode_ib[ctx] = req_ib; ++ lnode->lnode_ab[ctx] = req_bw; ++ ++ dev_info->cur_clk_hz[ctx] = arbitrate_bus_req(dev_info, ctx); ++ ++ /* Start updating the clocks at the first hop. ++ * Its ok to figure out the aggregated ++ * request at this node. ++ */ ++ if (src_dev != next_dev) { ++ ret = msm_bus_update_clks(dev_info, ctx, &dirty_nodes, ++ &num_dirty); ++ if (ret) { ++ MSM_BUS_ERR("%s: Failed to update clks dev %d", ++ __func__, dev_info->node_info->id); ++ goto exit_update_path; ++ } ++ } ++ ++ ret = msm_bus_update_bw(dev_info, ctx, req_bw, &dirty_nodes, ++ &num_dirty); ++ if (ret) { ++ MSM_BUS_ERR("%s: Failed to update bw dev %d", ++ __func__, dev_info->node_info->id); ++ goto exit_update_path; ++ } ++ ++ if (rules_registered) { ++ rule_node = &dev_info->node_info->rule; ++ rule_node->id = dev_info->node_info->id; ++ rule_node->ib = get_node_ib(dev_info); ++ rule_node->ab = get_node_aggab(dev_info); ++ rule_node->clk = max(dev_info->cur_clk_hz[ACTIVE_CTX], ++ dev_info->cur_clk_hz[DUAL_CTX]); ++ list_add_tail(&rule_node->link, &input_list); ++ } ++ ++ next_dev = lnode->next_dev; ++ curr_idx = lnode->next; ++ } ++ ++ if (rules_registered) { ++ msm_rules_update_path(&input_list, &apply_list); ++ msm_bus_apply_rules(&apply_list, false); ++ } ++ ++ msm_bus_commit_data(dirty_nodes, ctx, num_dirty); ++ ++ if (rules_registered) { ++ msm_bus_apply_rules(&apply_list, true); ++ del_inp_list(&input_list); ++ del_op_list(&apply_list); ++ } ++exit_update_path: ++ return ret; ++} ++ ++static int remove_path(int src, int dst, uint64_t cur_ib, uint64_t cur_ab, ++ int src_idx, int active_only) ++{ ++ struct device *src_dev = NULL; ++ struct device *next_dev = NULL; ++ struct link_node *lnode = NULL; ++ 
struct msm_bus_node_device_type *dev_info = NULL; ++ int ret = 0; ++ int cur_idx = src_idx; ++ int next_idx; ++ ++ /* Update the current path to zero out all request from ++ * this cient on all paths ++ */ ++ ++ ret = update_path(src, dst, 0, 0, cur_ib, cur_ab, src_idx, ++ active_only); ++ if (ret) { ++ MSM_BUS_ERR("%s: Error zeroing out path ctx %d", ++ __func__, ACTIVE_CTX); ++ goto exit_remove_path; ++ } ++ ++ src_dev = bus_find_device(&msm_bus_type, NULL, ++ (void *) &src, ++ msm_bus_device_match_adhoc); ++ if (!src_dev) { ++ MSM_BUS_ERR("%s: Can't find source device %d", __func__, src); ++ ret = -ENODEV; ++ goto exit_remove_path; ++ } ++ ++ next_dev = src_dev; ++ ++ while (next_dev) { ++ dev_info = next_dev->platform_data; ++ lnode = &dev_info->lnode_list[cur_idx]; ++ next_idx = lnode->next; ++ next_dev = lnode->next_dev; ++ remove_lnode(dev_info, cur_idx); ++ cur_idx = next_idx; ++ } ++ ++exit_remove_path: ++ return ret; ++} ++ ++static void getpath_debug(int src, int curr, int active_only) ++{ ++ struct device *dev_node; ++ struct device *dev_it; ++ unsigned int hop = 1; ++ int idx; ++ struct msm_bus_node_device_type *devinfo; ++ int i; ++ ++ dev_node = bus_find_device(&msm_bus_type, NULL, ++ (void *) &src, ++ msm_bus_device_match_adhoc); ++ ++ if (!dev_node) { ++ MSM_BUS_ERR("SRC NOT FOUND %d", src); ++ return; ++ } ++ ++ idx = curr; ++ devinfo = dev_node->platform_data; ++ dev_it = dev_node; ++ ++ MSM_BUS_ERR("Route list Src %d", src); ++ while (dev_it) { ++ struct msm_bus_node_device_type *busdev = ++ devinfo->node_info->bus_device->platform_data; ++ ++ MSM_BUS_ERR("Hop[%d] at Device %d ctx %d", hop, ++ devinfo->node_info->id, active_only); ++ ++ for (i = 0; i < NUM_CTX; i++) { ++ MSM_BUS_ERR("dev info sel ib %llu", ++ devinfo->cur_clk_hz[i]); ++ MSM_BUS_ERR("dev info sel ab %llu", ++ devinfo->node_ab.ab[i]); ++ } ++ ++ dev_it = devinfo->lnode_list[idx].next_dev; ++ idx = devinfo->lnode_list[idx].next; ++ if (dev_it) ++ devinfo = dev_it->platform_data; ++ 
++ MSM_BUS_ERR("Bus Device %d", busdev->node_info->id); ++ MSM_BUS_ERR("Bus Clock %llu", busdev->clk[active_only].rate); ++ ++ if (idx < 0) ++ break; ++ hop++; ++ } ++} ++ ++static void unregister_client_adhoc(uint32_t cl) ++{ ++ int i; ++ struct msm_bus_scale_pdata *pdata; ++ int lnode, src, curr, dest; ++ uint64_t cur_clk, cur_bw; ++ struct msm_bus_client *client; ++ ++ mutex_lock(&msm_bus_adhoc_lock); ++ if (!cl) { ++ MSM_BUS_ERR("%s: Null cl handle passed unregister\n", ++ __func__); ++ goto exit_unregister_client; ++ } ++ client = handle_list.cl_list[cl]; ++ pdata = client->pdata; ++ if (!pdata) { ++ MSM_BUS_ERR("%s: Null pdata passed to unregister\n", ++ __func__); ++ goto exit_unregister_client; ++ } ++ ++ curr = client->curr; ++ if (curr >= pdata->num_usecases) { ++ MSM_BUS_ERR("Invalid index Defaulting curr to 0"); ++ curr = 0; ++ } ++ ++ MSM_BUS_DBG("%s: Unregistering client %p", __func__, client); ++ ++ for (i = 0; i < pdata->usecase->num_paths; i++) { ++ src = client->pdata->usecase[curr].vectors[i].src; ++ dest = client->pdata->usecase[curr].vectors[i].dst; ++ ++ lnode = client->src_pnode[i]; ++ cur_clk = client->pdata->usecase[curr].vectors[i].ib; ++ cur_bw = client->pdata->usecase[curr].vectors[i].ab; ++ remove_path(src, dest, cur_clk, cur_bw, lnode, ++ pdata->active_only); ++ } ++ msm_bus_dbg_client_data(client->pdata, MSM_BUS_DBG_UNREGISTER, cl); ++ kfree(client->src_pnode); ++ kfree(client); ++ handle_list.cl_list[cl] = NULL; ++exit_unregister_client: ++ mutex_unlock(&msm_bus_adhoc_lock); ++ return; ++} ++ ++static int alloc_handle_lst(int size) ++{ ++ int ret = 0; ++ struct msm_bus_client **t_cl_list; ++ ++ if (!handle_list.num_entries) { ++ t_cl_list = kzalloc(sizeof(struct msm_bus_client *) ++ * NUM_CL_HANDLES, GFP_KERNEL); ++ if (ZERO_OR_NULL_PTR(t_cl_list)) { ++ ret = -ENOMEM; ++ MSM_BUS_ERR("%s: Failed to allocate handles list", ++ __func__); ++ goto exit_alloc_handle_lst; ++ } ++ handle_list.cl_list = t_cl_list; ++ handle_list.num_entries 
+= NUM_CL_HANDLES; ++ } else { ++ t_cl_list = krealloc(handle_list.cl_list, ++ sizeof(struct msm_bus_client *) * ++ handle_list.num_entries + NUM_CL_HANDLES, ++ GFP_KERNEL); ++ if (ZERO_OR_NULL_PTR(t_cl_list)) { ++ ret = -ENOMEM; ++ MSM_BUS_ERR("%s: Failed to allocate handles list", ++ __func__); ++ goto exit_alloc_handle_lst; ++ } ++ ++ memset(&handle_list.cl_list[handle_list.num_entries], 0, ++ NUM_CL_HANDLES * sizeof(struct msm_bus_client *)); ++ handle_list.num_entries += NUM_CL_HANDLES; ++ handle_list.cl_list = t_cl_list; ++ } ++exit_alloc_handle_lst: ++ return ret; ++} ++ ++static uint32_t gen_handle(struct msm_bus_client *client) ++{ ++ uint32_t handle = 0; ++ int i; ++ int ret = 0; ++ ++ for (i = 0; i < handle_list.num_entries; i++) { ++ if (i && !handle_list.cl_list[i]) { ++ handle = i; ++ break; ++ } ++ } ++ ++ if (!handle) { ++ ret = alloc_handle_lst(NUM_CL_HANDLES); ++ ++ if (ret) { ++ MSM_BUS_ERR("%s: Failed to allocate handle list", ++ __func__); ++ goto exit_gen_handle; ++ } ++ handle = i + 1; ++ } ++ handle_list.cl_list[handle] = client; ++exit_gen_handle: ++ return handle; ++} ++ ++static uint32_t register_client_adhoc(struct msm_bus_scale_pdata *pdata) ++{ ++ int src, dest; ++ int i; ++ struct msm_bus_client *client = NULL; ++ int *lnode; ++ uint32_t handle = 0; ++ ++ mutex_lock(&msm_bus_adhoc_lock); ++ client = kzalloc(sizeof(struct msm_bus_client), GFP_KERNEL); ++ if (!client) { ++ MSM_BUS_ERR("%s: Error allocating client data", __func__); ++ goto exit_register_client; ++ } ++ client->pdata = pdata; ++ ++ lnode = kzalloc(pdata->usecase->num_paths * sizeof(int), GFP_KERNEL); ++ if (ZERO_OR_NULL_PTR(lnode)) { ++ MSM_BUS_ERR("%s: Error allocating pathnode ptr!", __func__); ++ goto exit_register_client; ++ } ++ client->src_pnode = lnode; ++ ++ for (i = 0; i < pdata->usecase->num_paths; i++) { ++ src = pdata->usecase->vectors[i].src; ++ dest = pdata->usecase->vectors[i].dst; ++ ++ if ((src < 0) || (dest < 0)) { ++ MSM_BUS_ERR("%s:Invalid src/dst.src 
%d dest %d", ++ __func__, src, dest); ++ goto exit_register_client; ++ } ++ ++ lnode[i] = getpath(src, dest); ++ if (lnode[i] < 0) { ++ MSM_BUS_ERR("%s:Failed to find path.src %d dest %d", ++ __func__, src, dest); ++ goto exit_register_client; ++ } ++ } ++ ++ handle = gen_handle(client); ++ msm_bus_dbg_client_data(client->pdata, MSM_BUS_DBG_REGISTER, ++ handle); ++ MSM_BUS_DBG("%s:Client handle %d %s", __func__, handle, ++ client->pdata->name); ++exit_register_client: ++ mutex_unlock(&msm_bus_adhoc_lock); ++ return handle; ++} ++ ++static int update_request_adhoc(uint32_t cl, unsigned int index) ++{ ++ int i, ret = 0; ++ struct msm_bus_scale_pdata *pdata; ++ int lnode, src, curr, dest; ++ uint64_t req_clk, req_bw, curr_clk, curr_bw; ++ struct msm_bus_client *client; ++ const char *test_cl = "Null"; ++ bool log_transaction = false; ++ ++ mutex_lock(&msm_bus_adhoc_lock); ++ ++ if (!cl) { ++ MSM_BUS_ERR("%s: Invalid client handle %d", __func__, cl); ++ ret = -ENXIO; ++ goto exit_update_request; ++ } ++ ++ client = handle_list.cl_list[cl]; ++ pdata = client->pdata; ++ if (!pdata) { ++ MSM_BUS_ERR("%s: Client data Null.[client didn't register]", ++ __func__); ++ ret = -ENXIO; ++ goto exit_update_request; ++ } ++ ++ if (index >= pdata->num_usecases) { ++ MSM_BUS_ERR("Client %u passed invalid index: %d\n", ++ cl, index); ++ ret = -ENXIO; ++ goto exit_update_request; ++ } ++ ++ if (client->curr == index) { ++ MSM_BUS_DBG("%s: Not updating client request idx %d unchanged", ++ __func__, index); ++ goto exit_update_request; ++ } ++ ++ curr = client->curr; ++ client->curr = index; ++ ++ if (!strcmp(test_cl, pdata->name)) ++ log_transaction = true; ++ ++ MSM_BUS_DBG("%s: cl: %u index: %d curr: %d num_paths: %d\n", __func__, ++ cl, index, client->curr, client->pdata->usecase->num_paths); ++ ++ for (i = 0; i < pdata->usecase->num_paths; i++) { ++ src = client->pdata->usecase[index].vectors[i].src; ++ dest = client->pdata->usecase[index].vectors[i].dst; ++ ++ lnode = 
client->src_pnode[i]; ++ req_clk = client->pdata->usecase[index].vectors[i].ib; ++ req_bw = client->pdata->usecase[index].vectors[i].ab; ++ if (curr < 0) { ++ curr_clk = 0; ++ curr_bw = 0; ++ } else { ++ curr_clk = client->pdata->usecase[curr].vectors[i].ib; ++ curr_bw = client->pdata->usecase[curr].vectors[i].ab; ++ MSM_BUS_DBG("%s:ab: %llu ib: %llu\n", __func__, ++ curr_bw, curr_clk); ++ } ++ ++ ret = update_path(src, dest, req_clk, req_bw, ++ curr_clk, curr_bw, lnode, pdata->active_only); ++ ++ if (ret) { ++ MSM_BUS_ERR("%s: Update path failed! %d ctx %d\n", ++ __func__, ret, ACTIVE_CTX); ++ goto exit_update_request; ++ } ++ ++ if (log_transaction) ++ getpath_debug(src, lnode, pdata->active_only); ++ } ++ msm_bus_dbg_client_data(client->pdata, index , cl); ++exit_update_request: ++ mutex_unlock(&msm_bus_adhoc_lock); ++ return ret; ++} ++ ++/** ++ * msm_bus_arb_setops_adhoc() : Setup the bus arbitration ops ++ * @ arb_ops: pointer to the arb ops. ++ */ ++void msm_bus_arb_setops_adhoc(struct msm_bus_arb_ops *arb_ops) ++{ ++ arb_ops->register_client = register_client_adhoc; ++ arb_ops->update_request = update_request_adhoc; ++ arb_ops->unregister_client = unregister_client_adhoc; ++} +--- /dev/null ++++ b/drivers/bus/msm_bus/msm_bus_bimc.c +@@ -0,0 +1,2112 @@ ++/* Copyright (c) 2012-2014, The Linux Foundation. All rights reserved. ++ * ++ * This program is free software; you can redistribute it and/or modify ++ * it under the terms of the GNU General Public License version 2 and ++ * only version 2 as published by the Free Software Foundation. ++ * ++ * This program is distributed in the hope that it will be useful, ++ * but WITHOUT ANY WARRANTY; without even the implied warranty of ++ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the ++ * GNU General Public License for more details. 
++ */ ++ ++#define pr_fmt(fmt) "AXI: BIMC: %s(): " fmt, __func__ ++ ++#include ++#include ++#include "msm-bus-board.h" ++#include "msm_bus_core.h" ++#include "msm_bus_bimc.h" ++#include "msm_bus_adhoc.h" ++#include ++ ++enum msm_bus_bimc_slave_block { ++ SLAVE_BLOCK_RESERVED = 0, ++ SLAVE_BLOCK_SLAVE_WAY, ++ SLAVE_BLOCK_XPU, ++ SLAVE_BLOCK_ARBITER, ++ SLAVE_BLOCK_SCMO, ++}; ++ ++enum bke_sw { ++ BKE_OFF = 0, ++ BKE_ON = 1, ++}; ++ ++/* M_Generic */ ++ ++#define M_REG_BASE(b) ((b) + 0x00008000) ++ ++#define M_COMPONENT_INFO_ADDR(b, n) \ ++ (M_REG_BASE(b) + (0x4000 * (n)) + 0x00000000) ++enum bimc_m_component_info { ++ M_COMPONENT_INFO_RMSK = 0xffffff, ++ M_COMPONENT_INFO_INSTANCE_BMSK = 0xff0000, ++ M_COMPONENT_INFO_INSTANCE_SHFT = 0x10, ++ M_COMPONENT_INFO_SUB_TYPE_BMSK = 0xff00, ++ M_COMPONENT_INFO_SUB_TYPE_SHFT = 0x8, ++ M_COMPONENT_INFO_TYPE_BMSK = 0xff, ++ M_COMPONENT_INFO_TYPE_SHFT = 0x0, ++}; ++ ++#define M_CONFIG_INFO_0_ADDR(b, n) \ ++ (M_REG_BASE(b) + (0x4000 * (n)) + 0x00000020) ++enum bimc_m_config_info_0 { ++ M_CONFIG_INFO_0_RMSK = 0xff00ffff, ++ M_CONFIG_INFO_0_SYNC_MODE_BMSK = 0xff000000, ++ M_CONFIG_INFO_0_SYNC_MODE_SHFT = 0x18, ++ M_CONFIG_INFO_0_CONNECTION_TYPE_BMSK = 0xff00, ++ M_CONFIG_INFO_0_CONNECTION_TYPE_SHFT = 0x8, ++ M_CONFIG_INFO_0_FUNC_BMSK = 0xff, ++ M_CONFIG_INFO_0_FUNC_SHFT = 0x0, ++}; ++ ++#define M_CONFIG_INFO_1_ADDR(b, n) \ ++ (M_REG_BASE(b) + (0x4000 * (n)) + 0x00000030) ++enum bimc_m_config_info_1 { ++ M_CONFIG_INFO_1_RMSK = 0xffffffff, ++ M_CONFIG_INFO_1_SWAY_CONNECTIVITY_BMSK = 0xffffffff, ++ M_CONFIG_INFO_1_SWAY_CONNECTIVITY_SHFT = 0x0, ++}; ++ ++#define M_CONFIG_INFO_2_ADDR(b, n) \ ++ (M_REG_BASE(b) + (0x4000 * (n)) + 0x00000040) ++enum bimc_m_config_info_2 { ++ M_CONFIG_INFO_2_RMSK = 0xffffffff, ++ M_CONFIG_INFO_2_M_DATA_WIDTH_BMSK = 0xffff0000, ++ M_CONFIG_INFO_2_M_DATA_WIDTH_SHFT = 0x10, ++ M_CONFIG_INFO_2_M_TID_WIDTH_BMSK = 0xff00, ++ M_CONFIG_INFO_2_M_TID_WIDTH_SHFT = 0x8, ++ M_CONFIG_INFO_2_M_MID_WIDTH_BMSK = 0xff, ++ 
M_CONFIG_INFO_2_M_MID_WIDTH_SHFT = 0x0, ++}; ++ ++#define M_CONFIG_INFO_3_ADDR(b, n) \ ++ (M_REG_BASE(b) + (0x4000 * (n)) + 0x00000050) ++enum bimc_m_config_info_3 { ++ M_CONFIG_INFO_3_RMSK = 0xffffffff, ++ M_CONFIG_INFO_3_RCH_DEPTH_BMSK = 0xff000000, ++ M_CONFIG_INFO_3_RCH_DEPTH_SHFT = 0x18, ++ M_CONFIG_INFO_3_BCH_DEPTH_BMSK = 0xff0000, ++ M_CONFIG_INFO_3_BCH_DEPTH_SHFT = 0x10, ++ M_CONFIG_INFO_3_WCH_DEPTH_BMSK = 0xff00, ++ M_CONFIG_INFO_3_WCH_DEPTH_SHFT = 0x8, ++ M_CONFIG_INFO_3_ACH_DEPTH_BMSK = 0xff, ++ M_CONFIG_INFO_3_ACH_DEPTH_SHFT = 0x0, ++}; ++ ++#define M_CONFIG_INFO_4_ADDR(b, n) \ ++ (M_REG_BASE(b) + (0x4000 * (n)) + 0x00000060) ++enum bimc_m_config_info_4 { ++ M_CONFIG_INFO_4_RMSK = 0xffff, ++ M_CONFIG_INFO_4_REORDER_BUF_DEPTH_BMSK = 0xff00, ++ M_CONFIG_INFO_4_REORDER_BUF_DEPTH_SHFT = 0x8, ++ M_CONFIG_INFO_4_REORDER_TABLE_DEPTH_BMSK = 0xff, ++ M_CONFIG_INFO_4_REORDER_TABLE_DEPTH_SHFT = 0x0, ++}; ++ ++#define M_CONFIG_INFO_5_ADDR(b, n) \ ++ (M_REG_BASE(b) + (0x4000 * (n)) + 0x00000070) ++enum bimc_m_config_info_5 { ++ M_CONFIG_INFO_5_RMSK = 0x111, ++ M_CONFIG_INFO_5_MP2ARB_PIPELINE_EN_BMSK = 0x100, ++ M_CONFIG_INFO_5_MP2ARB_PIPELINE_EN_SHFT = 0x8, ++ M_CONFIG_INFO_5_MPBUF_PIPELINE_EN_BMSK = 0x10, ++ M_CONFIG_INFO_5_MPBUF_PIPELINE_EN_SHFT = 0x4, ++ M_CONFIG_INFO_5_M2MP_PIPELINE_EN_BMSK = 0x1, ++ M_CONFIG_INFO_5_M2MP_PIPELINE_EN_SHFT = 0x0, ++}; ++ ++#define M_INT_STATUS_ADDR(b, n) \ ++ (M_REG_BASE(b) + (0x4000 * (n)) + 0x00000100) ++enum bimc_m_int_status { ++ M_INT_STATUS_RMSK = 0x3, ++}; ++ ++#define M_INT_CLR_ADDR(b, n) \ ++ (M_REG_BASE(b) + (0x4000 * (n)) + 0x00000108) ++enum bimc_m_int_clr { ++ M_INT_CLR_RMSK = 0x3, ++}; ++ ++#define M_INT_EN_ADDR(b, n) \ ++ (M_REG_BASE(b) + (0x4000 * (n)) + 0x0000010c) ++enum bimc_m_int_en { ++ M_INT_EN_RMSK = 0x3, ++}; ++ ++#define M_CLK_CTRL_ADDR(b, n) \ ++ (M_REG_BASE(b) + (0x4000 * (n)) + 0x00000200) ++enum bimc_m_clk_ctrl { ++ M_CLK_CTRL_RMSK = 0x3, ++ M_CLK_CTRL_MAS_CLK_GATING_EN_BMSK = 0x2, ++ 
M_CLK_CTRL_MAS_CLK_GATING_EN_SHFT = 0x1, ++ M_CLK_CTRL_CORE_CLK_GATING_EN_BMSK = 0x1, ++ M_CLK_CTRL_CORE_CLK_GATING_EN_SHFT = 0x0, ++}; ++ ++#define M_MODE_ADDR(b, n) \ ++ (M_REG_BASE(b) + (0x4000 * (n)) + 0x00000210) ++enum bimc_m_mode { ++ M_MODE_RMSK = 0xf0000011, ++ M_MODE_WR_GATHER_BEATS_BMSK = 0xf0000000, ++ M_MODE_WR_GATHER_BEATS_SHFT = 0x1c, ++ M_MODE_NARROW_WR_BMSK = 0x10, ++ M_MODE_NARROW_WR_SHFT = 0x4, ++ M_MODE_ORDERING_MODEL_BMSK = 0x1, ++ M_MODE_ORDERING_MODEL_SHFT = 0x0, ++}; ++ ++#define M_PRIOLVL_OVERRIDE_ADDR(b, n) \ ++ (M_REG_BASE(b) + (0x4000 * (n)) + 0x00000230) ++enum bimc_m_priolvl_override { ++ M_PRIOLVL_OVERRIDE_RMSK = 0x301, ++ M_PRIOLVL_OVERRIDE_BMSK = 0x300, ++ M_PRIOLVL_OVERRIDE_SHFT = 0x8, ++ M_PRIOLVL_OVERRIDE_OVERRIDE_PRIOLVL_BMSK = 0x1, ++ M_PRIOLVL_OVERRIDE_OVERRIDE_PRIOLVL_SHFT = 0x0, ++}; ++ ++#define M_RD_CMD_OVERRIDE_ADDR(b, n) \ ++ (M_REG_BASE(b) + (0x4000 * (n)) + 0x00000240) ++enum bimc_m_read_command_override { ++ M_RD_CMD_OVERRIDE_RMSK = 0x3071f7f, ++ M_RD_CMD_OVERRIDE_AREQPRIO_BMSK = 0x3000000, ++ M_RD_CMD_OVERRIDE_AREQPRIO_SHFT = 0x18, ++ M_RD_CMD_OVERRIDE_AMEMTYPE_BMSK = 0x70000, ++ M_RD_CMD_OVERRIDE_AMEMTYPE_SHFT = 0x10, ++ M_RD_CMD_OVERRIDE_ATRANSIENT_BMSK = 0x1000, ++ M_RD_CMD_OVERRIDE_ATRANSIENT_SHFT = 0xc, ++ M_RD_CMD_OVERRIDE_ASHARED_BMSK = 0x800, ++ M_RD_CMD_OVERRIDE_ASHARED_SHFT = 0xb, ++ M_RD_CMD_OVERRIDE_AREDIRECT_BMSK = 0x400, ++ M_RD_CMD_OVERRIDE_AREDIRECT_SHFT = 0xa, ++ M_RD_CMD_OVERRIDE_AOOO_BMSK = 0x200, ++ M_RD_CMD_OVERRIDE_AOOO_SHFT = 0x9, ++ M_RD_CMD_OVERRIDE_AINNERSHARED_BMSK = 0x100, ++ M_RD_CMD_OVERRIDE_AINNERSHARED_SHFT = 0x8, ++ M_RD_CMD_OVERRIDE_OVERRIDE_AREQPRIO_BMSK = 0x40, ++ M_RD_CMD_OVERRIDE_OVERRIDE_AREQPRIO_SHFT = 0x6, ++ M_RD_CMD_OVERRIDE_OVERRIDE_ATRANSIENT_BMSK = 0x20, ++ M_RD_CMD_OVERRIDE_OVERRIDE_ATRANSIENT_SHFT = 0x5, ++ M_RD_CMD_OVERRIDE_OVERRIDE_AMEMTYPE_BMSK = 0x10, ++ M_RD_CMD_OVERRIDE_OVERRIDE_AMEMTYPE_SHFT = 0x4, ++ M_RD_CMD_OVERRIDE_OVERRIDE_ASHARED_BMSK = 0x8, ++ 
M_RD_CMD_OVERRIDE_OVERRIDE_ASHARED_SHFT = 0x3, ++ M_RD_CMD_OVERRIDE_OVERRIDE_AREDIRECT_BMSK = 0x4, ++ M_RD_CMD_OVERRIDE_OVERRIDE_AREDIRECT_SHFT = 0x2, ++ M_RD_CMD_OVERRIDE_OVERRIDE_AOOO_BMSK = 0x2, ++ M_RD_CMD_OVERRIDE_OVERRIDE_AOOO_SHFT = 0x1, ++ M_RD_CMD_OVERRIDE_OVERRIDE_AINNERSHARED_BMSK = 0x1, ++ M_RD_CMD_OVERRIDE_OVERRIDE_AINNERSHARED_SHFT = 0x0, ++}; ++ ++#define M_WR_CMD_OVERRIDE_ADDR(b, n) \ ++ (M_REG_BASE(b) + (0x4000 * (n)) + 0x00000250) ++enum bimc_m_write_command_override { ++ M_WR_CMD_OVERRIDE_RMSK = 0x3071f7f, ++ M_WR_CMD_OVERRIDE_AREQPRIO_BMSK = 0x3000000, ++ M_WR_CMD_OVERRIDE_AREQPRIO_SHFT = 0x18, ++ M_WR_CMD_OVERRIDE_AMEMTYPE_BMSK = 0x70000, ++ M_WR_CMD_OVERRIDE_AMEMTYPE_SHFT = 0x10, ++ M_WR_CMD_OVERRIDE_ATRANSIENT_BMSK = 0x1000, ++ M_WR_CMD_OVERRIDE_ATRANSIENT_SHFT = 0xc, ++ M_WR_CMD_OVERRIDE_ASHARED_BMSK = 0x800, ++ M_WR_CMD_OVERRIDE_ASHARED_SHFT = 0xb, ++ M_WR_CMD_OVERRIDE_AREDIRECT_BMSK = 0x400, ++ M_WR_CMD_OVERRIDE_AREDIRECT_SHFT = 0xa, ++ M_WR_CMD_OVERRIDE_AOOO_BMSK = 0x200, ++ M_WR_CMD_OVERRIDE_AOOO_SHFT = 0x9, ++ M_WR_CMD_OVERRIDE_AINNERSHARED_BMSK = 0x100, ++ M_WR_CMD_OVERRIDE_AINNERSHARED_SHFT = 0x8, ++ M_WR_CMD_OVERRIDE_OVERRIDE_AREQPRIO_BMSK = 0x40, ++ M_WR_CMD_OVERRIDE_OVERRIDE_AREQPRIO_SHFT = 0x6, ++ M_WR_CMD_OVERRIDE_OVERRIDE_ATRANSIENT_BMSK = 0x20, ++ M_WR_CMD_OVERRIDE_OVERRIDE_ATRANSIENT_SHFT = 0x5, ++ M_WR_CMD_OVERRIDE_OVERRIDE_AMEMTYPE_BMSK = 0x10, ++ M_WR_CMD_OVERRIDE_OVERRIDE_AMEMTYPE_SHFT = 0x4, ++ M_WR_CMD_OVERRIDE_OVERRIDE_ASHARED_BMSK = 0x8, ++ M_WR_CMD_OVERRIDE_OVERRIDE_ASHARED_SHFT = 0x3, ++ M_WR_CMD_OVERRIDE_OVERRIDE_AREDIRECT_BMSK = 0x4, ++ M_WR_CMD_OVERRIDE_OVERRIDE_AREDIRECT_SHFT = 0x2, ++ M_WR_CMD_OVERRIDE_OVERRIDE_AOOO_BMSK = 0x2, ++ M_WR_CMD_OVERRIDE_OVERRIDE_AOOO_SHFT = 0x1, ++ M_WR_CMD_OVERRIDE_OVERRIDE_AINNERSHARED_BMSK = 0x1, ++ M_WR_CMD_OVERRIDE_OVERRIDE_AINNERSHARED_SHFT = 0x0, ++}; ++ ++#define M_BKE_EN_ADDR(b, n) \ ++ (M_REG_BASE(b) + (0x4000 * (n)) + 0x00000300) ++enum bimc_m_bke_en { ++ M_BKE_EN_RMSK = 
0x1, ++ M_BKE_EN_EN_BMSK = 0x1, ++ M_BKE_EN_EN_SHFT = 0x0, ++}; ++ ++/* Grant Period registers */ ++#define M_BKE_GP_ADDR(b, n) \ ++ (M_REG_BASE(b) + (0x4000 * (n)) + 0x00000304) ++enum bimc_m_bke_grant_period { ++ M_BKE_GP_RMSK = 0x3ff, ++ M_BKE_GP_GP_BMSK = 0x3ff, ++ M_BKE_GP_GP_SHFT = 0x0, ++}; ++ ++/* Grant count register. ++ * The Grant count register represents a signed 16 bit ++ * value, range 0-0x7fff ++ */ ++#define M_BKE_GC_ADDR(b, n) \ ++ (M_REG_BASE(b) + (0x4000 * (n)) + 0x00000308) ++enum bimc_m_bke_grant_count { ++ M_BKE_GC_RMSK = 0xffff, ++ M_BKE_GC_GC_BMSK = 0xffff, ++ M_BKE_GC_GC_SHFT = 0x0, ++}; ++ ++/* Threshold High Registers */ ++#define M_BKE_THH_ADDR(b, n) \ ++ (M_REG_BASE(b) + (0x4000 * (n)) + 0x00000320) ++enum bimc_m_bke_thresh_high { ++ M_BKE_THH_RMSK = 0xffff, ++ M_BKE_THH_THRESH_BMSK = 0xffff, ++ M_BKE_THH_THRESH_SHFT = 0x0, ++}; ++ ++/* Threshold Medium Registers */ ++#define M_BKE_THM_ADDR(b, n) \ ++ (M_REG_BASE(b) + (0x4000 * (n)) + 0x00000324) ++enum bimc_m_bke_thresh_medium { ++ M_BKE_THM_RMSK = 0xffff, ++ M_BKE_THM_THRESH_BMSK = 0xffff, ++ M_BKE_THM_THRESH_SHFT = 0x0, ++}; ++ ++/* Threshold Low Registers */ ++#define M_BKE_THL_ADDR(b, n) \ ++ (M_REG_BASE(b) + (0x4000 * (n)) + 0x00000328) ++enum bimc_m_bke_thresh_low { ++ M_BKE_THL_RMSK = 0xffff, ++ M_BKE_THL_THRESH_BMSK = 0xffff, ++ M_BKE_THL_THRESH_SHFT = 0x0, ++}; ++ ++#define M_BKE_HEALTH_0_CONFIG_ADDR(b, n) \ ++ (M_REG_BASE(b) + (0x4000 * (n)) + 0x00000340) ++enum bimc_m_bke_health_0 { ++ M_BKE_HEALTH_0_CONFIG_RMSK = 0x80000303, ++ M_BKE_HEALTH_0_CONFIG_LIMIT_CMDS_BMSK = 0x80000000, ++ M_BKE_HEALTH_0_CONFIG_LIMIT_CMDS_SHFT = 0x1f, ++ M_BKE_HEALTH_0_CONFIG_AREQPRIO_BMSK = 0x300, ++ M_BKE_HEALTH_0_CONFIG_AREQPRIO_SHFT = 0x8, ++ M_BKE_HEALTH_0_CONFIG_PRIOLVL_BMSK = 0x3, ++ M_BKE_HEALTH_0_CONFIG_PRIOLVL_SHFT = 0x0, ++}; ++ ++#define M_BKE_HEALTH_1_CONFIG_ADDR(b, n) \ ++ (M_REG_BASE(b) + (0x4000 * (n)) + 0x00000344) ++enum bimc_m_bke_health_1 { ++ M_BKE_HEALTH_1_CONFIG_RMSK = 
0x80000303, ++ M_BKE_HEALTH_1_CONFIG_LIMIT_CMDS_BMSK = 0x80000000, ++ M_BKE_HEALTH_1_CONFIG_LIMIT_CMDS_SHFT = 0x1f, ++ M_BKE_HEALTH_1_CONFIG_AREQPRIO_BMSK = 0x300, ++ M_BKE_HEALTH_1_CONFIG_AREQPRIO_SHFT = 0x8, ++ M_BKE_HEALTH_1_CONFIG_PRIOLVL_BMSK = 0x3, ++ M_BKE_HEALTH_1_CONFIG_PRIOLVL_SHFT = 0x0, ++}; ++ ++#define M_BKE_HEALTH_2_CONFIG_ADDR(b, n) \ ++ (M_REG_BASE(b) + (0x4000 * (n)) + 0x00000348) ++enum bimc_m_bke_health_2 { ++ M_BKE_HEALTH_2_CONFIG_RMSK = 0x80000303, ++ M_BKE_HEALTH_2_CONFIG_LIMIT_CMDS_BMSK = 0x80000000, ++ M_BKE_HEALTH_2_CONFIG_LIMIT_CMDS_SHFT = 0x1f, ++ M_BKE_HEALTH_2_CONFIG_AREQPRIO_BMSK = 0x300, ++ M_BKE_HEALTH_2_CONFIG_AREQPRIO_SHFT = 0x8, ++ M_BKE_HEALTH_2_CONFIG_PRIOLVL_BMSK = 0x3, ++ M_BKE_HEALTH_2_CONFIG_PRIOLVL_SHFT = 0x0, ++}; ++ ++#define M_BKE_HEALTH_3_CONFIG_ADDR(b, n) \ ++ (M_REG_BASE(b) + (0x4000 * (n)) + 0x0000034c) ++enum bimc_m_bke_health_3 { ++ M_BKE_HEALTH_3_CONFIG_RMSK = 0x303, ++ M_BKE_HEALTH_3_CONFIG_AREQPRIO_BMSK = 0x300, ++ M_BKE_HEALTH_3_CONFIG_AREQPRIO_SHFT = 0x8, ++ M_BKE_HEALTH_3_CONFIG_PRIOLVL_BMSK = 0x3, ++ M_BKE_HEALTH_3_CONFIG_PRIOLVL_SHFT = 0x0, ++}; ++ ++#define M_BUF_STATUS_ADDR(b, n) \ ++ (M_REG_BASE(b) + (0x4000 * (n)) + 0x00000400) ++enum bimc_m_buf_status { ++ M_BUF_STATUS_RMSK = 0xf03f030, ++ M_BUF_STATUS_RCH_DATA_WR_FULL_BMSK = 0x8000000, ++ M_BUF_STATUS_RCH_DATA_WR_FULL_SHFT = 0x1b, ++ M_BUF_STATUS_RCH_DATA_WR_EMPTY_BMSK = 0x4000000, ++ M_BUF_STATUS_RCH_DATA_WR_EMPTY_SHFT = 0x1a, ++ M_BUF_STATUS_RCH_CTRL_WR_FULL_BMSK = 0x2000000, ++ M_BUF_STATUS_RCH_CTRL_WR_FULL_SHFT = 0x19, ++ M_BUF_STATUS_RCH_CTRL_WR_EMPTY_BMSK = 0x1000000, ++ M_BUF_STATUS_RCH_CTRL_WR_EMPTY_SHFT = 0x18, ++ M_BUF_STATUS_BCH_WR_FULL_BMSK = 0x20000, ++ M_BUF_STATUS_BCH_WR_FULL_SHFT = 0x11, ++ M_BUF_STATUS_BCH_WR_EMPTY_BMSK = 0x10000, ++ M_BUF_STATUS_BCH_WR_EMPTY_SHFT = 0x10, ++ M_BUF_STATUS_WCH_DATA_RD_FULL_BMSK = 0x8000, ++ M_BUF_STATUS_WCH_DATA_RD_FULL_SHFT = 0xf, ++ M_BUF_STATUS_WCH_DATA_RD_EMPTY_BMSK = 0x4000, ++ 
M_BUF_STATUS_WCH_DATA_RD_EMPTY_SHFT = 0xe, ++ M_BUF_STATUS_WCH_CTRL_RD_FULL_BMSK = 0x2000, ++ M_BUF_STATUS_WCH_CTRL_RD_FULL_SHFT = 0xd, ++ M_BUF_STATUS_WCH_CTRL_RD_EMPTY_BMSK = 0x1000, ++ M_BUF_STATUS_WCH_CTRL_RD_EMPTY_SHFT = 0xc, ++ M_BUF_STATUS_ACH_RD_FULL_BMSK = 0x20, ++ M_BUF_STATUS_ACH_RD_FULL_SHFT = 0x5, ++ M_BUF_STATUS_ACH_RD_EMPTY_BMSK = 0x10, ++ M_BUF_STATUS_ACH_RD_EMPTY_SHFT = 0x4, ++}; ++/*BIMC Generic */ ++ ++#define S_REG_BASE(b) ((b) + 0x00048000) ++ ++#define S_COMPONENT_INFO_ADDR(b, n) \ ++ (S_REG_BASE(b) + (0x8000 * (n)) + 0x00000000) ++enum bimc_s_component_info { ++ S_COMPONENT_INFO_RMSK = 0xffffff, ++ S_COMPONENT_INFO_INSTANCE_BMSK = 0xff0000, ++ S_COMPONENT_INFO_INSTANCE_SHFT = 0x10, ++ S_COMPONENT_INFO_SUB_TYPE_BMSK = 0xff00, ++ S_COMPONENT_INFO_SUB_TYPE_SHFT = 0x8, ++ S_COMPONENT_INFO_TYPE_BMSK = 0xff, ++ S_COMPONENT_INFO_TYPE_SHFT = 0x0, ++}; ++ ++#define S_HW_INFO_ADDR(b, n) \ ++ (S_REG_BASE(b) + (0x80000 * (n)) + 0x00000010) ++enum bimc_s_hw_info { ++ S_HW_INFO_RMSK = 0xffffffff, ++ S_HW_INFO_MAJOR_BMSK = 0xff000000, ++ S_HW_INFO_MAJOR_SHFT = 0x18, ++ S_HW_INFO_BRANCH_BMSK = 0xff0000, ++ S_HW_INFO_BRANCH_SHFT = 0x10, ++ S_HW_INFO_MINOR_BMSK = 0xff00, ++ S_HW_INFO_MINOR_SHFT = 0x8, ++ S_HW_INFO_ECO_BMSK = 0xff, ++ S_HW_INFO_ECO_SHFT = 0x0, ++}; ++ ++ ++/* S_SCMO_GENERIC */ ++ ++#define S_SCMO_REG_BASE(b) ((b) + 0x00048000) ++ ++#define S_SCMO_CONFIG_INFO_0_ADDR(b, n) \ ++ (S_SCMO_REG_BASE(b) + (0x8000 * (n)) + 0x00000020) ++enum bimc_s_scmo_config_info_0 { ++ S_SCMO_CONFIG_INFO_0_RMSK = 0xffffffff, ++ S_SCMO_CONFIG_INFO_0_DATA_WIDTH_BMSK = 0xffff0000, ++ S_SCMO_CONFIG_INFO_0_DATA_WIDTH_SHFT = 0x10, ++ S_SCMO_CONFIG_INFO_0_TID_WIDTH_BMSK = 0xff00, ++ S_SCMO_CONFIG_INFO_0_TID_WIDTH_SHFT = 0x8, ++ S_SCMO_CONFIG_INFO_0_MID_WIDTH_BMSK = 0xff, ++ S_SCMO_CONFIG_INFO_0_MID_WIDTH_SHFT = 0x0, ++}; ++ ++#define S_SCMO_CONFIG_INFO_1_ADDR(b, n) \ ++ (S_SCMO_REG_BASE(b) + (0x8000 * (n)) + 0x00000030) ++enum bimc_s_scmo_config_info_1 { ++ 
S_SCMO_CONFIG_INFO_1_RMSK = 0xffffffff, ++ S_SCMO_CONFIG_INFO_1_MPORT_CONNECTIVITY_BMSK = 0xffffffff, ++ S_SCMO_CONFIG_INFO_1_MPORT_CONNECTIVITY_SHFT = 0x0, ++}; ++ ++#define S_SCMO_CONFIG_INFO_2_ADDR(b, n) \ ++ (S_SCMO_REG_BASE(b) + (0x8000 * (n)) + 0x00000040) ++enum bimc_s_scmo_config_info_2 { ++ S_SCMO_CONFIG_INFO_2_RMSK = 0xff00ff, ++ S_SCMO_CONFIG_INFO_2_NUM_GLOBAL_MONS_BMSK = 0xff0000, ++ S_SCMO_CONFIG_INFO_2_NUM_GLOBAL_MONS_SHFT = 0x10, ++ S_SCMO_CONFIG_INFO_2_VMID_WIDTH_BMSK = 0xff, ++ S_SCMO_CONFIG_INFO_2_VMID_WIDTH_SHFT = 0x0, ++}; ++ ++#define S_SCMO_CONFIG_INFO_3_ADDR(b, n) \ ++ (S_SCMO_REG_BASE(b) + (0x8000 * (n)) + 0x00000050) ++enum bimc_s_scmo_config_info_3 { ++ S_SCMO_CONFIG_INFO_3_RMSK = 0xffffffff, ++ S_SCMO_CONFIG_INFO_3_RCH0_CTRL_DEPTH_BMSK = 0xff000000, ++ S_SCMO_CONFIG_INFO_3_RCH0_CTRL_DEPTH_SHFT = 0x18, ++ S_SCMO_CONFIG_INFO_3_RCH0_DEPTH_BMSK = 0xff0000, ++ S_SCMO_CONFIG_INFO_3_RCH0_DEPTH_SHFT = 0x10, ++ S_SCMO_CONFIG_INFO_3_BCH_DEPTH_BMSK = 0xff00, ++ S_SCMO_CONFIG_INFO_3_BCH_DEPTH_SHFT = 0x8, ++ S_SCMO_CONFIG_INFO_3_WCH_DEPTH_BMSK = 0xff, ++ S_SCMO_CONFIG_INFO_3_WCH_DEPTH_SHFT = 0x0, ++}; ++ ++#define S_SCMO_CONFIG_INFO_4_ADDR(b, n) \ ++ (S_SCMO_REG_BASE(b) + (0x8000 * (n)) + 0x00000060) ++enum bimc_s_scmo_config_info_4 { ++ S_SCMO_CONFIG_INFO_4_RMSK = 0xffff, ++ S_SCMO_CONFIG_INFO_4_RCH1_CTRL_DEPTH_BMSK = 0xff00, ++ S_SCMO_CONFIG_INFO_4_RCH1_CTRL_DEPTH_SHFT = 0x8, ++ S_SCMO_CONFIG_INFO_4_RCH1_DEPTH_BMSK = 0xff, ++ S_SCMO_CONFIG_INFO_4_RCH1_DEPTH_SHFT = 0x0, ++}; ++ ++#define S_SCMO_CONFIG_INFO_5_ADDR(b, n) \ ++ (S_SCMO_REG_BASE(b) + (0x8000 * (n)) + 0x00000070) ++enum bimc_s_scmo_config_info_5 { ++ S_SCMO_CONFIG_INFO_5_RMSK = 0xffff, ++ S_SCMO_CONFIG_INFO_5_DPE_CQ_DEPTH_BMSK = 0xff00, ++ S_SCMO_CONFIG_INFO_5_DPE_CQ_DEPTH_SHFT = 0x8, ++ S_SCMO_CONFIG_INFO_5_DDR_BUS_WIDTH_BMSK = 0xff, ++ S_SCMO_CONFIG_INFO_5_DDR_BUS_WIDTH_SHFT = 0x0, ++}; ++ ++#define S_SCMO_CONFIG_INFO_6_ADDR(b, n) \ ++ (S_SCMO_REG_BASE(b) + (0x8000 * (n)) + 0x00000080) 
++enum bimc_s_scmo_config_info_6 { ++ S_SCMO_CONFIG_INFO_6_RMSK = 0x1111, ++ S_SCMO_CONFIG_INFO_6_WBUFC_PIPE_BMSK = 0x1000, ++ S_SCMO_CONFIG_INFO_6_WBUFC_PIPE_SHFT = 0xc, ++ S_SCMO_CONFIG_INFO_6_RDOPT_PIPE_BMSK = 0x100, ++ S_SCMO_CONFIG_INFO_6_RDOPT_PIPE_SHFT = 0x8, ++ S_SCMO_CONFIG_INFO_6_ACHAN_INTF_PIPE_BMSK = 0x10, ++ S_SCMO_CONFIG_INFO_6_ACHAN_INTF_PIPE_SHFT = 0x4, ++ S_SCMO_CONFIG_INFO_6_ADDR_DECODE_HT_BMSK = 0x1, ++ S_SCMO_CONFIG_INFO_6_ADDR_DECODE_HT_SHFT = 0x0, ++}; ++ ++#define S_SCMO_INT_STATUS_ADDR(b, n) \ ++ (S_SCMO_REG_BASE(b) + (0x8000 * (n)) + 0x00000100) ++enum bimc_s_scmo_int_status { ++ S_SCMO_INT_STATUS_RMSK = 0x1, ++ S_SCMO_INT_STATUS_ERR_OCCURED_BMSK = 0x1, ++ S_SCMO_INT_STATUS_ERR_OCCURED_SHFT = 0x0, ++}; ++ ++#define S_SCMO_INT_CLR_ADDR(b, n) \ ++ (S_SCMO_REG_BASE(b) + (0x8000 * (n)) + 0x00000108) ++enum bimc_s_scmo_int_clr { ++ S_SCMO_INT_CLR_RMSK = 0x1, ++ S_SCMO_INT_CLR_IRQ_CLR_BMSK = 0x1, ++ S_SCMO_INT_CLR_IRQ_CLR_SHFT = 0x0, ++}; ++ ++#define S_SCMO_INT_EN_ADDR(b, n) \ ++ (S_SCMO_REG_BASE(b) + (0x8000 * (n)) + 0x0000010c) ++enum bimc_s_scmo_int_en { ++ S_SCMO_INT_EN_RMSK = 0x1, ++ S_SCMO_INT_EN_IRQ_EN_BMSK = 0x1, ++ S_SCMO_INT_EN_IRQ_EN_SHFT = 0x0, ++}; ++ ++#define S_SCMO_ESYN_ADDR_ADDR(b, n) \ ++ (S_SCMO_REG_BASE(b) + (0x8000 * (n)) + 0x00000120) ++enum bimc_s_scmo_esyn_addr { ++ S_SCMO_ESYN_ADDR_RMSK = 0xffffffff, ++ S_SCMO_ESYN_ADDR_ESYN_ADDR_ERR_ADDR_BMSK = 0xffffffff, ++ S_SCMO_ESYN_ADDR_ESYN_ADDR_ERR_ADDR_SHFT = 0x0, ++}; ++ ++#define S_SCMO_ESYN_APACKET_0_ADDR(b, n) \ ++ (S_SCMO_REG_BASE(b) + (0x8000 * (n)) + 0x00000128) ++enum bimc_s_scmo_esyn_apacket_0 { ++ S_SCMO_ESYN_APACKET_0_RMSK = 0xff1fffff, ++ S_SCMO_ESYN_APACKET_0_ERR_ATID_BMSK = 0xff000000, ++ S_SCMO_ESYN_APACKET_0_ERR_ATID_SHFT = 0x18, ++ S_SCMO_ESYN_APACKET_0_ERR_AVMID_BMSK = 0x1f0000, ++ S_SCMO_ESYN_APACKET_0_ERR_AVMID_SHFT = 0x10, ++ S_SCMO_ESYN_APACKET_0_ERR_AMID_BMSK = 0xffff, ++ S_SCMO_ESYN_APACKET_0_ERR_AMID_SHFT = 0x0, ++}; ++ ++#define 
S_SCMO_ESYN_APACKET_1_ADDR(b, n) \ ++ (S_SCMO_REG_BASE(b) + (0x8000 * (n)) + 0x0000012c) ++enum bimc_s_scmo_esyn_apacket_1 { ++ S_SCMO_ESYN_APACKET_1_RMSK = 0x10ff117, ++ S_SCMO_ESYN_APACKET_1_ERR_CODE_BMSK = 0x1000000, ++ S_SCMO_ESYN_APACKET_1_ERR_CODE_SHFT = 0x18, ++ S_SCMO_ESYN_APACKET_1_ERR_ALEN_BMSK = 0xf0000, ++ S_SCMO_ESYN_APACKET_1_ERR_ALEN_SHFT = 0x10, ++ S_SCMO_ESYN_APACKET_1_ERR_ASIZE_BMSK = 0xe000, ++ S_SCMO_ESYN_APACKET_1_ERR_ASIZE_SHFT = 0xd, ++ S_SCMO_ESYN_APACKET_1_ERR_ABURST_BMSK = 0x1000, ++ S_SCMO_ESYN_APACKET_1_ERR_ABURST_SHFT = 0xc, ++ S_SCMO_ESYN_APACKET_1_ERR_AEXCLUSIVE_BMSK = 0x100, ++ S_SCMO_ESYN_APACKET_1_ERR_AEXCLUSIVE_SHFT = 0x8, ++ S_SCMO_ESYN_APACKET_1_ERR_APRONTS_BMSK = 0x10, ++ S_SCMO_ESYN_APACKET_1_ERR_APRONTS_SHFT = 0x4, ++ S_SCMO_ESYN_APACKET_1_ERR_AOOORD_BMSK = 0x4, ++ S_SCMO_ESYN_APACKET_1_ERR_AOOORD_SHFT = 0x2, ++ S_SCMO_ESYN_APACKET_1_ERR_AOOOWR_BMSK = 0x2, ++ S_SCMO_ESYN_APACKET_1_ERR_AOOOWR_SHFT = 0x1, ++ S_SCMO_ESYN_APACKET_1_ERR_AWRITE_BMSK = 0x1, ++ S_SCMO_ESYN_APACKET_1_ERR_AWRITE_SHFT = 0x0, ++}; ++ ++#define S_SCMO_CLK_CTRL_ADDR(b, n) \ ++ (S_SCMO_REG_BASE(b) + (0x8000 * (n)) + 0x00000200) ++enum bimc_s_scmo_clk_ctrl { ++ S_SCMO_CLK_CTRL_RMSK = 0xffff1111, ++ S_SCMO_CLK_CTRL_PEN_CMD_CG_EN_BMSK = 0x10000, ++ S_SCMO_CLK_CTRL_PEN_CMD_CG_EN_SHFT = 0x10, ++ S_SCMO_CLK_CTRL_RCH_CG_EN_BMSK = 0x1000, ++ S_SCMO_CLK_CTRL_RCH_CG_EN_SHFT = 0xc, ++ S_SCMO_CLK_CTRL_FLUSH_CG_EN_BMSK = 0x100, ++ S_SCMO_CLK_CTRL_FLUSH_CG_EN_SHFT = 0x8, ++ S_SCMO_CLK_CTRL_WCH_CG_EN_BMSK = 0x10, ++ S_SCMO_CLK_CTRL_WCH_CG_EN_SHFT = 0x4, ++ S_SCMO_CLK_CTRL_ACH_CG_EN_BMSK = 0x1, ++ S_SCMO_CLK_CTRL_ACH_CG_EN_SHFT = 0x0, ++}; ++ ++#define S_SCMO_SLV_INTERLEAVE_CFG_ADDR(b, n) \ ++ (S_SCMO_REG_BASE(b) + (0x8000 * (n)) + 0x00000400) ++enum bimc_s_scmo_slv_interleave_cfg { ++ S_SCMO_SLV_INTERLEAVE_CFG_RMSK = 0xff, ++ S_SCMO_SLV_INTERLEAVE_CFG_INTERLEAVE_CS1_BMSK = 0x10, ++ S_SCMO_SLV_INTERLEAVE_CFG_INTERLEAVE_CS1_SHFT = 0x4, ++ 
S_SCMO_SLV_INTERLEAVE_CFG_INTERLEAVE_CS0_BMSK = 0x1, ++ S_SCMO_SLV_INTERLEAVE_CFG_INTERLEAVE_CS0_SHFT = 0x0, ++}; ++ ++#define S_SCMO_ADDR_BASE_CSn_ADDR(b, n, o) \ ++ (S_SCMO_REG_BASE(b) + (0x8000 * (n)) + 0x00000410 + 0x4 * (o)) ++enum bimc_s_scmo_addr_base_csn { ++ S_SCMO_ADDR_BASE_CSn_RMSK = 0xffff, ++ S_SCMO_ADDR_BASE_CSn_MAXn = 1, ++ S_SCMO_ADDR_BASE_CSn_ADDR_BASE_BMSK = 0xfc, ++ S_SCMO_ADDR_BASE_CSn_ADDR_BASE_SHFT = 0x2, ++}; ++ ++#define S_SCMO_ADDR_MAP_CSn_ADDR(b, n, o) \ ++ (S_SCMO_REG_BASE(b) + (0x8000 * (n)) + 0x00000420 + 0x4 * (o)) ++enum bimc_s_scmo_addr_map_csn { ++ S_SCMO_ADDR_MAP_CSn_RMSK = 0xffff, ++ S_SCMO_ADDR_MAP_CSn_MAXn = 1, ++ S_SCMO_ADDR_MAP_CSn_RANK_EN_BMSK = 0x8000, ++ S_SCMO_ADDR_MAP_CSn_RANK_EN_SHFT = 0xf, ++ S_SCMO_ADDR_MAP_CSn_ADDR_MODE_BMSK = 0x1000, ++ S_SCMO_ADDR_MAP_CSn_ADDR_MODE_SHFT = 0xc, ++ S_SCMO_ADDR_MAP_CSn_BANK_SIZE_BMSK = 0x100, ++ S_SCMO_ADDR_MAP_CSn_BANK_SIZE_SHFT = 0x8, ++ S_SCMO_ADDR_MAP_CSn_ROW_SIZE_BMSK = 0x30, ++ S_SCMO_ADDR_MAP_CSn_ROW_SIZE_SHFT = 0x4, ++ S_SCMO_ADDR_MAP_CSn_COL_SIZE_BMSK = 0x3, ++ S_SCMO_ADDR_MAP_CSn_COL_SIZE_SHFT = 0x0, ++}; ++ ++#define S_SCMO_ADDR_MASK_CSn_ADDR(b, n, o) \ ++ (S_SCMO_REG_BASE(b) + (0x8000 * (n)) + 0x00000430 + 0x4 * (0)) ++enum bimc_s_scmo_addr_mask_csn { ++ S_SCMO_ADDR_MASK_CSn_RMSK = 0xffff, ++ S_SCMO_ADDR_MASK_CSn_MAXn = 1, ++ S_SCMO_ADDR_MASK_CSn_ADDR_MASK_BMSK = 0xfc, ++ S_SCMO_ADDR_MASK_CSn_ADDR_MASK_SHFT = 0x2, ++}; ++ ++#define S_SCMO_SLV_STATUS_ADDR(b, n) \ ++ (S_SCMO_REG_BASE(b) + (0x8000 * (n)) + 0x00000450) ++enum bimc_s_scmo_slv_status { ++ S_SCMO_SLV_STATUS_RMSK = 0xff3, ++ S_SCMO_SLV_STATUS_GLOBAL_MONS_IN_USE_BMSK = 0xff0, ++ S_SCMO_SLV_STATUS_GLOBAL_MONS_IN_USE_SHFT = 0x4, ++ S_SCMO_SLV_STATUS_SLAVE_IDLE_BMSK = 0x3, ++ S_SCMO_SLV_STATUS_SLAVE_IDLE_SHFT = 0x0, ++}; ++ ++#define S_SCMO_CMD_BUF_CFG_ADDR(b, n) \ ++ (S_SCMO_REG_BASE(b) + (0x8000 * (n)) + 0x00000500) ++enum bimc_s_scmo_cmd_buf_cfg { ++ S_SCMO_CMD_BUF_CFG_RMSK = 0xf1f, ++ 
S_SCMO_CMD_BUF_CFG_CMD_ORDERING_BMSK = 0x300, ++ S_SCMO_CMD_BUF_CFG_CMD_ORDERING_SHFT = 0x8, ++ S_SCMO_CMD_BUF_CFG_HP_CMD_AREQPRIO_MAP_BMSK = 0x10, ++ S_SCMO_CMD_BUF_CFG_HP_CMD_AREQPRIO_MAP_SHFT = 0x4, ++ S_SCMO_CMD_BUF_CFG_HP_CMD_Q_DEPTH_BMSK = 0x7, ++ S_SCMO_CMD_BUF_CFG_HP_CMD_Q_DEPTH_SHFT = 0x0, ++}; ++ ++#define S_SCM_CMD_BUF_STATUS_ADDR(b, n) \ ++ (S_SCMO_REG_BASE(b) + (0x8000 * (n)) + 0x00000520) ++enum bimc_s_scm_cmd_buf_status { ++ S_SCMO_CMD_BUF_STATUS_RMSK = 0x77, ++ S_SCMO_CMD_BUF_STATUS_HP_CMD_BUF_ENTRIES_IN_USE_BMSK = 0x70, ++ S_SCMO_CMD_BUF_STATUS_HP_CMD_BUF_ENTRIES_IN_USE_SHFT = 0x4, ++ S_SCMO_CMD_BUF_STATUS_LP_CMD_BUF_ENTRIES_IN_USE_BMSK = 0x7, ++ S_SCMO_CMD_BUF_STATUS_LP_CMD_BUF_ENTRIES_IN_USE_SHFT = 0x0, ++}; ++ ++#define S_SCMO_RCH_SEL_ADDR(b, n) \ ++ (S_SCMO_REG_BASE(b) + (0x8000 * (n)) + 0x00000540) ++enum bimc_s_scmo_rch_sel { ++ S_SCMO_RCH_SEL_RMSK = 0xffffffff, ++ S_SCMO_CMD_BUF_STATUS_RCH_PORTS_BMSK = 0xffffffff, ++ S_SCMO_CMD_BUF_STATUS_RCH_PORTS_SHFT = 0x0, ++}; ++ ++#define S_SCMO_RCH_BKPR_CFG_ADDR(b, n) \ ++ (S_SCMO_REG_BASE(b) + (0x8000 * (n)) + 0x00000544) ++enum bimc_s_scmo_rch_bkpr_cfg { ++ S_SCMO_RCH_BKPR_CFG_RMSK = 0xffffffff, ++ S_SCMO_RCH_BKPR_CFG_RCH1_FIFO_BKPR_HI_TH_BMSK = 0x3f000000, ++ S_SCMO_RCH_BKPR_CFG_RCH1_FIFO_BKPR_HI_TH_SHFT = 0x18, ++ S_SCMO_RCH_BKPR_CFG_RCH1_FIFO_BKPR_LO_TH_BMSK = 0x3f0000, ++ S_SCMO_RCH_BKPR_CFG_RCH1_FIFO_BKPR_LO_TH_SHFT = 0x10, ++ S_SCMO_RCH_BKPR_CFG_RCH0_FIFO_BKPR_HI_TH_BMSK = 0x3f00, ++ S_SCMO_RCH_BKPR_CFG_RCH0_FIFO_BKPR_HI_TH_SHFT = 0x8, ++ S_SCMO_RCH_BKPR_CFG_RCH0_FIFO_BKPR_LO_TH_BMSK = 0x3f, ++ S_SCMO_RCH_BKPR_CFG_RCH0_FIFO_BKPR_LO_TH_SHFT = 0x0, ++}; ++ ++#define S_SCMO_RCH_STATUS_ADDR(b, n) \ ++ (S_SCMO_REG_BASE(b) + (0x8000 * (n)) + 0x00000560) ++enum bimc_s_scmo_rch_status { ++ S_SCMO_RCH_STATUS_RMSK = 0x33333, ++ S_SCMO_RCH_STATUS_PRQ_FIFO_FULL_BMSK = 0x20000, ++ S_SCMO_RCH_STATUS_PRQ_FIFO_FULL_SHFT = 0x11, ++ S_SCMO_RCH_STATUS_PRQ_FIFO_EMPTY_BMSK = 0x10000, ++ 
S_SCMO_RCH_STATUS_PRQ_FIFO_EMPTY_SHFT = 0x10, ++ S_SCMO_RCH_STATUS_RCH1_QUAL_FIFO_FULL_BMSK = 0x2000, ++ S_SCMO_RCH_STATUS_RCH1_QUAL_FIFO_FULL_SHFT = 0xd, ++ S_SCMO_RCH_STATUS_RCH1_QUAL_FIFO_EMPTY_BMSK = 0x1000, ++ S_SCMO_RCH_STATUS_RCH1_QUAL_FIFO_EMPTY_SHFT = 0xc, ++ S_SCMO_RCH_STATUS_RCH1_DATA_FIFO_FULL_BMSK = 0x200, ++ S_SCMO_RCH_STATUS_RCH1_DATA_FIFO_FULL_SHFT = 0x9, ++ S_SCMO_RCH_STATUS_RCH1_DATA_FIFO_EMPTY_BMSK = 0x100, ++ S_SCMO_RCH_STATUS_RCH1_DATA_FIFO_EMPTY_SHFT = 0x8, ++ S_SCMO_RCH_STATUS_RCH0_QUAL_FIFO_FULL_BMSK = 0x20, ++ S_SCMO_RCH_STATUS_RCH0_QUAL_FIFO_FULL_SHFT = 0x5, ++ S_SCMO_RCH_STATUS_RCH0_QUAL_FIFO_EMPTY_BMSK = 0x10, ++ S_SCMO_RCH_STATUS_RCH0_QUAL_FIFO_EMPTY_SHFT = 0x4, ++ S_SCMO_RCH_STATUS_RCH0_DATA_FIFO_FULL_BMSK = 0x2, ++ S_SCMO_RCH_STATUS_RCH0_DATA_FIFO_FULL_SHFT = 0x1, ++ S_SCMO_RCH_STATUS_RCH0_DATA_FIFO_EMPTY_BMSK = 0x1, ++ S_SCMO_RCH_STATUS_RCH0_DATA_FIFO_EMPTY_SHFT = 0x0, ++}; ++ ++#define S_SCMO_WCH_BUF_CFG_ADDR(b, n) \ ++ (S_SCMO_REG_BASE(b) + (0x8000 * (n)) + 0x00000580) ++enum bimc_s_scmo_wch_buf_cfg { ++ S_SCMO_WCH_BUF_CFG_RMSK = 0xff, ++ S_SCMO_WCH_BUF_CFG_WRITE_BLOCK_READ_BMSK = 0x10, ++ S_SCMO_WCH_BUF_CFG_WRITE_BLOCK_READ_SHFT = 0x4, ++ S_SCMO_WCH_BUF_CFG_COALESCE_EN_BMSK = 0x1, ++ S_SCMO_WCH_BUF_CFG_COALESCE_EN_SHFT = 0x0, ++}; ++ ++#define S_SCMO_WCH_STATUS_ADDR(b, n) \ ++ (S_SCMO_REG_BASE(b) + (0x8000 * (n)) + 0x000005a0) ++enum bimc_s_scmo_wch_status { ++ S_SCMO_WCH_STATUS_RMSK = 0x333, ++ S_SCMO_WCH_STATUS_BRESP_FIFO_FULL_BMSK = 0x200, ++ S_SCMO_WCH_STATUS_BRESP_FIFO_FULL_SHFT = 0x9, ++ S_SCMO_WCH_STATUS_BRESP_FIFO_EMPTY_BMSK = 0x100, ++ S_SCMO_WCH_STATUS_BRESP_FIFO_EMPTY_SHFT = 0x8, ++ S_SCMO_WCH_STATUS_WDATA_FIFO_FULL_BMSK = 0x20, ++ S_SCMO_WCH_STATUS_WDATA_FIFO_FULL_SHFT = 0x5, ++ S_SCMO_WCH_STATUS_WDATA_FIFO_EMPTY_BMSK = 0x10, ++ S_SCMO_WCH_STATUS_WDATA_FIFO_EMPTY_SHFT = 0x4, ++ S_SCMO_WCH_STATUS_WBUF_FULL_BMSK = 0x2, ++ S_SCMO_WCH_STATUS_WBUF_FULL_SHFT = 0x1, ++ S_SCMO_WCH_STATUS_WBUF_EMPTY_BMSK = 0x1, ++ 
S_SCMO_WCH_STATUS_WBUF_EMPTY_SHFT = 0x0, ++}; ++ ++#define S_SCMO_FLUSH_CFG_ADDR(b, n) \ ++ (S_SCMO_REG_BASE(b) + (0x8000 * (n)) + 0x000005c0) ++enum bimc_s_scmo_flush_cfg { ++ S_SCMO_FLUSH_CFG_RMSK = 0xffffffff, ++ S_SCMO_FLUSH_CFG_FLUSH_IN_ORDER_BMSK = 0x10000000, ++ S_SCMO_FLUSH_CFG_FLUSH_IN_ORDER_SHFT = 0x1c, ++ S_SCMO_FLUSH_CFG_FLUSH_IDLE_DELAY_BMSK = 0x3ff0000, ++ S_SCMO_FLUSH_CFG_FLUSH_IDLE_DELAY_SHFT = 0x10, ++ S_SCMO_FLUSH_CFG_FLUSH_UPPER_LIMIT_BMSK = 0xf00, ++ S_SCMO_FLUSH_CFG_FLUSH_UPPER_LIMIT_SHFT = 0x8, ++ S_SCMO_FLUSH_CFG_FLUSH_LOWER_LIMIT_BMSK = 0xf, ++ S_SCMO_FLUSH_CFG_FLUSH_LOWER_LIMIT_SHFT = 0x0, ++}; ++ ++#define S_SCMO_FLUSH_CMD_ADDR(b, n) \ ++ (S_SCMO_REG_BASE(b) + (0x8000 * (n)) + 0x000005c4) ++enum bimc_s_scmo_flush_cmd { ++ S_SCMO_FLUSH_CMD_RMSK = 0xf, ++ S_SCMO_FLUSH_CMD_FLUSH_ALL_BUF_BMSK = 0x3, ++ S_SCMO_FLUSH_CMD_FLUSH_ALL_BUF_SHFT = 0x0, ++}; ++ ++#define S_SCMO_CMD_OPT_CFG0_ADDR(b, n) \ ++ (S_SCM0_REG_BASE(b) + (0x8000 * (n)) + 0x00000700) ++enum bimc_s_scmo_cmd_opt_cfg0 { ++ S_SCMO_CMD_OPT_CFG0_RMSK = 0xffffff, ++ S_SCMO_CMD_OPT_CFG0_IGNORE_BANK_UNAVL_BMSK = 0x100000, ++ S_SCMO_CMD_OPT_CFG0_IGNORE_BANK_UNAVL_SHFT = 0x14, ++ S_SCMO_CMD_OPT_CFG0_MASK_CMDOUT_PRI_BMSK = 0x10000, ++ S_SCMO_CMD_OPT_CFG0_MASK_CMDOUT_PRI_SHFT = 0x10, ++ S_SCMO_CMD_OPT_CFG0_DPE_CMD_REORDERING_BMSK = 0x1000, ++ S_SCMO_CMD_OPT_CFG0_DPE_CMD_REORDERING_SHFT = 0xc, ++ S_SCMO_CMD_OPT_CFG0_WR_OPT_EN_BMSK = 0x100, ++ S_SCMO_CMD_OPT_CFG0_WR_OPT_EN_SHFT = 0x8, ++ S_SCMO_CMD_OPT_CFG0_RD_OPT_EN_BMSK = 0x10, ++ S_SCMO_CMD_OPT_CFG0_RD_OPT_EN_SHFT = 0x4, ++ S_SCMO_CMD_OPT_CFG0_PAGE_MGMT_POLICY_BMSK = 0x1, ++ S_SCMO_CMD_OPT_CFG0_PAGE_MGMT_POLICY_SHFT = 0x0, ++}; ++ ++#define S_SCMO_CMD_OPT_CFG1_ADDR(b, n) \ ++ (S_SCMO_REG_BASE(b) + (0x8000 * (n)) + 0x00000704) ++enum bimc_s_scmo_cmd_opt_cfg1 { ++ S_SCMO_CMD_OPT_CFG1_RMSK = 0xffffffff, ++ S_SCMO_CMD_OPT_CFG1_HSTP_CMD_TIMEOUT_BMSK = 0x1f000000, ++ S_SCMO_CMD_OPT_CFG1_HSTP_CMD_TIMEOUT_SHFT = 0x18, ++ 
S_SCMO_CMD_OPT_CFG1_HP_CMD_TIMEOUT_BMSK = 0x1f0000, ++ S_SCMO_CMD_OPT_CFG1_HP_CMD_TIMEOUT_SHFT = 0x10, ++ S_SCMO_CMD_OPT_CFG1_MP_CMD_TIMEOUT_BMSK = 0x1f00, ++ S_SCMO_CMD_OPT_CFG1_MP_CMD_TIMEOUT_SHFT = 0x8, ++ S_SCMO_CMD_OPT_CFG1_LP_CMD_TIMEOUT_BMSK = 0x1f, ++ S_SCMO_CMD_OPT_CFG1_LP_CMD_TIMEOUT_SHFT = 0x0, ++}; ++ ++#define S_SCMO_CMD_OPT_CFG2_ADDR(b, n) \ ++ (S_SCMO_REG_BASE(b) + (0x8000 * (n)) + 0x00000708) ++enum bimc_s_scmo_cmd_opt_cfg2 { ++ S_SCMO_CMD_OPT_CFG2_RMSK = 0xff, ++ S_SCMO_CMD_OPT_CFG2_RWOPT_CMD_TIMEOUT_BMSK = 0xf, ++ S_SCMO_CMD_OPT_CFG2_RWOPT_CMD_TIMEOUT_SHFT = 0x0, ++}; ++ ++#define S_SCMO_CMD_OPT_CFG3_ADDR(b, n) \ ++ (S_SCMO_REG_BASE(b) + (0x8000 * (n)) + 0x0000070c) ++enum bimc_s_scmo_cmd_opt_cfg3 { ++ S_SCMO_CMD_OPT_CFG3_RMSK = 0xff, ++ S_SCMO_CMD_OPT_CFG3_FLUSH_CMD_TIMEOUT_BMSK = 0xf, ++ S_SCMO_CMD_OPT_CFG3_FLUSH_CMD_TIMEOUT_SHFT = 0x0, ++}; ++ ++/* S_SWAY_GENERIC */ ++#define S_SWAY_REG_BASE(b) ((b) + 0x00048000) ++ ++#define S_SWAY_CONFIG_INFO_0_ADDR(b, n) \ ++ (S_SWAY_REG_BASE(b) + (0x8000 * (n)) + 0x00000020) ++enum bimc_s_sway_config_info_0 { ++ S_SWAY_CONFIG_INFO_0_RMSK = 0xff0000ff, ++ S_SWAY_CONFIG_INFO_0_SYNC_MODE_BMSK = 0xff000000, ++ S_SWAY_CONFIG_INFO_0_SYNC_MODE_SHFT = 0x18, ++ S_SWAY_CONFIG_INFO_0_FUNC_BMSK = 0xff, ++ S_SWAY_CONFIG_INFO_0_FUNC_SHFT = 0x0, ++}; ++ ++#define S_SWAY_CONFIG_INFO_1_ADDR(b, n) \ ++ (S_SWAY_REG_BASE(b) + (0x8000 * (n)) + 0x00000030) ++enum bimc_s_sway_config_info_1 { ++ S_SWAY_CONFIG_INFO_1_RMSK = 0xffffffff, ++ S_SWAY_CONFIG_INFO_1_MPORT_CONNECTIVITY_BMSK = 0xffffffff, ++ S_SWAY_CONFIG_INFO_1_MPORT_CONNECTIVITY_SHFT = 0x0, ++}; ++ ++#define S_SWAY_CONFIG_INFO_2_ADDR(b, n) \ ++ (S_SWAY_REG_BASE(b) + (0x8000 * (n)) + 0x00000040) ++enum bimc_s_sway_config_info_2 { ++ S_SWAY_CONFIG_INFO_2_RMSK = 0xffff0000, ++ S_SWAY_CONFIG_INFO_2_MPORT_CONNECTIVITY_BMSK = 0xffff0000, ++ S_SWAY_CONFIG_INFO_2_MPORT_CONNECTIVITY_SHFT = 0x10, ++}; ++ ++#define S_SWAY_CONFIG_INFO_3_ADDR(b, n) \ ++ (S_SWAY_REG_BASE(b) + (0x8000 * 
(n)) + 0x00000050) ++enum bimc_s_sway_config_info_3 { ++ S_SWAY_CONFIG_INFO_3_RMSK = 0xffffffff, ++ S_SWAY_CONFIG_INFO_3_RCH0_DEPTH_BMSK = 0xff000000, ++ S_SWAY_CONFIG_INFO_3_RCH0_DEPTH_SHFT = 0x18, ++ S_SWAY_CONFIG_INFO_3_BCH_DEPTH_BMSK = 0xff0000, ++ S_SWAY_CONFIG_INFO_3_BCH_DEPTH_SHFT = 0x10, ++ S_SWAY_CONFIG_INFO_3_WCH_DEPTH_BMSK = 0xff, ++ S_SWAY_CONFIG_INFO_3_WCH_DEPTH_SHFT = 0x0, ++}; ++ ++#define S_SWAY_CONFIG_INFO_4_ADDR(b, n) \ ++ (S_SWAY_REG_BASE(b) + (0x8000 * (n)) + 0x00000060) ++enum bimc_s_sway_config_info_4 { ++ S_SWAY_CONFIG_INFO_4_RMSK = 0x800000ff, ++ S_SWAY_CONFIG_INFO_4_DUAL_RCH_EN_BMSK = 0x80000000, ++ S_SWAY_CONFIG_INFO_4_DUAL_RCH_EN_SHFT = 0x1f, ++ S_SWAY_CONFIG_INFO_4_RCH1_DEPTH_BMSK = 0xff, ++ S_SWAY_CONFIG_INFO_4_RCH1_DEPTH_SHFT = 0x0, ++}; ++ ++#define S_SWAY_CONFIG_INFO_5_ADDR(b, n) \ ++ (S_SWAY_REG_BASE(b) + (0x8000 * (n)) + 0x00000070) ++enum bimc_s_sway_config_info_5 { ++ S_SWAY_CONFIG_INFO_5_RMSK = 0x800000ff, ++ S_SWAY_CONFIG_INFO_5_QCH_EN_BMSK = 0x80000000, ++ S_SWAY_CONFIG_INFO_5_QCH_EN_SHFT = 0x1f, ++ S_SWAY_CONFIG_INFO_5_QCH_DEPTH_BMSK = 0xff, ++ S_SWAY_CONFIG_INFO_5_QCH_DEPTH_SHFT = 0x0, ++}; ++ ++#define S_SWAY_CONFIG_INFO_6_ADDR(b, n) \ ++ (S_SWAY_REG_BASE(b) + (0x8000 * (n)) + 0x00000080) ++enum bimc_s_sway_config_info_6 { ++ S_SWAY_CONFIG_INFO_6_RMSK = 0x1, ++ S_SWAY_CONFIG_INFO_6_S2SW_PIPELINE_EN_BMSK = 0x1, ++ S_SWAY_CONFIG_INFO_6_S2SW_PIPELINE_EN_SHFT = 0x0, ++}; ++ ++#define S_SWAY_INT_STATUS_ADDR(b, n) \ ++ (S_SWAY_REG_BASE(b) + (0x8000 * (n)) + 0x00000100) ++enum bimc_s_sway_int_status { ++ S_SWAY_INT_STATUS_RMSK = 0x3, ++ S_SWAY_INT_STATUS_RFU_BMSK = 0x3, ++ S_SWAY_INT_STATUS_RFU_SHFT = 0x0, ++}; ++ ++#define S_SWAY_INT_CLR_ADDR(b, n) \ ++ (S_SWAY_REG_BASE(b) + (0x8000 * (n)) + 0x00000108) ++enum bimc_s_sway_int_clr { ++ S_SWAY_INT_CLR_RMSK = 0x3, ++ S_SWAY_INT_CLR_RFU_BMSK = 0x3, ++ S_SWAY_INT_CLR_RFU_SHFT = 0x0, ++}; ++ ++ ++#define S_SWAY_INT_EN_ADDR(b, n) \ ++ (S_SWAY_REG_BASE(b) + (0x8000 * (n)) + 0x0000010c) 
++enum bimc_s_sway_int_en { ++ S_SWAY_INT_EN_RMSK = 0x3, ++ S_SWAY_INT_EN_RFU_BMSK = 0x3, ++ S_SWAY_INT_EN_RFU_SHFT = 0x0, ++}; ++ ++#define S_SWAY_CLK_CTRL_ADDR(b, n) \ ++ (S_SWAY_REG_BASE(b) + (0x8000 * (n)) + 0x00000200) ++enum bimc_s_sway_clk_ctrl { ++ S_SWAY_CLK_CTRL_RMSK = 0x3, ++ S_SWAY_CLK_CTRL_SLAVE_CLK_GATING_EN_BMSK = 0x2, ++ S_SWAY_CLK_CTRL_SLAVE_CLK_GATING_EN_SHFT = 0x1, ++ S_SWAY_CLK_CTRL_CORE_CLK_GATING_EN_BMSK = 0x1, ++ S_SWAY_CLK_CTRL_CORE_CLK_GATING_EN_SHFT = 0x0, ++}; ++ ++#define S_SWAY_RCH_SEL_ADDR(b, n) \ ++ (S_SWAY_REG_BASE(b) + (0x8000 * (n)) + 0x00000210) ++enum bimc_s_sway_rch_sel { ++ S_SWAY_RCH_SEL_RMSK = 0x7f, ++ S_SWAY_RCH_SEL_UNUSED_BMSK = 0x7f, ++ S_SWAY_RCH_SEL_UNUSED_SHFT = 0x0, ++}; ++ ++ ++#define S_SWAY_MAX_OUTSTANDING_REQS_ADDR(b, n) \ ++ (S_SWAY_REG_BASE(b) + (0x8000 * (n)) + 0x00000220) ++enum bimc_s_sway_max_outstanding_reqs { ++ S_SWAY_MAX_OUTSTANDING_REQS_RMSK = 0xffff, ++ S_SWAY_MAX_OUTSTANDING_REQS_WRITE_BMSK = 0xff00, ++ S_SWAY_MAX_OUTSTANDING_REQS_WRITE_SHFT = 0x8, ++ S_SWAY_MAX_OUTSTANDING_REQS_READ_BMSK = 0xff, ++ S_SWAY_MAX_OUTSTANDING_REQS_READ_SHFT = 0x0, ++}; ++ ++ ++#define S_SWAY_BUF_STATUS_0_ADDR(b, n) \ ++ (S_SWAY_REG_BASE(b) + (0x8000 * (n)) + 0x00000400) ++enum bimc_s_sway_buf_status_0 { ++ S_SWAY_BUF_STATUS_0_RMSK = 0xf0300f03, ++ S_SWAY_BUF_STATUS_0_RCH0_DATA_RD_FULL_BMSK = 0x80000000, ++ S_SWAY_BUF_STATUS_0_RCH0_DATA_RD_FULL_SHFT = 0x1f, ++ S_SWAY_BUF_STATUS_0_RCH0_DATA_RD_EMPTY_BMSK = 0x40000000, ++ S_SWAY_BUF_STATUS_0_RCH0_DATA_RD_EMPTY_SHFT = 0x1e, ++ S_SWAY_BUF_STATUS_0_RCH0_CTRL_RD_FULL_BMSK = 0x20000000, ++ S_SWAY_BUF_STATUS_0_RCH0_CTRL_RD_FULL_SHFT = 0x1d, ++ S_SWAY_BUF_STATUS_0_RCH0_CTRL_RD_EMPTY_BMSK = 0x10000000, ++ S_SWAY_BUF_STATUS_0_RCH0_CTRL_RD_EMPTY_SHFT = 0x1c, ++ S_SWAY_BUF_STATUS_0_BCH_RD_FULL_BMSK = 0x200000, ++ S_SWAY_BUF_STATUS_0_BCH_RD_FULL_SHFT = 0x15, ++ S_SWAY_BUF_STATUS_0_BCH_RD_EMPTY_BMSK = 0x100000, ++ S_SWAY_BUF_STATUS_0_BCH_RD_EMPTY_SHFT = 0x14, ++ 
S_SWAY_BUF_STATUS_0_WCH_DATA_WR_FULL_BMSK = 0x800, ++ S_SWAY_BUF_STATUS_0_WCH_DATA_WR_FULL_SHFT = 0xb, ++ S_SWAY_BUF_STATUS_0_WCH_DATA_WR_EMPTY_BMSK = 0x400, ++ S_SWAY_BUF_STATUS_0_WCH_DATA_WR_EMPTY_SHFT = 0xa, ++ S_SWAY_BUF_STATUS_0_WCH_CTRL_WR_FULL_BMSK = 0x200, ++ S_SWAY_BUF_STATUS_0_WCH_CTRL_WR_FULL_SHFT = 0x9, ++ S_SWAY_BUF_STATUS_0_WCH_CTRL_WR_EMPTY_BMSK = 0x100, ++ S_SWAY_BUF_STATUS_0_WCH_CTRL_WR_EMPTY_SHFT = 0x8, ++ S_SWAY_BUF_STATUS_0_ACH_WR_FULL_BMSK = 0x2, ++ S_SWAY_BUF_STATUS_0_ACH_WR_FULL_SHFT = 0x1, ++ S_SWAY_BUF_STATUS_0_ACH_WR_EMPTY_BMSK = 0x1, ++ S_SWAY_BUF_STATUS_0_ACH_WR_EMPTY_SHFT = 0x0, ++}; ++ ++#define S_SWAY_BUF_STATUS_1_ADDR(b, n) \ ++ (S_SWAY_REG_BASE(b) + (0x8000 * (n)) + 0x00000410) ++enum bimc_s_sway_buf_status_1 { ++ S_SWAY_BUF_STATUS_1_RMSK = 0xf0, ++ S_SWAY_BUF_STATUS_1_RCH1_DATA_RD_FULL_BMSK = 0x80, ++ S_SWAY_BUF_STATUS_1_RCH1_DATA_RD_FULL_SHFT = 0x7, ++ S_SWAY_BUF_STATUS_1_RCH1_DATA_RD_EMPTY_BMSK = 0x40, ++ S_SWAY_BUF_STATUS_1_RCH1_DATA_RD_EMPTY_SHFT = 0x6, ++ S_SWAY_BUF_STATUS_1_RCH1_CTRL_RD_FULL_BMSK = 0x20, ++ S_SWAY_BUF_STATUS_1_RCH1_CTRL_RD_FULL_SHFT = 0x5, ++ S_SWAY_BUF_STATUS_1_RCH1_CTRL_RD_EMPTY_BMSK = 0x10, ++ S_SWAY_BUF_STATUS_1_RCH1_CTRL_RD_EMPTY_SHFT = 0x4, ++}; ++ ++#define S_SWAY_BUF_STATUS_2_ADDR(b, n) \ ++ (S_SWAY_REG_BASE(b) + (0x8000 * (n)) + 0x00000420) ++enum bimc_s_sway_buf_status_2 { ++ S_SWAY_BUF_STATUS_2_RMSK = 0x30, ++ S_SWAY_BUF_STATUS_2_QCH_RD_FULL_BMSK = 0x20, ++ S_SWAY_BUF_STATUS_2_QCH_RD_FULL_SHFT = 0x5, ++ S_SWAY_BUF_STATUS_2_QCH_RD_EMPTY_BMSK = 0x10, ++ S_SWAY_BUF_STATUS_2_QCH_RD_EMPTY_SHFT = 0x4, ++}; ++ ++/* S_ARB_GENERIC */ ++ ++#define S_ARB_REG_BASE(b) ((b) + 0x00049000) ++ ++#define S_ARB_COMPONENT_INFO_ADDR(b, n) \ ++ (S_SWAY_REG_BASE(b) + (0x8000 * (n)) + 0x00000000) ++enum bimc_s_arb_component_info { ++ S_ARB_COMPONENT_INFO_RMSK = 0xffffff, ++ S_ARB_COMPONENT_INFO_INSTANCE_BMSK = 0xff0000, ++ S_ARB_COMPONENT_INFO_INSTANCE_SHFT = 0x10, ++ S_ARB_COMPONENT_INFO_SUB_TYPE_BMSK = 0xff00, ++ 
S_ARB_COMPONENT_INFO_SUB_TYPE_SHFT = 0x8, ++ S_ARB_COMPONENT_INFO_TYPE_BMSK = 0xff, ++ S_ARB_COMPONENT_INFO_TYPE_SHFT = 0x0, ++}; ++ ++#define S_ARB_CONFIG_INFO_0_ADDR(b, n) \ ++ (S_ARB_REG_BASE(b) + (0x8000 * (n)) + 0x00000020) ++enum bimc_s_arb_config_info_0 { ++ S_ARB_CONFIG_INFO_0_RMSK = 0x800000ff, ++ S_ARB_CONFIG_INFO_0_ARB2SW_PIPELINE_EN_BMSK = 0x80000000, ++ S_ARB_CONFIG_INFO_0_ARB2SW_PIPELINE_EN_SHFT = 0x1f, ++ S_ARB_CONFIG_INFO_0_FUNC_BMSK = 0xff, ++ S_ARB_CONFIG_INFO_0_FUNC_SHFT = 0x0, ++}; ++ ++#define S_ARB_CONFIG_INFO_1_ADDR(b, n) \ ++ (S_ARB_REG_BASE(b) + (0x8000 * (n)) + 0x00000030) ++enum bimc_s_arb_config_info_1 { ++ S_ARB_CONFIG_INFO_1_RMSK = 0xffffffff, ++ S_ARB_CONFIG_INFO_1_MPORT_CONNECTIVITY_BMSK = 0xffffffff, ++ S_ARB_CONFIG_INFO_1_MPORT_CONNECTIVITY_SHFT = 0x0, ++}; ++ ++#define S_ARB_CLK_CTRL_ADDR(b) \ ++ (S_ARB_REG_BASE(b) + (0x8000 * (n)) + 0x00000200) ++enum bimc_s_arb_clk_ctrl { ++ S_ARB_CLK_CTRL_RMSK = 0x1, ++ S_ARB_CLK_CTRL_SLAVE_CLK_GATING_EN_BMSK = 0x2, ++ S_ARB_CLK_CTRL_SLAVE_CLK_GATING_EN_SHFT = 0x1, ++ S_ARB_CLK_CTRL_CORE_CLK_GATING_EN_BMSK = 0x1, ++ S_ARB_CLK_CTRL_CORE_CLK_GATING_EN_SHFT = 0x0, ++ S_ARB_CLK_CTRL_CLK_GATING_EN_BMSK = 0x1, ++ S_ARB_CLK_CTRL_CLK_GATING_EN_SHFT = 0x0, ++}; ++ ++#define S_ARB_MODE_ADDR(b, n) \ ++ (S_ARB_REG_BASE(b) + (0x8000 * (n)) + 0x00000210) ++enum bimc_s_arb_mode { ++ S_ARB_MODE_RMSK = 0xf0000001, ++ S_ARB_MODE_WR_GRANTS_AHEAD_BMSK = 0xf0000000, ++ S_ARB_MODE_WR_GRANTS_AHEAD_SHFT = 0x1c, ++ S_ARB_MODE_PRIO_RR_EN_BMSK = 0x1, ++ S_ARB_MODE_PRIO_RR_EN_SHFT = 0x0, ++}; ++ ++#define BKE_HEALTH_MASK \ ++ (M_BKE_HEALTH_0_CONFIG_LIMIT_CMDS_BMSK |\ ++ M_BKE_HEALTH_0_CONFIG_AREQPRIO_BMSK |\ ++ M_BKE_HEALTH_0_CONFIG_PRIOLVL_BMSK) ++ ++#define BKE_HEALTH_VAL(limit, areq, plvl) \ ++ ((((limit) << M_BKE_HEALTH_0_CONFIG_LIMIT_CMDS_SHFT) & \ ++ M_BKE_HEALTH_0_CONFIG_LIMIT_CMDS_BMSK) | \ ++ (((areq) << M_BKE_HEALTH_0_CONFIG_AREQPRIO_SHFT) & \ ++ M_BKE_HEALTH_0_CONFIG_AREQPRIO_BMSK) | \ ++ (((plvl) << 
M_BKE_HEALTH_0_CONFIG_PRIOLVL_SHFT) & \ ++ M_BKE_HEALTH_0_CONFIG_PRIOLVL_BMSK)) ++ ++#define MAX_GRANT_PERIOD \ ++ (M_BKE_GP_GP_BMSK >> \ ++ M_BKE_GP_GP_SHFT) ++ ++#define MAX_GC \ ++ (M_BKE_GC_GC_BMSK >> \ ++ (M_BKE_GC_GC_SHFT + 1)) ++ ++static int bimc_div(int64_t *a, uint32_t b) ++{ ++ if ((*a > 0) && (*a < b)) { ++ *a = 0; ++ return 1; ++ } else { ++ return do_div(*a, b); ++ } ++} ++ ++#define ENABLE(val) ((val) == 1 ? 1 : 0) ++void msm_bus_bimc_set_mas_clk_gate(struct msm_bus_bimc_info *binfo, ++ uint32_t mas_index, struct msm_bus_bimc_clk_gate *bgate) ++{ ++ uint32_t val, mask, reg_val; ++ void __iomem *addr; ++ ++ reg_val = readl_relaxed(M_CLK_CTRL_ADDR(binfo->base, ++ mas_index)) & M_CLK_CTRL_RMSK; ++ addr = M_CLK_CTRL_ADDR(binfo->base, mas_index); ++ mask = (M_CLK_CTRL_MAS_CLK_GATING_EN_BMSK | ++ M_CLK_CTRL_CORE_CLK_GATING_EN_BMSK); ++ val = (bgate->core_clk_gate_en << ++ M_CLK_CTRL_MAS_CLK_GATING_EN_SHFT) | ++ bgate->port_clk_gate_en; ++ writel_relaxed(((reg_val & (~mask)) | (val & mask)), addr); ++ /* Ensure clock gating enable mask is set before exiting */ ++ wmb(); ++} ++ ++void msm_bus_bimc_arb_en(struct msm_bus_bimc_info *binfo, ++ uint32_t slv_index, bool en) ++{ ++ uint32_t reg_val, reg_mask_val, enable, val; ++ ++ reg_mask_val = (readl_relaxed(S_ARB_CONFIG_INFO_0_ADDR(binfo-> ++ base, slv_index)) & S_ARB_CONFIG_INFO_0_FUNC_BMSK) ++ >> S_ARB_CONFIG_INFO_0_FUNC_SHFT; ++ enable = ENABLE(en); ++ val = enable << S_ARB_MODE_PRIO_RR_EN_SHFT; ++ if (reg_mask_val == BIMC_ARB_MODE_PRIORITY_RR) { ++ reg_val = readl_relaxed(S_ARB_CONFIG_INFO_0_ADDR(binfo-> ++ base, slv_index)) & S_ARB_MODE_RMSK; ++ writel_relaxed(((reg_val & (~(S_ARB_MODE_PRIO_RR_EN_BMSK))) | ++ (val & S_ARB_MODE_PRIO_RR_EN_BMSK)), ++ S_ARB_MODE_ADDR(binfo->base, slv_index)); ++ /* Ensure arbitration mode is set before returning */ ++ wmb(); ++ } ++} ++ ++static void set_qos_mode(void __iomem *baddr, uint32_t index, uint32_t val0, ++ uint32_t val1, uint32_t val2) ++{ ++ uint32_t reg_val, val; 

	/* RMW the priority-level override field with val0 */
	reg_val = readl_relaxed(M_PRIOLVL_OVERRIDE_ADDR(baddr,
		index)) & M_PRIOLVL_OVERRIDE_RMSK;
	val = val0 << M_PRIOLVL_OVERRIDE_OVERRIDE_PRIOLVL_SHFT;
	writel_relaxed(((reg_val & ~(M_PRIOLVL_OVERRIDE_OVERRIDE_PRIOLVL_BMSK))
		| (val & M_PRIOLVL_OVERRIDE_OVERRIDE_PRIOLVL_BMSK)),
		M_PRIOLVL_OVERRIDE_ADDR(baddr, index));
	/* RMW the read-command priority override field with val1 */
	reg_val = readl_relaxed(M_RD_CMD_OVERRIDE_ADDR(baddr, index)) &
		M_RD_CMD_OVERRIDE_RMSK;
	val = val1 << M_RD_CMD_OVERRIDE_OVERRIDE_AREQPRIO_SHFT;
	writel_relaxed(((reg_val & ~(M_RD_CMD_OVERRIDE_OVERRIDE_AREQPRIO_BMSK
		)) | (val & M_RD_CMD_OVERRIDE_OVERRIDE_AREQPRIO_BMSK)),
		M_RD_CMD_OVERRIDE_ADDR(baddr, index));
	/* RMW the write-command priority override field with val2 */
	reg_val = readl_relaxed(M_WR_CMD_OVERRIDE_ADDR(baddr, index)) &
		M_WR_CMD_OVERRIDE_RMSK;
	val = val2 << M_WR_CMD_OVERRIDE_OVERRIDE_AREQPRIO_SHFT;
	writel_relaxed(((reg_val & ~(M_WR_CMD_OVERRIDE_OVERRIDE_AREQPRIO_BMSK
		)) | (val & M_WR_CMD_OVERRIDE_OVERRIDE_AREQPRIO_BMSK)),
		M_WR_CMD_OVERRIDE_ADDR(baddr, index));
	/* Ensure the priority register writes go through */
	wmb();
}

/*
 * Select the QoS mode for one master port.
 * FIXED/BYPASS: disable the bookkeeping engine (BKE), then program the
 * override registers (all-ones for FIXED, all-zeroes for BYPASS).
 * REGULATOR/LIMITER: clear the overrides first, then enable the BKE.
 * The barrier between the two steps orders the BKE write against the
 * override writes.
 */
static void msm_bus_bimc_set_qos_mode(void __iomem *base,
	uint32_t mas_index, uint8_t qmode_sel)
{
	uint32_t reg_val, val;

	switch (qmode_sel) {
	case BIMC_QOS_MODE_FIXED:
		reg_val = readl_relaxed(M_BKE_EN_ADDR(base,
			mas_index));
		writel_relaxed((reg_val & (~M_BKE_EN_EN_BMSK)),
			M_BKE_EN_ADDR(base, mas_index));
		/* Ensure that the book-keeping register writes
		 * go through before setting QoS mode.
		 * QoS mode registers might write beyond 1K
		 * boundary in future
		 */
		wmb();
		set_qos_mode(base, mas_index, 1, 1, 1);
		break;

	case BIMC_QOS_MODE_BYPASS:
		reg_val = readl_relaxed(M_BKE_EN_ADDR(base,
			mas_index));
		writel_relaxed((reg_val & (~M_BKE_EN_EN_BMSK)),
			M_BKE_EN_ADDR(base, mas_index));
		/* Ensure that the book-keeping register writes
		 * go through before setting QoS mode.
		 * QoS mode registers might write beyond 1K
		 * boundary in future
		 */
		wmb();
		set_qos_mode(base, mas_index, 0, 0, 0);
		break;

	case BIMC_QOS_MODE_REGULATOR:
	case BIMC_QOS_MODE_LIMITER:
		set_qos_mode(base, mas_index, 0, 0, 0);
		reg_val = readl_relaxed(M_BKE_EN_ADDR(base,
			mas_index));
		val = 1 << M_BKE_EN_EN_SHFT;
		/* Ensure that the book-keeping register writes
		 * go through before setting QoS mode.
		 * QoS mode registers might write beyond 1K
		 * boundary in future
		 */
		wmb();
		writel_relaxed(((reg_val & (~M_BKE_EN_EN_BMSK)) | (val &
			M_BKE_EN_EN_BMSK)), M_BKE_EN_ADDR(base,
			mas_index));
		break;
	default:
		break;
	}
}

/*
 * Program one M_BKE_HEALTH_n_CONFIG register from the regulator/limiter
 * health settings at qmode->rl.qhealth[index].  'addr' is the fully
 * resolved register address and 'rmsk' its read mask.
 */
static void set_qos_prio_rl(void __iomem *addr, uint32_t rmsk,
	uint8_t index, struct msm_bus_bimc_qos_mode *qmode)
{
	uint32_t reg_val, val0, val;

	/* Note, addr is already passed with right mas_index */
	reg_val = readl_relaxed(addr) & rmsk;
	val0 = BKE_HEALTH_VAL(qmode->rl.qhealth[index].limit_commands,
		qmode->rl.qhealth[index].areq_prio,
		qmode->rl.qhealth[index].prio_level);
	val = ((reg_val & (~(BKE_HEALTH_MASK))) | (val0 & BKE_HEALTH_MASK));
	writel_relaxed(val, addr);
	/* Ensure that priority for regulator/limiter modes are
	 * set before returning
	 */
	wmb();

}

/*
 * Apply the per-mode priority settings for one master port:
 * FIXED writes the three override registers from qmode->fixed;
 * REGULATOR/LIMITER program the four BKE health registers (3..0).
 */
static void msm_bus_bimc_set_qos_prio(void __iomem *base,
	uint32_t mas_index, uint8_t qmode_sel,
	struct msm_bus_bimc_qos_mode *qmode)
{
	uint32_t reg_val, val;

	switch (qmode_sel) {
	case BIMC_QOS_MODE_FIXED:
		reg_val = readl_relaxed(M_PRIOLVL_OVERRIDE_ADDR(
			base, mas_index)) & M_PRIOLVL_OVERRIDE_RMSK;
		val = qmode->fixed.prio_level <<
			M_PRIOLVL_OVERRIDE_SHFT;
		writel_relaxed(((reg_val &
			~(M_PRIOLVL_OVERRIDE_BMSK)) | (val
			& M_PRIOLVL_OVERRIDE_BMSK)),
			M_PRIOLVL_OVERRIDE_ADDR(base, mas_index));

		reg_val = readl_relaxed(M_RD_CMD_OVERRIDE_ADDR(
			base, mas_index)) & M_RD_CMD_OVERRIDE_RMSK;
		val = qmode->fixed.areq_prio_rd <<
			M_RD_CMD_OVERRIDE_AREQPRIO_SHFT;
		writel_relaxed(((reg_val & ~(M_RD_CMD_OVERRIDE_AREQPRIO_BMSK))
			| (val & M_RD_CMD_OVERRIDE_AREQPRIO_BMSK)),
			M_RD_CMD_OVERRIDE_ADDR(base, mas_index));

		reg_val = readl_relaxed(M_WR_CMD_OVERRIDE_ADDR(
			base, mas_index)) & M_WR_CMD_OVERRIDE_RMSK;
		val = qmode->fixed.areq_prio_wr <<
			M_WR_CMD_OVERRIDE_AREQPRIO_SHFT;
		writel_relaxed(((reg_val & ~(M_WR_CMD_OVERRIDE_AREQPRIO_BMSK))
			| (val & M_WR_CMD_OVERRIDE_AREQPRIO_BMSK)),
			M_WR_CMD_OVERRIDE_ADDR(base, mas_index));
		/* Ensure that fixed mode register writes go through
		 * before returning
		 */
		wmb();
		break;

	case BIMC_QOS_MODE_REGULATOR:
	case BIMC_QOS_MODE_LIMITER:
		/* Program all four BKE health registers, highest first */
		set_qos_prio_rl(M_BKE_HEALTH_3_CONFIG_ADDR(base,
			mas_index), M_BKE_HEALTH_3_CONFIG_RMSK, 3, qmode);
		set_qos_prio_rl(M_BKE_HEALTH_2_CONFIG_ADDR(base,
			mas_index), M_BKE_HEALTH_2_CONFIG_RMSK, 2, qmode);
		set_qos_prio_rl(M_BKE_HEALTH_1_CONFIG_ADDR(base,
			mas_index), M_BKE_HEALTH_1_CONFIG_RMSK, 1, qmode);
		set_qos_prio_rl(M_BKE_HEALTH_0_CONFIG_ADDR(base,
			mas_index), M_BKE_HEALTH_0_CONFIG_RMSK, 0 , qmode);
		break;
	case BIMC_QOS_MODE_BYPASS:
	default:
		break;
	}
}

/*
 * Write the computed bandwidth-enforcement values for one master port:
 * grant period (gp), grant count (gc) and the high/medium/low
 * thresholds (th/tm/tl).  The BKE is disabled for the duration of the
 * update, as the hardware spec requires.
 * NOTE(review): th/tm/tl arrive as int32_t and the THM/THL values pass
 * through an int16_t intermediate — relies on the computed thresholds
 * fitting those widths; confirm against the field sizes.
 */
static void set_qos_bw_regs(void __iomem *baddr, uint32_t mas_index,
	int32_t th, int32_t tm, int32_t tl, uint32_t gp,
	uint32_t gc)
{
	int32_t reg_val, val;
	int32_t bke_reg_val;
	int16_t val2;

	/* Disable BKE before writing to registers as per spec */
	bke_reg_val = readl_relaxed(M_BKE_EN_ADDR(baddr, mas_index));
	writel_relaxed((bke_reg_val & ~(M_BKE_EN_EN_BMSK)),
		M_BKE_EN_ADDR(baddr, mas_index));

	/* Write values of registers calculated */
	reg_val = readl_relaxed(M_BKE_GP_ADDR(baddr, mas_index))
		& M_BKE_GP_RMSK;
	val = gp << M_BKE_GP_GP_SHFT;
	writel_relaxed(((reg_val & ~(M_BKE_GP_GP_BMSK)) | (val &
		M_BKE_GP_GP_BMSK)), M_BKE_GP_ADDR(baddr, mas_index));

	reg_val = readl_relaxed(M_BKE_GC_ADDR(baddr, mas_index)) &
		M_BKE_GC_RMSK;
	val = gc << M_BKE_GC_GC_SHFT;
	writel_relaxed(((reg_val & ~(M_BKE_GC_GC_BMSK)) | (val &
		M_BKE_GC_GC_BMSK)), M_BKE_GC_ADDR(baddr, mas_index));

	reg_val = readl_relaxed(M_BKE_THH_ADDR(baddr, mas_index)) &
		M_BKE_THH_RMSK;
	val = th << M_BKE_THH_THRESH_SHFT;
	writel_relaxed(((reg_val & ~(M_BKE_THH_THRESH_BMSK)) | (val &
		M_BKE_THH_THRESH_BMSK)), M_BKE_THH_ADDR(baddr, mas_index));

	reg_val = readl_relaxed(M_BKE_THM_ADDR(baddr, mas_index)) &
		M_BKE_THM_RMSK;
	val2 = tm << M_BKE_THM_THRESH_SHFT;
	writel_relaxed(((reg_val & ~(M_BKE_THM_THRESH_BMSK)) | (val2 &
		M_BKE_THM_THRESH_BMSK)), M_BKE_THM_ADDR(baddr, mas_index));

	reg_val = readl_relaxed(M_BKE_THL_ADDR(baddr, mas_index)) &
		M_BKE_THL_RMSK;
	val2 = tl << M_BKE_THL_THRESH_SHFT;
	writel_relaxed(((reg_val & ~(M_BKE_THL_THRESH_BMSK)) |
		(val2 & M_BKE_THL_THRESH_BMSK)), M_BKE_THL_ADDR(baddr,
		mas_index));

	/* Ensure that all bandwidth register writes have completed
	 * before returning
	 */
	wmb();
}

/*
 * Convert a bandwidth request (qbw->bw in MB/s, window qbw->ws) into
 * BKE grant period / grant count / threshold register values and
 * program them.  qos_freq is the QoS clock in KHz; a zero frequency or
 * an empty request clears the registers instead.
 */
static void msm_bus_bimc_set_qos_bw(void __iomem *base, uint32_t qos_freq,
	uint32_t mas_index, struct msm_bus_bimc_qos_bw *qbw)
{
	uint32_t bke_en;

	/* Validate QOS Frequency */
	if (qos_freq == 0) {
		MSM_BUS_DBG("Zero frequency\n");
		return;
	}

	/* Get enable bit for BKE before programming the period */
	bke_en = (readl_relaxed(M_BKE_EN_ADDR(base, mas_index)) &
		M_BKE_EN_EN_BMSK) >> M_BKE_EN_EN_SHFT;

	/* Only calculate if there's a requested bandwidth and window */
	if (qbw->bw && qbw->ws) {
		int64_t th, tm, tl;
		uint32_t gp, gc;
		int64_t gp_nominal, gp_required, gp_calc, data, temp;
		int64_t win = qbw->ws * qos_freq;
		temp = win;
		/*
		 * Calculate nominal grant period defined by requested
		 * window size.
		 * Ceil this value to max grant period.
		 */
		bimc_div(&temp, 1000000);
		gp_nominal = min_t(uint64_t, MAX_GRANT_PERIOD, temp);
		/*
		 * Calculate max window size, defined by bw request.
++ * Units: (KHz, MB/s) ++ */ ++ gp_calc = MAX_GC * qos_freq * 1000; ++ gp_required = gp_calc; ++ bimc_div(&gp_required, qbw->bw); ++ ++ /* User min of two grant periods */ ++ gp = min_t(int64_t, gp_nominal, gp_required); ++ ++ /* Calculate bandwith in grants and ceil. */ ++ temp = qbw->bw * gp; ++ data = qos_freq * 1000; ++ bimc_div(&temp, data); ++ gc = min_t(int64_t, MAX_GC, temp); ++ ++ /* Calculate thresholds */ ++ th = qbw->bw - qbw->thh; ++ tm = qbw->bw - qbw->thm; ++ tl = qbw->bw - qbw->thl; ++ ++ th = th * gp; ++ bimc_div(&th, data); ++ tm = tm * gp; ++ bimc_div(&tm, data); ++ tl = tl * gp; ++ bimc_div(&tl, data); ++ ++ MSM_BUS_DBG("BIMC: BW: mas_index: %d, th: %llu tm: %llu\n", ++ mas_index, th, tm); ++ MSM_BUS_DBG("BIMC: tl: %llu gp:%u gc: %u bke_en: %u\n", ++ tl, gp, gc, bke_en); ++ set_qos_bw_regs(base, mas_index, th, tm, tl, gp, gc); ++ } else ++ /* Clear bandwidth registers */ ++ set_qos_bw_regs(base, mas_index, 0, 0, 0, 0, 0); ++} ++ ++static int msm_bus_bimc_allocate_commit_data(struct msm_bus_fabric_registration ++ *fab_pdata, void **cdata, int ctx) ++{ ++ struct msm_bus_bimc_commit **cd = (struct msm_bus_bimc_commit **)cdata; ++ struct msm_bus_bimc_info *binfo = ++ (struct msm_bus_bimc_info *)fab_pdata->hw_data; ++ ++ MSM_BUS_DBG("Allocating BIMC commit data\n"); ++ *cd = kzalloc(sizeof(struct msm_bus_bimc_commit), GFP_KERNEL); ++ if (!*cd) { ++ MSM_BUS_DBG("Couldn't alloc mem for cdata\n"); ++ return -ENOMEM; ++ } ++ ++ (*cd)->mas = binfo->cdata[ctx].mas; ++ (*cd)->slv = binfo->cdata[ctx].slv; ++ ++ return 0; ++} ++ ++static void *msm_bus_bimc_allocate_bimc_data(struct platform_device *pdev, ++ struct msm_bus_fabric_registration *fab_pdata) ++{ ++ struct resource *bimc_mem; ++ struct resource *bimc_io; ++ struct msm_bus_bimc_info *binfo; ++ int i; ++ ++ MSM_BUS_DBG("Allocating BIMC data\n"); ++ binfo = kzalloc(sizeof(struct msm_bus_bimc_info), GFP_KERNEL); ++ if (!binfo) { ++ WARN(!binfo, "Couldn't alloc mem for bimc_info\n"); ++ return NULL; ++ 
} ++ ++ binfo->qos_freq = fab_pdata->qos_freq; ++ ++ binfo->params.nmasters = fab_pdata->nmasters; ++ binfo->params.nslaves = fab_pdata->nslaves; ++ binfo->params.bus_id = fab_pdata->id; ++ ++ for (i = 0; i < NUM_CTX; i++) { ++ binfo->cdata[i].mas = kzalloc(sizeof(struct ++ msm_bus_node_hw_info) * fab_pdata->nmasters * 2, ++ GFP_KERNEL); ++ if (!binfo->cdata[i].mas) { ++ MSM_BUS_ERR("Couldn't alloc mem for bimc master hw\n"); ++ kfree(binfo); ++ return NULL; ++ } ++ ++ binfo->cdata[i].slv = kzalloc(sizeof(struct ++ msm_bus_node_hw_info) * fab_pdata->nslaves * 2, ++ GFP_KERNEL); ++ if (!binfo->cdata[i].slv) { ++ MSM_BUS_DBG("Couldn't alloc mem for bimc slave hw\n"); ++ kfree(binfo->cdata[i].mas); ++ kfree(binfo); ++ return NULL; ++ } ++ } ++ ++ if (fab_pdata->virt) { ++ MSM_BUS_DBG("Don't get memory regions for virtual fabric\n"); ++ goto skip_mem; ++ } ++ ++ bimc_mem = platform_get_resource(pdev, IORESOURCE_MEM, 0); ++ if (!bimc_mem) { ++ MSM_BUS_ERR("Cannot get BIMC Base address\n"); ++ kfree(binfo); ++ return NULL; ++ } ++ ++ bimc_io = request_mem_region(bimc_mem->start, ++ resource_size(bimc_mem), pdev->name); ++ if (!bimc_io) { ++ MSM_BUS_ERR("BIMC memory unavailable\n"); ++ kfree(binfo); ++ return NULL; ++ } ++ ++ binfo->base = ioremap(bimc_mem->start, resource_size(bimc_mem)); ++ if (!binfo->base) { ++ MSM_BUS_ERR("IOremap failed for BIMC!\n"); ++ release_mem_region(bimc_mem->start, resource_size(bimc_mem)); ++ kfree(binfo); ++ return NULL; ++ } ++ ++skip_mem: ++ fab_pdata->hw_data = (void *)binfo; ++ return (void *)binfo; ++} ++ ++static void free_commit_data(void *cdata) ++{ ++ struct msm_bus_bimc_commit *cd = (struct msm_bus_bimc_commit *)cdata; ++ ++ kfree(cd->mas); ++ kfree(cd->slv); ++ kfree(cd); ++} ++ ++static void bke_switch( ++ void __iomem *baddr, uint32_t mas_index, bool req, int mode) ++{ ++ uint32_t reg_val, val, cur_val; ++ ++ val = req << M_BKE_EN_EN_SHFT; ++ reg_val = readl_relaxed(M_BKE_EN_ADDR(baddr, mas_index)); ++ cur_val = reg_val & 
M_BKE_EN_RMSK; ++ if (val == cur_val) ++ return; ++ ++ if (!req && mode == BIMC_QOS_MODE_FIXED) ++ set_qos_mode(baddr, mas_index, 1, 1, 1); ++ ++ writel_relaxed(((reg_val & ~(M_BKE_EN_EN_BMSK)) | (val & ++ M_BKE_EN_EN_BMSK)), M_BKE_EN_ADDR(baddr, mas_index)); ++ /* Make sure BKE on/off goes through before changing priorities */ ++ wmb(); ++ ++ if (req) ++ set_qos_mode(baddr, mas_index, 0, 0, 0); ++} ++ ++static void bimc_set_static_qos_bw(void __iomem *base, unsigned int qos_freq, ++ int mport, struct msm_bus_bimc_qos_bw *qbw) ++{ ++ int32_t bw_mbps, thh = 0, thm, thl, gc; ++ int32_t gp; ++ u64 temp; ++ ++ if (qos_freq == 0) { ++ MSM_BUS_DBG("No QoS Frequency.\n"); ++ return; ++ } ++ ++ if (!(qbw->bw && qbw->gp)) { ++ MSM_BUS_DBG("No QoS Bandwidth or Window size\n"); ++ return; ++ } ++ ++ /* Convert bandwidth to MBPS */ ++ temp = qbw->bw; ++ bimc_div(&temp, 1000000); ++ bw_mbps = temp; ++ ++ /* Grant period in clock cycles ++ * Grant period from bandwidth structure ++ * is in nano seconds, QoS freq is in KHz. ++ * Divide by 1000 to get clock cycles. 
++ */ ++ gp = (qos_freq * qbw->gp) / (1000 * NSEC_PER_USEC); ++ ++ /* Grant count = BW in MBps * Grant period ++ * in micro seconds ++ */ ++ gc = bw_mbps * (qbw->gp / NSEC_PER_USEC); ++ gc = min(gc, MAX_GC); ++ ++ /* Medium threshold = -((Medium Threshold percentage * ++ * Grant count) / 100) ++ */ ++ thm = -((qbw->thmp * gc) / 100); ++ qbw->thm = thm; ++ ++ /* Low threshold = -(Grant count) */ ++ thl = -gc; ++ qbw->thl = thl; ++ ++ MSM_BUS_DBG("%s: BKE parameters: gp %d, gc %d, thm %d thl %d thh %d", ++ __func__, gp, gc, thm, thl, thh); ++ ++ trace_bus_bke_params(gc, gp, thl, thm, thl); ++ set_qos_bw_regs(base, mport, thh, thm, thl, gp, gc); ++} ++ ++static void msm_bus_bimc_config_master( ++ struct msm_bus_fabric_registration *fab_pdata, ++ struct msm_bus_inode_info *info, ++ uint64_t req_clk, uint64_t req_bw) ++{ ++ int mode, i, ports; ++ struct msm_bus_bimc_info *binfo; ++ uint64_t bw = 0; ++ ++ binfo = (struct msm_bus_bimc_info *)fab_pdata->hw_data; ++ ports = info->node_info->num_mports; ++ ++ /** ++ * Here check the details of dual configuration. ++ * Take actions based on different modes. ++ * Check for threshold if limiter mode, etc. 
++ */ ++ ++ if (req_clk <= info->node_info->th[0]) { ++ mode = info->node_info->mode; ++ bw = info->node_info->bimc_bw[0]; ++ } else if ((info->node_info->num_thresh > 1) && ++ (req_clk <= info->node_info->th[1])) { ++ mode = info->node_info->mode; ++ bw = info->node_info->bimc_bw[1]; ++ } else ++ mode = info->node_info->mode_thresh; ++ ++ switch (mode) { ++ case BIMC_QOS_MODE_BYPASS: ++ case BIMC_QOS_MODE_FIXED: ++ for (i = 0; i < ports; i++) ++ bke_switch(binfo->base, info->node_info->qport[i], ++ BKE_OFF, mode); ++ break; ++ case BIMC_QOS_MODE_REGULATOR: ++ case BIMC_QOS_MODE_LIMITER: ++ for (i = 0; i < ports; i++) { ++ /* If not in fixed mode, update bandwidth */ ++ if ((info->node_info->cur_lim_bw != bw) ++ && (mode != BIMC_QOS_MODE_FIXED)) { ++ struct msm_bus_bimc_qos_bw qbw; ++ qbw.ws = info->node_info->ws; ++ qbw.bw = bw; ++ qbw.gp = info->node_info->bimc_gp; ++ qbw.thmp = info->node_info->bimc_thmp; ++ bimc_set_static_qos_bw(binfo->base, ++ binfo->qos_freq, ++ info->node_info->qport[i], &qbw); ++ info->node_info->cur_lim_bw = bw; ++ MSM_BUS_DBG("%s: Qos is %d reqclk %llu bw %llu", ++ __func__, mode, req_clk, bw); ++ } ++ bke_switch(binfo->base, info->node_info->qport[i], ++ BKE_ON, mode); ++ } ++ break; ++ default: ++ break; ++ } ++} ++ ++static void msm_bus_bimc_update_bw(struct msm_bus_inode_info *hop, ++ struct msm_bus_inode_info *info, ++ struct msm_bus_fabric_registration *fab_pdata, ++ void *sel_cdata, int *master_tiers, ++ int64_t add_bw) ++{ ++ struct msm_bus_bimc_info *binfo; ++ struct msm_bus_bimc_qos_bw qbw; ++ int i; ++ int64_t bw; ++ int ports = info->node_info->num_mports; ++ struct msm_bus_bimc_commit *sel_cd = ++ (struct msm_bus_bimc_commit *)sel_cdata; ++ ++ MSM_BUS_DBG("BIMC: Update bw for ID %d, with IID: %d: %lld\n", ++ info->node_info->id, info->node_info->priv_id, add_bw); ++ ++ binfo = (struct msm_bus_bimc_info *)fab_pdata->hw_data; ++ ++ if (info->node_info->num_mports == 0) { ++ MSM_BUS_DBG("BIMC: Skip Master BW\n"); ++ goto 
skip_mas_bw; ++ } ++ ++ ports = info->node_info->num_mports; ++ bw = INTERLEAVED_BW(fab_pdata, add_bw, ports); ++ ++ for (i = 0; i < ports; i++) { ++ sel_cd->mas[info->node_info->masterp[i]].bw += bw; ++ sel_cd->mas[info->node_info->masterp[i]].hw_id = ++ info->node_info->mas_hw_id; ++ MSM_BUS_DBG("BIMC: Update mas_bw for ID: %d -> %llu\n", ++ info->node_info->priv_id, ++ sel_cd->mas[info->node_info->masterp[i]].bw); ++ if (info->node_info->hw_sel == MSM_BUS_RPM) ++ sel_cd->mas[info->node_info->masterp[i]].dirty = 1; ++ else { ++ if (!info->node_info->qport) { ++ MSM_BUS_DBG("No qos ports to update!\n"); ++ break; ++ } ++ if (!(info->node_info->mode == BIMC_QOS_MODE_REGULATOR) ++ || (info->node_info->mode == ++ BIMC_QOS_MODE_LIMITER)) { ++ MSM_BUS_DBG("Skip QoS reg programming\n"); ++ break; ++ } ++ ++ MSM_BUS_DBG("qport: %d\n", info->node_info->qport[i]); ++ qbw.bw = sel_cd->mas[info->node_info->masterp[i]].bw; ++ qbw.ws = info->node_info->ws; ++ /* Threshold low = 90% of bw */ ++ qbw.thl = div_s64((90 * bw), 100); ++ /* Threshold medium = bw */ ++ qbw.thm = bw; ++ /* Threshold high = 10% more than bw */ ++ qbw.thh = div_s64((110 * bw), 100); ++ /* Check if info is a shared master. ++ * If it is, mark it dirty ++ * If it isn't, then set QOS Bandwidth. ++ * Also if dual-conf is set, don't program bw regs. 
++ **/ ++ if (!info->node_info->dual_conf && ++ ((info->node_info->mode == BIMC_QOS_MODE_LIMITER) || ++ (info->node_info->mode == BIMC_QOS_MODE_REGULATOR))) ++ msm_bus_bimc_set_qos_bw(binfo->base, ++ binfo->qos_freq, ++ info->node_info->qport[i], &qbw); ++ } ++ } ++ ++skip_mas_bw: ++ ports = hop->node_info->num_sports; ++ MSM_BUS_DBG("BIMC: ID: %d, Sports: %d\n", hop->node_info->priv_id, ++ ports); ++ ++ for (i = 0; i < ports; i++) { ++ sel_cd->slv[hop->node_info->slavep[i]].bw += add_bw; ++ sel_cd->slv[hop->node_info->slavep[i]].hw_id = ++ hop->node_info->slv_hw_id; ++ MSM_BUS_DBG("BIMC: Update slave_bw: ID: %d -> %llu\n", ++ hop->node_info->priv_id, ++ sel_cd->slv[hop->node_info->slavep[i]].bw); ++ MSM_BUS_DBG("BIMC: Update slave_bw: index: %d\n", ++ hop->node_info->slavep[i]); ++ /* Check if hop is a shared slave. ++ * If it is, mark it dirty ++ * If it isn't, then nothing to be done as the ++ * slaves are in bypass mode. ++ **/ ++ if (hop->node_info->hw_sel == MSM_BUS_RPM) { ++ MSM_BUS_DBG("Slave dirty: %d, slavep: %d\n", ++ hop->node_info->priv_id, ++ hop->node_info->slavep[i]); ++ sel_cd->slv[hop->node_info->slavep[i]].dirty = 1; ++ } ++ } ++} ++ ++static int msm_bus_bimc_commit(struct msm_bus_fabric_registration ++ *fab_pdata, void *hw_data, void **cdata) ++{ ++ MSM_BUS_DBG("\nReached BIMC Commit\n"); ++ msm_bus_remote_hw_commit(fab_pdata, hw_data, cdata); ++ return 0; ++} ++ ++static void msm_bus_bimc_config_limiter( ++ struct msm_bus_fabric_registration *fab_pdata, ++ struct msm_bus_inode_info *info) ++{ ++ struct msm_bus_bimc_info *binfo; ++ int mode, i, ports; ++ ++ binfo = (struct msm_bus_bimc_info *)fab_pdata->hw_data; ++ ports = info->node_info->num_mports; ++ ++ if (!info->node_info->qport) { ++ MSM_BUS_DBG("No QoS Ports to init\n"); ++ return; ++ } ++ ++ if (info->cur_lim_bw) ++ mode = BIMC_QOS_MODE_LIMITER; ++ else ++ mode = info->node_info->mode; ++ ++ switch (mode) { ++ case BIMC_QOS_MODE_BYPASS: ++ case BIMC_QOS_MODE_FIXED: ++ for (i = 0; i < 
ports; i++) ++ bke_switch(binfo->base, info->node_info->qport[i], ++ BKE_OFF, mode); ++ break; ++ case BIMC_QOS_MODE_REGULATOR: ++ case BIMC_QOS_MODE_LIMITER: ++ if (info->cur_lim_bw != info->cur_prg_bw) { ++ MSM_BUS_DBG("Enabled BKE throttling node %d to %llu\n", ++ info->node_info->id, info->cur_lim_bw); ++ trace_bus_bimc_config_limiter(info->node_info->id, ++ info->cur_lim_bw); ++ for (i = 0; i < ports; i++) { ++ /* If not in fixed mode, update bandwidth */ ++ struct msm_bus_bimc_qos_bw qbw; ++ ++ qbw.ws = info->node_info->ws; ++ qbw.bw = info->cur_lim_bw; ++ qbw.gp = info->node_info->bimc_gp; ++ qbw.thmp = info->node_info->bimc_thmp; ++ bimc_set_static_qos_bw(binfo->base, ++ binfo->qos_freq, ++ info->node_info->qport[i], &qbw); ++ bke_switch(binfo->base, ++ info->node_info->qport[i], ++ BKE_ON, mode); ++ info->cur_prg_bw = qbw.bw; ++ } ++ } ++ break; ++ default: ++ break; ++ } ++} ++ ++static void bimc_init_mas_reg(struct msm_bus_bimc_info *binfo, ++ struct msm_bus_inode_info *info, ++ struct msm_bus_bimc_qos_mode *qmode, int mode) ++{ ++ int i; ++ ++ switch (mode) { ++ case BIMC_QOS_MODE_FIXED: ++ qmode->fixed.prio_level = info->node_info->prio_lvl; ++ qmode->fixed.areq_prio_rd = info->node_info->prio_rd; ++ qmode->fixed.areq_prio_wr = info->node_info->prio_wr; ++ break; ++ case BIMC_QOS_MODE_LIMITER: ++ qmode->rl.qhealth[0].limit_commands = 1; ++ qmode->rl.qhealth[1].limit_commands = 0; ++ qmode->rl.qhealth[2].limit_commands = 0; ++ qmode->rl.qhealth[3].limit_commands = 0; ++ break; ++ default: ++ break; ++ } ++ ++ if (!info->node_info->qport) { ++ MSM_BUS_DBG("No QoS Ports to init\n"); ++ return; ++ } ++ ++ for (i = 0; i < info->node_info->num_mports; i++) { ++ /* If not in bypass mode, update priority */ ++ if (mode != BIMC_QOS_MODE_BYPASS) { ++ msm_bus_bimc_set_qos_prio(binfo->base, ++ info->node_info-> ++ qport[i], mode, qmode); ++ ++ /* If not in fixed mode, update bandwidth */ ++ if (mode != BIMC_QOS_MODE_FIXED) { ++ struct msm_bus_bimc_qos_bw qbw; ++ 
qbw.ws = info->node_info->ws; ++ qbw.bw = info->node_info->bimc_bw[0]; ++ qbw.gp = info->node_info->bimc_gp; ++ qbw.thmp = info->node_info->bimc_thmp; ++ bimc_set_static_qos_bw(binfo->base, ++ binfo->qos_freq, ++ info->node_info->qport[i], &qbw); ++ } ++ } ++ ++ /* set mode */ ++ msm_bus_bimc_set_qos_mode(binfo->base, ++ info->node_info->qport[i], ++ mode); ++ } ++} ++ ++static void init_health_regs(struct msm_bus_bimc_info *binfo, ++ struct msm_bus_inode_info *info, ++ struct msm_bus_bimc_qos_mode *qmode, ++ int mode) ++{ ++ int i; ++ ++ if (mode == BIMC_QOS_MODE_LIMITER) { ++ qmode->rl.qhealth[0].limit_commands = 1; ++ qmode->rl.qhealth[1].limit_commands = 0; ++ qmode->rl.qhealth[2].limit_commands = 0; ++ qmode->rl.qhealth[3].limit_commands = 0; ++ ++ if (!info->node_info->qport) { ++ MSM_BUS_DBG("No QoS Ports to init\n"); ++ return; ++ } ++ ++ for (i = 0; i < info->node_info->num_mports; i++) { ++ /* If not in bypass mode, update priority */ ++ if (mode != BIMC_QOS_MODE_BYPASS) ++ msm_bus_bimc_set_qos_prio(binfo->base, ++ info->node_info->qport[i], mode, qmode); ++ } ++ } ++} ++ ++ ++static int msm_bus_bimc_mas_init(struct msm_bus_bimc_info *binfo, ++ struct msm_bus_inode_info *info) ++{ ++ struct msm_bus_bimc_qos_mode *qmode; ++ qmode = kzalloc(sizeof(struct msm_bus_bimc_qos_mode), ++ GFP_KERNEL); ++ if (!qmode) { ++ MSM_BUS_WARN("Couldn't alloc prio data for node: %d\n", ++ info->node_info->id); ++ return -ENOMEM; ++ } ++ ++ info->hw_data = (void *)qmode; ++ ++ /** ++ * If the master supports dual configuration, ++ * configure registers for both modes ++ */ ++ if (info->node_info->dual_conf) ++ bimc_init_mas_reg(binfo, info, qmode, ++ info->node_info->mode_thresh); ++ else if (info->node_info->nr_lim) ++ init_health_regs(binfo, info, qmode, BIMC_QOS_MODE_LIMITER); ++ ++ bimc_init_mas_reg(binfo, info, qmode, info->node_info->mode); ++ return 0; ++} ++ ++static void msm_bus_bimc_node_init(void *hw_data, ++ struct msm_bus_inode_info *info) ++{ ++ struct 
msm_bus_bimc_info *binfo = ++ (struct msm_bus_bimc_info *)hw_data; ++ ++ if (!IS_SLAVE(info->node_info->priv_id) && ++ (info->node_info->hw_sel != MSM_BUS_RPM)) ++ msm_bus_bimc_mas_init(binfo, info); ++} ++ ++static int msm_bus_bimc_port_halt(uint32_t haltid, uint8_t mport) ++{ ++ return 0; ++} ++ ++static int msm_bus_bimc_port_unhalt(uint32_t haltid, uint8_t mport) ++{ ++ return 0; ++} ++ ++static int msm_bus_bimc_limit_mport(struct msm_bus_node_device_type *info, ++ void __iomem *qos_base, uint32_t qos_off, ++ uint32_t qos_delta, uint32_t qos_freq, ++ bool enable_lim, u64 lim_bw) ++{ ++ int mode; ++ int i; ++ ++ if (ZERO_OR_NULL_PTR(info->node_info->qport)) { ++ MSM_BUS_DBG("No QoS Ports to limit\n"); ++ return 0; ++ } ++ ++ if (enable_lim && lim_bw) { ++ mode = BIMC_QOS_MODE_LIMITER; ++ ++ if (!info->node_info->lim_bw) { ++ struct msm_bus_bimc_qos_mode qmode; ++ qmode.rl.qhealth[0].limit_commands = 1; ++ qmode.rl.qhealth[1].limit_commands = 0; ++ qmode.rl.qhealth[2].limit_commands = 0; ++ qmode.rl.qhealth[3].limit_commands = 0; ++ ++ for (i = 0; i < info->node_info->num_qports; i++) { ++ /* If not in bypass mode, update priority */ ++ if (mode != BIMC_QOS_MODE_BYPASS) ++ msm_bus_bimc_set_qos_prio(qos_base, ++ info->node_info->qport[i], mode, ++ &qmode); ++ } ++ } ++ ++ for (i = 0; i < info->node_info->num_qports; i++) { ++ struct msm_bus_bimc_qos_bw qbw; ++ /* If not in fixed mode, update bandwidth */ ++ if ((info->node_info->lim_bw != lim_bw)) { ++ qbw.ws = info->node_info->qos_params.ws; ++ qbw.bw = lim_bw; ++ qbw.gp = info->node_info->qos_params.gp; ++ qbw.thmp = info->node_info->qos_params.thmp; ++ bimc_set_static_qos_bw(qos_base, qos_freq, ++ info->node_info->qport[i], &qbw); ++ } ++ bke_switch(qos_base, info->node_info->qport[i], ++ BKE_ON, mode); ++ } ++ info->node_info->lim_bw = lim_bw; ++ } else { ++ mode = info->node_info->qos_params.mode; ++ for (i = 0; i < info->node_info->num_qports; i++) { ++ bke_switch(qos_base, info->node_info->qport[i], ++ 
BKE_OFF, mode); ++ } ++ } ++ info->node_info->qos_params.cur_mode = mode; ++ return 0; ++} ++ ++static bool msm_bus_bimc_update_bw_reg(int mode) ++{ ++ bool ret = false; ++ ++ if ((mode == BIMC_QOS_MODE_LIMITER) ++ || (mode == BIMC_QOS_MODE_REGULATOR)) ++ ret = true; ++ ++ return ret; ++} ++ ++static int msm_bus_bimc_qos_init(struct msm_bus_node_device_type *info, ++ void __iomem *qos_base, ++ uint32_t qos_off, uint32_t qos_delta, ++ uint32_t qos_freq) ++{ ++ int i; ++ struct msm_bus_bimc_qos_mode qmode; ++ ++ switch (info->node_info->qos_params.mode) { ++ case BIMC_QOS_MODE_FIXED: ++ qmode.fixed.prio_level = info->node_info->qos_params.prio_lvl; ++ qmode.fixed.areq_prio_rd = info->node_info->qos_params.prio_rd; ++ qmode.fixed.areq_prio_wr = info->node_info->qos_params.prio_wr; ++ break; ++ case BIMC_QOS_MODE_LIMITER: ++ qmode.rl.qhealth[0].limit_commands = 1; ++ qmode.rl.qhealth[1].limit_commands = 0; ++ qmode.rl.qhealth[2].limit_commands = 0; ++ qmode.rl.qhealth[3].limit_commands = 0; ++ break; ++ default: ++ break; ++ } ++ ++ if (ZERO_OR_NULL_PTR(info->node_info->qport)) { ++ MSM_BUS_DBG("No QoS Ports to init\n"); ++ return 0; ++ } ++ ++ for (i = 0; i < info->node_info->num_qports; i++) { ++ /* If not in bypass mode, update priority */ ++ if (info->node_info->qos_params.mode != BIMC_QOS_MODE_BYPASS) ++ msm_bus_bimc_set_qos_prio(qos_base, info->node_info-> ++ qport[i], info->node_info->qos_params.mode, ++ &qmode); ++ ++ /* set mode */ ++ if (info->node_info->qos_params.mode == BIMC_QOS_MODE_LIMITER) ++ bke_switch(qos_base, info->node_info->qport[i], ++ BKE_OFF, BIMC_QOS_MODE_FIXED); ++ else ++ msm_bus_bimc_set_qos_mode(qos_base, ++ info->node_info->qport[i], ++ info->node_info->qos_params.mode); ++ } ++ ++ return 0; ++} ++ ++static int msm_bus_bimc_set_bw(struct msm_bus_node_device_type *dev, ++ void __iomem *qos_base, uint32_t qos_off, ++ uint32_t qos_delta, uint32_t qos_freq) ++{ ++ struct msm_bus_bimc_qos_bw qbw; ++ int i; ++ int64_t bw = 0; ++ int ret = 0; ++ 
struct msm_bus_node_info_type *info = dev->node_info; ++ ++ if (info && info->num_qports && ++ ((info->qos_params.mode == BIMC_QOS_MODE_LIMITER) || ++ (info->qos_params.mode == BIMC_QOS_MODE_REGULATOR))) { ++ bw = msm_bus_div64(info->num_qports, ++ dev->node_ab.ab[DUAL_CTX]); ++ ++ for (i = 0; i < info->num_qports; i++) { ++ MSM_BUS_DBG("BIMC: Update mas_bw for ID: %d -> %llu\n", ++ info->id, bw); ++ ++ if (!info->qport) { ++ MSM_BUS_DBG("No qos ports to update!\n"); ++ break; ++ } ++ ++ qbw.bw = bw + info->qos_params.bw_buffer; ++ trace_bus_bimc_config_limiter(info->id, bw); ++ ++ /* Default to gp of 5us */ ++ qbw.gp = (info->qos_params.gp ? ++ info->qos_params.gp : 5000); ++ /* Default to thmp of 50% */ ++ qbw.thmp = (info->qos_params.thmp ? ++ info->qos_params.thmp : 50); ++ /* ++ * If the BW vote is 0 then set the QoS mode to ++ * Fixed. ++ */ ++ if (bw) { ++ bimc_set_static_qos_bw(qos_base, qos_freq, ++ info->qport[i], &qbw); ++ bke_switch(qos_base, info->qport[i], ++ BKE_ON, info->qos_params.mode); ++ } else { ++ bke_switch(qos_base, info->qport[i], ++ BKE_OFF, BIMC_QOS_MODE_FIXED); ++ } ++ } ++ } ++ return ret; ++} ++ ++int msm_bus_bimc_hw_init(struct msm_bus_fabric_registration *pdata, ++ struct msm_bus_hw_algorithm *hw_algo) ++{ ++ /* Set interleaving to true by default */ ++ MSM_BUS_DBG("\nInitializing BIMC...\n"); ++ pdata->il_flag = true; ++ hw_algo->allocate_commit_data = msm_bus_bimc_allocate_commit_data; ++ hw_algo->allocate_hw_data = msm_bus_bimc_allocate_bimc_data; ++ hw_algo->node_init = msm_bus_bimc_node_init; ++ hw_algo->free_commit_data = free_commit_data; ++ hw_algo->update_bw = msm_bus_bimc_update_bw; ++ hw_algo->commit = msm_bus_bimc_commit; ++ hw_algo->port_halt = msm_bus_bimc_port_halt; ++ hw_algo->port_unhalt = msm_bus_bimc_port_unhalt; ++ hw_algo->config_master = msm_bus_bimc_config_master; ++ hw_algo->config_limiter = msm_bus_bimc_config_limiter; ++ hw_algo->update_bw_reg = msm_bus_bimc_update_bw_reg; ++ /* BIMC slaves are shared. 
Slave registers are set through RPM */ ++ if (!pdata->ahb) ++ pdata->rpm_enabled = 1; ++ return 0; ++} ++ ++int msm_bus_bimc_set_ops(struct msm_bus_node_device_type *bus_dev) ++{ ++ if (!bus_dev) ++ return -ENODEV; ++ else { ++ bus_dev->fabdev->noc_ops.qos_init = msm_bus_bimc_qos_init; ++ bus_dev->fabdev->noc_ops.set_bw = msm_bus_bimc_set_bw; ++ bus_dev->fabdev->noc_ops.limit_mport = msm_bus_bimc_limit_mport; ++ bus_dev->fabdev->noc_ops.update_bw_reg = ++ msm_bus_bimc_update_bw_reg; ++ } ++ return 0; ++} ++EXPORT_SYMBOL(msm_bus_bimc_set_ops); +--- /dev/null ++++ b/drivers/bus/msm_bus/msm_bus_bimc.h +@@ -0,0 +1,127 @@ ++/* Copyright (c) 2012-2013, The Linux Foundation. All rights reserved. ++ * ++ * This program is free software; you can redistribute it and/or modify ++ * it under the terms of the GNU General Public License version 2 and ++ * only version 2 as published by the Free Software Foundation. ++ * ++ * This program is distributed in the hope that it will be useful, ++ * but WITHOUT ANY WARRANTY; without even the implied warranty of ++ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the ++ * GNU General Public License for more details. 
++ */ ++ ++#ifndef _ARCH_ARM_MACH_MSM_BUS_BIMC_H ++#define _ARCH_ARM_MACH_MSM_BUS_BIMC_H ++ ++struct msm_bus_bimc_params { ++ uint32_t bus_id; ++ uint32_t addr_width; ++ uint32_t data_width; ++ uint32_t nmasters; ++ uint32_t nslaves; ++}; ++ ++struct msm_bus_bimc_commit { ++ struct msm_bus_node_hw_info *mas; ++ struct msm_bus_node_hw_info *slv; ++}; ++ ++struct msm_bus_bimc_info { ++ void __iomem *base; ++ uint32_t base_addr; ++ uint32_t qos_freq; ++ struct msm_bus_bimc_params params; ++ struct msm_bus_bimc_commit cdata[NUM_CTX]; ++}; ++ ++struct msm_bus_bimc_node { ++ uint32_t conn_mask; ++ uint32_t data_width; ++ uint8_t slv_arb_mode; ++}; ++ ++enum msm_bus_bimc_arb_mode { ++ BIMC_ARB_MODE_RR = 0, ++ BIMC_ARB_MODE_PRIORITY_RR, ++ BIMC_ARB_MODE_TIERED_RR, ++}; ++ ++ ++enum msm_bus_bimc_interleave { ++ BIMC_INTERLEAVE_NONE = 0, ++ BIMC_INTERLEAVE_ODD, ++ BIMC_INTERLEAVE_EVEN, ++}; ++ ++struct msm_bus_bimc_slave_seg { ++ bool enable; ++ uint64_t start_addr; ++ uint64_t seg_size; ++ uint8_t interleave; ++}; ++ ++enum msm_bus_bimc_qos_mode_type { ++ BIMC_QOS_MODE_FIXED = 0, ++ BIMC_QOS_MODE_LIMITER, ++ BIMC_QOS_MODE_BYPASS, ++ BIMC_QOS_MODE_REGULATOR, ++}; ++ ++struct msm_bus_bimc_qos_health { ++ bool limit_commands; ++ uint32_t areq_prio; ++ uint32_t prio_level; ++}; ++ ++struct msm_bus_bimc_mode_fixed { ++ uint32_t prio_level; ++ uint32_t areq_prio_rd; ++ uint32_t areq_prio_wr; ++}; ++ ++struct msm_bus_bimc_mode_rl { ++ uint8_t qhealthnum; ++ struct msm_bus_bimc_qos_health qhealth[4]; ++}; ++ ++struct msm_bus_bimc_qos_mode { ++ uint8_t mode; ++ struct msm_bus_bimc_mode_fixed fixed; ++ struct msm_bus_bimc_mode_rl rl; ++}; ++ ++struct msm_bus_bimc_qos_bw { ++ uint64_t bw; /* bw is in Bytes/sec */ ++ uint32_t ws; /* Window size in nano seconds*/ ++ int64_t thh; /* Threshold high, bytes per second */ ++ int64_t thm; /* Threshold medium, bytes per second */ ++ int64_t thl; /* Threshold low, bytes per second */ ++ u32 gp; /* Grant Period in micro seconds */ ++ u32 thmp; 
/* Threshold medium in percentage */ ++}; ++ ++struct msm_bus_bimc_clk_gate { ++ bool core_clk_gate_en; ++ bool arb_clk_gate_en; /* For arbiter */ ++ bool port_clk_gate_en; /* For regs on BIMC core clock */ ++}; ++ ++void msm_bus_bimc_set_slave_seg(struct msm_bus_bimc_info *binfo, ++ uint32_t slv_index, uint32_t seg_index, ++ struct msm_bus_bimc_slave_seg *bsseg); ++void msm_bus_bimc_set_slave_clk_gate(struct msm_bus_bimc_info *binfo, ++ uint32_t slv_index, struct msm_bus_bimc_clk_gate *bgate); ++void msm_bus_bimc_set_mas_clk_gate(struct msm_bus_bimc_info *binfo, ++ uint32_t mas_index, struct msm_bus_bimc_clk_gate *bgate); ++void msm_bus_bimc_arb_en(struct msm_bus_bimc_info *binfo, ++ uint32_t slv_index, bool en); ++void msm_bus_bimc_get_params(struct msm_bus_bimc_info *binfo, ++ struct msm_bus_bimc_params *params); ++void msm_bus_bimc_get_mas_params(struct msm_bus_bimc_info *binfo, ++ uint32_t mas_index, struct msm_bus_bimc_node *mparams); ++void msm_bus_bimc_get_slv_params(struct msm_bus_bimc_info *binfo, ++ uint32_t slv_index, struct msm_bus_bimc_node *sparams); ++bool msm_bus_bimc_get_arb_en(struct msm_bus_bimc_info *binfo, ++ uint32_t slv_index); ++ ++#endif /*_ARCH_ARM_MACH_MSM_BUS_BIMC_H*/ +--- /dev/null ++++ b/drivers/bus/msm_bus/msm_bus_client_api.c +@@ -0,0 +1,83 @@ ++/* Copyright (c) 2014, The Linux Foundation. All rights reserved. ++ * ++ * This program is free software; you can redistribute it and/or modify ++ * it under the terms of the GNU General Public License version 2 and ++ * only version 2 as published by the Free Software Foundation. ++ * ++ * This program is distributed in the hope that it will be useful, ++ * but WITHOUT ANY WARRANTY; without even the implied warranty of ++ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the ++ * GNU General Public License for more details. 
++ */ ++ ++#define pr_fmt(fmt) "AXI: %s(): " fmt, __func__ ++ ++#include ++#include ++#include ++#include ++#include ++#include ++#include ++#include ++#include "msm-bus.h" ++#include "msm_bus_core.h" ++ ++struct msm_bus_arb_ops arb_ops; ++ ++/** ++ * msm_bus_scale_register_client() - Register the clients with the msm bus ++ * driver ++ * @pdata: Platform data of the client, containing src, dest, ab, ib. ++ * Return non-zero value in case of success, 0 in case of failure. ++ * ++ * Client data contains the vectors specifying arbitrated bandwidth (ab) ++ * and instantaneous bandwidth (ib) requested between a particular ++ * src and dest. ++ */ ++uint32_t msm_bus_scale_register_client(struct msm_bus_scale_pdata *pdata) ++{ ++ if (arb_ops.register_client) ++ return arb_ops.register_client(pdata); ++ else { ++ pr_err("%s: Bus driver not ready.", ++ __func__); ++ return 0; ++ } ++} ++EXPORT_SYMBOL(msm_bus_scale_register_client); ++ ++/** ++ * msm_bus_scale_client_update_request() - Update the request for bandwidth ++ * from a particular client ++ * ++ * cl: Handle to the client ++ * index: Index into the vector, to which the bw and clock values need to be ++ * updated ++ */ ++int msm_bus_scale_client_update_request(uint32_t cl, unsigned int index) ++{ ++ if (arb_ops.update_request) ++ return arb_ops.update_request(cl, index); ++ else { ++ pr_err("%s: Bus driver not ready.", ++ __func__); ++ return -EPROBE_DEFER; ++ } ++} ++EXPORT_SYMBOL(msm_bus_scale_client_update_request); ++ ++/** ++ * msm_bus_scale_unregister_client() - Unregister the client from the bus driver ++ * @cl: Handle to the client ++ */ ++void msm_bus_scale_unregister_client(uint32_t cl) ++{ ++ if (arb_ops.unregister_client) ++ arb_ops.unregister_client(cl); ++ else { ++ pr_err("%s: Bus driver not ready.", ++ __func__); ++ } ++} ++EXPORT_SYMBOL(msm_bus_scale_unregister_client); +--- /dev/null ++++ b/drivers/bus/msm_bus/msm_bus_core.c +@@ -0,0 +1,125 @@ ++/* Copyright (c) 2010-2014, The Linux Foundation. 
All rights reserved. ++ * ++ * This program is free software; you can redistribute it and/or modify ++ * it under the terms of the GNU General Public License version 2 and ++ * only version 2 as published by the Free Software Foundation. ++ * ++ * This program is distributed in the hope that it will be useful, ++ * but WITHOUT ANY WARRANTY; without even the implied warranty of ++ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the ++ * GNU General Public License for more details. ++ */ ++ ++#define pr_fmt(fmt) "AXI: %s(): " fmt, __func__ ++ ++#include ++#include ++#include ++#include ++#include ++#include ++#include ++#include "msm-bus-board.h" ++#include "msm-bus.h" ++#include "msm_bus_core.h" ++ ++static atomic_t num_fab = ATOMIC_INIT(0); ++ ++int msm_bus_get_num_fab(void) ++{ ++ return atomic_read(&num_fab); ++} ++ ++int msm_bus_device_match(struct device *dev, void *id) ++{ ++ struct msm_bus_fabric_device *fabdev = to_msm_bus_fabric_device(dev); ++ ++ if (!fabdev) { ++ MSM_BUS_WARN("Fabric %p returning 0\n", fabdev); ++ return 0; ++ } ++ return fabdev->id == *(int *)id; ++} ++ ++static void msm_bus_release(struct device *device) ++{ ++} ++ ++struct bus_type msm_bus_type = { ++ .name = "msm-bus-type", ++}; ++EXPORT_SYMBOL(msm_bus_type); ++ ++/** ++ * msm_bus_get_fabric_device() - This function is used to search for ++ * the fabric device on the bus ++ * @fabid: Fabric id ++ * Function returns: Pointer to the fabric device ++ */ ++struct msm_bus_fabric_device *msm_bus_get_fabric_device(int fabid) ++{ ++ struct device *dev; ++ struct msm_bus_fabric_device *fabric; ++ dev = bus_find_device(&msm_bus_type, NULL, (void *)&fabid, ++ msm_bus_device_match); ++ if (!dev) ++ return NULL; ++ fabric = to_msm_bus_fabric_device(dev); ++ return fabric; ++} ++ ++/** ++ * msm_bus_fabric_device_register() - Registers a fabric on msm bus ++ * @fabdev: Fabric device to be registered ++ */ ++int msm_bus_fabric_device_register(struct msm_bus_fabric_device *fabdev) ++{ ++ 
int ret = 0; ++ fabdev->dev.bus = &msm_bus_type; ++ fabdev->dev.release = msm_bus_release; ++ ret = dev_set_name(&fabdev->dev, fabdev->name); ++ if (ret) { ++ MSM_BUS_ERR("error setting dev name\n"); ++ goto err; ++ } ++ ++ ret = device_register(&fabdev->dev); ++ if (ret < 0) { ++ MSM_BUS_ERR("error registering device%d %s\n", ++ ret, fabdev->name); ++ goto err; ++ } ++ atomic_inc(&num_fab); ++err: ++ return ret; ++} ++ ++/** ++ * msm_bus_fabric_device_unregister() - Unregisters the fabric ++ * devices from the msm bus ++ */ ++void msm_bus_fabric_device_unregister(struct msm_bus_fabric_device *fabdev) ++{ ++ device_unregister(&fabdev->dev); ++ atomic_dec(&num_fab); ++} ++ ++static void __exit msm_bus_exit(void) ++{ ++ bus_unregister(&msm_bus_type); ++} ++ ++static int __init msm_bus_init(void) ++{ ++ int retval = 0; ++ retval = bus_register(&msm_bus_type); ++ if (retval) ++ MSM_BUS_ERR("bus_register error! %d\n", ++ retval); ++ return retval; ++} ++postcore_initcall(msm_bus_init); ++module_exit(msm_bus_exit); ++MODULE_LICENSE("GPL v2"); ++MODULE_VERSION("0.2"); ++MODULE_ALIAS("platform:msm_bus"); +--- /dev/null ++++ b/drivers/bus/msm_bus/msm_bus_core.h +@@ -0,0 +1,375 @@ ++/* Copyright (c) 2011-2014, The Linux Foundation. All rights reserved. ++ * ++ * This program is free software; you can redistribute it and/or modify ++ * it under the terms of the GNU General Public License version 2 and ++ * only version 2 as published by the Free Software Foundation. ++ * ++ * This program is distributed in the hope that it will be useful, ++ * but WITHOUT ANY WARRANTY; without even the implied warranty of ++ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the ++ * GNU General Public License for more details. ++ */ ++ ++#ifndef _ARCH_ARM_MACH_MSM_BUS_CORE_H ++#define _ARCH_ARM_MACH_MSM_BUS_CORE_H ++ ++#include ++#include ++#include ++#include ++#include "msm-bus-board.h" ++#include "msm-bus.h" ++ ++#define MSM_BUS_DBG(msg, ...) 
\ ++ pr_debug(msg, ## __VA_ARGS__) ++#define MSM_BUS_ERR(msg, ...) \ ++ pr_err(msg, ## __VA_ARGS__) ++#define MSM_BUS_WARN(msg, ...) \ ++ pr_warn(msg, ## __VA_ARGS__) ++#define MSM_FAB_ERR(msg, ...) \ ++ dev_err(&fabric->fabdev.dev, msg, ## __VA_ARGS__) ++ ++#define IS_MASTER_VALID(mas) \ ++ (((mas >= MSM_BUS_MASTER_FIRST) && (mas <= MSM_BUS_MASTER_LAST)) \ ++ ? 1 : 0) ++#define IS_SLAVE_VALID(slv) \ ++ (((slv >= MSM_BUS_SLAVE_FIRST) && (slv <= MSM_BUS_SLAVE_LAST)) ? 1 : 0) ++ ++#define INTERLEAVED_BW(fab_pdata, bw, ports) \ ++ ((fab_pdata->il_flag) ? ((bw < 0) \ ++ ? -msm_bus_div64((ports), (-bw)) : msm_bus_div64((ports), (bw))) : (bw)) ++#define INTERLEAVED_VAL(fab_pdata, n) \ ++ ((fab_pdata->il_flag) ? (n) : 1) ++#define KBTOB(a) (a * 1000ULL) ++ ++enum msm_bus_dbg_op_type { ++ MSM_BUS_DBG_UNREGISTER = -2, ++ MSM_BUS_DBG_REGISTER, ++ MSM_BUS_DBG_OP = 1, ++}; ++ ++enum msm_bus_hw_sel { ++ MSM_BUS_RPM = 0, ++ MSM_BUS_NOC, ++ MSM_BUS_BIMC, ++}; ++ ++struct msm_bus_arb_ops { ++ uint32_t (*register_client)(struct msm_bus_scale_pdata *pdata); ++ int (*update_request)(uint32_t cl, unsigned int index); ++ void (*unregister_client)(uint32_t cl); ++}; ++ ++enum { ++ SLAVE_NODE, ++ MASTER_NODE, ++ CLK_NODE, ++ NR_LIM_NODE, ++}; ++ ++ ++extern struct bus_type msm_bus_type; ++extern struct msm_bus_arb_ops arb_ops; ++extern void msm_bus_arb_setops_legacy(struct msm_bus_arb_ops *arb_ops); ++ ++struct msm_bus_node_info { ++ unsigned int id; ++ unsigned int priv_id; ++ unsigned int mas_hw_id; ++ unsigned int slv_hw_id; ++ int gateway; ++ int *masterp; ++ int *qport; ++ int num_mports; ++ int *slavep; ++ int num_sports; ++ int *tier; ++ int num_tiers; ++ int ahb; ++ int hw_sel; ++ const char *slaveclk[NUM_CTX]; ++ const char *memclk[NUM_CTX]; ++ const char *iface_clk_node; ++ unsigned int buswidth; ++ unsigned int ws; ++ unsigned int mode; ++ unsigned int perm_mode; ++ unsigned int prio_lvl; ++ unsigned int prio_rd; ++ unsigned int prio_wr; ++ unsigned int prio1; ++ unsigned int 
prio0; ++ unsigned int num_thresh; ++ u64 *th; ++ u64 cur_lim_bw; ++ unsigned int mode_thresh; ++ bool dual_conf; ++ u64 *bimc_bw; ++ bool nr_lim; ++ u32 ff; ++ bool rt_mas; ++ u32 bimc_gp; ++ u32 bimc_thmp; ++ u64 floor_bw; ++ const char *name; ++}; ++ ++struct path_node { ++ uint64_t clk[NUM_CTX]; ++ uint64_t bw[NUM_CTX]; ++ uint64_t *sel_clk; ++ uint64_t *sel_bw; ++ int next; ++}; ++ ++struct msm_bus_link_info { ++ uint64_t clk[NUM_CTX]; ++ uint64_t *sel_clk; ++ uint64_t memclk; ++ int64_t bw[NUM_CTX]; ++ int64_t *sel_bw; ++ int *tier; ++ int num_tiers; ++}; ++ ++struct nodeclk { ++ struct clk *clk; ++ uint64_t rate; ++ bool dirty; ++ bool enable; ++}; ++ ++struct msm_bus_inode_info { ++ struct msm_bus_node_info *node_info; ++ uint64_t max_bw; ++ uint64_t max_clk; ++ uint64_t cur_lim_bw; ++ uint64_t cur_prg_bw; ++ struct msm_bus_link_info link_info; ++ int num_pnodes; ++ struct path_node *pnode; ++ int commit_index; ++ struct nodeclk nodeclk[NUM_CTX]; ++ struct nodeclk memclk[NUM_CTX]; ++ struct nodeclk iface_clk; ++ void *hw_data; ++}; ++ ++struct msm_bus_node_hw_info { ++ bool dirty; ++ unsigned int hw_id; ++ uint64_t bw; ++}; ++ ++struct msm_bus_hw_algorithm { ++ int (*allocate_commit_data)(struct msm_bus_fabric_registration ++ *fab_pdata, void **cdata, int ctx); ++ void *(*allocate_hw_data)(struct platform_device *pdev, ++ struct msm_bus_fabric_registration *fab_pdata); ++ void (*node_init)(void *hw_data, struct msm_bus_inode_info *info); ++ void (*free_commit_data)(void *cdata); ++ void (*update_bw)(struct msm_bus_inode_info *hop, ++ struct msm_bus_inode_info *info, ++ struct msm_bus_fabric_registration *fab_pdata, ++ void *sel_cdata, int *master_tiers, ++ int64_t add_bw); ++ void (*fill_cdata_buffer)(int *curr, char *buf, const int max_size, ++ void *cdata, int nmasters, int nslaves, int ntslaves); ++ int (*commit)(struct msm_bus_fabric_registration ++ *fab_pdata, void *hw_data, void **cdata); ++ int (*port_unhalt)(uint32_t haltid, uint8_t mport); ++ int 
(*port_halt)(uint32_t haltid, uint8_t mport); ++ void (*config_master)(struct msm_bus_fabric_registration *fab_pdata, ++ struct msm_bus_inode_info *info, ++ uint64_t req_clk, uint64_t req_bw); ++ void (*config_limiter)(struct msm_bus_fabric_registration *fab_pdata, ++ struct msm_bus_inode_info *info); ++ bool (*update_bw_reg)(int mode); ++}; ++ ++struct msm_bus_fabric_device { ++ int id; ++ const char *name; ++ struct device dev; ++ const struct msm_bus_fab_algorithm *algo; ++ const struct msm_bus_board_algorithm *board_algo; ++ struct msm_bus_hw_algorithm hw_algo; ++ int visited; ++ int num_nr_lim; ++ u64 nr_lim_thresh; ++ u32 eff_fact; ++}; ++#define to_msm_bus_fabric_device(d) container_of(d, \ ++ struct msm_bus_fabric_device, d) ++ ++struct msm_bus_fabric { ++ struct msm_bus_fabric_device fabdev; ++ int ahb; ++ void *cdata[NUM_CTX]; ++ bool arb_dirty; ++ bool clk_dirty; ++ struct radix_tree_root fab_tree; ++ int num_nodes; ++ struct list_head gateways; ++ struct msm_bus_inode_info info; ++ struct msm_bus_fabric_registration *pdata; ++ void *hw_data; ++}; ++#define to_msm_bus_fabric(d) container_of(d, \ ++ struct msm_bus_fabric, d) ++ ++ ++struct msm_bus_fab_algorithm { ++ int (*update_clks)(struct msm_bus_fabric_device *fabdev, ++ struct msm_bus_inode_info *pme, int index, ++ uint64_t curr_clk, uint64_t req_clk, ++ uint64_t bwsum, int flag, int ctx, ++ unsigned int cl_active_flag); ++ int (*port_halt)(struct msm_bus_fabric_device *fabdev, int portid); ++ int (*port_unhalt)(struct msm_bus_fabric_device *fabdev, int portid); ++ int (*commit)(struct msm_bus_fabric_device *fabdev); ++ struct msm_bus_inode_info *(*find_node)(struct msm_bus_fabric_device ++ *fabdev, int id); ++ struct msm_bus_inode_info *(*find_gw_node)(struct msm_bus_fabric_device ++ *fabdev, int id); ++ struct list_head *(*get_gw_list)(struct msm_bus_fabric_device *fabdev); ++ void (*update_bw)(struct msm_bus_fabric_device *fabdev, struct ++ msm_bus_inode_info * hop, struct msm_bus_inode_info 
*info, ++ int64_t add_bw, int *master_tiers, int ctx); ++ void (*config_master)(struct msm_bus_fabric_device *fabdev, ++ struct msm_bus_inode_info *info, uint64_t req_clk, ++ uint64_t req_bw); ++ void (*config_limiter)(struct msm_bus_fabric_device *fabdev, ++ struct msm_bus_inode_info *info); ++}; ++ ++struct msm_bus_board_algorithm { ++ int board_nfab; ++ void (*assign_iids)(struct msm_bus_fabric_registration *fabreg, ++ int fabid); ++ int (*get_iid)(int id); ++}; ++ ++/** ++ * Used to store the list of fabrics and other info to be ++ * maintained outside the fabric structure. ++ * Used while calculating path, and to find fabric ptrs ++ */ ++struct msm_bus_fabnodeinfo { ++ struct list_head list; ++ struct msm_bus_inode_info *info; ++}; ++ ++struct msm_bus_client { ++ int id; ++ struct msm_bus_scale_pdata *pdata; ++ int *src_pnode; ++ int curr; ++}; ++ ++uint64_t msm_bus_div64(unsigned int width, uint64_t bw); ++int msm_bus_fabric_device_register(struct msm_bus_fabric_device *fabric); ++void msm_bus_fabric_device_unregister(struct msm_bus_fabric_device *fabric); ++struct msm_bus_fabric_device *msm_bus_get_fabric_device(int fabid); ++int msm_bus_get_num_fab(void); ++ ++ ++int msm_bus_hw_fab_init(struct msm_bus_fabric_registration *pdata, ++ struct msm_bus_hw_algorithm *hw_algo); ++void msm_bus_board_init(struct msm_bus_fabric_registration *pdata); ++void msm_bus_board_set_nfab(struct msm_bus_fabric_registration *pdata, ++ int nfab); ++#if defined(CONFIG_MSM_RPM_SMD) ++int msm_bus_rpm_hw_init(struct msm_bus_fabric_registration *pdata, ++ struct msm_bus_hw_algorithm *hw_algo); ++int msm_bus_remote_hw_commit(struct msm_bus_fabric_registration ++ *fab_pdata, void *hw_data, void **cdata); ++void msm_bus_rpm_fill_cdata_buffer(int *curr, char *buf, const int max_size, ++ void *cdata, int nmasters, int nslaves, int ntslaves); ++#else ++static inline int msm_bus_rpm_hw_init(struct msm_bus_fabric_registration *pdata, ++ struct msm_bus_hw_algorithm *hw_algo) ++{ ++ return 0; 
++} ++static inline int msm_bus_remote_hw_commit(struct msm_bus_fabric_registration ++ *fab_pdata, void *hw_data, void **cdata) ++{ ++ return 0; ++} ++static inline void msm_bus_rpm_fill_cdata_buffer(int *curr, char *buf, ++ const int max_size, void *cdata, int nmasters, int nslaves, ++ int ntslaves) ++{ ++} ++#endif ++ ++int msm_bus_noc_hw_init(struct msm_bus_fabric_registration *pdata, ++ struct msm_bus_hw_algorithm *hw_algo); ++int msm_bus_bimc_hw_init(struct msm_bus_fabric_registration *pdata, ++ struct msm_bus_hw_algorithm *hw_algo); ++#if defined(CONFIG_DEBUG_FS) && defined(CONFIG_MSM_BUS_SCALING) ++void msm_bus_dbg_client_data(struct msm_bus_scale_pdata *pdata, int index, ++ uint32_t cl); ++void msm_bus_dbg_commit_data(const char *fabname, void *cdata, ++ int nmasters, int nslaves, int ntslaves, int op); ++#else ++static inline void msm_bus_dbg_client_data(struct msm_bus_scale_pdata *pdata, ++ int index, uint32_t cl) ++{ ++} ++static inline void msm_bus_dbg_commit_data(const char *fabname, ++ void *cdata, int nmasters, int nslaves, int ntslaves, ++ int op) ++{ ++} ++#endif ++ ++#ifdef CONFIG_CORESIGHT ++int msmbus_coresight_init(struct platform_device *pdev); ++void msmbus_coresight_remove(struct platform_device *pdev); ++int msmbus_coresight_init_adhoc(struct platform_device *pdev, ++ struct device_node *of_node); ++void msmbus_coresight_remove_adhoc(struct platform_device *pdev); ++#else ++static inline int msmbus_coresight_init(struct platform_device *pdev) ++{ ++ return 0; ++} ++ ++static inline void msmbus_coresight_remove(struct platform_device *pdev) ++{ ++} ++ ++static inline int msmbus_coresight_init_adhoc(struct platform_device *pdev, ++ struct device_node *of_node) ++{ ++ return 0; ++} ++ ++static inline void msmbus_coresight_remove_adhoc(struct platform_device *pdev) ++{ ++} ++#endif ++ ++ ++#ifdef CONFIG_OF ++void msm_bus_of_get_nfab(struct platform_device *pdev, ++ struct msm_bus_fabric_registration *pdata); ++struct msm_bus_fabric_registration 
++ *msm_bus_of_get_fab_data(struct platform_device *pdev); ++#else ++static inline void msm_bus_of_get_nfab(struct platform_device *pdev, ++ struct msm_bus_fabric_registration *pdata) ++{ ++ return; ++} ++ ++static inline struct msm_bus_fabric_registration ++ *msm_bus_of_get_fab_data(struct platform_device *pdev) ++{ ++ return NULL; ++} ++#endif ++ ++#endif /*_ARCH_ARM_MACH_MSM_BUS_CORE_H*/ +--- /dev/null ++++ b/drivers/bus/msm_bus/msm_bus_dbg.c +@@ -0,0 +1,810 @@ ++/* Copyright (c) 2010-2012, 2014, The Linux Foundation. All rights reserved. ++ * ++ * This program is free software; you can redistribute it and/or modify ++ * it under the terms of the GNU General Public License version 2 and ++ * only version 2 as published by the Free Software Foundation. ++ * ++ * This program is distributed in the hope that it will be useful, ++ * but WITHOUT ANY WARRANTY; without even the implied warranty of ++ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the ++ * GNU General Public License for more details. 
++ * ++ */ ++ ++#define pr_fmt(fmt) "AXI: %s(): " fmt, __func__ ++ ++#include ++#include ++#include ++#include ++#include ++#include ++#include ++#include ++#include ++#include "msm-bus-board.h" ++#include "msm-bus.h" ++#include "msm_bus_rules.h" ++#include "msm_bus_core.h" ++#include "msm_bus_adhoc.h" ++ ++#define CREATE_TRACE_POINTS ++#include ++ ++#define MAX_BUFF_SIZE 4096 ++#define FILL_LIMIT 128 ++ ++static struct dentry *clients; ++static struct dentry *dir; ++static DEFINE_MUTEX(msm_bus_dbg_fablist_lock); ++struct msm_bus_dbg_state { ++ uint32_t cl; ++ uint8_t enable; ++ uint8_t current_index; ++} clstate; ++ ++struct msm_bus_cldata { ++ const struct msm_bus_scale_pdata *pdata; ++ int index; ++ uint32_t clid; ++ int size; ++ struct dentry *file; ++ struct list_head list; ++ char buffer[MAX_BUFF_SIZE]; ++}; ++ ++struct msm_bus_fab_list { ++ const char *name; ++ int size; ++ struct dentry *file; ++ struct list_head list; ++ char buffer[MAX_BUFF_SIZE]; ++}; ++ ++static char *rules_buf; ++ ++LIST_HEAD(fabdata_list); ++LIST_HEAD(cl_list); ++ ++/** ++ * The following structures and funtions are used for ++ * the test-client which can be created at run-time. 
++ */ ++ ++static struct msm_bus_vectors init_vectors[1]; ++static struct msm_bus_vectors current_vectors[1]; ++static struct msm_bus_vectors requested_vectors[1]; ++ ++static struct msm_bus_paths shell_client_usecases[] = { ++ { ++ .num_paths = ARRAY_SIZE(init_vectors), ++ .vectors = init_vectors, ++ }, ++ { ++ .num_paths = ARRAY_SIZE(current_vectors), ++ .vectors = current_vectors, ++ }, ++ { ++ .num_paths = ARRAY_SIZE(requested_vectors), ++ .vectors = requested_vectors, ++ }, ++}; ++ ++static struct msm_bus_scale_pdata shell_client = { ++ .usecase = shell_client_usecases, ++ .num_usecases = ARRAY_SIZE(shell_client_usecases), ++ .name = "test-client", ++}; ++ ++static void msm_bus_dbg_init_vectors(void) ++{ ++ init_vectors[0].src = -1; ++ init_vectors[0].dst = -1; ++ init_vectors[0].ab = 0; ++ init_vectors[0].ib = 0; ++ current_vectors[0].src = -1; ++ current_vectors[0].dst = -1; ++ current_vectors[0].ab = 0; ++ current_vectors[0].ib = 0; ++ requested_vectors[0].src = -1; ++ requested_vectors[0].dst = -1; ++ requested_vectors[0].ab = 0; ++ requested_vectors[0].ib = 0; ++ clstate.enable = 0; ++ clstate.current_index = 0; ++} ++ ++static int msm_bus_dbg_update_cl_request(uint32_t cl) ++{ ++ int ret = 0; ++ ++ if (clstate.current_index < 2) ++ clstate.current_index = 2; ++ else { ++ clstate.current_index = 1; ++ current_vectors[0].ab = requested_vectors[0].ab; ++ current_vectors[0].ib = requested_vectors[0].ib; ++ } ++ ++ if (clstate.enable) { ++ MSM_BUS_DBG("Updating request for shell client, index: %d\n", ++ clstate.current_index); ++ ret = msm_bus_scale_client_update_request(clstate.cl, ++ clstate.current_index); ++ } else ++ MSM_BUS_DBG("Enable bit not set. 
Skipping update request\n"); ++ ++ return ret; ++} ++ ++static void msm_bus_dbg_unregister_client(uint32_t cl) ++{ ++ MSM_BUS_DBG("Unregistering shell client\n"); ++ msm_bus_scale_unregister_client(clstate.cl); ++ clstate.cl = 0; ++} ++ ++static uint32_t msm_bus_dbg_register_client(void) ++{ ++ int ret = 0; ++ ++ if (init_vectors[0].src != requested_vectors[0].src) { ++ MSM_BUS_DBG("Shell client master changed. Unregistering\n"); ++ msm_bus_dbg_unregister_client(clstate.cl); ++ } ++ if (init_vectors[0].dst != requested_vectors[0].dst) { ++ MSM_BUS_DBG("Shell client slave changed. Unregistering\n"); ++ msm_bus_dbg_unregister_client(clstate.cl); ++ } ++ ++ current_vectors[0].src = init_vectors[0].src; ++ requested_vectors[0].src = init_vectors[0].src; ++ current_vectors[0].dst = init_vectors[0].dst; ++ requested_vectors[0].dst = init_vectors[0].dst; ++ ++ if (!clstate.enable) { ++ MSM_BUS_DBG("Enable bit not set, skipping registration: cl " ++ "%d\n", clstate.cl); ++ return 0; ++ } ++ ++ if (clstate.cl) { ++ MSM_BUS_DBG("Client registered, skipping registration\n"); ++ return clstate.cl; ++ } ++ ++ MSM_BUS_DBG("Registering shell client\n"); ++ ret = msm_bus_scale_register_client(&shell_client); ++ return ret; ++} ++ ++static int msm_bus_dbg_mas_get(void *data, u64 *val) ++{ ++ *val = init_vectors[0].src; ++ MSM_BUS_DBG("Get master: %llu\n", *val); ++ return 0; ++} ++ ++static int msm_bus_dbg_mas_set(void *data, u64 val) ++{ ++ init_vectors[0].src = val; ++ MSM_BUS_DBG("Set master: %llu\n", val); ++ clstate.cl = msm_bus_dbg_register_client(); ++ return 0; ++} ++DEFINE_SIMPLE_ATTRIBUTE(shell_client_mas_fops, msm_bus_dbg_mas_get, ++ msm_bus_dbg_mas_set, "%llu\n"); ++ ++static int msm_bus_dbg_slv_get(void *data, u64 *val) ++{ ++ *val = init_vectors[0].dst; ++ MSM_BUS_DBG("Get slave: %llu\n", *val); ++ return 0; ++} ++ ++static int msm_bus_dbg_slv_set(void *data, u64 val) ++{ ++ init_vectors[0].dst = val; ++ MSM_BUS_DBG("Set slave: %llu\n", val); ++ clstate.cl = 
msm_bus_dbg_register_client(); ++ return 0; ++} ++DEFINE_SIMPLE_ATTRIBUTE(shell_client_slv_fops, msm_bus_dbg_slv_get, ++ msm_bus_dbg_slv_set, "%llu\n"); ++ ++static int msm_bus_dbg_ab_get(void *data, u64 *val) ++{ ++ *val = requested_vectors[0].ab; ++ MSM_BUS_DBG("Get ab: %llu\n", *val); ++ return 0; ++} ++ ++static int msm_bus_dbg_ab_set(void *data, u64 val) ++{ ++ requested_vectors[0].ab = val; ++ MSM_BUS_DBG("Set ab: %llu\n", val); ++ return 0; ++} ++DEFINE_SIMPLE_ATTRIBUTE(shell_client_ab_fops, msm_bus_dbg_ab_get, ++ msm_bus_dbg_ab_set, "%llu\n"); ++ ++static int msm_bus_dbg_ib_get(void *data, u64 *val) ++{ ++ *val = requested_vectors[0].ib; ++ MSM_BUS_DBG("Get ib: %llu\n", *val); ++ return 0; ++} ++ ++static int msm_bus_dbg_ib_set(void *data, u64 val) ++{ ++ requested_vectors[0].ib = val; ++ MSM_BUS_DBG("Set ib: %llu\n", val); ++ return 0; ++} ++DEFINE_SIMPLE_ATTRIBUTE(shell_client_ib_fops, msm_bus_dbg_ib_get, ++ msm_bus_dbg_ib_set, "%llu\n"); ++ ++static int msm_bus_dbg_en_get(void *data, u64 *val) ++{ ++ *val = clstate.enable; ++ MSM_BUS_DBG("Get enable: %llu\n", *val); ++ return 0; ++} ++ ++static int msm_bus_dbg_en_set(void *data, u64 val) ++{ ++ int ret = 0; ++ ++ clstate.enable = val; ++ if (clstate.enable) { ++ if (!clstate.cl) { ++ MSM_BUS_DBG("client: %u\n", clstate.cl); ++ clstate.cl = msm_bus_dbg_register_client(); ++ if (clstate.cl) ++ ret = msm_bus_dbg_update_cl_request(clstate.cl); ++ } else { ++ MSM_BUS_DBG("update request for cl: %u\n", clstate.cl); ++ ret = msm_bus_dbg_update_cl_request(clstate.cl); ++ } ++ } ++ ++ MSM_BUS_DBG("Set enable: %llu\n", val); ++ return ret; ++} ++DEFINE_SIMPLE_ATTRIBUTE(shell_client_en_fops, msm_bus_dbg_en_get, ++ msm_bus_dbg_en_set, "%llu\n"); ++ ++/** ++ * The following funtions are used for viewing the client data ++ * and changing the client request at run-time ++ */ ++ ++static ssize_t client_data_read(struct file *file, char __user *buf, ++ size_t count, loff_t *ppos) ++{ ++ int bsize = 0; ++ uint32_t cl = 
(uint32_t)(uintptr_t)file->private_data; ++ struct msm_bus_cldata *cldata = NULL; ++ int found = 0; ++ ++ list_for_each_entry(cldata, &cl_list, list) { ++ if (cldata->clid == cl) { ++ found = 1; ++ break; ++ } ++ } ++ if (!found) ++ return 0; ++ ++ bsize = cldata->size; ++ return simple_read_from_buffer(buf, count, ppos, ++ cldata->buffer, bsize); ++} ++ ++static int client_data_open(struct inode *inode, struct file *file) ++{ ++ file->private_data = inode->i_private; ++ return 0; ++} ++ ++static const struct file_operations client_data_fops = { ++ .open = client_data_open, ++ .read = client_data_read, ++}; ++ ++struct dentry *msm_bus_dbg_create(const char *name, mode_t mode, ++ struct dentry *dent, uint32_t clid) ++{ ++ if (dent == NULL) { ++ MSM_BUS_DBG("debugfs not ready yet\n"); ++ return NULL; ++ } ++ return debugfs_create_file(name, mode, dent, (void *)(uintptr_t)clid, ++ &client_data_fops); ++} ++ ++#if defined(CONFIG_DEBUG_FS) && defined(CONFIG_MSM_BUS_SCALING) ++static int msm_bus_dbg_record_client(const struct msm_bus_scale_pdata *pdata, ++ int index, uint32_t clid, struct dentry *file) ++{ ++ struct msm_bus_cldata *cldata; ++ ++ cldata = kmalloc(sizeof(struct msm_bus_cldata), GFP_KERNEL); ++ if (!cldata) { ++ MSM_BUS_DBG("Failed to allocate memory for client data\n"); ++ return -ENOMEM; ++ } ++ cldata->pdata = pdata; ++ cldata->index = index; ++ cldata->clid = clid; ++ cldata->file = file; ++ cldata->size = 0; ++ list_add_tail(&cldata->list, &cl_list); ++ return 0; ++} ++ ++static void msm_bus_dbg_free_client(uint32_t clid) ++{ ++ struct msm_bus_cldata *cldata = NULL; ++ ++ list_for_each_entry(cldata, &cl_list, list) { ++ if (cldata->clid == clid) { ++ debugfs_remove(cldata->file); ++ list_del(&cldata->list); ++ kfree(cldata); ++ break; ++ } ++ } ++} ++ ++static int msm_bus_dbg_fill_cl_buffer(const struct msm_bus_scale_pdata *pdata, ++ int index, uint32_t clid) ++{ ++ int i = 0, j; ++ char *buf = NULL; ++ struct msm_bus_cldata *cldata = NULL; ++ struct 
timespec ts; ++ int found = 0; ++ ++ list_for_each_entry(cldata, &cl_list, list) { ++ if (cldata->clid == clid) { ++ found = 1; ++ break; ++ } ++ } ++ ++ if (!found) ++ return -ENOENT; ++ ++ if (cldata->file == NULL) { ++ if (pdata->name == NULL) { ++ MSM_BUS_DBG("Client doesn't have a name\n"); ++ return -EINVAL; ++ } ++ cldata->file = msm_bus_dbg_create(pdata->name, S_IRUGO, ++ clients, clid); ++ } ++ ++ if (cldata->size < (MAX_BUFF_SIZE - FILL_LIMIT)) ++ i = cldata->size; ++ else { ++ i = 0; ++ cldata->size = 0; ++ } ++ buf = cldata->buffer; ++ ts = ktime_to_timespec(ktime_get()); ++ i += scnprintf(buf + i, MAX_BUFF_SIZE - i, "\n%d.%d\n", ++ (int)ts.tv_sec, (int)ts.tv_nsec); ++ i += scnprintf(buf + i, MAX_BUFF_SIZE - i, "curr : %d\n", index); ++ i += scnprintf(buf + i, MAX_BUFF_SIZE - i, "masters: "); ++ ++ for (j = 0; j < pdata->usecase->num_paths; j++) ++ i += scnprintf(buf + i, MAX_BUFF_SIZE - i, "%d ", ++ pdata->usecase[index].vectors[j].src); ++ i += scnprintf(buf + i, MAX_BUFF_SIZE - i, "\nslaves : "); ++ for (j = 0; j < pdata->usecase->num_paths; j++) ++ i += scnprintf(buf + i, MAX_BUFF_SIZE - i, "%d ", ++ pdata->usecase[index].vectors[j].dst); ++ i += scnprintf(buf + i, MAX_BUFF_SIZE - i, "\nab : "); ++ for (j = 0; j < pdata->usecase->num_paths; j++) ++ i += scnprintf(buf + i, MAX_BUFF_SIZE - i, "%llu ", ++ pdata->usecase[index].vectors[j].ab); ++ i += scnprintf(buf + i, MAX_BUFF_SIZE - i, "\nib : "); ++ for (j = 0; j < pdata->usecase->num_paths; j++) ++ i += scnprintf(buf + i, MAX_BUFF_SIZE - i, "%llu ", ++ pdata->usecase[index].vectors[j].ib); ++ i += scnprintf(buf + i, MAX_BUFF_SIZE - i, "\n"); ++ ++ for (j = 0; j < pdata->usecase->num_paths; j++) ++ trace_bus_update_request((int)ts.tv_sec, (int)ts.tv_nsec, ++ pdata->name, index, ++ pdata->usecase[index].vectors[j].src, ++ pdata->usecase[index].vectors[j].dst, ++ pdata->usecase[index].vectors[j].ab, ++ pdata->usecase[index].vectors[j].ib); ++ ++ cldata->size = i; ++ return i; ++} ++#endif ++ ++static 
int msm_bus_dbg_update_request(struct msm_bus_cldata *cldata, int index) ++{ ++ int ret = 0; ++ ++ if ((index < 0) || (index > cldata->pdata->num_usecases)) { ++ MSM_BUS_DBG("Invalid index!\n"); ++ return -EINVAL; ++ } ++ ret = msm_bus_scale_client_update_request(cldata->clid, index); ++ return ret; ++} ++ ++static ssize_t msm_bus_dbg_update_request_write(struct file *file, ++ const char __user *ubuf, size_t cnt, loff_t *ppos) ++{ ++ struct msm_bus_cldata *cldata; ++ unsigned long index = 0; ++ int ret = 0; ++ char *chid; ++ char *buf = kmalloc((sizeof(char) * (cnt + 1)), GFP_KERNEL); ++ int found = 0; ++ ++ if (!buf || IS_ERR(buf)) { ++ MSM_BUS_ERR("Memory allocation for buffer failed\n"); ++ return -ENOMEM; ++ } ++ if (cnt == 0) { ++ kfree(buf); ++ return 0; ++ } ++ if (copy_from_user(buf, ubuf, cnt)) { ++ kfree(buf); ++ return -EFAULT; ++ } ++ buf[cnt] = '\0'; ++ chid = buf; ++ MSM_BUS_DBG("buffer: %s\n size: %zu\n", buf, sizeof(ubuf)); ++ ++ list_for_each_entry(cldata, &cl_list, list) { ++ if (strnstr(chid, cldata->pdata->name, cnt)) { ++ found = 1; ++ cldata = cldata; ++ strsep(&chid, " "); ++ if (chid) { ++ ret = kstrtoul(chid, 10, &index); ++ if (ret) { ++ MSM_BUS_DBG("Index conversion" ++ " failed\n"); ++ return -EFAULT; ++ } ++ } else { ++ MSM_BUS_DBG("Error parsing input. 
Index not" ++ " found\n"); ++ found = 0; ++ } ++ break; ++ } ++ } ++ ++ if (found) ++ msm_bus_dbg_update_request(cldata, index); ++ kfree(buf); ++ return cnt; ++} ++ ++/** ++ * The following funtions are used for viewing the commit data ++ * for each fabric ++ */ ++static ssize_t fabric_data_read(struct file *file, char __user *buf, ++ size_t count, loff_t *ppos) ++{ ++ struct msm_bus_fab_list *fablist = NULL; ++ int bsize = 0; ++ ssize_t ret; ++ const char *name = file->private_data; ++ int found = 0; ++ ++ mutex_lock(&msm_bus_dbg_fablist_lock); ++ list_for_each_entry(fablist, &fabdata_list, list) { ++ if (strcmp(fablist->name, name) == 0) { ++ found = 1; ++ break; ++ } ++ } ++ if (!found) ++ return -ENOENT; ++ bsize = fablist->size; ++ ret = simple_read_from_buffer(buf, count, ppos, ++ fablist->buffer, bsize); ++ mutex_unlock(&msm_bus_dbg_fablist_lock); ++ return ret; ++} ++ ++static const struct file_operations fabric_data_fops = { ++ .open = client_data_open, ++ .read = fabric_data_read, ++}; ++ ++static ssize_t rules_dbg_read(struct file *file, char __user *buf, ++ size_t count, loff_t *ppos) ++{ ++ ssize_t ret; ++ memset(rules_buf, 0, MAX_BUFF_SIZE); ++ print_rules_buf(rules_buf, MAX_BUFF_SIZE); ++ ret = simple_read_from_buffer(buf, count, ppos, ++ rules_buf, MAX_BUFF_SIZE); ++ return ret; ++} ++ ++static int rules_dbg_open(struct inode *inode, struct file *file) ++{ ++ file->private_data = inode->i_private; ++ return 0; ++} ++ ++static const struct file_operations rules_dbg_fops = { ++ .open = rules_dbg_open, ++ .read = rules_dbg_read, ++}; ++ ++#if defined(CONFIG_DEBUG_FS) && defined(CONFIG_MSM_BUS_SCALING) ++static int msm_bus_dbg_record_fabric(const char *fabname, struct dentry *file) ++{ ++ struct msm_bus_fab_list *fablist; ++ int ret = 0; ++ ++ mutex_lock(&msm_bus_dbg_fablist_lock); ++ fablist = kmalloc(sizeof(struct msm_bus_fab_list), GFP_KERNEL); ++ if (!fablist) { ++ MSM_BUS_DBG("Failed to allocate memory for commit data\n"); ++ ret = -ENOMEM; ++ 
goto err; ++ } ++ ++ fablist->name = fabname; ++ fablist->size = 0; ++ list_add_tail(&fablist->list, &fabdata_list); ++err: ++ mutex_unlock(&msm_bus_dbg_fablist_lock); ++ return ret; ++} ++ ++static void msm_bus_dbg_free_fabric(const char *fabname) ++{ ++ struct msm_bus_fab_list *fablist = NULL; ++ ++ mutex_lock(&msm_bus_dbg_fablist_lock); ++ list_for_each_entry(fablist, &fabdata_list, list) { ++ if (strcmp(fablist->name, fabname) == 0) { ++ debugfs_remove(fablist->file); ++ list_del(&fablist->list); ++ kfree(fablist); ++ break; ++ } ++ } ++ mutex_unlock(&msm_bus_dbg_fablist_lock); ++} ++ ++static int msm_bus_dbg_fill_fab_buffer(const char *fabname, ++ void *cdata, int nmasters, int nslaves, ++ int ntslaves) ++{ ++ int i; ++ char *buf = NULL; ++ struct msm_bus_fab_list *fablist = NULL; ++ struct timespec ts; ++ int found = 0; ++ ++ mutex_lock(&msm_bus_dbg_fablist_lock); ++ list_for_each_entry(fablist, &fabdata_list, list) { ++ if (strcmp(fablist->name, fabname) == 0) { ++ found = 1; ++ break; ++ } ++ } ++ if (!found) ++ return -ENOENT; ++ ++ if (fablist->file == NULL) { ++ MSM_BUS_DBG("Fabric dbg entry does not exist\n"); ++ mutex_unlock(&msm_bus_dbg_fablist_lock); ++ return -EFAULT; ++ } ++ ++ if (fablist->size < MAX_BUFF_SIZE - 256) ++ i = fablist->size; ++ else { ++ i = 0; ++ fablist->size = 0; ++ } ++ buf = fablist->buffer; ++ mutex_unlock(&msm_bus_dbg_fablist_lock); ++ ts = ktime_to_timespec(ktime_get()); ++ i += scnprintf(buf + i, MAX_BUFF_SIZE - i, "\n%d.%d\n", ++ (int)ts.tv_sec, (int)ts.tv_nsec); ++ ++ msm_bus_rpm_fill_cdata_buffer(&i, buf, MAX_BUFF_SIZE, cdata, ++ nmasters, nslaves, ntslaves); ++ i += scnprintf(buf + i, MAX_BUFF_SIZE - i, "\n"); ++ mutex_lock(&msm_bus_dbg_fablist_lock); ++ fablist->size = i; ++ mutex_unlock(&msm_bus_dbg_fablist_lock); ++ return 0; ++} ++#endif ++ ++static const struct file_operations msm_bus_dbg_update_request_fops = { ++ .open = client_data_open, ++ .write = msm_bus_dbg_update_request_write, ++}; ++ ++#if 
defined(CONFIG_DEBUG_FS) && defined(CONFIG_MSM_BUS_SCALING) ++/** ++ * msm_bus_dbg_client_data() - Add debug data for clients ++ * @pdata: Platform data of the client ++ * @index: The current index or operation to be performed ++ * @clid: Client handle obtained during registration ++ */ ++void msm_bus_dbg_client_data(struct msm_bus_scale_pdata *pdata, int index, ++ uint32_t clid) ++{ ++ struct dentry *file = NULL; ++ ++ if (index == MSM_BUS_DBG_REGISTER) { ++ msm_bus_dbg_record_client(pdata, index, clid, file); ++ if (!pdata->name) { ++ MSM_BUS_DBG("Cannot create debugfs entry. Null name\n"); ++ return; ++ } ++ } else if (index == MSM_BUS_DBG_UNREGISTER) { ++ msm_bus_dbg_free_client(clid); ++ MSM_BUS_DBG("Client %d unregistered\n", clid); ++ } else ++ msm_bus_dbg_fill_cl_buffer(pdata, index, clid); ++} ++EXPORT_SYMBOL(msm_bus_dbg_client_data); ++ ++/** ++ * msm_bus_dbg_commit_data() - Add commit data from fabrics ++ * @fabname: Fabric name specified in platform data ++ * @cdata: Commit Data ++ * @nmasters: Number of masters attached to fabric ++ * @nslaves: Number of slaves attached to fabric ++ * @ntslaves: Number of tiered slaves attached to fabric ++ * @op: Operation to be performed ++ */ ++void msm_bus_dbg_commit_data(const char *fabname, void *cdata, ++ int nmasters, int nslaves, int ntslaves, int op) ++{ ++ struct dentry *file = NULL; ++ ++ if (op == MSM_BUS_DBG_REGISTER) ++ msm_bus_dbg_record_fabric(fabname, file); ++ else if (op == MSM_BUS_DBG_UNREGISTER) ++ msm_bus_dbg_free_fabric(fabname); ++ else ++ msm_bus_dbg_fill_fab_buffer(fabname, cdata, nmasters, ++ nslaves, ntslaves); ++} ++EXPORT_SYMBOL(msm_bus_dbg_commit_data); ++#endif ++ ++static int __init msm_bus_debugfs_init(void) ++{ ++ struct dentry *commit, *shell_client, *rules_dbg; ++ struct msm_bus_fab_list *fablist; ++ struct msm_bus_cldata *cldata = NULL; ++ uint64_t val = 0; ++ ++ dir = debugfs_create_dir("msm-bus-dbg", NULL); ++ if ((!dir) || IS_ERR(dir)) { ++ MSM_BUS_ERR("Couldn't create 
msm-bus-dbg\n"); ++ goto err; ++ } ++ ++ clients = debugfs_create_dir("client-data", dir); ++ if ((!dir) || IS_ERR(dir)) { ++ MSM_BUS_ERR("Couldn't create clients\n"); ++ goto err; ++ } ++ ++ shell_client = debugfs_create_dir("shell-client", dir); ++ if ((!dir) || IS_ERR(dir)) { ++ MSM_BUS_ERR("Couldn't create clients\n"); ++ goto err; ++ } ++ ++ commit = debugfs_create_dir("commit-data", dir); ++ if ((!dir) || IS_ERR(dir)) { ++ MSM_BUS_ERR("Couldn't create commit\n"); ++ goto err; ++ } ++ ++ rules_dbg = debugfs_create_dir("rules-dbg", dir); ++ if ((!rules_dbg) || IS_ERR(rules_dbg)) { ++ MSM_BUS_ERR("Couldn't create rules-dbg\n"); ++ goto err; ++ } ++ ++ if (debugfs_create_file("print_rules", S_IRUGO | S_IWUSR, ++ rules_dbg, &val, &rules_dbg_fops) == NULL) ++ goto err; ++ ++ if (debugfs_create_file("update_request", S_IRUGO | S_IWUSR, ++ shell_client, &val, &shell_client_en_fops) == NULL) ++ goto err; ++ if (debugfs_create_file("ib", S_IRUGO | S_IWUSR, shell_client, &val, ++ &shell_client_ib_fops) == NULL) ++ goto err; ++ if (debugfs_create_file("ab", S_IRUGO | S_IWUSR, shell_client, &val, ++ &shell_client_ab_fops) == NULL) ++ goto err; ++ if (debugfs_create_file("slv", S_IRUGO | S_IWUSR, shell_client, ++ &val, &shell_client_slv_fops) == NULL) ++ goto err; ++ if (debugfs_create_file("mas", S_IRUGO | S_IWUSR, shell_client, ++ &val, &shell_client_mas_fops) == NULL) ++ goto err; ++ if (debugfs_create_file("update-request", S_IRUGO | S_IWUSR, ++ clients, NULL, &msm_bus_dbg_update_request_fops) == NULL) ++ goto err; ++ ++ rules_buf = kzalloc(MAX_BUFF_SIZE, GFP_KERNEL); ++ if (!rules_buf) { ++ MSM_BUS_ERR("Failed to alloc rules_buf"); ++ goto err; ++ } ++ ++ list_for_each_entry(cldata, &cl_list, list) { ++ if (cldata->pdata->name == NULL) { ++ MSM_BUS_DBG("Client name not found\n"); ++ continue; ++ } ++ cldata->file = msm_bus_dbg_create(cldata-> ++ pdata->name, S_IRUGO, clients, cldata->clid); ++ } ++ ++ mutex_lock(&msm_bus_dbg_fablist_lock); ++ 
list_for_each_entry(fablist, &fabdata_list, list) { ++ fablist->file = debugfs_create_file(fablist->name, S_IRUGO, ++ commit, (void *)fablist->name, &fabric_data_fops); ++ if (fablist->file == NULL) { ++ MSM_BUS_DBG("Cannot create files for commit data\n"); ++ kfree(rules_buf); ++ goto err; ++ } ++ } ++ mutex_unlock(&msm_bus_dbg_fablist_lock); ++ ++ msm_bus_dbg_init_vectors(); ++ return 0; ++err: ++ debugfs_remove_recursive(dir); ++ return -ENODEV; ++} ++late_initcall(msm_bus_debugfs_init); ++ ++static void __exit msm_bus_dbg_teardown(void) ++{ ++ struct msm_bus_fab_list *fablist = NULL, *fablist_temp; ++ struct msm_bus_cldata *cldata = NULL, *cldata_temp; ++ ++ debugfs_remove_recursive(dir); ++ list_for_each_entry_safe(cldata, cldata_temp, &cl_list, list) { ++ list_del(&cldata->list); ++ kfree(cldata); ++ } ++ mutex_lock(&msm_bus_dbg_fablist_lock); ++ list_for_each_entry_safe(fablist, fablist_temp, &fabdata_list, list) { ++ list_del(&fablist->list); ++ kfree(fablist); ++ } ++ kfree(rules_buf); ++ mutex_unlock(&msm_bus_dbg_fablist_lock); ++} ++module_exit(msm_bus_dbg_teardown); ++MODULE_DESCRIPTION("Debugfs for msm bus scaling client"); ++MODULE_LICENSE("GPL v2"); ++MODULE_AUTHOR("Gagan Mac "); +--- /dev/null ++++ b/drivers/bus/msm_bus/msm_bus_fabric_adhoc.c +@@ -0,0 +1,1281 @@ ++/* Copyright (c) 2014, Linux Foundation. All rights reserved. ++ * ++ * This program is free software; you can redistribute it and/or modify ++ * it under the terms of the GNU General Public License version 2 and ++ * only version 2 as published by the Free Software Foundation. ++ * ++ * This program is distributed in the hope that it will be useful, ++ * but WITHOUT ANY WARRANTY; without even the implied warranty of ++ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the ++ * GNU General Public License for more details. 
++ */ ++ ++#include ++#include ++#include ++#include ++#include ++#include ++#include ++#include "rpm-smd.h" ++#include "msm_bus_core.h" ++#include "msm_bus_adhoc.h" ++#include "msm_bus_noc.h" ++#include "msm_bus_bimc.h" ++ ++ssize_t vrail_show(struct device *dev, struct device_attribute *attr, ++ char *buf) ++{ ++ struct msm_bus_node_info_type *node_info = NULL; ++ struct msm_bus_node_device_type *bus_node = NULL; ++ ++ bus_node = dev->platform_data; ++ if (!bus_node) ++ return -EINVAL; ++ node_info = bus_node->node_info; ++ ++ return snprintf(buf, PAGE_SIZE, "%u", node_info->vrail_comp); ++} ++ ++ssize_t vrail_store(struct device *dev, struct device_attribute *attr, ++ const char *buf, size_t count) ++{ ++ struct msm_bus_node_info_type *node_info = NULL; ++ struct msm_bus_node_device_type *bus_node = NULL; ++ int ret = 0; ++ ++ bus_node = dev->platform_data; ++ if (!bus_node) ++ return -EINVAL; ++ node_info = bus_node->node_info; ++ ++ ret = sscanf(buf, "%u", &node_info->vrail_comp); ++ if (ret != 1) ++ return -EINVAL; ++ return count; ++} ++ ++DEVICE_ATTR(vrail, 0600, vrail_show, vrail_store); ++ ++struct static_rules_type { ++ int num_rules; ++ struct bus_rule_type *rules; ++}; ++ ++static struct static_rules_type static_rules; ++ ++static int enable_nodeclk(struct nodeclk *nclk) ++{ ++ int ret = 0; ++ ++ if (!nclk->enable) { ++ ret = clk_prepare_enable(nclk->clk); ++ ++ if (ret) { ++ MSM_BUS_ERR("%s: failed to enable clk ", __func__); ++ nclk->enable = false; ++ } else ++ nclk->enable = true; ++ } ++ return ret; ++} ++ ++static int disable_nodeclk(struct nodeclk *nclk) ++{ ++ int ret = 0; ++ ++ if (nclk->enable) { ++ clk_disable_unprepare(nclk->clk); ++ nclk->enable = false; ++ } ++ return ret; ++} ++ ++static int setrate_nodeclk(struct nodeclk *nclk, long rate) ++{ ++ int ret = 0; ++ ++ ret = clk_set_rate(nclk->clk, rate); ++ ++ if (ret) ++ MSM_BUS_ERR("%s: failed to setrate clk", __func__); ++ return ret; ++} ++ ++static int msm_bus_agg_fab_clks(struct 
device *bus_dev, void *data) ++{ ++ struct msm_bus_node_device_type *node = NULL; ++ int ret = 0; ++ int ctx = *(int *)data; ++ ++ if (ctx >= NUM_CTX) { ++ MSM_BUS_ERR("%s: Invalid Context %d", __func__, ctx); ++ goto exit_agg_fab_clks; ++ } ++ ++ node = bus_dev->platform_data; ++ if (!node) { ++ MSM_BUS_ERR("%s: Can't get device info", __func__); ++ goto exit_agg_fab_clks; ++ } ++ ++ if (!node->node_info->is_fab_dev) { ++ struct msm_bus_node_device_type *bus_dev = NULL; ++ ++ bus_dev = node->node_info->bus_device->platform_data; ++ ++ if (node->cur_clk_hz[ctx] >= bus_dev->cur_clk_hz[ctx]) ++ bus_dev->cur_clk_hz[ctx] = node->cur_clk_hz[ctx]; ++ } ++ ++exit_agg_fab_clks: ++ return ret; ++} ++ ++static int msm_bus_reset_fab_clks(struct device *bus_dev, void *data) ++{ ++ struct msm_bus_node_device_type *node = NULL; ++ int ret = 0; ++ int ctx = *(int *)data; ++ ++ if (ctx >= NUM_CTX) { ++ MSM_BUS_ERR("%s: Invalid Context %d", __func__, ctx); ++ goto exit_reset_fab_clks; ++ } ++ ++ node = bus_dev->platform_data; ++ if (!node) { ++ MSM_BUS_ERR("%s: Can't get device info", __func__); ++ goto exit_reset_fab_clks; ++ } ++ ++ if (node->node_info->is_fab_dev) { ++ node->cur_clk_hz[ctx] = 0; ++ MSM_BUS_DBG("Resetting for node %d", node->node_info->id); ++ } ++exit_reset_fab_clks: ++ return ret; ++} ++ ++ ++static int send_rpm_msg(struct device *device) ++{ ++ int ret = 0; ++ int ctx; ++ int rsc_type; ++ struct msm_bus_node_device_type *ndev = ++ device->platform_data; ++ struct msm_rpm_kvp rpm_kvp; ++ ++ if (!ndev) { ++ MSM_BUS_ERR("%s: Error getting node info.", __func__); ++ ret = -ENODEV; ++ goto exit_send_rpm_msg; ++ } ++ ++ rpm_kvp.length = sizeof(uint64_t); ++ rpm_kvp.key = RPM_MASTER_FIELD_BW; ++ ++ for (ctx = MSM_RPM_CTX_ACTIVE_SET; ctx <= MSM_RPM_CTX_SLEEP_SET; ++ ctx++) { ++ if (ctx == MSM_RPM_CTX_ACTIVE_SET) ++ rpm_kvp.data = ++ (uint8_t *)&ndev->node_ab.ab[MSM_RPM_CTX_ACTIVE_SET]; ++ else { ++ rpm_kvp.data = ++ (uint8_t *) 
&ndev->node_ab.ab[MSM_RPM_CTX_SLEEP_SET]; ++ } ++ ++ if (ndev->node_info->mas_rpm_id != -1) { ++ rsc_type = RPM_BUS_MASTER_REQ; ++ ret = msm_rpm_send_message(ctx, rsc_type, ++ ndev->node_info->mas_rpm_id, &rpm_kvp, 1); ++ if (ret) { ++ MSM_BUS_ERR("%s: Failed to send RPM message:", ++ __func__); ++ MSM_BUS_ERR("%s:Node Id %d RPM id %d", ++ __func__, ndev->node_info->id, ++ ndev->node_info->mas_rpm_id); ++ goto exit_send_rpm_msg; ++ } ++ } ++ ++ if (ndev->node_info->slv_rpm_id != -1) { ++ rsc_type = RPM_BUS_SLAVE_REQ; ++ ret = msm_rpm_send_message(ctx, rsc_type, ++ ndev->node_info->slv_rpm_id, &rpm_kvp, 1); ++ if (ret) { ++ MSM_BUS_ERR("%s: Failed to send RPM message:", ++ __func__); ++ MSM_BUS_ERR("%s: Node Id %d RPM id %d", ++ __func__, ndev->node_info->id, ++ ndev->node_info->slv_rpm_id); ++ goto exit_send_rpm_msg; ++ } ++ } ++ } ++exit_send_rpm_msg: ++ return ret; ++} ++ ++static int flush_bw_data(struct device *node_device, int ctx) ++{ ++ struct msm_bus_node_device_type *node_info; ++ int ret = 0; ++ ++ node_info = node_device->platform_data; ++ if (!node_info) { ++ MSM_BUS_ERR("%s: Unable to find bus device for device", ++ __func__); ++ ret = -ENODEV; ++ goto exit_flush_bw_data; ++ } ++ ++ if (node_info->node_ab.dirty) { ++ if (node_info->ap_owned) { ++ struct msm_bus_node_device_type *bus_device = ++ node_info->node_info->bus_device->platform_data; ++ struct msm_bus_fab_device_type *fabdev = ++ bus_device->fabdev; ++ ++ if (fabdev && fabdev->noc_ops.update_bw_reg && ++ fabdev->noc_ops.update_bw_reg ++ (node_info->node_info->qos_params.mode)) ++ ret = fabdev->noc_ops.set_bw(node_info, ++ fabdev->qos_base, ++ fabdev->base_offset, ++ fabdev->qos_off, ++ fabdev->qos_freq); ++ } else { ++ ret = send_rpm_msg(node_device); ++ ++ if (ret) ++ MSM_BUS_ERR("%s: Failed to send RPM msg for%d", ++ __func__, node_info->node_info->id); ++ } ++ node_info->node_ab.dirty = false; ++ } ++ ++exit_flush_bw_data: ++ return ret; ++ ++} ++ ++static int flush_clk_data(struct device 
*node_device, int ctx) ++{ ++ struct msm_bus_node_device_type *node; ++ struct nodeclk *nodeclk = NULL; ++ int ret = 0; ++ ++ node = node_device->platform_data; ++ if (!node) { ++ MSM_BUS_ERR("Unable to find bus device"); ++ ret = -ENODEV; ++ goto exit_flush_clk_data; ++ } ++ ++ nodeclk = &node->clk[ctx]; ++ if (node->node_info->is_fab_dev) { ++ if (nodeclk->rate != node->cur_clk_hz[ctx]) { ++ nodeclk->rate = node->cur_clk_hz[ctx]; ++ nodeclk->dirty = true; ++ } ++ } ++ ++ if (nodeclk && nodeclk->clk && nodeclk->dirty) { ++ long rounded_rate; ++ ++ if (nodeclk->rate) { ++ rounded_rate = clk_round_rate(nodeclk->clk, ++ nodeclk->rate); ++ ret = setrate_nodeclk(nodeclk, rounded_rate); ++ ++ if (ret) { ++ MSM_BUS_ERR("%s: Failed to set_rate %lu for %d", ++ __func__, rounded_rate, ++ node->node_info->id); ++ ret = -ENODEV; ++ goto exit_flush_clk_data; ++ } ++ ++ ret = enable_nodeclk(nodeclk); ++ } else ++ ret = disable_nodeclk(nodeclk); ++ ++ if (ret) { ++ MSM_BUS_ERR("%s: Failed to enable for %d", __func__, ++ node->node_info->id); ++ ret = -ENODEV; ++ goto exit_flush_clk_data; ++ } ++ MSM_BUS_DBG("%s: Updated %d clk to %llu", __func__, ++ node->node_info->id, nodeclk->rate); ++ ++ } ++exit_flush_clk_data: ++ /* Reset the aggregated clock rate for fab devices*/ ++ if (node && node->node_info->is_fab_dev) ++ node->cur_clk_hz[ctx] = 0; ++ ++ if (nodeclk) ++ nodeclk->dirty = 0; ++ return ret; ++} ++ ++int msm_bus_commit_data(int *dirty_nodes, int ctx, int num_dirty) ++{ ++ int ret = 0; ++ int i = 0; ++ ++ /* Aggregate the bus clocks */ ++ bus_for_each_dev(&msm_bus_type, NULL, (void *)&ctx, ++ msm_bus_agg_fab_clks); ++ ++ for (i = 0; i < num_dirty; i++) { ++ struct device *node_device = ++ bus_find_device(&msm_bus_type, NULL, ++ (void *)&dirty_nodes[i], ++ msm_bus_device_match_adhoc); ++ ++ if (!node_device) { ++ MSM_BUS_ERR("Can't find device for %d", dirty_nodes[i]); ++ continue; ++ } ++ ++ ret = flush_bw_data(node_device, ctx); ++ if (ret) ++ MSM_BUS_ERR("%s: Error 
flushing bw data for node %d", ++ __func__, dirty_nodes[i]); ++ ++ ret = flush_clk_data(node_device, ctx); ++ if (ret) ++ MSM_BUS_ERR("%s: Error flushing clk data for node %d", ++ __func__, dirty_nodes[i]); ++ } ++ kfree(dirty_nodes); ++ /* Aggregate the bus clocks */ ++ bus_for_each_dev(&msm_bus_type, NULL, (void *)&ctx, ++ msm_bus_reset_fab_clks); ++ return ret; ++} ++ ++void *msm_bus_realloc_devmem(struct device *dev, void *p, size_t old_size, ++ size_t new_size, gfp_t flags) ++{ ++ void *ret; ++ size_t copy_size = old_size; ++ ++ if (!new_size) { ++ devm_kfree(dev, p); ++ return ZERO_SIZE_PTR; ++ } ++ ++ if (new_size < old_size) ++ copy_size = new_size; ++ ++ ret = devm_kzalloc(dev, new_size, flags); ++ if (!ret) { ++ MSM_BUS_ERR("%s: Error Reallocating memory", __func__); ++ goto exit_realloc_devmem; ++ } ++ ++ memcpy(ret, p, copy_size); ++ devm_kfree(dev, p); ++exit_realloc_devmem: ++ return ret; ++} ++ ++ ++static int add_dirty_node(int **dirty_nodes, int id, int *num_dirty) ++{ ++ int i; ++ int found = 0; ++ int ret = 0; ++ int *dnode = NULL; ++ ++ for (i = 0; i < *num_dirty; i++) { ++ if ((*dirty_nodes)[i] == id) { ++ found = 1; ++ break; ++ } ++ } ++ ++ if (!found) { ++ (*num_dirty)++; ++ dnode = ++ krealloc(*dirty_nodes, sizeof(int) * (*num_dirty), ++ GFP_KERNEL); ++ ++ if (ZERO_OR_NULL_PTR(dnode)) { ++ MSM_BUS_ERR("%s: Failure allocating dirty nodes array", ++ __func__); ++ ret = -ENOMEM; ++ } else { ++ *dirty_nodes = dnode; ++ (*dirty_nodes)[(*num_dirty) - 1] = id; ++ } ++ } ++ ++ return ret; ++} ++ ++int msm_bus_update_bw(struct msm_bus_node_device_type *nodedev, int ctx, ++ int64_t add_bw, int **dirty_nodes, int *num_dirty) ++{ ++ int ret = 0; ++ int i, j; ++ uint64_t cur_ab_slp = 0; ++ uint64_t cur_ab_act = 0; ++ ++ if (nodedev->node_info->virt_dev) ++ goto exit_update_bw; ++ ++ for (i = 0; i < NUM_CTX; i++) { ++ for (j = 0; j < nodedev->num_lnodes; j++) { ++ if (i == DUAL_CTX) { ++ cur_ab_act += ++ nodedev->lnode_list[j].lnode_ab[i]; ++ cur_ab_slp 
+= ++ nodedev->lnode_list[j].lnode_ab[i]; ++ } else ++ cur_ab_act += ++ nodedev->lnode_list[j].lnode_ab[i]; ++ } ++ } ++ ++ if (nodedev->node_ab.ab[MSM_RPM_CTX_ACTIVE_SET] != cur_ab_act) { ++ nodedev->node_ab.ab[MSM_RPM_CTX_ACTIVE_SET] = cur_ab_act; ++ nodedev->node_ab.ab[MSM_RPM_CTX_SLEEP_SET] = cur_ab_slp; ++ nodedev->node_ab.dirty = true; ++ ret = add_dirty_node(dirty_nodes, nodedev->node_info->id, ++ num_dirty); ++ ++ if (ret) { ++ MSM_BUS_ERR("%s: Failed to add dirty node %d", __func__, ++ nodedev->node_info->id); ++ goto exit_update_bw; ++ } ++ } ++ ++exit_update_bw: ++ return ret; ++} ++ ++int msm_bus_update_clks(struct msm_bus_node_device_type *nodedev, ++ int ctx, int **dirty_nodes, int *num_dirty) ++{ ++ int status = 0; ++ struct nodeclk *nodeclk; ++ struct nodeclk *busclk; ++ struct msm_bus_node_device_type *bus_info = NULL; ++ uint64_t req_clk; ++ ++ bus_info = nodedev->node_info->bus_device->platform_data; ++ ++ if (!bus_info) { ++ MSM_BUS_ERR("%s: Unable to find bus device for device %d", ++ __func__, nodedev->node_info->id); ++ status = -ENODEV; ++ goto exit_set_clks; ++ } ++ ++ req_clk = nodedev->cur_clk_hz[ctx]; ++ busclk = &bus_info->clk[ctx]; ++ ++ if (busclk->rate != req_clk) { ++ busclk->rate = req_clk; ++ busclk->dirty = 1; ++ MSM_BUS_DBG("%s: Modifying bus clk %d Rate %llu", __func__, ++ bus_info->node_info->id, req_clk); ++ status = add_dirty_node(dirty_nodes, bus_info->node_info->id, ++ num_dirty); ++ ++ if (status) { ++ MSM_BUS_ERR("%s: Failed to add dirty node %d", __func__, ++ bus_info->node_info->id); ++ goto exit_set_clks; ++ } ++ } ++ ++ req_clk = nodedev->cur_clk_hz[ctx]; ++ nodeclk = &nodedev->clk[ctx]; ++ ++ if (IS_ERR_OR_NULL(nodeclk)) ++ goto exit_set_clks; ++ ++ if (!nodeclk->dirty || (nodeclk->dirty && (nodeclk->rate < req_clk))) { ++ nodeclk->rate = req_clk; ++ nodeclk->dirty = 1; ++ MSM_BUS_DBG("%s: Modifying node clk %d Rate %llu", __func__, ++ nodedev->node_info->id, req_clk); ++ status = add_dirty_node(dirty_nodes, 
nodedev->node_info->id, ++ num_dirty); ++ if (status) { ++ MSM_BUS_ERR("%s: Failed to add dirty node %d", __func__, ++ nodedev->node_info->id); ++ goto exit_set_clks; ++ } ++ } ++ ++exit_set_clks: ++ return status; ++} ++ ++static void msm_bus_fab_init_noc_ops(struct msm_bus_node_device_type *bus_dev) ++{ ++ switch (bus_dev->fabdev->bus_type) { ++ case MSM_BUS_NOC: ++ msm_bus_noc_set_ops(bus_dev); ++ break; ++ case MSM_BUS_BIMC: ++ msm_bus_bimc_set_ops(bus_dev); ++ break; ++ default: ++ MSM_BUS_ERR("%s: Invalid Bus type", __func__); ++ } ++} ++ ++static int msm_bus_qos_disable_clk(struct msm_bus_node_device_type *node, ++ int disable_bus_qos_clk) ++{ ++ struct msm_bus_node_device_type *bus_node = NULL; ++ int ret = 0; ++ ++ if (!node) { ++ ret = -ENXIO; ++ goto exit_disable_qos_clk; ++ } ++ ++ bus_node = node->node_info->bus_device->platform_data; ++ ++ if (!bus_node) { ++ ret = -ENXIO; ++ goto exit_disable_qos_clk; ++ } ++ ++ if (disable_bus_qos_clk) ++ ret = disable_nodeclk(&bus_node->clk[DUAL_CTX]); ++ ++ if (ret) { ++ MSM_BUS_ERR("%s: Failed to disable bus clk, node %d", ++ __func__, node->node_info->id); ++ goto exit_disable_qos_clk; ++ } ++ ++ if (!IS_ERR_OR_NULL(node->qos_clk.clk)) { ++ ret = disable_nodeclk(&node->qos_clk); ++ ++ if (ret) { ++ MSM_BUS_ERR("%s: Failed to disable mas qos clk,node %d", ++ __func__, node->node_info->id); ++ goto exit_disable_qos_clk; ++ } ++ } ++ ++exit_disable_qos_clk: ++ return ret; ++} ++ ++static int msm_bus_qos_enable_clk(struct msm_bus_node_device_type *node) ++{ ++ struct msm_bus_node_device_type *bus_node = NULL; ++ long rounded_rate; ++ int ret = 0; ++ int bus_qos_enabled = 0; ++ ++ if (!node) { ++ ret = -ENXIO; ++ goto exit_enable_qos_clk; ++ } ++ ++ bus_node = node->node_info->bus_device->platform_data; ++ ++ if (!bus_node) { ++ ret = -ENXIO; ++ goto exit_enable_qos_clk; ++ } ++ ++ /* Check if the bus clk is already set before trying to set it ++ * Do this only during ++ * a. Bootup ++ * b. 
Only for bus clks ++ **/ ++ if (!clk_get_rate(bus_node->clk[DUAL_CTX].clk)) { ++ rounded_rate = clk_round_rate(bus_node->clk[DUAL_CTX].clk, 1); ++ ret = setrate_nodeclk(&bus_node->clk[DUAL_CTX], rounded_rate); ++ if (ret) { ++ MSM_BUS_ERR("%s: Failed to set bus clk, node %d", ++ __func__, node->node_info->id); ++ goto exit_enable_qos_clk; ++ } ++ ++ ret = enable_nodeclk(&bus_node->clk[DUAL_CTX]); ++ if (ret) { ++ MSM_BUS_ERR("%s: Failed to enable bus clk, node %d", ++ __func__, node->node_info->id); ++ goto exit_enable_qos_clk; ++ } ++ bus_qos_enabled = 1; ++ } ++ ++ if (!IS_ERR_OR_NULL(node->qos_clk.clk)) { ++ rounded_rate = clk_round_rate(node->qos_clk.clk, 1); ++ ret = setrate_nodeclk(&node->qos_clk, rounded_rate); ++ if (ret) { ++ MSM_BUS_ERR("%s: Failed to enable mas qos clk, node %d", ++ __func__, node->node_info->id); ++ goto exit_enable_qos_clk; ++ } ++ ++ ret = enable_nodeclk(&node->qos_clk); ++ if (ret) { ++ MSM_BUS_ERR("Err enable mas qos clk, node %d ret %d", ++ node->node_info->id, ret); ++ goto exit_enable_qos_clk; ++ } ++ } ++ ret = bus_qos_enabled; ++ ++exit_enable_qos_clk: ++ return ret; ++} ++ ++int msm_bus_enable_limiter(struct msm_bus_node_device_type *node_dev, ++ bool enable, uint64_t lim_bw) ++{ ++ int ret = 0; ++ struct msm_bus_node_device_type *bus_node_dev; ++ ++ if (!node_dev) { ++ MSM_BUS_ERR("No device specified"); ++ ret = -ENXIO; ++ goto exit_enable_limiter; ++ } ++ ++ if (!node_dev->ap_owned) { ++ MSM_BUS_ERR("Device is not AP owned %d.", ++ node_dev->node_info->id); ++ ret = -ENXIO; ++ goto exit_enable_limiter; ++ } ++ ++ bus_node_dev = node_dev->node_info->bus_device->platform_data; ++ if (!bus_node_dev) { ++ MSM_BUS_ERR("Unable to get bus device infofor %d", ++ node_dev->node_info->id); ++ ret = -ENXIO; ++ goto exit_enable_limiter; ++ } ++ if (bus_node_dev->fabdev && ++ bus_node_dev->fabdev->noc_ops.limit_mport) { ++ ret = msm_bus_qos_enable_clk(node_dev); ++ if (ret < 0) { ++ MSM_BUS_ERR("Can't Enable QoS clk %d", ++ 
node_dev->node_info->id); ++ goto exit_enable_limiter; ++ } ++ bus_node_dev->fabdev->noc_ops.limit_mport( ++ node_dev, ++ bus_node_dev->fabdev->qos_base, ++ bus_node_dev->fabdev->base_offset, ++ bus_node_dev->fabdev->qos_off, ++ bus_node_dev->fabdev->qos_freq, ++ enable, lim_bw); ++ msm_bus_qos_disable_clk(node_dev, ret); ++ } ++ ++exit_enable_limiter: ++ return ret; ++} ++ ++static int msm_bus_dev_init_qos(struct device *dev, void *data) ++{ ++ int ret = 0; ++ struct msm_bus_node_device_type *node_dev = NULL; ++ ++ node_dev = dev->platform_data; ++ ++ if (!node_dev) { ++ MSM_BUS_ERR("%s: Unable to get node device info" , __func__); ++ ret = -ENXIO; ++ goto exit_init_qos; ++ } ++ ++ MSM_BUS_DBG("Device = %d", node_dev->node_info->id); ++ ++ if (node_dev->ap_owned) { ++ struct msm_bus_node_device_type *bus_node_info; ++ ++ bus_node_info = node_dev->node_info->bus_device->platform_data; ++ ++ if (!bus_node_info) { ++ MSM_BUS_ERR("%s: Unable to get bus device infofor %d", ++ __func__, ++ node_dev->node_info->id); ++ ret = -ENXIO; ++ goto exit_init_qos; ++ } ++ ++ if (bus_node_info->fabdev && ++ bus_node_info->fabdev->noc_ops.qos_init) { ++ int ret = 0; ++ ++ if (node_dev->ap_owned && ++ (node_dev->node_info->qos_params.mode) != -1) { ++ ++ if (bus_node_info->fabdev->bypass_qos_prg) ++ goto exit_init_qos; ++ ++ ret = msm_bus_qos_enable_clk(node_dev); ++ if (ret < 0) { ++ MSM_BUS_ERR("Can't Enable QoS clk %d", ++ node_dev->node_info->id); ++ goto exit_init_qos; ++ } ++ ++ bus_node_info->fabdev->noc_ops.qos_init( ++ node_dev, ++ bus_node_info->fabdev->qos_base, ++ bus_node_info->fabdev->base_offset, ++ bus_node_info->fabdev->qos_off, ++ bus_node_info->fabdev->qos_freq); ++ msm_bus_qos_disable_clk(node_dev, ret); ++ } ++ } else ++ MSM_BUS_ERR("%s: Skipping QOS init for %d", ++ __func__, node_dev->node_info->id); ++ } ++exit_init_qos: ++ return ret; ++} ++ ++static int msm_bus_fabric_init(struct device *dev, ++ struct msm_bus_node_device_type *pdata) ++{ ++ struct 
msm_bus_fab_device_type *fabdev; ++ struct msm_bus_node_device_type *node_dev = NULL; ++ int ret = 0; ++ ++ node_dev = dev->platform_data; ++ if (!node_dev) { ++ MSM_BUS_ERR("%s: Unable to get bus device info" , __func__); ++ ret = -ENXIO; ++ goto exit_fabric_init; ++ } ++ ++ if (node_dev->node_info->virt_dev) { ++ MSM_BUS_ERR("%s: Skip Fab init for virtual device %d", __func__, ++ node_dev->node_info->id); ++ goto exit_fabric_init; ++ } ++ ++ fabdev = devm_kzalloc(dev, sizeof(struct msm_bus_fab_device_type), ++ GFP_KERNEL); ++ if (!fabdev) { ++ MSM_BUS_ERR("Fabric alloc failed\n"); ++ ret = -ENOMEM; ++ goto exit_fabric_init; ++ } ++ ++ node_dev->fabdev = fabdev; ++ fabdev->pqos_base = pdata->fabdev->pqos_base; ++ fabdev->qos_range = pdata->fabdev->qos_range; ++ fabdev->base_offset = pdata->fabdev->base_offset; ++ fabdev->qos_off = pdata->fabdev->qos_off; ++ fabdev->qos_freq = pdata->fabdev->qos_freq; ++ fabdev->bus_type = pdata->fabdev->bus_type; ++ fabdev->bypass_qos_prg = pdata->fabdev->bypass_qos_prg; ++ fabdev->util_fact = pdata->fabdev->util_fact; ++ fabdev->vrail_comp = pdata->fabdev->vrail_comp; ++ msm_bus_fab_init_noc_ops(node_dev); ++ ++ fabdev->qos_base = devm_ioremap(dev, ++ fabdev->pqos_base, fabdev->qos_range); ++ if (!fabdev->qos_base) { ++ MSM_BUS_ERR("%s: Error remapping address 0x%zx :bus device %d", ++ __func__, ++ (size_t)fabdev->pqos_base, node_dev->node_info->id); ++ ret = -ENOMEM; ++ goto exit_fabric_init; ++ } ++ ++ /*if (msmbus_coresight_init(pdev)) ++ pr_warn("Coresight support absent for bus: %d\n", pdata->id);*/ ++exit_fabric_init: ++ return ret; ++} ++ ++static int msm_bus_init_clk(struct device *bus_dev, ++ struct msm_bus_node_device_type *pdata) ++{ ++ unsigned int ctx; ++ int ret = 0; ++ struct msm_bus_node_device_type *node_dev = bus_dev->platform_data; ++ ++ for (ctx = 0; ctx < NUM_CTX; ctx++) { ++ if (!IS_ERR_OR_NULL(pdata->clk[ctx].clk)) { ++ node_dev->clk[ctx].clk = pdata->clk[ctx].clk; ++ node_dev->clk[ctx].enable = false; ++ 
node_dev->clk[ctx].dirty = false; ++ MSM_BUS_ERR("%s: Valid node clk node %d ctx %d", ++ __func__, node_dev->node_info->id, ctx); ++ } ++ } ++ ++ if (!IS_ERR_OR_NULL(pdata->qos_clk.clk)) { ++ node_dev->qos_clk.clk = pdata->qos_clk.clk; ++ node_dev->qos_clk.enable = false; ++ MSM_BUS_ERR("%s: Valid Iface clk node %d", __func__, ++ node_dev->node_info->id); ++ } ++ ++ return ret; ++} ++ ++static int msm_bus_copy_node_info(struct msm_bus_node_device_type *pdata, ++ struct device *bus_dev) ++{ ++ int ret = 0; ++ struct msm_bus_node_info_type *node_info = NULL; ++ struct msm_bus_node_info_type *pdata_node_info = NULL; ++ struct msm_bus_node_device_type *bus_node = NULL; ++ ++ bus_node = bus_dev->platform_data; ++ ++ if (!bus_node || !pdata) { ++ ret = -ENXIO; ++ MSM_BUS_ERR("%s: Invalid pointers pdata %p, bus_node %p", ++ __func__, pdata, bus_node); ++ goto exit_copy_node_info; ++ } ++ ++ node_info = bus_node->node_info; ++ pdata_node_info = pdata->node_info; ++ ++ node_info->name = pdata_node_info->name; ++ node_info->id = pdata_node_info->id; ++ node_info->bus_device_id = pdata_node_info->bus_device_id; ++ node_info->mas_rpm_id = pdata_node_info->mas_rpm_id; ++ node_info->slv_rpm_id = pdata_node_info->slv_rpm_id; ++ node_info->num_connections = pdata_node_info->num_connections; ++ node_info->num_blist = pdata_node_info->num_blist; ++ node_info->num_qports = pdata_node_info->num_qports; ++ node_info->buswidth = pdata_node_info->buswidth; ++ node_info->virt_dev = pdata_node_info->virt_dev; ++ node_info->is_fab_dev = pdata_node_info->is_fab_dev; ++ node_info->qos_params.mode = pdata_node_info->qos_params.mode; ++ node_info->qos_params.prio1 = pdata_node_info->qos_params.prio1; ++ node_info->qos_params.prio0 = pdata_node_info->qos_params.prio0; ++ node_info->qos_params.prio_lvl = pdata_node_info->qos_params.prio_lvl; ++ node_info->qos_params.prio_rd = pdata_node_info->qos_params.prio_rd; ++ node_info->qos_params.prio_wr = pdata_node_info->qos_params.prio_wr; ++ 
node_info->qos_params.gp = pdata_node_info->qos_params.gp; ++ node_info->qos_params.thmp = pdata_node_info->qos_params.thmp; ++ node_info->qos_params.ws = pdata_node_info->qos_params.ws; ++ node_info->qos_params.bw_buffer = pdata_node_info->qos_params.bw_buffer; ++ node_info->util_fact = pdata_node_info->util_fact; ++ node_info->vrail_comp = pdata_node_info->vrail_comp; ++ ++ node_info->dev_connections = devm_kzalloc(bus_dev, ++ sizeof(struct device *) * ++ pdata_node_info->num_connections, ++ GFP_KERNEL); ++ if (!node_info->dev_connections) { ++ MSM_BUS_ERR("%s:Bus dev connections alloc failed\n", __func__); ++ ret = -ENOMEM; ++ goto exit_copy_node_info; ++ } ++ ++ node_info->connections = devm_kzalloc(bus_dev, ++ sizeof(int) * pdata_node_info->num_connections, ++ GFP_KERNEL); ++ if (!node_info->connections) { ++ MSM_BUS_ERR("%s:Bus connections alloc failed\n", __func__); ++ devm_kfree(bus_dev, node_info->dev_connections); ++ ret = -ENOMEM; ++ goto exit_copy_node_info; ++ } ++ ++ memcpy(node_info->connections, ++ pdata_node_info->connections, ++ sizeof(int) * pdata_node_info->num_connections); ++ ++ node_info->black_connections = devm_kzalloc(bus_dev, ++ sizeof(struct device *) * ++ pdata_node_info->num_blist, ++ GFP_KERNEL); ++ if (!node_info->black_connections) { ++ MSM_BUS_ERR("%s: Bus black connections alloc failed\n", ++ __func__); ++ devm_kfree(bus_dev, node_info->dev_connections); ++ devm_kfree(bus_dev, node_info->connections); ++ ret = -ENOMEM; ++ goto exit_copy_node_info; ++ } ++ ++ node_info->black_listed_connections = devm_kzalloc(bus_dev, ++ pdata_node_info->num_blist * sizeof(int), ++ GFP_KERNEL); ++ if (!node_info->black_listed_connections) { ++ MSM_BUS_ERR("%s:Bus black list connections alloc failed\n", ++ __func__); ++ devm_kfree(bus_dev, node_info->black_connections); ++ devm_kfree(bus_dev, node_info->dev_connections); ++ devm_kfree(bus_dev, node_info->connections); ++ ret = -ENOMEM; ++ goto exit_copy_node_info; ++ } ++ ++ 
memcpy(node_info->black_listed_connections, ++ pdata_node_info->black_listed_connections, ++ sizeof(int) * pdata_node_info->num_blist); ++ ++ node_info->qport = devm_kzalloc(bus_dev, ++ sizeof(int) * pdata_node_info->num_qports, ++ GFP_KERNEL); ++ if (!node_info->qport) { ++ MSM_BUS_ERR("%s:Bus qport allocation failed\n", __func__); ++ devm_kfree(bus_dev, node_info->dev_connections); ++ devm_kfree(bus_dev, node_info->connections); ++ devm_kfree(bus_dev, node_info->black_listed_connections); ++ ret = -ENOMEM; ++ goto exit_copy_node_info; ++ } ++ ++ memcpy(node_info->qport, ++ pdata_node_info->qport, ++ sizeof(int) * pdata_node_info->num_qports); ++ ++exit_copy_node_info: ++ return ret; ++} ++ ++static struct device *msm_bus_device_init( ++ struct msm_bus_node_device_type *pdata) ++{ ++ struct device *bus_dev = NULL; ++ struct msm_bus_node_device_type *bus_node = NULL; ++ struct msm_bus_node_info_type *node_info = NULL; ++ int ret = 0; ++ ++ bus_dev = kzalloc(sizeof(struct device), GFP_KERNEL); ++ if (!bus_dev) { ++ MSM_BUS_ERR("%s:Device alloc failed\n", __func__); ++ bus_dev = NULL; ++ goto exit_device_init; ++ } ++ /** ++ * Init here so we can use devm calls ++ */ ++ device_initialize(bus_dev); ++ ++ bus_node = devm_kzalloc(bus_dev, ++ sizeof(struct msm_bus_node_device_type), GFP_KERNEL); ++ if (!bus_node) { ++ MSM_BUS_ERR("%s:Bus node alloc failed\n", __func__); ++ kfree(bus_dev); ++ bus_dev = NULL; ++ goto exit_device_init; ++ } ++ ++ node_info = devm_kzalloc(bus_dev, ++ sizeof(struct msm_bus_node_info_type), GFP_KERNEL); ++ if (!node_info) { ++ MSM_BUS_ERR("%s:Bus node info alloc failed\n", __func__); ++ devm_kfree(bus_dev, bus_node); ++ kfree(bus_dev); ++ bus_dev = NULL; ++ goto exit_device_init; ++ } ++ ++ bus_node->node_info = node_info; ++ bus_node->ap_owned = pdata->ap_owned; ++ bus_dev->platform_data = bus_node; ++ ++ if (msm_bus_copy_node_info(pdata, bus_dev) < 0) { ++ devm_kfree(bus_dev, bus_node); ++ devm_kfree(bus_dev, node_info); ++ kfree(bus_dev); 
++ bus_dev = NULL; ++ goto exit_device_init; ++ } ++ ++ bus_dev->bus = &msm_bus_type; ++ dev_set_name(bus_dev, bus_node->node_info->name); ++ ++ ret = device_add(bus_dev); ++ if (ret < 0) { ++ MSM_BUS_ERR("%s: Error registering device %d", ++ __func__, pdata->node_info->id); ++ devm_kfree(bus_dev, bus_node); ++ devm_kfree(bus_dev, node_info->dev_connections); ++ devm_kfree(bus_dev, node_info->connections); ++ devm_kfree(bus_dev, node_info->black_connections); ++ devm_kfree(bus_dev, node_info->black_listed_connections); ++ devm_kfree(bus_dev, node_info); ++ kfree(bus_dev); ++ bus_dev = NULL; ++ goto exit_device_init; ++ } ++ device_create_file(bus_dev, &dev_attr_vrail); ++ ++exit_device_init: ++ return bus_dev; ++} ++ ++static int msm_bus_setup_dev_conn(struct device *bus_dev, void *data) ++{ ++ struct msm_bus_node_device_type *bus_node = NULL; ++ int ret = 0; ++ int j; ++ ++ bus_node = bus_dev->platform_data; ++ if (!bus_node) { ++ MSM_BUS_ERR("%s: Can't get device info", __func__); ++ ret = -ENODEV; ++ goto exit_setup_dev_conn; ++ } ++ ++ /* Setup parent bus device for this node */ ++ if (!bus_node->node_info->is_fab_dev) { ++ struct device *bus_parent_device = ++ bus_find_device(&msm_bus_type, NULL, ++ (void *)&bus_node->node_info->bus_device_id, ++ msm_bus_device_match_adhoc); ++ ++ if (!bus_parent_device) { ++ MSM_BUS_ERR("%s: Error finding parentdev %d parent %d", ++ __func__, ++ bus_node->node_info->id, ++ bus_node->node_info->bus_device_id); ++ ret = -ENXIO; ++ goto exit_setup_dev_conn; ++ } ++ bus_node->node_info->bus_device = bus_parent_device; ++ } ++ ++ bus_node->node_info->is_traversed = false; ++ ++ for (j = 0; j < bus_node->node_info->num_connections; j++) { ++ bus_node->node_info->dev_connections[j] = ++ bus_find_device(&msm_bus_type, NULL, ++ (void *)&bus_node->node_info->connections[j], ++ msm_bus_device_match_adhoc); ++ ++ if (!bus_node->node_info->dev_connections[j]) { ++ MSM_BUS_ERR("%s: Error finding conn %d for device %d", ++ __func__, 
bus_node->node_info->connections[j], ++ bus_node->node_info->id); ++ ret = -ENODEV; ++ goto exit_setup_dev_conn; ++ } ++ } ++ ++ for (j = 0; j < bus_node->node_info->num_blist; j++) { ++ bus_node->node_info->black_connections[j] = ++ bus_find_device(&msm_bus_type, NULL, ++ (void *)&bus_node->node_info-> ++ black_listed_connections[j], ++ msm_bus_device_match_adhoc); ++ ++ if (!bus_node->node_info->black_connections[j]) { ++ MSM_BUS_ERR("%s: Error finding conn %d for device %d\n", ++ __func__, bus_node->node_info-> ++ black_listed_connections[j], ++ bus_node->node_info->id); ++ ret = -ENODEV; ++ goto exit_setup_dev_conn; ++ } ++ } ++ ++exit_setup_dev_conn: ++ return ret; ++} ++ ++static int msm_bus_node_debug(struct device *bus_dev, void *data) ++{ ++ int j; ++ int ret = 0; ++ struct msm_bus_node_device_type *bus_node = NULL; ++ ++ bus_node = bus_dev->platform_data; ++ if (!bus_node) { ++ MSM_BUS_ERR("%s: Can't get device info", __func__); ++ ret = -ENODEV; ++ goto exit_node_debug; ++ } ++ ++ MSM_BUS_DBG("Device = %d buswidth %u", bus_node->node_info->id, ++ bus_node->node_info->buswidth); ++ for (j = 0; j < bus_node->node_info->num_connections; j++) { ++ struct msm_bus_node_device_type *bdev = ++ (struct msm_bus_node_device_type *) ++ bus_node->node_info->dev_connections[j]->platform_data; ++ MSM_BUS_DBG("\n\t Connection[%d] %d", j, bdev->node_info->id); ++ } ++ ++exit_node_debug: ++ return ret; ++} ++ ++static int msm_bus_device_probe(struct platform_device *pdev) ++{ ++ unsigned int i, ret; ++ struct msm_bus_device_node_registration *pdata; ++ ++ /* If possible, get pdata from device-tree */ ++ if (pdev->dev.of_node) ++ pdata = msm_bus_of_to_pdata(pdev); ++ else { ++ pdata = (struct msm_bus_device_node_registration *)pdev-> ++ dev.platform_data; ++ } ++ ++ if (IS_ERR_OR_NULL(pdata)) { ++ MSM_BUS_ERR("No platform data found"); ++ ret = -ENODATA; ++ goto exit_device_probe; ++ } ++ ++ for (i = 0; i < pdata->num_devices; i++) { ++ struct device *node_dev = NULL; ++ 
++ node_dev = msm_bus_device_init(&pdata->info[i]); ++ ++ if (!node_dev) { ++ MSM_BUS_ERR("%s: Error during dev init for %d", ++ __func__, pdata->info[i].node_info->id); ++ ret = -ENXIO; ++ goto exit_device_probe; ++ } ++ ++ ret = msm_bus_init_clk(node_dev, &pdata->info[i]); ++ /*Is this a fabric device ?*/ ++ if (pdata->info[i].node_info->is_fab_dev) { ++ MSM_BUS_DBG("%s: %d is a fab", __func__, ++ pdata->info[i].node_info->id); ++ ret = msm_bus_fabric_init(node_dev, &pdata->info[i]); ++ if (ret) { ++ MSM_BUS_ERR("%s: Error intializing fab %d", ++ __func__, pdata->info[i].node_info->id); ++ goto exit_device_probe; ++ } ++ } ++ } ++ ++ ret = bus_for_each_dev(&msm_bus_type, NULL, NULL, ++ msm_bus_setup_dev_conn); ++ if (ret) { ++ MSM_BUS_ERR("%s: Error setting up dev connections", __func__); ++ goto exit_device_probe; ++ } ++ ++ ret = bus_for_each_dev(&msm_bus_type, NULL, NULL, msm_bus_dev_init_qos); ++ if (ret) { ++ MSM_BUS_ERR("%s: Error during qos init", __func__); ++ goto exit_device_probe; ++ } ++ ++ bus_for_each_dev(&msm_bus_type, NULL, NULL, msm_bus_node_debug); ++ ++ /* Register the arb layer ops */ ++ msm_bus_arb_setops_adhoc(&arb_ops); ++ devm_kfree(&pdev->dev, pdata->info); ++ devm_kfree(&pdev->dev, pdata); ++exit_device_probe: ++ return ret; ++} ++ ++static int msm_bus_device_rules_probe(struct platform_device *pdev) ++{ ++ struct bus_rule_type *rule_data = NULL; ++ int num_rules = 0; ++ ++ num_rules = msm_bus_of_get_static_rules(pdev, &rule_data); ++ ++ if (!rule_data) ++ goto exit_rules_probe; ++ ++ msm_rule_register(num_rules, rule_data, NULL); ++ static_rules.num_rules = num_rules; ++ static_rules.rules = rule_data; ++ pdev->dev.platform_data = &static_rules; ++ ++exit_rules_probe: ++ return 0; ++} ++ ++int msm_bus_device_rules_remove(struct platform_device *pdev) ++{ ++ struct static_rules_type *static_rules = NULL; ++ ++ static_rules = pdev->dev.platform_data; ++ if (static_rules) ++ msm_rule_unregister(static_rules->num_rules, ++ 
static_rules->rules, NULL); ++ return 0; ++} ++ ++static int msm_bus_free_dev(struct device *dev, void *data) ++{ ++ struct msm_bus_node_device_type *bus_node = NULL; ++ ++ bus_node = dev->platform_data; ++ ++ if (bus_node) ++ MSM_BUS_ERR("\n%s: Removing device %d", __func__, ++ bus_node->node_info->id); ++ device_unregister(dev); ++ return 0; ++} ++ ++int msm_bus_device_remove(struct platform_device *pdev) ++{ ++ bus_for_each_dev(&msm_bus_type, NULL, NULL, msm_bus_free_dev); ++ return 0; ++} ++ ++static struct of_device_id rules_match[] = { ++ {.compatible = "qcom,msm-bus-static-bw-rules"}, ++ {} ++}; ++ ++static struct platform_driver msm_bus_rules_driver = { ++ .probe = msm_bus_device_rules_probe, ++ .remove = msm_bus_device_rules_remove, ++ .driver = { ++ .name = "msm_bus_rules_device", ++ .owner = THIS_MODULE, ++ .of_match_table = rules_match, ++ }, ++}; ++ ++static struct of_device_id fabric_match[] = { ++ {.compatible = "qcom,msm-bus-device"}, ++ {} ++}; ++ ++static struct platform_driver msm_bus_device_driver = { ++ .probe = msm_bus_device_probe, ++ .remove = msm_bus_device_remove, ++ .driver = { ++ .name = "msm_bus_device", ++ .owner = THIS_MODULE, ++ .of_match_table = fabric_match, ++ }, ++}; ++ ++int __init msm_bus_device_init_driver(void) ++{ ++ int rc; ++ ++ MSM_BUS_ERR("msm_bus_fabric_init_driver\n"); ++ rc = platform_driver_register(&msm_bus_device_driver); ++ ++ if (rc) { ++ MSM_BUS_ERR("Failed to register bus device driver"); ++ return rc; ++ } ++ return platform_driver_register(&msm_bus_rules_driver); ++} ++subsys_initcall(msm_bus_device_init_driver); +--- /dev/null ++++ b/drivers/bus/msm_bus/msm_bus_id.c +@@ -0,0 +1,94 @@ ++/* Copyright (c) 2013-2014, The Linux Foundation. All rights reserved. ++ * ++ * This program is free software; you can redistribute it and/or modify ++ * it under the terms of the GNU General Public License version 2 and ++ * only version 2 as published by the Free Software Foundation. 
++ * ++ * This program is distributed in the hope that it will be useful, ++ * but WITHOUT ANY WARRANTY; without even the implied warranty of ++ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the ++ * GNU General Public License for more details. ++ * ++ */ ++ ++#include ++#include ++#include ++#include ++#include "msm-bus.h" ++#include "msm-bus-board.h" ++#include "msm_bus_core.h" ++#include "msm_bus_noc.h" ++#include "msm_bus_bimc.h" ++ ++static uint32_t master_iids[MSM_BUS_MASTER_LAST]; ++static uint32_t slave_iids[MSM_BUS_SLAVE_LAST - SLAVE_ID_KEY]; ++ ++static void msm_bus_assign_iids(struct msm_bus_fabric_registration ++ *fabreg, int fabid) ++{ ++ int i; ++ for (i = 0; i < fabreg->len; i++) { ++ if (!fabreg->info[i].gateway) { ++ fabreg->info[i].priv_id = fabid + fabreg->info[i].id; ++ if (fabreg->info[i].id < SLAVE_ID_KEY) { ++ if (fabreg->info[i].id >= MSM_BUS_MASTER_LAST) { ++ WARN(1, "id %d exceeds array size!\n", ++ fabreg->info[i].id); ++ continue; ++ } ++ ++ master_iids[fabreg->info[i].id] = ++ fabreg->info[i].priv_id; ++ } else { ++ if ((fabreg->info[i].id - SLAVE_ID_KEY) >= ++ (MSM_BUS_SLAVE_LAST - SLAVE_ID_KEY)) { ++ WARN(1, "id %d exceeds array size!\n", ++ fabreg->info[i].id); ++ continue; ++ } ++ ++ slave_iids[fabreg->info[i].id - (SLAVE_ID_KEY)] ++ = fabreg->info[i].priv_id; ++ } ++ } else { ++ fabreg->info[i].priv_id = fabreg->info[i].id; ++ } ++ } ++} ++ ++static int msm_bus_get_iid(int id) ++{ ++ if ((id < SLAVE_ID_KEY && id >= MSM_BUS_MASTER_LAST) || ++ id >= MSM_BUS_SLAVE_LAST) { ++ MSM_BUS_ERR("Cannot get iid. Invalid id %d passed\n", id); ++ return -EINVAL; ++ } ++ ++ return CHECK_ID(((id < SLAVE_ID_KEY) ? 
master_iids[id] : ++ slave_iids[id - SLAVE_ID_KEY]), id); ++} ++ ++static struct msm_bus_board_algorithm msm_bus_id_algo = { ++ .get_iid = msm_bus_get_iid, ++ .assign_iids = msm_bus_assign_iids, ++}; ++ ++int msm_bus_board_rpm_get_il_ids(uint16_t *id) ++{ ++ return -ENXIO; ++} ++ ++void msm_bus_board_init(struct msm_bus_fabric_registration *pdata) ++{ ++ pdata->board_algo = &msm_bus_id_algo; ++} ++ ++void msm_bus_board_set_nfab(struct msm_bus_fabric_registration *pdata, ++ int nfab) ++{ ++ if (nfab <= 0) ++ return; ++ ++ msm_bus_id_algo.board_nfab = nfab; ++} +--- /dev/null ++++ b/drivers/bus/msm_bus/msm_bus_noc.c +@@ -0,0 +1,770 @@ ++/* Copyright (c) 2012-2014, The Linux Foundation. All rights reserved. ++ * ++ * This program is free software; you can redistribute it and/or modify ++ * it under the terms of the GNU General Public License version 2 and ++ * only version 2 as published by the Free Software Foundation. ++ * ++ * This program is distributed in the hope that it will be useful, ++ * but WITHOUT ANY WARRANTY; without even the implied warranty of ++ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the ++ * GNU General Public License for more details. 
++ */ ++ ++#define pr_fmt(fmt) "AXI: NOC: %s(): " fmt, __func__ ++ ++#include ++#include ++#include "msm-bus-board.h" ++#include "msm_bus_core.h" ++#include "msm_bus_noc.h" ++#include "msm_bus_adhoc.h" ++ ++/* NOC_QOS generic */ ++#define __CLZ(x) ((8 * sizeof(uint32_t)) - 1 - __fls(x)) ++#define SAT_SCALE 16 /* 16 bytes minimum for saturation */ ++#define BW_SCALE 256 /* 1/256 byte per cycle unit */ ++#define QOS_DEFAULT_BASEOFFSET 0x00003000 ++#define QOS_DEFAULT_DELTA 0x80 ++#define MAX_BW_FIELD (NOC_QOS_BWn_BW_BMSK >> NOC_QOS_BWn_BW_SHFT) ++#define MAX_SAT_FIELD (NOC_QOS_SATn_SAT_BMSK >> NOC_QOS_SATn_SAT_SHFT) ++ ++#define NOC_QOS_REG_BASE(b, o) ((b) + (o)) ++ ++#define NOC_QOS_ID_COREIDn_ADDR(b, o, n, d) \ ++ (NOC_QOS_REG_BASE(b, o) + (d) * (n)) ++enum noc_qos_id_coreidn { ++ NOC_QOS_ID_COREIDn_RMSK = 0xffffffff, ++ NOC_QOS_ID_COREIDn_MAXn = 32, ++ NOC_QOS_ID_COREIDn_CORECHSUM_BMSK = 0xffffff00, ++ NOC_QOS_ID_COREIDn_CORECHSUM_SHFT = 0x8, ++ NOC_QOS_ID_COREIDn_CORETYPEID_BMSK = 0xff, ++ NOC_QOS_ID_COREIDn_CORETYPEID_SHFT = 0x0, ++}; ++ ++#define NOC_QOS_ID_REVISIONIDn_ADDR(b, o, n, d) \ ++ (NOC_QOS_REG_BASE(b, o) + 0x4 + (d) * (n)) ++enum noc_qos_id_revisionidn { ++ NOC_QOS_ID_REVISIONIDn_RMSK = 0xffffffff, ++ NOC_QOS_ID_REVISIONIDn_MAXn = 32, ++ NOC_QOS_ID_REVISIONIDn_FLEXNOCID_BMSK = 0xffffff00, ++ NOC_QOS_ID_REVISIONIDn_FLEXNOCID_SHFT = 0x8, ++ NOC_QOS_ID_REVISIONIDn_USERID_BMSK = 0xff, ++ NOC_QOS_ID_REVISIONIDn_USERID_SHFT = 0x0, ++}; ++ ++#define NOC_QOS_PRIORITYn_ADDR(b, o, n, d) \ ++ (NOC_QOS_REG_BASE(b, o) + 0x8 + (d) * (n)) ++enum noc_qos_id_priorityn { ++ NOC_QOS_PRIORITYn_RMSK = 0x0000000f, ++ NOC_QOS_PRIORITYn_MAXn = 32, ++ NOC_QOS_PRIORITYn_P1_BMSK = 0xc, ++ NOC_QOS_PRIORITYn_P1_SHFT = 0x2, ++ NOC_QOS_PRIORITYn_P0_BMSK = 0x3, ++ NOC_QOS_PRIORITYn_P0_SHFT = 0x0, ++}; ++ ++#define NOC_QOS_MODEn_ADDR(b, o, n, d) \ ++ (NOC_QOS_REG_BASE(b, o) + 0xC + (d) * (n)) ++enum noc_qos_id_moden_rmsk { ++ NOC_QOS_MODEn_RMSK = 0x00000003, ++ NOC_QOS_MODEn_MAXn = 
32, ++ NOC_QOS_MODEn_MODE_BMSK = 0x3, ++ NOC_QOS_MODEn_MODE_SHFT = 0x0, ++}; ++ ++#define NOC_QOS_BWn_ADDR(b, o, n, d) \ ++ (NOC_QOS_REG_BASE(b, o) + 0x10 + (d) * (n)) ++enum noc_qos_id_bwn { ++ NOC_QOS_BWn_RMSK = 0x0000ffff, ++ NOC_QOS_BWn_MAXn = 32, ++ NOC_QOS_BWn_BW_BMSK = 0xffff, ++ NOC_QOS_BWn_BW_SHFT = 0x0, ++}; ++ ++/* QOS Saturation registers */ ++#define NOC_QOS_SATn_ADDR(b, o, n, d) \ ++ (NOC_QOS_REG_BASE(b, o) + 0x14 + (d) * (n)) ++enum noc_qos_id_saturationn { ++ NOC_QOS_SATn_RMSK = 0x000003ff, ++ NOC_QOS_SATn_MAXn = 32, ++ NOC_QOS_SATn_SAT_BMSK = 0x3ff, ++ NOC_QOS_SATn_SAT_SHFT = 0x0, ++}; ++ ++static int noc_div(uint64_t *a, uint32_t b) ++{ ++ if ((*a > 0) && (*a < b)) ++ return 1; ++ else ++ return do_div(*a, b); ++} ++ ++/** ++ * Calculates bw hardware is using from register values ++ * bw returned is in bytes/sec ++ */ ++static uint64_t noc_bw(uint32_t bw_field, uint32_t qos_freq) ++{ ++ uint64_t res; ++ uint32_t rem, scale; ++ ++ res = 2 * qos_freq * bw_field; ++ scale = BW_SCALE * 1000; ++ rem = noc_div(&res, scale); ++ MSM_BUS_DBG("NOC: Calculated bw: %llu\n", res * 1000000ULL); ++ return res * 1000000ULL; ++} ++ ++static uint32_t noc_bw_ceil(long int bw_field, uint32_t qos_freq) ++{ ++ uint64_t bw_temp = 2 * qos_freq * bw_field; ++ uint32_t scale = 1000 * BW_SCALE; ++ noc_div(&bw_temp, scale); ++ return bw_temp * 1000000; ++} ++#define MAX_BW(timebase) noc_bw_ceil(MAX_BW_FIELD, (timebase)) ++ ++/** ++ * Calculates ws hardware is using from register values ++ * ws returned is in nanoseconds ++ */ ++static uint32_t noc_ws(uint64_t bw, uint32_t sat, uint32_t qos_freq) ++{ ++ if (bw && qos_freq) { ++ uint32_t bwf = bw * qos_freq; ++ uint64_t scale = 1000000000000LL * BW_SCALE * ++ SAT_SCALE * sat; ++ noc_div(&scale, bwf); ++ MSM_BUS_DBG("NOC: Calculated ws: %llu\n", scale); ++ return scale; ++ } ++ ++ return 0; ++} ++#define MAX_WS(bw, timebase) noc_ws((bw), MAX_SAT_FIELD, (timebase)) ++ ++/* Calculate bandwidth field value for requested bandwidth 
*/ ++static uint32_t noc_bw_field(uint64_t bw, uint32_t qos_freq) ++{ ++ uint32_t bw_field = 0; ++ ++ if (bw) { ++ uint32_t rem; ++ uint64_t bw_capped = min_t(uint64_t, bw, MAX_BW(qos_freq)); ++ uint64_t bwc = bw_capped * BW_SCALE; ++ uint64_t qf = 2 * qos_freq * 1000; ++ ++ rem = noc_div(&bwc, qf); ++ bw_field = (uint32_t)min_t(uint64_t, bwc, MAX_BW_FIELD); ++ } ++ ++ MSM_BUS_DBG("NOC: bw_field: %u\n", bw_field); ++ return bw_field; ++} ++ ++static uint32_t noc_sat_field(uint64_t bw, uint32_t ws, uint32_t qos_freq) ++{ ++ uint32_t sat_field = 0, win; ++ ++ if (bw) { ++ /* Limit to max bw and scale bw to 100 KB increments */ ++ uint64_t tbw, tscale; ++ uint64_t bw_scaled = min_t(uint64_t, bw, MAX_BW(qos_freq)); ++ uint32_t rem = noc_div(&bw_scaled, 100000); ++ ++ /** ++ * Calculate saturation from windows size. ++ * WS must be at least one arb period. ++ * Saturation must not exceed max field size ++ * ++ * Bandwidth is in 100KB increments ++ * Window size is in ns ++ * qos_freq is in KHz ++ */ ++ win = max(ws, 1000000 / qos_freq); ++ tbw = bw_scaled * win * qos_freq; ++ tscale = 10000000ULL * BW_SCALE * SAT_SCALE; ++ rem = noc_div(&tbw, tscale); ++ sat_field = (uint32_t)min_t(uint64_t, tbw, MAX_SAT_FIELD); ++ } ++ ++ MSM_BUS_DBG("NOC: sat_field: %d\n", sat_field); ++ return sat_field; ++} ++ ++static void noc_set_qos_mode(void __iomem *base, uint32_t qos_off, ++ uint32_t mport, uint32_t qos_delta, uint8_t mode, ++ uint8_t perm_mode) ++{ ++ if (mode < NOC_QOS_MODE_MAX && ++ ((1 << mode) & perm_mode)) { ++ uint32_t reg_val; ++ ++ reg_val = readl_relaxed(NOC_QOS_MODEn_ADDR(base, qos_off, ++ mport, qos_delta)) & NOC_QOS_MODEn_RMSK; ++ writel_relaxed(((reg_val & (~(NOC_QOS_MODEn_MODE_BMSK))) | ++ (mode & NOC_QOS_MODEn_MODE_BMSK)), ++ NOC_QOS_MODEn_ADDR(base, qos_off, mport, qos_delta)); ++ } ++ /* Ensure qos mode is set before exiting */ ++ wmb(); ++} ++ ++static void noc_set_qos_priority(void __iomem *base, uint32_t qos_off, ++ uint32_t mport, uint32_t qos_delta, ++ 
struct msm_bus_noc_qos_priority *priority) ++{ ++ uint32_t reg_val, val; ++ ++ reg_val = readl_relaxed(NOC_QOS_PRIORITYn_ADDR(base, qos_off, mport, ++ qos_delta)) & NOC_QOS_PRIORITYn_RMSK; ++ val = priority->p1 << NOC_QOS_PRIORITYn_P1_SHFT; ++ writel_relaxed(((reg_val & (~(NOC_QOS_PRIORITYn_P1_BMSK))) | ++ (val & NOC_QOS_PRIORITYn_P1_BMSK)), ++ NOC_QOS_PRIORITYn_ADDR(base, qos_off, mport, qos_delta)); ++ ++ reg_val = readl_relaxed(NOC_QOS_PRIORITYn_ADDR(base, qos_off, mport, ++ qos_delta)) ++ & NOC_QOS_PRIORITYn_RMSK; ++ writel_relaxed(((reg_val & (~(NOC_QOS_PRIORITYn_P0_BMSK))) | ++ (priority->p0 & NOC_QOS_PRIORITYn_P0_BMSK)), ++ NOC_QOS_PRIORITYn_ADDR(base, qos_off, mport, qos_delta)); ++ /* Ensure qos priority is set before exiting */ ++ wmb(); ++} ++ ++static void msm_bus_noc_set_qos_bw(void __iomem *base, uint32_t qos_off, ++ uint32_t qos_freq, uint32_t mport, uint32_t qos_delta, ++ uint8_t perm_mode, struct msm_bus_noc_qos_bw *qbw) ++{ ++ uint32_t reg_val, val, mode; ++ ++ if (!qos_freq) { ++ MSM_BUS_DBG("Zero QoS Freq\n"); ++ return; ++ } ++ ++ ++ /* If Limiter or Regulator modes are not supported, bw not available*/ ++ if (perm_mode & (NOC_QOS_PERM_MODE_LIMITER | ++ NOC_QOS_PERM_MODE_REGULATOR)) { ++ uint32_t bw_val = noc_bw_field(qbw->bw, qos_freq); ++ uint32_t sat_val = noc_sat_field(qbw->bw, qbw->ws, ++ qos_freq); ++ ++ MSM_BUS_DBG("NOC: BW: perm_mode: %d bw_val: %d, sat_val: %d\n", ++ perm_mode, bw_val, sat_val); ++ /* ++ * If in Limiter/Regulator mode, first go to fixed mode. 
++ * Clear QoS accumulator ++ **/ ++ mode = readl_relaxed(NOC_QOS_MODEn_ADDR(base, qos_off, ++ mport, qos_delta)) & NOC_QOS_MODEn_MODE_BMSK; ++ if (mode == NOC_QOS_MODE_REGULATOR || mode == ++ NOC_QOS_MODE_LIMITER) { ++ reg_val = readl_relaxed(NOC_QOS_MODEn_ADDR( ++ base, qos_off, mport, qos_delta)); ++ val = NOC_QOS_MODE_FIXED; ++ writel_relaxed((reg_val & (~(NOC_QOS_MODEn_MODE_BMSK))) ++ | (val & NOC_QOS_MODEn_MODE_BMSK), ++ NOC_QOS_MODEn_ADDR(base, qos_off, mport, ++ qos_delta)); ++ } ++ ++ reg_val = readl_relaxed(NOC_QOS_BWn_ADDR(base, qos_off, mport, ++ qos_delta)); ++ val = bw_val << NOC_QOS_BWn_BW_SHFT; ++ writel_relaxed(((reg_val & (~(NOC_QOS_BWn_BW_BMSK))) | ++ (val & NOC_QOS_BWn_BW_BMSK)), ++ NOC_QOS_BWn_ADDR(base, qos_off, mport, qos_delta)); ++ ++ MSM_BUS_DBG("NOC: BW: Wrote value: 0x%x\n", ((reg_val & ++ (~NOC_QOS_BWn_BW_BMSK)) | (val & ++ NOC_QOS_BWn_BW_BMSK))); ++ ++ reg_val = readl_relaxed(NOC_QOS_SATn_ADDR(base, qos_off, ++ mport, qos_delta)); ++ val = sat_val << NOC_QOS_SATn_SAT_SHFT; ++ writel_relaxed(((reg_val & (~(NOC_QOS_SATn_SAT_BMSK))) | ++ (val & NOC_QOS_SATn_SAT_BMSK)), ++ NOC_QOS_SATn_ADDR(base, qos_off, mport, qos_delta)); ++ ++ MSM_BUS_DBG("NOC: SAT: Wrote value: 0x%x\n", ((reg_val & ++ (~NOC_QOS_SATn_SAT_BMSK)) | (val & ++ NOC_QOS_SATn_SAT_BMSK))); ++ ++ /* Set mode back to what it was initially */ ++ reg_val = readl_relaxed(NOC_QOS_MODEn_ADDR(base, qos_off, ++ mport, qos_delta)); ++ writel_relaxed((reg_val & (~(NOC_QOS_MODEn_MODE_BMSK))) ++ | (mode & NOC_QOS_MODEn_MODE_BMSK), ++ NOC_QOS_MODEn_ADDR(base, qos_off, mport, qos_delta)); ++ /* Ensure that all writes for bandwidth registers have ++ * completed before returning ++ */ ++ wmb(); ++ } ++} ++ ++uint8_t msm_bus_noc_get_qos_mode(void __iomem *base, uint32_t qos_off, ++ uint32_t mport, uint32_t qos_delta, uint32_t mode, uint32_t perm_mode) ++{ ++ if (NOC_QOS_MODES_ALL_PERM == perm_mode) ++ return readl_relaxed(NOC_QOS_MODEn_ADDR(base, qos_off, ++ mport, qos_delta)) & 
NOC_QOS_MODEn_MODE_BMSK; ++ else ++ return 31 - __CLZ(mode & ++ NOC_QOS_MODES_ALL_PERM); ++} ++ ++void msm_bus_noc_get_qos_priority(void __iomem *base, uint32_t qos_off, ++ uint32_t mport, uint32_t qos_delta, ++ struct msm_bus_noc_qos_priority *priority) ++{ ++ priority->p1 = (readl_relaxed(NOC_QOS_PRIORITYn_ADDR(base, qos_off, ++ mport, qos_delta)) & NOC_QOS_PRIORITYn_P1_BMSK) >> ++ NOC_QOS_PRIORITYn_P1_SHFT; ++ ++ priority->p0 = (readl_relaxed(NOC_QOS_PRIORITYn_ADDR(base, qos_off, ++ mport, qos_delta)) & NOC_QOS_PRIORITYn_P0_BMSK) >> ++ NOC_QOS_PRIORITYn_P0_SHFT; ++} ++ ++void msm_bus_noc_get_qos_bw(void __iomem *base, uint32_t qos_off, ++ uint32_t qos_freq, ++ uint32_t mport, uint32_t qos_delta, uint8_t perm_mode, ++ struct msm_bus_noc_qos_bw *qbw) ++{ ++ if (perm_mode & (NOC_QOS_PERM_MODE_LIMITER | ++ NOC_QOS_PERM_MODE_REGULATOR)) { ++ uint32_t bw_val = readl_relaxed(NOC_QOS_BWn_ADDR( ++ base, qos_off, mport, qos_delta)) & NOC_QOS_BWn_BW_BMSK; ++ uint32_t sat = readl_relaxed(NOC_QOS_SATn_ADDR( ++ base, qos_off, mport, qos_delta)) ++ & NOC_QOS_SATn_SAT_BMSK; ++ ++ qbw->bw = noc_bw(bw_val, qos_freq); ++ qbw->ws = noc_ws(qbw->bw, sat, qos_freq); ++ } else { ++ qbw->bw = 0; ++ qbw->ws = 0; ++ } ++} ++ ++static int msm_bus_noc_mas_init(struct msm_bus_noc_info *ninfo, ++ struct msm_bus_inode_info *info) ++{ ++ int i; ++ struct msm_bus_noc_qos_priority *prio; ++ prio = kzalloc(sizeof(struct msm_bus_noc_qos_priority), ++ GFP_KERNEL); ++ if (!prio) { ++ MSM_BUS_WARN("Couldn't alloc prio data for node: %d\n", ++ info->node_info->id); ++ return -ENOMEM; ++ } ++ ++ prio->read_prio = info->node_info->prio_rd; ++ prio->write_prio = info->node_info->prio_wr; ++ prio->p1 = info->node_info->prio1; ++ prio->p0 = info->node_info->prio0; ++ info->hw_data = (void *)prio; ++ ++ if (!info->node_info->qport) { ++ MSM_BUS_DBG("No QoS Ports to init\n"); ++ return 0; ++ } ++ ++ for (i = 0; i < info->node_info->num_mports; i++) { ++ if (info->node_info->mode != NOC_QOS_MODE_BYPASS) { ++ 
noc_set_qos_priority(ninfo->base, ninfo->qos_baseoffset, ++ info->node_info->qport[i], ninfo->qos_delta, ++ prio); ++ ++ if (info->node_info->mode != NOC_QOS_MODE_FIXED) { ++ struct msm_bus_noc_qos_bw qbw; ++ qbw.ws = info->node_info->ws; ++ qbw.bw = 0; ++ msm_bus_noc_set_qos_bw(ninfo->base, ++ ninfo->qos_baseoffset, ++ ninfo->qos_freq, info->node_info-> ++ qport[i], ninfo->qos_delta, ++ info->node_info->perm_mode, ++ &qbw); ++ } ++ } ++ ++ noc_set_qos_mode(ninfo->base, ninfo->qos_baseoffset, ++ info->node_info->qport[i], ninfo->qos_delta, ++ info->node_info->mode, ++ info->node_info->perm_mode); ++ } ++ ++ return 0; ++} ++ ++static void msm_bus_noc_node_init(void *hw_data, ++ struct msm_bus_inode_info *info) ++{ ++ struct msm_bus_noc_info *ninfo = ++ (struct msm_bus_noc_info *)hw_data; ++ ++ if (!IS_SLAVE(info->node_info->priv_id)) ++ if (info->node_info->hw_sel != MSM_BUS_RPM) ++ msm_bus_noc_mas_init(ninfo, info); ++} ++ ++static int msm_bus_noc_allocate_commit_data(struct msm_bus_fabric_registration ++ *fab_pdata, void **cdata, int ctx) ++{ ++ struct msm_bus_noc_commit **cd = (struct msm_bus_noc_commit **)cdata; ++ struct msm_bus_noc_info *ninfo = ++ (struct msm_bus_noc_info *)fab_pdata->hw_data; ++ ++ *cd = kzalloc(sizeof(struct msm_bus_noc_commit), GFP_KERNEL); ++ if (!*cd) { ++ MSM_BUS_DBG("Couldn't alloc mem for cdata\n"); ++ return -ENOMEM; ++ } ++ ++ (*cd)->mas = ninfo->cdata[ctx].mas; ++ (*cd)->slv = ninfo->cdata[ctx].slv; ++ ++ return 0; ++} ++ ++static void *msm_bus_noc_allocate_noc_data(struct platform_device *pdev, ++ struct msm_bus_fabric_registration *fab_pdata) ++{ ++ struct resource *noc_mem; ++ struct resource *noc_io; ++ struct msm_bus_noc_info *ninfo; ++ int i; ++ ++ ninfo = kzalloc(sizeof(struct msm_bus_noc_info), GFP_KERNEL); ++ if (!ninfo) { ++ MSM_BUS_DBG("Couldn't alloc mem for noc info\n"); ++ return NULL; ++ } ++ ++ ninfo->nmasters = fab_pdata->nmasters; ++ ninfo->nqos_masters = fab_pdata->nmasters; ++ ninfo->nslaves = 
fab_pdata->nslaves; ++ ninfo->qos_freq = fab_pdata->qos_freq; ++ ++ if (!fab_pdata->qos_baseoffset) ++ ninfo->qos_baseoffset = QOS_DEFAULT_BASEOFFSET; ++ else ++ ninfo->qos_baseoffset = fab_pdata->qos_baseoffset; ++ ++ if (!fab_pdata->qos_delta) ++ ninfo->qos_delta = QOS_DEFAULT_DELTA; ++ else ++ ninfo->qos_delta = fab_pdata->qos_delta; ++ ++ ninfo->mas_modes = kzalloc(sizeof(uint32_t) * fab_pdata->nmasters, ++ GFP_KERNEL); ++ if (!ninfo->mas_modes) { ++ MSM_BUS_DBG("Couldn't alloc mem for noc master-modes\n"); ++ kfree(ninfo); ++ return NULL; ++ } ++ ++ for (i = 0; i < NUM_CTX; i++) { ++ ninfo->cdata[i].mas = kzalloc(sizeof(struct ++ msm_bus_node_hw_info) * fab_pdata->nmasters * 2, ++ GFP_KERNEL); ++ if (!ninfo->cdata[i].mas) { ++ MSM_BUS_DBG("Couldn't alloc mem for noc master-bw\n"); ++ kfree(ninfo->mas_modes); ++ kfree(ninfo); ++ return NULL; ++ } ++ ++ ninfo->cdata[i].slv = kzalloc(sizeof(struct ++ msm_bus_node_hw_info) * fab_pdata->nslaves * 2, ++ GFP_KERNEL); ++ if (!ninfo->cdata[i].slv) { ++ MSM_BUS_DBG("Couldn't alloc mem for noc master-bw\n"); ++ kfree(ninfo->cdata[i].mas); ++ goto err; ++ } ++ } ++ ++ /* If it's a virtual fabric, don't get memory info */ ++ if (fab_pdata->virt) ++ goto skip_mem; ++ ++ noc_mem = platform_get_resource(pdev, IORESOURCE_MEM, 0); ++ if (!noc_mem && !fab_pdata->virt) { ++ MSM_BUS_ERR("Cannot get NoC Base address\n"); ++ goto err; ++ } ++ ++ noc_io = request_mem_region(noc_mem->start, ++ resource_size(noc_mem), pdev->name); ++ if (!noc_io) { ++ MSM_BUS_ERR("NoC memory unavailable\n"); ++ goto err; ++ } ++ ++ ninfo->base = ioremap(noc_mem->start, resource_size(noc_mem)); ++ if (!ninfo->base) { ++ MSM_BUS_ERR("IOremap failed for NoC!\n"); ++ release_mem_region(noc_mem->start, resource_size(noc_mem)); ++ goto err; ++ } ++ ++skip_mem: ++ fab_pdata->hw_data = (void *)ninfo; ++ return (void *)ninfo; ++ ++err: ++ kfree(ninfo->mas_modes); ++ kfree(ninfo); ++ return NULL; ++} ++ ++static void free_commit_data(void *cdata) ++{ ++ struct 
msm_bus_noc_commit *cd = (struct msm_bus_noc_commit *)cdata; ++ ++ kfree(cd->mas); ++ kfree(cd->slv); ++ kfree(cd); ++} ++ ++static bool msm_bus_noc_update_bw_reg(int mode) ++{ ++ bool ret = false; ++ ++ if ((mode == NOC_QOS_MODE_LIMITER) || ++ (mode == NOC_QOS_MODE_REGULATOR)) ++ ret = true; ++ ++ return ret; ++} ++ ++static void msm_bus_noc_update_bw(struct msm_bus_inode_info *hop, ++ struct msm_bus_inode_info *info, ++ struct msm_bus_fabric_registration *fab_pdata, ++ void *sel_cdata, int *master_tiers, ++ int64_t add_bw) ++{ ++ struct msm_bus_noc_info *ninfo; ++ struct msm_bus_noc_qos_bw qos_bw; ++ int i, ports; ++ int64_t bw; ++ struct msm_bus_noc_commit *sel_cd = ++ (struct msm_bus_noc_commit *)sel_cdata; ++ ++ ninfo = (struct msm_bus_noc_info *)fab_pdata->hw_data; ++ if (!ninfo->qos_freq) { ++ MSM_BUS_DBG("NOC: No qos frequency to update bw\n"); ++ return; ++ } ++ ++ if (info->node_info->num_mports == 0) { ++ MSM_BUS_DBG("NOC: Skip Master BW\n"); ++ goto skip_mas_bw; ++ } ++ ++ ports = info->node_info->num_mports; ++ bw = INTERLEAVED_BW(fab_pdata, add_bw, ports); ++ ++ MSM_BUS_DBG("NOC: Update bw for: %d: %lld\n", ++ info->node_info->priv_id, add_bw); ++ for (i = 0; i < ports; i++) { ++ sel_cd->mas[info->node_info->masterp[i]].bw += bw; ++ sel_cd->mas[info->node_info->masterp[i]].hw_id = ++ info->node_info->mas_hw_id; ++ MSM_BUS_DBG("NOC: Update mas_bw: ID: %d, BW: %llu ports:%d\n", ++ info->node_info->priv_id, ++ sel_cd->mas[info->node_info->masterp[i]].bw, ++ ports); ++ /* Check if info is a shared master. 
++ * If it is, mark it dirty ++ * If it isn't, then set QOS Bandwidth ++ **/ ++ if (info->node_info->hw_sel == MSM_BUS_RPM) ++ sel_cd->mas[info->node_info->masterp[i]].dirty = 1; ++ else { ++ if (!info->node_info->qport) { ++ MSM_BUS_DBG("No qos ports to update!\n"); ++ break; ++ } ++ ++ if (!((info->node_info->mode == NOC_QOS_MODE_REGULATOR) ++ || (info->node_info->mode == ++ NOC_QOS_MODE_LIMITER))) { ++ MSM_BUS_DBG("Skip QoS reg programming\n"); ++ break; ++ } ++ qos_bw.bw = sel_cd->mas[info->node_info->masterp[i]]. ++ bw; ++ qos_bw.ws = info->node_info->ws; ++ msm_bus_noc_set_qos_bw(ninfo->base, ++ ninfo->qos_baseoffset, ++ ninfo->qos_freq, ++ info->node_info->qport[i], ninfo->qos_delta, ++ info->node_info->perm_mode, &qos_bw); ++ MSM_BUS_DBG("NOC: QoS: Update mas_bw: ws: %u\n", ++ qos_bw.ws); ++ } ++ } ++ ++skip_mas_bw: ++ ports = hop->node_info->num_sports; ++ for (i = 0; i < ports; i++) { ++ sel_cd->slv[hop->node_info->slavep[i]].bw += add_bw; ++ sel_cd->slv[hop->node_info->slavep[i]].hw_id = ++ hop->node_info->slv_hw_id; ++ MSM_BUS_DBG("NOC: Update slave_bw for ID: %d -> %llu\n", ++ hop->node_info->priv_id, ++ sel_cd->slv[hop->node_info->slavep[i]].bw); ++ MSM_BUS_DBG("NOC: Update slave_bw for hw_id: %d, index: %d\n", ++ hop->node_info->slv_hw_id, hop->node_info->slavep[i]); ++ /* Check if hop is a shared slave. ++ * If it is, mark it dirty ++ * If it isn't, then nothing to be done as the ++ * slaves are in bypass mode. 
++ **/ ++ if (hop->node_info->hw_sel == MSM_BUS_RPM) ++ sel_cd->slv[hop->node_info->slavep[i]].dirty = 1; ++ } ++} ++ ++static int msm_bus_noc_commit(struct msm_bus_fabric_registration ++ *fab_pdata, void *hw_data, void **cdata) ++{ ++ MSM_BUS_DBG("\nReached NOC Commit\n"); ++ msm_bus_remote_hw_commit(fab_pdata, hw_data, cdata); ++ return 0; ++} ++ ++static int msm_bus_noc_port_halt(uint32_t haltid, uint8_t mport) ++{ ++ return 0; ++} ++ ++static int msm_bus_noc_port_unhalt(uint32_t haltid, uint8_t mport) ++{ ++ return 0; ++} ++ ++static int msm_bus_noc_qos_init(struct msm_bus_node_device_type *info, ++ void __iomem *qos_base, ++ uint32_t qos_off, uint32_t qos_delta, ++ uint32_t qos_freq) ++{ ++ struct msm_bus_noc_qos_priority prio; ++ int ret = 0; ++ int i; ++ ++ prio.p1 = info->node_info->qos_params.prio1; ++ prio.p0 = info->node_info->qos_params.prio0; ++ ++ if (!info->node_info->qport) { ++ MSM_BUS_DBG("No QoS Ports to init\n"); ++ ret = 0; ++ goto err_qos_init; ++ } ++ ++ for (i = 0; i < info->node_info->num_qports; i++) { ++ if (info->node_info->qos_params.mode != NOC_QOS_MODE_BYPASS) { ++ noc_set_qos_priority(qos_base, qos_off, ++ info->node_info->qport[i], qos_delta, ++ &prio); ++ ++ if (info->node_info->qos_params.mode != ++ NOC_QOS_MODE_FIXED) { ++ struct msm_bus_noc_qos_bw qbw; ++ qbw.ws = info->node_info->qos_params.ws; ++ qbw.bw = 0; ++ msm_bus_noc_set_qos_bw(qos_base, qos_off, ++ qos_freq, ++ info->node_info->qport[i], ++ qos_delta, ++ info->node_info->qos_params.mode, ++ &qbw); ++ } ++ } ++ ++ noc_set_qos_mode(qos_base, qos_off, info->node_info->qport[i], ++ qos_delta, info->node_info->qos_params.mode, ++ (1 << info->node_info->qos_params.mode)); ++ } ++err_qos_init: ++ return ret; ++} ++ ++static int msm_bus_noc_set_bw(struct msm_bus_node_device_type *dev, ++ void __iomem *qos_base, ++ uint32_t qos_off, uint32_t qos_delta, ++ uint32_t qos_freq) ++{ ++ int ret = 0; ++ uint64_t bw = 0; ++ int i; ++ struct msm_bus_node_info_type *info = dev->node_info; 
++ ++ if (info && info->num_qports && ++ ((info->qos_params.mode == NOC_QOS_MODE_REGULATOR) || ++ (info->qos_params.mode == ++ NOC_QOS_MODE_LIMITER))) { ++ struct msm_bus_noc_qos_bw qos_bw; ++ ++ bw = msm_bus_div64(info->num_qports, ++ dev->node_ab.ab[DUAL_CTX]); ++ ++ for (i = 0; i < info->num_qports; i++) { ++ if (!info->qport) { ++ MSM_BUS_DBG("No qos ports to update!\n"); ++ break; ++ } ++ ++ qos_bw.bw = bw; ++ qos_bw.ws = info->qos_params.ws; ++ msm_bus_noc_set_qos_bw(qos_base, qos_off, qos_freq, ++ info->qport[i], qos_delta, ++ info->qos_params.mode, &qos_bw); ++ MSM_BUS_DBG("NOC: QoS: Update mas_bw: ws: %u\n", ++ qos_bw.ws); ++ } ++ } ++ return ret; ++} ++int msm_bus_noc_hw_init(struct msm_bus_fabric_registration *pdata, ++ struct msm_bus_hw_algorithm *hw_algo) ++{ ++ /* Set interleaving to true by default */ ++ pdata->il_flag = true; ++ hw_algo->allocate_commit_data = msm_bus_noc_allocate_commit_data; ++ hw_algo->allocate_hw_data = msm_bus_noc_allocate_noc_data; ++ hw_algo->node_init = msm_bus_noc_node_init; ++ hw_algo->free_commit_data = free_commit_data; ++ hw_algo->update_bw = msm_bus_noc_update_bw; ++ hw_algo->commit = msm_bus_noc_commit; ++ hw_algo->port_halt = msm_bus_noc_port_halt; ++ hw_algo->port_unhalt = msm_bus_noc_port_unhalt; ++ hw_algo->update_bw_reg = msm_bus_noc_update_bw_reg; ++ hw_algo->config_master = NULL; ++ hw_algo->config_limiter = NULL; ++ ++ return 0; ++} ++ ++int msm_bus_noc_set_ops(struct msm_bus_node_device_type *bus_dev) ++{ ++ if (!bus_dev) ++ return -ENODEV; ++ else { ++ bus_dev->fabdev->noc_ops.qos_init = msm_bus_noc_qos_init; ++ bus_dev->fabdev->noc_ops.set_bw = msm_bus_noc_set_bw; ++ bus_dev->fabdev->noc_ops.limit_mport = NULL; ++ bus_dev->fabdev->noc_ops.update_bw_reg = ++ msm_bus_noc_update_bw_reg; ++ } ++ return 0; ++} ++EXPORT_SYMBOL(msm_bus_noc_set_ops); +--- /dev/null ++++ b/drivers/bus/msm_bus/msm_bus_noc.h +@@ -0,0 +1,76 @@ ++/* Copyright (c) 2012-2014, The Linux Foundation. All rights reserved. 
++ * ++ * This program is free software; you can redistribute it and/or modify ++ * it under the terms of the GNU General Public License version 2 and ++ * only version 2 as published by the Free Software Foundation. ++ * ++ * This program is distributed in the hope that it will be useful, ++ * but WITHOUT ANY WARRANTY; without even the implied warranty of ++ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the ++ * GNU General Public License for more details. ++ */ ++ ++#ifndef _ARCH_ARM_MACH_MSM_BUS_NOC_H ++#define _ARCH_ARM_MACH_MSM_BUS_NOC_H ++ ++enum msm_bus_noc_qos_mode_type { ++ NOC_QOS_MODE_FIXED = 0, ++ NOC_QOS_MODE_LIMITER, ++ NOC_QOS_MODE_BYPASS, ++ NOC_QOS_MODE_REGULATOR, ++ NOC_QOS_MODE_MAX, ++}; ++ ++enum msm_bus_noc_qos_mode_perm { ++ NOC_QOS_PERM_MODE_FIXED = (1 << NOC_QOS_MODE_FIXED), ++ NOC_QOS_PERM_MODE_LIMITER = (1 << NOC_QOS_MODE_LIMITER), ++ NOC_QOS_PERM_MODE_BYPASS = (1 << NOC_QOS_MODE_BYPASS), ++ NOC_QOS_PERM_MODE_REGULATOR = (1 << NOC_QOS_MODE_REGULATOR), ++}; ++ ++#define NOC_QOS_MODES_ALL_PERM (NOC_QOS_PERM_MODE_FIXED | \ ++ NOC_QOS_PERM_MODE_LIMITER | NOC_QOS_PERM_MODE_BYPASS | \ ++ NOC_QOS_PERM_MODE_REGULATOR) ++ ++struct msm_bus_noc_commit { ++ struct msm_bus_node_hw_info *mas; ++ struct msm_bus_node_hw_info *slv; ++}; ++ ++struct msm_bus_noc_info { ++ void __iomem *base; ++ uint32_t base_addr; ++ uint32_t nmasters; ++ uint32_t nqos_masters; ++ uint32_t nslaves; ++ uint32_t qos_freq; /* QOS Clock in KHz */ ++ uint32_t qos_baseoffset; ++ uint32_t qos_delta; ++ uint32_t *mas_modes; ++ struct msm_bus_noc_commit cdata[NUM_CTX]; ++}; ++ ++struct msm_bus_noc_qos_priority { ++ uint32_t high_prio; ++ uint32_t low_prio; ++ uint32_t read_prio; ++ uint32_t write_prio; ++ uint32_t p1; ++ uint32_t p0; ++}; ++ ++struct msm_bus_noc_qos_bw { ++ uint64_t bw; /* Bandwidth in bytes per second */ ++ uint32_t ws; /* Window size in nano seconds */ ++}; ++ ++void msm_bus_noc_init(struct msm_bus_noc_info *ninfo); ++uint8_t 
msm_bus_noc_get_qos_mode(void __iomem *base, uint32_t qos_off, ++ uint32_t mport, uint32_t qos_delta, uint32_t mode, uint32_t perm_mode); ++void msm_bus_noc_get_qos_priority(void __iomem *base, uint32_t qos_off, ++ uint32_t mport, uint32_t qos_delta, ++ struct msm_bus_noc_qos_priority *qprio); ++void msm_bus_noc_get_qos_bw(void __iomem *base, uint32_t qos_off, ++ uint32_t qos_freq, uint32_t mport, uint32_t qos_delta, ++ uint8_t perm_mode, struct msm_bus_noc_qos_bw *qbw); ++#endif /*_ARCH_ARM_MACH_MSM_BUS_NOC_H */ +--- /dev/null ++++ b/drivers/bus/msm_bus/msm_bus_of.c +@@ -0,0 +1,705 @@ ++/* Copyright (c) 2012-2014, The Linux Foundation. All rights reserved. ++ * ++ * This program is free software; you can redistribute it and/or modify ++ * it under the terms of the GNU General Public License version 2 and ++ * only version 2 as published by the Free Software Foundation. ++ * ++ * This program is distributed in the hope that it will be useful, ++ * but WITHOUT ANY WARRANTY; without even the implied warranty of ++ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the ++ * GNU General Public License for more details. 
++ */ ++ ++#define pr_fmt(fmt) "AXI: %s(): " fmt, __func__ ++ ++#include ++#include ++#include ++#include ++#include ++#include ++#include "msm-bus.h" ++#include "msm-bus-board.h" ++#include "msm_bus_core.h" ++ ++static const char * const hw_sel_name[] = {"RPM", "NoC", "BIMC", NULL}; ++static const char * const mode_sel_name[] = {"Fixed", "Limiter", "Bypass", ++ "Regulator", NULL}; ++ ++static int get_num(const char *const str[], const char *name) ++{ ++ int i = 0; ++ ++ do { ++ if (!strcmp(name, str[i])) ++ return i; ++ ++ i++; ++ } while (str[i] != NULL); ++ ++ pr_err("Error: string %s not found\n", name); ++ return -EINVAL; ++} ++ ++#if defined(CONFIG_DEBUG_FS) && defined(CONFIG_MSM_BUS_SCALING) ++static struct msm_bus_scale_pdata *get_pdata(struct platform_device *pdev, ++ struct device_node *of_node) ++{ ++ struct msm_bus_scale_pdata *pdata = NULL; ++ struct msm_bus_paths *usecase = NULL; ++ int i = 0, j, ret, num_usecases = 0, num_paths, len; ++ const uint32_t *vec_arr = NULL; ++ bool mem_err = false; ++ ++ if (!pdev) { ++ pr_err("Error: Null Platform device\n"); ++ return NULL; ++ } ++ ++ pdata = devm_kzalloc(&pdev->dev, sizeof(struct msm_bus_scale_pdata), ++ GFP_KERNEL); ++ if (!pdata) { ++ pr_err("Error: Memory allocation for pdata failed\n"); ++ mem_err = true; ++ goto err; ++ } ++ ++ ret = of_property_read_string(of_node, "qcom,msm-bus,name", ++ &pdata->name); ++ if (ret) { ++ pr_err("Error: Client name not found\n"); ++ goto err; ++ } ++ ++ ret = of_property_read_u32(of_node, "qcom,msm-bus,num-cases", ++ &num_usecases); ++ if (ret) { ++ pr_err("Error: num-usecases not found\n"); ++ goto err; ++ } ++ ++ pdata->num_usecases = num_usecases; ++ ++ if (of_property_read_bool(of_node, "qcom,msm-bus,active-only")) ++ pdata->active_only = 1; ++ else { ++ pr_debug("active_only flag absent.\n"); ++ pr_debug("Using dual context by default\n"); ++ } ++ ++ usecase = devm_kzalloc(&pdev->dev, (sizeof(struct msm_bus_paths) * ++ pdata->num_usecases), GFP_KERNEL); ++ if 
(!usecase) { ++ pr_err("Error: Memory allocation for paths failed\n"); ++ mem_err = true; ++ goto err; ++ } ++ ++ ret = of_property_read_u32(of_node, "qcom,msm-bus,num-paths", ++ &num_paths); ++ if (ret) { ++ pr_err("Error: num_paths not found\n"); ++ goto err; ++ } ++ ++ vec_arr = of_get_property(of_node, "qcom,msm-bus,vectors-KBps", &len); ++ if (vec_arr == NULL) { ++ pr_err("Error: Vector array not found\n"); ++ goto err; ++ } ++ ++ if (len != num_usecases * num_paths * sizeof(uint32_t) * 4) { ++ pr_err("Error: Length-error on getting vectors\n"); ++ goto err; ++ } ++ ++ for (i = 0; i < num_usecases; i++) { ++ usecase[i].num_paths = num_paths; ++ usecase[i].vectors = devm_kzalloc(&pdev->dev, num_paths * ++ sizeof(struct msm_bus_vectors), GFP_KERNEL); ++ if (!usecase[i].vectors) { ++ mem_err = true; ++ pr_err("Error: Mem alloc failure in vectors\n"); ++ goto err; ++ } ++ ++ for (j = 0; j < num_paths; j++) { ++ int index = ((i * num_paths) + j) * 4; ++ usecase[i].vectors[j].src = be32_to_cpu(vec_arr[index]); ++ usecase[i].vectors[j].dst = ++ be32_to_cpu(vec_arr[index + 1]); ++ usecase[i].vectors[j].ab = (uint64_t) ++ KBTOB(be32_to_cpu(vec_arr[index + 2])); ++ usecase[i].vectors[j].ib = (uint64_t) ++ KBTOB(be32_to_cpu(vec_arr[index + 3])); ++ } ++ } ++ ++ pdata->usecase = usecase; ++ return pdata; ++err: ++ if (mem_err) { ++ for (; i > 0; i--) ++ kfree(usecase[i-1].vectors); ++ ++ kfree(usecase); ++ kfree(pdata); ++ } ++ ++ return NULL; ++} ++ ++/** ++ * msm_bus_cl_get_pdata() - Generate bus client data from device tree ++ * provided by clients. ++ * ++ * of_node: Device tree node to extract information from ++ * ++ * The function returns a valid pointer to the allocated bus-scale-pdata ++ * if the vectors were correctly read from the client's device node. ++ * Any error in reading or parsing the device node will return NULL ++ * to the caller. 
++ */ ++struct msm_bus_scale_pdata *msm_bus_cl_get_pdata(struct platform_device *pdev) ++{ ++ struct device_node *of_node; ++ struct msm_bus_scale_pdata *pdata = NULL; ++ ++ if (!pdev) { ++ pr_err("Error: Null Platform device\n"); ++ return NULL; ++ } ++ ++ of_node = pdev->dev.of_node; ++ pdata = get_pdata(pdev, of_node); ++ if (!pdata) { ++ pr_err("client has to provide missing entry for successful registration\n"); ++ return NULL; ++ } ++ ++ return pdata; ++} ++EXPORT_SYMBOL(msm_bus_cl_get_pdata); ++ ++/** ++ * msm_bus_cl_pdata_from_node() - Generate bus client data from device tree ++ * node provided by clients. This function should be used when a client ++ * driver needs to register multiple bus-clients from a single device-tree ++ * node associated with the platform-device. ++ * ++ * of_node: The subnode containing information about the bus scaling ++ * data ++ * ++ * pdev: Platform device associated with the device-tree node ++ * ++ * The function returns a valid pointer to the allocated bus-scale-pdata ++ * if the vectors were correctly read from the client's device node. ++ * Any error in reading or parsing the device node will return NULL ++ * to the caller. 
++ */ ++struct msm_bus_scale_pdata *msm_bus_pdata_from_node( ++ struct platform_device *pdev, struct device_node *of_node) ++{ ++ struct msm_bus_scale_pdata *pdata = NULL; ++ ++ if (!pdev) { ++ pr_err("Error: Null Platform device\n"); ++ return NULL; ++ } ++ ++ if (!of_node) { ++ pr_err("Error: Null of_node passed to bus driver\n"); ++ return NULL; ++ } ++ ++ pdata = get_pdata(pdev, of_node); ++ if (!pdata) { ++ pr_err("client has to provide missing entry for successful registration\n"); ++ return NULL; ++ } ++ ++ return pdata; ++} ++EXPORT_SYMBOL(msm_bus_pdata_from_node); ++ ++/** ++ * msm_bus_cl_clear_pdata() - Clear pdata allocated from device-tree ++ * of_node: Device tree node to extract information from ++ */ ++void msm_bus_cl_clear_pdata(struct msm_bus_scale_pdata *pdata) ++{ ++ int i; ++ ++ for (i = 0; i < pdata->num_usecases; i++) ++ kfree(pdata->usecase[i].vectors); ++ ++ kfree(pdata->usecase); ++ kfree(pdata); ++} ++EXPORT_SYMBOL(msm_bus_cl_clear_pdata); ++#endif ++ ++static int *get_arr(struct platform_device *pdev, ++ const struct device_node *node, const char *prop, ++ int *nports) ++{ ++ int size = 0, ret; ++ int *arr = NULL; ++ ++ if (of_get_property(node, prop, &size)) { ++ *nports = size / sizeof(int); ++ } else { ++ pr_debug("Property %s not available\n", prop); ++ *nports = 0; ++ return NULL; ++ } ++ ++ if (!size) { ++ *nports = 0; ++ return NULL; ++ } ++ ++ arr = devm_kzalloc(&pdev->dev, size, GFP_KERNEL); ++ if (ZERO_OR_NULL_PTR(arr)) { ++ pr_err("Error: Failed to alloc mem for %s\n", prop); ++ return NULL; ++ } ++ ++ ret = of_property_read_u32_array(node, prop, (u32 *)arr, *nports); ++ if (ret) { ++ pr_err("Error in reading property: %s\n", prop); ++ goto err; ++ } ++ ++ return arr; ++err: ++ devm_kfree(&pdev->dev, arr); ++ return NULL; ++} ++ ++static u64 *get_th_params(struct platform_device *pdev, ++ const struct device_node *node, const char *prop, ++ int *nports) ++{ ++ int size = 0, ret; ++ u64 *ret_arr = NULL; ++ int *arr = NULL; ++ 
int i; ++ ++ if (of_get_property(node, prop, &size)) { ++ *nports = size / sizeof(int); ++ } else { ++ pr_debug("Property %s not available\n", prop); ++ *nports = 0; ++ return NULL; ++ } ++ ++ if (!size) { ++ *nports = 0; ++ return NULL; ++ } ++ ++ ret_arr = devm_kzalloc(&pdev->dev, (*nports * sizeof(u64)), ++ GFP_KERNEL); ++ if (ZERO_OR_NULL_PTR(ret_arr)) { ++ pr_err("Error: Failed to alloc mem for ret arr %s\n", prop); ++ return NULL; ++ } ++ ++ arr = kzalloc(size, GFP_KERNEL); ++ if ((ZERO_OR_NULL_PTR(arr))) { ++ pr_err("Error: Failed to alloc temp mem for %s\n", prop); ++ return NULL; ++ } ++ ++ ret = of_property_read_u32_array(node, prop, (u32 *)arr, *nports); ++ if (ret) { ++ pr_err("Error in reading property: %s\n", prop); ++ goto err; ++ } ++ ++ for (i = 0; i < *nports; i++) ++ ret_arr[i] = (uint64_t)KBTOB(arr[i]); ++ ++ MSM_BUS_DBG("%s: num entries %d prop %s", __func__, *nports, prop); ++ ++ for (i = 0; i < *nports; i++) ++ MSM_BUS_DBG("Th %d val %llu", i, ret_arr[i]); ++ ++ kfree(arr); ++ return ret_arr; ++err: ++ kfree(arr); ++ devm_kfree(&pdev->dev, ret_arr); ++ return NULL; ++} ++ ++static struct msm_bus_node_info *get_nodes(struct device_node *of_node, ++ struct platform_device *pdev, ++ struct msm_bus_fabric_registration *pdata) ++{ ++ struct msm_bus_node_info *info; ++ struct device_node *child_node = NULL; ++ int i = 0, ret; ++ int num_bw = 0; ++ u32 temp; ++ ++ for_each_child_of_node(of_node, child_node) { ++ i++; ++ } ++ ++ pdata->len = i; ++ info = (struct msm_bus_node_info *) ++ devm_kzalloc(&pdev->dev, sizeof(struct msm_bus_node_info) * ++ pdata->len, GFP_KERNEL); ++ if (ZERO_OR_NULL_PTR(info)) { ++ pr_err("Failed to alloc memory for nodes: %d\n", pdata->len); ++ goto err; ++ } ++ ++ i = 0; ++ child_node = NULL; ++ for_each_child_of_node(of_node, child_node) { ++ const char *sel_str; ++ ++ ret = of_property_read_string(child_node, "label", ++ &info[i].name); ++ if (ret) ++ pr_err("Error reading node label\n"); ++ ++ ret = 
of_property_read_u32(child_node, "cell-id", &info[i].id); ++ if (ret) { ++ pr_err("Error reading node id\n"); ++ goto err; ++ } ++ ++ if (of_property_read_bool(child_node, "qcom,gateway")) ++ info[i].gateway = 1; ++ ++ of_property_read_u32(child_node, "qcom,mas-hw-id", ++ &info[i].mas_hw_id); ++ ++ of_property_read_u32(child_node, "qcom,slv-hw-id", ++ &info[i].slv_hw_id); ++ info[i].masterp = get_arr(pdev, child_node, ++ "qcom,masterp", &info[i].num_mports); ++ /* No need to store number of qports */ ++ info[i].qport = get_arr(pdev, child_node, ++ "qcom,qport", &ret); ++ pdata->nmasters += info[i].num_mports; ++ ++ ++ info[i].slavep = get_arr(pdev, child_node, ++ "qcom,slavep", &info[i].num_sports); ++ pdata->nslaves += info[i].num_sports; ++ ++ ++ info[i].tier = get_arr(pdev, child_node, ++ "qcom,tier", &info[i].num_tiers); ++ ++ if (of_property_read_bool(child_node, "qcom,ahb")) ++ info[i].ahb = 1; ++ ++ ret = of_property_read_string(child_node, "qcom,hw-sel", ++ &sel_str); ++ if (ret) ++ info[i].hw_sel = 0; ++ else { ++ ret = get_num(hw_sel_name, sel_str); ++ if (ret < 0) { ++ pr_err("Invalid hw-sel\n"); ++ goto err; ++ } ++ ++ info[i].hw_sel = ret; ++ } ++ ++ of_property_read_u32(child_node, "qcom,buswidth", ++ &info[i].buswidth); ++ of_property_read_u32(child_node, "qcom,ws", &info[i].ws); ++ ++ info[i].dual_conf = ++ of_property_read_bool(child_node, "qcom,dual-conf"); ++ ++ ++ info[i].th = get_th_params(pdev, child_node, "qcom,thresh", ++ &info[i].num_thresh); ++ ++ info[i].bimc_bw = get_th_params(pdev, child_node, ++ "qcom,bimc,bw", &num_bw); ++ ++ if (num_bw != info[i].num_thresh) { ++ pr_err("%s:num_bw %d must equal num_thresh %d", ++ __func__, num_bw, info[i].num_thresh); ++ pr_err("%s:Err setting up dual conf for %s", ++ __func__, info[i].name); ++ goto err; ++ } ++ ++ of_property_read_u32(child_node, "qcom,bimc,gp", ++ &info[i].bimc_gp); ++ of_property_read_u32(child_node, "qcom,bimc,thmp", ++ &info[i].bimc_thmp); ++ ++ ret = 
of_property_read_string(child_node, "qcom,mode-thresh", ++ &sel_str); ++ if (ret) ++ info[i].mode_thresh = 0; ++ else { ++ ret = get_num(mode_sel_name, sel_str); ++ if (ret < 0) { ++ pr_err("Unknown mode :%s\n", sel_str); ++ goto err; ++ } ++ ++ info[i].mode_thresh = ret; ++ MSM_BUS_DBG("AXI: THreshold mode set: %d\n", ++ info[i].mode_thresh); ++ } ++ ++ ret = of_property_read_string(child_node, "qcom,mode", ++ &sel_str); ++ ++ if (ret) ++ info[i].mode = 0; ++ else { ++ ret = get_num(mode_sel_name, sel_str); ++ if (ret < 0) { ++ pr_err("Unknown mode :%s\n", sel_str); ++ goto err; ++ } ++ ++ info[i].mode = ret; ++ } ++ ++ info[i].nr_lim = ++ of_property_read_bool(child_node, "qcom,nr-lim"); ++ ++ ret = of_property_read_u32(child_node, "qcom,ff", ++ &info[i].ff); ++ if (ret) { ++ pr_debug("fudge factor not present %d", info[i].id); ++ info[i].ff = 0; ++ } ++ ++ ret = of_property_read_u32(child_node, "qcom,floor-bw", ++ &temp); ++ if (ret) { ++ pr_debug("fabdev floor bw not present %d", info[i].id); ++ info[i].floor_bw = 0; ++ } else { ++ info[i].floor_bw = KBTOB(temp); ++ } ++ ++ info[i].rt_mas = ++ of_property_read_bool(child_node, "qcom,rt-mas"); ++ ++ ret = of_property_read_string(child_node, "qcom,perm-mode", ++ &sel_str); ++ if (ret) ++ info[i].perm_mode = 0; ++ else { ++ ret = get_num(mode_sel_name, sel_str); ++ if (ret < 0) ++ goto err; ++ ++ info[i].perm_mode = 1 << ret; ++ } ++ ++ of_property_read_u32(child_node, "qcom,prio-lvl", ++ &info[i].prio_lvl); ++ of_property_read_u32(child_node, "qcom,prio-rd", ++ &info[i].prio_rd); ++ of_property_read_u32(child_node, "qcom,prio-wr", ++ &info[i].prio_wr); ++ of_property_read_u32(child_node, "qcom,prio0", &info[i].prio0); ++ of_property_read_u32(child_node, "qcom,prio1", &info[i].prio1); ++ ret = of_property_read_string(child_node, "qcom,slaveclk-dual", ++ &info[i].slaveclk[DUAL_CTX]); ++ if (!ret) ++ pr_debug("Got slaveclk_dual: %s\n", ++ info[i].slaveclk[DUAL_CTX]); ++ else ++ info[i].slaveclk[DUAL_CTX] = NULL; ++ 
++ ret = of_property_read_string(child_node, ++ "qcom,slaveclk-active", &info[i].slaveclk[ACTIVE_CTX]); ++ if (!ret) ++ pr_debug("Got slaveclk_active\n"); ++ else ++ info[i].slaveclk[ACTIVE_CTX] = NULL; ++ ++ ret = of_property_read_string(child_node, "qcom,memclk-dual", ++ &info[i].memclk[DUAL_CTX]); ++ if (!ret) ++ pr_debug("Got memclk_dual\n"); ++ else ++ info[i].memclk[DUAL_CTX] = NULL; ++ ++ ret = of_property_read_string(child_node, "qcom,memclk-active", ++ &info[i].memclk[ACTIVE_CTX]); ++ if (!ret) ++ pr_debug("Got memclk_active\n"); ++ else ++ info[i].memclk[ACTIVE_CTX] = NULL; ++ ++ ret = of_property_read_string(child_node, "qcom,iface-clk-node", ++ &info[i].iface_clk_node); ++ if (!ret) ++ pr_debug("Got iface_clk_node\n"); ++ else ++ info[i].iface_clk_node = NULL; ++ ++ pr_debug("Node name: %s\n", info[i].name); ++ of_node_put(child_node); ++ i++; ++ } ++ ++ pr_debug("Bus %d added: %d masters\n", pdata->id, pdata->nmasters); ++ pr_debug("Bus %d added: %d slaves\n", pdata->id, pdata->nslaves); ++ return info; ++err: ++ return NULL; ++} ++ ++void msm_bus_of_get_nfab(struct platform_device *pdev, ++ struct msm_bus_fabric_registration *pdata) ++{ ++ struct device_node *of_node; ++ int ret, nfab = 0; ++ ++ if (!pdev) { ++ pr_err("Error: Null platform device\n"); ++ return; ++ } ++ ++ of_node = pdev->dev.of_node; ++ ret = of_property_read_u32(of_node, "qcom,nfab", ++ &nfab); ++ if (!ret) ++ pr_debug("Fab_of: Read number of buses: %u\n", nfab); ++ ++ msm_bus_board_set_nfab(pdata, nfab); ++} ++ ++struct msm_bus_fabric_registration ++ *msm_bus_of_get_fab_data(struct platform_device *pdev) ++{ ++ struct device_node *of_node; ++ struct msm_bus_fabric_registration *pdata; ++ bool mem_err = false; ++ int ret = 0; ++ const char *sel_str; ++ u32 temp; ++ ++ if (!pdev) { ++ pr_err("Error: Null platform device\n"); ++ return NULL; ++ } ++ ++ of_node = pdev->dev.of_node; ++ pdata = devm_kzalloc(&pdev->dev, ++ sizeof(struct msm_bus_fabric_registration), GFP_KERNEL); ++ if 
(!pdata) { ++ pr_err("Error: Memory allocation for pdata failed\n"); ++ mem_err = true; ++ goto err; ++ } ++ ++ ret = of_property_read_string(of_node, "label", &pdata->name); ++ if (ret) { ++ pr_err("Error: label not found\n"); ++ goto err; ++ } ++ pr_debug("Fab_of: Read name: %s\n", pdata->name); ++ ++ ret = of_property_read_u32(of_node, "cell-id", ++ &pdata->id); ++ if (ret) { ++ pr_err("Error: num-usecases not found\n"); ++ goto err; ++ } ++ pr_debug("Fab_of: Read id: %u\n", pdata->id); ++ ++ if (of_property_read_bool(of_node, "qcom,ahb")) ++ pdata->ahb = 1; ++ ++ ret = of_property_read_string(of_node, "qcom,fabclk-dual", ++ &pdata->fabclk[DUAL_CTX]); ++ if (ret) { ++ pr_debug("fabclk_dual not available\n"); ++ pdata->fabclk[DUAL_CTX] = NULL; ++ } else ++ pr_debug("Fab_of: Read clk dual ctx: %s\n", ++ pdata->fabclk[DUAL_CTX]); ++ ret = of_property_read_string(of_node, "qcom,fabclk-active", ++ &pdata->fabclk[ACTIVE_CTX]); ++ if (ret) { ++ pr_debug("Error: fabclk_active not available\n"); ++ pdata->fabclk[ACTIVE_CTX] = NULL; ++ } else ++ pr_debug("Fab_of: Read clk act ctx: %s\n", ++ pdata->fabclk[ACTIVE_CTX]); ++ ++ ret = of_property_read_u32(of_node, "qcom,ntieredslaves", ++ &pdata->ntieredslaves); ++ if (ret) { ++ pr_err("Error: ntieredslaves not found\n"); ++ goto err; ++ } ++ ++ ret = of_property_read_u32(of_node, "qcom,qos-freq", &pdata->qos_freq); ++ if (ret) ++ pr_debug("qos_freq not available\n"); ++ ++ ret = of_property_read_string(of_node, "qcom,hw-sel", &sel_str); ++ if (ret) { ++ pr_err("Error: hw_sel not found\n"); ++ goto err; ++ } else { ++ ret = get_num(hw_sel_name, sel_str); ++ if (ret < 0) ++ goto err; ++ ++ pdata->hw_sel = ret; ++ } ++ ++ if (of_property_read_bool(of_node, "qcom,virt")) ++ pdata->virt = true; ++ ++ ret = of_property_read_u32(of_node, "qcom,qos-baseoffset", ++ &pdata->qos_baseoffset); ++ if (ret) ++ pr_debug("%s:qos_baseoffset not available\n", __func__); ++ ++ ret = of_property_read_u32(of_node, "qcom,qos-delta", ++ 
&pdata->qos_delta); ++ if (ret) ++ pr_debug("%s:qos_delta not available\n", __func__); ++ ++ if (of_property_read_bool(of_node, "qcom,rpm-en")) ++ pdata->rpm_enabled = 1; ++ ++ ret = of_property_read_u32(of_node, "qcom,nr-lim-thresh", ++ &temp); ++ ++ if (ret) { ++ pr_err("nr-lim threshold not specified"); ++ pdata->nr_lim_thresh = 0; ++ } else { ++ pdata->nr_lim_thresh = KBTOB(temp); ++ } ++ ++ ret = of_property_read_u32(of_node, "qcom,eff-fact", ++ &pdata->eff_fact); ++ if (ret) { ++ pr_err("Fab eff-factor not present"); ++ pdata->eff_fact = 0; ++ } ++ ++ pdata->info = get_nodes(of_node, pdev, pdata); ++ return pdata; ++err: ++ return NULL; ++} ++EXPORT_SYMBOL(msm_bus_of_get_fab_data); +--- /dev/null ++++ b/drivers/bus/msm_bus/msm_bus_of_adhoc.c +@@ -0,0 +1,641 @@ ++/* Copyright (c) 2014, The Linux Foundation. All rights reserved. ++ * ++ * This program is free software; you can redistribute it and/or modify ++ * it under the terms of the GNU General Public License version 2 and ++ * only version 2 as published by the Free Software Foundation. ++ * ++ * This program is distributed in the hope that it will be useful, ++ * but WITHOUT ANY WARRANTY; without even the implied warranty of ++ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the ++ * GNU General Public License for more details. 
++ */ ++ ++#define pr_fmt(fmt) "AXI: %s(): " fmt, __func__ ++ ++#include ++#include ++#include ++#include ++#include ++#include ++#include ++#include ++#include "msm-bus.h" ++#include "msm-bus-board.h" ++#include "msm_bus_rules.h" ++#include "msm_bus_core.h" ++#include "msm_bus_adhoc.h" ++ ++#define DEFAULT_QOS_FREQ 19200 ++#define DEFAULT_UTIL_FACT 100 ++#define DEFAULT_VRAIL_COMP 100 ++ ++static int get_qos_mode(struct platform_device *pdev, ++ struct device_node *node, const char *qos_mode) ++{ ++ const char *qos_names[] = {"fixed", "limiter", "bypass", "regulator"}; ++ int i = 0; ++ int ret = -1; ++ ++ if (!qos_mode) ++ goto exit_get_qos_mode; ++ ++ for (i = 0; i < ARRAY_SIZE(qos_names); i++) { ++ if (!strcmp(qos_mode, qos_names[i])) ++ break; ++ } ++ if (i == ARRAY_SIZE(qos_names)) ++ dev_err(&pdev->dev, "Cannot match mode qos %s using Bypass", ++ qos_mode); ++ else ++ ret = i; ++ ++exit_get_qos_mode: ++ return ret; ++} ++ ++static int *get_arr(struct platform_device *pdev, ++ struct device_node *node, const char *prop, ++ int *nports) ++{ ++ int size = 0, ret; ++ int *arr = NULL; ++ ++ if (of_get_property(node, prop, &size)) { ++ *nports = size / sizeof(int); ++ } else { ++ dev_dbg(&pdev->dev, "Property %s not available\n", prop); ++ *nports = 0; ++ return NULL; ++ } ++ ++ arr = devm_kzalloc(&pdev->dev, size, GFP_KERNEL); ++ if ((size > 0) && ZERO_OR_NULL_PTR(arr)) { ++ dev_err(&pdev->dev, "Error: Failed to alloc mem for %s\n", ++ prop); ++ return NULL; ++ } ++ ++ ret = of_property_read_u32_array(node, prop, (u32 *)arr, *nports); ++ if (ret) { ++ dev_err(&pdev->dev, "Error in reading property: %s\n", prop); ++ goto arr_err; ++ } ++ ++ return arr; ++arr_err: ++ devm_kfree(&pdev->dev, arr); ++ return NULL; ++} ++ ++static struct msm_bus_fab_device_type *get_fab_device_info( ++ struct device_node *dev_node, ++ struct platform_device *pdev) ++{ ++ struct msm_bus_fab_device_type *fab_dev; ++ unsigned int ret; ++ struct resource *res; ++ const char *base_name; ++ 
++ fab_dev = devm_kzalloc(&pdev->dev, ++ sizeof(struct msm_bus_fab_device_type), ++ GFP_KERNEL); ++ if (!fab_dev) { ++ dev_err(&pdev->dev, ++ "Error: Unable to allocate memory for fab_dev\n"); ++ return NULL; ++ } ++ ++ ret = of_property_read_string(dev_node, "qcom,base-name", &base_name); ++ if (ret) { ++ dev_err(&pdev->dev, "Error: Unable to get base address name\n"); ++ goto fab_dev_err; ++ } ++ ++ res = platform_get_resource_byname(pdev, IORESOURCE_MEM, base_name); ++ if (!res) { ++ dev_err(&pdev->dev, "Error getting qos base addr %s\n", ++ base_name); ++ goto fab_dev_err; ++ } ++ fab_dev->pqos_base = res->start; ++ fab_dev->qos_range = resource_size(res); ++ fab_dev->bypass_qos_prg = of_property_read_bool(dev_node, ++ "qcom,bypass-qos-prg"); ++ ++ ret = of_property_read_u32(dev_node, "qcom,base-offset", ++ &fab_dev->base_offset); ++ if (ret) ++ dev_dbg(&pdev->dev, "Bus base offset is missing\n"); ++ ++ ret = of_property_read_u32(dev_node, "qcom,qos-off", ++ &fab_dev->qos_off); ++ if (ret) ++ dev_dbg(&pdev->dev, "Bus qos off is missing\n"); ++ ++ ++ ret = of_property_read_u32(dev_node, "qcom,bus-type", ++ &fab_dev->bus_type); ++ if (ret) { ++ dev_warn(&pdev->dev, "Bus type is missing\n"); ++ goto fab_dev_err; ++ } ++ ++ ret = of_property_read_u32(dev_node, "qcom,qos-freq", ++ &fab_dev->qos_freq); ++ if (ret) { ++ dev_dbg(&pdev->dev, "Bus qos freq is missing\n"); ++ fab_dev->qos_freq = DEFAULT_QOS_FREQ; ++ } ++ ++ ret = of_property_read_u32(dev_node, "qcom,util-fact", ++ &fab_dev->util_fact); ++ if (ret) { ++ dev_info(&pdev->dev, "Util-fact is missing, default to %d\n", ++ DEFAULT_UTIL_FACT); ++ fab_dev->util_fact = DEFAULT_UTIL_FACT; ++ } ++ ++ ret = of_property_read_u32(dev_node, "qcom,vrail-comp", ++ &fab_dev->vrail_comp); ++ if (ret) { ++ dev_info(&pdev->dev, "Vrail-comp is missing, default to %d\n", ++ DEFAULT_VRAIL_COMP); ++ fab_dev->vrail_comp = DEFAULT_VRAIL_COMP; ++ } ++ ++ return fab_dev; ++ ++fab_dev_err: ++ devm_kfree(&pdev->dev, fab_dev); ++ fab_dev 
= 0; ++ return NULL; ++} ++ ++static void get_qos_params( ++ struct device_node * const dev_node, ++ struct platform_device * const pdev, ++ struct msm_bus_node_info_type *node_info) ++{ ++ const char *qos_mode = NULL; ++ unsigned int ret; ++ unsigned int temp; ++ ++ ret = of_property_read_string(dev_node, "qcom,qos-mode", &qos_mode); ++ ++ if (ret) ++ node_info->qos_params.mode = -1; ++ else ++ node_info->qos_params.mode = get_qos_mode(pdev, dev_node, ++ qos_mode); ++ ++ of_property_read_u32(dev_node, "qcom,prio-lvl", ++ &node_info->qos_params.prio_lvl); ++ ++ of_property_read_u32(dev_node, "qcom,prio1", ++ &node_info->qos_params.prio1); ++ ++ of_property_read_u32(dev_node, "qcom,prio0", ++ &node_info->qos_params.prio0); ++ ++ of_property_read_u32(dev_node, "qcom,prio-rd", ++ &node_info->qos_params.prio_rd); ++ ++ of_property_read_u32(dev_node, "qcom,prio-wr", ++ &node_info->qos_params.prio_wr); ++ ++ of_property_read_u32(dev_node, "qcom,gp", ++ &node_info->qos_params.gp); ++ ++ of_property_read_u32(dev_node, "qcom,thmp", ++ &node_info->qos_params.thmp); ++ ++ of_property_read_u32(dev_node, "qcom,ws", ++ &node_info->qos_params.ws); ++ ++ ret = of_property_read_u32(dev_node, "qcom,bw_buffer", &temp); ++ ++ if (ret) ++ node_info->qos_params.bw_buffer = 0; ++ else ++ node_info->qos_params.bw_buffer = KBTOB(temp); ++ ++} ++ ++ ++static struct msm_bus_node_info_type *get_node_info_data( ++ struct device_node * const dev_node, ++ struct platform_device * const pdev) ++{ ++ struct msm_bus_node_info_type *node_info; ++ unsigned int ret; ++ int size; ++ int i; ++ struct device_node *con_node; ++ struct device_node *bus_dev; ++ ++ node_info = devm_kzalloc(&pdev->dev, ++ sizeof(struct msm_bus_node_info_type), ++ GFP_KERNEL); ++ if (!node_info) { ++ dev_err(&pdev->dev, ++ "Error: Unable to allocate memory for node_info\n"); ++ return NULL; ++ } ++ ++ ret = of_property_read_u32(dev_node, "cell-id", &node_info->id); ++ if (ret) { ++ dev_warn(&pdev->dev, "Bus node is missing 
cell-id\n"); ++ goto node_info_err; ++ } ++ ret = of_property_read_string(dev_node, "label", &node_info->name); ++ if (ret) { ++ dev_warn(&pdev->dev, "Bus node is missing name\n"); ++ goto node_info_err; ++ } ++ node_info->qport = get_arr(pdev, dev_node, "qcom,qport", ++ &node_info->num_qports); ++ ++ if (of_get_property(dev_node, "qcom,connections", &size)) { ++ node_info->num_connections = size / sizeof(int); ++ node_info->connections = devm_kzalloc(&pdev->dev, size, ++ GFP_KERNEL); ++ } else { ++ node_info->num_connections = 0; ++ node_info->connections = 0; ++ } ++ ++ for (i = 0; i < node_info->num_connections; i++) { ++ con_node = of_parse_phandle(dev_node, "qcom,connections", i); ++ if (IS_ERR_OR_NULL(con_node)) ++ goto node_info_err; ++ ++ if (of_property_read_u32(con_node, "cell-id", ++ &node_info->connections[i])) ++ goto node_info_err; ++ of_node_put(con_node); ++ } ++ ++ if (of_get_property(dev_node, "qcom,blacklist", &size)) { ++ node_info->num_blist = size/sizeof(u32); ++ node_info->black_listed_connections = devm_kzalloc(&pdev->dev, ++ size, GFP_KERNEL); ++ } else { ++ node_info->num_blist = 0; ++ node_info->black_listed_connections = 0; ++ } ++ ++ for (i = 0; i < node_info->num_blist; i++) { ++ con_node = of_parse_phandle(dev_node, "qcom,blacklist", i); ++ if (IS_ERR_OR_NULL(con_node)) ++ goto node_info_err; ++ ++ if (of_property_read_u32(con_node, "cell-id", ++ &node_info->black_listed_connections[i])) ++ goto node_info_err; ++ of_node_put(con_node); ++ } ++ ++ bus_dev = of_parse_phandle(dev_node, "qcom,bus-dev", 0); ++ if (!IS_ERR_OR_NULL(bus_dev)) { ++ if (of_property_read_u32(bus_dev, "cell-id", ++ &node_info->bus_device_id)) { ++ dev_err(&pdev->dev, "Can't find bus device. 
Node %d", ++ node_info->id); ++ goto node_info_err; ++ } ++ ++ of_node_put(bus_dev); ++ } else ++ dev_dbg(&pdev->dev, "Can't find bdev phandle for %d", ++ node_info->id); ++ ++ node_info->is_fab_dev = of_property_read_bool(dev_node, "qcom,fab-dev"); ++ node_info->virt_dev = of_property_read_bool(dev_node, "qcom,virt-dev"); ++ ++ ret = of_property_read_u32(dev_node, "qcom,buswidth", ++ &node_info->buswidth); ++ if (ret) { ++ dev_dbg(&pdev->dev, "Using default 8 bytes %d", node_info->id); ++ node_info->buswidth = 8; ++ } ++ ++ ret = of_property_read_u32(dev_node, "qcom,mas-rpm-id", ++ &node_info->mas_rpm_id); ++ if (ret) { ++ dev_dbg(&pdev->dev, "mas rpm id is missing\n"); ++ node_info->mas_rpm_id = -1; ++ } ++ ++ ret = of_property_read_u32(dev_node, "qcom,slv-rpm-id", ++ &node_info->slv_rpm_id); ++ if (ret) { ++ dev_dbg(&pdev->dev, "slv rpm id is missing\n"); ++ node_info->slv_rpm_id = -1; ++ } ++ ret = of_property_read_u32(dev_node, "qcom,util-fact", ++ &node_info->util_fact); ++ if (ret) ++ node_info->util_fact = 0; ++ ret = of_property_read_u32(dev_node, "qcom,vrail-comp", ++ &node_info->vrail_comp); ++ if (ret) ++ node_info->vrail_comp = 0; ++ get_qos_params(dev_node, pdev, node_info); ++ ++ return node_info; ++ ++node_info_err: ++ devm_kfree(&pdev->dev, node_info); ++ node_info = 0; ++ return NULL; ++} ++ ++static unsigned int get_bus_node_device_data( ++ struct device_node * const dev_node, ++ struct platform_device * const pdev, ++ struct msm_bus_node_device_type * const node_device) ++{ ++ node_device->node_info = get_node_info_data(dev_node, pdev); ++ if (IS_ERR_OR_NULL(node_device->node_info)) { ++ dev_err(&pdev->dev, "Error: Node info missing\n"); ++ return -ENODATA; ++ } ++ node_device->ap_owned = of_property_read_bool(dev_node, ++ "qcom,ap-owned"); ++ ++ if (node_device->node_info->is_fab_dev) { ++ dev_dbg(&pdev->dev, "Dev %d\n", node_device->node_info->id); ++ ++ if (!node_device->node_info->virt_dev) { ++ node_device->fabdev = ++ 
get_fab_device_info(dev_node, pdev); ++ if (IS_ERR_OR_NULL(node_device->fabdev)) { ++ dev_err(&pdev->dev, ++ "Error: Fabric device info missing\n"); ++ devm_kfree(&pdev->dev, node_device->node_info); ++ return -ENODATA; ++ } ++ } ++ node_device->clk[DUAL_CTX].clk = of_clk_get_by_name(dev_node, ++ "bus_clk"); ++ ++ if (IS_ERR_OR_NULL(node_device->clk[DUAL_CTX].clk)) ++ dev_dbg(&pdev->dev, ++ "%s:Failed to get bus clk for bus%d ctx%d", ++ __func__, node_device->node_info->id, ++ DUAL_CTX); ++ ++ node_device->clk[ACTIVE_CTX].clk = of_clk_get_by_name(dev_node, ++ "bus_a_clk"); ++ if (IS_ERR_OR_NULL(node_device->clk[ACTIVE_CTX].clk)) ++ dev_err(&pdev->dev, ++ "Failed to get bus clk for bus%d ctx%d", ++ node_device->node_info->id, ACTIVE_CTX); ++ if (msmbus_coresight_init_adhoc(pdev, dev_node)) ++ dev_warn(&pdev->dev, ++ "Coresight support absent for bus: %d\n", ++ node_device->node_info->id); ++ } else { ++ node_device->qos_clk.clk = of_clk_get_by_name(dev_node, ++ "bus_qos_clk"); ++ ++ if (IS_ERR_OR_NULL(node_device->qos_clk.clk)) ++ dev_dbg(&pdev->dev, ++ "%s:Failed to get bus qos clk for mas%d", ++ __func__, node_device->node_info->id); ++ ++ node_device->clk[DUAL_CTX].clk = of_clk_get_by_name(dev_node, ++ "node_clk"); ++ ++ if (IS_ERR_OR_NULL(node_device->clk[DUAL_CTX].clk)) ++ dev_dbg(&pdev->dev, ++ "%s:Failed to get bus clk for bus%d ctx%d", ++ __func__, node_device->node_info->id, ++ DUAL_CTX); ++ ++ } ++ return 0; ++} ++ ++struct msm_bus_device_node_registration ++ *msm_bus_of_to_pdata(struct platform_device *pdev) ++{ ++ struct device_node *of_node, *child_node; ++ struct msm_bus_device_node_registration *pdata; ++ unsigned int i = 0, j; ++ unsigned int ret; ++ ++ if (!pdev) { ++ pr_err("Error: Null platform device\n"); ++ return NULL; ++ } ++ ++ of_node = pdev->dev.of_node; ++ ++ pdata = devm_kzalloc(&pdev->dev, ++ sizeof(struct msm_bus_device_node_registration), ++ GFP_KERNEL); ++ if (!pdata) { ++ dev_err(&pdev->dev, ++ "Error: Memory allocation for pdata 
failed\n"); ++ return NULL; ++ } ++ ++ pdata->num_devices = of_get_child_count(of_node); ++ ++ pdata->info = devm_kzalloc(&pdev->dev, ++ sizeof(struct msm_bus_node_device_type) * ++ pdata->num_devices, GFP_KERNEL); ++ ++ if (!pdata->info) { ++ dev_err(&pdev->dev, ++ "Error: Memory allocation for pdata->info failed\n"); ++ goto node_reg_err; ++ } ++ ++ ret = 0; ++ for_each_child_of_node(of_node, child_node) { ++ ret = get_bus_node_device_data(child_node, pdev, ++ &pdata->info[i]); ++ if (ret) { ++ dev_err(&pdev->dev, "Error: unable to initialize bus nodes\n"); ++ goto node_reg_err_1; ++ } ++ i++; ++ } ++ ++ dev_dbg(&pdev->dev, "bus topology:\n"); ++ for (i = 0; i < pdata->num_devices; i++) { ++ dev_dbg(&pdev->dev, "id %d\nnum_qports %d\nnum_connections %d", ++ pdata->info[i].node_info->id, ++ pdata->info[i].node_info->num_qports, ++ pdata->info[i].node_info->num_connections); ++ dev_dbg(&pdev->dev, "\nbus_device_id %d\n buswidth %d\n", ++ pdata->info[i].node_info->bus_device_id, ++ pdata->info[i].node_info->buswidth); ++ for (j = 0; j < pdata->info[i].node_info->num_connections; ++ j++) { ++ dev_dbg(&pdev->dev, "connection[%d]: %d\n", j, ++ pdata->info[i].node_info->connections[j]); ++ } ++ for (j = 0; j < pdata->info[i].node_info->num_blist; ++ j++) { ++ dev_dbg(&pdev->dev, "black_listed_node[%d]: %d\n", j, ++ pdata->info[i].node_info-> ++ black_listed_connections[j]); ++ } ++ if (pdata->info[i].fabdev) ++ dev_dbg(&pdev->dev, "base_addr %zu\nbus_type %d\n", ++ (size_t)pdata->info[i]. 
++ fabdev->pqos_base, ++ pdata->info[i].fabdev->bus_type); ++ } ++ return pdata; ++ ++node_reg_err_1: ++ devm_kfree(&pdev->dev, pdata->info); ++node_reg_err: ++ devm_kfree(&pdev->dev, pdata); ++ pdata = NULL; ++ return NULL; ++} ++ ++static int msm_bus_of_get_ids(struct platform_device *pdev, ++ struct device_node *dev_node, int **dev_ids, ++ int *num_ids, char *prop_name) ++{ ++ int ret = 0; ++ int size, i; ++ struct device_node *rule_node; ++ int *ids = NULL; ++ ++ if (of_get_property(dev_node, prop_name, &size)) { ++ *num_ids = size / sizeof(int); ++ ids = devm_kzalloc(&pdev->dev, size, GFP_KERNEL); ++ } else { ++ dev_err(&pdev->dev, "No rule nodes, skipping node"); ++ ret = -ENXIO; ++ goto exit_get_ids; ++ } ++ ++ *dev_ids = ids; ++ for (i = 0; i < *num_ids; i++) { ++ rule_node = of_parse_phandle(dev_node, prop_name, i); ++ if (IS_ERR_OR_NULL(rule_node)) { ++ dev_err(&pdev->dev, "Can't get rule node id"); ++ ret = -ENXIO; ++ goto err_get_ids; ++ } ++ ++ if (of_property_read_u32(rule_node, "cell-id", ++ &ids[i])) { ++ dev_err(&pdev->dev, "Can't get rule node id"); ++ ret = -ENXIO; ++ goto err_get_ids; ++ } ++ of_node_put(rule_node); ++ } ++exit_get_ids: ++ return ret; ++err_get_ids: ++ devm_kfree(&pdev->dev, ids); ++ of_node_put(rule_node); ++ ids = NULL; ++ return ret; ++} ++ ++int msm_bus_of_get_static_rules(struct platform_device *pdev, ++ struct bus_rule_type **static_rules) ++{ ++ int ret = 0; ++ struct device_node *of_node, *child_node; ++ int num_rules = 0; ++ int rule_idx = 0; ++ int bw_fld = 0; ++ int i; ++ struct bus_rule_type *static_rule = NULL; ++ ++ of_node = pdev->dev.of_node; ++ num_rules = of_get_child_count(of_node); ++ static_rule = devm_kzalloc(&pdev->dev, ++ sizeof(struct bus_rule_type) * num_rules, ++ GFP_KERNEL); ++ ++ if (IS_ERR_OR_NULL(static_rule)) { ++ ret = -ENOMEM; ++ goto exit_static_rules; ++ } ++ ++ *static_rules = static_rule; ++ for_each_child_of_node(of_node, child_node) { ++ ret = msm_bus_of_get_ids(pdev, child_node, ++ 
&static_rule[rule_idx].src_id, ++ &static_rule[rule_idx].num_src, ++ "qcom,src-nodes"); ++ ++ ret = msm_bus_of_get_ids(pdev, child_node, ++ &static_rule[rule_idx].dst_node, ++ &static_rule[rule_idx].num_dst, ++ "qcom,dest-node"); ++ ++ ret = of_property_read_u32(child_node, "qcom,src-field", ++ &static_rule[rule_idx].src_field); ++ if (ret) { ++ dev_err(&pdev->dev, "src-field missing"); ++ ret = -ENXIO; ++ goto err_static_rules; ++ } ++ ++ ret = of_property_read_u32(child_node, "qcom,src-op", ++ &static_rule[rule_idx].op); ++ if (ret) { ++ dev_err(&pdev->dev, "src-op missing"); ++ ret = -ENXIO; ++ goto err_static_rules; ++ } ++ ++ ret = of_property_read_u32(child_node, "qcom,mode", ++ &static_rule[rule_idx].mode); ++ if (ret) { ++ dev_err(&pdev->dev, "mode missing"); ++ ret = -ENXIO; ++ goto err_static_rules; ++ } ++ ++ ret = of_property_read_u32(child_node, "qcom,thresh", &bw_fld); ++ if (ret) { ++ dev_err(&pdev->dev, "thresh missing"); ++ ret = -ENXIO; ++ goto err_static_rules; ++ } else ++ static_rule[rule_idx].thresh = KBTOB(bw_fld); ++ ++ ret = of_property_read_u32(child_node, "qcom,dest-bw", ++ &bw_fld); ++ if (ret) ++ static_rule[rule_idx].dst_bw = 0; ++ else ++ static_rule[rule_idx].dst_bw = KBTOB(bw_fld); ++ ++ rule_idx++; ++ } ++ ret = rule_idx; ++exit_static_rules: ++ return ret; ++err_static_rules: ++ for (i = 0; i < num_rules; i++) { ++ if (!IS_ERR_OR_NULL(static_rule)) { ++ if (!IS_ERR_OR_NULL(static_rule[i].src_id)) ++ devm_kfree(&pdev->dev, ++ static_rule[i].src_id); ++ if (!IS_ERR_OR_NULL(static_rule[i].dst_node)) ++ devm_kfree(&pdev->dev, ++ static_rule[i].dst_node); ++ devm_kfree(&pdev->dev, static_rule); ++ } ++ } ++ devm_kfree(&pdev->dev, *static_rules); ++ static_rules = NULL; ++ return ret; ++} +--- /dev/null ++++ b/drivers/bus/msm_bus/msm_bus_rules.c +@@ -0,0 +1,624 @@ ++/* Copyright (c) 2014, The Linux Foundation. All rights reserved. 
++ * ++ * This program is free software; you can redistribute it and/or modify ++ * it under the terms of the GNU General Public License version 2 and ++ * only version 2 as published by the Free Software Foundation. ++ * ++ * This program is distributed in the hope that it will be useful, ++ * but WITHOUT ANY WARRANTY; without even the implied warranty of ++ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the ++ * GNU General Public License for more details. ++ */ ++ ++#include ++#include ++#include ++#include "msm-bus-board.h" ++#include "msm_bus_rules.h" ++#include ++ ++struct node_vote_info { ++ int id; ++ u64 ib; ++ u64 ab; ++ u64 clk; ++}; ++ ++struct rules_def { ++ int rule_id; ++ int num_src; ++ int state; ++ struct node_vote_info *src_info; ++ struct bus_rule_type rule_ops; ++ bool state_change; ++ struct list_head link; ++}; ++ ++struct rule_node_info { ++ int id; ++ void *data; ++ struct raw_notifier_head rule_notify_list; ++ int cur_rule; ++ int num_rules; ++ struct list_head node_rules; ++ struct list_head link; ++ struct rule_apply_rcm_info apply; ++}; ++ ++DEFINE_MUTEX(msm_bus_rules_lock); ++static LIST_HEAD(node_list); ++static struct rule_node_info *get_node(u32 id, void *data); ++ ++#define LE(op1, op2) (op1 <= op2) ++#define LT(op1, op2) (op1 < op2) ++#define GE(op1, op2) (op1 >= op2) ++#define GT(op1, op2) (op1 > op2) ++#define NB_ID (0x201) ++ ++static struct rule_node_info *get_node(u32 id, void *data) ++{ ++ struct rule_node_info *node_it = NULL; ++ struct rule_node_info *node_match = NULL; ++ ++ list_for_each_entry(node_it, &node_list, link) { ++ if (node_it->id == id) { ++ if ((id == NB_ID)) { ++ if ((node_it->data == data)) { ++ node_match = node_it; ++ break; ++ } ++ } else { ++ node_match = node_it; ++ break; ++ } ++ } ++ } ++ return node_match; ++} ++ ++static struct rule_node_info *gen_node(u32 id, void *data) ++{ ++ struct rule_node_info *node_it = NULL; ++ struct rule_node_info *node_match = NULL; ++ ++ 
list_for_each_entry(node_it, &node_list, link) { ++ if (node_it->id == id) { ++ node_match = node_it; ++ break; ++ } ++ } ++ ++ if (!node_match) { ++ node_match = kzalloc(sizeof(struct rule_node_info), GFP_KERNEL); ++ if (!node_match) { ++ pr_err("%s: Cannot allocate memory", __func__); ++ goto exit_node_match; ++ } ++ ++ node_match->id = id; ++ node_match->cur_rule = -1; ++ node_match->num_rules = 0; ++ node_match->data = data; ++ list_add_tail(&node_match->link, &node_list); ++ INIT_LIST_HEAD(&node_match->node_rules); ++ RAW_INIT_NOTIFIER_HEAD(&node_match->rule_notify_list); ++ pr_debug("Added new node %d to list\n", id); ++ } ++exit_node_match: ++ return node_match; ++} ++ ++static bool do_compare_op(u64 op1, u64 op2, int op) ++{ ++ bool ret = false; ++ ++ switch (op) { ++ case OP_LE: ++ ret = LE(op1, op2); ++ break; ++ case OP_LT: ++ ret = LT(op1, op2); ++ break; ++ case OP_GT: ++ ret = GT(op1, op2); ++ break; ++ case OP_GE: ++ ret = GE(op1, op2); ++ break; ++ case OP_NOOP: ++ ret = true; ++ break; ++ default: ++ pr_info("Invalid OP %d", op); ++ break; ++ } ++ return ret; ++} ++ ++static void update_src_id_vote(struct rule_update_path_info *inp_node, ++ struct rule_node_info *rule_node) ++{ ++ struct rules_def *rule; ++ int i; ++ ++ list_for_each_entry(rule, &rule_node->node_rules, link) { ++ for (i = 0; i < rule->num_src; i++) { ++ if (rule->src_info[i].id == inp_node->id) { ++ rule->src_info[i].ib = inp_node->ib; ++ rule->src_info[i].ab = inp_node->ab; ++ rule->src_info[i].clk = inp_node->clk; ++ } ++ } ++ } ++} ++ ++static u64 get_field(struct rules_def *rule, int src_id) ++{ ++ u64 field = 0; ++ int i; ++ ++ for (i = 0; i < rule->num_src; i++) { ++ switch (rule->rule_ops.src_field) { ++ case FLD_IB: ++ field += rule->src_info[i].ib; ++ break; ++ case FLD_AB: ++ field += rule->src_info[i].ab; ++ break; ++ case FLD_CLK: ++ field += rule->src_info[i].clk; ++ break; ++ } ++ } ++ ++ return field; ++} ++ ++static bool check_rule(struct rules_def *rule, ++ struct 
rule_update_path_info *inp) ++{ ++ bool ret = false; ++ ++ if (!rule) ++ return ret; ++ ++ switch (rule->rule_ops.op) { ++ case OP_LE: ++ case OP_LT: ++ case OP_GT: ++ case OP_GE: ++ { ++ u64 src_field = get_field(rule, inp->id); ++ if (!src_field) ++ ret = false; ++ else ++ ret = do_compare_op(src_field, rule->rule_ops.thresh, ++ rule->rule_ops.op); ++ break; ++ } ++ default: ++ pr_err("Unsupported op %d", rule->rule_ops.op); ++ break; ++ } ++ return ret; ++} ++ ++static void match_rule(struct rule_update_path_info *inp_node, ++ struct rule_node_info *node) ++{ ++ struct rules_def *rule; ++ int i; ++ ++ list_for_each_entry(rule, &node->node_rules, link) { ++ for (i = 0; i < rule->num_src; i++) { ++ if (rule->src_info[i].id == inp_node->id) { ++ if (check_rule(rule, inp_node)) { ++ trace_bus_rules_matches(node->cur_rule, ++ inp_node->id, inp_node->ab, ++ inp_node->ib, inp_node->clk); ++ if (rule->state == ++ RULE_STATE_NOT_APPLIED) ++ rule->state_change = true; ++ rule->state = RULE_STATE_APPLIED; ++ } else { ++ if (rule->state == ++ RULE_STATE_APPLIED) ++ rule->state_change = true; ++ rule->state = RULE_STATE_NOT_APPLIED; ++ } ++ } ++ } ++ } ++} ++ ++static void apply_rule(struct rule_node_info *node, ++ struct list_head *output_list) ++{ ++ struct rules_def *rule; ++ ++ node->cur_rule = -1; ++ list_for_each_entry(rule, &node->node_rules, link) { ++ if ((rule->state == RULE_STATE_APPLIED) && ++ (node->cur_rule == -1)) ++ node->cur_rule = rule->rule_id; ++ ++ if (node->id == NB_ID) { ++ if (rule->state_change) { ++ rule->state_change = false; ++ raw_notifier_call_chain(&node->rule_notify_list, ++ rule->state, (void *)&rule->rule_ops); ++ } ++ } else { ++ if ((rule->state == RULE_STATE_APPLIED) && ++ (node->cur_rule == rule->rule_id)) { ++ node->apply.id = rule->rule_ops.dst_node[0]; ++ node->apply.throttle = rule->rule_ops.mode; ++ node->apply.lim_bw = rule->rule_ops.dst_bw; ++ list_add_tail(&node->apply.link, output_list); ++ } ++ rule->state_change = false; ++ } 
++ } ++ ++} ++ ++int msm_rules_update_path(struct list_head *input_list, ++ struct list_head *output_list) ++{ ++ int ret = 0; ++ struct rule_update_path_info *inp_node; ++ struct rule_node_info *node_it = NULL; ++ ++ mutex_lock(&msm_bus_rules_lock); ++ list_for_each_entry(inp_node, input_list, link) { ++ list_for_each_entry(node_it, &node_list, link) { ++ update_src_id_vote(inp_node, node_it); ++ match_rule(inp_node, node_it); ++ } ++ } ++ ++ list_for_each_entry(node_it, &node_list, link) ++ apply_rule(node_it, output_list); ++ ++ mutex_unlock(&msm_bus_rules_lock); ++ return ret; ++} ++ ++static bool ops_equal(int op1, int op2) ++{ ++ bool ret = false; ++ ++ switch (op1) { ++ case OP_GT: ++ case OP_GE: ++ case OP_LT: ++ case OP_LE: ++ if (abs(op1 - op2) <= 1) ++ ret = true; ++ break; ++ default: ++ ret = (op1 == op2); ++ } ++ ++ return ret; ++} ++ ++static int node_rules_compare(void *priv, struct list_head *a, ++ struct list_head *b) ++{ ++ struct rules_def *ra = container_of(a, struct rules_def, link); ++ struct rules_def *rb = container_of(b, struct rules_def, link); ++ int ret = -1; ++ int64_t th_diff = 0; ++ ++ ++ if (ra->rule_ops.mode == rb->rule_ops.mode) { ++ if (ops_equal(ra->rule_ops.op, rb->rule_ops.op)) { ++ if ((ra->rule_ops.op == OP_LT) || ++ (ra->rule_ops.op == OP_LE)) { ++ th_diff = ra->rule_ops.thresh - ++ rb->rule_ops.thresh; ++ if (th_diff > 0) ++ ret = 1; ++ else ++ ret = -1; ++ } else if ((ra->rule_ops.op == OP_GT) || ++ (ra->rule_ops.op == OP_GE)) { ++ th_diff = rb->rule_ops.thresh - ++ ra->rule_ops.thresh; ++ if (th_diff > 0) ++ ret = 1; ++ else ++ ret = -1; ++ } ++ } else ++ ret = ra->rule_ops.op - rb->rule_ops.op; ++ } else if ((ra->rule_ops.mode == THROTTLE_OFF) && ++ (rb->rule_ops.mode == THROTTLE_ON)) { ++ ret = 1; ++ } else if ((ra->rule_ops.mode == THROTTLE_ON) && ++ (rb->rule_ops.mode == THROTTLE_OFF)) { ++ ret = -1; ++ } ++ ++ return ret; ++} ++ ++static void print_rules(struct rule_node_info *node_it) ++{ ++ struct rules_def 
*node_rule = NULL; ++ int i; ++ ++ if (!node_it) { ++ pr_err("%s: no node for found", __func__); ++ return; ++ } ++ ++ pr_info("\n Now printing rules for Node %d cur rule %d\n", ++ node_it->id, node_it->cur_rule); ++ list_for_each_entry(node_rule, &node_it->node_rules, link) { ++ pr_info("\n num Rules %d rule Id %d\n", ++ node_it->num_rules, node_rule->rule_id); ++ pr_info("Rule: src_field %d\n", node_rule->rule_ops.src_field); ++ for (i = 0; i < node_rule->rule_ops.num_src; i++) ++ pr_info("Rule: src %d\n", ++ node_rule->rule_ops.src_id[i]); ++ for (i = 0; i < node_rule->rule_ops.num_dst; i++) ++ pr_info("Rule: dst %d dst_bw %llu\n", ++ node_rule->rule_ops.dst_node[i], ++ node_rule->rule_ops.dst_bw); ++ pr_info("Rule: thresh %llu op %d mode %d State %d\n", ++ node_rule->rule_ops.thresh, ++ node_rule->rule_ops.op, ++ node_rule->rule_ops.mode, ++ node_rule->state); ++ } ++} ++ ++void print_all_rules(void) ++{ ++ struct rule_node_info *node_it = NULL; ++ ++ list_for_each_entry(node_it, &node_list, link) ++ print_rules(node_it); ++} ++ ++void print_rules_buf(char *buf, int max_buf) ++{ ++ struct rule_node_info *node_it = NULL; ++ struct rules_def *node_rule = NULL; ++ int i; ++ int cnt = 0; ++ ++ list_for_each_entry(node_it, &node_list, link) { ++ cnt += scnprintf(buf + cnt, max_buf - cnt, ++ "\n Now printing rules for Node %d cur_rule %d\n", ++ node_it->id, node_it->cur_rule); ++ list_for_each_entry(node_rule, &node_it->node_rules, link) { ++ cnt += scnprintf(buf + cnt, max_buf - cnt, ++ "\nNum Rules:%d ruleId %d STATE:%d change:%d\n", ++ node_it->num_rules, node_rule->rule_id, ++ node_rule->state, node_rule->state_change); ++ cnt += scnprintf(buf + cnt, max_buf - cnt, ++ "Src_field %d\n", ++ node_rule->rule_ops.src_field); ++ for (i = 0; i < node_rule->rule_ops.num_src; i++) ++ cnt += scnprintf(buf + cnt, max_buf - cnt, ++ "Src %d Cur Ib %llu Ab %llu\n", ++ node_rule->rule_ops.src_id[i], ++ node_rule->src_info[i].ib, ++ node_rule->src_info[i].ab); ++ for (i = 0; i < 
node_rule->rule_ops.num_dst; i++) ++ cnt += scnprintf(buf + cnt, max_buf - cnt, ++ "Dst %d dst_bw %llu\n", ++ node_rule->rule_ops.dst_node[0], ++ node_rule->rule_ops.dst_bw); ++ cnt += scnprintf(buf + cnt, max_buf - cnt, ++ "Thresh %llu op %d mode %d\n", ++ node_rule->rule_ops.thresh, ++ node_rule->rule_ops.op, ++ node_rule->rule_ops.mode); ++ } ++ } ++} ++ ++static int copy_rule(struct bus_rule_type *src, struct rules_def *node_rule, ++ struct notifier_block *nb) ++{ ++ int i; ++ int ret = 0; ++ ++ memcpy(&node_rule->rule_ops, src, ++ sizeof(struct bus_rule_type)); ++ node_rule->rule_ops.src_id = kzalloc( ++ (sizeof(int) * node_rule->rule_ops.num_src), ++ GFP_KERNEL); ++ if (!node_rule->rule_ops.src_id) { ++ pr_err("%s:Failed to allocate for src_id", ++ __func__); ++ return -ENOMEM; ++ } ++ memcpy(node_rule->rule_ops.src_id, src->src_id, ++ sizeof(int) * src->num_src); ++ ++ ++ if (!nb) { ++ node_rule->rule_ops.dst_node = kzalloc( ++ (sizeof(int) * node_rule->rule_ops.num_dst), ++ GFP_KERNEL); ++ if (!node_rule->rule_ops.dst_node) { ++ pr_err("%s:Failed to allocate for src_id", ++ __func__); ++ return -ENOMEM; ++ } ++ memcpy(node_rule->rule_ops.dst_node, src->dst_node, ++ sizeof(int) * src->num_dst); ++ } ++ ++ node_rule->num_src = src->num_src; ++ node_rule->src_info = kzalloc( ++ (sizeof(struct node_vote_info) * node_rule->rule_ops.num_src), ++ GFP_KERNEL); ++ if (!node_rule->src_info) { ++ pr_err("%s:Failed to allocate for src_id", ++ __func__); ++ return -ENOMEM; ++ } ++ for (i = 0; i < src->num_src; i++) ++ node_rule->src_info[i].id = src->src_id[i]; ++ ++ return ret; ++} ++ ++void msm_rule_register(int num_rules, struct bus_rule_type *rule, ++ struct notifier_block *nb) ++{ ++ struct rule_node_info *node = NULL; ++ int i, j; ++ struct rules_def *node_rule = NULL; ++ int num_dst = 0; ++ ++ if (!rule) ++ return; ++ ++ mutex_lock(&msm_bus_rules_lock); ++ for (i = 0; i < num_rules; i++) { ++ if (nb) ++ num_dst = 1; ++ else ++ num_dst = rule[i].num_dst; ++ ++ for 
(j = 0; j < num_dst; j++) { ++ int id = 0; ++ ++ if (nb) ++ id = NB_ID; ++ else ++ id = rule[i].dst_node[j]; ++ ++ node = gen_node(id, nb); ++ if (!node) { ++ pr_info("Error getting rule"); ++ goto exit_rule_register; ++ } ++ node_rule = kzalloc(sizeof(struct rules_def), ++ GFP_KERNEL); ++ if (!node_rule) { ++ pr_err("%s: Failed to allocate for rule", ++ __func__); ++ goto exit_rule_register; ++ } ++ ++ if (copy_rule(&rule[i], node_rule, nb)) { ++ pr_err("Error copying rule"); ++ goto exit_rule_register; ++ } ++ ++ node_rule->rule_id = node->num_rules++; ++ if (nb) ++ node->data = nb; ++ ++ list_add_tail(&node_rule->link, &node->node_rules); ++ } ++ } ++ list_sort(NULL, &node->node_rules, node_rules_compare); ++ ++ if (nb) ++ raw_notifier_chain_register(&node->rule_notify_list, nb); ++exit_rule_register: ++ mutex_unlock(&msm_bus_rules_lock); ++ return; ++} ++ ++static int comp_rules(struct bus_rule_type *rulea, struct bus_rule_type *ruleb) ++{ ++ int ret = 1; ++ ++ if (rulea->num_src == ruleb->num_src) ++ ret = memcmp(rulea->src_id, ruleb->src_id, ++ (sizeof(int) * rulea->num_src)); ++ if (!ret && (rulea->num_dst == ruleb->num_dst)) ++ ret = memcmp(rulea->dst_node, ruleb->dst_node, ++ (sizeof(int) * rulea->num_dst)); ++ if (!ret && (rulea->dst_bw == ruleb->dst_bw) && ++ (rulea->op == ruleb->op) && (rulea->thresh == ruleb->thresh)) ++ ret = 0; ++ ++ return ret; ++} ++ ++void msm_rule_unregister(int num_rules, struct bus_rule_type *rule, ++ struct notifier_block *nb) ++{ ++ int i; ++ struct rule_node_info *node = NULL; ++ struct rule_node_info *node_tmp = NULL; ++ struct rules_def *node_rule; ++ struct rules_def *node_rule_tmp; ++ bool match_found = false; ++ ++ if (!rule) ++ return; ++ ++ mutex_lock(&msm_bus_rules_lock); ++ if (nb) { ++ node = get_node(NB_ID, nb); ++ if (!node) { ++ pr_err("%s: Can't find node", __func__); ++ goto exit_unregister_rule; ++ } ++ ++ list_for_each_entry_safe(node_rule, node_rule_tmp, ++ &node->node_rules, link) { ++ 
list_del(&node_rule->link); ++ kfree(node_rule); ++ node->num_rules--; ++ } ++ raw_notifier_chain_unregister(&node->rule_notify_list, nb); ++ } else { ++ for (i = 0; i < num_rules; i++) { ++ match_found = false; ++ ++ list_for_each_entry(node, &node_list, link) { ++ list_for_each_entry_safe(node_rule, ++ node_rule_tmp, &node->node_rules, link) { ++ if (comp_rules(&node_rule->rule_ops, ++ &rule[i]) == 0) { ++ list_del(&node_rule->link); ++ kfree(node_rule); ++ match_found = true; ++ node->num_rules--; ++ list_sort(NULL, ++ &node->node_rules, ++ node_rules_compare); ++ break; ++ } ++ } ++ } ++ } ++ } ++ ++ list_for_each_entry_safe(node, node_tmp, ++ &node_list, link) { ++ if (!node->num_rules) { ++ pr_debug("Deleting Rule node %d", node->id); ++ list_del(&node->link); ++ kfree(node); ++ } ++ } ++exit_unregister_rule: ++ mutex_unlock(&msm_bus_rules_lock); ++} ++ ++bool msm_rule_are_rules_registered(void) ++{ ++ bool ret = false; ++ ++ if (list_empty(&node_list)) ++ ret = false; ++ else ++ ret = true; ++ ++ return ret; ++} ++ +--- /dev/null ++++ b/drivers/bus/msm_bus/msm_bus_rules.h +@@ -0,0 +1,77 @@ ++/* Copyright (c) 2014, The Linux Foundation. All rights reserved. ++ * ++ * This program is free software; you can redistribute it and/or modify ++ * it under the terms of the GNU General Public License version 2 and ++ * only version 2 as published by the Free Software Foundation. ++ * ++ * This program is distributed in the hope that it will be useful, ++ * but WITHOUT ANY WARRANTY; without even the implied warranty of ++ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the ++ * GNU General Public License for more details. 
++ */ ++ ++#ifndef _ARCH_ARM_MACH_MSM_BUS_RULES_H ++#define _ARCH_ARM_MACH_MSM_BUS_RULES_H ++ ++#include ++#include ++#include ++#include ++ ++#define MAX_NODES (5) ++ ++struct rule_update_path_info { ++ u32 id; ++ u64 ab; ++ u64 ib; ++ u64 clk; ++ struct list_head link; ++}; ++ ++struct rule_apply_rcm_info { ++ u32 id; ++ u64 lim_bw; ++ int throttle; ++ bool after_clk_commit; ++ struct list_head link; ++}; ++ ++struct bus_rule_type { ++ int num_src; ++ int *src_id; ++ int src_field; ++ int op; ++ u64 thresh; ++ int num_dst; ++ int *dst_node; ++ u64 dst_bw; ++ int mode; ++ void *client_data; ++}; ++ ++#if (defined(CONFIG_BUS_TOPOLOGY_ADHOC)) ++void msm_rule_register(int num_rules, struct bus_rule_type *rule, ++ struct notifier_block *nb); ++void msm_rule_unregister(int num_rules, struct bus_rule_type *rule, ++ struct notifier_block *nb); ++void print_rules_buf(char *buf, int count); ++bool msm_rule_are_rules_registered(void); ++#else ++static inline void msm_rule_register(int num_rules, struct bus_rule_type *rule, ++ struct notifier_block *nb) ++{ ++} ++static inline void msm_rule_unregister(int num_rules, ++ struct bus_rule_type *rule, ++ struct notifier_block *nb) ++{ ++} ++static inline void print_rules_buf(char *buf, int count) ++{ ++} ++static inline bool msm_rule_are_rules_registered(void) ++{ ++ return false; ++} ++#endif /* defined(CONFIG_BUS_TOPOLOGY_ADHOC) */ ++#endif /* _ARCH_ARM_MACH_MSM_BUS_RULES_H */ +--- /dev/null ++++ b/drivers/bus/msm_bus/msm_buspm_coresight_adhoc.c +@@ -0,0 +1,189 @@ ++/* Copyright (c) 2014 The Linux Foundation. All rights reserved. ++ * ++ * This program is free software; you can redistribute it and/or modify ++ * it under the terms of the GNU General Public License version 2 and ++ * only version 2 as published by the Free Software Foundation. 
++ * ++ * This program is distributed in the hope that it will be useful, ++ * but WITHOUT ANY WARRANTY; without even the implied warranty of ++ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the ++ * GNU General Public License for more details. ++ */ ++ ++#include ++#include ++#include ++#include ++#include ++#include ++#include ++#include ++#include ++#include ++#include ++#include ++#include ++#include ++ ++struct msmbus_coresight_adhoc_clock_drvdata { ++ int id; ++ struct clk *clk; ++ struct list_head list; ++}; ++ ++struct msmbus_coresight_adhoc_drvdata { ++ struct device *dev; ++ struct coresight_device *csdev; ++ struct coresight_desc *desc; ++ struct list_head clocks; ++}; ++ ++static int msmbus_coresight_enable_adhoc(struct coresight_device *csdev) ++{ ++ struct msmbus_coresight_adhoc_clock_drvdata *clk; ++ struct msmbus_coresight_adhoc_drvdata *drvdata = ++ dev_get_drvdata(csdev->dev.parent); ++ long rate; ++ ++ list_for_each_entry(clk, &drvdata->clocks, list) { ++ if (clk->id == csdev->id) { ++ rate = clk_round_rate(clk->clk, 1L); ++ clk_set_rate(clk->clk, rate); ++ return clk_prepare_enable(clk->clk); ++ } ++ } ++ ++ return -ENOENT; ++} ++ ++static void msmbus_coresight_disable_adhoc(struct coresight_device *csdev) ++{ ++ struct msmbus_coresight_adhoc_clock_drvdata *clk; ++ struct msmbus_coresight_adhoc_drvdata *drvdata = ++ dev_get_drvdata(csdev->dev.parent); ++ ++ list_for_each_entry(clk, &drvdata->clocks, list) { ++ if (clk->id == csdev->id) ++ clk_disable_unprepare(clk->clk); ++ } ++} ++ ++static const struct coresight_ops_source msmbus_coresight_adhoc_source_ops = { ++ .enable = msmbus_coresight_enable_adhoc, ++ .disable = msmbus_coresight_disable_adhoc, ++}; ++ ++static const struct coresight_ops msmbus_coresight_cs_ops = { ++ .source_ops = &msmbus_coresight_adhoc_source_ops, ++}; ++ ++void msmbus_coresight_remove_adhoc(struct platform_device *pdev) ++{ ++ struct msmbus_coresight_adhoc_clock_drvdata *clk, *next_clk; ++ struct 
msmbus_coresight_adhoc_drvdata *drvdata = ++ platform_get_drvdata(pdev); ++ ++ msmbus_coresight_disable_adhoc(drvdata->csdev); ++ coresight_unregister(drvdata->csdev); ++ list_for_each_entry_safe(clk, next_clk, &drvdata->clocks, list) { ++ list_del(&clk->list); ++ devm_kfree(&pdev->dev, clk); ++ } ++ devm_kfree(&pdev->dev, drvdata->desc); ++ devm_kfree(&pdev->dev, drvdata); ++ platform_set_drvdata(pdev, NULL); ++} ++EXPORT_SYMBOL(msmbus_coresight_remove_adhoc); ++ ++static int buspm_of_get_clk_adhoc(struct device_node *of_node, ++ struct msmbus_coresight_adhoc_drvdata *drvdata, int id) ++{ ++ struct msmbus_coresight_adhoc_clock_drvdata *clk; ++ clk = devm_kzalloc(drvdata->dev, sizeof(*clk), GFP_KERNEL); ++ ++ if (!clk) ++ return -ENOMEM; ++ ++ clk->id = id; ++ ++ clk->clk = of_clk_get_by_name(of_node, "bus_clk"); ++ if (IS_ERR(clk->clk)) { ++ pr_err("Error: unable to get clock for coresight node %d\n", ++ id); ++ goto err; ++ } ++ ++ list_add(&clk->list, &drvdata->clocks); ++ return 0; ++ ++err: ++ devm_kfree(drvdata->dev, clk); ++ return -EINVAL; ++} ++ ++int msmbus_coresight_init_adhoc(struct platform_device *pdev, ++ struct device_node *of_node) ++{ ++ int ret; ++ struct device *dev = &pdev->dev; ++ struct coresight_platform_data *pdata; ++ struct msmbus_coresight_adhoc_drvdata *drvdata; ++ struct coresight_desc *desc; ++ ++ pdata = of_get_coresight_platform_data(dev, of_node); ++ if (IS_ERR(pdata)) ++ return PTR_ERR(pdata); ++ ++ drvdata = platform_get_drvdata(pdev); ++ if (IS_ERR_OR_NULL(drvdata)) { ++ drvdata = devm_kzalloc(dev, sizeof(*drvdata), GFP_KERNEL); ++ if (!drvdata) { ++ pr_err("coresight: Alloc for drvdata failed\n"); ++ return -ENOMEM; ++ } ++ INIT_LIST_HEAD(&drvdata->clocks); ++ drvdata->dev = &pdev->dev; ++ platform_set_drvdata(pdev, drvdata); ++ } ++ ret = buspm_of_get_clk_adhoc(of_node, drvdata, pdata->id); ++ if (ret) { ++ pr_err("Error getting clocks\n"); ++ ret = -ENXIO; ++ goto err1; ++ } ++ ++ desc = devm_kzalloc(dev, sizeof(*desc), 
GFP_KERNEL); ++ if (!desc) { ++ pr_err("coresight: Error allocating memory\n"); ++ ret = -ENOMEM; ++ goto err1; ++ } ++ ++ desc->type = CORESIGHT_DEV_TYPE_SOURCE; ++ desc->subtype.source_subtype = CORESIGHT_DEV_SUBTYPE_SOURCE_BUS; ++ desc->ops = &msmbus_coresight_cs_ops; ++ desc->pdata = pdata; ++ desc->dev = &pdev->dev; ++ desc->owner = THIS_MODULE; ++ drvdata->desc = desc; ++ drvdata->csdev = coresight_register(desc); ++ if (IS_ERR(drvdata->csdev)) { ++ pr_err("coresight: Coresight register failed\n"); ++ ret = PTR_ERR(drvdata->csdev); ++ goto err0; ++ } ++ ++ dev_info(dev, "msmbus_coresight initialized\n"); ++ ++ return 0; ++err0: ++ devm_kfree(dev, desc); ++err1: ++ devm_kfree(dev, drvdata); ++ platform_set_drvdata(pdev, NULL); ++ return ret; ++} ++EXPORT_SYMBOL(msmbus_coresight_init_adhoc); ++ ++MODULE_LICENSE("GPL v2"); ++MODULE_DESCRIPTION("MSM BusPM Adhoc CoreSight Driver"); +--- /dev/null ++++ b/drivers/bus/msm_bus/rpm-smd.h +@@ -0,0 +1,268 @@ ++/* Copyright (c) 2012, 2014, The Linux Foundation. All rights reserved. ++ * ++ * This program is free software; you can redistribute it and/or modify ++ * it under the terms of the GNU General Public License version 2 and ++ * only version 2 as published by the Free Software Foundation. ++ * ++ * This program is distributed in the hope that it will be useful, ++ * but WITHOUT ANY WARRANTY; without even the implied warranty of ++ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the ++ * GNU General Public License for more details. ++ * ++ */ ++ ++#ifndef __ARCH_ARM_MACH_MSM_RPM_SMD_H ++#define __ARCH_ARM_MACH_MSM_RPM_SMD_H ++ ++/** ++ * enum msm_rpm_set - RPM enumerations for sleep/active set ++ * %MSM_RPM_CTX_SET_0: Set resource parameters for active mode. ++ * %MSM_RPM_CTX_SET_SLEEP: Set resource parameters for sleep. 
++ */ ++enum msm_rpm_set { ++ MSM_RPM_CTX_ACTIVE_SET, ++ MSM_RPM_CTX_SLEEP_SET, ++}; ++ ++struct msm_rpm_request; ++ ++struct msm_rpm_kvp { ++ uint32_t key; ++ uint32_t length; ++ uint8_t *data; ++}; ++#ifdef CONFIG_MSM_RPM_SMD ++/** ++ * msm_rpm_request() - Creates a parent element to identify the ++ * resource on the RPM, that stores the KVPs for different fields modified ++ * for a hardware resource ++ * ++ * @set: if the device is setting the active/sleep set parameter ++ * for the resource ++ * @rsc_type: unsigned 32 bit integer that identifies the type of the resource ++ * @rsc_id: unsigned 32 bit that uniquely identifies a resource within a type ++ * @num_elements: number of KVPs pairs associated with the resource ++ * ++ * returns pointer to a msm_rpm_request on success, NULL on error ++ */ ++struct msm_rpm_request *msm_rpm_create_request( ++ enum msm_rpm_set set, uint32_t rsc_type, ++ uint32_t rsc_id, int num_elements); ++ ++/** ++ * msm_rpm_request_noirq() - Creates a parent element to identify the ++ * resource on the RPM, that stores the KVPs for different fields modified ++ * for a hardware resource. This function is similar to msm_rpm_create_request ++ * except that it has to be called with interrupts masked. ++ * ++ * @set: if the device is setting the active/sleep set parameter ++ * for the resource ++ * @rsc_type: unsigned 32 bit integer that identifies the type of the resource ++ * @rsc_id: unsigned 32 bit that uniquely identifies a resource within a type ++ * @num_elements: number of KVPs pairs associated with the resource ++ * ++ * returns pointer to a msm_rpm_request on success, NULL on error ++ */ ++struct msm_rpm_request *msm_rpm_create_request_noirq( ++ enum msm_rpm_set set, uint32_t rsc_type, ++ uint32_t rsc_id, int num_elements); ++ ++/** ++ * msm_rpm_add_kvp_data() - Adds a Key value pair to a existing RPM resource. 
++ * ++ * @handle: RPM resource handle to which the data should be appended ++ * @key: unsigned integer identify the parameter modified ++ * @data: byte array that contains the value corresponding to key. ++ * @size: size of data in bytes. ++ * ++ * returns 0 on success or errno ++ */ ++int msm_rpm_add_kvp_data(struct msm_rpm_request *handle, ++ uint32_t key, const uint8_t *data, int size); ++ ++/** ++ * msm_rpm_add_kvp_data_noirq() - Adds a Key value pair to a existing RPM ++ * resource. This function is similar to msm_rpm_add_kvp_data except that it ++ * has to be called with interrupts masked. ++ * ++ * @handle: RPM resource handle to which the data should be appended ++ * @key: unsigned integer identify the parameter modified ++ * @data: byte array that contains the value corresponding to key. ++ * @size: size of data in bytes. ++ * ++ * returns 0 on success or errno ++ */ ++int msm_rpm_add_kvp_data_noirq(struct msm_rpm_request *handle, ++ uint32_t key, const uint8_t *data, int size); ++ ++/** msm_rpm_free_request() - clean up the RPM request handle created with ++ * msm_rpm_create_request ++ * ++ * @handle: RPM resource handle to be cleared. ++ */ ++ ++void msm_rpm_free_request(struct msm_rpm_request *handle); ++ ++/** ++ * msm_rpm_send_request() - Send the RPM messages using SMD. The function ++ * assigns a message id before sending the data out to the RPM. RPM hardware ++ * uses the message id to acknowledge the messages. ++ * ++ * @handle: pointer to the msm_rpm_request for the resource being modified. ++ * ++ * returns non-zero message id on success and zero on a failed transaction. ++ * The drivers use message id to wait for ACK from RPM. ++ */ ++int msm_rpm_send_request(struct msm_rpm_request *handle); ++ ++/** ++ * msm_rpm_send_request_noirq() - Send the RPM messages using SMD. The ++ * function assigns a message id before sending the data out to the RPM. ++ * RPM hardware uses the message id to acknowledge the messages. 
This function ++ * is similar to msm_rpm_send_request except that it has to be called with ++ * interrupts masked. ++ * ++ * @handle: pointer to the msm_rpm_request for the resource being modified. ++ * ++ * returns non-zero message id on success and zero on a failed transaction. ++ * The drivers use message id to wait for ACK from RPM. ++ */ ++int msm_rpm_send_request_noirq(struct msm_rpm_request *handle); ++ ++/** ++ * msm_rpm_wait_for_ack() - A blocking call that waits for acknowledgment of ++ * a message from RPM. ++ * ++ * @msg_id: the return from msm_rpm_send_requests ++ * ++ * returns 0 on success or errno ++ */ ++int msm_rpm_wait_for_ack(uint32_t msg_id); ++ ++/** ++ * msm_rpm_wait_for_ack_noirq() - A blocking call that waits for acknowledgment ++ * of a message from RPM. This function is similar to msm_rpm_wait_for_ack ++ * except that it has to be called with interrupts masked. ++ * ++ * @msg_id: the return from msm_rpm_send_request ++ * ++ * returns 0 on success or errno ++ */ ++int msm_rpm_wait_for_ack_noirq(uint32_t msg_id); ++ ++/** ++ * msm_rpm_send_message() -Wrapper function for clients to send data given an ++ * array of key value pairs. ++ * ++ * @set: if the device is setting the active/sleep set parameter ++ * for the resource ++ * @rsc_type: unsigned 32 bit integer that identifies the type of the resource ++ * @rsc_id: unsigned 32 bit that uniquely identifies a resource within a type ++ * @kvp: array of KVP data. ++ * @nelem: number of KVPs pairs associated with the message. ++ * ++ * returns 0 on success and errno on failure. ++ */ ++int msm_rpm_send_message(enum msm_rpm_set set, uint32_t rsc_type, ++ uint32_t rsc_id, struct msm_rpm_kvp *kvp, int nelems); ++ ++/** ++ * msm_rpm_send_message_noirq() -Wrapper function for clients to send data ++ * given an array of key value pairs. This function is similar to the ++ * msm_rpm_send_message() except that it has to be called with interrupts ++ * disabled. 
Clients should choose the irq version when possible for system ++ * performance. ++ * ++ * @set: if the device is setting the active/sleep set parameter ++ * for the resource ++ * @rsc_type: unsigned 32 bit integer that identifies the type of the resource ++ * @rsc_id: unsigned 32 bit that uniquely identifies a resource within a type ++ * @kvp: array of KVP data. ++ * @nelem: number of KVPs pairs associated with the message. ++ * ++ * returns 0 on success and errno on failure. ++ */ ++int msm_rpm_send_message_noirq(enum msm_rpm_set set, uint32_t rsc_type, ++ uint32_t rsc_id, struct msm_rpm_kvp *kvp, int nelems); ++ ++/** ++ * msm_rpm_driver_init() - Initialization function that registers for a ++ * rpm platform driver. ++ * ++ * returns 0 on success. ++ */ ++int __init msm_rpm_driver_init(void); ++ ++#else ++ ++static inline struct msm_rpm_request *msm_rpm_create_request( ++ enum msm_rpm_set set, uint32_t rsc_type, ++ uint32_t rsc_id, int num_elements) ++{ ++ return NULL; ++} ++ ++static inline struct msm_rpm_request *msm_rpm_create_request_noirq( ++ enum msm_rpm_set set, uint32_t rsc_type, ++ uint32_t rsc_id, int num_elements) ++{ ++ return NULL; ++ ++} ++static inline uint32_t msm_rpm_add_kvp_data(struct msm_rpm_request *handle, ++ uint32_t key, const uint8_t *data, int count) ++{ ++ return 0; ++} ++static inline uint32_t msm_rpm_add_kvp_data_noirq( ++ struct msm_rpm_request *handle, uint32_t key, ++ const uint8_t *data, int count) ++{ ++ return 0; ++} ++ ++static inline void msm_rpm_free_request(struct msm_rpm_request *handle) ++{ ++ return; ++} ++ ++static inline int msm_rpm_send_request(struct msm_rpm_request *handle) ++{ ++ return 0; ++} ++ ++static inline int msm_rpm_send_request_noirq(struct msm_rpm_request *handle) ++{ ++ return 0; ++ ++} ++ ++static inline int msm_rpm_send_message(enum msm_rpm_set set, uint32_t rsc_type, ++ uint32_t rsc_id, struct msm_rpm_kvp *kvp, int nelems) ++{ ++ return 0; ++} ++ ++static inline int msm_rpm_send_message_noirq(enum 
msm_rpm_set set, ++ uint32_t rsc_type, uint32_t rsc_id, struct msm_rpm_kvp *kvp, ++ int nelems) ++{ ++ return 0; ++} ++ ++static inline int msm_rpm_wait_for_ack(uint32_t msg_id) ++{ ++ return 0; ++ ++} ++static inline int msm_rpm_wait_for_ack_noirq(uint32_t msg_id) ++{ ++ return 0; ++} ++ ++static inline int __init msm_rpm_driver_init(void) ++{ ++ return 0; ++} ++#endif ++#endif /*__ARCH_ARM_MACH_MSM_RPM_SMD_H*/ +--- /dev/null ++++ b/include/trace/events/trace_msm_bus.h +@@ -0,0 +1,163 @@ ++/* Copyright (c) 2014, The Linux Foundation. All rights reserved. ++ * ++ * This program is free software; you can redistribute it and/or modify ++ * it under the terms of the GNU General Public License version 2 and ++ * only version 2 as published by the Free Software Foundation. ++ * ++ * This program is distributed in the hope that it will be useful, ++ * but WITHOUT ANY WARRANTY; without even the implied warranty of ++ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the ++ * GNU General Public License for more details. 
++ */ ++ ++#undef TRACE_SYSTEM ++#define TRACE_SYSTEM msm_bus ++ ++#if !defined(_TRACE_MSM_BUS_H) || defined(TRACE_HEADER_MULTI_READ) ++#define _TRACE_MSM_BUS_H ++ ++#include ++ ++TRACE_EVENT(bus_update_request, ++ ++ TP_PROTO(int sec, int nsec, const char *name, unsigned int index, ++ int src, int dest, unsigned long long ab, ++ unsigned long long ib), ++ ++ TP_ARGS(sec, nsec, name, index, src, dest, ab, ib), ++ ++ TP_STRUCT__entry( ++ __field(int, sec) ++ __field(int, nsec) ++ __string(name, name) ++ __field(u32, index) ++ __field(int, src) ++ __field(int, dest) ++ __field(u64, ab) ++ __field(u64, ib) ++ ), ++ ++ TP_fast_assign( ++ __entry->sec = sec; ++ __entry->nsec = nsec; ++ __assign_str(name, name); ++ __entry->index = index; ++ __entry->src = src; ++ __entry->dest = dest; ++ __entry->ab = ab; ++ __entry->ib = ib; ++ ), ++ ++ TP_printk("time= %d.%d name=%s index=%u src=%d dest=%d ab=%llu ib=%llu", ++ __entry->sec, ++ __entry->nsec, ++ __get_str(name), ++ (unsigned int)__entry->index, ++ __entry->src, ++ __entry->dest, ++ (unsigned long long)__entry->ab, ++ (unsigned long long)__entry->ib) ++); ++ ++TRACE_EVENT(bus_bimc_config_limiter, ++ ++ TP_PROTO(int mas_id, unsigned long long cur_lim_bw), ++ ++ TP_ARGS(mas_id, cur_lim_bw), ++ ++ TP_STRUCT__entry( ++ __field(int, mas_id) ++ __field(u64, cur_lim_bw) ++ ), ++ ++ TP_fast_assign( ++ __entry->mas_id = mas_id; ++ __entry->cur_lim_bw = cur_lim_bw; ++ ), ++ ++ TP_printk("Master=%d cur_lim_bw=%llu", ++ __entry->mas_id, ++ (unsigned long long)__entry->cur_lim_bw) ++); ++ ++TRACE_EVENT(bus_avail_bw, ++ ++ TP_PROTO(unsigned long long cur_bimc_bw, unsigned long long cur_mdp_bw), ++ ++ TP_ARGS(cur_bimc_bw, cur_mdp_bw), ++ ++ TP_STRUCT__entry( ++ __field(u64, cur_bimc_bw) ++ __field(u64, cur_mdp_bw) ++ ), ++ ++ TP_fast_assign( ++ __entry->cur_bimc_bw = cur_bimc_bw; ++ __entry->cur_mdp_bw = cur_mdp_bw; ++ ), ++ ++ TP_printk("cur_bimc_bw = %llu cur_mdp_bw = %llu", ++ (unsigned long long)__entry->cur_bimc_bw, ++ (unsigned 
long long)__entry->cur_mdp_bw) ++); ++ ++TRACE_EVENT(bus_rules_matches, ++ ++ TP_PROTO(int node_id, int rule_id, unsigned long long node_ab, ++ unsigned long long node_ib, unsigned long long node_clk), ++ ++ TP_ARGS(node_id, rule_id, node_ab, node_ib, node_clk), ++ ++ TP_STRUCT__entry( ++ __field(int, node_id) ++ __field(int, rule_id) ++ __field(u64, node_ab) ++ __field(u64, node_ib) ++ __field(u64, node_clk) ++ ), ++ ++ TP_fast_assign( ++ __entry->node_id = node_id; ++ __entry->rule_id = rule_id; ++ __entry->node_ab = node_ab; ++ __entry->node_ib = node_ib; ++ __entry->node_clk = node_clk; ++ ), ++ ++ TP_printk("Rule match node%d rule%d node-ab%llu:ib%llu:clk%llu", ++ __entry->node_id, __entry->rule_id, ++ (unsigned long long)__entry->node_ab, ++ (unsigned long long)__entry->node_ib, ++ (unsigned long long)__entry->node_clk) ++); ++ ++TRACE_EVENT(bus_bke_params, ++ ++ TP_PROTO(u32 gc, u32 gp, u32 thl, u32 thm, u32 thh), ++ ++ TP_ARGS(gc, gp, thl, thm, thh), ++ ++ TP_STRUCT__entry( ++ __field(u32, gc) ++ __field(u32, gp) ++ __field(u32, thl) ++ __field(u32, thm) ++ __field(u32, thh) ++ ), ++ ++ TP_fast_assign( ++ __entry->gc = gc; ++ __entry->gp = gp; ++ __entry->thl = thl; ++ __entry->thm = thm; ++ __entry->thh = thh; ++ ), ++ ++ TP_printk("BKE Params GC=0x%x GP=0x%x THL=0x%x THM=0x%x THH=0x%x", ++ __entry->gc, __entry->gp, __entry->thl, __entry->thm, ++ __entry->thh) ++); ++ ++#endif ++#define TRACE_INCLUDE_FILE trace_msm_bus ++#include diff --git a/target/linux/ipq40xx/patches-4.9/400-mtd-ubi-add-quirk-to-autoload-ubi-on-rt-ac58u.patch b/target/linux/ipq40xx/patches-4.9/400-mtd-ubi-add-quirk-to-autoload-ubi-on-rt-ac58u.patch new file mode 100644 index 000000000..ee19f363b --- /dev/null +++ b/target/linux/ipq40xx/patches-4.9/400-mtd-ubi-add-quirk-to-autoload-ubi-on-rt-ac58u.patch @@ -0,0 +1,29 @@ +From b8f3a7ccbeca5bdbd1b6210b94b38d3fef2dd0bd Mon Sep 17 00:00:00 2001 +From: Christian Lamparter +Date: Thu, 19 Jan 2017 01:57:22 +0100 +Subject: [PATCH 16/38] mtd: 
ubi: add auto_attach HACK for the ASUS RT-AC58U + +This patch adds a hack that allows UBI's autoattach feature +to work with the custom ASUS UBI_DEV partition name. + +This is necessary because the vendor's u-boot doesn't leave +the bootargs / cmdline alone, so the it can't be overwritten +easily otherwise. + +Signed-off-by: Christian Lamparter +--- + drivers/mtd/ubi/build.c | 3 +++ + 1 file changed, 3 insertions(+) + +--- a/drivers/mtd/ubi/build.c ++++ b/drivers/mtd/ubi/build.c +@@ -1225,6 +1225,9 @@ static void __init ubi_auto_attach(void) + mtd = open_mtd_device("ubi"); + if (IS_ERR(mtd)) + mtd = open_mtd_device("data"); ++ /* Hack for the Asus RT-AC58U */ ++ if (IS_ERR(mtd)) ++ mtd = open_mtd_device("UBI_DEV"); + + if (!IS_ERR(mtd)) { + size_t len; diff --git a/target/linux/ipq40xx/patches-4.9/605-net-IPQ4019-needs-rfs-vlan_tag-callbacks-in.patch b/target/linux/ipq40xx/patches-4.9/605-net-IPQ4019-needs-rfs-vlan_tag-callbacks-in.patch new file mode 100644 index 000000000..d2721875a --- /dev/null +++ b/target/linux/ipq40xx/patches-4.9/605-net-IPQ4019-needs-rfs-vlan_tag-callbacks-in.patch @@ -0,0 +1,53 @@ +From 7c129254adb1093d10a62ed7bf7b956fcc6ffe34 Mon Sep 17 00:00:00 2001 +From: Rakesh Nair +Date: Wed, 20 Jul 2016 15:02:01 +0530 +Subject: [PATCH] net: IPQ4019 needs rfs/vlan_tag callbacks in + netdev_ops + +Add callback support to get default vlan tag and register +receive flow steering filter. + +Used by IPQ4019 ess-edma driver. 
+ +BUG=chrome-os-partner:33096 +TEST=none + +Change-Id: I266070e4a0fbe4a0d9966fe79a71e50ec4f26c75 +Signed-off-by: Rakesh Nair +Reviewed-on: https://chromium-review.googlesource.com/362203 +Commit-Ready: Grant Grundler +Tested-by: Grant Grundler +Reviewed-by: Grant Grundler +--- + include/linux/netdevice.h | 13 +++++++++++++ + 1 file changed, 13 insertions(+) + +--- a/include/linux/netdevice.h ++++ b/include/linux/netdevice.h +@@ -725,6 +725,16 @@ struct xps_map { + #define XPS_MIN_MAP_ALLOC ((L1_CACHE_ALIGN(offsetof(struct xps_map, queues[1])) \ + - sizeof(struct xps_map)) / sizeof(u16)) + ++#ifdef CONFIG_RFS_ACCEL ++typedef int (*set_rfs_filter_callback_t)(struct net_device *dev, ++ __be32 src, ++ __be32 dst, ++ __be16 sport, ++ __be16 dport, ++ u8 proto, ++ u16 rxq_index, ++ u32 action); ++#endif + /* + * This structure holds all XPS maps for device. Maps are indexed by CPU. + */ +@@ -1251,6 +1261,9 @@ struct net_device_ops { + const struct sk_buff *skb, + u16 rxq_index, + u32 flow_id); ++ int (*ndo_register_rfs_filter)(struct net_device *dev, ++ set_rfs_filter_callback_t set_filter); ++ int (*ndo_get_default_vlan_tag)(struct net_device *net); + #endif + int (*ndo_add_slave)(struct net_device *dev, + struct net_device *slave_dev); diff --git a/target/linux/ipq40xx/patches-4.9/700-net-add-qualcomm-mdio-and-phy.patch b/target/linux/ipq40xx/patches-4.9/700-net-add-qualcomm-mdio-and-phy.patch new file mode 100644 index 000000000..003524312 --- /dev/null +++ b/target/linux/ipq40xx/patches-4.9/700-net-add-qualcomm-mdio-and-phy.patch @@ -0,0 +1,2690 @@ +From 5a71a2005a2e1e6bbe36f00386c495ad6626beb2 Mon Sep 17 00:00:00 2001 +From: Christian Lamparter +Date: Thu, 19 Jan 2017 01:59:43 +0100 +Subject: [PATCH 30/38] NET: add qualcomm mdio and PHY + +--- + drivers/net/phy/Kconfig | 14 ++++++++++++++ + drivers/net/phy/Makefile | 2 ++ + 2 files changed, 16 insertions(+) + +--- a/drivers/net/phy/Kconfig ++++ b/drivers/net/phy/Kconfig +@@ -408,6 +408,20 @@ config 
XILINX_GMII2RGMII + the Reduced Gigabit Media Independent Interface(RGMII) between + Ethernet physical media devices and the Gigabit Ethernet controller. + ++config MDIO_IPQ40XX ++ tristate "Qualcomm Atheros ipq40xx MDIO interface" ++ depends on HAS_IOMEM && OF ++ ---help--- ++ This driver supports the MDIO interface found in Qualcomm ++ Atheros ipq40xx Soc chip. ++ ++config AR40XX_PHY ++ tristate "Driver for Qualcomm Atheros IPQ40XX switches" ++ depends on HAS_IOMEM && OF ++ select SWCONFIG ++ ---help--- ++ This is the driver for Qualcomm Atheros IPQ40XX ESS switches. ++ + endif # PHYLIB + + config MICREL_KS8995MA +--- a/drivers/net/phy/Makefile ++++ b/drivers/net/phy/Makefile +@@ -32,6 +32,7 @@ obj-$(CONFIG_MDIO_BUS_MUX_MMIOREG) += md + obj-$(CONFIG_MDIO_CAVIUM) += mdio-cavium.o + obj-$(CONFIG_MDIO_GPIO) += mdio-gpio.o + obj-$(CONFIG_MDIO_HISI_FEMAC) += mdio-hisi-femac.o ++obj-$(CONFIG_MDIO_IPQ40XX) += mdio-ipq40xx.o + obj-$(CONFIG_MDIO_MOXART) += mdio-moxart.o + obj-$(CONFIG_MDIO_OCTEON) += mdio-octeon.o + obj-$(CONFIG_MDIO_SUN4I) += mdio-sun4i.o +@@ -40,6 +41,7 @@ obj-$(CONFIG_MDIO_XGENE) += mdio-xgene.o + + obj-$(CONFIG_AMD_PHY) += amd.o + obj-$(CONFIG_AQUANTIA_PHY) += aquantia.o ++obj-$(CONFIG_AR40XX_PHY) += ar40xx.o + obj-$(CONFIG_AT803X_PHY) += at803x.o + obj-$(CONFIG_BCM63XX_PHY) += bcm63xx.o + obj-$(CONFIG_BCM7XXX_PHY) += bcm7xxx.o +--- /dev/null ++++ b/drivers/net/phy/ar40xx.c +@@ -0,0 +1,2090 @@ ++/* ++ * Copyright (c) 2016, The Linux Foundation. All rights reserved. ++ * ++ * Permission to use, copy, modify, and/or distribute this software for ++ * any purpose with or without fee is hereby granted, provided that the ++ * above copyright notice and this permission notice appear in all copies. ++ * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES ++ * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF ++ * MERCHANTABILITY AND FITNESS. 
IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ++ * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES ++ * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN ++ * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT ++ * OF OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. ++ */ ++ ++#include ++#include ++#include ++#include ++#include ++#include ++#include ++#include ++#include ++#include ++#include ++#include ++#include ++#include ++ ++#include "ar40xx.h" ++ ++static struct ar40xx_priv *ar40xx_priv; ++ ++#define MIB_DESC(_s , _o, _n) \ ++ { \ ++ .size = (_s), \ ++ .offset = (_o), \ ++ .name = (_n), \ ++ } ++ ++static const struct ar40xx_mib_desc ar40xx_mibs[] = { ++ MIB_DESC(1, AR40XX_STATS_RXBROAD, "RxBroad"), ++ MIB_DESC(1, AR40XX_STATS_RXPAUSE, "RxPause"), ++ MIB_DESC(1, AR40XX_STATS_RXMULTI, "RxMulti"), ++ MIB_DESC(1, AR40XX_STATS_RXFCSERR, "RxFcsErr"), ++ MIB_DESC(1, AR40XX_STATS_RXALIGNERR, "RxAlignErr"), ++ MIB_DESC(1, AR40XX_STATS_RXRUNT, "RxRunt"), ++ MIB_DESC(1, AR40XX_STATS_RXFRAGMENT, "RxFragment"), ++ MIB_DESC(1, AR40XX_STATS_RX64BYTE, "Rx64Byte"), ++ MIB_DESC(1, AR40XX_STATS_RX128BYTE, "Rx128Byte"), ++ MIB_DESC(1, AR40XX_STATS_RX256BYTE, "Rx256Byte"), ++ MIB_DESC(1, AR40XX_STATS_RX512BYTE, "Rx512Byte"), ++ MIB_DESC(1, AR40XX_STATS_RX1024BYTE, "Rx1024Byte"), ++ MIB_DESC(1, AR40XX_STATS_RX1518BYTE, "Rx1518Byte"), ++ MIB_DESC(1, AR40XX_STATS_RXMAXBYTE, "RxMaxByte"), ++ MIB_DESC(1, AR40XX_STATS_RXTOOLONG, "RxTooLong"), ++ MIB_DESC(2, AR40XX_STATS_RXGOODBYTE, "RxGoodByte"), ++ MIB_DESC(2, AR40XX_STATS_RXBADBYTE, "RxBadByte"), ++ MIB_DESC(1, AR40XX_STATS_RXOVERFLOW, "RxOverFlow"), ++ MIB_DESC(1, AR40XX_STATS_FILTERED, "Filtered"), ++ MIB_DESC(1, AR40XX_STATS_TXBROAD, "TxBroad"), ++ MIB_DESC(1, AR40XX_STATS_TXPAUSE, "TxPause"), ++ MIB_DESC(1, AR40XX_STATS_TXMULTI, "TxMulti"), ++ MIB_DESC(1, AR40XX_STATS_TXUNDERRUN, "TxUnderRun"), ++ MIB_DESC(1, AR40XX_STATS_TX64BYTE, "Tx64Byte"), ++ 
MIB_DESC(1, AR40XX_STATS_TX128BYTE, "Tx128Byte"), ++ MIB_DESC(1, AR40XX_STATS_TX256BYTE, "Tx256Byte"), ++ MIB_DESC(1, AR40XX_STATS_TX512BYTE, "Tx512Byte"), ++ MIB_DESC(1, AR40XX_STATS_TX1024BYTE, "Tx1024Byte"), ++ MIB_DESC(1, AR40XX_STATS_TX1518BYTE, "Tx1518Byte"), ++ MIB_DESC(1, AR40XX_STATS_TXMAXBYTE, "TxMaxByte"), ++ MIB_DESC(1, AR40XX_STATS_TXOVERSIZE, "TxOverSize"), ++ MIB_DESC(2, AR40XX_STATS_TXBYTE, "TxByte"), ++ MIB_DESC(1, AR40XX_STATS_TXCOLLISION, "TxCollision"), ++ MIB_DESC(1, AR40XX_STATS_TXABORTCOL, "TxAbortCol"), ++ MIB_DESC(1, AR40XX_STATS_TXMULTICOL, "TxMultiCol"), ++ MIB_DESC(1, AR40XX_STATS_TXSINGLECOL, "TxSingleCol"), ++ MIB_DESC(1, AR40XX_STATS_TXEXCDEFER, "TxExcDefer"), ++ MIB_DESC(1, AR40XX_STATS_TXDEFER, "TxDefer"), ++ MIB_DESC(1, AR40XX_STATS_TXLATECOL, "TxLateCol"), ++}; ++ ++static u32 ++ar40xx_read(struct ar40xx_priv *priv, int reg) ++{ ++ return readl(priv->hw_addr + reg); ++} ++ ++static u32 ++ar40xx_psgmii_read(struct ar40xx_priv *priv, int reg) ++{ ++ return readl(priv->psgmii_hw_addr + reg); ++} ++ ++static void ++ar40xx_write(struct ar40xx_priv *priv, int reg, u32 val) ++{ ++ writel(val, priv->hw_addr + reg); ++} ++ ++static u32 ++ar40xx_rmw(struct ar40xx_priv *priv, int reg, u32 mask, u32 val) ++{ ++ u32 ret; ++ ++ ret = ar40xx_read(priv, reg); ++ ret &= ~mask; ++ ret |= val; ++ ar40xx_write(priv, reg, ret); ++ return ret; ++} ++ ++static void ++ar40xx_psgmii_write(struct ar40xx_priv *priv, int reg, u32 val) ++{ ++ writel(val, priv->psgmii_hw_addr + reg); ++} ++ ++static void ++ar40xx_phy_dbg_write(struct ar40xx_priv *priv, int phy_addr, ++ u16 dbg_addr, u16 dbg_data) ++{ ++ struct mii_bus *bus = priv->mii_bus; ++ ++ mutex_lock(&bus->mdio_lock); ++ bus->write(bus, phy_addr, AR40XX_MII_ATH_DBG_ADDR, dbg_addr); ++ bus->write(bus, phy_addr, AR40XX_MII_ATH_DBG_DATA, dbg_data); ++ mutex_unlock(&bus->mdio_lock); ++} ++ ++static void ++ar40xx_phy_dbg_read(struct ar40xx_priv *priv, int phy_addr, ++ u16 dbg_addr, u16 *dbg_data) ++{ ++ 
struct mii_bus *bus = priv->mii_bus; ++ ++ mutex_lock(&bus->mdio_lock); ++ bus->write(bus, phy_addr, AR40XX_MII_ATH_DBG_ADDR, dbg_addr); ++ *dbg_data = bus->read(bus, phy_addr, AR40XX_MII_ATH_DBG_DATA); ++ mutex_unlock(&bus->mdio_lock); ++} ++ ++static void ++ar40xx_phy_mmd_write(struct ar40xx_priv *priv, u32 phy_id, ++ u16 mmd_num, u16 reg_id, u16 reg_val) ++{ ++ struct mii_bus *bus = priv->mii_bus; ++ ++ mutex_lock(&bus->mdio_lock); ++ bus->write(bus, phy_id, ++ AR40XX_MII_ATH_MMD_ADDR, mmd_num); ++ bus->write(bus, phy_id, ++ AR40XX_MII_ATH_MMD_DATA, reg_id); ++ bus->write(bus, phy_id, ++ AR40XX_MII_ATH_MMD_ADDR, ++ 0x4000 | mmd_num); ++ bus->write(bus, phy_id, ++ AR40XX_MII_ATH_MMD_DATA, reg_val); ++ mutex_unlock(&bus->mdio_lock); ++} ++ ++static u16 ++ar40xx_phy_mmd_read(struct ar40xx_priv *priv, u32 phy_id, ++ u16 mmd_num, u16 reg_id) ++{ ++ u16 value; ++ struct mii_bus *bus = priv->mii_bus; ++ ++ mutex_lock(&bus->mdio_lock); ++ bus->write(bus, phy_id, ++ AR40XX_MII_ATH_MMD_ADDR, mmd_num); ++ bus->write(bus, phy_id, ++ AR40XX_MII_ATH_MMD_DATA, reg_id); ++ bus->write(bus, phy_id, ++ AR40XX_MII_ATH_MMD_ADDR, ++ 0x4000 | mmd_num); ++ value = bus->read(bus, phy_id, AR40XX_MII_ATH_MMD_DATA); ++ mutex_unlock(&bus->mdio_lock); ++ return value; ++} ++ ++/* Start of swconfig support */ ++ ++static void ++ar40xx_phy_poll_reset(struct ar40xx_priv *priv) ++{ ++ u32 i, in_reset, retries = 500; ++ struct mii_bus *bus = priv->mii_bus; ++ ++ /* Assume RESET was recently issued to some or all of the phys */ ++ in_reset = GENMASK(AR40XX_NUM_PHYS - 1, 0); ++ ++ while (retries--) { ++ /* 1ms should be plenty of time. 
++ * 802.3 spec allows for a max wait time of 500ms ++ */ ++ usleep_range(1000, 2000); ++ ++ for (i = 0; i < AR40XX_NUM_PHYS; i++) { ++ int val; ++ ++ /* skip devices which have completed reset */ ++ if (!(in_reset & BIT(i))) ++ continue; ++ ++ val = mdiobus_read(bus, i, MII_BMCR); ++ if (val < 0) ++ continue; ++ ++ /* mark when phy is no longer in reset state */ ++ if (!(val & BMCR_RESET)) ++ in_reset &= ~BIT(i); ++ } ++ ++ if (!in_reset) ++ return; ++ } ++ ++ dev_warn(&bus->dev, "Failed to reset all phys! (in_reset: 0x%x)\n", ++ in_reset); ++} ++ ++static void ++ar40xx_phy_init(struct ar40xx_priv *priv) ++{ ++ int i; ++ struct mii_bus *bus; ++ u16 val; ++ ++ bus = priv->mii_bus; ++ for (i = 0; i < AR40XX_NUM_PORTS - 1; i++) { ++ ar40xx_phy_dbg_read(priv, i, AR40XX_PHY_DEBUG_0, &val); ++ val &= ~AR40XX_PHY_MANU_CTRL_EN; ++ ar40xx_phy_dbg_write(priv, i, AR40XX_PHY_DEBUG_0, val); ++ mdiobus_write(bus, i, ++ MII_ADVERTISE, ADVERTISE_ALL | ++ ADVERTISE_PAUSE_CAP | ++ ADVERTISE_PAUSE_ASYM); ++ mdiobus_write(bus, i, MII_CTRL1000, ADVERTISE_1000FULL); ++ mdiobus_write(bus, i, MII_BMCR, BMCR_RESET | BMCR_ANENABLE); ++ } ++ ++ ar40xx_phy_poll_reset(priv); ++} ++ ++static void ++ar40xx_port_phy_linkdown(struct ar40xx_priv *priv) ++{ ++ struct mii_bus *bus; ++ int i; ++ u16 val; ++ ++ bus = priv->mii_bus; ++ for (i = 0; i < AR40XX_NUM_PORTS - 1; i++) { ++ mdiobus_write(bus, i, MII_CTRL1000, 0); ++ mdiobus_write(bus, i, MII_ADVERTISE, 0); ++ mdiobus_write(bus, i, MII_BMCR, BMCR_RESET | BMCR_ANENABLE); ++ ar40xx_phy_dbg_read(priv, i, AR40XX_PHY_DEBUG_0, &val); ++ val |= AR40XX_PHY_MANU_CTRL_EN; ++ ar40xx_phy_dbg_write(priv, i, AR40XX_PHY_DEBUG_0, val); ++ /* disable transmit */ ++ ar40xx_phy_dbg_read(priv, i, AR40XX_PHY_DEBUG_2, &val); ++ val &= 0xf00f; ++ ar40xx_phy_dbg_write(priv, i, AR40XX_PHY_DEBUG_2, val); ++ } ++} ++ ++static void ++ar40xx_set_mirror_regs(struct ar40xx_priv *priv) ++{ ++ int port; ++ ++ /* reset all mirror registers */ ++ ar40xx_rmw(priv, 
AR40XX_REG_FWD_CTRL0, ++ AR40XX_FWD_CTRL0_MIRROR_PORT, ++ (0xF << AR40XX_FWD_CTRL0_MIRROR_PORT_S)); ++ for (port = 0; port < AR40XX_NUM_PORTS; port++) { ++ ar40xx_rmw(priv, AR40XX_REG_PORT_LOOKUP(port), ++ AR40XX_PORT_LOOKUP_ING_MIRROR_EN, 0); ++ ++ ar40xx_rmw(priv, AR40XX_REG_PORT_HOL_CTRL1(port), ++ AR40XX_PORT_HOL_CTRL1_EG_MIRROR_EN, 0); ++ } ++ ++ /* now enable mirroring if necessary */ ++ if (priv->source_port >= AR40XX_NUM_PORTS || ++ priv->monitor_port >= AR40XX_NUM_PORTS || ++ priv->source_port == priv->monitor_port) { ++ return; ++ } ++ ++ ar40xx_rmw(priv, AR40XX_REG_FWD_CTRL0, ++ AR40XX_FWD_CTRL0_MIRROR_PORT, ++ (priv->monitor_port << AR40XX_FWD_CTRL0_MIRROR_PORT_S)); ++ ++ if (priv->mirror_rx) ++ ar40xx_rmw(priv, AR40XX_REG_PORT_LOOKUP(priv->source_port), 0, ++ AR40XX_PORT_LOOKUP_ING_MIRROR_EN); ++ ++ if (priv->mirror_tx) ++ ar40xx_rmw(priv, AR40XX_REG_PORT_HOL_CTRL1(priv->source_port), ++ 0, AR40XX_PORT_HOL_CTRL1_EG_MIRROR_EN); ++} ++ ++static int ++ar40xx_sw_get_ports(struct switch_dev *dev, struct switch_val *val) ++{ ++ struct ar40xx_priv *priv = swdev_to_ar40xx(dev); ++ u8 ports = priv->vlan_table[val->port_vlan]; ++ int i; ++ ++ val->len = 0; ++ for (i = 0; i < dev->ports; i++) { ++ struct switch_port *p; ++ ++ if (!(ports & BIT(i))) ++ continue; ++ ++ p = &val->value.ports[val->len++]; ++ p->id = i; ++ if ((priv->vlan_tagged & BIT(i)) || ++ (priv->pvid[i] != val->port_vlan)) ++ p->flags = BIT(SWITCH_PORT_FLAG_TAGGED); ++ else ++ p->flags = 0; ++ } ++ return 0; ++} ++ ++static int ++ar40xx_sw_set_ports(struct switch_dev *dev, struct switch_val *val) ++{ ++ struct ar40xx_priv *priv = swdev_to_ar40xx(dev); ++ u8 *vt = &priv->vlan_table[val->port_vlan]; ++ int i; ++ ++ *vt = 0; ++ for (i = 0; i < val->len; i++) { ++ struct switch_port *p = &val->value.ports[i]; ++ ++ if (p->flags & BIT(SWITCH_PORT_FLAG_TAGGED)) { ++ if (val->port_vlan == priv->pvid[p->id]) ++ priv->vlan_tagged |= BIT(p->id); ++ } else { ++ priv->vlan_tagged &= ~BIT(p->id); ++ 
priv->pvid[p->id] = val->port_vlan; ++ } ++ ++ *vt |= BIT(p->id); ++ } ++ return 0; ++} ++ ++static int ++ar40xx_reg_wait(struct ar40xx_priv *priv, u32 reg, u32 mask, u32 val, ++ unsigned timeout) ++{ ++ int i; ++ ++ for (i = 0; i < timeout; i++) { ++ u32 t; ++ ++ t = ar40xx_read(priv, reg); ++ if ((t & mask) == val) ++ return 0; ++ ++ usleep_range(1000, 2000); ++ } ++ ++ return -ETIMEDOUT; ++} ++ ++static int ++ar40xx_mib_op(struct ar40xx_priv *priv, u32 op) ++{ ++ int ret; ++ ++ lockdep_assert_held(&priv->mib_lock); ++ ++ /* Capture the hardware statistics for all ports */ ++ ar40xx_rmw(priv, AR40XX_REG_MIB_FUNC, ++ AR40XX_MIB_FUNC, (op << AR40XX_MIB_FUNC_S)); ++ ++ /* Wait for the capturing to complete. */ ++ ret = ar40xx_reg_wait(priv, AR40XX_REG_MIB_FUNC, ++ AR40XX_MIB_BUSY, 0, 10); ++ ++ return ret; ++} ++ ++static void ++ar40xx_mib_fetch_port_stat(struct ar40xx_priv *priv, int port, bool flush) ++{ ++ unsigned int base; ++ u64 *mib_stats; ++ int i; ++ u32 num_mibs = ARRAY_SIZE(ar40xx_mibs); ++ ++ WARN_ON(port >= priv->dev.ports); ++ ++ lockdep_assert_held(&priv->mib_lock); ++ ++ base = AR40XX_REG_PORT_STATS_START + ++ AR40XX_REG_PORT_STATS_LEN * port; ++ ++ mib_stats = &priv->mib_stats[port * num_mibs]; ++ if (flush) { ++ u32 len; ++ ++ len = num_mibs * sizeof(*mib_stats); ++ memset(mib_stats, 0, len); ++ return; ++ } ++ for (i = 0; i < num_mibs; i++) { ++ const struct ar40xx_mib_desc *mib; ++ u64 t; ++ ++ mib = &ar40xx_mibs[i]; ++ t = ar40xx_read(priv, base + mib->offset); ++ if (mib->size == 2) { ++ u64 hi; ++ ++ hi = ar40xx_read(priv, base + mib->offset + 4); ++ t |= hi << 32; ++ } ++ ++ mib_stats[i] += t; ++ } ++} ++ ++static int ++ar40xx_mib_capture(struct ar40xx_priv *priv) ++{ ++ return ar40xx_mib_op(priv, AR40XX_MIB_FUNC_CAPTURE); ++} ++ ++static int ++ar40xx_mib_flush(struct ar40xx_priv *priv) ++{ ++ return ar40xx_mib_op(priv, AR40XX_MIB_FUNC_FLUSH); ++} ++ ++static int ++ar40xx_sw_set_reset_mibs(struct switch_dev *dev, ++ const struct switch_attr 
*attr, ++ struct switch_val *val) ++{ ++ struct ar40xx_priv *priv = swdev_to_ar40xx(dev); ++ unsigned int len; ++ int ret; ++ u32 num_mibs = ARRAY_SIZE(ar40xx_mibs); ++ ++ mutex_lock(&priv->mib_lock); ++ ++ len = priv->dev.ports * num_mibs * sizeof(*priv->mib_stats); ++ memset(priv->mib_stats, 0, len); ++ ret = ar40xx_mib_flush(priv); ++ ++ mutex_unlock(&priv->mib_lock); ++ return ret; ++} ++ ++static int ++ar40xx_sw_set_vlan(struct switch_dev *dev, const struct switch_attr *attr, ++ struct switch_val *val) ++{ ++ struct ar40xx_priv *priv = swdev_to_ar40xx(dev); ++ ++ priv->vlan = !!val->value.i; ++ return 0; ++} ++ ++static int ++ar40xx_sw_get_vlan(struct switch_dev *dev, const struct switch_attr *attr, ++ struct switch_val *val) ++{ ++ struct ar40xx_priv *priv = swdev_to_ar40xx(dev); ++ ++ val->value.i = priv->vlan; ++ return 0; ++} ++ ++static int ++ar40xx_sw_set_mirror_rx_enable(struct switch_dev *dev, ++ const struct switch_attr *attr, ++ struct switch_val *val) ++{ ++ struct ar40xx_priv *priv = swdev_to_ar40xx(dev); ++ ++ mutex_lock(&priv->reg_mutex); ++ priv->mirror_rx = !!val->value.i; ++ ar40xx_set_mirror_regs(priv); ++ mutex_unlock(&priv->reg_mutex); ++ ++ return 0; ++} ++ ++static int ++ar40xx_sw_get_mirror_rx_enable(struct switch_dev *dev, ++ const struct switch_attr *attr, ++ struct switch_val *val) ++{ ++ struct ar40xx_priv *priv = swdev_to_ar40xx(dev); ++ ++ mutex_lock(&priv->reg_mutex); ++ val->value.i = priv->mirror_rx; ++ mutex_unlock(&priv->reg_mutex); ++ return 0; ++} ++ ++static int ++ar40xx_sw_set_mirror_tx_enable(struct switch_dev *dev, ++ const struct switch_attr *attr, ++ struct switch_val *val) ++{ ++ struct ar40xx_priv *priv = swdev_to_ar40xx(dev); ++ ++ mutex_lock(&priv->reg_mutex); ++ priv->mirror_tx = !!val->value.i; ++ ar40xx_set_mirror_regs(priv); ++ mutex_unlock(&priv->reg_mutex); ++ ++ return 0; ++} ++ ++static int ++ar40xx_sw_get_mirror_tx_enable(struct switch_dev *dev, ++ const struct switch_attr *attr, ++ struct switch_val *val) 
++{ ++ struct ar40xx_priv *priv = swdev_to_ar40xx(dev); ++ ++ mutex_lock(&priv->reg_mutex); ++ val->value.i = priv->mirror_tx; ++ mutex_unlock(&priv->reg_mutex); ++ return 0; ++} ++ ++static int ++ar40xx_sw_set_mirror_monitor_port(struct switch_dev *dev, ++ const struct switch_attr *attr, ++ struct switch_val *val) ++{ ++ struct ar40xx_priv *priv = swdev_to_ar40xx(dev); ++ ++ mutex_lock(&priv->reg_mutex); ++ priv->monitor_port = val->value.i; ++ ar40xx_set_mirror_regs(priv); ++ mutex_unlock(&priv->reg_mutex); ++ ++ return 0; ++} ++ ++static int ++ar40xx_sw_get_mirror_monitor_port(struct switch_dev *dev, ++ const struct switch_attr *attr, ++ struct switch_val *val) ++{ ++ struct ar40xx_priv *priv = swdev_to_ar40xx(dev); ++ ++ mutex_lock(&priv->reg_mutex); ++ val->value.i = priv->monitor_port; ++ mutex_unlock(&priv->reg_mutex); ++ return 0; ++} ++ ++static int ++ar40xx_sw_set_mirror_source_port(struct switch_dev *dev, ++ const struct switch_attr *attr, ++ struct switch_val *val) ++{ ++ struct ar40xx_priv *priv = swdev_to_ar40xx(dev); ++ ++ mutex_lock(&priv->reg_mutex); ++ priv->source_port = val->value.i; ++ ar40xx_set_mirror_regs(priv); ++ mutex_unlock(&priv->reg_mutex); ++ ++ return 0; ++} ++ ++static int ++ar40xx_sw_get_mirror_source_port(struct switch_dev *dev, ++ const struct switch_attr *attr, ++ struct switch_val *val) ++{ ++ struct ar40xx_priv *priv = swdev_to_ar40xx(dev); ++ ++ mutex_lock(&priv->reg_mutex); ++ val->value.i = priv->source_port; ++ mutex_unlock(&priv->reg_mutex); ++ return 0; ++} ++ ++static int ++ar40xx_sw_set_linkdown(struct switch_dev *dev, ++ const struct switch_attr *attr, ++ struct switch_val *val) ++{ ++ struct ar40xx_priv *priv = swdev_to_ar40xx(dev); ++ ++ if (val->value.i == 1) ++ ar40xx_port_phy_linkdown(priv); ++ else ++ ar40xx_phy_init(priv); ++ ++ return 0; ++} ++ ++static int ++ar40xx_sw_set_port_reset_mib(struct switch_dev *dev, ++ const struct switch_attr *attr, ++ struct switch_val *val) ++{ ++ struct ar40xx_priv *priv = 
swdev_to_ar40xx(dev); ++ int port; ++ int ret; ++ ++ port = val->port_vlan; ++ if (port >= dev->ports) ++ return -EINVAL; ++ ++ mutex_lock(&priv->mib_lock); ++ ret = ar40xx_mib_capture(priv); ++ if (ret) ++ goto unlock; ++ ++ ar40xx_mib_fetch_port_stat(priv, port, true); ++ ++unlock: ++ mutex_unlock(&priv->mib_lock); ++ return ret; ++} ++ ++static int ++ar40xx_sw_get_port_mib(struct switch_dev *dev, ++ const struct switch_attr *attr, ++ struct switch_val *val) ++{ ++ struct ar40xx_priv *priv = swdev_to_ar40xx(dev); ++ u64 *mib_stats; ++ int port; ++ int ret; ++ char *buf = priv->buf; ++ int i, len = 0; ++ u32 num_mibs = ARRAY_SIZE(ar40xx_mibs); ++ ++ port = val->port_vlan; ++ if (port >= dev->ports) ++ return -EINVAL; ++ ++ mutex_lock(&priv->mib_lock); ++ ret = ar40xx_mib_capture(priv); ++ if (ret) ++ goto unlock; ++ ++ ar40xx_mib_fetch_port_stat(priv, port, false); ++ ++ len += snprintf(buf + len, sizeof(priv->buf) - len, ++ "Port %d MIB counters\n", ++ port); ++ ++ mib_stats = &priv->mib_stats[port * num_mibs]; ++ for (i = 0; i < num_mibs; i++) ++ len += snprintf(buf + len, sizeof(priv->buf) - len, ++ "%-12s: %llu\n", ++ ar40xx_mibs[i].name, ++ mib_stats[i]); ++ ++ val->value.s = buf; ++ val->len = len; ++ ++unlock: ++ mutex_unlock(&priv->mib_lock); ++ return ret; ++} ++ ++static int ++ar40xx_sw_set_vid(struct switch_dev *dev, const struct switch_attr *attr, ++ struct switch_val *val) ++{ ++ struct ar40xx_priv *priv = swdev_to_ar40xx(dev); ++ ++ priv->vlan_id[val->port_vlan] = val->value.i; ++ return 0; ++} ++ ++static int ++ar40xx_sw_get_vid(struct switch_dev *dev, const struct switch_attr *attr, ++ struct switch_val *val) ++{ ++ struct ar40xx_priv *priv = swdev_to_ar40xx(dev); ++ ++ val->value.i = priv->vlan_id[val->port_vlan]; ++ return 0; ++} ++ ++static int ++ar40xx_sw_get_pvid(struct switch_dev *dev, int port, int *vlan) ++{ ++ struct ar40xx_priv *priv = swdev_to_ar40xx(dev); ++ *vlan = priv->pvid[port]; ++ return 0; ++} ++ ++static int 
++ar40xx_sw_set_pvid(struct switch_dev *dev, int port, int vlan) ++{ ++ struct ar40xx_priv *priv = swdev_to_ar40xx(dev); ++ ++ /* make sure no invalid PVIDs get set */ ++ if (vlan >= dev->vlans) ++ return -EINVAL; ++ ++ priv->pvid[port] = vlan; ++ return 0; ++} ++ ++static void ++ar40xx_read_port_link(struct ar40xx_priv *priv, int port, ++ struct switch_port_link *link) ++{ ++ u32 status; ++ u32 speed; ++ ++ memset(link, 0, sizeof(*link)); ++ ++ status = ar40xx_read(priv, AR40XX_REG_PORT_STATUS(port)); ++ ++ link->aneg = !!(status & AR40XX_PORT_AUTO_LINK_EN); ++ if (link->aneg || (port != AR40XX_PORT_CPU)) ++ link->link = !!(status & AR40XX_PORT_STATUS_LINK_UP); ++ else ++ link->link = true; ++ ++ if (!link->link) ++ return; ++ ++ link->duplex = !!(status & AR40XX_PORT_DUPLEX); ++ link->tx_flow = !!(status & AR40XX_PORT_STATUS_TXFLOW); ++ link->rx_flow = !!(status & AR40XX_PORT_STATUS_RXFLOW); ++ ++ speed = (status & AR40XX_PORT_SPEED) >> ++ AR40XX_PORT_STATUS_SPEED_S; ++ ++ switch (speed) { ++ case AR40XX_PORT_SPEED_10M: ++ link->speed = SWITCH_PORT_SPEED_10; ++ break; ++ case AR40XX_PORT_SPEED_100M: ++ link->speed = SWITCH_PORT_SPEED_100; ++ break; ++ case AR40XX_PORT_SPEED_1000M: ++ link->speed = SWITCH_PORT_SPEED_1000; ++ break; ++ default: ++ link->speed = SWITCH_PORT_SPEED_UNKNOWN; ++ break; ++ } ++} ++ ++static int ++ar40xx_sw_get_port_link(struct switch_dev *dev, int port, ++ struct switch_port_link *link) ++{ ++ struct ar40xx_priv *priv = swdev_to_ar40xx(dev); ++ ++ ar40xx_read_port_link(priv, port, link); ++ return 0; ++} ++ ++static const struct switch_attr ar40xx_sw_attr_globals[] = { ++ { ++ .type = SWITCH_TYPE_INT, ++ .name = "enable_vlan", ++ .description = "Enable VLAN mode", ++ .set = ar40xx_sw_set_vlan, ++ .get = ar40xx_sw_get_vlan, ++ .max = 1 ++ }, ++ { ++ .type = SWITCH_TYPE_NOVAL, ++ .name = "reset_mibs", ++ .description = "Reset all MIB counters", ++ .set = ar40xx_sw_set_reset_mibs, ++ }, ++ { ++ .type = SWITCH_TYPE_INT, ++ .name = 
"enable_mirror_rx", ++ .description = "Enable mirroring of RX packets", ++ .set = ar40xx_sw_set_mirror_rx_enable, ++ .get = ar40xx_sw_get_mirror_rx_enable, ++ .max = 1 ++ }, ++ { ++ .type = SWITCH_TYPE_INT, ++ .name = "enable_mirror_tx", ++ .description = "Enable mirroring of TX packets", ++ .set = ar40xx_sw_set_mirror_tx_enable, ++ .get = ar40xx_sw_get_mirror_tx_enable, ++ .max = 1 ++ }, ++ { ++ .type = SWITCH_TYPE_INT, ++ .name = "mirror_monitor_port", ++ .description = "Mirror monitor port", ++ .set = ar40xx_sw_set_mirror_monitor_port, ++ .get = ar40xx_sw_get_mirror_monitor_port, ++ .max = AR40XX_NUM_PORTS - 1 ++ }, ++ { ++ .type = SWITCH_TYPE_INT, ++ .name = "mirror_source_port", ++ .description = "Mirror source port", ++ .set = ar40xx_sw_set_mirror_source_port, ++ .get = ar40xx_sw_get_mirror_source_port, ++ .max = AR40XX_NUM_PORTS - 1 ++ }, ++ { ++ .type = SWITCH_TYPE_INT, ++ .name = "linkdown", ++ .description = "Link down all the PHYs", ++ .set = ar40xx_sw_set_linkdown, ++ .max = 1 ++ }, ++}; ++ ++static const struct switch_attr ar40xx_sw_attr_port[] = { ++ { ++ .type = SWITCH_TYPE_NOVAL, ++ .name = "reset_mib", ++ .description = "Reset single port MIB counters", ++ .set = ar40xx_sw_set_port_reset_mib, ++ }, ++ { ++ .type = SWITCH_TYPE_STRING, ++ .name = "mib", ++ .description = "Get port's MIB counters", ++ .set = NULL, ++ .get = ar40xx_sw_get_port_mib, ++ }, ++}; ++ ++const struct switch_attr ar40xx_sw_attr_vlan[] = { ++ { ++ .type = SWITCH_TYPE_INT, ++ .name = "vid", ++ .description = "VLAN ID (0-4094)", ++ .set = ar40xx_sw_set_vid, ++ .get = ar40xx_sw_get_vid, ++ .max = 4094, ++ }, ++}; ++ ++/* End of swconfig support */ ++ ++static int ++ar40xx_wait_bit(struct ar40xx_priv *priv, int reg, u32 mask, u32 val) ++{ ++ int timeout = 20; ++ u32 t; ++ ++ while (1) { ++ t = ar40xx_read(priv, reg); ++ if ((t & mask) == val) ++ return 0; ++ ++ if (timeout-- <= 0) ++ break; ++ ++ usleep_range(10, 20); ++ } ++ ++ pr_err("ar40xx: timeout for reg %08x: %08x & %08x != 
%08x\n", ++ (unsigned int)reg, t, mask, val); ++ return -ETIMEDOUT; ++} ++ ++static int ++ar40xx_atu_flush(struct ar40xx_priv *priv) ++{ ++ int ret; ++ ++ ret = ar40xx_wait_bit(priv, AR40XX_REG_ATU_FUNC, ++ AR40XX_ATU_FUNC_BUSY, 0); ++ if (!ret) ++ ar40xx_write(priv, AR40XX_REG_ATU_FUNC, ++ AR40XX_ATU_FUNC_OP_FLUSH | ++ AR40XX_ATU_FUNC_BUSY); ++ ++ return ret; ++} ++ ++static void ++ar40xx_ess_reset(struct ar40xx_priv *priv) ++{ ++ reset_control_assert(priv->ess_rst); ++ mdelay(10); ++ reset_control_deassert(priv->ess_rst); ++ /* Waiting for all inner tables init done. ++ * It cost 5~10ms. ++ */ ++ mdelay(10); ++ ++ pr_info("ESS reset ok!\n"); ++} ++ ++/* Start of psgmii self test */ ++ ++static void ++ar40xx_malibu_psgmii_ess_reset(struct ar40xx_priv *priv) ++{ ++ u32 n; ++ struct mii_bus *bus = priv->mii_bus; ++ /* reset phy psgmii */ ++ /* fix phy psgmii RX 20bit */ ++ mdiobus_write(bus, 5, 0x0, 0x005b); ++ /* reset phy psgmii */ ++ mdiobus_write(bus, 5, 0x0, 0x001b); ++ /* release reset phy psgmii */ ++ mdiobus_write(bus, 5, 0x0, 0x005b); ++ ++ for (n = 0; n < AR40XX_PSGMII_CALB_NUM; n++) { ++ u16 status; ++ ++ status = ar40xx_phy_mmd_read(priv, 5, 1, 0x28); ++ if (status & BIT(0)) ++ break; ++ /* Polling interval to check PSGMII PLL in malibu is ready ++ * the worst time is 8.67ms ++ * for 25MHz reference clock ++ * [512+(128+2048)*49]*80ns+100us ++ */ ++ mdelay(2); ++ } ++ ++ /*check malibu psgmii calibration done end..*/ ++ ++ /*freeze phy psgmii RX CDR*/ ++ mdiobus_write(bus, 5, 0x1a, 0x2230); ++ ++ ar40xx_ess_reset(priv); ++ ++ /*check psgmii calibration done start*/ ++ for (n = 0; n < AR40XX_PSGMII_CALB_NUM; n++) { ++ u32 status; ++ ++ status = ar40xx_psgmii_read(priv, 0xa0); ++ if (status & BIT(0)) ++ break; ++ /* Polling interval to check PSGMII PLL in ESS is ready */ ++ mdelay(2); ++ } ++ ++ /* check dakota psgmii calibration done end..*/ ++ ++ /* relesae phy psgmii RX CDR */ ++ mdiobus_write(bus, 5, 0x1a, 0x3230); ++ /* release phy psgmii RX 20bit */ 
++ mdiobus_write(bus, 5, 0x0, 0x005f); ++} ++ ++static void ++ar40xx_psgmii_single_phy_testing(struct ar40xx_priv *priv, int phy) ++{ ++ int j; ++ u32 tx_ok, tx_error; ++ u32 rx_ok, rx_error; ++ u32 tx_ok_high16; ++ u32 rx_ok_high16; ++ u32 tx_all_ok, rx_all_ok; ++ struct mii_bus *bus = priv->mii_bus; ++ ++ mdiobus_write(bus, phy, 0x0, 0x9000); ++ mdiobus_write(bus, phy, 0x0, 0x4140); ++ ++ for (j = 0; j < AR40XX_PSGMII_CALB_NUM; j++) { ++ u16 status; ++ ++ status = mdiobus_read(bus, phy, 0x11); ++ if (status & AR40XX_PHY_SPEC_STATUS_LINK) ++ break; ++ /* the polling interval to check if the PHY link up or not ++ * maxwait_timer: 750 ms +/-10 ms ++ * minwait_timer : 1 us +/- 0.1us ++ * time resides in minwait_timer ~ maxwait_timer ++ * see IEEE 802.3 section 40.4.5.2 ++ */ ++ mdelay(8); ++ } ++ ++ /* enable check */ ++ ar40xx_phy_mmd_write(priv, phy, 7, 0x8029, 0x0000); ++ ar40xx_phy_mmd_write(priv, phy, 7, 0x8029, 0x0003); ++ ++ /* start traffic */ ++ ar40xx_phy_mmd_write(priv, phy, 7, 0x8020, 0xa000); ++ /* wait for all traffic end ++ * 4096(pkt num)*1524(size)*8ns(125MHz)=49.9ms ++ */ ++ mdelay(50); ++ ++ /* check counter */ ++ tx_ok = ar40xx_phy_mmd_read(priv, phy, 7, 0x802e); ++ tx_ok_high16 = ar40xx_phy_mmd_read(priv, phy, 7, 0x802d); ++ tx_error = ar40xx_phy_mmd_read(priv, phy, 7, 0x802f); ++ rx_ok = ar40xx_phy_mmd_read(priv, phy, 7, 0x802b); ++ rx_ok_high16 = ar40xx_phy_mmd_read(priv, phy, 7, 0x802a); ++ rx_error = ar40xx_phy_mmd_read(priv, phy, 7, 0x802c); ++ tx_all_ok = tx_ok + (tx_ok_high16 << 16); ++ rx_all_ok = rx_ok + (rx_ok_high16 << 16); ++ if (tx_all_ok == 0x1000 && tx_error == 0) { ++ /* success */ ++ priv->phy_t_status &= (~BIT(phy)); ++ } else { ++ pr_info("PHY %d single test PSGMII issue happen!\n", phy); ++ priv->phy_t_status |= BIT(phy); ++ } ++ ++ mdiobus_write(bus, phy, 0x0, 0x1840); ++} ++ ++static void ++ar40xx_psgmii_all_phy_testing(struct ar40xx_priv *priv) ++{ ++ int phy, j; ++ struct mii_bus *bus = priv->mii_bus; ++ ++ 
mdiobus_write(bus, 0x1f, 0x0, 0x9000); ++ mdiobus_write(bus, 0x1f, 0x0, 0x4140); ++ ++ for (j = 0; j < AR40XX_PSGMII_CALB_NUM; j++) { ++ for (phy = 0; phy < AR40XX_NUM_PORTS - 1; phy++) { ++ u16 status; ++ ++ status = mdiobus_read(bus, phy, 0x11); ++ if (!(status & BIT(10))) ++ break; ++ } ++ ++ if (phy >= (AR40XX_NUM_PORTS - 1)) ++ break; ++ /* The polling interva to check if the PHY link up or not */ ++ mdelay(8); ++ } ++ /* enable check */ ++ ar40xx_phy_mmd_write(priv, 0x1f, 7, 0x8029, 0x0000); ++ ar40xx_phy_mmd_write(priv, 0x1f, 7, 0x8029, 0x0003); ++ ++ /* start traffic */ ++ ar40xx_phy_mmd_write(priv, 0x1f, 7, 0x8020, 0xa000); ++ /* wait for all traffic end ++ * 4096(pkt num)*1524(size)*8ns(125MHz)=49.9ms ++ */ ++ mdelay(50); ++ ++ for (phy = 0; phy < AR40XX_NUM_PORTS - 1; phy++) { ++ u32 tx_ok, tx_error; ++ u32 rx_ok, rx_error; ++ u32 tx_ok_high16; ++ u32 rx_ok_high16; ++ u32 tx_all_ok, rx_all_ok; ++ ++ /* check counter */ ++ tx_ok = ar40xx_phy_mmd_read(priv, phy, 7, 0x802e); ++ tx_ok_high16 = ar40xx_phy_mmd_read(priv, phy, 7, 0x802d); ++ tx_error = ar40xx_phy_mmd_read(priv, phy, 7, 0x802f); ++ rx_ok = ar40xx_phy_mmd_read(priv, phy, 7, 0x802b); ++ rx_ok_high16 = ar40xx_phy_mmd_read(priv, phy, 7, 0x802a); ++ rx_error = ar40xx_phy_mmd_read(priv, phy, 7, 0x802c); ++ tx_all_ok = tx_ok + (tx_ok_high16<<16); ++ rx_all_ok = rx_ok + (rx_ok_high16<<16); ++ if (tx_all_ok == 0x1000 && tx_error == 0) { ++ /* success */ ++ priv->phy_t_status &= ~BIT(phy + 8); ++ } else { ++ pr_info("PHY%d test see issue!\n", phy); ++ priv->phy_t_status |= BIT(phy + 8); ++ } ++ } ++ ++ pr_debug("PHY all test 0x%x \r\n", priv->phy_t_status); ++} ++ ++void ++ar40xx_psgmii_self_test(struct ar40xx_priv *priv) ++{ ++ u32 i, phy; ++ struct mii_bus *bus = priv->mii_bus; ++ ++ ar40xx_malibu_psgmii_ess_reset(priv); ++ ++ /* switch to access MII reg for copper */ ++ mdiobus_write(bus, 4, 0x1f, 0x8500); ++ for (phy = 0; phy < AR40XX_NUM_PORTS - 1; phy++) { ++ /*enable phy mdio broadcast write*/ ++ 
ar40xx_phy_mmd_write(priv, phy, 7, 0x8028, 0x801f); ++ } ++ /* force no link by power down */ ++ mdiobus_write(bus, 0x1f, 0x0, 0x1840); ++ /*packet number*/ ++ ar40xx_phy_mmd_write(priv, 0x1f, 7, 0x8021, 0x1000); ++ ar40xx_phy_mmd_write(priv, 0x1f, 7, 0x8062, 0x05e0); ++ ++ /*fix mdi status */ ++ mdiobus_write(bus, 0x1f, 0x10, 0x6800); ++ for (i = 0; i < AR40XX_PSGMII_CALB_NUM; i++) { ++ priv->phy_t_status = 0; ++ ++ for (phy = 0; phy < AR40XX_NUM_PORTS - 1; phy++) { ++ ar40xx_rmw(priv, AR40XX_REG_PORT_LOOKUP(phy + 1), ++ AR40XX_PORT_LOOKUP_LOOPBACK, ++ AR40XX_PORT_LOOKUP_LOOPBACK); ++ } ++ ++ for (phy = 0; phy < AR40XX_NUM_PORTS - 1; phy++) ++ ar40xx_psgmii_single_phy_testing(priv, phy); ++ ++ ar40xx_psgmii_all_phy_testing(priv); ++ ++ if (priv->phy_t_status) ++ ar40xx_malibu_psgmii_ess_reset(priv); ++ else ++ break; ++ } ++ ++ if (i >= AR40XX_PSGMII_CALB_NUM) ++ pr_info("PSGMII cannot recover\n"); ++ else ++ pr_debug("PSGMII recovered after %d times reset\n", i); ++ ++ /* configuration recover */ ++ /* packet number */ ++ ar40xx_phy_mmd_write(priv, 0x1f, 7, 0x8021, 0x0); ++ /* disable check */ ++ ar40xx_phy_mmd_write(priv, 0x1f, 7, 0x8029, 0x0); ++ /* disable traffic */ ++ ar40xx_phy_mmd_write(priv, 0x1f, 7, 0x8020, 0x0); ++} ++ ++void ++ar40xx_psgmii_self_test_clean(struct ar40xx_priv *priv) ++{ ++ int phy; ++ struct mii_bus *bus = priv->mii_bus; ++ ++ /* disable phy internal loopback */ ++ mdiobus_write(bus, 0x1f, 0x10, 0x6860); ++ mdiobus_write(bus, 0x1f, 0x0, 0x9040); ++ ++ for (phy = 0; phy < AR40XX_NUM_PORTS - 1; phy++) { ++ /* disable mac loop back */ ++ ar40xx_rmw(priv, AR40XX_REG_PORT_LOOKUP(phy + 1), ++ AR40XX_PORT_LOOKUP_LOOPBACK, 0); ++ /* disable phy mdio broadcast write */ ++ ar40xx_phy_mmd_write(priv, phy, 7, 0x8028, 0x001f); ++ } ++ ++ /* clear fdb entry */ ++ ar40xx_atu_flush(priv); ++} ++ ++/* End of psgmii self test */ ++ ++static void ++ar40xx_mac_mode_init(struct ar40xx_priv *priv, u32 mode) ++{ ++ if (mode == PORT_WRAPPER_PSGMII) { ++ 
ar40xx_psgmii_write(priv, AR40XX_PSGMII_MODE_CONTROL, 0x2200); ++ ar40xx_psgmii_write(priv, AR40XX_PSGMIIPHY_TX_CONTROL, 0x8380); ++ } ++} ++ ++static ++int ar40xx_cpuport_setup(struct ar40xx_priv *priv) ++{ ++ u32 t; ++ ++ t = AR40XX_PORT_STATUS_TXFLOW | ++ AR40XX_PORT_STATUS_RXFLOW | ++ AR40XX_PORT_TXHALF_FLOW | ++ AR40XX_PORT_DUPLEX | ++ AR40XX_PORT_SPEED_1000M; ++ ar40xx_write(priv, AR40XX_REG_PORT_STATUS(0), t); ++ usleep_range(10, 20); ++ ++ t |= AR40XX_PORT_TX_EN | ++ AR40XX_PORT_RX_EN; ++ ar40xx_write(priv, AR40XX_REG_PORT_STATUS(0), t); ++ ++ return 0; ++} ++ ++static void ++ar40xx_init_port(struct ar40xx_priv *priv, int port) ++{ ++ u32 t; ++ ++ ar40xx_rmw(priv, AR40XX_REG_PORT_STATUS(port), ++ AR40XX_PORT_AUTO_LINK_EN, 0); ++ ++ ar40xx_write(priv, AR40XX_REG_PORT_HEADER(port), 0); ++ ++ ar40xx_write(priv, AR40XX_REG_PORT_VLAN0(port), 0); ++ ++ t = AR40XX_PORT_VLAN1_OUT_MODE_UNTOUCH << AR40XX_PORT_VLAN1_OUT_MODE_S; ++ ar40xx_write(priv, AR40XX_REG_PORT_VLAN1(port), t); ++ ++ t = AR40XX_PORT_LOOKUP_LEARN; ++ t |= AR40XX_PORT_STATE_FORWARD << AR40XX_PORT_LOOKUP_STATE_S; ++ ar40xx_write(priv, AR40XX_REG_PORT_LOOKUP(port), t); ++} ++ ++void ++ar40xx_init_globals(struct ar40xx_priv *priv) ++{ ++ u32 t; ++ ++ /* enable CPU port and disable mirror port */ ++ t = AR40XX_FWD_CTRL0_CPU_PORT_EN | ++ AR40XX_FWD_CTRL0_MIRROR_PORT; ++ ar40xx_write(priv, AR40XX_REG_FWD_CTRL0, t); ++ ++ /* forward multicast and broadcast frames to CPU */ ++ t = (AR40XX_PORTS_ALL << AR40XX_FWD_CTRL1_UC_FLOOD_S) | ++ (AR40XX_PORTS_ALL << AR40XX_FWD_CTRL1_MC_FLOOD_S) | ++ (AR40XX_PORTS_ALL << AR40XX_FWD_CTRL1_BC_FLOOD_S); ++ ar40xx_write(priv, AR40XX_REG_FWD_CTRL1, t); ++ ++ /* enable jumbo frames */ ++ ar40xx_rmw(priv, AR40XX_REG_MAX_FRAME_SIZE, ++ AR40XX_MAX_FRAME_SIZE_MTU, 9018 + 8 + 2); ++ ++ /* Enable MIB counters */ ++ ar40xx_rmw(priv, AR40XX_REG_MODULE_EN, 0, ++ AR40XX_MODULE_EN_MIB); ++ ++ /* Disable AZ */ ++ ar40xx_write(priv, AR40XX_REG_EEE_CTRL, 0); ++ ++ /* set flowctrl 
thershold for cpu port */ ++ t = (AR40XX_PORT0_FC_THRESH_ON_DFLT << 16) | ++ AR40XX_PORT0_FC_THRESH_OFF_DFLT; ++ ar40xx_write(priv, AR40XX_REG_PORT_FLOWCTRL_THRESH(0), t); ++} ++ ++static void ++ar40xx_malibu_init(struct ar40xx_priv *priv) ++{ ++ int i; ++ struct mii_bus *bus; ++ u16 val; ++ ++ bus = priv->mii_bus; ++ ++ /* war to enable AZ transmitting ability */ ++ ar40xx_phy_mmd_write(priv, AR40XX_PSGMII_ID, 1, ++ AR40XX_MALIBU_PSGMII_MODE_CTRL, ++ AR40XX_MALIBU_PHY_PSGMII_MODE_CTRL_ADJUST_VAL); ++ for (i = 0; i < AR40XX_NUM_PORTS - 1; i++) { ++ /* change malibu control_dac */ ++ val = ar40xx_phy_mmd_read(priv, i, 7, ++ AR40XX_MALIBU_PHY_MMD7_DAC_CTRL); ++ val &= ~AR40XX_MALIBU_DAC_CTRL_MASK; ++ val |= AR40XX_MALIBU_DAC_CTRL_VALUE; ++ ar40xx_phy_mmd_write(priv, i, 7, ++ AR40XX_MALIBU_PHY_MMD7_DAC_CTRL, val); ++ if (i == AR40XX_MALIBU_PHY_LAST_ADDR) { ++ /* to avoid goes into hibernation */ ++ val = ar40xx_phy_mmd_read(priv, i, 3, ++ AR40XX_MALIBU_PHY_RLP_CTRL); ++ val &= (~(1<<1)); ++ ar40xx_phy_mmd_write(priv, i, 3, ++ AR40XX_MALIBU_PHY_RLP_CTRL, val); ++ } ++ } ++ ++ /* adjust psgmii serdes tx amp */ ++ mdiobus_write(bus, AR40XX_PSGMII_ID, AR40XX_PSGMII_TX_DRIVER_1_CTRL, ++ AR40XX_MALIBU_PHY_PSGMII_REDUCE_SERDES_TX_AMP); ++} ++ ++static int ++ar40xx_hw_init(struct ar40xx_priv *priv) ++{ ++ u32 i; ++ ++ ar40xx_ess_reset(priv); ++ ++ if (priv->mii_bus) ++ ar40xx_malibu_init(priv); ++ else ++ return -1; ++ ++ ar40xx_psgmii_self_test(priv); ++ ar40xx_psgmii_self_test_clean(priv); ++ ++ ar40xx_mac_mode_init(priv, priv->mac_mode); ++ ++ for (i = 0; i < priv->dev.ports; i++) ++ ar40xx_init_port(priv, i); ++ ++ ar40xx_init_globals(priv); ++ ++ return 0; ++} ++ ++/* Start of qm error WAR */ ++ ++static ++int ar40xx_force_1g_full(struct ar40xx_priv *priv, u32 port_id) ++{ ++ u32 reg; ++ ++ if (port_id < 0 || port_id > 6) ++ return -1; ++ ++ reg = AR40XX_REG_PORT_STATUS(port_id); ++ return ar40xx_rmw(priv, reg, AR40XX_PORT_SPEED, ++ (AR40XX_PORT_SPEED_1000M | 
AR40XX_PORT_DUPLEX)); ++} ++ ++static ++int ar40xx_get_qm_status(struct ar40xx_priv *priv, ++ u32 port_id, u32 *qm_buffer_err) ++{ ++ u32 reg; ++ u32 qm_val; ++ ++ if (port_id < 1 || port_id > 5) { ++ *qm_buffer_err = 0; ++ return -1; ++ } ++ ++ if (port_id < 4) { ++ reg = AR40XX_REG_QM_PORT0_3_QNUM; ++ ar40xx_write(priv, AR40XX_REG_QM_DEBUG_ADDR, reg); ++ qm_val = ar40xx_read(priv, AR40XX_REG_QM_DEBUG_VALUE); ++ /* every 8 bits for each port */ ++ *qm_buffer_err = (qm_val >> (port_id * 8)) & 0xFF; ++ } else { ++ reg = AR40XX_REG_QM_PORT4_6_QNUM; ++ ar40xx_write(priv, AR40XX_REG_QM_DEBUG_ADDR, reg); ++ qm_val = ar40xx_read(priv, AR40XX_REG_QM_DEBUG_VALUE); ++ /* every 8 bits for each port */ ++ *qm_buffer_err = (qm_val >> ((port_id-4) * 8)) & 0xFF; ++ } ++ ++ return 0; ++} ++ ++static void ++ar40xx_sw_mac_polling_task(struct ar40xx_priv *priv) ++{ ++ static int task_count; ++ u32 i; ++ u32 reg, value; ++ u32 link, speed, duplex; ++ u32 qm_buffer_err; ++ u16 port_phy_status[AR40XX_NUM_PORTS]; ++ static u32 qm_err_cnt[AR40XX_NUM_PORTS] = {0, 0, 0, 0, 0, 0}; ++ static u32 link_cnt[AR40XX_NUM_PORTS] = {0, 0, 0, 0, 0, 0}; ++ struct mii_bus *bus = NULL; ++ ++ if (!priv || !priv->mii_bus) ++ return; ++ ++ bus = priv->mii_bus; ++ ++ ++task_count; ++ ++ for (i = 1; i < AR40XX_NUM_PORTS; ++i) { ++ port_phy_status[i] = ++ mdiobus_read(bus, i-1, AR40XX_PHY_SPEC_STATUS); ++ speed = link = duplex = port_phy_status[i]; ++ speed &= AR40XX_PHY_SPEC_STATUS_SPEED; ++ speed >>= 14; ++ link &= AR40XX_PHY_SPEC_STATUS_LINK; ++ link >>= 10; ++ duplex &= AR40XX_PHY_SPEC_STATUS_DUPLEX; ++ duplex >>= 13; ++ ++ if (link != priv->ar40xx_port_old_link[i]) { ++ ++link_cnt[i]; ++ /* Up --> Down */ ++ if ((priv->ar40xx_port_old_link[i] == ++ AR40XX_PORT_LINK_UP) && ++ (link == AR40XX_PORT_LINK_DOWN)) { ++ /* LINK_EN disable(MAC force mode)*/ ++ reg = AR40XX_REG_PORT_STATUS(i); ++ ar40xx_rmw(priv, reg, ++ AR40XX_PORT_AUTO_LINK_EN, 0); ++ ++ /* Check queue buffer */ ++ qm_err_cnt[i] = 0; ++ 
ar40xx_get_qm_status(priv, i, &qm_buffer_err); ++ if (qm_buffer_err) { ++ priv->ar40xx_port_qm_buf[i] = ++ AR40XX_QM_NOT_EMPTY; ++ } else { ++ u16 phy_val = 0; ++ ++ priv->ar40xx_port_qm_buf[i] = ++ AR40XX_QM_EMPTY; ++ ar40xx_force_1g_full(priv, i); ++ /* Ref:QCA8337 Datasheet,Clearing ++ * MENU_CTRL_EN prevents phy to ++ * stuck in 100BT mode when ++ * bringing up the link ++ */ ++ ar40xx_phy_dbg_read(priv, i-1, ++ AR40XX_PHY_DEBUG_0, ++ &phy_val); ++ phy_val &= (~AR40XX_PHY_MANU_CTRL_EN); ++ ar40xx_phy_dbg_write(priv, i-1, ++ AR40XX_PHY_DEBUG_0, ++ phy_val); ++ } ++ priv->ar40xx_port_old_link[i] = link; ++ } else if ((priv->ar40xx_port_old_link[i] == ++ AR40XX_PORT_LINK_DOWN) && ++ (link == AR40XX_PORT_LINK_UP)) { ++ /* Down --> Up */ ++ if (priv->port_link_up[i] < 1) { ++ ++priv->port_link_up[i]; ++ } else { ++ /* Change port status */ ++ reg = AR40XX_REG_PORT_STATUS(i); ++ value = ar40xx_read(priv, reg); ++ priv->port_link_up[i] = 0; ++ ++ value &= ~(AR40XX_PORT_DUPLEX | ++ AR40XX_PORT_SPEED); ++ value |= speed | (duplex ? 
BIT(6) : 0); ++ ar40xx_write(priv, reg, value); ++ /* clock switch need such time ++ * to avoid glitch ++ */ ++ usleep_range(100, 200); ++ ++ value |= AR40XX_PORT_AUTO_LINK_EN; ++ ar40xx_write(priv, reg, value); ++ /* HW need such time to make sure link ++ * stable before enable MAC ++ */ ++ usleep_range(100, 200); ++ ++ if (speed == AR40XX_PORT_SPEED_100M) { ++ u16 phy_val = 0; ++ /* Enable @100M, if down to 10M ++ * clock will change smoothly ++ */ ++ ar40xx_phy_dbg_read(priv, i-1, ++ 0, ++ &phy_val); ++ phy_val |= ++ AR40XX_PHY_MANU_CTRL_EN; ++ ar40xx_phy_dbg_write(priv, i-1, ++ 0, ++ phy_val); ++ } ++ priv->ar40xx_port_old_link[i] = link; ++ } ++ } ++ } ++ ++ if (priv->ar40xx_port_qm_buf[i] == AR40XX_QM_NOT_EMPTY) { ++ /* Check QM */ ++ ar40xx_get_qm_status(priv, i, &qm_buffer_err); ++ if (qm_buffer_err) { ++ ++qm_err_cnt[i]; ++ } else { ++ priv->ar40xx_port_qm_buf[i] = ++ AR40XX_QM_EMPTY; ++ qm_err_cnt[i] = 0; ++ ar40xx_force_1g_full(priv, i); ++ } ++ } ++ } ++} ++ ++static void ++ar40xx_qm_err_check_work_task(struct work_struct *work) ++{ ++ struct ar40xx_priv *priv = container_of(work, struct ar40xx_priv, ++ qm_dwork.work); ++ ++ mutex_lock(&priv->qm_lock); ++ ++ ar40xx_sw_mac_polling_task(priv); ++ ++ mutex_unlock(&priv->qm_lock); ++ ++ schedule_delayed_work(&priv->qm_dwork, ++ msecs_to_jiffies(AR40XX_QM_WORK_DELAY)); ++} ++ ++static int ++ar40xx_qm_err_check_work_start(struct ar40xx_priv *priv) ++{ ++ mutex_init(&priv->qm_lock); ++ ++ INIT_DELAYED_WORK(&priv->qm_dwork, ar40xx_qm_err_check_work_task); ++ ++ schedule_delayed_work(&priv->qm_dwork, ++ msecs_to_jiffies(AR40XX_QM_WORK_DELAY)); ++ ++ return 0; ++} ++ ++/* End of qm error WAR */ ++ ++static int ++ar40xx_vlan_init(struct ar40xx_priv *priv) ++{ ++ int port; ++ unsigned long bmp; ++ ++ /* By default Enable VLAN */ ++ priv->vlan = 1; ++ priv->vlan_table[AR40XX_LAN_VLAN] = priv->cpu_bmp | priv->lan_bmp; ++ priv->vlan_table[AR40XX_WAN_VLAN] = priv->cpu_bmp | priv->wan_bmp; ++ priv->vlan_tagged = 
priv->cpu_bmp; ++ bmp = priv->lan_bmp; ++ for_each_set_bit(port, &bmp, AR40XX_NUM_PORTS) ++ priv->pvid[port] = AR40XX_LAN_VLAN; ++ ++ bmp = priv->wan_bmp; ++ for_each_set_bit(port, &bmp, AR40XX_NUM_PORTS) ++ priv->pvid[port] = AR40XX_WAN_VLAN; ++ ++ return 0; ++} ++ ++static void ++ar40xx_mib_work_func(struct work_struct *work) ++{ ++ struct ar40xx_priv *priv; ++ int err; ++ ++ priv = container_of(work, struct ar40xx_priv, mib_work.work); ++ ++ mutex_lock(&priv->mib_lock); ++ ++ err = ar40xx_mib_capture(priv); ++ if (err) ++ goto next_port; ++ ++ ar40xx_mib_fetch_port_stat(priv, priv->mib_next_port, false); ++ ++next_port: ++ priv->mib_next_port++; ++ if (priv->mib_next_port >= priv->dev.ports) ++ priv->mib_next_port = 0; ++ ++ mutex_unlock(&priv->mib_lock); ++ ++ schedule_delayed_work(&priv->mib_work, ++ msecs_to_jiffies(AR40XX_MIB_WORK_DELAY)); ++} ++ ++static void ++ar40xx_setup_port(struct ar40xx_priv *priv, int port, u32 members) ++{ ++ u32 t; ++ u32 egress, ingress; ++ u32 pvid = priv->vlan_id[priv->pvid[port]]; ++ ++ if (priv->vlan) { ++ egress = AR40XX_PORT_VLAN1_OUT_MODE_UNMOD; ++ ingress = AR40XX_IN_SECURE; ++ } else { ++ egress = AR40XX_PORT_VLAN1_OUT_MODE_UNTOUCH; ++ ingress = AR40XX_IN_PORT_ONLY; ++ } ++ ++ t = pvid << AR40XX_PORT_VLAN0_DEF_SVID_S; ++ t |= pvid << AR40XX_PORT_VLAN0_DEF_CVID_S; ++ ar40xx_write(priv, AR40XX_REG_PORT_VLAN0(port), t); ++ ++ t = AR40XX_PORT_VLAN1_PORT_VLAN_PROP; ++ t |= egress << AR40XX_PORT_VLAN1_OUT_MODE_S; ++ ar40xx_write(priv, AR40XX_REG_PORT_VLAN1(port), t); ++ ++ t = members; ++ t |= AR40XX_PORT_LOOKUP_LEARN; ++ t |= ingress << AR40XX_PORT_LOOKUP_IN_MODE_S; ++ t |= AR40XX_PORT_STATE_FORWARD << AR40XX_PORT_LOOKUP_STATE_S; ++ ar40xx_write(priv, AR40XX_REG_PORT_LOOKUP(port), t); ++} ++ ++static void ++ar40xx_vtu_op(struct ar40xx_priv *priv, u32 op, u32 val) ++{ ++ if (ar40xx_wait_bit(priv, AR40XX_REG_VTU_FUNC1, ++ AR40XX_VTU_FUNC1_BUSY, 0)) ++ return; ++ ++ if ((op & AR40XX_VTU_FUNC1_OP) == AR40XX_VTU_FUNC1_OP_LOAD) ++ 
ar40xx_write(priv, AR40XX_REG_VTU_FUNC0, val); ++ ++ op |= AR40XX_VTU_FUNC1_BUSY; ++ ar40xx_write(priv, AR40XX_REG_VTU_FUNC1, op); ++} ++ ++static void ++ar40xx_vtu_load_vlan(struct ar40xx_priv *priv, u32 vid, u32 port_mask) ++{ ++ u32 op; ++ u32 val; ++ int i; ++ ++ op = AR40XX_VTU_FUNC1_OP_LOAD | (vid << AR40XX_VTU_FUNC1_VID_S); ++ val = AR40XX_VTU_FUNC0_VALID | AR40XX_VTU_FUNC0_IVL; ++ for (i = 0; i < AR40XX_NUM_PORTS; i++) { ++ u32 mode; ++ ++ if ((port_mask & BIT(i)) == 0) ++ mode = AR40XX_VTU_FUNC0_EG_MODE_NOT; ++ else if (priv->vlan == 0) ++ mode = AR40XX_VTU_FUNC0_EG_MODE_KEEP; ++ else if ((priv->vlan_tagged & BIT(i)) || ++ (priv->vlan_id[priv->pvid[i]] != vid)) ++ mode = AR40XX_VTU_FUNC0_EG_MODE_TAG; ++ else ++ mode = AR40XX_VTU_FUNC0_EG_MODE_UNTAG; ++ ++ val |= mode << AR40XX_VTU_FUNC0_EG_MODE_S(i); ++ } ++ ar40xx_vtu_op(priv, op, val); ++} ++ ++static void ++ar40xx_vtu_flush(struct ar40xx_priv *priv) ++{ ++ ar40xx_vtu_op(priv, AR40XX_VTU_FUNC1_OP_FLUSH, 0); ++} ++ ++static int ++ar40xx_sw_hw_apply(struct switch_dev *dev) ++{ ++ struct ar40xx_priv *priv = swdev_to_ar40xx(dev); ++ u8 portmask[AR40XX_NUM_PORTS]; ++ int i, j; ++ ++ mutex_lock(&priv->reg_mutex); ++ /* flush all vlan entries */ ++ ar40xx_vtu_flush(priv); ++ ++ memset(portmask, 0, sizeof(portmask)); ++ if (priv->vlan) { ++ for (j = 0; j < AR40XX_MAX_VLANS; j++) { ++ u8 vp = priv->vlan_table[j]; ++ ++ if (!vp) ++ continue; ++ ++ for (i = 0; i < dev->ports; i++) { ++ u8 mask = BIT(i); ++ ++ if (vp & mask) ++ portmask[i] |= vp & ~mask; ++ } ++ ++ ar40xx_vtu_load_vlan(priv, priv->vlan_id[j], ++ priv->vlan_table[j]); ++ } ++ } else { ++ /* 8021q vlan disabled */ ++ for (i = 0; i < dev->ports; i++) { ++ if (i == AR40XX_PORT_CPU) ++ continue; ++ ++ portmask[i] = BIT(AR40XX_PORT_CPU); ++ portmask[AR40XX_PORT_CPU] |= BIT(i); ++ } ++ } ++ ++ /* update the port destination mask registers and tag settings */ ++ for (i = 0; i < dev->ports; i++) ++ ar40xx_setup_port(priv, i, portmask[i]); ++ ++ 
ar40xx_set_mirror_regs(priv); ++ ++ mutex_unlock(&priv->reg_mutex); ++ return 0; ++} ++ ++static int ++ar40xx_sw_reset_switch(struct switch_dev *dev) ++{ ++ struct ar40xx_priv *priv = swdev_to_ar40xx(dev); ++ int i, rv; ++ ++ mutex_lock(&priv->reg_mutex); ++ memset(&priv->vlan, 0, sizeof(struct ar40xx_priv) - ++ offsetof(struct ar40xx_priv, vlan)); ++ ++ for (i = 0; i < AR40XX_MAX_VLANS; i++) ++ priv->vlan_id[i] = i; ++ ++ ar40xx_vlan_init(priv); ++ ++ priv->mirror_rx = false; ++ priv->mirror_tx = false; ++ priv->source_port = 0; ++ priv->monitor_port = 0; ++ ++ mutex_unlock(&priv->reg_mutex); ++ ++ rv = ar40xx_sw_hw_apply(dev); ++ return rv; ++} ++ ++static int ++ar40xx_start(struct ar40xx_priv *priv) ++{ ++ int ret; ++ ++ ret = ar40xx_hw_init(priv); ++ if (ret) ++ return ret; ++ ++ ret = ar40xx_sw_reset_switch(&priv->dev); ++ if (ret) ++ return ret; ++ ++ /* at last, setup cpu port */ ++ ret = ar40xx_cpuport_setup(priv); ++ if (ret) ++ return ret; ++ ++ schedule_delayed_work(&priv->mib_work, ++ msecs_to_jiffies(AR40XX_MIB_WORK_DELAY)); ++ ++ ar40xx_qm_err_check_work_start(priv); ++ ++ return 0; ++} ++ ++static const struct switch_dev_ops ar40xx_sw_ops = { ++ .attr_global = { ++ .attr = ar40xx_sw_attr_globals, ++ .n_attr = ARRAY_SIZE(ar40xx_sw_attr_globals), ++ }, ++ .attr_port = { ++ .attr = ar40xx_sw_attr_port, ++ .n_attr = ARRAY_SIZE(ar40xx_sw_attr_port), ++ }, ++ .attr_vlan = { ++ .attr = ar40xx_sw_attr_vlan, ++ .n_attr = ARRAY_SIZE(ar40xx_sw_attr_vlan), ++ }, ++ .get_port_pvid = ar40xx_sw_get_pvid, ++ .set_port_pvid = ar40xx_sw_set_pvid, ++ .get_vlan_ports = ar40xx_sw_get_ports, ++ .set_vlan_ports = ar40xx_sw_set_ports, ++ .apply_config = ar40xx_sw_hw_apply, ++ .reset_switch = ar40xx_sw_reset_switch, ++ .get_port_link = ar40xx_sw_get_port_link, ++}; ++ ++/* Start of phy driver support */ ++ ++static const u32 ar40xx_phy_ids[] = { ++ 0x004dd0b1, ++ 0x004dd0b2, /* AR40xx */ ++}; ++ ++static bool ++ar40xx_phy_match(u32 phy_id) ++{ ++ int i; ++ ++ for (i = 0; i < 
ARRAY_SIZE(ar40xx_phy_ids); i++) ++ if (phy_id == ar40xx_phy_ids[i]) ++ return true; ++ ++ return false; ++} ++ ++static bool ++is_ar40xx_phy(struct mii_bus *bus) ++{ ++ unsigned i; ++ ++ for (i = 0; i < 4; i++) { ++ u32 phy_id; ++ ++ phy_id = mdiobus_read(bus, i, MII_PHYSID1) << 16; ++ phy_id |= mdiobus_read(bus, i, MII_PHYSID2); ++ if (!ar40xx_phy_match(phy_id)) ++ return false; ++ } ++ ++ return true; ++} ++ ++static int ++ar40xx_phy_probe(struct phy_device *phydev) ++{ ++ if (!is_ar40xx_phy(phydev->mdio.bus)) ++ return -ENODEV; ++ ++ ar40xx_priv->mii_bus = phydev->mdio.bus; ++ phydev->priv = ar40xx_priv; ++ if (phydev->mdio.addr == 0) ++ ar40xx_priv->phy = phydev; ++ ++ phydev->supported |= SUPPORTED_1000baseT_Full; ++ phydev->advertising |= ADVERTISED_1000baseT_Full; ++ return 0; ++} ++ ++static void ++ar40xx_phy_remove(struct phy_device *phydev) ++{ ++ ar40xx_priv->mii_bus = NULL; ++ phydev->priv = NULL; ++} ++ ++static int ++ar40xx_phy_config_init(struct phy_device *phydev) ++{ ++ return 0; ++} ++ ++static int ++ar40xx_phy_read_status(struct phy_device *phydev) ++{ ++ if (phydev->mdio.addr != 0) ++ return genphy_read_status(phydev); ++ ++ return 0; ++} ++ ++static int ++ar40xx_phy_config_aneg(struct phy_device *phydev) ++{ ++ if (phydev->mdio.addr == 0) ++ return 0; ++ ++ return genphy_config_aneg(phydev); ++} ++ ++static struct phy_driver ar40xx_phy_driver = { ++ .phy_id = 0x004d0000, ++ .name = "QCA Malibu", ++ .phy_id_mask = 0xffff0000, ++ .features = PHY_BASIC_FEATURES, ++ .probe = ar40xx_phy_probe, ++ .remove = ar40xx_phy_remove, ++ .config_init = ar40xx_phy_config_init, ++ .config_aneg = ar40xx_phy_config_aneg, ++ .read_status = ar40xx_phy_read_status, ++}; ++ ++static uint16_t ar40xx_gpio_get_phy(unsigned int offset) ++{ ++ return offset / 4; ++} ++ ++static uint16_t ar40xx_gpio_get_reg(unsigned int offset) ++{ ++ return 0x8074 + offset % 4; ++} ++ ++static void ar40xx_gpio_set(struct gpio_chip *gc, unsigned int offset, ++ int value) ++{ ++ struct 
ar40xx_priv *priv = gpiochip_get_data(gc); ++ ++ ar40xx_phy_mmd_write(priv, ar40xx_gpio_get_phy(offset), 0x7, ++ ar40xx_gpio_get_reg(offset), ++ value ? 0xA000 : 0x8000); ++} ++ ++static int ar40xx_gpio_get(struct gpio_chip *gc, unsigned offset) ++{ ++ struct ar40xx_priv *priv = gpiochip_get_data(gc); ++ ++ return ar40xx_phy_mmd_read(priv, ar40xx_gpio_get_phy(offset), 0x7, ++ ar40xx_gpio_get_reg(offset)) == 0xA000; ++} ++ ++static int ar40xx_gpio_get_dir(struct gpio_chip *gc, unsigned offset) ++{ ++ return 0; /* only out direction */ ++} ++ ++static int ar40xx_gpio_dir_out(struct gpio_chip *gc, unsigned offset, ++ int value) ++{ ++ /* ++ * the direction out value is used to set the initial value. ++ * support of this function is required by leds-gpio.c ++ */ ++ ar40xx_gpio_set(gc, offset, value); ++ return 0; ++} ++ ++static void ar40xx_register_gpio(struct device *pdev, ++ struct ar40xx_priv *priv, ++ struct device_node *switch_node) ++{ ++ struct gpio_chip *gc; ++ int err; ++ ++ gc = devm_kzalloc(pdev, sizeof(*gc), GFP_KERNEL); ++ if (!gc) ++ return; ++ ++ gc->label = "ar40xx_gpio", ++ gc->base = -1, ++ gc->ngpio = 5 /* mmd 0 - 4 */ * 4 /* 0x8074 - 0x8077 */, ++ gc->parent = pdev; ++ gc->owner = THIS_MODULE; ++ ++ gc->get_direction = ar40xx_gpio_get_dir; ++ gc->direction_output = ar40xx_gpio_dir_out; ++ gc->get = ar40xx_gpio_get; ++ gc->set = ar40xx_gpio_set; ++ gc->can_sleep = true; ++ gc->label = priv->dev.name; ++ gc->of_node = switch_node; ++ ++ err = devm_gpiochip_add_data(pdev, gc, priv); ++ if (err != 0) ++ dev_err(pdev, "Failed to register gpio %d.\n", err); ++} ++ ++/* End of phy driver support */ ++ ++/* Platform driver probe function */ ++ ++static int ar40xx_probe(struct platform_device *pdev) ++{ ++ struct device_node *switch_node; ++ struct device_node *psgmii_node; ++ const __be32 *mac_mode; ++ struct clk *ess_clk; ++ struct switch_dev *swdev; ++ struct ar40xx_priv *priv; ++ u32 len; ++ u32 num_mibs; ++ struct resource psgmii_base = {0}; ++ struct 
resource switch_base = {0}; ++ int ret; ++ ++ priv = devm_kzalloc(&pdev->dev, sizeof(*priv), GFP_KERNEL); ++ if (!priv) ++ return -ENOMEM; ++ ++ platform_set_drvdata(pdev, priv); ++ ar40xx_priv = priv; ++ ++ switch_node = of_node_get(pdev->dev.of_node); ++ if (of_address_to_resource(switch_node, 0, &switch_base) != 0) ++ return -EIO; ++ ++ priv->hw_addr = devm_ioremap_resource(&pdev->dev, &switch_base); ++ if (IS_ERR(priv->hw_addr)) { ++ dev_err(&pdev->dev, "Failed to ioremap switch_base!\n"); ++ return PTR_ERR(priv->hw_addr); ++ } ++ ++ /*psgmii dts get*/ ++ psgmii_node = of_find_node_by_name(NULL, "ess-psgmii"); ++ if (!psgmii_node) { ++ dev_err(&pdev->dev, "Failed to find ess-psgmii node!\n"); ++ return -EINVAL; ++ } ++ ++ if (of_address_to_resource(psgmii_node, 0, &psgmii_base) != 0) ++ return -EIO; ++ ++ priv->psgmii_hw_addr = devm_ioremap_resource(&pdev->dev, &psgmii_base); ++ if (IS_ERR(priv->psgmii_hw_addr)) { ++ dev_err(&pdev->dev, "psgmii ioremap fail!\n"); ++ return PTR_ERR(priv->psgmii_hw_addr); ++ } ++ ++ mac_mode = of_get_property(switch_node, "switch_mac_mode", &len); ++ if (!mac_mode) { ++ dev_err(&pdev->dev, "Failed to read switch_mac_mode\n"); ++ return -EINVAL; ++ } ++ priv->mac_mode = be32_to_cpup(mac_mode); ++ ++ ess_clk = of_clk_get_by_name(switch_node, "ess_clk"); ++ if (ess_clk) ++ clk_prepare_enable(ess_clk); ++ ++ priv->ess_rst = devm_reset_control_get(&pdev->dev, "ess_rst"); ++ if (IS_ERR(priv->ess_rst)) { ++ dev_err(&pdev->dev, "Failed to get ess_rst control!\n"); ++ return PTR_ERR(priv->ess_rst); ++ } ++ ++ if (of_property_read_u32(switch_node, "switch_cpu_bmp", ++ &priv->cpu_bmp) || ++ of_property_read_u32(switch_node, "switch_lan_bmp", ++ &priv->lan_bmp) || ++ of_property_read_u32(switch_node, "switch_wan_bmp", ++ &priv->wan_bmp)) { ++ dev_err(&pdev->dev, "Failed to read port properties\n"); ++ return -EIO; ++ } ++ ++ ret = phy_driver_register(&ar40xx_phy_driver, THIS_MODULE); ++ if (ret) { ++ dev_err(&pdev->dev, "Failed to register 
ar40xx phy driver!\n"); ++ return -EIO; ++ } ++ ++ mutex_init(&priv->reg_mutex); ++ mutex_init(&priv->mib_lock); ++ INIT_DELAYED_WORK(&priv->mib_work, ar40xx_mib_work_func); ++ ++ /* register switch */ ++ swdev = &priv->dev; ++ ++ swdev->alias = dev_name(&priv->mii_bus->dev); ++ ++ swdev->cpu_port = AR40XX_PORT_CPU; ++ swdev->name = "QCA AR40xx"; ++ swdev->vlans = AR40XX_MAX_VLANS; ++ swdev->ports = AR40XX_NUM_PORTS; ++ swdev->ops = &ar40xx_sw_ops; ++ ret = register_switch(swdev, NULL); ++ if (ret) ++ goto err_unregister_phy; ++ ++ num_mibs = ARRAY_SIZE(ar40xx_mibs); ++ len = priv->dev.ports * num_mibs * ++ sizeof(*priv->mib_stats); ++ priv->mib_stats = devm_kzalloc(&pdev->dev, len, GFP_KERNEL); ++ if (!priv->mib_stats) { ++ ret = -ENOMEM; ++ goto err_unregister_switch; ++ } ++ ++ ar40xx_start(priv); ++ ++ if (of_property_read_bool(switch_node, "gpio-controller")) ++ ar40xx_register_gpio(&pdev->dev, ar40xx_priv, switch_node); ++ ++ return 0; ++ ++err_unregister_switch: ++ unregister_switch(&priv->dev); ++err_unregister_phy: ++ phy_driver_unregister(&ar40xx_phy_driver); ++ platform_set_drvdata(pdev, NULL); ++ return ret; ++} ++ ++static int ar40xx_remove(struct platform_device *pdev) ++{ ++ struct ar40xx_priv *priv = platform_get_drvdata(pdev); ++ ++ cancel_delayed_work_sync(&priv->qm_dwork); ++ cancel_delayed_work_sync(&priv->mib_work); ++ ++ unregister_switch(&priv->dev); ++ ++ phy_driver_unregister(&ar40xx_phy_driver); ++ ++ return 0; ++} ++ ++static const struct of_device_id ar40xx_of_mtable[] = { ++ {.compatible = "qcom,ess-switch" }, ++ {} ++}; ++ ++struct platform_driver ar40xx_drv = { ++ .probe = ar40xx_probe, ++ .remove = ar40xx_remove, ++ .driver = { ++ .name = "ar40xx", ++ .of_match_table = ar40xx_of_mtable, ++ }, ++}; ++ ++module_platform_driver(ar40xx_drv); ++ ++MODULE_DESCRIPTION("IPQ40XX ESS driver"); ++MODULE_LICENSE("Dual BSD/GPL"); +--- /dev/null ++++ b/drivers/net/phy/ar40xx.h +@@ -0,0 +1,337 @@ ++/* ++ * Copyright (c) 2016, The Linux Foundation. 
All rights reserved. ++ * ++ * Permission to use, copy, modify, and/or distribute this software for ++ * any purpose with or without fee is hereby granted, provided that the ++ * above copyright notice and this permission notice appear in all copies. ++ * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES ++ * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF ++ * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ++ * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES ++ * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN ++ * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT ++ * OF OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. ++ */ ++ ++ #ifndef __AR40XX_H ++#define __AR40XX_H ++ ++#define AR40XX_MAX_VLANS 128 ++#define AR40XX_NUM_PORTS 6 ++#define AR40XX_NUM_PHYS 5 ++ ++#define BITS(_s, _n) (((1UL << (_n)) - 1) << _s) ++ ++struct ar40xx_priv { ++ struct switch_dev dev; ++ ++ u8 __iomem *hw_addr; ++ u8 __iomem *psgmii_hw_addr; ++ u32 mac_mode; ++ struct reset_control *ess_rst; ++ u32 cpu_bmp; ++ u32 lan_bmp; ++ u32 wan_bmp; ++ ++ struct mii_bus *mii_bus; ++ struct phy_device *phy; ++ ++ /* mutex for qm task */ ++ struct mutex qm_lock; ++ struct delayed_work qm_dwork; ++ u32 port_link_up[AR40XX_NUM_PORTS]; ++ u32 ar40xx_port_old_link[AR40XX_NUM_PORTS]; ++ u32 ar40xx_port_qm_buf[AR40XX_NUM_PORTS]; ++ ++ u32 phy_t_status; ++ ++ /* mutex for switch reg access */ ++ struct mutex reg_mutex; ++ ++ /* mutex for mib task */ ++ struct mutex mib_lock; ++ struct delayed_work mib_work; ++ int mib_next_port; ++ u64 *mib_stats; ++ ++ char buf[2048]; ++ ++ /* all fields below will be cleared on reset */ ++ bool vlan; ++ u16 vlan_id[AR40XX_MAX_VLANS]; ++ u8 vlan_table[AR40XX_MAX_VLANS]; ++ u8 vlan_tagged; ++ u16 pvid[AR40XX_NUM_PORTS]; ++ ++ /* mirror */ ++ bool mirror_rx; ++ bool mirror_tx; ++ int source_port; ++ int monitor_port; ++}; ++ 
++#define AR40XX_PORT_LINK_UP 1 ++#define AR40XX_PORT_LINK_DOWN 0 ++#define AR40XX_QM_NOT_EMPTY 1 ++#define AR40XX_QM_EMPTY 0 ++ ++#define AR40XX_LAN_VLAN 1 ++#define AR40XX_WAN_VLAN 2 ++ ++enum ar40xx_port_wrapper_cfg { ++ PORT_WRAPPER_PSGMII = 0, ++}; ++ ++struct ar40xx_mib_desc { ++ u32 size; ++ u32 offset; ++ const char *name; ++}; ++ ++#define AR40XX_PORT_CPU 0 ++ ++#define AR40XX_PSGMII_MODE_CONTROL 0x1b4 ++#define AR40XX_PSGMII_ATHR_CSCO_MODE_25M BIT(0) ++ ++#define AR40XX_PSGMIIPHY_TX_CONTROL 0x288 ++ ++#define AR40XX_MII_ATH_MMD_ADDR 0x0d ++#define AR40XX_MII_ATH_MMD_DATA 0x0e ++#define AR40XX_MII_ATH_DBG_ADDR 0x1d ++#define AR40XX_MII_ATH_DBG_DATA 0x1e ++ ++#define AR40XX_STATS_RXBROAD 0x00 ++#define AR40XX_STATS_RXPAUSE 0x04 ++#define AR40XX_STATS_RXMULTI 0x08 ++#define AR40XX_STATS_RXFCSERR 0x0c ++#define AR40XX_STATS_RXALIGNERR 0x10 ++#define AR40XX_STATS_RXRUNT 0x14 ++#define AR40XX_STATS_RXFRAGMENT 0x18 ++#define AR40XX_STATS_RX64BYTE 0x1c ++#define AR40XX_STATS_RX128BYTE 0x20 ++#define AR40XX_STATS_RX256BYTE 0x24 ++#define AR40XX_STATS_RX512BYTE 0x28 ++#define AR40XX_STATS_RX1024BYTE 0x2c ++#define AR40XX_STATS_RX1518BYTE 0x30 ++#define AR40XX_STATS_RXMAXBYTE 0x34 ++#define AR40XX_STATS_RXTOOLONG 0x38 ++#define AR40XX_STATS_RXGOODBYTE 0x3c ++#define AR40XX_STATS_RXBADBYTE 0x44 ++#define AR40XX_STATS_RXOVERFLOW 0x4c ++#define AR40XX_STATS_FILTERED 0x50 ++#define AR40XX_STATS_TXBROAD 0x54 ++#define AR40XX_STATS_TXPAUSE 0x58 ++#define AR40XX_STATS_TXMULTI 0x5c ++#define AR40XX_STATS_TXUNDERRUN 0x60 ++#define AR40XX_STATS_TX64BYTE 0x64 ++#define AR40XX_STATS_TX128BYTE 0x68 ++#define AR40XX_STATS_TX256BYTE 0x6c ++#define AR40XX_STATS_TX512BYTE 0x70 ++#define AR40XX_STATS_TX1024BYTE 0x74 ++#define AR40XX_STATS_TX1518BYTE 0x78 ++#define AR40XX_STATS_TXMAXBYTE 0x7c ++#define AR40XX_STATS_TXOVERSIZE 0x80 ++#define AR40XX_STATS_TXBYTE 0x84 ++#define AR40XX_STATS_TXCOLLISION 0x8c ++#define AR40XX_STATS_TXABORTCOL 0x90 ++#define AR40XX_STATS_TXMULTICOL 0x94 
++#define AR40XX_STATS_TXSINGLECOL 0x98 ++#define AR40XX_STATS_TXEXCDEFER 0x9c ++#define AR40XX_STATS_TXDEFER 0xa0 ++#define AR40XX_STATS_TXLATECOL 0xa4 ++ ++#define AR40XX_REG_MODULE_EN 0x030 ++#define AR40XX_MODULE_EN_MIB BIT(0) ++ ++#define AR40XX_REG_MIB_FUNC 0x034 ++#define AR40XX_MIB_BUSY BIT(17) ++#define AR40XX_MIB_CPU_KEEP BIT(20) ++#define AR40XX_MIB_FUNC BITS(24, 3) ++#define AR40XX_MIB_FUNC_S 24 ++#define AR40XX_MIB_FUNC_NO_OP 0x0 ++#define AR40XX_MIB_FUNC_FLUSH 0x1 ++ ++#define AR40XX_REG_PORT_STATUS(_i) (0x07c + (_i) * 4) ++#define AR40XX_PORT_SPEED BITS(0, 2) ++#define AR40XX_PORT_STATUS_SPEED_S 0 ++#define AR40XX_PORT_TX_EN BIT(2) ++#define AR40XX_PORT_RX_EN BIT(3) ++#define AR40XX_PORT_STATUS_TXFLOW BIT(4) ++#define AR40XX_PORT_STATUS_RXFLOW BIT(5) ++#define AR40XX_PORT_DUPLEX BIT(6) ++#define AR40XX_PORT_TXHALF_FLOW BIT(7) ++#define AR40XX_PORT_STATUS_LINK_UP BIT(8) ++#define AR40XX_PORT_AUTO_LINK_EN BIT(9) ++#define AR40XX_PORT_STATUS_FLOW_CONTROL BIT(12) ++ ++#define AR40XX_REG_MAX_FRAME_SIZE 0x078 ++#define AR40XX_MAX_FRAME_SIZE_MTU BITS(0, 14) ++ ++#define AR40XX_REG_PORT_HEADER(_i) (0x09c + (_i) * 4) ++ ++#define AR40XX_REG_EEE_CTRL 0x100 ++#define AR40XX_EEE_CTRL_DISABLE_PHY(_i) BIT(4 + (_i) * 2) ++ ++#define AR40XX_REG_PORT_VLAN0(_i) (0x420 + (_i) * 0x8) ++#define AR40XX_PORT_VLAN0_DEF_SVID BITS(0, 12) ++#define AR40XX_PORT_VLAN0_DEF_SVID_S 0 ++#define AR40XX_PORT_VLAN0_DEF_CVID BITS(16, 12) ++#define AR40XX_PORT_VLAN0_DEF_CVID_S 16 ++ ++#define AR40XX_REG_PORT_VLAN1(_i) (0x424 + (_i) * 0x8) ++#define AR40XX_PORT_VLAN1_PORT_VLAN_PROP BIT(6) ++#define AR40XX_PORT_VLAN1_OUT_MODE BITS(12, 2) ++#define AR40XX_PORT_VLAN1_OUT_MODE_S 12 ++#define AR40XX_PORT_VLAN1_OUT_MODE_UNMOD 0 ++#define AR40XX_PORT_VLAN1_OUT_MODE_UNTAG 1 ++#define AR40XX_PORT_VLAN1_OUT_MODE_TAG 2 ++#define AR40XX_PORT_VLAN1_OUT_MODE_UNTOUCH 3 ++ ++#define AR40XX_REG_VTU_FUNC0 0x0610 ++#define AR40XX_VTU_FUNC0_EG_MODE BITS(4, 14) ++#define AR40XX_VTU_FUNC0_EG_MODE_S(_i) (4 + 
(_i) * 2)
++#define AR40XX_VTU_FUNC0_EG_MODE_KEEP 0
++#define AR40XX_VTU_FUNC0_EG_MODE_UNTAG 1
++#define AR40XX_VTU_FUNC0_EG_MODE_TAG 2
++#define AR40XX_VTU_FUNC0_EG_MODE_NOT 3
++#define AR40XX_VTU_FUNC0_IVL BIT(19)
++#define AR40XX_VTU_FUNC0_VALID BIT(20)
++
++#define AR40XX_REG_VTU_FUNC1 0x0614
++#define AR40XX_VTU_FUNC1_OP BITS(0, 3)
++#define AR40XX_VTU_FUNC1_OP_NOOP 0
++#define AR40XX_VTU_FUNC1_OP_FLUSH 1
++#define AR40XX_VTU_FUNC1_OP_LOAD 2
++#define AR40XX_VTU_FUNC1_OP_PURGE 3
++#define AR40XX_VTU_FUNC1_OP_REMOVE_PORT 4
++#define AR40XX_VTU_FUNC1_OP_GET_NEXT 5
++#define AR40XX7_VTU_FUNC1_OP_GET_ONE 6
++#define AR40XX_VTU_FUNC1_FULL BIT(4)
++#define AR40XX_VTU_FUNC1_PORT BITS(8, 4)
++#define AR40XX_VTU_FUNC1_PORT_S 8
++#define AR40XX_VTU_FUNC1_VID BITS(16, 12)
++#define AR40XX_VTU_FUNC1_VID_S 16
++#define AR40XX_VTU_FUNC1_BUSY BIT(31)
++
++#define AR40XX_REG_FWD_CTRL0 0x620
++#define AR40XX_FWD_CTRL0_CPU_PORT_EN BIT(10)
++#define AR40XX_FWD_CTRL0_MIRROR_PORT BITS(4, 4)
++#define AR40XX_FWD_CTRL0_MIRROR_PORT_S 4
++
++#define AR40XX_REG_FWD_CTRL1 0x624
++#define AR40XX_FWD_CTRL1_UC_FLOOD BITS(0, 7)
++#define AR40XX_FWD_CTRL1_UC_FLOOD_S 0
++#define AR40XX_FWD_CTRL1_MC_FLOOD BITS(8, 7)
++#define AR40XX_FWD_CTRL1_MC_FLOOD_S 8
++#define AR40XX_FWD_CTRL1_BC_FLOOD BITS(16, 7)
++#define AR40XX_FWD_CTRL1_BC_FLOOD_S 16
++#define AR40XX_FWD_CTRL1_IGMP BITS(24, 7)
++#define AR40XX_FWD_CTRL1_IGMP_S 24
++
++#define AR40XX_REG_PORT_LOOKUP(_i) (0x660 + (_i) * 0xc)
++#define AR40XX_PORT_LOOKUP_MEMBER BITS(0, 7)
++#define AR40XX_PORT_LOOKUP_IN_MODE BITS(8, 2)
++#define AR40XX_PORT_LOOKUP_IN_MODE_S 8
++#define AR40XX_PORT_LOOKUP_STATE BITS(16, 3)
++#define AR40XX_PORT_LOOKUP_STATE_S 16
++#define AR40XX_PORT_LOOKUP_LEARN BIT(20)
++#define AR40XX_PORT_LOOKUP_LOOPBACK BIT(21)
++#define AR40XX_PORT_LOOKUP_ING_MIRROR_EN BIT(25)
++
++#define AR40XX_REG_ATU_FUNC 0x60c
++#define AR40XX_ATU_FUNC_OP BITS(0, 4)
++#define AR40XX_ATU_FUNC_OP_NOOP 0x0
++#define AR40XX_ATU_FUNC_OP_FLUSH 0x1
++#define AR40XX_ATU_FUNC_OP_LOAD 0x2 ++#define AR40XX_ATU_FUNC_OP_PURGE 0x3 ++#define AR40XX_ATU_FUNC_OP_FLUSH_LOCKED 0x4 ++#define AR40XX_ATU_FUNC_OP_FLUSH_UNICAST 0x5 ++#define AR40XX_ATU_FUNC_OP_GET_NEXT 0x6 ++#define AR40XX_ATU_FUNC_OP_SEARCH_MAC 0x7 ++#define AR40XX_ATU_FUNC_OP_CHANGE_TRUNK 0x8 ++#define AR40XX_ATU_FUNC_BUSY BIT(31) ++ ++#define AR40XX_REG_QM_DEBUG_ADDR 0x820 ++#define AR40XX_REG_QM_DEBUG_VALUE 0x824 ++#define AR40XX_REG_QM_PORT0_3_QNUM 0x1d ++#define AR40XX_REG_QM_PORT4_6_QNUM 0x1e ++ ++#define AR40XX_REG_PORT_HOL_CTRL1(_i) (0x974 + (_i) * 0x8) ++#define AR40XX_PORT_HOL_CTRL1_EG_MIRROR_EN BIT(16) ++ ++#define AR40XX_REG_PORT_FLOWCTRL_THRESH(_i) (0x9b0 + (_i) * 0x4) ++#define AR40XX_PORT0_FC_THRESH_ON_DFLT 0x60 ++#define AR40XX_PORT0_FC_THRESH_OFF_DFLT 0x90 ++ ++#define AR40XX_PHY_DEBUG_0 0 ++#define AR40XX_PHY_MANU_CTRL_EN BIT(12) ++ ++#define AR40XX_PHY_DEBUG_2 2 ++ ++#define AR40XX_PHY_SPEC_STATUS 0x11 ++#define AR40XX_PHY_SPEC_STATUS_LINK BIT(10) ++#define AR40XX_PHY_SPEC_STATUS_DUPLEX BIT(13) ++#define AR40XX_PHY_SPEC_STATUS_SPEED BITS(14, 2) ++ ++/* port forwarding state */ ++enum { ++ AR40XX_PORT_STATE_DISABLED = 0, ++ AR40XX_PORT_STATE_BLOCK = 1, ++ AR40XX_PORT_STATE_LISTEN = 2, ++ AR40XX_PORT_STATE_LEARN = 3, ++ AR40XX_PORT_STATE_FORWARD = 4 ++}; ++ ++/* ingress 802.1q mode */ ++enum { ++ AR40XX_IN_PORT_ONLY = 0, ++ AR40XX_IN_PORT_FALLBACK = 1, ++ AR40XX_IN_VLAN_ONLY = 2, ++ AR40XX_IN_SECURE = 3 ++}; ++ ++/* egress 802.1q mode */ ++enum { ++ AR40XX_OUT_KEEP = 0, ++ AR40XX_OUT_STRIP_VLAN = 1, ++ AR40XX_OUT_ADD_VLAN = 2 ++}; ++ ++/* port speed */ ++enum { ++ AR40XX_PORT_SPEED_10M = 0, ++ AR40XX_PORT_SPEED_100M = 1, ++ AR40XX_PORT_SPEED_1000M = 2, ++ AR40XX_PORT_SPEED_ERR = 3, ++}; ++ ++#define AR40XX_MIB_WORK_DELAY 2000 /* msecs */ ++ ++#define AR40XX_QM_WORK_DELAY 100 ++ ++#define AR40XX_MIB_FUNC_CAPTURE 0x3 ++ ++#define AR40XX_REG_PORT_STATS_START 0x1000 ++#define AR40XX_REG_PORT_STATS_LEN 0x100 ++ ++#define AR40XX_PORTS_ALL 0x3f ++ 
++#define AR40XX_PSGMII_ID 5 ++#define AR40XX_PSGMII_CALB_NUM 100 ++#define AR40XX_MALIBU_PSGMII_MODE_CTRL 0x6d ++#define AR40XX_MALIBU_PHY_PSGMII_MODE_CTRL_ADJUST_VAL 0x220c ++#define AR40XX_MALIBU_PHY_MMD7_DAC_CTRL 0x801a ++#define AR40XX_MALIBU_DAC_CTRL_MASK 0x380 ++#define AR40XX_MALIBU_DAC_CTRL_VALUE 0x280 ++#define AR40XX_MALIBU_PHY_RLP_CTRL 0x805a ++#define AR40XX_PSGMII_TX_DRIVER_1_CTRL 0xb ++#define AR40XX_MALIBU_PHY_PSGMII_REDUCE_SERDES_TX_AMP 0x8a ++#define AR40XX_MALIBU_PHY_LAST_ADDR 4 ++ ++static inline struct ar40xx_priv * ++swdev_to_ar40xx(struct switch_dev *swdev) ++{ ++ return container_of(swdev, struct ar40xx_priv, dev); ++} ++ ++#endif +--- /dev/null ++++ b/drivers/net/phy/mdio-ipq40xx.c +@@ -0,0 +1,203 @@ ++/* ++ * Copyright (c) 2015-2016, The Linux Foundation. All rights reserved. ++ * ++ * Permission to use, copy, modify, and/or distribute this software for ++ * any purpose with or without fee is hereby granted, provided that the ++ * above copyright notice and this permission notice appear in all copies. ++ * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES ++ * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF ++ * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ++ * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES ++ * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN ++ * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT ++ * OF OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. 
++ */
++
++#include <linux/delay.h>
++#include <linux/kernel.h>
++#include <linux/module.h>
++#include <linux/mutex.h>
++#include <linux/io.h>
++#include <linux/of_address.h>
++#include <linux/of_mdio.h>
++#include <linux/phy.h>
++#include <linux/platform_device.h>
++
++#define MDIO_CTRL_0_REG 0x40
++#define MDIO_CTRL_1_REG 0x44
++#define MDIO_CTRL_2_REG 0x48
++#define MDIO_CTRL_3_REG 0x4c
++#define MDIO_CTRL_4_REG 0x50
++#define MDIO_CTRL_4_ACCESS_BUSY BIT(16)
++#define MDIO_CTRL_4_ACCESS_START BIT(8)
++#define MDIO_CTRL_4_ACCESS_CODE_READ 0
++#define MDIO_CTRL_4_ACCESS_CODE_WRITE 1
++#define CTRL_0_REG_DEFAULT_VALUE 0x150FF
++
++#define IPQ40XX_MDIO_RETRY 1000
++#define IPQ40XX_MDIO_DELAY 10
++
++struct ipq40xx_mdio_data {
++	struct mii_bus *mii_bus;
++	void __iomem *membase;
++	int phy_irq[PHY_MAX_ADDR];
++	struct device *dev;
++};
++
++static int ipq40xx_mdio_wait_busy(struct ipq40xx_mdio_data *am)
++{
++	int i;
++
++	for (i = 0; i < IPQ40XX_MDIO_RETRY; i++) {
++		unsigned int busy;
++
++		busy = readl(am->membase + MDIO_CTRL_4_REG) &
++			MDIO_CTRL_4_ACCESS_BUSY;
++		if (!busy)
++			return 0;
++
++		/* BUSY might take to be cleared by 15~20 times of loop */
++		udelay(IPQ40XX_MDIO_DELAY);
++	}
++
++	dev_err(am->dev, "%s: MDIO operation timed out\n", am->mii_bus->name);
++
++	return -ETIMEDOUT;
++}
++
++static int ipq40xx_mdio_read(struct mii_bus *bus, int mii_id, int regnum)
++{
++	struct ipq40xx_mdio_data *am = bus->priv;
++	int value = 0;
++	unsigned int cmd = 0;
++
++	lockdep_assert_held(&bus->mdio_lock);
++
++	if (ipq40xx_mdio_wait_busy(am))
++		return -ETIMEDOUT;
++
++	/* issue the phy address and reg */
++	writel((mii_id << 8) | regnum, am->membase + MDIO_CTRL_1_REG);
++
++	cmd = MDIO_CTRL_4_ACCESS_START|MDIO_CTRL_4_ACCESS_CODE_READ;
++
++	/* issue read command */
++	writel(cmd, am->membase + MDIO_CTRL_4_REG);
++
++	/* Wait read complete */
++	if (ipq40xx_mdio_wait_busy(am))
++		return -ETIMEDOUT;
++
++	/* Read data */
++	value = readl(am->membase + MDIO_CTRL_3_REG);
++
++	return value;
++}
++
++static int ipq40xx_mdio_write(struct mii_bus *bus, int mii_id, int regnum,
++			u16 value)
++{
++	struct ipq40xx_mdio_data *am =
bus->priv; ++ unsigned int cmd = 0; ++ ++ lockdep_assert_held(&bus->mdio_lock); ++ ++ if (ipq40xx_mdio_wait_busy(am)) ++ return -ETIMEDOUT; ++ ++ /* issue the phy address and reg */ ++ writel((mii_id << 8) | regnum, am->membase + MDIO_CTRL_1_REG); ++ ++ /* issue write data */ ++ writel(value, am->membase + MDIO_CTRL_2_REG); ++ ++ cmd = MDIO_CTRL_4_ACCESS_START|MDIO_CTRL_4_ACCESS_CODE_WRITE; ++ /* issue write command */ ++ writel(cmd, am->membase + MDIO_CTRL_4_REG); ++ ++ /* Wait write complete */ ++ if (ipq40xx_mdio_wait_busy(am)) ++ return -ETIMEDOUT; ++ ++ return 0; ++} ++ ++static int ipq40xx_mdio_probe(struct platform_device *pdev) ++{ ++ struct ipq40xx_mdio_data *am; ++ struct resource *res; ++ int i; ++ ++ am = devm_kzalloc(&pdev->dev, sizeof(*am), GFP_KERNEL); ++ if (!am) ++ return -ENOMEM; ++ ++ res = platform_get_resource(pdev, IORESOURCE_MEM, 0); ++ if (!res) { ++ dev_err(&pdev->dev, "no iomem resource found\n"); ++ return -ENXIO; ++ } ++ ++ am->membase = devm_ioremap_resource(&pdev->dev, res); ++ if (IS_ERR(am->membase)) { ++ dev_err(&pdev->dev, "unable to ioremap registers\n"); ++ return PTR_ERR(am->membase); ++ } ++ ++ am->mii_bus = devm_mdiobus_alloc(&pdev->dev); ++ if (!am->mii_bus) ++ return -ENOMEM; ++ ++ writel(CTRL_0_REG_DEFAULT_VALUE, am->membase + MDIO_CTRL_0_REG); ++ ++ am->mii_bus->name = "ipq40xx_mdio"; ++ am->mii_bus->read = ipq40xx_mdio_read; ++ am->mii_bus->write = ipq40xx_mdio_write; ++ memcpy(am->mii_bus->irq, am->phy_irq, sizeof(am->phy_irq)); ++ am->mii_bus->priv = am; ++ am->mii_bus->parent = &pdev->dev; ++ snprintf(am->mii_bus->id, MII_BUS_ID_SIZE, "%s", dev_name(&pdev->dev)); ++ ++ for (i = 0; i < PHY_MAX_ADDR; i++) ++ am->phy_irq[i] = PHY_POLL; ++ ++ am->dev = &pdev->dev; ++ platform_set_drvdata(pdev, am); ++ ++ /* edma_axi_probe() use "am" drvdata. ++ * ipq40xx_mdio_probe() must be called first. 
++ */ ++ return of_mdiobus_register(am->mii_bus, pdev->dev.of_node); ++} ++ ++static int ipq40xx_mdio_remove(struct platform_device *pdev) ++{ ++ struct ipq40xx_mdio_data *am = platform_get_drvdata(pdev); ++ ++ mdiobus_unregister(am->mii_bus); ++ return 0; ++} ++ ++static const struct of_device_id ipq40xx_mdio_dt_ids[] = { ++ { .compatible = "qcom,ipq4019-mdio" }, ++ { } ++}; ++MODULE_DEVICE_TABLE(of, ipq40xx_mdio_dt_ids); ++ ++static struct platform_driver ipq40xx_mdio_driver = { ++ .probe = ipq40xx_mdio_probe, ++ .remove = ipq40xx_mdio_remove, ++ .driver = { ++ .name = "ipq40xx-mdio", ++ .of_match_table = ipq40xx_mdio_dt_ids, ++ }, ++}; ++ ++module_platform_driver(ipq40xx_mdio_driver); ++ ++#define DRV_VERSION "1.0" ++ ++MODULE_DESCRIPTION("IPQ40XX MDIO interface driver"); ++MODULE_AUTHOR("Qualcomm Atheros"); ++MODULE_VERSION(DRV_VERSION); ++MODULE_LICENSE("Dual BSD/GPL"); diff --git a/target/linux/ipq40xx/patches-4.9/701-dts-ipq4019-add-mdio-node.patch b/target/linux/ipq40xx/patches-4.9/701-dts-ipq4019-add-mdio-node.patch new file mode 100644 index 000000000..676da7214 --- /dev/null +++ b/target/linux/ipq40xx/patches-4.9/701-dts-ipq4019-add-mdio-node.patch @@ -0,0 +1,52 @@ +From 09ed737593f71bcca08a537a6c15264a1a6add08 Mon Sep 17 00:00:00 2001 +From: Christian Lamparter +Date: Sun, 20 Nov 2016 01:10:33 +0100 +Subject: [PATCH] dts: ipq4019: add mdio node for ethernet + +This patch adds the mdio device-tree node. +This is where the switch is connected to, so it's needed +for the ethernet interfaces. + +Note: The driver isn't anywhere close to be upstream, +so the info might change. 
+--- + arch/arm/boot/dts/qcom-ipq4019.dtsi | 28 ++++++++++++++++++++++++++++ + 1 file changed, 28 insertions(+) + +--- a/arch/arm/boot/dts/qcom-ipq4019.dtsi ++++ b/arch/arm/boot/dts/qcom-ipq4019.dtsi +@@ -315,6 +315,34 @@ + reg = <0x4ab000 0x4>; + }; + ++ mdio@90000 { ++ #address-cells = <1>; ++ #size-cells = <0>; ++ compatible = "qcom,ipq4019-mdio"; ++ reg = <0x90000 0x64>; ++ status = "disabled"; ++ ++ ethernet-phy@0 { ++ reg = <0>; ++ }; ++ ++ ethernet-phy@1 { ++ reg = <1>; ++ }; ++ ++ ethernet-phy@2 { ++ reg = <2>; ++ }; ++ ++ ethernet-phy@3 { ++ reg = <3>; ++ }; ++ ++ ethernet-phy@4 { ++ reg = <4>; ++ }; ++ }; ++ + usb3_ss_phy: ssphy@9a000 { + compatible = "qca,uni-ssphy"; + reg = <0x9a000 0x800>; diff --git a/target/linux/ipq40xx/patches-4.9/702-dts-ipq4019-add-PHY-switch-nodes.patch b/target/linux/ipq40xx/patches-4.9/702-dts-ipq4019-add-PHY-switch-nodes.patch new file mode 100644 index 000000000..79031d398 --- /dev/null +++ b/target/linux/ipq40xx/patches-4.9/702-dts-ipq4019-add-PHY-switch-nodes.patch @@ -0,0 +1,46 @@ +From 9deeec35dd3b628b95624e41d4e04acf728991ba Mon Sep 17 00:00:00 2001 +From: Christian Lamparter +Date: Sun, 20 Nov 2016 02:20:54 +0100 +Subject: [PATCH] dts: ipq4019: add PHY/switch nodes + +This patch adds both the "qcom,ess-switch" and "qcom,ess-psgmii" +nodes which are needed for the ar40xx.c driver to initialize the +switch. 
+ +Signed-off-by: Christian Lamparter +--- + arch/arm/boot/dts/qcom-ipq4019.dtsi | 23 +++++++++++++++++++++++ + 1 file changed, 23 insertions(+) + +--- a/arch/arm/boot/dts/qcom-ipq4019.dtsi ++++ b/arch/arm/boot/dts/qcom-ipq4019.dtsi +@@ -343,6 +343,29 @@ + }; + }; + ++ ess-switch@c000000 { ++ compatible = "qcom,ess-switch"; ++ reg = <0xc000000 0x80000>; ++ switch_access_mode = "local bus"; ++ resets = <&gcc ESS_RESET>; ++ reset-names = "ess_rst"; ++ clocks = <&gcc GCC_ESS_CLK>; ++ clock-names = "ess_clk"; ++ switch_cpu_bmp = <0x1>; ++ switch_lan_bmp = <0x1e>; ++ switch_wan_bmp = <0x20>; ++ switch_mac_mode = <0>; /* PORT_WRAPPER_PSGMII */ ++ switch_initvlas = <0x7c 0x54>; ++ status = "disabled"; ++ }; ++ ++ ess-psgmii@98000 { ++ compatible = "qcom,ess-psgmii"; ++ reg = <0x98000 0x800>; ++ psgmii_access_mode = "local bus"; ++ status = "disabled"; ++ }; ++ + usb3_ss_phy: ssphy@9a000 { + compatible = "qca,uni-ssphy"; + reg = <0x9a000 0x800>; diff --git a/target/linux/ipq40xx/patches-4.9/710-net-add-qualcomm-essedma-ethernet-driver.patch b/target/linux/ipq40xx/patches-4.9/710-net-add-qualcomm-essedma-ethernet-driver.patch new file mode 100644 index 000000000..eb84124b5 --- /dev/null +++ b/target/linux/ipq40xx/patches-4.9/710-net-add-qualcomm-essedma-ethernet-driver.patch @@ -0,0 +1,4602 @@ +From 12e9319da1adacac92930c899c99f0e1970cac11 Mon Sep 17 00:00:00 2001 +From: Christian Lamparter +Date: Thu, 19 Jan 2017 02:01:31 +0100 +Subject: [PATCH 33/38] NET: add qualcomm essedma ethernet driver + +Signed-off-by: Christian Lamparter +--- + drivers/net/ethernet/qualcomm/Kconfig | 9 +++++++++ + drivers/net/ethernet/qualcomm/Makefile | 1 + + 2 files changed, 10 insertions(+) + +--- a/drivers/net/ethernet/qualcomm/Kconfig ++++ b/drivers/net/ethernet/qualcomm/Kconfig +@@ -37,4 +37,13 @@ config QCOM_EMAC + low power, Receive-Side Scaling (RSS), and IEEE 1588-2008 + Precision Clock Synchronization Protocol. 
+ ++config ESSEDMA ++ tristate "Qualcomm Atheros ESS Edma support" ++ ---help--- ++ This driver supports ethernet edma adapter. ++ Say Y to build this driver. ++ ++ To compile this driver as a module, choose M here. The module ++ will be called essedma.ko. ++ + endif # NET_VENDOR_QUALCOMM +--- a/drivers/net/ethernet/qualcomm/Makefile ++++ b/drivers/net/ethernet/qualcomm/Makefile +@@ -6,3 +6,4 @@ obj-$(CONFIG_QCA7000) += qcaspi.o + qcaspi-objs := qca_spi.o qca_framing.o qca_7k.o qca_debug.o + + obj-y += emac/ ++obj-$(CONFIG_ESSEDMA) += essedma/ +--- /dev/null ++++ b/drivers/net/ethernet/qualcomm/essedma/Makefile +@@ -0,0 +1,9 @@ ++# ++## Makefile for the Qualcomm Atheros ethernet edma driver ++# ++ ++ ++obj-$(CONFIG_ESSEDMA) += essedma.o ++ ++essedma-objs := edma_axi.o edma.o edma_ethtool.o ++ +--- /dev/null ++++ b/drivers/net/ethernet/qualcomm/essedma/edma.c +@@ -0,0 +1,2168 @@ ++/* ++ * Copyright (c) 2014 - 2016, The Linux Foundation. All rights reserved. ++ * ++ * Permission to use, copy, modify, and/or distribute this software for ++ * any purpose with or without fee is hereby granted, provided that the ++ * above copyright notice and this permission notice appear in all copies. ++ * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES ++ * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF ++ * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ++ * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES ++ * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN ++ * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT ++ * OF OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. 
++ */ ++ ++#include ++#include ++#include "ess_edma.h" ++#include "edma.h" ++ ++extern struct net_device *edma_netdev[EDMA_MAX_PORTID_SUPPORTED]; ++bool edma_stp_rstp; ++u16 edma_ath_eth_type; ++ ++/* edma_skb_priority_offset() ++ * get edma skb priority ++ */ ++static unsigned int edma_skb_priority_offset(struct sk_buff *skb) ++{ ++ return (skb->priority >> 2) & 1; ++} ++ ++/* edma_alloc_tx_ring() ++ * Allocate Tx descriptors ring ++ */ ++static int edma_alloc_tx_ring(struct edma_common_info *edma_cinfo, ++ struct edma_tx_desc_ring *etdr) ++{ ++ struct platform_device *pdev = edma_cinfo->pdev; ++ ++ /* Initialize ring */ ++ etdr->size = sizeof(struct edma_sw_desc) * etdr->count; ++ etdr->sw_next_to_fill = 0; ++ etdr->sw_next_to_clean = 0; ++ ++ /* Allocate SW descriptors */ ++ etdr->sw_desc = vzalloc(etdr->size); ++ if (!etdr->sw_desc) { ++ dev_err(&pdev->dev, "buffer alloc of tx ring failed=%p", etdr); ++ return -ENOMEM; ++ } ++ ++ /* Allocate HW descriptors */ ++ etdr->hw_desc = dma_alloc_coherent(&pdev->dev, etdr->size, &etdr->dma, ++ GFP_KERNEL); ++ if (!etdr->hw_desc) { ++ dev_err(&pdev->dev, "descriptor allocation for tx ring failed"); ++ vfree(etdr->sw_desc); ++ return -ENOMEM; ++ } ++ ++ return 0; ++} ++ ++/* edma_free_tx_ring() ++ * Free tx rings allocated by edma_alloc_tx_rings ++ */ ++static void edma_free_tx_ring(struct edma_common_info *edma_cinfo, ++ struct edma_tx_desc_ring *etdr) ++{ ++ struct platform_device *pdev = edma_cinfo->pdev; ++ ++ if (likely(etdr->dma)) ++ dma_free_coherent(&pdev->dev, etdr->size, etdr->hw_desc, ++ etdr->dma); ++ ++ vfree(etdr->sw_desc); ++ etdr->sw_desc = NULL; ++} ++ ++/* edma_alloc_rx_ring() ++ * allocate rx descriptor ring ++ */ ++static int edma_alloc_rx_ring(struct edma_common_info *edma_cinfo, ++ struct edma_rfd_desc_ring *erxd) ++{ ++ struct platform_device *pdev = edma_cinfo->pdev; ++ ++ erxd->size = sizeof(struct edma_sw_desc) * erxd->count; ++ erxd->sw_next_to_fill = 0; ++ erxd->sw_next_to_clean = 0; ++ ++ /* 
Allocate SW descriptors */ ++ erxd->sw_desc = vzalloc(erxd->size); ++ if (!erxd->sw_desc) ++ return -ENOMEM; ++ ++ /* Alloc HW descriptors */ ++ erxd->hw_desc = dma_alloc_coherent(&pdev->dev, erxd->size, &erxd->dma, ++ GFP_KERNEL); ++ if (!erxd->hw_desc) { ++ vfree(erxd->sw_desc); ++ return -ENOMEM; ++ } ++ ++ return 0; ++} ++ ++/* edma_free_rx_ring() ++ * Free rx ring allocated by alloc_rx_ring ++ */ ++static void edma_free_rx_ring(struct edma_common_info *edma_cinfo, ++ struct edma_rfd_desc_ring *rxdr) ++{ ++ struct platform_device *pdev = edma_cinfo->pdev; ++ ++ if (likely(rxdr->dma)) ++ dma_free_coherent(&pdev->dev, rxdr->size, rxdr->hw_desc, ++ rxdr->dma); ++ ++ vfree(rxdr->sw_desc); ++ rxdr->sw_desc = NULL; ++} ++ ++/* edma_configure_tx() ++ * Configure transmission control data ++ */ ++static void edma_configure_tx(struct edma_common_info *edma_cinfo) ++{ ++ u32 txq_ctrl_data; ++ ++ txq_ctrl_data = (EDMA_TPD_BURST << EDMA_TXQ_NUM_TPD_BURST_SHIFT); ++ txq_ctrl_data |= EDMA_TXQ_CTRL_TPD_BURST_EN; ++ txq_ctrl_data |= (EDMA_TXF_BURST << EDMA_TXQ_TXF_BURST_NUM_SHIFT); ++ edma_write_reg(EDMA_REG_TXQ_CTRL, txq_ctrl_data); ++} ++ ++ ++/* edma_configure_rx() ++ * configure reception control data ++ */ ++static void edma_configure_rx(struct edma_common_info *edma_cinfo) ++{ ++ struct edma_hw *hw = &edma_cinfo->hw; ++ u32 rss_type, rx_desc1, rxq_ctrl_data; ++ ++ /* Set RSS type */ ++ rss_type = hw->rss_type; ++ edma_write_reg(EDMA_REG_RSS_TYPE, rss_type); ++ ++ /* Set RFD burst number */ ++ rx_desc1 = (EDMA_RFD_BURST << EDMA_RXQ_RFD_BURST_NUM_SHIFT); ++ ++ /* Set RFD prefetch threshold */ ++ rx_desc1 |= (EDMA_RFD_THR << EDMA_RXQ_RFD_PF_THRESH_SHIFT); ++ ++ /* Set RFD in host ring low threshold to generte interrupt */ ++ rx_desc1 |= (EDMA_RFD_LTHR << EDMA_RXQ_RFD_LOW_THRESH_SHIFT); ++ edma_write_reg(EDMA_REG_RX_DESC1, rx_desc1); ++ ++ /* Set Rx FIFO threshold to start to DMA data to host */ ++ rxq_ctrl_data = EDMA_FIFO_THRESH_128_BYTE; ++ ++ /* Set RX remove vlan bit */ 
++ rxq_ctrl_data |= EDMA_RXQ_CTRL_RMV_VLAN; ++ ++ edma_write_reg(EDMA_REG_RXQ_CTRL, rxq_ctrl_data); ++} ++ ++/* edma_alloc_rx_buf() ++ * does skb allocation for the received packets. ++ */ ++static int edma_alloc_rx_buf(struct edma_common_info ++ *edma_cinfo, ++ struct edma_rfd_desc_ring *erdr, ++ int cleaned_count, int queue_id) ++{ ++ struct platform_device *pdev = edma_cinfo->pdev; ++ struct edma_rx_free_desc *rx_desc; ++ struct edma_sw_desc *sw_desc; ++ struct sk_buff *skb; ++ unsigned int i; ++ u16 prod_idx, length; ++ u32 reg_data; ++ ++ if (cleaned_count > erdr->count) { ++ dev_err(&pdev->dev, "Incorrect cleaned_count %d", ++ cleaned_count); ++ return -1; ++ } ++ ++ i = erdr->sw_next_to_fill; ++ ++ while (cleaned_count) { ++ sw_desc = &erdr->sw_desc[i]; ++ length = edma_cinfo->rx_head_buffer_len; ++ ++ if (sw_desc->flags & EDMA_SW_DESC_FLAG_SKB_REUSE) { ++ skb = sw_desc->skb; ++ } else { ++ /* alloc skb */ ++ skb = netdev_alloc_skb(edma_netdev[0], length); ++ if (!skb) { ++ /* Better luck next round */ ++ break; ++ } ++ } ++ ++ if (edma_cinfo->page_mode) { ++ struct page *pg = alloc_page(GFP_ATOMIC); ++ ++ if (!pg) { ++ dev_kfree_skb_any(skb); ++ break; ++ } ++ ++ sw_desc->dma = dma_map_page(&pdev->dev, pg, 0, ++ edma_cinfo->rx_page_buffer_len, ++ DMA_FROM_DEVICE); ++ if (dma_mapping_error(&pdev->dev, ++ sw_desc->dma)) { ++ __free_page(pg); ++ dev_kfree_skb_any(skb); ++ break; ++ } ++ ++ skb_fill_page_desc(skb, 0, pg, 0, ++ edma_cinfo->rx_page_buffer_len); ++ sw_desc->flags = EDMA_SW_DESC_FLAG_SKB_FRAG; ++ sw_desc->length = edma_cinfo->rx_page_buffer_len; ++ } else { ++ sw_desc->dma = dma_map_single(&pdev->dev, skb->data, ++ length, DMA_FROM_DEVICE); ++ if (dma_mapping_error(&pdev->dev, ++ sw_desc->dma)) { ++ dev_kfree_skb_any(skb); ++ break; ++ } ++ ++ sw_desc->flags = EDMA_SW_DESC_FLAG_SKB_HEAD; ++ sw_desc->length = length; ++ } ++ ++ /* Update the buffer info */ ++ sw_desc->skb = skb; ++ rx_desc = (&((struct edma_rx_free_desc *)(erdr->hw_desc))[i]); ++ 
rx_desc->buffer_addr = cpu_to_le64(sw_desc->dma); ++ if (++i == erdr->count) ++ i = 0; ++ cleaned_count--; ++ } ++ ++ erdr->sw_next_to_fill = i; ++ ++ if (i == 0) ++ prod_idx = erdr->count - 1; ++ else ++ prod_idx = i - 1; ++ ++ /* Update the producer index */ ++ edma_read_reg(EDMA_REG_RFD_IDX_Q(queue_id), ®_data); ++ reg_data &= ~EDMA_RFD_PROD_IDX_BITS; ++ reg_data |= prod_idx; ++ edma_write_reg(EDMA_REG_RFD_IDX_Q(queue_id), reg_data); ++ return cleaned_count; ++} ++ ++/* edma_init_desc() ++ * update descriptor ring size, buffer and producer/consumer index ++ */ ++static void edma_init_desc(struct edma_common_info *edma_cinfo) ++{ ++ struct edma_rfd_desc_ring *rfd_ring; ++ struct edma_tx_desc_ring *etdr; ++ int i = 0, j = 0; ++ u32 data = 0; ++ u16 hw_cons_idx = 0; ++ ++ /* Set the base address of every TPD ring. */ ++ for (i = 0; i < edma_cinfo->num_tx_queues; i++) { ++ etdr = edma_cinfo->tpd_ring[i]; ++ ++ /* Update descriptor ring base address */ ++ edma_write_reg(EDMA_REG_TPD_BASE_ADDR_Q(i), (u32)etdr->dma); ++ edma_read_reg(EDMA_REG_TPD_IDX_Q(i), &data); ++ ++ /* Calculate hardware consumer index */ ++ hw_cons_idx = (data >> EDMA_TPD_CONS_IDX_SHIFT) & 0xffff; ++ etdr->sw_next_to_fill = hw_cons_idx; ++ etdr->sw_next_to_clean = hw_cons_idx; ++ data &= ~(EDMA_TPD_PROD_IDX_MASK << EDMA_TPD_PROD_IDX_SHIFT); ++ data |= hw_cons_idx; ++ ++ /* update producer index */ ++ edma_write_reg(EDMA_REG_TPD_IDX_Q(i), data); ++ ++ /* update SW consumer index register */ ++ edma_write_reg(EDMA_REG_TX_SW_CONS_IDX_Q(i), hw_cons_idx); ++ ++ /* Set TPD ring size */ ++ edma_write_reg(EDMA_REG_TPD_RING_SIZE, ++ edma_cinfo->tx_ring_count & ++ EDMA_TPD_RING_SIZE_MASK); ++ } ++ ++ for (i = 0, j = 0; i < edma_cinfo->num_rx_queues; i++) { ++ rfd_ring = edma_cinfo->rfd_ring[j]; ++ /* Update Receive Free descriptor ring base address */ ++ edma_write_reg(EDMA_REG_RFD_BASE_ADDR_Q(j), ++ (u32)(rfd_ring->dma)); ++ j += ((edma_cinfo->num_rx_queues == 4) ? 
2 : 1); ++ } ++ ++ data = edma_cinfo->rx_head_buffer_len; ++ if (edma_cinfo->page_mode) ++ data = edma_cinfo->rx_page_buffer_len; ++ ++ data &= EDMA_RX_BUF_SIZE_MASK; ++ data <<= EDMA_RX_BUF_SIZE_SHIFT; ++ ++ /* Update RFD ring size and RX buffer size */ ++ data |= (edma_cinfo->rx_ring_count & EDMA_RFD_RING_SIZE_MASK) ++ << EDMA_RFD_RING_SIZE_SHIFT; ++ ++ edma_write_reg(EDMA_REG_RX_DESC0, data); ++ ++ /* Disable TX FIFO low watermark and high watermark */ ++ edma_write_reg(EDMA_REG_TXF_WATER_MARK, 0); ++ ++ /* Load all of base address above */ ++ edma_read_reg(EDMA_REG_TX_SRAM_PART, &data); ++ data |= 1 << EDMA_LOAD_PTR_SHIFT; ++ edma_write_reg(EDMA_REG_TX_SRAM_PART, data); ++} ++ ++/* edma_receive_checksum ++ * Api to check checksum on receive packets ++ */ ++static void edma_receive_checksum(struct edma_rx_return_desc *rd, ++ struct sk_buff *skb) ++{ ++ skb_checksum_none_assert(skb); ++ ++ /* check the RRD IP/L4 checksum bit to see if ++ * its set, which in turn indicates checksum ++ * failure. 
++ */ ++ if (rd->rrd6 & EDMA_RRD_CSUM_FAIL_MASK) ++ return; ++ ++ skb->ip_summed = CHECKSUM_UNNECESSARY; ++} ++ ++/* edma_clean_rfd() ++ * clean up rx resourcers on error ++ */ ++static void edma_clean_rfd(struct edma_rfd_desc_ring *erdr, u16 index) ++{ ++ struct edma_rx_free_desc *rx_desc; ++ struct edma_sw_desc *sw_desc; ++ ++ rx_desc = (&((struct edma_rx_free_desc *)(erdr->hw_desc))[index]); ++ sw_desc = &erdr->sw_desc[index]; ++ if (sw_desc->skb) { ++ dev_kfree_skb_any(sw_desc->skb); ++ sw_desc->skb = NULL; ++ } ++ ++ memset(rx_desc, 0, sizeof(struct edma_rx_free_desc)); ++} ++ ++/* edma_rx_complete_fraglist() ++ * Complete Rx processing for fraglist skbs ++ */ ++static void edma_rx_complete_stp_rstp(struct sk_buff *skb, int port_id, struct edma_rx_return_desc *rd) ++{ ++ int i; ++ u32 priority; ++ u16 port_type; ++ u8 mac_addr[EDMA_ETH_HDR_LEN]; ++ ++ port_type = (rd->rrd1 >> EDMA_RRD_PORT_TYPE_SHIFT) ++ & EDMA_RRD_PORT_TYPE_MASK; ++ /* if port type is 0x4, then only proceed with ++ * other stp/rstp calculation ++ */ ++ if (port_type == EDMA_RX_ATH_HDR_RSTP_PORT_TYPE) { ++ u8 bpdu_mac[6] = {0x01, 0x80, 0xc2, 0x00, 0x00, 0x00}; ++ ++ /* calculate the frame priority */ ++ priority = (rd->rrd1 >> EDMA_RRD_PRIORITY_SHIFT) ++ & EDMA_RRD_PRIORITY_MASK; ++ ++ for (i = 0; i < EDMA_ETH_HDR_LEN; i++) ++ mac_addr[i] = skb->data[i]; ++ ++ /* Check if destination mac addr is bpdu addr */ ++ if (!memcmp(mac_addr, bpdu_mac, 6)) { ++ /* destination mac address is BPDU ++ * destination mac address, then add ++ * atheros header to the packet. 
++ */ ++ u16 athr_hdr = (EDMA_RX_ATH_HDR_VERSION << EDMA_RX_ATH_HDR_VERSION_SHIFT) | ++ (priority << EDMA_RX_ATH_HDR_PRIORITY_SHIFT) | ++ (EDMA_RX_ATH_HDR_RSTP_PORT_TYPE << EDMA_RX_ATH_PORT_TYPE_SHIFT) | port_id; ++ skb_push(skb, 4); ++ memcpy(skb->data, mac_addr, EDMA_ETH_HDR_LEN); ++ *(uint16_t *)&skb->data[12] = htons(edma_ath_eth_type); ++ *(uint16_t *)&skb->data[14] = htons(athr_hdr); ++ } ++ } ++} ++ ++/* ++ * edma_rx_complete_fraglist() ++ * Complete Rx processing for fraglist skbs ++ */ ++static int edma_rx_complete_fraglist(struct sk_buff *skb, u16 num_rfds, u16 length, u32 sw_next_to_clean, ++ u16 *cleaned_count, struct edma_rfd_desc_ring *erdr, struct edma_common_info *edma_cinfo) ++{ ++ struct platform_device *pdev = edma_cinfo->pdev; ++ struct edma_hw *hw = &edma_cinfo->hw; ++ struct sk_buff *skb_temp; ++ struct edma_sw_desc *sw_desc; ++ int i; ++ u16 size_remaining; ++ ++ skb->data_len = 0; ++ skb->tail += (hw->rx_head_buff_size - 16); ++ skb->len = skb->truesize = length; ++ size_remaining = length - (hw->rx_head_buff_size - 16); ++ ++ /* clean-up all related sw_descs */ ++ for (i = 1; i < num_rfds; i++) { ++ struct sk_buff *skb_prev; ++ sw_desc = &erdr->sw_desc[sw_next_to_clean]; ++ skb_temp = sw_desc->skb; ++ ++ dma_unmap_single(&pdev->dev, sw_desc->dma, ++ sw_desc->length, DMA_FROM_DEVICE); ++ ++ if (size_remaining < hw->rx_head_buff_size) ++ skb_put(skb_temp, size_remaining); ++ else ++ skb_put(skb_temp, hw->rx_head_buff_size); ++ ++ /* ++ * If we are processing the first rfd, we link ++ * skb->frag_list to the skb corresponding to the ++ * first RFD ++ */ ++ if (i == 1) ++ skb_shinfo(skb)->frag_list = skb_temp; ++ else ++ skb_prev->next = skb_temp; ++ skb_prev = skb_temp; ++ skb_temp->next = NULL; ++ ++ skb->data_len += skb_temp->len; ++ size_remaining -= skb_temp->len; ++ ++ /* Increment SW index */ ++ sw_next_to_clean = (sw_next_to_clean + 1) & (erdr->count - 1); ++ (*cleaned_count)++; ++ } ++ ++ return sw_next_to_clean; ++} ++ ++/* 
edma_rx_complete_paged() ++ * Complete Rx processing for paged skbs ++ */ ++static int edma_rx_complete_paged(struct sk_buff *skb, u16 num_rfds, u16 length, u32 sw_next_to_clean, ++ u16 *cleaned_count, struct edma_rfd_desc_ring *erdr, struct edma_common_info *edma_cinfo) ++{ ++ struct platform_device *pdev = edma_cinfo->pdev; ++ struct sk_buff *skb_temp; ++ struct edma_sw_desc *sw_desc; ++ int i; ++ u16 size_remaining; ++ ++ skb_frag_t *frag = &skb_shinfo(skb)->frags[0]; ++ ++ /* Setup skbuff fields */ ++ skb->len = length; ++ ++ if (likely(num_rfds <= 1)) { ++ skb->data_len = length; ++ skb->truesize += edma_cinfo->rx_page_buffer_len; ++ skb_fill_page_desc(skb, 0, skb_frag_page(frag), ++ 16, length); ++ } else { ++ frag->size -= 16; ++ skb->data_len = frag->size; ++ skb->truesize += edma_cinfo->rx_page_buffer_len; ++ size_remaining = length - frag->size; ++ ++ skb_fill_page_desc(skb, 0, skb_frag_page(frag), ++ 16, frag->size); ++ ++ /* clean-up all related sw_descs */ ++ for (i = 1; i < num_rfds; i++) { ++ sw_desc = &erdr->sw_desc[sw_next_to_clean]; ++ skb_temp = sw_desc->skb; ++ frag = &skb_shinfo(skb_temp)->frags[0]; ++ dma_unmap_page(&pdev->dev, sw_desc->dma, ++ sw_desc->length, DMA_FROM_DEVICE); ++ ++ if (size_remaining < edma_cinfo->rx_page_buffer_len) ++ frag->size = size_remaining; ++ ++ skb_fill_page_desc(skb, i, skb_frag_page(frag), ++ 0, frag->size); ++ ++ skb_shinfo(skb_temp)->nr_frags = 0; ++ dev_kfree_skb_any(skb_temp); ++ ++ skb->data_len += frag->size; ++ skb->truesize += edma_cinfo->rx_page_buffer_len; ++ size_remaining -= frag->size; ++ ++ /* Increment SW index */ ++ sw_next_to_clean = (sw_next_to_clean + 1) & (erdr->count - 1); ++ (*cleaned_count)++; ++ } ++ } ++ ++ return sw_next_to_clean; ++} ++ ++/* ++ * edma_rx_complete() ++ * Main api called from the poll function to process rx packets. 
++ */ ++static void edma_rx_complete(struct edma_common_info *edma_cinfo, ++ int *work_done, int work_to_do, int queue_id, ++ struct napi_struct *napi) ++{ ++ struct platform_device *pdev = edma_cinfo->pdev; ++ struct edma_rfd_desc_ring *erdr = edma_cinfo->rfd_ring[queue_id]; ++ struct net_device *netdev; ++ struct edma_adapter *adapter; ++ struct edma_sw_desc *sw_desc; ++ struct sk_buff *skb; ++ struct edma_rx_return_desc *rd; ++ u16 hash_type, rrd[8], cleaned_count = 0, length = 0, num_rfds = 1, ++ sw_next_to_clean, hw_next_to_clean = 0, vlan = 0, ret_count = 0; ++ u32 data = 0; ++ u8 *vaddr; ++ int port_id, i, drop_count = 0; ++ u32 priority; ++ u16 count = erdr->count, rfd_avail; ++ u8 queue_to_rxid[8] = {0, 0, 1, 1, 2, 2, 3, 3}; ++ ++ sw_next_to_clean = erdr->sw_next_to_clean; ++ ++ edma_read_reg(EDMA_REG_RFD_IDX_Q(queue_id), &data); ++ hw_next_to_clean = (data >> EDMA_RFD_CONS_IDX_SHIFT) & ++ EDMA_RFD_CONS_IDX_MASK; ++ ++ do { ++ while (sw_next_to_clean != hw_next_to_clean) { ++ if (!work_to_do) ++ break; ++ ++ sw_desc = &erdr->sw_desc[sw_next_to_clean]; ++ skb = sw_desc->skb; ++ ++ /* Unmap the allocated buffer */ ++ if (likely(sw_desc->flags & EDMA_SW_DESC_FLAG_SKB_HEAD)) ++ dma_unmap_single(&pdev->dev, sw_desc->dma, ++ sw_desc->length, DMA_FROM_DEVICE); ++ else ++ dma_unmap_page(&pdev->dev, sw_desc->dma, ++ sw_desc->length, DMA_FROM_DEVICE); ++ ++ /* Get RRD */ ++ if (edma_cinfo->page_mode) { ++ vaddr = kmap_atomic(skb_frag_page(&skb_shinfo(skb)->frags[0])); ++ memcpy((uint8_t *)&rrd[0], vaddr, 16); ++ rd = (struct edma_rx_return_desc *)rrd; ++ kunmap_atomic(vaddr); ++ } else { ++ rd = (struct edma_rx_return_desc *)skb->data; ++ } ++ ++ /* Check if RRD is valid */ ++ if (!(rd->rrd7 & EDMA_RRD_DESC_VALID)) { ++ edma_clean_rfd(erdr, sw_next_to_clean); ++ sw_next_to_clean = (sw_next_to_clean + 1) & ++ (erdr->count - 1); ++ cleaned_count++; ++ continue; ++ } ++ ++ /* Get the number of RFDs from RRD */ ++ num_rfds = rd->rrd1 & EDMA_RRD_NUM_RFD_MASK; ++ ++ /* 
Get Rx port ID from switch */
++ port_id = (rd->rrd1 >> EDMA_PORT_ID_SHIFT) & EDMA_PORT_ID_MASK;
++ if ((!port_id) || (port_id > EDMA_MAX_PORTID_SUPPORTED)) {
++ dev_err(&pdev->dev, "Invalid RRD source port bit set");
++ for (i = 0; i < num_rfds; i++) {
++ edma_clean_rfd(erdr, sw_next_to_clean);
++ sw_next_to_clean = (sw_next_to_clean + 1) & (erdr->count - 1);
++ cleaned_count++;
++ }
++ continue;
++ }
++
++ /* check if we have a sink for the data we receive.
++ * If the interface isn't setup, we have to drop the
++ * incoming data for now.
++ */
++ netdev = edma_cinfo->portid_netdev_lookup_tbl[port_id];
++ if (!netdev) {
++ edma_clean_rfd(erdr, sw_next_to_clean);
++ sw_next_to_clean = (sw_next_to_clean + 1) &
++ (erdr->count - 1);
++ cleaned_count++;
++ continue;
++ }
++ adapter = netdev_priv(netdev);
++
++ /* This code is added to handle a usecase where high
++ * priority stream and a low priority stream are
++ * received simultaneously on DUT. The problem occurs
++ * if one of the Rx rings is full and the corresponding
++ * core is busy with other stuff. This causes ESS CPU
++ * port to backpressure all incoming traffic including
++ * high priority one. We monitor free descriptor count
++ * on each CPU and whenever it reaches threshold (< 80),
++ * we drop all low priority traffic and let only high
++ * priority traffic pass through. We can hence avoid
++ * ESS CPU port to send backpressure on high priority
++ * stream. 
++ */ ++ priority = (rd->rrd1 >> EDMA_RRD_PRIORITY_SHIFT) ++ & EDMA_RRD_PRIORITY_MASK; ++ if (likely(!priority && !edma_cinfo->page_mode && (num_rfds <= 1))) { ++ rfd_avail = (count + sw_next_to_clean - hw_next_to_clean - 1) & (count - 1); ++ if (rfd_avail < EDMA_RFD_AVAIL_THR) { ++ sw_desc->flags = EDMA_SW_DESC_FLAG_SKB_REUSE; ++ sw_next_to_clean = (sw_next_to_clean + 1) & (erdr->count - 1); ++ adapter->stats.rx_dropped++; ++ cleaned_count++; ++ drop_count++; ++ if (drop_count == 3) { ++ work_to_do--; ++ (*work_done)++; ++ drop_count = 0; ++ } ++ if (cleaned_count == EDMA_RX_BUFFER_WRITE) { ++ /* If buffer clean count reaches 16, we replenish HW buffers. */ ++ ret_count = edma_alloc_rx_buf(edma_cinfo, erdr, cleaned_count, queue_id); ++ edma_write_reg(EDMA_REG_RX_SW_CONS_IDX_Q(queue_id), ++ sw_next_to_clean); ++ cleaned_count = ret_count; ++ } ++ continue; ++ } ++ } ++ ++ work_to_do--; ++ (*work_done)++; ++ ++ /* Increment SW index */ ++ sw_next_to_clean = (sw_next_to_clean + 1) & ++ (erdr->count - 1); ++ ++ cleaned_count++; ++ ++ /* Get the packet size and allocate buffer */ ++ length = rd->rrd6 & EDMA_RRD_PKT_SIZE_MASK; ++ ++ if (edma_cinfo->page_mode) { ++ /* paged skb */ ++ sw_next_to_clean = edma_rx_complete_paged(skb, num_rfds, length, sw_next_to_clean, &cleaned_count, erdr, edma_cinfo); ++ if (!pskb_may_pull(skb, ETH_HLEN)) { ++ dev_kfree_skb_any(skb); ++ continue; ++ } ++ } else { ++ /* single or fraglist skb */ ++ ++ /* Addition of 16 bytes is required, as in the packet ++ * first 16 bytes are rrd descriptors, so actual data ++ * starts from an offset of 16. 
++ */ ++ skb_reserve(skb, 16); ++ if (likely((num_rfds <= 1) || !edma_cinfo->fraglist_mode)) { ++ skb_put(skb, length); ++ } else { ++ sw_next_to_clean = edma_rx_complete_fraglist(skb, num_rfds, length, sw_next_to_clean, &cleaned_count, erdr, edma_cinfo); ++ } ++ } ++ ++ if (edma_stp_rstp) { ++ edma_rx_complete_stp_rstp(skb, port_id, rd); ++ } ++ ++ skb->protocol = eth_type_trans(skb, netdev); ++ ++ /* Record Rx queue for RFS/RPS and fill flow hash from HW */ ++ skb_record_rx_queue(skb, queue_to_rxid[queue_id]); ++ if (netdev->features & NETIF_F_RXHASH) { ++ hash_type = (rd->rrd5 >> EDMA_HASH_TYPE_SHIFT); ++ if ((hash_type > EDMA_HASH_TYPE_START) && (hash_type < EDMA_HASH_TYPE_END)) ++ skb_set_hash(skb, rd->rrd2, PKT_HASH_TYPE_L4); ++ } ++ ++#ifdef CONFIG_NF_FLOW_COOKIE ++ skb->flow_cookie = rd->rrd3 & EDMA_RRD_FLOW_COOKIE_MASK; ++#endif ++ edma_receive_checksum(rd, skb); ++ ++ /* Process VLAN HW acceleration indication provided by HW */ ++ if (unlikely(adapter->default_vlan_tag != rd->rrd4)) { ++ vlan = rd->rrd4; ++ if (likely(rd->rrd7 & EDMA_RRD_CVLAN)) ++ __vlan_hwaccel_put_tag(skb, htons(ETH_P_8021Q), vlan); ++ else if (rd->rrd1 & EDMA_RRD_SVLAN) ++ __vlan_hwaccel_put_tag(skb, htons(ETH_P_8021AD), vlan); ++ } ++ ++ /* Update rx statistics */ ++ adapter->stats.rx_packets++; ++ adapter->stats.rx_bytes += length; ++ ++ /* Check if we reached refill threshold */ ++ if (cleaned_count == EDMA_RX_BUFFER_WRITE) { ++ ret_count = edma_alloc_rx_buf(edma_cinfo, erdr, cleaned_count, queue_id); ++ edma_write_reg(EDMA_REG_RX_SW_CONS_IDX_Q(queue_id), ++ sw_next_to_clean); ++ cleaned_count = ret_count; ++ } ++ ++ /* At this point skb should go to stack */ ++ napi_gro_receive(napi, skb); ++ } ++ ++ /* Check if we still have NAPI budget */ ++ if (!work_to_do) ++ break; ++ ++ /* Read index once again since we still have NAPI budget */ ++ edma_read_reg(EDMA_REG_RFD_IDX_Q(queue_id), &data); ++ hw_next_to_clean = (data >> EDMA_RFD_CONS_IDX_SHIFT) & ++ EDMA_RFD_CONS_IDX_MASK; ++ } 
while (hw_next_to_clean != sw_next_to_clean); ++ ++ erdr->sw_next_to_clean = sw_next_to_clean; ++ ++ /* Refill here in case refill threshold wasn't reached */ ++ if (likely(cleaned_count)) { ++ ret_count = edma_alloc_rx_buf(edma_cinfo, erdr, cleaned_count, queue_id); ++ if (ret_count) ++ dev_dbg(&pdev->dev, "Not all buffers was reallocated"); ++ edma_write_reg(EDMA_REG_RX_SW_CONS_IDX_Q(queue_id), ++ erdr->sw_next_to_clean); ++ } ++} ++ ++/* edma_delete_rfs_filter() ++ * Remove RFS filter from switch ++ */ ++static int edma_delete_rfs_filter(struct edma_adapter *adapter, ++ struct edma_rfs_filter_node *filter_node) ++{ ++ int res = -1; ++ ++ struct flow_keys *keys = &filter_node->keys; ++ ++ if (likely(adapter->set_rfs_rule)) ++ res = (*adapter->set_rfs_rule)(adapter->netdev, ++ flow_get_u32_src(keys), flow_get_u32_dst(keys), ++ keys->ports.src, keys->ports.dst, ++ keys->basic.ip_proto, filter_node->rq_id, 0); ++ ++ return res; ++} ++ ++/* edma_add_rfs_filter() ++ * Add RFS filter to switch ++ */ ++static int edma_add_rfs_filter(struct edma_adapter *adapter, ++ struct flow_keys *keys, u16 rq, ++ struct edma_rfs_filter_node *filter_node) ++{ ++ int res = -1; ++ ++ struct flow_keys *dest_keys = &filter_node->keys; ++ ++ memcpy(dest_keys, &filter_node->keys, sizeof(*dest_keys)); ++/* ++ dest_keys->control = keys->control; ++ dest_keys->basic = keys->basic; ++ dest_keys->addrs = keys->addrs; ++ dest_keys->ports = keys->ports; ++ dest_keys.ip_proto = keys->ip_proto; ++*/ ++ /* Call callback registered by ESS driver */ ++ if (likely(adapter->set_rfs_rule)) ++ res = (*adapter->set_rfs_rule)(adapter->netdev, flow_get_u32_src(keys), ++ flow_get_u32_dst(keys), keys->ports.src, keys->ports.dst, ++ keys->basic.ip_proto, rq, 1); ++ ++ return res; ++} ++ ++/* edma_rfs_key_search() ++ * Look for existing RFS entry ++ */ ++static struct edma_rfs_filter_node *edma_rfs_key_search(struct hlist_head *h, ++ struct flow_keys *key) ++{ ++ struct edma_rfs_filter_node *p; ++ ++ 
hlist_for_each_entry(p, h, node) ++ if (flow_get_u32_src(&p->keys) == flow_get_u32_src(key) && ++ flow_get_u32_dst(&p->keys) == flow_get_u32_dst(key) && ++ p->keys.ports.src == key->ports.src && ++ p->keys.ports.dst == key->ports.dst && ++ p->keys.basic.ip_proto == key->basic.ip_proto) ++ return p; ++ return NULL; ++} ++ ++/* edma_initialise_rfs_flow_table() ++ * Initialise EDMA RFS flow table ++ */ ++static void edma_initialise_rfs_flow_table(struct edma_adapter *adapter) ++{ ++ int i; ++ ++ spin_lock_init(&adapter->rfs.rfs_ftab_lock); ++ ++ /* Initialize EDMA flow hash table */ ++ for (i = 0; i < EDMA_RFS_FLOW_ENTRIES; i++) ++ INIT_HLIST_HEAD(&adapter->rfs.hlist_head[i]); ++ ++ adapter->rfs.max_num_filter = EDMA_RFS_FLOW_ENTRIES; ++ adapter->rfs.filter_available = adapter->rfs.max_num_filter; ++ adapter->rfs.hashtoclean = 0; ++ ++ /* Add timer to get periodic RFS updates from OS */ ++ init_timer(&adapter->rfs.expire_rfs); ++ adapter->rfs.expire_rfs.function = edma_flow_may_expire; ++ adapter->rfs.expire_rfs.data = (unsigned long)adapter; ++ mod_timer(&adapter->rfs.expire_rfs, jiffies + HZ / 4); ++} ++ ++/* edma_free_rfs_flow_table() ++ * Free EDMA RFS flow table ++ */ ++static void edma_free_rfs_flow_table(struct edma_adapter *adapter) ++{ ++ int i; ++ ++ /* Remove sync timer */ ++ del_timer_sync(&adapter->rfs.expire_rfs); ++ spin_lock_bh(&adapter->rfs.rfs_ftab_lock); ++ ++ /* Free EDMA RFS table entries */ ++ adapter->rfs.filter_available = 0; ++ ++ /* Clean-up EDMA flow hash table */ ++ for (i = 0; i < EDMA_RFS_FLOW_ENTRIES; i++) { ++ struct hlist_head *hhead; ++ struct hlist_node *tmp; ++ struct edma_rfs_filter_node *filter_node; ++ int res; ++ ++ hhead = &adapter->rfs.hlist_head[i]; ++ hlist_for_each_entry_safe(filter_node, tmp, hhead, node) { ++ res = edma_delete_rfs_filter(adapter, filter_node); ++ if (res < 0) ++ dev_warn(&adapter->netdev->dev, ++ "EDMA going down but RFS entry %d not allowed to be flushed by Switch", ++ filter_node->flow_id); ++ 
hlist_del(&filter_node->node); ++ kfree(filter_node); ++ } ++ } ++ spin_unlock_bh(&adapter->rfs.rfs_ftab_lock); ++} ++ ++/* edma_tx_unmap_and_free() ++ * clean TX buffer ++ */ ++static inline void edma_tx_unmap_and_free(struct platform_device *pdev, ++ struct edma_sw_desc *sw_desc) ++{ ++ struct sk_buff *skb = sw_desc->skb; ++ ++ if (likely((sw_desc->flags & EDMA_SW_DESC_FLAG_SKB_HEAD) || ++ (sw_desc->flags & EDMA_SW_DESC_FLAG_SKB_FRAGLIST))) ++ /* unmap_single for skb head area */ ++ dma_unmap_single(&pdev->dev, sw_desc->dma, ++ sw_desc->length, DMA_TO_DEVICE); ++ else if (sw_desc->flags & EDMA_SW_DESC_FLAG_SKB_FRAG) ++ /* unmap page for paged fragments */ ++ dma_unmap_page(&pdev->dev, sw_desc->dma, ++ sw_desc->length, DMA_TO_DEVICE); ++ ++ if (likely(sw_desc->flags & EDMA_SW_DESC_FLAG_LAST)) ++ dev_kfree_skb_any(skb); ++ ++ sw_desc->flags = 0; ++} ++ ++/* edma_tx_complete() ++ * Used to clean tx queues and update hardware and consumer index ++ */ ++static void edma_tx_complete(struct edma_common_info *edma_cinfo, int queue_id) ++{ ++ struct edma_tx_desc_ring *etdr = edma_cinfo->tpd_ring[queue_id]; ++ struct edma_sw_desc *sw_desc; ++ struct platform_device *pdev = edma_cinfo->pdev; ++ int i; ++ ++ u16 sw_next_to_clean = etdr->sw_next_to_clean; ++ u16 hw_next_to_clean; ++ u32 data = 0; ++ ++ edma_read_reg(EDMA_REG_TPD_IDX_Q(queue_id), &data); ++ hw_next_to_clean = (data >> EDMA_TPD_CONS_IDX_SHIFT) & EDMA_TPD_CONS_IDX_MASK; ++ ++ /* clean the buffer here */ ++ while (sw_next_to_clean != hw_next_to_clean) { ++ sw_desc = &etdr->sw_desc[sw_next_to_clean]; ++ edma_tx_unmap_and_free(pdev, sw_desc); ++ sw_next_to_clean = (sw_next_to_clean + 1) & (etdr->count - 1); ++ } ++ ++ etdr->sw_next_to_clean = sw_next_to_clean; ++ ++ /* update the TPD consumer index register */ ++ edma_write_reg(EDMA_REG_TX_SW_CONS_IDX_Q(queue_id), sw_next_to_clean); ++ ++ /* Wake the queue if queue is stopped and netdev link is up */ ++ for (i = 0; i < EDMA_MAX_NETDEV_PER_QUEUE && etdr->nq[i] ; 
i++) {
++ if (netif_tx_queue_stopped(etdr->nq[i])) {
++ if ((etdr->netdev[i]) && netif_carrier_ok(etdr->netdev[i]))
++ netif_tx_wake_queue(etdr->nq[i]);
++ }
++ }
++}
++
++/* edma_get_tx_buffer()
++ * Get sw_desc corresponding to the TPD
++ */
++static struct edma_sw_desc *edma_get_tx_buffer(struct edma_common_info *edma_cinfo,
++ struct edma_tx_desc *tpd, int queue_id)
++{
++ struct edma_tx_desc_ring *etdr = edma_cinfo->tpd_ring[queue_id];
++ return &etdr->sw_desc[tpd - (struct edma_tx_desc *)etdr->hw_desc];
++}
++
++/* edma_get_next_tpd()
++ * Return a TPD descriptor for transfer
++ */
++static struct edma_tx_desc *edma_get_next_tpd(struct edma_common_info *edma_cinfo,
++ int queue_id)
++{
++ struct edma_tx_desc_ring *etdr = edma_cinfo->tpd_ring[queue_id];
++ u16 sw_next_to_fill = etdr->sw_next_to_fill;
++ struct edma_tx_desc *tpd_desc =
++ (&((struct edma_tx_desc *)(etdr->hw_desc))[sw_next_to_fill]);
++
++ etdr->sw_next_to_fill = (etdr->sw_next_to_fill + 1) & (etdr->count - 1);
++
++ return tpd_desc;
++}
++
++/* edma_tpd_available()
++ * Check number of free TPDs
++ */
++static inline u16 edma_tpd_available(struct edma_common_info *edma_cinfo,
++ int queue_id)
++{
++ struct edma_tx_desc_ring *etdr = edma_cinfo->tpd_ring[queue_id];
++
++ u16 sw_next_to_fill;
++ u16 sw_next_to_clean;
++ u16 count = 0;
++
++ sw_next_to_clean = etdr->sw_next_to_clean;
++ sw_next_to_fill = etdr->sw_next_to_fill;
++
++ if (likely(sw_next_to_clean <= sw_next_to_fill))
++ count = etdr->count;
++
++ return count + sw_next_to_clean - sw_next_to_fill - 1;
++}
++
++/* edma_tx_queue_get()
++ * Get the starting number of the queue
++ */
++static inline int edma_tx_queue_get(struct edma_adapter *adapter,
++ struct sk_buff *skb, int txq_id)
++{
++ /* skb->priority is used as an index to skb priority table
++ * and based on packet priority, corresponding queue is assigned. 
++ */ ++ return adapter->tx_start_offset[txq_id] + edma_skb_priority_offset(skb); ++} ++ ++/* edma_tx_update_hw_idx() ++ * update the producer index for the ring transmitted ++ */ ++static void edma_tx_update_hw_idx(struct edma_common_info *edma_cinfo, ++ struct sk_buff *skb, int queue_id) ++{ ++ struct edma_tx_desc_ring *etdr = edma_cinfo->tpd_ring[queue_id]; ++ u32 tpd_idx_data; ++ ++ /* Read and update the producer index */ ++ edma_read_reg(EDMA_REG_TPD_IDX_Q(queue_id), &tpd_idx_data); ++ tpd_idx_data &= ~EDMA_TPD_PROD_IDX_BITS; ++ tpd_idx_data |= (etdr->sw_next_to_fill & EDMA_TPD_PROD_IDX_MASK) ++ << EDMA_TPD_PROD_IDX_SHIFT; ++ ++ edma_write_reg(EDMA_REG_TPD_IDX_Q(queue_id), tpd_idx_data); ++} ++ ++/* edma_rollback_tx() ++ * Function to retrieve tx resources in case of error ++ */ ++static void edma_rollback_tx(struct edma_adapter *adapter, ++ struct edma_tx_desc *start_tpd, int queue_id) ++{ ++ struct edma_tx_desc_ring *etdr = adapter->edma_cinfo->tpd_ring[queue_id]; ++ struct edma_sw_desc *sw_desc; ++ struct edma_tx_desc *tpd = NULL; ++ u16 start_index, index; ++ ++ start_index = start_tpd - (struct edma_tx_desc *)(etdr->hw_desc); ++ ++ index = start_index; ++ while (index != etdr->sw_next_to_fill) { ++ tpd = (&((struct edma_tx_desc *)(etdr->hw_desc))[index]); ++ sw_desc = &etdr->sw_desc[index]; ++ edma_tx_unmap_and_free(adapter->pdev, sw_desc); ++ memset(tpd, 0, sizeof(struct edma_tx_desc)); ++ if (++index == etdr->count) ++ index = 0; ++ } ++ etdr->sw_next_to_fill = start_index; ++} ++ ++/* edma_tx_map_and_fill() ++ * gets called from edma_xmit_frame ++ * ++ * This is where the dma of the buffer to be transmitted ++ * gets mapped ++ */ ++static int edma_tx_map_and_fill(struct edma_common_info *edma_cinfo, ++ struct edma_adapter *adapter, struct sk_buff *skb, int queue_id, ++ unsigned int flags_transmit, u16 from_cpu, u16 dp_bitmap, ++ bool packet_is_rstp, int nr_frags) ++{ ++ struct edma_sw_desc *sw_desc = NULL; ++ struct platform_device *pdev = 
edma_cinfo->pdev; ++ struct edma_tx_desc *tpd = NULL, *start_tpd = NULL; ++ struct sk_buff *iter_skb; ++ int i = 0; ++ u32 word1 = 0, word3 = 0, lso_word1 = 0, svlan_tag = 0; ++ u16 buf_len, lso_desc_len = 0; ++ ++ /* It should either be a nr_frags skb or fraglist skb but not both */ ++ BUG_ON(nr_frags && skb_has_frag_list(skb)); ++ ++ if (skb_is_gso(skb)) { ++ /* TODO: What additional checks need to be performed here */ ++ if (skb_shinfo(skb)->gso_type & SKB_GSO_TCPV4) { ++ lso_word1 |= EDMA_TPD_IPV4_EN; ++ ip_hdr(skb)->check = 0; ++ tcp_hdr(skb)->check = ~csum_tcpudp_magic(ip_hdr(skb)->saddr, ++ ip_hdr(skb)->daddr, 0, IPPROTO_TCP, 0); ++ } else if (skb_shinfo(skb)->gso_type & SKB_GSO_TCPV6) { ++ lso_word1 |= EDMA_TPD_LSO_V2_EN; ++ ipv6_hdr(skb)->payload_len = 0; ++ tcp_hdr(skb)->check = ~csum_ipv6_magic(&ipv6_hdr(skb)->saddr, ++ &ipv6_hdr(skb)->daddr, 0, IPPROTO_TCP, 0); ++ } else ++ return -EINVAL; ++ ++ lso_word1 |= EDMA_TPD_LSO_EN | ((skb_shinfo(skb)->gso_size & EDMA_TPD_MSS_MASK) << EDMA_TPD_MSS_SHIFT) | ++ (skb_transport_offset(skb) << EDMA_TPD_HDR_SHIFT); ++ } else if (flags_transmit & EDMA_HW_CHECKSUM) { ++ u8 css, cso; ++ cso = skb_checksum_start_offset(skb); ++ css = cso + skb->csum_offset; ++ ++ word1 |= (EDMA_TPD_CUSTOM_CSUM_EN); ++ word1 |= (cso >> 1) << EDMA_TPD_HDR_SHIFT; ++ word1 |= ((css >> 1) << EDMA_TPD_CUSTOM_CSUM_SHIFT); ++ } ++ ++ if (skb->protocol == htons(ETH_P_PPP_SES)) ++ word1 |= EDMA_TPD_PPPOE_EN; ++ ++ if (flags_transmit & EDMA_VLAN_TX_TAG_INSERT_FLAG) { ++ switch(skb->vlan_proto) { ++ case htons(ETH_P_8021Q): ++ word3 |= (1 << EDMA_TX_INS_CVLAN); ++ word3 |= skb_vlan_tag_get(skb) << EDMA_TX_CVLAN_TAG_SHIFT; ++ break; ++ case htons(ETH_P_8021AD): ++ word1 |= (1 << EDMA_TX_INS_SVLAN); ++ svlan_tag = skb_vlan_tag_get(skb) << EDMA_TX_SVLAN_TAG_SHIFT; ++ break; ++ default: ++ dev_err(&pdev->dev, "no ctag or stag present\n"); ++ goto vlan_tag_error; ++ } ++ } else if (flags_transmit & EDMA_VLAN_TX_TAG_INSERT_DEFAULT_FLAG) { ++ word3 |= (1 
<< EDMA_TX_INS_CVLAN); ++ word3 |= (adapter->default_vlan_tag) << EDMA_TX_CVLAN_TAG_SHIFT; ++ } ++ ++ if (packet_is_rstp) { ++ word3 |= dp_bitmap << EDMA_TPD_PORT_BITMAP_SHIFT; ++ word3 |= from_cpu << EDMA_TPD_FROM_CPU_SHIFT; ++ } else { ++ word3 |= adapter->dp_bitmap << EDMA_TPD_PORT_BITMAP_SHIFT; ++ } ++ ++ buf_len = skb_headlen(skb); ++ ++ if (lso_word1) { ++ if (lso_word1 & EDMA_TPD_LSO_V2_EN) { ++ ++ /* IPv6 LSOv2 descriptor */ ++ start_tpd = tpd = edma_get_next_tpd(edma_cinfo, queue_id); ++ sw_desc = edma_get_tx_buffer(edma_cinfo, tpd, queue_id); ++ sw_desc->flags |= EDMA_SW_DESC_FLAG_SKB_NONE; ++ ++ /* LSOv2 descriptor overrides addr field to pass length */ ++ tpd->addr = cpu_to_le16(skb->len); ++ tpd->svlan_tag = svlan_tag; ++ tpd->word1 = word1 | lso_word1; ++ tpd->word3 = word3; ++ } ++ ++ tpd = edma_get_next_tpd(edma_cinfo, queue_id); ++ if (!start_tpd) ++ start_tpd = tpd; ++ sw_desc = edma_get_tx_buffer(edma_cinfo, tpd, queue_id); ++ ++ /* The last buffer info contain the skb address, ++ * so skb will be freed after unmap ++ */ ++ sw_desc->length = lso_desc_len; ++ sw_desc->flags |= EDMA_SW_DESC_FLAG_SKB_HEAD; ++ ++ sw_desc->dma = dma_map_single(&adapter->pdev->dev, ++ skb->data, buf_len, DMA_TO_DEVICE); ++ if (dma_mapping_error(&pdev->dev, sw_desc->dma)) ++ goto dma_error; ++ ++ tpd->addr = cpu_to_le32(sw_desc->dma); ++ tpd->len = cpu_to_le16(buf_len); ++ ++ tpd->svlan_tag = svlan_tag; ++ tpd->word1 = word1 | lso_word1; ++ tpd->word3 = word3; ++ ++ /* The last buffer info contain the skb address, ++ * so it will be freed after unmap ++ */ ++ sw_desc->length = lso_desc_len; ++ sw_desc->flags |= EDMA_SW_DESC_FLAG_SKB_HEAD; ++ ++ buf_len = 0; ++ } ++ ++ if (likely(buf_len)) { ++ ++ /* TODO Do not dequeue descriptor if there is a potential error */ ++ tpd = edma_get_next_tpd(edma_cinfo, queue_id); ++ ++ if (!start_tpd) ++ start_tpd = tpd; ++ ++ sw_desc = edma_get_tx_buffer(edma_cinfo, tpd, queue_id); ++ ++ /* The last buffer info contain the skb address, 
++ * so it will be free after unmap ++ */ ++ sw_desc->length = buf_len; ++ sw_desc->flags |= EDMA_SW_DESC_FLAG_SKB_HEAD; ++ sw_desc->dma = dma_map_single(&adapter->pdev->dev, ++ skb->data, buf_len, DMA_TO_DEVICE); ++ if (dma_mapping_error(&pdev->dev, sw_desc->dma)) ++ goto dma_error; ++ ++ tpd->addr = cpu_to_le32(sw_desc->dma); ++ tpd->len = cpu_to_le16(buf_len); ++ ++ tpd->svlan_tag = svlan_tag; ++ tpd->word1 = word1 | lso_word1; ++ tpd->word3 = word3; ++ } ++ ++ /* Walk through all paged fragments */ ++ while (nr_frags--) { ++ skb_frag_t *frag = &skb_shinfo(skb)->frags[i]; ++ buf_len = skb_frag_size(frag); ++ tpd = edma_get_next_tpd(edma_cinfo, queue_id); ++ sw_desc = edma_get_tx_buffer(edma_cinfo, tpd, queue_id); ++ sw_desc->length = buf_len; ++ sw_desc->flags |= EDMA_SW_DESC_FLAG_SKB_FRAG; ++ ++ sw_desc->dma = skb_frag_dma_map(&pdev->dev, frag, 0, buf_len, DMA_TO_DEVICE); ++ ++ if (dma_mapping_error(NULL, sw_desc->dma)) ++ goto dma_error; ++ ++ tpd->addr = cpu_to_le32(sw_desc->dma); ++ tpd->len = cpu_to_le16(buf_len); ++ ++ tpd->svlan_tag = svlan_tag; ++ tpd->word1 = word1 | lso_word1; ++ tpd->word3 = word3; ++ i++; ++ } ++ ++ /* Walk through all fraglist skbs */ ++ skb_walk_frags(skb, iter_skb) { ++ buf_len = iter_skb->len; ++ tpd = edma_get_next_tpd(edma_cinfo, queue_id); ++ sw_desc = edma_get_tx_buffer(edma_cinfo, tpd, queue_id); ++ sw_desc->length = buf_len; ++ sw_desc->dma = dma_map_single(&adapter->pdev->dev, ++ iter_skb->data, buf_len, DMA_TO_DEVICE); ++ ++ if (dma_mapping_error(NULL, sw_desc->dma)) ++ goto dma_error; ++ ++ tpd->addr = cpu_to_le32(sw_desc->dma); ++ tpd->len = cpu_to_le16(buf_len); ++ tpd->svlan_tag = svlan_tag; ++ tpd->word1 = word1 | lso_word1; ++ tpd->word3 = word3; ++ sw_desc->flags |= EDMA_SW_DESC_FLAG_SKB_FRAGLIST; ++ } ++ ++ if (tpd) ++ tpd->word1 |= 1 << EDMA_TPD_EOP_SHIFT; ++ ++ sw_desc->skb = skb; ++ sw_desc->flags |= EDMA_SW_DESC_FLAG_LAST; ++ ++ return 0; ++ ++dma_error: ++ edma_rollback_tx(adapter, start_tpd, queue_id); ++ 
dev_err(&pdev->dev, "TX DMA map failed\n"); ++vlan_tag_error: ++ return -ENOMEM; ++} ++ ++/* edma_check_link() ++ * check Link status ++ */ ++static int edma_check_link(struct edma_adapter *adapter) ++{ ++ struct phy_device *phydev = adapter->phydev; ++ ++ if (!(adapter->poll_required)) ++ return __EDMA_LINKUP; ++ ++ if (phydev->link) ++ return __EDMA_LINKUP; ++ ++ return __EDMA_LINKDOWN; ++} ++ ++/* edma_adjust_link() ++ * check for edma link status ++ */ ++void edma_adjust_link(struct net_device *netdev) ++{ ++ int status; ++ struct edma_adapter *adapter = netdev_priv(netdev); ++ struct phy_device *phydev = adapter->phydev; ++ ++ if (!test_bit(__EDMA_UP, &adapter->state_flags)) ++ return; ++ ++ status = edma_check_link(adapter); ++ ++ if (status == __EDMA_LINKUP && adapter->link_state == __EDMA_LINKDOWN) { ++ dev_info(&adapter->pdev->dev, "%s: GMAC Link is up with phy_speed=%d\n", netdev->name, phydev->speed); ++ adapter->link_state = __EDMA_LINKUP; ++ netif_carrier_on(netdev); ++ if (netif_running(netdev)) ++ netif_tx_wake_all_queues(netdev); ++ } else if (status == __EDMA_LINKDOWN && adapter->link_state == __EDMA_LINKUP) { ++ dev_info(&adapter->pdev->dev, "%s: GMAC Link is down\n", netdev->name); ++ adapter->link_state = __EDMA_LINKDOWN; ++ netif_carrier_off(netdev); ++ netif_tx_stop_all_queues(netdev); ++ } ++} ++ ++/* edma_get_stats() ++ * Statistics api used to retreive the tx/rx statistics ++ */ ++struct net_device_stats *edma_get_stats(struct net_device *netdev) ++{ ++ struct edma_adapter *adapter = netdev_priv(netdev); ++ ++ return &adapter->stats; ++} ++ ++/* edma_xmit() ++ * Main api to be called by the core for packet transmission ++ */ ++netdev_tx_t edma_xmit(struct sk_buff *skb, ++ struct net_device *net_dev) ++{ ++ struct edma_adapter *adapter = netdev_priv(net_dev); ++ struct edma_common_info *edma_cinfo = adapter->edma_cinfo; ++ struct edma_tx_desc_ring *etdr; ++ u16 from_cpu, dp_bitmap, txq_id; ++ int ret, nr_frags = 0, num_tpds_needed = 1, 
queue_id; ++ unsigned int flags_transmit = 0; ++ bool packet_is_rstp = false; ++ struct netdev_queue *nq = NULL; ++ ++ if (skb_shinfo(skb)->nr_frags) { ++ nr_frags = skb_shinfo(skb)->nr_frags; ++ num_tpds_needed += nr_frags; ++ } else if (skb_has_frag_list(skb)) { ++ struct sk_buff *iter_skb; ++ ++ skb_walk_frags(skb, iter_skb) ++ num_tpds_needed++; ++ } ++ ++ if (num_tpds_needed > EDMA_MAX_SKB_FRAGS) { ++ dev_err(&net_dev->dev, ++ "skb received with fragments %d which is more than %lu", ++ num_tpds_needed, EDMA_MAX_SKB_FRAGS); ++ dev_kfree_skb_any(skb); ++ adapter->stats.tx_errors++; ++ return NETDEV_TX_OK; ++ } ++ ++ if (edma_stp_rstp) { ++ u16 ath_hdr, ath_eth_type; ++ u8 mac_addr[EDMA_ETH_HDR_LEN]; ++ ath_eth_type = ntohs(*(uint16_t *)&skb->data[12]); ++ if (ath_eth_type == edma_ath_eth_type) { ++ packet_is_rstp = true; ++ ath_hdr = htons(*(uint16_t *)&skb->data[14]); ++ dp_bitmap = ath_hdr & EDMA_TX_ATH_HDR_PORT_BITMAP_MASK; ++ from_cpu = (ath_hdr & EDMA_TX_ATH_HDR_FROM_CPU_MASK) >> EDMA_TX_ATH_HDR_FROM_CPU_SHIFT; ++ memcpy(mac_addr, skb->data, EDMA_ETH_HDR_LEN); ++ ++ skb_pull(skb, 4); ++ ++ memcpy(skb->data, mac_addr, EDMA_ETH_HDR_LEN); ++ } ++ } ++ ++ /* this will be one of the 4 TX queues exposed to linux kernel */ ++ txq_id = skb_get_queue_mapping(skb); ++ queue_id = edma_tx_queue_get(adapter, skb, txq_id); ++ etdr = edma_cinfo->tpd_ring[queue_id]; ++ nq = netdev_get_tx_queue(net_dev, txq_id); ++ ++ local_bh_disable(); ++ /* Tx is not handled in bottom half context. 
Hence, we need to protect ++ * Tx from tasks and bottom half ++ */ ++ ++ if (num_tpds_needed > edma_tpd_available(edma_cinfo, queue_id)) { ++ /* not enough descriptor, just stop queue */ ++ netif_tx_stop_queue(nq); ++ local_bh_enable(); ++ dev_dbg(&net_dev->dev, "Not enough descriptors available"); ++ edma_cinfo->edma_ethstats.tx_desc_error++; ++ return NETDEV_TX_BUSY; ++ } ++ ++ /* Check and mark VLAN tag offload */ ++ if (skb_vlan_tag_present(skb)) ++ flags_transmit |= EDMA_VLAN_TX_TAG_INSERT_FLAG; ++ else if (adapter->default_vlan_tag) ++ flags_transmit |= EDMA_VLAN_TX_TAG_INSERT_DEFAULT_FLAG; ++ ++ /* Check and mark checksum offload */ ++ if (likely(skb->ip_summed == CHECKSUM_PARTIAL)) ++ flags_transmit |= EDMA_HW_CHECKSUM; ++ ++ /* Map and fill descriptor for Tx */ ++ ret = edma_tx_map_and_fill(edma_cinfo, adapter, skb, queue_id, ++ flags_transmit, from_cpu, dp_bitmap, packet_is_rstp, nr_frags); ++ if (ret) { ++ dev_kfree_skb_any(skb); ++ adapter->stats.tx_errors++; ++ goto netdev_okay; ++ } ++ ++ /* Update SW producer index */ ++ edma_tx_update_hw_idx(edma_cinfo, skb, queue_id); ++ ++ /* update tx statistics */ ++ adapter->stats.tx_packets++; ++ adapter->stats.tx_bytes += skb->len; ++ ++netdev_okay: ++ local_bh_enable(); ++ return NETDEV_TX_OK; ++} ++ ++/* ++ * edma_flow_may_expire() ++ * Timer function called periodically to delete the node ++ */ ++void edma_flow_may_expire(unsigned long data) ++{ ++ struct edma_adapter *adapter = (struct edma_adapter *)data; ++ int j; ++ ++ spin_lock_bh(&adapter->rfs.rfs_ftab_lock); ++ for (j = 0; j < EDMA_RFS_EXPIRE_COUNT_PER_CALL; j++) { ++ struct hlist_head *hhead; ++ struct hlist_node *tmp; ++ struct edma_rfs_filter_node *n; ++ bool res; ++ ++ hhead = &adapter->rfs.hlist_head[adapter->rfs.hashtoclean++]; ++ hlist_for_each_entry_safe(n, tmp, hhead, node) { ++ res = rps_may_expire_flow(adapter->netdev, n->rq_id, ++ n->flow_id, n->filter_id); ++ if (res) { ++ int ret; ++ ret = edma_delete_rfs_filter(adapter, n); ++ if (ret 
< 0) ++ dev_dbg(&adapter->netdev->dev, ++ "RFS entry %d not allowed to be flushed by Switch", ++ n->flow_id); ++ else { ++ hlist_del(&n->node); ++ kfree(n); ++ adapter->rfs.filter_available++; ++ } ++ } ++ } ++ } ++ ++ adapter->rfs.hashtoclean = adapter->rfs.hashtoclean & (EDMA_RFS_FLOW_ENTRIES - 1); ++ spin_unlock_bh(&adapter->rfs.rfs_ftab_lock); ++ mod_timer(&adapter->rfs.expire_rfs, jiffies + HZ / 4); ++} ++ ++/* edma_rx_flow_steer() ++ * Called by core to to steer the flow to CPU ++ */ ++int edma_rx_flow_steer(struct net_device *dev, const struct sk_buff *skb, ++ u16 rxq, u32 flow_id) ++{ ++ struct flow_keys keys; ++ struct edma_rfs_filter_node *filter_node; ++ struct edma_adapter *adapter = netdev_priv(dev); ++ u16 hash_tblid; ++ int res; ++ ++ if (skb->protocol == htons(ETH_P_IPV6)) { ++ dev_err(&adapter->pdev->dev, "IPv6 not supported\n"); ++ res = -EINVAL; ++ goto no_protocol_err; ++ } ++ ++ /* Dissect flow parameters ++ * We only support IPv4 + TCP/UDP ++ */ ++ res = skb_flow_dissect_flow_keys(skb, &keys, 0); ++ if (!((keys.basic.ip_proto == IPPROTO_TCP) || (keys.basic.ip_proto == IPPROTO_UDP))) { ++ res = -EPROTONOSUPPORT; ++ goto no_protocol_err; ++ } ++ ++ /* Check if table entry exists */ ++ hash_tblid = skb_get_hash_raw(skb) & EDMA_RFS_FLOW_ENTRIES_MASK; ++ ++ spin_lock_bh(&adapter->rfs.rfs_ftab_lock); ++ filter_node = edma_rfs_key_search(&adapter->rfs.hlist_head[hash_tblid], &keys); ++ ++ if (filter_node) { ++ if (rxq == filter_node->rq_id) { ++ res = -EEXIST; ++ goto out; ++ } else { ++ res = edma_delete_rfs_filter(adapter, filter_node); ++ if (res < 0) ++ dev_warn(&adapter->netdev->dev, ++ "Cannot steer flow %d to different queue", ++ filter_node->flow_id); ++ else { ++ adapter->rfs.filter_available++; ++ res = edma_add_rfs_filter(adapter, &keys, rxq, filter_node); ++ if (res < 0) { ++ dev_warn(&adapter->netdev->dev, ++ "Cannot steer flow %d to different queue", ++ filter_node->flow_id); ++ } else { ++ adapter->rfs.filter_available--; ++ 
filter_node->rq_id = rxq; ++ filter_node->filter_id = res; ++ } ++ } ++ } ++ } else { ++ if (adapter->rfs.filter_available == 0) { ++ res = -EBUSY; ++ goto out; ++ } ++ ++ filter_node = kmalloc(sizeof(*filter_node), GFP_ATOMIC); ++ if (!filter_node) { ++ res = -ENOMEM; ++ goto out; ++ } ++ ++ res = edma_add_rfs_filter(adapter, &keys, rxq, filter_node); ++ if (res < 0) { ++ kfree(filter_node); ++ goto out; ++ } ++ ++ adapter->rfs.filter_available--; ++ filter_node->rq_id = rxq; ++ filter_node->filter_id = res; ++ filter_node->flow_id = flow_id; ++ filter_node->keys = keys; ++ INIT_HLIST_NODE(&filter_node->node); ++ hlist_add_head(&filter_node->node, &adapter->rfs.hlist_head[hash_tblid]); ++ } ++ ++out: ++ spin_unlock_bh(&adapter->rfs.rfs_ftab_lock); ++no_protocol_err: ++ return res; ++} ++ ++/* edma_register_rfs_filter() ++ * Add RFS filter callback ++ */ ++int edma_register_rfs_filter(struct net_device *netdev, ++ set_rfs_filter_callback_t set_filter) ++{ ++ struct edma_adapter *adapter = netdev_priv(netdev); ++ ++ spin_lock_bh(&adapter->rfs.rfs_ftab_lock); ++ ++ if (adapter->set_rfs_rule) { ++ spin_unlock_bh(&adapter->rfs.rfs_ftab_lock); ++ return -1; ++ } ++ ++ adapter->set_rfs_rule = set_filter; ++ spin_unlock_bh(&adapter->rfs.rfs_ftab_lock); ++ ++ return 0; ++} ++ ++/* edma_alloc_tx_rings() ++ * Allocate rx rings ++ */ ++int edma_alloc_tx_rings(struct edma_common_info *edma_cinfo) ++{ ++ struct platform_device *pdev = edma_cinfo->pdev; ++ int i, err = 0; ++ ++ for (i = 0; i < edma_cinfo->num_tx_queues; i++) { ++ err = edma_alloc_tx_ring(edma_cinfo, edma_cinfo->tpd_ring[i]); ++ if (err) { ++ dev_err(&pdev->dev, "Tx Queue alloc %u failed\n", i); ++ return err; ++ } ++ } ++ ++ return 0; ++} ++ ++/* edma_free_tx_rings() ++ * Free tx rings ++ */ ++void edma_free_tx_rings(struct edma_common_info *edma_cinfo) ++{ ++ int i; ++ ++ for (i = 0; i < edma_cinfo->num_tx_queues; i++) ++ edma_free_tx_ring(edma_cinfo, edma_cinfo->tpd_ring[i]); ++} ++ ++/* 
edma_free_tx_resources() ++ * Free buffers associated with tx rings ++ */ ++void edma_free_tx_resources(struct edma_common_info *edma_cinfo) ++{ ++ struct edma_tx_desc_ring *etdr; ++ struct edma_sw_desc *sw_desc; ++ struct platform_device *pdev = edma_cinfo->pdev; ++ int i, j; ++ ++ for (i = 0; i < edma_cinfo->num_tx_queues; i++) { ++ etdr = edma_cinfo->tpd_ring[i]; ++ for (j = 0; j < EDMA_TX_RING_SIZE; j++) { ++ sw_desc = &etdr->sw_desc[j]; ++ if (sw_desc->flags & (EDMA_SW_DESC_FLAG_SKB_HEAD | ++ EDMA_SW_DESC_FLAG_SKB_FRAG | EDMA_SW_DESC_FLAG_SKB_FRAGLIST)) ++ edma_tx_unmap_and_free(pdev, sw_desc); ++ } ++ } ++} ++ ++/* edma_alloc_rx_rings() ++ * Allocate rx rings ++ */ ++int edma_alloc_rx_rings(struct edma_common_info *edma_cinfo) ++{ ++ struct platform_device *pdev = edma_cinfo->pdev; ++ int i, j, err = 0; ++ ++ for (i = 0, j = 0; i < edma_cinfo->num_rx_queues; i++) { ++ err = edma_alloc_rx_ring(edma_cinfo, edma_cinfo->rfd_ring[j]); ++ if (err) { ++ dev_err(&pdev->dev, "Rx Queue alloc%u failed\n", i); ++ return err; ++ } ++ j += ((edma_cinfo->num_rx_queues == 4) ? 2 : 1); ++ } ++ ++ return 0; ++} ++ ++/* edma_free_rx_rings() ++ * free rx rings ++ */ ++void edma_free_rx_rings(struct edma_common_info *edma_cinfo) ++{ ++ int i, j; ++ ++ for (i = 0, j = 0; i < edma_cinfo->num_rx_queues; i++) { ++ edma_free_rx_ring(edma_cinfo, edma_cinfo->rfd_ring[j]); ++ j += ((edma_cinfo->num_rx_queues == 4) ? 2 : 1); ++ } ++} ++ ++/* edma_free_queues() ++ * Free the queues allocaated ++ */ ++void edma_free_queues(struct edma_common_info *edma_cinfo) ++{ ++ int i , j; ++ ++ for (i = 0; i < edma_cinfo->num_tx_queues; i++) { ++ if (edma_cinfo->tpd_ring[i]) ++ kfree(edma_cinfo->tpd_ring[i]); ++ edma_cinfo->tpd_ring[i] = NULL; ++ } ++ ++ for (i = 0, j = 0; i < edma_cinfo->num_rx_queues; i++) { ++ if (edma_cinfo->rfd_ring[j]) ++ kfree(edma_cinfo->rfd_ring[j]); ++ edma_cinfo->rfd_ring[j] = NULL; ++ j += ((edma_cinfo->num_rx_queues == 4) ? 
2 : 1); ++ } ++ ++ edma_cinfo->num_rx_queues = 0; ++ edma_cinfo->num_tx_queues = 0; ++ ++ return; ++} ++ ++/* edma_free_rx_resources() ++ * Free buffers associated with tx rings ++ */ ++void edma_free_rx_resources(struct edma_common_info *edma_cinfo) ++{ ++ struct edma_rfd_desc_ring *erdr; ++ struct edma_sw_desc *sw_desc; ++ struct platform_device *pdev = edma_cinfo->pdev; ++ int i, j, k; ++ ++ for (i = 0, k = 0; i < edma_cinfo->num_rx_queues; i++) { ++ erdr = edma_cinfo->rfd_ring[k]; ++ for (j = 0; j < EDMA_RX_RING_SIZE; j++) { ++ sw_desc = &erdr->sw_desc[j]; ++ if (likely(sw_desc->flags & EDMA_SW_DESC_FLAG_SKB_HEAD)) { ++ dma_unmap_single(&pdev->dev, sw_desc->dma, ++ sw_desc->length, DMA_FROM_DEVICE); ++ edma_clean_rfd(erdr, j); ++ } else if ((sw_desc->flags & EDMA_SW_DESC_FLAG_SKB_FRAG)) { ++ dma_unmap_page(&pdev->dev, sw_desc->dma, ++ sw_desc->length, DMA_FROM_DEVICE); ++ edma_clean_rfd(erdr, j); ++ } ++ } ++ k += ((edma_cinfo->num_rx_queues == 4) ? 2 : 1); ++ ++ } ++} ++ ++/* edma_alloc_queues_tx() ++ * Allocate memory for all rings ++ */ ++int edma_alloc_queues_tx(struct edma_common_info *edma_cinfo) ++{ ++ int i; ++ ++ for (i = 0; i < edma_cinfo->num_tx_queues; i++) { ++ struct edma_tx_desc_ring *etdr; ++ etdr = kzalloc(sizeof(struct edma_tx_desc_ring), GFP_KERNEL); ++ if (!etdr) ++ goto err; ++ etdr->count = edma_cinfo->tx_ring_count; ++ edma_cinfo->tpd_ring[i] = etdr; ++ } ++ ++ return 0; ++err: ++ edma_free_queues(edma_cinfo); ++ return -1; ++} ++ ++/* edma_alloc_queues_rx() ++ * Allocate memory for all rings ++ */ ++int edma_alloc_queues_rx(struct edma_common_info *edma_cinfo) ++{ ++ int i, j; ++ ++ for (i = 0, j = 0; i < edma_cinfo->num_rx_queues; i++) { ++ struct edma_rfd_desc_ring *rfd_ring; ++ rfd_ring = kzalloc(sizeof(struct edma_rfd_desc_ring), ++ GFP_KERNEL); ++ if (!rfd_ring) ++ goto err; ++ rfd_ring->count = edma_cinfo->rx_ring_count; ++ edma_cinfo->rfd_ring[j] = rfd_ring; ++ j += ((edma_cinfo->num_rx_queues == 4) ? 
2 : 1); ++ } ++ return 0; ++err: ++ edma_free_queues(edma_cinfo); ++ return -1; ++} ++ ++/* edma_clear_irq_status() ++ * Clear interrupt status ++ */ ++void edma_clear_irq_status() ++{ ++ edma_write_reg(EDMA_REG_RX_ISR, 0xff); ++ edma_write_reg(EDMA_REG_TX_ISR, 0xffff); ++ edma_write_reg(EDMA_REG_MISC_ISR, 0x1fff); ++ edma_write_reg(EDMA_REG_WOL_ISR, 0x1); ++}; ++ ++/* edma_configure() ++ * Configure skb, edma interrupts and control register. ++ */ ++int edma_configure(struct edma_common_info *edma_cinfo) ++{ ++ struct edma_hw *hw = &edma_cinfo->hw; ++ u32 intr_modrt_data; ++ u32 intr_ctrl_data = 0; ++ int i, j, ret_count; ++ ++ edma_read_reg(EDMA_REG_INTR_CTRL, &intr_ctrl_data); ++ intr_ctrl_data &= ~(1 << EDMA_INTR_SW_IDX_W_TYP_SHIFT); ++ intr_ctrl_data |= hw->intr_sw_idx_w << EDMA_INTR_SW_IDX_W_TYP_SHIFT; ++ edma_write_reg(EDMA_REG_INTR_CTRL, intr_ctrl_data); ++ ++ edma_clear_irq_status(); ++ ++ /* Clear any WOL status */ ++ edma_write_reg(EDMA_REG_WOL_CTRL, 0); ++ intr_modrt_data = (EDMA_TX_IMT << EDMA_IRQ_MODRT_TX_TIMER_SHIFT); ++ intr_modrt_data |= (EDMA_RX_IMT << EDMA_IRQ_MODRT_RX_TIMER_SHIFT); ++ edma_write_reg(EDMA_REG_IRQ_MODRT_TIMER_INIT, intr_modrt_data); ++ edma_configure_tx(edma_cinfo); ++ edma_configure_rx(edma_cinfo); ++ ++ /* Allocate the RX buffer */ ++ for (i = 0, j = 0; i < edma_cinfo->num_rx_queues; i++) { ++ struct edma_rfd_desc_ring *ring = edma_cinfo->rfd_ring[j]; ++ ret_count = edma_alloc_rx_buf(edma_cinfo, ring, ring->count, j); ++ if (ret_count) { ++ dev_dbg(&edma_cinfo->pdev->dev, "not all rx buffers allocated\n"); ++ } ++ j += ((edma_cinfo->num_rx_queues == 4) ? 
2 : 1); ++ } ++ ++ /* Configure descriptor Ring */ ++ edma_init_desc(edma_cinfo); ++ return 0; ++} ++ ++/* edma_irq_enable() ++ * Enable default interrupt generation settings ++ */ ++void edma_irq_enable(struct edma_common_info *edma_cinfo) ++{ ++ struct edma_hw *hw = &edma_cinfo->hw; ++ int i, j; ++ ++ edma_write_reg(EDMA_REG_RX_ISR, 0xff); ++ for (i = 0, j = 0; i < edma_cinfo->num_rx_queues; i++) { ++ edma_write_reg(EDMA_REG_RX_INT_MASK_Q(j), hw->rx_intr_mask); ++ j += ((edma_cinfo->num_rx_queues == 4) ? 2 : 1); ++ } ++ edma_write_reg(EDMA_REG_TX_ISR, 0xffff); ++ for (i = 0; i < edma_cinfo->num_tx_queues; i++) ++ edma_write_reg(EDMA_REG_TX_INT_MASK_Q(i), hw->tx_intr_mask); ++} ++ ++/* edma_irq_disable() ++ * Disable Interrupt ++ */ ++void edma_irq_disable(struct edma_common_info *edma_cinfo) ++{ ++ int i; ++ ++ for (i = 0; i < EDMA_MAX_RECEIVE_QUEUE; i++) ++ edma_write_reg(EDMA_REG_RX_INT_MASK_Q(i), 0x0); ++ ++ for (i = 0; i < EDMA_MAX_TRANSMIT_QUEUE; i++) ++ edma_write_reg(EDMA_REG_TX_INT_MASK_Q(i), 0x0); ++ edma_write_reg(EDMA_REG_MISC_IMR, 0); ++ edma_write_reg(EDMA_REG_WOL_IMR, 0); ++} ++ ++/* edma_free_irqs() ++ * Free All IRQs ++ */ ++void edma_free_irqs(struct edma_adapter *adapter) ++{ ++ struct edma_common_info *edma_cinfo = adapter->edma_cinfo; ++ int i, j; ++ int k = ((edma_cinfo->num_rx_queues == 4) ? 
1 : 2); ++ ++ for (i = 0; i < CONFIG_NR_CPUS; i++) { ++ for (j = edma_cinfo->edma_percpu_info[i].tx_start; j < (edma_cinfo->edma_percpu_info[i].tx_start + 4); j++) ++ free_irq(edma_cinfo->tx_irq[j], &edma_cinfo->edma_percpu_info[i]); ++ ++ for (j = edma_cinfo->edma_percpu_info[i].rx_start; j < (edma_cinfo->edma_percpu_info[i].rx_start + k); j++) ++ free_irq(edma_cinfo->rx_irq[j], &edma_cinfo->edma_percpu_info[i]); ++ } ++} ++ ++/* edma_enable_rx_ctrl() ++ * Enable RX queue control ++ */ ++void edma_enable_rx_ctrl(struct edma_hw *hw) ++{ ++ u32 data; ++ ++ edma_read_reg(EDMA_REG_RXQ_CTRL, &data); ++ data |= EDMA_RXQ_CTRL_EN; ++ edma_write_reg(EDMA_REG_RXQ_CTRL, data); ++} ++ ++ ++/* edma_enable_tx_ctrl() ++ * Enable TX queue control ++ */ ++void edma_enable_tx_ctrl(struct edma_hw *hw) ++{ ++ u32 data; ++ ++ edma_read_reg(EDMA_REG_TXQ_CTRL, &data); ++ data |= EDMA_TXQ_CTRL_TXQ_EN; ++ edma_write_reg(EDMA_REG_TXQ_CTRL, data); ++} ++ ++/* edma_stop_rx_tx() ++ * Disable RX/TQ Queue control ++ */ ++void edma_stop_rx_tx(struct edma_hw *hw) ++{ ++ u32 data; ++ ++ edma_read_reg(EDMA_REG_RXQ_CTRL, &data); ++ data &= ~EDMA_RXQ_CTRL_EN; ++ edma_write_reg(EDMA_REG_RXQ_CTRL, data); ++ edma_read_reg(EDMA_REG_TXQ_CTRL, &data); ++ data &= ~EDMA_TXQ_CTRL_TXQ_EN; ++ edma_write_reg(EDMA_REG_TXQ_CTRL, data); ++} ++ ++/* edma_reset() ++ * Reset the EDMA ++ */ ++int edma_reset(struct edma_common_info *edma_cinfo) ++{ ++ struct edma_hw *hw = &edma_cinfo->hw; ++ ++ edma_irq_disable(edma_cinfo); ++ ++ edma_clear_irq_status(); ++ ++ edma_stop_rx_tx(hw); ++ ++ return 0; ++} ++ ++/* edma_fill_netdev() ++ * Fill netdev for each etdr ++ */ ++int edma_fill_netdev(struct edma_common_info *edma_cinfo, int queue_id, ++ int dev, int txq_id) ++{ ++ struct edma_tx_desc_ring *etdr; ++ int i = 0; ++ ++ etdr = edma_cinfo->tpd_ring[queue_id]; ++ ++ while (etdr->netdev[i]) ++ i++; ++ ++ if (i >= EDMA_MAX_NETDEV_PER_QUEUE) ++ return -1; ++ ++ /* Populate the netdev associated with the tpd ring */ ++ 
etdr->netdev[i] = edma_netdev[dev]; ++ etdr->nq[i] = netdev_get_tx_queue(edma_netdev[dev], txq_id); ++ ++ return 0; ++} ++ ++/* edma_change_mtu() ++ * change the MTU of the NIC. ++ */ ++int edma_change_mtu(struct net_device *netdev, int new_mtu) ++{ ++ struct edma_adapter *adapter = netdev_priv(netdev); ++ struct edma_common_info *edma_cinfo = adapter->edma_cinfo; ++ int old_mtu = netdev->mtu; ++ int max_frame_size = new_mtu + ETH_HLEN + ETH_FCS_LEN + (2 * VLAN_HLEN); ++ ++ if ((max_frame_size < ETH_ZLEN + ETH_FCS_LEN) || ++ (max_frame_size > EDMA_MAX_JUMBO_FRAME_SIZE)) { ++ dev_err(&edma_cinfo->pdev->dev, "MTU setting not correct\n"); ++ return -EINVAL; ++ } ++ ++ /* set MTU */ ++ if (old_mtu != new_mtu) { ++ netdev->mtu = new_mtu; ++ netdev_update_features(netdev); ++ } ++ ++ return 0; ++} ++ ++/* edma_set_mac() ++ * Change the Ethernet Address of the NIC ++ */ ++int edma_set_mac_addr(struct net_device *netdev, void *p) ++{ ++ struct sockaddr *addr = p; ++ ++ if (!is_valid_ether_addr(addr->sa_data)) ++ return -EINVAL; ++ ++ if (netif_running(netdev)) ++ return -EBUSY; ++ ++ memcpy(netdev->dev_addr, addr->sa_data, netdev->addr_len); ++ return 0; ++} ++ ++/* edma_set_stp_rstp() ++ * set stp/rstp ++ */ ++void edma_set_stp_rstp(bool rstp) ++{ ++ edma_stp_rstp = rstp; ++} ++ ++/* edma_assign_ath_hdr_type() ++ * assign atheros header eth type ++ */ ++void edma_assign_ath_hdr_type(int eth_type) ++{ ++ edma_ath_eth_type = eth_type & EDMA_ETH_TYPE_MASK; ++} ++ ++/* edma_get_default_vlan_tag() ++ * Used by other modules to get the default vlan tag ++ */ ++int edma_get_default_vlan_tag(struct net_device *netdev) ++{ ++ struct edma_adapter *adapter = netdev_priv(netdev); ++ ++ if (adapter->default_vlan_tag) ++ return adapter->default_vlan_tag; ++ ++ return 0; ++} ++ ++/* edma_open() ++ * gets called when netdevice is up, start the queue. 
++ */ ++int edma_open(struct net_device *netdev) ++{ ++ struct edma_adapter *adapter = netdev_priv(netdev); ++ struct platform_device *pdev = adapter->edma_cinfo->pdev; ++ ++ netif_tx_start_all_queues(netdev); ++ edma_initialise_rfs_flow_table(adapter); ++ set_bit(__EDMA_UP, &adapter->state_flags); ++ ++ /* if Link polling is enabled, in our case enabled for WAN, then ++ * do a phy start, else always set link as UP ++ */ ++ if (adapter->poll_required) { ++ if (!IS_ERR(adapter->phydev)) { ++ phy_start(adapter->phydev); ++ phy_start_aneg(adapter->phydev); ++ adapter->link_state = __EDMA_LINKDOWN; ++ } else { ++ dev_dbg(&pdev->dev, "Invalid PHY device for a link polled interface\n"); ++ } ++ } else { ++ adapter->link_state = __EDMA_LINKUP; ++ netif_carrier_on(netdev); ++ } ++ ++ return 0; ++} ++ ++ ++/* edma_close() ++ * gets called when netdevice is down, stops the queue. ++ */ ++int edma_close(struct net_device *netdev) ++{ ++ struct edma_adapter *adapter = netdev_priv(netdev); ++ ++ edma_free_rfs_flow_table(adapter); ++ netif_carrier_off(netdev); ++ netif_tx_stop_all_queues(netdev); ++ ++ if (adapter->poll_required) { ++ if (!IS_ERR(adapter->phydev)) ++ phy_stop(adapter->phydev); ++ } ++ ++ adapter->link_state = __EDMA_LINKDOWN; ++ ++ /* Set GMAC state to UP before link state is checked ++ */ ++ clear_bit(__EDMA_UP, &adapter->state_flags); ++ ++ return 0; ++} ++ ++/* edma_poll ++ * polling function that gets called when the napi gets scheduled. ++ * ++ * Main sequence of task performed in this api ++ * is clear irq status -> clear_tx_irq -> clean_rx_irq-> ++ * enable interrupts. 
++ */ ++int edma_poll(struct napi_struct *napi, int budget) ++{ ++ struct edma_per_cpu_queues_info *edma_percpu_info = container_of(napi, ++ struct edma_per_cpu_queues_info, napi); ++ struct edma_common_info *edma_cinfo = edma_percpu_info->edma_cinfo; ++ u32 reg_data; ++ u32 shadow_rx_status, shadow_tx_status; ++ int queue_id; ++ int i, work_done = 0; ++ ++ /* Store the Rx/Tx status by ANDing it with ++ * appropriate CPU RX?TX mask ++ */ ++ edma_read_reg(EDMA_REG_RX_ISR, ®_data); ++ edma_percpu_info->rx_status |= reg_data & edma_percpu_info->rx_mask; ++ shadow_rx_status = edma_percpu_info->rx_status; ++ edma_read_reg(EDMA_REG_TX_ISR, ®_data); ++ edma_percpu_info->tx_status |= reg_data & edma_percpu_info->tx_mask; ++ shadow_tx_status = edma_percpu_info->tx_status; ++ ++ /* Every core will have a start, which will be computed ++ * in probe and stored in edma_percpu_info->tx_start variable. ++ * We will shift the status bit by tx_start to obtain ++ * status bits for the core on which the current processing ++ * is happening. Since, there are 4 tx queues per core, ++ * we will run the loop till we get the correct queue to clear. ++ */ ++ while (edma_percpu_info->tx_status) { ++ queue_id = ffs(edma_percpu_info->tx_status) - 1; ++ edma_tx_complete(edma_cinfo, queue_id); ++ edma_percpu_info->tx_status &= ~(1 << queue_id); ++ } ++ ++ /* Every core will have a start, which will be computed ++ * in probe and stored in edma_percpu_info->tx_start variable. ++ * We will shift the status bit by tx_start to obtain ++ * status bits for the core on which the current processing ++ * is happening. Since, there are 4 tx queues per core, we ++ * will run the loop till we get the correct queue to clear. 
++ */ ++ while (edma_percpu_info->rx_status) { ++ queue_id = ffs(edma_percpu_info->rx_status) - 1; ++ edma_rx_complete(edma_cinfo, &work_done, ++ budget, queue_id, napi); ++ ++ if (likely(work_done < budget)) ++ edma_percpu_info->rx_status &= ~(1 << queue_id); ++ else ++ break; ++ } ++ ++ /* Clear the status register, to avoid the interrupts to ++ * reoccur.This clearing of interrupt status register is ++ * done here as writing to status register only takes place ++ * once the producer/consumer index has been updated to ++ * reflect that the packet transmission/reception went fine. ++ */ ++ edma_write_reg(EDMA_REG_RX_ISR, shadow_rx_status); ++ edma_write_reg(EDMA_REG_TX_ISR, shadow_tx_status); ++ ++ /* If budget not fully consumed, exit the polling mode */ ++ if (likely(work_done < budget)) { ++ napi_complete(napi); ++ ++ /* re-enable the interrupts */ ++ for (i = 0; i < edma_cinfo->num_rxq_per_core; i++) ++ edma_write_reg(EDMA_REG_RX_INT_MASK_Q(edma_percpu_info->rx_start + i), 0x1); ++ for (i = 0; i < edma_cinfo->num_txq_per_core; i++) ++ edma_write_reg(EDMA_REG_TX_INT_MASK_Q(edma_percpu_info->tx_start + i), 0x1); ++ } ++ ++ return work_done; ++} ++ ++/* edma interrupt() ++ * interrupt handler ++ */ ++irqreturn_t edma_interrupt(int irq, void *dev) ++{ ++ struct edma_per_cpu_queues_info *edma_percpu_info = (struct edma_per_cpu_queues_info *) dev; ++ struct edma_common_info *edma_cinfo = edma_percpu_info->edma_cinfo; ++ int i; ++ ++ /* Unmask the TX/RX interrupt register */ ++ for (i = 0; i < edma_cinfo->num_rxq_per_core; i++) ++ edma_write_reg(EDMA_REG_RX_INT_MASK_Q(edma_percpu_info->rx_start + i), 0x0); ++ ++ for (i = 0; i < edma_cinfo->num_txq_per_core; i++) ++ edma_write_reg(EDMA_REG_TX_INT_MASK_Q(edma_percpu_info->tx_start + i), 0x0); ++ ++ napi_schedule(&edma_percpu_info->napi); ++ ++ return IRQ_HANDLED; ++} +--- /dev/null ++++ b/drivers/net/ethernet/qualcomm/essedma/edma.h +@@ -0,0 +1,447 @@ ++/* ++ * Copyright (c) 2014 - 2016, The Linux Foundation. 
All rights reserved. ++ * ++ * Permission to use, copy, modify, and/or distribute this software for ++ * any purpose with or without fee is hereby granted, provided that the ++ * above copyright notice and this permission notice appear in all copies. ++ * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES ++ * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF ++ * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ++ * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES ++ * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN ++ * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT ++ * OF OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. ++ */ ++ ++#ifndef _EDMA_H_ ++#define _EDMA_H_ ++ ++#include ++#include ++#include ++#include ++#include ++#include ++#include ++#include ++#include ++#include ++#include ++#include ++#include ++#include ++#include ++#include ++#include ++#include ++#include ++#include ++#include ++#include ++#include ++#include "ess_edma.h" ++ ++#define EDMA_CPU_CORES_SUPPORTED 4 ++#define EDMA_MAX_PORTID_SUPPORTED 5 ++#define EDMA_MAX_VLAN_SUPPORTED EDMA_MAX_PORTID_SUPPORTED ++#define EDMA_MAX_PORTID_BITMAP_INDEX (EDMA_MAX_PORTID_SUPPORTED + 1) ++#define EDMA_MAX_PORTID_BITMAP_SUPPORTED 0x1f /* 0001_1111 = 0x1f */ ++#define EDMA_MAX_NETDEV_PER_QUEUE 4 /* 3 Netdev per queue, 1 space for indexing */ ++ ++#define EDMA_MAX_RECEIVE_QUEUE 8 ++#define EDMA_MAX_TRANSMIT_QUEUE 16 ++ ++/* WAN/LAN adapter number */ ++#define EDMA_WAN 0 ++#define EDMA_LAN 1 ++ ++/* VLAN tag */ ++#define EDMA_LAN_DEFAULT_VLAN 1 ++#define EDMA_WAN_DEFAULT_VLAN 2 ++ ++#define EDMA_DEFAULT_GROUP1_VLAN 1 ++#define EDMA_DEFAULT_GROUP2_VLAN 2 ++#define EDMA_DEFAULT_GROUP3_VLAN 3 ++#define EDMA_DEFAULT_GROUP4_VLAN 4 ++#define EDMA_DEFAULT_GROUP5_VLAN 5 ++ ++/* Queues exposed to linux kernel */ ++#define EDMA_NETDEV_TX_QUEUE 4 ++#define EDMA_NETDEV_RX_QUEUE 4 
++ ++/* Number of queues per core */ ++#define EDMA_NUM_TXQ_PER_CORE 4 ++#define EDMA_NUM_RXQ_PER_CORE 2 ++ ++#define EDMA_TPD_EOP_SHIFT 31 ++ ++#define EDMA_PORT_ID_SHIFT 12 ++#define EDMA_PORT_ID_MASK 0x7 ++ ++/* tpd word 3 bit 18-28 */ ++#define EDMA_TPD_PORT_BITMAP_SHIFT 18 ++ ++#define EDMA_TPD_FROM_CPU_SHIFT 25 ++ ++#define EDMA_FROM_CPU_MASK 0x80 ++#define EDMA_SKB_PRIORITY_MASK 0x38 ++ ++/* TX/RX descriptor ring count */ ++/* should be a power of 2 */ ++#define EDMA_RX_RING_SIZE 128 ++#define EDMA_TX_RING_SIZE 128 ++ ++/* Flags used in paged/non paged mode */ ++#define EDMA_RX_HEAD_BUFF_SIZE_JUMBO 256 ++#define EDMA_RX_HEAD_BUFF_SIZE 1540 ++ ++/* MAX frame size supported by switch */ ++#define EDMA_MAX_JUMBO_FRAME_SIZE 9216 ++ ++/* Configurations */ ++#define EDMA_INTR_CLEAR_TYPE 0 ++#define EDMA_INTR_SW_IDX_W_TYPE 0 ++#define EDMA_FIFO_THRESH_TYPE 0 ++#define EDMA_RSS_TYPE 0 ++#define EDMA_RX_IMT 0x0020 ++#define EDMA_TX_IMT 0x0050 ++#define EDMA_TPD_BURST 5 ++#define EDMA_TXF_BURST 0x100 ++#define EDMA_RFD_BURST 8 ++#define EDMA_RFD_THR 16 ++#define EDMA_RFD_LTHR 0 ++ ++/* RX/TX per CPU based mask/shift */ ++#define EDMA_TX_PER_CPU_MASK 0xF ++#define EDMA_RX_PER_CPU_MASK 0x3 ++#define EDMA_TX_PER_CPU_MASK_SHIFT 0x2 ++#define EDMA_RX_PER_CPU_MASK_SHIFT 0x1 ++#define EDMA_TX_CPU_START_SHIFT 0x2 ++#define EDMA_RX_CPU_START_SHIFT 0x1 ++ ++/* FLags used in transmit direction */ ++#define EDMA_HW_CHECKSUM 0x00000001 ++#define EDMA_VLAN_TX_TAG_INSERT_FLAG 0x00000002 ++#define EDMA_VLAN_TX_TAG_INSERT_DEFAULT_FLAG 0x00000004 ++ ++#define EDMA_SW_DESC_FLAG_LAST 0x1 ++#define EDMA_SW_DESC_FLAG_SKB_HEAD 0x2 ++#define EDMA_SW_DESC_FLAG_SKB_FRAG 0x4 ++#define EDMA_SW_DESC_FLAG_SKB_FRAGLIST 0x8 ++#define EDMA_SW_DESC_FLAG_SKB_NONE 0x10 ++#define EDMA_SW_DESC_FLAG_SKB_REUSE 0x20 ++ ++ ++#define EDMA_MAX_SKB_FRAGS (MAX_SKB_FRAGS + 1) ++ ++/* Ethtool specific list of EDMA supported features */ ++#define EDMA_SUPPORTED_FEATURES (SUPPORTED_10baseT_Half \ ++ | 
SUPPORTED_10baseT_Full \ ++ | SUPPORTED_100baseT_Half \ ++ | SUPPORTED_100baseT_Full \ ++ | SUPPORTED_1000baseT_Full) ++ ++/* Recevie side atheros Header */ ++#define EDMA_RX_ATH_HDR_VERSION 0x2 ++#define EDMA_RX_ATH_HDR_VERSION_SHIFT 14 ++#define EDMA_RX_ATH_HDR_PRIORITY_SHIFT 11 ++#define EDMA_RX_ATH_PORT_TYPE_SHIFT 6 ++#define EDMA_RX_ATH_HDR_RSTP_PORT_TYPE 0x4 ++ ++/* Transmit side atheros Header */ ++#define EDMA_TX_ATH_HDR_PORT_BITMAP_MASK 0x7F ++#define EDMA_TX_ATH_HDR_FROM_CPU_MASK 0x80 ++#define EDMA_TX_ATH_HDR_FROM_CPU_SHIFT 7 ++ ++#define EDMA_TXQ_START_CORE0 8 ++#define EDMA_TXQ_START_CORE1 12 ++#define EDMA_TXQ_START_CORE2 0 ++#define EDMA_TXQ_START_CORE3 4 ++ ++#define EDMA_TXQ_IRQ_MASK_CORE0 0x0F00 ++#define EDMA_TXQ_IRQ_MASK_CORE1 0xF000 ++#define EDMA_TXQ_IRQ_MASK_CORE2 0x000F ++#define EDMA_TXQ_IRQ_MASK_CORE3 0x00F0 ++ ++#define EDMA_ETH_HDR_LEN 12 ++#define EDMA_ETH_TYPE_MASK 0xFFFF ++ ++#define EDMA_RX_BUFFER_WRITE 16 ++#define EDMA_RFD_AVAIL_THR 80 ++ ++#define EDMA_GMAC_NO_MDIO_PHY PHY_MAX_ADDR ++ ++extern int ssdk_rfs_ipct_rule_set(__be32 ip_src, __be32 ip_dst, ++ __be16 sport, __be16 dport, ++ uint8_t proto, u16 loadbalance, bool action); ++struct edma_ethtool_statistics { ++ u32 tx_q0_pkt; ++ u32 tx_q1_pkt; ++ u32 tx_q2_pkt; ++ u32 tx_q3_pkt; ++ u32 tx_q4_pkt; ++ u32 tx_q5_pkt; ++ u32 tx_q6_pkt; ++ u32 tx_q7_pkt; ++ u32 tx_q8_pkt; ++ u32 tx_q9_pkt; ++ u32 tx_q10_pkt; ++ u32 tx_q11_pkt; ++ u32 tx_q12_pkt; ++ u32 tx_q13_pkt; ++ u32 tx_q14_pkt; ++ u32 tx_q15_pkt; ++ u32 tx_q0_byte; ++ u32 tx_q1_byte; ++ u32 tx_q2_byte; ++ u32 tx_q3_byte; ++ u32 tx_q4_byte; ++ u32 tx_q5_byte; ++ u32 tx_q6_byte; ++ u32 tx_q7_byte; ++ u32 tx_q8_byte; ++ u32 tx_q9_byte; ++ u32 tx_q10_byte; ++ u32 tx_q11_byte; ++ u32 tx_q12_byte; ++ u32 tx_q13_byte; ++ u32 tx_q14_byte; ++ u32 tx_q15_byte; ++ u32 rx_q0_pkt; ++ u32 rx_q1_pkt; ++ u32 rx_q2_pkt; ++ u32 rx_q3_pkt; ++ u32 rx_q4_pkt; ++ u32 rx_q5_pkt; ++ u32 rx_q6_pkt; ++ u32 rx_q7_pkt; ++ u32 rx_q0_byte; ++ u32 
rx_q1_byte; ++ u32 rx_q2_byte; ++ u32 rx_q3_byte; ++ u32 rx_q4_byte; ++ u32 rx_q5_byte; ++ u32 rx_q6_byte; ++ u32 rx_q7_byte; ++ u32 tx_desc_error; ++}; ++ ++struct edma_mdio_data { ++ struct mii_bus *mii_bus; ++ void __iomem *membase; ++ int phy_irq[PHY_MAX_ADDR]; ++}; ++ ++/* EDMA LINK state */ ++enum edma_link_state { ++ __EDMA_LINKUP, /* Indicate link is UP */ ++ __EDMA_LINKDOWN /* Indicate link is down */ ++}; ++ ++/* EDMA GMAC state */ ++enum edma_gmac_state { ++ __EDMA_UP /* use to indicate GMAC is up */ ++}; ++ ++/* edma transmit descriptor */ ++struct edma_tx_desc { ++ __le16 len; /* full packet including CRC */ ++ __le16 svlan_tag; /* vlan tag */ ++ __le32 word1; /* byte 4-7 */ ++ __le32 addr; /* address of buffer */ ++ __le32 word3; /* byte 12 */ ++}; ++ ++/* edma receive return descriptor */ ++struct edma_rx_return_desc { ++ u16 rrd0; ++ u16 rrd1; ++ u16 rrd2; ++ u16 rrd3; ++ u16 rrd4; ++ u16 rrd5; ++ u16 rrd6; ++ u16 rrd7; ++}; ++ ++/* RFD descriptor */ ++struct edma_rx_free_desc { ++ __le32 buffer_addr; /* buffer address */ ++}; ++ ++/* edma hw specific data */ ++struct edma_hw { ++ u32 __iomem *hw_addr; /* inner register address */ ++ struct edma_adapter *adapter; /* netdevice adapter */ ++ u32 rx_intr_mask; /*rx interrupt mask */ ++ u32 tx_intr_mask; /* tx interrupt nask */ ++ u32 misc_intr_mask; /* misc interrupt mask */ ++ u32 wol_intr_mask; /* wake on lan interrupt mask */ ++ bool intr_clear_type; /* interrupt clear */ ++ bool intr_sw_idx_w; /* interrupt software index */ ++ u32 rx_head_buff_size; /* Rx buffer size */ ++ u8 rss_type; /* rss protocol type */ ++}; ++ ++/* edma_sw_desc stores software descriptor ++ * SW descriptor has 1:1 map with HW descriptor ++ */ ++struct edma_sw_desc { ++ struct sk_buff *skb; ++ dma_addr_t dma; /* dma address */ ++ u16 length; /* Tx/Rx buffer length */ ++ u32 flags; ++}; ++ ++/* per core related information */ ++struct edma_per_cpu_queues_info { ++ struct napi_struct napi; /* napi associated with the core */ ++ 
u32 tx_mask; /* tx interrupt mask */ ++ u32 rx_mask; /* rx interrupt mask */ ++ u32 tx_status; /* tx interrupt status */ ++ u32 rx_status; /* rx interrupt status */ ++ u32 tx_start; /* tx queue start */ ++ u32 rx_start; /* rx queue start */ ++ struct edma_common_info *edma_cinfo; /* edma common info */ ++}; ++ ++/* edma specific common info */ ++struct edma_common_info { ++ struct edma_tx_desc_ring *tpd_ring[16]; /* 16 Tx queues */ ++ struct edma_rfd_desc_ring *rfd_ring[8]; /* 8 Rx queues */ ++ struct platform_device *pdev; /* device structure */ ++ struct net_device *netdev[EDMA_MAX_PORTID_SUPPORTED]; ++ struct net_device *portid_netdev_lookup_tbl[EDMA_MAX_PORTID_BITMAP_INDEX]; ++ struct ctl_table_header *edma_ctl_table_hdr; ++ int num_gmac; ++ struct edma_ethtool_statistics edma_ethstats; /* ethtool stats */ ++ int num_rx_queues; /* number of rx queue */ ++ u32 num_tx_queues; /* number of tx queue */ ++ u32 tx_irq[16]; /* number of tx irq */ ++ u32 rx_irq[8]; /* number of rx irq */ ++ u32 from_cpu; /* from CPU TPD field */ ++ u32 num_rxq_per_core; /* Rx queues per core */ ++ u32 num_txq_per_core; /* Tx queues per core */ ++ u16 tx_ring_count; /* Tx ring count */ ++ u16 rx_ring_count; /* Rx ring*/ ++ u16 rx_head_buffer_len; /* rx buffer length */ ++ u16 rx_page_buffer_len; /* rx buffer length */ ++ u32 page_mode; /* Jumbo frame supported flag */ ++ u32 fraglist_mode; /* fraglist supported flag */ ++ struct edma_hw hw; /* edma hw specific structure */ ++ struct edma_per_cpu_queues_info edma_percpu_info[CONFIG_NR_CPUS]; /* per cpu information */ ++ spinlock_t stats_lock; /* protect edma stats area for updation */ ++}; ++ ++/* transimit packet descriptor (tpd) ring */ ++struct edma_tx_desc_ring { ++ struct netdev_queue *nq[EDMA_MAX_NETDEV_PER_QUEUE]; /* Linux queue index */ ++ struct net_device *netdev[EDMA_MAX_NETDEV_PER_QUEUE]; ++ /* Array of netdevs associated with the tpd ring */ ++ void *hw_desc; /* descriptor ring virtual address */ ++ struct edma_sw_desc 
*sw_desc; /* buffer associated with ring */ ++ int netdev_bmp; /* Bitmap for per-ring netdevs */ ++ u32 size; /* descriptor ring length in bytes */ ++ u16 count; /* number of descriptors in the ring */ ++ dma_addr_t dma; /* descriptor ring physical address */ ++ u16 sw_next_to_fill; /* next Tx descriptor to fill */ ++ u16 sw_next_to_clean; /* next Tx descriptor to clean */ ++}; ++ ++/* receive free descriptor (rfd) ring */ ++struct edma_rfd_desc_ring { ++ void *hw_desc; /* descriptor ring virtual address */ ++ struct edma_sw_desc *sw_desc; /* buffer associated with ring */ ++ u16 size; /* bytes allocated to sw_desc */ ++ u16 count; /* number of descriptors in the ring */ ++ dma_addr_t dma; /* descriptor ring physical address */ ++ u16 sw_next_to_fill; /* next descriptor to fill */ ++ u16 sw_next_to_clean; /* next descriptor to clean */ ++}; ++ ++/* edma_rfs_flter_node - rfs filter node in hash table */ ++struct edma_rfs_filter_node { ++ struct flow_keys keys; ++ u32 flow_id; /* flow_id of filter provided by kernel */ ++ u16 filter_id; /* filter id of filter returned by adaptor */ ++ u16 rq_id; /* desired rq index */ ++ struct hlist_node node; /* edma rfs list node */ ++}; ++ ++/* edma_rfs_flow_tbl - rfs flow table */ ++struct edma_rfs_flow_table { ++ u16 max_num_filter; /* Maximum number of filters edma supports */ ++ u16 hashtoclean; /* hash table index to clean next */ ++ int filter_available; /* Number of free filters available */ ++ struct hlist_head hlist_head[EDMA_RFS_FLOW_ENTRIES]; ++ spinlock_t rfs_ftab_lock; ++ struct timer_list expire_rfs; /* timer function for edma_rps_may_expire_flow */ ++}; ++ ++/* EDMA net device structure */ ++struct edma_adapter { ++ struct net_device *netdev; /* netdevice */ ++ struct platform_device *pdev; /* platform device */ ++ struct edma_common_info *edma_cinfo; /* edma common info */ ++ struct phy_device *phydev; /* Phy device */ ++ struct edma_rfs_flow_table rfs; /* edma rfs flow table */ ++ struct net_device_stats stats; 
/* netdev statistics */ ++ set_rfs_filter_callback_t set_rfs_rule; ++ u32 flags;/* status flags */ ++ unsigned long state_flags; /* GMAC up/down flags */ ++ u32 forced_speed; /* link force speed */ ++ u32 forced_duplex; /* link force duplex */ ++ u32 link_state; /* phy link state */ ++ u32 phy_mdio_addr; /* PHY device address on MII interface */ ++ u32 poll_required; /* check if link polling is required */ ++ u32 tx_start_offset[CONFIG_NR_CPUS]; /* tx queue start */ ++ u32 default_vlan_tag; /* vlan tag */ ++ u32 dp_bitmap; ++ uint8_t phy_id[MII_BUS_ID_SIZE + 3]; ++}; ++ ++int edma_alloc_queues_tx(struct edma_common_info *edma_cinfo); ++int edma_alloc_queues_rx(struct edma_common_info *edma_cinfo); ++int edma_open(struct net_device *netdev); ++int edma_close(struct net_device *netdev); ++void edma_free_tx_resources(struct edma_common_info *edma_c_info); ++void edma_free_rx_resources(struct edma_common_info *edma_c_info); ++int edma_alloc_tx_rings(struct edma_common_info *edma_cinfo); ++int edma_alloc_rx_rings(struct edma_common_info *edma_cinfo); ++void edma_free_tx_rings(struct edma_common_info *edma_cinfo); ++void edma_free_rx_rings(struct edma_common_info *edma_cinfo); ++void edma_free_queues(struct edma_common_info *edma_cinfo); ++void edma_irq_disable(struct edma_common_info *edma_cinfo); ++int edma_reset(struct edma_common_info *edma_cinfo); ++int edma_poll(struct napi_struct *napi, int budget); ++netdev_tx_t edma_xmit(struct sk_buff *skb, ++ struct net_device *netdev); ++int edma_configure(struct edma_common_info *edma_cinfo); ++void edma_irq_enable(struct edma_common_info *edma_cinfo); ++void edma_enable_tx_ctrl(struct edma_hw *hw); ++void edma_enable_rx_ctrl(struct edma_hw *hw); ++void edma_stop_rx_tx(struct edma_hw *hw); ++void edma_free_irqs(struct edma_adapter *adapter); ++irqreturn_t edma_interrupt(int irq, void *dev); ++void edma_write_reg(u16 reg_addr, u32 reg_value); ++void edma_read_reg(u16 reg_addr, volatile u32 *reg_value); ++struct 
net_device_stats *edma_get_stats(struct net_device *netdev); ++int edma_set_mac_addr(struct net_device *netdev, void *p); ++int edma_rx_flow_steer(struct net_device *dev, const struct sk_buff *skb, ++ u16 rxq, u32 flow_id); ++int edma_register_rfs_filter(struct net_device *netdev, ++ set_rfs_filter_callback_t set_filter); ++void edma_flow_may_expire(unsigned long data); ++void edma_set_ethtool_ops(struct net_device *netdev); ++int edma_change_mtu(struct net_device *netdev, int new_mtu); ++void edma_set_stp_rstp(bool tag); ++void edma_assign_ath_hdr_type(int tag); ++int edma_get_default_vlan_tag(struct net_device *netdev); ++void edma_adjust_link(struct net_device *netdev); ++int edma_fill_netdev(struct edma_common_info *edma_cinfo, int qid, int num, int txq_id); ++void edma_read_append_stats(struct edma_common_info *edma_cinfo); ++void edma_change_tx_coalesce(int usecs); ++void edma_change_rx_coalesce(int usecs); ++void edma_get_tx_rx_coalesce(u32 *reg_val); ++void edma_clear_irq_status(void); ++#endif /* _EDMA_H_ */ +--- /dev/null ++++ b/drivers/net/ethernet/qualcomm/essedma/edma_axi.c +@@ -0,0 +1,1220 @@ ++/* ++ * Copyright (c) 2014 - 2016, The Linux Foundation. All rights reserved. ++ * ++ * Permission to use, copy, modify, and/or distribute this software for ++ * any purpose with or without fee is hereby granted, provided that the ++ * above copyright notice and this permission notice appear in all copies. ++ * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES ++ * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF ++ * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ++ * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES ++ * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN ++ * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT ++ * OF OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. 
++ */ ++ ++#include ++#include ++#include ++#include ++#include "edma.h" ++#include "ess_edma.h" ++ ++/* Weight round robin and virtual QID mask */ ++#define EDMA_WRR_VID_SCTL_MASK 0xffff ++ ++/* Weight round robin and virtual QID shift */ ++#define EDMA_WRR_VID_SCTL_SHIFT 16 ++ ++char edma_axi_driver_name[] = "ess_edma"; ++static const u32 default_msg = NETIF_MSG_DRV | NETIF_MSG_PROBE | ++ NETIF_MSG_LINK | NETIF_MSG_TIMER | NETIF_MSG_IFDOWN | NETIF_MSG_IFUP; ++ ++static u32 edma_hw_addr; ++ ++struct timer_list edma_stats_timer; ++ ++char edma_tx_irq[16][64]; ++char edma_rx_irq[8][64]; ++struct net_device *edma_netdev[EDMA_MAX_PORTID_SUPPORTED]; ++static u16 tx_start[4] = {EDMA_TXQ_START_CORE0, EDMA_TXQ_START_CORE1, ++ EDMA_TXQ_START_CORE2, EDMA_TXQ_START_CORE3}; ++static u32 tx_mask[4] = {EDMA_TXQ_IRQ_MASK_CORE0, EDMA_TXQ_IRQ_MASK_CORE1, ++ EDMA_TXQ_IRQ_MASK_CORE2, EDMA_TXQ_IRQ_MASK_CORE3}; ++ ++static u32 edma_default_ltag __read_mostly = EDMA_LAN_DEFAULT_VLAN; ++static u32 edma_default_wtag __read_mostly = EDMA_WAN_DEFAULT_VLAN; ++static u32 edma_default_group1_vtag __read_mostly = EDMA_DEFAULT_GROUP1_VLAN; ++static u32 edma_default_group2_vtag __read_mostly = EDMA_DEFAULT_GROUP2_VLAN; ++static u32 edma_default_group3_vtag __read_mostly = EDMA_DEFAULT_GROUP3_VLAN; ++static u32 edma_default_group4_vtag __read_mostly = EDMA_DEFAULT_GROUP4_VLAN; ++static u32 edma_default_group5_vtag __read_mostly = EDMA_DEFAULT_GROUP5_VLAN; ++static u32 edma_rss_idt_val = EDMA_RSS_IDT_VALUE; ++static u32 edma_rss_idt_idx; ++ ++static int edma_weight_assigned_to_q __read_mostly; ++static int edma_queue_to_virtual_q __read_mostly; ++static bool edma_enable_rstp __read_mostly; ++static int edma_athr_hdr_eth_type __read_mostly; ++ ++static int page_mode; ++module_param(page_mode, int, 0); ++MODULE_PARM_DESC(page_mode, "enable page mode"); ++ ++static int overwrite_mode; ++module_param(overwrite_mode, int, 0); ++MODULE_PARM_DESC(overwrite_mode, "overwrite default page_mode setting"); ++ 
++static int jumbo_mru = EDMA_RX_HEAD_BUFF_SIZE; ++module_param(jumbo_mru, int, 0); ++MODULE_PARM_DESC(jumbo_mru, "enable fraglist support"); ++ ++static int num_rxq = 4; ++module_param(num_rxq, int, 0); ++MODULE_PARM_DESC(num_rxq, "change the number of rx queues"); ++ ++void edma_write_reg(u16 reg_addr, u32 reg_value) ++{ ++ writel(reg_value, ((void __iomem *)(edma_hw_addr + reg_addr))); ++} ++ ++void edma_read_reg(u16 reg_addr, volatile u32 *reg_value) ++{ ++ *reg_value = readl((void __iomem *)(edma_hw_addr + reg_addr)); ++} ++ ++/* edma_change_tx_coalesce() ++ * change tx interrupt moderation timer ++ */ ++void edma_change_tx_coalesce(int usecs) ++{ ++ u32 reg_value; ++ ++ /* Here, we right shift the value from the user by 1, this is ++ * done because IMT resolution timer is 2usecs. 1 count ++ * of this register corresponds to 2 usecs. ++ */ ++ edma_read_reg(EDMA_REG_IRQ_MODRT_TIMER_INIT, ®_value); ++ reg_value = ((reg_value & 0xffff) | ((usecs >> 1) << 16)); ++ edma_write_reg(EDMA_REG_IRQ_MODRT_TIMER_INIT, reg_value); ++} ++ ++/* edma_change_rx_coalesce() ++ * change rx interrupt moderation timer ++ */ ++void edma_change_rx_coalesce(int usecs) ++{ ++ u32 reg_value; ++ ++ /* Here, we right shift the value from the user by 1, this is ++ * done because IMT resolution timer is 2usecs. 1 count ++ * of this register corresponds to 2 usecs. 
++ */
++	edma_read_reg(EDMA_REG_IRQ_MODRT_TIMER_INIT, &reg_value);
++	reg_value = ((reg_value & 0xffff0000) | (usecs >> 1));
++	edma_write_reg(EDMA_REG_IRQ_MODRT_TIMER_INIT, reg_value);
++}
++
++/* edma_get_tx_rx_coalesce()
++ *	Get tx/rx interrupt moderation value
++ */
++void edma_get_tx_rx_coalesce(u32 *reg_val)
++{
++	edma_read_reg(EDMA_REG_IRQ_MODRT_TIMER_INIT, reg_val);
++}
++
++/* edma_read_append_stats()
++ *	Fold the per-queue hardware packet/byte counters into the software
++ *	stats block (the "+=" suggests the counters are clear-on-read —
++ *	TODO confirm against the ESS datasheet).  Called from the 1 s
++ *	stats timer (softirq context) as well as from process context.
++ */
++void edma_read_append_stats(struct edma_common_info *edma_cinfo)
++{
++	uint32_t *p;
++	int i;
++	u32 stat;
++
++	/* _bh variant: this lock is also taken from the stats timer
++	 * callback running in softirq context; a plain spin_lock from
++	 * process context could deadlock against that timer.
++	 */
++	spin_lock_bh(&edma_cinfo->stats_lock);
++	p = (uint32_t *)&(edma_cinfo->edma_ethstats);
++
++	for (i = 0; i < EDMA_MAX_TRANSMIT_QUEUE; i++) {
++		edma_read_reg(EDMA_REG_TX_STAT_PKT_Q(i), &stat);
++		*p += stat;
++		p++;
++	}
++
++	for (i = 0; i < EDMA_MAX_TRANSMIT_QUEUE; i++) {
++		edma_read_reg(EDMA_REG_TX_STAT_BYTE_Q(i), &stat);
++		*p += stat;
++		p++;
++	}
++
++	for (i = 0; i < EDMA_MAX_RECEIVE_QUEUE; i++) {
++		edma_read_reg(EDMA_REG_RX_STAT_PKT_Q(i), &stat);
++		*p += stat;
++		p++;
++	}
++
++	for (i = 0; i < EDMA_MAX_RECEIVE_QUEUE; i++) {
++		edma_read_reg(EDMA_REG_RX_STAT_BYTE_Q(i), &stat);
++		*p += stat;
++		p++;
++	}
++
++	spin_unlock_bh(&edma_cinfo->stats_lock);
++}
++
++/* edma_statistics_timer()
++ *	Periodic (1 s) timer: accumulate hardware counters, then re-arm.
++ */
++static void edma_statistics_timer(unsigned long data)
++{
++	struct edma_common_info *edma_cinfo = (struct edma_common_info *)data;
++
++	edma_read_append_stats(edma_cinfo);
++
++	mod_timer(&edma_stats_timer, jiffies + 1*HZ);
++}
++
++/* edma_enable_stp_rstp()
++ *	Sysctl handler that pushes the STP/RSTP enable flag to the driver.
++ */
++static int edma_enable_stp_rstp(struct ctl_table *table, int write,
++				void __user *buffer, size_t *lenp,
++				loff_t *ppos)
++{
++	int ret;
++
++	ret = proc_dointvec(table, write, buffer, lenp, ppos);
++	/* Only apply the flag when the user copy actually succeeded */
++	if (write && !ret)
++		edma_set_stp_rstp(edma_enable_rstp);
++
++	return ret;
++}
++
++/* edma_ath_hdr_eth_type()
++ *	Sysctl handler that configures the Atheros header ethertype.
++ */
++static int edma_ath_hdr_eth_type(struct ctl_table *table, int write,
++				 void __user *buffer, size_t *lenp,
++				 loff_t *ppos)
++{
++	int ret;
++
++	ret = proc_dointvec(table, write, buffer, lenp, ppos);
++	if (write && !ret)
++		edma_assign_ath_hdr_type(edma_athr_hdr_eth_type);
++
++	return ret;
++}
++
++static int
edma_change_default_lan_vlan(struct ctl_table *table, int write, ++ void __user *buffer, size_t *lenp, ++ loff_t *ppos) ++{ ++ struct edma_adapter *adapter; ++ int ret; ++ ++ if (!edma_netdev[1]) { ++ pr_err("Netdevice for default_lan does not exist\n"); ++ return -1; ++ } ++ ++ adapter = netdev_priv(edma_netdev[1]); ++ ++ ret = proc_dointvec(table, write, buffer, lenp, ppos); ++ ++ if (write) ++ adapter->default_vlan_tag = edma_default_ltag; ++ ++ return ret; ++} ++ ++static int edma_change_default_wan_vlan(struct ctl_table *table, int write, ++ void __user *buffer, size_t *lenp, ++ loff_t *ppos) ++{ ++ struct edma_adapter *adapter; ++ int ret; ++ ++ if (!edma_netdev[0]) { ++ pr_err("Netdevice for default_wan does not exist\n"); ++ return -1; ++ } ++ ++ adapter = netdev_priv(edma_netdev[0]); ++ ++ ret = proc_dointvec(table, write, buffer, lenp, ppos); ++ ++ if (write) ++ adapter->default_vlan_tag = edma_default_wtag; ++ ++ return ret; ++} ++ ++static int edma_change_group1_vtag(struct ctl_table *table, int write, ++ void __user *buffer, size_t *lenp, ++ loff_t *ppos) ++{ ++ struct edma_adapter *adapter; ++ struct edma_common_info *edma_cinfo; ++ int ret; ++ ++ if (!edma_netdev[0]) { ++ pr_err("Netdevice for Group 1 does not exist\n"); ++ return -1; ++ } ++ ++ adapter = netdev_priv(edma_netdev[0]); ++ edma_cinfo = adapter->edma_cinfo; ++ ++ ret = proc_dointvec(table, write, buffer, lenp, ppos); ++ ++ if (write) ++ adapter->default_vlan_tag = edma_default_group1_vtag; ++ ++ return ret; ++} ++ ++static int edma_change_group2_vtag(struct ctl_table *table, int write, ++ void __user *buffer, size_t *lenp, ++ loff_t *ppos) ++{ ++ struct edma_adapter *adapter; ++ struct edma_common_info *edma_cinfo; ++ int ret; ++ ++ if (!edma_netdev[1]) { ++ pr_err("Netdevice for Group 2 does not exist\n"); ++ return -1; ++ } ++ ++ adapter = netdev_priv(edma_netdev[1]); ++ edma_cinfo = adapter->edma_cinfo; ++ ++ ret = proc_dointvec(table, write, buffer, lenp, ppos); ++ ++ if (write) ++ 
adapter->default_vlan_tag = edma_default_group2_vtag; ++ ++ return ret; ++} ++ ++static int edma_change_group3_vtag(struct ctl_table *table, int write, ++ void __user *buffer, size_t *lenp, ++ loff_t *ppos) ++{ ++ struct edma_adapter *adapter; ++ struct edma_common_info *edma_cinfo; ++ int ret; ++ ++ if (!edma_netdev[2]) { ++ pr_err("Netdevice for Group 3 does not exist\n"); ++ return -1; ++ } ++ ++ adapter = netdev_priv(edma_netdev[2]); ++ edma_cinfo = adapter->edma_cinfo; ++ ++ ret = proc_dointvec(table, write, buffer, lenp, ppos); ++ ++ if (write) ++ adapter->default_vlan_tag = edma_default_group3_vtag; ++ ++ return ret; ++} ++ ++static int edma_change_group4_vtag(struct ctl_table *table, int write, ++ void __user *buffer, size_t *lenp, ++ loff_t *ppos) ++{ ++ struct edma_adapter *adapter; ++ struct edma_common_info *edma_cinfo; ++ int ret; ++ ++ if (!edma_netdev[3]) { ++ pr_err("Netdevice for Group 4 does not exist\n"); ++ return -1; ++ } ++ ++ adapter = netdev_priv(edma_netdev[3]); ++ edma_cinfo = adapter->edma_cinfo; ++ ++ ret = proc_dointvec(table, write, buffer, lenp, ppos); ++ ++ if (write) ++ adapter->default_vlan_tag = edma_default_group4_vtag; ++ ++ return ret; ++} ++ ++static int edma_change_group5_vtag(struct ctl_table *table, int write, ++ void __user *buffer, size_t *lenp, ++ loff_t *ppos) ++{ ++ struct edma_adapter *adapter; ++ struct edma_common_info *edma_cinfo; ++ int ret; ++ ++ if (!edma_netdev[4]) { ++ pr_err("Netdevice for Group 5 does not exist\n"); ++ return -1; ++ } ++ ++ adapter = netdev_priv(edma_netdev[4]); ++ edma_cinfo = adapter->edma_cinfo; ++ ++ ret = proc_dointvec(table, write, buffer, lenp, ppos); ++ ++ if (write) ++ adapter->default_vlan_tag = edma_default_group5_vtag; ++ ++ return ret; ++} ++ ++static int edma_set_rss_idt_value(struct ctl_table *table, int write, ++ void __user *buffer, size_t *lenp, ++ loff_t *ppos) ++{ ++ int ret; ++ ++ ret = proc_dointvec(table, write, buffer, lenp, ppos); ++ if (write && !ret) ++ 
edma_write_reg(EDMA_REG_RSS_IDT(edma_rss_idt_idx),
++			       edma_rss_idt_val);
++	return ret;
++}
++
++/* edma_set_rss_idt_idx()
++ *	Sysctl handler: select which RSS indirection table register
++ *	edma_set_rss_idt_value() programs; out-of-range indices are
++ *	rejected and the previous index restored.
++ */
++static int edma_set_rss_idt_idx(struct ctl_table *table, int write,
++				void __user *buffer, size_t *lenp,
++				loff_t *ppos)
++{
++	int ret;
++	u32 old_value = edma_rss_idt_idx;
++
++	ret = proc_dointvec(table, write, buffer, lenp, ppos);
++	if (!write || ret)
++		return ret;
++
++	if (edma_rss_idt_idx >= EDMA_NUM_IDT) {
++		pr_err("Invalid RSS indirection table index %d\n",
++		       edma_rss_idt_idx);
++		edma_rss_idt_idx = old_value;
++		return -EINVAL;
++	}
++	return ret;
++}
++
++/* edma_weight_assigned_to_queues()
++ *	Sysctl handler: the written value packs a queue id (low 16 bits)
++ *	and a WRR weight (high 16 bits); program that weight for the queue.
++ */
++static int edma_weight_assigned_to_queues(struct ctl_table *table, int write,
++					  void __user *buffer, size_t *lenp,
++					  loff_t *ppos)
++{
++	int ret, queue_id, weight;
++	u32 reg_data, data, reg_addr;
++
++	ret = proc_dointvec(table, write, buffer, lenp, ppos);
++	if (write && !ret) {
++		queue_id = edma_weight_assigned_to_q & EDMA_WRR_VID_SCTL_MASK;
++		if (queue_id < 0 || queue_id > 15) {
++			pr_err("queue_id not within desired range\n");
++			return -EINVAL;
++		}
++
++		weight = edma_weight_assigned_to_q >> EDMA_WRR_VID_SCTL_SHIFT;
++		if (weight < 0 || weight > 0xF) {
++			pr_err("weight not within desired range\n");
++			return -EINVAL;
++		}
++
++		data = weight << EDMA_WRR_SHIFT(queue_id);
++
++		/* One 32-bit register holds the weights of four queues */
++		reg_addr = EDMA_REG_WRR_CTRL_Q0_Q3 + (queue_id & ~0x3);
++		edma_read_reg(reg_addr, &reg_data);
++		/* Clear the full 4-bit weight field (weight <= 0xF), not
++		 * just its lowest bit, so stale bits cannot survive the
++		 * read-modify-write.
++		 */
++		reg_data &= ~(0xF << EDMA_WRR_SHIFT(queue_id));
++		edma_write_reg(reg_addr, data | reg_data);
++	}
++
++	return ret;
++}
++
++/* edma_queue_to_virtual_queue_map()
++ *	Sysctl handler: the written value packs a queue id (low 16 bits)
++ *	and a virtual queue id (high 16 bits); program that mapping.
++ */
++static int edma_queue_to_virtual_queue_map(struct ctl_table *table, int write,
++					   void __user *buffer, size_t *lenp,
++					   loff_t *ppos)
++{
++	int ret, queue_id, virtual_qid;
++	u32 reg_data, data, reg_addr;
++
++	ret = proc_dointvec(table, write, buffer, lenp, ppos);
++	if (write && !ret) {
++		queue_id = edma_queue_to_virtual_q & EDMA_WRR_VID_SCTL_MASK;
++		if (queue_id < 0 || queue_id > 15) {
++			pr_err("queue_id not within desired range\n");
++			return -EINVAL;
++		}
++
++		virtual_qid = edma_queue_to_virtual_q >>
++			      EDMA_WRR_VID_SCTL_SHIFT;
++		if (virtual_qid < 0 || virtual_qid > 8) {
++			pr_err("virtual queue id not within desired range\n");
++			return -EINVAL;
++		}
++
++		data = virtual_qid << EDMA_VQ_ID_SHIFT(queue_id);
++
++		reg_addr = EDMA_REG_VQ_CTRL0 + (queue_id & ~0x3);
++		edma_read_reg(reg_addr, &reg_data);
++		/* Clear the whole virtual-queue-id field before OR-ing in
++		 * the new value; clearing a single bit would leave stale
++		 * bits of the previous mapping behind.
++		 */
++		reg_data &= ~(0xF << EDMA_VQ_ID_SHIFT(queue_id));
++		edma_write_reg(reg_addr, data | reg_data);
++	}
++
++	return ret;
++}
++
++/* Sysctl table exported under /proc/sys/net/edma */
++static struct ctl_table edma_table[] = {
++	{
++		.procname = "default_lan_tag",
++		.data = &edma_default_ltag,
++		.maxlen = sizeof(int),
++		.mode = 0644,
++		.proc_handler = edma_change_default_lan_vlan
++	},
++	{
++		.procname = "default_wan_tag",
++		.data = &edma_default_wtag,
++		.maxlen = sizeof(int),
++		.mode = 0644,
++		.proc_handler = edma_change_default_wan_vlan
++	},
++	{
++		.procname = "weight_assigned_to_queues",
++		.data = &edma_weight_assigned_to_q,
++		.maxlen = sizeof(int),
++		.mode = 0644,
++		.proc_handler = edma_weight_assigned_to_queues
++	},
++	{
++		.procname = "queue_to_virtual_queue_map",
++		.data = &edma_queue_to_virtual_q,
++		.maxlen = sizeof(int),
++		.mode = 0644,
++		.proc_handler = edma_queue_to_virtual_queue_map
++	},
++	{
++		/* NOTE(review): edma_enable_rstp is declared bool but is
++		 * read/written here as a full int; it should be declared
++		 * as an int/u32 to avoid clobbering adjacent storage —
++		 * verify the declaration.
++		 */
++		.procname = "enable_stp_rstp",
++		.data = &edma_enable_rstp,
++		.maxlen = sizeof(int),
++		.mode = 0644,
++		.proc_handler = edma_enable_stp_rstp
++	},
++	{
++		.procname = "athr_hdr_eth_type",
++		.data = &edma_athr_hdr_eth_type,
++		.maxlen = sizeof(int),
++		.mode = 0644,
++		.proc_handler = edma_ath_hdr_eth_type
++	},
++	{
++		.procname = "default_group1_vlan_tag",
++		.data = &edma_default_group1_vtag,
++		.maxlen = sizeof(int),
++		.mode = 0644,
++		.proc_handler = edma_change_group1_vtag
++	},
++	{
++		.procname = "default_group2_vlan_tag",
++		.data = &edma_default_group2_vtag,
++		.maxlen = sizeof(int),
++		.mode = 0644,
++		.proc_handler = edma_change_group2_vtag
++	},
++	{
++		.procname = "default_group3_vlan_tag",
++		.data = &edma_default_group3_vtag,
++		.maxlen = sizeof(int),
++		.mode = 0644,
++		.proc_handler =
edma_change_group3_vtag ++ }, ++ { ++ .procname = "default_group4_vlan_tag", ++ .data = &edma_default_group4_vtag, ++ .maxlen = sizeof(int), ++ .mode = 0644, ++ .proc_handler = edma_change_group4_vtag ++ }, ++ { ++ .procname = "default_group5_vlan_tag", ++ .data = &edma_default_group5_vtag, ++ .maxlen = sizeof(int), ++ .mode = 0644, ++ .proc_handler = edma_change_group5_vtag ++ }, ++ { ++ .procname = "edma_rss_idt_value", ++ .data = &edma_rss_idt_val, ++ .maxlen = sizeof(int), ++ .mode = 0644, ++ .proc_handler = edma_set_rss_idt_value ++ }, ++ { ++ .procname = "edma_rss_idt_idx", ++ .data = &edma_rss_idt_idx, ++ .maxlen = sizeof(int), ++ .mode = 0644, ++ .proc_handler = edma_set_rss_idt_idx ++ }, ++ {} ++}; ++ ++/* edma_axi_netdev_ops ++ * Describe the operations supported by registered netdevices ++ * ++ * static const struct net_device_ops edma_axi_netdev_ops = { ++ * .ndo_open = edma_open, ++ * .ndo_stop = edma_close, ++ * .ndo_start_xmit = edma_xmit_frame, ++ * .ndo_set_mac_address = edma_set_mac_addr, ++ * } ++ */ ++static const struct net_device_ops edma_axi_netdev_ops = { ++ .ndo_open = edma_open, ++ .ndo_stop = edma_close, ++ .ndo_start_xmit = edma_xmit, ++ .ndo_set_mac_address = edma_set_mac_addr, ++#ifdef CONFIG_RFS_ACCEL ++ .ndo_rx_flow_steer = edma_rx_flow_steer, ++ .ndo_register_rfs_filter = edma_register_rfs_filter, ++ .ndo_get_default_vlan_tag = edma_get_default_vlan_tag, ++#endif ++ .ndo_get_stats = edma_get_stats, ++ .ndo_change_mtu = edma_change_mtu, ++}; ++ ++/* edma_axi_probe() ++ * Initialise an adapter identified by a platform_device structure. ++ * ++ * The OS initialization, configuring of the adapter private structure, ++ * and a hardware reset occur in the probe. 
++ */ ++static int edma_axi_probe(struct platform_device *pdev) ++{ ++ struct edma_common_info *edma_cinfo; ++ struct edma_hw *hw; ++ struct edma_adapter *adapter[EDMA_MAX_PORTID_SUPPORTED]; ++ struct resource *res; ++ struct device_node *np = pdev->dev.of_node; ++ struct device_node *pnp; ++ struct device_node *mdio_node = NULL; ++ struct platform_device *mdio_plat = NULL; ++ struct mii_bus *miibus = NULL; ++ struct edma_mdio_data *mdio_data = NULL; ++ int i, j, k, err = 0; ++ int portid_bmp; ++ int idx = 0, idx_mac = 0; ++ ++ if (CONFIG_NR_CPUS != EDMA_CPU_CORES_SUPPORTED) { ++ dev_err(&pdev->dev, "Invalid CPU Cores\n"); ++ return -EINVAL; ++ } ++ ++ if ((num_rxq != 4) && (num_rxq != 8)) { ++ dev_err(&pdev->dev, "Invalid RX queue, edma probe failed\n"); ++ return -EINVAL; ++ } ++ edma_cinfo = kzalloc(sizeof(struct edma_common_info), GFP_KERNEL); ++ if (!edma_cinfo) { ++ err = -ENOMEM; ++ goto err_alloc; ++ } ++ ++ edma_cinfo->pdev = pdev; ++ ++ of_property_read_u32(np, "qcom,num_gmac", &edma_cinfo->num_gmac); ++ if (edma_cinfo->num_gmac > EDMA_MAX_PORTID_SUPPORTED) { ++ pr_err("Invalid DTSI Entry for qcom,num_gmac\n"); ++ err = -EINVAL; ++ goto err_cinfo; ++ } ++ ++ /* Initialize the netdev array before allocation ++ * to avoid double free ++ */ ++ for (i = 0 ; i < edma_cinfo->num_gmac ; i++) ++ edma_netdev[i] = NULL; ++ ++ for (i = 0 ; i < edma_cinfo->num_gmac ; i++) { ++ edma_netdev[i] = alloc_etherdev_mqs(sizeof(struct edma_adapter), ++ EDMA_NETDEV_TX_QUEUE, EDMA_NETDEV_RX_QUEUE); ++ ++ if (!edma_netdev[i]) { ++ dev_err(&pdev->dev, ++ "net device alloc fails for index=%d\n", i); ++ err = -ENODEV; ++ goto err_ioremap; ++ } ++ ++ SET_NETDEV_DEV(edma_netdev[i], &pdev->dev); ++ platform_set_drvdata(pdev, edma_netdev[i]); ++ edma_cinfo->netdev[i] = edma_netdev[i]; ++ } ++ ++ /* Fill ring details */ ++ edma_cinfo->num_tx_queues = EDMA_MAX_TRANSMIT_QUEUE; ++ edma_cinfo->num_txq_per_core = (EDMA_MAX_TRANSMIT_QUEUE / 4); ++ edma_cinfo->tx_ring_count = 
EDMA_TX_RING_SIZE; ++ ++ /* Update num rx queues based on module parameter */ ++ edma_cinfo->num_rx_queues = num_rxq; ++ edma_cinfo->num_rxq_per_core = ((num_rxq == 4) ? 1 : 2); ++ ++ edma_cinfo->rx_ring_count = EDMA_RX_RING_SIZE; ++ ++ hw = &edma_cinfo->hw; ++ ++ /* Fill HW defaults */ ++ hw->tx_intr_mask = EDMA_TX_IMR_NORMAL_MASK; ++ hw->rx_intr_mask = EDMA_RX_IMR_NORMAL_MASK; ++ ++ of_property_read_u32(np, "qcom,page-mode", &edma_cinfo->page_mode); ++ of_property_read_u32(np, "qcom,rx_head_buf_size", ++ &hw->rx_head_buff_size); ++ ++ if (overwrite_mode) { ++ dev_info(&pdev->dev, "page mode overwritten"); ++ edma_cinfo->page_mode = page_mode; ++ } ++ ++ if (jumbo_mru) ++ edma_cinfo->fraglist_mode = 1; ++ ++ if (edma_cinfo->page_mode) ++ hw->rx_head_buff_size = EDMA_RX_HEAD_BUFF_SIZE_JUMBO; ++ else if (edma_cinfo->fraglist_mode) ++ hw->rx_head_buff_size = jumbo_mru; ++ else if (!hw->rx_head_buff_size) ++ hw->rx_head_buff_size = EDMA_RX_HEAD_BUFF_SIZE; ++ ++ hw->misc_intr_mask = 0; ++ hw->wol_intr_mask = 0; ++ ++ hw->intr_clear_type = EDMA_INTR_CLEAR_TYPE; ++ hw->intr_sw_idx_w = EDMA_INTR_SW_IDX_W_TYPE; ++ ++ /* configure RSS type to the different protocol that can be ++ * supported ++ */ ++ hw->rss_type = EDMA_RSS_TYPE_IPV4TCP | EDMA_RSS_TYPE_IPV6_TCP | ++ EDMA_RSS_TYPE_IPV4_UDP | EDMA_RSS_TYPE_IPV6UDP | ++ EDMA_RSS_TYPE_IPV4 | EDMA_RSS_TYPE_IPV6; ++ ++ res = platform_get_resource(pdev, IORESOURCE_MEM, 0); ++ ++ edma_cinfo->hw.hw_addr = devm_ioremap_resource(&pdev->dev, res); ++ if (IS_ERR(edma_cinfo->hw.hw_addr)) { ++ err = PTR_ERR(edma_cinfo->hw.hw_addr); ++ goto err_ioremap; ++ } ++ ++ edma_hw_addr = (u32)edma_cinfo->hw.hw_addr; ++ ++ /* Parse tx queue interrupt number from device tree */ ++ for (i = 0; i < edma_cinfo->num_tx_queues; i++) ++ edma_cinfo->tx_irq[i] = platform_get_irq(pdev, i); ++ ++ /* Parse rx queue interrupt number from device tree ++ * Here we are setting j to point to the point where we ++ * left tx interrupt parsing(i.e 16) and run run the 
loop ++ * from 0 to 7 to parse rx interrupt number. ++ */ ++ for (i = 0, j = edma_cinfo->num_tx_queues, k = 0; ++ i < edma_cinfo->num_rx_queues; i++) { ++ edma_cinfo->rx_irq[k] = platform_get_irq(pdev, j); ++ k += ((num_rxq == 4) ? 2 : 1); ++ j += ((num_rxq == 4) ? 2 : 1); ++ } ++ ++ edma_cinfo->rx_head_buffer_len = edma_cinfo->hw.rx_head_buff_size; ++ edma_cinfo->rx_page_buffer_len = PAGE_SIZE; ++ ++ err = edma_alloc_queues_tx(edma_cinfo); ++ if (err) { ++ dev_err(&pdev->dev, "Allocation of TX queue failed\n"); ++ goto err_tx_qinit; ++ } ++ ++ err = edma_alloc_queues_rx(edma_cinfo); ++ if (err) { ++ dev_err(&pdev->dev, "Allocation of RX queue failed\n"); ++ goto err_rx_qinit; ++ } ++ ++ err = edma_alloc_tx_rings(edma_cinfo); ++ if (err) { ++ dev_err(&pdev->dev, "Allocation of TX resources failed\n"); ++ goto err_tx_rinit; ++ } ++ ++ err = edma_alloc_rx_rings(edma_cinfo); ++ if (err) { ++ dev_err(&pdev->dev, "Allocation of RX resources failed\n"); ++ goto err_rx_rinit; ++ } ++ ++ /* Initialize netdev and netdev bitmap for transmit descriptor rings */ ++ for (i = 0; i < edma_cinfo->num_tx_queues; i++) { ++ struct edma_tx_desc_ring *etdr = edma_cinfo->tpd_ring[i]; ++ int j; ++ ++ etdr->netdev_bmp = 0; ++ for (j = 0; j < EDMA_MAX_NETDEV_PER_QUEUE; j++) { ++ etdr->netdev[j] = NULL; ++ etdr->nq[j] = NULL; ++ } ++ } ++ ++ if (of_property_read_bool(np, "qcom,mdio_supported")) { ++ mdio_node = of_find_compatible_node(NULL, NULL, ++ "qcom,ipq4019-mdio"); ++ if (!mdio_node) { ++ dev_err(&pdev->dev, "cannot find mdio node by phandle"); ++ err = -EIO; ++ goto err_mdiobus_init_fail; ++ } ++ ++ mdio_plat = of_find_device_by_node(mdio_node); ++ if (!mdio_plat) { ++ dev_err(&pdev->dev, ++ "cannot find platform device from mdio node"); ++ of_node_put(mdio_node); ++ err = -EIO; ++ goto err_mdiobus_init_fail; ++ } ++ ++ mdio_data = dev_get_drvdata(&mdio_plat->dev); ++ if (!mdio_data) { ++ dev_err(&pdev->dev, ++ "cannot get mii bus reference from device data"); ++ 
of_node_put(mdio_node); ++ err = -EIO; ++ goto err_mdiobus_init_fail; ++ } ++ ++ miibus = mdio_data->mii_bus; ++ } ++ ++ for_each_available_child_of_node(np, pnp) { ++ const char *mac_addr; ++ ++ /* this check is needed if parent and daughter dts have ++ * different number of gmac nodes ++ */ ++ if (idx_mac == edma_cinfo->num_gmac) { ++ of_node_put(np); ++ break; ++ } ++ ++ mac_addr = of_get_mac_address(pnp); ++ if (mac_addr) ++ memcpy(edma_netdev[idx_mac]->dev_addr, mac_addr, ETH_ALEN); ++ ++ idx_mac++; ++ } ++ ++ /* Populate the adapter structure register the netdevice */ ++ for (i = 0; i < edma_cinfo->num_gmac; i++) { ++ int k, m; ++ ++ adapter[i] = netdev_priv(edma_netdev[i]); ++ adapter[i]->netdev = edma_netdev[i]; ++ adapter[i]->pdev = pdev; ++ for (j = 0; j < CONFIG_NR_CPUS; j++) { ++ m = i % 2; ++ adapter[i]->tx_start_offset[j] = ++ ((j << EDMA_TX_CPU_START_SHIFT) + (m << 1)); ++ /* Share the queues with available net-devices. ++ * For instance , with 5 net-devices ++ * eth0/eth2/eth4 will share q0,q1,q4,q5,q8,q9,q12,q13 ++ * and eth1/eth3 will get the remaining. 
++ */ ++ for (k = adapter[i]->tx_start_offset[j]; k < ++ (adapter[i]->tx_start_offset[j] + 2); k++) { ++ if (edma_fill_netdev(edma_cinfo, k, i, j)) { ++ pr_err("Netdev overflow Error\n"); ++ goto err_register; ++ } ++ } ++ } ++ ++ adapter[i]->edma_cinfo = edma_cinfo; ++ edma_netdev[i]->netdev_ops = &edma_axi_netdev_ops; ++ edma_netdev[i]->features = NETIF_F_HW_CSUM | NETIF_F_RXCSUM ++ | NETIF_F_HW_VLAN_CTAG_TX ++ | NETIF_F_HW_VLAN_CTAG_RX | NETIF_F_SG | ++ NETIF_F_TSO | NETIF_F_TSO6 | NETIF_F_GRO; ++ edma_netdev[i]->hw_features = NETIF_F_HW_CSUM | NETIF_F_RXCSUM | ++ NETIF_F_HW_VLAN_CTAG_RX ++ | NETIF_F_SG | NETIF_F_TSO | NETIF_F_TSO6 | ++ NETIF_F_GRO; ++ edma_netdev[i]->vlan_features = NETIF_F_HW_CSUM | NETIF_F_SG | ++ NETIF_F_TSO | NETIF_F_TSO6 | ++ NETIF_F_GRO; ++ edma_netdev[i]->wanted_features = NETIF_F_HW_CSUM | NETIF_F_SG | ++ NETIF_F_TSO | NETIF_F_TSO6 | ++ NETIF_F_GRO; ++ ++#ifdef CONFIG_RFS_ACCEL ++ edma_netdev[i]->features |= NETIF_F_RXHASH | NETIF_F_NTUPLE; ++ edma_netdev[i]->hw_features |= NETIF_F_RXHASH | NETIF_F_NTUPLE; ++ edma_netdev[i]->vlan_features |= NETIF_F_RXHASH | NETIF_F_NTUPLE; ++ edma_netdev[i]->wanted_features |= NETIF_F_RXHASH | NETIF_F_NTUPLE; ++#endif ++ edma_set_ethtool_ops(edma_netdev[i]); ++ ++ /* This just fill in some default MAC address ++ */ ++ if (!is_valid_ether_addr(edma_netdev[i]->dev_addr)) { ++ random_ether_addr(edma_netdev[i]->dev_addr); ++ pr_info("EDMA using MAC@ - using"); ++ pr_info("%02x:%02x:%02x:%02x:%02x:%02x\n", ++ *(edma_netdev[i]->dev_addr), ++ *(edma_netdev[i]->dev_addr + 1), ++ *(edma_netdev[i]->dev_addr + 2), ++ *(edma_netdev[i]->dev_addr + 3), ++ *(edma_netdev[i]->dev_addr + 4), ++ *(edma_netdev[i]->dev_addr + 5)); ++ } ++ ++ err = register_netdev(edma_netdev[i]); ++ if (err) ++ goto err_register; ++ ++ /* carrier off reporting is important to ++ * ethtool even BEFORE open ++ */ ++ netif_carrier_off(edma_netdev[i]); ++ ++ /* Allocate reverse irq cpu mapping structure for ++ * receive queues ++ */ ++#ifdef 
CONFIG_RFS_ACCEL ++ edma_netdev[i]->rx_cpu_rmap = ++ alloc_irq_cpu_rmap(EDMA_NETDEV_RX_QUEUE); ++ if (!edma_netdev[i]->rx_cpu_rmap) { ++ err = -ENOMEM; ++ goto err_rmap_alloc_fail; ++ } ++#endif ++ } ++ ++ for (i = 0; i < EDMA_MAX_PORTID_BITMAP_INDEX; i++) ++ edma_cinfo->portid_netdev_lookup_tbl[i] = NULL; ++ ++ for_each_available_child_of_node(np, pnp) { ++ const uint32_t *vlan_tag = NULL; ++ int len; ++ ++ /* this check is needed if parent and daughter dts have ++ * different number of gmac nodes ++ */ ++ if (idx == edma_cinfo->num_gmac) ++ break; ++ ++ /* Populate port-id to netdev lookup table */ ++ vlan_tag = of_get_property(pnp, "vlan_tag", &len); ++ if (!vlan_tag) { ++ pr_err("Vlan tag parsing Failed.\n"); ++ goto err_rmap_alloc_fail; ++ } ++ ++ adapter[idx]->default_vlan_tag = of_read_number(vlan_tag, 1); ++ vlan_tag++; ++ portid_bmp = of_read_number(vlan_tag, 1); ++ adapter[idx]->dp_bitmap = portid_bmp; ++ ++ portid_bmp = portid_bmp >> 1; /* We ignore CPU Port bit 0 */ ++ while (portid_bmp) { ++ int port_bit = ffs(portid_bmp); ++ ++ if (port_bit > EDMA_MAX_PORTID_SUPPORTED) ++ goto err_rmap_alloc_fail; ++ edma_cinfo->portid_netdev_lookup_tbl[port_bit] = ++ edma_netdev[idx]; ++ portid_bmp &= ~(1 << (port_bit - 1)); ++ } ++ ++ if (!of_property_read_u32(pnp, "qcom,poll_required", ++ &adapter[idx]->poll_required)) { ++ if (adapter[idx]->poll_required) { ++ of_property_read_u32(pnp, "qcom,phy_mdio_addr", ++ &adapter[idx]->phy_mdio_addr); ++ of_property_read_u32(pnp, "qcom,forced_speed", ++ &adapter[idx]->forced_speed); ++ of_property_read_u32(pnp, "qcom,forced_duplex", ++ &adapter[idx]->forced_duplex); ++ ++ /* create a phyid using MDIO bus id ++ * and MDIO bus address ++ */ ++ snprintf(adapter[idx]->phy_id, ++ MII_BUS_ID_SIZE + 3, PHY_ID_FMT, ++ miibus->id, ++ adapter[idx]->phy_mdio_addr); ++ } ++ } else { ++ adapter[idx]->poll_required = 0; ++ adapter[idx]->forced_speed = SPEED_1000; ++ adapter[idx]->forced_duplex = DUPLEX_FULL; ++ } ++ ++ idx++; ++ } ++ ++ 
edma_cinfo->edma_ctl_table_hdr = register_net_sysctl(&init_net, ++ "net/edma", ++ edma_table); ++ if (!edma_cinfo->edma_ctl_table_hdr) { ++ dev_err(&pdev->dev, "edma sysctl table hdr not registered\n"); ++ goto err_unregister_sysctl_tbl; ++ } ++ ++ /* Disable all 16 Tx and 8 rx irqs */ ++ edma_irq_disable(edma_cinfo); ++ ++ err = edma_reset(edma_cinfo); ++ if (err) { ++ err = -EIO; ++ goto err_reset; ++ } ++ ++ /* populate per_core_info, do a napi_Add, request 16 TX irqs, ++ * 8 RX irqs, do a napi enable ++ */ ++ for (i = 0; i < CONFIG_NR_CPUS; i++) { ++ u8 rx_start; ++ ++ edma_cinfo->edma_percpu_info[i].napi.state = 0; ++ ++ netif_napi_add(edma_netdev[0], ++ &edma_cinfo->edma_percpu_info[i].napi, ++ edma_poll, 64); ++ napi_enable(&edma_cinfo->edma_percpu_info[i].napi); ++ edma_cinfo->edma_percpu_info[i].tx_mask = tx_mask[i]; ++ edma_cinfo->edma_percpu_info[i].rx_mask = EDMA_RX_PER_CPU_MASK ++ << (i << EDMA_RX_PER_CPU_MASK_SHIFT); ++ edma_cinfo->edma_percpu_info[i].tx_start = tx_start[i]; ++ edma_cinfo->edma_percpu_info[i].rx_start = ++ i << EDMA_RX_CPU_START_SHIFT; ++ rx_start = i << EDMA_RX_CPU_START_SHIFT; ++ edma_cinfo->edma_percpu_info[i].tx_status = 0; ++ edma_cinfo->edma_percpu_info[i].rx_status = 0; ++ edma_cinfo->edma_percpu_info[i].edma_cinfo = edma_cinfo; ++ ++ /* Request irq per core */ ++ for (j = edma_cinfo->edma_percpu_info[i].tx_start; ++ j < tx_start[i] + 4; j++) { ++ sprintf(&edma_tx_irq[j][0], "edma_eth_tx%d", j); ++ err = request_irq(edma_cinfo->tx_irq[j], ++ edma_interrupt, ++ 0, ++ &edma_tx_irq[j][0], ++ &edma_cinfo->edma_percpu_info[i]); ++ if (err) ++ goto err_reset; ++ } ++ ++ for (j = edma_cinfo->edma_percpu_info[i].rx_start; ++ j < (rx_start + ++ ((edma_cinfo->num_rx_queues == 4) ? 
1 : 2)); ++ j++) { ++ sprintf(&edma_rx_irq[j][0], "edma_eth_rx%d", j); ++ err = request_irq(edma_cinfo->rx_irq[j], ++ edma_interrupt, ++ 0, ++ &edma_rx_irq[j][0], ++ &edma_cinfo->edma_percpu_info[i]); ++ if (err) ++ goto err_reset; ++ } ++ ++#ifdef CONFIG_RFS_ACCEL ++ for (j = edma_cinfo->edma_percpu_info[i].rx_start; ++ j < rx_start + 2; j += 2) { ++ err = irq_cpu_rmap_add(edma_netdev[0]->rx_cpu_rmap, ++ edma_cinfo->rx_irq[j]); ++ if (err) ++ goto err_rmap_add_fail; ++ } ++#endif ++ } ++ ++ /* Used to clear interrupt status, allocate rx buffer, ++ * configure edma descriptors registers ++ */ ++ err = edma_configure(edma_cinfo); ++ if (err) { ++ err = -EIO; ++ goto err_configure; ++ } ++ ++ /* Configure RSS indirection table. ++ * 128 hash will be configured in the following ++ * pattern: hash{0,1,2,3} = {Q0,Q2,Q4,Q6} respectively ++ * and so on ++ */ ++ for (i = 0; i < EDMA_NUM_IDT; i++) ++ edma_write_reg(EDMA_REG_RSS_IDT(i), EDMA_RSS_IDT_VALUE); ++ ++ /* Configure load balance mapping table. ++ * 4 table entry will be configured according to the ++ * following pattern: load_balance{0,1,2,3} = {Q0,Q1,Q3,Q4} ++ * respectively. 
++ */ ++ edma_write_reg(EDMA_REG_LB_RING, EDMA_LB_REG_VALUE); ++ ++ /* Configure Virtual queue for Tx rings ++ * User can also change this value runtime through ++ * a sysctl ++ */ ++ edma_write_reg(EDMA_REG_VQ_CTRL0, EDMA_VQ_REG_VALUE); ++ edma_write_reg(EDMA_REG_VQ_CTRL1, EDMA_VQ_REG_VALUE); ++ ++ /* Configure Max AXI Burst write size to 128 bytes*/ ++ edma_write_reg(EDMA_REG_AXIW_CTRL_MAXWRSIZE, ++ EDMA_AXIW_MAXWRSIZE_VALUE); ++ ++ /* Enable All 16 tx and 8 rx irq mask */ ++ edma_irq_enable(edma_cinfo); ++ edma_enable_tx_ctrl(&edma_cinfo->hw); ++ edma_enable_rx_ctrl(&edma_cinfo->hw); ++ ++ for (i = 0; i < edma_cinfo->num_gmac; i++) { ++ if (adapter[i]->poll_required) { ++ adapter[i]->phydev = ++ phy_connect(edma_netdev[i], ++ (const char *)adapter[i]->phy_id, ++ &edma_adjust_link, ++ PHY_INTERFACE_MODE_SGMII); ++ if (IS_ERR(adapter[i]->phydev)) { ++ dev_dbg(&pdev->dev, "PHY attach FAIL"); ++ err = -EIO; ++ goto edma_phy_attach_fail; ++ } else { ++ adapter[i]->phydev->advertising |= ++ ADVERTISED_Pause | ++ ADVERTISED_Asym_Pause; ++ adapter[i]->phydev->supported |= ++ SUPPORTED_Pause | ++ SUPPORTED_Asym_Pause; ++ } ++ } else { ++ adapter[i]->phydev = NULL; ++ } ++ } ++ ++ spin_lock_init(&edma_cinfo->stats_lock); ++ ++ init_timer(&edma_stats_timer); ++ edma_stats_timer.expires = jiffies + 1*HZ; ++ edma_stats_timer.data = (unsigned long)edma_cinfo; ++ edma_stats_timer.function = edma_statistics_timer; /* timer handler */ ++ add_timer(&edma_stats_timer); ++ ++ return 0; ++ ++edma_phy_attach_fail: ++ miibus = NULL; ++err_configure: ++#ifdef CONFIG_RFS_ACCEL ++ for (i = 0; i < edma_cinfo->num_gmac; i++) { ++ free_irq_cpu_rmap(adapter[i]->netdev->rx_cpu_rmap); ++ adapter[i]->netdev->rx_cpu_rmap = NULL; ++ } ++#endif ++err_rmap_add_fail: ++ edma_free_irqs(adapter[0]); ++ for (i = 0; i < CONFIG_NR_CPUS; i++) ++ napi_disable(&edma_cinfo->edma_percpu_info[i].napi); ++err_reset: ++err_unregister_sysctl_tbl: ++err_rmap_alloc_fail: ++ for (i = 0; i < edma_cinfo->num_gmac; 
i++) ++ unregister_netdev(edma_netdev[i]); ++err_register: ++err_mdiobus_init_fail: ++ edma_free_rx_rings(edma_cinfo); ++err_rx_rinit: ++ edma_free_tx_rings(edma_cinfo); ++err_tx_rinit: ++ edma_free_queues(edma_cinfo); ++err_rx_qinit: ++err_tx_qinit: ++ iounmap(edma_cinfo->hw.hw_addr); ++err_ioremap: ++ for (i = 0; i < edma_cinfo->num_gmac; i++) { ++ if (edma_netdev[i]) ++ free_netdev(edma_netdev[i]); ++ } ++err_cinfo: ++ kfree(edma_cinfo); ++err_alloc: ++ return err; ++} ++ ++/* edma_axi_remove() ++ * Device Removal Routine ++ * ++ * edma_axi_remove is called by the platform subsystem to alert the driver ++ * that it should release a platform device. ++ */ ++static int edma_axi_remove(struct platform_device *pdev) ++{ ++ struct edma_adapter *adapter = netdev_priv(edma_netdev[0]); ++ struct edma_common_info *edma_cinfo = adapter->edma_cinfo; ++ struct edma_hw *hw = &edma_cinfo->hw; ++ int i; ++ ++ for (i = 0; i < edma_cinfo->num_gmac; i++) ++ unregister_netdev(edma_netdev[i]); ++ ++ edma_stop_rx_tx(hw); ++ for (i = 0; i < CONFIG_NR_CPUS; i++) ++ napi_disable(&edma_cinfo->edma_percpu_info[i].napi); ++ ++ edma_irq_disable(edma_cinfo); ++ edma_write_reg(EDMA_REG_RX_ISR, 0xff); ++ edma_write_reg(EDMA_REG_TX_ISR, 0xffff); ++#ifdef CONFIG_RFS_ACCEL ++ for (i = 0; i < edma_cinfo->num_gmac; i++) { ++ free_irq_cpu_rmap(edma_netdev[i]->rx_cpu_rmap); ++ edma_netdev[i]->rx_cpu_rmap = NULL; ++ } ++#endif ++ ++ for (i = 0; i < edma_cinfo->num_gmac; i++) { ++ struct edma_adapter *adapter = netdev_priv(edma_netdev[i]); ++ ++ if (adapter->phydev) ++ phy_disconnect(adapter->phydev); ++ } ++ ++ del_timer_sync(&edma_stats_timer); ++ edma_free_irqs(adapter); ++ unregister_net_sysctl_table(edma_cinfo->edma_ctl_table_hdr); ++ edma_free_tx_resources(edma_cinfo); ++ edma_free_rx_resources(edma_cinfo); ++ edma_free_tx_rings(edma_cinfo); ++ edma_free_rx_rings(edma_cinfo); ++ edma_free_queues(edma_cinfo); ++ for (i = 0; i < edma_cinfo->num_gmac; i++) ++ free_netdev(edma_netdev[i]); ++ ++ 
kfree(edma_cinfo); ++ ++ return 0; ++} ++ ++static const struct of_device_id edma_of_mtable[] = { ++ {.compatible = "qcom,ess-edma" }, ++ {} ++}; ++MODULE_DEVICE_TABLE(of, edma_of_mtable); ++ ++static struct platform_driver edma_axi_driver = { ++ .driver = { ++ .name = edma_axi_driver_name, ++ .of_match_table = edma_of_mtable, ++ }, ++ .probe = edma_axi_probe, ++ .remove = edma_axi_remove, ++}; ++ ++module_platform_driver(edma_axi_driver); ++ ++MODULE_AUTHOR("Qualcomm Atheros Inc"); ++MODULE_DESCRIPTION("QCA ESS EDMA driver"); ++MODULE_LICENSE("GPL"); +--- /dev/null ++++ b/drivers/net/ethernet/qualcomm/essedma/edma_ethtool.c +@@ -0,0 +1,374 @@ ++/* ++ * Copyright (c) 2015 - 2016, The Linux Foundation. All rights reserved. ++ * ++ * Permission to use, copy, modify, and/or distribute this software for ++ * any purpose with or without fee is hereby granted, provided that the ++ * above copyright notice and this permission notice appear in all copies. ++ * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES ++ * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF ++ * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ++ * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES ++ * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN ++ * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT ++ * OF OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. 
++ */ ++ ++#include ++#include ++#include ++#include "edma.h" ++ ++struct edma_ethtool_stats { ++ uint8_t stat_string[ETH_GSTRING_LEN]; ++ uint32_t stat_offset; ++}; ++ ++#define EDMA_STAT(m) offsetof(struct edma_ethtool_statistics, m) ++#define DRVINFO_LEN 32 ++ ++/* Array of strings describing statistics ++ */ ++static const struct edma_ethtool_stats edma_gstrings_stats[] = { ++ {"tx_q0_pkt", EDMA_STAT(tx_q0_pkt)}, ++ {"tx_q1_pkt", EDMA_STAT(tx_q1_pkt)}, ++ {"tx_q2_pkt", EDMA_STAT(tx_q2_pkt)}, ++ {"tx_q3_pkt", EDMA_STAT(tx_q3_pkt)}, ++ {"tx_q4_pkt", EDMA_STAT(tx_q4_pkt)}, ++ {"tx_q5_pkt", EDMA_STAT(tx_q5_pkt)}, ++ {"tx_q6_pkt", EDMA_STAT(tx_q6_pkt)}, ++ {"tx_q7_pkt", EDMA_STAT(tx_q7_pkt)}, ++ {"tx_q8_pkt", EDMA_STAT(tx_q8_pkt)}, ++ {"tx_q9_pkt", EDMA_STAT(tx_q9_pkt)}, ++ {"tx_q10_pkt", EDMA_STAT(tx_q10_pkt)}, ++ {"tx_q11_pkt", EDMA_STAT(tx_q11_pkt)}, ++ {"tx_q12_pkt", EDMA_STAT(tx_q12_pkt)}, ++ {"tx_q13_pkt", EDMA_STAT(tx_q13_pkt)}, ++ {"tx_q14_pkt", EDMA_STAT(tx_q14_pkt)}, ++ {"tx_q15_pkt", EDMA_STAT(tx_q15_pkt)}, ++ {"tx_q0_byte", EDMA_STAT(tx_q0_byte)}, ++ {"tx_q1_byte", EDMA_STAT(tx_q1_byte)}, ++ {"tx_q2_byte", EDMA_STAT(tx_q2_byte)}, ++ {"tx_q3_byte", EDMA_STAT(tx_q3_byte)}, ++ {"tx_q4_byte", EDMA_STAT(tx_q4_byte)}, ++ {"tx_q5_byte", EDMA_STAT(tx_q5_byte)}, ++ {"tx_q6_byte", EDMA_STAT(tx_q6_byte)}, ++ {"tx_q7_byte", EDMA_STAT(tx_q7_byte)}, ++ {"tx_q8_byte", EDMA_STAT(tx_q8_byte)}, ++ {"tx_q9_byte", EDMA_STAT(tx_q9_byte)}, ++ {"tx_q10_byte", EDMA_STAT(tx_q10_byte)}, ++ {"tx_q11_byte", EDMA_STAT(tx_q11_byte)}, ++ {"tx_q12_byte", EDMA_STAT(tx_q12_byte)}, ++ {"tx_q13_byte", EDMA_STAT(tx_q13_byte)}, ++ {"tx_q14_byte", EDMA_STAT(tx_q14_byte)}, ++ {"tx_q15_byte", EDMA_STAT(tx_q15_byte)}, ++ {"rx_q0_pkt", EDMA_STAT(rx_q0_pkt)}, ++ {"rx_q1_pkt", EDMA_STAT(rx_q1_pkt)}, ++ {"rx_q2_pkt", EDMA_STAT(rx_q2_pkt)}, ++ {"rx_q3_pkt", EDMA_STAT(rx_q3_pkt)}, ++ {"rx_q4_pkt", EDMA_STAT(rx_q4_pkt)}, ++ {"rx_q5_pkt", EDMA_STAT(rx_q5_pkt)}, ++ {"rx_q6_pkt", EDMA_STAT(rx_q6_pkt)}, ++ 
{"rx_q7_pkt", EDMA_STAT(rx_q7_pkt)}, ++ {"rx_q0_byte", EDMA_STAT(rx_q0_byte)}, ++ {"rx_q1_byte", EDMA_STAT(rx_q1_byte)}, ++ {"rx_q2_byte", EDMA_STAT(rx_q2_byte)}, ++ {"rx_q3_byte", EDMA_STAT(rx_q3_byte)}, ++ {"rx_q4_byte", EDMA_STAT(rx_q4_byte)}, ++ {"rx_q5_byte", EDMA_STAT(rx_q5_byte)}, ++ {"rx_q6_byte", EDMA_STAT(rx_q6_byte)}, ++ {"rx_q7_byte", EDMA_STAT(rx_q7_byte)}, ++ {"tx_desc_error", EDMA_STAT(tx_desc_error)}, ++}; ++ ++#define EDMA_STATS_LEN ARRAY_SIZE(edma_gstrings_stats) ++ ++/* edma_get_strset_count() ++ * Get strset count ++ */ ++static int edma_get_strset_count(struct net_device *netdev, ++ int sset) ++{ ++ switch (sset) { ++ case ETH_SS_STATS: ++ return EDMA_STATS_LEN; ++ default: ++ netdev_dbg(netdev, "%s: Invalid string set", __func__); ++ return -EOPNOTSUPP; ++ } ++} ++ ++ ++/* edma_get_strings() ++ * get stats string ++ */ ++static void edma_get_strings(struct net_device *netdev, uint32_t stringset, ++ uint8_t *data) ++{ ++ uint8_t *p = data; ++ uint32_t i; ++ ++ switch (stringset) { ++ case ETH_SS_STATS: ++ for (i = 0; i < EDMA_STATS_LEN; i++) { ++ memcpy(p, edma_gstrings_stats[i].stat_string, ++ min((size_t)ETH_GSTRING_LEN, ++ strlen(edma_gstrings_stats[i].stat_string) ++ + 1)); ++ p += ETH_GSTRING_LEN; ++ } ++ break; ++ } ++} ++ ++/* edma_get_ethtool_stats() ++ * Get ethtool statistics ++ */ ++static void edma_get_ethtool_stats(struct net_device *netdev, ++ struct ethtool_stats *stats, uint64_t *data) ++{ ++ struct edma_adapter *adapter = netdev_priv(netdev); ++ struct edma_common_info *edma_cinfo = adapter->edma_cinfo; ++ int i; ++ uint8_t *p = NULL; ++ ++ edma_read_append_stats(edma_cinfo); ++ ++ for(i = 0; i < EDMA_STATS_LEN; i++) { ++ p = (uint8_t *)&(edma_cinfo->edma_ethstats) + ++ edma_gstrings_stats[i].stat_offset; ++ data[i] = *(uint32_t *)p; ++ } ++} ++ ++/* edma_get_drvinfo() ++ * get edma driver info ++ */ ++static void edma_get_drvinfo(struct net_device *dev, ++ struct ethtool_drvinfo *info) ++{ ++ strlcpy(info->driver, "ess_edma", 
DRVINFO_LEN); ++ strlcpy(info->bus_info, "axi", ETHTOOL_BUSINFO_LEN); ++} ++ ++/* edma_nway_reset() ++ * Reset the phy, if available. ++ */ ++static int edma_nway_reset(struct net_device *netdev) ++{ ++ return -EINVAL; ++} ++ ++/* edma_get_wol() ++ * get wake on lan info ++ */ ++static void edma_get_wol(struct net_device *netdev, ++ struct ethtool_wolinfo *wol) ++{ ++ wol->supported = 0; ++ wol->wolopts = 0; ++} ++ ++/* edma_get_msglevel() ++ * get message level. ++ */ ++static uint32_t edma_get_msglevel(struct net_device *netdev) ++{ ++ return 0; ++} ++ ++/* edma_get_settings() ++ * Get edma settings ++ */ ++static int edma_get_settings(struct net_device *netdev, ++ struct ethtool_cmd *ecmd) ++{ ++ struct edma_adapter *adapter = netdev_priv(netdev); ++ ++ if (adapter->poll_required) { ++ struct phy_device *phydev = NULL; ++ uint16_t phyreg; ++ ++ if ((adapter->forced_speed != SPEED_UNKNOWN) ++ && !(adapter->poll_required)) ++ return -EPERM; ++ ++ phydev = adapter->phydev; ++ ++ ecmd->advertising = phydev->advertising; ++ ecmd->autoneg = phydev->autoneg; ++ ++ if (adapter->link_state == __EDMA_LINKDOWN) { ++ ecmd->speed = SPEED_UNKNOWN; ++ ecmd->duplex = DUPLEX_UNKNOWN; ++ } else { ++ ecmd->speed = phydev->speed; ++ ecmd->duplex = phydev->duplex; ++ } ++ ++ ecmd->phy_address = adapter->phy_mdio_addr; ++ ++ phyreg = (uint16_t)phy_read(adapter->phydev, MII_LPA); ++ if (phyreg & LPA_10HALF) ++ ecmd->lp_advertising |= ADVERTISED_10baseT_Half; ++ ++ if (phyreg & LPA_10FULL) ++ ecmd->lp_advertising |= ADVERTISED_10baseT_Full; ++ ++ if (phyreg & LPA_100HALF) ++ ecmd->lp_advertising |= ADVERTISED_100baseT_Half; ++ ++ if (phyreg & LPA_100FULL) ++ ecmd->lp_advertising |= ADVERTISED_100baseT_Full; ++ ++ phyreg = (uint16_t)phy_read(adapter->phydev, MII_STAT1000); ++ if (phyreg & LPA_1000HALF) ++ ecmd->lp_advertising |= ADVERTISED_1000baseT_Half; ++ ++ if (phyreg & LPA_1000FULL) ++ ecmd->lp_advertising |= ADVERTISED_1000baseT_Full; ++ } else { ++ /* If the speed/duplex for this 
GMAC is forced and we ++ * are not polling for link state changes, return the ++ * values as specified by platform. This will be true ++ * for GMACs connected to switch, and interfaces that ++ * do not use a PHY. ++ */ ++ if (!(adapter->poll_required)) { ++ if (adapter->forced_speed != SPEED_UNKNOWN) { ++ /* set speed and duplex */ ++ ethtool_cmd_speed_set(ecmd, SPEED_1000); ++ ecmd->duplex = DUPLEX_FULL; ++ ++ /* Populate capabilities advertised by self */ ++ ecmd->advertising = 0; ++ ecmd->autoneg = 0; ++ ecmd->port = PORT_TP; ++ ecmd->transceiver = XCVR_EXTERNAL; ++ } else { ++ /* non link polled and non ++ * forced speed/duplex interface ++ */ ++ return -EIO; ++ } ++ } ++ } ++ ++ return 0; ++} ++ ++/* edma_set_settings() ++ * Set EDMA settings ++ */ ++static int edma_set_settings(struct net_device *netdev, ++ struct ethtool_cmd *ecmd) ++{ ++ struct edma_adapter *adapter = netdev_priv(netdev); ++ struct phy_device *phydev = NULL; ++ ++ if ((adapter->forced_speed != SPEED_UNKNOWN) && ++ !adapter->poll_required) ++ return -EPERM; ++ ++ phydev = adapter->phydev; ++ phydev->advertising = ecmd->advertising; ++ phydev->autoneg = ecmd->autoneg; ++ phydev->speed = ethtool_cmd_speed(ecmd); ++ phydev->duplex = ecmd->duplex; ++ ++ genphy_config_aneg(phydev); ++ ++ return 0; ++} ++ ++/* edma_get_coalesce ++ * get interrupt mitigation ++ */ ++static int edma_get_coalesce(struct net_device *netdev, ++ struct ethtool_coalesce *ec) ++{ ++ u32 reg_val; ++ ++ edma_get_tx_rx_coalesce(®_val); ++ ++ /* We read the Interrupt Moderation Timer(IMT) register value, ++ * use lower 16 bit for rx and higher 16 bit for Tx. We do a ++ * left shift by 1, because IMT resolution timer is 2usecs. ++ * Hence the value given by the register is multiplied by 2 to ++ * get the actual time in usecs. 
++ */ ++ ec->tx_coalesce_usecs = (((reg_val >> 16) & 0xffff) << 1); ++ ec->rx_coalesce_usecs = ((reg_val & 0xffff) << 1); ++ ++ return 0; ++} ++ ++/* edma_set_coalesce ++ * set interrupt mitigation ++ */ ++static int edma_set_coalesce(struct net_device *netdev, ++ struct ethtool_coalesce *ec) ++{ ++ if (ec->tx_coalesce_usecs) ++ edma_change_tx_coalesce(ec->tx_coalesce_usecs); ++ if (ec->rx_coalesce_usecs) ++ edma_change_rx_coalesce(ec->rx_coalesce_usecs); ++ ++ return 0; ++} ++ ++/* edma_set_priv_flags() ++ * Set EDMA private flags ++ */ ++static int edma_set_priv_flags(struct net_device *netdev, u32 flags) ++{ ++ return 0; ++} ++ ++/* edma_get_priv_flags() ++ * get edma driver flags ++ */ ++static u32 edma_get_priv_flags(struct net_device *netdev) ++{ ++ return 0; ++} ++ ++/* edma_get_ringparam() ++ * get ring size ++ */ ++static void edma_get_ringparam(struct net_device *netdev, ++ struct ethtool_ringparam *ring) ++{ ++ struct edma_adapter *adapter = netdev_priv(netdev); ++ struct edma_common_info *edma_cinfo = adapter->edma_cinfo; ++ ++ ring->tx_max_pending = edma_cinfo->tx_ring_count; ++ ring->rx_max_pending = edma_cinfo->rx_ring_count; ++} ++ ++/* Ethtool operations ++ */ ++static const struct ethtool_ops edma_ethtool_ops = { ++ .get_drvinfo = &edma_get_drvinfo, ++ .get_link = ðtool_op_get_link, ++ .get_msglevel = &edma_get_msglevel, ++ .nway_reset = &edma_nway_reset, ++ .get_wol = &edma_get_wol, ++ .get_settings = &edma_get_settings, ++ .set_settings = &edma_set_settings, ++ .get_strings = &edma_get_strings, ++ .get_sset_count = &edma_get_strset_count, ++ .get_ethtool_stats = &edma_get_ethtool_stats, ++ .get_coalesce = &edma_get_coalesce, ++ .set_coalesce = &edma_set_coalesce, ++ .get_priv_flags = edma_get_priv_flags, ++ .set_priv_flags = edma_set_priv_flags, ++ .get_ringparam = edma_get_ringparam, ++}; ++ ++/* edma_set_ethtool_ops ++ * Set ethtool operations ++ */ ++void edma_set_ethtool_ops(struct net_device *netdev) ++{ ++ netdev->ethtool_ops = 
&edma_ethtool_ops; ++} +--- /dev/null ++++ b/drivers/net/ethernet/qualcomm/essedma/ess_edma.h +@@ -0,0 +1,332 @@ ++/* ++ * Copyright (c) 2014 - 2016, The Linux Foundation. All rights reserved. ++ * ++ * Permission to use, copy, modify, and/or distribute this software for ++ * any purpose with or without fee is hereby granted, provided that the ++ * above copyright notice and this permission notice appear in all copies. ++ * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES ++ * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF ++ * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ++ * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES ++ * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN ++ * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT ++ * OF OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. ++ */ ++ ++#ifndef _ESS_EDMA_H_ ++#define _ESS_EDMA_H_ ++ ++#include ++ ++struct edma_adapter; ++struct edma_hw; ++ ++/* register definition */ ++#define EDMA_REG_MAS_CTRL 0x0 ++#define EDMA_REG_TIMEOUT_CTRL 0x004 ++#define EDMA_REG_DBG0 0x008 ++#define EDMA_REG_DBG1 0x00C ++#define EDMA_REG_SW_CTRL0 0x100 ++#define EDMA_REG_SW_CTRL1 0x104 ++ ++/* Interrupt Status Register */ ++#define EDMA_REG_RX_ISR 0x200 ++#define EDMA_REG_TX_ISR 0x208 ++#define EDMA_REG_MISC_ISR 0x210 ++#define EDMA_REG_WOL_ISR 0x218 ++ ++#define EDMA_MISC_ISR_RX_URG_Q(x) (1 << x) ++ ++#define EDMA_MISC_ISR_AXIR_TIMEOUT 0x00000100 ++#define EDMA_MISC_ISR_AXIR_ERR 0x00000200 ++#define EDMA_MISC_ISR_TXF_DEAD 0x00000400 ++#define EDMA_MISC_ISR_AXIW_ERR 0x00000800 ++#define EDMA_MISC_ISR_AXIW_TIMEOUT 0x00001000 ++ ++#define EDMA_WOL_ISR 0x00000001 ++ ++/* Interrupt Mask Register */ ++#define EDMA_REG_MISC_IMR 0x214 ++#define EDMA_REG_WOL_IMR 0x218 ++ ++#define EDMA_RX_IMR_NORMAL_MASK 0x1 ++#define EDMA_TX_IMR_NORMAL_MASK 0x1 ++#define EDMA_MISC_IMR_NORMAL_MASK 0x80001FFF 
++#define EDMA_WOL_IMR_NORMAL_MASK 0x1 ++ ++/* Edma receive consumer index */ ++#define EDMA_REG_RX_SW_CONS_IDX_Q(x) (0x220 + ((x) << 2)) /* x is the queue id */ ++/* Edma transmit consumer index */ ++#define EDMA_REG_TX_SW_CONS_IDX_Q(x) (0x240 + ((x) << 2)) /* x is the queue id */ ++ ++/* IRQ Moderator Initial Timer Register */ ++#define EDMA_REG_IRQ_MODRT_TIMER_INIT 0x280 ++#define EDMA_IRQ_MODRT_TIMER_MASK 0xFFFF ++#define EDMA_IRQ_MODRT_RX_TIMER_SHIFT 0 ++#define EDMA_IRQ_MODRT_TX_TIMER_SHIFT 16 ++ ++/* Interrupt Control Register */ ++#define EDMA_REG_INTR_CTRL 0x284 ++#define EDMA_INTR_CLR_TYP_SHIFT 0 ++#define EDMA_INTR_SW_IDX_W_TYP_SHIFT 1 ++#define EDMA_INTR_CLEAR_TYPE_W1 0 ++#define EDMA_INTR_CLEAR_TYPE_R 1 ++ ++/* RX Interrupt Mask Register */ ++#define EDMA_REG_RX_INT_MASK_Q(x) (0x300 + ((x) << 2)) /* x = queue id */ ++ ++/* TX Interrupt mask register */ ++#define EDMA_REG_TX_INT_MASK_Q(x) (0x340 + ((x) << 2)) /* x = queue id */ ++ ++/* Load Ptr Register ++ * Software sets this bit after the initialization of the head and tail ++ */ ++#define EDMA_REG_TX_SRAM_PART 0x400 ++#define EDMA_LOAD_PTR_SHIFT 16 ++ ++/* TXQ Control Register */ ++#define EDMA_REG_TXQ_CTRL 0x404 ++#define EDMA_TXQ_CTRL_IP_OPTION_EN 0x10 ++#define EDMA_TXQ_CTRL_TXQ_EN 0x20 ++#define EDMA_TXQ_CTRL_ENH_MODE 0x40 ++#define EDMA_TXQ_CTRL_LS_8023_EN 0x80 ++#define EDMA_TXQ_CTRL_TPD_BURST_EN 0x100 ++#define EDMA_TXQ_CTRL_LSO_BREAK_EN 0x200 ++#define EDMA_TXQ_NUM_TPD_BURST_MASK 0xF ++#define EDMA_TXQ_TXF_BURST_NUM_MASK 0xFFFF ++#define EDMA_TXQ_NUM_TPD_BURST_SHIFT 0 ++#define EDMA_TXQ_TXF_BURST_NUM_SHIFT 16 ++ ++#define EDMA_REG_TXF_WATER_MARK 0x408 /* In 8-bytes */ ++#define EDMA_TXF_WATER_MARK_MASK 0x0FFF ++#define EDMA_TXF_LOW_WATER_MARK_SHIFT 0 ++#define EDMA_TXF_HIGH_WATER_MARK_SHIFT 16 ++#define EDMA_TXQ_CTRL_BURST_MODE_EN 0x80000000 ++ ++/* WRR Control Register */ ++#define EDMA_REG_WRR_CTRL_Q0_Q3 0x40c ++#define EDMA_REG_WRR_CTRL_Q4_Q7 0x410 ++#define EDMA_REG_WRR_CTRL_Q8_Q11 0x414 
++#define EDMA_REG_WRR_CTRL_Q12_Q15 0x418 ++ ++/* Weight round robin(WRR), it takes queue as input, and computes ++ * starting bits where we need to write the weight for a particular ++ * queue ++ */ ++#define EDMA_WRR_SHIFT(x) (((x) * 5) % 20) ++ ++/* Tx Descriptor Control Register */ ++#define EDMA_REG_TPD_RING_SIZE 0x41C ++#define EDMA_TPD_RING_SIZE_SHIFT 0 ++#define EDMA_TPD_RING_SIZE_MASK 0xFFFF ++ ++/* Transmit descriptor base address */ ++#define EDMA_REG_TPD_BASE_ADDR_Q(x) (0x420 + ((x) << 2)) /* x = queue id */ ++ ++/* TPD Index Register */ ++#define EDMA_REG_TPD_IDX_Q(x) (0x460 + ((x) << 2)) /* x = queue id */ ++ ++#define EDMA_TPD_PROD_IDX_BITS 0x0000FFFF ++#define EDMA_TPD_CONS_IDX_BITS 0xFFFF0000 ++#define EDMA_TPD_PROD_IDX_MASK 0xFFFF ++#define EDMA_TPD_CONS_IDX_MASK 0xFFFF ++#define EDMA_TPD_PROD_IDX_SHIFT 0 ++#define EDMA_TPD_CONS_IDX_SHIFT 16 ++ ++/* TX Virtual Queue Mapping Control Register */ ++#define EDMA_REG_VQ_CTRL0 0x4A0 ++#define EDMA_REG_VQ_CTRL1 0x4A4 ++ ++/* Virtual QID shift, it takes queue as input, and computes ++ * Virtual QID position in virtual qid control register ++ */ ++#define EDMA_VQ_ID_SHIFT(i) (((i) * 3) % 24) ++ ++/* Virtual Queue Default Value */ ++#define EDMA_VQ_REG_VALUE 0x240240 ++ ++/* Tx side Port Interface Control Register */ ++#define EDMA_REG_PORT_CTRL 0x4A8 ++#define EDMA_PAD_EN_SHIFT 15 ++ ++/* Tx side VLAN Configuration Register */ ++#define EDMA_REG_VLAN_CFG 0x4AC ++ ++#define EDMA_TX_CVLAN 16 ++#define EDMA_TX_INS_CVLAN 17 ++#define EDMA_TX_CVLAN_TAG_SHIFT 0 ++ ++#define EDMA_TX_SVLAN 14 ++#define EDMA_TX_INS_SVLAN 15 ++#define EDMA_TX_SVLAN_TAG_SHIFT 16 ++ ++/* Tx Queue Packet Statistic Register */ ++#define EDMA_REG_TX_STAT_PKT_Q(x) (0x700 + ((x) << 3)) /* x = queue id */ ++ ++#define EDMA_TX_STAT_PKT_MASK 0xFFFFFF ++ ++/* Tx Queue Byte Statistic Register */ ++#define EDMA_REG_TX_STAT_BYTE_Q(x) (0x704 + ((x) << 3)) /* x = queue id */ ++ ++/* Load Balance Based Ring Offset Register */ ++#define 
EDMA_REG_LB_RING 0x800 ++#define EDMA_LB_RING_ENTRY_MASK 0xff ++#define EDMA_LB_RING_ID_MASK 0x7 ++#define EDMA_LB_RING_PROFILE_ID_MASK 0x3 ++#define EDMA_LB_RING_ENTRY_BIT_OFFSET 8 ++#define EDMA_LB_RING_ID_OFFSET 0 ++#define EDMA_LB_RING_PROFILE_ID_OFFSET 3 ++#define EDMA_LB_REG_VALUE 0x6040200 ++ ++/* Load Balance Priority Mapping Register */ ++#define EDMA_REG_LB_PRI_START 0x804 ++#define EDMA_REG_LB_PRI_END 0x810 ++#define EDMA_LB_PRI_REG_INC 4 ++#define EDMA_LB_PRI_ENTRY_BIT_OFFSET 4 ++#define EDMA_LB_PRI_ENTRY_MASK 0xf ++ ++/* RSS Priority Mapping Register */ ++#define EDMA_REG_RSS_PRI 0x820 ++#define EDMA_RSS_PRI_ENTRY_MASK 0xf ++#define EDMA_RSS_RING_ID_MASK 0x7 ++#define EDMA_RSS_PRI_ENTRY_BIT_OFFSET 4 ++ ++/* RSS Indirection Register */ ++#define EDMA_REG_RSS_IDT(x) (0x840 + ((x) << 2)) /* x = No. of indirection table */ ++#define EDMA_NUM_IDT 16 ++#define EDMA_RSS_IDT_VALUE 0x64206420 ++ ++/* Default RSS Ring Register */ ++#define EDMA_REG_DEF_RSS 0x890 ++#define EDMA_DEF_RSS_MASK 0x7 ++ ++/* RSS Hash Function Type Register */ ++#define EDMA_REG_RSS_TYPE 0x894 ++#define EDMA_RSS_TYPE_NONE 0x01 ++#define EDMA_RSS_TYPE_IPV4TCP 0x02 ++#define EDMA_RSS_TYPE_IPV6_TCP 0x04 ++#define EDMA_RSS_TYPE_IPV4_UDP 0x08 ++#define EDMA_RSS_TYPE_IPV6UDP 0x10 ++#define EDMA_RSS_TYPE_IPV4 0x20 ++#define EDMA_RSS_TYPE_IPV6 0x40 ++#define EDMA_RSS_HASH_MODE_MASK 0x7f ++ ++#define EDMA_REG_RSS_HASH_VALUE 0x8C0 ++ ++#define EDMA_REG_RSS_TYPE_RESULT 0x8C4 ++ ++#define EDMA_HASH_TYPE_START 0 ++#define EDMA_HASH_TYPE_END 5 ++#define EDMA_HASH_TYPE_SHIFT 12 ++ ++#define EDMA_RFS_FLOW_ENTRIES 1024 ++#define EDMA_RFS_FLOW_ENTRIES_MASK (EDMA_RFS_FLOW_ENTRIES - 1) ++#define EDMA_RFS_EXPIRE_COUNT_PER_CALL 128 ++ ++/* RFD Base Address Register */ ++#define EDMA_REG_RFD_BASE_ADDR_Q(x) (0x950 + ((x) << 2)) /* x = queue id */ ++ ++/* RFD Index Register */ ++#define EDMA_REG_RFD_IDX_Q(x) (0x9B0 + ((x) << 2)) ++ ++#define EDMA_RFD_PROD_IDX_BITS 0x00000FFF ++#define EDMA_RFD_CONS_IDX_BITS 
0x0FFF0000 ++#define EDMA_RFD_PROD_IDX_MASK 0xFFF ++#define EDMA_RFD_CONS_IDX_MASK 0xFFF ++#define EDMA_RFD_PROD_IDX_SHIFT 0 ++#define EDMA_RFD_CONS_IDX_SHIFT 16 ++ ++/* Rx Descriptor Control Register */ ++#define EDMA_REG_RX_DESC0 0xA10 ++#define EDMA_RFD_RING_SIZE_MASK 0xFFF ++#define EDMA_RX_BUF_SIZE_MASK 0xFFFF ++#define EDMA_RFD_RING_SIZE_SHIFT 0 ++#define EDMA_RX_BUF_SIZE_SHIFT 16 ++ ++#define EDMA_REG_RX_DESC1 0xA14 ++#define EDMA_RXQ_RFD_BURST_NUM_MASK 0x3F ++#define EDMA_RXQ_RFD_PF_THRESH_MASK 0x1F ++#define EDMA_RXQ_RFD_LOW_THRESH_MASK 0xFFF ++#define EDMA_RXQ_RFD_BURST_NUM_SHIFT 0 ++#define EDMA_RXQ_RFD_PF_THRESH_SHIFT 8 ++#define EDMA_RXQ_RFD_LOW_THRESH_SHIFT 16 ++ ++/* RXQ Control Register */ ++#define EDMA_REG_RXQ_CTRL 0xA18 ++#define EDMA_FIFO_THRESH_TYPE_SHIF 0 ++#define EDMA_FIFO_THRESH_128_BYTE 0x0 ++#define EDMA_FIFO_THRESH_64_BYTE 0x1 ++#define EDMA_RXQ_CTRL_RMV_VLAN 0x00000002 ++#define EDMA_RXQ_CTRL_EN 0x0000FF00 ++ ++/* AXI Burst Size Config */ ++#define EDMA_REG_AXIW_CTRL_MAXWRSIZE 0xA1C ++#define EDMA_AXIW_MAXWRSIZE_VALUE 0x0 ++ ++/* Rx Statistics Register */ ++#define EDMA_REG_RX_STAT_BYTE_Q(x) (0xA30 + ((x) << 2)) /* x = queue id */ ++#define EDMA_REG_RX_STAT_PKT_Q(x) (0xA50 + ((x) << 2)) /* x = queue id */ ++ ++/* WoL Pattern Length Register */ ++#define EDMA_REG_WOL_PATTERN_LEN0 0xC00 ++#define EDMA_WOL_PT_LEN_MASK 0xFF ++#define EDMA_WOL_PT0_LEN_SHIFT 0 ++#define EDMA_WOL_PT1_LEN_SHIFT 8 ++#define EDMA_WOL_PT2_LEN_SHIFT 16 ++#define EDMA_WOL_PT3_LEN_SHIFT 24 ++ ++#define EDMA_REG_WOL_PATTERN_LEN1 0xC04 ++#define EDMA_WOL_PT4_LEN_SHIFT 0 ++#define EDMA_WOL_PT5_LEN_SHIFT 8 ++#define EDMA_WOL_PT6_LEN_SHIFT 16 ++ ++/* WoL Control Register */ ++#define EDMA_REG_WOL_CTRL 0xC08 ++#define EDMA_WOL_WK_EN 0x00000001 ++#define EDMA_WOL_MG_EN 0x00000002 ++#define EDMA_WOL_PT0_EN 0x00000004 ++#define EDMA_WOL_PT1_EN 0x00000008 ++#define EDMA_WOL_PT2_EN 0x00000010 ++#define EDMA_WOL_PT3_EN 0x00000020 ++#define EDMA_WOL_PT4_EN 0x00000040 ++#define 
EDMA_WOL_PT5_EN 0x00000080 ++#define EDMA_WOL_PT6_EN 0x00000100 ++ ++/* MAC Control Register */ ++#define EDMA_REG_MAC_CTRL0 0xC20 ++#define EDMA_REG_MAC_CTRL1 0xC24 ++ ++/* WoL Pattern Register */ ++#define EDMA_REG_WOL_PATTERN_START 0x5000 ++#define EDMA_PATTERN_PART_REG_OFFSET 0x40 ++ ++ ++/* TX descriptor fields */ ++#define EDMA_TPD_HDR_SHIFT 0 ++#define EDMA_TPD_PPPOE_EN 0x00000100 ++#define EDMA_TPD_IP_CSUM_EN 0x00000200 ++#define EDMA_TPD_TCP_CSUM_EN 0x00000400 ++#define EDMA_TPD_UDP_CSUM_EN 0x00000800 ++#define EDMA_TPD_CUSTOM_CSUM_EN 0x00000C00 ++#define EDMA_TPD_LSO_EN 0x00001000 ++#define EDMA_TPD_LSO_V2_EN 0x00002000 ++#define EDMA_TPD_IPV4_EN 0x00010000 ++#define EDMA_TPD_MSS_MASK 0x1FFF ++#define EDMA_TPD_MSS_SHIFT 18 ++#define EDMA_TPD_CUSTOM_CSUM_SHIFT 18 ++ ++/* RRD descriptor fields */ ++#define EDMA_RRD_NUM_RFD_MASK 0x000F ++#define EDMA_RRD_SVLAN 0x8000 ++#define EDMA_RRD_FLOW_COOKIE_MASK 0x07FF ++ ++#define EDMA_RRD_PKT_SIZE_MASK 0x3FFF ++#define EDMA_RRD_CSUM_FAIL_MASK 0xC000 ++#define EDMA_RRD_CVLAN 0x0001 ++#define EDMA_RRD_DESC_VALID 0x8000 ++ ++#define EDMA_RRD_PRIORITY_SHIFT 4 ++#define EDMA_RRD_PRIORITY_MASK 0x7 ++#define EDMA_RRD_PORT_TYPE_SHIFT 7 ++#define EDMA_RRD_PORT_TYPE_MASK 0x1F ++#endif /* _ESS_EDMA_H_ */ diff --git a/target/linux/ipq40xx/patches-4.9/711-dts-ipq4019-add-ethernet-essedma-node.patch b/target/linux/ipq40xx/patches-4.9/711-dts-ipq4019-add-ethernet-essedma-node.patch new file mode 100644 index 000000000..771ff2275 --- /dev/null +++ b/target/linux/ipq40xx/patches-4.9/711-dts-ipq4019-add-ethernet-essedma-node.patch @@ -0,0 +1,92 @@ +From c611d3780fa101662a822d10acf8feb04ca97409 Mon Sep 17 00:00:00 2001 +From: Christian Lamparter +Date: Sun, 20 Nov 2016 01:01:10 +0100 +Subject: [PATCH] dts: ipq4019: add ethernet essedma node + +This patch adds the device-tree node for the ethernet +interfaces. + +Note: The driver isn't anywhere close to be upstream, +so the info might change.
+ +Signed-off-by: Christian Lamparter +--- + arch/arm/boot/dts/qcom-ipq4019.dtsi | 60 +++++++++++++++++++++++++++++++++++++ + 1 file changed, 60 insertions(+) + +--- a/arch/arm/boot/dts/qcom-ipq4019.dtsi ++++ b/arch/arm/boot/dts/qcom-ipq4019.dtsi +@@ -26,6 +26,8 @@ + aliases { + spi0 = &spi_0; + i2c0 = &i2c_0; ++ ethernet0 = &gmac0; ++ ethernet1 = &gmac1; + }; + + cpus { +@@ -366,6 +368,64 @@ + status = "disabled"; + }; + ++ edma@c080000 { ++ compatible = "qcom,ess-edma"; ++ reg = <0xc080000 0x8000>; ++ qcom,page-mode = <0>; ++ qcom,rx_head_buf_size = <1540>; ++ qcom,mdio_supported; ++ qcom,poll_required = <1>; ++ qcom,num_gmac = <2>; ++ interrupts = <0 65 IRQ_TYPE_EDGE_RISING ++ 0 66 IRQ_TYPE_EDGE_RISING ++ 0 67 IRQ_TYPE_EDGE_RISING ++ 0 68 IRQ_TYPE_EDGE_RISING ++ 0 69 IRQ_TYPE_EDGE_RISING ++ 0 70 IRQ_TYPE_EDGE_RISING ++ 0 71 IRQ_TYPE_EDGE_RISING ++ 0 72 IRQ_TYPE_EDGE_RISING ++ 0 73 IRQ_TYPE_EDGE_RISING ++ 0 74 IRQ_TYPE_EDGE_RISING ++ 0 75 IRQ_TYPE_EDGE_RISING ++ 0 76 IRQ_TYPE_EDGE_RISING ++ 0 77 IRQ_TYPE_EDGE_RISING ++ 0 78 IRQ_TYPE_EDGE_RISING ++ 0 79 IRQ_TYPE_EDGE_RISING ++ 0 80 IRQ_TYPE_EDGE_RISING ++ 0 240 IRQ_TYPE_EDGE_RISING ++ 0 241 IRQ_TYPE_EDGE_RISING ++ 0 242 IRQ_TYPE_EDGE_RISING ++ 0 243 IRQ_TYPE_EDGE_RISING ++ 0 244 IRQ_TYPE_EDGE_RISING ++ 0 245 IRQ_TYPE_EDGE_RISING ++ 0 246 IRQ_TYPE_EDGE_RISING ++ 0 247 IRQ_TYPE_EDGE_RISING ++ 0 248 IRQ_TYPE_EDGE_RISING ++ 0 249 IRQ_TYPE_EDGE_RISING ++ 0 250 IRQ_TYPE_EDGE_RISING ++ 0 251 IRQ_TYPE_EDGE_RISING ++ 0 252 IRQ_TYPE_EDGE_RISING ++ 0 253 IRQ_TYPE_EDGE_RISING ++ 0 254 IRQ_TYPE_EDGE_RISING ++ 0 255 IRQ_TYPE_EDGE_RISING>; ++ ++ status = "disabled"; ++ ++ gmac0: gmac0 { ++ local-mac-address = [00 00 00 00 00 00]; ++ vlan_tag = <1 0x1f>; ++ }; ++ ++ gmac1: gmac1 { ++ local-mac-address = [00 00 00 00 00 00]; ++ qcom,phy_mdio_addr = <4>; ++ qcom,poll_required = <1>; ++ qcom,forced_speed = <1000>; ++ qcom,forced_duplex = <1>; ++ vlan_tag = <2 0x20>; ++ }; ++ }; ++ + usb3_ss_phy: ssphy@9a000 { + compatible = 
"qca,uni-ssphy"; + reg = <0x9a000 0x800>; diff --git a/target/linux/ipq40xx/patches-4.9/712-net-essedma-disable-default-vlan.patch b/target/linux/ipq40xx/patches-4.9/712-net-essedma-disable-default-vlan.patch new file mode 100644 index 000000000..2a1cade85 --- /dev/null +++ b/target/linux/ipq40xx/patches-4.9/712-net-essedma-disable-default-vlan.patch @@ -0,0 +1,69 @@ +From: Christian Lamparter +Subject: [PATCH] net: essedma: disable default vlan tagging +Date: Tue, 21 Mar 13:59:02 CET 2017 +0100 + +The essedma driver has its own unique take on VLAN management +and its configuration. In the original SDK, each VLAN is +assigned one virtual ethernet netdev. + +However, this is non-standard. So, this patch does away +with the default_vlan_tag property the driver is using +and therefore forces the user to use the kernel's vlan +feature. + +This patch also removes the "qcom,poll_required = <1>;" from +the essedma node. + +Signed-off-by: Christian Lamparter +--- +--- a/drivers/net/ethernet/qualcomm/essedma/edma.c ++++ b/drivers/net/ethernet/qualcomm/essedma/edma.c +@@ -715,13 +715,11 @@ static void edma_rx_complete(struct edma + edma_receive_checksum(rd, skb); + + /* Process VLAN HW acceleration indication provided by HW */ +- if (unlikely(adapter->default_vlan_tag != rd->rrd4)) { +- vlan = rd->rrd4; +- if (likely(rd->rrd7 & EDMA_RRD_CVLAN)) +- __vlan_hwaccel_put_tag(skb, htons(ETH_P_8021Q), vlan); +- else if (rd->rrd1 & EDMA_RRD_SVLAN) +- __vlan_hwaccel_put_tag(skb, htons(ETH_P_8021AD), vlan); +- } ++ vlan = rd->rrd4; ++ if (likely(rd->rrd7 & EDMA_RRD_CVLAN)) ++ __vlan_hwaccel_put_tag(skb, htons(ETH_P_8021Q), vlan); ++ else if (rd->rrd1 & EDMA_RRD_SVLAN) ++ __vlan_hwaccel_put_tag(skb, htons(ETH_P_8021AD), vlan); + + /* Update rx statistics */ + adapter->stats.rx_packets++; +@@ -1390,8 +1388,6 @@ netdev_tx_t edma_xmit(struct sk_buff *sk + /* Check and mark VLAN tag offload */ + if (skb_vlan_tag_present(skb)) + flags_transmit |= EDMA_VLAN_TX_TAG_INSERT_FLAG; +- else if 
(adapter->default_vlan_tag) +- flags_transmit |= EDMA_VLAN_TX_TAG_INSERT_DEFAULT_FLAG; + + /* Check and mark checksum offload */ + if (likely(skb->ip_summed == CHECKSUM_PARTIAL)) +--- a/arch/arm/boot/dts/qcom-ipq4019.dtsi ++++ b/arch/arm/boot/dts/qcom-ipq4019.dtsi +@@ -404,8 +404,7 @@ + qcom,page-mode = <0>; + qcom,rx_head_buf_size = <1540>; + qcom,mdio_supported; +- qcom,poll_required = <1>; +- qcom,num_gmac = <2>; ++ qcom,num_gmac = <1>; + interrupts = <0 65 IRQ_TYPE_EDGE_RISING + 0 66 IRQ_TYPE_EDGE_RISING + 0 67 IRQ_TYPE_EDGE_RISING +@@ -443,7 +442,7 @@ + + gmac0: gmac0 { + local-mac-address = [00 00 00 00 00 00]; +- vlan_tag = <1 0x1f>; ++ vlan_tag = <1 0x3f>; + }; + + gmac1: gmac1 { diff --git a/target/linux/ipq40xx/patches-4.9/820-qcom-ipq4019-Add-IPQ4019-USB-HS-SS-PHY-drivers.patch b/target/linux/ipq40xx/patches-4.9/820-qcom-ipq4019-Add-IPQ4019-USB-HS-SS-PHY-drivers.patch new file mode 100644 index 000000000..3636c5ca0 --- /dev/null +++ b/target/linux/ipq40xx/patches-4.9/820-qcom-ipq4019-Add-IPQ4019-USB-HS-SS-PHY-drivers.patch @@ -0,0 +1,429 @@ +From e73682ec4455c34f3f3edc7f40d90ed297521012 Mon Sep 17 00:00:00 2001 +From: Senthilkumar N L +Date: Tue, 6 Jan 2015 12:52:23 +0530 +Subject: [PATCH] qcom: ipq4019: Add IPQ4019 USB HS/SS PHY drivers + +These drivers handles control and configuration of the HS +and SS USB PHY transceivers. 
+ +Signed-off-by: Senthilkumar N L +Signed-off-by: Christian Lamparter + +--- +Changed: + - replaced spaces with tabs + - remove emulation and host variables +--- + drivers/usb/phy/Kconfig | 11 ++ + drivers/usb/phy/Makefile | 2 + + drivers/usb/phy/phy-qca-baldur.c | 233 +++++++++++++++++++++++++++++++++++++++ + drivers/usb/phy/phy-qca-uniphy.c | 141 +++++++++++++++++++++++ + 4 files changed, 387 insertions(+) + create mode 100644 drivers/usb/phy/phy-qca-baldur.c + create mode 100644 drivers/usb/phy/phy-qca-uniphy.c + +--- a/drivers/usb/phy/Kconfig ++++ b/drivers/usb/phy/Kconfig +@@ -194,6 +194,17 @@ config USB_MXS_PHY + + MXS Phy is used by some of the i.MX SoCs, for example imx23/28/6x. + ++config USB_IPQ4019_PHY ++ tristate "IPQ4019 PHY wrappers support" ++ depends on (USB || USB_GADGET) && ARCH_QCOM ++ select USB_PHY ++ help ++ Enable this to support the USB PHY transceivers on QCA961x chips. ++ It handles PHY initialization, clock management required after ++ resetting the hardware and power management. ++ This driver is required even for peripheral only or host only ++ mode configurations. ++ + config USB_ULPI + bool "Generic ULPI Transceiver Driver" + depends on ARM || ARM64 +--- a/drivers/usb/phy/Makefile ++++ b/drivers/usb/phy/Makefile +@@ -21,6 +21,8 @@ obj-$(CONFIG_USB_GPIO_VBUS) += phy-gpio + obj-$(CONFIG_USB_ISP1301) += phy-isp1301.o + obj-$(CONFIG_USB_MSM_OTG) += phy-msm-usb.o + obj-$(CONFIG_USB_QCOM_8X16_PHY) += phy-qcom-8x16-usb.o ++obj-$(CONFIG_USB_IPQ4019_PHY) += phy-qca-baldur.o ++obj-$(CONFIG_USB_IPQ4019_PHY) += phy-qca-uniphy.o + obj-$(CONFIG_USB_MV_OTG) += phy-mv-usb.o + obj-$(CONFIG_USB_MXS_PHY) += phy-mxs-usb.o + obj-$(CONFIG_USB_ULPI) += phy-ulpi.o +--- /dev/null ++++ b/drivers/usb/phy/phy-qca-baldur.c +@@ -0,0 +1,233 @@ ++/* Copyright (c) 2015, The Linux Foundation. All rights reserved. 
++ * ++ * Permission to use, copy, modify, and/or distribute this software for any ++ * purpose with or without fee is hereby granted, provided that the above ++ * copyright notice and this permission notice appear in all copies. ++ * ++ * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES ++ * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF ++ * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ++ * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES ++ * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN ++ * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF ++ * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. ++ * ++ */ ++ ++#include ++#include ++#include ++#include ++#include ++#include ++#include ++#include ++#include ++#include ++ ++/** ++ * USB Hardware registers ++ */ ++#define PHY_CTRL0_ADDR 0x000 ++#define PHY_CTRL1_ADDR 0x004 ++#define PHY_CTRL2_ADDR 0x008 ++#define PHY_CTRL3_ADDR 0x00C ++#define PHY_CTRL4_ADDR 0x010 ++#define PHY_MISC_ADDR 0x024 ++#define PHY_IPG_ADDR 0x030 ++ ++#define PHY_CTRL0_VAL 0xA4600015 ++#define PHY_CTRL1_VAL 0x09500000 ++#define PHY_CTRL2_VAL 0x00058180 ++#define PHY_CTRL3_VAL 0x6DB6DCD6 ++#define PHY_CTRL4_VAL 0x836DB6DB ++#define PHY_MISC_VAL 0x3803FB0C ++#define PHY_IPG_VAL 0x47323232 ++ ++#define USB30_HS_PHY_HOST_MODE (0x01 << 21) ++#define USB20_HS_PHY_HOST_MODE (0x01 << 5) ++ ++/* used to differentiate between USB3 HS and USB2 HS PHY */ ++struct qca_baldur_hs_data { ++ unsigned int usb3_hs_phy; ++ unsigned int phy_config_offset; ++}; ++ ++struct qca_baldur_hs_phy { ++ struct device *dev; ++ struct usb_phy phy; ++ ++ void __iomem *base; ++ void __iomem *qscratch_base; ++ ++ struct reset_control *por_rst; ++ struct reset_control *srif_rst; ++ ++ const struct qca_baldur_hs_data *data; ++}; ++ ++#define phy_to_dw_phy(x) container_of((x), struct qca_baldur_hs_phy, phy) ++ ++static int 
qca_baldur_phy_read(struct usb_phy *x, u32 reg) ++{ ++ struct qca_baldur_hs_phy *phy = phy_to_dw_phy(x); ++ ++ return readl(phy->base + reg); ++} ++ ++static int qca_baldur_phy_write(struct usb_phy *x, u32 val, u32 reg) ++{ ++ struct qca_baldur_hs_phy *phy = phy_to_dw_phy(x); ++ ++ writel(val, phy->base + reg); ++ return 0; ++} ++ ++static int qca_baldur_hs_phy_init(struct usb_phy *x) ++{ ++ struct qca_baldur_hs_phy *phy = phy_to_dw_phy(x); ++ ++ /* assert HS PHY POR reset */ ++ reset_control_assert(phy->por_rst); ++ msleep(10); ++ ++ /* assert HS PHY SRIF reset */ ++ reset_control_assert(phy->srif_rst); ++ msleep(10); ++ ++ /* deassert HS PHY SRIF reset and program HS PHY registers */ ++ reset_control_deassert(phy->srif_rst); ++ msleep(10); ++ ++ /* perform PHY register writes */ ++ writel(PHY_CTRL0_VAL, phy->base + PHY_CTRL0_ADDR); ++ writel(PHY_CTRL1_VAL, phy->base + PHY_CTRL1_ADDR); ++ writel(PHY_CTRL2_VAL, phy->base + PHY_CTRL2_ADDR); ++ writel(PHY_CTRL3_VAL, phy->base + PHY_CTRL3_ADDR); ++ writel(PHY_CTRL4_VAL, phy->base + PHY_CTRL4_ADDR); ++ writel(PHY_MISC_VAL, phy->base + PHY_MISC_ADDR); ++ writel(PHY_IPG_VAL, phy->base + PHY_IPG_ADDR); ++ ++ msleep(10); ++ ++ /* de-assert USB3 HS PHY POR reset */ ++ reset_control_deassert(phy->por_rst); ++ ++ return 0; ++} ++ ++static int qca_baldur_hs_get_resources(struct qca_baldur_hs_phy *phy) ++{ ++ struct platform_device *pdev = to_platform_device(phy->dev); ++ struct resource *res; ++ ++ res = platform_get_resource(pdev, IORESOURCE_MEM, 0); ++ phy->base = devm_ioremap_resource(phy->dev, res); ++ if (IS_ERR(phy->base)) ++ return PTR_ERR(phy->base); ++ ++ phy->por_rst = devm_reset_control_get(phy->dev, "por_rst"); ++ if (IS_ERR(phy->por_rst)) ++ return PTR_ERR(phy->por_rst); ++ ++ phy->srif_rst = devm_reset_control_get(phy->dev, "srif_rst"); ++ if (IS_ERR(phy->srif_rst)) ++ return PTR_ERR(phy->srif_rst); ++ ++ return 0; ++} ++ ++static void qca_baldur_hs_put_resources(struct qca_baldur_hs_phy *phy) ++{ ++ 
reset_control_assert(phy->srif_rst); ++ reset_control_assert(phy->por_rst); ++} ++ ++static int qca_baldur_hs_remove(struct platform_device *pdev) ++{ ++ struct qca_baldur_hs_phy *phy = platform_get_drvdata(pdev); ++ ++ usb_remove_phy(&phy->phy); ++ return 0; ++} ++ ++static void qca_baldur_hs_phy_shutdown(struct usb_phy *x) ++{ ++ struct qca_baldur_hs_phy *phy = phy_to_dw_phy(x); ++ ++ qca_baldur_hs_put_resources(phy); ++} ++ ++static struct usb_phy_io_ops qca_baldur_io_ops = { ++ .read = qca_baldur_phy_read, ++ .write = qca_baldur_phy_write, ++}; ++ ++static const struct qca_baldur_hs_data usb3_hs_data = { ++ .usb3_hs_phy = 1, ++ .phy_config_offset = USB30_HS_PHY_HOST_MODE, ++}; ++ ++static const struct qca_baldur_hs_data usb2_hs_data = { ++ .usb3_hs_phy = 0, ++ .phy_config_offset = USB20_HS_PHY_HOST_MODE, ++}; ++ ++static const struct of_device_id qca_baldur_hs_id_table[] = { ++ { .compatible = "qca,baldur-usb3-hsphy", .data = &usb3_hs_data }, ++ { .compatible = "qca,baldur-usb2-hsphy", .data = &usb2_hs_data }, ++ { /* Sentinel */ } ++}; ++MODULE_DEVICE_TABLE(of, qca_baldur_hs_id_table); ++ ++static int qca_baldur_hs_probe(struct platform_device *pdev) ++{ ++ const struct of_device_id *match; ++ struct qca_baldur_hs_phy *phy; ++ int err; ++ ++ match = of_match_device(qca_baldur_hs_id_table, &pdev->dev); ++ if (!match) ++ return -ENODEV; ++ ++ phy = devm_kzalloc(&pdev->dev, sizeof(*phy), GFP_KERNEL); ++ if (!phy) ++ return -ENOMEM; ++ ++ platform_set_drvdata(pdev, phy); ++ phy->dev = &pdev->dev; ++ ++ phy->data = match->data; ++ ++ err = qca_baldur_hs_get_resources(phy); ++ if (err < 0) { ++ dev_err(&pdev->dev, "failed to request resources: %d\n", err); ++ return err; ++ } ++ ++ phy->phy.dev = phy->dev; ++ phy->phy.label = "qca-baldur-hsphy"; ++ phy->phy.init = qca_baldur_hs_phy_init; ++ phy->phy.shutdown = qca_baldur_hs_phy_shutdown; ++ phy->phy.type = USB_PHY_TYPE_USB2; ++ phy->phy.io_ops = &qca_baldur_io_ops; ++ ++ err = usb_add_phy_dev(&phy->phy); ++ return 
err; ++} ++ ++static struct platform_driver qca_baldur_hs_driver = { ++ .probe = qca_baldur_hs_probe, ++ .remove = qca_baldur_hs_remove, ++ .driver = { ++ .name = "qca-baldur-hsphy", ++ .owner = THIS_MODULE, ++ .of_match_table = qca_baldur_hs_id_table, ++ }, ++}; ++ ++module_platform_driver(qca_baldur_hs_driver); ++ ++MODULE_ALIAS("platform:qca-baldur-hsphy"); ++MODULE_LICENSE("Dual BSD/GPL"); ++MODULE_DESCRIPTION("USB3 QCA BALDUR HSPHY driver"); +--- /dev/null ++++ b/drivers/usb/phy/phy-qca-uniphy.c +@@ -0,0 +1,135 @@ ++/* Copyright (c) 2015, The Linux Foundation. All rights reserved. ++ * ++ * Permission to use, copy, modify, and/or distribute this software for any ++ * purpose with or without fee is hereby granted, provided that the above ++ * copyright notice and this permission notice appear in all copies. ++ * ++ * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES ++ * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF ++ * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ++ * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES ++ * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN ++ * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF ++ * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. 
++ *
++ */
++
++#include <linux/delay.h>
++#include <linux/err.h>
++#include <linux/io.h>
++#include <linux/kernel.h>
++#include <linux/module.h>
++#include <linux/of.h>
++#include <linux/platform_device.h>
++#include <linux/reset.h>
++#include <linux/slab.h>
++#include <linux/usb/phy.h>
++
++struct qca_uni_ss_phy {
++ struct usb_phy phy;
++ struct device *dev;
++
++ void __iomem *base;
++
++ struct reset_control *por_rst;
++};
++
++#define phy_to_dw_phy(x) container_of((x), struct qca_uni_ss_phy, phy)
++
++static void qca_uni_ss_phy_shutdown(struct usb_phy *x)
++{
++ struct qca_uni_ss_phy *phy = phy_to_dw_phy(x);
++
++ /* assert SS PHY POR reset */
++ reset_control_assert(phy->por_rst);
++}
++
++static int qca_uni_ss_phy_init(struct usb_phy *x)
++{
++ struct qca_uni_ss_phy *phy = phy_to_dw_phy(x);
++
++ /* assert SS PHY POR reset */
++ reset_control_assert(phy->por_rst);
++
++ msleep(20);
++
++ /* deassert SS PHY POR reset */
++ reset_control_deassert(phy->por_rst);
++
++ return 0;
++}
++
++static int qca_uni_ss_get_resources(struct platform_device *pdev,
++ struct qca_uni_ss_phy *phy)
++{
++ struct resource *res;
++
++ res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
++ phy->base = devm_ioremap_resource(phy->dev, res);
++ if (IS_ERR(phy->base))
++ return PTR_ERR(phy->base);
++
++ phy->por_rst = devm_reset_control_get(phy->dev, "por_rst");
++ if (IS_ERR(phy->por_rst))
++ return PTR_ERR(phy->por_rst);
++
++ return 0;
++}
++
++static int qca_uni_ss_remove(struct platform_device *pdev)
++{
++ struct qca_uni_ss_phy *phy = platform_get_drvdata(pdev);
++
++ usb_remove_phy(&phy->phy);
++ return 0;
++}
++
++static const struct of_device_id qca_uni_ss_id_table[] = {
++ { .compatible = "qca,uni-ssphy" },
++ { /* Sentinel */ }
++};
++MODULE_DEVICE_TABLE(of, qca_uni_ss_id_table);
++
++static int qca_uni_ss_probe(struct platform_device *pdev)
++{
++ struct qca_uni_ss_phy *phy;
++ int ret;
++
++ phy = devm_kzalloc(&pdev->dev, sizeof(*phy), GFP_KERNEL);
++ if (!phy)
++ return -ENOMEM;
++
++ platform_set_drvdata(pdev, phy);
++ phy->dev = &pdev->dev;
++
++ ret = qca_uni_ss_get_resources(pdev, phy);
++ if (ret < 0) {
++ 
dev_err(&pdev->dev, "failed to request resources: %d\n", ret); ++ return ret; ++ } ++ ++ phy->phy.dev = phy->dev; ++ phy->phy.label = "qca-uni-ssphy"; ++ phy->phy.init = qca_uni_ss_phy_init; ++ phy->phy.shutdown = qca_uni_ss_phy_shutdown; ++ phy->phy.type = USB_PHY_TYPE_USB3; ++ ++ ret = usb_add_phy_dev(&phy->phy); ++ return ret; ++} ++ ++static struct platform_driver qca_uni_ss_driver = { ++ .probe = qca_uni_ss_probe, ++ .remove = qca_uni_ss_remove, ++ .driver = { ++ .name = "qca-uni-ssphy", ++ .owner = THIS_MODULE, ++ .of_match_table = qca_uni_ss_id_table, ++ }, ++}; ++ ++module_platform_driver(qca_uni_ss_driver); ++ ++MODULE_ALIAS("platform:qca-uni-ssphy"); ++MODULE_LICENSE("Dual BSD/GPL"); ++MODULE_DESCRIPTION("USB3 QCA UNI SSPHY driver"); diff --git a/target/linux/ipq40xx/patches-4.9/830-usb-dwc3-register-qca-ipq4019-dwc3-in-dwc3-of-simple.patch b/target/linux/ipq40xx/patches-4.9/830-usb-dwc3-register-qca-ipq4019-dwc3-in-dwc3-of-simple.patch new file mode 100644 index 000000000..18591b205 --- /dev/null +++ b/target/linux/ipq40xx/patches-4.9/830-usb-dwc3-register-qca-ipq4019-dwc3-in-dwc3-of-simple.patch @@ -0,0 +1,25 @@ +From 08c18ab774368feb610d1eb952957bb1bb35129f Mon Sep 17 00:00:00 2001 +From: Christian Lamparter +Date: Sat, 19 Nov 2016 00:52:35 +0100 +Subject: [PATCH 37/38] usb: dwc3: register qca,ipq4019-dwc3 in dwc3-of-simple + +For host mode, the dwc3 found in the IPQ4019 can be driven +by the dwc3-of-simple module. It will get more tricky for +OTG since they'll need to enable VBUS and reconfigure the +registers. 
+ +Signed-off-by: Christian Lamparter +--- + drivers/usb/dwc3/dwc3-of-simple.c | 1 + + 1 file changed, 1 insertion(+) + +--- a/drivers/usb/dwc3/dwc3-of-simple.c ++++ b/drivers/usb/dwc3/dwc3-of-simple.c +@@ -174,6 +174,7 @@ static const struct dev_pm_ops dwc3_of_s + + static const struct of_device_id of_dwc3_simple_match[] = { + { .compatible = "qcom,dwc3" }, ++ { .compatible = "qca,ipq4019-dwc3" }, + { .compatible = "rockchip,rk3399-dwc3" }, + { .compatible = "xlnx,zynqmp-dwc3" }, + { .compatible = "cavium,octeon-7130-usb-uctl" }, diff --git a/target/linux/ipq40xx/patches-4.9/850-soc-add-qualcomm-syscon.patch b/target/linux/ipq40xx/patches-4.9/850-soc-add-qualcomm-syscon.patch new file mode 100644 index 000000000..beb85e7ef --- /dev/null +++ b/target/linux/ipq40xx/patches-4.9/850-soc-add-qualcomm-syscon.patch @@ -0,0 +1,177 @@ +From: Christian Lamparter +Subject: SoC: add qualcomm syscon +--- a/drivers/soc/qcom/Makefile ++++ b/drivers/soc/qcom/Makefile +@@ -7,3 +7,4 @@ obj-$(CONFIG_QCOM_SMEM_STATE) += smem_st + obj-$(CONFIG_QCOM_SMP2P) += smp2p.o + obj-$(CONFIG_QCOM_SMSM) += smsm.o + obj-$(CONFIG_QCOM_WCNSS_CTRL) += wcnss_ctrl.o ++obj-$(CONFIG_QCOM_TCSR) += qcom_tcsr.o +--- a/drivers/soc/qcom/Kconfig ++++ b/drivers/soc/qcom/Kconfig +@@ -70,6 +70,13 @@ config QCOM_SMSM + Say yes here to support the Qualcomm Shared Memory State Machine. + The state machine is represented by bits in shared memory. + ++config QCOM_TCSR ++ tristate "QCOM Top Control and Status Registers" ++ depends on ARCH_QCOM ++ help ++ Say y here to enable TCSR support. The TCSR provides control ++ functions for various peripherals. ++ + config QCOM_WCNSS_CTRL + tristate "Qualcomm WCNSS control driver" + depends on QCOM_SMD +--- /dev/null ++++ b/drivers/soc/qcom/qcom_tcsr.c +@@ -0,0 +1,98 @@ ++/* ++ * Copyright (c) 2014, The Linux foundation. All rights reserved. 
++ *
++ * This program is free software; you can redistribute it and/or modify
++ * it under the terms of the GNU General Public License version 2 and
++ * only version 2 as published by the Free Software Foundation.
++ *
++ * This program is distributed in the hope that it will be useful,
++ * but WITHOUT ANY WARRANTY; without even the implied warranty of
++ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
++ * GNU General Public License for more details.
++ */
++
++#include <linux/clk.h>
++#include <linux/err.h>
++#include <linux/io.h>
++#include <linux/module.h>
++#include <linux/of.h>
++#include <linux/of_platform.h>
++#include <linux/platform_device.h>
++
++#define TCSR_USB_PORT_SEL 0xb0
++#define TCSR_USB_HSPHY_CONFIG 0xC
++
++#define TCSR_ESS_INTERFACE_SEL_OFFSET 0x0
++#define TCSR_ESS_INTERFACE_SEL_MASK 0xf
++
++#define TCSR_WIFI0_GLB_CFG_OFFSET 0x0
++#define TCSR_WIFI1_GLB_CFG_OFFSET 0x4
++#define TCSR_PNOC_SNOC_MEMTYPE_M0_M2 0x4
++
++static int tcsr_probe(struct platform_device *pdev)
++{
++ struct resource *res;
++ const struct device_node *node = pdev->dev.of_node;
++ void __iomem *base;
++ u32 val;
++
++ res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
++ base = devm_ioremap_resource(&pdev->dev, res);
++ if (IS_ERR(base))
++ return PTR_ERR(base);
++
++ if (!of_property_read_u32(node, "qcom,usb-ctrl-select", &val)) {
++ dev_info(&pdev->dev, "setting usb port select = %d\n", val);
++ writel(val, base + TCSR_USB_PORT_SEL);
++ }
++
++ if (!of_property_read_u32(node, "qcom,usb-hsphy-mode-select", &val)) {
++ dev_info(&pdev->dev, "setting usb hs phy mode select = %x\n", val);
++ writel(val, base + TCSR_USB_HSPHY_CONFIG);
++ }
++
++ if (!of_property_read_u32(node, "qcom,ess-interface-select", &val)) {
++ u32 tmp = 0;
++ dev_info(&pdev->dev, "setting ess interface select = %x\n", val);
++ tmp = readl(base + TCSR_ESS_INTERFACE_SEL_OFFSET);
++ tmp = tmp & (~TCSR_ESS_INTERFACE_SEL_MASK);
++ tmp = tmp | (val&TCSR_ESS_INTERFACE_SEL_MASK);
++ writel(tmp, base + TCSR_ESS_INTERFACE_SEL_OFFSET);
++ }
++
++ if (!of_property_read_u32(node, "qcom,wifi_glb_cfg", &val)) {
++ 
dev_info(&pdev->dev, "setting wifi_glb_cfg = %x\n", val); ++ writel(val, base + TCSR_WIFI0_GLB_CFG_OFFSET); ++ writel(val, base + TCSR_WIFI1_GLB_CFG_OFFSET); ++ } ++ ++ if (!of_property_read_u32(node, "qcom,wifi_noc_memtype_m0_m2", &val)) { ++ dev_info(&pdev->dev, ++ "setting wifi_noc_memtype_m0_m2 = %x\n", val); ++ writel(val, base + TCSR_PNOC_SNOC_MEMTYPE_M0_M2); ++ } ++ ++ return 0; ++} ++ ++static const struct of_device_id tcsr_dt_match[] = { ++ { .compatible = "qcom,tcsr", }, ++ { }, ++}; ++ ++MODULE_DEVICE_TABLE(of, tcsr_dt_match); ++ ++static struct platform_driver tcsr_driver = { ++ .driver = { ++ .name = "tcsr", ++ .owner = THIS_MODULE, ++ .of_match_table = tcsr_dt_match, ++ }, ++ .probe = tcsr_probe, ++}; ++ ++module_platform_driver(tcsr_driver); ++ ++MODULE_AUTHOR("Andy Gross "); ++MODULE_DESCRIPTION("QCOM TCSR driver"); ++MODULE_LICENSE("GPL v2"); +--- /dev/null ++++ b/include/dt-bindings/soc/qcom,tcsr.h +@@ -0,0 +1,48 @@ ++/* Copyright (c) 2014, The Linux Foundation. All rights reserved. ++ * ++ * This program is free software; you can redistribute it and/or modify ++ * it under the terms of the GNU General Public License version 2 and ++ * only version 2 as published by the Free Software Foundation. ++ * ++ * This program is distributed in the hope that it will be useful, ++ * but WITHOUT ANY WARRANTY; without even the implied warranty of ++ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the ++ * GNU General Public License for more details. 
++ */ ++#ifndef __DT_BINDINGS_QCOM_TCSR_H ++#define __DT_BINDINGS_QCOM_TCSR_H ++ ++#define TCSR_USB_SELECT_USB3_P0 0x1 ++#define TCSR_USB_SELECT_USB3_P1 0x2 ++#define TCSR_USB_SELECT_USB3_DUAL 0x3 ++ ++/* IPQ40xx HS PHY Mode Select */ ++#define TCSR_USB_HSPHY_HOST_MODE 0x00E700E7 ++#define TCSR_USB_HSPHY_DEVICE_MODE 0x00C700E7 ++ ++/* IPQ40xx ess interface mode select */ ++#define TCSR_ESS_PSGMII 0 ++#define TCSR_ESS_PSGMII_RGMII5 1 ++#define TCSR_ESS_PSGMII_RMII0 2 ++#define TCSR_ESS_PSGMII_RMII1 4 ++#define TCSR_ESS_PSGMII_RMII0_RMII1 6 ++#define TCSR_ESS_PSGMII_RGMII4 9 ++ ++/* ++ * IPQ40xx WiFi Global Config ++ * Bit 30:AXID_EN ++ * Enable AXI master bus Axid translating to confirm all txn submitted by order ++ * Bit 24: Use locally generated socslv_wxi_bvalid ++ * 1: use locally generate socslv_wxi_bvalid for performance. ++ * 0: use SNOC socslv_wxi_bvalid. ++ */ ++#define TCSR_WIFI_GLB_CFG 0x41000000 ++ ++/* IPQ40xx MEM_TYPE_SEL_M0_M2 Select Bit 26:24 - 2 NORMAL */ ++#define TCSR_WIFI_NOC_MEMTYPE_M0_M2 0x02222222 ++ ++/* TCSR A/B REG */ ++#define IPQ806X_TCSR_REG_A_ADM_CRCI_MUX_SEL 0 ++#define IPQ806X_TCSR_REG_B_ADM_CRCI_MUX_SEL 1 ++ ++#endif diff --git a/target/linux/ipq40xx/patches-4.9/852-ipq4019-pinctrl-Updated-various-Pin-definitions.patch b/target/linux/ipq40xx/patches-4.9/852-ipq4019-pinctrl-Updated-various-Pin-definitions.patch new file mode 100644 index 000000000..d0a6a26a5 --- /dev/null +++ b/target/linux/ipq40xx/patches-4.9/852-ipq4019-pinctrl-Updated-various-Pin-definitions.patch @@ -0,0 +1,1328 @@ +From fc6cf61517b8b4ab4678659936fc7572f699d6e7 Mon Sep 17 00:00:00 2001 +From: Ram Chandra Jangir +Date: Tue, 28 Mar 2017 14:00:00 +0530 +Subject: [PATCH] ipq4019: pinctrl: Updated various Pin definitions + +Populate default values for various GPIO functions + +Signed-off-by: Ram Chandra Jangir +--- + drivers/pinctrl/qcom/pinctrl-ipq4019.c | 1189 +++++++++++++++++++++++++++++--- + 1 file changed, 1111 insertions(+), 78 deletions(-) + +--- 
a/drivers/pinctrl/qcom/pinctrl-ipq4019.c ++++ b/drivers/pinctrl/qcom/pinctrl-ipq4019.c +@@ -276,16 +276,531 @@ DECLARE_QCA_GPIO_PINS(99); + + + enum ipq4019_functions { ++ qca_mux_rmii0_refclk, ++ qca_mux_wifi0_rfsilient0, ++ qca_mux_wifi1_rfsilient0, ++ qca_mux_smart2, ++ qca_mux_led4, ++ qca_mux_wifi0_cal, ++ qca_mux_wifi1_cal, ++ qca_mux_wifi_wci0, ++ qca_mux_rmii0_dv, ++ qca_mux_wifi_wci1, ++ qca_mux_rmii1_refclk, ++ qca_mux_blsp_spi1, ++ qca_mux_led5, ++ qca_mux_rmii10, ++ qca_mux_led6, ++ qca_mux_rmii11, ++ qca_mux_led7, ++ qca_mux_rmii1_dv, ++ qca_mux_led8, ++ qca_mux_rmii1_tx, ++ qca_mux_aud_pin, ++ qca_mux_led9, ++ qca_mux_rmii1_rx, ++ qca_mux_led10, ++ qca_mux_wifi0_rfsilient1, ++ qca_mux_wifi1_rfsilient1, ++ qca_mux_led11, ++ qca_mux_boot7, ++ qca_mux_qpic_pad, ++ qca_mux_pcie_clk, ++ qca_mux_tm_clk0, ++ qca_mux_wifi00, ++ qca_mux_wifi10, ++ qca_mux_mdio1, ++ qca_mux_prng_rosc, ++ qca_mux_dbg_out, ++ qca_mux_tm0, ++ qca_mux_wifi01, ++ qca_mux_wifi11, ++ qca_mux_atest_char3, ++ qca_mux_pmu0, ++ qca_mux_boot8, ++ qca_mux_tm1, ++ qca_mux_atest_char2, ++ qca_mux_pmu1, ++ qca_mux_boot9, ++ qca_mux_tm2, ++ qca_mux_atest_char1, ++ qca_mux_tm_ack, ++ qca_mux_wifi03, ++ qca_mux_wifi13, ++ qca_mux_qpic_pad4, ++ qca_mux_atest_char0, ++ qca_mux_tm3, ++ qca_mux_wifi02, ++ qca_mux_wifi12, ++ qca_mux_qpic_pad5, ++ qca_mux_smart3, ++ qca_mux_wcss0_dbg14, ++ qca_mux_tm4, ++ qca_mux_wifi04, ++ qca_mux_wifi14, ++ qca_mux_qpic_pad6, ++ qca_mux_wcss0_dbg15, ++ qca_mux_qdss_tracectl_a, ++ qca_mux_boot18, ++ qca_mux_tm5, ++ qca_mux_qpic_pad7, ++ qca_mux_atest_char, ++ qca_mux_wcss0_dbg4, ++ qca_mux_qdss_traceclk_a, ++ qca_mux_boot19, ++ qca_mux_tm6, ++ qca_mux_wcss0_dbg5, ++ qca_mux_qdss_cti_trig_out_a0, ++ qca_mux_boot14, ++ qca_mux_tm7, ++ qca_mux_chip_rst, ++ qca_mux_wcss0_dbg6, ++ qca_mux_qdss_cti_trig_out_b0, ++ qca_mux_boot11, ++ qca_mux_tm8, ++ qca_mux_wcss0_dbg7, ++ qca_mux_wcss1_dbg7, ++ qca_mux_boot20, ++ qca_mux_tm9, ++ qca_mux_qpic_pad1, ++ qca_mux_wcss0_dbg8, ++ 
qca_mux_wcss1_dbg8, ++ qca_mux_qpic_pad2, ++ qca_mux_wcss0_dbg9, ++ qca_mux_wcss1_dbg9, ++ qca_mux_qpic_pad3, ++ qca_mux_wcss0_dbg10, ++ qca_mux_wcss1_dbg10, ++ qca_mux_qpic_pad0, ++ qca_mux_wcss0_dbg11, ++ qca_mux_wcss1_dbg11, ++ qca_mux_qpic_pad8, ++ qca_mux_wcss0_dbg12, ++ qca_mux_wcss1_dbg12, ++ qca_mux_wifi034, ++ qca_mux_wifi134, ++ qca_mux_jtag_tdi, + qca_mux_gpio, ++ qca_mux_i2s_rx_bclk, ++ qca_mux_jtag_tck, ++ qca_mux_i2s_rx_fsync, ++ qca_mux_jtag_tms, ++ qca_mux_i2s_rxd, ++ qca_mux_smart0, ++ qca_mux_jtag_tdo, ++ qca_mux_jtag_rst, ++ qca_mux_jtag_trst, ++ qca_mux_mdio0, ++ qca_mux_wcss0_dbg18, ++ qca_mux_wcss1_dbg18, ++ qca_mux_qdss_tracedata_a, ++ qca_mux_mdc, ++ qca_mux_wcss0_dbg19, ++ qca_mux_wcss1_dbg19, + qca_mux_blsp_uart1, ++ qca_mux_wifi0_uart, ++ qca_mux_wifi1_uart, ++ qca_mux_smart1, ++ qca_mux_wcss0_dbg20, ++ qca_mux_wcss1_dbg20, ++ qca_mux_wifi0_uart0, ++ qca_mux_wifi1_uart0, ++ qca_mux_wcss0_dbg21, ++ qca_mux_wcss1_dbg21, + qca_mux_blsp_i2c0, ++ qca_mux_wcss0_dbg22, ++ qca_mux_wcss1_dbg22, ++ qca_mux_wcss0_dbg23, ++ qca_mux_wcss1_dbg23, ++ qca_mux_blsp_spi0, + qca_mux_blsp_i2c1, ++ qca_mux_wcss0_dbg24, ++ qca_mux_wcss1_dbg24, ++ qca_mux_wcss0_dbg25, ++ qca_mux_wcss1_dbg25, ++ qca_mux_wcss0_dbg26, ++ qca_mux_wcss1_dbg26, ++ qca_mux_wcss0_dbg, ++ qca_mux_wcss1_dbg, + qca_mux_blsp_uart0, +- qca_mux_blsp_spi1, +- qca_mux_blsp_spi0, ++ qca_mux_led0, ++ qca_mux_wcss0_dbg28, ++ qca_mux_wcss1_dbg28, ++ qca_mux_led1, ++ qca_mux_wcss0_dbg29, ++ qca_mux_wcss1_dbg29, ++ qca_mux_wifi0_uart1, ++ qca_mux_wifi1_uart1, ++ qca_mux_wcss0_dbg30, ++ qca_mux_wcss1_dbg30, ++ qca_mux_wcss0_dbg31, ++ qca_mux_wcss1_dbg31, ++ qca_mux_i2s_rx_mclk, ++ qca_mux_wcss0_dbg16, ++ qca_mux_wcss1_dbg16, ++ qca_mux_wcss0_dbg17, ++ qca_mux_wcss1_dbg17, ++ qca_mux_rgmii0, ++ qca_mux_sdio0, ++ qca_mux_rgmii1, ++ qca_mux_sdio1, ++ qca_mux_rgmii2, ++ qca_mux_i2s_tx_mclk, ++ qca_mux_sdio2, ++ qca_mux_rgmii3, ++ qca_mux_i2s_tx_bclk, ++ qca_mux_sdio3, ++ qca_mux_rgmii_rx, ++ 
qca_mux_i2s_tx_fsync, ++ qca_mux_sdio_clk, ++ qca_mux_rgmii_txc, ++ qca_mux_i2s_td1, ++ qca_mux_sdio_cmd, ++ qca_mux_i2s_td2, ++ qca_mux_sdio4, ++ qca_mux_i2s_td3, ++ qca_mux_sdio5, ++ qca_mux_audio_pwm0, ++ qca_mux_sdio6, ++ qca_mux_audio_pwm1, ++ qca_mux_wcss0_dbg27, ++ qca_mux_wcss1_dbg27, ++ qca_mux_sdio7, ++ qca_mux_rgmii_rxc, ++ qca_mux_audio_pwm2, ++ qca_mux_rgmii_tx, ++ qca_mux_audio_pwm3, ++ qca_mux_boot2, ++ qca_mux_i2s_spdif_in, ++ qca_mux_i2s_spdif_out, ++ qca_mux_rmii00, ++ qca_mux_led2, ++ qca_mux_rmii01, ++ qca_mux_wifi0_wci, ++ qca_mux_wifi1_wci, ++ qca_mux_boot4, ++ qca_mux_rmii0_tx, ++ qca_mux_boot5, ++ qca_mux_rmii0_rx, ++ qca_mux_pcie_clk1, ++ qca_mux_led3, ++ qca_mux_sdio_cd, + qca_mux_NA, + }; + ++static const char * const rmii0_refclk_groups[] = { ++ "gpio40", ++}; ++static const char * const wifi0_rfsilient0_groups[] = { ++ "gpio40", ++}; ++static const char * const wifi1_rfsilient0_groups[] = { ++ "gpio40", ++}; ++static const char * const smart2_groups[] = { ++ "gpio40", "gpio41", "gpio48", "gpio49", ++}; ++static const char * const led4_groups[] = { ++ "gpio40", ++}; ++static const char * const wifi0_cal_groups[] = { ++ "gpio41", "gpio51", ++}; ++static const char * const wifi1_cal_groups[] = { ++ "gpio41", "gpio51", ++}; ++static const char * const wifi_wci0_groups[] = { ++ "gpio42", ++}; ++static const char * const rmii0_dv_groups[] = { ++ "gpio43", ++}; ++static const char * const wifi_wci1_groups[] = { ++ "gpio43", ++}; ++static const char * const rmii1_refclk_groups[] = { ++ "gpio44", ++}; ++static const char * const blsp_spi1_groups[] = { ++ "gpio44", "gpio45", "gpio46", "gpio47", ++}; ++static const char * const led5_groups[] = { ++ "gpio44", ++}; ++static const char * const rmii10_groups[] = { ++ "gpio45", "gpio50", ++}; ++static const char * const led6_groups[] = { ++ "gpio45", ++}; ++static const char * const rmii11_groups[] = { ++ "gpio46", "gpio51", ++}; ++static const char * const led7_groups[] = { ++ "gpio46", ++}; ++static 
const char * const rmii1_dv_groups[] = { ++ "gpio47", ++}; ++static const char * const led8_groups[] = { ++ "gpio47", ++}; ++static const char * const rmii1_tx_groups[] = { ++ "gpio48", ++}; ++static const char * const aud_pin_groups[] = { ++ "gpio48", "gpio49", "gpio50", "gpio51", ++}; ++static const char * const led9_groups[] = { ++ "gpio48", ++}; ++static const char * const rmii1_rx_groups[] = { ++ "gpio49", ++}; ++static const char * const led10_groups[] = { ++ "gpio49", ++}; ++static const char * const wifi0_rfsilient1_groups[] = { ++ "gpio50", ++}; ++static const char * const wifi1_rfsilient1_groups[] = { ++ "gpio50", ++}; ++static const char * const led11_groups[] = { ++ "gpio50", ++}; ++static const char * const boot7_groups[] = { ++ "gpio51", ++}; ++static const char * const qpic_pad_groups[] = { ++ "gpio52", "gpio53", "gpio54", "gpio55", "gpio56", "gpio61", "gpio62", ++ "gpio63", "gpio69", ++}; ++static const char * const pcie_clk_groups[] = { ++ "gpio52", ++}; ++static const char * const tm_clk0_groups[] = { ++ "gpio52", ++}; ++static const char * const wifi00_groups[] = { ++ "gpio52", ++}; ++static const char * const wifi10_groups[] = { ++ "gpio52", ++}; ++static const char * const mdio1_groups[] = { ++ "gpio53", ++}; ++static const char * const prng_rosc_groups[] = { ++ "gpio53", ++}; ++static const char * const dbg_out_groups[] = { ++ "gpio53", ++}; ++static const char * const tm0_groups[] = { ++ "gpio53", ++}; ++static const char * const wifi01_groups[] = { ++ "gpio53", ++}; ++static const char * const wifi11_groups[] = { ++ "gpio53", ++}; ++static const char * const atest_char3_groups[] = { ++ "gpio54", ++}; ++static const char * const pmu0_groups[] = { ++ "gpio54", ++}; ++static const char * const boot8_groups[] = { ++ "gpio54", ++}; ++static const char * const tm1_groups[] = { ++ "gpio54", ++}; ++static const char * const atest_char2_groups[] = { ++ "gpio55", ++}; ++static const char * const pmu1_groups[] = { ++ "gpio55", ++}; ++static const char 
* const boot9_groups[] = { ++ "gpio55", ++}; ++static const char * const tm2_groups[] = { ++ "gpio55", ++}; ++static const char * const atest_char1_groups[] = { ++ "gpio56", ++}; ++static const char * const tm_ack_groups[] = { ++ "gpio56", ++}; ++static const char * const wifi03_groups[] = { ++ "gpio56", ++}; ++static const char * const wifi13_groups[] = { ++ "gpio56", ++}; ++static const char * const qpic_pad4_groups[] = { ++ "gpio57", ++}; ++static const char * const atest_char0_groups[] = { ++ "gpio57", ++}; ++static const char * const tm3_groups[] = { ++ "gpio57", ++}; ++static const char * const wifi02_groups[] = { ++ "gpio57", ++}; ++static const char * const wifi12_groups[] = { ++ "gpio57", ++}; ++static const char * const qpic_pad5_groups[] = { ++ "gpio58", ++}; ++static const char * const smart3_groups[] = { ++ "gpio58", "gpio59", "gpio60", "gpio61", ++}; ++static const char * const wcss0_dbg14_groups[] = { ++ "gpio58", ++}; ++static const char * const tm4_groups[] = { ++ "gpio58", ++}; ++static const char * const wifi04_groups[] = { ++ "gpio58", ++}; ++static const char * const wifi14_groups[] = { ++ "gpio58", ++}; ++static const char * const qpic_pad6_groups[] = { ++ "gpio59", ++}; ++static const char * const wcss0_dbg15_groups[] = { ++ "gpio59", ++}; ++static const char * const qdss_tracectl_a_groups[] = { ++ "gpio59", ++}; ++static const char * const boot18_groups[] = { ++ "gpio59", ++}; ++static const char * const tm5_groups[] = { ++ "gpio59", ++}; ++static const char * const qpic_pad7_groups[] = { ++ "gpio60", ++}; ++static const char * const atest_char_groups[] = { ++ "gpio60", ++}; ++static const char * const wcss0_dbg4_groups[] = { ++ "gpio60", ++}; ++static const char * const qdss_traceclk_a_groups[] = { ++ "gpio60", ++}; ++static const char * const boot19_groups[] = { ++ "gpio60", ++}; ++static const char * const tm6_groups[] = { ++ "gpio60", ++}; ++static const char * const wcss0_dbg5_groups[] = { ++ "gpio61", ++}; ++static const char * const 
qdss_cti_trig_out_a0_groups[] = { ++ "gpio61", ++}; ++static const char * const boot14_groups[] = { ++ "gpio61", ++}; ++static const char * const tm7_groups[] = { ++ "gpio61", ++}; ++static const char * const chip_rst_groups[] = { ++ "gpio62", ++}; ++static const char * const wcss0_dbg6_groups[] = { ++ "gpio62", ++}; ++static const char * const qdss_cti_trig_out_b0_groups[] = { ++ "gpio62", ++}; ++static const char * const boot11_groups[] = { ++ "gpio62", ++}; ++static const char * const tm8_groups[] = { ++ "gpio62", ++}; ++static const char * const wcss0_dbg7_groups[] = { ++ "gpio63", ++}; ++static const char * const wcss1_dbg7_groups[] = { ++ "gpio63", ++}; ++static const char * const boot20_groups[] = { ++ "gpio63", ++}; ++static const char * const tm9_groups[] = { ++ "gpio63", ++}; ++static const char * const qpic_pad1_groups[] = { ++ "gpio64", ++}; ++static const char * const wcss0_dbg8_groups[] = { ++ "gpio64", ++}; ++static const char * const wcss1_dbg8_groups[] = { ++ "gpio64", ++}; ++static const char * const qpic_pad2_groups[] = { ++ "gpio65", ++}; ++static const char * const wcss0_dbg9_groups[] = { ++ "gpio65", ++}; ++static const char * const wcss1_dbg9_groups[] = { ++ "gpio65", ++}; ++static const char * const qpic_pad3_groups[] = { ++ "gpio66", ++}; ++static const char * const wcss0_dbg10_groups[] = { ++ "gpio66", ++}; ++static const char * const wcss1_dbg10_groups[] = { ++ "gpio66", ++}; ++static const char * const qpic_pad0_groups[] = { ++ "gpio67", ++}; ++static const char * const wcss0_dbg11_groups[] = { ++ "gpio67", ++}; ++static const char * const wcss1_dbg11_groups[] = { ++ "gpio67", ++}; ++static const char * const qpic_pad8_groups[] = { ++ "gpio68", ++}; ++static const char * const wcss0_dbg12_groups[] = { ++ "gpio68", ++}; ++static const char * const wcss1_dbg12_groups[] = { ++ "gpio68", ++}; ++static const char * const wifi034_groups[] = { ++ "gpio98", ++}; ++static const char * const wifi134_groups[] = { ++ "gpio98", ++}; ++static const 
char * const jtag_tdi_groups[] = { ++ "gpio0", ++}; + static const char * const gpio_groups[] = { + "gpio0", "gpio1", "gpio2", "gpio3", "gpio4", "gpio5", "gpio6", "gpio7", + "gpio8", "gpio9", "gpio10", "gpio11", "gpio12", "gpio13", "gpio14", +@@ -303,13 +818,103 @@ static const char * const gpio_groups[] + "gpio92", "gpio93", "gpio94", "gpio95", "gpio96", "gpio97", "gpio98", + "gpio99", + }; +- ++static const char * const i2s_rx_bclk_groups[] = { ++ "gpio0", "gpio21", "gpio60", ++}; ++static const char * const jtag_tck_groups[] = { ++ "gpio1", ++}; ++static const char * const i2s_rx_fsync_groups[] = { ++ "gpio1", "gpio22", "gpio61", ++}; ++static const char * const jtag_tms_groups[] = { ++ "gpio2", ++}; ++static const char * const i2s_rxd_groups[] = { ++ "gpio2", "gpio23", "gpio63", ++}; ++static const char * const smart0_groups[] = { ++ "gpio0", "gpio1", "gpio2", "gpio5", "gpio44", "gpio45", "gpio46", ++ "gpio47", ++}; ++static const char * const jtag_tdo_groups[] = { ++ "gpio3", ++}; ++static const char * const jtag_rst_groups[] = { ++ "gpio4", ++}; ++static const char * const jtag_trst_groups[] = { ++ "gpio5", ++}; ++static const char * const mdio0_groups[] = { ++ "gpio6", ++}; ++static const char * const wcss0_dbg18_groups[] = { ++ "gpio6", "gpio22", "gpio39", ++}; ++static const char * const wcss1_dbg18_groups[] = { ++ "gpio6", "gpio22", "gpio39", ++}; ++static const char * const qdss_tracedata_a_groups[] = { ++ "gpio6", "gpio7", "gpio8", "gpio9", "gpio10", "gpio11", "gpio16", ++ "gpio17", "gpio37", "gpio38", "gpio39", "gpio40", "gpio41", "gpio42", ++ "gpio43", ++}; ++static const char * const mdc_groups[] = { ++ "gpio7", "gpio52", ++}; ++static const char * const wcss0_dbg19_groups[] = { ++ "gpio7", "gpio23", "gpio40", ++}; ++static const char * const wcss1_dbg19_groups[] = { ++ "gpio7", "gpio23", "gpio40", ++}; + static const char * const blsp_uart1_groups[] = { + "gpio8", "gpio9", "gpio10", "gpio11", + }; ++static const char * const wifi0_uart_groups[] = { 
++ "gpio8", "gpio9", "gpio11", "gpio19", "gpio62", ++}; ++static const char * const wifi1_uart_groups[] = { ++ "gpio8", "gpio11", "gpio19", "gpio62", "gpio63", ++}; ++static const char * const smart1_groups[] = { ++ "gpio8", "gpio9", "gpio16", "gpio17", "gpio58", "gpio59", "gpio60", ++ "gpio61", ++}; ++static const char * const wcss0_dbg20_groups[] = { ++ "gpio8", "gpio24", "gpio41", ++}; ++static const char * const wcss1_dbg20_groups[] = { ++ "gpio8", "gpio24", "gpio41", ++}; ++static const char * const wifi0_uart0_groups[] = { ++ "gpio9", "gpio10", ++}; ++static const char * const wifi1_uart0_groups[] = { ++ "gpio9", "gpio10", ++}; ++static const char * const wcss0_dbg21_groups[] = { ++ "gpio9", "gpio25", "gpio42", ++}; ++static const char * const wcss1_dbg21_groups[] = { ++ "gpio9", "gpio25", "gpio42", ++}; + static const char * const blsp_i2c0_groups[] = { + "gpio10", "gpio11", "gpio20", "gpio21", "gpio58", "gpio59", + }; ++static const char * const wcss0_dbg22_groups[] = { ++ "gpio10", "gpio26", "gpio43", ++}; ++static const char * const wcss1_dbg22_groups[] = { ++ "gpio10", "gpio26", "gpio43", ++}; ++static const char * const wcss0_dbg23_groups[] = { ++ "gpio11", "gpio27", "gpio44", ++}; ++static const char * const wcss1_dbg23_groups[] = { ++ "gpio11", "gpio27", "gpio44", ++}; + static const char * const blsp_spi0_groups[] = { + "gpio12", "gpio13", "gpio14", "gpio15", "gpio45", + "gpio54", "gpio55", "gpio56", "gpio57", +@@ -317,94 +922,582 @@ static const char * const blsp_spi0_grou + static const char * const blsp_i2c1_groups[] = { + "gpio12", "gpio13", "gpio34", "gpio35", + }; ++static const char * const wcss0_dbg24_groups[] = { ++ "gpio12", "gpio28", "gpio45", ++}; ++static const char * const wcss1_dbg24_groups[] = { ++ "gpio12", "gpio28", "gpio45", ++}; ++static const char * const wcss0_dbg25_groups[] = { ++ "gpio13", "gpio29", "gpio46", ++}; ++static const char * const wcss1_dbg25_groups[] = { ++ "gpio13", "gpio29", "gpio46", ++}; ++static const char * 
const wcss0_dbg26_groups[] = { ++ "gpio14", "gpio30", "gpio47", ++}; ++static const char * const wcss1_dbg26_groups[] = { ++ "gpio14", "gpio30", "gpio47", ++}; ++static const char * const wcss0_dbg_groups[] = { ++ "gpio15", "gpio69", ++}; ++static const char * const wcss1_dbg_groups[] = { ++ "gpio15", ++}; + static const char * const blsp_uart0_groups[] = { + "gpio16", "gpio17", "gpio60", "gpio61", + }; +-static const char * const blsp_spi1_groups[] = { +- "gpio44", "gpio45", "gpio46", "gpio47", ++static const char * const led0_groups[] = { ++ "gpio16", "gpio36", "gpio60", ++}; ++static const char * const wcss0_dbg28_groups[] = { ++ "gpio16", "gpio32", "gpio49", ++}; ++static const char * const wcss1_dbg28_groups[] = { ++ "gpio16", "gpio32", "gpio49", ++}; ++static const char * const led1_groups[] = { ++ "gpio17", "gpio37", "gpio61", ++}; ++static const char * const wcss0_dbg29_groups[] = { ++ "gpio17", "gpio33", "gpio50", ++}; ++static const char * const wcss1_dbg29_groups[] = { ++ "gpio17", "gpio33", "gpio50", ++}; ++static const char * const wifi0_uart1_groups[] = { ++ "gpio18", "gpio63", ++}; ++static const char * const wifi1_uart1_groups[] = { ++ "gpio18", "gpio63", ++}; ++static const char * const wcss0_dbg30_groups[] = { ++ "gpio18", "gpio34", "gpio51", ++}; ++static const char * const wcss1_dbg30_groups[] = { ++ "gpio18", "gpio34", "gpio51", ++}; ++static const char * const wcss0_dbg31_groups[] = { ++ "gpio19", "gpio35", "gpio52", ++}; ++static const char * const wcss1_dbg31_groups[] = { ++ "gpio19", "gpio35", ++}; ++static const char * const i2s_rx_mclk_groups[] = { ++ "gpio20", "gpio58", ++}; ++static const char * const wcss0_dbg16_groups[] = { ++ "gpio20", "gpio37", ++}; ++static const char * const wcss1_dbg16_groups[] = { ++ "gpio20", "gpio37", ++}; ++static const char * const wcss0_dbg17_groups[] = { ++ "gpio21", "gpio38", ++}; ++static const char * const wcss1_dbg17_groups[] = { ++ "gpio21", "gpio38", ++}; ++static const char * const rgmii0_groups[] = 
{ ++ "gpio22", "gpio28", ++}; ++static const char * const sdio0_groups[] = { ++ "gpio23", ++}; ++static const char * const rgmii1_groups[] = { ++ "gpio23", "gpio29", ++}; ++static const char * const sdio1_groups[] = { ++ "gpio24", ++}; ++static const char * const rgmii2_groups[] = { ++ "gpio24", "gpio30", ++}; ++static const char * const i2s_tx_mclk_groups[] = { ++ "gpio24", "gpio52", ++}; ++static const char * const sdio2_groups[] = { ++ "gpio25", ++}; ++static const char * const rgmii3_groups[] = { ++ "gpio25", "gpio31", ++}; ++static const char * const i2s_tx_bclk_groups[] = { ++ "gpio25", "gpio53", "gpio60", ++}; ++static const char * const sdio3_groups[] = { ++ "gpio26", ++}; ++static const char * const rgmii_rx_groups[] = { ++ "gpio26", ++}; ++static const char * const i2s_tx_fsync_groups[] = { ++ "gpio26", "gpio57", "gpio61", ++}; ++static const char * const sdio_clk_groups[] = { ++ "gpio27", ++}; ++static const char * const rgmii_txc_groups[] = { ++ "gpio27", ++}; ++static const char * const i2s_td1_groups[] = { ++ "gpio27", "gpio54", "gpio63", ++}; ++static const char * const sdio_cmd_groups[] = { ++ "gpio28", ++}; ++static const char * const i2s_td2_groups[] = { ++ "gpio28", "gpio55", ++}; ++static const char * const sdio4_groups[] = { ++ "gpio29", ++}; ++static const char * const i2s_td3_groups[] = { ++ "gpio29", "gpio56", ++}; ++static const char * const sdio5_groups[] = { ++ "gpio30", ++}; ++static const char * const audio_pwm0_groups[] = { ++ "gpio30", "gpio64", ++}; ++static const char * const sdio6_groups[] = { ++ "gpio31", ++}; ++static const char * const audio_pwm1_groups[] = { ++ "gpio31", "gpio65", ++}; ++static const char * const wcss0_dbg27_groups[] = { ++ "gpio31", "gpio48", ++}; ++static const char * const wcss1_dbg27_groups[] = { ++ "gpio31", "gpio48", ++}; ++static const char * const sdio7_groups[] = { ++ "gpio32", ++}; ++static const char * const rgmii_rxc_groups[] = { ++ "gpio32", ++}; ++static const char * const audio_pwm2_groups[] = { 
++ "gpio32", "gpio66", ++}; ++static const char * const rgmii_tx_groups[] = { ++ "gpio33", ++}; ++static const char * const audio_pwm3_groups[] = { ++ "gpio33", "gpio67", ++}; ++static const char * const boot2_groups[] = { ++ "gpio33", ++}; ++static const char * const i2s_spdif_in_groups[] = { ++ "gpio34", "gpio59", "gpio63", ++}; ++static const char * const i2s_spdif_out_groups[] = { ++ "gpio35", "gpio62", "gpio63", ++}; ++static const char * const rmii00_groups[] = { ++ "gpio36", "gpio41", ++}; ++static const char * const led2_groups[] = { ++ "gpio36", "gpio38", "gpio58", ++}; ++static const char * const rmii01_groups[] = { ++ "gpio37", "gpio42", ++}; ++static const char * const wifi0_wci_groups[] = { ++ "gpio37", ++}; ++static const char * const wifi1_wci_groups[] = { ++ "gpio37", ++}; ++static const char * const boot4_groups[] = { ++ "gpio37", ++}; ++static const char * const rmii0_tx_groups[] = { ++ "gpio38", ++}; ++static const char * const boot5_groups[] = { ++ "gpio38", ++}; ++static const char * const rmii0_rx_groups[] = { ++ "gpio39", ++}; ++static const char * const pcie_clk1_groups[] = { ++ "gpio39", ++}; ++static const char * const led3_groups[] = { ++ "gpio39", ++}; ++static const char * const sdio_cd_groups[] = { ++ "gpio22", + }; + + static const struct msm_function ipq4019_functions[] = { ++ FUNCTION(rmii0_refclk), ++ FUNCTION(wifi0_rfsilient0), ++ FUNCTION(wifi1_rfsilient0), ++ FUNCTION(smart2), ++ FUNCTION(led4), ++ FUNCTION(wifi0_cal), ++ FUNCTION(wifi1_cal), ++ FUNCTION(wifi_wci0), ++ FUNCTION(rmii0_dv), ++ FUNCTION(wifi_wci1), ++ FUNCTION(rmii1_refclk), ++ FUNCTION(blsp_spi1), ++ FUNCTION(led5), ++ FUNCTION(rmii10), ++ FUNCTION(led6), ++ FUNCTION(rmii11), ++ FUNCTION(led7), ++ FUNCTION(rmii1_dv), ++ FUNCTION(led8), ++ FUNCTION(rmii1_tx), ++ FUNCTION(aud_pin), ++ FUNCTION(led9), ++ FUNCTION(rmii1_rx), ++ FUNCTION(led10), ++ FUNCTION(wifi0_rfsilient1), ++ FUNCTION(wifi1_rfsilient1), ++ FUNCTION(led11), ++ FUNCTION(boot7), ++ FUNCTION(qpic_pad), 
++ FUNCTION(pcie_clk), ++ FUNCTION(tm_clk0), ++ FUNCTION(wifi00), ++ FUNCTION(wifi10), ++ FUNCTION(mdio1), ++ FUNCTION(prng_rosc), ++ FUNCTION(dbg_out), ++ FUNCTION(tm0), ++ FUNCTION(wifi01), ++ FUNCTION(wifi11), ++ FUNCTION(atest_char3), ++ FUNCTION(pmu0), ++ FUNCTION(boot8), ++ FUNCTION(tm1), ++ FUNCTION(atest_char2), ++ FUNCTION(pmu1), ++ FUNCTION(boot9), ++ FUNCTION(tm2), ++ FUNCTION(atest_char1), ++ FUNCTION(tm_ack), ++ FUNCTION(wifi03), ++ FUNCTION(wifi13), ++ FUNCTION(qpic_pad4), ++ FUNCTION(atest_char0), ++ FUNCTION(tm3), ++ FUNCTION(wifi02), ++ FUNCTION(wifi12), ++ FUNCTION(qpic_pad5), ++ FUNCTION(smart3), ++ FUNCTION(wcss0_dbg14), ++ FUNCTION(tm4), ++ FUNCTION(wifi04), ++ FUNCTION(wifi14), ++ FUNCTION(qpic_pad6), ++ FUNCTION(wcss0_dbg15), ++ FUNCTION(qdss_tracectl_a), ++ FUNCTION(boot18), ++ FUNCTION(tm5), ++ FUNCTION(qpic_pad7), ++ FUNCTION(atest_char), ++ FUNCTION(wcss0_dbg4), ++ FUNCTION(qdss_traceclk_a), ++ FUNCTION(boot19), ++ FUNCTION(tm6), ++ FUNCTION(wcss0_dbg5), ++ FUNCTION(qdss_cti_trig_out_a0), ++ FUNCTION(boot14), ++ FUNCTION(tm7), ++ FUNCTION(chip_rst), ++ FUNCTION(wcss0_dbg6), ++ FUNCTION(qdss_cti_trig_out_b0), ++ FUNCTION(boot11), ++ FUNCTION(tm8), ++ FUNCTION(wcss0_dbg7), ++ FUNCTION(wcss1_dbg7), ++ FUNCTION(boot20), ++ FUNCTION(tm9), ++ FUNCTION(qpic_pad1), ++ FUNCTION(wcss0_dbg8), ++ FUNCTION(wcss1_dbg8), ++ FUNCTION(qpic_pad2), ++ FUNCTION(wcss0_dbg9), ++ FUNCTION(wcss1_dbg9), ++ FUNCTION(qpic_pad3), ++ FUNCTION(wcss0_dbg10), ++ FUNCTION(wcss1_dbg10), ++ FUNCTION(qpic_pad0), ++ FUNCTION(wcss0_dbg11), ++ FUNCTION(wcss1_dbg11), ++ FUNCTION(qpic_pad8), ++ FUNCTION(wcss0_dbg12), ++ FUNCTION(wcss1_dbg12), ++ FUNCTION(wifi034), ++ FUNCTION(wifi134), ++ FUNCTION(jtag_tdi), + FUNCTION(gpio), ++ FUNCTION(i2s_rx_bclk), ++ FUNCTION(jtag_tck), ++ FUNCTION(i2s_rx_fsync), ++ FUNCTION(jtag_tms), ++ FUNCTION(i2s_rxd), ++ FUNCTION(smart0), ++ FUNCTION(jtag_tdo), ++ FUNCTION(jtag_rst), ++ FUNCTION(jtag_trst), ++ FUNCTION(mdio0), ++ FUNCTION(wcss0_dbg18), 
++ FUNCTION(wcss1_dbg18), ++ FUNCTION(qdss_tracedata_a), ++ FUNCTION(mdc), ++ FUNCTION(wcss0_dbg19), ++ FUNCTION(wcss1_dbg19), + FUNCTION(blsp_uart1), ++ FUNCTION(wifi0_uart), ++ FUNCTION(wifi1_uart), ++ FUNCTION(smart1), ++ FUNCTION(wcss0_dbg20), ++ FUNCTION(wcss1_dbg20), ++ FUNCTION(wifi0_uart0), ++ FUNCTION(wifi1_uart0), ++ FUNCTION(wcss0_dbg21), ++ FUNCTION(wcss1_dbg21), + FUNCTION(blsp_i2c0), ++ FUNCTION(wcss0_dbg22), ++ FUNCTION(wcss1_dbg22), ++ FUNCTION(wcss0_dbg23), ++ FUNCTION(wcss1_dbg23), ++ FUNCTION(blsp_spi0), + FUNCTION(blsp_i2c1), ++ FUNCTION(wcss0_dbg24), ++ FUNCTION(wcss1_dbg24), ++ FUNCTION(wcss0_dbg25), ++ FUNCTION(wcss1_dbg25), ++ FUNCTION(wcss0_dbg26), ++ FUNCTION(wcss1_dbg26), ++ FUNCTION(wcss0_dbg), ++ FUNCTION(wcss1_dbg), + FUNCTION(blsp_uart0), +- FUNCTION(blsp_spi1), +- FUNCTION(blsp_spi0), ++ FUNCTION(led0), ++ FUNCTION(wcss0_dbg28), ++ FUNCTION(wcss1_dbg28), ++ FUNCTION(led1), ++ FUNCTION(wcss0_dbg29), ++ FUNCTION(wcss1_dbg29), ++ FUNCTION(wifi0_uart1), ++ FUNCTION(wifi1_uart1), ++ FUNCTION(wcss0_dbg30), ++ FUNCTION(wcss1_dbg30), ++ FUNCTION(wcss0_dbg31), ++ FUNCTION(wcss1_dbg31), ++ FUNCTION(i2s_rx_mclk), ++ FUNCTION(wcss0_dbg16), ++ FUNCTION(wcss1_dbg16), ++ FUNCTION(wcss0_dbg17), ++ FUNCTION(wcss1_dbg17), ++ FUNCTION(rgmii0), ++ FUNCTION(sdio0), ++ FUNCTION(rgmii1), ++ FUNCTION(sdio1), ++ FUNCTION(rgmii2), ++ FUNCTION(i2s_tx_mclk), ++ FUNCTION(sdio2), ++ FUNCTION(rgmii3), ++ FUNCTION(i2s_tx_bclk), ++ FUNCTION(sdio3), ++ FUNCTION(rgmii_rx), ++ FUNCTION(i2s_tx_fsync), ++ FUNCTION(sdio_clk), ++ FUNCTION(rgmii_txc), ++ FUNCTION(i2s_td1), ++ FUNCTION(sdio_cmd), ++ FUNCTION(i2s_td2), ++ FUNCTION(sdio4), ++ FUNCTION(i2s_td3), ++ FUNCTION(sdio5), ++ FUNCTION(audio_pwm0), ++ FUNCTION(sdio6), ++ FUNCTION(audio_pwm1), ++ FUNCTION(wcss0_dbg27), ++ FUNCTION(wcss1_dbg27), ++ FUNCTION(sdio7), ++ FUNCTION(rgmii_rxc), ++ FUNCTION(audio_pwm2), ++ FUNCTION(rgmii_tx), ++ FUNCTION(audio_pwm3), ++ FUNCTION(boot2), ++ FUNCTION(i2s_spdif_in), ++ 
FUNCTION(i2s_spdif_out), ++ FUNCTION(rmii00), ++ FUNCTION(led2), ++ FUNCTION(rmii01), ++ FUNCTION(wifi0_wci), ++ FUNCTION(wifi1_wci), ++ FUNCTION(boot4), ++ FUNCTION(rmii0_tx), ++ FUNCTION(boot5), ++ FUNCTION(rmii0_rx), ++ FUNCTION(pcie_clk1), ++ FUNCTION(led3), ++ FUNCTION(sdio_cd), + }; + + static const struct msm_pingroup ipq4019_groups[] = { +- PINGROUP(0, NA, NA, NA, NA, NA, NA, NA, NA, NA, NA, NA, NA, NA, NA), +- PINGROUP(1, NA, NA, NA, NA, NA, NA, NA, NA, NA, NA, NA, NA, NA, NA), +- PINGROUP(2, NA, NA, NA, NA, NA, NA, NA, NA, NA, NA, NA, NA, NA, NA), +- PINGROUP(3, NA, NA, NA, NA, NA, NA, NA, NA, NA, NA, NA, NA, NA, NA), +- PINGROUP(4, NA, NA, NA, NA, NA, NA, NA, NA, NA, NA, NA, NA, NA, NA), +- PINGROUP(5, NA, NA, NA, NA, NA, NA, NA, NA, NA, NA, NA, NA, NA, NA), +- PINGROUP(6, NA, NA, NA, NA, NA, NA, NA, NA, NA, NA, NA, NA, NA, NA), +- PINGROUP(7, NA, NA, NA, NA, NA, NA, NA, NA, NA, NA, NA, NA, NA, NA), +- PINGROUP(8, blsp_uart1, NA, NA, NA, NA, NA, NA, NA, NA, NA, NA, NA, NA, NA), +- PINGROUP(9, blsp_uart1, NA, NA, NA, NA, NA, NA, NA, NA, NA, NA, NA, NA, NA), +- PINGROUP(10, blsp_uart1, NA, NA, blsp_i2c0, NA, NA, NA, NA, NA, NA, NA, NA, NA, NA), +- PINGROUP(11, blsp_uart1, NA, NA, blsp_i2c0, NA, NA, NA, NA, NA, NA, NA, NA, NA, NA), +- PINGROUP(12, blsp_spi0, blsp_i2c1, NA, NA, NA, NA, NA, NA, NA, NA, NA, NA, NA, NA), +- PINGROUP(13, blsp_spi0, blsp_i2c1, NA, NA, NA, NA, NA, NA, NA, NA, NA, NA, NA, NA), +- PINGROUP(14, blsp_spi0, NA, NA, NA, NA, NA, NA, NA, NA, NA, NA, NA, NA, NA), +- PINGROUP(15, blsp_spi0, NA, NA, NA, NA, NA, NA, NA, NA, NA, NA, NA, NA, NA), +- PINGROUP(16, blsp_uart0, NA, NA, NA, NA, NA, NA, NA, NA, NA, NA, NA, NA, NA), +- PINGROUP(17, blsp_uart0, NA, NA, NA, NA, NA, NA, NA, NA, NA, NA, NA, NA, NA), +- PINGROUP(18, NA, NA, NA, NA, NA, NA, NA, NA, NA, NA, NA, NA, NA, NA), +- PINGROUP(19, NA, NA, NA, NA, NA, NA, NA, NA, NA, NA, NA, NA, NA, NA), +- PINGROUP(20, blsp_i2c0, NA, NA, NA, NA, NA, NA, NA, NA, NA, NA, NA, NA, NA), +- PINGROUP(21, 
blsp_i2c0, NA, NA, NA, NA, NA, NA, NA, NA, NA, NA, NA, NA, NA), +- PINGROUP(22, NA, NA, NA, NA, NA, NA, NA, NA, NA, NA, NA, NA, NA, NA), +- PINGROUP(23, NA, NA, NA, NA, NA, NA, NA, NA, NA, NA, NA, NA, NA, NA), +- PINGROUP(24, NA, NA, NA, NA, NA, NA, NA, NA, NA, NA, NA, NA, NA, NA), +- PINGROUP(25, NA, NA, NA, NA, NA, NA, NA, NA, NA, NA, NA, NA, NA, NA), +- PINGROUP(26, NA, NA, NA, NA, NA, NA, NA, NA, NA, NA, NA, NA, NA, NA), +- PINGROUP(27, NA, NA, NA, NA, NA, NA, NA, NA, NA, NA, NA, NA, NA, NA), +- PINGROUP(28, NA, NA, NA, NA, NA, NA, NA, NA, NA, NA, NA, NA, NA, NA), +- PINGROUP(29, NA, NA, NA, NA, NA, NA, NA, NA, NA, NA, NA, NA, NA, NA), +- PINGROUP(30, NA, NA, NA, NA, NA, NA, NA, NA, NA, NA, NA, NA, NA, NA), +- PINGROUP(31, NA, NA, NA, NA, NA, NA, NA, NA, NA, NA, NA, NA, NA, NA), +- PINGROUP(32, NA, NA, NA, NA, NA, NA, NA, NA, NA, NA, NA, NA, NA, NA), +- PINGROUP(33, NA, NA, NA, NA, NA, NA, NA, NA, NA, NA, NA, NA, NA, NA), +- PINGROUP(34, blsp_i2c1, NA, NA, NA, NA, NA, NA, NA, NA, NA, NA, NA, NA, NA), +- PINGROUP(35, blsp_i2c1, NA, NA, NA, NA, NA, NA, NA, NA, NA, NA, NA, NA, NA), +- PINGROUP(36, NA, NA, NA, NA, NA, NA, NA, NA, NA, NA, NA, NA, NA, NA), +- PINGROUP(37, NA, NA, NA, NA, NA, NA, NA, NA, NA, NA, NA, NA, NA, NA), +- PINGROUP(38, NA, NA, NA, NA, NA, NA, NA, NA, NA, NA, NA, NA, NA, NA), +- PINGROUP(39, NA, NA, NA, NA, NA, NA, NA, NA, NA, NA, NA, NA, NA, NA), +- PINGROUP(40, NA, NA, NA, NA, NA, NA, NA, NA, NA, NA, NA, NA, NA, NA), +- PINGROUP(41, NA, NA, NA, NA, NA, NA, NA, NA, NA, NA, NA, NA, NA, NA), +- PINGROUP(42, NA, NA, NA, NA, NA, NA, NA, NA, NA, NA, NA, NA, NA, NA), +- PINGROUP(43, NA, NA, NA, NA, NA, NA, NA, NA, NA, NA, NA, NA, NA, NA), +- PINGROUP(44, NA, blsp_spi1, NA, NA, NA, NA, NA, NA, NA, NA, NA, NA, NA, NA), +- PINGROUP(45, NA, blsp_spi1, blsp_spi0, NA, NA, NA, NA, NA, NA, NA, NA, NA, NA, NA), +- PINGROUP(46, NA, blsp_spi1, NA, NA, NA, NA, NA, NA, NA, NA, NA, NA, NA, NA), +- PINGROUP(47, NA, blsp_spi1, NA, NA, NA, NA, NA, NA, NA, NA, NA, 
NA, NA, NA), +- PINGROUP(48, NA, NA, NA, NA, NA, NA, NA, NA, NA, NA, NA, NA, NA, NA), +- PINGROUP(49, NA, NA, NA, NA, NA, NA, NA, NA, NA, NA, NA, NA, NA, NA), +- PINGROUP(50, NA, NA, NA, NA, NA, NA, NA, NA, NA, NA, NA, NA, NA, NA), +- PINGROUP(51, NA, NA, NA, NA, NA, NA, NA, NA, NA, NA, NA, NA, NA, NA), +- PINGROUP(52, NA, NA, NA, NA, NA, NA, NA, NA, NA, NA, NA, NA, NA, NA), +- PINGROUP(53, NA, NA, NA, NA, NA, NA, NA, NA, NA, NA, NA, NA, NA, NA), +- PINGROUP(54, NA, blsp_spi0, NA, NA, NA, NA, NA, NA, NA, NA, NA, NA, NA, NA), +- PINGROUP(55, NA, blsp_spi0, NA, NA, NA, NA, NA, NA, NA, NA, NA, NA, NA, NA), +- PINGROUP(56, NA, blsp_spi0, NA, NA, NA, NA, NA, NA, NA, NA, NA, NA, NA, NA), +- PINGROUP(57, NA, blsp_spi0, NA, NA, NA, NA, NA, NA, NA, NA, NA, NA, NA, NA), +- PINGROUP(58, NA, NA, blsp_i2c0, NA, NA, NA, NA, NA, NA, NA, NA, NA, NA, NA), +- PINGROUP(59, NA, blsp_i2c0, NA, NA, NA, NA, NA, NA, NA, NA, NA, NA, NA, NA), +- PINGROUP(60, NA, blsp_uart0, NA, NA, NA, NA, NA, NA, NA, NA, NA, NA, NA, NA), +- PINGROUP(61, NA, blsp_uart0, NA, NA, NA, NA, NA, NA, NA, NA, NA, NA, NA, NA), +- PINGROUP(62, NA, NA, NA, NA, NA, NA, NA, NA, NA, NA, NA, NA, NA, NA), +- PINGROUP(63, NA, NA, NA, NA, NA, NA, NA, NA, NA, NA, NA, NA, NA, NA), +- PINGROUP(64, NA, NA, NA, NA, NA, NA, NA, NA, NA, NA, NA, NA, NA, NA), +- PINGROUP(65, NA, NA, NA, NA, NA, NA, NA, NA, NA, NA, NA, NA, NA, NA), +- PINGROUP(66, NA, NA, NA, NA, NA, NA, NA, NA, NA, NA, NA, NA, NA, NA), +- PINGROUP(67, NA, NA, NA, NA, NA, NA, NA, NA, NA, NA, NA, NA, NA, NA), +- PINGROUP(68, NA, NA, NA, NA, NA, NA, NA, NA, NA, NA, NA, NA, NA, NA), +- PINGROUP(69, NA, NA, NA, NA, NA, NA, NA, NA, NA, NA, NA, NA, NA, NA), ++ PINGROUP(0, jtag_tdi, smart0, i2s_rx_bclk, NA, NA, NA, NA, NA, NA, NA, ++ NA, NA, NA, NA), ++ PINGROUP(1, jtag_tck, smart0, i2s_rx_fsync, NA, NA, NA, NA, NA, NA, NA, ++ NA, NA, NA, NA), ++ PINGROUP(2, jtag_tms, smart0, i2s_rxd, NA, NA, NA, NA, NA, NA, NA, NA, ++ NA, NA, NA), ++ PINGROUP(3, jtag_tdo, NA, NA, NA, NA, 
NA, NA, NA, NA, NA, NA, NA, NA, ++ NA), ++ PINGROUP(4, jtag_rst, NA, NA, NA, NA, NA, NA, NA, NA, NA, NA, NA, NA, ++ NA), ++ PINGROUP(5, jtag_trst, smart0, NA, NA, NA, NA, NA, NA, NA, NA, NA, NA, ++ NA, NA), ++ PINGROUP(6, mdio0, NA, wcss0_dbg18, wcss1_dbg18, NA, qdss_tracedata_a, ++ NA, NA, NA, NA, NA, NA, NA, NA), ++ PINGROUP(7, mdc, NA, wcss0_dbg19, wcss1_dbg19, NA, qdss_tracedata_a, ++ NA, NA, NA, NA, NA, NA, NA, NA), ++ PINGROUP(8, blsp_uart1, wifi0_uart, wifi1_uart, smart1, NA, ++ wcss0_dbg20, wcss1_dbg20, NA, qdss_tracedata_a, NA, NA, NA, ++ NA, NA), ++ PINGROUP(9, blsp_uart1, wifi0_uart0, wifi1_uart0, smart1, wifi0_uart, ++ NA, wcss0_dbg21, wcss1_dbg21, NA, qdss_tracedata_a, NA, NA, ++ NA, NA), ++ PINGROUP(10, blsp_uart1, wifi0_uart0, wifi1_uart0, blsp_i2c0, NA, ++ wcss0_dbg22, wcss1_dbg22, NA, qdss_tracedata_a, NA, NA, NA, ++ NA, NA), ++ PINGROUP(11, blsp_uart1, wifi0_uart, wifi1_uart, blsp_i2c0, NA, ++ wcss0_dbg23, wcss1_dbg23, NA, qdss_tracedata_a, NA, NA, NA, ++ NA, NA), ++ PINGROUP(12, blsp_spi0, blsp_i2c1, NA, wcss0_dbg24, wcss1_dbg24, NA, ++ NA, NA, NA, NA, NA, NA, NA, NA), ++ PINGROUP(13, blsp_spi0, blsp_i2c1, NA, wcss0_dbg25, wcss1_dbg25, NA, ++ NA, NA, NA, NA, NA, NA, NA, NA), ++ PINGROUP(14, blsp_spi0, NA, wcss0_dbg26, wcss1_dbg26, NA, NA, NA, NA, ++ NA, NA, NA, NA, NA, NA), ++ PINGROUP(15, blsp_spi0, NA, wcss0_dbg, wcss1_dbg, NA, NA, NA, NA, NA, ++ NA, NA, NA, NA, NA), ++ PINGROUP(16, blsp_uart0, led0, smart1, NA, wcss0_dbg28, wcss1_dbg28, ++ NA, qdss_tracedata_a, NA, NA, NA, NA, NA, NA), ++ PINGROUP(17, blsp_uart0, led1, smart1, NA, wcss0_dbg29, wcss1_dbg29, ++ NA, qdss_tracedata_a, NA, NA, NA, NA, NA, NA), ++ PINGROUP(18, wifi0_uart1, wifi1_uart1, NA, wcss0_dbg30, wcss1_dbg30, ++ NA, NA, NA, NA, NA, NA, NA, NA, NA), ++ PINGROUP(19, wifi0_uart, wifi1_uart, NA, wcss0_dbg31, wcss1_dbg31, NA, ++ NA, NA, NA, NA, NA, NA, NA, NA), ++ PINGROUP(20, blsp_i2c0, i2s_rx_mclk, NA, wcss0_dbg16, wcss1_dbg16, NA, ++ NA, NA, NA, NA, NA, NA, NA, NA), ++ 
PINGROUP(21, blsp_i2c0, i2s_rx_bclk, NA, wcss0_dbg17, wcss1_dbg17, NA, ++ NA, NA, NA, NA, NA, NA, NA, NA), ++ PINGROUP(22, rgmii0, i2s_rx_fsync, NA, wcss0_dbg18, wcss1_dbg18, NA, ++ NA, NA, NA, NA, NA, NA, NA, NA), ++ PINGROUP(23, sdio0, rgmii1, i2s_rxd, NA, wcss0_dbg19, wcss1_dbg19, NA, ++ NA, NA, NA, NA, NA, NA, NA), ++ PINGROUP(24, sdio1, rgmii2, i2s_tx_mclk, NA, wcss0_dbg20, wcss1_dbg20, ++ NA, NA, NA, NA, NA, NA, NA, NA), ++ PINGROUP(25, sdio2, rgmii3, i2s_tx_bclk, NA, wcss0_dbg21, wcss1_dbg21, ++ NA, NA, NA, NA, NA, NA, NA, NA), ++ PINGROUP(26, sdio3, rgmii_rx, i2s_tx_fsync, NA, wcss0_dbg22, ++ wcss1_dbg22, NA, NA, NA, NA, NA, NA, NA, NA), ++ PINGROUP(27, sdio_clk, rgmii_txc, i2s_td1, NA, wcss0_dbg23, ++ wcss1_dbg23, NA, NA, NA, NA, NA, NA, NA, NA), ++ PINGROUP(28, sdio_cmd, rgmii0, i2s_td2, NA, wcss0_dbg24, wcss1_dbg24, ++ NA, NA, NA, NA, NA, NA, NA, NA), ++ PINGROUP(29, sdio4, rgmii1, i2s_td3, NA, wcss0_dbg25, wcss1_dbg25, NA, ++ NA, NA, NA, NA, NA, NA, NA), ++ PINGROUP(30, sdio5, rgmii2, audio_pwm0, NA, wcss0_dbg26, wcss1_dbg26, ++ NA, NA, NA, NA, NA, NA, NA, NA), ++ PINGROUP(31, sdio6, rgmii3, audio_pwm1, NA, wcss0_dbg27, wcss1_dbg27, ++ NA, NA, NA, NA, NA, NA, NA, NA), ++ PINGROUP(32, sdio7, rgmii_rxc, audio_pwm2, NA, wcss0_dbg28, ++ wcss1_dbg28, NA, NA, NA, NA, NA, NA, NA, NA), ++ PINGROUP(33, rgmii_tx, audio_pwm3, NA, wcss0_dbg29, wcss1_dbg29, NA, ++ boot2, NA, NA, NA, NA, NA, NA, NA), ++ PINGROUP(34, blsp_i2c1, i2s_spdif_in, NA, wcss0_dbg30, wcss1_dbg30, NA, ++ NA, NA, NA, NA, NA, NA, NA, NA), ++ PINGROUP(35, blsp_i2c1, i2s_spdif_out, NA, wcss0_dbg31, wcss1_dbg31, ++ NA, NA, NA, NA, NA, NA, NA, NA, NA), ++ PINGROUP(36, rmii00, led2, led0, NA, NA, NA, NA, NA, NA, NA, NA, NA, ++ NA, NA), ++ PINGROUP(37, rmii01, wifi0_wci, wifi1_wci, led1, NA, NA, wcss0_dbg16, ++ wcss1_dbg16, NA, qdss_tracedata_a, boot4, NA, NA, NA), ++ PINGROUP(38, rmii0_tx, led2, NA, NA, wcss0_dbg17, wcss1_dbg17, NA, ++ qdss_tracedata_a, boot5, NA, NA, NA, NA, NA), ++ PINGROUP(39, 
rmii0_rx, pcie_clk1, led3, NA, NA, wcss0_dbg18, ++ wcss1_dbg18, NA, NA, qdss_tracedata_a, NA, NA, NA, NA), ++ PINGROUP(40, rmii0_refclk, wifi0_rfsilient0, wifi1_rfsilient0, smart2, ++ led4, NA, NA, wcss0_dbg19, wcss1_dbg19, NA, NA, ++ qdss_tracedata_a, NA, NA), ++ PINGROUP(41, rmii00, wifi0_cal, wifi1_cal, smart2, NA, NA, wcss0_dbg20, ++ wcss1_dbg20, NA, NA, qdss_tracedata_a, NA, NA, NA), ++ PINGROUP(42, rmii01, wifi_wci0, NA, NA, wcss0_dbg21, wcss1_dbg21, NA, ++ NA, qdss_tracedata_a, NA, NA, NA, NA, NA), ++ PINGROUP(43, rmii0_dv, wifi_wci1, NA, NA, wcss0_dbg22, wcss1_dbg22, NA, ++ NA, qdss_tracedata_a, NA, NA, NA, NA, NA), ++ PINGROUP(44, rmii1_refclk, blsp_spi1, smart0, led5, NA, NA, ++ wcss0_dbg23, wcss1_dbg23, NA, NA, NA, NA, NA, NA), ++ PINGROUP(45, rmii10, blsp_spi1, blsp_spi0, smart0, led6, NA, NA, ++ wcss0_dbg24, wcss1_dbg24, NA, NA, NA, NA, NA), ++ PINGROUP(46, rmii11, blsp_spi1, smart0, led7, NA, NA, wcss0_dbg25, ++ wcss1_dbg25, NA, NA, NA, NA, NA, NA), ++ PINGROUP(47, rmii1_dv, blsp_spi1, smart0, led8, NA, NA, wcss0_dbg26, ++ wcss1_dbg26, NA, NA, NA, NA, NA, NA), ++ PINGROUP(48, rmii1_tx, aud_pin, smart2, led9, NA, NA, wcss0_dbg27, ++ wcss1_dbg27, NA, NA, NA, NA, NA, NA), ++ PINGROUP(49, rmii1_rx, aud_pin, smart2, led10, NA, NA, wcss0_dbg28, ++ wcss1_dbg28, NA, NA, NA, NA, NA, NA), ++ PINGROUP(50, rmii10, aud_pin, wifi0_rfsilient1, wifi1_rfsilient1, ++ led11, NA, NA, wcss0_dbg29, wcss1_dbg29, NA, NA, NA, NA, NA), ++ PINGROUP(51, rmii11, aud_pin, wifi0_cal, wifi1_cal, NA, NA, ++ wcss0_dbg30, wcss1_dbg30, NA, boot7, NA, NA, NA, NA), ++ PINGROUP(52, qpic_pad, mdc, pcie_clk, i2s_tx_mclk, NA, NA, wcss0_dbg31, ++ tm_clk0, wifi00, wifi10, NA, NA, NA, NA), ++ PINGROUP(53, qpic_pad, mdio1, i2s_tx_bclk, prng_rosc, dbg_out, tm0, ++ wifi01, wifi11, NA, NA, NA, NA, NA, NA), ++ PINGROUP(54, qpic_pad, blsp_spi0, i2s_td1, atest_char3, pmu0, NA, NA, ++ boot8, tm1, NA, NA, NA, NA, NA), ++ PINGROUP(55, qpic_pad, blsp_spi0, i2s_td2, atest_char2, pmu1, NA, NA, ++ boot9, tm2, 
NA, NA, NA, NA, NA), ++ PINGROUP(56, qpic_pad, blsp_spi0, i2s_td3, atest_char1, NA, tm_ack, ++ wifi03, wifi13, NA, NA, NA, NA, NA, NA), ++ PINGROUP(57, qpic_pad4, blsp_spi0, i2s_tx_fsync, atest_char0, NA, tm3, ++ wifi02, wifi12, NA, NA, NA, NA, NA, NA), ++ PINGROUP(58, qpic_pad5, led2, blsp_i2c0, smart3, smart1, i2s_rx_mclk, ++ NA, wcss0_dbg14, tm4, wifi04, wifi14, NA, NA, NA), ++ PINGROUP(59, qpic_pad6, blsp_i2c0, smart3, smart1, i2s_spdif_in, NA, ++ NA, wcss0_dbg15, qdss_tracectl_a, boot18, tm5, NA, NA, NA), ++ PINGROUP(60, qpic_pad7, blsp_uart0, smart1, smart3, led0, i2s_tx_bclk, ++ i2s_rx_bclk, atest_char, NA, wcss0_dbg4, qdss_traceclk_a, ++ boot19, tm6, NA), ++ PINGROUP(61, qpic_pad, blsp_uart0, smart1, smart3, led1, i2s_tx_fsync, ++ i2s_rx_fsync, NA, NA, wcss0_dbg5, qdss_cti_trig_out_a0, ++ boot14, tm7, NA), ++ PINGROUP(62, qpic_pad, chip_rst, wifi0_uart, wifi1_uart, i2s_spdif_out, ++ NA, NA, wcss0_dbg6, qdss_cti_trig_out_b0, boot11, tm8, NA, NA, ++ NA), ++ PINGROUP(63, qpic_pad, wifi0_uart1, wifi1_uart1, wifi1_uart, i2s_td1, ++ i2s_rxd, i2s_spdif_out, i2s_spdif_in, NA, wcss0_dbg7, ++ wcss1_dbg7, boot20, tm9, NA), ++ PINGROUP(64, qpic_pad1, audio_pwm0, NA, wcss0_dbg8, wcss1_dbg8, NA, NA, ++ NA, NA, NA, NA, NA, NA, NA), ++ PINGROUP(65, qpic_pad2, audio_pwm1, NA, wcss0_dbg9, wcss1_dbg9, NA, NA, ++ NA, NA, NA, NA, NA, NA, NA), ++ PINGROUP(66, qpic_pad3, audio_pwm2, NA, wcss0_dbg10, wcss1_dbg10, NA, ++ NA, NA, NA, NA, NA, NA, NA, NA), ++ PINGROUP(67, qpic_pad0, audio_pwm3, NA, wcss0_dbg11, wcss1_dbg11, NA, ++ NA, NA, NA, NA, NA, NA, NA, NA), ++ PINGROUP(68, qpic_pad8, NA, wcss0_dbg12, wcss1_dbg12, NA, NA, NA, NA, ++ NA, NA, NA, NA, NA, NA), ++ PINGROUP(69, qpic_pad, NA, wcss0_dbg, NA, NA, NA, NA, NA, NA, NA, NA, ++ NA, NA, NA), + PINGROUP(70, NA, NA, NA, NA, NA, NA, NA, NA, NA, NA, NA, NA, NA, NA), + PINGROUP(71, NA, NA, NA, NA, NA, NA, NA, NA, NA, NA, NA, NA, NA, NA), + PINGROUP(72, NA, NA, NA, NA, NA, NA, NA, NA, NA, NA, NA, NA, NA, NA), +@@ -433,7 +1526,8 @@ 
static const struct msm_pingroup ipq4019 + PINGROUP(95, NA, NA, NA, NA, NA, NA, NA, NA, NA, NA, NA, NA, NA, NA), + PINGROUP(96, NA, NA, NA, NA, NA, NA, NA, NA, NA, NA, NA, NA, NA, NA), + PINGROUP(97, NA, NA, NA, NA, NA, NA, NA, NA, NA, NA, NA, NA, NA, NA), +- PINGROUP(98, NA, NA, NA, NA, NA, NA, NA, NA, NA, NA, NA, NA, NA, NA), ++ PINGROUP(98, wifi034, wifi134, NA, NA, NA, NA, NA, NA, NA, NA, NA, NA, ++ NA, NA), + PINGROUP(99, NA, NA, NA, NA, NA, NA, NA, NA, NA, NA, NA, NA, NA, NA), + }; + +@@ -460,6 +1554,7 @@ static const struct of_device_id ipq4019 + static struct platform_driver ipq4019_pinctrl_driver = { + .driver = { + .name = "ipq4019-pinctrl", ++ .owner = THIS_MODULE, + .of_match_table = ipq4019_pinctrl_of_match, + }, + .probe = ipq4019_pinctrl_probe, diff --git a/target/linux/ipq40xx/patches-4.9/859-msm-pinctrl-Add-support-to-configure-ipq40xx-GPIO_PU.patch b/target/linux/ipq40xx/patches-4.9/859-msm-pinctrl-Add-support-to-configure-ipq40xx-GPIO_PU.patch new file mode 100644 index 000000000..07cf01b26 --- /dev/null +++ b/target/linux/ipq40xx/patches-4.9/859-msm-pinctrl-Add-support-to-configure-ipq40xx-GPIO_PU.patch @@ -0,0 +1,236 @@ +From e77af7de404eb464f7da9e0daeb8b362cc66a7ba Mon Sep 17 00:00:00 2001 +From: Ram Chandra Jangir +Date: Tue, 9 May 2017 11:45:00 +0530 +Subject: [PATCH] msm: pinctrl: Add support to configure ipq40xx GPIO_PULL bits + +GPIO_PULL bits configurations in TLMM_GPIO_CFG register +differs for IPQ40xx from rest of the other qcom SoC's. +This change add support to configure the msm_gpio_pull +bits for ipq40xx, It is required to fix the proper +configurations of gpio-pull bits for nand pins mux. + +IPQ40xx SoC: +2'b10: Internal pull up enable. 
+2'b11: Unsupport + +For other SoC's: +2'b10: Keeper +2'b11: Pull-Up + +Signed-off-by: Ram Chandra Jangir +--- + drivers/pinctrl/qcom/pinctrl-apq8064.c | 1 + + drivers/pinctrl/qcom/pinctrl-apq8084.c | 1 + + drivers/pinctrl/qcom/pinctrl-ipq4019.c | 8 ++++++++ + drivers/pinctrl/qcom/pinctrl-ipq8064.c | 1 + + drivers/pinctrl/qcom/pinctrl-mdm9615.c | 1 + + drivers/pinctrl/qcom/pinctrl-msm.c | 21 ++++++++------------- + drivers/pinctrl/qcom/pinctrl-msm.h | 19 +++++++++++++++++++ + drivers/pinctrl/qcom/pinctrl-msm8660.c | 1 + + drivers/pinctrl/qcom/pinctrl-msm8916.c | 1 + + drivers/pinctrl/qcom/pinctrl-msm8960.c | 1 + + drivers/pinctrl/qcom/pinctrl-msm8x74.c | 1 + + 11 files changed, 43 insertions(+), 13 deletions(-) + +--- a/drivers/pinctrl/qcom/pinctrl-apq8064.c ++++ b/drivers/pinctrl/qcom/pinctrl-apq8064.c +@@ -597,6 +597,7 @@ static const struct msm_pinctrl_soc_data + .groups = apq8064_groups, + .ngroups = ARRAY_SIZE(apq8064_groups), + .ngpios = NUM_GPIO_PINGROUPS, ++ .gpio_pull = &msm_gpio_pull, + }; + + static int apq8064_pinctrl_probe(struct platform_device *pdev) +--- a/drivers/pinctrl/qcom/pinctrl-apq8084.c ++++ b/drivers/pinctrl/qcom/pinctrl-apq8084.c +@@ -1206,6 +1206,7 @@ static const struct msm_pinctrl_soc_data + .groups = apq8084_groups, + .ngroups = ARRAY_SIZE(apq8084_groups), + .ngpios = NUM_GPIO_PINGROUPS, ++ .gpio_pull = &msm_gpio_pull, + }; + + static int apq8084_pinctrl_probe(struct platform_device *pdev) +--- a/drivers/pinctrl/qcom/pinctrl-ipq4019.c ++++ b/drivers/pinctrl/qcom/pinctrl-ipq4019.c +@@ -1531,6 +1531,13 @@ static const struct msm_pingroup ipq4019 + PINGROUP(99, NA, NA, NA, NA, NA, NA, NA, NA, NA, NA, NA, NA, NA, NA), + }; + ++static const struct msm_pinctrl_gpio_pull ipq4019_gpio_pull = { ++ .no_pull = 0, ++ .pull_down = 1, ++ .keeper = 0, ++ .pull_up = 2, ++}; ++ + static const struct msm_pinctrl_soc_data ipq4019_pinctrl = { + .pins = ipq4019_pins, + .npins = ARRAY_SIZE(ipq4019_pins), +@@ -1539,6 +1546,7 @@ static const struct 
msm_pinctrl_soc_data + .groups = ipq4019_groups, + .ngroups = ARRAY_SIZE(ipq4019_groups), + .ngpios = 100, ++ .gpio_pull = &ipq4019_gpio_pull, + }; + + static int ipq4019_pinctrl_probe(struct platform_device *pdev) +--- a/drivers/pinctrl/qcom/pinctrl-ipq8064.c ++++ b/drivers/pinctrl/qcom/pinctrl-ipq8064.c +@@ -630,6 +630,7 @@ static const struct msm_pinctrl_soc_data + .groups = ipq8064_groups, + .ngroups = ARRAY_SIZE(ipq8064_groups), + .ngpios = NUM_GPIO_PINGROUPS, ++ .gpio_pull = &msm_gpio_pull, + }; + + static int ipq8064_pinctrl_probe(struct platform_device *pdev) +--- a/drivers/pinctrl/qcom/pinctrl-mdm9615.c ++++ b/drivers/pinctrl/qcom/pinctrl-mdm9615.c +@@ -444,6 +444,7 @@ static const struct msm_pinctrl_soc_data + .groups = mdm9615_groups, + .ngroups = ARRAY_SIZE(mdm9615_groups), + .ngpios = NUM_GPIO_PINGROUPS, ++ .gpio_pull = &msm_gpio_pull, + }; + + static int mdm9615_pinctrl_probe(struct platform_device *pdev) +--- a/drivers/pinctrl/qcom/pinctrl-msm.c ++++ b/drivers/pinctrl/qcom/pinctrl-msm.c +@@ -203,11 +203,6 @@ static int msm_config_reg(struct msm_pin + return 0; + } + +-#define MSM_NO_PULL 0 +-#define MSM_PULL_DOWN 1 +-#define MSM_KEEPER 2 +-#define MSM_PULL_UP 3 +- + static unsigned msm_regval_to_drive(u32 val) + { + return (val + 1) * 2; +@@ -238,16 +233,16 @@ static int msm_config_group_get(struct p + /* Convert register value to pinconf value */ + switch (param) { + case PIN_CONFIG_BIAS_DISABLE: +- arg = arg == MSM_NO_PULL; ++ arg = arg == pctrl->soc->gpio_pull->no_pull; + break; + case PIN_CONFIG_BIAS_PULL_DOWN: +- arg = arg == MSM_PULL_DOWN; ++ arg = arg == pctrl->soc->gpio_pull->pull_down; + break; + case PIN_CONFIG_BIAS_BUS_HOLD: +- arg = arg == MSM_KEEPER; ++ arg = arg == pctrl->soc->gpio_pull->keeper; + break; + case PIN_CONFIG_BIAS_PULL_UP: +- arg = arg == MSM_PULL_UP; ++ arg = arg == pctrl->soc->gpio_pull->pull_up; + break; + case PIN_CONFIG_DRIVE_STRENGTH: + arg = msm_regval_to_drive(arg); +@@ -304,16 +299,16 @@ static int 
msm_config_group_set(struct p + /* Convert pinconf values to register values */ + switch (param) { + case PIN_CONFIG_BIAS_DISABLE: +- arg = MSM_NO_PULL; ++ arg = pctrl->soc->gpio_pull->no_pull; + break; + case PIN_CONFIG_BIAS_PULL_DOWN: +- arg = MSM_PULL_DOWN; ++ arg = pctrl->soc->gpio_pull->pull_down; + break; + case PIN_CONFIG_BIAS_BUS_HOLD: +- arg = MSM_KEEPER; ++ arg = pctrl->soc->gpio_pull->keeper; + break; + case PIN_CONFIG_BIAS_PULL_UP: +- arg = MSM_PULL_UP; ++ arg = pctrl->soc->gpio_pull->pull_up; + break; + case PIN_CONFIG_DRIVE_STRENGTH: + /* Check for invalid values */ +--- a/drivers/pinctrl/qcom/pinctrl-msm.h ++++ b/drivers/pinctrl/qcom/pinctrl-msm.h +@@ -98,6 +98,16 @@ struct msm_pingroup { + }; + + /** ++ * struct msm_pinctrl_gpio_pull - pinctrl pull value bit field descriptor ++ */ ++struct msm_pinctrl_gpio_pull { ++ unsigned no_pull; ++ unsigned pull_down; ++ unsigned keeper; ++ unsigned pull_up; ++}; ++ ++/** + * struct msm_pinctrl_soc_data - Qualcomm pin controller driver configuration + * @pins: An array describing all pins the pin controller affects. + * @npins: The number of entries in @pins. +@@ -106,6 +116,7 @@ struct msm_pingroup { + * @groups: An array describing all pin groups the pin SoC supports. + * @ngroups: The numbmer of entries in @groups. + * @ngpio: The number of pingroups the driver should expose as GPIOs. ++ * @gpio_pull_val: The pull value bit field descriptor. 
+ */ + struct msm_pinctrl_soc_data { + const struct pinctrl_pin_desc *pins; +@@ -115,6 +126,14 @@ struct msm_pinctrl_soc_data { + const struct msm_pingroup *groups; + unsigned ngroups; + unsigned ngpios; ++ const struct msm_pinctrl_gpio_pull *gpio_pull; ++}; ++ ++static const struct msm_pinctrl_gpio_pull msm_gpio_pull = { ++ .no_pull = 0, ++ .pull_down = 1, ++ .keeper = 2, ++ .pull_up = 3, + }; + + int msm_pinctrl_probe(struct platform_device *pdev, +--- a/drivers/pinctrl/qcom/pinctrl-msm8660.c ++++ b/drivers/pinctrl/qcom/pinctrl-msm8660.c +@@ -979,6 +979,7 @@ static const struct msm_pinctrl_soc_data + .groups = msm8660_groups, + .ngroups = ARRAY_SIZE(msm8660_groups), + .ngpios = NUM_GPIO_PINGROUPS, ++ .gpio_pull = &msm_gpio_pull, + }; + + static int msm8660_pinctrl_probe(struct platform_device *pdev) +--- a/drivers/pinctrl/qcom/pinctrl-msm8916.c ++++ b/drivers/pinctrl/qcom/pinctrl-msm8916.c +@@ -967,6 +967,7 @@ static const struct msm_pinctrl_soc_data + .groups = msm8916_groups, + .ngroups = ARRAY_SIZE(msm8916_groups), + .ngpios = NUM_GPIO_PINGROUPS, ++ .gpio_pull = &msm_gpio_pull, + }; + + static int msm8916_pinctrl_probe(struct platform_device *pdev) +--- a/drivers/pinctrl/qcom/pinctrl-msm8960.c ++++ b/drivers/pinctrl/qcom/pinctrl-msm8960.c +@@ -1244,6 +1244,7 @@ static const struct msm_pinctrl_soc_data + .groups = msm8960_groups, + .ngroups = ARRAY_SIZE(msm8960_groups), + .ngpios = NUM_GPIO_PINGROUPS, ++ .gpio_pull = &msm_gpio_pull, + }; + + static int msm8960_pinctrl_probe(struct platform_device *pdev) +--- a/drivers/pinctrl/qcom/pinctrl-msm8x74.c ++++ b/drivers/pinctrl/qcom/pinctrl-msm8x74.c +@@ -1069,6 +1069,7 @@ static const struct msm_pinctrl_soc_data + .groups = msm8x74_groups, + .ngroups = ARRAY_SIZE(msm8x74_groups), + .ngpios = NUM_GPIO_PINGROUPS, ++ .gpio_pull = &msm_gpio_pull, + }; + + static int msm8x74_pinctrl_probe(struct platform_device *pdev) diff --git 
a/target/linux/ipq40xx/patches-4.9/860-qcom-mtd-nand-Add-bam_dma-support-in-qcom_nand-drive.patch b/target/linux/ipq40xx/patches-4.9/860-qcom-mtd-nand-Add-bam_dma-support-in-qcom_nand-drive.patch new file mode 100644 index 000000000..5a3c02c70 --- /dev/null +++ b/target/linux/ipq40xx/patches-4.9/860-qcom-mtd-nand-Add-bam_dma-support-in-qcom_nand-drive.patch @@ -0,0 +1,370 @@ +From 074036f9de6b8c5fc642e8e2540950f6a35aa804 Mon Sep 17 00:00:00 2001 +From: Ram Chandra Jangir +Date: Thu, 20 Apr 2017 10:31:10 +0530 +Subject: [PATCH] qcom: mtd: nand: Add bam_dma support in qcom_nand driver + +The current driver only support ADM DMA so this patch adds the +BAM DMA support in current NAND driver with compatible string +qcom,ebi2-nandc-bam. +Added bam channels and data buffers, NAND BAM uses 3 channels: +command, data tx and data rx, while ADM uses only single channel. +So this patch adds the BAM channel in device tree and using the +same in NAND driver allocation function. + +Signed-off-by: Ram Chandra Jangir +--- + .../devicetree/bindings/mtd/qcom_nandc.txt | 69 +++++++-- + drivers/mtd/nand/qcom_nandc.c | 160 +++++++++++++++++---- + 2 files changed, 190 insertions(+), 39 deletions(-) + +--- a/Documentation/devicetree/bindings/mtd/qcom_nandc.txt ++++ b/Documentation/devicetree/bindings/mtd/qcom_nandc.txt +@@ -1,21 +1,26 @@ + * Qualcomm NAND controller + + Required properties: +-- compatible: should be "qcom,ipq806x-nand" ++- compatible: "qcom,ipq806x-nand" for IPQ8064 which uses ++ ADM DMA. ++ "qcom,ebi2-nand-bam" - nand drivers using BAM DMA ++ like IPQ4019. + - reg: MMIO address range + - clocks: must contain core clock and always on clock + - clock-names: must contain "core" for the core clock and "aon" for the + always on clock + - dmas: DMA specifier, consisting of a phandle to the ADM DMA +- controller node and the channel number to be used for +- NAND. 
Refer to dma.txt and qcom_adm.txt for more details +-- dma-names: must be "rxtx" +-- qcom,cmd-crci: must contain the ADM command type CRCI block instance +- number specified for the NAND controller on the given +- platform +-- qcom,data-crci: must contain the ADM data type CRCI block instance +- number specified for the NAND controller on the given +- platform ++ or BAM DMA controller node and the channel number to ++ be used for NAND. Refer to dma.txt, qcom_adm.txt(ADM) ++ and qcom_bam_dma.txt(BAM) for more details ++- dma-names: "rxtx" - ADM ++ "tx", "rx", "cmd" - BAM ++- qcom,cmd-crci: Only required for ADM DMA. must contain the ADM command ++ type CRCI block instance number specified for the NAND ++ controller on the given platform. ++- qcom,data-crci: Only required for ADM DMA. must contain the ADM data ++ type CRCI block instance number specified for the NAND ++ controller on the given platform. + - #address-cells: <1> - subnodes give the chip-select number + - #size-cells: <0> + +@@ -44,7 +49,7 @@ partition.txt for more detail. 
+ Example: + + nand@1ac00000 { +- compatible = "qcom,ebi2-nandc"; ++ compatible = "qcom,ipq806x-nand","qcom.qcom_nand"; + reg = <0x1ac00000 0x800>; + + clocks = <&gcc EBI2_CLK>, +@@ -58,6 +63,48 @@ nand@1ac00000 { + + #address-cells = <1>; + #size-cells = <0>; ++ ++ nandcs@0 { ++ compatible = "qcom,nandcs"; ++ reg = <0>; ++ ++ nand-ecc-strength = <4>; ++ nand-ecc-step-size = <512>; ++ nand-bus-width = <8>; ++ ++ partitions { ++ compatible = "fixed-partitions"; ++ #address-cells = <1>; ++ #size-cells = <1>; ++ ++ partition@0 { ++ label = "boot-nand"; ++ reg = <0 0x58a0000>; ++ }; ++ ++ partition@58a0000 { ++ label = "fs-nand"; ++ reg = <0x58a0000 0x4000000>; ++ }; ++ }; ++ }; ++}; ++ ++nand@79B0000 { ++ compatible = "qcom,ebi2-nandc-bam"; ++ reg = <0x79B0000 0x1000>; ++ ++ clocks = <&gcc EBI2_CLK>, ++ <&gcc EBI2_AON_CLK>; ++ clock-names = "core", "aon"; ++ ++ dmas = <&qpicbam 0>, ++ <&qpicbam 1>, ++ <&qpicbam 2>; ++ dma-names = "tx", "rx", "cmd"; ++ ++ #address-cells = <1>; ++ #size-cells = <0>; + + nandcs@0 { + compatible = "qcom,nandcs"; +--- a/drivers/mtd/nand/qcom_nandc.c ++++ b/drivers/mtd/nand/qcom_nandc.c +@@ -234,6 +234,7 @@ struct nandc_regs { + * by upper layers directly + * @buf_size/count/start: markers for chip->read_buf/write_buf functions + * @reg_read_buf: local buffer for reading back registers via DMA ++ * @reg_read_buf_phys: contains dma address for register read buffer + * @reg_read_pos: marker for data read in reg_read_buf + * + * @regs: a contiguous chunk of memory for DMA register +@@ -242,7 +243,10 @@ struct nandc_regs { + * @cmd1/vld: some fixed controller register values + * @ecc_modes: supported ECC modes by the current controller, + * initialized via DT match data +- */ ++ * @bch_enabled: flag to tell whether BCH or RS ECC mode is used ++ * @dma_bam_enabled: flag to tell whether nand controller is using ++ * bam dma ++*/ + struct qcom_nand_controller { + struct nand_hw_control controller; + struct list_head host_list; +@@ -255,17 +259,28 
@@ struct qcom_nand_controller { + struct clk *core_clk; + struct clk *aon_clk; + +- struct dma_chan *chan; +- unsigned int cmd_crci; +- unsigned int data_crci; + struct list_head desc_list; ++ union { ++ struct { ++ struct dma_chan *tx_chan; ++ struct dma_chan *rx_chan; ++ struct dma_chan *cmd_chan; ++ }; ++ struct { ++ struct dma_chan *chan; ++ unsigned int cmd_crci; ++ unsigned int data_crci; ++ }; ++ }; + + u8 *data_buffer; ++ bool dma_bam_enabled; + int buf_size; + int buf_count; + int buf_start; + + __le32 *reg_read_buf; ++ dma_addr_t reg_read_buf_phys; + int reg_read_pos; + + struct nandc_regs *regs; +@@ -324,6 +339,17 @@ struct qcom_nand_host { + u32 clrreadstatus; + }; + ++/* ++ * This data type corresponds to the nand driver data which will be used at ++ * driver probe time ++ * @ecc_modes - ecc mode for nand ++ * @dma_bam_enabled - whether this driver is using bam ++ */ ++struct qcom_nand_driver_data { ++ u32 ecc_modes; ++ bool dma_bam_enabled; ++}; ++ + static inline struct qcom_nand_host *to_qcom_nand_host(struct nand_chip *chip) + { + return container_of(chip, struct qcom_nand_host, chip); +@@ -1949,16 +1975,46 @@ static int qcom_nandc_alloc(struct qcom_ + if (!nandc->regs) + return -ENOMEM; + +- nandc->reg_read_buf = devm_kzalloc(nandc->dev, +- MAX_REG_RD * sizeof(*nandc->reg_read_buf), +- GFP_KERNEL); +- if (!nandc->reg_read_buf) +- return -ENOMEM; ++ if (!nandc->dma_bam_enabled) { ++ nandc->reg_read_buf = devm_kzalloc(nandc->dev, ++ MAX_REG_RD * ++ sizeof(*nandc->reg_read_buf), ++ GFP_KERNEL); + +- nandc->chan = dma_request_slave_channel(nandc->dev, "rxtx"); +- if (!nandc->chan) { +- dev_err(nandc->dev, "failed to request slave channel\n"); +- return -ENODEV; ++ if (!nandc->reg_read_buf) ++ return -ENOMEM; ++ ++ nandc->chan = dma_request_slave_channel(nandc->dev, "rxtx"); ++ if (!nandc->chan) { ++ dev_err(nandc->dev, "failed to request slave channel\n"); ++ return -ENODEV; ++ } ++ } else { ++ nandc->reg_read_buf = dmam_alloc_coherent(nandc->dev, ++ 
MAX_REG_RD * ++ sizeof(*nandc->reg_read_buf), ++ &nandc->reg_read_buf_phys, GFP_KERNEL); ++ ++ if (!nandc->reg_read_buf) ++ return -ENOMEM; ++ ++ nandc->tx_chan = dma_request_slave_channel(nandc->dev, "tx"); ++ if (!nandc->tx_chan) { ++ dev_err(nandc->dev, "failed to request tx channel\n"); ++ return -ENODEV; ++ } ++ ++ nandc->rx_chan = dma_request_slave_channel(nandc->dev, "rx"); ++ if (!nandc->rx_chan) { ++ dev_err(nandc->dev, "failed to request rx channel\n"); ++ return -ENODEV; ++ } ++ ++ nandc->cmd_chan = dma_request_slave_channel(nandc->dev, "cmd"); ++ if (!nandc->cmd_chan) { ++ dev_err(nandc->dev, "failed to request cmd channel\n"); ++ return -ENODEV; ++ } + } + + INIT_LIST_HEAD(&nandc->desc_list); +@@ -1971,8 +2027,35 @@ static int qcom_nandc_alloc(struct qcom_ + + static void qcom_nandc_unalloc(struct qcom_nand_controller *nandc) + { +- dma_release_channel(nandc->chan); +-} ++ if (nandc->dma_bam_enabled) { ++ if (nandc->tx_chan) ++ dma_release_channel(nandc->tx_chan); ++ ++ if (nandc->rx_chan) ++ dma_release_channel(nandc->rx_chan); ++ ++ if (nandc->cmd_chan) ++ dma_release_channel(nandc->tx_chan); ++ ++ if (nandc->reg_read_buf) ++ dmam_free_coherent(nandc->dev, MAX_REG_RD * ++ sizeof(*nandc->reg_read_buf), ++ nandc->reg_read_buf, ++ nandc->reg_read_buf_phys); ++ } else { ++ if (nandc->chan) ++ dma_release_channel(nandc->chan); ++ ++ if (nandc->reg_read_buf) ++ devm_kfree(nandc->dev, nandc->reg_read_buf); ++ } ++ ++ if (nandc->regs) ++ devm_kfree(nandc->dev, nandc->regs); ++ ++ if (nandc->data_buffer) ++ devm_kfree(nandc->dev, nandc->data_buffer); ++ } + + /* one time setup of a few nand controller registers */ + static int qcom_nandc_setup(struct qcom_nand_controller *nandc) +@@ -2010,6 +2093,8 @@ static int qcom_nand_host_init(struct qc + mtd->name = devm_kasprintf(dev, GFP_KERNEL, "qcom_nand.%d", host->cs); + mtd->owner = THIS_MODULE; + mtd->dev.parent = dev; ++ mtd->priv = chip; ++ chip->priv = nandc; + + chip->cmdfunc = qcom_nandc_command; + 
chip->select_chip = qcom_nandc_select_chip; +@@ -2057,16 +2142,20 @@ static int qcom_nandc_parse_dt(struct pl + struct device_node *np = nandc->dev->of_node; + int ret; + +- ret = of_property_read_u32(np, "qcom,cmd-crci", &nandc->cmd_crci); +- if (ret) { +- dev_err(nandc->dev, "command CRCI unspecified\n"); +- return ret; +- } ++ if (!nandc->dma_bam_enabled) { ++ ret = of_property_read_u32(np, "qcom,cmd-crci", ++ &nandc->cmd_crci); ++ if (ret) { ++ dev_err(nandc->dev, "command CRCI unspecified\n"); ++ return ret; ++ } + +- ret = of_property_read_u32(np, "qcom,data-crci", &nandc->data_crci); +- if (ret) { +- dev_err(nandc->dev, "data CRCI unspecified\n"); +- return ret; ++ ret = of_property_read_u32(np, "qcom,data-crci", ++ &nandc->data_crci); ++ if (ret) { ++ dev_err(nandc->dev, "data CRCI unspecified\n"); ++ return ret; ++ } + } + + return 0; +@@ -2081,6 +2170,7 @@ static int qcom_nandc_probe(struct platf + struct device_node *dn = dev->of_node, *child; + struct resource *res; + int ret; ++ struct qcom_nand_driver_data *driver_data; + + nandc = devm_kzalloc(&pdev->dev, sizeof(*nandc), GFP_KERNEL); + if (!nandc) +@@ -2095,7 +2185,10 @@ static int qcom_nandc_probe(struct platf + return -ENODEV; + } + +- nandc->ecc_modes = (unsigned long)dev_data; ++ driver_data = (struct qcom_nand_driver_data *)dev_data; ++ ++ nandc->ecc_modes = driver_data->ecc_modes; ++ nandc->dma_bam_enabled = driver_data->dma_bam_enabled; + + res = platform_get_resource(pdev, IORESOURCE_MEM, 0); + nandc->base = devm_ioremap_resource(dev, res); +@@ -2187,7 +2280,15 @@ static int qcom_nandc_remove(struct plat + return 0; + } + +-#define EBI2_NANDC_ECC_MODES (ECC_RS_4BIT | ECC_BCH_8BIT) ++struct qcom_nand_driver_data ebi2_nandc_bam_data = { ++ .ecc_modes = (ECC_BCH_4BIT | ECC_BCH_8BIT), ++ .dma_bam_enabled = true, ++}; ++ ++struct qcom_nand_driver_data ebi2_nandc_data = { ++ .ecc_modes = (ECC_RS_4BIT | ECC_BCH_8BIT), ++ .dma_bam_enabled = false, ++}; + + /* + * data will hold a struct pointer 
containing more differences once we support +@@ -2195,7 +2296,10 @@ static int qcom_nandc_remove(struct plat + */ + static const struct of_device_id qcom_nandc_of_match[] = { + { .compatible = "qcom,ipq806x-nand", +- .data = (void *)EBI2_NANDC_ECC_MODES, ++ .data = (void *) &ebi2_nandc_data, ++ }, ++ { .compatible = "qcom,ebi2-nandc-bam", ++ .data = (void *) &ebi2_nandc_bam_data, + }, + {} + }; diff --git a/target/linux/ipq40xx/patches-4.9/861-qcom-mtd-nand-Added-bam-transaction-and-support-addi.patch b/target/linux/ipq40xx/patches-4.9/861-qcom-mtd-nand-Added-bam-transaction-and-support-addi.patch new file mode 100644 index 000000000..4b9f67220 --- /dev/null +++ b/target/linux/ipq40xx/patches-4.9/861-qcom-mtd-nand-Added-bam-transaction-and-support-addi.patch @@ -0,0 +1,1267 @@ +From 645c7805f2602569263d7ac78050b2c9e91e3377 Mon Sep 17 00:00:00 2001 +From: Ram Chandra Jangir +Date: Thu, 20 Apr 2017 10:23:00 +0530 +Subject: [PATCH] qcom: mtd: nand: Added bam transaction and support + additional CSRs + +This patch adds the following for NAND BAM DMA support + - Bam transaction which will be used for any NAND request. + It contains the array of command elements, command and + data sgl. This transaction will be resetted before every + request. + - Allocation function for NAND BAM transaction which will be + called only once at probe time. + - Reset function for NAND BAM transaction which will be called + before any new NAND request. + - Add support for additional CSRs. 
+ NAND_READ_LOCATION - page offset for reading in BAM DMA mode + NAND_ERASED_CW_DETECT_CFG - status for erased code words + NAND_BUFFER_STATUS - status for ECC + +Signed-off-by: Abhishek Sahu +Signed-off-by: Ram Chandra Jangir +--- + drivers/mtd/nand/qcom_nandc.c | 631 +++++++++++++++++++++++++++++++++++---- + include/linux/dma/qcom_bam_dma.h | 149 +++++++++ + 2 files changed, 721 insertions(+), 59 deletions(-) + create mode 100644 include/linux/dma/qcom_bam_dma.h + +--- a/drivers/mtd/nand/qcom_nandc.c ++++ b/drivers/mtd/nand/qcom_nandc.c +@@ -22,6 +22,7 @@ + #include + #include + #include ++#include + + /* NANDc reg offsets */ + #define NAND_FLASH_CMD 0x00 +@@ -53,6 +54,8 @@ + #define NAND_VERSION 0xf08 + #define NAND_READ_LOCATION_0 0xf20 + #define NAND_READ_LOCATION_1 0xf24 ++#define NAND_READ_LOCATION_2 0xf28 ++#define NAND_READ_LOCATION_3 0xf2c + + /* dummy register offsets, used by write_reg_dma */ + #define NAND_DEV_CMD1_RESTORE 0xdead +@@ -135,6 +138,11 @@ + #define ERASED_PAGE (PAGE_ALL_ERASED | PAGE_ERASED) + #define ERASED_CW (CODEWORD_ALL_ERASED | CODEWORD_ERASED) + ++/* NAND_READ_LOCATION_n bits */ ++#define READ_LOCATION_OFFSET 0 ++#define READ_LOCATION_SIZE 16 ++#define READ_LOCATION_LAST 31 ++ + /* Version Mask */ + #define NAND_VERSION_MAJOR_MASK 0xf0000000 + #define NAND_VERSION_MAJOR_SHIFT 28 +@@ -156,6 +164,9 @@ + #define NAND_DEV_CMD_VLD_VAL (READ_START_VLD | WRITE_START_VLD | \ + ERASE_START_VLD | SEQ_READ_START_VLD) + ++/* NAND_CTRL bits */ ++#define BAM_MODE_EN BIT(0) ++ + /* + * the NAND controller performs reads/writes with ECC in 516 byte chunks. 
+ * the driver calls the chunks 'step' or 'codeword' interchangeably +@@ -177,12 +188,77 @@ + #define ECC_BCH_4BIT BIT(2) + #define ECC_BCH_8BIT BIT(3) + ++/* Flags used for BAM DMA desc preparation*/ ++/* Don't set the EOT in current tx sgl */ ++#define DMA_DESC_FLAG_NO_EOT (0x0001) ++/* Set the NWD flag in current sgl */ ++#define DMA_DESC_FLAG_BAM_NWD (0x0002) ++/* Close current sgl and start writing in another sgl */ ++#define DMA_DESC_FLAG_BAM_NEXT_SGL (0x0004) ++/* ++ * Erased codeword status is being used two times in single transfer so this ++ * flag will determine the current value of erased codeword status register ++ */ ++#define DMA_DESC_ERASED_CW_SET (0x0008) ++ ++/* Returns the dma address for reg read buffer */ ++#define REG_BUF_DMA_ADDR(chip, vaddr) \ ++ ((chip)->reg_read_buf_phys + \ ++ ((uint8_t *)(vaddr) - (uint8_t *)(chip)->reg_read_buf)) ++ ++/* Returns the nand register physical address */ ++#define NAND_REG_PHYS_ADDRESS(chip, addr) \ ++ ((chip)->base_dma + (addr)) ++ ++/* command element array size in bam transaction */ ++#define BAM_CMD_ELEMENT_SIZE (256) ++/* command sgl size in bam transaction */ ++#define BAM_CMD_SGL_SIZE (256) ++/* data sgl size in bam transaction */ ++#define BAM_DATA_SGL_SIZE (128) ++ ++/* ++ * This data type corresponds to the BAM transaction which will be used for any ++ * nand request. ++ * @bam_ce - the array of bam command elements ++ * @cmd_sgl - sgl for nand bam command pipe ++ * @tx_sgl - sgl for nand bam consumer pipe ++ * @rx_sgl - sgl for nand bam producer pipe ++ * @bam_ce_index - the index in bam_ce which is available for next sgl request ++ * @pre_bam_ce_index - the index in bam_ce which marks the start position ce ++ * for current sgl. It will be used for size calculation ++ * for current sgl ++ * @cmd_sgl_cnt - no of entries in command sgl. ++ * @tx_sgl_cnt - no of entries in tx sgl. ++ * @rx_sgl_cnt - no of entries in rx sgl. 
++ */ ++struct bam_transaction { ++ struct bam_cmd_element bam_ce[BAM_CMD_ELEMENT_SIZE]; ++ struct qcom_bam_sgl cmd_sgl[BAM_CMD_SGL_SIZE]; ++ struct qcom_bam_sgl tx_sgl[BAM_DATA_SGL_SIZE]; ++ struct qcom_bam_sgl rx_sgl[BAM_DATA_SGL_SIZE]; ++ uint32_t bam_ce_index; ++ uint32_t pre_bam_ce_index; ++ uint32_t cmd_sgl_cnt; ++ uint32_t tx_sgl_cnt; ++ uint32_t rx_sgl_cnt; ++}; ++ ++/** ++ * This data type corresponds to the nand dma descriptor ++ * @list - list for desc_info ++ * @dir - DMA transfer direction ++ * @sgl - sgl which will be used for single sgl dma descriptor ++ * @dma_desc - low level dma engine descriptor ++ * @bam_desc_data - used for bam desc mappings ++ */ + struct desc_info { + struct list_head node; + + enum dma_data_direction dir; + struct scatterlist sgl; + struct dma_async_tx_descriptor *dma_desc; ++ struct qcom_bam_custom_data bam_desc_data; + }; + + /* +@@ -210,6 +286,13 @@ struct nandc_regs { + __le32 orig_vld; + + __le32 ecc_buf_cfg; ++ __le32 read_location0; ++ __le32 read_location1; ++ __le32 read_location2; ++ __le32 read_location3; ++ ++ __le32 erased_cw_detect_cfg_clr; ++ __le32 erased_cw_detect_cfg_set; + }; + + /* +@@ -225,6 +308,7 @@ struct nandc_regs { + * @aon_clk: another controller clock + * + * @chan: dma channel ++ * @bam_txn: contains the bam transaction address + * @cmd_crci: ADM DMA CRCI for command flow control + * @data_crci: ADM DMA CRCI for data flow control + * @desc_list: DMA descriptor list (list of desc_infos) +@@ -250,6 +334,7 @@ struct nandc_regs { + struct qcom_nand_controller { + struct nand_hw_control controller; + struct list_head host_list; ++ struct bam_transaction *bam_txn; + + struct device *dev; + +@@ -350,6 +435,45 @@ struct qcom_nand_driver_data { + bool dma_bam_enabled; + }; + ++/* Allocates and Initializes the BAM transaction */ ++struct bam_transaction *alloc_bam_transaction( ++ struct qcom_nand_controller *nandc) ++{ ++ struct bam_transaction *bam_txn; ++ ++ bam_txn = kzalloc(sizeof(*bam_txn), 
GFP_KERNEL); ++ ++ if (!bam_txn) ++ return NULL; ++ ++ bam_txn->bam_ce_index = 0; ++ bam_txn->pre_bam_ce_index = 0; ++ bam_txn->cmd_sgl_cnt = 0; ++ bam_txn->tx_sgl_cnt = 0; ++ bam_txn->rx_sgl_cnt = 0; ++ ++ qcom_bam_sg_init_table(bam_txn->cmd_sgl, BAM_CMD_SGL_SIZE); ++ qcom_bam_sg_init_table(bam_txn->tx_sgl, BAM_DATA_SGL_SIZE); ++ qcom_bam_sg_init_table(bam_txn->rx_sgl, BAM_DATA_SGL_SIZE); ++ ++ return bam_txn; ++} ++ ++/* Clears the BAM transaction index */ ++void clear_bam_transaction(struct qcom_nand_controller *nandc) ++{ ++ struct bam_transaction *bam_txn = nandc->bam_txn; ++ ++ if (!nandc->dma_bam_enabled) ++ return; ++ ++ bam_txn->bam_ce_index = 0; ++ bam_txn->pre_bam_ce_index = 0; ++ bam_txn->cmd_sgl_cnt = 0; ++ bam_txn->tx_sgl_cnt = 0; ++ bam_txn->rx_sgl_cnt = 0; ++} ++ + static inline struct qcom_nand_host *to_qcom_nand_host(struct nand_chip *chip) + { + return container_of(chip, struct qcom_nand_host, chip); +@@ -406,6 +530,16 @@ static __le32 *offset_to_nandc_reg(struc + return ®s->orig_vld; + case NAND_EBI2_ECC_BUF_CFG: + return ®s->ecc_buf_cfg; ++ case NAND_BUFFER_STATUS: ++ return ®s->clrreadstatus; ++ case NAND_READ_LOCATION_0: ++ return ®s->read_location0; ++ case NAND_READ_LOCATION_1: ++ return ®s->read_location1; ++ case NAND_READ_LOCATION_2: ++ return ®s->read_location2; ++ case NAND_READ_LOCATION_3: ++ return ®s->read_location3; + default: + return NULL; + } +@@ -447,7 +581,7 @@ static void update_rw_regs(struct qcom_n + { + struct nand_chip *chip = &host->chip; + struct qcom_nand_controller *nandc = get_qcom_nand_controller(chip); +- u32 cmd, cfg0, cfg1, ecc_bch_cfg; ++ u32 cmd, cfg0, cfg1, ecc_bch_cfg, read_location0; + + if (read) { + if (host->use_ecc) +@@ -464,12 +598,20 @@ static void update_rw_regs(struct qcom_n + + cfg1 = host->cfg1; + ecc_bch_cfg = host->ecc_bch_cfg; ++ if (read) ++ read_location0 = (0 << READ_LOCATION_OFFSET) | ++ (host->cw_data << READ_LOCATION_SIZE) | ++ (1 << READ_LOCATION_LAST); + } else { + cfg0 = (host->cfg0_raw 
& ~(7U << CW_PER_PAGE)) | + (num_cw - 1) << CW_PER_PAGE; + + cfg1 = host->cfg1_raw; + ecc_bch_cfg = 1 << ECC_CFG_ECC_DISABLE; ++ if (read) ++ read_location0 = (0 << READ_LOCATION_OFFSET) | ++ (host->cw_size << READ_LOCATION_SIZE) | ++ (1 << READ_LOCATION_LAST); + } + + nandc_set_reg(nandc, NAND_FLASH_CMD, cmd); +@@ -480,8 +622,104 @@ static void update_rw_regs(struct qcom_n + nandc_set_reg(nandc, NAND_FLASH_STATUS, host->clrflashstatus); + nandc_set_reg(nandc, NAND_READ_STATUS, host->clrreadstatus); + nandc_set_reg(nandc, NAND_EXEC_CMD, 1); ++ ++ if (read) ++ nandc_set_reg(nandc, NAND_READ_LOCATION_0, read_location0); ++} ++ ++/* ++ * Prepares the command descriptor for BAM DMA which will be used for NAND ++ * register read and write. The command descriptor requires the command ++ * to be formed in command element type so this function uses the command ++ * element from bam transaction ce array and fills the same with required ++ * data. A single SGL can contain multiple command elements so ++ * DMA_DESC_FLAG_BAM_NEXT_SGL will be used for starting the separate SGL ++ * after the current command element. 
++ */ ++static int prep_dma_desc_command(struct qcom_nand_controller *nandc, bool read, ++ int reg_off, const void *vaddr, ++ int size, unsigned int flags) ++{ ++ int bam_ce_size; ++ int i; ++ struct bam_cmd_element *bam_ce_buffer; ++ struct bam_transaction *bam_txn = nandc->bam_txn; ++ ++ bam_ce_buffer = &bam_txn->bam_ce[bam_txn->bam_ce_index]; ++ ++ /* fill the command desc */ ++ for (i = 0; i < size; i++) { ++ if (read) { ++ qcom_prep_bam_ce(&bam_ce_buffer[i], ++ NAND_REG_PHYS_ADDRESS(nandc, reg_off + 4 * i), ++ BAM_READ_COMMAND, ++ REG_BUF_DMA_ADDR(nandc, ++ (unsigned int *)vaddr + i)); ++ } else { ++ qcom_prep_bam_ce(&bam_ce_buffer[i], ++ NAND_REG_PHYS_ADDRESS(nandc, reg_off + 4 * i), ++ BAM_WRITE_COMMAND, ++ *((unsigned int *)vaddr + i)); ++ } ++ } ++ ++ /* use the separate sgl after this command */ ++ if (flags & DMA_DESC_FLAG_BAM_NEXT_SGL) { ++ bam_ce_buffer = &bam_txn->bam_ce[bam_txn->pre_bam_ce_index]; ++ bam_txn->bam_ce_index += size; ++ bam_ce_size = (bam_txn->bam_ce_index - ++ bam_txn->pre_bam_ce_index) * ++ sizeof(struct bam_cmd_element); ++ sg_set_buf(&bam_txn->cmd_sgl[bam_txn->cmd_sgl_cnt].sgl, ++ bam_ce_buffer, ++ bam_ce_size); ++ if (flags & DMA_DESC_FLAG_BAM_NWD) ++ bam_txn->cmd_sgl[bam_txn->cmd_sgl_cnt].dma_flags = ++ DESC_FLAG_NWD | DESC_FLAG_CMD; ++ else ++ bam_txn->cmd_sgl[bam_txn->cmd_sgl_cnt].dma_flags = ++ DESC_FLAG_CMD; ++ ++ bam_txn->cmd_sgl_cnt++; ++ bam_txn->pre_bam_ce_index = bam_txn->bam_ce_index; ++ } else { ++ bam_txn->bam_ce_index += size; ++ } ++ ++ return 0; + } + ++/* ++ * Prepares the data descriptor for BAM DMA which will be used for NAND ++ * data read and write. 
++ */ ++static int prep_dma_desc_data_bam(struct qcom_nand_controller *nandc, bool read, ++ int reg_off, const void *vaddr, ++ int size, unsigned int flags) ++{ ++ struct bam_transaction *bam_txn = nandc->bam_txn; ++ ++ if (read) { ++ sg_set_buf(&bam_txn->rx_sgl[bam_txn->rx_sgl_cnt].sgl, ++ vaddr, size); ++ bam_txn->rx_sgl[bam_txn->rx_sgl_cnt].dma_flags = 0; ++ bam_txn->rx_sgl_cnt++; ++ } else { ++ sg_set_buf(&bam_txn->tx_sgl[bam_txn->tx_sgl_cnt].sgl, ++ vaddr, size); ++ if (flags & DMA_DESC_FLAG_NO_EOT) ++ bam_txn->tx_sgl[bam_txn->tx_sgl_cnt].dma_flags = 0; ++ else ++ bam_txn->tx_sgl[bam_txn->tx_sgl_cnt].dma_flags = ++ DESC_FLAG_EOT; ++ ++ bam_txn->tx_sgl_cnt++; ++ } ++ ++ return 0; ++} ++ ++/* Prepares the dma desciptor for adm dma engine */ + static int prep_dma_desc(struct qcom_nand_controller *nandc, bool read, + int reg_off, const void *vaddr, int size, + bool flow_control) +@@ -560,7 +798,7 @@ err: + * @num_regs: number of registers to read + */ + static int read_reg_dma(struct qcom_nand_controller *nandc, int first, +- int num_regs) ++ int num_regs, unsigned int flags) + { + bool flow_control = false; + void *vaddr; +@@ -569,10 +807,18 @@ static int read_reg_dma(struct qcom_nand + if (first == NAND_READ_ID || first == NAND_FLASH_STATUS) + flow_control = true; + +- size = num_regs * sizeof(u32); + vaddr = nandc->reg_read_buf + nandc->reg_read_pos; + nandc->reg_read_pos += num_regs; + ++ if (nandc->dma_bam_enabled) { ++ size = num_regs; ++ ++ return prep_dma_desc_command(nandc, true, first, vaddr, size, ++ flags); ++ } ++ ++ size = num_regs * sizeof(u32); ++ + return prep_dma_desc(nandc, true, first, vaddr, size, flow_control); + } + +@@ -584,7 +830,7 @@ static int read_reg_dma(struct qcom_nand + * @num_regs: number of registers to write + */ + static int write_reg_dma(struct qcom_nand_controller *nandc, int first, +- int num_regs) ++ int num_regs, unsigned int flags) + { + bool flow_control = false; + struct nandc_regs *regs = nandc->regs; +@@ -596,12 
+842,29 @@ static int write_reg_dma(struct qcom_nan + if (first == NAND_FLASH_CMD) + flow_control = true; + ++ if (first == NAND_ERASED_CW_DETECT_CFG) { ++ if (flags & DMA_DESC_ERASED_CW_SET) ++ vaddr = ®s->erased_cw_detect_cfg_set; ++ else ++ vaddr = ®s->erased_cw_detect_cfg_clr; ++ } ++ ++ if (first == NAND_EXEC_CMD) ++ flags |= DMA_DESC_FLAG_BAM_NWD; ++ + if (first == NAND_DEV_CMD1_RESTORE) + first = NAND_DEV_CMD1; + + if (first == NAND_DEV_CMD_VLD_RESTORE) + first = NAND_DEV_CMD_VLD; + ++ if (nandc->dma_bam_enabled) { ++ size = num_regs; ++ ++ return prep_dma_desc_command(nandc, false, first, vaddr, size, ++ flags); ++ } ++ + size = num_regs * sizeof(u32); + + return prep_dma_desc(nandc, false, first, vaddr, size, flow_control); +@@ -616,8 +879,12 @@ static int write_reg_dma(struct qcom_nan + * @size: DMA transaction size in bytes + */ + static int read_data_dma(struct qcom_nand_controller *nandc, int reg_off, +- const u8 *vaddr, int size) ++ const u8 *vaddr, int size, unsigned int flags) + { ++ if (nandc->dma_bam_enabled) ++ return prep_dma_desc_data_bam(nandc, true, reg_off, vaddr, size, ++ flags); ++ + return prep_dma_desc(nandc, true, reg_off, vaddr, size, false); + } + +@@ -630,8 +897,12 @@ static int read_data_dma(struct qcom_nan + * @size: DMA transaction size in bytes + */ + static int write_data_dma(struct qcom_nand_controller *nandc, int reg_off, +- const u8 *vaddr, int size) ++ const u8 *vaddr, int size, unsigned int flags) + { ++ if (nandc->dma_bam_enabled) ++ return prep_dma_desc_data_bam(nandc, false, reg_off, vaddr, ++ size, flags); ++ + return prep_dma_desc(nandc, false, reg_off, vaddr, size, false); + } + +@@ -641,14 +912,57 @@ static int write_data_dma(struct qcom_na + */ + static void config_cw_read(struct qcom_nand_controller *nandc) + { +- write_reg_dma(nandc, NAND_FLASH_CMD, 3); +- write_reg_dma(nandc, NAND_DEV0_CFG0, 3); +- write_reg_dma(nandc, NAND_EBI2_ECC_BUF_CFG, 1); + +- write_reg_dma(nandc, NAND_EXEC_CMD, 1); ++ write_reg_dma(nandc, 
NAND_FLASH_CMD, 3, 0); ++ write_reg_dma(nandc, NAND_DEV0_CFG0, 3, 0); ++ write_reg_dma(nandc, NAND_EBI2_ECC_BUF_CFG, 1, 0); ++ ++ write_reg_dma(nandc, NAND_ERASED_CW_DETECT_CFG, 1, 0); ++ write_reg_dma(nandc, NAND_ERASED_CW_DETECT_CFG, 1, ++ DMA_DESC_ERASED_CW_SET); ++ if (nandc->dma_bam_enabled) ++ write_reg_dma(nandc, NAND_READ_LOCATION_0, 1, ++ DMA_DESC_FLAG_BAM_NEXT_SGL); + +- read_reg_dma(nandc, NAND_FLASH_STATUS, 2); +- read_reg_dma(nandc, NAND_ERASED_CW_DETECT_STATUS, 1); ++ ++ write_reg_dma(nandc, NAND_EXEC_CMD, 1, DMA_DESC_FLAG_BAM_NWD | ++ DMA_DESC_FLAG_BAM_NEXT_SGL); ++ ++ read_reg_dma(nandc, NAND_FLASH_STATUS, 2, 0); ++ read_reg_dma(nandc, NAND_ERASED_CW_DETECT_STATUS, 1, ++ DMA_DESC_FLAG_BAM_NEXT_SGL); ++} ++ ++/* ++ * Helpers to prepare DMA descriptors for configuring registers ++ * before reading a NAND page with BAM. ++ */ ++static void config_bam_page_read(struct qcom_nand_controller *nandc) ++{ ++ write_reg_dma(nandc, NAND_FLASH_CMD, 3, 0); ++ write_reg_dma(nandc, NAND_DEV0_CFG0, 3, 0); ++ write_reg_dma(nandc, NAND_EBI2_ECC_BUF_CFG, 1, 0); ++ write_reg_dma(nandc, NAND_ERASED_CW_DETECT_CFG, 1, 0); ++ write_reg_dma(nandc, NAND_ERASED_CW_DETECT_CFG, 1, ++ DMA_DESC_ERASED_CW_SET | ++ DMA_DESC_FLAG_BAM_NEXT_SGL); ++} ++ ++/* ++ * Helpers to prepare DMA descriptors for configuring registers ++ * before reading each codeword in NAND page with BAM. 
++ */ ++static void config_bam_cw_read(struct qcom_nand_controller *nandc) ++{ ++ if (nandc->dma_bam_enabled) ++ write_reg_dma(nandc, NAND_READ_LOCATION_0, 4, 0); ++ ++ write_reg_dma(nandc, NAND_FLASH_CMD, 1, DMA_DESC_FLAG_BAM_NEXT_SGL); ++ write_reg_dma(nandc, NAND_EXEC_CMD, 1, DMA_DESC_FLAG_BAM_NEXT_SGL); ++ ++ read_reg_dma(nandc, NAND_FLASH_STATUS, 2, 0); ++ read_reg_dma(nandc, NAND_ERASED_CW_DETECT_STATUS, 1, ++ DMA_DESC_FLAG_BAM_NEXT_SGL); + } + + /* +@@ -657,19 +971,20 @@ static void config_cw_read(struct qcom_n + */ + static void config_cw_write_pre(struct qcom_nand_controller *nandc) + { +- write_reg_dma(nandc, NAND_FLASH_CMD, 3); +- write_reg_dma(nandc, NAND_DEV0_CFG0, 3); +- write_reg_dma(nandc, NAND_EBI2_ECC_BUF_CFG, 1); ++ write_reg_dma(nandc, NAND_FLASH_CMD, 3, 0); ++ write_reg_dma(nandc, NAND_DEV0_CFG0, 3, 0); ++ write_reg_dma(nandc, NAND_EBI2_ECC_BUF_CFG, 1, ++ DMA_DESC_FLAG_BAM_NEXT_SGL); + } + + static void config_cw_write_post(struct qcom_nand_controller *nandc) + { +- write_reg_dma(nandc, NAND_EXEC_CMD, 1); ++ write_reg_dma(nandc, NAND_EXEC_CMD, 1, DMA_DESC_FLAG_BAM_NEXT_SGL); + +- read_reg_dma(nandc, NAND_FLASH_STATUS, 1); ++ read_reg_dma(nandc, NAND_FLASH_STATUS, 1, DMA_DESC_FLAG_BAM_NEXT_SGL); + +- write_reg_dma(nandc, NAND_FLASH_STATUS, 1); +- write_reg_dma(nandc, NAND_READ_STATUS, 1); ++ write_reg_dma(nandc, NAND_FLASH_STATUS, 1, 0); ++ write_reg_dma(nandc, NAND_READ_STATUS, 1, DMA_DESC_FLAG_BAM_NEXT_SGL); + } + + /* +@@ -683,6 +998,8 @@ static int nandc_param(struct qcom_nand_ + struct nand_chip *chip = &host->chip; + struct qcom_nand_controller *nandc = get_qcom_nand_controller(chip); + ++ clear_bam_transaction(nandc); ++ + /* + * NAND_CMD_PARAM is called before we know much about the FLASH chip + * in use. 
we configure the controller to perform a raw read of 512 +@@ -715,9 +1032,13 @@ static int nandc_param(struct qcom_nand_ + + nandc_set_reg(nandc, NAND_DEV_CMD1_RESTORE, nandc->cmd1); + nandc_set_reg(nandc, NAND_DEV_CMD_VLD_RESTORE, nandc->vld); ++ nandc_set_reg(nandc, NAND_READ_LOCATION_0, ++ (0 << READ_LOCATION_OFFSET) | ++ (512 << READ_LOCATION_SIZE) | ++ (1 << READ_LOCATION_LAST)); + +- write_reg_dma(nandc, NAND_DEV_CMD_VLD, 1); +- write_reg_dma(nandc, NAND_DEV_CMD1, 1); ++ write_reg_dma(nandc, NAND_DEV_CMD_VLD, 1, 0); ++ write_reg_dma(nandc, NAND_DEV_CMD1, 1, DMA_DESC_FLAG_BAM_NEXT_SGL); + + nandc->buf_count = 512; + memset(nandc->data_buffer, 0xff, nandc->buf_count); +@@ -725,11 +1046,12 @@ static int nandc_param(struct qcom_nand_ + config_cw_read(nandc); + + read_data_dma(nandc, FLASH_BUF_ACC, nandc->data_buffer, +- nandc->buf_count); ++ nandc->buf_count, 0); + + /* restore CMD1 and VLD regs */ +- write_reg_dma(nandc, NAND_DEV_CMD1_RESTORE, 1); +- write_reg_dma(nandc, NAND_DEV_CMD_VLD_RESTORE, 1); ++ write_reg_dma(nandc, NAND_DEV_CMD1_RESTORE, 1, 0); ++ write_reg_dma(nandc, NAND_DEV_CMD_VLD_RESTORE, 1, ++ DMA_DESC_FLAG_BAM_NEXT_SGL); + + return 0; + } +@@ -740,6 +1062,8 @@ static int erase_block(struct qcom_nand_ + struct nand_chip *chip = &host->chip; + struct qcom_nand_controller *nandc = get_qcom_nand_controller(chip); + ++ clear_bam_transaction(nandc); ++ + nandc_set_reg(nandc, NAND_FLASH_CMD, + BLOCK_ERASE | PAGE_ACC | LAST_PAGE); + nandc_set_reg(nandc, NAND_ADDR0, page_addr); +@@ -751,14 +1075,15 @@ static int erase_block(struct qcom_nand_ + nandc_set_reg(nandc, NAND_FLASH_STATUS, host->clrflashstatus); + nandc_set_reg(nandc, NAND_READ_STATUS, host->clrreadstatus); + +- write_reg_dma(nandc, NAND_FLASH_CMD, 3); +- write_reg_dma(nandc, NAND_DEV0_CFG0, 2); +- write_reg_dma(nandc, NAND_EXEC_CMD, 1); + +- read_reg_dma(nandc, NAND_FLASH_STATUS, 1); ++ write_reg_dma(nandc, NAND_FLASH_CMD, 3, DMA_DESC_FLAG_BAM_NEXT_SGL); ++ write_reg_dma(nandc, NAND_DEV0_CFG0, 
2, DMA_DESC_FLAG_BAM_NEXT_SGL); ++ write_reg_dma(nandc, NAND_EXEC_CMD, 1, DMA_DESC_FLAG_BAM_NEXT_SGL); ++ ++ read_reg_dma(nandc, NAND_FLASH_STATUS, 1, DMA_DESC_FLAG_BAM_NEXT_SGL); + +- write_reg_dma(nandc, NAND_FLASH_STATUS, 1); +- write_reg_dma(nandc, NAND_READ_STATUS, 1); ++ write_reg_dma(nandc, NAND_FLASH_STATUS, 1, 0); ++ write_reg_dma(nandc, NAND_READ_STATUS, 1, DMA_DESC_FLAG_BAM_NEXT_SGL); + + return 0; + } +@@ -772,16 +1097,19 @@ static int read_id(struct qcom_nand_host + if (column == -1) + return 0; + ++ clear_bam_transaction(nandc); ++ + nandc_set_reg(nandc, NAND_FLASH_CMD, FETCH_ID); + nandc_set_reg(nandc, NAND_ADDR0, column); + nandc_set_reg(nandc, NAND_ADDR1, 0); +- nandc_set_reg(nandc, NAND_FLASH_CHIP_SELECT, DM_EN); ++ nandc_set_reg(nandc, NAND_FLASH_CHIP_SELECT, ++ nandc->dma_bam_enabled ? 0 : DM_EN); + nandc_set_reg(nandc, NAND_EXEC_CMD, 1); + +- write_reg_dma(nandc, NAND_FLASH_CMD, 4); +- write_reg_dma(nandc, NAND_EXEC_CMD, 1); ++ write_reg_dma(nandc, NAND_FLASH_CMD, 4, DMA_DESC_FLAG_BAM_NEXT_SGL); ++ write_reg_dma(nandc, NAND_EXEC_CMD, 1, DMA_DESC_FLAG_BAM_NEXT_SGL); + +- read_reg_dma(nandc, NAND_READ_ID, 1); ++ read_reg_dma(nandc, NAND_READ_ID, 1, DMA_DESC_FLAG_BAM_NEXT_SGL); + + return 0; + } +@@ -792,28 +1120,108 @@ static int reset(struct qcom_nand_host * + struct nand_chip *chip = &host->chip; + struct qcom_nand_controller *nandc = get_qcom_nand_controller(chip); + ++ clear_bam_transaction(nandc); ++ + nandc_set_reg(nandc, NAND_FLASH_CMD, RESET_DEVICE); + nandc_set_reg(nandc, NAND_EXEC_CMD, 1); + +- write_reg_dma(nandc, NAND_FLASH_CMD, 1); +- write_reg_dma(nandc, NAND_EXEC_CMD, 1); ++ write_reg_dma(nandc, NAND_FLASH_CMD, 1, DMA_DESC_FLAG_BAM_NEXT_SGL); ++ write_reg_dma(nandc, NAND_EXEC_CMD, 1, DMA_DESC_FLAG_BAM_NEXT_SGL); + +- read_reg_dma(nandc, NAND_FLASH_STATUS, 1); ++ read_reg_dma(nandc, NAND_FLASH_STATUS, 1, DMA_DESC_FLAG_BAM_NEXT_SGL); + + return 0; + } + ++static int prepare_bam_async_desc(struct qcom_nand_controller *nandc, ++ struct 
dma_chan *chan, ++ struct qcom_bam_sgl *bam_sgl, ++ int sgl_cnt, ++ enum dma_transfer_direction direction) ++{ ++ struct desc_info *desc; ++ struct dma_async_tx_descriptor *dma_desc; ++ ++ if (!qcom_bam_map_sg(nandc->dev, bam_sgl, sgl_cnt, direction)) { ++ dev_err(nandc->dev, "failure in mapping sgl\n"); ++ return -ENOMEM; ++ } ++ ++ desc = kzalloc(sizeof(*desc), GFP_KERNEL); ++ if (!desc) { ++ qcom_bam_unmap_sg(nandc->dev, bam_sgl, sgl_cnt, direction); ++ return -ENOMEM; ++ } ++ ++ ++ desc->bam_desc_data.dir = direction; ++ desc->bam_desc_data.sgl_cnt = sgl_cnt; ++ desc->bam_desc_data.bam_sgl = bam_sgl; ++ ++ dma_desc = dmaengine_prep_dma_custom_mapping(chan, ++ &desc->bam_desc_data, ++ 0); ++ ++ if (!dma_desc) { ++ dev_err(nandc->dev, "failure in cmd prep desc\n"); ++ qcom_bam_unmap_sg(nandc->dev, bam_sgl, sgl_cnt, direction); ++ kfree(desc); ++ return -EINVAL; ++ } ++ ++ desc->dma_desc = dma_desc; ++ ++ list_add_tail(&desc->node, &nandc->desc_list); ++ ++ return 0; ++ ++} ++ + /* helpers to submit/free our list of dma descriptors */ + static int submit_descs(struct qcom_nand_controller *nandc) + { + struct desc_info *desc; + dma_cookie_t cookie = 0; ++ struct bam_transaction *bam_txn = nandc->bam_txn; ++ int r; ++ ++ if (nandc->dma_bam_enabled) { ++ if (bam_txn->rx_sgl_cnt) { ++ r = prepare_bam_async_desc(nandc, nandc->rx_chan, ++ bam_txn->rx_sgl, bam_txn->rx_sgl_cnt, ++ DMA_DEV_TO_MEM); ++ if (r) ++ return r; ++ } ++ ++ if (bam_txn->tx_sgl_cnt) { ++ r = prepare_bam_async_desc(nandc, nandc->tx_chan, ++ bam_txn->tx_sgl, bam_txn->tx_sgl_cnt, ++ DMA_MEM_TO_DEV); ++ if (r) ++ return r; ++ } ++ ++ r = prepare_bam_async_desc(nandc, nandc->cmd_chan, ++ bam_txn->cmd_sgl, bam_txn->cmd_sgl_cnt, ++ DMA_MEM_TO_DEV); ++ if (r) ++ return r; ++ } + + list_for_each_entry(desc, &nandc->desc_list, node) + cookie = dmaengine_submit(desc->dma_desc); + +- if (dma_sync_wait(nandc->chan, cookie) != DMA_COMPLETE) +- return -ETIMEDOUT; ++ if (nandc->dma_bam_enabled) { ++ 
dma_async_issue_pending(nandc->tx_chan); ++ dma_async_issue_pending(nandc->rx_chan); ++ ++ if (dma_sync_wait(nandc->cmd_chan, cookie) != DMA_COMPLETE) ++ return -ETIMEDOUT; ++ } else { ++ if (dma_sync_wait(nandc->chan, cookie) != DMA_COMPLETE) ++ return -ETIMEDOUT; ++ } + + return 0; + } +@@ -824,7 +1232,16 @@ static void free_descs(struct qcom_nand_ + + list_for_each_entry_safe(desc, n, &nandc->desc_list, node) { + list_del(&desc->node); +- dma_unmap_sg(nandc->dev, &desc->sgl, 1, desc->dir); ++ ++ if (nandc->dma_bam_enabled) ++ qcom_bam_unmap_sg(nandc->dev, ++ desc->bam_desc_data.bam_sgl, ++ desc->bam_desc_data.sgl_cnt, ++ desc->bam_desc_data.dir); ++ else ++ dma_unmap_sg(nandc->dev, &desc->sgl, 1, ++ desc->dir); ++ + kfree(desc); + } + } +@@ -1135,6 +1552,9 @@ static int read_page_ecc(struct qcom_nan + struct nand_ecc_ctrl *ecc = &chip->ecc; + int i, ret; + ++ if (nandc->dma_bam_enabled) ++ config_bam_page_read(nandc); ++ + /* queue cmd descs for each codeword */ + for (i = 0; i < ecc->steps; i++) { + int data_size, oob_size; +@@ -1148,11 +1568,36 @@ static int read_page_ecc(struct qcom_nan + oob_size = host->ecc_bytes_hw + host->spare_bytes; + } + +- config_cw_read(nandc); ++ if (nandc->dma_bam_enabled) { ++ if (data_buf && oob_buf) { ++ nandc_set_reg(nandc, NAND_READ_LOCATION_0, ++ (0 << READ_LOCATION_OFFSET) | ++ (data_size << READ_LOCATION_SIZE) | ++ (0 << READ_LOCATION_LAST)); ++ nandc_set_reg(nandc, NAND_READ_LOCATION_1, ++ (data_size << READ_LOCATION_OFFSET) | ++ (oob_size << READ_LOCATION_SIZE) | ++ (1 << READ_LOCATION_LAST)); ++ } else if (data_buf) { ++ nandc_set_reg(nandc, NAND_READ_LOCATION_0, ++ (0 << READ_LOCATION_OFFSET) | ++ (data_size << READ_LOCATION_SIZE) | ++ (1 << READ_LOCATION_LAST)); ++ } else { ++ nandc_set_reg(nandc, NAND_READ_LOCATION_0, ++ (data_size << READ_LOCATION_OFFSET) | ++ (oob_size << READ_LOCATION_SIZE) | ++ (1 << READ_LOCATION_LAST)); ++ } ++ ++ config_bam_cw_read(nandc); ++ } else { ++ config_cw_read(nandc); ++ } + + if 
(data_buf) + read_data_dma(nandc, FLASH_BUF_ACC, data_buf, +- data_size); ++ data_size, 0); + + /* + * when ecc is enabled, the controller doesn't read the real +@@ -1168,7 +1613,7 @@ static int read_page_ecc(struct qcom_nan + *oob_buf++ = 0xff; + + read_data_dma(nandc, FLASH_BUF_ACC + data_size, +- oob_buf, oob_size); ++ oob_buf, oob_size, 0); + } + + if (data_buf) +@@ -1207,10 +1652,14 @@ static int copy_last_cw(struct qcom_nand + + set_address(host, host->cw_size * (ecc->steps - 1), page); + update_rw_regs(host, 1, true); ++ nandc_set_reg(nandc, NAND_READ_LOCATION_0, ++ (0 << READ_LOCATION_OFFSET) | ++ (size << READ_LOCATION_SIZE) | ++ (1 << READ_LOCATION_LAST)); + + config_cw_read(nandc); + +- read_data_dma(nandc, FLASH_BUF_ACC, nandc->data_buffer, size); ++ read_data_dma(nandc, FLASH_BUF_ACC, nandc->data_buffer, size, 0); + + ret = submit_descs(nandc); + if (ret) +@@ -1233,6 +1682,7 @@ static int qcom_nandc_read_page(struct m + data_buf = buf; + oob_buf = oob_required ? chip->oob_poi : NULL; + ++ clear_bam_transaction(nandc); + ret = read_page_ecc(host, data_buf, oob_buf); + if (ret) { + dev_err(nandc->dev, "failure to read page\n"); +@@ -1252,13 +1702,19 @@ static int qcom_nandc_read_page_raw(stru + u8 *data_buf, *oob_buf; + struct nand_ecc_ctrl *ecc = &chip->ecc; + int i, ret; ++ int read_location; + + data_buf = buf; + oob_buf = chip->oob_poi; + + host->use_ecc = false; ++ ++ clear_bam_transaction(nandc); + update_rw_regs(host, ecc->steps, true); + ++ if (nandc->dma_bam_enabled) ++ config_bam_page_read(nandc); ++ + for (i = 0; i < ecc->steps; i++) { + int data_size1, data_size2, oob_size1, oob_size2; + int reg_off = FLASH_BUF_ACC; +@@ -1276,21 +1732,49 @@ static int qcom_nandc_read_page_raw(stru + oob_size2 = host->ecc_bytes_hw + host->spare_bytes; + } + +- config_cw_read(nandc); ++ if (nandc->dma_bam_enabled) { ++ read_location = 0; ++ nandc_set_reg(nandc, NAND_READ_LOCATION_0, ++ (read_location << READ_LOCATION_OFFSET) | ++ (data_size1 << 
READ_LOCATION_SIZE) | ++ (0 << READ_LOCATION_LAST)); ++ read_location += data_size1; ++ ++ nandc_set_reg(nandc, NAND_READ_LOCATION_1, ++ (read_location << READ_LOCATION_OFFSET) | ++ (oob_size1 << READ_LOCATION_SIZE) | ++ (0 << READ_LOCATION_LAST)); ++ read_location += oob_size1; ++ ++ nandc_set_reg(nandc, NAND_READ_LOCATION_2, ++ (read_location << READ_LOCATION_OFFSET) | ++ (data_size2 << READ_LOCATION_SIZE) | ++ (0 << READ_LOCATION_LAST)); ++ read_location += data_size2; ++ ++ nandc_set_reg(nandc, NAND_READ_LOCATION_3, ++ (read_location << READ_LOCATION_OFFSET) | ++ (oob_size2 << READ_LOCATION_SIZE) | ++ (1 << READ_LOCATION_LAST)); ++ ++ config_bam_cw_read(nandc); ++ } else { ++ config_cw_read(nandc); ++ } + +- read_data_dma(nandc, reg_off, data_buf, data_size1); ++ read_data_dma(nandc, reg_off, data_buf, data_size1, 0); + reg_off += data_size1; + data_buf += data_size1; + +- read_data_dma(nandc, reg_off, oob_buf, oob_size1); ++ read_data_dma(nandc, reg_off, oob_buf, oob_size1, 0); + reg_off += oob_size1; + oob_buf += oob_size1; + +- read_data_dma(nandc, reg_off, data_buf, data_size2); ++ read_data_dma(nandc, reg_off, data_buf, data_size2, 0); + reg_off += data_size2; + data_buf += data_size2; + +- read_data_dma(nandc, reg_off, oob_buf, oob_size2); ++ read_data_dma(nandc, reg_off, oob_buf, oob_size2, 0); + oob_buf += oob_size2; + } + +@@ -1313,6 +1797,7 @@ static int qcom_nandc_read_oob(struct mt + int ret; + + clear_read_regs(nandc); ++ clear_bam_transaction(nandc); + + host->use_ecc = true; + set_address(host, 0, page); +@@ -1336,6 +1821,7 @@ static int qcom_nandc_write_page(struct + int i, ret; + + clear_read_regs(nandc); ++ clear_bam_transaction(nandc); + + data_buf = (u8 *)buf; + oob_buf = chip->oob_poi; +@@ -1357,7 +1843,8 @@ static int qcom_nandc_write_page(struct + + config_cw_write_pre(nandc); + +- write_data_dma(nandc, FLASH_BUF_ACC, data_buf, data_size); ++ write_data_dma(nandc, FLASH_BUF_ACC, data_buf, data_size, ++ i == (ecc->steps - 1) ? 
DMA_DESC_FLAG_NO_EOT : 0); + + /* + * when ECC is enabled, we don't really need to write anything +@@ -1370,7 +1857,7 @@ static int qcom_nandc_write_page(struct + oob_buf += host->bbm_size; + + write_data_dma(nandc, FLASH_BUF_ACC + data_size, +- oob_buf, oob_size); ++ oob_buf, oob_size, 0); + } + + config_cw_write_post(nandc); +@@ -1400,6 +1887,7 @@ static int qcom_nandc_write_page_raw(str + int i, ret; + + clear_read_regs(nandc); ++ clear_bam_transaction(nandc); + + data_buf = (u8 *)buf; + oob_buf = chip->oob_poi; +@@ -1426,19 +1914,22 @@ static int qcom_nandc_write_page_raw(str + + config_cw_write_pre(nandc); + +- write_data_dma(nandc, reg_off, data_buf, data_size1); ++ write_data_dma(nandc, reg_off, data_buf, data_size1, ++ DMA_DESC_FLAG_NO_EOT); + reg_off += data_size1; + data_buf += data_size1; + +- write_data_dma(nandc, reg_off, oob_buf, oob_size1); ++ write_data_dma(nandc, reg_off, oob_buf, oob_size1, ++ DMA_DESC_FLAG_NO_EOT); + reg_off += oob_size1; + oob_buf += oob_size1; + +- write_data_dma(nandc, reg_off, data_buf, data_size2); ++ write_data_dma(nandc, reg_off, data_buf, data_size2, ++ DMA_DESC_FLAG_NO_EOT); + reg_off += data_size2; + data_buf += data_size2; + +- write_data_dma(nandc, reg_off, oob_buf, oob_size2); ++ write_data_dma(nandc, reg_off, oob_buf, oob_size2, 0); + oob_buf += oob_size2; + + config_cw_write_post(nandc); +@@ -1474,6 +1965,7 @@ static int qcom_nandc_write_oob(struct m + + host->use_ecc = true; + ++ clear_bam_transaction(nandc); + ret = copy_last_cw(host, page); + if (ret) + return ret; +@@ -1493,7 +1985,7 @@ static int qcom_nandc_write_oob(struct m + + config_cw_write_pre(nandc); + write_data_dma(nandc, FLASH_BUF_ACC, nandc->data_buffer, +- data_size + oob_size); ++ data_size + oob_size, 0); + config_cw_write_post(nandc); + + ret = submit_descs(nandc); +@@ -1531,6 +2023,7 @@ static int qcom_nandc_block_bad(struct m + */ + host->use_ecc = false; + ++ clear_bam_transaction(nandc); + ret = copy_last_cw(host, page); + if (ret) + goto 
err; +@@ -1561,6 +2054,7 @@ static int qcom_nandc_block_markbad(stru + int page, ret, status = 0; + + clear_read_regs(nandc); ++ clear_bam_transaction(nandc); + + /* + * to mark the BBM as bad, we flash the entire last codeword with 0s. +@@ -1577,7 +2071,8 @@ static int qcom_nandc_block_markbad(stru + update_rw_regs(host, 1, false); + + config_cw_write_pre(nandc); +- write_data_dma(nandc, FLASH_BUF_ACC, nandc->data_buffer, host->cw_size); ++ write_data_dma(nandc, FLASH_BUF_ACC, nandc->data_buffer, ++ host->cw_size, 0); + config_cw_write_post(nandc); + + ret = submit_descs(nandc); +@@ -1937,6 +2432,8 @@ static int qcom_nand_host_setup(struct q + + host->clrflashstatus = FS_READY_BSY_N; + host->clrreadstatus = 0xc0; ++ nandc->regs->erased_cw_detect_cfg_clr = CLR_ERASED_PAGE_DET; ++ nandc->regs->erased_cw_detect_cfg_set = SET_ERASED_PAGE_DET; + + dev_dbg(nandc->dev, + "cfg0 %x cfg1 %x ecc_buf_cfg %x ecc_bch cfg %x cw_size %d cw_data %d strength %d parity_bytes %d steps %d\n", +@@ -2015,6 +2512,12 @@ static int qcom_nandc_alloc(struct qcom_ + dev_err(nandc->dev, "failed to request cmd channel\n"); + return -ENODEV; + } ++ ++ nandc->bam_txn = alloc_bam_transaction(nandc); ++ if (!nandc->bam_txn) { ++ dev_err(nandc->dev, "failed to allocate bam transaction\n"); ++ return -ENOMEM; ++ } + } + + INIT_LIST_HEAD(&nandc->desc_list); +@@ -2050,6 +2553,9 @@ static void qcom_nandc_unalloc(struct qc + devm_kfree(nandc->dev, nandc->reg_read_buf); + } + ++ if (nandc->bam_txn) ++ devm_kfree(nandc->dev, nandc->bam_txn); ++ + if (nandc->regs) + devm_kfree(nandc->dev, nandc->regs); + +@@ -2060,12 +2566,19 @@ static void qcom_nandc_unalloc(struct qc + /* one time setup of a few nand controller registers */ + static int qcom_nandc_setup(struct qcom_nand_controller *nandc) + { ++ u32 nand_ctrl; ++ + /* kill onenand */ + nandc_write(nandc, SFLASHC_BURST_CFG, 0); + nandc_write(nandc, NAND_DEV_CMD_VLD, NAND_DEV_CMD_VLD_VAL); + +- /* enable ADM DMA */ +- nandc_write(nandc, 
NAND_FLASH_CHIP_SELECT, DM_EN); ++ /* enable ADM or BAM DMA */ ++ if (!nandc->dma_bam_enabled) { ++ nandc_write(nandc, NAND_FLASH_CHIP_SELECT, DM_EN); ++ } else { ++ nand_ctrl = nandc_read(nandc, NAND_CTRL); ++ nandc_write(nandc, NAND_CTRL, nand_ctrl | BAM_MODE_EN); ++ } + + /* save the original values of these registers */ + nandc->cmd1 = nandc_read(nandc, NAND_DEV_CMD1); +--- /dev/null ++++ b/include/linux/dma/qcom_bam_dma.h +@@ -0,0 +1,149 @@ ++/* ++ * Copyright (c) 2017, The Linux Foundation. All rights reserved. ++ * ++ * Permission to use, copy, modify, and/or distribute this software for any ++ * purpose with or without fee is hereby granted, provided that the above ++ * copyright notice and this permission notice appear in all copies. ++ * ++ * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES ++ * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF ++ * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ++ * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES ++ * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN ++ * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF ++ * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. ++ */ ++ ++#ifndef _QCOM_BAM_DMA_H ++#define _QCOM_BAM_DMA_H ++ ++#include ++ ++#define DESC_FLAG_INT BIT(15) ++#define DESC_FLAG_EOT BIT(14) ++#define DESC_FLAG_EOB BIT(13) ++#define DESC_FLAG_NWD BIT(12) ++#define DESC_FLAG_CMD BIT(11) ++ ++/* ++ * QCOM BAM DMA SGL struct ++ * ++ * @sgl: DMA SGL ++ * @dma_flags: BAM DMA flags ++ */ ++struct qcom_bam_sgl { ++ struct scatterlist sgl; ++ unsigned int dma_flags; ++}; ++ ++/* ++ * This data type corresponds to the native Command Element ++ * supported by BAM DMA Engine. ++ * ++ * @addr - register address. ++ * @command - command type. ++ * @data - for write command: content to be written into peripheral register. 
++ * for read command: dest addr to write peripheral register value to. ++ * @mask - register mask. ++ * @reserved - for future usage. ++ * ++ */ ++struct bam_cmd_element { ++ __le32 addr:24; ++ __le32 command:8; ++ __le32 data; ++ __le32 mask; ++ __le32 reserved; ++}; ++ ++/* ++ * This enum indicates the command type in a command element ++ */ ++enum bam_command_type { ++ BAM_WRITE_COMMAND = 0, ++ BAM_READ_COMMAND, ++}; ++ ++/* ++ * qcom_bam_sg_init_table - Init QCOM BAM SGL ++ * @bam_sgl: bam sgl ++ * @nents: number of entries in bam sgl ++ * ++ * This function performs the initialization for each SGL in BAM SGL ++ * with generic SGL API. ++ */ ++static inline void qcom_bam_sg_init_table(struct qcom_bam_sgl *bam_sgl, ++ unsigned int nents) ++{ ++ int i; ++ ++ for (i = 0; i < nents; i++) ++ sg_init_table(&bam_sgl[i].sgl, 1); ++} ++ ++/* ++ * qcom_bam_unmap_sg - Unmap QCOM BAM SGL ++ * @dev: device for which unmapping needs to be done ++ * @bam_sgl: bam sgl ++ * @nents: number of entries in bam sgl ++ * @dir: dma transfer direction ++ * ++ * This function performs the DMA unmapping for each SGL in BAM SGL ++ * with generic SGL API. ++ */ ++static inline void qcom_bam_unmap_sg(struct device *dev, ++ struct qcom_bam_sgl *bam_sgl, int nents, enum dma_data_direction dir) ++{ ++ int i; ++ ++ for (i = 0; i < nents; i++) ++ dma_unmap_sg(dev, &bam_sgl[i].sgl, 1, dir); ++} ++ ++/* ++ * qcom_bam_map_sg - Map QCOM BAM SGL ++ * @dev: device for which mapping needs to be done ++ * @bam_sgl: bam sgl ++ * @nents: number of entries in bam sgl ++ * @dir: dma transfer direction ++ * ++ * This function performs the DMA mapping for each SGL in BAM SGL ++ * with generic SGL API. 
++ * ++ * returns 0 on error and > 0 on success ++ */ ++static inline int qcom_bam_map_sg(struct device *dev, ++ struct qcom_bam_sgl *bam_sgl, int nents, enum dma_data_direction dir) ++{ ++ int i, ret = 0; ++ ++ for (i = 0; i < nents; i++) { ++ ret = dma_map_sg(dev, &bam_sgl[i].sgl, 1, dir); ++ if (!ret) ++ break; ++ } ++ ++ /* unmap the mapped sgl from previous loop in case of error */ ++ if (!ret) ++ qcom_bam_unmap_sg(dev, bam_sgl, i, dir); ++ ++ return ret; ++} ++ ++/* ++ * qcom_prep_bam_ce - Wrapper function to prepare a single BAM command element ++ * with the data that is passed to this function. ++ * @bam_ce: bam command element ++ * @addr: target address ++ * @command: command in bam_command_type ++ * @data: actual data for write and dest addr for read ++ */ ++static inline void qcom_prep_bam_ce(struct bam_cmd_element *bam_ce, ++ uint32_t addr, uint32_t command, uint32_t data) ++{ ++ bam_ce->addr = cpu_to_le32(addr); ++ bam_ce->command = cpu_to_le32(command); ++ bam_ce->data = cpu_to_le32(data); ++ bam_ce->mask = 0xFFFFFFFF; ++} ++#endif diff --git a/target/linux/ipq40xx/patches-4.9/862-dmaengine-qcom-bam_dma-Add-custom-data-mapping.patch b/target/linux/ipq40xx/patches-4.9/862-dmaengine-qcom-bam_dma-Add-custom-data-mapping.patch new file mode 100644 index 000000000..796938f2d --- /dev/null +++ b/target/linux/ipq40xx/patches-4.9/862-dmaengine-qcom-bam_dma-Add-custom-data-mapping.patch @@ -0,0 +1,209 @@ +From 5a7ccdf845d64b385affdcffaf2defbe9848be15 Mon Sep 17 00:00:00 2001 +From: Ram Chandra Jangir +Date: Thu, 20 Apr 2017 10:39:00 +0530 +Subject: [PATCH] dmaengine: qcom: bam_dma: Add custom data mapping + +Add a new function to support for preparing DMA descriptor +for custom data. 
+ +Signed-off-by: Abhishek Sahu +Signed-off-by: Ram Chandra Jangir +--- + drivers/dma/qcom/bam_dma.c | 97 +++++++++++++++++++++++++++++++++++++--- + include/linux/dma/qcom_bam_dma.h | 14 ++++++ + include/linux/dmaengine.h | 14 ++++++ + 3 files changed, 119 insertions(+), 6 deletions(-) + +--- a/drivers/dma/qcom/bam_dma.c ++++ b/drivers/dma/qcom/bam_dma.c +@@ -49,6 +49,7 @@ + #include + #include + #include ++#include + + #include "../dmaengine.h" + #include "../virt-dma.h" +@@ -61,11 +62,6 @@ struct bam_desc_hw { + + #define BAM_DMA_AUTOSUSPEND_DELAY 100 + +-#define DESC_FLAG_INT BIT(15) +-#define DESC_FLAG_EOT BIT(14) +-#define DESC_FLAG_EOB BIT(13) +-#define DESC_FLAG_NWD BIT(12) +- + struct bam_async_desc { + struct virt_dma_desc vd; + +@@ -670,6 +666,93 @@ err_out: + } + + /** ++ * bam_prep_dma_custom_mapping - Prep DMA descriptor from custom data ++ * ++ * @chan: dma channel ++ * @data: custom data ++ * @flags: DMA flags ++ */ ++static struct dma_async_tx_descriptor *bam_prep_dma_custom_mapping( ++ struct dma_chan *chan, ++ void *data, unsigned long flags) ++{ ++ struct bam_chan *bchan = to_bam_chan(chan); ++ struct bam_device *bdev = bchan->bdev; ++ struct bam_async_desc *async_desc; ++ struct qcom_bam_custom_data *desc_data = data; ++ u32 i; ++ struct bam_desc_hw *desc; ++ unsigned int num_alloc = 0; ++ ++ ++ if (!is_slave_direction(desc_data->dir)) { ++ dev_err(bdev->dev, "invalid dma direction\n"); ++ return NULL; ++ } ++ ++ /* calculate number of required entries */ ++ for (i = 0; i < desc_data->sgl_cnt; i++) ++ num_alloc += DIV_ROUND_UP( ++ sg_dma_len(&desc_data->bam_sgl[i].sgl), BAM_FIFO_SIZE); ++ ++ /* allocate enough room to accommodate the number of entries */ ++ async_desc = kzalloc(sizeof(*async_desc) + ++ (num_alloc * sizeof(struct bam_desc_hw)), GFP_NOWAIT); ++ ++ if (!async_desc) ++ goto err_out; ++ ++ if (flags & DMA_PREP_FENCE) ++ async_desc->flags |= DESC_FLAG_NWD; ++ ++ if (flags & DMA_PREP_INTERRUPT) ++ async_desc->flags |= DESC_FLAG_EOT; ++ 
else ++ async_desc->flags |= DESC_FLAG_INT; ++ ++ async_desc->num_desc = num_alloc; ++ async_desc->curr_desc = async_desc->desc; ++ async_desc->dir = desc_data->dir; ++ ++ /* fill in temporary descriptors */ ++ desc = async_desc->desc; ++ for (i = 0; i < desc_data->sgl_cnt; i++) { ++ unsigned int remainder; ++ unsigned int curr_offset = 0; ++ ++ remainder = sg_dma_len(&desc_data->bam_sgl[i].sgl); ++ ++ do { ++ desc->addr = cpu_to_le32( ++ sg_dma_address(&desc_data->bam_sgl[i].sgl) + ++ curr_offset); ++ ++ if (desc_data->bam_sgl[i].dma_flags) ++ desc->flags |= cpu_to_le16( ++ desc_data->bam_sgl[i].dma_flags); ++ ++ if (remainder > BAM_FIFO_SIZE) { ++ desc->size = cpu_to_le16(BAM_FIFO_SIZE); ++ remainder -= BAM_FIFO_SIZE; ++ curr_offset += BAM_FIFO_SIZE; ++ } else { ++ desc->size = cpu_to_le16(remainder); ++ remainder = 0; ++ } ++ ++ async_desc->length += desc->size; ++ desc++; ++ } while (remainder > 0); ++ } ++ ++ return vchan_tx_prep(&bchan->vc, &async_desc->vd, flags); ++ ++err_out: ++ kfree(async_desc); ++ return NULL; ++} ++ ++/** + * bam_dma_terminate_all - terminate all transactions on a channel + * @bchan: bam dma channel + * +@@ -960,7 +1043,7 @@ static void bam_start_dma(struct bam_cha + + /* set any special flags on the last descriptor */ + if (async_desc->num_desc == async_desc->xfer_len) +- desc[async_desc->xfer_len - 1].flags = ++ desc[async_desc->xfer_len - 1].flags |= + cpu_to_le16(async_desc->flags); + else + desc[async_desc->xfer_len - 1].flags |= +@@ -1237,6 +1320,8 @@ static int bam_dma_probe(struct platform + bdev->common.device_alloc_chan_resources = bam_alloc_chan; + bdev->common.device_free_chan_resources = bam_free_chan; + bdev->common.device_prep_slave_sg = bam_prep_slave_sg; ++ bdev->common.device_prep_dma_custom_mapping = ++ bam_prep_dma_custom_mapping; + bdev->common.device_config = bam_slave_config; + bdev->common.device_pause = bam_pause; + bdev->common.device_resume = bam_resume; +--- a/include/linux/dma/qcom_bam_dma.h ++++ 
b/include/linux/dma/qcom_bam_dma.h +@@ -65,6 +65,19 @@ enum bam_command_type { + }; + + /* ++ * QCOM BAM DMA custom data ++ * ++ * @sgl_cnt: number of sgl in bam_sgl ++ * @dir: DMA data transfer direction ++ * @bam_sgl: BAM SGL pointer ++ */ ++struct qcom_bam_custom_data { ++ u32 sgl_cnt; ++ enum dma_transfer_direction dir; ++ struct qcom_bam_sgl *bam_sgl; ++}; ++ ++/* + * qcom_bam_sg_init_table - Init QCOM BAM SGL + * @bam_sgl: bam sgl + * @nents: number of entries in bam sgl +--- a/include/linux/dmaengine.h ++++ b/include/linux/dmaengine.h +@@ -692,6 +692,8 @@ struct dma_filter { + * be called after period_len bytes have been transferred. + * @device_prep_interleaved_dma: Transfer expression in a generic way. + * @device_prep_dma_imm_data: DMA's 8 byte immediate data to the dst address ++ * @device_prep_dma_custom_mapping: prepares a dma operation from dma driver ++ * specific custom data + * @device_config: Pushes a new configuration to a channel, return 0 or an error + * code + * @device_pause: Pauses any transfer happening on a channel. 
Returns +@@ -783,6 +785,9 @@ struct dma_device { + struct dma_async_tx_descriptor *(*device_prep_dma_imm_data)( + struct dma_chan *chan, dma_addr_t dst, u64 data, + unsigned long flags); ++ struct dma_async_tx_descriptor *(*device_prep_dma_custom_mapping)( ++ struct dma_chan *chan, void *data, ++ unsigned long flags); + + int (*device_config)(struct dma_chan *chan, + struct dma_slave_config *config); +@@ -899,6 +904,15 @@ static inline struct dma_async_tx_descri + src_sg, src_nents, flags); + } + ++static inline struct dma_async_tx_descriptor *dmaengine_prep_dma_custom_mapping( ++ struct dma_chan *chan, ++ void *data, ++ unsigned long flags) ++{ ++ return chan->device->device_prep_dma_custom_mapping(chan, data, ++ flags); ++} ++ + /** + * dmaengine_terminate_all() - Terminate all active DMA transfers + * @chan: The channel for which to terminate the transfers diff --git a/target/linux/ipq40xx/patches-4.9/863-dts-ipq4019-add-nand-and-qpic-bam-dma-node.patch b/target/linux/ipq40xx/patches-4.9/863-dts-ipq4019-add-nand-and-qpic-bam-dma-node.patch new file mode 100644 index 000000000..f054927e0 --- /dev/null +++ b/target/linux/ipq40xx/patches-4.9/863-dts-ipq4019-add-nand-and-qpic-bam-dma-node.patch @@ -0,0 +1,166 @@ +From 02bbf3c46e1e38e9ca699143566903683e3a015d Mon Sep 17 00:00:00 2001 +From: Ram Chandra Jangir +Date: Thu, 20 Apr 2017 10:45:00 +0530 +Subject: [PATCH] dts: ipq4019: add nand and qpic bam dma node + +This change adds QPIC BAM dma and NAND driver node's in +IPQ4019 device tree, also enable this for AP-DK04.1 based +boards. 
+ +Signed-off-by: Ram Chandra Jangir +--- + arch/arm/boot/dts/qcom-ipq4019-ap.dk04.1.dtsi | 75 +++++++++++++++++++++++++++ + arch/arm/boot/dts/qcom-ipq4019.dtsi | 38 ++++++++++++++ + 2 files changed, 113 insertions(+) + +--- a/arch/arm/boot/dts/qcom-ipq4019-ap.dk04.1.dtsi ++++ b/arch/arm/boot/dts/qcom-ipq4019-ap.dk04.1.dtsi +@@ -88,6 +88,86 @@ + bias-disable; + }; + }; ++ ++ nand_pins: nand_pins { ++ ++ mux_1 { ++ pins = "gpio52", "gpio53", "gpio54", ++ "gpio55", "gpio56", "gpio61", ++ "gpio62", "gpio63", "gpio69"; ++ function = "qpic_pad"; ++ bias-disable; ++ }; ++ ++ mux_2 { ++ pins = "gpio67"; ++ function = "qpic_pad0"; ++ bias-disable; ++ }; ++ ++ mux_3 { ++ pins = "gpio64"; ++ function = "qpic_pad1"; ++ bias-disable; ++ }; ++ ++ mux_4 { ++ pins = "gpio65"; ++ function = "qpic_pad2"; ++ bias-disable; ++ }; ++ ++ mux_5 { ++ pins = "gpio66"; ++ function = "qpic_pad3"; ++ bias-disable; ++ }; ++ ++ mux_6 { ++ pins = "gpio57"; ++ function = "qpic_pad4"; ++ bias-disable; ++ }; ++ ++ mux_7 { ++ pins = "gpio58"; ++ function = "qpic_pad5"; ++ bias-disable; ++ }; ++ ++ mux_8 { ++ pins = "gpio59"; ++ function = "qpic_pad6"; ++ bias-disable; ++ }; ++ ++ mux_9 { ++ pins = "gpio60"; ++ function = "qpic_pad7"; ++ bias-disable; ++ }; ++ ++ mux_10 { ++ pins = "gpio68"; ++ function = "qpic_pad8"; ++ bias-disable; ++ }; ++ ++ pullups { ++ pins = "gpio52", "gpio53", "gpio58", ++ "gpio59"; ++ bias-pull-up; ++ }; ++ ++ pulldowns { ++ pins = "gpio54", "gpio55", "gpio56", ++ "gpio57", "gpio60", "gpio61", ++ "gpio62", "gpio63", "gpio64", ++ "gpio65", "gpio66", "gpio67", ++ "gpio68", "gpio69"; ++ bias-pull-down; ++ }; ++ }; + }; + + blsp_dma: dma@7884000 { +@@ -159,5 +239,15 @@ + watchdog@b017000 { + status = "ok"; + }; ++ ++ qpic_bam: dma@7984000 { ++ status = "ok"; ++ }; ++ ++ nand: qpic-nand@79b0000 { ++ pinctrl-0 = <&nand_pins>; ++ pinctrl-names = "default"; ++ status = "ok"; ++ }; + }; + }; +--- a/arch/arm/boot/dts/qcom-ipq4019.dtsi ++++ b/arch/arm/boot/dts/qcom-ipq4019.dtsi +@@ 
-580,5 +580,43 @@ + "legacy"; + status = "disabled"; + }; ++ ++ qpic_bam: dma@7984000 { ++ compatible = "qcom,bam-v1.7.0"; ++ reg = <0x7984000 0x1a000>; ++ interrupts = <0 101 0>; ++ clocks = <&gcc GCC_QPIC_AHB_CLK>; ++ clock-names = "bam_clk"; ++ #dma-cells = <1>; ++ qcom,ee = <0>; ++ status = "disabled"; ++ }; ++ ++ nand: qpic-nand@79b0000 { ++ compatible = "qcom,ebi2-nandc-bam", "qcom,msm-nand"; ++ reg = <0x79b0000 0x1000>; ++ #address-cells = <1>; ++ #size-cells = <0>; ++ clocks = <&gcc GCC_QPIC_CLK>, ++ <&gcc GCC_QPIC_AHB_CLK>; ++ clock-names = "core", "aon"; ++ ++ dmas = <&qpic_bam 0>, ++ <&qpic_bam 1>, ++ <&qpic_bam 2>; ++ dma-names = "tx", "rx", "cmd"; ++ status = "disabled"; ++ ++ nandcs@0 { ++ compatible = "qcom,nandcs"; ++ reg = <0>; ++ #address-cells = <1>; ++ #size-cells = <1>; ++ ++ nand-ecc-strength = <4>; ++ nand-ecc-step-size = <512>; ++ nand-bus-width = <8>; ++ }; ++ }; + }; + }; diff --git a/target/linux/ipq40xx/patches-4.9/864-00-v3-1-2-dts-ipq4019-Fix-pinctrl-node-name.patch b/target/linux/ipq40xx/patches-4.9/864-00-v3-1-2-dts-ipq4019-Fix-pinctrl-node-name.patch new file mode 100644 index 000000000..dbd5537f8 --- /dev/null +++ b/target/linux/ipq40xx/patches-4.9/864-00-v3-1-2-dts-ipq4019-Fix-pinctrl-node-name.patch @@ -0,0 +1,44 @@ +From patchwork Mon Jul 3 07:47:12 2017 +Content-Type: text/plain; charset="utf-8" +MIME-Version: 1.0 +Content-Transfer-Encoding: 7bit +Subject: [v3,1/2] dts: ipq4019: Fix pinctrl node name +From: Varadarajan Narayanan +X-Patchwork-Id: 9822099 +Message-Id: <1499068033-24000-2-git-send-email-varada@codeaurora.org> +To: andy.gross@linaro.org, david.brown@linaro.org, robh+dt@kernel.org, + mark.rutland@arm.com, linux@armlinux.org.uk, + linux-arm-msm@vger.kernel.org, + linux-soc@vger.kernel.org, devicetree@vger.kernel.org, + linux-arm-kernel@lists.infradead.org, linux-kernel@vger.kernel.org +Cc: Varadarajan Narayanan +Date: Mon, 3 Jul 2017 13:17:12 +0530 + +Signed-off-by: Varadarajan Narayanan +--- + 
arch/arm/boot/dts/qcom-ipq4019-ap.dk01.1.dtsi | 2 +- + arch/arm/boot/dts/qcom-ipq4019.dtsi | 2 +- + 2 files changed, 2 insertions(+), 2 deletions(-) + +--- a/arch/arm/boot/dts/qcom-ipq4019-ap.dk01.1.dtsi ++++ b/arch/arm/boot/dts/qcom-ipq4019-ap.dk01.1.dtsi +@@ -40,7 +40,7 @@ + clock-frequency = <48000000>; + }; + +- pinctrl@0x01000000 { ++ pinctrl@1000000 { + serial_pins: serial_pinmux { + mux { + pins = "gpio60", "gpio61"; +--- a/arch/arm/boot/dts/qcom-ipq4019.dtsi ++++ b/arch/arm/boot/dts/qcom-ipq4019.dtsi +@@ -149,7 +149,7 @@ + reg = <0x1800000 0x60000>; + }; + +- tlmm: pinctrl@0x01000000 { ++ tlmm: pinctrl@1000000 { + compatible = "qcom,ipq4019-pinctrl"; + reg = <0x01000000 0x300000>; + gpio-controller; diff --git a/target/linux/ipq40xx/patches-4.9/864-00-v3-2-2-dts-ipq4019-Move-xo-and-timer-nodes-to-SoC-dtsi.patch b/target/linux/ipq40xx/patches-4.9/864-00-v3-2-2-dts-ipq4019-Move-xo-and-timer-nodes-to-SoC-dtsi.patch new file mode 100644 index 000000000..3e2c39611 --- /dev/null +++ b/target/linux/ipq40xx/patches-4.9/864-00-v3-2-2-dts-ipq4019-Move-xo-and-timer-nodes-to-SoC-dtsi.patch @@ -0,0 +1,78 @@ +From patchwork Mon Jul 3 07:47:13 2017 +Content-Type: text/plain; charset="utf-8" +MIME-Version: 1.0 +Content-Transfer-Encoding: 7bit +Subject: [v3,2/2] dts: ipq4019: Move xo and timer nodes to SoC dtsi +From: Varadarajan Narayanan +X-Patchwork-Id: 9822107 +Message-Id: <1499068033-24000-3-git-send-email-varada@codeaurora.org> +To: andy.gross@linaro.org, david.brown@linaro.org, robh+dt@kernel.org, + mark.rutland@arm.com, linux@armlinux.org.uk, + linux-arm-msm@vger.kernel.org, + linux-soc@vger.kernel.org, devicetree@vger.kernel.org, + linux-arm-kernel@lists.infradead.org, linux-kernel@vger.kernel.org +Cc: Varadarajan Narayanan +Date: Mon, 3 Jul 2017 13:17:13 +0530 + +The node for xo and timer belong to the SoC DTS file. +Else, new board DT files may not inherit these nodes. 
+ +Signed-off-by: Varadarajan Narayanan +--- + arch/arm/boot/dts/qcom-ipq4019-ap.dk01.1.dtsi | 19 ------------------- + arch/arm/boot/dts/qcom-ipq4019.dtsi | 15 +++++++++++++++ + 2 files changed, 15 insertions(+), 19 deletions(-) + +--- a/arch/arm/boot/dts/qcom-ipq4019-ap.dk01.1.dtsi ++++ b/arch/arm/boot/dts/qcom-ipq4019-ap.dk01.1.dtsi +@@ -20,26 +20,7 @@ + model = "Qualcomm Technologies, Inc. IPQ4019/AP-DK01.1"; + compatible = "qcom,ipq4019"; + +- clocks { +- xo: xo { +- compatible = "fixed-clock"; +- clock-frequency = <48000000>; +- #clock-cells = <0>; +- }; +- }; +- + soc { +- +- +- timer { +- compatible = "arm,armv7-timer"; +- interrupts = <1 2 0xf08>, +- <1 3 0xf08>, +- <1 4 0xf08>, +- <1 1 0xf08>; +- clock-frequency = <48000000>; +- }; +- + pinctrl@1000000 { + serial_pins: serial_pinmux { + mux { +--- a/arch/arm/boot/dts/qcom-ipq4019.dtsi ++++ b/arch/arm/boot/dts/qcom-ipq4019.dtsi +@@ -126,6 +126,21 @@ + clock-frequency = <32768>; + #clock-cells = <0>; + }; ++ ++ xo: xo { ++ compatible = "fixed-clock"; ++ clock-frequency = <48000000>; ++ #clock-cells = <0>; ++ }; ++ }; ++ ++ timer { ++ compatible = "arm,armv7-timer"; ++ interrupts = <1 2 0xf08>, ++ <1 3 0xf08>, ++ <1 4 0xf08>, ++ <1 1 0xf08>; ++ clock-frequency = <48000000>; + }; + + soc { diff --git a/target/linux/ipq40xx/patches-4.9/864-01-dts-ipq4019-ap-dk04-fix-pinctrl-node-name.patch b/target/linux/ipq40xx/patches-4.9/864-01-dts-ipq4019-ap-dk04-fix-pinctrl-node-name.patch new file mode 100644 index 000000000..f2de6d618 --- /dev/null +++ b/target/linux/ipq40xx/patches-4.9/864-01-dts-ipq4019-ap-dk04-fix-pinctrl-node-name.patch @@ -0,0 +1,11 @@ +--- a/arch/arm/boot/dts/qcom-ipq4019-ap.dk04.1.dtsi ++++ b/arch/arm/boot/dts/qcom-ipq4019-ap.dk04.1.dtsi +@@ -38,7 +38,7 @@ + clock-frequency = <48000000>; + }; + +- pinctrl@0x01000000 { ++ pinctrl@1000000 { + serial_0_pins: serial_pinmux { + mux { + pins = "gpio16", "gpio17"; diff --git 
a/target/linux/ipq40xx/patches-4.9/864-02-dts-ipq4019-ap-dk04-remove-xo-and-timer-nodes.patch b/target/linux/ipq40xx/patches-4.9/864-02-dts-ipq4019-ap-dk04-remove-xo-and-timer-nodes.patch new file mode 100644 index 000000000..dcbdb8d77 --- /dev/null +++ b/target/linux/ipq40xx/patches-4.9/864-02-dts-ipq4019-ap-dk04-remove-xo-and-timer-nodes.patch @@ -0,0 +1,27 @@ +--- a/arch/arm/boot/dts/qcom-ipq4019-ap.dk04.1.dtsi ++++ b/arch/arm/boot/dts/qcom-ipq4019-ap.dk04.1.dtsi +@@ -20,24 +20,7 @@ + model = "Qualcomm Technologies, Inc. IPQ4019/AP-DK04.1"; + compatible = "qcom,ipq4019"; + +- clocks { +- xo: xo { +- compatible = "fixed-clock"; +- clock-frequency = <48000000>; +- #clock-cells = <0>; +- }; +- }; +- + soc { +- timer { +- compatible = "arm,armv7-timer"; +- interrupts = <1 2 0xf08>, +- <1 3 0xf08>, +- <1 4 0xf08>, +- <1 1 0xf08>; +- clock-frequency = <48000000>; +- }; +- + pinctrl@1000000 { + serial_0_pins: serial_pinmux { + mux { diff --git a/target/linux/ipq40xx/patches-4.9/864-03-dts-ipq4019-ap-dk01-add-tcsr-config-to-dtsi.patch b/target/linux/ipq40xx/patches-4.9/864-03-dts-ipq4019-ap-dk01-add-tcsr-config-to-dtsi.patch new file mode 100644 index 000000000..9d11dc022 --- /dev/null +++ b/target/linux/ipq40xx/patches-4.9/864-03-dts-ipq4019-ap-dk01-add-tcsr-config-to-dtsi.patch @@ -0,0 +1,42 @@ +--- a/arch/arm/boot/dts/qcom-ipq4019-ap.dk01.1.dtsi ++++ b/arch/arm/boot/dts/qcom-ipq4019-ap.dk01.1.dtsi +@@ -15,12 +15,39 @@ + */ + + #include "qcom-ipq4019.dtsi" ++#include + + / { + model = "Qualcomm Technologies, Inc. 
IPQ4019/AP-DK01.1"; + compatible = "qcom,ipq4019"; + + soc { ++ tcsr@194b000 { ++ /* select hostmode */ ++ compatible = "qcom,tcsr"; ++ reg = <0x194b000 0x100>; ++ qcom,usb-hsphy-mode-select = ; ++ status = "ok"; ++ }; ++ ++ ess_tcsr@1953000 { ++ compatible = "qcom,tcsr"; ++ reg = <0x1953000 0x1000>; ++ qcom,ess-interface-select = ; ++ }; ++ ++ tcsr@1949000 { ++ compatible = "qcom,tcsr"; ++ reg = <0x1949000 0x100>; ++ qcom,wifi_glb_cfg = ; ++ }; ++ ++ tcsr@1957000 { ++ compatible = "qcom,tcsr"; ++ reg = <0x1957000 0x100>; ++ qcom,wifi_noc_memtype_m0_m2 = ; ++ }; ++ + pinctrl@1000000 { + serial_pins: serial_pinmux { + mux { diff --git a/target/linux/ipq40xx/patches-4.9/864-04-dts-ipq4019-ap-dk01-add-network-nodes-to-dtsi.patch b/target/linux/ipq40xx/patches-4.9/864-04-dts-ipq4019-ap-dk01-add-network-nodes-to-dtsi.patch new file mode 100644 index 000000000..02a102e37 --- /dev/null +++ b/target/linux/ipq40xx/patches-4.9/864-04-dts-ipq4019-ap-dk01-add-network-nodes-to-dtsi.patch @@ -0,0 +1,32 @@ +--- a/arch/arm/boot/dts/qcom-ipq4019-ap.dk01.1.dtsi ++++ b/arch/arm/boot/dts/qcom-ipq4019-ap.dk01.1.dtsi +@@ -136,5 +136,29 @@ + usb2: usb2@60f8800 { + status = "ok"; + }; ++ ++ mdio@90000 { ++ status = "okay"; ++ }; ++ ++ ess-switch@c000000 { ++ status = "okay"; ++ }; ++ ++ ess-psgmii@98000 { ++ status = "okay"; ++ }; ++ ++ edma@c080000 { ++ status = "okay"; ++ }; ++ ++ wifi@a000000 { ++ status = "okay"; ++ }; ++ ++ wifi@a800000 { ++ status = "okay"; ++ }; + }; + }; diff --git a/target/linux/ipq40xx/patches-4.9/864-05-dts-ipq4019-ap-dk01-remove-spi-chip-node-from-dtsi.patch b/target/linux/ipq40xx/patches-4.9/864-05-dts-ipq4019-ap-dk01-remove-spi-chip-node-from-dtsi.patch new file mode 100644 index 000000000..0d4b80b30 --- /dev/null +++ b/target/linux/ipq40xx/patches-4.9/864-05-dts-ipq4019-ap-dk01-remove-spi-chip-node-from-dtsi.patch @@ -0,0 +1,17 @@ +--- a/arch/arm/boot/dts/qcom-ipq4019-ap.dk01.1.dtsi ++++ b/arch/arm/boot/dts/qcom-ipq4019-ap.dk01.1.dtsi +@@ -89,14 +89,6 @@ + 
pinctrl-names = "default"; + status = "ok"; + cs-gpios = <&tlmm 54 0>; +- +- mx25l25635e@0 { +- #address-cells = <1>; +- #size-cells = <1>; +- reg = <0>; +- compatible = "mx25l25635e"; +- spi-max-frequency = <24000000>; +- }; + }; + + serial@78af000 { diff --git a/target/linux/ipq40xx/patches-4.9/864-06-dts-ipq4019-fix-max-cpu-speed.patch b/target/linux/ipq40xx/patches-4.9/864-06-dts-ipq4019-fix-max-cpu-speed.patch new file mode 100644 index 000000000..33f7f04f3 --- /dev/null +++ b/target/linux/ipq40xx/patches-4.9/864-06-dts-ipq4019-fix-max-cpu-speed.patch @@ -0,0 +1,13 @@ +--- a/arch/arm/boot/dts/qcom-ipq4019.dtsi ++++ b/arch/arm/boot/dts/qcom-ipq4019.dtsi +@@ -108,8 +108,8 @@ + opp-hz = /bits/ 64 <500000000>; + clock-latency-ns = <256000>; + }; +- opp@666000000 { +- opp-hz = /bits/ 64 <666000000>; ++ opp@716800000 { ++ opp-hz = /bits/ 64 <716800000>; + clock-latency-ns = <256000>; + }; + }; diff --git a/target/linux/ipq40xx/patches-4.9/864-07-dts-ipq4019-ap-dk01.1-c1-add-spi-and-ram-nodes.patch b/target/linux/ipq40xx/patches-4.9/864-07-dts-ipq4019-ap-dk01.1-c1-add-spi-and-ram-nodes.patch new file mode 100644 index 000000000..e9d262069 --- /dev/null +++ b/target/linux/ipq40xx/patches-4.9/864-07-dts-ipq4019-ap-dk01.1-c1-add-spi-and-ram-nodes.patch @@ -0,0 +1,115 @@ +--- a/arch/arm/boot/dts/qcom-ipq4019-ap.dk01.1-c1.dts ++++ b/arch/arm/boot/dts/qcom-ipq4019-ap.dk01.1-c1.dts +@@ -19,4 +19,112 @@ + / { + model = "Qualcomm Technologies, Inc. 
IPQ40xx/AP-DK01.1-C1"; + ++ memory { ++ device_type = "memory"; ++ reg = <0x80000000 0x10000000>; ++ }; ++ ++ reserved-memory { ++ #address-cells = <0x1>; ++ #size-cells = <0x1>; ++ ranges; ++ ++ apps_bl@87000000 { ++ reg = <0x87000000 0x400000>; ++ no-map; ++ }; ++ ++ sbl@87400000 { ++ reg = <0x87400000 0x100000>; ++ no-map; ++ }; ++ ++ cnss_debug@87500000 { ++ reg = <0x87500000 0x600000>; ++ no-map; ++ }; ++ ++ cpu_context_dump@87b00000 { ++ reg = <0x87b00000 0x080000>; ++ no-map; ++ }; ++ ++ tz_apps@87b80000 { ++ reg = <0x87b80000 0x280000>; ++ no-map; ++ }; ++ ++ smem@87e00000 { ++ reg = <0x87e00000 0x080000>; ++ no-map; ++ }; ++ ++ tz@87e80000 { ++ reg = <0x87e80000 0x180000>; ++ no-map; ++ }; ++ }; ++}; ++ ++&spi_0 { ++ mx25l25635f@0 { ++ compatible = "mx25l25635f", "jedec,spi-nor"; ++ #address-cells = <1>; ++ #size-cells = <1>; ++ reg = <0>; ++ spi-max-frequency = <24000000>; ++ ++ SBL1@0 { ++ label = "SBL1"; ++ reg = <0x0 0x40000>; ++ read-only; ++ }; ++ MIBIB@40000 { ++ label = "MIBIB"; ++ reg = <0x40000 0x20000>; ++ read-only; ++ }; ++ QSEE@60000 { ++ label = "QSEE"; ++ reg = <0x60000 0x60000>; ++ read-only; ++ }; ++ CDT@c0000 { ++ label = "CDT"; ++ reg = <0xc0000 0x10000>; ++ read-only; ++ }; ++ DDRPARAMS@d0000 { ++ label = "DDRPARAMS"; ++ reg = <0xd0000 0x10000>; ++ read-only; ++ }; ++ APPSBLENV@e0000 { ++ label = "APPSBLENV"; ++ reg = <0xe0000 0x10000>; ++ read-only; ++ }; ++ APPSBL@f0000 { ++ label = "APPSBL"; ++ reg = <0xf0000 0x80000>; ++ read-only; ++ }; ++ ART@170000 { ++ label = "ART"; ++ reg = <0x170000 0x10000>; ++ read-only; ++ }; ++ kernel@180000 { ++ label = "kernel"; ++ reg = <0x180000 0x400000>; ++ }; ++ rootfs@580000 { ++ label = "rootfs"; ++ reg = <0x580000 0x1600000>; ++ }; ++ firmware@180000 { ++ label = "firmware"; ++ reg = <0x180000 0x1a00000>; ++ }; ++ }; + }; diff --git a/target/linux/ipq40xx/patches-4.9/864-08-dts-ipq4019-ap-dk01.1-c1-add-compatible-string.patch 
b/target/linux/ipq40xx/patches-4.9/864-08-dts-ipq4019-ap-dk01.1-c1-add-compatible-string.patch new file mode 100644 index 000000000..2d4ff3104 --- /dev/null +++ b/target/linux/ipq40xx/patches-4.9/864-08-dts-ipq4019-ap-dk01.1-c1-add-compatible-string.patch @@ -0,0 +1,10 @@ +--- a/arch/arm/boot/dts/qcom-ipq4019-ap.dk01.1-c1.dts ++++ b/arch/arm/boot/dts/qcom-ipq4019-ap.dk01.1-c1.dts +@@ -18,6 +18,7 @@ + + / { + model = "Qualcomm Technologies, Inc. IPQ40xx/AP-DK01.1-C1"; ++ compatible = "qcom,ap-dk01.1-c1", "qcom,ap-dk01.2-c1", "qcom,ipq4019"; + + memory { + device_type = "memory"; diff --git a/target/linux/ipq40xx/patches-4.9/900-mach-qcom-add-msm_pcie-driver.patch b/target/linux/ipq40xx/patches-4.9/900-mach-qcom-add-msm_pcie-driver.patch new file mode 100644 index 000000000..be3a1af15 --- /dev/null +++ b/target/linux/ipq40xx/patches-4.9/900-mach-qcom-add-msm_pcie-driver.patch @@ -0,0 +1,4883 @@ +From a0473413c8a10ea1884f2532b89e029b70e389d0 Mon Sep 17 00:00:00 2001 +From: Chen Minqiang +Date: Sat, 6 Jan 2018 13:59:06 +0800 +Subject: [PATCH 3/3] mach-qcom: add msm_pcie driver + +--- + Documentation/devicetree/bindings/pci/msm_pcie.txt | 164 ++ + arch/arm/mach-qcom/Kconfig | 8 + + arch/arm/mach-qcom/Makefile | 3 + + arch/arm/mach-qcom/include/mach/gpiomux.h | 216 ++ + arch/arm/mach-qcom/include/mach/msm_pcie.h | 134 ++ + arch/arm/mach-qcom/pcie.c | 2389 ++++++++++++++++++++ + arch/arm/mach-qcom/pcie.h | 329 +++ + arch/arm/mach-qcom/pcie_irq.c | 598 +++++ + arch/arm/mach-qcom/pcie_phy.c | 403 ++++ + arch/arm/mach-qcom/pcie_phy.h | 545 +++++ + 10 files changed, 4789 insertions(+) + create mode 100644 Documentation/devicetree/bindings/pci/msm_pcie.txt + create mode 100644 arch/arm/mach-qcom/include/mach/gpiomux.h + create mode 100644 arch/arm/mach-qcom/include/mach/msm_pcie.h + create mode 100644 arch/arm/mach-qcom/pcie.c + create mode 100644 arch/arm/mach-qcom/pcie.h + create mode 100644 arch/arm/mach-qcom/pcie_irq.c + create mode 100644 arch/arm/mach-qcom/pcie_phy.c + 
create mode 100644 arch/arm/mach-qcom/pcie_phy.h + +diff --git a/Documentation/devicetree/bindings/pci/msm_pcie.txt b/Documentation/devicetree/bindings/pci/msm_pcie.txt +new file mode 100644 +index 0000000..24a2be7 +--- /dev/null ++++ b/Documentation/devicetree/bindings/pci/msm_pcie.txt +@@ -0,0 +1,164 @@ ++MSM PCIe ++ ++MSM PCI express root complex ++ ++Required properties: ++ - compatible: should be "qcom,pci-msm" ++ - cell-index: defines root complex ID. ++ - #address-cells: Should provide a value of 0. ++ - reg: should contain PCIe register maps. ++ - reg-names: indicates various resources passed to driver by name. ++ Should be "parf", "phy", "dm_core", "elbi", "conf", "io", "bars". ++ These correspond to different modules within the PCIe core. ++ - interrupts: Should be in the format <0 1 2> and it is an index to the ++ interrupt-map that contains PCIe related interrupts. ++ - #interrupt-cells: Should provide a value of 1. ++ - #interrupt-map-mask: should provide a value of 0xffffffff. ++ - interrupt-map: Must create mapping for the number of interrupts ++ that are defined in above interrupts property. ++ For PCIe device node, it should define 12 mappings for ++ the corresponding PCIe interrupts supporting the specification. ++ - interrupt-names: indicates interrupts passed to driver by name. ++ Should be "int_msi", "int_a", "int_b", "int_c", "int_d", ++ "int_pls_pme", "int_pme_legacy", "int_pls_err", ++ "int_aer_legacy", "int_pls_link_up", ++ "int_pls_link_down", "int_bridge_flush_n" ++ These correspond to the standard PCIe specification to support ++ MSIs, virtual IRQ's (INT#), link state notifications. ++ - perst-gpio: PERST GPIO specified by PCIe spec. ++ - wake-gpio: WAKE GPIO specified by PCIe spec. ++ - -supply: phandle to the regulator device tree node. ++ Refer to the schematics for the corresponding voltage regulators. ++ vreg-1.8-supply: phandle to the analog supply for the PCIe controller. 
++ vreg-3.3-supply: phandle to the analog supply for the PCIe controller. ++ vreg-0.9-supply: phandle to the analog supply for the PCIe controller. ++ ++Optional Properties: ++ - qcom,-voltage-level: specifies voltage levels for supply. ++ Should be specified in pairs (max, min, optimal), units uV. ++ - clkreq-gpio: CLKREQ GPIO specified by PCIe spec. ++ - pinctrl-names: The state name of the pin configuration. Only ++ support: "default" ++ - pinctrl-0: For details of pinctrl properties, please refer to: ++ "Documentation/devicetree/bindings/pinctrl/pinctrl-bindings.txt" ++ - clocks: list of clock phandles ++ - clock-names: list of names of clock inputs. ++ Should be "pcie_0_pipe_clk", "pcie_0_ref_clk_src", ++ "pcie_0_aux_clk", "pcie_0_cfg_ahb_clk", ++ "pcie_0_mstr_axi_clk", "pcie_0_slv_axi_clk", ++ "pcie_0_ldo"; ++ - max-clock-frequency-hz: list of the maximum operating frequencies stored ++ in the same order of clock names; ++ - qcom,l0s-supported: L0s is supported. ++ - qcom,l1-supported: L1 is supported. ++ - qcom,l1ss-supported: L1 sub-states (L1ss) is supported. ++ - qcom,aux-clk-sync: The AUX clock is synchronous to the Core clock to ++ support L1ss. ++ - qcom,n-fts: The number of fast training sequences sent when the link state ++ is changed from L0s to L0. ++ - qcom,ep-wakeirq: The endpoint will issue wake signal when it is up, and the ++ root complex has the capability to enumerate the endpoint for this case. ++ - qcom,msi-gicm-addr: MSI address for GICv2m. ++ - qcom,msi-gicm-base: MSI IRQ base for GICv2m. ++ - qcom,ext-ref-clk: The reference clock is external. ++ - qcom,ep-latency: The time (unit: ms) to wait for the PCIe endpoint to become ++ stable after power on, before de-assert the PERST to the endpoint. ++ - qcom,tlp-rd-size: The max TLP read size (Calculation: 128 times 2 to the ++ tlp-rd-size power). 
++ - Refer to "Documentation/devicetree/bindings/arm/msm/msm_bus.txt" for ++ below optional properties: ++ - qcom,msm-bus,name ++ - qcom,msm-bus,num-cases ++ - qcom,msm-bus,num-paths ++ - qcom,msm-bus,vectors-KBps ++ - qcom,scm-dev-id: If present then device id value is passed to secure channel ++ manager(scm) driver. scm driver uses this device id to restore PCIe ++ controller related security configuration after coming out of the controller ++ power collapse. ++ ++Example: ++ ++ pcie0: qcom,pcie@fc520000 { ++ compatible = "qcom,msm_pcie"; ++ cell-index = <0>; ++ #address-cells = <0>; ++ reg = <0xfc520000 0x2000>, ++ <0xfc526000 0x1000>, ++ <0xff000000 0x1000>, ++ <0xff001000 0x1000>, ++ <0xff100000 0x1000>, ++ <0xff200000 0x100000>, ++ <0xff300000 0xd00000>; ++ reg-names = "parf", "dm_core", "elbi", ++ "conf", "io", "bars"; ++ interrupt-parent = <&pcie0>; ++ interrupts = <0 1 2 3 4 5 6 7 8 9 10 11 12>; ++ #interrupt-cells = <1>; ++ interrupt-map-mask = <0xffffffff>; ++ interrupt-map = <0 &intc 0 243 0 ++ 1 &intc 0 244 0 ++ 2 &intc 0 245 0 ++ 3 &intc 0 247 0 ++ 4 &intc 0 248 0 ++ 5 &intc 0 249 0 ++ 6 &intc 0 250 0 ++ 7 &intc 0 251 0 ++ 8 &intc 0 252 0 ++ 9 &intc 0 253 0 ++ 10 &intc 0 254 0 ++ 11 &intc 0 255 0>; ++ interrupt-names = "int_msi", "int_a", "int_b", "int_c", "int_d", ++ "int_pls_pme", "int_pme_legacy", "int_pls_err", ++ "int_aer_legacy", "int_pls_link_up", ++ "int_pls_link_down", "int_bridge_flush_n"; ++ perst-gpio = <&msmgpio 70 0>; ++ wake-gpio = <&msmgpio 69 0>; ++ clkreq-gpio = <&msmgpio 68 0>; ++ ++ gdsc-vdd-supply = <&gdsc_pcie_0>; ++ vreg-1.8-supply = <&pma8084_l12>; ++ vreg-0.9-supply = <&pma8084_l4>; ++ vreg-3.3-supply = <&wlan_vreg>; ++ ++ qcom,vreg-1.8-voltage-level = <1800000 1800000 1000>; ++ qcom,vreg-0.9-voltage-level = <950000 950000 24000>; ++ ++ pinctrl-names = "default"; ++ pinctrl-0 = <&pcie0_clkreq_default &pcie0_perst_default &pcie0_wake_default>; ++ ++ clocks = <&clock_gcc clk_gcc_pcie_0_pipe_clk>, ++ <&clock_rpm clk_ln_bb_clk>, 
++ <&clock_gcc clk_gcc_pcie_0_aux_clk>, ++ <&clock_gcc clk_gcc_pcie_0_cfg_ahb_clk>, ++ <&clock_gcc clk_gcc_pcie_0_mstr_axi_clk>, ++ <&clock_gcc clk_gcc_pcie_0_slv_axi_clk>, ++ <&clock_gcc clk_pcie_0_phy_ldo>, ++ <&clock_gcc clk_gcc_pcie_phy_0_reset>; ++ ++ clock-names = "pcie_0_pipe_clk", "pcie_0_ref_clk_src", ++ "pcie_0_aux_clk", "pcie_0_cfg_ahb_clk", ++ "pcie_0_mstr_axi_clk", "pcie_0_slv_axi_clk", ++ "pcie_0_ldo"; ++ max-clock-frequency-hz = <125000000>, <0>, <1000000>, ++ <0>, <0>, <0>, <0>; ++ qcom,l0s-supported; ++ qcom,l1-supported; ++ qcom,l1ss-supported; ++ qcom,aux-clk-sync; ++ qcom,n-fts = <0x50>; ++ qcom,ep-wakeirq; ++ qcom,msi-gicm-addr = <0xf9040040>; ++ qcom,msi-gicm-base = <0x160>; ++ qcom,ext-ref-clk; ++ qcom,tlp-rd-size = <0x5>; ++ qcom,ep-latency = <100>; ++ ++ qcom,msm-bus,name = "pcie0"; ++ qcom,msm-bus,num-cases = <2>; ++ qcom,msm-bus,num-paths = <1>; ++ qcom,msm-bus,vectors-KBps = ++ <45 512 0 0>, ++ <45 512 500 800>; ++ ++ qcom,scm-dev-id = <11>; ++ }; +diff --git a/arch/arm/mach-qcom/Kconfig b/arch/arm/mach-qcom/Kconfig +index b438cd0..38b4009 100644 +--- a/arch/arm/mach-qcom/Kconfig ++++ b/arch/arm/mach-qcom/Kconfig +@@ -10,6 +10,14 @@ menuconfig ARCH_QCOM + help + Support for Qualcomm's devicetree based systems. + ++config MSM_PCIE ++ bool "MSM PCIe Controller driver" ++ depends on PCI && PCI_MSI ++ select PCI_DOMAINS ++ help ++ Enables the PCIe functionality by configures PCIe core on ++ MSM chipset and by enabling the ARM PCI framework extension. 
++ + if ARCH_QCOM + + config ARCH_MSM8X60 +diff --git a/arch/arm/mach-qcom/Makefile b/arch/arm/mach-qcom/Makefile +index 12878e9..542691b 100644 +--- a/arch/arm/mach-qcom/Makefile ++++ b/arch/arm/mach-qcom/Makefile +@@ -1 +1,4 @@ ++EXTRA_CFLAGS += -I$(srctree)/arch/arm/mach-qcom/include ++EXTRA_CFLAGS += -I$(srctree)/drivers/bus/msm_bus + obj-$(CONFIG_SMP) += platsmp.o ++obj-$(CONFIG_MSM_PCIE) += pcie.o pcie_irq.o pcie_phy.o +diff --git a/arch/arm/mach-qcom/include/mach/gpiomux.h b/arch/arm/mach-qcom/include/mach/gpiomux.h +new file mode 100644 +index 0000000..2278677 +--- /dev/null ++++ b/arch/arm/mach-qcom/include/mach/gpiomux.h +@@ -0,0 +1,216 @@ ++/* Copyright (c) 2010-2011,2013-2014, The Linux Foundation. All rights reserved. ++ * ++ * This program is free software; you can redistribute it and/or modify ++ * it under the terms of the GNU General Public License version 2 and ++ * only version 2 as published by the Free Software Foundation. ++ * ++ * This program is distributed in the hope that it will be useful, ++ * but WITHOUT ANY WARRANTY; without even the implied warranty of ++ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the ++ * GNU General Public License for more details. 
++ */ ++#ifndef __ARCH_ARM_MACH_MSM_GPIOMUX_H ++#define __ARCH_ARM_MACH_MSM_GPIOMUX_H ++ ++#include ++#include ++ ++enum msm_gpiomux_setting { ++ GPIOMUX_ACTIVE = 0, ++ GPIOMUX_SUSPENDED, ++ GPIOMUX_NSETTINGS ++}; ++ ++enum gpiomux_drv { ++ GPIOMUX_DRV_2MA = 0, ++ GPIOMUX_DRV_4MA, ++ GPIOMUX_DRV_6MA, ++ GPIOMUX_DRV_8MA, ++ GPIOMUX_DRV_10MA, ++ GPIOMUX_DRV_12MA, ++ GPIOMUX_DRV_14MA, ++ GPIOMUX_DRV_16MA, ++}; ++ ++enum gpiomux_func { ++ GPIOMUX_FUNC_GPIO = 0, ++ GPIOMUX_FUNC_1, ++ GPIOMUX_FUNC_2, ++ GPIOMUX_FUNC_3, ++ GPIOMUX_FUNC_4, ++ GPIOMUX_FUNC_5, ++ GPIOMUX_FUNC_6, ++ GPIOMUX_FUNC_7, ++ GPIOMUX_FUNC_8, ++ GPIOMUX_FUNC_9, ++ GPIOMUX_FUNC_A, ++ GPIOMUX_FUNC_B, ++ GPIOMUX_FUNC_C, ++ GPIOMUX_FUNC_D, ++ GPIOMUX_FUNC_E, ++ GPIOMUX_FUNC_F, ++}; ++ ++enum gpiomux_pull { ++ GPIOMUX_PULL_NONE = 0, ++ GPIOMUX_PULL_DOWN, ++ GPIOMUX_PULL_KEEPER, ++ GPIOMUX_PULL_UP, ++}; ++ ++/* Direction settings are only meaningful when GPIOMUX_FUNC_GPIO is selected. ++ * This element is ignored for all other FUNC selections, as the output- ++ * enable pin is not under software control in those cases. See the SWI ++ * for your target for more details. ++ */ ++enum gpiomux_dir { ++ GPIOMUX_IN = 0, ++ GPIOMUX_OUT_HIGH, ++ GPIOMUX_OUT_LOW, ++}; ++ ++struct gpiomux_setting { ++ enum gpiomux_func func; ++ enum gpiomux_drv drv; ++ enum gpiomux_pull pull; ++ enum gpiomux_dir dir; ++}; ++ ++/** ++ * struct msm_gpiomux_config: gpiomux settings for one gpio line. ++ * ++ * A complete gpiomux config is the combination of a drive-strength, ++ * function, pull, and (sometimes) direction. For functions other than GPIO, ++ * the input/output setting is hard-wired according to the function. ++ * ++ * @gpio: The index number of the gpio being described. ++ * @settings: The settings to be installed, specifically: ++ * GPIOMUX_ACTIVE: The setting to be installed when the ++ * line is active, or its reference count is > 0. 
++ * GPIOMUX_SUSPENDED: The setting to be installed when ++ * the line is suspended, or its reference count is 0. ++ */ ++struct msm_gpiomux_config { ++ unsigned gpio; ++ struct gpiomux_setting *settings[GPIOMUX_NSETTINGS]; ++}; ++ ++/** ++ * struct msm_gpiomux_configs: a collection of gpiomux configs. ++ * ++ * It is so common to manage blocks of gpiomux configs that the data structure ++ * for doing so has been standardized here as a convenience. ++ * ++ * @cfg: A pointer to the first config in an array of configs. ++ * @ncfg: The number of configs in the array. ++ */ ++struct msm_gpiomux_configs { ++ struct msm_gpiomux_config *cfg; ++ size_t ncfg; ++}; ++ ++/* Provide an enum and an API to write to misc TLMM registers */ ++enum msm_tlmm_misc_reg { ++ TLMM_ETM_MODE_REG = 0x2014, ++ TLMM_SDC2_HDRV_PULL_CTL = 0x2048, ++ TLMM_SPARE_REG = 0x2024, ++ TLMM_CDC_HDRV_CTL = 0x2054, ++ TLMM_CDC_HDRV_PULL_CTL = 0x2058, ++}; ++ ++#ifdef CONFIG_MSM_GPIOMUX ++ ++/* Before using gpiomux, initialize the subsystem by telling it how many ++ * gpios are going to be managed. Calling any other gpiomux functions before ++ * msm_gpiomux_init is unsupported. ++ */ ++int msm_gpiomux_init(size_t ngpio); ++ ++/* DT Variant of msm_gpiomux_init. This will look up the number of gpios from ++ * device tree rather than relying on NR_GPIO_IRQS ++ */ ++int msm_gpiomux_init_dt(void); ++ ++/* Install a block of gpiomux configurations in gpiomux. This is functionally ++ * identical to calling msm_gpiomux_write many times. ++ */ ++void msm_gpiomux_install(struct msm_gpiomux_config *configs, unsigned nconfigs); ++ ++/* Install a block of gpiomux configurations in gpiomux. Do not however write ++ * to hardware. Just store the settings to be retrieved at a later time ++ */ ++void msm_gpiomux_install_nowrite(struct msm_gpiomux_config *configs, ++ unsigned nconfigs); ++ ++/* Increment a gpio's reference count, possibly activating the line. 
*/ ++int __must_check msm_gpiomux_get(unsigned gpio); ++ ++/* Decrement a gpio's reference count, possibly suspending the line. */ ++int msm_gpiomux_put(unsigned gpio); ++ ++/* Install a new setting in a gpio. To erase a slot, use NULL. ++ * The old setting that was overwritten can be passed back to the caller ++ * old_setting can be NULL if the caller is not interested in the previous ++ * setting ++ * If a previous setting was not available to return (NULL configuration) ++ * - the function returns 1 ++ * else function returns 0 ++ */ ++int msm_gpiomux_write(unsigned gpio, enum msm_gpiomux_setting which, ++ struct gpiomux_setting *setting, struct gpiomux_setting *old_setting); ++ ++/* Architecture-internal function for use by the framework only. ++ * This function can assume the following: ++ * - the gpio value has passed a bounds-check ++ * - the gpiomux spinlock has been obtained ++ * ++ * This function is not for public consumption. External users ++ * should use msm_gpiomux_write. ++ */ ++void __msm_gpiomux_write(unsigned gpio, struct gpiomux_setting val); ++ ++/* Functions that provide an API for drivers to read from and write to ++ * miscellaneous TLMM registers. 
++ */ ++int msm_tlmm_misc_reg_read(enum msm_tlmm_misc_reg misc_reg); ++ ++void msm_tlmm_misc_reg_write(enum msm_tlmm_misc_reg misc_reg, int val); ++ ++#else ++static inline int msm_gpiomux_init(size_t ngpio) ++{ ++ return -ENOSYS; ++} ++ ++static inline void ++msm_gpiomux_install(struct msm_gpiomux_config *configs, unsigned nconfigs) {} ++ ++static inline int __must_check msm_gpiomux_get(unsigned gpio) ++{ ++ return -ENOSYS; ++} ++ ++static inline int msm_gpiomux_put(unsigned gpio) ++{ ++ return -ENOSYS; ++} ++ ++static inline int msm_gpiomux_write(unsigned gpio, ++ enum msm_gpiomux_setting which, struct gpiomux_setting *setting, ++ struct gpiomux_setting *old_setting) ++{ ++ return -ENOSYS; ++} ++ ++static inline int msm_tlmm_misc_reg_read(enum msm_tlmm_misc_reg misc_reg) ++{ ++ return -ENOSYS; ++} ++ ++static inline void msm_tlmm_misc_reg_write(enum msm_tlmm_misc_reg misc_reg, ++ int val) ++{ ++} ++ ++#endif ++#endif +diff --git a/arch/arm/mach-qcom/include/mach/msm_pcie.h b/arch/arm/mach-qcom/include/mach/msm_pcie.h +new file mode 100644 +index 0000000..c2fbdea +--- /dev/null ++++ b/arch/arm/mach-qcom/include/mach/msm_pcie.h +@@ -0,0 +1,134 @@ ++/* Copyright (c) 2012-2014, The Linux Foundation. All rights reserved. ++ * ++ * This program is free software; you can redistribute it and/or modify ++ * it under the terms of the GNU General Public License version 2 and ++ * only version 2 as published by the Free Software Foundation. ++ * ++ * This program is distributed in the hope that it will be useful, ++ * but WITHOUT ANY WARRANTY; without even the implied warranty of ++ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the ++ * GNU General Public License for more details. 
++ */ ++ ++#ifndef __ASM_ARCH_MSM_PCIE_H ++#define __ASM_ARCH_MSM_PCIE_H ++ ++#include ++#include ++ ++enum msm_pcie_config { ++ MSM_PCIE_CONFIG_INVALID = 0, ++ MSM_PCIE_CONFIG_NO_CFG_RESTORE = 0x1, ++ MSM_PCIE_CONFIG_LINKDOWN = 0x2, ++ MSM_PCIE_CONFIG_NO_RECOVERY = 0x4, ++}; ++ ++enum msm_pcie_pm_opt { ++ MSM_PCIE_SUSPEND, ++ MSM_PCIE_RESUME, ++ MSM_PCIE_REQ_EXIT_L1, ++}; ++ ++enum msm_pcie_event { ++ MSM_PCIE_EVENT_INVALID = 0, ++ MSM_PCIE_EVENT_LINKDOWN = 0x1, ++ MSM_PCIE_EVENT_LINKUP = 0x2, ++ MSM_PCIE_EVENT_WAKEUP = 0x4, ++ MSM_PCIE_EVENT_WAKE_RECOVERY = 0x8, ++ MSM_PCIE_EVENT_NO_ACCESS = 0x10, ++}; ++ ++enum msm_pcie_trigger { ++ MSM_PCIE_TRIGGER_CALLBACK, ++ MSM_PCIE_TRIGGER_COMPLETION, ++}; ++ ++struct msm_pcie_notify { ++ enum msm_pcie_event event; ++ void *user; ++ void *data; ++ u32 options; ++}; ++ ++struct msm_pcie_register_event { ++ u32 events; ++ void *user; ++ enum msm_pcie_trigger mode; ++ void (*callback)(struct msm_pcie_notify *notify); ++ struct msm_pcie_notify notify; ++ struct completion *completion; ++ u32 options; ++}; ++ ++/** ++ * msm_pcie_pm_control - control the power state of a PCIe link. ++ * @pm_opt: power management operation ++ * @busnr: bus number of PCIe endpoint ++ * @user: handle of the caller ++ * @data: private data from the caller ++ * @options: options for pm control ++ * ++ * This function gives PCIe endpoint device drivers the control to change ++ * the power state of a PCIe link for their device. ++ * ++ * Return: 0 on success, negative value on error ++ */ ++int msm_pcie_pm_control(enum msm_pcie_pm_opt pm_opt, u32 busnr, void *user, ++ void *data, u32 options); ++ ++/** ++ * msm_pcie_register_event - register an event with PCIe bus driver. ++ * @reg: event structure ++ * ++ * This function gives PCIe endpoint device drivers an option to register ++ * events with PCIe bus driver. 
++ * ++ * Return: 0 on success, negative value on error ++ */ ++int msm_pcie_register_event(struct msm_pcie_register_event *reg); ++ ++/** ++ * msm_pcie_deregister_event - deregister an event with PCIe bus driver. ++ * @reg: event structure ++ * ++ * This function gives PCIe endpoint device drivers an option to deregister ++ * events with PCIe bus driver. ++ * ++ * Return: 0 on success, negative value on error ++ */ ++int msm_pcie_deregister_event(struct msm_pcie_register_event *reg); ++ ++/** ++ * msm_pcie_recover_config - recover config space. ++ * @dev: pci device structure ++ * ++ * This function recovers the config space of both RC and Endpoint. ++ * ++ * Return: 0 on success, negative value on error ++ */ ++int msm_pcie_recover_config(struct pci_dev *dev); ++ ++/** ++ * msm_pcie_shadow_control - control the shadowing of PCIe config space. ++ * @dev: pci device structure ++ * @enable: shadowing should be enabled or disabled ++ * ++ * This function gives PCIe endpoint device drivers the control to enable ++ * or disable the shadowing of PCIe config space. ++ * ++ * Return: 0 on success, negative value on error ++ */ ++int msm_pcie_shadow_control(struct pci_dev *dev, bool enable); ++ ++/* ++ * msm_pcie_access_control - access control to PCIe address range. ++ * @dev: pci device structure ++ * @enable: enable or disable the access ++ * ++ * This function gives PCIe endpoint device drivers the control to enable ++ * or disable the access to PCIe address range. ++ * ++ * Return: 0 on success, negative value on error ++ */ ++int msm_pcie_access_control(struct pci_dev *dev, bool enable); ++#endif +diff --git a/arch/arm/mach-qcom/pcie.c b/arch/arm/mach-qcom/pcie.c +new file mode 100644 +index 0000000..b471479 +--- /dev/null ++++ b/arch/arm/mach-qcom/pcie.c +@@ -0,0 +1,2389 @@ ++/* Copyright (c) 2012-2014, The Linux Foundation. All rights reserved. 
++ * ++ * This program is free software; you can redistribute it and/or modify ++ * it under the terms of the GNU General Public License version 2 and ++ * only version 2 as published by the Free Software Foundation. ++ * ++ * This program is distributed in the hope that it will be useful, ++ * but WITHOUT ANY WARRANTY; without even the implied warranty of ++ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the ++ * GNU General Public License for more details. ++ */ ++ ++/* ++ * MSM PCIe controller driver. ++ */ ++ ++#define pr_fmt(fmt) "%s: " fmt, __func__ ++ ++#include ++#include ++#include ++#include ++#include ++#include ++#include ++#include ++#include ++#include ++#include ++#include ++#include ++#include ++//#include ++#include ++#include ++#include ++#include "msm-bus.h" ++#include "msm-bus-board.h" ++ ++#include "pcie.h" ++ ++/* Root Complex Port vendor/device IDs */ ++#define PCIE_VENDOR_ID_RCP 0x17cb ++#ifdef CONFIG_ARCH_MDM9630 ++#define PCIE_DEVICE_ID_RCP 0x300 ++#else ++#define PCIE_DEVICE_ID_RCP 0x0101 ++#endif ++ ++#define PCIE20_PARF_SYS_CTRL 0x00 ++#define PCIE20_PARF_PM_CTRL 0x20 ++#define PCIE20_PARF_PM_STTS 0x24 ++#define PCIE20_PARF_PCS_DEEMPH 0x34 ++#define PCIE20_PARF_PCS_SWING 0x38 ++#define PCIE20_PARF_PHY_CTRL 0x40 ++#define PCIE20_PARF_PHY_REFCLK 0x4C ++#define PCIE20_PARF_CONFIG_BITS 0x50 ++#define PCIE20_PARF_DBI_BASE_ADDR 0x168 ++#define PCIE20_PARF_AXI_MSTR_WR_ADDR_HALT 0x178 ++#define PCIE20_PARF_Q2A_FLUSH 0x1AC ++#define PCIE20_PARF_LTSSM 0x1B0 ++ ++#define PCIE20_ELBI_VERSION 0x00 ++#define PCIE20_ELBI_SYS_CTRL 0x04 ++#define PCIE20_ELBI_SYS_STTS 0x08 ++ ++#define PCIE20_CAP 0x70 ++#define PCIE20_CAP_LINKCTRLSTATUS (PCIE20_CAP + 0x10) ++#define PCIE20_CAP_LINK_CAPABILITIES (PCIE20_CAP + 0xC) ++#define PCIE20_CAP_LINK_1 (PCIE20_CAP + 0x14) ++ ++#define PCIE20_COMMAND_STATUS 0x04 ++#define PCIE20_BUSNUMBERS 0x18 ++#define PCIE20_MEMORY_BASE_LIMIT 0x20 ++#define PCIE20_L1SUB_CONTROL1 0x158 ++#define 
PCIE20_EP_L1SUB_CTL1_OFFSET 0x30 ++#define PCIE20_DEVICE_CONTROL2_STATUS2 0x98 ++ ++#define PCIE20_ACK_F_ASPM_CTRL_REG 0x70C ++#define PCIE20_ACK_N_FTS 0xff00 ++#define PCIE20_GEN2_CTRL_REG 0x80C ++#define PCIE20_MISC_CONTROL_1_REG 0x8BC ++ ++#define PCIE20_PLR_IATU_VIEWPORT 0x900 ++#define PCIE20_PLR_IATU_CTRL1 0x904 ++#define PCIE20_PLR_IATU_CTRL2 0x908 ++#define PCIE20_PLR_IATU_LBAR 0x90C ++#define PCIE20_PLR_IATU_UBAR 0x910 ++#define PCIE20_PLR_IATU_LAR 0x914 ++#define PCIE20_PLR_IATU_LTAR 0x918 ++#define PCIE20_PLR_IATU_UTAR 0x91c ++ ++#define RD 0 ++#define WR 1 ++ ++/* Timing Delays */ ++#define PERST_PROPAGATION_DELAY_US_MIN 1000 ++#define PERST_PROPAGATION_DELAY_US_MAX 1005 ++#define REFCLK_STABILIZATION_DELAY_US_MIN 1000 ++#define REFCLK_STABILIZATION_DELAY_US_MAX 1500 ++#define LINK_RETRY_TIMEOUT_US_MIN 20000 ++#define LINK_RETRY_TIMEOUT_US_MAX 25000 ++#define LINK_UP_TIMEOUT_US_MIN 5000 ++#define LINK_UP_TIMEOUT_US_MAX 5100 ++#define LINK_UP_CHECK_MAX_COUNT 20 ++#define PHY_STABILIZATION_DELAY_US_MIN 995 ++#define PHY_STABILIZATION_DELAY_US_MAX 1005 ++#define REQ_EXIT_L1_DELAY_US 1 ++ ++#define PHY_READY_TIMEOUT_COUNT 10 ++#define XMLH_LINK_UP 0x400 ++#define MAX_LINK_RETRIES 5 ++#define MAX_BUS_NUM 3 ++#define MAX_PROP_SIZE 32 ++#define MAX_RC_NAME_LEN 15 ++ ++#define CMD_BME_VAL 0x4 ++#define DBI_RO_WR_EN 1 ++#define PCIE_CAP_CPL_TIMEOUT_DISABLE 0x10 ++#define LTSSM_EN (1 << 8) ++#define PCIE_CAP_ACTIVE_STATE_LINK_PM_SUPPORT_MASK 0xc00 ++#define PCIE_CAP_LINK1_VAL 0x2fd7f ++ ++/* Config Space Offsets */ ++#define BDF_OFFSET(bus, device, function) \ ++ ((bus << 24) | (device << 15) | (function << 8)) ++ ++/* debug mask sys interface */ ++static int msm_pcie_debug_mask; ++module_param_named(debug_mask, msm_pcie_debug_mask, ++ int, S_IRUGO | S_IWUSR | S_IWGRP); ++static atomic_t rc_removed; ++ ++/** ++ * PCIe driver state ++ */ ++struct pcie_drv_sta { ++ u32 rc_num; ++ u32 rc_expected; ++ u32 current_rc; ++ bool vreg_on; ++ struct mutex drv_lock; ++} 
pcie_drv; ++ ++/* msm pcie device data */ ++static struct msm_pcie_dev_t msm_pcie_dev[MAX_RC_NUM]; ++ ++/* regulators */ ++static struct msm_pcie_vreg_info_t msm_pcie_vreg_info[MSM_PCIE_MAX_VREG] = { ++ {NULL, "vreg-3.3", 0, 0, 0, false}, ++ {NULL, "vreg-1.8", 1800000, 1800000, 1000, true}, ++ {NULL, "vreg-0.9", 1000000, 1000000, 24000, true} ++}; ++ ++/* GPIOs */ ++static struct msm_pcie_gpio_info_t msm_pcie_gpio_info[MSM_PCIE_MAX_GPIO] = { ++ {"perst-gpio", 0, 1, 0, 0}, ++ {"wake-gpio", 0, 0, 0, 0}, ++ {"clkreq-gpio", 0, 0, 0, 0} ++}; ++ ++/* clocks */ ++static struct msm_pcie_clk_info_t ++ msm_pcie_clk_info[MAX_RC_NUM][MSM_PCIE_MAX_CLK] = { ++ { ++ {NULL, "pcie_0_cfg_ahb_clk", 0, true}, ++ {NULL, "pcie_0_mstr_axi_clk", 0, true}, ++ {NULL, "pcie_0_slv_axi_clk", 0, true}, ++ }, ++}; ++ ++/* RESETs */ ++static struct msm_pcie_rst_info_t msm_pcie_rst_info[MSM_PCIE_MAX_RESET] = { ++ {NULL, "pcie_rst_axi_m_ares"}, ++ {NULL, "pcie_rst_axi_s_ares"}, ++ {NULL, "pcie_rst_pipe_ares"}, ++ {NULL, "pcie_rst_axi_m_vmidmt_ares"}, ++ {NULL, "pcie_rst_axi_s_xpu_ares"}, ++ {NULL, "pcie_rst_parf_xpu_ares"}, ++ {NULL, "pcie_rst_phy_ares"}, ++ {NULL, "pcie_rst_axi_m_sticky_ares"}, ++ {NULL, "pcie_rst_pipe_sticky_ares"}, ++ {NULL, "pcie_rst_pwr_ares"}, ++ {NULL, "pcie_rst_ahb_res"}, ++ {NULL, "pcie_rst_phy_ahb_ares"} ++}; ++ ++/* resources */ ++static const struct msm_pcie_res_info_t msm_pcie_res_info[MSM_PCIE_MAX_RES] = { ++ {"parf", 0, 0}, ++ {"phy", 0, 0}, ++ {"dm_core", 0, 0}, ++ {"elbi", 0, 0}, ++ {"conf", 0, 0}, ++ {"io", 0, 0}, ++ {"bars", 0, 0} ++}; ++ ++/* irqs */ ++static const struct msm_pcie_irq_info_t msm_pcie_irq_info[MSM_PCIE_MAX_IRQ] = { ++ {"int_msi", 0}, ++ {"int_a", 0}, ++ {"int_b", 0}, ++ {"int_c", 0}, ++ {"int_d", 0}, ++ {"int_pls_pme", 0}, ++ {"int_pme_legacy", 0}, ++ {"int_pls_err", 0}, ++ {"int_aer_legacy", 0}, ++ {"int_pls_link_up", 0}, ++ {"int_pls_link_down", 0}, ++ {"int_bridge_flush_n", 0}, ++ {"int_wake", 0} ++}; ++ ++int msm_pcie_get_debug_mask(void) ++{ 
++ return msm_pcie_debug_mask; ++} ++ ++bool msm_pcie_confirm_linkup(struct msm_pcie_dev_t *dev, ++ bool check_sw_stts, ++ bool check_ep) ++{ ++ u32 val; ++ ++ if (check_sw_stts && (dev->link_status != MSM_PCIE_LINK_ENABLED)) { ++ PCIE_DBG(dev, "PCIe: The link of RC %d is not enabled.\n", ++ dev->rc_idx); ++ return false; ++ } ++ ++ if (!(readl_relaxed(dev->dm_core + 0x80) & BIT(29))) { ++ PCIE_DBG(dev, "PCIe: The link of RC %d is not up.\n", ++ dev->rc_idx); ++ return false; ++ } ++ ++ val = readl_relaxed(dev->dm_core); ++ PCIE_DBG(dev, "PCIe: device ID and vender ID of RC %d are 0x%x.\n", ++ dev->rc_idx, val); ++ if (val == PCIE_LINK_DOWN) { ++ PCIE_ERR(dev, ++ "PCIe: The link of RC %d is not really up; device ID and vender ID of RC %d are 0x%x.\n", ++ dev->rc_idx, dev->rc_idx, val); ++ return false; ++ } ++ ++ if (check_ep) { ++ val = readl_relaxed(dev->conf); ++ PCIE_DBG(dev, ++ "PCIe: device ID and vender ID of EP of RC %d are 0x%x.\n", ++ dev->rc_idx, val); ++ if (val == PCIE_LINK_DOWN) { ++ PCIE_ERR(dev, ++ "PCIe: The link of RC %d is not really up; device ID and vender ID of EP of RC %d are 0x%x.\n", ++ dev->rc_idx, dev->rc_idx, val); ++ return false; ++ } ++ } ++ ++ return true; ++} ++ ++void msm_pcie_cfg_recover(struct msm_pcie_dev_t *dev, bool rc) ++{ ++ int i; ++ u32 val = 0; ++ u32 *shadow; ++ void *cfg; ++ ++ if (rc) { ++ shadow = dev->rc_shadow; ++ cfg = dev->dm_core; ++ } else { ++ shadow = dev->ep_shadow; ++ cfg = dev->conf; ++ } ++ ++ for (i = PCIE_CONF_SPACE_DW - 1; i >= 0; i--) { ++ val = shadow[i]; ++ if (val != PCIE_CLEAR) { ++ PCIE_DBG3(dev, "PCIe: before recovery:cfg 0x%x:0x%x\n", ++ i * 4, readl_relaxed(cfg + i * 4)); ++ PCIE_DBG3(dev, "PCIe: shadow_dw[%d]:cfg 0x%x:0x%x\n", ++ i, i * 4, val); ++ writel_relaxed(val, cfg + i * 4); ++ wmb(); ++ PCIE_DBG3(dev, "PCIe: after recovery:cfg 0x%x:0x%x\n\n", ++ i * 4, readl_relaxed(cfg + i * 4)); ++ } ++ } ++ ++ readl_relaxed(dev->elbi); ++} ++ ++static void msm_pcie_write_mask(void __iomem *addr, ++ 
uint32_t clear_mask, uint32_t set_mask) ++{ ++ uint32_t val; ++ ++ val = (readl_relaxed(addr) & ~clear_mask) | set_mask; ++ writel_relaxed(val, addr); ++ wmb(); /* ensure data is written to hardware register */ ++} ++ ++static int msm_pcie_is_link_up(struct msm_pcie_dev_t *dev) ++{ ++ return readl_relaxed(dev->dm_core + ++ PCIE20_CAP_LINKCTRLSTATUS) & BIT(29); ++} ++ ++static inline int msm_pcie_oper_conf(struct pci_bus *bus, u32 devfn, int oper, ++ int where, int size, u32 *val) ++{ ++ uint32_t word_offset, byte_offset, mask; ++ uint32_t rd_val, wr_val; ++ struct msm_pcie_dev_t *dev; ++ void __iomem *config_base; ++ bool rc = false; ++ u32 rc_idx; ++ int rv = 0; ++ ++ dev = ((struct msm_pcie_dev_t *) ++ (((struct pci_sys_data *)bus->sysdata)->private_data)); ++ ++ if (!dev) { ++ pr_err("PCIe: No device found for this bus.\n"); ++ *val = ~0; ++ rv = PCIBIOS_DEVICE_NOT_FOUND; ++ goto out; ++ } ++ ++ /* Do the bus->number based access control since we don't support ++ ECAM mechanism */ ++ ++ switch (bus->number) { ++ case 0: ++ rc = true; ++ case 1: ++ rc_idx = dev->rc_idx; ++ break; ++ default: ++ PCIE_ERR(dev, "PCIe: unsupported bus number:%d\n", bus->number); ++ *val = ~0; ++ rv = PCIBIOS_DEVICE_NOT_FOUND; ++ goto out; ++ } ++ ++ if ((bus->number > MAX_BUS_NUM) || (devfn != 0)) { ++ PCIE_DBG3(dev, "RC%d invalid %s - bus %d devfn %d\n", rc_idx, ++ (oper == RD) ? 
"rd" : "wr", bus->number, devfn); ++ *val = ~0; ++ rv = PCIBIOS_DEVICE_NOT_FOUND; ++ goto out; ++ } ++ ++ spin_lock_irqsave(&dev->cfg_lock, dev->irqsave_flags); ++ ++ if (!dev->cfg_access) { ++ PCIE_DBG3(dev, ++ "Access denied for RC%d %d:0x%02x + 0x%04x[%d]\n", ++ rc_idx, bus->number, devfn, where, size); ++ *val = ~0; ++ rv = PCIBIOS_DEVICE_NOT_FOUND; ++ goto unlock; ++ } ++ ++ if (dev->link_status != MSM_PCIE_LINK_ENABLED) { ++ PCIE_DBG3(dev, ++ "Access to RC%d %d:0x%02x + 0x%04x[%d] is denied because link is down\n", ++ rc_idx, bus->number, devfn, where, size); ++ *val = ~0; ++ rv = PCIBIOS_DEVICE_NOT_FOUND; ++ goto unlock; ++ } ++ ++ /* check if the link is up for endpoint */ ++ if (!rc && !msm_pcie_is_link_up(dev)) { ++ PCIE_ERR(dev, ++ "PCIe: RC%d %s fail, link down - bus %d devfn %d\n", ++ rc_idx, (oper == RD) ? "rd" : "wr", ++ bus->number, devfn); ++ *val = ~0; ++ rv = PCIBIOS_DEVICE_NOT_FOUND; ++ goto unlock; ++ } ++ ++ word_offset = where & ~0x3; ++ byte_offset = where & 0x3; ++ mask = (~0 >> (8 * (4 - size))) << (8 * byte_offset); ++ ++ config_base = rc ? 
dev->dm_core : dev->conf; ++ rd_val = readl_relaxed(config_base + word_offset); ++ ++ if (oper == RD) { ++ *val = ((rd_val & mask) >> (8 * byte_offset)); ++ PCIE_DBG3(dev, ++ "RC%d %d:0x%02x + 0x%04x[%d] -> 0x%08x; rd 0x%08x\n", ++ rc_idx, bus->number, devfn, where, size, *val, rd_val); ++ } else { ++ wr_val = (rd_val & ~mask) | ++ ((*val << (8 * byte_offset)) & mask); ++ writel_relaxed(wr_val, config_base + word_offset); ++ wmb(); /* ensure config data is written to hardware register */ ++ readl_relaxed(dev->elbi); ++ ++ if (rd_val == PCIE_LINK_DOWN) { ++ PCIE_ERR(dev, ++ "Read of RC%d %d:0x%02x + 0x%04x[%d] is all FFs\n", ++ rc_idx, bus->number, devfn, where, size); ++ } else if (dev->shadow_en) { ++ if (rc) ++ dev->rc_shadow[word_offset / 4] = wr_val; ++ else ++ dev->ep_shadow[word_offset / 4] = wr_val; ++ } ++ ++ PCIE_DBG3(dev, ++ "RC%d %d:0x%02x + 0x%04x[%d] <- 0x%08x; rd 0x%08x val 0x%08x\n", ++ rc_idx, bus->number, devfn, where, size, ++ wr_val, rd_val, *val); ++ } ++ ++unlock: ++ spin_unlock_irqrestore(&dev->cfg_lock, dev->irqsave_flags); ++out: ++ return rv; ++} ++ ++static int msm_pcie_rd_conf(struct pci_bus *bus, u32 devfn, int where, ++ int size, u32 *val) ++{ ++ int ret = msm_pcie_oper_conf(bus, devfn, RD, where, size, val); ++ ++ if ((bus->number == 0) && (where == PCI_CLASS_REVISION)) { ++ *val = (*val & 0xff) | (PCI_CLASS_BRIDGE_PCI << 16); ++ pr_debug("change class for RC:0x%x\n", *val); ++ } ++ ++ return ret; ++} ++ ++static int msm_pcie_wr_conf(struct pci_bus *bus, u32 devfn, ++ int where, int size, u32 val) ++{ ++ return msm_pcie_oper_conf(bus, devfn, WR, where, size, &val); ++} ++ ++static struct pci_ops msm_pcie_ops = { ++ .read = msm_pcie_rd_conf, ++ .write = msm_pcie_wr_conf, ++}; ++ ++static int msm_pcie_gpio_init(struct msm_pcie_dev_t *dev) ++{ ++ int rc = 0, i; ++ struct msm_pcie_gpio_info_t *info; ++ ++ PCIE_DBG(dev, "RC%d\n", dev->rc_idx); ++ ++ for (i = 0; i < dev->gpio_n; i++) { ++ info = &dev->gpio[i]; ++ ++ if (!info->num) ++ 
continue; ++ ++ rc = gpio_request(info->num, info->name); ++ if (rc) { ++ PCIE_ERR(dev, "PCIe: RC%d can't get gpio %s; %d\n", ++ dev->rc_idx, info->name, rc); ++ break; ++ } ++ ++ if (info->out) ++ rc = gpio_direction_output(info->num, info->init); ++ else ++ rc = gpio_direction_input(info->num); ++ if (rc) { ++ PCIE_ERR(dev, ++ "PCIe: RC%d can't set direction for GPIO %s:%d\n", ++ dev->rc_idx, info->name, rc); ++ gpio_free(info->num); ++ break; ++ } ++ } ++ ++ if (rc) ++ while (i--) ++ gpio_free(dev->gpio[i].num); ++ ++ return rc; ++} ++ ++static void msm_pcie_gpio_deinit(struct msm_pcie_dev_t *dev) ++{ ++ int i; ++ ++ PCIE_DBG(dev, "RC%d\n", dev->rc_idx); ++ ++ for (i = 0; i < dev->gpio_n; i++) ++ gpio_free(dev->gpio[i].num); ++} ++ ++int msm_pcie_vreg_init(struct msm_pcie_dev_t *dev) ++{ ++ int i, rc = 0; ++ struct regulator *vreg; ++ struct msm_pcie_vreg_info_t *info; ++ ++ PCIE_DBG(dev, "RC%d\n", dev->rc_idx); ++ ++ for (i = 0; i < MSM_PCIE_MAX_VREG; i++) { ++ info = &dev->vreg[i]; ++ vreg = info->hdl; ++ ++ if (!vreg) ++ continue; ++ ++ PCIE_DBG2(dev, "RC%d Vreg %s is being enabled\n", ++ dev->rc_idx, info->name); ++ if (info->max_v) { ++ rc = regulator_set_voltage(vreg, ++ info->min_v, info->max_v); ++ if (rc) { ++ PCIE_ERR(dev, ++ "PCIe: RC%d can't set voltage for %s: %d\n", ++ dev->rc_idx, info->name, rc); ++ break; ++ } ++ } ++ ++ if (info->opt_mode) { ++ rc = regulator_set_mode(vreg, info->opt_mode); ++ if (rc < 0) { ++ PCIE_ERR(dev, ++ "PCIe: RC%d can't set mode for %s: %d\n", ++ dev->rc_idx, info->name, rc); ++ break; ++ } ++ } ++ ++ rc = regulator_enable(vreg); ++ if (rc) { ++ PCIE_ERR(dev, ++ "PCIe: RC%d can't enable regulator %s: %d\n", ++ dev->rc_idx, info->name, rc); ++ break; ++ } ++ } ++ ++ if (rc) ++ while (i--) { ++ struct regulator *hdl = dev->vreg[i].hdl; ++ if (hdl) ++ regulator_disable(hdl); ++ } ++ ++ return rc; ++} ++ ++static void msm_pcie_vreg_deinit(struct msm_pcie_dev_t *dev) ++{ ++ int i; ++ ++ PCIE_DBG(dev, "RC%d\n", dev->rc_idx); 
++ ++ for (i = MSM_PCIE_MAX_VREG - 1; i >= 0; i--) { ++ if (dev->vreg[i].hdl) { ++ PCIE_DBG(dev, "Vreg %s is being disabled\n", ++ dev->vreg[i].name); ++ regulator_disable(dev->vreg[i].hdl); ++ } ++ } ++} ++ ++static int msm_pcie_clk_init(struct msm_pcie_dev_t *dev) ++{ ++ int i, rc = 0; ++ struct msm_pcie_clk_info_t *info; ++ ++ PCIE_DBG(dev, "RC%d\n", dev->rc_idx); ++ ++ rc = regulator_enable(dev->gdsc); ++ ++ if (rc) { ++ PCIE_ERR(dev, "PCIe: fail to enable GDSC for RC%d (%s)\n", ++ dev->rc_idx, dev->pdev->name); ++ return rc; ++ } ++ ++ if (dev->bus_client) { ++ rc = msm_bus_scale_client_update_request(dev->bus_client, 1); ++ if (rc) { ++ PCIE_ERR(dev, ++ "PCIe: fail to set bus bandwidth for RC%d:%d.\n", ++ dev->rc_idx, rc); ++ return rc; ++ } else { ++ PCIE_DBG2(dev, ++ "PCIe: set bus bandwidth for RC%d.\n", ++ dev->rc_idx); ++ } ++ } ++ ++ for (i = 0; i < MSM_PCIE_MAX_CLK; i++) { ++ info = &dev->clk[i]; ++ ++ if (!info->hdl) ++ continue; ++ ++ if (info->freq) { ++ rc = clk_set_rate(info->hdl, info->freq); ++ if (rc) { ++ PCIE_ERR(dev, ++ "PCIe: RC%d can't set rate for clk %s: %d.\n", ++ dev->rc_idx, info->name, rc); ++ break; ++ } else { ++ PCIE_DBG2(dev, ++ "PCIe: RC%d set rate for clk %s.\n", ++ dev->rc_idx, info->name); ++ } ++ } ++ ++ rc = clk_prepare_enable(info->hdl); ++ ++ if (rc) ++ PCIE_ERR(dev, "PCIe: RC%d failed to enable clk %s\n", ++ dev->rc_idx, info->name); ++ else ++ PCIE_DBG2(dev, "enable clk %s for RC%d.\n", ++ info->name, dev->rc_idx); ++ } ++ ++ if (rc) { ++ PCIE_DBG(dev, "RC%d disable clocks for error handling.\n", ++ dev->rc_idx); ++ while (i--) { ++ struct clk *hdl = dev->clk[i].hdl; ++ if (hdl) ++ clk_disable_unprepare(hdl); ++ } ++ ++ regulator_disable(dev->gdsc); ++ } ++ ++ return rc; ++} ++ ++static void msm_pcie_clk_deinit(struct msm_pcie_dev_t *dev) ++{ ++ int i; ++ int rc; ++ ++ PCIE_DBG(dev, "RC%d\n", dev->rc_idx); ++ ++ for (i = 0; i < MSM_PCIE_MAX_CLK; i++) ++ if (dev->clk[i].hdl) ++ clk_disable_unprepare(dev->clk[i].hdl); ++ 
++ if (dev->bus_client) { ++ rc = msm_bus_scale_client_update_request(dev->bus_client, 0); ++ if (rc) ++ PCIE_ERR(dev, ++ "PCIe: fail to relinquish bus bandwidth for RC%d:%d.\n", ++ dev->rc_idx, rc); ++ else ++ PCIE_DBG(dev, ++ "PCIe: relinquish bus bandwidth for RC%d.\n", ++ dev->rc_idx); ++ } ++ ++ regulator_disable(dev->gdsc); ++} ++ ++static void msm_pcie_controller_reset(struct msm_pcie_dev_t *dev) ++{ ++ /* Assert pcie_pipe_ares */ ++ reset_control_assert(dev->rst[MSM_PCIE_AXI_M_ARES].hdl); ++ reset_control_assert(dev->rst[MSM_PCIE_AXI_S_ARES].hdl); ++ usleep_range(10000, 12000); /* wait 12ms */ ++ ++ reset_control_assert(dev->rst[MSM_PCIE_PIPE_ARES].hdl); ++ reset_control_assert(dev->rst[MSM_PCIE_PIPE_STICKY_ARES].hdl); ++ reset_control_assert(dev->rst[MSM_PCIE_PHY_ARES].hdl); ++ reset_control_assert(dev->rst[MSM_PCIE_PHY_AHB_ARES].hdl); ++ usleep_range(10000, 12000); /* wait 12ms */ ++ ++ reset_control_assert(dev->rst[MSM_PCIE_AXI_M_STICKY_ARES].hdl); ++ reset_control_assert(dev->rst[MSM_PCIE_PWR_ARES].hdl); ++ reset_control_assert(dev->rst[MSM_PCIE_AHB_ARES].hdl); ++ usleep_range(10000, 12000); /* wait 12ms */ ++ ++ reset_control_deassert(dev->rst[MSM_PCIE_PHY_AHB_ARES].hdl); ++ reset_control_deassert(dev->rst[MSM_PCIE_PHY_ARES].hdl); ++ reset_control_deassert(dev->rst[MSM_PCIE_PIPE_ARES].hdl); ++ reset_control_deassert(dev->rst[MSM_PCIE_PIPE_STICKY_ARES].hdl); ++ usleep_range(10000, 12000); /* wait 12ms */ ++ ++ reset_control_deassert(dev->rst[MSM_PCIE_AXI_M_ARES].hdl); ++ reset_control_deassert(dev->rst[MSM_PCIE_AXI_M_STICKY_ARES].hdl); ++ reset_control_deassert(dev->rst[MSM_PCIE_AXI_S_ARES].hdl); ++ reset_control_deassert(dev->rst[MSM_PCIE_PWR_ARES].hdl); ++ reset_control_deassert(dev->rst[MSM_PCIE_AHB_ARES].hdl); ++ usleep_range(10000, 12000); /* wait 12ms */ ++ wmb(); /* ensure data is written to hw register */ ++} ++ ++static void msm_pcie_config_controller(struct msm_pcie_dev_t *dev) ++{ ++ struct resource *axi_conf = 
dev->res[MSM_PCIE_RES_CONF].resource; ++ u32 dev_conf, upper, lower, limit; ++ ++ PCIE_DBG(dev, "RC%d\n", dev->rc_idx); ++ ++ if (IS_ENABLED(CONFIG_ARM_LPAE)) { ++ lower = PCIE_LOWER_ADDR(axi_conf->start); ++ upper = PCIE_UPPER_ADDR(axi_conf->start); ++ limit = PCIE_LOWER_ADDR(axi_conf->end); ++ } else { ++ lower = axi_conf->start; ++ upper = 0; ++ limit = axi_conf->end; ++ } ++ ++ dev_conf = BDF_OFFSET(1, 0, 0); ++ ++ if (dev->shadow_en) { ++ dev->rc_shadow[PCIE20_PLR_IATU_VIEWPORT / 4] = 0; ++ dev->rc_shadow[PCIE20_PLR_IATU_CTRL1 / 4] = 4; ++ dev->rc_shadow[PCIE20_PLR_IATU_LBAR / 4] = lower; ++ dev->rc_shadow[PCIE20_PLR_IATU_UBAR / 4] = upper; ++ dev->rc_shadow[PCIE20_PLR_IATU_LAR / 4] = limit; ++ dev->rc_shadow[PCIE20_PLR_IATU_LTAR / 4] = dev_conf; ++ dev->rc_shadow[PCIE20_PLR_IATU_UTAR / 4] = 0; ++ dev->rc_shadow[PCIE20_PLR_IATU_CTRL2 / 4] = BIT(31); ++ } ++ ++ /* ++ * program and enable address translation region 0 (device config ++ * address space); region type config; ++ * axi config address range to device config address range ++ */ ++ writel_relaxed(0, dev->dm_core + PCIE20_PLR_IATU_VIEWPORT); ++ /* ensure that hardware locks the region before programming it */ ++ wmb(); ++ ++ writel_relaxed(4, dev->dm_core + PCIE20_PLR_IATU_CTRL1); ++ writel_relaxed(lower, dev->dm_core + PCIE20_PLR_IATU_LBAR); ++ writel_relaxed(upper, dev->dm_core + PCIE20_PLR_IATU_UBAR); ++ writel_relaxed(limit, dev->dm_core + PCIE20_PLR_IATU_LAR); ++ writel_relaxed(dev_conf, dev->dm_core + PCIE20_PLR_IATU_LTAR); ++ writel_relaxed(0, dev->dm_core + PCIE20_PLR_IATU_UTAR); ++ writel_relaxed(BIT(31), dev->dm_core + PCIE20_PLR_IATU_CTRL2); ++ /* ensure that hardware registers the configuration */ ++ wmb(); ++ PCIE_DBG2(dev, "PCIE20_PLR_IATU_VIEWPORT:0x%x\n", ++ readl_relaxed(dev->dm_core + PCIE20_PLR_IATU_VIEWPORT)); ++ PCIE_DBG2(dev, "PCIE20_PLR_IATU_CTRL1:0x%x\n", ++ readl_relaxed(dev->dm_core + PCIE20_PLR_IATU_CTRL1)); ++ PCIE_DBG2(dev, "PCIE20_PLR_IATU_LBAR:0x%x\n", ++ 
readl_relaxed(dev->dm_core + PCIE20_PLR_IATU_LBAR)); ++ PCIE_DBG2(dev, "PCIE20_PLR_IATU_UBAR:0x%x\n", ++ readl_relaxed(dev->dm_core + PCIE20_PLR_IATU_UBAR)); ++ PCIE_DBG2(dev, "PCIE20_PLR_IATU_LAR:0x%x\n", ++ readl_relaxed(dev->dm_core + PCIE20_PLR_IATU_LAR)); ++ PCIE_DBG2(dev, "PCIE20_PLR_IATU_LTAR:0x%x\n", ++ readl_relaxed(dev->dm_core + PCIE20_PLR_IATU_LTAR)); ++ PCIE_DBG2(dev, "PCIE20_PLR_IATU_UTAR:0x%x\n", ++ readl_relaxed(dev->dm_core + PCIE20_PLR_IATU_UTAR)); ++ PCIE_DBG2(dev, "PCIE20_PLR_IATU_CTRL2:0x%x\n", ++ readl_relaxed(dev->dm_core + PCIE20_PLR_IATU_CTRL2)); ++ ++ /* configure N_FTS */ ++ PCIE_DBG2(dev, "Original PCIE20_ACK_F_ASPM_CTRL_REG:0x%x\n", ++ readl_relaxed(dev->dm_core + PCIE20_ACK_F_ASPM_CTRL_REG)); ++ if (!dev->n_fts) ++ msm_pcie_write_mask(dev->dm_core + PCIE20_ACK_F_ASPM_CTRL_REG, ++ 0, BIT(15)); ++ else ++ msm_pcie_write_mask(dev->dm_core + PCIE20_ACK_F_ASPM_CTRL_REG, ++ PCIE20_ACK_N_FTS, ++ dev->n_fts << 8); ++ readl_relaxed(dev->elbi); ++ ++ if (dev->shadow_en) ++ dev->rc_shadow[PCIE20_ACK_F_ASPM_CTRL_REG / 4] = ++ readl_relaxed(dev->dm_core + ++ PCIE20_ACK_F_ASPM_CTRL_REG); ++ ++ PCIE_DBG2(dev, "Updated PCIE20_ACK_F_ASPM_CTRL_REG:0x%x\n", ++ readl_relaxed(dev->dm_core + PCIE20_ACK_F_ASPM_CTRL_REG)); ++} ++ ++static void msm_pcie_config_l1ss(struct msm_pcie_dev_t *dev) ++{ ++ u32 offset = 0; ++ ++ if (!dev->rc_idx) ++ offset = PCIE20_EP_L1SUB_CTL1_OFFSET; ++ ++ /* Enable the AUX Clock and the Core Clk to be synchronous for L1SS*/ ++ if (!dev->aux_clk_sync) ++ msm_pcie_write_mask(dev->parf + ++ PCIE20_PARF_SYS_CTRL, BIT(3), 0); ++ ++ /* Enable L1SS on RC */ ++ msm_pcie_write_mask(dev->dm_core + PCIE20_CAP_LINKCTRLSTATUS, 0, ++ BIT(1)|BIT(0)); ++ msm_pcie_write_mask(dev->dm_core + PCIE20_L1SUB_CONTROL1, 0, ++ BIT(3)|BIT(2)|BIT(1)|BIT(0)); ++ msm_pcie_write_mask(dev->dm_core + PCIE20_DEVICE_CONTROL2_STATUS2, 0, ++ BIT(10)); ++ readl_relaxed(dev->elbi); ++ if (dev->shadow_en) { ++ dev->rc_shadow[PCIE20_CAP_LINKCTRLSTATUS / 4] = ++ 
readl_relaxed(dev->dm_core + PCIE20_CAP_LINKCTRLSTATUS); ++ dev->rc_shadow[PCIE20_L1SUB_CONTROL1 / 4] = ++ readl_relaxed(dev->dm_core + PCIE20_L1SUB_CONTROL1); ++ dev->rc_shadow[PCIE20_DEVICE_CONTROL2_STATUS2 / 4] = ++ readl_relaxed(dev->dm_core + ++ PCIE20_DEVICE_CONTROL2_STATUS2); ++ } ++ PCIE_DBG2(dev, "RC's CAP_LINKCTRLSTATUS:0x%x\n", ++ readl_relaxed(dev->dm_core + PCIE20_CAP_LINKCTRLSTATUS)); ++ PCIE_DBG2(dev, "RC's L1SUB_CONTROL1:0x%x\n", ++ readl_relaxed(dev->dm_core + PCIE20_L1SUB_CONTROL1)); ++ PCIE_DBG2(dev, "RC's DEVICE_CONTROL2_STATUS2:0x%x\n", ++ readl_relaxed(dev->dm_core + PCIE20_DEVICE_CONTROL2_STATUS2)); ++ ++ /* Enable L1SS on EP */ ++ msm_pcie_write_mask(dev->conf + PCIE20_CAP_LINKCTRLSTATUS, 0, ++ BIT(1)|BIT(0)); ++ msm_pcie_write_mask(dev->conf + PCIE20_L1SUB_CONTROL1 + ++ offset, 0, ++ BIT(3)|BIT(2)|BIT(1)|BIT(0)); ++ msm_pcie_write_mask(dev->conf + PCIE20_DEVICE_CONTROL2_STATUS2, 0, ++ BIT(10)); ++ readl_relaxed(dev->elbi); ++ if (dev->shadow_en) { ++ dev->ep_shadow[PCIE20_CAP_LINKCTRLSTATUS / 4] = ++ readl_relaxed(dev->conf + ++ PCIE20_CAP_LINKCTRLSTATUS); ++ dev->ep_shadow[PCIE20_L1SUB_CONTROL1 / 4 + offset / 4] = ++ readl_relaxed(dev->conf + ++ PCIE20_L1SUB_CONTROL1 + offset); ++ dev->ep_shadow[PCIE20_DEVICE_CONTROL2_STATUS2 / 4] = ++ readl_relaxed(dev->conf + ++ PCIE20_DEVICE_CONTROL2_STATUS2); ++ } ++ PCIE_DBG2(dev, "EP's CAP_LINKCTRLSTATUS:0x%x\n", ++ readl_relaxed(dev->conf + PCIE20_CAP_LINKCTRLSTATUS)); ++ PCIE_DBG2(dev, "EP's L1SUB_CONTROL1:0x%x\n", ++ readl_relaxed(dev->conf + PCIE20_L1SUB_CONTROL1 + ++ offset)); ++ PCIE_DBG2(dev, "EP's DEVICE_CONTROL2_STATUS2:0x%x\n", ++ readl_relaxed(dev->conf + PCIE20_DEVICE_CONTROL2_STATUS2)); ++} ++ ++static int msm_pcie_get_resources(struct msm_pcie_dev_t *dev, ++ struct platform_device *pdev) ++{ ++ int i, len, cnt, ret = 0; ++ struct msm_pcie_vreg_info_t *vreg_info; ++ struct msm_pcie_gpio_info_t *gpio_info; ++ struct msm_pcie_clk_info_t *clk_info; ++ struct msm_pcie_rst_info_t *rst_info; 
++ struct resource *res; ++ struct msm_pcie_res_info_t *res_info; ++ struct msm_pcie_irq_info_t *irq_info; ++ char prop_name[MAX_PROP_SIZE]; ++ const __be32 *prop; ++ u32 *clkfreq = NULL; ++ ++ cnt = of_property_count_strings((&pdev->dev)->of_node, ++ "clock-names"); ++ if (cnt > 0) { ++ clkfreq = kzalloc(cnt * sizeof(*clkfreq), ++ GFP_KERNEL); ++ if (!clkfreq) { ++ PCIE_ERR(dev, "PCIe: memory alloc failed for RC%d\n", ++ dev->rc_idx); ++ return -ENOMEM; ++ } ++ ret = of_property_read_u32_array( ++ (&pdev->dev)->of_node, ++ "max-clock-frequency-hz", clkfreq, cnt); ++ if (ret) { ++ PCIE_ERR(dev, ++ "PCIe: invalid max-clock-frequency-hz property for RC%d:%d\n", ++ dev->rc_idx, ret); ++ goto out; ++ } ++ } ++ ++ PCIE_DBG(dev, "RC%d\n", dev->rc_idx); ++ ++ for (i = 0; i < MSM_PCIE_MAX_VREG; i++) { ++ vreg_info = &dev->vreg[i]; ++ vreg_info->hdl = ++ devm_regulator_get(&pdev->dev, vreg_info->name); ++ ++ if (PTR_ERR(vreg_info->hdl) == -EPROBE_DEFER) { ++ PCIE_DBG(dev, "EPROBE_DEFER for VReg:%s\n", ++ vreg_info->name); ++ ret = PTR_ERR(vreg_info->hdl); ++ goto out; ++ } ++ ++ if (IS_ERR(vreg_info->hdl)) { ++ if (vreg_info->required) { ++ PCIE_DBG(dev, "Vreg %s doesn't exist\n", ++ vreg_info->name); ++ ret = PTR_ERR(vreg_info->hdl); ++ goto out; ++ } else { ++ PCIE_DBG(dev, ++ "Optional Vreg %s doesn't exist\n", ++ vreg_info->name); ++ vreg_info->hdl = NULL; ++ } ++ } else { ++ dev->vreg_n++; ++ snprintf(prop_name, MAX_PROP_SIZE, ++ "qcom,%s-voltage-level", vreg_info->name); ++ prop = of_get_property((&pdev->dev)->of_node, ++ prop_name, &len); ++ if (!prop || (len != (3 * sizeof(__be32)))) { ++ PCIE_DBG(dev, "%s %s property\n", ++ prop ? 
"invalid format" : ++ "no", prop_name); ++ vreg_info->hdl = NULL; ++ } else { ++ vreg_info->max_v = be32_to_cpup(&prop[0]); ++ vreg_info->min_v = be32_to_cpup(&prop[1]); ++ vreg_info->opt_mode = ++ be32_to_cpup(&prop[2]); ++ } ++ } ++ } ++ ++ dev->gdsc = devm_regulator_get(&pdev->dev, "gdsc-vdd"); ++ ++ if (IS_ERR(dev->gdsc)) { ++ PCIE_ERR(dev, "PCIe: RC%d Failed to get %s GDSC:%ld\n", ++ dev->rc_idx, dev->pdev->name, PTR_ERR(dev->gdsc)); ++ if (PTR_ERR(dev->gdsc) == -EPROBE_DEFER) ++ PCIE_DBG(dev, "PCIe: EPROBE_DEFER for %s GDSC\n", ++ dev->pdev->name); ++ ret = PTR_ERR(dev->gdsc); ++ goto out; ++ } ++ ++ dev->gpio_n = 0; ++ for (i = 0; i < MSM_PCIE_MAX_GPIO; i++) { ++ gpio_info = &dev->gpio[i]; ++ ret = of_get_named_gpio((&pdev->dev)->of_node, ++ gpio_info->name, 0); ++ if (ret >= 0) { ++ gpio_info->num = ret; ++ ret = 0; ++ dev->gpio_n++; ++ PCIE_DBG(dev, "GPIO num for %s is %d\n", ++ gpio_info->name, gpio_info->num); ++ } else { ++ goto out; ++ } ++ } ++ ++ for (i = 0; i < MSM_PCIE_MAX_CLK; i++) { ++ clk_info = &dev->clk[i]; ++ ++ clk_info->hdl = devm_clk_get(&pdev->dev, clk_info->name); ++ ++ if (IS_ERR(clk_info->hdl)) { ++ if (clk_info->required) { ++ PCIE_DBG(dev, "Clock %s isn't available:%ld\n", ++ clk_info->name, PTR_ERR(clk_info->hdl)); ++ ret = PTR_ERR(clk_info->hdl); ++ goto out; ++ } else { ++ PCIE_DBG(dev, "Ignoring Clock %s\n", ++ clk_info->name); ++ clk_info->hdl = NULL; ++ } ++ } else { ++ if (clkfreq != NULL) { ++ clk_info->freq = clkfreq[i + ++ MSM_PCIE_MAX_PIPE_CLK]; ++ PCIE_DBG(dev, "Freq of Clock %s is:%d\n", ++ clk_info->name, clk_info->freq); ++ } ++ } ++ } ++ ++ for (i = 0; i < MSM_PCIE_MAX_RESET; i++) { ++ rst_info = &dev->rst[i]; ++ ++ rst_info->hdl = devm_reset_control_get(&pdev->dev, rst_info->name); ++ ++ if (IS_ERR(rst_info->hdl)) { ++ PCIE_DBG(dev, "Reset %s isn't available:%ld\n", ++ rst_info->name, PTR_ERR(rst_info->hdl)); ++ ret = PTR_ERR(rst_info->hdl); ++ goto out; ++ } ++ } ++ ++ ++ ++ dev->bus_scale_table = 
msm_bus_cl_get_pdata(pdev); ++ if (!dev->bus_scale_table) { ++ PCIE_DBG(dev, "PCIe: No bus scale table for RC%d (%s)\n", ++ dev->rc_idx, dev->pdev->name); ++ dev->bus_client = 0; ++ } else { ++ dev->bus_client = ++ msm_bus_scale_register_client(dev->bus_scale_table); ++ if (!dev->bus_client) { ++ PCIE_ERR(dev, ++ "PCIe: Failed to register bus client for RC%d (%s)\n", ++ dev->rc_idx, dev->pdev->name); ++ msm_bus_cl_clear_pdata(dev->bus_scale_table); ++ ret = -ENODEV; ++ goto out; ++ } ++ } ++ ++ for (i = 0; i < MSM_PCIE_MAX_RES; i++) { ++ res_info = &dev->res[i]; ++ ++ res = platform_get_resource_byname(pdev, IORESOURCE_MEM, ++ res_info->name); ++ ++ if (!res) { ++ PCIE_ERR(dev, "PCIe: RC%d can't get %s resource.\n", ++ dev->rc_idx, res_info->name); ++ ret = -ENOMEM; ++ goto out; ++ } else ++ PCIE_DBG(dev, "start addr for %s is %pa.\n", ++ res_info->name, &res->start); ++ ++ res_info->base = devm_ioremap(&pdev->dev, ++ res->start, resource_size(res)); ++ if (!res_info->base) { ++ PCIE_ERR(dev, "PCIe: RC%d can't remap %s.\n", ++ dev->rc_idx, res_info->name); ++ ret = -ENOMEM; ++ goto out; ++ } ++ res_info->resource = res; ++ } ++ ++ for (i = 0; i < MSM_PCIE_MAX_IRQ; i++) { ++ irq_info = &dev->irq[i]; ++ ++ /* Skip if wakeirq is not present in device tree */ ++ if (!dev->ep_wakeirq && (i == MSM_PCIE_INT_WAKE)) ++ continue; ++ ++ res = platform_get_resource_byname(pdev, IORESOURCE_IRQ, ++ irq_info->name); ++ ++ if (!res) { ++ int j; ++ for (j = 0; j < MSM_PCIE_MAX_RES; j++) { ++ iounmap(dev->res[j].base); ++ dev->res[j].base = NULL; ++ } ++ PCIE_ERR(dev, "PCIe: RC%d can't find IRQ # for %s.\n", ++ dev->rc_idx, irq_info->name); ++ ret = -ENODEV; ++ goto out; ++ } else { ++ irq_info->num = res->start; ++ PCIE_DBG(dev, "IRQ # for %s is %d.\n", irq_info->name, ++ irq_info->num); ++ } ++ } ++ ++ /* All allocations succeeded */ ++ ++ dev->wake_n = dev->irq[MSM_PCIE_INT_WAKE].num; ++ ++ dev->parf = dev->res[MSM_PCIE_RES_PARF].base; ++ dev->phy = 
dev->res[MSM_PCIE_RES_PHY].base; ++ dev->elbi = dev->res[MSM_PCIE_RES_ELBI].base; ++ dev->dm_core = dev->res[MSM_PCIE_RES_DM_CORE].base; ++ dev->conf = dev->res[MSM_PCIE_RES_CONF].base; ++ dev->bars = dev->res[MSM_PCIE_RES_BARS].base; ++ dev->dev_mem_res = dev->res[MSM_PCIE_RES_BARS].resource; ++ dev->dev_io_res = dev->res[MSM_PCIE_RES_IO].resource; ++ dev->dev_io_res->flags = IORESOURCE_IO; ++ ++out: ++ kfree(clkfreq); ++ return ret; ++} ++ ++static void msm_pcie_release_resources(struct msm_pcie_dev_t *dev) ++{ ++ dev->parf = NULL; ++ dev->elbi = NULL; ++ dev->dm_core = NULL; ++ dev->conf = NULL; ++ dev->bars = NULL; ++ dev->dev_mem_res = NULL; ++ dev->dev_io_res = NULL; ++} ++ ++int msm_pcie_enable(struct msm_pcie_dev_t *dev, u32 options) ++{ ++ int ret = 0; ++ uint32_t val; ++ long int retries = 0; ++ int link_check_count = 0; ++ ++ PCIE_DBG(dev, "RC%d\n", dev->rc_idx); ++ ++ mutex_lock(&dev->setup_lock); ++ ++ if (dev->link_status == MSM_PCIE_LINK_ENABLED) { ++ PCIE_ERR(dev, "PCIe: the link of RC%d is already enabled\n", ++ dev->rc_idx); ++ goto out; ++ } ++ ++ ++ /* assert PCIe reset link to keep EP in reset */ ++ ++ PCIE_INFO(dev, "PCIe: trigger the reset of endpoint of RC%d.\n", ++ dev->rc_idx); ++ gpio_set_value(dev->gpio[MSM_PCIE_GPIO_PERST].num, ++ dev->gpio[MSM_PCIE_GPIO_PERST].on); ++ usleep_range(PERST_PROPAGATION_DELAY_US_MIN, ++ PERST_PROPAGATION_DELAY_US_MAX); ++ ++ /* enable power */ ++ ++ if (options & PM_VREG) { ++ ret = msm_pcie_vreg_init(dev); ++ if (ret) ++ goto out; ++ } ++ ++ /* enable clocks */ ++ if (options & PM_CLK) { ++ ret = msm_pcie_clk_init(dev); ++ wmb(); ++ if (ret) ++ goto clk_fail; ++ } ++ ++ /* enable PCIe clocks and resets */ ++ msm_pcie_write_mask(dev->parf + PCIE20_PARF_PHY_CTRL, BIT(0), 0); ++ ++ /* change DBI base address */ ++ writel_relaxed(0, dev->parf + PCIE20_PARF_DBI_BASE_ADDR); ++ ++ if (dev->rc_idx) ++ writel_relaxed(0x361c, dev->parf + PCIE20_PARF_SYS_CTRL); ++ else ++ writel_relaxed(0x3656, dev->parf + 
PCIE20_PARF_SYS_CTRL); ++ ++ writel_relaxed(0, dev->parf + PCIE20_PARF_Q2A_FLUSH); ++ ++ if (dev->use_msi) { ++ PCIE_DBG(dev, "RC%d: enable WR halt.\n", dev->rc_idx); ++ msm_pcie_write_mask(dev->parf + ++ PCIE20_PARF_AXI_MSTR_WR_ADDR_HALT, 0, BIT(31)); ++ } ++ ++ /* init PCIe PHY */ ++ if (dev->is_emulation) ++ pcie_phy_init(dev); ++ ++ writel_relaxed(CMD_BME_VAL, dev->dm_core + PCIE20_COMMAND_STATUS); ++ writel_relaxed(DBI_RO_WR_EN, ++ dev->dm_core + PCIE20_MISC_CONTROL_1_REG); ++ writel_relaxed(PCIE_CAP_LINK1_VAL, dev->dm_core + PCIE20_CAP_LINK_1); ++ msm_pcie_write_mask(dev->dm_core + PCIE20_CAP_LINK_CAPABILITIES, ++ BIT(10) | BIT(11), 0); ++ writel_relaxed(PCIE_CAP_CPL_TIMEOUT_DISABLE, ++ dev->dm_core + PCIE20_DEVICE_CONTROL2_STATUS2); ++ writel_relaxed(LTSSM_EN, dev->parf + PCIE20_PARF_LTSSM); ++ ++ PCIE_DBG(dev, "RC%d: waiting for phy ready...\n", dev->rc_idx); ++ ++ do { ++ if (pcie_phy_is_ready(dev)) ++ break; ++ retries++; ++ usleep_range(REFCLK_STABILIZATION_DELAY_US_MIN, ++ REFCLK_STABILIZATION_DELAY_US_MAX); ++ } while (retries < PHY_READY_TIMEOUT_COUNT); ++ ++ PCIE_DBG(dev, "RC%d: number of PHY retries:%ld.\n", ++ dev->rc_idx, retries); ++ ++ if (pcie_phy_is_ready(dev)) ++ PCIE_INFO(dev, "PCIe RC%d PHY is ready!\n", dev->rc_idx); ++ else { ++ PCIE_ERR(dev, "PCIe PHY RC%d failed to come up!\n", ++ dev->rc_idx); ++ ret = -ENODEV; ++ goto link_fail; ++ } ++ ++ if (dev->ep_latency) ++ msleep(dev->ep_latency); ++ ++ /* de-assert PCIe reset link to bring EP out of reset */ ++ ++ PCIE_INFO(dev, "PCIe: Release the reset of endpoint of RC%d.\n", ++ dev->rc_idx); ++ gpio_set_value(dev->gpio[MSM_PCIE_GPIO_PERST].num, ++ 1 - dev->gpio[MSM_PCIE_GPIO_PERST].on); ++ usleep_range(PERST_PROPAGATION_DELAY_US_MIN, ++ PERST_PROPAGATION_DELAY_US_MAX); ++ ++ /* enable link training */ ++ msm_pcie_write_mask(dev->elbi + PCIE20_ELBI_SYS_CTRL, 0, BIT(0)); ++ ++ PCIE_DBG(dev, "%s", "check if link is up\n"); ++ ++ if (dev->rc_idx == 1) { ++ PCIE_DBG(dev, "optimized link training 
for RC%d\n", ++ dev->rc_idx); ++ /* Wait for up to 100ms for the link to come up */ ++ do { ++ usleep_range(LINK_UP_TIMEOUT_US_MIN, ++ LINK_UP_TIMEOUT_US_MAX); ++ val = readl_relaxed(dev->elbi + PCIE20_ELBI_SYS_STTS); ++ } while ((!(val & XMLH_LINK_UP) || ++ !msm_pcie_confirm_linkup(dev, false, false)) ++ && (link_check_count++ < LINK_UP_CHECK_MAX_COUNT)); ++ ++ if ((val & XMLH_LINK_UP) && ++ msm_pcie_confirm_linkup(dev, false, false)) ++ PCIE_DBG(dev, "Link is up after %d checkings\n", ++ link_check_count); ++ else ++ PCIE_DBG(dev, "Initial link training failed for RC%d\n", ++ dev->rc_idx); ++ } else { ++ PCIE_DBG(dev, "non-optimized link training for RC%d\n", ++ dev->rc_idx); ++ usleep_range(LINK_RETRY_TIMEOUT_US_MIN * 5 , ++ LINK_RETRY_TIMEOUT_US_MAX * 5); ++ val = readl_relaxed(dev->elbi + PCIE20_ELBI_SYS_STTS); ++ } ++ ++ retries = 0; ++ ++ while ((!(val & XMLH_LINK_UP) || ++ !msm_pcie_confirm_linkup(dev, false, false)) ++ && (retries < MAX_LINK_RETRIES)) { ++ PCIE_ERR(dev, "RC%d:No. 
%ld:LTSSM_STATE:0x%x\n", dev->rc_idx, ++ retries + 1, (val >> 0xC) & 0x1f); ++ gpio_set_value(dev->gpio[MSM_PCIE_GPIO_PERST].num, ++ dev->gpio[MSM_PCIE_GPIO_PERST].on); ++ usleep_range(PERST_PROPAGATION_DELAY_US_MIN, ++ PERST_PROPAGATION_DELAY_US_MAX); ++ gpio_set_value(dev->gpio[MSM_PCIE_GPIO_PERST].num, ++ 1 - dev->gpio[MSM_PCIE_GPIO_PERST].on); ++ usleep_range(LINK_RETRY_TIMEOUT_US_MIN, ++ LINK_RETRY_TIMEOUT_US_MAX); ++ retries++; ++ val = readl_relaxed(dev->elbi + PCIE20_ELBI_SYS_STTS); ++ } ++ ++ PCIE_DBG(dev, "number of link training retries: %ld\n", retries); ++ ++ if ((val & XMLH_LINK_UP) && ++ msm_pcie_confirm_linkup(dev, false, false)) { ++ PCIE_INFO(dev, "PCIe RC%d link initialized\n", dev->rc_idx); ++ } else { ++ PCIE_INFO(dev, "PCIe: trigger the reset of endpoint of RC%d.\n", ++ dev->rc_idx); ++ gpio_set_value(dev->gpio[MSM_PCIE_GPIO_PERST].num, ++ dev->gpio[MSM_PCIE_GPIO_PERST].on); ++ PCIE_ERR(dev, "PCIe RC%d link initialization failed\n", ++ dev->rc_idx); ++ ret = -1; ++ goto link_fail; ++ } ++ ++ msm_pcie_config_controller(dev); ++ ++ if (!dev->msi_gicm_addr) ++ msm_pcie_config_msi_controller(dev); ++ ++ if (dev->l1ss_supported) ++ msm_pcie_config_l1ss(dev); ++ ++ dev->link_status = MSM_PCIE_LINK_ENABLED; ++ dev->power_on = true; ++ dev->suspending = false; ++ goto out; ++ ++link_fail: ++ msm_pcie_clk_deinit(dev); ++clk_fail: ++ msm_pcie_vreg_deinit(dev); ++out: ++ mutex_unlock(&dev->setup_lock); ++ ++ return ret; ++} ++ ++ ++void msm_pcie_disable(struct msm_pcie_dev_t *dev, u32 options) ++{ ++ PCIE_DBG(dev, "RC%d\n", dev->rc_idx); ++ ++ mutex_lock(&dev->setup_lock); ++ ++ if (!dev->power_on) { ++ PCIE_DBG(dev, ++ "PCIe: the link of RC%d is already power down.\n", ++ dev->rc_idx); ++ mutex_unlock(&dev->setup_lock); ++ return; ++ } ++ ++ dev->link_status = MSM_PCIE_LINK_DISABLED; ++ dev->power_on = false; ++ ++ PCIE_INFO(dev, "PCIe: trigger the reset of endpoint of RC%d.\n", ++ dev->rc_idx); ++ ++ gpio_set_value(dev->gpio[MSM_PCIE_GPIO_PERST].num, 
++ dev->gpio[MSM_PCIE_GPIO_PERST].on); ++ ++ if (options & PM_CLK) { ++ msm_pcie_write_mask(dev->parf + PCIE20_PARF_PHY_CTRL, 0, ++ BIT(0)); ++ msm_pcie_clk_deinit(dev); ++ } ++ ++ if (options & PM_VREG) ++ msm_pcie_vreg_deinit(dev); ++ ++ mutex_unlock(&dev->setup_lock); ++} ++ ++static int msm_pcie_setup(int nr, struct pci_sys_data *sys) ++{ ++ struct msm_pcie_dev_t *dev = ++ (struct msm_pcie_dev_t *)(sys->private_data); ++ ++ PCIE_DBG(dev, "bus %d\n", nr); ++ /* ++ * specify linux PCI framework to allocate device memory (BARs) ++ * from msm_pcie_dev.dev_mem_res resource. ++ */ ++ sys->mem_offset = 0; ++ sys->io_offset = 0; ++ ++ pci_add_resource(&sys->resources, dev->dev_io_res); ++ pci_add_resource(&sys->resources, dev->dev_mem_res); ++ return 1; ++} ++ ++static int msm_rc_remove(struct msm_pcie_dev_t *dev) ++{ ++ msm_pcie_disable(dev, PM_PIPE_CLK | PM_CLK | PM_VREG); ++ pci_stop_root_bus(dev->pci_bus); ++ pci_remove_root_bus(dev->pci_bus); ++ dev->pci_bus = NULL; ++ dev->enumerated = false; ++ return 0; ++} ++ ++void msm_pcie_remove_bus(void) ++{ ++ int i; ++ ++ if (atomic_read(&rc_removed)) ++ return; ++ ++ for (i = 0; i < pcie_drv.rc_num; i++) { ++ pr_notice("---> Removing %d", i); ++ msm_rc_remove(&msm_pcie_dev[i]); ++ pr_notice(" ... 
done<---\n"); ++ } ++ ++ atomic_set(&rc_removed, 1); ++} ++ ++static struct pci_bus *msm_pcie_scan_bus(int nr, ++ struct pci_sys_data *sys) ++{ ++ struct pci_bus *bus = NULL; ++ struct msm_pcie_dev_t *dev = ++ (struct msm_pcie_dev_t *)(sys->private_data); ++ ++ PCIE_DBG(dev, "bus %d\n", nr); ++ ++ bus = pci_scan_root_bus(NULL, sys->busnr, &msm_pcie_ops, sys, ++ &sys->resources); ++ dev->pci_bus = bus; ++ ++ return bus; ++} ++ ++static int msm_pcie_map_irq(const struct pci_dev *dev, u8 slot, u8 pin) ++{ ++ struct msm_pcie_dev_t *pcie_dev = PCIE_BUS_PRIV_DATA(dev); ++ int ret = 0; ++ ++ PCIE_DBG(pcie_dev, "rc %s slot %d pin %d\n", pcie_dev->pdev->name, ++ slot, pin); ++ ++ switch (pin) { ++ case 1: ++ ret = pcie_dev->irq[MSM_PCIE_INT_A].num; ++ break; ++ case 2: ++ ret = pcie_dev->irq[MSM_PCIE_INT_B].num; ++ break; ++ case 3: ++ ret = pcie_dev->irq[MSM_PCIE_INT_C].num; ++ break; ++ case 4: ++ ret = pcie_dev->irq[MSM_PCIE_INT_D].num; ++ break; ++ default: ++ PCIE_ERR(pcie_dev, "PCIe: RC%d: unsupported pin number.\n", ++ pcie_dev->rc_idx); ++ } ++ ++ return ret; ++} ++ ++#if 0 ++static void msm_pcie_add_bus(struct pci_bus *bus) ++{ ++ struct pci_sys_data *sys = bus->sysdata; ++ struct msm_pcie_dev_t *dev = ++ (struct msm_pcie_dev_t *)(sys->private_data); ++ ++ bus->msi = dev->msi_chip; ++} ++#endif ++ ++static struct hw_pci msm_pci[MAX_RC_NUM] = { ++ { ++ //.domain = 0, ++ .nr_controllers = 1, ++ .swizzle = pci_common_swizzle, ++ .setup = msm_pcie_setup, ++ .scan = msm_pcie_scan_bus, ++ .map_irq = msm_pcie_map_irq, ++ //.add_bus = msm_pcie_add_bus, ++ }, ++}; ++ ++int msm_pcie_rescan(void) ++{ ++ int i; ++ ++ if (!atomic_read(&rc_removed)) ++ return 0; ++ ++ for (i = 0; i < pcie_drv.rc_num; i++) { ++ /* reset the RC and enumurate devices */ ++ msm_pcie_controller_reset(&msm_pcie_dev[i]); ++ msm_pcie_enumerate(i); ++ } ++ ++ atomic_set(&rc_removed, 0); ++ ++ return 0; ++} ++ ++int msm_pcie_enumerate(u32 rc_idx) ++{ ++ int ret = 0; ++ struct msm_pcie_dev_t *dev = 
&msm_pcie_dev[rc_idx]; ++ ++ PCIE_DBG(dev, "Enumerate RC%d\n", rc_idx); ++ ++ if (!dev->enumerated) { ++ ret = msm_pcie_enable(dev, PM_ALL); ++ ++ /* kick start ARM PCI configuration framework */ ++ if (!ret) { ++ struct pci_dev *pcidev = NULL; ++ bool found = false; ++ u32 ids = readl_relaxed(msm_pcie_dev[rc_idx].dm_core); ++ u32 vendor_id = ids & 0xffff; ++ u32 device_id = (ids & 0xffff0000) >> 16; ++ ++ PCIE_DBG(dev, "vendor-id:0x%x device_id:0x%x\n", ++ vendor_id, device_id); ++ ++ msm_pci[rc_idx].private_data = (void **)&dev; ++ pci_common_init(&msm_pci[rc_idx]); ++ /* This has to happen only once */ ++ dev->enumerated = true; ++ ++ do { ++ pcidev = pci_get_device(vendor_id, ++ device_id, pcidev); ++ if (pcidev && (&msm_pcie_dev[rc_idx] == ++ (struct msm_pcie_dev_t *) ++ PCIE_BUS_PRIV_DATA(pcidev))) { ++ msm_pcie_dev[rc_idx].dev = pcidev; ++ found = true; ++ PCIE_DBG(&msm_pcie_dev[rc_idx], ++ "PCI device is found for RC%d\n", ++ rc_idx); ++ } ++ } while (!found && pcidev); ++ ++ if (!pcidev) { ++ PCIE_ERR(dev, ++ "PCIe: Did not find PCI device for RC%d.\n", ++ dev->rc_idx); ++ return -ENODEV; ++ } ++ } else { ++ PCIE_ERR(dev, "PCIe: failed to enable RC%d.\n", ++ dev->rc_idx); ++ } ++ } else { ++ PCIE_ERR(dev, "PCIe: RC%d has already been enumerated.\n", ++ dev->rc_idx); ++ } ++ ++ return ret; ++} ++ ++static ssize_t msm_bus_rescan_store(struct bus_type *bus, const char *buf, ++ size_t count) ++{ ++ unsigned long val; ++ ++ if (kstrtoul(buf, 0, &val) < 0) ++ return -EINVAL; ++ ++ if (val) { ++ pci_lock_rescan_remove(); ++ msm_pcie_rescan(); ++ pci_unlock_rescan_remove(); ++ } ++ return count; ++} ++static BUS_ATTR(rcrescan, (S_IWUSR|S_IWGRP), NULL, msm_bus_rescan_store); ++ ++static ssize_t msm_bus_remove_store(struct bus_type *bus, const char *buf, ++ size_t count) ++{ ++ unsigned long val; ++ ++ if (kstrtoul(buf, 0, &val) < 0) ++ return -EINVAL; ++ ++ if (val) { ++ pci_lock_rescan_remove(); ++ msm_pcie_remove_bus(); ++ pci_unlock_rescan_remove(); ++ } ++ 
return count; ++} ++static BUS_ATTR(rcremove, (S_IWUSR|S_IWGRP), NULL, msm_bus_remove_store); ++ ++static int msm_pcie_probe(struct platform_device *pdev) ++{ ++ int ret = 0; ++ int rc_idx = -1; ++ int i; ++ ++ pr_debug("%s\n", __func__); ++ ++ mutex_lock(&pcie_drv.drv_lock); ++ ++ ret = of_property_read_u32((&pdev->dev)->of_node, ++ "qcom,ctrl-amt", &pcie_drv.rc_expected); ++ if (ret) { ++ pr_err("PCIe: does not find controller amount.\n"); ++ goto out; ++ } else { ++ if (pcie_drv.rc_expected > MAX_RC_NUM) { ++ pr_debug("Expected number of devices %d\n", ++ pcie_drv.rc_expected); ++ pr_debug("Exceeded max supported devices %d\n", ++ MAX_RC_NUM); ++ goto out; ++ } ++ pr_debug("Target has %d RC(s).\n", pcie_drv.rc_expected); ++ } ++ ++ ret = of_property_read_u32((&pdev->dev)->of_node, ++ "cell-index", &rc_idx); ++ if (ret) { ++ pr_debug("Did not find RC index.\n"); ++ goto out; ++ } else { ++ if (rc_idx >= MAX_RC_NUM) { ++ pr_err( ++ "PCIe: Invalid RC Index %d (max supported = %d)\n", ++ rc_idx, MAX_RC_NUM); ++ goto out; ++ } ++ pcie_drv.rc_num++; ++ PCIE_DBG(&msm_pcie_dev[rc_idx], "PCIe: RC index is %d.\n", ++ rc_idx); ++ } ++ ++ msm_pcie_dev[rc_idx].l1ss_supported = ++ of_property_read_bool((&pdev->dev)->of_node, ++ "qcom,l1ss-supported"); ++ PCIE_DBG(&msm_pcie_dev[rc_idx], "L1ss is %s supported.\n", ++ msm_pcie_dev[rc_idx].l1ss_supported ? "" : "not"); ++ msm_pcie_dev[rc_idx].aux_clk_sync = ++ of_property_read_bool((&pdev->dev)->of_node, ++ "qcom,aux-clk-sync"); ++ PCIE_DBG(&msm_pcie_dev[rc_idx], ++ "AUX clock is %s synchronous to Core clock.\n", ++ msm_pcie_dev[rc_idx].aux_clk_sync ? "" : "not"); ++ ++ msm_pcie_dev[rc_idx].ep_wakeirq = ++ of_property_read_bool((&pdev->dev)->of_node, ++ "qcom,ep-wakeirq"); ++ PCIE_DBG(&msm_pcie_dev[rc_idx], ++ "PCIe: EP of RC%d does %s assert wake when it is up.\n", ++ rc_idx, msm_pcie_dev[rc_idx].ep_wakeirq ? 
"" : "not"); ++ ++ msm_pcie_dev[rc_idx].n_fts = 0; ++ ret = of_property_read_u32((&pdev->dev)->of_node, ++ "qcom,n-fts", ++ &msm_pcie_dev[rc_idx].n_fts); ++ ++ if (ret) ++ PCIE_DBG(&msm_pcie_dev[rc_idx], ++ "n-fts does not exist. ret=%d\n", ret); ++ else ++ PCIE_DBG(&msm_pcie_dev[rc_idx], "n-fts: 0x%x.\n", ++ msm_pcie_dev[rc_idx].n_fts); ++ ++ msm_pcie_dev[rc_idx].is_emulation = ++ of_property_read_bool((&pdev->dev)->of_node, ++ "qcom,is_emulation"); ++ ++ PCIE_DBG(&msm_pcie_dev[rc_idx], "is_emulation: 0x%x.\n", ++ msm_pcie_dev[rc_idx].is_emulation); ++ ++ msm_pcie_dev[rc_idx].ext_ref_clk = ++ of_property_read_bool((&pdev->dev)->of_node, ++ "qcom,ext-ref-clk"); ++ PCIE_DBG(&msm_pcie_dev[rc_idx], "ref clk is %s.\n", ++ msm_pcie_dev[rc_idx].ext_ref_clk ? "external" : "internal"); ++ ++ msm_pcie_dev[rc_idx].ep_latency = 0; ++ ret = of_property_read_u32((&pdev->dev)->of_node, ++ "qcom,ep-latency", ++ &msm_pcie_dev[rc_idx].ep_latency); ++ if (ret) ++ PCIE_DBG(&msm_pcie_dev[rc_idx], ++ "RC%d: ep-latency does not exist.\n", ++ rc_idx); ++ else ++ PCIE_DBG(&msm_pcie_dev[rc_idx], "RC%d: ep-latency: 0x%x.\n", ++ rc_idx, msm_pcie_dev[rc_idx].ep_latency); ++ ++ msm_pcie_dev[rc_idx].msi_gicm_addr = 0; ++ msm_pcie_dev[rc_idx].msi_gicm_base = 0; ++ ret = of_property_read_u32((&pdev->dev)->of_node, ++ "qcom,msi-gicm-addr", ++ &msm_pcie_dev[rc_idx].msi_gicm_addr); ++ ++ if (ret) { ++ PCIE_DBG(&msm_pcie_dev[rc_idx], "%s", ++ "msi-gicm-addr does not exist.\n"); ++ } else { ++ PCIE_DBG(&msm_pcie_dev[rc_idx], "msi-gicm-addr: 0x%x.\n", ++ msm_pcie_dev[rc_idx].msi_gicm_addr); ++ ++ ret = of_property_read_u32((&pdev->dev)->of_node, ++ "qcom,msi-gicm-base", ++ &msm_pcie_dev[rc_idx].msi_gicm_base); ++ ++ if (ret) { ++ PCIE_ERR(&msm_pcie_dev[rc_idx], ++ "PCIe: RC%d: msi-gicm-base does not exist.\n", ++ rc_idx); ++ goto decrease_rc_num; ++ } else { ++ PCIE_DBG(&msm_pcie_dev[rc_idx], "msi-gicm-base: 0x%x\n", ++ msm_pcie_dev[rc_idx].msi_gicm_base); ++ } ++ } ++ ++ msm_pcie_dev[rc_idx].rc_idx = 
rc_idx; ++ msm_pcie_dev[rc_idx].pdev = pdev; ++ msm_pcie_dev[rc_idx].vreg_n = 0; ++ msm_pcie_dev[rc_idx].gpio_n = 0; ++ msm_pcie_dev[rc_idx].parf_deemph = 0; ++ msm_pcie_dev[rc_idx].parf_swing = 0; ++ msm_pcie_dev[rc_idx].link_status = MSM_PCIE_LINK_DEINIT; ++ msm_pcie_dev[rc_idx].user_suspend = false; ++ msm_pcie_dev[rc_idx].saved_state = NULL; ++ msm_pcie_dev[rc_idx].enumerated = false; ++ msm_pcie_dev[rc_idx].linkdown_counter = 0; ++ msm_pcie_dev[rc_idx].suspending = false; ++ msm_pcie_dev[rc_idx].wake_counter = 0; ++ msm_pcie_dev[rc_idx].req_exit_l1_counter = 0; ++ msm_pcie_dev[rc_idx].power_on = false; ++ msm_pcie_dev[rc_idx].use_msi = false; ++ memcpy(msm_pcie_dev[rc_idx].vreg, msm_pcie_vreg_info, ++ sizeof(msm_pcie_vreg_info)); ++ memcpy(msm_pcie_dev[rc_idx].gpio, msm_pcie_gpio_info, ++ sizeof(msm_pcie_gpio_info)); ++ memcpy(msm_pcie_dev[rc_idx].clk, msm_pcie_clk_info[rc_idx], ++ sizeof(msm_pcie_clk_info)); ++ ++ memcpy(msm_pcie_dev[rc_idx].rst, msm_pcie_rst_info, ++ sizeof(msm_pcie_rst_info)); ++ memcpy(msm_pcie_dev[rc_idx].res, msm_pcie_res_info, ++ sizeof(msm_pcie_res_info)); ++ memcpy(msm_pcie_dev[rc_idx].irq, msm_pcie_irq_info, ++ sizeof(msm_pcie_irq_info)); ++ msm_pcie_dev[rc_idx].shadow_en = true; ++ for (i = 0; i < PCIE_CONF_SPACE_DW; i++) { ++ msm_pcie_dev[rc_idx].rc_shadow[i] = PCIE_CLEAR; ++ msm_pcie_dev[rc_idx].ep_shadow[i] = PCIE_CLEAR; ++ } ++ ++ ret = msm_pcie_get_resources(&msm_pcie_dev[rc_idx], ++ msm_pcie_dev[rc_idx].pdev); ++ ++ if (ret) ++ goto decrease_rc_num; ++ ++ ret = msm_pcie_gpio_init(&msm_pcie_dev[rc_idx]); ++ if (ret) { ++ msm_pcie_release_resources(&msm_pcie_dev[rc_idx]); ++ goto decrease_rc_num; ++ } ++ ++ /* s/w reset of pcie */ ++ msm_pcie_controller_reset(&msm_pcie_dev[rc_idx]); ++ ++ /* detect uni phy before accessing the pcie registers */ ++ if (msm_pcie_dev[rc_idx].is_emulation && !pcie_phy_detect(&msm_pcie_dev[rc_idx])) { ++ ret = -ENODEV; ++ msm_pcie_release_resources(&msm_pcie_dev[rc_idx]); ++ 
msm_pcie_gpio_deinit(&msm_pcie_dev[rc_idx]); ++ goto decrease_rc_num; ++ } ++ ++ ret = msm_pcie_irq_init(&msm_pcie_dev[rc_idx]); ++ if (ret) { ++ msm_pcie_release_resources(&msm_pcie_dev[rc_idx]); ++ msm_pcie_gpio_deinit(&msm_pcie_dev[rc_idx]); ++ goto decrease_rc_num; ++ } ++ ++ if (msm_pcie_dev[rc_idx].ep_wakeirq) { ++ PCIE_DBG(&msm_pcie_dev[rc_idx], ++ "PCIe: RC%d will be enumerated upon WAKE signal from Endpoint.\n", ++ rc_idx); ++ mutex_unlock(&pcie_drv.drv_lock); ++ return 0; ++ } ++ ++ ret = msm_pcie_enumerate(rc_idx); ++ ++ if (ret) { ++ PCIE_ERR(&msm_pcie_dev[rc_idx], ++ "PCIe: RC%d is not enabled during bootup; it will be enumerated upon WAKE signal.\n", ++ rc_idx); ++ goto decrease_rc_num; ++ } else { ++ PCIE_ERR(&msm_pcie_dev[rc_idx], "RC%d is enabled in bootup\n", ++ rc_idx); ++ } ++ ++ PCIE_DBG(&msm_pcie_dev[rc_idx], "PCIE probed %s\n", ++ dev_name(&(pdev->dev))); ++ ++ /* create sysfs files to support power save mode */ ++ if (!rc_idx) { ++ ret = bus_create_file(&pci_bus_type, &bus_attr_rcrescan); ++ if (ret != 0) { ++ PCIE_ERR(&msm_pcie_dev[rc_idx], ++ "RC%d failed to create sysfs rcrescan file\n", ++ rc_idx); ++ } ++ ++ ret = bus_create_file(&pci_bus_type, &bus_attr_rcremove); ++ if (ret != 0) { ++ PCIE_ERR(&msm_pcie_dev[rc_idx], ++ "RC%d failed to create sysfs rcremove file\n", ++ rc_idx); ++ } ++ } ++ ++ mutex_unlock(&pcie_drv.drv_lock); ++ return 0; ++ ++decrease_rc_num: ++ pcie_drv.rc_num--; ++out: ++ PCIE_ERR(&msm_pcie_dev[rc_idx], ++ "PCIe: Driver probe failed for RC%d:%d\n", ++ rc_idx, ret); ++ mutex_unlock(&pcie_drv.drv_lock); ++ ++ return ret; ++} ++ ++static int __exit msm_pcie_remove(struct platform_device *pdev) ++{ ++ int ret = 0; ++ int rc_idx; ++ ++ pr_debug("PCIe:%s.\n", __func__); ++ ++ mutex_lock(&pcie_drv.drv_lock); ++ ++ ret = of_property_read_u32((&pdev->dev)->of_node, ++ "cell-index", &rc_idx); ++ if (ret) { ++ pr_err("%s: Did not find RC index.\n", __func__); ++ goto out; ++ } else { ++ pcie_drv.rc_num--; ++ pr_debug("%s: RC 
index is 0x%x.", __func__, rc_idx); ++ } ++ ++ msm_pcie_irq_deinit(&msm_pcie_dev[rc_idx]); ++ msm_pcie_vreg_deinit(&msm_pcie_dev[rc_idx]); ++ msm_pcie_clk_deinit(&msm_pcie_dev[rc_idx]); ++ msm_pcie_gpio_deinit(&msm_pcie_dev[rc_idx]); ++ msm_pcie_release_resources(&msm_pcie_dev[rc_idx]); ++ ++out: ++ mutex_unlock(&pcie_drv.drv_lock); ++ ++ return ret; ++} ++ ++static struct of_device_id msm_pcie_match[] = { ++ { .compatible = "qcom,msm_pcie", ++ }, ++ {} ++}; ++ ++static struct platform_driver msm_pcie_driver = { ++ .probe = msm_pcie_probe, ++ .remove = msm_pcie_remove, ++ .driver = { ++ .name = "msm_pcie", ++ .owner = THIS_MODULE, ++ .of_match_table = msm_pcie_match, ++ }, ++}; ++ ++static int __init pcie_init(void) ++{ ++ int ret = 0, i; ++#ifdef CONFIG_IPC_LOGGING ++ char rc_name[MAX_RC_NAME_LEN]; ++#endif ++ ++ pr_debug("pcie:%s.\n", __func__); ++ ++ pcie_drv.rc_num = 0; ++ pcie_drv.rc_expected = 0; ++ mutex_init(&pcie_drv.drv_lock); ++ ++ for (i = 0; i < MAX_RC_NUM; i++) { ++#ifdef CONFIG_IPC_LOGGING ++ snprintf(rc_name, MAX_RC_NAME_LEN, "pcie%d-short", i); ++ msm_pcie_dev[i].ipc_log = ++ ipc_log_context_create(PCIE_LOG_PAGES, rc_name, 0); ++ if (msm_pcie_dev[i].ipc_log == NULL) ++ pr_err("%s: unable to create IPC log context for %s\n", ++ __func__, rc_name); ++ else ++ PCIE_DBG(&msm_pcie_dev[i], ++ "PCIe IPC logging is enable for RC%d\n", ++ i); ++ snprintf(rc_name, MAX_RC_NAME_LEN, "pcie%d-long", i); ++ msm_pcie_dev[i].ipc_log_long = ++ ipc_log_context_create(PCIE_LOG_PAGES, rc_name, 0); ++ if (msm_pcie_dev[i].ipc_log_long == NULL) ++ pr_err("%s: unable to create IPC log context for %s\n", ++ __func__, rc_name); ++ else ++ PCIE_DBG(&msm_pcie_dev[i], ++ "PCIe IPC logging %s is enable for RC%d\n", ++ rc_name, i); ++#endif ++ ++ spin_lock_init(&msm_pcie_dev[i].cfg_lock); ++ msm_pcie_dev[i].cfg_access = true; ++ mutex_init(&msm_pcie_dev[i].setup_lock); ++ mutex_init(&msm_pcie_dev[i].recovery_lock); ++ spin_lock_init(&msm_pcie_dev[i].linkdown_lock); ++ 
spin_lock_init(&msm_pcie_dev[i].wakeup_lock); ++ } ++ ++ ret = platform_driver_register(&msm_pcie_driver); ++ ++ return ret; ++} ++ ++static void __exit pcie_exit(void) ++{ ++ pr_debug("pcie:%s.\n", __func__); ++ ++ platform_driver_unregister(&msm_pcie_driver); ++} ++ ++subsys_initcall_sync(pcie_init); ++module_exit(pcie_exit); ++ ++ ++/* RC do not represent the right class; set it to PCI_CLASS_BRIDGE_PCI */ ++static void msm_pcie_fixup_early(struct pci_dev *dev) ++{ ++ struct msm_pcie_dev_t *pcie_dev = PCIE_BUS_PRIV_DATA(dev); ++ PCIE_DBG(pcie_dev, "hdr_type %d\n", dev->hdr_type); ++ if (dev->hdr_type == 1) ++ dev->class = (dev->class & 0xff) | (PCI_CLASS_BRIDGE_PCI << 8); ++} ++DECLARE_PCI_FIXUP_EARLY(PCIE_VENDOR_ID_RCP, PCIE_DEVICE_ID_RCP, ++ msm_pcie_fixup_early); ++ ++/* Suspend the PCIe link */ ++static int msm_pcie_pm_suspend(struct pci_dev *dev, ++ void *user, void *data, u32 options) ++{ ++ int ret = 0; ++ u32 val = 0; ++ int ret_l23; ++ struct msm_pcie_dev_t *pcie_dev = PCIE_BUS_PRIV_DATA(dev); ++ ++ pcie_dev->suspending = true; ++ PCIE_DBG(pcie_dev, "RC%d\n", pcie_dev->rc_idx); ++ ++ if (!pcie_dev->power_on) { ++ PCIE_DBG(pcie_dev, ++ "PCIe: power of RC%d has been turned off.\n", ++ pcie_dev->rc_idx); ++ return ret; ++ } ++ ++ if (dev && !(options & MSM_PCIE_CONFIG_NO_CFG_RESTORE) ++ && msm_pcie_confirm_linkup(pcie_dev, true, true)) { ++ ret = pci_save_state(dev); ++ pcie_dev->saved_state = pci_store_saved_state(dev); ++ } ++ if (ret) { ++ PCIE_ERR(pcie_dev, "PCIe: fail to save state of RC%d:%d.\n", ++ pcie_dev->rc_idx, ret); ++ pcie_dev->suspending = false; ++ return ret; ++ } ++ ++ spin_lock_irqsave(&pcie_dev->cfg_lock, ++ pcie_dev->irqsave_flags); ++ pcie_dev->cfg_access = false; ++ spin_unlock_irqrestore(&pcie_dev->cfg_lock, ++ pcie_dev->irqsave_flags); ++ ++ msm_pcie_write_mask(pcie_dev->elbi + PCIE20_ELBI_SYS_CTRL, 0, ++ BIT(4)); ++ ++ PCIE_DBG(pcie_dev, "RC%d: PME_TURNOFF_MSG is sent out\n", ++ pcie_dev->rc_idx); ++ ++ ret_l23 = 
readl_poll_timeout((pcie_dev->parf ++ + PCIE20_PARF_PM_STTS), val, (val & BIT(6)), 10000, 100000); ++ ++ /* check L23_Ready */ ++ if (!ret_l23) ++ PCIE_DBG(pcie_dev, "RC%d: PM_Enter_L23 is received\n", ++ pcie_dev->rc_idx); ++ else ++ PCIE_DBG(pcie_dev, "RC%d: PM_Enter_L23 is NOT received\n", ++ pcie_dev->rc_idx); ++ ++ msm_pcie_disable(pcie_dev, PM_PIPE_CLK | PM_CLK | PM_VREG); ++ ++ return ret; ++} ++ ++static void msm_pcie_fixup_suspend(struct pci_dev *dev) ++{ ++ int ret; ++ struct msm_pcie_dev_t *pcie_dev = PCIE_BUS_PRIV_DATA(dev); ++ ++ PCIE_DBG(pcie_dev, "RC%d\n", pcie_dev->rc_idx); ++ ++ if (pcie_dev->link_status != MSM_PCIE_LINK_ENABLED) ++ return; ++ ++ mutex_lock(&pcie_dev->recovery_lock); ++ ++ ret = msm_pcie_pm_suspend(dev, NULL, NULL, 0); ++ if (ret) ++ PCIE_ERR(pcie_dev, "PCIe: RC%d got failure in suspend:%d.\n", ++ pcie_dev->rc_idx, ret); ++ ++ mutex_unlock(&pcie_dev->recovery_lock); ++} ++DECLARE_PCI_FIXUP_SUSPEND(PCIE_VENDOR_ID_RCP, PCIE_DEVICE_ID_RCP, ++ msm_pcie_fixup_suspend); ++ ++/* Resume the PCIe link */ ++static int msm_pcie_pm_resume(struct pci_dev *dev, ++ void *user, void *data, u32 options) ++{ ++ int ret; ++ struct msm_pcie_dev_t *pcie_dev = PCIE_BUS_PRIV_DATA(dev); ++ ++ PCIE_DBG(pcie_dev, "RC%d\n", pcie_dev->rc_idx); ++ ++ spin_lock_irqsave(&pcie_dev->cfg_lock, ++ pcie_dev->irqsave_flags); ++ pcie_dev->cfg_access = true; ++ spin_unlock_irqrestore(&pcie_dev->cfg_lock, ++ pcie_dev->irqsave_flags); ++ ++ ret = msm_pcie_enable(pcie_dev, PM_PIPE_CLK | PM_CLK | PM_VREG); ++ if (ret) { ++ PCIE_ERR(pcie_dev, ++ "PCIe: RC%d fail to enable PCIe link in resume.\n", ++ pcie_dev->rc_idx); ++ return ret; ++ } else { ++ pcie_dev->suspending = false; ++ PCIE_DBG(pcie_dev, ++ "dev->bus->number = %d dev->bus->primary = %d\n", ++ dev->bus->number, dev->bus->primary); ++ ++ if (!(options & MSM_PCIE_CONFIG_NO_CFG_RESTORE)) { ++ pci_load_and_free_saved_state(dev, ++ &pcie_dev->saved_state); ++ pci_restore_state(dev); ++ } ++ } ++ ++ return ret; ++} ++ 
++void msm_pcie_fixup_resume(struct pci_dev *dev) ++{ ++ int ret; ++ struct msm_pcie_dev_t *pcie_dev = PCIE_BUS_PRIV_DATA(dev); ++ ++ PCIE_DBG(pcie_dev, "RC%d\n", pcie_dev->rc_idx); ++ ++ if ((pcie_dev->link_status != MSM_PCIE_LINK_DISABLED) || ++ pcie_dev->user_suspend) ++ return; ++ ++ mutex_lock(&pcie_dev->recovery_lock); ++ ret = msm_pcie_pm_resume(dev, NULL, NULL, 0); ++ if (ret) ++ PCIE_ERR(pcie_dev, ++ "PCIe: RC%d got failure in fixup resume:%d.\n", ++ pcie_dev->rc_idx, ret); ++ ++ mutex_unlock(&pcie_dev->recovery_lock); ++} ++DECLARE_PCI_FIXUP_RESUME(PCIE_VENDOR_ID_RCP, PCIE_DEVICE_ID_RCP, ++ msm_pcie_fixup_resume); ++ ++void msm_pcie_fixup_resume_early(struct pci_dev *dev) ++{ ++ int ret; ++ struct msm_pcie_dev_t *pcie_dev = PCIE_BUS_PRIV_DATA(dev); ++ ++ PCIE_DBG(pcie_dev, "RC%d\n", pcie_dev->rc_idx); ++ ++ if ((pcie_dev->link_status != MSM_PCIE_LINK_DISABLED) || ++ pcie_dev->user_suspend) ++ return; ++ ++ mutex_lock(&pcie_dev->recovery_lock); ++ ret = msm_pcie_pm_resume(dev, NULL, NULL, 0); ++ if (ret) ++ PCIE_ERR(pcie_dev, "PCIe: RC%d got failure in resume:%d.\n", ++ pcie_dev->rc_idx, ret); ++ ++ mutex_unlock(&pcie_dev->recovery_lock); ++} ++DECLARE_PCI_FIXUP_RESUME_EARLY(PCIE_VENDOR_ID_RCP, PCIE_DEVICE_ID_RCP, ++ msm_pcie_fixup_resume_early); ++ ++static void msm_pcie_fixup_final(struct pci_dev *dev) ++{ ++ struct msm_pcie_dev_t *pcie_dev = PCIE_BUS_PRIV_DATA(dev); ++ PCIE_DBG(pcie_dev, "RC%d\n", pcie_dev->rc_idx); ++ pcie_drv.current_rc++; ++} ++DECLARE_PCI_FIXUP_FINAL(PCI_ANY_ID, PCI_ANY_ID, msm_pcie_fixup_final); ++ ++int msm_pcie_pm_control(enum msm_pcie_pm_opt pm_opt, u32 busnr, void *user, ++ void *data, u32 options) ++{ ++ int ret = 0; ++ struct pci_dev *dev; ++ u32 rc_idx = 0; ++ ++ pr_debug("PCIe: pm_opt:%d;busnr:%d;options:%d\n", ++ pm_opt, busnr, options); ++ ++ switch (busnr) { ++ case 1: ++ if (user) { ++ struct msm_pcie_dev_t *pcie_dev ++ = PCIE_BUS_PRIV_DATA(((struct pci_dev *)user)); ++ ++ if (pcie_dev) { ++ rc_idx = pcie_dev->rc_idx; 
++ PCIE_DBG(pcie_dev, ++ "PCIe: RC%d: pm_opt:%d;busnr:%d;options:%d\n", ++ rc_idx, pm_opt, busnr, options); ++ } else { ++ pr_err( ++ "PCIe: did not find RC for pci endpoint device 0x%x.\n", ++ (u32)user); ++ ret = -ENODEV; ++ goto out; ++ } ++ } ++ break; ++ default: ++ pr_err("PCIe: unsupported bus number.\n"); ++ ret = PCIBIOS_DEVICE_NOT_FOUND; ++ goto out; ++ } ++ ++ dev = msm_pcie_dev[rc_idx].dev; ++ ++ switch (pm_opt) { ++ case MSM_PCIE_SUSPEND: ++ if (msm_pcie_dev[rc_idx].link_status != MSM_PCIE_LINK_ENABLED) ++ PCIE_DBG(&msm_pcie_dev[rc_idx], ++ "PCIe: RC%d: requested to suspend when link is not enabled:%d.\n", ++ rc_idx, msm_pcie_dev[rc_idx].link_status); ++ ++ if (!msm_pcie_dev[rc_idx].power_on) { ++ PCIE_ERR(&msm_pcie_dev[rc_idx], ++ "PCIe: RC%d: requested to suspend when link is powered down:%d.\n", ++ rc_idx, msm_pcie_dev[rc_idx].link_status); ++ break; ++ } ++ ++ msm_pcie_dev[rc_idx].user_suspend = true; ++ ++ mutex_lock(&msm_pcie_dev[rc_idx].recovery_lock); ++ ++ ret = msm_pcie_pm_suspend(dev, user, data, options); ++ if (ret) { ++ PCIE_ERR(&msm_pcie_dev[rc_idx], ++ "PCIe: RC%d: user failed to suspend the link.\n", ++ rc_idx); ++ msm_pcie_dev[rc_idx].user_suspend = false; ++ } ++ ++ mutex_unlock(&msm_pcie_dev[rc_idx].recovery_lock); ++ break; ++ case MSM_PCIE_RESUME: ++ PCIE_DBG(&msm_pcie_dev[rc_idx], ++ "User of RC%d requests to resume the link\n", rc_idx); ++ if (msm_pcie_dev[rc_idx].link_status != ++ MSM_PCIE_LINK_DISABLED) { ++ PCIE_ERR(&msm_pcie_dev[rc_idx], ++ "PCIe: RC%d: requested to resume when link is not disabled:%d.\n", ++ rc_idx, msm_pcie_dev[rc_idx].link_status); ++ break; ++ } ++ ++ mutex_lock(&msm_pcie_dev[rc_idx].recovery_lock); ++ ret = msm_pcie_pm_resume(dev, user, data, options); ++ if (ret) { ++ PCIE_ERR(&msm_pcie_dev[rc_idx], ++ "PCIe: RC%d: user failed to resume the link.\n", ++ rc_idx); ++ } else { ++ PCIE_DBG(&msm_pcie_dev[rc_idx], ++ "PCIe: RC%d: user succeeded to resume the link.\n", ++ rc_idx); ++ ++ 
msm_pcie_dev[rc_idx].user_suspend = false; ++ } ++ ++ mutex_unlock(&msm_pcie_dev[rc_idx].recovery_lock); ++ break; ++ case MSM_PCIE_REQ_EXIT_L1: ++ msm_pcie_dev[rc_idx].req_exit_l1_counter++; ++ msm_pcie_write_mask(msm_pcie_dev[rc_idx].parf ++ + PCIE20_PARF_PM_CTRL, ++ 0, BIT(1)); ++ udelay(REQ_EXIT_L1_DELAY_US); ++ msm_pcie_write_mask(msm_pcie_dev[rc_idx].parf ++ + PCIE20_PARF_PM_CTRL, ++ BIT(1), 0); ++ break; ++ default: ++ PCIE_ERR(&msm_pcie_dev[rc_idx], ++ "PCIe: RC%d: unsupported pm operation:%d.\n", ++ rc_idx, pm_opt); ++ ret = -ENODEV; ++ goto out; ++ } ++ ++out: ++ return ret; ++} ++EXPORT_SYMBOL(msm_pcie_pm_control); ++ ++int msm_pcie_register_event(struct msm_pcie_register_event *reg) ++{ ++ int ret = 0; ++ struct msm_pcie_dev_t *pcie_dev; ++ ++ if (!reg) { ++ pr_err("PCIe: Event registration is NULL\n"); ++ return -ENODEV; ++ } ++ ++ if (!reg->user) { ++ pr_err("PCIe: User of event registration is NULL\n"); ++ return -ENODEV; ++ } ++ ++ pcie_dev = PCIE_BUS_PRIV_DATA(((struct pci_dev *)reg->user)); ++ ++ if (pcie_dev) { ++ pcie_dev->event_reg = reg; ++ PCIE_DBG(pcie_dev, ++ "Event 0x%x is registered for RC %d\n", reg->events, ++ pcie_dev->rc_idx); ++ } else { ++ PCIE_ERR(pcie_dev, ++ "PCIe: did not find RC for pci endpoint device 0x%x.\n", ++ (u32)reg->user); ++ ret = -ENODEV; ++ } ++ ++ return ret; ++} ++EXPORT_SYMBOL(msm_pcie_register_event); ++ ++int msm_pcie_deregister_event(struct msm_pcie_register_event *reg) ++{ ++ int ret = 0; ++ struct msm_pcie_dev_t *pcie_dev; ++ ++ if (!reg) { ++ pr_err("PCIe: Event deregistration is NULL\n"); ++ return -ENODEV; ++ } ++ ++ if (!reg->user) { ++ pr_err("PCIe: User of event deregistration is NULL\n"); ++ return -ENODEV; ++ } ++ ++ pcie_dev = PCIE_BUS_PRIV_DATA(((struct pci_dev *)reg->user)); ++ ++ if (pcie_dev) { ++ pcie_dev->event_reg = NULL; ++ PCIE_DBG(pcie_dev, "Event is deregistered for RC %d\n", ++ pcie_dev->rc_idx); ++ } else { ++ PCIE_ERR(pcie_dev, ++ "PCIe: did not find RC for pci endpoint device 
0x%x.\n", ++ (u32)reg->user); ++ ret = -ENODEV; ++ } ++ ++ return ret; ++} ++EXPORT_SYMBOL(msm_pcie_deregister_event); ++ ++int msm_pcie_recover_config(struct pci_dev *dev) ++{ ++ int ret = 0; ++ struct msm_pcie_dev_t *pcie_dev; ++ ++ if (dev) { ++ pcie_dev = PCIE_BUS_PRIV_DATA(dev); ++ PCIE_DBG(pcie_dev, ++ "Recovery for the link of RC%d\n", pcie_dev->rc_idx); ++ } else { ++ pr_err("PCIe: the input pci dev is NULL.\n"); ++ return -ENODEV; ++ } ++ ++ if (msm_pcie_confirm_linkup(pcie_dev, true, true)) { ++ PCIE_DBG(pcie_dev, ++ "Recover config space of RC%d and its EP\n", ++ pcie_dev->rc_idx); ++ pcie_dev->shadow_en = false; ++ PCIE_DBG(pcie_dev, "Recover RC%d\n", pcie_dev->rc_idx); ++ msm_pcie_cfg_recover(pcie_dev, true); ++ PCIE_DBG(pcie_dev, "Recover EP of RC%d\n", pcie_dev->rc_idx); ++ msm_pcie_cfg_recover(pcie_dev, false); ++ PCIE_DBG(pcie_dev, ++ "Refreshing the saved config space in PCI framework for RC%d and its EP\n", ++ pcie_dev->rc_idx); ++ pci_save_state(pcie_dev->dev); ++ pci_save_state(dev); ++ pcie_dev->shadow_en = true; ++ PCIE_DBG(pcie_dev, "Turn on shadow for RC%d\n", ++ pcie_dev->rc_idx); ++ } else { ++ PCIE_ERR(pcie_dev, ++ "PCIe: the link of RC%d is not up yet; can't recover config space.\n", ++ pcie_dev->rc_idx); ++ ret = -ENODEV; ++ } ++ ++ return ret; ++} ++EXPORT_SYMBOL(msm_pcie_recover_config); ++ ++int msm_pcie_shadow_control(struct pci_dev *dev, bool enable) ++{ ++ int ret = 0; ++ struct msm_pcie_dev_t *pcie_dev; ++ ++ if (dev) { ++ pcie_dev = PCIE_BUS_PRIV_DATA(dev); ++ PCIE_DBG(pcie_dev, ++ "Recovery for the link of RC%d\n", pcie_dev->rc_idx); ++ } else { ++ pr_err("PCIe: the input pci dev is NULL.\n"); ++ return -ENODEV; ++ } ++ ++ PCIE_DBG(pcie_dev, ++ "The shadowing of RC%d is %s enabled currently.\n", ++ pcie_dev->rc_idx, pcie_dev->shadow_en ? "" : "not"); ++ ++ pcie_dev->shadow_en = enable; ++ ++ PCIE_DBG(pcie_dev, ++ "Shadowing of RC%d is turned %s upon user's request.\n", ++ pcie_dev->rc_idx, enable ? 
"on" : "off"); ++ ++ return ret; ++} ++EXPORT_SYMBOL(msm_pcie_shadow_control); ++ ++int msm_pcie_access_control(struct pci_dev *dev, bool allow_access) ++{ ++ int ret = 0; ++ struct msm_pcie_dev_t *pcie_dev; ++ ++ if (dev) { ++ pcie_dev = PCIE_BUS_PRIV_DATA(dev); ++ PCIE_DBG(pcie_dev, ++ "access control for the link of RC%d\n", ++ pcie_dev->rc_idx); ++ } else { ++ pr_err("PCIe: the input pci dev is NULL.\n"); ++ return -ENODEV; ++ } ++ ++ mutex_lock(&pcie_dev->recovery_lock); ++ ++ PCIE_DBG(pcie_dev, ++ "The config space of RC%d is %savailable currently.\n", ++ pcie_dev->rc_idx, pcie_dev->cfg_access ? "" : "un"); ++ ++ pcie_dev->cfg_access = allow_access; ++ ++ PCIE_DBG(pcie_dev, ++ "The config space of RC%d becomes %savailable upon user's request.\n", ++ pcie_dev->rc_idx, pcie_dev->cfg_access ? "" : "un"); ++ ++ mutex_unlock(&pcie_dev->recovery_lock); ++ ++ return ret; ++} ++EXPORT_SYMBOL(msm_pcie_access_control); +diff --git a/arch/arm/mach-qcom/pcie.h b/arch/arm/mach-qcom/pcie.h +new file mode 100644 +index 0000000..94c0417 +--- /dev/null ++++ b/arch/arm/mach-qcom/pcie.h +@@ -0,0 +1,329 @@ ++/* Copyright (c) 2012-2014, The Linux Foundation. All rights reserved. ++ * ++ * This program is free software; you can redistribute it and/or modify ++ * it under the terms of the GNU General Public License version 2 and ++ * only version 2 as published by the Free Software Foundation. ++ * ++ * This program is distributed in the hope that it will be useful, ++ * but WITHOUT ANY WARRANTY; without even the implied warranty of ++ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the ++ * GNU General Public License for more details. 
++ */ ++ ++#ifndef __ARCH_ARM_MACH_MSM_PCIE_H ++#define __ARCH_ARM_MACH_MSM_PCIE_H ++ ++#include ++#include ++#ifdef CONFIG_IPC_LOGGING ++#include ++#endif ++#include ++#include ++#include ++#include ++#include ++ ++#define MSM_PCIE_MAX_VREG 3 ++#define MSM_PCIE_MAX_CLK 3 ++#define MSM_PCIE_MAX_PIPE_CLK 1 ++ ++#define MAX_RC_NUM 1 ++ ++#ifdef CONFIG_ARM_LPAE ++#define PCIE_UPPER_ADDR(addr) ((u32)((addr) >> 32)) ++#else ++#define PCIE_UPPER_ADDR(addr) (0x0) ++#endif ++#define PCIE_LOWER_ADDR(addr) ((u32)((addr) & 0xffffffff)) ++ ++#define PCIE_MSI_NR_IRQS 256 ++ ++#define PCIE_LOG_PAGES (50) ++ ++#ifdef CONFIG_IPC_LOGGING ++ ++#define PCIE_DBG(dev, fmt, arg...) do { \ ++ if ((dev) && (dev)->ipc_log_long) \ ++ ipc_log_string((dev)->ipc_log_long, \ ++ "DBG1:%s: " fmt, __func__, arg); \ ++ if ((dev) && (dev)->ipc_log) \ ++ ipc_log_string((dev)->ipc_log, "%s: " fmt, __func__, arg); \ ++ if (msm_pcie_get_debug_mask()) \ ++ pr_alert("%s: " fmt, __func__, arg); \ ++ } while (0) ++ ++#define PCIE_DBG2(dev, fmt, arg...) do { \ ++ if ((dev) && (dev)->ipc_log) \ ++ ipc_log_string((dev)->ipc_log, "DBG2:%s: " fmt, __func__, arg);\ ++ if (msm_pcie_get_debug_mask()) \ ++ pr_alert("%s: " fmt, __func__, arg); \ ++ } while (0) ++ ++#define PCIE_DBG3(dev, fmt, arg...) do { \ ++ if ((dev) && (dev)->ipc_log) \ ++ ipc_log_string((dev)->ipc_log, "DBG3:%s: " fmt, __func__, arg);\ ++ if (msm_pcie_get_debug_mask()) \ ++ pr_alert("%s: " fmt, __func__, arg); \ ++ } while (0) ++ ++#define PCIE_INFO(dev, fmt, arg...) do { \ ++ if ((dev) && (dev)->ipc_log_long) \ ++ ipc_log_string((dev)->ipc_log_long, \ ++ "INFO:%s: " fmt, __func__, arg); \ ++ if ((dev) && (dev)->ipc_log) \ ++ ipc_log_string((dev)->ipc_log, "%s: " fmt, __func__, arg); \ ++ pr_info("%s: " fmt, __func__, arg); \ ++ } while (0) ++ ++#define PCIE_ERR(dev, fmt, arg...) 
do { \ ++ if ((dev) && (dev)->ipc_log_long) \ ++ ipc_log_string((dev)->ipc_log_long, \ ++ "ERR:%s: " fmt, __func__, arg); \ ++ if ((dev) && (dev)->ipc_log) \ ++ ipc_log_string((dev)->ipc_log, "%s: " fmt, __func__, arg); \ ++ pr_err("%s: " fmt, __func__, arg); \ ++ } while (0) ++ ++#else ++ ++#define PCIE_DBG(dev, fmt, arg...) do { \ ++ if (msm_pcie_get_debug_mask()) \ ++ pr_alert("%s: " fmt, __func__, arg); \ ++ } while (0) ++ ++#define PCIE_DBG2(dev, fmt, arg...) do { \ ++ if (msm_pcie_get_debug_mask()) \ ++ pr_alert("%s: " fmt, __func__, arg); \ ++ } while (0) ++ ++#define PCIE_DBG3(dev, fmt, arg...) do { \ ++ if (msm_pcie_get_debug_mask()) \ ++ pr_alert("%s: " fmt, __func__, arg); \ ++ } while (0) ++ ++#define PCIE_INFO(dev, fmt, arg...) do { \ ++ pr_info("%s: " fmt, __func__, arg); \ ++ } while (0) ++ ++#define PCIE_ERR(dev, fmt, arg...) do { \ ++ pr_err("%s: " fmt, __func__, arg); \ ++ } while (0) ++ ++#endif ++ ++#define PCIE_BUS_PRIV_DATA(pdev) \ ++ (((struct pci_sys_data *)pdev->bus->sysdata)->private_data) ++ ++/* PM control options */ ++#define PM_IRQ 0x1 ++#define PM_CLK 0x2 ++#define PM_GPIO 0x4 ++#define PM_VREG 0x8 ++#define PM_PIPE_CLK 0x10 ++#define PM_ALL (PM_IRQ | PM_CLK | PM_GPIO | PM_VREG | PM_PIPE_CLK) ++ ++#define PCIE_CONF_SPACE_DW 1024 ++#define PCIE_CLEAR 0xDEADBEEF ++#define PCIE_LINK_DOWN 0xFFFFFFFF ++ ++enum msm_pcie_res { ++ MSM_PCIE_RES_PARF, ++ MSM_PCIE_RES_PHY, ++ MSM_PCIE_RES_DM_CORE, ++ MSM_PCIE_RES_ELBI, ++ MSM_PCIE_RES_CONF, ++ MSM_PCIE_RES_IO, ++ MSM_PCIE_RES_BARS, ++ MSM_PCIE_MAX_RES, ++}; ++ ++enum msm_pcie_rst { ++ MSM_PCIE_AXI_M_ARES, ++ MSM_PCIE_AXI_S_ARES, ++ MSM_PCIE_PIPE_ARES, ++ MSM_PCIE_AXI_M_VMIDMT_ARES, ++ MSM_PCIE_AXI_S_XPU_ARES, ++ MSM_PCIE_PARF_XPU_ARES, ++ MSM_PCIE_PHY_ARES, ++ MSM_PCIE_AXI_M_STICKY_ARES, ++ MSM_PCIE_PIPE_STICKY_ARES, ++ MSM_PCIE_PWR_ARES, ++ MSM_PCIE_AHB_ARES, ++ MSM_PCIE_PHY_AHB_ARES, ++ MSM_PCIE_MAX_RESET, ++}; ++ ++enum msm_pcie_irq { ++ MSM_PCIE_INT_MSI, ++ MSM_PCIE_INT_A, ++ MSM_PCIE_INT_B, 
++ MSM_PCIE_INT_C, ++ MSM_PCIE_INT_D, ++ MSM_PCIE_INT_PLS_PME, ++ MSM_PCIE_INT_PME_LEGACY, ++ MSM_PCIE_INT_PLS_ERR, ++ MSM_PCIE_INT_AER_LEGACY, ++ MSM_PCIE_INT_LINK_UP, ++ MSM_PCIE_INT_LINK_DOWN, ++ MSM_PCIE_INT_BRIDGE_FLUSH_N, ++ MSM_PCIE_INT_WAKE, ++ MSM_PCIE_MAX_IRQ, ++}; ++ ++/* gpios */ ++enum msm_pcie_gpio { ++ MSM_PCIE_GPIO_PERST, ++ MSM_PCIE_GPIO_WAKE, ++ MSM_PCIE_GPIO_CLKREQ, ++ MSM_PCIE_MAX_GPIO ++}; ++ ++enum msm_pcie_link_status { ++ MSM_PCIE_LINK_DEINIT, ++ MSM_PCIE_LINK_ENABLED, ++ MSM_PCIE_LINK_DISABLED ++}; ++ ++/* gpio info structure */ ++struct msm_pcie_gpio_info_t { ++ char *name; ++ uint32_t num; ++ bool out; ++ uint32_t on; ++ uint32_t init; ++}; ++ ++/* voltage regulator info structrue */ ++struct msm_pcie_vreg_info_t { ++ struct regulator *hdl; ++ char *name; ++ uint32_t max_v; ++ uint32_t min_v; ++ uint32_t opt_mode; ++ bool required; ++}; ++ ++/* reset info structure */ ++struct msm_pcie_rst_info_t { ++ struct reset_control *hdl; ++ char *name; ++}; ++ ++/* clock info structure */ ++struct msm_pcie_clk_info_t { ++ struct clk *hdl; ++ char *name; ++ u32 freq; ++ bool required; ++}; ++ ++/* resource info structure */ ++struct msm_pcie_res_info_t { ++ char *name; ++ struct resource *resource; ++ void __iomem *base; ++}; ++ ++/* irq info structrue */ ++struct msm_pcie_irq_info_t { ++ char *name; ++ uint32_t num; ++}; ++ ++/* msm pcie device structure */ ++struct msm_pcie_dev_t { ++ struct platform_device *pdev; ++ struct pci_dev *dev; ++ struct msi_controller *msi_chip; ++ struct regulator *gdsc; ++ struct msm_pcie_vreg_info_t vreg[MSM_PCIE_MAX_VREG]; ++ struct msm_pcie_gpio_info_t gpio[MSM_PCIE_MAX_GPIO]; ++ struct msm_pcie_clk_info_t clk[MSM_PCIE_MAX_CLK]; ++ struct msm_pcie_res_info_t res[MSM_PCIE_MAX_RES]; ++ struct msm_pcie_irq_info_t irq[MSM_PCIE_MAX_IRQ]; ++ struct msm_pcie_rst_info_t rst[MSM_PCIE_MAX_RESET]; ++ ++ void __iomem *parf; ++ void __iomem *phy; ++ void __iomem *elbi; ++ void __iomem *dm_core; ++ void __iomem *conf; ++ void 
__iomem *bars; ++ ++ uint32_t axi_bar_start; ++ uint32_t axi_bar_end; ++ ++ struct resource *dev_mem_res; ++ struct resource *dev_io_res; ++ ++ uint32_t wake_n; ++ uint32_t vreg_n; ++ uint32_t gpio_n; ++ uint32_t parf_deemph; ++ uint32_t parf_swing; ++ ++ bool cfg_access; ++ spinlock_t cfg_lock; ++ unsigned long irqsave_flags; ++ struct mutex setup_lock; ++ ++ struct irq_domain *irq_domain; ++ DECLARE_BITMAP(msi_irq_in_use, PCIE_MSI_NR_IRQS); ++ uint32_t msi_gicm_addr; ++ uint32_t msi_gicm_base; ++ bool use_msi; ++ bool is_emulation; ++ ++ enum msm_pcie_link_status link_status; ++ bool user_suspend; ++ struct pci_saved_state *saved_state; ++ ++ struct wakeup_source ws; ++ struct msm_bus_scale_pdata *bus_scale_table; ++ uint32_t bus_client; ++ ++ bool l1ss_supported; ++ bool aux_clk_sync; ++ uint32_t n_fts; ++ bool ext_ref_clk; ++ uint32_t ep_latency; ++ bool ep_wakeirq; ++ ++ uint32_t rc_idx; ++ bool enumerated; ++ struct work_struct handle_wake_work; ++ struct mutex recovery_lock; ++ spinlock_t linkdown_lock; ++ spinlock_t wakeup_lock; ++ ulong linkdown_counter; ++ bool suspending; ++ ulong wake_counter; ++ ulong req_exit_l1_counter; ++ u32 ep_shadow[PCIE_CONF_SPACE_DW]; ++ u32 rc_shadow[PCIE_CONF_SPACE_DW]; ++ bool shadow_en; ++ struct msm_pcie_register_event *event_reg; ++ bool power_on; ++ void *ipc_log; ++ void *ipc_log_long; ++ struct pci_bus *pci_bus; ++}; ++ ++extern int msm_pcie_enumerate(u32 rc_idx); ++extern int msm_pcie_enable(struct msm_pcie_dev_t *dev, u32 options); ++extern void msm_pcie_disable(struct msm_pcie_dev_t *dev, u32 options); ++extern void msm_pcie_cfg_recover(struct msm_pcie_dev_t *dev, bool rc); ++extern void msm_pcie_config_msi_controller(struct msm_pcie_dev_t *dev); ++extern int32_t msm_pcie_irq_init(struct msm_pcie_dev_t *dev); ++extern void msm_pcie_irq_deinit(struct msm_pcie_dev_t *dev); ++extern int msm_pcie_get_debug_mask(void); ++extern bool msm_pcie_confirm_linkup(struct msm_pcie_dev_t *dev, ++ bool check_sw_stts, bool 
check_ep); ++ ++extern void pcie_phy_init(struct msm_pcie_dev_t *dev); ++extern bool pcie_phy_is_ready(struct msm_pcie_dev_t *dev); ++extern void pcie20_uni_phy_init(struct msm_pcie_dev_t *dev); ++extern bool pcie_phy_detect(struct msm_pcie_dev_t *dev); ++ ++#endif +diff --git a/arch/arm/mach-qcom/pcie_irq.c b/arch/arm/mach-qcom/pcie_irq.c +new file mode 100644 +index 0000000..ae21d05 +--- /dev/null ++++ b/arch/arm/mach-qcom/pcie_irq.c +@@ -0,0 +1,598 @@ ++/* Copyright (c) 2012-2014, The Linux Foundation. All rights reserved. ++ * ++ * This program is free software; you can redistribute it and/or modify ++ * it under the terms of the GNU General Public License version 2 and ++ * only version 2 as published by the Free Software Foundation. ++ * ++ * This program is distributed in the hope that it will be useful, ++ * but WITHOUT ANY WARRANTY; without even the implied warranty of ++ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the ++ * GNU General Public License for more details. ++ */ ++ ++/* ++ * MSM PCIe controller IRQ driver. 
++ */ ++ ++#define pr_fmt(fmt) "%s: " fmt, __func__ ++ ++#include ++#include ++#include ++#include ++#include ++#include ++#include ++#include ++#include ++#include ++#include "pcie.h" ++ ++/* Any address will do here, as it won't be dereferenced */ ++#define MSM_PCIE_MSI_PHY 0xa0000000 ++ ++#define PCIE20_MSI_CTRL_ADDR (0x820) ++#define PCIE20_MSI_CTRL_UPPER_ADDR (0x824) ++#define PCIE20_MSI_CTRL_INTR_EN (0x828) ++#define PCIE20_MSI_CTRL_INTR_MASK (0x82C) ++#define PCIE20_MSI_CTRL_INTR_STATUS (0x830) ++ ++#define PCIE20_MSI_CTRL_MAX 8 ++ ++#define LINKDOWN_INIT_WAITING_US_MIN 995 ++#define LINKDOWN_INIT_WAITING_US_MAX 1005 ++#define LINKDOWN_WAITING_US_MIN 4900 ++#define LINKDOWN_WAITING_US_MAX 5100 ++#define LINKDOWN_WAITING_COUNT 200 ++ ++static void msm_pcie_notify_client(struct msm_pcie_dev_t *dev, ++ enum msm_pcie_event event) ++{ ++ if (dev->event_reg && dev->event_reg->callback && ++ (dev->event_reg->events & event)) { ++ struct msm_pcie_notify *notify = &dev->event_reg->notify; ++ notify->event = event; ++ notify->user = dev->event_reg->user; ++ PCIE_DBG(dev, "PCIe: callback RC%d for event %d.\n", ++ dev->rc_idx, event); ++ dev->event_reg->callback(notify); ++ ++ if ((dev->event_reg->options & MSM_PCIE_CONFIG_NO_RECOVERY) && ++ (event == MSM_PCIE_EVENT_LINKDOWN)) { ++ dev->user_suspend = true; ++ PCIE_DBG(dev, ++ "PCIe: Client of RC%d will recover the link later.\n", ++ dev->rc_idx); ++ return; ++ } ++ } else { ++ PCIE_DBG2(dev, ++ "PCIe: Client of RC%d does not have registration for event %d.\n", ++ dev->rc_idx, event); ++ } ++} ++ ++static void handle_wake_func(struct work_struct *work) ++{ ++ int ret; ++ struct msm_pcie_dev_t *dev = container_of(work, struct msm_pcie_dev_t, ++ handle_wake_work); ++ ++ PCIE_DBG(dev, "PCIe: Wake work for RC%d\n", dev->rc_idx); ++ ++ mutex_lock(&dev->recovery_lock); ++ ++ if (!dev->enumerated) { ++ PCIE_DBG(dev, ++ "PCIe: Start enumeration for RC%d upon the wake from endpoint.\n", ++ dev->rc_idx); ++ ++ ret = 
msm_pcie_enumerate(dev->rc_idx); ++ ++ if (ret) { ++ PCIE_ERR(dev, ++ "PCIe: failed to enable RC%d upon wake request from the device.\n", ++ dev->rc_idx); ++ goto out; ++ } ++ ++ if ((dev->link_status == MSM_PCIE_LINK_ENABLED) && ++ dev->event_reg && dev->event_reg->callback && ++ (dev->event_reg->events & MSM_PCIE_EVENT_LINKUP)) { ++ struct msm_pcie_notify *notify = ++ &dev->event_reg->notify; ++ notify->event = MSM_PCIE_EVENT_LINKUP; ++ notify->user = dev->event_reg->user; ++ PCIE_DBG(dev, ++ "PCIe: Linkup callback for RC%d after enumeration is successful in wake IRQ handling\n", ++ dev->rc_idx); ++ dev->event_reg->callback(notify); ++ } else { ++ PCIE_DBG(dev, ++ "PCIe: Client of RC%d does not have registration for linkup event.\n", ++ dev->rc_idx); ++ } ++ goto out; ++ } else { ++ PCIE_ERR(dev, ++ "PCIe: The enumeration for RC%d has already been done.\n", ++ dev->rc_idx); ++ goto out; ++ } ++ ++out: ++ mutex_unlock(&dev->recovery_lock); ++} ++ ++static irqreturn_t handle_wake_irq(int irq, void *data) ++{ ++ struct msm_pcie_dev_t *dev = data; ++ unsigned long irqsave_flags; ++ ++ spin_lock_irqsave(&dev->wakeup_lock, irqsave_flags); ++ ++ dev->wake_counter++; ++ PCIE_DBG(dev, "PCIe: No. 
%ld wake IRQ for RC%d\n", ++ dev->wake_counter, dev->rc_idx); ++ ++ PCIE_DBG2(dev, "PCIe WAKE is asserted by Endpoint of RC%d\n", ++ dev->rc_idx); ++ ++ if (!dev->enumerated) { ++ PCIE_DBG(dev, "Start enumeating RC%d\n", dev->rc_idx); ++ schedule_work(&dev->handle_wake_work); ++ } else { ++ PCIE_DBG2(dev, "Wake up RC%d\n", dev->rc_idx); ++ __pm_stay_awake(&dev->ws); ++ __pm_relax(&dev->ws); ++ msm_pcie_notify_client(dev, MSM_PCIE_EVENT_WAKEUP); ++ } ++ ++ spin_unlock_irqrestore(&dev->wakeup_lock, irqsave_flags); ++ ++ return IRQ_HANDLED; ++} ++ ++static irqreturn_t handle_linkdown_irq(int irq, void *data) ++{ ++ struct msm_pcie_dev_t *dev = data; ++ unsigned long irqsave_flags; ++ ++ spin_lock_irqsave(&dev->linkdown_lock, irqsave_flags); ++ ++ dev->linkdown_counter++; ++ PCIE_DBG(dev, ++ "PCIe: No. %ld linkdown IRQ for RC%d.\n", ++ dev->linkdown_counter, dev->rc_idx); ++ ++ if (!dev->enumerated || dev->link_status != MSM_PCIE_LINK_ENABLED) { ++ PCIE_DBG(dev, ++ "PCIe:Linkdown IRQ for RC%d when the link is not enabled\n", ++ dev->rc_idx); ++ } else if (dev->suspending) { ++ PCIE_DBG(dev, ++ "PCIe:the link of RC%d is suspending.\n", ++ dev->rc_idx); ++ } else { ++ dev->link_status = MSM_PCIE_LINK_DISABLED; ++ dev->shadow_en = false; ++ /* assert PERST */ ++ gpio_set_value(dev->gpio[MSM_PCIE_GPIO_PERST].num, ++ dev->gpio[MSM_PCIE_GPIO_PERST].on); ++ PCIE_ERR(dev, "PCIe link is down for RC%d\n", dev->rc_idx); ++ msm_pcie_notify_client(dev, MSM_PCIE_EVENT_LINKDOWN); ++ } ++ ++ spin_unlock_irqrestore(&dev->linkdown_lock, irqsave_flags); ++ ++ return IRQ_HANDLED; ++} ++ ++static irqreturn_t handle_msi_irq(int irq, void *data) ++{ ++ int i, j; ++ unsigned long val; ++ struct msm_pcie_dev_t *dev = data; ++ void __iomem *ctrl_status; ++ ++ PCIE_DBG(dev, "irq=%d\n", irq); ++ ++ /* check for set bits, clear it by setting that bit ++ and trigger corresponding irq */ ++ for (i = 0; i < PCIE20_MSI_CTRL_MAX; i++) { ++ ctrl_status = dev->dm_core + ++ PCIE20_MSI_CTRL_INTR_STATUS + 
(i * 12); ++ ++ val = readl_relaxed(ctrl_status); ++ while (val) { ++ j = find_first_bit(&val, 32); ++ writel_relaxed(BIT(j), ctrl_status); ++ /* ensure that interrupt is cleared (acked) */ ++ wmb(); ++ generic_handle_irq( ++ irq_find_mapping(dev->irq_domain, (j + (32*i))) ++ ); ++ val = readl_relaxed(ctrl_status); ++ } ++ } ++ ++ return IRQ_HANDLED; ++} ++ ++void msm_pcie_config_msi_controller(struct msm_pcie_dev_t *dev) ++{ ++ int i; ++ ++ PCIE_DBG(dev, "RC%d\n", dev->rc_idx); ++ ++ /* program MSI controller and enable all interrupts */ ++ writel_relaxed(MSM_PCIE_MSI_PHY, dev->dm_core + PCIE20_MSI_CTRL_ADDR); ++ writel_relaxed(0, dev->dm_core + PCIE20_MSI_CTRL_UPPER_ADDR); ++ ++ for (i = 0; i < PCIE20_MSI_CTRL_MAX; i++) ++ writel_relaxed(~0, dev->dm_core + ++ PCIE20_MSI_CTRL_INTR_EN + (i * 12)); ++ ++ /* ensure that hardware is configured before proceeding */ ++ wmb(); ++} ++ ++void msm_pcie_destroy_irq(unsigned int irq, struct msm_pcie_dev_t *pcie_dev) ++{ ++ int pos; ++ struct msm_pcie_dev_t *dev; ++ ++ if (pcie_dev) { ++ dev = pcie_dev; ++ } else { ++ dev = irq_get_chip_data(irq); ++ if (!dev) ++ return; ++ } ++ ++ if (dev->msi_gicm_addr) { ++ PCIE_DBG(dev, "destroy QGIC based irq %d\n", irq); ++ pos = irq - dev->msi_gicm_base; ++ } else { ++ PCIE_DBG(dev, "destroy default MSI irq %d\n", irq); ++ pos = irq - irq_find_mapping(dev->irq_domain, 0); ++ } ++ ++ PCIE_DBG(dev, "RC%d\n", dev->rc_idx); ++ ++ if (!dev->msi_gicm_addr) ++ irq_dispose_mapping(irq); ++ ++ PCIE_DBG(dev, "Before clear_bit pos:%d msi_irq_in_use:%ld\n", ++ pos, *dev->msi_irq_in_use); ++ clear_bit(pos, dev->msi_irq_in_use); ++ PCIE_DBG(dev, "After clear_bit pos:%d msi_irq_in_use:%ld\n", ++ pos, *dev->msi_irq_in_use); ++} ++ ++/* hookup to linux pci msi framework */ ++void msm_pcie_teardown_msi_irq(struct msi_controller *chip, unsigned int irq) ++{ ++ pr_debug("irq %d deallocated\n", irq); ++ msm_pcie_destroy_irq(irq, NULL); ++} ++ ++#if 0 ++void msm_pcie_teardown_msi_irqs(struct pci_dev *dev) 
++{ ++ struct msi_desc *entry; ++ struct msm_pcie_dev_t *pcie_dev = PCIE_BUS_PRIV_DATA(dev); ++ ++ PCIE_DBG(pcie_dev, "RC:%d EP: vendor_id:0x%x device_id:0x%x\n", ++ pcie_dev->rc_idx, dev->vendor, dev->device); ++ ++ pcie_dev->use_msi = false; ++ ++ list_for_each_entry(entry, &dev->msi_list, list) { ++ int i, nvec; ++ if (entry->irq == 0) ++ continue; ++ nvec = 1 << entry->msi_attrib.multiple; ++ for (i = 0; i < nvec; i++) ++ msm_pcie_destroy_irq(entry->irq + i, pcie_dev); ++ } ++} ++#endif ++ ++static void msm_pcie_msi_nop(struct irq_data *d) ++{ ++ return; ++} ++ ++static struct irq_chip pcie_msi_chip = { ++ .name = "msm-pcie-msi", ++ .irq_ack = msm_pcie_msi_nop, ++ .irq_enable = unmask_msi_irq, ++ .irq_disable = mask_msi_irq, ++ .irq_mask = mask_msi_irq, ++ .irq_unmask = unmask_msi_irq, ++}; ++ ++static int msm_pcie_create_irq(struct msm_pcie_dev_t *dev) ++{ ++ int irq, pos; ++ ++ PCIE_DBG(dev, "RC%d\n", dev->rc_idx); ++ ++again: ++ pos = find_first_zero_bit(dev->msi_irq_in_use, PCIE_MSI_NR_IRQS); ++ ++ if (pos >= PCIE_MSI_NR_IRQS) ++ return -ENOSPC; ++ ++ PCIE_DBG(dev, "pos:%d msi_irq_in_use:%ld\n", pos, *dev->msi_irq_in_use); ++ ++ if (test_and_set_bit(pos, dev->msi_irq_in_use)) ++ goto again; ++ else ++ PCIE_DBG(dev, "test_and_set_bit is successful pos=%d\n", pos); ++ ++ irq = irq_create_mapping(dev->irq_domain, pos); ++ if (!irq) ++ return -EINVAL; ++ ++ return irq; ++} ++ ++static int arch_setup_msi_irq_default(struct pci_dev *pdev, ++ struct msi_desc *desc, int nvec) ++{ ++ int irq; ++ struct msi_msg msg; ++ struct msm_pcie_dev_t *dev = PCIE_BUS_PRIV_DATA(pdev); ++ ++ PCIE_DBG(dev, "RC%d\n", dev->rc_idx); ++ ++ irq = msm_pcie_create_irq(dev); ++ ++ PCIE_DBG(dev, "IRQ %d is allocated.\n", irq); ++ ++ if (irq < 0) ++ return irq; ++ ++ PCIE_DBG(dev, "irq %d allocated\n", irq); ++ ++ irq_set_msi_desc(irq, desc); ++ ++ /* write msi vector and data */ ++ msg.address_hi = 0; ++ msg.address_lo = MSM_PCIE_MSI_PHY; ++ msg.data = irq - 
irq_find_mapping(dev->irq_domain, 0); ++ write_msi_msg(irq, &msg); ++ ++ return 0; ++} ++ ++static int msm_pcie_create_irq_qgic(struct msm_pcie_dev_t *dev) ++{ ++ int irq, pos; ++ ++ PCIE_DBG(dev, "RC%d\n", dev->rc_idx); ++ ++again: ++ pos = find_first_zero_bit(dev->msi_irq_in_use, PCIE_MSI_NR_IRQS); ++ ++ if (pos >= PCIE_MSI_NR_IRQS) ++ return -ENOSPC; ++ ++ PCIE_DBG(dev, "pos:%d msi_irq_in_use:%ld\n", pos, *dev->msi_irq_in_use); ++ ++ if (test_and_set_bit(pos, dev->msi_irq_in_use)) ++ goto again; ++ else ++ PCIE_DBG(dev, "test_and_set_bit is successful pos=%d\n", pos); ++ ++ irq = dev->msi_gicm_base + pos; ++ if (!irq) { ++ PCIE_ERR(dev, "PCIe: RC%d failed to create QGIC MSI IRQ.\n", ++ dev->rc_idx); ++ return -EINVAL; ++ } ++ ++ return irq; ++} ++ ++static int arch_setup_msi_irq_qgic(struct pci_dev *pdev, ++ struct msi_desc *desc, int nvec) ++{ ++ int irq, index, firstirq = 0; ++ struct msi_msg msg; ++ struct msm_pcie_dev_t *dev = PCIE_BUS_PRIV_DATA(pdev); ++ ++ PCIE_DBG(dev, "RC%d\n", dev->rc_idx); ++ ++ for (index = 0; index < nvec; index++) { ++ irq = msm_pcie_create_irq_qgic(dev); ++ PCIE_DBG(dev, "irq %d is allocated\n", irq); ++ ++ if (irq < 0) ++ return irq; ++ ++ if (index == 0) ++ firstirq = irq; ++ ++ irq_set_irq_type(irq, IRQ_TYPE_EDGE_RISING); ++ } ++ ++ /* write msi vector and data */ ++ irq_set_msi_desc(firstirq, desc); ++ msg.address_hi = 0; ++ msg.address_lo = dev->msi_gicm_addr; ++ msg.data = firstirq; ++ write_msi_msg(firstirq, &msg); ++ ++ return 0; ++} ++ ++int msm_pcie_setup_msi_irq(struct msi_controller *chip, struct pci_dev *pdev, ++ struct msi_desc *desc) ++{ ++ struct msm_pcie_dev_t *dev = PCIE_BUS_PRIV_DATA(pdev); ++ ++ PCIE_DBG(dev, "RC%d\n", dev->rc_idx); ++ ++ if (dev->msi_gicm_addr) ++ return arch_setup_msi_irq_qgic(pdev, desc, 1); ++ else ++ return arch_setup_msi_irq_default(pdev, desc, 1); ++} ++ ++#if 0 ++static int msm_pcie_get_msi_multiple(int nvec) ++{ ++ int msi_multiple = 0; ++ ++ while (nvec) { ++ nvec = nvec >> 1; ++ 
msi_multiple++; ++ } ++ pr_debug("log2 number of MSI multiple:%d\n", ++ msi_multiple - 1); ++ ++ return msi_multiple - 1; ++} ++ ++int msm_pcie_setup_msi_irqs(struct pci_dev *dev, int nvec, int type) ++{ ++ struct msi_desc *entry; ++ int ret; ++ struct msm_pcie_dev_t *pcie_dev = PCIE_BUS_PRIV_DATA(dev); ++ ++ PCIE_DBG(pcie_dev, "RC%d\n", pcie_dev->rc_idx); ++ ++ if (type != PCI_CAP_ID_MSI || nvec > 32) ++ return -ENOSPC; ++ ++ PCIE_DBG(pcie_dev, "nvec = %d\n", nvec); ++ ++ list_for_each_entry(entry, &dev->msi_list, list) { ++ entry->msi_attrib.multiple = ++ msm_pcie_get_msi_multiple(nvec); ++ ++ if (pcie_dev->msi_gicm_addr) ++ ret = arch_setup_msi_irq_qgic(dev, entry, nvec); ++ else ++ ret = arch_setup_msi_irq_default(dev, entry, nvec); ++ ++ PCIE_DBG(pcie_dev, "ret from msi_irq: %d\n", ret); ++ ++ if (ret < 0) ++ return ret; ++ if (ret > 0) ++ return -ENOSPC; ++ } ++ ++ pcie_dev->use_msi = true; ++ ++ return 0; ++} ++#endif ++ ++static int msm_pcie_msi_map(struct irq_domain *domain, unsigned int irq, ++ irq_hw_number_t hwirq) ++{ ++ irq_set_chip_and_handler (irq, &pcie_msi_chip, handle_simple_irq); ++ irq_set_chip_data(irq, domain->host_data); ++ //set_irq_flags(irq, IRQF_VALID); ++ return 0; ++} ++ ++static const struct irq_domain_ops msm_pcie_msi_ops = { ++ .map = msm_pcie_msi_map, ++}; ++ ++int32_t msm_pcie_irq_init(struct msm_pcie_dev_t *dev) ++{ ++ int rc; ++ int msi_start = 0; ++ struct device *pdev = &dev->pdev->dev; ++ ++ PCIE_DBG(dev, "RC%d\n", dev->rc_idx); ++ ++ dev->msi_chip = kzalloc(sizeof(struct msi_controller), GFP_KERNEL); ++ if (!dev->msi_chip) ++ return -ENOMEM; ++ ++ dev->msi_chip->setup_irq = msm_pcie_setup_msi_irq; ++ dev->msi_chip->teardown_irq = msm_pcie_teardown_msi_irq; ++ ++ if (dev->ep_wakeirq) ++ wakeup_source_init(&dev->ws, "pcie_wakeup_source"); ++ ++ /* register handler for linkdown interrupt */ ++ rc = devm_request_irq(pdev, ++ dev->irq[MSM_PCIE_INT_LINK_DOWN].num, handle_linkdown_irq, ++ IRQF_TRIGGER_RISING, 
dev->irq[MSM_PCIE_INT_LINK_DOWN].name, ++ dev); ++ if (rc) { ++ PCIE_ERR(dev, "PCIe: Unable to request linkdown interrupt:%d\n", ++ dev->irq[MSM_PCIE_INT_LINK_DOWN].num); ++ return rc; ++ } ++ ++ /* register handler for physical MSI interrupt line */ ++ rc = devm_request_irq(pdev, ++ dev->irq[MSM_PCIE_INT_MSI].num, handle_msi_irq, ++ IRQF_TRIGGER_RISING, dev->irq[MSM_PCIE_INT_MSI].name, dev); ++ if (rc) { ++ PCIE_ERR(dev, "PCIe: RC%d: Unable to request MSI interrupt\n", ++ dev->rc_idx); ++ return rc; ++ } ++ ++ if (dev->ep_wakeirq) { ++ /* register handler for PCIE_WAKE_N interrupt line */ ++ rc = devm_request_irq(pdev, ++ dev->wake_n, handle_wake_irq, IRQF_TRIGGER_FALLING, ++ "msm_pcie_wake", dev); ++ if (rc) { ++ PCIE_ERR(dev, "PCIe: RC%d: Unable to request wake interrupt\n", ++ dev->rc_idx); ++ return rc; ++ } ++ ++ INIT_WORK(&dev->handle_wake_work, handle_wake_func); ++ ++ rc = enable_irq_wake(dev->wake_n); ++ if (rc) { ++ PCIE_ERR(dev, "PCIe: RC%d: Unable to enable wake interrupt\n", ++ dev->rc_idx); ++ return rc; ++ } ++ } ++ ++ /* Create a virtual domain of interrupts */ ++ if (!dev->msi_gicm_addr) { ++ dev->irq_domain = irq_domain_add_linear(dev->pdev->dev.of_node, ++ PCIE_MSI_NR_IRQS, &msm_pcie_msi_ops, dev); ++ ++ if (!dev->irq_domain) { ++ PCIE_ERR(dev, ++ "PCIe: RC%d: Unable to initialize irq domain\n", ++ dev->rc_idx); ++ disable_irq(dev->wake_n); ++ return PTR_ERR(dev->irq_domain); ++ } ++ ++ msi_start = irq_create_mapping(dev->irq_domain, 0); ++ } ++ ++ return 0; ++} ++ ++void msm_pcie_irq_deinit(struct msm_pcie_dev_t *dev) ++{ ++ PCIE_DBG(dev, "RC%d\n", dev->rc_idx); ++ ++ kfree(dev->msi_chip); ++ ++ if (dev->ep_wakeirq) { ++ wakeup_source_trash(&dev->ws); ++ disable_irq(dev->wake_n); ++ } ++} +diff --git a/arch/arm/mach-qcom/pcie_phy.c b/arch/arm/mach-qcom/pcie_phy.c +new file mode 100644 +index 0000000..32f3c79 +--- /dev/null ++++ b/arch/arm/mach-qcom/pcie_phy.c +@@ -0,0 +1,403 @@ ++/* Copyright (c) 2013-2014, The Linux Foundation. 
All rights reserved. ++ * ++ * This program is free software; you can redistribute it and/or modify ++ * it under the terms of the GNU General Public License version 2 and ++ * only version 2 as published by the Free Software Foundation. ++ * ++ * This program is distributed in the hope that it will be useful, ++ * but WITHOUT ANY WARRANTY; without even the implied warranty of ++ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the ++ * GNU General Public License for more details. ++ */ ++ ++/* ++ * MSM PCIe PHY driver. ++ */ ++ ++#include ++#include ++#include "pcie.h" ++#include "pcie_phy.h" ++ ++#define MDIO_CTRL_0_REG 0x40 ++#define MDIO_CTRL_1_REG 0x44 ++#define MDIO_CTRL_2_REG 0x48 ++#define MDIO_CTRL_3_REG 0x4C ++#define MDIO_CTRL_4_REG 0x50 ++ ++#define MDIO_PCIE_PHY_ID (0x5 << 13) ++#define MDC_MODE (0x1 << 12) ++#define MDIO_CLAUSE_22 (0x0 << 8) ++#define MDIO_CLAUSE_45 (0x1 << 8) ++#define MDIO_PCIE_CLK_DIV (0xF) ++#define MDIO_MMD_ID (0x1) ++ ++#define MDIO_ACCESS_BUSY (0x1 << 16) ++#define MDIO_ACCESS_START (0x1 << 8) ++#define MDIO_TIMEOUT_STATIC 1000 ++ ++#define MDIO_ACCESS_22_WRITE (0x1) ++#define MDIO_ACCESS_22_READ (0x0) ++#define MDIO_ACCESS_45_WRITE (0x2) ++#define MDIO_ACCESS_45_READ (0x1) ++#define MDIO_ACCESS_45_READ_ADDR (0x0) ++ ++static inline void write_phy(void *base, u32 offset, u32 value) ++{ ++ writel_relaxed(value, base + offset); ++ wmb(); ++} ++ ++#ifndef CONFIG_ARCH_MDM9630 ++static inline void pcie20_phy_init_default(struct msm_pcie_dev_t *dev) ++{ ++ ++ PCIE_DBG(dev, "RC%d: Initializing 28nm QMP phy - 19.2MHz\n", ++ dev->rc_idx); ++ ++ write_phy(dev->phy, PCIE_PHY_POWER_DOWN_CONTROL, 0x03); ++ write_phy(dev->phy, QSERDES_COM_SYSCLK_EN_SEL, 0x08); ++ write_phy(dev->phy, QSERDES_COM_DEC_START1, 0x82); ++ write_phy(dev->phy, QSERDES_COM_DEC_START2, 0x03); ++ write_phy(dev->phy, QSERDES_COM_DIV_FRAC_START1, 0xd5); ++ write_phy(dev->phy, QSERDES_COM_DIV_FRAC_START2, 0xaa); ++ write_phy(dev->phy, 
QSERDES_COM_DIV_FRAC_START3, 0x13); ++ write_phy(dev->phy, QSERDES_COM_PLLLOCK_CMP_EN, 0x01); ++ write_phy(dev->phy, QSERDES_COM_PLLLOCK_CMP1, 0x2b); ++ write_phy(dev->phy, QSERDES_COM_PLLLOCK_CMP2, 0x68); ++ write_phy(dev->phy, QSERDES_COM_PLL_CRCTRL, 0xff); ++ write_phy(dev->phy, QSERDES_COM_PLL_CP_SETI, 0x3f); ++ write_phy(dev->phy, QSERDES_COM_PLL_IP_SETP, 0x07); ++ write_phy(dev->phy, QSERDES_COM_PLL_CP_SETP, 0x03); ++ write_phy(dev->phy, QSERDES_RX_CDR_CONTROL, 0xf3); ++ write_phy(dev->phy, QSERDES_RX_CDR_CONTROL2, 0x6b); ++ write_phy(dev->phy, QSERDES_COM_RESETSM_CNTRL, 0x10); ++ write_phy(dev->phy, QSERDES_RX_RX_TERM_HIGHZ_CM_AC_COUPLE, 0x87); ++ write_phy(dev->phy, QSERDES_RX_RX_EQ_GAIN12, 0x54); ++ write_phy(dev->phy, PCIE_PHY_POWER_STATE_CONFIG1, 0xa3); ++ write_phy(dev->phy, PCIE_PHY_POWER_STATE_CONFIG2, 0xcb); ++ write_phy(dev->phy, QSERDES_COM_PLL_RXTXEPCLK_EN, 0x10); ++ write_phy(dev->phy, PCIE_PHY_ENDPOINT_REFCLK_DRIVE, 0x10); ++ write_phy(dev->phy, PCIE_PHY_SW_RESET, 0x00); ++ write_phy(dev->phy, PCIE_PHY_START, 0x03); ++} ++#endif ++ ++#ifdef CONFIG_ARCH_MDM9630 ++void pcie_phy_init(struct msm_pcie_dev_t *dev) ++{ ++ ++ PCIE_DBG(dev, "RC%d: Initializing 28nm QMP phy - 19.2MHz\n", ++ dev->rc_idx); ++ ++ write_phy(dev->phy, PCIE_PHY_POWER_DOWN_CONTROL, 0x03); ++ ++ write_phy(dev->phy, QSERDES_COM_SYSCLK_EN_SEL_TXBAND, 0x08); ++ write_phy(dev->phy, QSERDES_COM_DEC_START1, 0x82); ++ write_phy(dev->phy, QSERDES_COM_DEC_START2, 0x03); ++ write_phy(dev->phy, QSERDES_COM_DIV_FRAC_START1, 0xD5); ++ write_phy(dev->phy, QSERDES_COM_DIV_FRAC_START2, 0xAA); ++ write_phy(dev->phy, QSERDES_COM_DIV_FRAC_START3, 0x4D); ++ write_phy(dev->phy, QSERDES_COM_PLLLOCK_CMP_EN, 0x01); ++ write_phy(dev->phy, QSERDES_COM_PLLLOCK_CMP1, 0x2B); ++ write_phy(dev->phy, QSERDES_COM_PLLLOCK_CMP2, 0x68); ++ write_phy(dev->phy, QSERDES_COM_PLL_CRCTRL, 0x7C); ++ write_phy(dev->phy, QSERDES_COM_PLL_CP_SETI, 0x02); ++ write_phy(dev->phy, QSERDES_COM_PLL_IP_SETP, 0x1F); ++ 
write_phy(dev->phy, QSERDES_COM_PLL_CP_SETP, 0x0F); ++ write_phy(dev->phy, QSERDES_COM_PLL_IP_SETI, 0x01); ++ write_phy(dev->phy, QSERDES_COM_IE_TRIM, 0x0F); ++ write_phy(dev->phy, QSERDES_COM_IP_TRIM, 0x0F); ++ write_phy(dev->phy, QSERDES_COM_PLL_CNTRL, 0x46); ++ ++ /* CDR Settings */ ++ write_phy(dev->phy, QSERDES_RX_CDR_CONTROL1, 0xF3); ++ write_phy(dev->phy, QSERDES_RX_CDR_CONTROL_HALF, 0x2B); ++ ++ write_phy(dev->phy, QSERDES_COM_PLL_VCOTAIL_EN, 0xE1); ++ ++ /* Calibration Settings */ ++ write_phy(dev->phy, QSERDES_COM_RESETSM_CNTRL, 0x90); ++ write_phy(dev->phy, QSERDES_COM_RESETSM_CNTRL2, 0x7); ++ ++ /* Additional writes */ ++ write_phy(dev->phy, QSERDES_COM_RES_CODE_START_SEG1, 0x20); ++ write_phy(dev->phy, QSERDES_COM_RES_CODE_CAL_CSR, 0x77); ++ write_phy(dev->phy, QSERDES_COM_RES_TRIM_CONTROL, 0x15); ++ write_phy(dev->phy, QSERDES_TX_RCV_DETECT_LVL, 0x03); ++ write_phy(dev->phy, QSERDES_RX_RX_EQ_GAIN1_LSB, 0xFF); ++ write_phy(dev->phy, QSERDES_RX_RX_EQ_GAIN1_MSB, 0x1F); ++ write_phy(dev->phy, QSERDES_RX_RX_EQ_GAIN2_LSB, 0xFF); ++ write_phy(dev->phy, QSERDES_RX_RX_EQ_GAIN2_MSB, 0x00); ++ write_phy(dev->phy, QSERDES_RX_RX_EQU_ADAPTOR_CNTRL2, 0x1A); ++ write_phy(dev->phy, QSERDES_RX_RX_EQ_OFFSET_ADAPTOR_CNTRL1, 0x80); ++ write_phy(dev->phy, QSERDES_RX_SIGDET_ENABLES, 0x40); ++ write_phy(dev->phy, QSERDES_RX_SIGDET_CNTRL, 0x70); ++ write_phy(dev->phy, QSERDES_RX_SIGDET_DEGLITCH_CNTRL, 0x06); ++ write_phy(dev->phy, QSERDES_COM_PLL_RXTXEPCLK_EN, 0x10); ++ write_phy(dev->phy, PCIE_PHY_ENDPOINT_REFCLK_DRIVE, 0x10); ++ write_phy(dev->phy, PCIE_PHY_POWER_STATE_CONFIG1, 0x23); ++ write_phy(dev->phy, PCIE_PHY_POWER_STATE_CONFIG2, 0xCB); ++ write_phy(dev->phy, QSERDES_RX_RX_RCVR_IQ_EN, 0x31); ++ ++ write_phy(dev->phy, PCIE_PHY_SW_RESET, 0x00); ++ write_phy(dev->phy, PCIE_PHY_START, 0x03); ++} ++#elif defined(CONFIG_ARCH_FSM9900) ++void pcie_phy_init(struct msm_pcie_dev_t *dev) ++{ ++ if (dev->ext_ref_clk == false) { ++ pcie20_phy_init_default(dev); ++ return; ++ } ++ 
++ PCIE_DBG(dev, "RC%d: Initializing 28nm ATE phy - 100MHz\n", ++ dev->rc_idx); ++ ++ /* 1 */ ++ write_phy(dev->phy, PCIE_PHY_POWER_DOWN_CONTROL, 0x01); ++ /* 2 */ ++ write_phy(dev->phy, QSERDES_COM_SYS_CLK_CTRL, 0x3e); ++ /* 3 */ ++ write_phy(dev->phy, QSERDES_COM_PLL_CP_SETI, 0x0f); ++ /* 4 */ ++ write_phy(dev->phy, QSERDES_COM_PLL_IP_SETP, 0x23); ++ /* 5 */ ++ write_phy(dev->phy, QSERDES_COM_PLL_IP_SETI, 0x3f); ++ /* 6 */ ++ write_phy(dev->phy, QSERDES_RX_CDR_CONTROL, 0xf3); ++ /* 7 */ ++ write_phy(dev->phy, QSERDES_RX_CDR_CONTROL2, 0x6b); ++ /* 8 */ ++ write_phy(dev->phy, QSERDES_COM_RESETSM_CNTRL, 0x10); ++ /* 9 */ ++ write_phy(dev->phy, QSERDES_RX_RX_TERM_HIGHZ_CM_AC_COUPLE, 0x87); ++ /* 10 */ ++ write_phy(dev->phy, QSERDES_RX_RX_EQ_GAIN12, 0x54); ++ /* 11 */ ++ write_phy(dev->phy, PCIE_PHY_POWER_STATE_CONFIG1, 0xa3); ++ /* 12 */ ++ write_phy(dev->phy, PCIE_PHY_POWER_STATE_CONFIG2, 0x1b); ++ /* 13 */ ++ write_phy(dev->phy, PCIE_PHY_SW_RESET, 0x00); ++ /* 14 */ ++ write_phy(dev->phy, PCIE_PHY_START, 0x03); ++} ++#else ++void pcie_phy_init(struct msm_pcie_dev_t *dev) ++{ ++ if (dev->is_emulation) ++ pcie20_uni_phy_init(dev); ++ else ++ pcie20_phy_init_default(dev); ++} ++ ++#endif ++ ++bool pcie_phy_is_ready(struct msm_pcie_dev_t *dev) ++{ ++ if (!dev->is_emulation && ++ readl_relaxed(dev->phy + PCIE_PHY_PCS_STATUS) & BIT(6)) ++ return false; ++ else ++ return true; ++} ++/** ++ * Write register ++ * ++ * @base - PHY base virtual address. ++ * @offset - register offset. ++ */ ++static u32 qca_uni_phy_read(void __iomem *base, u32 offset) ++{ ++ u32 value; ++ value = readl_relaxed(base + offset); ++ return value; ++} ++ ++/** ++ * Write register ++ * @base - PHY base virtual address. ++ * @offset - register offset. ++ * @val - value to write. 
++ */ ++static void qca_uni_phy_write(void __iomem *base, u32 offset, u32 val) ++{ ++ writel(val, base + offset); ++ udelay(100); ++} ++ ++static int mdio_wait(void __iomem *base) ++{ ++ unsigned int mdio_access; ++ unsigned int timeout = MDIO_TIMEOUT_STATIC; ++ ++ do { ++ mdio_access = qca_uni_phy_read(base, MDIO_CTRL_4_REG); ++ if (!timeout--) ++ return -EFAULT; ++ } while (mdio_access & MDIO_ACCESS_BUSY); ++ ++ return 0; ++} ++ ++static int mdio_mii_read(void __iomem *base, unsigned char regAddr, ++ unsigned short *data) ++{ ++ unsigned short mdio_ctl_0 = (MDIO_PCIE_PHY_ID | MDC_MODE | ++ MDIO_CLAUSE_22 | MDIO_PCIE_CLK_DIV); ++ unsigned int regVal; ++ ++ qca_uni_phy_write(base, MDIO_CTRL_0_REG, mdio_ctl_0); ++ qca_uni_phy_write(base, MDIO_CTRL_1_REG, regAddr); ++ qca_uni_phy_write(base, MDIO_CTRL_4_REG, MDIO_ACCESS_22_READ); ++ qca_uni_phy_write(base, MDIO_CTRL_4_REG, MDIO_ACCESS_22_READ | ++ MDIO_ACCESS_START); ++ ++ /* wait for access busy to be cleared */ ++ if (mdio_wait(base)) { ++ pr_err("%s MDIO Access Busy Timeout %x\n", __func__, regAddr); ++ return -EFAULT; ++ } ++ ++ regVal = qca_uni_phy_read(base, MDIO_CTRL_3_REG); ++ *data = (unsigned short)regVal; ++ return 0; ++} ++ ++static int mdio_mii_write(void __iomem *base, unsigned char regAddr, ++ unsigned short data) ++{ ++ unsigned short mdio_ctl_0 = (MDIO_PCIE_PHY_ID | MDC_MODE | ++ MDIO_CLAUSE_22 | MDIO_PCIE_CLK_DIV); ++ ++ qca_uni_phy_write(base, MDIO_CTRL_0_REG, mdio_ctl_0); ++ qca_uni_phy_write(base, MDIO_CTRL_1_REG, regAddr); ++ qca_uni_phy_write(base, MDIO_CTRL_2_REG, data); ++ qca_uni_phy_write(base, MDIO_CTRL_4_REG, MDIO_ACCESS_22_WRITE); ++ qca_uni_phy_write(base, MDIO_CTRL_4_REG, MDIO_ACCESS_22_WRITE | ++ MDIO_ACCESS_START); ++ ++ /* wait for access busy to be cleared */ ++ if (mdio_wait(base)) { ++ pr_err("%s MDIO Access Busy Timeout %x\n", __func__, regAddr); ++ return -EFAULT; ++ } ++ ++ return 0; ++} ++ ++static int mdio_mmd_read(void __iomem *base, unsigned short regAddr, ++ unsigned 
short *data) ++{ ++ unsigned short mdio_ctl_0 = (MDIO_PCIE_PHY_ID | MDC_MODE | ++ MDIO_CLAUSE_45 | MDIO_PCIE_CLK_DIV); ++ unsigned int regVal; ++ ++ qca_uni_phy_write(base, MDIO_CTRL_0_REG, mdio_ctl_0); ++ qca_uni_phy_write(base, MDIO_CTRL_1_REG, MDIO_MMD_ID); ++ qca_uni_phy_write(base, MDIO_CTRL_2_REG, regAddr); ++ qca_uni_phy_write(base, MDIO_CTRL_4_REG, MDIO_ACCESS_45_READ_ADDR); ++ qca_uni_phy_write(base, MDIO_CTRL_4_REG, MDIO_ACCESS_45_READ_ADDR | ++ MDIO_ACCESS_START); ++ ++ /* wait for access busy to be cleared */ ++ if (mdio_wait(base)) { ++ pr_err("%s MDIO Access Busy Timeout %x\n", __func__, regAddr); ++ return -EFAULT; ++ } ++ ++ qca_uni_phy_write(base, MDIO_CTRL_1_REG, MDIO_MMD_ID); ++ qca_uni_phy_write(base, MDIO_CTRL_2_REG, regAddr); ++ qca_uni_phy_write(base, MDIO_CTRL_4_REG, MDIO_ACCESS_45_WRITE); ++ qca_uni_phy_write(base, MDIO_CTRL_4_REG, MDIO_ACCESS_45_WRITE | ++ MDIO_ACCESS_START); ++ ++ /* wait for access busy to be cleared */ ++ if (mdio_wait(base)) { ++ pr_err("%s MDIO Access Busy Timeout %x\n", __func__, regAddr); ++ return -EFAULT; ++ } ++ ++ regVal = qca_uni_phy_read(base, MDIO_CTRL_3_REG); ++ *data = (unsigned short)regVal; ++ ++ return 0; ++} ++ ++static int mdio_mmd_write(void __iomem *base, unsigned short regAddr, ++ unsigned short data) ++{ ++ ++ unsigned short mdio_ctl_0 = (MDIO_PCIE_PHY_ID | MDC_MODE | ++ MDIO_CLAUSE_45 | MDIO_PCIE_CLK_DIV); ++ ++ qca_uni_phy_write(base, MDIO_CTRL_0_REG, mdio_ctl_0); ++ qca_uni_phy_write(base, MDIO_CTRL_1_REG, MDIO_MMD_ID); ++ qca_uni_phy_write(base, MDIO_CTRL_2_REG, regAddr); ++ qca_uni_phy_write(base, MDIO_CTRL_4_REG, MDIO_ACCESS_45_READ_ADDR); ++ qca_uni_phy_write(base, MDIO_CTRL_4_REG, MDIO_ACCESS_45_READ_ADDR | ++ MDIO_ACCESS_START); ++ ++ /* wait for access busy to be cleared */ ++ if (mdio_wait(base)) { ++ pr_err("%s MDIO Access Busy Timeout %x\n", __func__, regAddr); ++ return -EFAULT; ++ } ++ ++ qca_uni_phy_write(base, MDIO_CTRL_2_REG, data); ++ qca_uni_phy_write(base, MDIO_CTRL_4_REG, 
MDIO_ACCESS_45_READ); ++ qca_uni_phy_write(base, MDIO_CTRL_4_REG, MDIO_ACCESS_45_READ | ++ MDIO_ACCESS_START); ++ ++ qca_uni_phy_write(base, MDIO_CTRL_2_REG, regAddr); ++ qca_uni_phy_write(base, MDIO_CTRL_4_REG, MDIO_ACCESS_45_WRITE); ++ qca_uni_phy_write(base, MDIO_CTRL_4_REG, MDIO_ACCESS_45_WRITE | ++ MDIO_ACCESS_START); ++ ++ /* wait for access busy to be cleared */ ++ if (mdio_wait(base)) { ++ pr_err("%s MDIO Access Busy Timeout %x\n", __func__, regAddr); ++ return -EFAULT; ++ } ++ ++ return 0; ++} ++ ++void pcie20_uni_phy_init(struct msm_pcie_dev_t *dev) ++{ ++ unsigned short data; ++ ++ mdio_mii_write(dev->phy, 0x1, 0x801c); ++ mdio_mii_write(dev->phy, 0xb, 0x300d); ++ ++ mdio_mmd_write(dev->phy, 0x2d, 0x681a); ++ mdio_mmd_write(dev->phy, 0x7d, 0x8); ++ mdio_mmd_write(dev->phy, 0x7f, 0x5ed5); ++ mdio_mmd_write(dev->phy, 0x87, 0xaa0a); ++ mdio_mmd_write(dev->phy, 0x4, 0x0802); ++ mdio_mmd_write(dev->phy, 0x8, 0x0280); ++ mdio_mmd_write(dev->phy, 0x9, 0x8854); ++ mdio_mmd_write(dev->phy, 0xa, 0x2815); ++ mdio_mmd_write(dev->phy, 0xb, 0x0120); ++ mdio_mmd_write(dev->phy, 0xc, 0x0480); ++ mdio_mmd_write(dev->phy, 0x13, 0x8000); ++ ++ mdio_mmd_read(dev->phy, 0x7e, &data); ++ ++ mdio_mii_read(dev->phy, 0x7, &data); ++} ++ ++bool pcie_phy_detect(struct msm_pcie_dev_t *dev) ++{ ++ unsigned short data; ++ ++ mdio_mii_read(dev->phy, 0x0, &data); ++ ++ if (data == 0x7f) { ++ pr_info("PCIe UNI PHY detected\n"); ++ return true; ++ } else { ++ return false; ++ } ++} +diff --git a/arch/arm/mach-qcom/pcie_phy.h b/arch/arm/mach-qcom/pcie_phy.h +new file mode 100644 +index 0000000..99297c3 +--- /dev/null ++++ b/arch/arm/mach-qcom/pcie_phy.h +@@ -0,0 +1,545 @@ ++/* Copyright (c) 2013-2014, The Linux Foundation. All rights reserved. ++ * ++ * This program is free software; you can redistribute it and/or modify ++ * it under the terms of the GNU General Public License version 2 and ++ * only version 2 as published by the Free Software Foundation. 
++ * ++ * This program is distributed in the hope that it will be useful, ++ * but WITHOUT ANY WARRANTY; without even the implied warranty of ++ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the ++ * GNU General Public License for more details. ++ */ ++ ++#ifndef __ARCH_ARM_MACH_MSM_PCIE_PHY_H ++#define __ARCH_ARM_MACH_MSM_PCIE_PHY_H ++ ++#ifdef CONFIG_ARCH_MDM9630 ++#define QSERDES_COM_SYS_CLK_CTRL 0x000 ++#define QSERDES_COM_PLL_VCOTAIL_EN 0x004 ++#define QSERDES_COM_CMN_MODE 0x008 ++#define QSERDES_COM_IE_TRIM 0x00C ++#define QSERDES_COM_IP_TRIM 0x010 ++#define QSERDES_COM_PLL_CNTRL 0x014 ++#define QSERDES_COM_PLL_PHSEL_CONTROL 0x018 ++#define QSERDES_COM_IPTAT_TRIM_VCCA_TX_SEL 0x01C ++#define QSERDES_COM_PLL_PHSEL_DC 0x020 ++#define QSERDES_COM_PLL_IP_SETI 0x024 ++#define QSERDES_COM_CORE_CLK_IN_SYNC_SEL 0x028 ++#define QSERDES_COM_PLL_BKG_KVCO_CAL_EN 0x02C ++#define QSERDES_COM_BIAS_EN_CLKBUFLR_EN 0x030 ++#define QSERDES_COM_PLL_CP_SETI 0x034 ++#define QSERDES_COM_PLL_IP_SETP 0x038 ++#define QSERDES_COM_PLL_CP_SETP 0x03C ++#define QSERDES_COM_ATB_SEL1 0x040 ++#define QSERDES_COM_ATB_SEL2 0x044 ++#define QSERDES_COM_SYSCLK_EN_SEL_TXBAND 0x048 ++#define QSERDES_COM_RESETSM_CNTRL 0x04C ++#define QSERDES_COM_RESETSM_CNTRL2 0x050 ++#define QSERDES_COM_RESETSM_CNTRL3 0x054 ++#define QSERDES_COM_DIV_REF1 0x058 ++#define QSERDES_COM_DIV_REF2 0x05C ++#define QSERDES_COM_KVCO_COUNT1 0x060 ++#define QSERDES_COM_KVCO_COUNT2 0x064 ++#define QSERDES_COM_KVCO_CAL_CNTRL 0x068 ++#define QSERDES_COM_KVCO_CODE 0x06C ++#define QSERDES_COM_VREF_CFG1 0x070 ++#define QSERDES_COM_VREF_CFG2 0x074 ++#define QSERDES_COM_VREF_CFG3 0x078 ++#define QSERDES_COM_VREF_CFG4 0x07C ++#define QSERDES_COM_VREF_CFG5 0x080 ++#define QSERDES_COM_VREF_CFG6 0x084 ++#define QSERDES_COM_PLLLOCK_CMP1 0x088 ++#define QSERDES_COM_PLLLOCK_CMP2 0x08C ++#define QSERDES_COM_PLLLOCK_CMP3 0x090 ++#define QSERDES_COM_PLLLOCK_CMP_EN 0x094 ++#define QSERDES_COM_BGTC 0x098 ++#define 
QSERDES_COM_PLL_TEST_UPDN 0x09C ++#define QSERDES_COM_PLL_VCO_TUNE 0x0A0 ++#define QSERDES_COM_DEC_START1 0x0A4 ++#define QSERDES_COM_PLL_AMP_OS 0x0A8 ++#define QSERDES_COM_SSC_EN_CENTER 0x0AC ++#define QSERDES_COM_SSC_ADJ_PER1 0x0B0 ++#define QSERDES_COM_SSC_ADJ_PER2 0x0B4 ++#define QSERDES_COM_SSC_PER1 0x0B8 ++#define QSERDES_COM_SSC_PER2 0x0BC ++#define QSERDES_COM_SSC_STEP_SIZE1 0x0C0 ++#define QSERDES_COM_SSC_STEP_SIZE2 0x0C4 ++#define QSERDES_COM_RES_CODE_UP 0x0C8 ++#define QSERDES_COM_RES_CODE_DN 0x0CC ++#define QSERDES_COM_RES_CODE_UP_OFFSET 0x0D0 ++#define QSERDES_COM_RES_CODE_DN_OFFSET 0x0D4 ++#define QSERDES_COM_RES_CODE_START_SEG1 0x0D8 ++#define QSERDES_COM_RES_CODE_START_SEG2 0x0DC ++#define QSERDES_COM_RES_CODE_CAL_CSR 0x0E0 ++#define QSERDES_COM_RES_CODE 0x0E4 ++#define QSERDES_COM_RES_TRIM_CONTROL 0x0E8 ++#define QSERDES_COM_RES_TRIM_CONTROL2 0x0EC ++#define QSERDES_COM_RES_TRIM_EN_VCOCALDONE 0x0F0 ++#define QSERDES_COM_FAUX_EN 0x0F4 ++#define QSERDES_COM_DIV_FRAC_START1 0x0F8 ++#define QSERDES_COM_DIV_FRAC_START2 0x0FC ++#define QSERDES_COM_DIV_FRAC_START3 0x100 ++#define QSERDES_COM_DEC_START2 0x104 ++#define QSERDES_COM_PLL_RXTXEPCLK_EN 0x108 ++#define QSERDES_COM_PLL_CRCTRL 0x10C ++#define QSERDES_COM_PLL_CLKEPDIV 0x110 ++#define QSERDES_COM_PLL_FREQUPDATE 0x114 ++#define QSERDES_COM_PLL_BKGCAL_TRIM_UP 0x118 ++#define QSERDES_COM_PLL_BKGCAL_TRIM_DN 0x11C ++#define QSERDES_COM_PLL_BKGCAL_TRIM_MUX 0x120 ++#define QSERDES_COM_PLL_BKGCAL_VREF_CFG 0x124 ++#define QSERDES_COM_PLL_BKGCAL_DIV_REF1 0x128 ++#define QSERDES_COM_PLL_BKGCAL_DIV_REF2 0x12C ++#define QSERDES_COM_MUXADDR 0x130 ++#define QSERDES_COM_LOW_POWER_RO_CONTROL 0x134 ++#define QSERDES_COM_POST_DIVIDER_CONTROL 0x138 ++#define QSERDES_COM_HR_OCLK2_DIVIDER 0x13C ++#define QSERDES_COM_HR_OCLK3_DIVIDER 0x140 ++#define QSERDES_COM_PLL_VCO_HIGH 0x144 ++#define QSERDES_COM_RESET_SM 0x148 ++#define QSERDES_COM_MUXVAL 0x14C ++ ++#define QSERDES_RX_CDR_CONTROL1 0x400 ++#define 
QSERDES_RX_CDR_CONTROL2 0x404 ++#define QSERDES_RX_CDR_CONTROL_HALF 0x408 ++#define QSERDES_RX_CDR_CONTROL_QUARTER 0x40C ++#define QSERDES_RX_CDR_CONTROL_EIGHTH 0x410 ++#define QSERDES_RX_UCDR_FO_GAIN 0x414 ++#define QSERDES_RX_UCDR_SO_GAIN 0x418 ++#define QSERDES_RX_UCDR_SO_SATURATION_AND_ENABLE 0x41C ++#define QSERDES_RX_UCDR_FO_TO_SO_DELAY 0x420 ++#define QSERDES_RX_AUX_CONTROL 0x424 ++#define QSERDES_RX_AUX_DATA_TCOARSE 0x428 ++#define QSERDES_RX_AUX_DATA_TFINE_LSB 0x42C ++#define QSERDES_RX_AUX_DATA_TFINE_MSB 0x430 ++#define QSERDES_RX_RCLK_AUXDATA_SEL 0x434 ++#define QSERDES_RX_AC_JTAG_ENABLE 0x438 ++#define QSERDES_RX_AC_JTAG_INITP 0x43C ++#define QSERDES_RX_AC_JTAG_INITN 0x440 ++#define QSERDES_RX_AC_JTAG_LVL 0x444 ++#define QSERDES_RX_AC_JTAG_MODE 0x448 ++#define QSERDES_RX_AC_JTAG_RESET 0x44C ++#define QSERDES_RX_RX_RCVR_IQ_EN 0x450 ++#define QSERDES_RX_RX_IDAC_I_DC_OFFSETS 0x454 ++#define QSERDES_RX_RX_IDAC_Q_DC_OFFSETS 0x458 ++#define QSERDES_RX_RX_IDAC_A_DC_OFFSETS 0x45C ++#define QSERDES_RX_RX_IDAC_EN 0x460 ++#define QSERDES_RX_RX_IDAC_CTRL0 0x464 ++#define QSERDES_RX_RX_IDAC_CTRL1 0x468 ++#define QSERDES_RX_RX_EOM_EN 0x46C ++#define QSERDES_RX_RX_EOM_CTRL0 0x470 ++#define QSERDES_RX_RX_EOM_CTRL1 0x474 ++#define QSERDES_RX_RX_EOM_CTRL2 0x478 ++#define QSERDES_RX_RX_EOM_CTRL3 0x47C ++#define QSERDES_RX_RX_EOM_CTRL4 0x480 ++#define QSERDES_RX_RX_EOM_CTRL5 0x484 ++#define QSERDES_RX_RX_EOM_CTRL6 0x488 ++#define QSERDES_RX_RX_EOM_CTRL7 0x48C ++#define QSERDES_RX_RX_EOM_CTRL8 0x490 ++#define QSERDES_RX_RX_EOM_CTRL9 0x494 ++#define QSERDES_RX_RX_EOM_CTRL10 0x498 ++#define QSERDES_RX_RX_EOM_CTRL11 0x49C ++#define QSERDES_RX_RX_HIGHZ_HIGHRATE 0x4A0 ++#define QSERDES_RX_RX_TERM_AC_BYPASS_DC_COUPLE_OFFSET 0x4A4 ++#define QSERDES_RX_RX_EQ_GAIN1_LSB 0x4A8 ++#define QSERDES_RX_RX_EQ_GAIN1_MSB 0x4AC ++#define QSERDES_RX_RX_EQ_GAIN2_LSB 0x4B0 ++#define QSERDES_RX_RX_EQ_GAIN2_MSB 0x4B4 ++#define QSERDES_RX_RX_EQU_ADAPTOR_CNTRL1 0x4B8 ++#define 
QSERDES_RX_RX_EQU_ADAPTOR_CNTRL2 0x4BC ++#define QSERDES_RX_RX_EQU_ADAPTOR_CNTRL3 0x4C0 ++#define QSERDES_RX_RX_EQU_ADAPTOR_CNTRL4 0x4C4 ++#define QSERDES_RX_RX_IDAC_CAL_CONFIGURATION 0x4C8 ++#define QSERDES_RX_RX_IDAC_CAL_CONFIGURATION_2 0x4CC ++#define QSERDES_RX_RX_IDAC_TSETTLE_LOW 0x4D0 ++#define QSERDES_RX_RX_IDAC_TSETTLE_HIGH 0x4D4 ++#define QSERDES_RX_RX_IDAC_ENDSAMP_LOW 0x4D8 ++#define QSERDES_RX_RX_IDAC_ENDSAMP_HIGH 0x4DC ++#define QSERDES_RX_RX_IDAC_MIDPOINT_LOW 0x4E0 ++#define QSERDES_RX_RX_IDAC_MIDPOINT_HIGH 0x4E4 ++#define QSERDES_RX_RX_EQ_OFFSET_LSB 0x4E8 ++#define QSERDES_RX_RX_EQ_OFFSET_MSB 0x4EC ++#define QSERDES_RX_RX_EQ_OFFSET_ADAPTOR_CNTRL1 0x4F0 ++#define QSERDES_RX_RX_OFFSET_ADAPTOR_CNTRL2 0x4F4 ++#define QSERDES_RX_SIGDET_ENABLES 0x4F8 ++#define QSERDES_RX_SIGDET_ENABLES_2 0x4FC ++#define QSERDES_RX_SIGDET_CNTRL 0x500 ++#define QSERDES_RX_SIGDET_DEGLITCH_CNTRL 0x504 ++#define QSERDES_RX_SIGDET_TIMER_LIMIT 0x508 ++#define QSERDES_RX_RX_BAND 0x50C ++#define QSERDES_RX_CDR_FREEZE_UP_DN 0x510 ++#define QSERDES_RX_RX_INTERFACE_MODE 0x514 ++#define QSERDES_RX_JITTER_GEN_MODE 0x518 ++#define QSERDES_RX_BUJ_AMP 0x51C ++#define QSERDES_RX_SJ_AMP1 0x520 ++#define QSERDES_RX_SJ_AMP2 0x524 ++#define QSERDES_RX_SJ_PER1 0x528 ++#define QSERDES_RX_SJ_PER2 0x52C ++#define QSERDES_RX_BUJ_STEP_FREQ1 0x530 ++#define QSERDES_RX_BUJ_STEP_FREQ2 0x534 ++#define QSERDES_RX_PPM_OFFSET1 0x538 ++#define QSERDES_RX_PPM_OFFSET2 0x53C ++#define QSERDES_RX_SIGN_PPM_PERIOD1 0x540 ++#define QSERDES_RX_SIGN_PPM_PERIOD2 0x544 ++#define QSERDES_RX_SSC_CTRL 0x548 ++#define QSERDES_RX_SSC_COUNT1 0x54C ++#define QSERDES_RX_SSC_COUNT2 0x550 ++#define QSERDES_RX_RX_ALOG_INTF_OBSV_CNTL 0x554 ++#define QSERDES_RX_PI_CTRL1 0x558 ++#define QSERDES_RX_PI_CTRL2 0x55C ++#define QSERDES_RX_PI_QUAD 0x560 ++#define QSERDES_RX_IDATA1 0x564 ++#define QSERDES_RX_IDATA2 0x568 ++#define QSERDES_RX_AUX_DATA1 0x56C ++#define QSERDES_RX_AUX_DATA2 0x570 ++#define QSERDES_RX_AC_JTAG_OUTP 0x574 
++#define QSERDES_RX_AC_JTAG_OUTN 0x578 ++#define QSERDES_RX_RX_SIGDET 0x57C ++#define QSERDES_RX_RX_VDCOFF 0x580 ++#define QSERDES_RX_IDAC_CAL_ON 0x584 ++#define QSERDES_RX_IDAC_STATUS_I 0x588 ++#define QSERDES_RX_IDAC_STATUS_Q 0x58C ++#define QSERDES_RX_IDAC_STATUS_A 0x590 ++#define QSERDES_RX_CALST_STATUS_I 0x594 ++#define QSERDES_RX_CALST_STATUS_Q 0x598 ++#define QSERDES_RX_CALST_STATUS_A 0x59C ++#define QSERDES_RX_EOM_STATUS0 0x5A0 ++#define QSERDES_RX_EOM_STATUS1 0x5A4 ++#define QSERDES_RX_EOM_STATUS2 0x5A8 ++#define QSERDES_RX_EOM_STATUS3 0x5AC ++#define QSERDES_RX_EOM_STATUS4 0x5B0 ++#define QSERDES_RX_EOM_STATUS5 0x5B4 ++#define QSERDES_RX_EOM_STATUS6 0x5B8 ++#define QSERDES_RX_EOM_STATUS7 0x5BC ++#define QSERDES_RX_EOM_STATUS8 0x5C0 ++#define QSERDES_RX_EOM_STATUS9 0x5C4 ++#define QSERDES_RX_RX_ALOG_INTF_OBSV 0x5C8 ++#define QSERDES_RX_READ_EQCODE 0x5CC ++#define QSERDES_RX_READ_OFFSETCODE 0x5D0 ++ ++#define QSERDES_TX_BIST_MODE_LANENO 0x200 ++#define QSERDES_TX_CLKBUF_ENABLE 0x204 ++#define QSERDES_TX_TX_EMP_POST1_LVL 0x208 ++#define QSERDES_TX_TX_DRV_LVL 0x20C ++#define QSERDES_TX_RESET_TSYNC_EN 0x210 ++#define QSERDES_TX_LPB_EN 0x214 ++#define QSERDES_TX_RES_CODE_UP 0x218 ++#define QSERDES_TX_RES_CODE_DN 0x21C ++#define QSERDES_TX_PERL_LENGTH1 0x220 ++#define QSERDES_TX_PERL_LENGTH2 0x224 ++#define QSERDES_TX_SERDES_BYP_EN_OUT 0x228 ++#define QSERDES_TX_HIGHZ_TRANSCEIVEREN_BIAS_DRVR_EN 0x22C ++#define QSERDES_TX_PARRATE_REC_DETECT_IDLE_EN 0x230 ++#define QSERDES_TX_BIST_PATTERN1 0x234 ++#define QSERDES_TX_BIST_PATTERN2 0x238 ++#define QSERDES_TX_BIST_PATTERN3 0x23C ++#define QSERDES_TX_BIST_PATTERN4 0x240 ++#define QSERDES_TX_BIST_PATTERN5 0x244 ++#define QSERDES_TX_BIST_PATTERN6 0x248 ++#define QSERDES_TX_BIST_PATTERN7 0x24C ++#define QSERDES_TX_BIST_PATTERN8 0x250 ++#define QSERDES_TX_LANE_MODE 0x254 ++#define QSERDES_TX_IDAC_CAL_LANE_MODE 0x258 ++#define QSERDES_TX_IDAC_CAL_LANE_MODE_CONFIGURATION 0x25C ++#define QSERDES_TX_ATB_SEL1 0x260 ++#define 
QSERDES_TX_ATB_SEL2 0x264 ++#define QSERDES_TX_RCV_DETECT_LVL 0x268 ++#define QSERDES_TX_PRBS_SEED1 0x26C ++#define QSERDES_TX_PRBS_SEED2 0x270 ++#define QSERDES_TX_PRBS_SEED3 0x274 ++#define QSERDES_TX_PRBS_SEED4 0x278 ++#define QSERDES_TX_RESET_GEN 0x27C ++#define QSERDES_TX_TRAN_DRVR_EMP_EN 0x280 ++#define QSERDES_TX_TX_INTERFACE_MODE 0x284 ++#define QSERDES_TX_PWM_CTRL 0x288 ++#define QSERDES_TX_PWM_DATA 0x28C ++#define QSERDES_TX_PWM_ENC_DIV_CTRL 0x290 ++#define QSERDES_TX_VMODE_CTRL1 0x294 ++#define QSERDES_TX_VMODE_CTRL2 0x298 ++#define QSERDES_TX_VMODE_CTRL3 0x29C ++#define QSERDES_TX_VMODE_CTRL4 0x2A0 ++#define QSERDES_TX_VMODE_CTRL5 0x2A4 ++#define QSERDES_TX_VMODE_CTRL6 0x2A8 ++#define QSERDES_TX_VMODE_CTRL7 0x2AC ++#define QSERDES_TX_TX_ALOG_INTF_OBSV_CNTL 0x2B0 ++#define QSERDES_TX_BIST_STATUS 0x2B4 ++#define QSERDES_TX_BIST_ERROR_COUNT1 0x2B8 ++#define QSERDES_TX_BIST_ERROR_COUNT2 0x2BC ++#define QSERDES_TX_TX_ALOG_INTF_OBSV 0x2C0 ++#define QSERDES_TX_PWM_DEC_STATUS 0x2C4 ++ ++#define PCIE_PHY_SW_RESET 0x600 ++#define PCIE_PHY_POWER_DOWN_CONTROL 0x604 ++#define PCIE_PHY_START 0x608 ++#define PCIE_PHY_TXMGN_V1_V0 0x60C ++#define PCIE_PHY_TXMGN_V3_V2 0x610 ++#define PCIE_PHY_TXMGN_LS_V4 0x614 ++#define PCIE_PHY_TXDEEMPH_M6DB_V0 0x618 ++#define PCIE_PHY_TXDEEMPH_M3P5DB_V0 0x61C ++#define PCIE_PHY_TXDEEMPH_M6DB_V1 0x620 ++#define PCIE_PHY_TXDEEMPH_M3P5DB_V1 0x624 ++#define PCIE_PHY_TXDEEMPH_M6DB_V2 0x628 ++#define PCIE_PHY_TXDEEMPH_M3P5DB_V2 0x62C ++#define PCIE_PHY_TXDEEMPH_M6DB_V3 0x630 ++#define PCIE_PHY_TXDEEMPH_M3P5DB_V3 0x634 ++#define PCIE_PHY_TXDEEMPH_M6DB_V4 0x638 ++#define PCIE_PHY_TXDEEMPH_M3P5DB_V4 0x63C ++#define PCIE_PHY_TXDEEMPH_M6DB_LS 0x640 ++#define PCIE_PHY_TXDEEMPH_M3P5DB_LS 0x644 ++#define PCIE_PHY_ENDPOINT_REFCLK_DRIVE 0x648 ++#define PCIE_PHY_RX_IDLE_DTCT_CNTRL 0x64C ++#define PCIE_PHY_POWER_STATE_CONFIG1 0x650 ++#define PCIE_PHY_POWER_STATE_CONFIG2 0x654 ++#define PCIE_PHY_POWER_STATE_CONFIG3 0x658 ++#define 
PCIE_PHY_RCVR_DTCT_DLY_P1U2_L 0x65C ++#define PCIE_PHY_RCVR_DTCT_DLY_P1U2_H 0x660 ++#define PCIE_PHY_RCVR_DTCT_DLY_U3_L 0x664 ++#define PCIE_PHY_RCVR_DTCT_DLY_U3_H 0x668 ++#define PCIE_PHY_LOCK_DETECT_CONFIG1 0x66C ++#define PCIE_PHY_LOCK_DETECT_CONFIG2 0x670 ++#define PCIE_PHY_LOCK_DETECT_CONFIG3 0x674 ++#define PCIE_PHY_TSYNC_RSYNC_TIME 0x678 ++#define PCIE_PHY_SIGDET_LOW_2_IDLE_TIME 0x67C ++#define PCIE_PHY_BEACON_2_IDLE_TIME_L 0x680 ++#define PCIE_PHY_BEACON_2_IDLE_TIME_H 0x684 ++#define PCIE_PHY_PWRUP_RESET_DLY_TIME_SYSCLK 0x688 ++#define PCIE_PHY_PWRUP_RESET_DLY_TIME_AUXCLK 0x68C ++#define PCIE_PHY_LFPS_DET_HIGH_COUNT_VAL 0x690 ++#define PCIE_PHY_LFPS_TX_ECSTART_EQTLOCK 0x694 ++#define PCIE_PHY_LFPS_TX_END_CNT_P2U3_START 0x698 ++#define PCIE_PHY_RXEQTRAINING_WAIT_TIME 0x69C ++#define PCIE_PHY_RXEQTRAINING_RUN_TIME 0x6A0 ++#define PCIE_PHY_TXONESZEROS_RUN_LENGTH 0x6A4 ++#define PCIE_PHY_FLL_CNTRL1 0x6A8 ++#define PCIE_PHY_FLL_CNTRL2 0x6AC ++#define PCIE_PHY_FLL_CNT_VAL_L 0x6B0 ++#define PCIE_PHY_FLL_CNT_VAL_H_TOL 0x6B4 ++#define PCIE_PHY_FLL_MAN_CODE 0x6B8 ++#define PCIE_PHY_AUTONOMOUS_MODE_CTRL 0x6BC ++#define PCIE_PHY_LFPS_RXTERM_IRQ_CLEAR 0x6C0 ++#define PCIE_PHY_ARCVR_DTCT_EN_PERIOD 0x6C4 ++#define PCIE_PHY_ARCVR_DTCT_CM_DLY 0x6C8 ++#define PCIE_PHY_ALFPS_DEGLITCH_VAL 0x6CC ++#define PCIE_PHY_INSIG_SW_CTRL1 0x6D0 ++#define PCIE_PHY_INSIG_SW_CTRL2 0x6D4 ++#define PCIE_PHY_INSIG_SW_CTRL3 0x6D8 ++#define PCIE_PHY_INSIG_MX_CTRL1 0x6DC ++#define PCIE_PHY_INSIG_MX_CTRL2 0x6E0 ++#define PCIE_PHY_INSIG_MX_CTRL3 0x6E4 ++#define PCIE_PHY_TEST_CONTROL 0x6E8 ++#define PCIE_PHY_BIST_CTRL 0x6EC ++#define PCIE_PHY_PRBS_POLY0 0x6F0 ++#define PCIE_PHY_PRBS_POLY1 0x6F4 ++#define PCIE_PHY_PRBS_SEED0 0x6F8 ++#define PCIE_PHY_PRBS_SEED1 0x6FC ++#define PCIE_PHY_FIXED_PAT_CTRL 0x700 ++#define PCIE_PHY_FIXED_PAT0 0x704 ++#define PCIE_PHY_FIXED_PAT1 0x708 ++#define PCIE_PHY_FIXED_PAT2 0x70C ++#define PCIE_PHY_FIXED_PAT3 0x710 ++#define PCIE_PHY_SPARE1 0x714 ++#define 
PCIE_PHY_BIST_CHK_ERR_CNT_L 0x718 ++#define PCIE_PHY_BIST_CHK_ERR_CNT_H 0x71C ++#define PCIE_PHY_BIST_CHK_STATUS 0x720 ++#define PCIE_PHY_LFPS_RXTERM_IRQ_SOURCE 0x724 ++#define PCIE_PHY_PCS_STATUS 0x728 ++#define PCIE_PHY_PCS_STATUS2 0x72C ++#define PCIE_PHY_REVISION_ID0 0x730 ++#define PCIE_PHY_REVISION_ID1 0x734 ++#define PCIE_PHY_REVISION_ID2 0x738 ++#define PCIE_PHY_REVISION_ID3 0x73C ++#define PCIE_PHY_DEBUG_BUS_0_STATUS 0x740 ++#define PCIE_PHY_DEBUG_BUS_1_STATUS 0x744 ++#define PCIE_PHY_DEBUG_BUS_2_STATUS 0x748 ++#define PCIE_PHY_DEBUG_BUS_3_STATUS 0x74C ++ ++#else ++ ++#define QSERDES_COM_SYS_CLK_CTRL 0x000 ++#define QSERDES_COM_PLL_VCOTAIL_EN 0x004 ++#define QSERDES_COM_CMN_MODE 0x008 ++#define QSERDES_COM_IE_TRIM 0x00c ++#define QSERDES_COM_IP_TRIM 0x010 ++#define QSERDES_COM_PLL_CNTRL 0x014 ++#define QSERDES_COM_PLL_IP_SETI 0x018 ++#define QSERDES_COM_CORE_CLK_IN_SYNC_SEL 0x01c ++#define QSERDES_COM_BIAS_EN_CLKBUFLR_EN 0x020 ++#define QSERDES_COM_PLL_CP_SETI 0x024 ++#define QSERDES_COM_PLL_IP_SETP 0x028 ++#define QSERDES_COM_PLL_CP_SETP 0x02c ++#define QSERDES_COM_ATB_SEL1 0x030 ++#define QSERDES_COM_ATB_SEL2 0x034 ++#define QSERDES_COM_SYSCLK_EN_SEL 0x038 ++#define QSERDES_COM_RES_CODE_TXBAND 0x03c ++#define QSERDES_COM_RESETSM_CNTRL 0x040 ++#define QSERDES_COM_PLLLOCK_CMP1 0x044 ++#define QSERDES_COM_PLLLOCK_CMP2 0x048 ++#define QSERDES_COM_PLLLOCK_CMP3 0x04c ++#define QSERDES_COM_PLLLOCK_CMP_EN 0x050 ++#define QSERDES_COM_RES_TRIM_OFFSET 0x054 ++#define QSERDES_COM_BGTC 0x058 ++#define QSERDES_COM_PLL_TEST_UPDN_RESTRIMSTEP 0x05c ++#define QSERDES_COM_PLL_VCO_TUNE 0x060 ++#define QSERDES_COM_DEC_START1 0x064 ++#define QSERDES_COM_PLL_AMP_OS 0x068 ++#define QSERDES_COM_SSC_EN_CENTER 0x06c ++#define QSERDES_COM_SSC_ADJ_PER1 0x070 ++#define QSERDES_COM_SSC_ADJ_PER2 0x074 ++#define QSERDES_COM_SSC_PER1 0x078 ++#define QSERDES_COM_SSC_PER2 0x07c ++#define QSERDES_COM_SSC_STEP_SIZE1 0x080 ++#define QSERDES_COM_SSC_STEP_SIZE2 0x084 ++#define 
QSERDES_COM_RES_TRIM_SEARCH 0x088 ++#define QSERDES_COM_RES_TRIM_FREEZE 0x08c ++#define QSERDES_COM_RES_TRIM_EN_VCOCALDONE 0x090 ++#define QSERDES_COM_FAUX_EN 0x094 ++#define QSERDES_COM_DIV_FRAC_START1 0x098 ++#define QSERDES_COM_DIV_FRAC_START2 0x09c ++#define QSERDES_COM_DIV_FRAC_START3 0x0a0 ++#define QSERDES_COM_DEC_START2 0x0a4 ++#define QSERDES_COM_PLL_RXTXEPCLK_EN 0x0a8 ++#define QSERDES_COM_PLL_CRCTRL 0x0ac ++#define QSERDES_COM_PLL_CLKEPDIV 0x0b0 ++#define QSERDES_COM_PLL_FREQUPDATE 0x0b4 ++#define QSERDES_COM_PLL_VCO_HIGH 0x0b8 ++#define QSERDES_COM_RESET_SM 0x0bc ++#define QSERDES_TX_BIST_MODE_LANENO 0x200 ++#define QSERDES_TX_CLKBUF_ENABLE 0x204 ++#define QSERDES_TX_TX_EMP_POST1_LVL 0x208 ++#define QSERDES_TX_TX_DRV_LVL 0x20c ++#define QSERDES_TX_RESET_TSYNC_EN 0x210 ++#define QSERDES_TX_LPB_EN 0x214 ++#define QSERDES_TX_RES_CODE 0x218 ++#define QSERDES_TX_PERL_LENGTH1 0x21c ++#define QSERDES_TX_PERL_LENGTH2 0x220 ++#define QSERDES_TX_SERDES_BYP_EN_OUT 0x224 ++#define QSERDES_TX_HIGHZ_TRANSCEIVEREN_BIAS_EN 0x228 ++#define QSERDES_TX_PARRATE_REC_DETECT_IDLE_EN 0x22c ++#define QSERDES_TX_BIST_PATTERN1 0x230 ++#define QSERDES_TX_BIST_PATTERN2 0x234 ++#define QSERDES_TX_BIST_PATTERN3 0x238 ++#define QSERDES_TX_BIST_PATTERN4 0x23c ++#define QSERDES_TX_BIST_PATTERN5 0x240 ++#define QSERDES_TX_BIST_PATTERN6 0x244 ++#define QSERDES_TX_BIST_PATTERN7 0x248 ++#define QSERDES_TX_BIST_PATTERN8 0x24c ++#define QSERDES_TX_LANE_MODE 0x250 ++#define QSERDES_TX_ATB_SEL 0x254 ++#define QSERDES_TX_REC_DETECT_LVL 0x258 ++#define QSERDES_TX_PRBS_SEED1 0x25c ++#define QSERDES_TX_PRBS_SEED2 0x260 ++#define QSERDES_TX_PRBS_SEED3 0x264 ++#define QSERDES_TX_PRBS_SEED4 0x268 ++#define QSERDES_TX_RESET_GEN 0x26c ++#define QSERDES_TX_TRAN_DRVR_EMP_EN 0x270 ++#define QSERDES_TX_TX_INTERFACE_MODE 0x274 ++#define QSERDES_TX_BIST_STATUS 0x278 ++#define QSERDES_TX_BIST_ERROR_COUNT1 0x27c ++#define QSERDES_TX_BIST_ERROR_COUNT2 0x280 ++#define QSERDES_RX_CDR_CONTROL 0x400 ++#define 
QSERDES_RX_AUX_CONTROL 0x404 ++#define QSERDES_RX_AUX_DATA_TCODE 0x408 ++#define QSERDES_RX_RCLK_AUXDATA_SEL 0x40c ++#define QSERDES_RX_CDR_CONTROL2 0x410 ++#define QSERDES_RX_AC_JTAG_INITP 0x414 ++#define QSERDES_RX_AC_JTAG_INITN 0x418 ++#define QSERDES_RX_AC_JTAG_LVL_EN 0x41c ++#define QSERDES_RX_AC_JTAG_MODE 0x420 ++#define QSERDES_RX_AC_JTAG_RESET 0x424 ++#define QSERDES_RX_RX_IQ_RXDET_EN 0x428 ++#define QSERDES_RX_RX_TERM_HIGHZ_CM_AC_COUPLE 0x42c ++#define QSERDES_RX_RX_EQ_GAIN12 0x430 ++#define QSERDES_RX_SIGDET_CNTRL 0x434 ++#define QSERDES_RX_RX_BAND 0x438 ++#define QSERDES_RX_CDR_FREEZE_UP_DN 0x43c ++#define QSERDES_RX_RX_INTERFACE_MODE 0x440 ++#define QSERDES_RX_JITTER_GEN_MODE 0x444 ++#define QSERDES_RX_BUJ_AMP 0x448 ++#define QSERDES_RX_SJ_AMP1 0x44c ++#define QSERDES_RX_SJ_AMP2 0x450 ++#define QSERDES_RX_SJ_PER1 0x454 ++#define QSERDES_RX_SJ_PER2 0x458 ++#define QSERDES_RX_BUJ_STEP_FREQ1 0x45c ++#define QSERDES_RX_BUJ_STEP_FREQ2 0x460 ++#define QSERDES_RX_PPM_OFFSET1 0x464 ++#define QSERDES_RX_PPM_OFFSET2 0x468 ++#define QSERDES_RX_SIGN_PPM_PERIOD1 0x46c ++#define QSERDES_RX_SIGN_PPM_PERIOD2 0x470 ++#define QSERDES_RX_SSC_CTRL 0x474 ++#define QSERDES_RX_SSC_COUNT1 0x478 ++#define QSERDES_RX_SSC_COUNT2 0x47c ++#define QSERDES_RX_PWM_CNTRL1 0x480 ++#define QSERDES_RX_PWM_CNTRL2 0x484 ++#define QSERDES_RX_PWM_NDIV 0x488 ++#define QSERDES_RX_PI_CTRL1 0x48c ++#define QSERDES_RX_PI_CTRL2 0x490 ++#define QSERDES_RX_PI_QUAD 0x494 ++#define QSERDES_RX_IDATA1 0x498 ++#define QSERDES_RX_IDATA2 0x49c ++#define QSERDES_RX_AUX_DATA1 0x4a0 ++#define QSERDES_RX_AUX_DATA2 0x4a4 ++#define QSERDES_RX_AC_JTAG_OUTP 0x4a8 ++#define QSERDES_RX_AC_JTAG_OUTN 0x4ac ++#define QSERDES_RX_RX_SIGDET_PWMDECSTATUS 0x4b0 ++#define PCIE_PHY_SW_RESET 0x600 ++#define PCIE_PHY_POWER_DOWN_CONTROL 0x604 ++#define PCIE_PHY_START 0x608 ++#define PCIE_PHY_TXMGN_V1_V0 0x60c ++#define PCIE_PHY_TXMGN_V3_V2 0x610 ++#define PCIE_PHY_TXMGN_LS_V4 0x614 ++#define PCIE_PHY_TXDEEMPH_M6DB_V0 0x618 
++#define PCIE_PHY_TXDEEMPH_M3P5DB_V0 0x61c ++#define PCIE_PHY_TXDEEMPH_M6DB_V1 0x620 ++#define PCIE_PHY_TXDEEMPH_M3P5DB_V1 0x624 ++#define PCIE_PHY_TXDEEMPH_M6DB_V2 0x628 ++#define PCIE_PHY_TXDEEMPH_M3P5DB_V2 0x62c ++#define PCIE_PHY_TXDEEMPH_M6DB_V3 0x630 ++#define PCIE_PHY_TXDEEMPH_M3P5DB_V3 0x634 ++#define PCIE_PHY_TXDEEMPH_M6DB_V4 0x638 ++#define PCIE_PHY_TXDEEMPH_M3P5DB_V4 0x63c ++#define PCIE_PHY_TXDEEMPH_M6DB_LS 0x640 ++#define PCIE_PHY_TXDEEMPH_M3P5DB_LS 0x644 ++#define PCIE_PHY_ENDPOINT_REFCLK_DRIVE 0x648 ++#define PCIE_PHY_RX_IDLE_DTCT_CNTRL 0x64c ++#define PCIE_PHY_POWER_STATE_CONFIG1 0x650 ++#define PCIE_PHY_POWER_STATE_CONFIG2 0x654 ++#define PCIE_PHY_RCVR_DTCT_DLY_L 0x658 ++#define PCIE_PHY_RCVR_DTCT_DLY_H 0x65c ++#define PCIE_PHY_LOCK_DETECT_CONFIG1 0x660 ++#define PCIE_PHY_LOCK_DETECT_CONFIG2 0x664 ++#define PCIE_PHY_TSYNC_RSYNC_TIME 0x668 ++#define PCIE_PHY_SIGDET_LOW_2_IDLE_TIME 0x66c ++#define PCIE_PHY_BEACON_2_IDLE_TIME_L 0x670 ++#define PCIE_PHY_BEACON_2_IDLE_TIME_H 0x674 ++#define PCIE_PHY_PWRUP_RESET_DLY_TIME_SYSCLK 0x678 ++#define PCIE_PHY_PWRUP_RESET_DLY_TIME_AUXCLK 0x67c ++#define PCIE_PHY_INSIG_SW_CTRL1 0x680 ++#define PCIE_PHY_INSIG_SW_CTRL2 0x684 ++#define PCIE_PHY_INSIG_MX_CTRL1 0x688 ++#define PCIE_PHY_INSIG_MX_CTRL2 0x68c ++#define PCIE_PHY_TEST_CONTROL 0x690 ++#define PCIE_PHY_BIST_CTRL 0x694 ++#define PCIE_PHY_PRBS_POLY0 0x698 ++#define PCIE_PHY_PRBS_POLY1 0x69c ++#define PCIE_PHY_PRBS_SEED0 0x6a0 ++#define PCIE_PHY_PRBS_SEED1 0x6a4 ++#define PCIE_PHY_FIXED_PAT_CTRL 0x6a8 ++#define PCIE_PHY_FIXED_PAT0 0x6ac ++#define PCIE_PHY_FIXED_PAT1 0x6b0 ++#define PCIE_PHY_FIXED_PAT2 0x6b4 ++#define PCIE_PHY_FIXED_PAT3 0x6b8 ++#define PCIE_PHY_BIST_CHK_ERR_CNT_L 0x6bc ++#define PCIE_PHY_BIST_CHK_ERR_CNT_H 0x6c0 ++#define PCIE_PHY_BIST_CHK_STATUS 0x6c4 ++#define PCIE_PHY_PCS_STATUS 0x6c8 ++#define PCIE_PHY_REVISION_ID0 0x6cc ++#define PCIE_PHY_REVISION_ID1 0x6d0 ++#define PCIE_PHY_REVISION_ID2 0x6d4 ++#define PCIE_PHY_REVISION_ID3 0x6d8 
++#define PCIE_PHY_DEBUG_BUS_0_STATUS 0x6dc ++#define PCIE_PHY_DEBUG_BUS_1_STATUS 0x6e0 ++#define PCIE_PHY_DEBUG_BUS_2_STATUS 0x6e4 ++#define PCIE_PHY_DEBUG_BUS_3_STATUS 0x6e8 ++#endif ++ ++#endif +-- +2.7.4 +