diff --git a/target/linux/qualcommax/Makefile b/target/linux/qualcommax/Makefile index feafd5a95..5829f1fb5 100644 --- a/target/linux/qualcommax/Makefile +++ b/target/linux/qualcommax/Makefile @@ -6,7 +6,7 @@ BOARDNAME:=Qualcomm Atheros 802.11ax WiSoC-s FEATURES:=squashfs ramdisk fpu nand rtc emmc KERNELNAME:=Image dtbs CPU_TYPE:=cortex-a53 -SUBTARGETS:=ipq60xx ipq807x +SUBTARGETS:=ipq50xx ipq60xx ipq807x KERNEL_PATCHVER:=6.1 diff --git a/target/linux/qualcommax/config-6.6 b/target/linux/qualcommax/config-6.6 new file mode 100644 index 000000000..1d05868ca --- /dev/null +++ b/target/linux/qualcommax/config-6.6 @@ -0,0 +1,582 @@ +CONFIG_64BIT=y +CONFIG_ARCH_BINFMT_ELF_EXTRA_PHDRS=y +CONFIG_ARCH_CORRECT_STACKTRACE_ON_KRETPROBE=y +CONFIG_ARCH_DEFAULT_KEXEC_IMAGE_VERIFY_SIG=y +CONFIG_ARCH_DMA_ADDR_T_64BIT=y +CONFIG_ARCH_FORCE_MAX_ORDER=10 +CONFIG_ARCH_HIBERNATION_POSSIBLE=y +CONFIG_ARCH_KEEP_MEMBLOCK=y +CONFIG_ARCH_MHP_MEMMAP_ON_MEMORY_ENABLE=y +CONFIG_ARCH_MMAP_RND_BITS=18 +CONFIG_ARCH_MMAP_RND_BITS_MAX=24 +CONFIG_ARCH_MMAP_RND_BITS_MIN=18 +CONFIG_ARCH_MMAP_RND_COMPAT_BITS_MIN=11 +CONFIG_ARCH_PROC_KCORE_TEXT=y +CONFIG_ARCH_QCOM=y +CONFIG_ARCH_SPARSEMEM_ENABLE=y +CONFIG_ARCH_STACKWALK=y +CONFIG_ARCH_SUSPEND_POSSIBLE=y +CONFIG_ARCH_WANTS_NO_INSTR=y +CONFIG_ARCH_WANTS_THP_SWAP=y +CONFIG_ARM64=y +CONFIG_ARM64_4K_PAGES=y +CONFIG_ARM64_ERRATUM_1165522=y +CONFIG_ARM64_ERRATUM_1286807=y +CONFIG_ARM64_ERRATUM_2051678=y +CONFIG_ARM64_ERRATUM_2054223=y +CONFIG_ARM64_ERRATUM_2067961=y +CONFIG_ARM64_ERRATUM_2077057=y +CONFIG_ARM64_ERRATUM_2658417=y +CONFIG_ARM64_LD_HAS_FIX_ERRATUM_843419=y +CONFIG_ARM64_PAGE_SHIFT=12 +CONFIG_ARM64_PA_BITS=48 +CONFIG_ARM64_PA_BITS_48=y +CONFIG_ARM64_PTR_AUTH=y +CONFIG_ARM64_PTR_AUTH_KERNEL=y +CONFIG_ARM64_SME=y +CONFIG_ARM64_SVE=y +CONFIG_ARM64_TAGGED_ADDR_ABI=y +CONFIG_ARM64_VA_BITS=39 +CONFIG_ARM64_VA_BITS_39=y +CONFIG_ARM64_WORKAROUND_REPEAT_TLBI=y +CONFIG_ARM64_WORKAROUND_SPECULATIVE_AT=y +CONFIG_ARM64_WORKAROUND_TSB_FLUSH_FAILURE=y +CONFIG_ARM_AMBA=y +CONFIG_ARM_ARCH_TIMER=y +CONFIG_ARM_ARCH_TIMER_EVTSTREAM=y +CONFIG_ARM_GIC=y +CONFIG_ARM_GIC_V2M=y +CONFIG_ARM_GIC_V3=y +CONFIG_ARM_GIC_V3_ITS=y +CONFIG_ARM_GIC_V3_ITS_PCI=y +# CONFIG_ARM_MHU_V2 is not set +CONFIG_ARM_PSCI_CPUIDLE=y +CONFIG_ARM_PSCI_FW=y +# CONFIG_ARM_QCOM_CPUFREQ_HW is not set +CONFIG_ARM_QCOM_CPUFREQ_NVMEM=y +CONFIG_AT803X_PHY=y +CONFIG_AUDIT_ARCH_COMPAT_GENERIC=y +CONFIG_BLK_DEV_LOOP=y +CONFIG_BLK_DEV_SD=y +CONFIG_BLK_MQ_PCI=y +CONFIG_BLK_MQ_VIRTIO=y +CONFIG_BLK_PM=y +CONFIG_BUILTIN_RETURN_ADDRESS_STRIPS_PAC=y +CONFIG_CAVIUM_TX2_ERRATUM_219=y +CONFIG_CC_HAVE_SHADOW_CALL_STACK=y +CONFIG_CC_HAVE_STACKPROTECTOR_SYSREG=y +CONFIG_CC_IMPLICIT_FALLTHROUGH="-Wimplicit-fallthrough=5" +CONFIG_CC_NO_ARRAY_BOUNDS=y +CONFIG_CLONE_BACKWARDS=y +CONFIG_COMMON_CLK=y +CONFIG_COMMON_CLK_QCOM=y +CONFIG_COMPACT_UNEVICTABLE_DEFAULT=1 +# CONFIG_COMPAT_32BIT_TIME is not set +CONFIG_CONTEXT_TRACKING=y +CONFIG_CONTEXT_TRACKING_IDLE=y +CONFIG_COREDUMP=y +CONFIG_CPUFREQ_DT=y +CONFIG_CPUFREQ_DT_PLATDEV=y +CONFIG_CPU_FREQ=y +# CONFIG_CPU_FREQ_DEFAULT_GOV_PERFORMANCE is not set +CONFIG_CPU_FREQ_DEFAULT_GOV_SCHEDUTIL=y +CONFIG_CPU_FREQ_GOV_ATTR_SET=y +# CONFIG_CPU_FREQ_GOV_CONSERVATIVE is not set +# CONFIG_CPU_FREQ_GOV_ONDEMAND is not set +CONFIG_CPU_FREQ_GOV_PERFORMANCE=y +# CONFIG_CPU_FREQ_GOV_POWERSAVE is not set +CONFIG_CPU_FREQ_GOV_SCHEDUTIL=y +# CONFIG_CPU_FREQ_GOV_USERSPACE is not set +CONFIG_CPU_FREQ_STAT=y +CONFIG_CPU_FREQ_THERMAL=y +CONFIG_CPU_IDLE=y +CONFIG_CPU_IDLE_GOV_MENU=y +CONFIG_CPU_IDLE_MULTIPLE_DRIVERS=y 
+CONFIG_CPU_LITTLE_ENDIAN=y +CONFIG_CPU_PM=y +CONFIG_CPU_RMAP=y +CONFIG_CPU_THERMAL=y +CONFIG_CRC16=y +CONFIG_CRC8=y +CONFIG_CRYPTO_AUTHENC=y +CONFIG_CRYPTO_CBC=y +CONFIG_CRYPTO_DEFLATE=y +CONFIG_CRYPTO_DEV_QCE=y +CONFIG_CRYPTO_DEV_QCE_AEAD=y +# CONFIG_CRYPTO_DEV_QCE_ENABLE_AEAD is not set +CONFIG_CRYPTO_DEV_QCE_ENABLE_ALL=y +# CONFIG_CRYPTO_DEV_QCE_ENABLE_SHA is not set +# CONFIG_CRYPTO_DEV_QCE_ENABLE_SKCIPHER is not set +CONFIG_CRYPTO_DEV_QCE_SHA=y +CONFIG_CRYPTO_DEV_QCE_SKCIPHER=y +CONFIG_CRYPTO_DEV_QCE_SW_MAX_LEN=512 +CONFIG_CRYPTO_DEV_QCOM_RNG=y +CONFIG_CRYPTO_ECB=y +CONFIG_CRYPTO_HASH_INFO=y +CONFIG_CRYPTO_HW=y +CONFIG_CRYPTO_LIB_BLAKE2S_GENERIC=y +CONFIG_CRYPTO_LIB_DES=y +CONFIG_CRYPTO_LIB_GF128MUL=y +CONFIG_CRYPTO_LIB_SHA1=y +CONFIG_CRYPTO_LIB_SHA256=y +CONFIG_CRYPTO_LIB_UTILS=y +CONFIG_CRYPTO_LZO=y +CONFIG_CRYPTO_RNG=y +CONFIG_CRYPTO_RNG2=y +CONFIG_CRYPTO_SHA1=y +CONFIG_CRYPTO_SHA256=y +CONFIG_CRYPTO_XTS=y +CONFIG_CRYPTO_ZSTD=y +CONFIG_DCACHE_WORD_ACCESS=y +CONFIG_DEBUG_BUGVERBOSE=y +CONFIG_DEBUG_INFO=y +CONFIG_DEV_COREDUMP=y +CONFIG_DMADEVICES=y +CONFIG_DMA_BOUNCE_UNALIGNED_KMALLOC=y +CONFIG_DMA_DIRECT_REMAP=y +CONFIG_DMA_ENGINE=y +CONFIG_DMA_OF=y +CONFIG_DMA_VIRTUAL_CHANNELS=y +CONFIG_DTC=y +CONFIG_DT_IDLE_STATES=y +CONFIG_EDAC_SUPPORT=y +CONFIG_EXCLUSIVE_SYSTEM_RAM=y +CONFIG_FIXED_PHY=y +CONFIG_FIX_EARLYCON_MEM=y +CONFIG_FRAME_POINTER=y +CONFIG_FS_IOMAP=y +CONFIG_FUJITSU_ERRATUM_010001=y +CONFIG_FUNCTION_ALIGNMENT=4 +CONFIG_FUNCTION_ALIGNMENT_4B=y +CONFIG_FWNODE_MDIO=y +CONFIG_FW_LOADER_PAGED_BUF=y +CONFIG_FW_LOADER_SYSFS=y +CONFIG_GCC11_NO_ARRAY_BOUNDS=y +CONFIG_GCC_ASM_GOTO_OUTPUT_WORKAROUND=y +CONFIG_GCC_SUPPORTS_DYNAMIC_FTRACE_WITH_ARGS=y +CONFIG_GENERIC_ALLOCATOR=y +CONFIG_GENERIC_ARCH_TOPOLOGY=y +CONFIG_GENERIC_BUG=y +CONFIG_GENERIC_BUG_RELATIVE_POINTERS=y +CONFIG_GENERIC_CLOCKEVENTS=y +CONFIG_GENERIC_CLOCKEVENTS_BROADCAST=y +CONFIG_GENERIC_CPU_AUTOPROBE=y +CONFIG_GENERIC_CPU_VULNERABILITIES=y +CONFIG_GENERIC_CSUM=y +CONFIG_GENERIC_EARLY_IOREMAP=y +CONFIG_GENERIC_GETTIMEOFDAY=y +CONFIG_GENERIC_IDLE_POLL_SETUP=y +CONFIG_GENERIC_IOREMAP=y +CONFIG_GENERIC_IRQ_EFFECTIVE_AFF_MASK=y +CONFIG_GENERIC_IRQ_SHOW=y +CONFIG_GENERIC_IRQ_SHOW_LEVEL=y +CONFIG_GENERIC_LIB_DEVMEM_IS_ALLOWED=y +CONFIG_GENERIC_MSI_IRQ=y +CONFIG_GENERIC_PCI_IOMAP=y +CONFIG_GENERIC_PHY=y +CONFIG_GENERIC_PINCONF=y +CONFIG_GENERIC_PINCTRL_GROUPS=y +CONFIG_GENERIC_PINMUX_FUNCTIONS=y +CONFIG_GENERIC_SCHED_CLOCK=y +CONFIG_GENERIC_SMP_IDLE_THREAD=y +CONFIG_GENERIC_STRNCPY_FROM_USER=y +CONFIG_GENERIC_STRNLEN_USER=y +CONFIG_GENERIC_TIME_VSYSCALL=y +CONFIG_GLOB=y +CONFIG_GPIOLIB_IRQCHIP=y +CONFIG_GPIO_CDEV=y +CONFIG_HARDIRQS_SW_RESEND=y +CONFIG_HAS_DMA=y +CONFIG_HAS_IOMEM=y +CONFIG_HAS_IOPORT=y +CONFIG_HAS_IOPORT_MAP=y +CONFIG_HWSPINLOCK=y +CONFIG_HWSPINLOCK_QCOM=y +CONFIG_I2C=y +CONFIG_I2C_BOARDINFO=y +CONFIG_I2C_CHARDEV=y +CONFIG_I2C_HELPER_AUTO=y +# CONFIG_I2C_QCOM_CCI is not set +CONFIG_I2C_QUP=y +CONFIG_IIO=y +CONFIG_ILLEGAL_POINTER_VALUE=0xdead000000000000 +CONFIG_INITRAMFS_SOURCE="" +CONFIG_IPQ_APSS_6018=y +CONFIG_IPQ_APSS_PLL=y +# CONFIG_IPQ_GCC_4019 is not set +# CONFIG_IPQ_GCC_5018 is not set +# CONFIG_IPQ_GCC_5332 is not set +# CONFIG_IPQ_GCC_6018 is not set +# CONFIG_IPQ_GCC_8074 is not set +# CONFIG_IPQ_GCC_9574 is not set +CONFIG_IRQCHIP=y +CONFIG_IRQ_DOMAIN=y +CONFIG_IRQ_DOMAIN_HIERARCHY=y +CONFIG_IRQ_FASTEOI_HIERARCHY_HANDLERS=y +CONFIG_IRQ_FORCED_THREADING=y +CONFIG_IRQ_WORK=y +# CONFIG_KPSS_XCC is not set +CONFIG_LEDS_TLC591XX=y +CONFIG_LIBFDT=y +CONFIG_LOCK_DEBUGGING_SUPPORT=y 
+CONFIG_LOCK_SPIN_ON_OWNER=y +CONFIG_LZO_COMPRESS=y +CONFIG_LZO_DECOMPRESS=y +CONFIG_MAILBOX=y +# CONFIG_MAILBOX_TEST is not set +CONFIG_MDIO_BUS=y +CONFIG_MDIO_DEVICE=y +CONFIG_MDIO_DEVRES=y +CONFIG_MDIO_IPQ4019=y +# CONFIG_MFD_QCOM_RPM is not set +CONFIG_MFD_SYSCON=y +CONFIG_MIGRATION=y +# CONFIG_MITIGATE_SPECTRE_BRANCH_HISTORY is not set +CONFIG_MMC=y +CONFIG_MMC_BLOCK=y +CONFIG_MMC_BLOCK_MINORS=32 +CONFIG_MMC_CQHCI=y +CONFIG_MMC_SDHCI=y +CONFIG_MMC_SDHCI_IO_ACCESSORS=y +CONFIG_MMC_SDHCI_MSM=y +# CONFIG_MMC_SDHCI_PCI is not set +CONFIG_MMC_SDHCI_PLTFM=y +CONFIG_MMU_LAZY_TLB_REFCOUNT=y +CONFIG_MODULES_USE_ELF_RELA=y +# CONFIG_MSM_GCC_8916 is not set +# CONFIG_MSM_GCC_8917 is not set +# CONFIG_MSM_GCC_8939 is not set +# CONFIG_MSM_GCC_8976 is not set +# CONFIG_MSM_GCC_8994 is not set +# CONFIG_MSM_GCC_8996 is not set +# CONFIG_MSM_GCC_8998 is not set +# CONFIG_MSM_GPUCC_8998 is not set +# CONFIG_MSM_MMCC_8996 is not set +# CONFIG_MSM_MMCC_8998 is not set +CONFIG_MTD_NAND_CORE=y +CONFIG_MTD_NAND_ECC=y +CONFIG_MTD_NAND_ECC_SW_HAMMING=y +CONFIG_MTD_NAND_QCOM=y +CONFIG_MTD_QCOMSMEM_PARTS=y +CONFIG_MTD_RAW_NAND=y +CONFIG_MTD_SPI_NOR=y +CONFIG_MTD_UBI=y +CONFIG_MTD_UBI_BEB_LIMIT=20 +CONFIG_MTD_UBI_BLOCK=y +CONFIG_MTD_UBI_WL_THRESHOLD=4096 +CONFIG_MUTEX_SPIN_ON_OWNER=y +CONFIG_NEED_DMA_MAP_STATE=y +CONFIG_NEED_SG_DMA_LENGTH=y +CONFIG_NET_EGRESS=y +CONFIG_NET_FLOW_LIMIT=y +CONFIG_NET_INGRESS=y +CONFIG_NET_SELFTESTS=y +CONFIG_NET_SWITCHDEV=y +CONFIG_NET_XGRESS=y +CONFIG_NLS=y +CONFIG_NO_HZ_COMMON=y +CONFIG_NO_HZ_IDLE=y +CONFIG_NR_CPUS=4 +CONFIG_NVIDIA_CARMEL_CNP_ERRATUM=y +CONFIG_NVMEM=y +CONFIG_NVMEM_LAYOUTS=y +CONFIG_NVMEM_QCOM_QFPROM=y +# CONFIG_NVMEM_QCOM_SEC_QFPROM is not set +CONFIG_NVMEM_SYSFS=y +CONFIG_NVMEM_U_BOOT_ENV=y +CONFIG_OF=y +CONFIG_OF_ADDRESS=y +CONFIG_OF_EARLY_FLATTREE=y +CONFIG_OF_FLATTREE=y +CONFIG_OF_GPIO=y +CONFIG_OF_IRQ=y +CONFIG_OF_KOBJ=y +CONFIG_OF_MDIO=y +CONFIG_PADATA=y +CONFIG_PAGE_POOL=y +CONFIG_PAGE_SIZE_LESS_THAN_256KB=y +CONFIG_PAGE_SIZE_LESS_THAN_64KB=y +CONFIG_PAHOLE_HAS_LANG_EXCLUDE=y +CONFIG_PARTITION_PERCPU=y +CONFIG_PCI=y +CONFIG_PCIEAER=y +CONFIG_PCIEASPM=y +CONFIG_PCIEASPM_DEFAULT=y +# CONFIG_PCIEASPM_PERFORMANCE is not set +# CONFIG_PCIEASPM_POWERSAVE is not set +# CONFIG_PCIEASPM_POWER_SUPERSAVE is not set +CONFIG_PCIEPORTBUS=y +CONFIG_PCIE_DW=y +CONFIG_PCIE_DW_HOST=y +CONFIG_PCIE_PME=y +CONFIG_PCIE_QCOM=y +CONFIG_PCI_DOMAINS=y +CONFIG_PCI_DOMAINS_GENERIC=y +CONFIG_PCI_MSI=y +CONFIG_PER_VMA_LOCK=y +CONFIG_PGTABLE_LEVELS=3 +CONFIG_PHYLIB=y +CONFIG_PHYLIB_LEDS=y +CONFIG_PHYS_ADDR_T_64BIT=y +# CONFIG_PHY_QCOM_APQ8064_SATA is not set +# CONFIG_PHY_QCOM_EDP is not set +# CONFIG_PHY_QCOM_EUSB2_REPEATER is not set +# CONFIG_PHY_QCOM_IPQ4019_USB is not set +# CONFIG_PHY_QCOM_IPQ806X_SATA is not set +# CONFIG_PHY_QCOM_IPQ806X_USB is not set +# CONFIG_PHY_QCOM_M31_USB is not set +# CONFIG_PHY_QCOM_PCIE2 is not set +CONFIG_PHY_QCOM_QMP=y +CONFIG_PHY_QCOM_QMP_COMBO=y +CONFIG_PHY_QCOM_QMP_PCIE=y +CONFIG_PHY_QCOM_QMP_PCIE_8996=y +CONFIG_PHY_QCOM_QMP_UFS=y +CONFIG_PHY_QCOM_QMP_USB=y +# CONFIG_PHY_QCOM_QMP_USB_LEGACY is not set +CONFIG_PHY_QCOM_QUSB2=y +# CONFIG_PHY_QCOM_SGMII_ETH is not set +# CONFIG_PHY_QCOM_SNPS_EUSB2 is not set +# CONFIG_PHY_QCOM_USB_HS_28NM is not set +# CONFIG_PHY_QCOM_USB_SNPS_FEMTO_V2 is not set +# CONFIG_PHY_QCOM_USB_SS is not set +CONFIG_PINCTRL=y +# CONFIG_PINCTRL_IPQ5018 is not set +# CONFIG_PINCTRL_IPQ5332 is not set +# CONFIG_PINCTRL_IPQ6018 is not set +# CONFIG_PINCTRL_IPQ8074 is not set +# CONFIG_PINCTRL_IPQ9574 is not set 
+CONFIG_PINCTRL_MSM=y +# CONFIG_PINCTRL_MSM8916 is not set +# CONFIG_PINCTRL_MSM8976 is not set +# CONFIG_PINCTRL_MSM8994 is not set +# CONFIG_PINCTRL_MSM8996 is not set +# CONFIG_PINCTRL_MSM8998 is not set +# CONFIG_PINCTRL_QCM2290 is not set +# CONFIG_PINCTRL_QCOM_SSBI_PMIC is not set +# CONFIG_PINCTRL_QCS404 is not set +# CONFIG_PINCTRL_QDU1000 is not set +# CONFIG_PINCTRL_SA8775P is not set +# CONFIG_PINCTRL_SC7180 is not set +# CONFIG_PINCTRL_SC8280XP is not set +# CONFIG_PINCTRL_SDM660 is not set +# CONFIG_PINCTRL_SDM670 is not set +# CONFIG_PINCTRL_SDM845 is not set +# CONFIG_PINCTRL_SDX75 is not set +# CONFIG_PINCTRL_SM6350 is not set +# CONFIG_PINCTRL_SM6375 is not set +# CONFIG_PINCTRL_SM7150 is not set +# CONFIG_PINCTRL_SM8150 is not set +# CONFIG_PINCTRL_SM8250 is not set +# CONFIG_PINCTRL_SM8450 is not set +# CONFIG_PINCTRL_SM8550 is not set +CONFIG_PM=y +CONFIG_PM_CLK=y +CONFIG_PM_OPP=y +CONFIG_POSIX_CPU_TIMERS_TASK_WORK=y +CONFIG_POWER_RESET=y +# CONFIG_POWER_RESET_MSM is not set +CONFIG_POWER_SUPPLY=y +CONFIG_PREEMPT_NONE_BUILD=y +CONFIG_PRINTK_TIME=y +CONFIG_PTP_1588_CLOCK_OPTIONAL=y +CONFIG_QCA807X_PHY=y +CONFIG_QCA808X_PHY=y +# CONFIG_QCM_DISPCC_2290 is not set +# CONFIG_QCM_GCC_2290 is not set +# CONFIG_QCOM_A53PLL is not set +# CONFIG_QCOM_AOSS_QMP is not set +CONFIG_QCOM_APCS_IPC=y +# CONFIG_QCOM_APM is not set +# CONFIG_QCOM_APR is not set +CONFIG_QCOM_BAM_DMA=y +# CONFIG_QCOM_CLK_APCC_MSM8996 is not set +# CONFIG_QCOM_CLK_APCS_MSM8916 is not set +# CONFIG_QCOM_COMMAND_DB is not set +# CONFIG_QCOM_CPR is not set +# CONFIG_QCOM_EBI2 is not set +# CONFIG_QCOM_FASTRPC is not set +# CONFIG_QCOM_GENI_SE is not set +# CONFIG_QCOM_GSBI is not set +# CONFIG_QCOM_HFPLL is not set +# CONFIG_QCOM_ICC_BWMON is not set +# CONFIG_QCOM_IPCC is not set +# CONFIG_QCOM_LLCC is not set +CONFIG_QCOM_MDT_LOADER=y +# CONFIG_QCOM_MPM is not set +CONFIG_QCOM_NET_PHYLIB=y +# CONFIG_QCOM_OCMEM is not set +# CONFIG_QCOM_PDC is not set +CONFIG_QCOM_PIL_INFO=y +# CONFIG_QCOM_Q6V5_ADSP is not set +CONFIG_QCOM_Q6V5_COMMON=y +# CONFIG_QCOM_Q6V5_MSS is not set +# CONFIG_QCOM_Q6V5_PAS is not set +CONFIG_QCOM_Q6V5_WCSS=y +# CONFIG_QCOM_RAMP_CTRL is not set +# CONFIG_QCOM_RMTFS_MEM is not set +# CONFIG_QCOM_RPMH is not set +# CONFIG_QCOM_RPM_MASTER_STATS is not set +CONFIG_QCOM_RPROC_COMMON=y +CONFIG_QCOM_SCM=y +# CONFIG_QCOM_SCM_DOWNLOAD_MODE_DEFAULT is not set +# CONFIG_QCOM_SMD_RPM is not set +CONFIG_QCOM_SMEM=y +CONFIG_QCOM_SMEM_STATE=y +CONFIG_QCOM_SMP2P=y +# CONFIG_QCOM_SMSM is not set +CONFIG_QCOM_SOCINFO=y +# CONFIG_QCOM_SPM is not set +# CONFIG_QCOM_STATS is not set +# CONFIG_QCOM_SYSMON is not set +CONFIG_QCOM_TSENS=y +# CONFIG_QCOM_WCNSS_CTRL is not set +# CONFIG_QCOM_WCNSS_PIL is not set +CONFIG_QCOM_WDT=y +# CONFIG_QCS_GCC_404 is not set +# CONFIG_QCS_Q6SSTOP_404 is not set +# CONFIG_QCS_TURING_404 is not set +# CONFIG_QDU_GCC_1000 is not set +CONFIG_QUEUED_RWLOCKS=y +CONFIG_QUEUED_SPINLOCKS=y +CONFIG_RANDSTRUCT_NONE=y +CONFIG_RAS=y +CONFIG_RATIONAL=y +CONFIG_REGMAP=y +CONFIG_REGMAP_I2C=y +CONFIG_REGMAP_MMIO=y +CONFIG_REGULATOR=y +# CONFIG_REGULATOR_CPR3 is not set +CONFIG_REGULATOR_FIXED_VOLTAGE=y +# CONFIG_REGULATOR_QCOM_REFGEN is not set +# CONFIG_REGULATOR_VQMMC_IPQ4019 is not set +CONFIG_RELOCATABLE=y +CONFIG_REMOTEPROC=y +CONFIG_REMOTEPROC_CDEV=y +CONFIG_RESET_CONTROLLER=y +# CONFIG_RESET_QCOM_AOSS is not set +# CONFIG_RESET_QCOM_PDC is not set +CONFIG_RFS_ACCEL=y +CONFIG_RODATA_FULL_DEFAULT_ENABLED=y +CONFIG_RPMSG=y +CONFIG_RPMSG_CHAR=y +# CONFIG_RPMSG_CTRL is not set +# 
CONFIG_RPMSG_NS is not set +CONFIG_RPMSG_QCOM_GLINK=y +CONFIG_RPMSG_QCOM_GLINK_RPM=y +CONFIG_RPMSG_QCOM_GLINK_SMEM=y +CONFIG_RPMSG_QCOM_SMD=y +# CONFIG_RPMSG_TTY is not set +CONFIG_RPS=y +CONFIG_RTC_CLASS=y +CONFIG_RTC_I2C_AND_SPI=y +CONFIG_RWSEM_SPIN_ON_OWNER=y +# CONFIG_SA_GCC_8775P is not set +# CONFIG_SA_GPUCC_8775P is not set +# CONFIG_SCHED_CORE is not set +CONFIG_SCHED_MC=y +CONFIG_SCHED_SMT=y +CONFIG_SCHED_THERMAL_PRESSURE=y +CONFIG_SCSI=y +CONFIG_SCSI_COMMON=y +# CONFIG_SCSI_LOWLEVEL is not set +# CONFIG_SCSI_PROC_FS is not set +# CONFIG_SC_CAMCC_7280 is not set +# CONFIG_SC_DISPCC_7180 is not set +# CONFIG_SC_DISPCC_8280XP is not set +# CONFIG_SC_GCC_7180 is not set +# CONFIG_SC_GCC_8280XP is not set +# CONFIG_SC_GPUCC_7180 is not set +# CONFIG_SC_LPASSCC_7280 is not set +# CONFIG_SC_LPASSCC_8280XP is not set +# CONFIG_SC_LPASS_CORECC_7180 is not set +# CONFIG_SC_LPASS_CORECC_7280 is not set +# CONFIG_SC_MSS_7180 is not set +# CONFIG_SC_VIDEOCC_7180 is not set +# CONFIG_SDM_CAMCC_845 is not set +# CONFIG_SDM_DISPCC_845 is not set +# CONFIG_SDM_GCC_660 is not set +# CONFIG_SDM_GCC_845 is not set +# CONFIG_SDM_GPUCC_845 is not set +# CONFIG_SDM_LPASSCC_845 is not set +# CONFIG_SDM_VIDEOCC_845 is not set +# CONFIG_SDX_GCC_75 is not set +CONFIG_SERIAL_8250_FSL=y +CONFIG_SERIAL_MCTRL_GPIO=y +CONFIG_SERIAL_MSM=y +CONFIG_SERIAL_MSM_CONSOLE=y +CONFIG_SGL_ALLOC=y +CONFIG_SG_POOL=y +CONFIG_SMP=y +# CONFIG_SM_CAMCC_6350 is not set +# CONFIG_SM_CAMCC_8450 is not set +# CONFIG_SM_GCC_7150 is not set +# CONFIG_SM_GCC_8150 is not set +# CONFIG_SM_GCC_8250 is not set +# CONFIG_SM_GCC_8450 is not set +# CONFIG_SM_GCC_8550 is not set +# CONFIG_SM_GPUCC_6115 is not set +# CONFIG_SM_GPUCC_6125 is not set +# CONFIG_SM_GPUCC_6350 is not set +# CONFIG_SM_GPUCC_6375 is not set +# CONFIG_SM_GPUCC_8150 is not set +# CONFIG_SM_GPUCC_8250 is not set +# CONFIG_SM_GPUCC_8350 is not set +# CONFIG_SM_GPUCC_8450 is not set +# CONFIG_SM_GPUCC_8550 is not set +# CONFIG_SM_TCSRCC_8550 is not set +# CONFIG_SM_VIDEOCC_8150 is not set +# CONFIG_SM_VIDEOCC_8250 is not set +# CONFIG_SM_VIDEOCC_8350 is not set +# CONFIG_SM_VIDEOCC_8450 is not set +# CONFIG_SM_VIDEOCC_8550 is not set +CONFIG_SOCK_RX_QUEUE_MAPPING=y +CONFIG_SOC_BUS=y +CONFIG_SOFTIRQ_ON_OWN_STACK=y +CONFIG_SPARSEMEM=y +CONFIG_SPARSEMEM_EXTREME=y +CONFIG_SPARSEMEM_VMEMMAP=y +CONFIG_SPARSEMEM_VMEMMAP_ENABLE=y +CONFIG_SPARSE_IRQ=y +CONFIG_SPI=y +CONFIG_SPI_MASTER=y +CONFIG_SPI_MEM=y +CONFIG_SPI_QUP=y +CONFIG_SQUASHFS_DECOMP_MULTI_PERCPU=y +CONFIG_SWIOTLB=y +CONFIG_SWPHY=y +CONFIG_SYSCTL_EXCEPTION_TRACE=y +CONFIG_THERMAL=y +CONFIG_THERMAL_DEFAULT_GOV_STEP_WISE=y +CONFIG_THERMAL_EMERGENCY_POWEROFF_DELAY_MS=0 +CONFIG_THERMAL_GOV_STEP_WISE=y +CONFIG_THERMAL_OF=y +CONFIG_THREAD_INFO_IN_TASK=y +CONFIG_TICK_CPU_ACCOUNTING=y +CONFIG_TIMER_OF=y +CONFIG_TIMER_PROBE=y +CONFIG_TRACE_IRQFLAGS_NMI_SUPPORT=y +CONFIG_TREE_RCU=y +CONFIG_TREE_SRCU=y +CONFIG_UBIFS_FS=y +CONFIG_UBIFS_FS_ADVANCED_COMPR=y +# CONFIG_UCLAMP_TASK is not set +CONFIG_UNMAP_KERNEL_AT_EL0=y +CONFIG_USB=y +CONFIG_USB_COMMON=y +CONFIG_USB_SUPPORT=y +CONFIG_VIRTIO=y +CONFIG_VIRTIO_ANCHOR=y +# CONFIG_VIRTIO_BLK is not set +# CONFIG_VIRTIO_NET is not set +CONFIG_VMAP_STACK=y +CONFIG_WANT_DEV_COREDUMP=y +CONFIG_WATCHDOG_CORE=y +CONFIG_WATCHDOG_SYSFS=y +CONFIG_XPS=y +CONFIG_XXHASH=y +CONFIG_ZLIB_DEFLATE=y +CONFIG_ZLIB_INFLATE=y +CONFIG_ZONE_DMA32=y +CONFIG_ZSTD_COMMON=y +CONFIG_ZSTD_COMPRESS=y +CONFIG_ZSTD_DECOMPRESS=y diff --git a/target/linux/qualcommax/image/ipq50xx.mk 
b/target/linux/qualcommax/image/ipq50xx.mk new file mode 100644 index 000000000..2af8e55c6 --- /dev/null +++ b/target/linux/qualcommax/image/ipq50xx.mk @@ -0,0 +1,50 @@ +define Device/glinet_gl-b3000 + $(call Device/FitImage) + $(call Device/UbiFit) + SOC := ipq5000 + DEVICE_VENDOR := GL.iNET + DEVICE_MODEL := GL-B3000 + BLOCKSIZE := 128k + PAGESIZE := 2048 + DEVICE_DTS_CONFIG := config@mp03.5-c1 + UBINIZE_OPTS := -E 5 + DEVICE_PACKAGES := ath11k-firmware-qcn6122 ipq-wifi-gl-b3000 +endef +TARGET_DEVICES += glinet_gl-b3000 + +define Device/linksys_mx2000 + $(call Device/FitImageLzma) + DEVICE_VENDOR := Linksys + DEVICE_MODEL := MX2000 + BLOCKSIZE := 128k + PAGESIZE := 2048 + KERNEL_SIZE := 8192k + IMAGE_SIZE := 83968k + DEVICE_DTS_CONFIG := config@mp03.5-c1 + SOC := ipq5018 + UBINIZE_OPTS := -E 5 # EOD marks to "hide" factory sig at EOF + IMAGES += factory.bin + IMAGE/factory.bin := append-kernel | pad-to $$$$(KERNEL_SIZE) | append-ubi | linksys-image type=MX2000 + DEVICE_PACKAGES := ath11k-firmware-qcn6122 \ + ipq-wifi-linksys_mx2000 +endef +TARGET_DEVICES += linksys_mx2000 + +define Device/linksys_mx5500 + $(call Device/FitImageLzma) + DEVICE_VENDOR := Linksys + DEVICE_MODEL := MX5500 + BLOCKSIZE := 128k + PAGESIZE := 2048 + KERNEL_SIZE := 8192k + IMAGE_SIZE := 83968k + DEVICE_DTS_CONFIG := config@mp03.1 + SOC := ipq5018 + UBINIZE_OPTS := -E 5 # EOD marks to "hide" factory sig at EOF + IMAGES += factory.bin + IMAGE/factory.bin := append-kernel | pad-to $$$$(KERNEL_SIZE) | append-ubi | linksys-image type=MX5500 + DEVICE_PACKAGES := kmod-ath11k-pci \ + ath11k-firmware-qcn9074 \ + ipq-wifi-linksys_mx5500 +endef +TARGET_DEVICES += linksys_mx5500 \ No newline at end of file diff --git a/target/linux/qualcommax/ipq50xx/base-files/etc/board.d/02_network b/target/linux/qualcommax/ipq50xx/base-files/etc/board.d/02_network new file mode 100644 index 000000000..3fa76e36e --- /dev/null +++ b/target/linux/qualcommax/ipq50xx/base-files/etc/board.d/02_network @@ -0,0 +1,48 @@ +#!/bin/sh + +. /lib/functions/uci-defaults.sh +. /lib/functions/system.sh + +ipq50xx_setup_interfaces() +{ + local board="$1" + case $board in + glinet,gl-b3000) + ucidef_set_interfaces_lan_wan "lan1 lan2" "wan" + ;; + linksys,mx2000|\ + linksys,mx5500) + ucidef_set_interfaces_lan_wan "lan1 lan2 lan3" "wan" + ;; + esac +} + +ipq50xx_setup_macs() +{ + local board="$1" + local lan_mac="" + local wan_mac="" + local label_mac="" + + case "$board" in + linksys,mx2000|\ + linksys,mx5500) + label_mac=$(mtd_get_mac_ascii devinfo hw_mac_addr) + lan_mac=$label_mac + wan_mac=$label_mac + ucidef_set_network_device_mac eth1 $label_mac + ;; + esac + + [ -n "$lan_mac" ] && ucidef_set_interface_macaddr "lan" $lan_mac + [ -n "$wan_mac" ] && ucidef_set_interface_macaddr "wan" $wan_mac + [ -n "$label_mac" ] && ucidef_set_label_macaddr $label_mac +} + +board_config_update +board=$(board_name) +ipq50xx_setup_interfaces $board +ipq50xx_setup_macs $board +board_config_flush + +exit 0 diff --git a/target/linux/qualcommax/ipq50xx/base-files/etc/hotplug.d/firmware/11-ath11k-caldata b/target/linux/qualcommax/ipq50xx/base-files/etc/hotplug.d/firmware/11-ath11k-caldata new file mode 100644 index 000000000..1369788e3 --- /dev/null +++ b/target/linux/qualcommax/ipq50xx/base-files/etc/hotplug.d/firmware/11-ath11k-caldata @@ -0,0 +1,55 @@ +#!/bin/sh + +[ -e /lib/firmware/$FIRMWARE ] && exit 0 + +. 
/lib/functions/caldata.sh + +board=$(board_name) + +case "$FIRMWARE" in +"ath11k/IPQ5018/hw1.0/cal-ahb-c000000.wifi.bin") + case "$board" in + linksys,mx2000|\ + linksys,mx5500) + caldata_extract "0:ART" 0x1000 0x20000 + label_mac=$(mtd_get_mac_ascii devinfo hw_mac_addr) + ath11k_patch_mac $(macaddr_add $label_mac 1) 0 + ath11k_remove_regdomain + ath11k_set_macflag + ;; + glinet,gl-b3000) + caldata_extract "0:ART" 0x1000 0x20000 + ;; + esac + ;; +"ath11k/qcn6122/hw1.0/cal-ahb-soc@0:wifi1@c000000.bin") + case "$board" in + linksys,mx2000) + caldata_extract "0:ART" 0x26800 0x20000 + label_mac=$(mtd_get_mac_ascii devinfo hw_mac_addr) + ath11k_patch_mac $(macaddr_add $label_mac 2) 0 + ath11k_remove_regdomain + ath11k_set_macflag + ;; + glinet,gl-b3000) + caldata_extract "0:ART" 0x26800 0x20000 + ;; + esac + ;; +"ath11k/QCN9074/hw1.0/cal-pci-0001:01:00.0.bin") + case "$board" in + linksys,mx5500) + caldata_extract "0:ART" 0x26800 0x20000 + label_mac=$(mtd_get_mac_ascii devinfo hw_mac_addr) + ath11k_patch_mac $(macaddr_add $label_mac 2) 0 + ath11k_remove_regdomain + ath11k_set_macflag + ;; + glinet,gl-b3000) + caldata_extract "0:ART" 0x26800 0x20000 + esac + ;; +*) + exit 1 + ;; +esac diff --git a/target/linux/qualcommax/ipq50xx/base-files/etc/init.d/bootcount b/target/linux/qualcommax/ipq50xx/base-files/etc/init.d/bootcount new file mode 100755 index 000000000..b570428ae --- /dev/null +++ b/target/linux/qualcommax/ipq50xx/base-files/etc/init.d/bootcount @@ -0,0 +1,12 @@ +#!/bin/sh /etc/rc.common + +START=99 + +boot() { + case $(board_name) in + linksys,mx2000|\ + linksys,mx5500) + mtd resetbc s_env || true + ;; + esac +} diff --git a/target/linux/qualcommax/ipq50xx/base-files/lib/upgrade/linksys.sh b/target/linux/qualcommax/ipq50xx/base-files/lib/upgrade/linksys.sh new file mode 100644 index 000000000..18366fc62 --- /dev/null +++ b/target/linux/qualcommax/ipq50xx/base-files/lib/upgrade/linksys.sh @@ -0,0 +1,125 @@ +linksys_get_target_firmware() { + local cur_boot_part mtd_ubi0 + + cur_boot_part="$(/usr/sbin/fw_printenv -n boot_part)" + if [ -z "${cur_boot_part}" ]; then + mtd_ubi0=$(cat /sys/class/ubi/ubi0/mtd_num) + case "$(grep -E "^mtd${mtd_ubi0}:" /proc/mtd | cut -d '"' -f 2)" in + kernel|rootfs) + cur_boot_part=1 + ;; + alt_kernel|alt_rootfs) + cur_boot_part=2 + ;; + esac + >&2 printf "Current boot_part='%s' selected from ubi0/mtd_num='%s'" \ + "${cur_boot_part}" "${mtd_ubi0}" + fi + + # OEM U-Boot for EA6350v3, EA8300 and MR8300; bootcmd= + # if test $auto_recovery = no; + # then bootipq; + # elif test $boot_part = 1; + # then run bootpart1; + # else run bootpart2; + # fi + + case "$cur_boot_part" in + 1) + fw_setenv -s - <<-EOF + boot_part 2 + auto_recovery yes + EOF + printf "alt_kernel" + return + ;; + 2) + fw_setenv -s - <<-EOF + boot_part 1 + auto_recovery yes + EOF + printf "kernel" + return + ;; + *) + return + ;; + esac +} + +linksys_is_factory_image() { + local board=$(board_name) + board=${board##*,} + + # check matching footer signature + tail -c 256 $1 | grep -q -i "\.LINKSYS\.........${board}" +} + +platform_do_upgrade_linksys() { + local magic_long="$(get_magic_long "$1")" + + local rm_oem_fw_vols="squashfs ubifs" # from OEM [alt_]rootfs UBI + local vol + + mkdir -p /var/lock + local part_label="$(linksys_get_target_firmware)" + touch /var/lock/fw_printenv.lock + + if [ -z "$part_label" ]; then + echo "cannot find target partition" + exit 1 + fi + + local target_mtd=$(find_mtd_part "$part_label") + + [ "$magic_long" = "73797375" ] && { + CI_KERNPART="$part_label" + if [ 
"$part_label" = "kernel" ]; then + CI_UBIPART="rootfs" + else + CI_UBIPART="alt_rootfs" + fi + + local mtdnum="$(find_mtd_index "$CI_UBIPART")" + if [ ! "$mtdnum" ]; then + echo "cannot find ubi mtd partition $CI_UBIPART" + return 1 + fi + + local ubidev="$(nand_find_ubi "$CI_UBIPART")" + if [ ! "$ubidev" ]; then + ubiattach -m "$mtdnum" + sync + ubidev="$(nand_find_ubi "$CI_UBIPART")" + fi + + if [ "$ubidev" ]; then + for vol in $rm_oem_fw_vols; do + ubirmvol "/dev/$ubidev" -N "$vol" 2>/dev/null + done + fi + + # complete std upgrade + if nand_upgrade_tar "$1" ; then + nand_do_upgrade_success + else + nand_do_upgrade_failed + fi + + } + + [ "$magic_long" = "27051956" ] && { + echo "writing \"$1\" image to \"$part_label\"" + get_image "$1" | mtd write - "$part_label" + } + + [ "$magic_long" = "d00dfeed" ] && { + if ! linksys_is_factory_image "$1"; then + echo "factory image doesn't match device" + return 1 + fi + + echo "writing \"$1\" factory image to \"$part_label\"" + get_image "$1" | mtd -e "$part_label" write - "$part_label" + } +} diff --git a/target/linux/qualcommax/ipq50xx/base-files/lib/upgrade/platform.sh b/target/linux/qualcommax/ipq50xx/base-files/lib/upgrade/platform.sh new file mode 100644 index 000000000..1702201f1 --- /dev/null +++ b/target/linux/qualcommax/ipq50xx/base-files/lib/upgrade/platform.sh @@ -0,0 +1,35 @@ +PART_NAME=firmware +REQUIRE_IMAGE_METADATA=1 + +RAMFS_COPY_BIN='fw_printenv fw_setenv head' +RAMFS_COPY_DATA='/etc/fw_env.config /var/lock/fw_printenv.lock' + +platform_check_image() { + return 0; +} + +platform_do_upgrade() { + case "$(board_name)" in + glinet,gl-b3000) + nand_do_upgrade "$1" + ;; + linksys,mx2000|\ + linksys,mx5500) + boot_part="$(fw_printenv -n boot_part)" + if [ "$boot_part" -eq "1" ]; then + fw_setenv boot_part 2 + CI_KERNPART="alt_kernel" + CI_UBIPART="alt_rootfs" + else + fw_setenv boot_part 1 + CI_UBIPART="rootfs" + fi + fw_setenv boot_part_ready 3 + fw_setenv auto_recovery yes + nand_do_upgrade "$1" + ;; + *) + default_do_upgrade "$1" + ;; + esac +} diff --git a/target/linux/qualcommax/ipq50xx/config-default b/target/linux/qualcommax/ipq50xx/config-default new file mode 100644 index 000000000..bc5daabd1 --- /dev/null +++ b/target/linux/qualcommax/ipq50xx/config-default @@ -0,0 +1,23 @@ +CONFIG_QCOM_APM=y +CONFIG_IPQ_GCC_5018=y +CONFIG_PINCTRL_IPQ5018=y + +CONFIG_MTD_SPI_NAND=y +CONFIG_SPI_QPIC_SNAND=y + +CONFIG_IPQ_CMN_PLL=y +CONFIG_IPQ5018_PHY=y +CONFIG_NET_DSA=y +CONFIG_NET_DSA_QCA8K=y +CONFIG_NET_DSA_TAG_QCA=y +CONFIG_QCA83XX_PHY=y + +CONFIG_QCOM_Q6V5_MPD=y +CONFIG_QCOM_QMI_HELPERS=y + +CONFIG_PHY_QCOM_IPQ5018_UNIPHY_PCIE=y +CONFIG_PCIE_QCOM=y + +CONFIG_PWM=y +CONFIG_PWM_IPQ=y +CONFIG_LEDS_PWM=y \ No newline at end of file diff --git a/target/linux/qualcommax/ipq50xx/target.mk b/target/linux/qualcommax/ipq50xx/target.mk new file mode 100644 index 000000000..2e6832926 --- /dev/null +++ b/target/linux/qualcommax/ipq50xx/target.mk @@ -0,0 +1,7 @@ +SUBTARGET:=ipq50xx +BOARDNAME:=Qualcomm Atheros IPQ50xx +DEFAULT_PACKAGES += ath11k-firmware-ipq5018 + +define Target/Description + Build firmware images for Qualcomm Atheros IPQ50xx based boards. 
+endef diff --git a/target/linux/qualcommax/patches-6.6/0024-v6.7-dt-bindings-arm-qcom-ids-Add-IDs-for-IPQ8174-family.patch b/target/linux/qualcommax/patches-6.6/0024-v6.7-dt-bindings-arm-qcom-ids-Add-IDs-for-IPQ8174-family.patch new file mode 100644 index 000000000..c1381a7bf --- /dev/null +++ b/target/linux/qualcommax/patches-6.6/0024-v6.7-dt-bindings-arm-qcom-ids-Add-IDs-for-IPQ8174-family.patch @@ -0,0 +1,29 @@ +From 93e161c8f4b9b051e5e746814138cb5520b4b897 Mon Sep 17 00:00:00 2001 +From: Robert Marko +Date: Fri, 1 Sep 2023 20:10:04 +0200 +Subject: [PATCH] dt-bindings: arm: qcom,ids: Add IDs for IPQ8174 family + +IPQ8174 (Oak) family is part of the IPQ8074 family, but the ID-s for it +are missing so lets add them. + +Signed-off-by: Robert Marko +Reviewed-by: Kathiravan T +Acked-by: Conor Dooley +Link: https://lore.kernel.org/r/20230901181041.1538999-1-robimarko@gmail.com +Signed-off-by: Bjorn Andersson +--- + include/dt-bindings/arm/qcom,ids.h | 3 +++ + 1 file changed, 3 insertions(+) + +--- a/include/dt-bindings/arm/qcom,ids.h ++++ b/include/dt-bindings/arm/qcom,ids.h +@@ -203,6 +203,9 @@ + #define QCOM_ID_SM6125 394 + #define QCOM_ID_IPQ8070A 395 + #define QCOM_ID_IPQ8071A 396 ++#define QCOM_ID_IPQ8172 397 ++#define QCOM_ID_IPQ8173 398 ++#define QCOM_ID_IPQ8174 399 + #define QCOM_ID_IPQ6018 402 + #define QCOM_ID_IPQ6028 403 + #define QCOM_ID_SDM429W 416 diff --git a/target/linux/qualcommax/patches-6.6/0025-v6.7-cpufreq-qcom-nvmem-add-support-for-IPQ6018.patch b/target/linux/qualcommax/patches-6.6/0025-v6.7-cpufreq-qcom-nvmem-add-support-for-IPQ6018.patch new file mode 100644 index 000000000..95c6c9505 --- /dev/null +++ b/target/linux/qualcommax/patches-6.6/0025-v6.7-cpufreq-qcom-nvmem-add-support-for-IPQ6018.patch @@ -0,0 +1,123 @@ +From 47e161a7873b0891f4e01a69a839f6161d816ea8 Mon Sep 17 00:00:00 2001 +From: Robert Marko +Date: Wed, 25 Oct 2023 14:57:57 +0530 +Subject: [PATCH] cpufreq: qcom-nvmem: add support for IPQ6018 + +IPQ6018 SoC series comes in multiple SKU-s, and not all of them support +high frequency OPP points. + +SoC itself does however have a single bit in QFPROM to indicate the CPU +speed-bin. +That bit is used to indicate frequency limit of 1.5GHz, but that alone is +not enough as IPQ6000 only goes up to 1.2GHz, but SMEM ID can be used to +limit it further. + +IPQ6018 compatible is blacklisted from DT platdev as the cpufreq device +will get created by NVMEM CPUFreq driver. + +Signed-off-by: Robert Marko +[ Viresh: Fixed rebase conflict. 
] +Signed-off-by: Viresh Kumar +--- + drivers/cpufreq/cpufreq-dt-platdev.c | 1 + + drivers/cpufreq/qcom-cpufreq-nvmem.c | 58 ++++++++++++++++++++++++++++ + 2 files changed, 59 insertions(+) + +--- a/drivers/cpufreq/cpufreq-dt-platdev.c ++++ b/drivers/cpufreq/cpufreq-dt-platdev.c +@@ -177,6 +177,7 @@ static const struct of_device_id blockli + { .compatible = "ti,am625", }, + { .compatible = "ti,am62a7", }, + ++ { .compatible = "qcom,ipq6018", }, + { .compatible = "qcom,ipq8064", }, + { .compatible = "qcom,apq8064", }, + { .compatible = "qcom,msm8974", }, +--- a/drivers/cpufreq/qcom-cpufreq-nvmem.c ++++ b/drivers/cpufreq/qcom-cpufreq-nvmem.c +@@ -30,6 +30,8 @@ + + #include + ++#define IPQ6000_VERSION BIT(2) ++ + struct qcom_cpufreq_drv; + + struct qcom_cpufreq_match_data { +@@ -207,6 +209,57 @@ len_error: + return ret; + } + ++static int qcom_cpufreq_ipq6018_name_version(struct device *cpu_dev, ++ struct nvmem_cell *speedbin_nvmem, ++ char **pvs_name, ++ struct qcom_cpufreq_drv *drv) ++{ ++ u32 msm_id; ++ int ret; ++ u8 *speedbin; ++ *pvs_name = NULL; ++ ++ ret = qcom_smem_get_soc_id(&msm_id); ++ if (ret) ++ return ret; ++ ++ speedbin = nvmem_cell_read(speedbin_nvmem, NULL); ++ if (IS_ERR(speedbin)) ++ return PTR_ERR(speedbin); ++ ++ switch (msm_id) { ++ case QCOM_ID_IPQ6005: ++ case QCOM_ID_IPQ6010: ++ case QCOM_ID_IPQ6018: ++ case QCOM_ID_IPQ6028: ++ /* Fuse Value Freq BIT to set ++ * --------------------------------- ++ * 2’b0 No Limit BIT(0) ++ * 2’b1 1.5 GHz BIT(1) ++ */ ++ drv->versions = 1 << (unsigned int)(*speedbin); ++ break; ++ case QCOM_ID_IPQ6000: ++ /* ++ * IPQ6018 family only has one bit to advertise the CPU ++ * speed-bin, but that is not enough for IPQ6000 which ++ * is only rated up to 1.2GHz. ++ * So for IPQ6000 manually set BIT(2) based on SMEM ID. 
++ */ ++ drv->versions = IPQ6000_VERSION; ++ break; ++ default: ++ dev_err(cpu_dev, ++ "SoC ID %u is not part of IPQ6018 family, limiting to 1.2GHz!\n", ++ msm_id); ++ drv->versions = IPQ6000_VERSION; ++ break; ++ } ++ ++ kfree(speedbin); ++ return 0; ++} ++ + static const struct qcom_cpufreq_match_data match_data_kryo = { + .get_version = qcom_cpufreq_kryo_name_version, + }; +@@ -221,6 +274,10 @@ static const struct qcom_cpufreq_match_d + .genpd_names = qcs404_genpd_names, + }; + ++static const struct qcom_cpufreq_match_data match_data_ipq6018 = { ++ .get_version = qcom_cpufreq_ipq6018_name_version, ++}; ++ + static int qcom_cpufreq_probe(struct platform_device *pdev) + { + struct qcom_cpufreq_drv *drv; +@@ -353,6 +410,7 @@ static const struct of_device_id qcom_cp + { .compatible = "qcom,apq8096", .data = &match_data_kryo }, + { .compatible = "qcom,msm8996", .data = &match_data_kryo }, + { .compatible = "qcom,qcs404", .data = &match_data_qcs404 }, ++ { .compatible = "qcom,ipq6018", .data = &match_data_ipq6018 }, + { .compatible = "qcom,ipq8064", .data = &match_data_krait }, + { .compatible = "qcom,apq8064", .data = &match_data_krait }, + { .compatible = "qcom,msm8974", .data = &match_data_krait }, diff --git a/target/linux/qualcommax/patches-6.6/0026-v6.7-cpufreq-qcom-nvmem-add-support-for-IPQ8074.patch b/target/linux/qualcommax/patches-6.6/0026-v6.7-cpufreq-qcom-nvmem-add-support-for-IPQ8074.patch new file mode 100644 index 000000000..564003840 --- /dev/null +++ b/target/linux/qualcommax/patches-6.6/0026-v6.7-cpufreq-qcom-nvmem-add-support-for-IPQ8074.patch @@ -0,0 +1,113 @@ +From 0b9cd949136f1b63f7aa9424b6e583a1ab261e36 Mon Sep 17 00:00:00 2001 +From: Robert Marko +Date: Fri, 13 Oct 2023 19:20:02 +0200 +Subject: [PATCH] cpufreq: qcom-nvmem: add support for IPQ8074 + +IPQ8074 comes in 3 families: +* IPQ8070A/IPQ8071A (Acorn) up to 1.4GHz +* IPQ8172/IPQ8173/IPQ8174 (Oak) up to 1.4GHz +* IPQ8072A/IPQ8074A/IPQ8076A/IPQ8078A (Hawkeye) up to 2.2GHz + +So, in order to be able to share one OPP table lets add support for IPQ8074 +family based of SMEM SoC ID-s as speedbin fuse is always 0 on IPQ8074. + +IPQ8074 compatible is blacklisted from DT platdev as the cpufreq device +will get created by NVMEM CPUFreq driver. + +Signed-off-by: Robert Marko +Acked-by: Konrad Dybcio +[ Viresh: Fixed rebase conflict. 
] +Signed-off-by: Viresh Kumar +--- + drivers/cpufreq/cpufreq-dt-platdev.c | 1 + + drivers/cpufreq/qcom-cpufreq-nvmem.c | 48 ++++++++++++++++++++++++++++ + 2 files changed, 49 insertions(+) + +--- a/drivers/cpufreq/cpufreq-dt-platdev.c ++++ b/drivers/cpufreq/cpufreq-dt-platdev.c +@@ -179,6 +179,7 @@ static const struct of_device_id blockli + + { .compatible = "qcom,ipq6018", }, + { .compatible = "qcom,ipq8064", }, ++ { .compatible = "qcom,ipq8074", }, + { .compatible = "qcom,apq8064", }, + { .compatible = "qcom,msm8974", }, + { .compatible = "qcom,msm8960", }, +--- a/drivers/cpufreq/qcom-cpufreq-nvmem.c ++++ b/drivers/cpufreq/qcom-cpufreq-nvmem.c +@@ -32,6 +32,11 @@ + + #define IPQ6000_VERSION BIT(2) + ++enum ipq8074_versions { ++ IPQ8074_HAWKEYE_VERSION = 0, ++ IPQ8074_ACORN_VERSION, ++}; ++ + struct qcom_cpufreq_drv; + + struct qcom_cpufreq_match_data { +@@ -260,6 +265,44 @@ static int qcom_cpufreq_ipq6018_name_ver + return 0; + } + ++static int qcom_cpufreq_ipq8074_name_version(struct device *cpu_dev, ++ struct nvmem_cell *speedbin_nvmem, ++ char **pvs_name, ++ struct qcom_cpufreq_drv *drv) ++{ ++ u32 msm_id; ++ int ret; ++ *pvs_name = NULL; ++ ++ ret = qcom_smem_get_soc_id(&msm_id); ++ if (ret) ++ return ret; ++ ++ switch (msm_id) { ++ case QCOM_ID_IPQ8070A: ++ case QCOM_ID_IPQ8071A: ++ case QCOM_ID_IPQ8172: ++ case QCOM_ID_IPQ8173: ++ case QCOM_ID_IPQ8174: ++ drv->versions = BIT(IPQ8074_ACORN_VERSION); ++ break; ++ case QCOM_ID_IPQ8072A: ++ case QCOM_ID_IPQ8074A: ++ case QCOM_ID_IPQ8076A: ++ case QCOM_ID_IPQ8078A: ++ drv->versions = BIT(IPQ8074_HAWKEYE_VERSION); ++ break; ++ default: ++ dev_err(cpu_dev, ++ "SoC ID %u is not part of IPQ8074 family, limiting to 1.4GHz!\n", ++ msm_id); ++ drv->versions = BIT(IPQ8074_ACORN_VERSION); ++ break; ++ } ++ ++ return 0; ++} ++ + static const struct qcom_cpufreq_match_data match_data_kryo = { + .get_version = qcom_cpufreq_kryo_name_version, + }; +@@ -278,6 +321,10 @@ static const struct qcom_cpufreq_match_d + .get_version = qcom_cpufreq_ipq6018_name_version, + }; + ++static const struct qcom_cpufreq_match_data match_data_ipq8074 = { ++ .get_version = qcom_cpufreq_ipq8074_name_version, ++}; ++ + static int qcom_cpufreq_probe(struct platform_device *pdev) + { + struct qcom_cpufreq_drv *drv; +@@ -412,6 +459,7 @@ static const struct of_device_id qcom_cp + { .compatible = "qcom,qcs404", .data = &match_data_qcs404 }, + { .compatible = "qcom,ipq6018", .data = &match_data_ipq6018 }, + { .compatible = "qcom,ipq8064", .data = &match_data_krait }, ++ { .compatible = "qcom,ipq8074", .data = &match_data_ipq8074 }, + { .compatible = "qcom,apq8064", .data = &match_data_krait }, + { .compatible = "qcom,msm8974", .data = &match_data_krait }, + { .compatible = "qcom,msm8960", .data = &match_data_krait }, diff --git a/target/linux/qualcommax/patches-6.6/0027-v6.7-clk-qcom-apss-ipq6018-add-the-GPLL0-clock-also-as-cl.patch b/target/linux/qualcommax/patches-6.6/0027-v6.7-clk-qcom-apss-ipq6018-add-the-GPLL0-clock-also-as-cl.patch new file mode 100644 index 000000000..ddd53f9d4 --- /dev/null +++ b/target/linux/qualcommax/patches-6.6/0027-v6.7-clk-qcom-apss-ipq6018-add-the-GPLL0-clock-also-as-cl.patch @@ -0,0 +1,43 @@ +From c917237a7cb17b97cc48e073881a9873f3caeaa2 Mon Sep 17 00:00:00 2001 +From: Kathiravan Thirumoorthy +Date: Thu, 14 Sep 2023 12:29:57 +0530 +Subject: [PATCH] clk: qcom: apss-ipq6018: add the GPLL0 clock also as clock + provider + +While the kernel is booting up, APSS PLL will be running at 800MHz with +GPLL0 as source. 
Once the cpufreq driver is available, APSS PLL will be +configured and select the rate based on the opp table and the source will +be changed to APSS_PLL_EARLY. + +Without this patch, CPU Freq driver reports that CPU is running at 24MHz +instead of the 800MHz. + +Reviewed-by: Konrad Dybcio +Tested-by: Robert Marko +Signed-off-by: Kathiravan Thirumoorthy +--- + drivers/clk/qcom/apss-ipq6018.c | 3 +++ + 1 file changed, 3 insertions(+) + +--- a/drivers/clk/qcom/apss-ipq6018.c ++++ b/drivers/clk/qcom/apss-ipq6018.c +@@ -20,16 +20,19 @@ + + enum { + P_XO, ++ P_GPLL0, + P_APSS_PLL_EARLY, + }; + + static const struct clk_parent_data parents_apcs_alias0_clk_src[] = { + { .fw_name = "xo" }, ++ { .fw_name = "gpll0" }, + { .fw_name = "pll" }, + }; + + static const struct parent_map parents_apcs_alias0_clk_src_map[] = { + { P_XO, 0 }, ++ { P_GPLL0, 4 }, + { P_APSS_PLL_EARLY, 5 }, + }; + diff --git a/target/linux/qualcommax/patches-6.6/0028-v6.7-arm64-dts-qcom-ipq8074-include-the-GPLL0-as-clock-pr.patch b/target/linux/qualcommax/patches-6.6/0028-v6.7-arm64-dts-qcom-ipq8074-include-the-GPLL0-as-clock-pr.patch new file mode 100644 index 000000000..6b7dd2f26 --- /dev/null +++ b/target/linux/qualcommax/patches-6.6/0028-v6.7-arm64-dts-qcom-ipq8074-include-the-GPLL0-as-clock-pr.patch @@ -0,0 +1,32 @@ +From 3b48a7d925a757b3fa53c04baaf68bb8313c3ffb Mon Sep 17 00:00:00 2001 +From: Kathiravan Thirumoorthy +Date: Thu, 14 Sep 2023 12:29:58 +0530 +Subject: [PATCH] arm64: dts: qcom: ipq8074: include the GPLL0 as clock + provider for mailbox + +While the kernel is booting up, APSS PLL will be running at 800MHz with +GPLL0 as source. Once the cpufreq driver is available, APSS PLL will be +configured to the rate based on the opp table and the source also will +be changed to APSS_PLL_EARLY. So allow the mailbox to consume the GPLL0, +with this inclusion, CPU Freq correctly reports that CPU is running at +800MHz rather than 24MHz. + +Signed-off-by: Kathiravan Thirumoorthy +Reviewed-by: Konrad Dybcio +--- + arch/arm64/boot/dts/qcom/ipq8074.dtsi | 4 ++-- + 1 file changed, 2 insertions(+), 2 deletions(-) + +--- a/arch/arm64/boot/dts/qcom/ipq8074.dtsi ++++ b/arch/arm64/boot/dts/qcom/ipq8074.dtsi +@@ -723,8 +723,8 @@ + compatible = "qcom,ipq8074-apcs-apps-global", + "qcom,ipq6018-apcs-apps-global"; + reg = <0x0b111000 0x1000>; +- clocks = <&a53pll>, <&xo>; +- clock-names = "pll", "xo"; ++ clocks = <&a53pll>, <&xo>, <&gcc GPLL0>; ++ clock-names = "pll", "xo", "gpll0"; + + #clock-cells = <1>; + #mbox-cells = <1>; diff --git a/target/linux/qualcommax/patches-6.6/0052-v6.7-arm64-dts-qcom-ipq6018-include-the-GPLL0-as.patch b/target/linux/qualcommax/patches-6.6/0052-v6.7-arm64-dts-qcom-ipq6018-include-the-GPLL0-as.patch new file mode 100644 index 000000000..d407b9c5c --- /dev/null +++ b/target/linux/qualcommax/patches-6.6/0052-v6.7-arm64-dts-qcom-ipq6018-include-the-GPLL0-as.patch @@ -0,0 +1,35 @@ +From 0133c7af3aa0420778d106cb90db708cfa45f2c6 Mon Sep 17 00:00:00 2001 +From: Kathiravan Thirumoorthy +Date: Thu, 14 Sep 2023 12:29:59 +0530 +Subject: [PATCH] arm64: dts: qcom: ipq6018: include the GPLL0 as clock + provider for mailbox + +While the kernel is booting up, APSS clock / CPU clock will be running +at 800MHz with GPLL0 as source. Once the cpufreq driver is available, +APSS PLL will be configured to the rate based on the opp table and the +source also will be changed to APSS_PLL_EARLY. So allow the mailbox to +consume the GPLL0, with this inclusion, CPU Freq correctly reports that +CPU is running at 800MHz rather than 24MHz. 
+ +Signed-off-by: Kathiravan Thirumoorthy +Reviewed-by: Konrad Dybcio +Link: https://lore.kernel.org/r/20230913-gpll_cleanup-v2-9-c8ceb1a37680@quicinc.com +[bjorn: Updated commit message, as requested by Kathiravan] +Signed-off-by: Bjorn Andersson +--- + arch/arm64/boot/dts/qcom/ipq6018.dtsi | 4 ++-- + 1 file changed, 2 insertions(+), 2 deletions(-) + +--- a/arch/arm64/boot/dts/qcom/ipq6018.dtsi ++++ b/arch/arm64/boot/dts/qcom/ipq6018.dtsi +@@ -620,8 +620,8 @@ + compatible = "qcom,ipq6018-apcs-apps-global"; + reg = <0x0 0x0b111000 0x0 0x1000>; + #clock-cells = <1>; +- clocks = <&a53pll>, <&xo>; +- clock-names = "pll", "xo"; ++ clocks = <&a53pll>, <&xo>, <&gcc GPLL0>; ++ clock-names = "pll", "xo", "gpll0"; + #mbox-cells = <1>; + }; + diff --git a/target/linux/qualcommax/patches-6.6/0053-v6.7-clk-qcom-gcc-ipq6018-add-QUP6-I2C-clock.patch b/target/linux/qualcommax/patches-6.6/0053-v6.7-clk-qcom-gcc-ipq6018-add-QUP6-I2C-clock.patch new file mode 100644 index 000000000..eb68b8764 --- /dev/null +++ b/target/linux/qualcommax/patches-6.6/0053-v6.7-clk-qcom-gcc-ipq6018-add-QUP6-I2C-clock.patch @@ -0,0 +1,57 @@ +From 3dcf7b59393812a5fbd83f8cd8d34b94afb4c4d1 Mon Sep 17 00:00:00 2001 +From: Robert Marko +Date: Sat, 21 Oct 2023 13:55:18 +0200 +Subject: [PATCH] clk: qcom: gcc-ipq6018: add QUP6 I2C clock + +QUP6 I2C clock is listed in the dt bindings but it was never included in +the GCC driver. +So lets add support for it, it is marked as criticial as it is used by RPM +to communicate to the external PMIC over I2C so this clock must not be +disabled. + +Signed-off-by: Robert Marko +Reviewed-by: Kathiravan Thirumoorthy +Reviewed-by: Konrad Dybcio +Link: https://lore.kernel.org/r/20231021115545.229060-1-robimarko@gmail.com +Signed-off-by: Bjorn Andersson +--- + drivers/clk/qcom/gcc-ipq6018.c | 21 +++++++++++++++++++++ + 1 file changed, 21 insertions(+) + +--- a/drivers/clk/qcom/gcc-ipq6018.c ++++ b/drivers/clk/qcom/gcc-ipq6018.c +@@ -2121,6 +2121,26 @@ static struct clk_branch gcc_blsp1_qup5_ + }, + }; + ++static struct clk_branch gcc_blsp1_qup6_i2c_apps_clk = { ++ .halt_reg = 0x07010, ++ .clkr = { ++ .enable_reg = 0x07010, ++ .enable_mask = BIT(0), ++ .hw.init = &(struct clk_init_data){ ++ .name = "gcc_blsp1_qup6_i2c_apps_clk", ++ .parent_hws = (const struct clk_hw *[]){ ++ &blsp1_qup6_i2c_apps_clk_src.clkr.hw }, ++ .num_parents = 1, ++ /* ++ * RPM uses QUP6 I2C to communicate with the external ++ * PMIC so it must not be disabled. 
++ */ ++ .flags = CLK_SET_RATE_PARENT | CLK_IS_CRITICAL, ++ .ops = &clk_branch2_ops, ++ }, ++ }, ++}; ++ + static struct clk_branch gcc_blsp1_qup6_spi_apps_clk = { + .halt_reg = 0x0700c, + .clkr = { +@@ -4277,6 +4297,7 @@ static struct clk_regmap *gcc_ipq6018_cl + [GCC_BLSP1_QUP4_SPI_APPS_CLK] = &gcc_blsp1_qup4_spi_apps_clk.clkr, + [GCC_BLSP1_QUP5_I2C_APPS_CLK] = &gcc_blsp1_qup5_i2c_apps_clk.clkr, + [GCC_BLSP1_QUP5_SPI_APPS_CLK] = &gcc_blsp1_qup5_spi_apps_clk.clkr, ++ [GCC_BLSP1_QUP6_I2C_APPS_CLK] = &gcc_blsp1_qup6_i2c_apps_clk.clkr, + [GCC_BLSP1_QUP6_SPI_APPS_CLK] = &gcc_blsp1_qup6_spi_apps_clk.clkr, + [GCC_BLSP1_UART1_APPS_CLK] = &gcc_blsp1_uart1_apps_clk.clkr, + [GCC_BLSP1_UART2_APPS_CLK] = &gcc_blsp1_uart2_apps_clk.clkr, diff --git a/target/linux/qualcommax/patches-6.6/0054-v6.8-arm64-dts-qcom-ipq6018-use-CPUFreq-NVMEM.patch b/target/linux/qualcommax/patches-6.6/0054-v6.8-arm64-dts-qcom-ipq6018-use-CPUFreq-NVMEM.patch new file mode 100644 index 000000000..6198e24f3 --- /dev/null +++ b/target/linux/qualcommax/patches-6.6/0054-v6.8-arm64-dts-qcom-ipq6018-use-CPUFreq-NVMEM.patch @@ -0,0 +1,85 @@ +From 83afcf14edb9217e58837eb119da96d734a4b3b1 Mon Sep 17 00:00:00 2001 +From: Robert Marko +Date: Sat, 21 Oct 2023 14:00:07 +0200 +Subject: [PATCH] arm64: dts: qcom: ipq6018: use CPUFreq NVMEM + +IPQ6018 comes in multiple SKU-s and some of them dont support all of the +OPP-s that are current set, so lets utilize CPUFreq NVMEM to allow only +supported OPP-s based on the SoC dynamically. + +As an example, IPQ6018 is generaly rated at 1.8GHz but some silicon only +goes up to 1.5GHz and is marked as such via an eFuse. + +Signed-off-by: Robert Marko +Reviewed-by: Konrad Dybcio +Link: https://lore.kernel.org/r/20231021120048.231239-1-robimarko@gmail.com +Signed-off-by: Bjorn Andersson +--- + arch/arm64/boot/dts/qcom/ipq6018.dtsi | 14 +++++++++++++- + 1 file changed, 13 insertions(+), 1 deletion(-) + +--- a/arch/arm64/boot/dts/qcom/ipq6018.dtsi ++++ b/arch/arm64/boot/dts/qcom/ipq6018.dtsi +@@ -96,42 +96,49 @@ + }; + + cpu_opp_table: opp-table-cpu { +- compatible = "operating-points-v2"; ++ compatible = "operating-points-v2-kryo-cpu"; ++ nvmem-cells = <&cpu_speed_bin>; + opp-shared; + + opp-864000000 { + opp-hz = /bits/ 64 <864000000>; + opp-microvolt = <725000>; ++ opp-supported-hw = <0xf>; + clock-latency-ns = <200000>; + }; + + opp-1056000000 { + opp-hz = /bits/ 64 <1056000000>; + opp-microvolt = <787500>; ++ opp-supported-hw = <0xf>; + clock-latency-ns = <200000>; + }; + + opp-1320000000 { + opp-hz = /bits/ 64 <1320000000>; + opp-microvolt = <862500>; ++ opp-supported-hw = <0x3>; + clock-latency-ns = <200000>; + }; + + opp-1440000000 { + opp-hz = /bits/ 64 <1440000000>; + opp-microvolt = <925000>; ++ opp-supported-hw = <0x3>; + clock-latency-ns = <200000>; + }; + + opp-1608000000 { + opp-hz = /bits/ 64 <1608000000>; + opp-microvolt = <987500>; ++ opp-supported-hw = <0x1>; + clock-latency-ns = <200000>; + }; + + opp-1800000000 { + opp-hz = /bits/ 64 <1800000000>; + opp-microvolt = <1062500>; ++ opp-supported-hw = <0x1>; + clock-latency-ns = <200000>; + }; + }; +@@ -322,6 +329,11 @@ + reg = <0x0 0x000a4000 0x0 0x2000>; + #address-cells = <1>; + #size-cells = <1>; ++ ++ cpu_speed_bin: cpu-speed-bin@135 { ++ reg = <0x135 0x1>; ++ bits = <7 1>; ++ }; + }; + + prng: qrng@e3000 { diff --git a/target/linux/qualcommax/patches-6.6/0055-v6.8-arm64-dts-ipq6018-Add-remaining-QUP-UART-node.patch b/target/linux/qualcommax/patches-6.6/0055-v6.8-arm64-dts-ipq6018-Add-remaining-QUP-UART-node.patch new file mode 100644 
index 000000000..af3e13258 --- /dev/null +++ b/target/linux/qualcommax/patches-6.6/0055-v6.8-arm64-dts-ipq6018-Add-remaining-QUP-UART-node.patch @@ -0,0 +1,81 @@ +From e6c32770ef83f3e8cc057f3920b1c06aa9d1c9c2 Mon Sep 17 00:00:00 2001 +From: Chukun Pan +Date: Sun, 3 Dec 2023 23:39:14 +0800 +Subject: [PATCH] arm64: dts: qcom: ipq6018: Add remaining QUP UART node + +Add node to support all the QUP UART node controller inside of IPQ6018. +Some routers use these bus to connect Bluetooth chips. + +Signed-off-by: Chukun Pan +Link: https://lore.kernel.org/r/20231203153914.532654-1-amadeus@jmu.edu.cn +Signed-off-by: Bjorn Andersson +--- + arch/arm64/boot/dts/qcom/ipq6018.dtsi | 50 +++++++++++++++++++++++++++ + 1 file changed, 50 insertions(+) + +--- a/arch/arm64/boot/dts/qcom/ipq6018.dtsi ++++ b/arch/arm64/boot/dts/qcom/ipq6018.dtsi +@@ -459,6 +459,26 @@ + qcom,ee = <0>; + }; + ++ blsp1_uart1: serial@78af000 { ++ compatible = "qcom,msm-uartdm-v1.4", "qcom,msm-uartdm"; ++ reg = <0x0 0x78af000 0x0 0x200>; ++ interrupts = ; ++ clocks = <&gcc GCC_BLSP1_UART1_APPS_CLK>, ++ <&gcc GCC_BLSP1_AHB_CLK>; ++ clock-names = "core", "iface"; ++ status = "disabled"; ++ }; ++ ++ blsp1_uart2: serial@78b0000 { ++ compatible = "qcom,msm-uartdm-v1.4", "qcom,msm-uartdm"; ++ reg = <0x0 0x78b0000 0x0 0x200>; ++ interrupts = ; ++ clocks = <&gcc GCC_BLSP1_UART2_APPS_CLK>, ++ <&gcc GCC_BLSP1_AHB_CLK>; ++ clock-names = "core", "iface"; ++ status = "disabled"; ++ }; ++ + blsp1_uart3: serial@78b1000 { + compatible = "qcom,msm-uartdm-v1.4", "qcom,msm-uartdm"; + reg = <0x0 0x078b1000 0x0 0x200>; +@@ -467,6 +487,36 @@ + <&gcc GCC_BLSP1_AHB_CLK>; + clock-names = "core", "iface"; + status = "disabled"; ++ }; ++ ++ blsp1_uart4: serial@78b2000 { ++ compatible = "qcom,msm-uartdm-v1.4", "qcom,msm-uartdm"; ++ reg = <0x0 0x078b2000 0x0 0x200>; ++ interrupts = ; ++ clocks = <&gcc GCC_BLSP1_UART4_APPS_CLK>, ++ <&gcc GCC_BLSP1_AHB_CLK>; ++ clock-names = "core", "iface"; ++ status = "disabled"; ++ }; ++ ++ blsp1_uart5: serial@78b3000 { ++ compatible = "qcom,msm-uartdm-v1.4", "qcom,msm-uartdm"; ++ reg = <0x0 0x78b3000 0x0 0x200>; ++ interrupts = ; ++ clocks = <&gcc GCC_BLSP1_UART5_APPS_CLK>, ++ <&gcc GCC_BLSP1_AHB_CLK>; ++ clock-names = "core", "iface"; ++ status = "disabled"; ++ }; ++ ++ blsp1_uart6: serial@78b4000 { ++ compatible = "qcom,msm-uartdm-v1.4", "qcom,msm-uartdm"; ++ reg = <0x0 0x078b4000 0x0 0x200>; ++ interrupts = ; ++ clocks = <&gcc GCC_BLSP1_UART6_APPS_CLK>, ++ <&gcc GCC_BLSP1_AHB_CLK>; ++ clock-names = "core", "iface"; ++ status = "disabled"; + }; + + blsp1_spi1: spi@78b5000 { diff --git a/target/linux/qualcommax/patches-6.6/0056-v6.9-arm64-dts-qcom-Fix-hs_phy_irq-for-QUSB2-targets.patch b/target/linux/qualcommax/patches-6.6/0056-v6.9-arm64-dts-qcom-Fix-hs_phy_irq-for-QUSB2-targets.patch new file mode 100644 index 000000000..6559f3bbe --- /dev/null +++ b/target/linux/qualcommax/patches-6.6/0056-v6.9-arm64-dts-qcom-Fix-hs_phy_irq-for-QUSB2-targets.patch @@ -0,0 +1,95 @@ +From 2c6597c72e9722ac020102d5af40126df0437b82 Mon Sep 17 00:00:00 2001 +From: Krishna Kurapati +Date: Fri, 26 Jan 2024 00:29:18 +0530 +Subject: [PATCH] arm64: dts: qcom: Fix hs_phy_irq for QUSB2 targets + +On several QUSB2 Targets, the hs_phy_irq mentioned is actually +qusb2_phy interrupt specific to QUSB2 PHY's. Rename hs_phy_irq +to qusb2_phy for such targets. + +In actuality, the hs_phy_irq is also present in these targets, but +kept in for debug purposes in hw test environments. 
This is not +triggered by default and its functionality is mutually exclusive +to that of qusb2_phy interrupt. + +Add missing hs_phy_irq's, pwr_event irq's for QUSB2 PHY targets. +Add missing ss_phy_irq on some targets which allows for remote +wakeup to work on a Super Speed link. + +Also modify order of interrupts in accordance to bindings update. +Since driver looks up for interrupts by name and not by index, it +is safe to modify order of these interrupts in the DT. + +Signed-off-by: Krishna Kurapati +Link: https://lore.kernel.org/r/20240125185921.5062-2-quic_kriskura@quicinc.com +Signed-off-by: Bjorn Andersson +--- + arch/arm64/boot/dts/qcom/ipq6018.dtsi | 13 +++++++++++++ + arch/arm64/boot/dts/qcom/ipq8074.dtsi | 14 ++++++++++++++ + arch/arm64/boot/dts/qcom/msm8953.dtsi | 7 +++++-- + arch/arm64/boot/dts/qcom/msm8996.dtsi | 8 ++++++-- + arch/arm64/boot/dts/qcom/msm8998.dtsi | 7 +++++-- + arch/arm64/boot/dts/qcom/sdm630.dtsi | 17 +++++++++++++---- + arch/arm64/boot/dts/qcom/sm6115.dtsi | 9 +++++++-- + arch/arm64/boot/dts/qcom/sm6125.dtsi | 9 +++++++-- + 8 files changed, 70 insertions(+), 14 deletions(-) + +--- a/arch/arm64/boot/dts/qcom/ipq6018.dtsi ++++ b/arch/arm64/boot/dts/qcom/ipq6018.dtsi +@@ -431,6 +431,12 @@ + <&gcc GCC_USB1_MOCK_UTMI_CLK>; + assigned-clock-rates = <133330000>, + <24000000>; ++ ++ interrupts = , ++ ; ++ interrupt-names = "pwr_event", ++ "qusb2_phy"; ++ + resets = <&gcc GCC_USB1_BCR>; + status = "disabled"; + +@@ -629,6 +635,13 @@ + <133330000>, + <24000000>; + ++ interrupts = , ++ , ++ ; ++ interrupt-names = "pwr_event", ++ "qusb2_phy", ++ "ss_phy_irq"; ++ + resets = <&gcc GCC_USB0_BCR>; + status = "disabled"; + +--- a/arch/arm64/boot/dts/qcom/ipq8074.dtsi ++++ b/arch/arm64/boot/dts/qcom/ipq8074.dtsi +@@ -632,6 +632,13 @@ + <133330000>, + <19200000>; + ++ interrupts = , ++ , ++ ; ++ interrupt-names = "pwr_event", ++ "qusb2_phy", ++ "ss_phy_irq"; ++ + power-domains = <&gcc USB0_GDSC>; + + resets = <&gcc GCC_USB0_BCR>; +@@ -675,6 +682,13 @@ + <133330000>, + <19200000>; + ++ interrupts = , ++ , ++ ; ++ interrupt-names = "pwr_event", ++ "qusb2_phy", ++ "ss_phy_irq"; ++ + power-domains = <&gcc USB1_GDSC>; + + resets = <&gcc GCC_USB1_BCR>; diff --git a/target/linux/qualcommax/patches-6.6/0057-v6.8-hwspinlock-qcom-Remove-IPQ6018-SOC-specific-.patch b/target/linux/qualcommax/patches-6.6/0057-v6.8-hwspinlock-qcom-Remove-IPQ6018-SOC-specific-.patch new file mode 100644 index 000000000..25d56870a --- /dev/null +++ b/target/linux/qualcommax/patches-6.6/0057-v6.8-hwspinlock-qcom-Remove-IPQ6018-SOC-specific-.patch @@ -0,0 +1,32 @@ +From c3dc3d079d191c9149496b3c7fe1ece909386d93 Mon Sep 17 00:00:00 2001 +From: Vignesh Viswanathan +Date: Tue, 5 Sep 2023 15:25:35 +0530 +Subject: [PATCH] hwspinlock: qcom: Remove IPQ6018 SOC specific compatible + +IPQ6018 has 32 tcsr_mutex hwlock registers with stride 0x1000. +The compatible string qcom,ipq6018-tcsr-mutex is mapped to +of_msm8226_tcsr_mutex which has 32 locks configured with stride of 0x80 +and doesn't match the HW present in IPQ6018. + +Remove IPQ6018 specific compatible string so that it fallsback to +of_tcsr_mutex data which maps to the correct configuration for IPQ6018. 
+ +Fixes: 5d4753f741d8 ("hwspinlock: qcom: add support for MMIO on older SoCs") +Signed-off-by: Vignesh Viswanathan +Reviewed-by: Konrad Dybcio +Link: https://lore.kernel.org/r/20230905095535.1263113-3-quic_viswanat@quicinc.com +Signed-off-by: Bjorn Andersson +--- + drivers/hwspinlock/qcom_hwspinlock.c | 1 - + 1 file changed, 1 deletion(-) + +--- a/drivers/hwspinlock/qcom_hwspinlock.c ++++ b/drivers/hwspinlock/qcom_hwspinlock.c +@@ -115,7 +115,6 @@ static const struct of_device_id qcom_hw + { .compatible = "qcom,sfpb-mutex", .data = &of_sfpb_mutex }, + { .compatible = "qcom,tcsr-mutex", .data = &of_tcsr_mutex }, + { .compatible = "qcom,apq8084-tcsr-mutex", .data = &of_msm8226_tcsr_mutex }, +- { .compatible = "qcom,ipq6018-tcsr-mutex", .data = &of_msm8226_tcsr_mutex }, + { .compatible = "qcom,msm8226-tcsr-mutex", .data = &of_msm8226_tcsr_mutex }, + { .compatible = "qcom,msm8974-tcsr-mutex", .data = &of_msm8226_tcsr_mutex }, + { .compatible = "qcom,msm8994-tcsr-mutex", .data = &of_msm8226_tcsr_mutex }, diff --git a/target/linux/qualcommax/patches-6.6/0058-v6.9-arm64-dts-qcom-ipq6018-add-tsens-node.patch b/target/linux/qualcommax/patches-6.6/0058-v6.9-arm64-dts-qcom-ipq6018-add-tsens-node.patch new file mode 100644 index 000000000..29d2de9b2 --- /dev/null +++ b/target/linux/qualcommax/patches-6.6/0058-v6.9-arm64-dts-qcom-ipq6018-add-tsens-node.patch @@ -0,0 +1,34 @@ +From 0b17197055b528da22e9385200e61b847b499d48 Mon Sep 17 00:00:00 2001 +From: Mantas Pucka +Date: Thu, 25 Jan 2024 11:04:11 +0200 +Subject: [PATCH] arm64: dts: qcom: ipq6018: add tsens node + +IPQ6018 has temperature sensing HW block compatible with IPQ8074. Add +node for it. + +Signed-off-by: Mantas Pucka +Link: https://lore.kernel.org/r/1706173452-1017-3-git-send-email-mantas@8devices.com +Signed-off-by: Bjorn Andersson +--- + arch/arm64/boot/dts/qcom/ipq6018.dtsi | 10 ++++++++++ + 1 file changed, 10 insertions(+) + +--- a/arch/arm64/boot/dts/qcom/ipq6018.dtsi ++++ b/arch/arm64/boot/dts/qcom/ipq6018.dtsi +@@ -343,6 +343,16 @@ + clock-names = "core"; + }; + ++ tsens: thermal-sensor@4a9000 { ++ compatible = "qcom,ipq6018-tsens", "qcom,ipq8074-tsens"; ++ reg = <0x0 0x004a9000 0x0 0x1000>, ++ <0x0 0x004a8000 0x0 0x1000>; ++ interrupts = ; ++ interrupt-names = "combined"; ++ #qcom,sensors = <16>; ++ #thermal-sensor-cells = <1>; ++ }; ++ + cryptobam: dma-controller@704000 { + compatible = "qcom,bam-v1.7.0"; + reg = <0x0 0x00704000 0x0 0x20000>; diff --git a/target/linux/qualcommax/patches-6.6/0059-v6.9-arm64-dts-qcom-ipq6018-add-thermal-zones.patch b/target/linux/qualcommax/patches-6.6/0059-v6.9-arm64-dts-qcom-ipq6018-add-thermal-zones.patch new file mode 100644 index 000000000..7e8c84558 --- /dev/null +++ b/target/linux/qualcommax/patches-6.6/0059-v6.9-arm64-dts-qcom-ipq6018-add-thermal-zones.patch @@ -0,0 +1,180 @@ +From 8f053e5616352943e16966f195f5a7a161e6fe7d Mon Sep 17 00:00:00 2001 +From: Mantas Pucka +Date: Thu, 25 Jan 2024 11:04:12 +0200 +Subject: [PATCH] arm64: dts: qcom: ipq6018: add thermal zones + +Add thermal zones to make use of thermal sensors data. For CPU zone, +add cooling device that uses CPU frequency scaling. 
+ +Signed-off-by: Mantas Pucka +Link: https://lore.kernel.org/r/1706173452-1017-4-git-send-email-mantas@8devices.com +Signed-off-by: Bjorn Andersson +--- + arch/arm64/boot/dts/qcom/ipq6018.dtsi | 121 ++++++++++++++++++++++++++ + 1 file changed, 121 insertions(+) + +--- a/arch/arm64/boot/dts/qcom/ipq6018.dtsi ++++ b/arch/arm64/boot/dts/qcom/ipq6018.dtsi +@@ -9,6 +9,7 @@ + #include + #include + #include ++#include + + / { + #address-cells = <2>; +@@ -43,6 +44,7 @@ + clock-names = "cpu"; + operating-points-v2 = <&cpu_opp_table>; + cpu-supply = <&ipq6018_s2>; ++ #cooling-cells = <2>; + }; + + CPU1: cpu@1 { +@@ -55,6 +57,7 @@ + clock-names = "cpu"; + operating-points-v2 = <&cpu_opp_table>; + cpu-supply = <&ipq6018_s2>; ++ #cooling-cells = <2>; + }; + + CPU2: cpu@2 { +@@ -67,6 +70,7 @@ + clock-names = "cpu"; + operating-points-v2 = <&cpu_opp_table>; + cpu-supply = <&ipq6018_s2>; ++ #cooling-cells = <2>; + }; + + CPU3: cpu@3 { +@@ -79,6 +83,7 @@ + clock-names = "cpu"; + operating-points-v2 = <&cpu_opp_table>; + cpu-supply = <&ipq6018_s2>; ++ #cooling-cells = <2>; + }; + + L2_0: l2-cache { +@@ -890,6 +895,122 @@ + }; + }; + ++ thermal-zones { ++ nss-top-thermal { ++ polling-delay-passive = <250>; ++ polling-delay = <1000>; ++ thermal-sensors = <&tsens 4>; ++ ++ trips { ++ nss-top-critical { ++ temperature = <125000>; ++ hysteresis = <1000>; ++ type = "critical"; ++ }; ++ }; ++ }; ++ ++ nss-thermal { ++ polling-delay-passive = <250>; ++ polling-delay = <1000>; ++ thermal-sensors = <&tsens 5>; ++ ++ trips { ++ nss-critical { ++ temperature = <125000>; ++ hysteresis = <1000>; ++ type = "critical"; ++ }; ++ }; ++ }; ++ ++ wcss-phya0-thermal { ++ polling-delay-passive = <250>; ++ polling-delay = <1000>; ++ thermal-sensors = <&tsens 7>; ++ ++ trips { ++ wcss-phya0-critical { ++ temperature = <125000>; ++ hysteresis = <1000>; ++ type = "critical"; ++ }; ++ }; ++ }; ++ ++ wcss-phya1-thermal { ++ polling-delay-passive = <250>; ++ polling-delay = <1000>; ++ thermal-sensors = <&tsens 8>; ++ ++ trips { ++ wcss-phya1-critical { ++ temperature = <125000>; ++ hysteresis = <1000>; ++ type = "critical"; ++ }; ++ }; ++ }; ++ ++ cpu-thermal { ++ polling-delay-passive = <250>; ++ polling-delay = <1000>; ++ thermal-sensors = <&tsens 13>; ++ ++ trips { ++ cpu-critical { ++ temperature = <125000>; ++ hysteresis = <1000>; ++ type = "critical"; ++ }; ++ ++ cpu_alert: cpu-passive { ++ temperature = <110000>; ++ hysteresis = <1000>; ++ type = "passive"; ++ }; ++ }; ++ ++ cooling-maps { ++ map0 { ++ trip = <&cpu_alert>; ++ cooling-device = <&CPU0 THERMAL_NO_LIMIT THERMAL_NO_LIMIT>, ++ <&CPU1 THERMAL_NO_LIMIT THERMAL_NO_LIMIT>, ++ <&CPU2 THERMAL_NO_LIMIT THERMAL_NO_LIMIT>, ++ <&CPU3 THERMAL_NO_LIMIT THERMAL_NO_LIMIT>; ++ }; ++ }; ++ }; ++ ++ lpass-thermal { ++ polling-delay-passive = <250>; ++ polling-delay = <1000>; ++ thermal-sensors = <&tsens 14>; ++ ++ trips { ++ lpass-critical { ++ temperature = <125000>; ++ hysteresis = <1000>; ++ type = "critical"; ++ }; ++ }; ++ }; ++ ++ ddrss-top-thermal { ++ polling-delay-passive = <250>; ++ polling-delay = <1000>; ++ thermal-sensors = <&tsens 15>; ++ ++ trips { ++ ddrss-top-critical { ++ temperature = <125000>; ++ hysteresis = <1000>; ++ type = "critical"; ++ }; ++ }; ++ }; ++ }; ++ + timer { + compatible = "arm,armv8-timer"; + interrupts = , diff --git a/target/linux/qualcommax/patches-6.6/0060-v6.9-clk-qcom-gcc-ipq6018-add-qdss_at-clock-needed-for-wi.patch b/target/linux/qualcommax/patches-6.6/0060-v6.9-clk-qcom-gcc-ipq6018-add-qdss_at-clock-needed-for-wi.patch new file mode 
100644 index 000000000..5f0af1352 --- /dev/null +++ b/target/linux/qualcommax/patches-6.6/0060-v6.9-clk-qcom-gcc-ipq6018-add-qdss_at-clock-needed-for-wi.patch @@ -0,0 +1,50 @@ +From fd712118aa1aa758da1fd1546b3f8a1b00e42cbc Mon Sep 17 00:00:00 2001 +From: Mantas Pucka +Date: Tue, 23 Jan 2024 11:26:09 +0200 +Subject: [PATCH] clk: qcom: gcc-ipq6018: add qdss_at clock needed for wifi + operation + +Without it system hangs upon wifi firmware load. It should be enabled by +remoteproc/wifi driver. Bindings already exist for it, so add it based +on vendor code. + +Signed-off-by: Mantas Pucka +Link: https://lore.kernel.org/r/1706001970-26032-1-git-send-email-mantas@8devices.com +Signed-off-by: Bjorn Andersson +--- + drivers/clk/qcom/gcc-ipq6018.c | 17 +++++++++++++++++ + 1 file changed, 17 insertions(+) + +--- a/drivers/clk/qcom/gcc-ipq6018.c ++++ b/drivers/clk/qcom/gcc-ipq6018.c +@@ -3524,6 +3524,22 @@ static struct clk_branch gcc_prng_ahb_cl + }, + }; + ++static struct clk_branch gcc_qdss_at_clk = { ++ .halt_reg = 0x29024, ++ .clkr = { ++ .enable_reg = 0x29024, ++ .enable_mask = BIT(0), ++ .hw.init = &(struct clk_init_data){ ++ .name = "gcc_qdss_at_clk", ++ .parent_hws = (const struct clk_hw *[]){ ++ &qdss_at_clk_src.clkr.hw }, ++ .num_parents = 1, ++ .flags = CLK_SET_RATE_PARENT, ++ .ops = &clk_branch2_ops, ++ }, ++ }, ++}; ++ + static struct clk_branch gcc_qdss_dap_clk = { + .halt_reg = 0x29084, + .clkr = { +@@ -4363,6 +4379,7 @@ static struct clk_regmap *gcc_ipq6018_cl + [GCC_SYS_NOC_PCIE0_AXI_CLK] = &gcc_sys_noc_pcie0_axi_clk.clkr, + [GCC_PCIE0_PIPE_CLK] = &gcc_pcie0_pipe_clk.clkr, + [GCC_PRNG_AHB_CLK] = &gcc_prng_ahb_clk.clkr, ++ [GCC_QDSS_AT_CLK] = &gcc_qdss_at_clk.clkr, + [GCC_QDSS_DAP_CLK] = &gcc_qdss_dap_clk.clkr, + [GCC_QPIC_AHB_CLK] = &gcc_qpic_ahb_clk.clkr, + [GCC_QPIC_CLK] = &gcc_qpic_clk.clkr, diff --git a/target/linux/qualcommax/patches-6.6/0061-v6.8-phy-qcom-qmp-usb-fix-serdes-init-sequence-for-IPQ6018.patch b/target/linux/qualcommax/patches-6.6/0061-v6.8-phy-qcom-qmp-usb-fix-serdes-init-sequence-for-IPQ6018.patch new file mode 100644 index 000000000..4082e3d85 --- /dev/null +++ b/target/linux/qualcommax/patches-6.6/0061-v6.8-phy-qcom-qmp-usb-fix-serdes-init-sequence-for-IPQ6018.patch @@ -0,0 +1,58 @@ +From 62a5df451ab911421da96655fcc4d1e269ff6e2f Mon Sep 17 00:00:00 2001 +From: Mantas Pucka +Date: Tue, 23 Jan 2024 18:09:20 +0200 +Subject: [PATCH] phy: qcom-qmp-usb: fix serdes init sequence for IPQ6018 + +Commit 23fd679249df ("phy: qcom-qmp: add USB3 PHY support for IPQ6018") +noted that IPQ6018 init is identical to IPQ8074. Yet downstream uses +separate serdes init sequence for IPQ6018. Since already existing IPQ9574 +serdes init sequence is identical, just reuse it and fix failing USB3 mode +in IPQ6018. 
+ +Fixes: 23fd679249df ("phy: qcom-qmp: add USB3 PHY support for IPQ6018") +Signed-off-by: Mantas Pucka +Reviewed-by: Dmitry Baryshkov +Link: https://lore.kernel.org/r/1706026160-17520-3-git-send-email-mantas@8devices.com +Signed-off-by: Vinod Koul +--- + drivers/phy/qualcomm/phy-qcom-qmp-usb.c | 20 +++++++++++++++++++- + 1 file changed, 19 insertions(+), 1 deletion(-) + +--- a/drivers/phy/qualcomm/phy-qcom-qmp-usb.c ++++ b/drivers/phy/qualcomm/phy-qcom-qmp-usb.c +@@ -1314,6 +1314,26 @@ static const struct qmp_usb_offsets qmp_ + .rx = 0x1000, + }; + ++static const struct qmp_phy_cfg ipq6018_usb3phy_cfg = { ++ .lanes = 1, ++ ++ .serdes_tbl = ipq9574_usb3_serdes_tbl, ++ .serdes_tbl_num = ARRAY_SIZE(ipq9574_usb3_serdes_tbl), ++ .tx_tbl = msm8996_usb3_tx_tbl, ++ .tx_tbl_num = ARRAY_SIZE(msm8996_usb3_tx_tbl), ++ .rx_tbl = ipq8074_usb3_rx_tbl, ++ .rx_tbl_num = ARRAY_SIZE(ipq8074_usb3_rx_tbl), ++ .pcs_tbl = ipq8074_usb3_pcs_tbl, ++ .pcs_tbl_num = ARRAY_SIZE(ipq8074_usb3_pcs_tbl), ++ .clk_list = msm8996_phy_clk_l, ++ .num_clks = ARRAY_SIZE(msm8996_phy_clk_l), ++ .reset_list = msm8996_usb3phy_reset_l, ++ .num_resets = ARRAY_SIZE(msm8996_usb3phy_reset_l), ++ .vreg_list = qmp_phy_vreg_l, ++ .num_vregs = ARRAY_SIZE(qmp_phy_vreg_l), ++ .regs = qmp_v3_usb3phy_regs_layout, ++}; ++ + static const struct qmp_phy_cfg ipq8074_usb3phy_cfg = { + .lanes = 1, + +@@ -2238,7 +2258,7 @@ err_node_put: + static const struct of_device_id qmp_usb_of_match_table[] = { + { + .compatible = "qcom,ipq6018-qmp-usb3-phy", +- .data = &ipq8074_usb3phy_cfg, ++ .data = &ipq6018_usb3phy_cfg, + }, { + .compatible = "qcom,ipq8074-qmp-usb3-phy", + .data = &ipq8074_usb3phy_cfg, diff --git a/target/linux/qualcommax/patches-6.6/0062-v6.8-arm64-dts-qcom-ipq8074-Add-QUP4-SPI-node.patch b/target/linux/qualcommax/patches-6.6/0062-v6.8-arm64-dts-qcom-ipq8074-Add-QUP4-SPI-node.patch new file mode 100644 index 000000000..16d243749 --- /dev/null +++ b/target/linux/qualcommax/patches-6.6/0062-v6.8-arm64-dts-qcom-ipq8074-Add-QUP4-SPI-node.patch @@ -0,0 +1,38 @@ +From 6a25e70214fde6dcf900271c819c8d7fe7b9a4b0 Mon Sep 17 00:00:00 2001 +From: Robert Marko +Date: Thu, 23 Nov 2023 13:12:54 +0100 +Subject: [PATCH] arm64: dts: qcom: ipq8074: Add QUP4 SPI node + +Add node to support the QUP4 SPI controller inside of IPQ8074. +Some devices use this bus to communicate to a Bluetooth controller. 
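Since the controller is added with status = "disabled", a board that actually wires a Bluetooth controller to this QUP still has to enable it and describe the slave in its own DTS. A hypothetical fragment, with a placeholder compatible and clock rate and with pinctrl omitted (none of these come from the patch):

    &blsp1_spi4 {
        status = "okay";

        bluetooth@0 {
            compatible = "vendor,example-bt";    /* placeholder */
            reg = <0>;
            spi-max-frequency = <1000000>;       /* placeholder */
        };
    };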
+ +Signed-off-by: Robert Marko +Link: https://lore.kernel.org/r/20231123121324.1046164-1-robimarko@gmail.com +Signed-off-by: Bjorn Andersson +--- + arch/arm64/boot/dts/qcom/ipq8074.dtsi | 14 ++++++++++++++ + 1 file changed, 14 insertions(+) + +--- a/arch/arm64/boot/dts/qcom/ipq8074.dtsi ++++ b/arch/arm64/boot/dts/qcom/ipq8074.dtsi +@@ -536,6 +536,20 @@ + status = "disabled"; + }; + ++ blsp1_spi4: spi@78b8000 { ++ compatible = "qcom,spi-qup-v2.2.1"; ++ #address-cells = <1>; ++ #size-cells = <0>; ++ reg = <0x78b8000 0x600>; ++ interrupts = ; ++ clocks = <&gcc GCC_BLSP1_QUP4_SPI_APPS_CLK>, ++ <&gcc GCC_BLSP1_AHB_CLK>; ++ clock-names = "core", "iface"; ++ dmas = <&blsp_dma 18>, <&blsp_dma 19>; ++ dma-names = "tx", "rx"; ++ status = "disabled"; ++ }; ++ + blsp1_i2c5: i2c@78b9000 { + compatible = "qcom,i2c-qup-v2.2.1"; + #address-cells = <1>; diff --git a/target/linux/qualcommax/patches-6.6/0063-v6.9-arm64-dts-qcom-ipq8074-Remove-unused-gpio-from-QPIC-.patch b/target/linux/qualcommax/patches-6.6/0063-v6.9-arm64-dts-qcom-ipq8074-Remove-unused-gpio-from-QPIC-.patch new file mode 100644 index 000000000..e075c590f --- /dev/null +++ b/target/linux/qualcommax/patches-6.6/0063-v6.9-arm64-dts-qcom-ipq8074-Remove-unused-gpio-from-QPIC-.patch @@ -0,0 +1,32 @@ +From 5f78d9213ae753e2242b0f6a5d4a5e98e55ddc76 Mon Sep 17 00:00:00 2001 +From: Paweł Owoc +Date: Wed, 13 Mar 2024 11:27:06 +0100 +Subject: [PATCH] arm64: dts: qcom: ipq8074: Remove unused gpio from QPIC pins + +gpio16 will only be used for LCD support, as its NAND/LCDC data[8] +so its bit 9 of the parallel QPIC interface, and ONFI NAND is only 8 +or 16-bit with only 8-bit one being supported in our case so that pin +is unused. + +It should be dropped from the default NAND pinctrl configuration +as its unused and only needed for LCD. + +Signed-off-by: Paweł Owoc +Reviewed-by: Kathiravan Thirumoorthy +Link: https://lore.kernel.org/r/20240313102713.1727458-1-frut3k7@gmail.com +Signed-off-by: Bjorn Andersson +--- + arch/arm64/boot/dts/qcom/ipq8074.dtsi | 2 +- + 1 file changed, 1 insertion(+), 1 deletion(-) + +--- a/arch/arm64/boot/dts/qcom/ipq8074.dtsi ++++ b/arch/arm64/boot/dts/qcom/ipq8074.dtsi +@@ -372,7 +372,7 @@ + "gpio5", "gpio6", "gpio7", + "gpio8", "gpio10", "gpio11", + "gpio12", "gpio13", "gpio14", +- "gpio15", "gpio16", "gpio17"; ++ "gpio15", "gpio17"; + function = "qpic"; + drive-strength = <8>; + bias-disable; diff --git a/target/linux/qualcommax/patches-6.6/0065-v6.7-arm64-dts-qcom-ipq5018-add-watchdog.patch b/target/linux/qualcommax/patches-6.6/0065-v6.7-arm64-dts-qcom-ipq5018-add-watchdog.patch new file mode 100644 index 000000000..cedf6cf02 --- /dev/null +++ b/target/linux/qualcommax/patches-6.6/0065-v6.7-arm64-dts-qcom-ipq5018-add-watchdog.patch @@ -0,0 +1,33 @@ +From 9cbaee8379e620f82112002f973adde19679df31 Mon Sep 17 00:00:00 2001 +From: Robert Marko +Date: Wed, 16 Aug 2023 18:14:00 +0200 +Subject: [PATCH] arm64: dts: qcom: ipq5018: add watchdog + +Add the required DT node for watchdog operation. 
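The node leaves the timeout at the driver default; a board that wants a different value can override it through the generic watchdog binding property, for example (the value here is only an example):

    &watchdog {
        timeout-sec = <30>;
    };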
+ +Signed-off-by: Robert Marko +Reviewed-by: Konrad Dybcio +Link: https://lore.kernel.org/r/20230816161455.3310629-2-robimarko@gmail.com +Signed-off-by: Bjorn Andersson +--- + arch/arm64/boot/dts/qcom/ipq5018.dtsi | 7 +++++++ + 1 file changed, 7 insertions(+) + +diff --git a/arch/arm64/boot/dts/qcom/ipq5018.dtsi b/arch/arm64/boot/dts/qcom/ipq5018.dtsi +index 9f13d2dcdfd589..288758c91379df 100644 +--- a/arch/arm64/boot/dts/qcom/ipq5018.dtsi ++++ b/arch/arm64/boot/dts/qcom/ipq5018.dtsi +@@ -181,6 +181,13 @@ + }; + }; + ++ watchdog: watchdog@b017000 { ++ compatible = "qcom,apss-wdt-ipq5018", "qcom,kpss-wdt"; ++ reg = <0x0b017000 0x40>; ++ interrupts = ; ++ clocks = <&sleep_clk>; ++ }; ++ + timer@b120000 { + compatible = "arm,armv7-timer-mem"; + reg = <0x0b120000 0x1000>; diff --git a/target/linux/qualcommax/patches-6.6/0066-v6.7-dt-bindings-firmware-qcom-scm-support-indicating-SDI-default-state.patch b/target/linux/qualcommax/patches-6.6/0066-v6.7-dt-bindings-firmware-qcom-scm-support-indicating-SDI-default-state.patch new file mode 100644 index 000000000..46d3c6139 --- /dev/null +++ b/target/linux/qualcommax/patches-6.6/0066-v6.7-dt-bindings-firmware-qcom-scm-support-indicating-SDI-default-state.patch @@ -0,0 +1,41 @@ +From 92dab9ea5f389c12828283146c60054642453a91 Mon Sep 17 00:00:00 2001 +From: Robert Marko +Date: Wed, 16 Aug 2023 18:45:38 +0200 +Subject: [PATCH] dt-bindings: firmware: qcom,scm: support indicating SDI + default state + +IPQ5018 has SDI (Secure Debug Image) enabled by TZ by default, and that +means that WDT being asserted or just trying to reboot will hang the board +in the debug mode and only pulling the power and repowering will help. +Some IPQ4019 boards like Google WiFI have it enabled as well. + +So, lets add a boolean property to indicate that SDI is enabled by default +and thus needs to be disabled by the kernel. + +Signed-off-by: Robert Marko +Acked-by: Mukesh Ojha +Reviewed-by: Krzysztof Kozlowski +Reviewed-by: Brian Norris +Link: https://lore.kernel.org/r/20230816164641.3371878-1-robimarko@gmail.com +Signed-off-by: Bjorn Andersson +--- + Documentation/devicetree/bindings/firmware/qcom,scm.yaml | 8 ++++++++ + 1 file changed, 8 insertions(+) + +--- a/Documentation/devicetree/bindings/firmware/qcom,scm.yaml ++++ b/Documentation/devicetree/bindings/firmware/qcom,scm.yaml +@@ -89,6 +89,14 @@ properties: + protocol to handle sleeping SCM calls. + maxItems: 1 + ++ qcom,sdi-enabled: ++ description: ++ Indicates that the SDI (Secure Debug Image) has been enabled by TZ ++ by default and it needs to be disabled. ++ If not disabled WDT assertion or reboot will cause the board to hang ++ in the debug mode. 
++ type: boolean ++ + qcom,dload-mode: + $ref: /schemas/types.yaml#/definitions/phandle-array + items: diff --git a/target/linux/qualcommax/patches-6.6/0067-v6.7-firmware-qcom-scm-disable-SDI-if-required.patch b/target/linux/qualcommax/patches-6.6/0067-v6.7-firmware-qcom-scm-disable-SDI-if-required.patch new file mode 100644 index 000000000..9df758ae1 --- /dev/null +++ b/target/linux/qualcommax/patches-6.6/0067-v6.7-firmware-qcom-scm-disable-SDI-if-required.patch @@ -0,0 +1,83 @@ +From ff4aa3bc98258a240b9bbab53fd8d2fb8184c485 Mon Sep 17 00:00:00 2001 +From: Robert Marko +Date: Wed, 16 Aug 2023 18:45:39 +0200 +Subject: [PATCH] firmware: qcom_scm: disable SDI if required + +IPQ5018 has SDI (Secure Debug Image) enabled by TZ by default, and that +means that WDT being asserted or just trying to reboot will hang the board +in the debug mode and only pulling the power and repowering will help. +Some IPQ4019 boards like Google WiFI have it enabled as well. + +Luckily, SDI can be disabled via an SCM call. + +So, lets use the boolean DT property to identify boards that have SDI +enabled by default and use the SCM call to disable SDI during SCM probe. +It is important to disable it as soon as possible as we might have a WDT +assertion at any time which would then leave the board in debug mode, +thus disabling it during SCM removal is not enough. + +Signed-off-by: Robert Marko +Reviewed-by: Guru Das Srinagesh +Link: https://lore.kernel.org/r/20230816164641.3371878-2-robimarko@gmail.com +Signed-off-by: Bjorn Andersson +--- + drivers/firmware/qcom_scm.c | 30 ++++++++++++++++++++++++++++++ + drivers/firmware/qcom_scm.h | 1 + + 2 files changed, 31 insertions(+) + +--- a/drivers/firmware/qcom_scm.c ++++ b/drivers/firmware/qcom_scm.c +@@ -410,6 +410,29 @@ int qcom_scm_set_remote_state(u32 state, + } + EXPORT_SYMBOL_GPL(qcom_scm_set_remote_state); + ++static int qcom_scm_disable_sdi(void) ++{ ++ int ret; ++ struct qcom_scm_desc desc = { ++ .svc = QCOM_SCM_SVC_BOOT, ++ .cmd = QCOM_SCM_BOOT_SDI_CONFIG, ++ .args[0] = 1, /* Disable watchdog debug */ ++ .args[1] = 0, /* Disable SDI */ ++ .arginfo = QCOM_SCM_ARGS(2), ++ .owner = ARM_SMCCC_OWNER_SIP, ++ }; ++ struct qcom_scm_res res; ++ ++ ret = qcom_scm_clk_enable(); ++ if (ret) ++ return ret; ++ ret = qcom_scm_call(__scm->dev, &desc, &res); ++ ++ qcom_scm_clk_disable(); ++ ++ return ret ? : res.result[0]; ++} ++ + static int __qcom_scm_set_dload_mode(struct device *dev, bool enable) + { + struct qcom_scm_desc desc = { +@@ -1473,6 +1496,13 @@ static int qcom_scm_probe(struct platfor + + __get_convention(); + ++ ++ /* ++ * Disable SDI if indicated by DT that it is enabled by default. 
++ */ ++ if (of_property_read_bool(pdev->dev.of_node, "qcom,sdi-enabled")) ++ qcom_scm_disable_sdi(); ++ + /* + * If requested enable "download mode", from this point on warmboot + * will cause the boot stages to enter download mode, unless +--- a/drivers/firmware/qcom_scm.h ++++ b/drivers/firmware/qcom_scm.h +@@ -80,6 +80,7 @@ extern int scm_legacy_call(struct device + #define QCOM_SCM_SVC_BOOT 0x01 + #define QCOM_SCM_BOOT_SET_ADDR 0x01 + #define QCOM_SCM_BOOT_TERMINATE_PC 0x02 ++#define QCOM_SCM_BOOT_SDI_CONFIG 0x09 + #define QCOM_SCM_BOOT_SET_DLOAD_MODE 0x10 + #define QCOM_SCM_BOOT_SET_ADDR_MC 0x11 + #define QCOM_SCM_BOOT_SET_REMOTE_STATE 0x0a diff --git a/target/linux/qualcommax/patches-6.6/0068-v6.7-dt-bindings-qcom-scm-document-IPQ5018-compatible.patch b/target/linux/qualcommax/patches-6.6/0068-v6.7-dt-bindings-qcom-scm-document-IPQ5018-compatible.patch new file mode 100644 index 000000000..d3c3fa857 --- /dev/null +++ b/target/linux/qualcommax/patches-6.6/0068-v6.7-dt-bindings-qcom-scm-document-IPQ5018-compatible.patch @@ -0,0 +1,27 @@ +From f6aa7386bc40b552eea8ec1b1d2168afe3b31110 Mon Sep 17 00:00:00 2001 +From: Robert Marko +Date: Wed, 16 Aug 2023 18:45:40 +0200 +Subject: [PATCH] dt-bindings: firmware: qcom,scm: document IPQ5018 compatible + +It seems that IPQ5018 compatible was never documented in the bindings. + +Signed-off-by: Robert Marko +Reviewed-by: Krzysztof Kozlowski +Link: https://lore.kernel.org/r/20230816164641.3371878-3-robimarko@gmail.com +Signed-off-by: Bjorn Andersson +--- + Documentation/devicetree/bindings/firmware/qcom,scm.yaml | 1 + + 1 file changed, 1 insertion(+) + +diff --git a/Documentation/devicetree/bindings/firmware/qcom,scm.yaml b/Documentation/devicetree/bindings/firmware/qcom,scm.yaml +index cb706145ae04c1..0613a37a851af4 100644 +--- a/Documentation/devicetree/bindings/firmware/qcom,scm.yaml ++++ b/Documentation/devicetree/bindings/firmware/qcom,scm.yaml +@@ -24,6 +24,7 @@ properties: + - qcom,scm-apq8064 + - qcom,scm-apq8084 + - qcom,scm-ipq4019 ++ - qcom,scm-ipq5018 + - qcom,scm-ipq5332 + - qcom,scm-ipq6018 + - qcom,scm-ipq806x diff --git a/target/linux/qualcommax/patches-6.6/0069-v6.7-arm64-dts-qcom-IPQ5018-indicate-that-SDI-shoud-be-disabled.patch b/target/linux/qualcommax/patches-6.6/0069-v6.7-arm64-dts-qcom-IPQ5018-indicate-that-SDI-shoud-be-disabled.patch new file mode 100644 index 000000000..93b33feee --- /dev/null +++ b/target/linux/qualcommax/patches-6.6/0069-v6.7-arm64-dts-qcom-IPQ5018-indicate-that-SDI-shoud-be-disabled.patch @@ -0,0 +1,28 @@ +From 79796e87215db9587d6c66ec6f6781e091bc6464 Mon Sep 17 00:00:00 2001 +From: Robert Marko +Date: Wed, 16 Aug 2023 18:45:41 +0200 +Subject: [PATCH] arm64: dts: qcom: ipq5018: indicate that SDI should be + disabled + +Now that SCM has support for indicating that SDI has been enabled by +default, lets set the property so SCM disables it during probing. 
+ +Signed-off-by: Robert Marko +Link: https://lore.kernel.org/r/20230816164641.3371878-4-robimarko@gmail.com +Signed-off-by: Bjorn Andersson +--- + arch/arm64/boot/dts/qcom/ipq5018.dtsi | 1 + + 1 file changed, 1 insertion(+) + +diff --git a/arch/arm64/boot/dts/qcom/ipq5018.dtsi b/arch/arm64/boot/dts/qcom/ipq5018.dtsi +index 288758c91379df..38ffdc3cbdcd7c 100644 +--- a/arch/arm64/boot/dts/qcom/ipq5018.dtsi ++++ b/arch/arm64/boot/dts/qcom/ipq5018.dtsi +@@ -57,6 +57,7 @@ + firmware { + scm { + compatible = "qcom,scm-ipq5018", "qcom,scm"; ++ qcom,sdi-enabled; + }; + }; + diff --git a/target/linux/qualcommax/patches-6.6/0070-v6.7-dt-bindings-phy-qcom-m31-Add-IPQ5018-compatible.patch b/target/linux/qualcommax/patches-6.6/0070-v6.7-dt-bindings-phy-qcom-m31-Add-IPQ5018-compatible.patch new file mode 100644 index 000000000..13e4fb103 --- /dev/null +++ b/target/linux/qualcommax/patches-6.6/0070-v6.7-dt-bindings-phy-qcom-m31-Add-IPQ5018-compatible.patch @@ -0,0 +1,30 @@ +From 1852dfaacd3f4358bbfca134b63a02bbb30c1136 Mon Sep 17 00:00:00 2001 +From: Nitheesh Sekar +Date: Mon, 4 Sep 2023 12:06:32 +0530 +Subject: [PATCH] dt-bindings: phy: qcom,m31: Add IPQ5018 compatible + +IPQ5332 qcom,m31 phy driver can support IPQ5018. + +Reviewed-by: Krzysztof Kozlowski +Signed-off-by: Nitheesh Sekar +Link: https://lore.kernel.org/r/20230904063635.24975-2-quic_nsekar@quicinc.com +Signed-off-by: Vinod Koul +--- + .../devicetree/bindings/phy/qcom,ipq5332-usb-hsphy.yaml | 4 +++- + 1 file changed, 3 insertions(+), 1 deletion(-) + +diff --git a/Documentation/devicetree/bindings/phy/qcom,ipq5332-usb-hsphy.yaml b/Documentation/devicetree/bindings/phy/qcom,ipq5332-usb-hsphy.yaml +index 2671a048c926c2..e77576d06c0e4e 100644 +--- a/Documentation/devicetree/bindings/phy/qcom,ipq5332-usb-hsphy.yaml ++++ b/Documentation/devicetree/bindings/phy/qcom,ipq5332-usb-hsphy.yaml +@@ -17,7 +17,9 @@ description: + properties: + compatible: + items: +- - const: qcom,ipq5332-usb-hsphy ++ - enum: ++ - qcom,ipq5018-usb-hsphy ++ - qcom,ipq5332-usb-hsphy + + "#phy-cells": + const: 0 diff --git a/target/linux/qualcommax/patches-6.6/0071-v6.7-phy-qcom-m31-Add-compatible-phy-init-sequence-for-IPQ5018.patch b/target/linux/qualcommax/patches-6.6/0071-v6.7-phy-qcom-m31-Add-compatible-phy-init-sequence-for-IPQ5018.patch new file mode 100644 index 000000000..248b47c69 --- /dev/null +++ b/target/linux/qualcommax/patches-6.6/0071-v6.7-phy-qcom-m31-Add-compatible-phy-init-sequence-for-IPQ5018.patch @@ -0,0 +1,89 @@ +From 68320e35f8cb1987b4ad34347fc7033832da99e3 Mon Sep 17 00:00:00 2001 +From: Nitheesh Sekar +Date: Mon, 4 Sep 2023 12:06:33 +0530 +Subject: [PATCH] phy: qcom-m31: Add compatible, phy init sequence for IPQ5018 + +Add phy init sequence and compatible string for IPQ5018 +chipset. 
+ +Signed-off-by: Nitheesh Sekar +Link: https://lore.kernel.org/r/20230904063635.24975-3-quic_nsekar@quicinc.com +Signed-off-by: Vinod Koul +--- + drivers/phy/qualcomm/phy-qcom-m31.c | 51 +++++++++++++++++++++++++++++ + 1 file changed, 51 insertions(+) + +--- a/drivers/phy/qualcomm/phy-qcom-m31.c ++++ b/drivers/phy/qualcomm/phy-qcom-m31.c +@@ -82,6 +82,50 @@ struct m31_priv_data { + unsigned int nregs; + }; + ++static const struct m31_phy_regs m31_ipq5018_regs[] = { ++ { ++ .off = USB_PHY_CFG0, ++ .val = UTMI_PHY_OVERRIDE_EN ++ }, ++ { ++ .off = USB_PHY_UTMI_CTRL5, ++ .val = POR_EN, ++ .delay = 15 ++ }, ++ { ++ .off = USB_PHY_FSEL_SEL, ++ .val = FREQ_SEL ++ }, ++ { ++ .off = USB_PHY_HS_PHY_CTRL_COMMON0, ++ .val = COMMONONN | FSEL | RETENABLEN ++ }, ++ { ++ .off = USB_PHY_REFCLK_CTRL, ++ .val = CLKCORE ++ }, ++ { ++ .off = USB_PHY_UTMI_CTRL5, ++ .val = POR_EN ++ }, ++ { ++ .off = USB_PHY_HS_PHY_CTRL2, ++ .val = USB2_SUSPEND_N_SEL | USB2_SUSPEND_N | USB2_UTMI_CLK_EN ++ }, ++ { ++ .off = USB_PHY_UTMI_CTRL5, ++ .val = 0x0 ++ }, ++ { ++ .off = USB_PHY_HS_PHY_CTRL2, ++ .val = USB2_SUSPEND_N | USB2_UTMI_CLK_EN ++ }, ++ { ++ .off = USB_PHY_CFG0, ++ .val = 0x0 ++ }, ++}; ++ + static struct m31_phy_regs m31_ipq5332_regs[] = { + { + USB_PHY_CFG0, +@@ -267,6 +311,12 @@ static int m31usb_phy_probe(struct platf + return PTR_ERR_OR_ZERO(phy_provider); + } + ++static const struct m31_priv_data m31_ipq5018_data = { ++ .ulpi_mode = false, ++ .regs = m31_ipq5018_regs, ++ .nregs = ARRAY_SIZE(m31_ipq5018_regs), ++}; ++ + static const struct m31_priv_data m31_ipq5332_data = { + .ulpi_mode = false, + .regs = m31_ipq5332_regs, +@@ -274,6 +324,7 @@ static const struct m31_priv_data m31_ip + }; + + static const struct of_device_id m31usb_phy_id_table[] = { ++ { .compatible = "qcom,ipq5018-usb-hsphy", .data = &m31_ipq5018_data }, + { .compatible = "qcom,ipq5332-usb-hsphy", .data = &m31_ipq5332_data }, + { }, + }; diff --git a/target/linux/qualcommax/patches-6.6/0072-v6.7-dt-bindings-usb-dwc3-Add-IPQ5018-compatible.patch b/target/linux/qualcommax/patches-6.6/0072-v6.7-dt-bindings-usb-dwc3-Add-IPQ5018-compatible.patch new file mode 100644 index 000000000..be6b3eac6 --- /dev/null +++ b/target/linux/qualcommax/patches-6.6/0072-v6.7-dt-bindings-usb-dwc3-Add-IPQ5018-compatible.patch @@ -0,0 +1,43 @@ +From 3865a64284cc4845c61cf3dc6c7246349d80cc49 Mon Sep 17 00:00:00 2001 +From: Nitheesh Sekar +Date: Thu, 31 Aug 2023 08:35:03 +0530 +Subject: [PATCH] dt-bindings: usb: dwc3: Add IPQ5018 compatible + +Document the IPQ5018 dwc3 compatible. 
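For context, the new string is used as the first entry of the IPQ5018 dwc3 glue node, with "qcom,dwc3" as the generic fallback; the full node (clocks, resets and the child snps,dwc3 controller) is added to ipq5018.dtsi by a later patch in this series. A trimmed sketch:

    usb: usb@8af8800 {
        compatible = "qcom,ipq5018-dwc3", "qcom,dwc3";
        /* clocks, resets and the snps,dwc3 child node omitted here */
    };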
+ +Reviewed-by: Krzysztof Kozlowski +Signed-off-by: Nitheesh Sekar +Link: https://lore.kernel.org/r/20230831030503.17100-1-quic_nsekar@quicinc.com +Signed-off-by: Greg Kroah-Hartman +--- + Documentation/devicetree/bindings/usb/qcom,dwc3.yaml | 3 +++ + 1 file changed, 3 insertions(+) + +diff --git a/Documentation/devicetree/bindings/usb/qcom,dwc3.yaml b/Documentation/devicetree/bindings/usb/qcom,dwc3.yaml +index 67591057f2349b..1ad62e55dfe2e2 100644 +--- a/Documentation/devicetree/bindings/usb/qcom,dwc3.yaml ++++ b/Documentation/devicetree/bindings/usb/qcom,dwc3.yaml +@@ -14,6 +14,7 @@ properties: + items: + - enum: + - qcom,ipq4019-dwc3 ++ - qcom,ipq5018-dwc3 + - qcom,ipq5332-dwc3 + - qcom,ipq6018-dwc3 + - qcom,ipq8064-dwc3 +@@ -238,6 +239,7 @@ allOf: + compatible: + contains: + enum: ++ - qcom,ipq5018-dwc3 + - qcom,ipq5332-dwc3 + - qcom,msm8994-dwc3 + - qcom,qcs404-dwc3 +@@ -411,6 +413,7 @@ allOf: + compatible: + contains: + enum: ++ - qcom,ipq5018-dwc3 + - qcom,ipq5332-dwc3 + - qcom,sdm660-dwc3 + then: diff --git a/target/linux/qualcommax/patches-6.6/0073-v6.8-arm64-dts-qcom-ipq5018-Add-USB-related-nodes.patch b/target/linux/qualcommax/patches-6.6/0073-v6.8-arm64-dts-qcom-ipq5018-Add-USB-related-nodes.patch new file mode 100644 index 000000000..38eaa2816 --- /dev/null +++ b/target/linux/qualcommax/patches-6.6/0073-v6.8-arm64-dts-qcom-ipq5018-Add-USB-related-nodes.patch @@ -0,0 +1,88 @@ +From e7166f2774aafefd29ff26ffbbb7f6d40ac8ea1c Mon Sep 17 00:00:00 2001 +From: Nitheesh Sekar +Date: Mon, 4 Sep 2023 12:06:34 +0530 +Subject: [PATCH] arm64: dts: qcom: ipq5018: Add USB related nodes + +Add USB phy and controller nodes. + +Co-developed-by: Amandeep Singh +Signed-off-by: Amandeep Singh +Signed-off-by: Nitheesh Sekar +Link: https://lore.kernel.org/r/20230904063635.24975-4-quic_nsekar@quicinc.com +Signed-off-by: Bjorn Andersson +--- + arch/arm64/boot/dts/qcom/ipq5018.dtsi | 54 +++++++++++++++++++++++++++ + 1 file changed, 54 insertions(+) + +diff --git a/arch/arm64/boot/dts/qcom/ipq5018.dtsi b/arch/arm64/boot/dts/qcom/ipq5018.dtsi +index 38ffdc3cbdcd7c..340b90cc17db85 100644 +--- a/arch/arm64/boot/dts/qcom/ipq5018.dtsi ++++ b/arch/arm64/boot/dts/qcom/ipq5018.dtsi +@@ -94,6 +94,19 @@ + #size-cells = <1>; + ranges = <0 0 0 0xffffffff>; + ++ usbphy0: phy@5b000 { ++ compatible = "qcom,ipq5018-usb-hsphy"; ++ reg = <0x0005b000 0x120>; ++ ++ clocks = <&gcc GCC_USB0_PHY_CFG_AHB_CLK>; ++ ++ resets = <&gcc GCC_QUSB2_0_PHY_BCR>; ++ ++ #phy-cells = <0>; ++ ++ status = "disabled"; ++ }; ++ + tlmm: pinctrl@1000000 { + compatible = "qcom,ipq5018-tlmm"; + reg = <0x01000000 0x300000>; +@@ -156,6 +169,47 @@ + status = "disabled"; + }; + ++ usb: usb@8af8800 { ++ compatible = "qcom,ipq5018-dwc3", "qcom,dwc3"; ++ reg = <0x08af8800 0x400>; ++ ++ interrupts = ; ++ interrupt-names = "hs_phy_irq"; ++ ++ clocks = <&gcc GCC_USB0_MASTER_CLK>, ++ <&gcc GCC_SYS_NOC_USB0_AXI_CLK>, ++ <&gcc GCC_USB0_SLEEP_CLK>, ++ <&gcc GCC_USB0_MOCK_UTMI_CLK>; ++ clock-names = "core", ++ "iface", ++ "sleep", ++ "mock_utmi"; ++ ++ resets = <&gcc GCC_USB0_BCR>; ++ ++ qcom,select-utmi-as-pipe-clk; ++ #address-cells = <1>; ++ #size-cells = <1>; ++ ranges; ++ ++ status = "disabled"; ++ ++ usb_dwc: usb@8a00000 { ++ compatible = "snps,dwc3"; ++ reg = <0x08a00000 0xe000>; ++ clocks = <&gcc GCC_USB0_MOCK_UTMI_CLK>; ++ clock-names = "ref"; ++ interrupts = ; ++ phy-names = "usb2-phy"; ++ phys = <&usbphy0>; ++ tx-fifo-resize; ++ snps,is-utmi-l1-suspend; ++ snps,hird-threshold = /bits/ 8 <0x0>; ++ snps,dis_u2_susphy_quirk; ++ snps,dis_u3_susphy_quirk; 
++ }; ++ }; ++ + intc: interrupt-controller@b000000 { + compatible = "qcom,msm-qgic2"; + reg = <0x0b000000 0x1000>, /* GICD */ diff --git a/target/linux/qualcommax/patches-6.6/0074-v6.8-arm64-dts-qcom-ipq5018-add-QUP1-SPI-controller.patch b/target/linux/qualcommax/patches-6.6/0074-v6.8-arm64-dts-qcom-ipq5018-add-QUP1-SPI-controller.patch new file mode 100644 index 000000000..c06cfa757 --- /dev/null +++ b/target/linux/qualcommax/patches-6.6/0074-v6.8-arm64-dts-qcom-ipq5018-add-QUP1-SPI-controller.patch @@ -0,0 +1,58 @@ +From a1f42e08f0f04b72a6597f080db4bfbb3737910c Mon Sep 17 00:00:00 2001 +From: Robert Marko +Date: Wed, 4 Oct 2023 21:12:30 +0200 +Subject: [PATCH] arm64: dts: qcom: ipq5018: add QUP1 SPI controller + +Add the required BAM and QUP nodes for the QUP1 SPI controller on IPQ5018. + +Signed-off-by: Robert Marko +Reviewed-by: Kathiravan Thirumoorthy +Link: https://lore.kernel.org/r/20231004191303.331055-1-robimarko@gmail.com +[bjorn: Padded address to 8 digits, fixed node sort order] +Signed-off-by: Bjorn Andersson +--- + arch/arm64/boot/dts/qcom/ipq5018.dtsi | 24 ++++++++++++++++++++++++ + 1 file changed, 24 insertions(+) + +diff --git a/arch/arm64/boot/dts/qcom/ipq5018.dtsi b/arch/arm64/boot/dts/qcom/ipq5018.dtsi +index 340b90cc17db85..0b739077ed7079 100644 +--- a/arch/arm64/boot/dts/qcom/ipq5018.dtsi ++++ b/arch/arm64/boot/dts/qcom/ipq5018.dtsi +@@ -159,6 +159,16 @@ + status = "disabled"; + }; + ++ blsp_dma: dma-controller@7884000 { ++ compatible = "qcom,bam-v1.7.0"; ++ reg = <0x07884000 0x1d000>; ++ interrupts = ; ++ clocks = <&gcc GCC_BLSP1_AHB_CLK>; ++ clock-names = "bam_clk"; ++ #dma-cells = <1>; ++ qcom,ee = <0>; ++ }; ++ + blsp1_uart1: serial@78af000 { + compatible = "qcom,msm-uartdm-v1.4", "qcom,msm-uartdm"; + reg = <0x078af000 0x200>; +@@ -169,6 +179,20 @@ + status = "disabled"; + }; + ++ blsp1_spi1: spi@78b5000 { ++ compatible = "qcom,spi-qup-v2.2.1"; ++ #address-cells = <1>; ++ #size-cells = <0>; ++ reg = <0x078b5000 0x600>; ++ interrupts = ; ++ clocks = <&gcc GCC_BLSP1_QUP1_SPI_APPS_CLK>, ++ <&gcc GCC_BLSP1_AHB_CLK>; ++ clock-names = "core", "iface"; ++ dmas = <&blsp_dma 4>, <&blsp_dma 5>; ++ dma-names = "tx", "rx"; ++ status = "disabled"; ++ }; ++ + usb: usb@8af8800 { + compatible = "qcom,ipq5018-dwc3", "qcom,dwc3"; + reg = <0x08af8800 0x400>; diff --git a/target/linux/qualcommax/patches-6.6/0075-v6.8-dt-bindings-clock-qcom-a53pll-add-IPQ5018-compatible.patch b/target/linux/qualcommax/patches-6.6/0075-v6.8-dt-bindings-clock-qcom-a53pll-add-IPQ5018-compatible.patch new file mode 100644 index 000000000..25e0f8253 --- /dev/null +++ b/target/linux/qualcommax/patches-6.6/0075-v6.8-dt-bindings-clock-qcom-a53pll-add-IPQ5018-compatible.patch @@ -0,0 +1,27 @@ +From 4d45d56e17348c6b6bb2bce126a4a5ea97b19900 Mon Sep 17 00:00:00 2001 +From: Gokul Sriram Palanisamy +Date: Mon, 25 Sep 2023 15:58:24 +0530 +Subject: [PATCH] dt-bindings: clock: qcom,a53pll: add IPQ5018 compatible + +Add IPQ5018 compatible to A53 PLL bindings. 
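The consumer node for this compatible is introduced by the IPQ5018 CPUFreq patch further down in this series; it is shown here only as a pointer to how the string ends up being used:

    a53pll: clock@b116000 {
        compatible = "qcom,ipq5018-a53pll";
        reg = <0x0b116000 0x40>;
        #clock-cells = <0>;
        clocks = <&xo_board_clk>;
        clock-names = "xo";
    };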
+ +Signed-off-by: Gokul Sriram Palanisamy +Reviewed-by: Krzysztof Kozlowski +Link: https://lore.kernel.org/r/20230925102826.405446-2-quic_gokulsri@quicinc.com +Signed-off-by: Bjorn Andersson +--- + Documentation/devicetree/bindings/clock/qcom,a53pll.yaml | 1 + + 1 file changed, 1 insertion(+) + +diff --git a/Documentation/devicetree/bindings/clock/qcom,a53pll.yaml b/Documentation/devicetree/bindings/clock/qcom,a53pll.yaml +index 9436266828afaf..5ca927a8b1d538 100644 +--- a/Documentation/devicetree/bindings/clock/qcom,a53pll.yaml ++++ b/Documentation/devicetree/bindings/clock/qcom,a53pll.yaml +@@ -16,6 +16,7 @@ description: + properties: + compatible: + enum: ++ - qcom,ipq5018-a53pll + - qcom,ipq5332-a53pll + - qcom,ipq6018-a53pll + - qcom,ipq8074-a53pll diff --git a/target/linux/qualcommax/patches-6.6/0076-v6.8-clk-qcom-apss-ipq-pll-add-support-for-IPQ5018.patch b/target/linux/qualcommax/patches-6.6/0076-v6.8-clk-qcom-apss-ipq-pll-add-support-for-IPQ5018.patch new file mode 100644 index 000000000..f8eb83659 --- /dev/null +++ b/target/linux/qualcommax/patches-6.6/0076-v6.8-clk-qcom-apss-ipq-pll-add-support-for-IPQ5018.patch @@ -0,0 +1,64 @@ +From 50492f929486c044b43cb3e2c0e040aa9b61ea2b Mon Sep 17 00:00:00 2001 +From: Gokul Sriram Palanisamy +Date: Mon, 25 Sep 2023 15:58:25 +0530 +Subject: [PATCH] clk: qcom: apss-ipq-pll: add support for IPQ5018 + +IPQ5018 APSS PLL is of type Stromer. Reuse Stromer Plus PLL offsets, +add configuration values and the compatible. + +Co-developed-by: Sricharan Ramabadhran +Signed-off-by: Sricharan Ramabadhran +Signed-off-by: Gokul Sriram Palanisamy +Reviewed-by: Dmitry Baryshkov +Link: https://lore.kernel.org/r/20230925102826.405446-3-quic_gokulsri@quicinc.com +Signed-off-by: Bjorn Andersson +--- + drivers/clk/qcom/apss-ipq-pll.c | 21 +++++++++++++++++++++ + 1 file changed, 21 insertions(+) + +diff --git a/drivers/clk/qcom/apss-ipq-pll.c b/drivers/clk/qcom/apss-ipq-pll.c +index 41279e5437a620..678b805f13d455 100644 +--- a/drivers/clk/qcom/apss-ipq-pll.c ++++ b/drivers/clk/qcom/apss-ipq-pll.c +@@ -73,6 +73,20 @@ static struct clk_alpha_pll ipq_pll_stromer_plus = { + }, + }; + ++static const struct alpha_pll_config ipq5018_pll_config = { ++ .l = 0x32, ++ .config_ctl_val = 0x4001075b, ++ .config_ctl_hi_val = 0x304, ++ .main_output_mask = BIT(0), ++ .aux_output_mask = BIT(1), ++ .early_output_mask = BIT(3), ++ .alpha_en_mask = BIT(24), ++ .status_val = 0x3, ++ .status_mask = GENMASK(10, 8), ++ .lock_det = BIT(2), ++ .test_ctl_hi_val = 0x00400003, ++}; ++ + static const struct alpha_pll_config ipq5332_pll_config = { + .l = 0x2d, + .config_ctl_val = 0x4001075b, +@@ -129,6 +143,12 @@ struct apss_pll_data { + const struct alpha_pll_config *pll_config; + }; + ++static const struct apss_pll_data ipq5018_pll_data = { ++ .pll_type = CLK_ALPHA_PLL_TYPE_STROMER_PLUS, ++ .pll = &ipq_pll_stromer_plus, ++ .pll_config = &ipq5018_pll_config, ++}; ++ + static struct apss_pll_data ipq5332_pll_data = { + .pll_type = CLK_ALPHA_PLL_TYPE_STROMER_PLUS, + .pll = &ipq_pll_stromer_plus, +@@ -195,6 +215,7 @@ static int apss_ipq_pll_probe(struct platform_device *pdev) + } + + static const struct of_device_id apss_ipq_pll_match_table[] = { ++ { .compatible = "qcom,ipq5018-a53pll", .data = &ipq5018_pll_data }, + { .compatible = "qcom,ipq5332-a53pll", .data = &ipq5332_pll_data }, + { .compatible = "qcom,ipq6018-a53pll", .data = &ipq6018_pll_data }, + { .compatible = "qcom,ipq8074-a53pll", .data = &ipq8074_pll_data }, diff --git 
a/target/linux/qualcommax/patches-6.6/0077-v6.8-arm64-dts-qcom-ipq5018-enable-the-CPUFreq-support.patch b/target/linux/qualcommax/patches-6.6/0077-v6.8-arm64-dts-qcom-ipq5018-enable-the-CPUFreq-support.patch new file mode 100644 index 000000000..8b2262d76 --- /dev/null +++ b/target/linux/qualcommax/patches-6.6/0077-v6.8-arm64-dts-qcom-ipq5018-enable-the-CPUFreq-support.patch @@ -0,0 +1,100 @@ +From 3e4b53e04281ed3d9c7a4329c027097265c04d54 Mon Sep 17 00:00:00 2001 +From: Gokul Sriram Palanisamy +Date: Mon, 25 Sep 2023 15:58:26 +0530 +Subject: [PATCH] arm64: dts: qcom: ipq5018: enable the CPUFreq support + +Add the APCS, A53 PLL, cpu-opp-table nodes to set +the CPU frequency at 800MHz (idle) or 1.008GHz. + +Co-developed-by: Sricharan Ramabadhran +Signed-off-by: Sricharan Ramabadhran +Signed-off-by: Gokul Sriram Palanisamy +Reviewed-by: Dmitry Baryshkov +Reviewed-by: Krzysztof Kozlowski +Link: https://lore.kernel.org/r/20230925102826.405446-4-quic_gokulsri@quicinc.com +Signed-off-by: Bjorn Andersson +--- + arch/arm64/boot/dts/qcom/ipq5018.dtsi | 40 +++++++++++++++++++++++++++ + 1 file changed, 40 insertions(+) + +diff --git a/arch/arm64/boot/dts/qcom/ipq5018.dtsi b/arch/arm64/boot/dts/qcom/ipq5018.dtsi +index 0b739077ed7079..ae31bd72f0b739 100644 +--- a/arch/arm64/boot/dts/qcom/ipq5018.dtsi ++++ b/arch/arm64/boot/dts/qcom/ipq5018.dtsi +@@ -5,6 +5,7 @@ + * Copyright (c) 2023 The Linux Foundation. All rights reserved. + */ + ++#include + #include + #include + #include +@@ -36,6 +37,8 @@ + reg = <0x0>; + enable-method = "psci"; + next-level-cache = <&L2_0>; ++ clocks = <&apcs_glb APCS_ALIAS0_CORE_CLK>; ++ operating-points-v2 = <&cpu_opp_table>; + }; + + CPU1: cpu@1 { +@@ -44,6 +47,8 @@ + reg = <0x1>; + enable-method = "psci"; + next-level-cache = <&L2_0>; ++ clocks = <&apcs_glb APCS_ALIAS0_CORE_CLK>; ++ operating-points-v2 = <&cpu_opp_table>; + }; + + L2_0: l2-cache { +@@ -54,6 +59,25 @@ + }; + }; + ++ cpu_opp_table: opp-table-cpu { ++ compatible = "operating-points-v2"; ++ opp-shared; ++ ++ /* ++ opp-800000000 { ++ opp-hz = /bits/ 64 <800000000>; ++ opp-microvolt = <1100000>; ++ clock-latency-ns = <200000>; ++ }; ++ */ ++ ++ opp-1008000000 { ++ opp-hz = /bits/ 64 <1008000000>; ++ opp-microvolt = <1100000>; ++ clock-latency-ns = <200000>; ++ }; ++ }; ++ + firmware { + scm { + compatible = "qcom,scm-ipq5018", "qcom,scm"; +@@ -267,6 +291,24 @@ + clocks = <&sleep_clk>; + }; + ++ apcs_glb: mailbox@b111000 { ++ compatible = "qcom,ipq5018-apcs-apps-global", ++ "qcom,ipq6018-apcs-apps-global"; ++ reg = <0x0b111000 0x1000>; ++ #clock-cells = <1>; ++ clocks = <&a53pll>, <&xo_board_clk>, <&gcc GPLL0>; ++ clock-names = "pll", "xo", "gpll0"; ++ #mbox-cells = <1>; ++ }; ++ ++ a53pll: clock@b116000 { ++ compatible = "qcom,ipq5018-a53pll"; ++ reg = <0x0b116000 0x40>; ++ #clock-cells = <0>; ++ clocks = <&xo_board_clk>; ++ clock-names = "xo"; ++ }; ++ + timer@b120000 { + compatible = "arm,armv7-timer-mem"; + reg = <0x0b120000 0x1000>; diff --git a/target/linux/qualcommax/patches-6.6/0078-v6.8-arm64-dts-qcom-ipq5018-add-few-more-reserved-memory-regions.patch b/target/linux/qualcommax/patches-6.6/0078-v6.8-arm64-dts-qcom-ipq5018-add-few-more-reserved-memory-regions.patch new file mode 100644 index 000000000..97631e715 --- /dev/null +++ b/target/linux/qualcommax/patches-6.6/0078-v6.8-arm64-dts-qcom-ipq5018-add-few-more-reserved-memory-regions.patch @@ -0,0 +1,66 @@ +From a427dd16e61f3d145bc24f0ed09692fc25931250 Mon Sep 17 00:00:00 2001 +From: Kathiravan Thirumoorthy +Date: Wed, 25 Oct 2023 22:12:12 +0530 +Subject: 
[PATCH] arm64: dts: qcom: ipq5018: add few more reserved memory + regions + +Like all other IPQ SoCs, bootloader will collect the system RAM contents +upon crash for the post morterm analysis. If we don't reserve the memory +region used by bootloader, obviously linux will consume it and upon next +boot on crash, bootloader will be loaded in the same region, which will +lead to loose some of the data, sometimes we may miss out critical +information. So lets reserve the region used by the bootloader. + +Similarly SBL copies some data into the reserved region and it will be +used in the crash scenario. So reserve 1MB for SBL as well. + +While at it, enable the SMEM support along with TCSR mutex. + +Signed-off-by: Kathiravan Thirumoorthy +Reviewed-by: Konrad Dybcio +Link: https://lore.kernel.org/r/20231025-ipq5018-misc-v1-1-7d14fde97fe7@quicinc.com +Signed-off-by: Bjorn Andersson +--- + arch/arm64/boot/dts/qcom/ipq5018.dtsi | 24 ++++++++++++++++++++++++ + 1 file changed, 24 insertions(+) + +--- a/arch/arm64/boot/dts/qcom/ipq5018.dtsi ++++ b/arch/arm64/boot/dts/qcom/ipq5018.dtsi +@@ -106,6 +106,24 @@ + #size-cells = <2>; + ranges; + ++ bootloader@4a800000 { ++ reg = <0x0 0x4a800000 0x0 0x200000>; ++ no-map; ++ }; ++ ++ sbl@4aa00000 { ++ reg = <0x0 0x4aa00000 0x0 0x100000>; ++ no-map; ++ }; ++ ++ smem@4ab00000 { ++ compatible = "qcom,smem"; ++ reg = <0x0 0x4ab00000 0x0 0x100000>; ++ no-map; ++ ++ hwlocks = <&tcsr_mutex 3>; ++ }; ++ + tz_region: tz@4ac00000 { + reg = <0x0 0x4ac00000 0x0 0x200000>; + no-map; +@@ -166,6 +184,12 @@ + #power-domain-cells = <1>; + }; + ++ tcsr_mutex: hwlock@1905000 { ++ compatible = "qcom,tcsr-mutex"; ++ reg = <0x01905000 0x20000>; ++ #hwlock-cells = <1>; ++ }; ++ + sdhc_1: mmc@7804000 { + compatible = "qcom,ipq5018-sdhci", "qcom,sdhci-msm-v5"; + reg = <0x7804000 0x1000>; diff --git a/target/linux/qualcommax/patches-6.6/0100-clk-qcom-clk-rcg2-introduce-support-for-multiple-con.patch b/target/linux/qualcommax/patches-6.6/0100-clk-qcom-clk-rcg2-introduce-support-for-multiple-con.patch new file mode 100644 index 000000000..54d16ba9f --- /dev/null +++ b/target/linux/qualcommax/patches-6.6/0100-clk-qcom-clk-rcg2-introduce-support-for-multiple-con.patch @@ -0,0 +1,203 @@ +From 032be4f49dda786fea9e1501212f6cd09a7ded96 Mon Sep 17 00:00:00 2001 +From: Christian Marangi +Date: Thu, 3 Nov 2022 14:49:43 +0100 +Subject: [PATCH] clk: qcom: clk-rcg2: introduce support for multiple conf for + same freq + +Some RCG frequency can be reached by multiple configuration. + +We currently declare multiple configuration for the same frequency but +that is not supported and always the first configuration will be taken. + +These multiple configuration are needed as based on the current parent +configuration, it may be needed to use a different configuration to +reach the same frequency. + +To handle this introduce 2 new macro, FM and C. + +- FM is used to declare an empty freq_tbl with just the frequency and an + array of confs to insert all the config for the provided frequency. + +- C is used to declare a fre_conf where src, pre_div, m and n are + provided. + +The driver is changed to handle this special freq_tbl and select the +correct config by calculating the final rate and deciding based on the +one that is less different than the requested one. 
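In practice the change reads like this: a plain F() entry pins one parent and divider to a frequency, while an FM() entry points at a small array of C() alternatives and lets the new clk_rcg2_select_conf() pick whichever configuration lands closest to the request given the current parent rates. A condensed sketch built from the macros introduced below, using the same parent symbols as the ipq8074 tables reworked in the next patch:

    /* 25 MHz can come from UNIPHY1 RX / 12.5 or from UNIPHY0 RX / 5 */
    static const struct freq_conf ftbl_nss_port5_rx_clk_src_25[] = {
        C(P_UNIPHY1_RX, 12.5, 0, 0),
        C(P_UNIPHY0_RX, 5, 0, 0),
    };

    static const struct freq_tbl ftbl_nss_port5_rx_clk_src[] = {
        F(19200000, P_XO, 1, 0, 0),
        FM(25000000, ftbl_nss_port5_rx_clk_src_25),
        /* remaining single-configuration rates unchanged */
        { }
    };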
+ +Tested-by: Robert Marko +Signed-off-by: Christian Marangi +--- + drivers/clk/qcom/clk-rcg.h | 14 ++++++- + drivers/clk/qcom/clk-rcg2.c | 84 +++++++++++++++++++++++++++++++++---- + 2 files changed, 88 insertions(+), 10 deletions(-) + +--- a/drivers/clk/qcom/clk-rcg.h ++++ b/drivers/clk/qcom/clk-rcg.h +@@ -7,7 +7,17 @@ + #include + #include "clk-regmap.h" + +-#define F(f, s, h, m, n) { (f), (s), (2 * (h) - 1), (m), (n) } ++#define F(f, s, h, m, n) { (f), (s), (2 * (h) - 1), (m), (n), 0, NULL } ++ ++#define FM(_f, _confs) { .freq = (_f), .confs_num = ARRAY_SIZE(_confs), .confs = (_confs) } ++#define C(s, h, m, n) { (s), (2 * (h) - 1), (m), (n) } ++ ++struct freq_conf { ++ u8 src; ++ u8 pre_div; ++ u16 m; ++ u16 n; ++}; + + struct freq_tbl { + unsigned long freq; +@@ -15,6 +25,8 @@ struct freq_tbl { + u8 pre_div; + u16 m; + u16 n; ++ int confs_num; ++ const struct freq_conf *confs; + }; + + /** +--- a/drivers/clk/qcom/clk-rcg2.c ++++ b/drivers/clk/qcom/clk-rcg2.c +@@ -203,11 +203,60 @@ clk_rcg2_recalc_rate(struct clk_hw *hw, + return __clk_rcg2_recalc_rate(hw, parent_rate, cfg); + } + ++static void ++clk_rcg2_select_conf(struct clk_hw *hw, struct freq_tbl *f_tbl, ++ const struct freq_tbl *f, unsigned long req_rate) ++{ ++ unsigned long best_rate = 0, parent_rate, rate; ++ const struct freq_conf *conf, *best_conf; ++ struct clk_rcg2 *rcg = to_clk_rcg2(hw); ++ struct clk_hw *p; ++ int index, i; ++ ++ /* Search in each provided config the one that is near the wanted rate */ ++ for (i = 0, conf = f->confs; i < f->confs_num; i++, conf++) { ++ index = qcom_find_src_index(hw, rcg->parent_map, conf->src); ++ if (index < 0) ++ continue; ++ ++ p = clk_hw_get_parent_by_index(hw, index); ++ if (!p) ++ continue; ++ ++ parent_rate = clk_hw_get_rate(p); ++ rate = calc_rate(parent_rate, conf->n, conf->m, conf->n, conf->pre_div); ++ ++ if (rate == req_rate) { ++ best_conf = conf; ++ break; ++ } ++ ++ if (abs(req_rate - rate) < abs(best_rate - rate)) { ++ best_rate = rate; ++ best_conf = conf; ++ } ++ } ++ ++ /* ++ * Very unlikely. ++ * Force the first conf if we can't find a correct config. ++ */ ++ if (unlikely(i == f->confs_num)) ++ best_conf = f->confs; ++ ++ /* Apply the config */ ++ f_tbl->src = best_conf->src; ++ f_tbl->pre_div = best_conf->pre_div; ++ f_tbl->m = best_conf->m; ++ f_tbl->n = best_conf->n; ++} ++ + static int _freq_tbl_determine_rate(struct clk_hw *hw, const struct freq_tbl *f, + struct clk_rate_request *req, + enum freq_policy policy) + { + unsigned long clk_flags, rate = req->rate; ++ struct freq_tbl f_tbl; + struct clk_hw *p; + struct clk_rcg2 *rcg = to_clk_rcg2(hw); + int index; +@@ -226,7 +275,15 @@ static int _freq_tbl_determine_rate(stru + if (!f) + return -EINVAL; + +- index = qcom_find_src_index(hw, rcg->parent_map, f->src); ++ f_tbl = *f; ++ /* ++ * A single freq may be reached by multiple configuration. ++ * Try to find the bast one if we have this kind of freq_table. 
++ */ ++ if (f->confs) ++ clk_rcg2_select_conf(hw, &f_tbl, f, rate); ++ ++ index = qcom_find_src_index(hw, rcg->parent_map, f_tbl.src); + if (index < 0) + return index; + +@@ -236,18 +293,18 @@ static int _freq_tbl_determine_rate(stru + return -EINVAL; + + if (clk_flags & CLK_SET_RATE_PARENT) { +- rate = f->freq; +- if (f->pre_div) { ++ rate = f_tbl.freq; ++ if (f_tbl.pre_div) { + if (!rate) + rate = req->rate; + rate /= 2; +- rate *= f->pre_div + 1; ++ rate *= f_tbl.pre_div + 1; + } + +- if (f->n) { ++ if (f_tbl.n) { + u64 tmp = rate; +- tmp = tmp * f->n; +- do_div(tmp, f->m); ++ tmp = tmp * f_tbl.n; ++ do_div(tmp, f_tbl.m); + rate = tmp; + } + } else { +@@ -255,7 +312,7 @@ static int _freq_tbl_determine_rate(stru + } + req->best_parent_hw = p; + req->best_parent_rate = rate; +- req->rate = f->freq; ++ req->rate = f_tbl.freq; + + return 0; + } +@@ -353,6 +410,7 @@ static int __clk_rcg2_set_rate(struct cl + { + struct clk_rcg2 *rcg = to_clk_rcg2(hw); + const struct freq_tbl *f; ++ struct freq_tbl f_tbl; + + switch (policy) { + case FLOOR: +@@ -368,7 +426,15 @@ static int __clk_rcg2_set_rate(struct cl + if (!f) + return -EINVAL; + +- return clk_rcg2_configure(rcg, f); ++ f_tbl = *f; ++ /* ++ * A single freq may be reached by multiple configuration. ++ * Try to find the best one if we have this kind of freq_table. ++ */ ++ if (f->confs) ++ clk_rcg2_select_conf(hw, &f_tbl, f, rate); ++ ++ return clk_rcg2_configure(rcg, &f_tbl); + } + + static int clk_rcg2_set_rate(struct clk_hw *hw, unsigned long rate, diff --git a/target/linux/qualcommax/patches-6.6/0101-clk-qcom-gcc-ipq8074-rework-nss_port5-6-clock-to-mul.patch b/target/linux/qualcommax/patches-6.6/0101-clk-qcom-gcc-ipq8074-rework-nss_port5-6-clock-to-mul.patch new file mode 100644 index 000000000..2f915e2a2 --- /dev/null +++ b/target/linux/qualcommax/patches-6.6/0101-clk-qcom-gcc-ipq8074-rework-nss_port5-6-clock-to-mul.patch @@ -0,0 +1,129 @@ +From f778553f296792f4d1e8b3552603ad6116ea3eb3 Mon Sep 17 00:00:00 2001 +From: Christian Marangi +Date: Thu, 3 Nov 2022 14:49:44 +0100 +Subject: [PATCH] clk: qcom: gcc-ipq8074: rework nss_port5/6 clock to multiple + conf + +Rework nss_port5/6 to use the new multiple configuration implementation +and correctly fix the clocks for these port under some corner case. + +This is particularly relevant for device that have 2.5G or 10G port +connected to port5 or port 6 on ipq8074. As the parent are shared +across multiple port it may be required to select the correct +configuration to accomplish the desired clock. Without this patch such +port doesn't work in some specific ethernet speed as the clock will be +set to the wrong frequency as we just select the first configuration for +the related frequency instead of selecting the best one. 
+ +Tested-by: Robert Marko # ipq8074 Qnap QHora-301W +Signed-off-by: Christian Marangi +--- + drivers/clk/qcom/gcc-ipq8074.c | 64 +++++++++++++++++++++++++--------- + 1 file changed, 48 insertions(+), 16 deletions(-) + +--- a/drivers/clk/qcom/gcc-ipq8074.c ++++ b/drivers/clk/qcom/gcc-ipq8074.c +@@ -1677,13 +1677,21 @@ static struct clk_regmap_div nss_port4_t + }, + }; + ++static const struct freq_conf ftbl_nss_port5_rx_clk_src_25[] = { ++ C(P_UNIPHY1_RX, 12.5, 0, 0), ++ C(P_UNIPHY0_RX, 5, 0, 0), ++}; ++ ++static const struct freq_conf ftbl_nss_port5_rx_clk_src_125[] = { ++ C(P_UNIPHY1_RX, 2.5, 0, 0), ++ C(P_UNIPHY0_RX, 1, 0, 0), ++}; ++ + static const struct freq_tbl ftbl_nss_port5_rx_clk_src[] = { + F(19200000, P_XO, 1, 0, 0), +- F(25000000, P_UNIPHY1_RX, 12.5, 0, 0), +- F(25000000, P_UNIPHY0_RX, 5, 0, 0), ++ FM(25000000, ftbl_nss_port5_rx_clk_src_25), + F(78125000, P_UNIPHY1_RX, 4, 0, 0), +- F(125000000, P_UNIPHY1_RX, 2.5, 0, 0), +- F(125000000, P_UNIPHY0_RX, 1, 0, 0), ++ FM(125000000, ftbl_nss_port5_rx_clk_src_125), + F(156250000, P_UNIPHY1_RX, 2, 0, 0), + F(312500000, P_UNIPHY1_RX, 1, 0, 0), + { } +@@ -1739,13 +1747,21 @@ static struct clk_regmap_div nss_port5_r + }, + }; + ++static struct freq_conf ftbl_nss_port5_tx_clk_src_25[] = { ++ C(P_UNIPHY1_TX, 12.5, 0, 0), ++ C(P_UNIPHY0_TX, 5, 0, 0), ++}; ++ ++static struct freq_conf ftbl_nss_port5_tx_clk_src_125[] = { ++ C(P_UNIPHY1_TX, 2.5, 0, 0), ++ C(P_UNIPHY0_TX, 1, 0, 0), ++}; ++ + static const struct freq_tbl ftbl_nss_port5_tx_clk_src[] = { + F(19200000, P_XO, 1, 0, 0), +- F(25000000, P_UNIPHY1_TX, 12.5, 0, 0), +- F(25000000, P_UNIPHY0_TX, 5, 0, 0), ++ FM(25000000, ftbl_nss_port5_tx_clk_src_25), + F(78125000, P_UNIPHY1_TX, 4, 0, 0), +- F(125000000, P_UNIPHY1_TX, 2.5, 0, 0), +- F(125000000, P_UNIPHY0_TX, 1, 0, 0), ++ FM(125000000, ftbl_nss_port5_tx_clk_src_125), + F(156250000, P_UNIPHY1_TX, 2, 0, 0), + F(312500000, P_UNIPHY1_TX, 1, 0, 0), + { } +@@ -1801,13 +1817,21 @@ static struct clk_regmap_div nss_port5_t + }, + }; + ++static struct freq_conf ftbl_nss_port6_rx_clk_src_25[] = { ++ C(P_UNIPHY2_RX, 5, 0, 0), ++ C(P_UNIPHY2_RX, 12.5, 0, 0), ++}; ++ ++static struct freq_conf ftbl_nss_port6_rx_clk_src_125[] = { ++ C(P_UNIPHY2_RX, 1, 0, 0), ++ C(P_UNIPHY2_RX, 2.5, 0, 0), ++}; ++ + static const struct freq_tbl ftbl_nss_port6_rx_clk_src[] = { + F(19200000, P_XO, 1, 0, 0), +- F(25000000, P_UNIPHY2_RX, 5, 0, 0), +- F(25000000, P_UNIPHY2_RX, 12.5, 0, 0), ++ FM(25000000, ftbl_nss_port6_rx_clk_src_25), + F(78125000, P_UNIPHY2_RX, 4, 0, 0), +- F(125000000, P_UNIPHY2_RX, 1, 0, 0), +- F(125000000, P_UNIPHY2_RX, 2.5, 0, 0), ++ FM(125000000, ftbl_nss_port6_rx_clk_src_125), + F(156250000, P_UNIPHY2_RX, 2, 0, 0), + F(312500000, P_UNIPHY2_RX, 1, 0, 0), + { } +@@ -1858,13 +1882,21 @@ static struct clk_regmap_div nss_port6_r + }, + }; + ++static struct freq_conf ftbl_nss_port6_tx_clk_src_25[] = { ++ C(P_UNIPHY2_TX, 5, 0, 0), ++ C(P_UNIPHY2_TX, 12.5, 0, 0), ++}; ++ ++static struct freq_conf ftbl_nss_port6_tx_clk_src_125[] = { ++ C(P_UNIPHY2_TX, 1, 0, 0), ++ C(P_UNIPHY2_TX, 2.5, 0, 0), ++}; ++ + static const struct freq_tbl ftbl_nss_port6_tx_clk_src[] = { + F(19200000, P_XO, 1, 0, 0), +- F(25000000, P_UNIPHY2_TX, 5, 0, 0), +- F(25000000, P_UNIPHY2_TX, 12.5, 0, 0), ++ FM(25000000, ftbl_nss_port6_tx_clk_src_25), + F(78125000, P_UNIPHY2_TX, 4, 0, 0), +- F(125000000, P_UNIPHY2_TX, 1, 0, 0), +- F(125000000, P_UNIPHY2_TX, 2.5, 0, 0), ++ FM(125000000, ftbl_nss_port6_tx_clk_src_125), + F(156250000, P_UNIPHY2_TX, 2, 0, 0), + F(312500000, P_UNIPHY2_TX, 1, 0, 0), + { } 
diff --git a/target/linux/qualcommax/patches-6.6/0102-arm64-dts-ipq8074-add-reserved-memory-nodes.patch b/target/linux/qualcommax/patches-6.6/0102-arm64-dts-ipq8074-add-reserved-memory-nodes.patch new file mode 100644 index 000000000..6d97641f6 --- /dev/null +++ b/target/linux/qualcommax/patches-6.6/0102-arm64-dts-ipq8074-add-reserved-memory-nodes.patch @@ -0,0 +1,60 @@ +From ad2d07f71739351eeea1d8a120c0918e2c4b265f Mon Sep 17 00:00:00 2001 +From: Robert Marko +Date: Wed, 22 Dec 2021 12:23:34 +0100 +Subject: [PATCH] arm64: dts: ipq8074: add reserved memory nodes + +IPQ8074 has multiple reserved memory ranges, if they are not defined +then weird things tend to happen, board hangs and resets when PCI or +WLAN is used etc. + +So, to avoid all of that add the reserved memory nodes from the downstream +5.4 kernel from QCA. +This is their default layout meant for devices with 1GB of RAM, but +devices with lower ammounts can override the Q6 node. + +Signed-off-by: Robert Marko +--- + arch/arm64/boot/dts/qcom/ipq8074.dtsi | 35 +++++++++++++++++++++++++++ + 1 file changed, 35 insertions(+) + +--- a/arch/arm64/boot/dts/qcom/ipq8074.dtsi ++++ b/arch/arm64/boot/dts/qcom/ipq8074.dtsi +@@ -86,6 +86,16 @@ + #size-cells = <2>; + ranges; + ++ nss@40000000 { ++ no-map; ++ reg = <0x0 0x40000000 0x0 0x01000000>; ++ }; ++ ++ tzapp_region: tzapp@4a400000 { ++ no-map; ++ reg = <0x0 0x4a400000 0x0 0x00200000>; ++ }; ++ + bootloader@4a600000 { + reg = <0x0 0x4a600000 0x0 0x400000>; + no-map; +@@ -108,6 +118,21 @@ + reg = <0x0 0x4ac00000 0x0 0x400000>; + no-map; + }; ++ ++ q6_region: wcnss@4b000000 { ++ no-map; ++ reg = <0x0 0x4b000000 0x0 0x05f00000>; ++ }; ++ ++ q6_etr_region: q6_etr_dump@50f00000 { ++ no-map; ++ reg = <0x0 0x50f00000 0x0 0x00100000>; ++ }; ++ ++ m3_dump_region: m3_dump@51000000 { ++ no-map; ++ reg = <0x0 0x51000000 0x0 0x100000>; ++ }; + }; + + firmware { diff --git a/target/linux/qualcommax/patches-6.6/0110-arm64-dts-qcom-ipq8074-pass-QMP-PCI-PHY-PIPE-clocks-.patch b/target/linux/qualcommax/patches-6.6/0110-arm64-dts-qcom-ipq8074-pass-QMP-PCI-PHY-PIPE-clocks-.patch new file mode 100644 index 000000000..9753fa84d --- /dev/null +++ b/target/linux/qualcommax/patches-6.6/0110-arm64-dts-qcom-ipq8074-pass-QMP-PCI-PHY-PIPE-clocks-.patch @@ -0,0 +1,30 @@ +From 8a576b5bc9f0555d1d970cacabcaa24a3b74fa57 Mon Sep 17 00:00:00 2001 +From: Robert Marko +Date: Wed, 16 Nov 2022 22:15:01 +0100 +Subject: [PATCH] arm64: dts: qcom: ipq8074: pass QMP PCI PHY PIPE clocks to + GCC + +Pass QMP PCI PHY PIPE clocks to the GCC controller so it does not have to +find them by matching globaly by name. + +If not passed directly, driver maintains backwards compatibility by then +falling back to global lookup. 
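The "global lookup" fallback mentioned here is the clock framework's clk_parent_data handling: when a parent is described with both .fw_name and .name, a DT-supplied "pcie0_pipe"/"pcie1_pipe" entry in clock-names takes precedence, and only if it is absent does the framework fall back to the legacy global clock name. A rough sketch of such a parent entry in a GCC driver; the .name string below is illustrative and not copied from gcc-ipq8074.c:

    static const struct clk_parent_data gcc_pcie0_pipe_parent_data[] = {
        /* DT "clock-names" entry first, global clock name as fallback */
        { .fw_name = "pcie0_pipe", .name = "pcie20_phy0_pipe_clk" },
    };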
+ +Signed-off-by: Robert Marko +--- + arch/arm64/boot/dts/qcom/ipq8074.dtsi | 4 ++-- + 1 file changed, 2 insertions(+), 2 deletions(-) + +--- a/arch/arm64/boot/dts/qcom/ipq8074.dtsi ++++ b/arch/arm64/boot/dts/qcom/ipq8074.dtsi +@@ -407,8 +407,8 @@ + gcc: gcc@1800000 { + compatible = "qcom,gcc-ipq8074"; + reg = <0x01800000 0x80000>; +- clocks = <&xo>, <&sleep_clk>; +- clock-names = "xo", "sleep_clk"; ++ clocks = <&xo>, <&sleep_clk>, <&pcie_phy0>, <&pcie_phy1>; ++ clock-names = "xo", "sleep_clk", "pcie0_pipe", "pcie1_pipe"; + #clock-cells = <1>; + #power-domain-cells = <1>; + #reset-cells = <1>; diff --git a/target/linux/qualcommax/patches-6.6/0111-arm64-dts-qcom-ipq8074-use-msi-parent-for-PCIe.patch b/target/linux/qualcommax/patches-6.6/0111-arm64-dts-qcom-ipq8074-use-msi-parent-for-PCIe.patch new file mode 100644 index 000000000..fdf7f84b9 --- /dev/null +++ b/target/linux/qualcommax/patches-6.6/0111-arm64-dts-qcom-ipq8074-use-msi-parent-for-PCIe.patch @@ -0,0 +1,43 @@ +From fb1f6850be00d8dd8a54017be4c1336e224069ac Mon Sep 17 00:00:00 2001 +From: Robert Marko +Date: Wed, 16 Nov 2022 22:26:25 +0100 +Subject: [PATCH] arm64: dts: qcom: ipq8074: use msi-parent for PCIe + +Instead of hardcoding the IRQ, simply use msi-parent instead. + +Signed-off-by: Robert Marko +--- + arch/arm64/boot/dts/qcom/ipq8074.dtsi | 8 +++----- + 1 file changed, 3 insertions(+), 5 deletions(-) + +--- a/arch/arm64/boot/dts/qcom/ipq8074.dtsi ++++ b/arch/arm64/boot/dts/qcom/ipq8074.dtsi +@@ -757,7 +757,7 @@ + reg = <0x0b000000 0x1000>, <0x0b002000 0x1000>; + ranges = <0 0xb00a000 0xffd>; + +- v2m@0 { ++ gic_v2m0: v2m@0 { + compatible = "arm,gic-v2m-frame"; + msi-controller; + reg = <0x0 0xffd>; +@@ -870,8 +870,7 @@ + ranges = <0x81000000 0x0 0x00000000 0x10200000 0x0 0x10000>, /* I/O */ + <0x82000000 0x0 0x10220000 0x10220000 0x0 0xfde0000>; /* MEM */ + +- interrupts = ; +- interrupt-names = "msi"; ++ msi-parent = <&gic_v2m0>; + #interrupt-cells = <1>; + interrupt-map-mask = <0 0 0 0x7>; + interrupt-map = <0 0 0 1 &intc 0 0 142 +@@ -932,8 +931,7 @@ + ranges = <0x81000000 0x0 0x00000000 0x20200000 0x0 0x10000>, /* I/O */ + <0x82000000 0x0 0x20220000 0x20220000 0x0 0xfde0000>; /* MEM */ + +- interrupts = ; +- interrupt-names = "msi"; ++ msi-parent = <&gic_v2m0>; + #interrupt-cells = <1>; + interrupt-map-mask = <0 0 0 0x7>; + interrupt-map = <0 0 0 1 &intc 0 0 75 diff --git a/target/linux/qualcommax/patches-6.6/0112-remoteproc-qcom-Add-PRNG-proxy-clock.patch b/target/linux/qualcommax/patches-6.6/0112-remoteproc-qcom-Add-PRNG-proxy-clock.patch new file mode 100644 index 000000000..d3664f293 --- /dev/null +++ b/target/linux/qualcommax/patches-6.6/0112-remoteproc-qcom-Add-PRNG-proxy-clock.patch @@ -0,0 +1,155 @@ +From 125681433c8e526356947acf572fe8ca8ad32291 Mon Sep 17 00:00:00 2001 +From: Gokul Sriram Palanisamy +Date: Sat, 30 Jan 2021 10:50:05 +0530 +Subject: [PATCH] remoteproc: qcom: Add PRNG proxy clock + +PRNG clock is needed by the secure PIL, support for the same +is added in subsequent patches. 
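The ipq8074_init_clock() helper added below fetches this clock with devm_clk_get(wcss->dev, "prng"), so the WCSS remoteproc node is expected to carry a matching clocks/clock-names pair. A hypothetical consumer-side fragment (the unit address and the GCC clock chosen as the source are assumptions, not part of this patch):

    q6v5_wcss: remoteproc@cd00000 {
        /* existing properties omitted */
        clocks = <&gcc GCC_PRNG_AHB_CLK>;
        clock-names = "prng";
    };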
+ +Signed-off-by: Gokul Sriram Palanisamy +Signed-off-by: Sricharan R +Signed-off-by: Nikhil Prakash V +--- + drivers/remoteproc/qcom_q6v5_wcss.c | 65 +++++++++++++++++++++-------- + 1 file changed, 47 insertions(+), 18 deletions(-) + +--- a/drivers/remoteproc/qcom_q6v5_wcss.c ++++ b/drivers/remoteproc/qcom_q6v5_wcss.c +@@ -91,19 +91,6 @@ enum { + WCSS_QCS404, + }; + +-struct wcss_data { +- const char *firmware_name; +- unsigned int crash_reason_smem; +- u32 version; +- bool aon_reset_required; +- bool wcss_q6_reset_required; +- const char *ssr_name; +- const char *sysmon_name; +- int ssctl_id; +- const struct rproc_ops *ops; +- bool requires_force_stop; +-}; +- + struct q6v5_wcss { + struct device *dev; + +@@ -128,6 +115,7 @@ struct q6v5_wcss { + struct clk *qdsp6ss_xo_cbcr; + struct clk *qdsp6ss_core_gfmux; + struct clk *lcc_bcr_sleep; ++ struct clk *prng_clk; + struct regulator *cx_supply; + struct qcom_sysmon *sysmon; + +@@ -151,6 +139,21 @@ struct q6v5_wcss { + struct qcom_rproc_ssr ssr_subdev; + }; + ++struct wcss_data { ++ int (*init_clock)(struct q6v5_wcss *wcss); ++ int (*init_regulator)(struct q6v5_wcss *wcss); ++ const char *firmware_name; ++ unsigned int crash_reason_smem; ++ u32 version; ++ bool aon_reset_required; ++ bool wcss_q6_reset_required; ++ const char *ssr_name; ++ const char *sysmon_name; ++ int ssctl_id; ++ const struct rproc_ops *ops; ++ bool requires_force_stop; ++}; ++ + static int q6v5_wcss_reset(struct q6v5_wcss *wcss) + { + int ret; +@@ -240,6 +243,12 @@ static int q6v5_wcss_start(struct rproc + struct q6v5_wcss *wcss = rproc->priv; + int ret; + ++ ret = clk_prepare_enable(wcss->prng_clk); ++ if (ret) { ++ dev_err(wcss->dev, "prng clock enable failed\n"); ++ return ret; ++ } ++ + qcom_q6v5_prepare(&wcss->q6v5); + + /* Release Q6 and WCSS reset */ +@@ -733,6 +742,7 @@ static int q6v5_wcss_stop(struct rproc * + return ret; + } + ++ clk_disable_unprepare(wcss->prng_clk); + qcom_q6v5_unprepare(&wcss->q6v5); + + return 0; +@@ -899,7 +909,21 @@ static int q6v5_alloc_memory_region(stru + return 0; + } + +-static int q6v5_wcss_init_clock(struct q6v5_wcss *wcss) ++static int ipq8074_init_clock(struct q6v5_wcss *wcss) ++{ ++ int ret; ++ ++ wcss->prng_clk = devm_clk_get(wcss->dev, "prng"); ++ if (IS_ERR(wcss->prng_clk)) { ++ ret = PTR_ERR(wcss->prng_clk); ++ if (ret != -EPROBE_DEFER) ++ dev_err(wcss->dev, "Failed to get prng clock\n"); ++ return ret; ++ } ++ return 0; ++} ++ ++static int qcs404_init_clock(struct q6v5_wcss *wcss) + { + int ret; + +@@ -989,7 +1013,7 @@ static int q6v5_wcss_init_clock(struct q + return 0; + } + +-static int q6v5_wcss_init_regulator(struct q6v5_wcss *wcss) ++static int qcs404_init_regulator(struct q6v5_wcss *wcss) + { + wcss->cx_supply = devm_regulator_get(wcss->dev, "cx"); + if (IS_ERR(wcss->cx_supply)) +@@ -1033,12 +1057,14 @@ static int q6v5_wcss_probe(struct platfo + if (ret) + goto free_rproc; + +- if (wcss->version == WCSS_QCS404) { +- ret = q6v5_wcss_init_clock(wcss); ++ if (desc->init_clock) { ++ ret = desc->init_clock(wcss); + if (ret) + goto free_rproc; ++ } + +- ret = q6v5_wcss_init_regulator(wcss); ++ if (desc->init_regulator) { ++ ret = desc->init_regulator(wcss); + if (ret) + goto free_rproc; + } +@@ -1084,6 +1110,7 @@ static void q6v5_wcss_remove(struct plat + } + + static const struct wcss_data wcss_ipq8074_res_init = { ++ .init_clock = ipq8074_init_clock, + .firmware_name = "IPQ8074/q6_fw.mdt", + .crash_reason_smem = WCSS_CRASH_REASON, + .aon_reset_required = true, +@@ -1093,6 +1120,8 @@ static const struct wcss_data 
wcss_ipq80 + }; + + static const struct wcss_data wcss_qcs404_res_init = { ++ .init_clock = qcs404_init_clock, ++ .init_regulator = qcs404_init_regulator, + .crash_reason_smem = WCSS_CRASH_REASON, + .firmware_name = "wcnss.mdt", + .version = WCSS_QCS404, diff --git a/target/linux/qualcommax/patches-6.6/0113-remoteproc-qcom-Add-secure-PIL-support.patch b/target/linux/qualcommax/patches-6.6/0113-remoteproc-qcom-Add-secure-PIL-support.patch new file mode 100644 index 000000000..ef2a35bbd --- /dev/null +++ b/target/linux/qualcommax/patches-6.6/0113-remoteproc-qcom-Add-secure-PIL-support.patch @@ -0,0 +1,143 @@ +From 7358d42dfbdfdb5d4f1d0d4c2e5c2bb4143a29b0 Mon Sep 17 00:00:00 2001 +From: Gokul Sriram Palanisamy +Date: Sat, 30 Jan 2021 10:50:06 +0530 +Subject: [PATCH] remoteproc: qcom: Add secure PIL support + +IPQ8074 uses secure PIL. Hence, adding the support for the same. + +Signed-off-by: Gokul Sriram Palanisamy +Signed-off-by: Sricharan R +Signed-off-by: Nikhil Prakash V +--- + drivers/remoteproc/qcom_q6v5_wcss.c | 43 +++++++++++++++++++++++++++-- + 1 file changed, 40 insertions(+), 3 deletions(-) + +--- a/drivers/remoteproc/qcom_q6v5_wcss.c ++++ b/drivers/remoteproc/qcom_q6v5_wcss.c +@@ -18,6 +18,7 @@ + #include + #include + #include ++#include + #include "qcom_common.h" + #include "qcom_pil_info.h" + #include "qcom_q6v5.h" +@@ -86,6 +87,9 @@ + #define TCSR_WCSS_CLK_ENABLE 0x14 + + #define MAX_HALT_REG 3 ++ ++#define WCNSS_PAS_ID 6 ++ + enum { + WCSS_IPQ8074, + WCSS_QCS404, +@@ -134,6 +138,7 @@ struct q6v5_wcss { + unsigned int crash_reason_smem; + u32 version; + bool requires_force_stop; ++ bool need_mem_protection; + + struct qcom_rproc_glink glink_subdev; + struct qcom_rproc_ssr ssr_subdev; +@@ -152,6 +157,7 @@ struct wcss_data { + int ssctl_id; + const struct rproc_ops *ops; + bool requires_force_stop; ++ bool need_mem_protection; + }; + + static int q6v5_wcss_reset(struct q6v5_wcss *wcss) +@@ -251,6 +257,15 @@ static int q6v5_wcss_start(struct rproc + + qcom_q6v5_prepare(&wcss->q6v5); + ++ if (wcss->need_mem_protection) { ++ ret = qcom_scm_pas_auth_and_reset(WCNSS_PAS_ID); ++ if (ret) { ++ dev_err(wcss->dev, "wcss_reset failed\n"); ++ return ret; ++ } ++ goto wait_for_reset; ++ } ++ + /* Release Q6 and WCSS reset */ + ret = reset_control_deassert(wcss->wcss_reset); + if (ret) { +@@ -285,6 +300,7 @@ static int q6v5_wcss_start(struct rproc + if (ret) + goto wcss_q6_reset; + ++wait_for_reset: + ret = qcom_q6v5_wait_for_start(&wcss->q6v5, 5 * HZ); + if (ret == -ETIMEDOUT) + dev_err(wcss->dev, "start timed out\n"); +@@ -718,6 +734,15 @@ static int q6v5_wcss_stop(struct rproc * + struct q6v5_wcss *wcss = rproc->priv; + int ret; + ++ if (wcss->need_mem_protection) { ++ ret = qcom_scm_pas_shutdown(WCNSS_PAS_ID); ++ if (ret) { ++ dev_err(wcss->dev, "not able to shutdown\n"); ++ return ret; ++ } ++ goto pas_done; ++ } ++ + /* WCSS powerdown */ + if (wcss->requires_force_stop) { + ret = qcom_q6v5_request_stop(&wcss->q6v5, NULL); +@@ -742,6 +767,7 @@ static int q6v5_wcss_stop(struct rproc * + return ret; + } + ++pas_done: + clk_disable_unprepare(wcss->prng_clk); + qcom_q6v5_unprepare(&wcss->q6v5); + +@@ -765,9 +791,15 @@ static int q6v5_wcss_load(struct rproc * + struct q6v5_wcss *wcss = rproc->priv; + int ret; + +- ret = qcom_mdt_load_no_init(wcss->dev, fw, rproc->firmware, +- 0, wcss->mem_region, wcss->mem_phys, +- wcss->mem_size, &wcss->mem_reloc); ++ if (wcss->need_mem_protection) ++ ret = qcom_mdt_load(wcss->dev, fw, rproc->firmware, ++ WCNSS_PAS_ID, wcss->mem_region, ++ wcss->mem_phys, 
wcss->mem_size, ++ &wcss->mem_reloc); ++ else ++ ret = qcom_mdt_load_no_init(wcss->dev, fw, rproc->firmware, ++ 0, wcss->mem_region, wcss->mem_phys, ++ wcss->mem_size, &wcss->mem_reloc); + if (ret) + return ret; + +@@ -1035,6 +1067,9 @@ static int q6v5_wcss_probe(struct platfo + if (!desc) + return -EINVAL; + ++ if (desc->need_mem_protection && !qcom_scm_is_available()) ++ return -EPROBE_DEFER; ++ + rproc = rproc_alloc(&pdev->dev, pdev->name, desc->ops, + desc->firmware_name, sizeof(*wcss)); + if (!rproc) { +@@ -1048,6 +1083,7 @@ static int q6v5_wcss_probe(struct platfo + + wcss->version = desc->version; + wcss->requires_force_stop = desc->requires_force_stop; ++ wcss->need_mem_protection = desc->need_mem_protection; + + ret = q6v5_wcss_init_mmio(wcss, pdev); + if (ret) +@@ -1117,6 +1153,7 @@ static const struct wcss_data wcss_ipq80 + .wcss_q6_reset_required = true, + .ops = &q6v5_wcss_ipq8074_ops, + .requires_force_stop = true, ++ .need_mem_protection = true, + }; + + static const struct wcss_data wcss_qcs404_res_init = { diff --git a/target/linux/qualcommax/patches-6.6/0114-remoteproc-qcom-Add-support-for-split-q6-m3-wlan-fir.patch b/target/linux/qualcommax/patches-6.6/0114-remoteproc-qcom-Add-support-for-split-q6-m3-wlan-fir.patch new file mode 100644 index 000000000..f7e576cf8 --- /dev/null +++ b/target/linux/qualcommax/patches-6.6/0114-remoteproc-qcom-Add-support-for-split-q6-m3-wlan-fir.patch @@ -0,0 +1,103 @@ +From b422c9d4f048b086ce83f44a7cfcddcce162897f Mon Sep 17 00:00:00 2001 +From: Gokul Sriram Palanisamy +Date: Sat, 30 Jan 2021 10:50:07 +0530 +Subject: [PATCH] remoteproc: qcom: Add support for split q6 + m3 wlan firmware + +IPQ8074 supports split firmware for q6 and m3 as well. +So add support for loading the m3 firmware before q6. +Now the drivers works fine for both split and unified +firmwares. 
+ +Signed-off-by: Gokul Sriram Palanisamy +Signed-off-by: Sricharan R +Signed-off-by: Nikhil Prakash V +--- + drivers/remoteproc/qcom_q6v5_wcss.c | 33 +++++++++++++++++++++++++---- + 1 file changed, 29 insertions(+), 4 deletions(-) + +--- a/drivers/remoteproc/qcom_q6v5_wcss.c ++++ b/drivers/remoteproc/qcom_q6v5_wcss.c +@@ -139,6 +139,7 @@ struct q6v5_wcss { + u32 version; + bool requires_force_stop; + bool need_mem_protection; ++ const char *m3_firmware_name; + + struct qcom_rproc_glink glink_subdev; + struct qcom_rproc_ssr ssr_subdev; +@@ -147,7 +148,8 @@ struct q6v5_wcss { + struct wcss_data { + int (*init_clock)(struct q6v5_wcss *wcss); + int (*init_regulator)(struct q6v5_wcss *wcss); +- const char *firmware_name; ++ const char *q6_firmware_name; ++ const char *m3_firmware_name; + unsigned int crash_reason_smem; + u32 version; + bool aon_reset_required; +@@ -789,8 +791,29 @@ static void *q6v5_wcss_da_to_va(struct r + static int q6v5_wcss_load(struct rproc *rproc, const struct firmware *fw) + { + struct q6v5_wcss *wcss = rproc->priv; ++ const struct firmware *m3_fw; + int ret; + ++ if (wcss->m3_firmware_name) { ++ ret = request_firmware(&m3_fw, wcss->m3_firmware_name, ++ wcss->dev); ++ if (ret) ++ goto skip_m3; ++ ++ ret = qcom_mdt_load_no_init(wcss->dev, m3_fw, ++ wcss->m3_firmware_name, 0, ++ wcss->mem_region, wcss->mem_phys, ++ wcss->mem_size, &wcss->mem_reloc); ++ ++ release_firmware(m3_fw); ++ ++ if (ret) { ++ dev_err(wcss->dev, "can't load m3_fw.bXX\n"); ++ return ret; ++ } ++ } ++ ++skip_m3: + if (wcss->need_mem_protection) + ret = qcom_mdt_load(wcss->dev, fw, rproc->firmware, + WCNSS_PAS_ID, wcss->mem_region, +@@ -1071,7 +1094,7 @@ static int q6v5_wcss_probe(struct platfo + return -EPROBE_DEFER; + + rproc = rproc_alloc(&pdev->dev, pdev->name, desc->ops, +- desc->firmware_name, sizeof(*wcss)); ++ desc->q6_firmware_name, sizeof(*wcss)); + if (!rproc) { + dev_err(&pdev->dev, "failed to allocate rproc\n"); + return -ENOMEM; +@@ -1084,6 +1107,7 @@ static int q6v5_wcss_probe(struct platfo + wcss->version = desc->version; + wcss->requires_force_stop = desc->requires_force_stop; + wcss->need_mem_protection = desc->need_mem_protection; ++ wcss->m3_firmware_name = desc->m3_firmware_name; + + ret = q6v5_wcss_init_mmio(wcss, pdev); + if (ret) +@@ -1147,7 +1171,8 @@ static void q6v5_wcss_remove(struct plat + + static const struct wcss_data wcss_ipq8074_res_init = { + .init_clock = ipq8074_init_clock, +- .firmware_name = "IPQ8074/q6_fw.mdt", ++ .q6_firmware_name = "IPQ8074/q6_fw.mdt", ++ .m3_firmware_name = "IPQ8074/m3_fw.mdt", + .crash_reason_smem = WCSS_CRASH_REASON, + .aon_reset_required = true, + .wcss_q6_reset_required = true, +@@ -1160,7 +1185,7 @@ static const struct wcss_data wcss_qcs40 + .init_clock = qcs404_init_clock, + .init_regulator = qcs404_init_regulator, + .crash_reason_smem = WCSS_CRASH_REASON, +- .firmware_name = "wcnss.mdt", ++ .q6_firmware_name = "wcnss.mdt", + .version = WCSS_QCS404, + .aon_reset_required = false, + .wcss_q6_reset_required = false, diff --git a/target/linux/qualcommax/patches-6.6/0115-remoteproc-qcom-Add-ssr-subdevice-identifier.patch b/target/linux/qualcommax/patches-6.6/0115-remoteproc-qcom-Add-ssr-subdevice-identifier.patch new file mode 100644 index 000000000..7a07b561e --- /dev/null +++ b/target/linux/qualcommax/patches-6.6/0115-remoteproc-qcom-Add-ssr-subdevice-identifier.patch @@ -0,0 +1,24 @@ +From 3a8f67b4770c817b04794c9a02e3f88f85d86280 Mon Sep 17 00:00:00 2001 +From: Gokul Sriram Palanisamy +Date: Sat, 30 Jan 2021 10:50:08 +0530 +Subject: 
[PATCH] remoteproc: qcom: Add ssr subdevice identifier + +Add name for ssr subdevice on IPQ8074 SoC. + +Signed-off-by: Gokul Sriram Palanisamy +Signed-off-by: Sricharan R +Signed-off-by: Nikhil Prakash V +--- + drivers/remoteproc/qcom_q6v5_wcss.c | 1 + + 1 file changed, 1 insertion(+) + +--- a/drivers/remoteproc/qcom_q6v5_wcss.c ++++ b/drivers/remoteproc/qcom_q6v5_wcss.c +@@ -1176,6 +1176,7 @@ static const struct wcss_data wcss_ipq80 + .crash_reason_smem = WCSS_CRASH_REASON, + .aon_reset_required = true, + .wcss_q6_reset_required = true, ++ .ssr_name = "q6wcss", + .ops = &q6v5_wcss_ipq8074_ops, + .requires_force_stop = true, + .need_mem_protection = true, diff --git a/target/linux/qualcommax/patches-6.6/0116-remoteproc-qcom-Update-regmap-offsets-for-halt-regis.patch b/target/linux/qualcommax/patches-6.6/0116-remoteproc-qcom-Update-regmap-offsets-for-halt-regis.patch new file mode 100644 index 000000000..7ef6884e4 --- /dev/null +++ b/target/linux/qualcommax/patches-6.6/0116-remoteproc-qcom-Update-regmap-offsets-for-halt-regis.patch @@ -0,0 +1,79 @@ +From 8c73af6e8d78c66cfef0f551b00d375ec0b67ff3 Mon Sep 17 00:00:00 2001 +From: Gokul Sriram Palanisamy +Date: Sat, 30 Jan 2021 10:50:09 +0530 +Subject: [PATCH] remoteproc: qcom: Update regmap offsets for halt register + +Fixed issue in reading halt-regs parameter from device-tree. + +Signed-off-by: Gokul Sriram Palanisamy +Signed-off-by: Sricharan R +--- + drivers/remoteproc/qcom_q6v5_wcss.c | 22 ++++++++++++++-------- + 1 file changed, 14 insertions(+), 8 deletions(-) + +--- a/drivers/remoteproc/qcom_q6v5_wcss.c ++++ b/drivers/remoteproc/qcom_q6v5_wcss.c +@@ -86,7 +86,7 @@ + #define TCSR_WCSS_CLK_MASK 0x1F + #define TCSR_WCSS_CLK_ENABLE 0x14 + +-#define MAX_HALT_REG 3 ++#define MAX_HALT_REG 4 + + #define WCNSS_PAS_ID 6 + +@@ -154,6 +154,7 @@ struct wcss_data { + u32 version; + bool aon_reset_required; + bool wcss_q6_reset_required; ++ bool bcr_reset_required; + const char *ssr_name; + const char *sysmon_name; + int ssctl_id; +@@ -875,10 +876,13 @@ static int q6v5_wcss_init_reset(struct q + } + } + +- wcss->wcss_q6_bcr_reset = devm_reset_control_get_exclusive(dev, "wcss_q6_bcr_reset"); +- if (IS_ERR(wcss->wcss_q6_bcr_reset)) { +- dev_err(wcss->dev, "unable to acquire wcss_q6_bcr_reset\n"); +- return PTR_ERR(wcss->wcss_q6_bcr_reset); ++ if (desc->bcr_reset_required) { ++ wcss->wcss_q6_bcr_reset = devm_reset_control_get_exclusive(dev, ++ "wcss_q6_bcr_reset"); ++ if (IS_ERR(wcss->wcss_q6_bcr_reset)) { ++ dev_err(wcss->dev, "unable to acquire wcss_q6_bcr_reset\n"); ++ return PTR_ERR(wcss->wcss_q6_bcr_reset); ++ } + } + + return 0; +@@ -928,9 +932,9 @@ static int q6v5_wcss_init_mmio(struct q6 + return -EINVAL; + } + +- wcss->halt_q6 = halt_reg[0]; +- wcss->halt_wcss = halt_reg[1]; +- wcss->halt_nc = halt_reg[2]; ++ wcss->halt_q6 = halt_reg[1]; ++ wcss->halt_wcss = halt_reg[2]; ++ wcss->halt_nc = halt_reg[3]; + + return 0; + } +@@ -1176,6 +1180,7 @@ static const struct wcss_data wcss_ipq80 + .crash_reason_smem = WCSS_CRASH_REASON, + .aon_reset_required = true, + .wcss_q6_reset_required = true, ++ .bcr_reset_required = false, + .ssr_name = "q6wcss", + .ops = &q6v5_wcss_ipq8074_ops, + .requires_force_stop = true, +@@ -1190,6 +1195,7 @@ static const struct wcss_data wcss_qcs40 + .version = WCSS_QCS404, + .aon_reset_required = false, + .wcss_q6_reset_required = false, ++ .bcr_reset_required = true, + .ssr_name = "mpss", + .sysmon_name = "wcnss", + .ssctl_id = 0x12, diff --git 
a/target/linux/qualcommax/patches-6.6/0117-dt-bindings-clock-qcom-Add-reset-for-WCSSAON.patch b/target/linux/qualcommax/patches-6.6/0117-dt-bindings-clock-qcom-Add-reset-for-WCSSAON.patch new file mode 100644 index 000000000..fe0e0f9e0 --- /dev/null +++ b/target/linux/qualcommax/patches-6.6/0117-dt-bindings-clock-qcom-Add-reset-for-WCSSAON.patch @@ -0,0 +1,26 @@ +From ff7c6533ed8c4de58ed6c8aab03ea59c03eb4f31 Mon Sep 17 00:00:00 2001 +From: Gokul Sriram Palanisamy +Date: Sat, 30 Jan 2021 10:50:10 +0530 +Subject: [PATCH] dt-bindings: clock: qcom: Add reset for WCSSAON + +Add binding for WCSSAON reset required for Q6v5 reset on IPQ8074 SoC. + +Signed-off-by: Gokul Sriram Palanisamy +Signed-off-by: Sricharan R +Signed-off-by: Nikhil Prakash V +Acked-by: Rob Herring +Acked-by: Stephen Boyd +--- + include/dt-bindings/clock/qcom,gcc-ipq8074.h | 1 + + 1 file changed, 1 insertion(+) + +--- a/include/dt-bindings/clock/qcom,gcc-ipq8074.h ++++ b/include/dt-bindings/clock/qcom,gcc-ipq8074.h +@@ -381,6 +381,7 @@ + #define GCC_NSSPORT4_RESET 143 + #define GCC_NSSPORT5_RESET 144 + #define GCC_NSSPORT6_RESET 145 ++#define GCC_WCSSAON_RESET 146 + + #define USB0_GDSC 0 + #define USB1_GDSC 1 diff --git a/target/linux/qualcommax/patches-6.6/0118-clk-qcom-Add-WCSSAON-reset.patch b/target/linux/qualcommax/patches-6.6/0118-clk-qcom-Add-WCSSAON-reset.patch new file mode 100644 index 000000000..7938b1823 --- /dev/null +++ b/target/linux/qualcommax/patches-6.6/0118-clk-qcom-Add-WCSSAON-reset.patch @@ -0,0 +1,25 @@ +From 43d9788f546d24df22d8ba3fcc2497d7ccc198f3 Mon Sep 17 00:00:00 2001 +From: Gokul Sriram Palanisamy +Date: Sat, 30 Jan 2021 10:50:11 +0530 +Subject: [PATCH] clk: qcom: Add WCSSAON reset + +Add WCSSAON reset required for Q6v5 on IPQ8074 SoC. + +Signed-off-by: Gokul Sriram Palanisamy +Signed-off-by: Sricharan R +Signed-off-by: Nikhil Prakash V +Acked-by: Stephen Boyd +--- + drivers/clk/qcom/gcc-ipq8074.c | 1 + + 1 file changed, 1 insertion(+) + +--- a/drivers/clk/qcom/gcc-ipq8074.c ++++ b/drivers/clk/qcom/gcc-ipq8074.c +@@ -4712,6 +4712,7 @@ static const struct qcom_reset_map gcc_i + [GCC_NSSPORT4_RESET] = { .reg = 0x68014, .bitmask = BIT(27) | GENMASK(9, 8) }, + [GCC_NSSPORT5_RESET] = { .reg = 0x68014, .bitmask = BIT(28) | GENMASK(11, 10) }, + [GCC_NSSPORT6_RESET] = { .reg = 0x68014, .bitmask = BIT(29) | GENMASK(13, 12) }, ++ [GCC_WCSSAON_RESET] = { 0x59010, 0 }, + }; + + static struct gdsc *gcc_ipq8074_gdscs[] = { diff --git a/target/linux/qualcommax/patches-6.6/0119-remoteproc-wcss-disable-auto-boot-for-IPQ8074.patch b/target/linux/qualcommax/patches-6.6/0119-remoteproc-wcss-disable-auto-boot-for-IPQ8074.patch new file mode 100644 index 000000000..ecd87ac7a --- /dev/null +++ b/target/linux/qualcommax/patches-6.6/0119-remoteproc-wcss-disable-auto-boot-for-IPQ8074.patch @@ -0,0 +1,48 @@ +From 406a332fd1bcc4e18d73cce390f56272fe9111d7 Mon Sep 17 00:00:00 2001 +From: Sivaprakash Murugesan +Date: Fri, 17 Apr 2020 16:37:10 +0530 +Subject: [PATCH] remoteproc: wcss: disable auto boot for IPQ8074 + +There is no need for remoteproc to boot automatically, ath11k will trigger +booting when its probing. 
+ +Signed-off-by: Sivaprakash Murugesan +Signed-off-by: Robert Marko +--- + drivers/remoteproc/qcom_q6v5_wcss.c | 4 ++++ + 1 file changed, 4 insertions(+) + +--- a/drivers/remoteproc/qcom_q6v5_wcss.c ++++ b/drivers/remoteproc/qcom_q6v5_wcss.c +@@ -161,6 +161,7 @@ struct wcss_data { + const struct rproc_ops *ops; + bool requires_force_stop; + bool need_mem_protection; ++ bool need_auto_boot; + }; + + static int q6v5_wcss_reset(struct q6v5_wcss *wcss) +@@ -1149,6 +1150,7 @@ static int q6v5_wcss_probe(struct platfo + desc->sysmon_name, + desc->ssctl_id); + ++ rproc->auto_boot = desc->need_auto_boot; + ret = rproc_add(rproc); + if (ret) + goto free_rproc; +@@ -1185,6 +1187,7 @@ static const struct wcss_data wcss_ipq80 + .ops = &q6v5_wcss_ipq8074_ops, + .requires_force_stop = true, + .need_mem_protection = true, ++ .need_auto_boot = false, + }; + + static const struct wcss_data wcss_qcs404_res_init = { +@@ -1201,6 +1204,7 @@ static const struct wcss_data wcss_qcs40 + .ssctl_id = 0x12, + .ops = &q6v5_wcss_qcs404_ops, + .requires_force_stop = false, ++ .need_auto_boot = true, + }; + + static const struct of_device_id q6v5_wcss_of_match[] = { diff --git a/target/linux/qualcommax/patches-6.6/0120-arm64-dts-qcom-Enable-Q6v5-WCSS-for-ipq8074-SoC.patch b/target/linux/qualcommax/patches-6.6/0120-arm64-dts-qcom-Enable-Q6v5-WCSS-for-ipq8074-SoC.patch new file mode 100644 index 000000000..07f514410 --- /dev/null +++ b/target/linux/qualcommax/patches-6.6/0120-arm64-dts-qcom-Enable-Q6v5-WCSS-for-ipq8074-SoC.patch @@ -0,0 +1,120 @@ +From 7388400b8bd42f71d040dbf2fdbdcb834fcc0ede Mon Sep 17 00:00:00 2001 +From: Gokul Sriram Palanisamy +Date: Sat, 30 Jan 2021 10:50:13 +0530 +Subject: [PATCH] arm64: dts: qcom: Enable Q6v5 WCSS for ipq8074 SoC + +Enable remoteproc WCSS PIL driver with glink and ssr subdevices. +Also enables smp2p and mailboxes required for IPC. 
+ +Signed-off-by: Gokul Sriram Palanisamy +Signed-off-by: Sricharan R +Signed-off-by: Nikhil Prakash V +Signed-off-by: Robert Marko +--- + arch/arm64/boot/dts/qcom/ipq8074.dtsi | 81 +++++++++++++++++++++++++++ + 1 file changed, 81 insertions(+) + +--- a/arch/arm64/boot/dts/qcom/ipq8074.dtsi ++++ b/arch/arm64/boot/dts/qcom/ipq8074.dtsi +@@ -142,6 +142,32 @@ + }; + }; + ++ wcss: smp2p-wcss { ++ compatible = "qcom,smp2p"; ++ qcom,smem = <435>, <428>; ++ ++ interrupt-parent = <&intc>; ++ interrupts = <0 322 1>; ++ ++ mboxes = <&apcs_glb 9>; ++ ++ qcom,local-pid = <0>; ++ qcom,remote-pid = <1>; ++ ++ wcss_smp2p_out: master-kernel { ++ qcom,entry-name = "master-kernel"; ++ qcom,smp2p-feature-ssr-ack; ++ #qcom,smem-state-cells = <1>; ++ }; ++ ++ wcss_smp2p_in: slave-kernel { ++ qcom,entry-name = "slave-kernel"; ++ ++ interrupt-controller; ++ #interrupt-cells = <2>; ++ }; ++ }; ++ + soc: soc@0 { + #address-cells = <1>; + #size-cells = <1>; +@@ -425,6 +451,11 @@ + reg = <0x01937000 0x21000>; + }; + ++ tcsr_q6: syscon@1945000 { ++ compatible = "syscon"; ++ reg = <0x01945000 0xe000>; ++ }; ++ + spmi_bus: spmi@200f000 { + compatible = "qcom,spmi-pmic-arb"; + reg = <0x0200f000 0x001000>, +@@ -972,6 +1003,56 @@ + "axi_s_sticky"; + status = "disabled"; + }; ++ ++ q6v5_wcss: q6v5_wcss@cd00000 { ++ compatible = "qcom,ipq8074-wcss-pil"; ++ reg = <0x0cd00000 0x4040>, ++ <0x004ab000 0x20>; ++ reg-names = "qdsp6", ++ "rmb"; ++ qca,auto-restart; ++ qca,extended-intc; ++ interrupts-extended = <&intc 0 325 1>, ++ <&wcss_smp2p_in 0 0>, ++ <&wcss_smp2p_in 1 0>, ++ <&wcss_smp2p_in 2 0>, ++ <&wcss_smp2p_in 3 0>; ++ interrupt-names = "wdog", ++ "fatal", ++ "ready", ++ "handover", ++ "stop-ack"; ++ ++ resets = <&gcc GCC_WCSSAON_RESET>, ++ <&gcc GCC_WCSS_BCR>, ++ <&gcc GCC_WCSS_Q6_BCR>; ++ ++ reset-names = "wcss_aon_reset", ++ "wcss_reset", ++ "wcss_q6_reset"; ++ ++ clocks = <&gcc GCC_PRNG_AHB_CLK>; ++ clock-names = "prng"; ++ ++ qcom,halt-regs = <&tcsr_q6 0xa000 0xd000 0x0>; ++ ++ qcom,smem-states = <&wcss_smp2p_out 0>, ++ <&wcss_smp2p_out 1>; ++ qcom,smem-state-names = "shutdown", ++ "stop"; ++ ++ memory-region = <&q6_region>; ++ ++ glink-edge { ++ interrupts = ; ++ qcom,remote-pid = <1>; ++ mboxes = <&apcs_glb 8>; ++ ++ rpm_requests { ++ qcom,glink-channels = "IPCRTR"; ++ }; ++ }; ++ }; + }; + + timer { diff --git a/target/linux/qualcommax/patches-6.6/0121-arm64-dts-ipq8074-Add-WLAN-node.patch b/target/linux/qualcommax/patches-6.6/0121-arm64-dts-ipq8074-Add-WLAN-node.patch new file mode 100644 index 000000000..ef34c50ec --- /dev/null +++ b/target/linux/qualcommax/patches-6.6/0121-arm64-dts-ipq8074-Add-WLAN-node.patch @@ -0,0 +1,135 @@ +From a67d1901741c162645eda0dbdc3a2c0c2aff5cf4 Mon Sep 17 00:00:00 2001 +From: Robert Marko +Date: Tue, 21 Dec 2021 14:49:36 +0100 +Subject: [PATCH] arm64: dts: ipq8074: Add WLAN node + +IPQ8074 has a AHB based Q6v5 802.11ax radios that are supported +by the ath11k. + +Add the required DT node to enable the built-in radios. 
+ +Signed-off-by: Robert Marko +--- + arch/arm64/boot/dts/qcom/ipq8074.dtsi | 111 ++++++++++++++++++++++++++ + 1 file changed, 111 insertions(+) + +--- a/arch/arm64/boot/dts/qcom/ipq8074.dtsi ++++ b/arch/arm64/boot/dts/qcom/ipq8074.dtsi +@@ -1053,6 +1053,117 @@ + }; + }; + }; ++ ++ wifi: wifi@c0000000 { ++ compatible = "qcom,ipq8074-wifi"; ++ reg = <0xc000000 0x2000000>; ++ ++ interrupts = , ++ , ++ , ++ , ++ , ++ , ++ , ++ , ++ , ++ , ++ , ++ , ++ , ++ , ++ , ++ , ++ , ++ , ++ , ++ , ++ , ++ , ++ , ++ , ++ , ++ , ++ , ++ , ++ , ++ , ++ , ++ , ++ , ++ , ++ , ++ , ++ , ++ , ++ , ++ , ++ , ++ , ++ , ++ , ++ , ++ , ++ , ++ , ++ , ++ , ++ ; ++ ++ interrupt-names = "misc-pulse1", ++ "misc-latch", ++ "sw-exception", ++ "ce0", ++ "ce1", ++ "ce2", ++ "ce3", ++ "ce4", ++ "ce5", ++ "ce6", ++ "ce7", ++ "ce8", ++ "ce9", ++ "ce10", ++ "ce11", ++ "host2wbm-desc-feed", ++ "host2reo-re-injection", ++ "host2reo-command", ++ "host2rxdma-monitor-ring3", ++ "host2rxdma-monitor-ring2", ++ "host2rxdma-monitor-ring1", ++ "reo2ost-exception", ++ "wbm2host-rx-release", ++ "reo2host-status", ++ "reo2host-destination-ring4", ++ "reo2host-destination-ring3", ++ "reo2host-destination-ring2", ++ "reo2host-destination-ring1", ++ "rxdma2host-monitor-destination-mac3", ++ "rxdma2host-monitor-destination-mac2", ++ "rxdma2host-monitor-destination-mac1", ++ "ppdu-end-interrupts-mac3", ++ "ppdu-end-interrupts-mac2", ++ "ppdu-end-interrupts-mac1", ++ "rxdma2host-monitor-status-ring-mac3", ++ "rxdma2host-monitor-status-ring-mac2", ++ "rxdma2host-monitor-status-ring-mac1", ++ "host2rxdma-host-buf-ring-mac3", ++ "host2rxdma-host-buf-ring-mac2", ++ "host2rxdma-host-buf-ring-mac1", ++ "rxdma2host-destination-ring-mac3", ++ "rxdma2host-destination-ring-mac2", ++ "rxdma2host-destination-ring-mac1", ++ "host2tcl-input-ring4", ++ "host2tcl-input-ring3", ++ "host2tcl-input-ring2", ++ "host2tcl-input-ring1", ++ "wbm2host-tx-completions-ring3", ++ "wbm2host-tx-completions-ring2", ++ "wbm2host-tx-completions-ring1", ++ "tcl2host-status-ring"; ++ qcom,rproc = <&q6v5_wcss>; ++ status = "disabled"; ++ }; + }; + + timer { diff --git a/target/linux/qualcommax/patches-6.6/0122-arm64-dts-ipq8074-add-CPU-clock.patch b/target/linux/qualcommax/patches-6.6/0122-arm64-dts-ipq8074-add-CPU-clock.patch new file mode 100644 index 000000000..a3c5f344a --- /dev/null +++ b/target/linux/qualcommax/patches-6.6/0122-arm64-dts-ipq8074-add-CPU-clock.patch @@ -0,0 +1,59 @@ +From cb3ef99c1553565e1dc0301ccd5c1c0fa2d15c15 Mon Sep 17 00:00:00 2001 +From: Robert Marko +Date: Fri, 31 Dec 2021 17:56:14 +0100 +Subject: [PATCH] arm64: dts: ipq8074: add CPU clock + +Now that CPU clock is exposed and can be controlled, add the necessary +properties to the CPU nodes. 
+ +Signed-off-by: Robert Marko +--- + arch/arm64/boot/dts/qcom/ipq8074.dtsi | 9 +++++++++ + 1 file changed, 9 insertions(+) + +--- a/arch/arm64/boot/dts/qcom/ipq8074.dtsi ++++ b/arch/arm64/boot/dts/qcom/ipq8074.dtsi +@@ -5,6 +5,7 @@ + + #include + #include ++#include + + / { + #address-cells = <2>; +@@ -38,6 +39,8 @@ + reg = <0x0>; + next-level-cache = <&L2_0>; + enable-method = "psci"; ++ clocks = <&apcs_glb APCS_ALIAS0_CORE_CLK>; ++ clock-names = "cpu"; + }; + + CPU1: cpu@1 { +@@ -46,6 +49,8 @@ + enable-method = "psci"; + reg = <0x1>; + next-level-cache = <&L2_0>; ++ clocks = <&apcs_glb APCS_ALIAS0_CORE_CLK>; ++ clock-names = "cpu"; + }; + + CPU2: cpu@2 { +@@ -54,6 +59,8 @@ + enable-method = "psci"; + reg = <0x2>; + next-level-cache = <&L2_0>; ++ clocks = <&apcs_glb APCS_ALIAS0_CORE_CLK>; ++ clock-names = "cpu"; + }; + + CPU3: cpu@3 { +@@ -62,6 +69,8 @@ + enable-method = "psci"; + reg = <0x3>; + next-level-cache = <&L2_0>; ++ clocks = <&apcs_glb APCS_ALIAS0_CORE_CLK>; ++ clock-names = "cpu"; + }; + + L2_0: l2-cache { diff --git a/target/linux/qualcommax/patches-6.6/0123-arm64-dts-ipq8074-add-cooling-cells-to-CPU-nodes.patch b/target/linux/qualcommax/patches-6.6/0123-arm64-dts-ipq8074-add-cooling-cells-to-CPU-nodes.patch new file mode 100644 index 000000000..3520b3813 --- /dev/null +++ b/target/linux/qualcommax/patches-6.6/0123-arm64-dts-ipq8074-add-cooling-cells-to-CPU-nodes.patch @@ -0,0 +1,48 @@ +From 347ca56e86c99021fad059b9a8ef101245b8507e Mon Sep 17 00:00:00 2001 +From: Robert Marko +Date: Fri, 31 Dec 2021 20:38:06 +0100 +Subject: [PATCH] arm64: dts: ipq8074: add cooling cells to CPU nodes + +Since there is CPU Freq support as well as thermal sensor support +now for the IPQ8074, add cooling cells to CPU nodes so that they can +be used as cooling devices using CPU Freq. + +Signed-off-by: Robert Marko +--- + arch/arm64/boot/dts/qcom/ipq8074.dtsi | 4 ++++ + 1 file changed, 4 insertions(+) + +--- a/arch/arm64/boot/dts/qcom/ipq8074.dtsi ++++ b/arch/arm64/boot/dts/qcom/ipq8074.dtsi +@@ -41,6 +41,7 @@ + enable-method = "psci"; + clocks = <&apcs_glb APCS_ALIAS0_CORE_CLK>; + clock-names = "cpu"; ++ #cooling-cells = <2>; + }; + + CPU1: cpu@1 { +@@ -51,6 +52,7 @@ + next-level-cache = <&L2_0>; + clocks = <&apcs_glb APCS_ALIAS0_CORE_CLK>; + clock-names = "cpu"; ++ #cooling-cells = <2>; + }; + + CPU2: cpu@2 { +@@ -61,6 +63,7 @@ + next-level-cache = <&L2_0>; + clocks = <&apcs_glb APCS_ALIAS0_CORE_CLK>; + clock-names = "cpu"; ++ #cooling-cells = <2>; + }; + + CPU3: cpu@3 { +@@ -71,6 +74,7 @@ + next-level-cache = <&L2_0>; + clocks = <&apcs_glb APCS_ALIAS0_CORE_CLK>; + clock-names = "cpu"; ++ #cooling-cells = <2>; + }; + + L2_0: l2-cache { diff --git a/target/linux/qualcommax/patches-6.6/0129-arm64-dts-qcom-ipq8074-add-QFPROM-fuses.patch b/target/linux/qualcommax/patches-6.6/0129-arm64-dts-qcom-ipq8074-add-QFPROM-fuses.patch new file mode 100644 index 000000000..7730ad89f --- /dev/null +++ b/target/linux/qualcommax/patches-6.6/0129-arm64-dts-qcom-ipq8074-add-QFPROM-fuses.patch @@ -0,0 +1,121 @@ +From 04d2fc6a551bbd972a6428059b45ce79cb9de9d7 Mon Sep 17 00:00:00 2001 +From: Robert Marko +Date: Fri, 6 May 2022 22:38:24 +0200 +Subject: [PATCH] arm64: dts: qcom: ipq8074: add QFPROM fuses + +Add the QFPROM node and CPR fuses. 
+ +Signed-off-by: Robert Marko +--- + arch/arm64/boot/dts/qcom/ipq8074.dtsi | 107 ++++++++++++++++++++++++++ + 1 file changed, 107 insertions(+) + +--- a/arch/arm64/boot/dts/qcom/ipq8074.dtsi ++++ b/arch/arm64/boot/dts/qcom/ipq8074.dtsi +@@ -349,6 +349,106 @@ + reg = <0x000a4000 0x2000>; + #address-cells = <1>; + #size-cells = <1>; ++ ++ cpr_efuse_speedbin: speedbin@125 { ++ reg = <0x125 0x1>; ++ bits = <0 3>; ++ }; ++ ++ cpr_efuse_boost_cfg: boost_cfg@125 { ++ reg = <0x125 0x1>; ++ bits = <3 3>; ++ }; ++ ++ cpr_efuse_misc_volt_adj: misc_volt_adj@125 { ++ reg = <0x125 0x1>; ++ bits = <3 3>; ++ }; ++ ++ cpr_efuse_boost_volt: boost_volt@126 { ++ reg = <0x126 0x1>; ++ bits = <6 1>; ++ }; ++ ++ cpr_efuse_revision: revision@23e { ++ reg = <0x23e 0x1>; ++ bits = <5 3>; ++ }; ++ ++ cpr_efuse_ro_sel0: rosel0@249 { ++ reg = <0x249 0x1>; ++ bits = <0 4>; ++ }; ++ ++ cpr_efuse_ro_sel1: rosel1@248 { ++ reg = <0x248 0x1>; ++ bits = <4 4>; ++ }; ++ ++ cpr_efuse_ro_sel2: rosel2@248 { ++ reg = <0x248 0x2>; ++ bits = <0 4>; ++ }; ++ ++ cpr_efuse_ro_sel3: rosel3@249 { ++ reg = <0x249 0x1>; ++ bits = <4 4>; ++ }; ++ ++ cpr_efuse_init_voltage0: ivoltage0@23a { ++ reg = <0x23a 0x1>; ++ bits = <2 6>; ++ }; ++ ++ cpr_efuse_init_voltage1: ivoltage1@239 { ++ reg = <0x239 0x2>; ++ bits = <4 6>; ++ }; ++ ++ cpr_efuse_init_voltage2: ivoltage2@238 { ++ reg = <0x238 0x2>; ++ bits = <6 6>; ++ }; ++ ++ cpr_efuse_init_voltage3: ivoltage3@238 { ++ reg = <0x238 0x1>; ++ bits = <0 6>; ++ }; ++ ++ cpr_efuse_quot0: quot0@244 { ++ reg = <0x244 0x2>; ++ bits = <0 12>; ++ }; ++ ++ cpr_efuse_quot1: quot1@242 { ++ reg = <0x242 0x2>; ++ bits = <4 12>; ++ }; ++ ++ cpr_efuse_quot2: quot2@241 { ++ reg = <0x241 0x2>; ++ bits = <0 12>; ++ }; ++ ++ cpr_efuse_quot3: quot3@245 { ++ reg = <0x245 0x2>; ++ bits = <4 12>; ++ }; ++ ++ cpr_efuse_quot0_offset: quot0_offset@23d { ++ reg = <0x23d 0x2>; ++ bits = <6 7>; ++ }; ++ ++ cpr_efuse_quot1_offset: quot1_offset@23c { ++ reg = <0x23c 0x2>; ++ bits = <7 7>; ++ }; ++ ++ cpr_efuse_quot2_offset: quot2_offset@23c { ++ reg = <0x23c 0x1>; ++ bits = <0 7>; ++ }; + }; + + prng: rng@e3000 { diff --git a/target/linux/qualcommax/patches-6.6/0130-arm64-dts-qcom-ipq8074-add-CPU-OPP-table.patch b/target/linux/qualcommax/patches-6.6/0130-arm64-dts-qcom-ipq8074-add-CPU-OPP-table.patch new file mode 100644 index 000000000..a89e50f52 --- /dev/null +++ b/target/linux/qualcommax/patches-6.6/0130-arm64-dts-qcom-ipq8074-add-CPU-OPP-table.patch @@ -0,0 +1,102 @@ +From a20c4e8738a00087aa5d53fe5148ed484e23d229 Mon Sep 17 00:00:00 2001 +From: Robert Marko +Date: Sat, 31 Dec 2022 13:56:26 +0100 +Subject: [PATCH] arm64: dts: qcom: ipq8074: add CPU OPP table + +Now that there is NVMEM CPUFreq support for IPQ8074, we can add the OPP +table for SoC. 
+ +Signed-off-by: Robert Marko +--- + arch/arm64/boot/dts/qcom/ipq8074.dtsi | 52 +++++++++++++++++++++++++++ + 1 file changed, 52 insertions(+) + +--- a/arch/arm64/boot/dts/qcom/ipq8074.dtsi ++++ b/arch/arm64/boot/dts/qcom/ipq8074.dtsi +@@ -42,6 +42,7 @@ + clocks = <&apcs_glb APCS_ALIAS0_CORE_CLK>; + clock-names = "cpu"; + #cooling-cells = <2>; ++ operating-points-v2 = <&cpu_opp_table>; + }; + + CPU1: cpu@1 { +@@ -53,6 +54,7 @@ + clocks = <&apcs_glb APCS_ALIAS0_CORE_CLK>; + clock-names = "cpu"; + #cooling-cells = <2>; ++ operating-points-v2 = <&cpu_opp_table>; + }; + + CPU2: cpu@2 { +@@ -64,6 +66,7 @@ + clocks = <&apcs_glb APCS_ALIAS0_CORE_CLK>; + clock-names = "cpu"; + #cooling-cells = <2>; ++ operating-points-v2 = <&cpu_opp_table>; + }; + + CPU3: cpu@3 { +@@ -75,6 +78,7 @@ + clocks = <&apcs_glb APCS_ALIAS0_CORE_CLK>; + clock-names = "cpu"; + #cooling-cells = <2>; ++ operating-points-v2 = <&cpu_opp_table>; + }; + + L2_0: l2-cache { +@@ -84,6 +88,54 @@ + }; + }; + ++ cpu_opp_table: opp-table { ++ compatible = "operating-points-v2-kryo-cpu"; ++ nvmem-cells = <&cpr_efuse_speedbin>; ++ opp-shared; ++ ++ opp-1017600000 { ++ opp-hz = /bits/ 64 <1017600000>; ++ opp-microvolt = <1>; ++ opp-supported-hw = <0xf>; ++ clock-latency-ns = <200000>; ++ }; ++ ++ opp-1382400000 { ++ opp-hz = /bits/ 64 <1382400000>; ++ opp-microvolt = <2>; ++ opp-supported-hw = <0xf>; ++ clock-latency-ns = <200000>; ++ }; ++ ++ opp-1651200000 { ++ opp-hz = /bits/ 64 <1651200000>; ++ opp-microvolt = <3>; ++ opp-supported-hw = <0x1>; ++ clock-latency-ns = <200000>; ++ }; ++ ++ opp-1843200000 { ++ opp-hz = /bits/ 64 <1843200000>; ++ opp-microvolt = <4>; ++ opp-supported-hw = <0x1>; ++ clock-latency-ns = <200000>; ++ }; ++ ++ opp-1920000000 { ++ opp-hz = /bits/ 64 <1920000000>; ++ opp-microvolt = <5>; ++ opp-supported-hw = <0x1>; ++ clock-latency-ns = <200000>; ++ }; ++ ++ opp-2208000000 { ++ opp-hz = /bits/ 64 <2208000000>; ++ opp-microvolt = <6>; ++ opp-supported-hw = <0x1>; ++ clock-latency-ns = <200000>; ++ }; ++ }; ++ + pmu { + compatible = "arm,cortex-a53-pmu"; + interrupts = ; diff --git a/target/linux/qualcommax/patches-6.6/0136-remoteproc-qcom-wcss-populate-driver-data-for-IPQ601.patch b/target/linux/qualcommax/patches-6.6/0136-remoteproc-qcom-wcss-populate-driver-data-for-IPQ601.patch new file mode 100644 index 000000000..eaf6e3727 --- /dev/null +++ b/target/linux/qualcommax/patches-6.6/0136-remoteproc-qcom-wcss-populate-driver-data-for-IPQ601.patch @@ -0,0 +1,61 @@ +From 9dd19a9ae36bc60d58287d0c52e53024d484e64d Mon Sep 17 00:00:00 2001 +From: Gokul Sriram Palanisamy +Date: Fri, 29 Jan 2021 22:41:59 +0530 +Subject: [PATCH 2/3] remoteproc: qcom: wcss: populate driver data for IPQ6018 + +Populate hardcoded param using driver data for IPQ6018 SoCs. 
+ +Signed-off-by: Gokul Sriram Palanisamy +--- + drivers/remoteproc/qcom_q6v5_wcss.c | 19 +++++++++++++++++-- + 1 file changed, 17 insertions(+), 2 deletions(-) + +--- a/drivers/remoteproc/qcom_q6v5_wcss.c ++++ b/drivers/remoteproc/qcom_q6v5_wcss.c +@@ -969,7 +969,7 @@ static int q6v5_alloc_memory_region(stru + return 0; + } + +-static int ipq8074_init_clock(struct q6v5_wcss *wcss) ++static int ipq_init_clock(struct q6v5_wcss *wcss) + { + int ret; + +@@ -1176,7 +1176,7 @@ static void q6v5_wcss_remove(struct plat + } + + static const struct wcss_data wcss_ipq8074_res_init = { +- .init_clock = ipq8074_init_clock, ++ .init_clock = ipq_init_clock, + .q6_firmware_name = "IPQ8074/q6_fw.mdt", + .m3_firmware_name = "IPQ8074/m3_fw.mdt", + .crash_reason_smem = WCSS_CRASH_REASON, +@@ -1190,6 +1190,20 @@ static const struct wcss_data wcss_ipq80 + .need_auto_boot = false, + }; + ++static const struct wcss_data wcss_ipq6018_res_init = { ++ .init_clock = ipq_init_clock, ++ .q6_firmware_name = "IPQ6018/q6_fw.mdt", ++ .m3_firmware_name = "IPQ6018/m3_fw.mdt", ++ .crash_reason_smem = WCSS_CRASH_REASON, ++ .aon_reset_required = true, ++ .wcss_q6_reset_required = true, ++ .bcr_reset_required = false, ++ .ssr_name = "q6wcss", ++ .ops = &q6v5_wcss_ipq8074_ops, ++ .requires_force_stop = true, ++ .need_mem_protection = true, ++}; ++ + static const struct wcss_data wcss_qcs404_res_init = { + .init_clock = qcs404_init_clock, + .init_regulator = qcs404_init_regulator, +@@ -1209,6 +1223,7 @@ static const struct wcss_data wcss_qcs40 + + static const struct of_device_id q6v5_wcss_of_match[] = { + { .compatible = "qcom,ipq8074-wcss-pil", .data = &wcss_ipq8074_res_init }, ++ { .compatible = "qcom,ipq6018-wcss-pil", .data = &wcss_ipq6018_res_init }, + { .compatible = "qcom,qcs404-wcss-pil", .data = &wcss_qcs404_res_init }, + { }, + }; diff --git a/target/linux/qualcommax/patches-6.6/0137-arm64-dts-qcom-ipq6018-add-SDHCI-node.patch b/target/linux/qualcommax/patches-6.6/0137-arm64-dts-qcom-ipq6018-add-SDHCI-node.patch new file mode 100644 index 000000000..e1296aa79 --- /dev/null +++ b/target/linux/qualcommax/patches-6.6/0137-arm64-dts-qcom-ipq6018-add-SDHCI-node.patch @@ -0,0 +1,45 @@ +From e4d7544ce092807e8c5aeb618cec30e2eb9b40c2 Mon Sep 17 00:00:00 2001 +From: Mantas Pucka +Date: Mon, 24 Apr 2023 15:13:32 +0300 +Subject: [PATCH 3/3] arm64: dts: qcom: ipq6018: add SDHCI node + +IPQ6018 has one SD/eMMC controller, add node for it. 
+ +Signed-off-by: Mantas Pucka +Tested-by: Robert Marko +--- + arch/arm64/boot/dts/qcom/ipq6018.dtsi | 23 +++++++++++++++++++++++ + 1 file changed, 23 insertions(+) + +--- a/arch/arm64/boot/dts/qcom/ipq6018.dtsi ++++ b/arch/arm64/boot/dts/qcom/ipq6018.dtsi +@@ -470,6 +470,29 @@ + }; + }; + ++ sdhc_1: mmc@7804000 { ++ compatible = "qcom,ipq6018-sdhci", "qcom,sdhci-msm-v5"; ++ reg = <0x0 0x07804000 0x0 0x1000>, ++ <0x0 0x07805000 0x0 0x1000>, ++ <0x0 0x07808000 0x0 0x2000>; ++ reg-names = "hc", "cqhci", "ice"; ++ ++ interrupts = , ++ ; ++ interrupt-names = "hc_irq", "pwr_irq"; ++ ++ clocks = <&gcc GCC_SDCC1_AHB_CLK>, ++ <&gcc GCC_SDCC1_APPS_CLK>, ++ <&xo>, ++ <&gcc GCC_SDCC1_ICE_CORE_CLK>; ++ clock-names = "iface", "core", "xo", "ice"; ++ ++ resets = <&gcc GCC_SDCC1_BCR>; ++ supports-cqe; ++ bus-width = <8>; ++ status = "disabled"; ++ }; ++ + blsp_dma: dma-controller@7884000 { + compatible = "qcom,bam-v1.7.0"; + reg = <0x0 0x07884000 0x0 0x2b000>; diff --git a/target/linux/qualcommax/patches-6.6/0139-arm64-dts-qcom-ipq6018-add-LDOA2-regulator.patch b/target/linux/qualcommax/patches-6.6/0139-arm64-dts-qcom-ipq6018-add-LDOA2-regulator.patch new file mode 100644 index 000000000..2f7746646 --- /dev/null +++ b/target/linux/qualcommax/patches-6.6/0139-arm64-dts-qcom-ipq6018-add-LDOA2-regulator.patch @@ -0,0 +1,27 @@ +From d24bc08bfc66f47d6e0a294a080d62893a7696b5 Mon Sep 17 00:00:00 2001 +From: Chukun Pan +Date: Thu, 18 Jan 2024 21:30:21 +0800 +Subject: [PATCH] arm64: dts: qcom: ipq6018: add LDOA2 regulator + +Add LDOA2 regulator of MP5496 to support SDCC voltage scaling. + +Suggested-by: Robert Marko +Signed-off-by: Chukun Pan +--- + arch/arm64/boot/dts/qcom/ipq6018.dtsi | 5 +++++ + 1 file changed, 5 insertions(+) + +--- a/arch/arm64/boot/dts/qcom/ipq6018.dtsi ++++ b/arch/arm64/boot/dts/qcom/ipq6018.dtsi +@@ -179,6 +179,11 @@ + regulator-max-microvolt = <1062500>; + regulator-always-on; + }; ++ ++ ipq6018_l2: l2 { ++ regulator-min-microvolt = <1800000>; ++ regulator-max-microvolt = <3300000>; ++ }; + }; + }; + }; diff --git a/target/linux/qualcommax/patches-6.6/0140-dt-bindings-pwm-add-IPQ6018-binding.patch b/target/linux/qualcommax/patches-6.6/0140-dt-bindings-pwm-add-IPQ6018-binding.patch new file mode 100644 index 000000000..ecc5c9ade --- /dev/null +++ b/target/linux/qualcommax/patches-6.6/0140-dt-bindings-pwm-add-IPQ6018-binding.patch @@ -0,0 +1,63 @@ +From: Devi Priya +Date: Thu, 5 Oct 2023 21:35:48 +0530 +Subject: [PATCH] dt-bindings: pwm: add IPQ6018 binding + +DT binding for the PWM block in Qualcomm IPQ6018 SoC. + +Reviewed-by: Bjorn Andersson +Reviewed-by: Krzysztof Kozlowski +Co-developed-by: Baruch Siach +Signed-off-by: Baruch Siach +Signed-off-by: Devi Priya +--- +diff --git a/Documentation/devicetree/bindings/pwm/qcom,ipq6018-pwm.yaml b/Documentation/devicetree/bindings/pwm/qcom,ipq6018-pwm.yaml +new file mode 100644 +index 000000000000..6d0d7ed271f7 +--- /dev/null ++++ b/Documentation/devicetree/bindings/pwm/qcom,ipq6018-pwm.yaml +@@ -0,0 +1,45 @@ ++# SPDX-License-Identifier: (GPL-2.0-only OR BSD-2-Clause) ++%YAML 1.2 ++--- ++$id: http://devicetree.org/schemas/pwm/qcom,ipq6018-pwm.yaml# ++$schema: http://devicetree.org/meta-schemas/core.yaml# ++ ++title: Qualcomm IPQ6018 PWM controller ++ ++maintainers: ++ - Baruch Siach ++ ++properties: ++ compatible: ++ const: qcom,ipq6018-pwm ++ ++ reg: ++ description: Offset of PWM register in the TCSR block. 
++ maxItems: 1 ++ ++ clocks: ++ maxItems: 1 ++ ++ "#pwm-cells": ++ const: 2 ++ ++required: ++ - compatible ++ - reg ++ - clocks ++ - "#pwm-cells" ++ ++additionalProperties: false ++ ++examples: ++ - | ++ #include ++ ++ pwm: pwm@a010 { ++ compatible = "qcom,ipq6018-pwm"; ++ reg = <0xa010 0x20>; ++ clocks = <&gcc GCC_ADSS_PWM_CLK>; ++ assigned-clocks = <&gcc GCC_ADSS_PWM_CLK>; ++ assigned-clock-rates = <100000000>; ++ #pwm-cells = <2>; ++ }; diff --git a/target/linux/qualcommax/patches-6.6/0141-pwm-driver-for-qualcomm-ipq6018-pwm-block.patch b/target/linux/qualcommax/patches-6.6/0141-pwm-driver-for-qualcomm-ipq6018-pwm-block.patch new file mode 100644 index 000000000..28f60829e --- /dev/null +++ b/target/linux/qualcommax/patches-6.6/0141-pwm-driver-for-qualcomm-ipq6018-pwm-block.patch @@ -0,0 +1,337 @@ +From: Devi Priya +Date: Thu, 5 Oct 2023 21:35:47 +0530 +Subject: [PATCH] pwm: driver for qualcomm ipq6018 pwm block + +Driver for the PWM block in Qualcomm IPQ6018 line of SoCs. Based on +driver from downstream Codeaurora kernel tree. Removed support for older +(V1) variants because I have no access to that hardware. + +Tested on IPQ6010 based hardware. + +Co-developed-by: Baruch Siach +Signed-off-by: Baruch Siach +Signed-off-by: Devi Priya +--- +diff --git a/drivers/pwm/Kconfig b/drivers/pwm/Kconfig +index 8ebcddf91f7b..c2d51680823a 100644 +--- a/drivers/pwm/Kconfig ++++ b/drivers/pwm/Kconfig +@@ -282,6 +282,18 @@ config PWM_INTEL_LGM + To compile this driver as a module, choose M here: the module + will be called pwm-intel-lgm. + ++config PWM_IPQ ++ tristate "IPQ PWM support" ++ depends on ARCH_QCOM || COMPILE_TEST ++ depends on HAVE_CLK && HAS_IOMEM ++ help ++ Generic PWM framework driver for IPQ PWM block which supports ++ 4 pwm channels. Each of the these channels can be configured ++ independent of each other. ++ ++ To compile this driver as a module, choose M here: the module ++ will be called pwm-ipq. ++ + config PWM_IQS620A + tristate "Azoteq IQS620A PWM support" + depends on MFD_IQS62X || COMPILE_TEST +diff --git a/drivers/pwm/Makefile b/drivers/pwm/Makefile +index c822389c2a24..1b69e8cb2b91 100644 +--- a/drivers/pwm/Makefile ++++ b/drivers/pwm/Makefile +@@ -24,6 +24,7 @@ obj-$(CONFIG_PWM_IMX1) += pwm-imx1.o + obj-$(CONFIG_PWM_IMX27) += pwm-imx27.o + obj-$(CONFIG_PWM_IMX_TPM) += pwm-imx-tpm.o + obj-$(CONFIG_PWM_INTEL_LGM) += pwm-intel-lgm.o ++obj-$(CONFIG_PWM_IPQ) += pwm-ipq.o + obj-$(CONFIG_PWM_IQS620A) += pwm-iqs620a.o + obj-$(CONFIG_PWM_JZ4740) += pwm-jz4740.o + obj-$(CONFIG_PWM_KEEMBAY) += pwm-keembay.o +diff --git a/drivers/pwm/pwm-ipq.c b/drivers/pwm/pwm-ipq.c +new file mode 100644 +index 000000000000..5dbe46bb56d6 +--- /dev/null ++++ b/drivers/pwm/pwm-ipq.c +@@ -0,0 +1,282 @@ ++// SPDX-License-Identifier: BSD-3-Clause OR GPL-2.0 ++/* ++ * Copyright (c) 2016-2017, 2020 The Linux Foundation. All rights reserved. ++ */ ++ ++#include ++#include ++#include ++#include ++#include ++#include ++#include ++#include ++#include ++#include ++ ++/* The frequency range supported is 1 Hz to clock rate */ ++#define IPQ_PWM_MAX_PERIOD_NS ((u64)NSEC_PER_SEC) ++ ++/* ++ * The max value specified for each field is based on the number of bits ++ * in the pwm control register for that field ++ */ ++#define IPQ_PWM_MAX_DIV 0xFFFF ++ ++/* ++ * Two 32-bit registers for each PWM: REG0, and REG1. ++ * Base offset for PWM #i is at 8 * #i. 
++ */ ++#define IPQ_PWM_REG0 0 ++#define IPQ_PWM_REG0_PWM_DIV GENMASK(15, 0) ++#define IPQ_PWM_REG0_HI_DURATION GENMASK(31, 16) ++ ++#define IPQ_PWM_REG1 4 ++#define IPQ_PWM_REG1_PRE_DIV GENMASK(15, 0) ++/* ++ * Enable bit is set to enable output toggling in pwm device. ++ * Update bit is set to reflect the changed divider and high duration ++ * values in register. ++ */ ++#define IPQ_PWM_REG1_UPDATE BIT(30) ++#define IPQ_PWM_REG1_ENABLE BIT(31) ++ ++struct ipq_pwm_chip { ++ struct pwm_chip chip; ++ struct clk *clk; ++ void __iomem *mem; ++}; ++ ++static struct ipq_pwm_chip *ipq_pwm_from_chip(struct pwm_chip *chip) ++{ ++ return container_of(chip, struct ipq_pwm_chip, chip); ++} ++ ++static unsigned int ipq_pwm_reg_read(struct pwm_device *pwm, unsigned int reg) ++{ ++ struct ipq_pwm_chip *ipq_chip = ipq_pwm_from_chip(pwm->chip); ++ unsigned int off = 8 * pwm->hwpwm + reg; ++ ++ return readl(ipq_chip->mem + off); ++} ++ ++static void ipq_pwm_reg_write(struct pwm_device *pwm, unsigned int reg, ++ unsigned int val) ++{ ++ struct ipq_pwm_chip *ipq_chip = ipq_pwm_from_chip(pwm->chip); ++ unsigned int off = 8 * pwm->hwpwm + reg; ++ ++ writel(val, ipq_chip->mem + off); ++} ++ ++static void config_div_and_duty(struct pwm_device *pwm, unsigned int pre_div, ++ unsigned int pwm_div, unsigned long rate, u64 duty_ns, ++ bool enable) ++{ ++ unsigned long hi_dur; ++ unsigned long val = 0; ++ ++ /* ++ * high duration = pwm duty * (pwm div + 1) ++ * pwm duty = duty_ns / period_ns ++ */ ++ hi_dur = div64_u64(duty_ns * rate, (pre_div + 1) * NSEC_PER_SEC); ++ ++ val = FIELD_PREP(IPQ_PWM_REG0_HI_DURATION, hi_dur) | ++ FIELD_PREP(IPQ_PWM_REG0_PWM_DIV, pwm_div); ++ ipq_pwm_reg_write(pwm, IPQ_PWM_REG0, val); ++ ++ val = FIELD_PREP(IPQ_PWM_REG1_PRE_DIV, pre_div); ++ ipq_pwm_reg_write(pwm, IPQ_PWM_REG1, val); ++ ++ /* PWM enable toggle needs a separate write to REG1 */ ++ val |= IPQ_PWM_REG1_UPDATE; ++ if (enable) ++ val |= IPQ_PWM_REG1_ENABLE; ++ ipq_pwm_reg_write(pwm, IPQ_PWM_REG1, val); ++} ++ ++static int ipq_pwm_apply(struct pwm_chip *chip, struct pwm_device *pwm, ++ const struct pwm_state *state) ++{ ++ struct ipq_pwm_chip *ipq_chip = ipq_pwm_from_chip(chip); ++ unsigned int pre_div, pwm_div, best_pre_div, best_pwm_div; ++ unsigned long rate = clk_get_rate(ipq_chip->clk); ++ u64 period_ns, duty_ns, period_rate; ++ u64 min_diff; ++ ++ if (state->polarity != PWM_POLARITY_NORMAL) ++ return -EINVAL; ++ ++ if (state->period < DIV64_U64_ROUND_UP(NSEC_PER_SEC, rate)) ++ return -ERANGE; ++ ++ period_ns = min(state->period, IPQ_PWM_MAX_PERIOD_NS); ++ duty_ns = min(state->duty_cycle, period_ns); ++ ++ /* ++ * period_ns is 1G or less. As long as rate is less than 16 GHz, ++ * period_rate does not overflow. Make that explicit. ++ */ ++ if ((unsigned long long)rate > 16ULL * GIGA) ++ return -EINVAL; ++ period_rate = period_ns * rate; ++ best_pre_div = IPQ_PWM_MAX_DIV; ++ best_pwm_div = IPQ_PWM_MAX_DIV; ++ /* ++ * We don't need to consider pre_div values smaller than ++ * ++ * period_rate ++ * pre_div_min := ------------------------------------ ++ * NSEC_PER_SEC * (IPQ_PWM_MAX_DIV + 1) ++ * ++ * because pre_div = pre_div_min results in a better ++ * approximation. 
++ */ ++ pre_div = div64_u64(period_rate, ++ (u64)NSEC_PER_SEC * (IPQ_PWM_MAX_DIV + 1)); ++ min_diff = period_rate; ++ ++ for (; pre_div <= IPQ_PWM_MAX_DIV; pre_div++) { ++ u64 remainder; ++ ++ pwm_div = div64_u64_rem(period_rate, ++ (u64)NSEC_PER_SEC * (pre_div + 1), &remainder); ++ /* pwm_div is unsigned; the check below catches underflow */ ++ pwm_div--; ++ ++ /* ++ * Swapping values for pre_div and pwm_div produces the same ++ * period length. So we can skip all settings with pre_div > ++ * pwm_div which results in bigger constraints for selecting ++ * the duty_cycle than with the two values swapped. ++ */ ++ if (pre_div > pwm_div) ++ break; ++ ++ /* ++ * Make sure we can do 100% duty cycle where ++ * hi_dur == pwm_div + 1 ++ */ ++ if (pwm_div > IPQ_PWM_MAX_DIV - 1) ++ continue; ++ ++ if (remainder < min_diff) { ++ best_pre_div = pre_div; ++ best_pwm_div = pwm_div; ++ min_diff = remainder; ++ ++ if (min_diff == 0) /* bingo */ ++ break; ++ } ++ } ++ ++ /* config divider values for the closest possible frequency */ ++ config_div_and_duty(pwm, best_pre_div, best_pwm_div, ++ rate, duty_ns, state->enabled); ++ ++ return 0; ++} ++ ++static int ipq_pwm_get_state(struct pwm_chip *chip, struct pwm_device *pwm, ++ struct pwm_state *state) ++{ ++ struct ipq_pwm_chip *ipq_chip = ipq_pwm_from_chip(chip); ++ unsigned long rate = clk_get_rate(ipq_chip->clk); ++ unsigned int pre_div, pwm_div, hi_dur; ++ u64 effective_div, hi_div; ++ u32 reg0, reg1; ++ ++ reg0 = ipq_pwm_reg_read(pwm, IPQ_PWM_REG0); ++ reg1 = ipq_pwm_reg_read(pwm, IPQ_PWM_REG1); ++ ++ state->polarity = PWM_POLARITY_NORMAL; ++ state->enabled = reg1 & IPQ_PWM_REG1_ENABLE; ++ ++ pwm_div = FIELD_GET(IPQ_PWM_REG0_PWM_DIV, reg0); ++ hi_dur = FIELD_GET(IPQ_PWM_REG0_HI_DURATION, reg0); ++ pre_div = FIELD_GET(IPQ_PWM_REG1_PRE_DIV, reg1); ++ ++ /* No overflow here, both pre_div and pwm_div <= 0xffff */ ++ effective_div = (u64)(pre_div + 1) * (pwm_div + 1); ++ state->period = DIV64_U64_ROUND_UP(effective_div * NSEC_PER_SEC, rate); ++ ++ hi_div = hi_dur * (pre_div + 1); ++ state->duty_cycle = DIV64_U64_ROUND_UP(hi_div * NSEC_PER_SEC, rate); ++ ++ return 0; ++} ++ ++static const struct pwm_ops ipq_pwm_ops = { ++ .apply = ipq_pwm_apply, ++ .get_state = ipq_pwm_get_state, ++ .owner = THIS_MODULE, ++}; ++ ++static int ipq_pwm_probe(struct platform_device *pdev) ++{ ++ struct ipq_pwm_chip *pwm; ++ struct device *dev = &pdev->dev; ++ int ret; ++ ++ pwm = devm_kzalloc(dev, sizeof(*pwm), GFP_KERNEL); ++ if (!pwm) ++ return -ENOMEM; ++ ++ platform_set_drvdata(pdev, pwm); ++ ++ pwm->mem = devm_platform_ioremap_resource(pdev, 0); ++ if (IS_ERR(pwm->mem)) ++ return dev_err_probe(dev, PTR_ERR(pwm->mem), ++ "regs map failed"); ++ ++ pwm->clk = devm_clk_get(dev, NULL); ++ if (IS_ERR(pwm->clk)) ++ return dev_err_probe(dev, PTR_ERR(pwm->clk), ++ "failed to get clock"); ++ ++ ret = clk_prepare_enable(pwm->clk); ++ if (ret) ++ return dev_err_probe(dev, ret, "clock enable failed"); ++ ++ pwm->chip.dev = dev; ++ pwm->chip.ops = &ipq_pwm_ops; ++ pwm->chip.npwm = 4; ++ ++ ret = pwmchip_add(&pwm->chip); ++ if (ret < 0) { ++ dev_err_probe(dev, ret, "pwmchip_add() failed\n"); ++ clk_disable_unprepare(pwm->clk); ++ } ++ ++ return ret; ++} ++ ++static int ipq_pwm_remove(struct platform_device *pdev) ++{ ++ struct ipq_pwm_chip *pwm = platform_get_drvdata(pdev); ++ ++ pwmchip_remove(&pwm->chip); ++ clk_disable_unprepare(pwm->clk); ++ ++ return 0; ++} ++ ++static const struct of_device_id pwm_ipq_dt_match[] = { ++ { .compatible = "qcom,ipq6018-pwm", }, ++ {} ++}; 
++MODULE_DEVICE_TABLE(of, pwm_ipq_dt_match); ++ ++static struct platform_driver ipq_pwm_driver = { ++ .driver = { ++ .name = "ipq-pwm", ++ .of_match_table = pwm_ipq_dt_match, ++ }, ++ .probe = ipq_pwm_probe, ++ .remove = ipq_pwm_remove, ++}; ++ ++module_platform_driver(ipq_pwm_driver); ++ ++MODULE_LICENSE("Dual BSD/GPL"); diff --git a/target/linux/qualcommax/patches-6.6/0142-dt-bindings-mfd-qcom-tcsr-Add-simple-mfd-support-for-IPQ6018.patch b/target/linux/qualcommax/patches-6.6/0142-dt-bindings-mfd-qcom-tcsr-Add-simple-mfd-support-for-IPQ6018.patch new file mode 100644 index 000000000..1714fc4e6 --- /dev/null +++ b/target/linux/qualcommax/patches-6.6/0142-dt-bindings-mfd-qcom-tcsr-Add-simple-mfd-support-for-IPQ6018.patch @@ -0,0 +1,148 @@ +From: Devi Priya +Subject: [PATCH] dt-bindings: mfd: qcom,tcsr: Add simple-mfd support for IPQ6018 +Date: Thu, 5 Oct 2023 21:35:49 +0530 + +Update the binding to include pwm as the child node to TCSR block and +add simple-mfd support for IPQ6018. + +Reviewed-by: Krzysztof Kozlowski +Signed-off-by: Devi Priya +--- + .../devicetree/bindings/mfd/qcom,tcsr.yaml | 112 +++++++++++++----- + 1 file changed, 81 insertions(+), 31 deletions(-) + +--- a/Documentation/devicetree/bindings/mfd/qcom,tcsr.yaml ++++ b/Documentation/devicetree/bindings/mfd/qcom,tcsr.yaml +@@ -15,49 +15,101 @@ description: + + properties: + compatible: +- items: +- - enum: +- - qcom,msm8976-tcsr +- - qcom,msm8998-tcsr +- - qcom,qcs404-tcsr +- - qcom,sc7180-tcsr +- - qcom,sc7280-tcsr +- - qcom,sc8280xp-tcsr +- - qcom,sdm630-tcsr +- - qcom,sdm845-tcsr +- - qcom,sdx55-tcsr +- - qcom,sdx65-tcsr +- - qcom,sm8150-tcsr +- - qcom,sm8450-tcsr +- - qcom,tcsr-apq8064 +- - qcom,tcsr-apq8084 +- - qcom,tcsr-ipq5332 +- - qcom,tcsr-ipq6018 +- - qcom,tcsr-ipq8064 +- - qcom,tcsr-ipq8074 +- - qcom,tcsr-ipq9574 +- - qcom,tcsr-mdm9615 +- - qcom,tcsr-msm8226 +- - qcom,tcsr-msm8660 +- - qcom,tcsr-msm8916 +- - qcom,tcsr-msm8953 +- - qcom,tcsr-msm8960 +- - qcom,tcsr-msm8974 +- - qcom,tcsr-msm8996 +- - const: syscon ++ oneOf: ++ - items: ++ - enum: ++ - qcom,msm8976-tcsr ++ - qcom,msm8998-tcsr ++ - qcom,qcs404-tcsr ++ - qcom,sc7180-tcsr ++ - qcom,sc7280-tcsr ++ - qcom,sc8280xp-tcsr ++ - qcom,sdm630-tcsr ++ - qcom,sdm845-tcsr ++ - qcom,sdx55-tcsr ++ - qcom,sdx65-tcsr ++ - qcom,sm4450-tcsr ++ - qcom,sm8150-tcsr ++ - qcom,sm8450-tcsr ++ - qcom,tcsr-apq8064 ++ - qcom,tcsr-apq8084 ++ - qcom,tcsr-ipq5332 ++ - qcom,tcsr-ipq8064 ++ - qcom,tcsr-ipq8074 ++ - qcom,tcsr-ipq9574 ++ - qcom,tcsr-mdm9615 ++ - qcom,tcsr-msm8226 ++ - qcom,tcsr-msm8660 ++ - qcom,tcsr-msm8916 ++ - qcom,tcsr-msm8953 ++ - qcom,tcsr-msm8960 ++ - qcom,tcsr-msm8974 ++ - qcom,tcsr-msm8996 ++ - const: syscon ++ - items: ++ - const: qcom,tcsr-ipq6018 ++ - const: syscon ++ - const: simple-mfd + + reg: + maxItems: 1 + ++ ranges: true ++ ++ "#address-cells": ++ const: 1 ++ ++ "#size-cells": ++ const: 1 ++ ++patternProperties: ++ "pwm@[a-f0-9]+$": ++ type: object ++ $ref: /schemas/pwm/qcom,ipq6018-pwm.yaml ++ ++ + required: + - compatible + - reg + ++allOf: ++ - if: ++ not: ++ properties: ++ compatible: ++ contains: ++ enum: ++ - qcom,tcsr-ipq6018 ++ then: ++ patternProperties: ++ "pwm@[a-f0-9]+$": false ++ + additionalProperties: false + + examples: ++ # Example 1 - Syscon node found on MSM8960 + - | + syscon@1a400000 { + compatible = "qcom,tcsr-msm8960", "syscon"; + reg = <0x1a400000 0x100>; + }; ++ # Example 2 - Syscon node found on IPQ6018 ++ - | ++ #include ++ ++ syscon@1937000 { ++ compatible = "qcom,tcsr-ipq6018", "syscon", "simple-mfd"; ++ reg = 
<0x01937000 0x21000>; ++ ranges = <0 0x1937000 0x21000>; ++ #address-cells = <1>; ++ #size-cells = <1>; ++ ++ pwm: pwm@a010 { ++ compatible = "qcom,ipq6018-pwm"; ++ reg = <0xa010 0x20>; ++ clocks = <&gcc GCC_ADSS_PWM_CLK>; ++ assigned-clocks = <&gcc GCC_ADSS_PWM_CLK>; ++ assigned-clock-rates = <100000000>; ++ #pwm-cells = <2>; ++ }; ++ }; +\ No newline at end of file diff --git a/target/linux/qualcommax/patches-6.6/0150-dt-bindings-nvmem-add-IPQ5018-compatible.patch b/target/linux/qualcommax/patches-6.6/0150-dt-bindings-nvmem-add-IPQ5018-compatible.patch new file mode 100644 index 000000000..c31b2ba49 --- /dev/null +++ b/target/linux/qualcommax/patches-6.6/0150-dt-bindings-nvmem-add-IPQ5018-compatible.patch @@ -0,0 +1,22 @@ +From: Sricharan Ramabadhran +Subject: [PATCH V2 1/1] dt-bindings: nvmem: Add compatible for IPQ5018 +Date: Fri, 15 Sep 2023 17:31:20 +0530 + +Document the QFPROM on IPQ5018. + +Reviewed-by: Krzysztof Kozlowski +Signed-off-by: Sricharan Ramabadhran +--- + Documentation/devicetree/bindings/nvmem/qcom,qfprom.yaml | 1 + + 1 file changed, 1 insertion(+) + +--- a/Documentation/devicetree/bindings/nvmem/qcom,qfprom.yaml ++++ b/Documentation/devicetree/bindings/nvmem/qcom,qfprom.yaml +@@ -18,6 +18,7 @@ properties: + - enum: + - qcom,apq8064-qfprom + - qcom,apq8084-qfprom ++ - qcom,ipq5018-qfprom + - qcom,ipq5332-qfprom + - qcom,ipq6018-qfprom + - qcom,ipq8064-qfprom diff --git a/target/linux/qualcommax/patches-6.6/0151-dt-bindings-thermal-qcom-tsens-Add-ipq5018-compatible.patch b/target/linux/qualcommax/patches-6.6/0151-dt-bindings-thermal-qcom-tsens-Add-ipq5018-compatible.patch new file mode 100644 index 000000000..36ab4abbd --- /dev/null +++ b/target/linux/qualcommax/patches-6.6/0151-dt-bindings-thermal-qcom-tsens-Add-ipq5018-compatible.patch @@ -0,0 +1,26 @@ +From: Sricharan Ramabadhran +Date: Fri, 22 Sep 2023 17:21:13 +0530 +Subject: [PATCH] dt-bindings: thermal: qcom-tsens: Add ipq5018 compatible + +IPQ5018 has tsens v1.0 block with 4 sensors and 1 interrupt. + +Signed-off-by: Sricharan Ramabadhran +--- +--- a/Documentation/devicetree/bindings/thermal/qcom-tsens.yaml ++++ b/Documentation/devicetree/bindings/thermal/qcom-tsens.yaml +@@ -39,6 +39,7 @@ properties: + - description: v1 of TSENS + items: + - enum: ++ - qcom,ipq5018-tsens + - qcom,msm8956-tsens + - qcom,msm8976-tsens + - qcom,qcs404-tsens +@@ -232,6 +233,7 @@ allOf: + compatible: + contains: + enum: ++ - qcom,ipq5018-tsens + - qcom,ipq8064-tsens + - qcom,msm8960-tsens + - qcom,tsens-v0_1 diff --git a/target/linux/qualcommax/patches-6.6/0152-thermal-qcom-add-new-feat-for-soc-without-rpm.patch b/target/linux/qualcommax/patches-6.6/0152-thermal-qcom-add-new-feat-for-soc-without-rpm.patch new file mode 100644 index 000000000..c8f393e2b --- /dev/null +++ b/target/linux/qualcommax/patches-6.6/0152-thermal-qcom-add-new-feat-for-soc-without-rpm.patch @@ -0,0 +1,45 @@ +From: Sricharan Ramabadhran +Subject: [PATCH] thermal/drivers/qcom: Add new feat for soc without rpm +Date: Fri, 22 Sep 2023 17:21:14 +0530 + +In IPQ5018, Tsens IP doesn't have RPM. Hence the early init to +enable tsens would not be done. So add a flag for that in feat +and skip enable checks. Without this, tsens probe fails. 
+ +Reviewed-by: Dmitry Baryshkov +Signed-off-by: Sricharan Ramabadhran +--- + drivers/thermal/qcom/tsens.c | 2 +- + drivers/thermal/qcom/tsens.h | 3 +++ + 2 files changed, 4 insertions(+), 1 deletion(-) + +--- a/drivers/thermal/qcom/tsens.c ++++ b/drivers/thermal/qcom/tsens.c +@@ -974,7 +974,7 @@ int __init init_common(struct tsens_priv + ret = regmap_field_read(priv->rf[TSENS_EN], &enabled); + if (ret) + goto err_put_device; +- if (!enabled) { ++ if (!enabled && !(priv->feat->ignore_enable)) { + dev_err(dev, "%s: device not enabled\n", __func__); + ret = -ENODEV; + goto err_put_device; +--- a/drivers/thermal/qcom/tsens.h ++++ b/drivers/thermal/qcom/tsens.h +@@ -505,6 +505,8 @@ enum regfield_ids { + * @srot_split: does the IP neatly splits the register space into SROT and TM, + * with SROT only being available to secure boot firmware? + * @has_watchdog: does this IP support watchdog functionality? ++ * @ignore_enable: does this IP reside in a soc that does not have rpm to ++ * do pre-init. + * @max_sensors: maximum sensors supported by this version of the IP + * @trip_min_temp: minimum trip temperature supported by this version of the IP + * @trip_max_temp: maximum trip temperature supported by this version of the IP +@@ -516,6 +518,7 @@ struct tsens_features { + unsigned int adc:1; + unsigned int srot_split:1; + unsigned int has_watchdog:1; ++ unsigned int ignore_enable:1; + unsigned int max_sensors; + int trip_min_temp; + int trip_max_temp; diff --git a/target/linux/qualcommax/patches-6.6/0153-thermal-qcom-tsens-add-support-for-IPQ5018-tsens.patch b/target/linux/qualcommax/patches-6.6/0153-thermal-qcom-tsens-add-support-for-IPQ5018-tsens.patch new file mode 100644 index 000000000..477a050fd --- /dev/null +++ b/target/linux/qualcommax/patches-6.6/0153-thermal-qcom-tsens-add-support-for-IPQ5018-tsens.patch @@ -0,0 +1,124 @@ +From: Sricharan Ramabadhran +Subject: [PATCH] thermal/drivers/tsens: Add support for IPQ5018 tsens +Date: Fri, 22 Sep 2023 17:21:15 +0530 + +IPQ5018 has tsens IP V1.0, 4 sensors and 1 interrupt. +The soc does not have a RPM, hence tsens has to be reset and +enabled in the driver init. Adding the driver support for same. 
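Condensed sketch of the resulting init path for an RPM-less SoC (mirrors init_ipq5018() in the hunk below; declarations, delays and error handling omitted). Note that GENMASK(priv->num_sensors, 0) with num_sensors = 5 evaluates to 0x3f, i.e. bits 0..5 of the sensor-enable field:

	ret = init_common(priv);				/* common resource setup          */
	regmap_field_write(priv->rf[TSENS_SW_RST], 1);		/* put the controller in reset    */
	mask = GENMASK(priv->num_sensors, 0);			/* 0x3f when num_sensors == 5     */
	regmap_field_update_bits(priv->rf[SENSOR_EN], mask, mask);
	regmap_field_write(priv->rf[TSENS_EN], 1);		/* global enable                  */
	regmap_field_write(priv->rf[TSENS_SW_RST], 0);		/* release reset                  */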
+ +Signed-off-by: Sricharan Ramabadhran +--- + drivers/thermal/qcom/tsens-v1.c | 60 +++++++++++++++++++++++++++++++++ + drivers/thermal/qcom/tsens.c | 3 ++ + drivers/thermal/qcom/tsens.h | 2 +- + 3 files changed, 64 insertions(+), 1 deletion(-) + +diff --git a/drivers/thermal/qcom/tsens-v1.c b/drivers/thermal/qcom/tsens-v1.c +index dc1c4ae2d8b0..acee2064f83e 100644 +--- a/drivers/thermal/qcom/tsens-v1.c ++++ b/drivers/thermal/qcom/tsens-v1.c +@@ -79,6 +79,18 @@ static struct tsens_features tsens_v1_feat = { + .trip_max_temp = 120000, + }; + ++static struct tsens_features tsens_v1_ipq5018_feat = { ++ .ver_major = VER_1_X, ++ .crit_int = 0, ++ .combo_int = 0, ++ .adc = 1, ++ .srot_split = 1, ++ .max_sensors = 11, ++ .trip_min_temp = -40000, ++ .trip_max_temp = 120000, ++ .ignore_enable = 1, ++}; ++ + static const struct reg_field tsens_v1_regfields[MAX_REGFIELDS] = { + /* ----- SROT ------ */ + /* VERSION */ +@@ -150,6 +162,41 @@ static int __init init_8956(struct tsens_priv *priv) { + return init_common(priv); + } + ++static int __init init_ipq5018(struct tsens_priv *priv) ++{ ++ int ret; ++ u32 mask; ++ ++ ret = init_common(priv); ++ if (ret < 0) { ++ dev_err(priv->dev, "Init common failed %d\n", ret); ++ return ret; ++ } ++ ++ ret = regmap_field_write(priv->rf[TSENS_SW_RST], 1); ++ if (ret) { ++ dev_err(priv->dev, "Reset failed\n"); ++ return ret; ++ } ++ ++ mask = GENMASK(priv->num_sensors, 0); ++ ret = regmap_field_update_bits(priv->rf[SENSOR_EN], mask, mask); ++ if (ret) { ++ dev_err(priv->dev, "Sensor Enable failed\n"); ++ return ret; ++ } ++ ++ ret = regmap_field_write(priv->rf[TSENS_EN], 1); ++ if (ret) { ++ dev_err(priv->dev, "Enable failed\n"); ++ return ret; ++ } ++ ++ ret = regmap_field_write(priv->rf[TSENS_SW_RST], 0); ++ ++ return ret; ++} ++ + static const struct tsens_ops ops_generic_v1 = { + .init = init_common, + .calibrate = calibrate_v1, +@@ -187,3 +234,16 @@ struct tsens_plat_data data_8976 = { + .feat = &tsens_v1_feat, + .fields = tsens_v1_regfields, + }; ++ ++const struct tsens_ops ops_ipq5018 = { ++ .init = init_ipq5018, ++ .calibrate = tsens_calibrate_common, ++ .get_temp = get_temp_tsens_valid, ++}; ++ ++struct tsens_plat_data data_ipq5018 = { ++ .num_sensors = 5, ++ .ops = &ops_ipq5018, ++ .feat = &tsens_v1_ipq5018_feat, ++ .fields = tsens_v1_regfields, ++}; +diff --git a/drivers/thermal/qcom/tsens.c b/drivers/thermal/qcom/tsens.c +index 0a43ccf02ec4..c792b9dc6676 100644 +--- a/drivers/thermal/qcom/tsens.c ++++ b/drivers/thermal/qcom/tsens.c +@@ -1101,6 +1101,9 @@ static SIMPLE_DEV_PM_OPS(tsens_pm_ops, tsens_suspend, tsens_resume); + + static const struct of_device_id tsens_table[] = { + { ++ .compatible = "qcom,ipq5018-tsens", ++ .data = &data_ipq5018, ++ }, { + .compatible = "qcom,ipq8064-tsens", + .data = &data_8960, + }, { +diff --git a/drivers/thermal/qcom/tsens.h b/drivers/thermal/qcom/tsens.h +index e254cd2df904..b6594b546d11 100644 +--- a/drivers/thermal/qcom/tsens.h ++++ b/drivers/thermal/qcom/tsens.h +@@ -645,7 +645,7 @@ extern struct tsens_plat_data data_8960; + extern struct tsens_plat_data data_8226, data_8909, data_8916, data_8939, data_8974, data_9607; + + /* TSENS v1 targets */ +-extern struct tsens_plat_data data_tsens_v1, data_8976, data_8956; ++extern struct tsens_plat_data data_tsens_v1, data_8976, data_8956, data_ipq5018; + + /* TSENS v2 targets */ + extern struct tsens_plat_data data_8996, data_ipq8074, data_tsens_v2; diff --git a/target/linux/qualcommax/patches-6.6/0154-dts-qcom-IPQ5018-add-tsens-node.patch 
b/target/linux/qualcommax/patches-6.6/0154-dts-qcom-IPQ5018-add-tsens-node.patch new file mode 100644 index 000000000..4f9b8abfa --- /dev/null +++ b/target/linux/qualcommax/patches-6.6/0154-dts-qcom-IPQ5018-add-tsens-node.patch @@ -0,0 +1,200 @@ +From: Sricharan Ramabadhran +Subject: [PATCH] arm64: dts: qcom: ipq5018: Add tsens node +Date: Fri, 22 Sep 2023 17:21:16 +0530 + +IPQ5018 has tsens V1.0 IP with 4 sensors. +There is no RPM, so tsens has to be manually enabled. Adding the tsens +and nvmem node and IPQ5018 has 4 thermal sensors (zones). With the +critical temperature being 120'C and action is to reboot. Adding all +the 4 zones here. + +Signed-off-by: Sricharan Ramabadhran +--- + arch/arm64/boot/dts/qcom/ipq5018.dtsi | 169 ++++++++++++++++++++++++++ + 1 file changed, 169 insertions(+) + +--- a/arch/arm64/boot/dts/qcom/ipq5018.dtsi ++++ b/arch/arm64/boot/dts/qcom/ipq5018.dtsi +@@ -149,6 +149,117 @@ + status = "disabled"; + }; + ++ qfprom: qfprom@a0000 { ++ compatible = "qcom,ipq5018-qfprom", "qcom,qfprom"; ++ reg = <0xa0000 0x1000>; ++ #address-cells = <1>; ++ #size-cells = <1>; ++ ++ tsens_mode: mode@249 { ++ reg = <0x249 1>; ++ bits = <0 3>; ++ }; ++ ++ tsens_base1: base1@249 { ++ reg = <0x249 2>; ++ bits = <3 8>; ++ }; ++ ++ tsens_base2: base2@24a { ++ reg = <0x24a 2>; ++ bits = <3 8>; ++ }; ++ ++ tsens_s0_p1: s0-p1@24b { ++ reg = <0x24b 0x2>; ++ bits = <2 6>; ++ }; ++ ++ tsens_s0_p2: s0-p2@24c { ++ reg = <0x24c 0x1>; ++ bits = <1 6>; ++ }; ++ ++ tsens_s1_p1: s1-p1@24c { ++ reg = <0x24c 0x2>; ++ bits = <7 6>; ++ }; ++ ++ tsens_s1_p2: s1-p2@24d { ++ reg = <0x24d 0x2>; ++ bits = <5 6>; ++ }; ++ ++ tsens_s2_p1: s2-p1@24e { ++ reg = <0x24e 0x2>; ++ bits = <3 6>; ++ }; ++ ++ tsens_s2_p2: s2-p2@24f { ++ reg = <0x24f 0x1>; ++ bits = <1 6>; ++ }; ++ ++ tsens_s3_p1: s3-p1@24f { ++ reg = <0x24f 0x2>; ++ bits = <7 6>; ++ }; ++ ++ tsens_s3_p2: s3-p2@250 { ++ reg = <0x250 0x2>; ++ bits = <5 6>; ++ }; ++ ++ tsens_s4_p1: s4-p1@251 { ++ reg = <0x251 0x2>; ++ bits = <3 6>; ++ }; ++ ++ tsens_s4_p2: s4-p2@254 { ++ reg = <0x254 0x1>; ++ bits = <0 6>; ++ }; ++ }; ++ ++ tsens: thermal-sensor@4a9000 { ++ compatible = "qcom,ipq5018-tsens"; ++ reg = <0x4a9000 0x1000>, /* TM */ ++ <0x4a8000 0x1000>; /* SROT */ ++ ++ nvmem-cells = <&tsens_mode>, ++ <&tsens_base1>, ++ <&tsens_base2>, ++ <&tsens_s0_p1>, ++ <&tsens_s0_p2>, ++ <&tsens_s1_p1>, ++ <&tsens_s1_p2>, ++ <&tsens_s2_p1>, ++ <&tsens_s2_p2>, ++ <&tsens_s3_p1>, ++ <&tsens_s3_p2>, ++ <&tsens_s4_p1>, ++ <&tsens_s4_p2>; ++ ++ nvmem-cell-names = "mode", ++ "base1", ++ "base2", ++ "s0_p1", ++ "s0_p2", ++ "s1_p1", ++ "s1_p2", ++ "s2_p1", ++ "s2_p2", ++ "s3_p1", ++ "s3_p2", ++ "s4_p1", ++ "s4_p2"; ++ ++ interrupts = ; ++ interrupt-names = "uplow"; ++ #qcom,sensors = <5>; ++ #thermal-sensor-cells = <1>; ++ }; ++ + tlmm: pinctrl@1000000 { + compatible = "qcom,ipq5018-tlmm"; + reg = <0x01000000 0x300000>; +@@ -391,6 +502,64 @@ + }; + }; + }; ++ ++ thermal-zones { ++ cpu-thermal { ++ polling-delay-passive = <0>; ++ polling-delay = <0>; ++ thermal-sensors = <&tsens 2>; ++ ++ trips { ++ cpu-critical { ++ temperature = <120000>; ++ hysteresis = <2>; ++ type = "critical"; ++ }; ++ }; ++ }; ++ ++ gephy-thermal { ++ polling-delay-passive = <0>; ++ polling-delay = <0>; ++ thermal-sensors = <&tsens 4>; ++ ++ trips { ++ gephy-critical { ++ temperature = <120000>; ++ hysteresis = <2>; ++ type = "critical"; ++ }; ++ }; ++ }; ++ ++ top-glue-thermal { ++ polling-delay-passive = <0>; ++ polling-delay = <0>; ++ thermal-sensors = <&tsens 3>; ++ ++ trips { ++ top_glue-critical { ++ 
temperature = <120000>; ++ hysteresis = <2>; ++ type = "critical"; ++ }; ++ }; ++ }; ++ ++ ubi32-thermal { ++ polling-delay-passive = <0>; ++ polling-delay = <0>; ++ thermal-sensors = <&tsens 1>; ++ ++ trips { ++ ubi32-critical { ++ temperature = <120000>; ++ hysteresis = <2>; ++ type = "critical"; ++ }; ++ }; ++ }; ++ }; + + timer { + compatible = "arm,armv8-timer"; diff --git a/target/linux/qualcommax/patches-6.6/0155-dt-bindings-phy-qcom-document-PCIe-uniphy.patch b/target/linux/qualcommax/patches-6.6/0155-dt-bindings-phy-qcom-document-PCIe-uniphy.patch new file mode 100644 index 000000000..3ccbd01d2 --- /dev/null +++ b/target/linux/qualcommax/patches-6.6/0155-dt-bindings-phy-qcom-document-PCIe-uniphy.patch @@ -0,0 +1,95 @@ +From: Nitheesh Sekar +Subject: [PATCH] dt-bindings: phy: qcom,uniphy-pcie: Document PCIe uniphy +Date: Tue, 3 Oct 2023 17:38:41 +0530 + +Document the Qualcomm UNIPHY PCIe 28LP present in IPQ5018. + +Signed-off-by: Nitheesh Sekar +--- + .../bindings/phy/qcom,uniphy-pcie-28lp.yaml | 77 +++++++++++++++++++ + 1 file changed, 77 insertions(+) + create mode 100644 Documentation/devicetree/bindings/phy/qcom,uniphy-pcie-28lp.yaml + +diff --git a/Documentation/devicetree/bindings/phy/qcom,ipq5018-uniphy-pcie.yaml b/Documentation/devicetree/bindings/phy/qcom,ipq5018-uniphy-pcie.yaml +new file mode 100644 +index 000000000000..6b2574f9532e +--- /dev/null ++++ b/Documentation/devicetree/bindings/phy/qcom,ipq5018-uniphy-pcie.yaml +@@ -0,0 +1,77 @@ ++# SPDX-License-Identifier: (GPL-2.0 OR BSD-2-Clause) ++%YAML 1.2 ++--- ++$id: http://devicetree.org/schemas/phy/qcom,ipq5018-uniphy-pcie.yaml# ++$schema: http://devicetree.org/meta-schemas/core.yaml# ++ ++title: Qualcomm IPQ5018 UNIPHY PCIe PHY driver ++ ++maintainers: ++ - Nitheesh Sekar ++ - Sricharan Ramabadhran ++ ++properties: ++ compatible: ++ enum: ++ - qcom,ipq5018-uniphy-pcie-gen2x1 ++ - qcom,ipq5018-uniphy-pcie-gen2x2 ++ ++ reg: ++ maxItems: 1 ++ ++ clocks: ++ maxItems: 1 ++ ++ clock-names: ++ items: ++ - const: pipe_clk ++ ++ resets: ++ maxItems: 2 ++ ++ reset-names: ++ items: ++ - const: phy ++ - const: phy_phy ++ ++ "#phy-cells": ++ const: 0 ++ ++ "#clock-cells": ++ const: 0 ++ ++ clock-output-names: ++ maxItems: 1 ++ ++required: ++ - compatible ++ - reg ++ - resets ++ - reset-names ++ - clocks ++ - clock-names ++ - "#phy-cells" ++ - "#clock-cells" ++ - clock-output-names ++ ++additionalProperties: false ++ ++examples: ++ - | ++ #include ++ #include ++ ++ phy@86000 { ++ compatible = "qcom,ipq5018-uniphy-pcie-gen2x2"; ++ reg = <0x86000 0x800>; ++ #phy-cells = <0>; ++ #clock-cells = <0>; ++ clocks = <&gcc GCC_PCIE0_PIPE_CLK>; ++ clock-names = "pipe_clk"; ++ clock-output-names = "pcie0_pipe_clk"; ++ assigned-clocks = <&gcc GCC_PCIE1_PIPE_CLK>; ++ assigned-clock-rates = <125000000>; ++ resets = <&gcc GCC_PCIE0_PHY_BCR>, ++ <&gcc GCC_PCIE0PHY_PHY_BCR>; ++ reset-names = "phy", "phy_phy"; ++ }; diff --git a/target/linux/qualcommax/patches-6.6/0156-dt-bindings-pci-qcom-add-IPQ5018-soc.patch b/target/linux/qualcommax/patches-6.6/0156-dt-bindings-pci-qcom-add-IPQ5018-soc.patch new file mode 100644 index 000000000..db59aea28 --- /dev/null +++ b/target/linux/qualcommax/patches-6.6/0156-dt-bindings-pci-qcom-add-IPQ5018-soc.patch @@ -0,0 +1,78 @@ +From: Nitheesh Sekar +Subject: [PATCH] dt-bindings: PCI: qcom: Add IPQ5108 SoC +Date: Tue, 3 Oct 2023 17:38:42 +0530 + +Add support for the PCIe controller on the Qualcomm +IPQ5108 SoC to the bindings. 
+ +Signed-off-by: Nitheesh Sekar +--- + .../devicetree/bindings/pci/qcom,pcie.yaml | 36 +++++++++++++++++++ + 1 file changed, 36 insertions(+) + +--- a/Documentation/devicetree/bindings/pci/qcom,pcie.yaml ++++ b/Documentation/devicetree/bindings/pci/qcom,pcie.yaml +@@ -21,6 +21,7 @@ properties: + - qcom,pcie-apq8064 + - qcom,pcie-apq8084 + - qcom,pcie-ipq4019 ++ - qcom,pcie-ipq5018 + - qcom,pcie-ipq6018 + - qcom,pcie-ipq8064 + - qcom,pcie-ipq8064-v2 +@@ -170,6 +171,7 @@ allOf: + compatible: + contains: + enum: ++ - qcom,pcie-ipq5018 + - qcom,pcie-ipq6018 + - qcom,pcie-ipq8074-gen3 + then: +@@ -337,6 +339,39 @@ allOf: + compatible: + contains: + enum: ++ - qcom,pcie-ipq5018 ++ then: ++ properties: ++ clocks: ++ minItems: 6 ++ maxItems: 6 ++ clock-names: ++ items: ++ - const: iface # PCIe to SysNOC BIU clock ++ - const: axi_m # AXI Master clock ++ - const: axi_s # AXI Slave clock ++ - const: ahb # AHB clock ++ - const: aux # Auxiliary clock ++ - const: axi_bridge # AXI bridge clock ++ resets: ++ minItems: 8 ++ maxItems: 8 ++ reset-names: ++ items: ++ - const: pipe # PIPE reset ++ - const: sleep # Sleep reset ++ - const: sticky # Core sticky reset ++ - const: axi_m # AXI master reset ++ - const: axi_s # AXI slave reset ++ - const: ahb # AHB reset ++ - const: axi_m_sticky # AXI master sticky reset ++ - const: axi_s_sticky # AXI slave sticky reset ++ ++ - if: ++ properties: ++ compatible: ++ contains: ++ enum: + - qcom,pcie-msm8996 + then: + properties: +@@ -875,6 +910,7 @@ allOf: + - qcom,pcie-apq8064 + - qcom,pcie-apq8084 + - qcom,pcie-ipq4019 ++ - qcom,pcie-ipq5018 + - qcom,pcie-ipq6018 + - qcom,pcie-ipq8064 + - qcom,pcie-ipq8064-v2 diff --git a/target/linux/qualcommax/patches-6.6/0157-phy-qcom-add-uniphy-pcie-gen2-driver.patch b/target/linux/qualcommax/patches-6.6/0157-phy-qcom-add-uniphy-pcie-gen2-driver.patch new file mode 100644 index 000000000..06b8a2c9e --- /dev/null +++ b/target/linux/qualcommax/patches-6.6/0157-phy-qcom-add-uniphy-pcie-gen2-driver.patch @@ -0,0 +1,381 @@ +From: Nitheesh Sekar +Subject: [PATCH] phy: qcom: Introduce PCIe UNIPHY 28LP driver +Date: Tue, 3 Oct 2023 17:38:43 +0530 + +Add Qualcomm PCIe UNIPHY 28LP driver support present +in Qualcomm IPQ5018 SoC and the phy init sequence. + +Signed-off-by: Nitheesh Sekar +--- + drivers/phy/qualcomm/Kconfig | 12 + + drivers/phy/qualcomm/Makefile | 1 + + .../phy/qualcomm/phy-qcom-uniphy-pcie-28lp.c | 336 ++++++++++++++++++ + 3 files changed, 349 insertions(+) + create mode 100644 drivers/phy/qualcomm/phy-qcom-uniphy-pcie-28lp.c + +--- a/drivers/phy/qualcomm/Kconfig ++++ b/drivers/phy/qualcomm/Kconfig +@@ -35,6 +35,18 @@ config PHY_QCOM_IPQ4019_USB + help + Support for the USB PHY-s on Qualcomm IPQ40xx SoC-s. + ++config PHY_QCOM_IPQ5018_UNIPHY_PCIE ++ bool "PCIE IPQ5018 UNIPHY PHY driver" ++ depends on ARCH_QCOM ++ depends on HAS_IOMEM ++ depends on OF ++ select GENERIC_PHY ++ help ++ Enable this to support the IPQ5018 PCIe UNIPHY phy transceiver that ++ is used with PCIe controllers on Qualcomm IPQ5018 chips. It ++ handles PHY initialization, clock management required after ++ resetting the hardware and power management. 
++ + config PHY_QCOM_IPQ806X_SATA + tristate "Qualcomm IPQ806x SATA SerDes/PHY driver" + depends on ARCH_QCOM +--- a/drivers/phy/qualcomm/Makefile ++++ b/drivers/phy/qualcomm/Makefile +@@ -3,6 +3,7 @@ obj-$(CONFIG_PHY_ATH79_USB) += phy-ath7 + obj-$(CONFIG_PHY_QCOM_APQ8064_SATA) += phy-qcom-apq8064-sata.o + obj-$(CONFIG_PHY_QCOM_EDP) += phy-qcom-edp.o + obj-$(CONFIG_PHY_QCOM_IPQ4019_USB) += phy-qcom-ipq4019-usb.o ++obj-$(CONFIG_PHY_QCOM_IPQ5018_UNIPHY_PCIE) += phy-qcom-ipq5018-uniphy-pcie.o + obj-$(CONFIG_PHY_QCOM_IPQ806X_SATA) += phy-qcom-ipq806x-sata.o + obj-$(CONFIG_PHY_QCOM_M31_USB) += phy-qcom-m31.o + obj-$(CONFIG_PHY_QCOM_PCIE2) += phy-qcom-pcie2.o +--- /dev/null ++++ b/drivers/phy/qualcomm/phy-qcom-ipq5018-uniphy-pcie.c +@@ -0,0 +1,332 @@ ++// SPDX-License-Identifier: GPL-2.0+ ++/* ++ * Copyright (c) 2023, The Linux Foundation. All rights reserved. ++ */ ++ ++#include ++#include ++#include ++#include ++#include ++#include ++#include ++#include ++#include ++#include ++#include ++#include ++#include ++ ++#define PIPE_CLK_DELAY_MIN_US 5000 ++#define PIPE_CLK_DELAY_MAX_US 5100 ++#define CDR_CTRL_REG_1 0x80 ++#define CDR_CTRL_REG_2 0x84 ++#define CDR_CTRL_REG_3 0x88 ++#define CDR_CTRL_REG_4 0x8C ++#define CDR_CTRL_REG_5 0x90 ++#define CDR_CTRL_REG_6 0x94 ++#define CDR_CTRL_REG_7 0x98 ++#define SSCG_CTRL_REG_1 0x9c ++#define SSCG_CTRL_REG_2 0xa0 ++#define SSCG_CTRL_REG_3 0xa4 ++#define SSCG_CTRL_REG_4 0xa8 ++#define SSCG_CTRL_REG_5 0xac ++#define SSCG_CTRL_REG_6 0xb0 ++#define PCS_INTERNAL_CONTROL_2 0x2d8 ++ ++#define PHY_MODE_FIXED 0x1 ++ ++enum qcom_uniphy_pcie_type { ++ PHY_TYPE_PCIE = 1, ++ PHY_TYPE_PCIE_GEN2, ++ PHY_TYPE_PCIE_GEN3, ++}; ++ ++struct uniphy_regs { ++ unsigned int offset; ++ unsigned int val; ++}; ++ ++struct uniphy_pcie_data { ++ int lanes; ++ /* 2nd lane offset */ ++ int lane_offset; ++ unsigned int phy_type; ++ const struct uniphy_regs *init_seq; ++ unsigned int init_seq_num; ++}; ++ ++struct qcom_uniphy_pcie { ++ struct phy phy; ++ struct device *dev; ++ const struct uniphy_pcie_data *data; ++ struct clk_bulk_data *clks; ++ int num_clks; ++ struct reset_control *resets; ++ void __iomem *base; ++}; ++ ++#define phy_to_dw_phy(x) container_of((x), struct qca_uni_pcie_phy, phy) ++ ++static const struct uniphy_regs ipq5018_regs[] = { ++ { ++ .offset = SSCG_CTRL_REG_4, ++ .val = 0x1cb9, ++ }, { ++ .offset = SSCG_CTRL_REG_5, ++ .val = 0x023a, ++ }, { ++ .offset = SSCG_CTRL_REG_3, ++ .val = 0xd360, ++ }, { ++ .offset = SSCG_CTRL_REG_1, ++ .val = 0x1, ++ }, { ++ .offset = SSCG_CTRL_REG_2, ++ .val = 0xeb, ++ }, { ++ .offset = CDR_CTRL_REG_4, ++ .val = 0x3f9, ++ }, { ++ .offset = CDR_CTRL_REG_5, ++ .val = 0x1c9, ++ }, { ++ .offset = CDR_CTRL_REG_2, ++ .val = 0x419, ++ }, { ++ .offset = CDR_CTRL_REG_1, ++ .val = 0x200, ++ }, { ++ .offset = PCS_INTERNAL_CONTROL_2, ++ .val = 0xf101, ++ }, ++}; ++ ++static const struct uniphy_pcie_data ipq5018_2x1_data = { ++ .lanes = 1, ++ .lane_offset = 0x800, ++ .phy_type = PHY_TYPE_PCIE_GEN2, ++ .init_seq = ipq5018_regs, ++ .init_seq_num = ARRAY_SIZE(ipq5018_regs), ++}; ++ ++static const struct uniphy_pcie_data ipq5018_2x2_data = { ++ .lanes = 2, ++ .lane_offset = 0x800, ++ .phy_type = PHY_TYPE_PCIE_GEN2, ++ .init_seq = ipq5018_regs, ++ .init_seq_num = ARRAY_SIZE(ipq5018_regs), ++}; ++ ++static void qcom_uniphy_pcie_init(struct qcom_uniphy_pcie *phy) ++{ ++ const struct uniphy_pcie_data *data = phy->data; ++ const struct uniphy_regs *init_seq; ++ void __iomem *base = phy->base; ++ ++ for (int lane = 0; lane < data->lanes; lane++) { ++ init_seq 
= data->init_seq; ++ ++ for (int i = 0; i < data->init_seq_num; i++, init_seq++) ++ writel(init_seq->val, base + init_seq->offset); ++ ++ base += data->lane_offset; ++ } ++} ++ ++static int qcom_uniphy_pcie_power_off(struct phy *x) ++{ ++ struct qcom_uniphy_pcie *phy = phy_get_drvdata(x); ++ ++ clk_bulk_disable_unprepare(phy->num_clks, phy->clks); ++ ++ reset_control_assert(phy->resets); ++ ++ return 0; ++} ++ ++static int qcom_uniphy_pcie_power_on(struct phy *x) ++{ ++ int ret; ++ struct qcom_uniphy_pcie *phy = phy_get_drvdata(x); ++ ++ ret = reset_control_assert(phy->resets); ++ if (ret) { ++ dev_err(phy->dev, "reset assert failed (%d)\n", ret); ++ return ret; ++ } ++ ++ /* ++ * Delay periods before and after reset deassert are working values ++ * from downstream Codeaurora kernel ++ */ ++ usleep_range(100, 150); ++ ++ ret = reset_control_deassert(phy->resets); ++ if (ret) { ++ dev_err(phy->dev, "reset deassert failed (%d)\n", ret); ++ return ret; ++ } ++ ++ usleep_range(PIPE_CLK_DELAY_MIN_US, PIPE_CLK_DELAY_MAX_US); ++ ++ ret = clk_bulk_prepare_enable(phy->num_clks, phy->clks); ++ if (ret) { ++ dev_err(phy->dev, "clk prepare and enable failed %d\n", ret); ++ return ret; ++ } ++ ++ usleep_range(30, 50); ++ ++ qcom_uniphy_pcie_init(phy); ++ return 0; ++} ++ ++static inline int qcom_uniphy_pcie_get_resources(struct platform_device *pdev, ++ struct qcom_uniphy_pcie *phy) ++{ ++ struct resource *res; ++ ++ phy->base = devm_platform_get_and_ioremap_resource(pdev, 0, &res); ++ if (IS_ERR(phy->base)) { ++ dev_err(phy->dev, "cannot get phy registers\n"); ++ return PTR_ERR(phy->base); ++ } ++ ++ phy->num_clks = devm_clk_bulk_get_all(phy->dev, &phy->clks); ++ if (phy->num_clks < 0) ++ return phy->num_clks; ++ ++ phy->resets = devm_reset_control_array_get_exclusive(phy->dev); ++ if (IS_ERR(phy->resets)) ++ return PTR_ERR(phy->resets); ++ ++ return 0; ++} ++ ++/* ++ * Register a fixed rate pipe clock. ++ * ++ * The _pipe_clksrc generated by PHY goes to the GCC that gate ++ * controls it. The _pipe_clk coming out of the GCC is requested ++ * by the PHY driver for its operations. ++ * We register the _pipe_clksrc here. The gcc driver takes care ++ * of assigning this _pipe_clksrc as parent to _pipe_clk. ++ * Below picture shows this relationship. 
++ * ++ * +---------------+ ++ * | PHY block |<<---------------------------------------+ ++ * | | | ++ * | +-------+ | +-----+ | ++ * I/P---^-->| PLL |---^--->pipe_clksrc--->| GCC |--->pipe_clk---+ ++ * clk | +-------+ | +-----+ ++ * +---------------+ ++ */ ++static int phy_pipe_clk_register(struct qcom_uniphy_pcie *phy, ++ struct device_node *np) ++{ ++ struct clk_fixed_rate *fixed; ++ struct clk_init_data init = { }; ++ int ret; ++ ++ ret = of_property_read_string(np, "clock-output-names", &init.name); ++ if (ret) { ++ dev_err(phy->dev, "%pOFn: No clock-output-names\n", np); ++ return ret; ++ } ++ ++ fixed = devm_kzalloc(phy->dev, sizeof(*fixed), GFP_KERNEL); ++ if (!fixed) ++ return -ENOMEM; ++ ++ init.ops = &clk_fixed_rate_ops; ++ fixed->fixed_rate = 125000000; ++ fixed->hw.init = &init; ++ ++ ret = devm_clk_hw_register(phy->dev, &fixed->hw); ++ if (ret) ++ return ret; ++ ++ ret = devm_of_clk_add_hw_provider(phy->dev, of_clk_hw_simple_get, ++ &fixed->hw); ++ if (ret) ++ return ret; ++ ++ return 0; ++} ++ ++static const struct of_device_id qcom_uniphy_pcie_id_table[] = { ++ { ++ .compatible = "qcom,ipq5018-uniphy-pcie-gen2x1", ++ .data = &ipq5018_2x1_data, ++ }, ++ { ++ .compatible = "qcom,ipq5018-uniphy-pcie-gen2x2", ++ .data = &ipq5018_2x2_data, ++ }, ++ { /* Sentinel */ }, ++}; ++MODULE_DEVICE_TABLE(of, qcom_uniphy_pcie_id_table); ++ ++static const struct phy_ops pcie_ops = { ++ .power_on = qcom_uniphy_pcie_power_on, ++ .power_off = qcom_uniphy_pcie_power_off, ++ .owner = THIS_MODULE, ++}; ++ ++static int qcom_uniphy_pcie_probe(struct platform_device *pdev) ++{ ++ struct qcom_uniphy_pcie *phy; ++ int ret; ++ struct phy *generic_phy; ++ struct phy_provider *phy_provider; ++ struct device *dev = &pdev->dev; ++ struct device_node *np = of_node_get(dev->of_node); ++ ++ phy = devm_kzalloc(&pdev->dev, sizeof(*phy), GFP_KERNEL); ++ if (!phy) ++ return -ENOMEM; ++ ++ platform_set_drvdata(pdev, phy); ++ phy->dev = &pdev->dev; ++ ++ phy->data = of_device_get_match_data(dev); ++ if (!phy->data) ++ return -EINVAL; ++ ++ ret = qcom_uniphy_pcie_get_resources(pdev, phy); ++ if (ret < 0) { ++ dev_err_probe(&pdev->dev, ret, "failed to get resources: %d\n", ret); ++ return ret; ++ } ++ ++ ret = phy_pipe_clk_register(phy, np); ++ if (ret) ++ dev_err_probe(&pdev->dev, ret, "failed to register phy pipe clk\n"); ++ ++ generic_phy = devm_phy_create(phy->dev, NULL, &pcie_ops); ++ if (IS_ERR(generic_phy)) ++ return PTR_ERR(generic_phy); ++ ++ phy_set_drvdata(generic_phy, phy); ++ phy_provider = devm_of_phy_provider_register(phy->dev, ++ of_phy_simple_xlate); ++ if (IS_ERR(phy_provider)) ++ return PTR_ERR(phy_provider); ++ ++ return 0; ++} ++ ++static struct platform_driver qcom_uniphy_pcie_driver = { ++ .probe = qcom_uniphy_pcie_probe, ++ .driver = { ++ .name = "qcom-ipq5018-uniphy-pcie", ++ .owner = THIS_MODULE, ++ .of_match_table = qcom_uniphy_pcie_id_table, ++ }, ++}; ++ ++module_platform_driver(qcom_uniphy_pcie_driver); ++ ++MODULE_LICENSE("Dual BSD/GPL"); ++MODULE_DESCRIPTION("PCIE QCOM IPQ5018 UNIPHY driver"); diff --git a/target/linux/qualcommax/patches-6.6/0158-pci-qcom-add-support-for-ipq5018.patch b/target/linux/qualcommax/patches-6.6/0158-pci-qcom-add-support-for-ipq5018.patch new file mode 100644 index 000000000..0acc24cfc --- /dev/null +++ b/target/linux/qualcommax/patches-6.6/0158-pci-qcom-add-support-for-ipq5018.patch @@ -0,0 +1,77 @@ +From: Nitheesh Sekar +Subject: [PATCH] PCI: qcom: Add support for IPQ5018 +Date: Tue, 3 Oct 2023 17:38:44 +0530 + +Added a new compatible 'qcom,pcie-ipq5018' 
and modified +get_resources of 'ops 2_9_0' to get the clocks from the +device-tree. + +Co-developed-by: Anusha Rao +Signed-off-by: Anusha Rao +Co-developed-by: Devi Priya +Signed-off-by: Devi Priya +Signed-off-by: Nitheesh Sekar +--- + drivers/pci/controller/dwc/pcie-qcom.c | 22 ++++++++-------------- + 1 file changed, 8 insertions(+), 14 deletions(-) + +--- a/drivers/pci/controller/dwc/pcie-qcom.c ++++ b/drivers/pci/controller/dwc/pcie-qcom.c +@@ -202,8 +202,9 @@ struct qcom_pcie_resources_2_7_0 { + + #define QCOM_PCIE_2_9_0_MAX_CLOCKS 5 + struct qcom_pcie_resources_2_9_0 { +- struct clk_bulk_data clks[QCOM_PCIE_2_9_0_MAX_CLOCKS]; ++ struct clk_bulk_data *clks; + struct reset_control *rst; ++ int num_clks; + }; + + union qcom_pcie_resources { +@@ -1056,17 +1057,10 @@ static int qcom_pcie_get_resources_2_9_0 + struct qcom_pcie_resources_2_9_0 *res = &pcie->res.v2_9_0; + struct dw_pcie *pci = pcie->pci; + struct device *dev = pci->dev; +- int ret; +- +- res->clks[0].id = "iface"; +- res->clks[1].id = "axi_m"; +- res->clks[2].id = "axi_s"; +- res->clks[3].id = "axi_bridge"; +- res->clks[4].id = "rchng"; + +- ret = devm_clk_bulk_get(dev, ARRAY_SIZE(res->clks), res->clks); +- if (ret < 0) +- return ret; ++ res->num_clks = devm_clk_bulk_get_all(dev, &res->clks); ++ if (res->num_clks < 0) ++ return res->num_clks; + + res->rst = devm_reset_control_array_get_exclusive(dev); + if (IS_ERR(res->rst)) +@@ -1079,7 +1073,7 @@ static void qcom_pcie_deinit_2_9_0(struc + { + struct qcom_pcie_resources_2_9_0 *res = &pcie->res.v2_9_0; + +- clk_bulk_disable_unprepare(ARRAY_SIZE(res->clks), res->clks); ++ clk_bulk_disable_unprepare(res->num_clks, res->clks); + } + + static int qcom_pcie_init_2_9_0(struct qcom_pcie *pcie) +@@ -1108,7 +1102,7 @@ static int qcom_pcie_init_2_9_0(struct q + + usleep_range(2000, 2500); + +- return clk_bulk_prepare_enable(ARRAY_SIZE(res->clks), res->clks); ++ return clk_bulk_prepare_enable(res->num_clks, res->clks); + } + + static int qcom_pcie_post_init_2_9_0(struct qcom_pcie *pcie) +@@ -1613,6 +1607,7 @@ static const struct of_device_id qcom_pc + { .compatible = "qcom,pcie-apq8064", .data = &cfg_2_1_0 }, + { .compatible = "qcom,pcie-apq8084", .data = &cfg_1_0_0 }, + { .compatible = "qcom,pcie-ipq4019", .data = &cfg_2_4_0 }, ++ { .compatible = "qcom,pcie-ipq5018", .data = &cfg_2_9_0 }, + { .compatible = "qcom,pcie-ipq6018", .data = &cfg_2_9_0 }, + { .compatible = "qcom,pcie-ipq8064", .data = &cfg_2_1_0 }, + { .compatible = "qcom,pcie-ipq8064-v2", .data = &cfg_2_1_0 }, diff --git a/target/linux/qualcommax/patches-6.6/0159-arm64-dts-qcom-IPQ5018-add-PCIe-related-nodes.patch b/target/linux/qualcommax/patches-6.6/0159-arm64-dts-qcom-IPQ5018-add-PCIe-related-nodes.patch new file mode 100644 index 000000000..c7741814a --- /dev/null +++ b/target/linux/qualcommax/patches-6.6/0159-arm64-dts-qcom-IPQ5018-add-PCIe-related-nodes.patch @@ -0,0 +1,207 @@ +From: Nitheesh Sekar +Subject: [PATCH] arm64: dts: qcom: ipq5018: Add PCIe related nodes +Date: Tue, 3 Oct 2023 17:38:45 +0530 + +Add phy and controller nodes for PCIe_x2 and PCIe_x1. +PCIe_x2 is 2-lane Gen2 and PCIe_x1 is 1-lane Gen2. 
+ +Signed-off-by: Nitheesh Sekar +--- + arch/arm64/boot/dts/qcom/ipq5018.dtsi | 186 +++++++++++++++++++++++++- + 1 file changed, 184 insertions(+), 2 deletions(-) + +--- a/arch/arm64/boot/dts/qcom/ipq5018.dtsi ++++ b/arch/arm64/boot/dts/qcom/ipq5018.dtsi +@@ -149,6 +149,38 @@ + status = "disabled"; + }; + ++ pcie_x1phy: phy@7e000{ ++ compatible = "qcom,ipq5018-uniphy-pcie-gen2x1"; ++ reg = <0x0007e000 0x800>; ++ #phy-cells = <0>; ++ #clock-cells = <0>; ++ clocks = <&gcc GCC_PCIE1_PIPE_CLK>; ++ clock-names = "pipe_clk"; ++ clock-output-names = "pcie1_pipe_clk"; ++ assigned-clocks = <&gcc GCC_PCIE1_PIPE_CLK>; ++ assigned-clock-rates = <125000000>; ++ resets = <&gcc GCC_PCIE1_PHY_BCR>, ++ <&gcc GCC_PCIE1PHY_PHY_BCR>; ++ reset-names = "phy", "phy_phy"; ++ status = "disabled"; ++ }; ++ ++ pcie_x2phy: phy@86000{ ++ compatible = "qcom,ipq5018-uniphy-pcie-gen2x2"; ++ reg = <0x00086000 0x800>; ++ #phy-cells = <0>; ++ #clock-cells = <0>; ++ clocks = <&gcc GCC_PCIE0_PIPE_CLK>; ++ clock-names = "pipe_clk"; ++ clock-output-names = "pcie0_pipe_clk"; ++ assigned-clocks = <&gcc GCC_PCIE0_PIPE_CLK>; ++ assigned-clock-rates = <125000000>; ++ resets = <&gcc GCC_PCIE0_PHY_BCR>, ++ <&gcc GCC_PCIE0PHY_PHY_BCR>; ++ reset-names = "phy", "phy_phy"; ++ status = "disabled"; ++ }; ++ + qfprom: qfprom@a0000 { + compatible = "qcom,ipq5018-qfprom", "qcom,qfprom"; + reg = <0xa0000 0x1000>; +@@ -283,8 +315,8 @@ + reg = <0x01800000 0x80000>; + clocks = <&xo_board_clk>, + <&sleep_clk>, +- <0>, +- <0>, ++ <&pcie_x2phy>, ++ <&pcie_x1phy>, + <0>, + <0>, + <0>, +@@ -501,6 +533,142 @@ + status = "disabled"; + }; + }; ++ ++ pcie_x1: pcie@80000000 { ++ compatible = "qcom,pcie-ipq5018"; ++ reg = <0x80000000 0xf1d>, ++ <0x80000f20 0xa8>, ++ <0x80001000 0x1000>, ++ <0x00078000 0x3000>, ++ <0x80100000 0x1000>; ++ reg-names = "dbi", "elbi", "atu", "parf", "config"; ++ device_type = "pci"; ++ linux,pci-domain = <0>; ++ bus-range = <0x00 0xff>; ++ num-lanes = <1>; ++ max-link-speed = <2>; ++ #address-cells = <3>; ++ #size-cells = <2>; ++ ++ phys = <&pcie_x1phy>; ++ phy-names ="pciephy"; ++ ++ ranges = <0x81000000 0 0x80200000 0x80200000 0 0x00100000>, /* I/O */ ++ <0x82000000 0 0x80300000 0x80300000 0 0x10000000>; /* MEM */ ++ ++ #interrupt-cells = <1>; ++ interrupt-map-mask = <0 0 0 0x7>; ++ interrupt-map = <0 0 0 1 &intc 0 142 IRQ_TYPE_LEVEL_HIGH>, /* int_a */ ++ <0 0 0 2 &intc 0 143 IRQ_TYPE_LEVEL_HIGH>, /* int_b */ ++ <0 0 0 3 &intc 0 144 IRQ_TYPE_LEVEL_HIGH>, /* int_c */ ++ <0 0 0 4 &intc 0 145 IRQ_TYPE_LEVEL_HIGH>; /* int_d */ ++ ++ interrupts = ; ++ interrupt-names = "global_irq"; ++ ++ clocks = <&gcc GCC_SYS_NOC_PCIE1_AXI_CLK>, ++ <&gcc GCC_PCIE1_AXI_M_CLK>, ++ <&gcc GCC_PCIE1_AXI_S_CLK>, ++ <&gcc GCC_PCIE1_AHB_CLK>, ++ <&gcc GCC_PCIE1_AUX_CLK>, ++ <&gcc GCC_PCIE1_AXI_S_BRIDGE_CLK>; ++ ++ clock-names = "iface", ++ "axi_m", ++ "axi_s", ++ "ahb", ++ "aux", ++ "axi_bridge"; ++ ++ resets = <&gcc GCC_PCIE1_PIPE_ARES>, ++ <&gcc GCC_PCIE1_SLEEP_ARES>, ++ <&gcc GCC_PCIE1_CORE_STICKY_ARES>, ++ <&gcc GCC_PCIE1_AXI_MASTER_ARES>, ++ <&gcc GCC_PCIE1_AXI_SLAVE_ARES>, ++ <&gcc GCC_PCIE1_AHB_ARES>, ++ <&gcc GCC_PCIE1_AXI_MASTER_STICKY_ARES>, ++ <&gcc GCC_PCIE1_AXI_SLAVE_STICKY_ARES>; ++ ++ reset-names = "pipe", ++ "sleep", ++ "sticky", ++ "axi_m", ++ "axi_s", ++ "ahb", ++ "axi_m_sticky", ++ "axi_s_sticky"; ++ ++ msi-map = <0x0 &v2m0 0x0 0xff8>; ++ status = "disabled"; ++ }; ++ ++ pcie_x2: pcie@a0000000 { ++ compatible = "qcom,pcie-ipq5018"; ++ reg = <0xa0000000 0xf1d>, ++ <0xa0000f20 0xa8>, ++ <0xa0001000 0x1000>, ++ <0x00080000 0x3000>, ++ 
<0xa0100000 0x1000>; ++ reg-names = "dbi", "elbi", "atu", "parf", "config"; ++ device_type = "pci"; ++ linux,pci-domain = <1>; ++ bus-range = <0x00 0xff>; ++ num-lanes = <2>; ++ max-link-speed = <2>; ++ #address-cells = <3>; ++ #size-cells = <2>; ++ ++ phys = <&pcie_x2phy>; ++ phy-names ="pciephy"; ++ ++ ranges = <0x81000000 0 0xa0200000 0xa0200000 0 0x00100000>, /* I/O */ ++ <0x82000000 0 0xa0300000 0xa0300000 0 0x10000000>; /* MEM */ ++ ++ #interrupt-cells = <1>; ++ interrupt-map-mask = <0 0 0 0x7>; ++ interrupt-map = <0 0 0 1 &intc 0 75 IRQ_TYPE_LEVEL_HIGH>, /* int_a */ ++ <0 0 0 2 &intc 0 78 IRQ_TYPE_LEVEL_HIGH>, /* int_b */ ++ <0 0 0 3 &intc 0 79 IRQ_TYPE_LEVEL_HIGH>, /* int_c */ ++ <0 0 0 4 &intc 0 83 IRQ_TYPE_LEVEL_HIGH>; /* int_d */ ++ ++ interrupts = ; ++ interrupt-names = "global_irq"; ++ ++ clocks = <&gcc GCC_SYS_NOC_PCIE0_AXI_CLK>, ++ <&gcc GCC_PCIE0_AXI_M_CLK>, ++ <&gcc GCC_PCIE0_AXI_S_CLK>, ++ <&gcc GCC_PCIE0_AHB_CLK>, ++ <&gcc GCC_PCIE0_AUX_CLK>, ++ <&gcc GCC_PCIE0_AXI_S_BRIDGE_CLK>; ++ ++ clock-names = "iface", ++ "axi_m", ++ "axi_s", ++ "ahb", ++ "aux", ++ "axi_bridge"; ++ ++ resets = <&gcc GCC_PCIE0_PIPE_ARES>, ++ <&gcc GCC_PCIE0_SLEEP_ARES>, ++ <&gcc GCC_PCIE0_CORE_STICKY_ARES>, ++ <&gcc GCC_PCIE0_AXI_MASTER_ARES>, ++ <&gcc GCC_PCIE0_AXI_SLAVE_ARES>, ++ <&gcc GCC_PCIE0_AHB_ARES>, ++ <&gcc GCC_PCIE0_AXI_MASTER_STICKY_ARES>, ++ <&gcc GCC_PCIE0_AXI_SLAVE_STICKY_ARES>; ++ ++ reset-names = "pipe", ++ "sleep", ++ "sticky", ++ "axi_m", ++ "axi_s", ++ "ahb", ++ "axi_m_sticky", ++ "axi_s_sticky"; ++ ++ msi-map = <0x0 &v2m0 0x0 0xff8>; ++ status = "disabled"; ++ }; + }; + + thermal-zones { diff --git a/target/linux/qualcommax/patches-6.6/0162-clk-qcom-apss-ipq-pll-use-stromer-ops-for-IPQ5018-to-fix-boot-failure.patch b/target/linux/qualcommax/patches-6.6/0162-clk-qcom-apss-ipq-pll-use-stromer-ops-for-IPQ5018-to-fix-boot-failure.patch new file mode 100644 index 000000000..be2c6b761 --- /dev/null +++ b/target/linux/qualcommax/patches-6.6/0162-clk-qcom-apss-ipq-pll-use-stromer-ops-for-IPQ5018-to-fix-boot-failure.patch @@ -0,0 +1,85 @@ +From: Gabor Juhos +Subject: [PATCH] clk: qcom: apss-ipq-pll: use stromer ops for IPQ5018 to fix boot failure +Date: Fri, 15 Mar 2024 17:16:41 +0100 + +Booting v6.8 results in a hang on various IPQ5018 based boards. +Investigating the problem showed that the hang happens when the +clk_alpha_pll_stromer_plus_set_rate() function tries to write +into the PLL_MODE register of the APSS PLL. + +Checking the downstream code revealed that it uses [1] stromer +specific operations for IPQ5018, whereas in the current code +the stromer plus specific operations are used. + +The ops in the 'ipq_pll_stromer_plus' clock definition can't be +changed since that is needed for IPQ5332, so add a new alpha pll +clock declaration which uses the correct stromer ops and use this +new clock for IPQ5018 to avoid the boot failure. + +Also, change pll_type in 'ipq5018_pll_data' to +CLK_ALPHA_PLL_TYPE_STROMER to better reflect that it is a Stromer +PLL and change the apss_ipq_pll_probe() function accordingly. + +1. 
https://git.codelinaro.org/clo/qsdk/oss/kernel/linux-ipq-5.4/-/blob/NHSS.QSDK.12.4/drivers/clk/qcom/apss-ipq5018.c#L67 + +Fixes: 50492f929486 ("clk: qcom: apss-ipq-pll: add support for IPQ5018") +Signed-off-by: Gabor Juhos +--- + drivers/clk/qcom/apss-ipq-pll.c | 30 +++++++++++++++++++++++++++--- + 1 file changed, 27 insertions(+), 3 deletions(-) + +diff --git a/drivers/clk/qcom/apss-ipq-pll.c b/drivers/clk/qcom/apss-ipq-pll.c +index 678b805f13d45..dfffec2f06ae7 100644 +--- a/drivers/clk/qcom/apss-ipq-pll.c ++++ b/drivers/clk/qcom/apss-ipq-pll.c +@@ -55,6 +55,29 @@ static struct clk_alpha_pll ipq_pll_huay + }, + }; + ++static struct clk_alpha_pll ipq_pll_stromer = { ++ .offset = 0x0, ++ /* ++ * Reuse CLK_ALPHA_PLL_TYPE_STROMER_PLUS register offsets. ++ * Although this is a bit confusing, but the offset values ++ * are correct nevertheless. ++ */ ++ .regs = ipq_pll_offsets[CLK_ALPHA_PLL_TYPE_STROMER_PLUS], ++ .flags = SUPPORTS_DYNAMIC_UPDATE, ++ .clkr = { ++ .enable_reg = 0x0, ++ .enable_mask = BIT(0), ++ .hw.init = &(const struct clk_init_data) { ++ .name = "a53pll", ++ .parent_data = &(const struct clk_parent_data) { ++ .fw_name = "xo", ++ }, ++ .num_parents = 1, ++ .ops = &clk_alpha_pll_stromer_ops, ++ }, ++ }, ++}; ++ + static struct clk_alpha_pll ipq_pll_stromer_plus = { + .offset = 0x0, + .regs = ipq_pll_offsets[CLK_ALPHA_PLL_TYPE_STROMER_PLUS], +@@ -144,8 +167,8 @@ struct apss_pll_data { + }; + + static const struct apss_pll_data ipq5018_pll_data = { +- .pll_type = CLK_ALPHA_PLL_TYPE_STROMER_PLUS, +- .pll = &ipq_pll_stromer_plus, ++ .pll_type = CLK_ALPHA_PLL_TYPE_STROMER, ++ .pll = &ipq_pll_stromer, + .pll_config = &ipq5018_pll_config, + }; + +@@ -203,7 +226,8 @@ static int apss_ipq_pll_probe(struct pla + + if (data->pll_type == CLK_ALPHA_PLL_TYPE_HUAYRA) + clk_alpha_pll_configure(data->pll, regmap, data->pll_config); +- else if (data->pll_type == CLK_ALPHA_PLL_TYPE_STROMER_PLUS) ++ else if (data->pll_type == CLK_ALPHA_PLL_TYPE_STROMER || ++ data->pll_type == CLK_ALPHA_PLL_TYPE_STROMER_PLUS) + clk_stromer_pll_configure(data->pll, regmap, data->pll_config); + + ret = devm_clk_register_regmap(dev, &data->pll->clkr); diff --git a/target/linux/qualcommax/patches-6.6/0163-clk-qcom-apss-ipq-pll-fix-PLL-rate-for-IPQ5018.patch b/target/linux/qualcommax/patches-6.6/0163-clk-qcom-apss-ipq-pll-fix-PLL-rate-for-IPQ5018.patch new file mode 100644 index 000000000..dbd74161a --- /dev/null +++ b/target/linux/qualcommax/patches-6.6/0163-clk-qcom-apss-ipq-pll-fix-PLL-rate-for-IPQ5018.patch @@ -0,0 +1,34 @@ +From: Gabor Juhos +Subject: [PATCH] clk: qcom: apss-ipq-pll: fix PLL rate for IPQ5018 +Date: Tue, 26 Mar 2024 14:34:11 +0100 + +According to ipq5018.dtsi, the maximum supported rate by the +CPU is 1.008 GHz on the IPQ5018 platform, however the current +configuration of the PLL results in 1.2 GHz rate. + +Change the 'L' value in the PLL configuration to limit the +rate to 1.008 GHz. The downstream kernel also uses the same +value [1]. Also add a comment to indicate the desired +frequency. 
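As a quick sanity check on the new value (assuming the integer-mode Alpha PLL relationship, rate ≈ L × reference, and the 24 MHz XO reference that ipq5018.dtsi feeds to this PLL):

	/* old: .l = 0x32 -> 50 * 24 MHz = 1200 MHz  (the unwanted 1.2 GHz)      */
	/* new: .l = 0x2a -> 42 * 24 MHz = 1008 MHz  (matches the 1.008 GHz cap) */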
+ +[1] https://git.codelinaro.org/clo/qsdk/oss/kernel/linux-ipq-5.4/-/blob/NHSS.QSDK.12.4/drivers/clk/qcom/apss-ipq5018.c?ref_type=heads#L151 + +Fixes: 50492f929486 ("clk: qcom: apss-ipq-pll: add support for IPQ5018") +Signed-off-by: Gabor Juhos +--- + drivers/clk/qcom/apss-ipq-pll.c | 3 ++- + 1 file changed, 2 insertions(+), 1 deletion(-) + +diff --git a/drivers/clk/qcom/apss-ipq-pll.c b/drivers/clk/qcom/apss-ipq-pll.c +index 678b805f13d45..5e3da5558f4e0 100644 +--- a/drivers/clk/qcom/apss-ipq-pll.c ++++ b/drivers/clk/qcom/apss-ipq-pll.c +@@ -97,7 +97,7 @@ static struct clk_alpha_pll ipq_pll_stro + }; + + static const struct alpha_pll_config ipq5018_pll_config = { +- .l = 0x32, ++ .l = 0x2a, + .config_ctl_val = 0x4001075b, + .config_ctl_hi_val = 0x304, + .main_output_mask = BIT(0), diff --git a/target/linux/qualcommax/patches-6.6/0301-dt-bindings-mfd-qcom-tcsr-add-IPQ5018-compatible.patch b/target/linux/qualcommax/patches-6.6/0301-dt-bindings-mfd-qcom-tcsr-add-IPQ5018-compatible.patch new file mode 100644 index 000000000..2faaa1d10 --- /dev/null +++ b/target/linux/qualcommax/patches-6.6/0301-dt-bindings-mfd-qcom-tcsr-add-IPQ5018-compatible.patch @@ -0,0 +1,18 @@ +From: George Moussalem +Subject: [PATCH] dt-bindings: mfd: qcom,tcsr: Add IPQ5018 compatible +Date: Sun, 06 Oct 2024 16:34:11 +0400 + +Document the qcom,tcsr-ipq5018 compatible. + +Signed-off-by: George Moussalem +--- +--- a/Documentation/devicetree/bindings/mfd/qcom,tcsr.yaml ++++ b/Documentation/devicetree/bindings/mfd/qcom,tcsr.yaml +@@ -33,6 +33,7 @@ properties: + - qcom,sm8450-tcsr + - qcom,tcsr-apq8064 + - qcom,tcsr-apq8084 ++ - qcom,tcsr-ipq5018 + - qcom,tcsr-ipq5332 + - qcom,tcsr-ipq8064 + - qcom,tcsr-ipq8074 diff --git a/target/linux/qualcommax/patches-6.6/0302-arm64-dts-qcom-IPQ5018-add-TCSR-node.patch b/target/linux/qualcommax/patches-6.6/0302-arm64-dts-qcom-IPQ5018-add-TCSR-node.patch new file mode 100644 index 000000000..e982cf06d --- /dev/null +++ b/target/linux/qualcommax/patches-6.6/0302-arm64-dts-qcom-IPQ5018-add-TCSR-node.patch @@ -0,0 +1,22 @@ +From: George Moussalem +Subject: [PATCH] arm64: dts: qcom: ipq5018: Add TCSR node +Date: Sun, 06 Oct 2024 16:34:11 +0400 + +Add TCSR node. + +Signed-off-by: George Moussalem +--- +--- a/arch/arm64/boot/dts/qcom/ipq5018.dtsi ++++ b/arch/arm64/boot/dts/qcom/ipq5018.dtsi +@@ -333,6 +333,11 @@ + #hwlock-cells = <1>; + }; + ++ tcsr: syscon@1937000 { ++ compatible = "qcom,tcsr-ipq5018", "syscon", "simple-mfd"; ++ reg = <0x01937000 0x21000>; ++ }; ++ + sdhc_1: mmc@7804000 { + compatible = "qcom,ipq5018-sdhci", "qcom,sdhci-msm-v5"; + reg = <0x7804000 0x1000>; diff --git a/target/linux/qualcommax/patches-6.6/0303-arm64-dts-qcom-IPQ5018-enable-the-download-mode-support.patch b/target/linux/qualcommax/patches-6.6/0303-arm64-dts-qcom-IPQ5018-enable-the-download-mode-support.patch new file mode 100644 index 000000000..265695d1c --- /dev/null +++ b/target/linux/qualcommax/patches-6.6/0303-arm64-dts-qcom-IPQ5018-enable-the-download-mode-support.patch @@ -0,0 +1,19 @@ +From: George Moussalem +Subject: [PATCH] arm64: dts: qcom: ipq5018: enable the download mode support +Date: Sun, 06 Oct 2024 16:34:11 +0400 + +IPQ5018 also supports the download mode to collect the RAM dumps if system crashes, to perform +the post mortem analysis. Add support for the same. 
+ +Signed-off-by: George Moussalem +--- +--- a/arch/arm64/boot/dts/qcom/ipq5018.dtsi ++++ b/arch/arm64/boot/dts/qcom/ipq5018.dtsi +@@ -82,6 +82,7 @@ + scm { + compatible = "qcom,scm-ipq5018", "qcom,scm"; + qcom,sdi-enabled; ++ qcom,dload-mode = <&tcsr 0x6100>; + }; + }; + diff --git a/target/linux/qualcommax/patches-6.6/0304-dt-bindings-pwm-add-IPQ5018-compatible.patch b/target/linux/qualcommax/patches-6.6/0304-dt-bindings-pwm-add-IPQ5018-compatible.patch new file mode 100644 index 000000000..570502893 --- /dev/null +++ b/target/linux/qualcommax/patches-6.6/0304-dt-bindings-pwm-add-IPQ5018-compatible.patch @@ -0,0 +1,22 @@ +From: George Moussalem +Subject: [PATCH] dt-bindings: mfd: qcom,tcsr: Add IPQ5018 compatible +Date: Sun, 06 Oct 2024 16:34:11 +0400 + +Add compatible for IPQ5018. + +Signed-off-by: George Moussalem +--- +--- a/Documentation/devicetree/bindings/pwm/qcom,ipq6018-pwm.yaml ++++ b/Documentation/devicetree/bindings/pwm/qcom,ipq6018-pwm.yaml +@@ -11,7 +11,10 @@ maintainers: + + properties: + compatible: +- const: qcom,ipq6018-pwm ++ items: ++ - enum: ++ - qcom,ipq5018-pwm ++ - const: qcom,ipq6018-pwm + + reg: + description: Offset of PWM register in the TCSR block. diff --git a/target/linux/qualcommax/patches-6.6/0305-pinctrl-qcom-IPQ5018-update-pwm-groups.patch b/target/linux/qualcommax/patches-6.6/0305-pinctrl-qcom-IPQ5018-update-pwm-groups.patch new file mode 100644 index 000000000..486112295 --- /dev/null +++ b/target/linux/qualcommax/patches-6.6/0305-pinctrl-qcom-IPQ5018-update-pwm-groups.patch @@ -0,0 +1,56 @@ +--- a/drivers/pinctrl/qcom/pinctrl-ipq5018.c ++++ b/drivers/pinctrl/qcom/pinctrl-ipq5018.c +@@ -541,7 +541,7 @@ static const char * const qdss_tracectl_ + }; + + static const char * const pwm0_groups[] = { +- "gpio42", ++ "gpio42", "gpio46", + }; + + static const char * const qdss_cti_trig_out_b0_groups[] = { +@@ -549,7 +549,7 @@ static const char * const qdss_cti_trig_ + }; + + static const char * const pwm1_groups[] = { +- "gpio43", ++ "gpio43", "gpio1", + }; + + static const char * const qdss_cti_trig_in_b0_groups[] = { +@@ -565,7 +565,7 @@ static const char * const qdss_cti_trig_ + }; + + static const char * const pwm3_groups[] = { +- "gpio45", ++ "gpio45", "gpio30", + }; + + static const char * const qdss_cti_trig_in_b1_groups[] = { +@@ -679,7 +679,7 @@ static const struct pinfunction ipq5018_ + + static const struct msm_pingroup ipq5018_groups[] = { + PINGROUP(0, atest_char, _, qdss_cti_trig_out_a0, wci_txd, wci_rxd, xfem, _, _, _), +- PINGROUP(1, atest_char, _, qdss_cti_trig_in_a0, wci_txd, wci_rxd, xfem, _, _, _), ++ PINGROUP(1, atest_char, pwm1, qdss_cti_trig_in_a0, wci_txd, wci_rxd, xfem, _, _, _), + PINGROUP(2, atest_char, _, qdss_cti_trig_out_a1, wci_txd, wci_rxd, xfem, _, _, _), + PINGROUP(3, atest_char, _, qdss_cti_trig_in_a1, wci_txd, wci_rxd, xfem, _, _, _), + PINGROUP(4, sdc1_data, qspi_data, blsp1_spi1, btss, dbg_out, qdss_traceclk_a, _, burn0, _), +@@ -708,7 +708,7 @@ static const struct msm_pingroup ipq5018 + PINGROUP(27, audio_txmclk, wsa_swrm, audio_txmclk, blsp2_spi, btss, _, qdss_tracedata_b, _, _), + PINGROUP(28, audio_txbclk, wsa_swrm, blsp0_uart1, btss, qdss_tracedata_b, _, _, _, _), + PINGROUP(29, audio_txfsync, _, blsp0_uart1, _, qdss_tracedata_b, _, _, _, _), +- PINGROUP(30, audio_txd, led2, led0, _, _, _, _, _, _), ++ PINGROUP(30, audio_txd, led2, led0, pwm3, _, _, _, _, _), + PINGROUP(31, blsp2_spi0, blsp1_uart1, _, qdss_tracedata_b, eud_gpio, _, _, _, _), + PINGROUP(32, blsp2_spi0, blsp1_uart1, _, qdss_tracedata_b, eud_gpio, _, _, 
_, _), + PINGROUP(33, blsp2_i2c0, blsp2_spi0, blsp1_uart1, _, qdss_tracedata_b, eud_gpio, _, _, _), +@@ -724,7 +724,7 @@ static const struct msm_pingroup ipq5018 + PINGROUP(43, pwm1, qdss_cti_trig_in_b0, wci_txd, wci_rxd, xfem, _, _, _, _), + PINGROUP(44, pwm2, qdss_cti_trig_out_b1, wci_txd, wci_rxd, xfem, _, _, _, _), + PINGROUP(45, pwm3, qdss_cti_trig_in_b1, wci_txd, wci_rxd, xfem, _, _, _, _), +- PINGROUP(46, led0, _, _, _, _, _, _, _, _), ++ PINGROUP(46, led0, pwm0, _, _, _, _, _, _, _), + }; + + static const struct msm_pinctrl_soc_data ipq5018_pinctrl = { diff --git a/target/linux/qualcommax/patches-6.6/0306-arm64-dts-qcom-ipq5018-Add-PWM-node.patch b/target/linux/qualcommax/patches-6.6/0306-arm64-dts-qcom-ipq5018-Add-PWM-node.patch new file mode 100644 index 000000000..f6542f266 --- /dev/null +++ b/target/linux/qualcommax/patches-6.6/0306-arm64-dts-qcom-ipq5018-Add-PWM-node.patch @@ -0,0 +1,27 @@ +From: George Moussalem +Subject: [PATCH] arm64: dts: qcom: ipq5018: Add PWM node +Date: Sun, 06 Oct 2024 16:34:11 +0400 + +Add PWM node. + +Signed-off-by: George Moussalem +--- +--- a/arch/arm64/boot/dts/qcom/ipq5018.dtsi ++++ b/arch/arm64/boot/dts/qcom/ipq5018.dtsi +@@ -339,6 +339,16 @@ + reg = <0x01937000 0x21000>; + }; + ++ pwm: pwm@1941010 { ++ compatible = "qcom,ipq5018-pwm", "qcom,ipq6018-pwm"; ++ reg = <0x01941010 0x20>; ++ clocks = <&gcc GCC_ADSS_PWM_CLK>; ++ assigned-clocks = <&gcc GCC_ADSS_PWM_CLK>; ++ assigned-clock-rates = <100000000>; ++ #pwm-cells = <2>; ++ status = "disabled"; ++ }; ++ + sdhc_1: mmc@7804000 { + compatible = "qcom,ipq5018-sdhci", "qcom,sdhci-msm-v5"; + reg = <0x7804000 0x1000>; diff --git a/target/linux/qualcommax/patches-6.6/0324-arm64-dts-qcom-ipq5018-Add-crypto-nodes.patch b/target/linux/qualcommax/patches-6.6/0324-arm64-dts-qcom-ipq5018-Add-crypto-nodes.patch new file mode 100644 index 000000000..135624d97 --- /dev/null +++ b/target/linux/qualcommax/patches-6.6/0324-arm64-dts-qcom-ipq5018-Add-crypto-nodes.patch @@ -0,0 +1,41 @@ +From: George Moussalem +Subject: [PATCH] arm64: dts: qcom: ipq5018: Add crypto nodes +Date: Sun, 06 Oct 2024 16:34:11 +0400 + +Add dma controller and crypto nodes. 
+ +Signed-off-by: George Moussalem +--- +--- a/arch/arm64/boot/dts/qcom/ipq5018.dtsi ++++ b/arch/arm64/boot/dts/qcom/ipq5018.dtsi +@@ -293,6 +293,30 @@ + #thermal-sensor-cells = <1>; + }; + ++ cryptobam: dma-controller@704000 { ++ compatible = "qcom,bam-v1.7.0"; ++ reg = <0x00704000 0x20000>; ++ interrupts = ; ++ clocks = <&gcc GCC_CRYPTO_AHB_CLK>; ++ clock-names = "bam_clk"; ++ #dma-cells = <1>; ++ qcom,ee = <1>; ++ qcom,controlled-remotely; ++ status = "disabled"; ++ }; ++ ++ crypto: crypto@73a000 { ++ compatible = "qcom,crypto-v5.1"; ++ reg = <0x0073a000 0x6000>; ++ clocks = <&gcc GCC_CRYPTO_AHB_CLK>, ++ <&gcc GCC_CRYPTO_AXI_CLK>, ++ <&gcc GCC_CRYPTO_CLK>; ++ clock-names = "iface", "bus", "core"; ++ dmas = <&cryptobam 2>, <&cryptobam 3>; ++ dma-names = "rx", "tx"; ++ status = "disabled"; ++ }; ++ + tlmm: pinctrl@1000000 { + compatible = "qcom,ipq5018-tlmm"; + reg = <0x01000000 0x300000>; diff --git a/target/linux/qualcommax/patches-6.6/0337-arm64-dts-qcom-ipq5018-Add-PRNG-node.patch b/target/linux/qualcommax/patches-6.6/0337-arm64-dts-qcom-ipq5018-Add-PRNG-node.patch new file mode 100644 index 000000000..1154e6368 --- /dev/null +++ b/target/linux/qualcommax/patches-6.6/0337-arm64-dts-qcom-ipq5018-Add-PRNG-node.patch @@ -0,0 +1,25 @@ +From: George Moussalem +Subject: [PATCH] arm64: dts: qcom: ipq5018: Add PRNG node +Date: Sun, 06 Oct 2024 16:34:11 +0400 + +Add PRNG node. + +Signed-off-by: George Moussalem +--- +--- a/arch/arm64/boot/dts/qcom/ipq5018.dtsi ++++ b/arch/arm64/boot/dts/qcom/ipq5018.dtsi +@@ -254,6 +254,14 @@ + }; + }; + ++ prng: rng@e3000 { ++ compatible = "qcom,prng-ee"; ++ reg = <0x000e3000 0x1000>; ++ clocks = <&gcc GCC_PRNG_AHB_CLK>; ++ clock-names = "core"; ++ status = "disabled"; ++ }; ++ + tsens: thermal-sensor@4a9000 { + compatible = "qcom,ipq5018-tsens"; + reg = <0x4a9000 0x1000>, /* TM */ diff --git a/target/linux/qualcommax/patches-6.6/0339-arm64-dts-qcom-ipq5018-Add-QUP1-UART2-node.patch b/target/linux/qualcommax/patches-6.6/0339-arm64-dts-qcom-ipq5018-Add-QUP1-UART2-node.patch new file mode 100644 index 000000000..8fa688a7a --- /dev/null +++ b/target/linux/qualcommax/patches-6.6/0339-arm64-dts-qcom-ipq5018-Add-QUP1-UART2-node.patch @@ -0,0 +1,27 @@ +From: George Moussalem +Subject: [PATCH] arm64: dts: qcom: ipq5018: Add QUP1-UART2 node +Date: Sun, 06 Oct 2024 16:34:11 +0400 + +Add QUP1-UART2 node. + +Signed-off-by: George Moussalem +--- +--- a/arch/arm64/boot/dts/qcom/ipq5018.dtsi ++++ b/arch/arm64/boot/dts/qcom/ipq5018.dtsi +@@ -418,6 +418,16 @@ + status = "disabled"; + }; + ++ blsp1_uart2: serial@78b0000 { ++ compatible = "qcom,msm-uartdm-v1.4", "qcom,msm-uartdm"; ++ reg = <0x078b0000 0x200>; ++ interrupts = ; ++ clocks = <&gcc GCC_BLSP1_UART2_APPS_CLK>, ++ <&gcc GCC_BLSP1_AHB_CLK>; ++ clock-names = "core", "iface"; ++ status = "disabled"; ++ }; ++ + blsp1_spi1: spi@78b5000 { + compatible = "qcom,spi-qup-v2.2.1"; + #address-cells = <1>; diff --git a/target/linux/qualcommax/patches-6.6/0340-arm64-dts-qcom-ipq5018-Add-QUP3-I2C-node.patch b/target/linux/qualcommax/patches-6.6/0340-arm64-dts-qcom-ipq5018-Add-QUP3-I2C-node.patch new file mode 100644 index 000000000..87edba2c1 --- /dev/null +++ b/target/linux/qualcommax/patches-6.6/0340-arm64-dts-qcom-ipq5018-Add-QUP3-I2C-node.patch @@ -0,0 +1,32 @@ +From: George Moussalem +Subject: [PATCH] arm64: dts: qcom: ipq5018: Add QUP3 I2C node +Date: Sun, 06 Oct 2024 16:34:11 +0400 + +Add QUP3-I2C node. 
+ +Signed-off-by: George Moussalem +--- +--- a/arch/arm64/boot/dts/qcom/ipq5018.dtsi ++++ b/arch/arm64/boot/dts/qcom/ipq5018.dtsi +@@ -442,6 +442,21 @@ + status = "disabled"; + }; + ++ blsp1_i2c3: i2c@78b7000 { ++ compatible = "qcom,i2c-qup-v2.2.1"; ++ #address-cells = <1>; ++ #size-cells = <0>; ++ reg = <0x078b7000 0x600>; ++ interrupts = ; ++ clocks = <&gcc GCC_BLSP1_QUP3_I2C_APPS_CLK>, ++ <&gcc GCC_BLSP1_AHB_CLK>; ++ clock-names = "core", "iface"; ++ clock-frequency = <400000>; ++ dmas = <&blsp_dma 9>, <&blsp_dma 8>; ++ dma-names = "tx", "rx"; ++ status = "disabled"; ++ }; ++ + usb: usb@8af8800 { + compatible = "qcom,ipq5018-dwc3", "qcom,dwc3"; + reg = <0x08af8800 0x400>; diff --git a/target/linux/qualcommax/patches-6.6/0400-mtd-rawnand-add-support-for-TH58NYG3S0HBAI4.patch b/target/linux/qualcommax/patches-6.6/0400-mtd-rawnand-add-support-for-TH58NYG3S0HBAI4.patch new file mode 100644 index 000000000..c7632d47c --- /dev/null +++ b/target/linux/qualcommax/patches-6.6/0400-mtd-rawnand-add-support-for-TH58NYG3S0HBAI4.patch @@ -0,0 +1,42 @@ +From 8d8b37d3af2bdccf0a37d2017d876bfc6ce42552 Mon Sep 17 00:00:00 2001 +From: Chukun Pan +Date: Fri, 20 Oct 2023 23:18:21 +0800 +Subject: [PATCH 1/1] mtd: rawnand: add support for TH58NYG3S0HBAI4 NAND flash + +The Toshiba TH58NYG3S0HBAI4 is detected with 128 byte OOB while the flash +has 256 bytes OOB. Since it is not an ONFI compliant NAND, the model name +cannot be read from anywhere, add a static NAND ID entry to correct this. + +However, the NAND ID of this flash is inconsistent with the datasheet. +The actual NAND ID is only 4 ID bytes, the last ID byte is missing. + +Datasheet available at (the ID table is on page 50): +https://europe.kioxia.com/content/dam/kioxia/newidr/productinfo/datasheet/201910/DST_TH58NYG3S0HBAI4-TDE_EN_31565.pdf + +Datasheet NAND ID: {0x98, 0xa3, 0x91, 0x26, 0x76} +Actual NAND ID: {0x98, 0xa3, 0x91, 0x26} + +It seems that this flash may be counterfeit, but another Toshiba flash +also has the same problem. Maybe the driver has a bug, or some Toshiba +nand flash is like this. Anyway, add a static NAND ID entry with only +4 ID bytes as a hack to make sure it works. + +Tested on Arcadyan AW1000 flashed with OpenWrt. + +Signed-off-by: Chukun Pan +--- + drivers/mtd/nand/raw/nand_ids.c | 3 +++ + 1 file changed, 3 insertions(+) + +--- a/drivers/mtd/nand/raw/nand_ids.c ++++ b/drivers/mtd/nand/raw/nand_ids.c +@@ -58,6 +58,9 @@ struct nand_flash_dev nand_flash_ids[] = + { .id = {0xad, 0xde, 0x14, 0xa7, 0x42, 0x4a} }, + SZ_16K, SZ_8K, SZ_4M, NAND_NEED_SCRAMBLING, 6, 1664, + NAND_ECC_INFO(40, SZ_1K) }, ++ {"TH58NYG3S0HBAI4 8G 1.8V 8-bit", /* Last ID bytes missing */ ++ { .id = {0x98, 0xa3, 0x91, 0x26} }, ++ SZ_4K, SZ_1K, SZ_256K, 0, 4, 256, NAND_ECC_INFO(8, SZ_512) }, + {"TH58NVG2S3HBAI4 4G 3.3V 8-bit", + { .id = {0x98, 0xdc, 0x91, 0x15, 0x76} }, + SZ_2K, SZ_512, SZ_128K, 0, 5, 128, NAND_ECC_INFO(8, SZ_512) }, diff --git a/target/linux/qualcommax/patches-6.6/0401-spi-dt-bindings-Introduce-qcom-spi-qpic-snand.patch b/target/linux/qualcommax/patches-6.6/0401-spi-dt-bindings-Introduce-qcom-spi-qpic-snand.patch new file mode 100644 index 000000000..e3c45a28c --- /dev/null +++ b/target/linux/qualcommax/patches-6.6/0401-spi-dt-bindings-Introduce-qcom-spi-qpic-snand.patch @@ -0,0 +1,100 @@ +From: Md Sadre Alam +Date: Sun, 22 Sep 2024 17:03:44 +0530 +Subject: [PATCH] spi: dt-bindings: Introduce qcom,spi-qpic-snand + +Document the QPIC-SPI-NAND flash controller present in the IPQ SoCs. 
+It can work both in serial and parallel mode and supports typical +SPI-NAND page cache operations. + +Reviewed-by: Krzysztof Kozlowski +Signed-off-by: Md Sadre Alam +--- +diff --git a/Documentation/devicetree/bindings/spi/qcom,spi-qpic-snand.yaml b/Documentation/devicetree/bindings/spi/qcom,spi-qpic-snand.yaml +new file mode 100644 +index 000000000000..f0d9f7643849 +--- /dev/null ++++ b/Documentation/devicetree/bindings/spi/qcom,spi-qpic-snand.yaml +@@ -0,0 +1,83 @@ ++# SPDX-License-Identifier: (GPL-2.0-only OR BSD-2-Clause) ++%YAML 1.2 ++--- ++$id: http://devicetree.org/schemas/spi/qcom,spi-qpic-snand.yaml# ++$schema: http://devicetree.org/meta-schemas/core.yaml# ++ ++title: Qualcomm QPIC NAND controller ++ ++maintainers: ++ - Md sadre Alam ++ ++description: ++ The QCOM QPIC-SPI-NAND flash controller is an extended version of ++ the QCOM QPIC NAND flash controller. It can work both in serial ++ and parallel mode. It supports typical SPI-NAND page cache ++ operations in single, dual or quad IO mode with pipelined ECC ++ encoding/decoding using the QPIC ECC HW engine. ++ ++allOf: ++ - $ref: /schemas/spi/spi-controller.yaml# ++ ++properties: ++ compatible: ++ enum: ++ - qcom,spi-qpic-snand ++ ++ reg: ++ maxItems: 1 ++ ++ clocks: ++ maxItems: 3 ++ ++ clock-names: ++ items: ++ - const: core ++ - const: aon ++ - const: iom ++ ++ dmas: ++ items: ++ - description: tx DMA channel ++ - description: rx DMA channel ++ - description: cmd DMA channel ++ ++ dma-names: ++ items: ++ - const: tx ++ - const: rx ++ - const: cmd ++ ++required: ++ - compatible ++ - reg ++ - clocks ++ - clock-names ++ ++unevaluatedProperties: false ++ ++examples: ++ - | ++ #include ++ spi@79b0000 { ++ compatible = "qcom,spi-qpic-snand"; ++ reg = <0x1ac00000 0x800>; ++ ++ clocks = <&gcc GCC_QPIC_CLK>, ++ <&gcc GCC_QPIC_AHB_CLK>, ++ <&gcc GCC_QPIC_IO_MACRO_CLK>; ++ clock-names = "core", "aon", "iom"; ++ ++ #address-cells = <1>; ++ #size-cells = <0>; ++ ++ flash@0 { ++ compatible = "spi-nand"; ++ reg = <0>; ++ #address-cells = <1>; ++ #size-cells = <1>; ++ nand-ecc-engine = <&qpic_nand>; ++ nand-ecc-strength = <4>; ++ nand-ecc-step-size = <512>; ++ }; ++ }; diff --git a/target/linux/qualcommax/patches-6.6/0402-mtd-rawnand-qcom-cleanup-qcom_nandc-driver.patch b/target/linux/qualcommax/patches-6.6/0402-mtd-rawnand-qcom-cleanup-qcom_nandc-driver.patch new file mode 100644 index 000000000..b7ea4fab5 --- /dev/null +++ b/target/linux/qualcommax/patches-6.6/0402-mtd-rawnand-qcom-cleanup-qcom_nandc-driver.patch @@ -0,0 +1,983 @@ +From: Md Sadre Alam +Date: Sun, 22 Sep 2024 17:03:45 +0530 +Subject: [PATCH] mtd: rawnand: qcom: cleanup qcom_nandc driver + +cleanup qcom_nandc driver as below + +- Remove register value indirection api + +- Remove set_reg() api + +- Convert read_loc_first & read_loc_last macro to function + +- Renamed multiple variables + +Signed-off-by: Md Sadre Alam +--- +diff --git a/drivers/mtd/nand/raw/qcom_nandc.c b/drivers/mtd/nand/raw/qcom_nandc.c +index b8cff9240b28..d134329330fe 100644 +--- a/drivers/mtd/nand/raw/qcom_nandc.c ++++ b/drivers/mtd/nand/raw/qcom_nandc.c +@@ -189,17 +189,6 @@ + #define ECC_BCH_4BIT BIT(2) + #define ECC_BCH_8BIT BIT(3) + +-#define nandc_set_read_loc_first(chip, reg, cw_offset, read_size, is_last_read_loc) \ +-nandc_set_reg(chip, reg, \ +- ((cw_offset) << READ_LOCATION_OFFSET) | \ +- ((read_size) << READ_LOCATION_SIZE) | \ +- ((is_last_read_loc) << READ_LOCATION_LAST)) +- +-#define nandc_set_read_loc_last(chip, reg, cw_offset, read_size, is_last_read_loc) \ +-nandc_set_reg(chip, reg, \ +- 
((cw_offset) << READ_LOCATION_OFFSET) | \ +- ((read_size) << READ_LOCATION_SIZE) | \ +- ((is_last_read_loc) << READ_LOCATION_LAST)) + /* + * Returns the actual register address for all NAND_DEV_ registers + * (i.e. NAND_DEV_CMD0, NAND_DEV_CMD1, NAND_DEV_CMD2 and NAND_DEV_CMD_VLD) +@@ -257,8 +246,6 @@ nandc_set_reg(chip, reg, \ + * @tx_sgl_start - start index in data sgl for tx. + * @rx_sgl_pos - current index in data sgl for rx. + * @rx_sgl_start - start index in data sgl for rx. +- * @wait_second_completion - wait for second DMA desc completion before making +- * the NAND transfer completion. + */ + struct bam_transaction { + struct bam_cmd_element *bam_ce; +@@ -275,7 +262,6 @@ struct bam_transaction { + u32 tx_sgl_start; + u32 rx_sgl_pos; + u32 rx_sgl_start; +- bool wait_second_completion; + }; + + /* +@@ -471,9 +457,9 @@ struct qcom_op { + unsigned int data_instr_idx; + unsigned int rdy_timeout_ms; + unsigned int rdy_delay_ns; +- u32 addr1_reg; +- u32 addr2_reg; +- u32 cmd_reg; ++ __le32 addr1_reg; ++ __le32 addr2_reg; ++ __le32 cmd_reg; + u8 flag; + }; + +@@ -549,17 +535,17 @@ struct qcom_nand_host { + * among different NAND controllers. + * @ecc_modes - ecc mode for NAND + * @dev_cmd_reg_start - NAND_DEV_CMD_* registers starting offset +- * @is_bam - whether NAND controller is using BAM +- * @is_qpic - whether NAND CTRL is part of qpic IP +- * @qpic_v2 - flag to indicate QPIC IP version 2 ++ * @supports_bam - whether NAND controller is using BAM ++ * @nandc_part_of_qpic - whether NAND controller is part of qpic IP ++ * @qpic_version2 - flag to indicate QPIC IP version 2 + * @use_codeword_fixup - whether NAND has different layout for boot partitions + */ + struct qcom_nandc_props { + u32 ecc_modes; + u32 dev_cmd_reg_start; +- bool is_bam; +- bool is_qpic; +- bool qpic_v2; ++ bool supports_bam; ++ bool nandc_part_of_qpic; ++ bool qpic_version2; + bool use_codeword_fixup; + }; + +@@ -613,19 +599,11 @@ static void clear_bam_transaction(struct qcom_nand_controller *nandc) + { + struct bam_transaction *bam_txn = nandc->bam_txn; + +- if (!nandc->props->is_bam) ++ if (!nandc->props->supports_bam) + return; + +- bam_txn->bam_ce_pos = 0; +- bam_txn->bam_ce_start = 0; +- bam_txn->cmd_sgl_pos = 0; +- bam_txn->cmd_sgl_start = 0; +- bam_txn->tx_sgl_pos = 0; +- bam_txn->tx_sgl_start = 0; +- bam_txn->rx_sgl_pos = 0; +- bam_txn->rx_sgl_start = 0; ++ memset(&bam_txn->bam_ce_pos, 0, sizeof(u32) * 8); + bam_txn->last_data_desc = NULL; +- bam_txn->wait_second_completion = false; + + sg_init_table(bam_txn->cmd_sgl, nandc->max_cwperpage * + QPIC_PER_CW_CMD_SGL); +@@ -640,17 +618,7 @@ static void qpic_bam_dma_done(void *data) + { + struct bam_transaction *bam_txn = data; + +- /* +- * In case of data transfer with NAND, 2 callbacks will be generated. +- * One for command channel and another one for data channel. +- * If current transaction has data descriptors +- * (i.e. wait_second_completion is true), then set this to false +- * and wait for second DMA descriptor completion. 
+- */ +- if (bam_txn->wait_second_completion) +- bam_txn->wait_second_completion = false; +- else +- complete(&bam_txn->txn_done); ++ complete(&bam_txn->txn_done); + } + + static inline struct qcom_nand_host *to_qcom_nand_host(struct nand_chip *chip) +@@ -676,10 +644,9 @@ static inline void nandc_write(struct qcom_nand_controller *nandc, int offset, + iowrite32(val, nandc->base + offset); + } + +-static inline void nandc_read_buffer_sync(struct qcom_nand_controller *nandc, +- bool is_cpu) ++static inline void nandc_dev_to_mem(struct qcom_nand_controller *nandc, bool is_cpu) + { +- if (!nandc->props->is_bam) ++ if (!nandc->props->supports_bam) + return; + + if (is_cpu) +@@ -694,93 +661,90 @@ static inline void nandc_read_buffer_sync(struct qcom_nand_controller *nandc, + DMA_FROM_DEVICE); + } + +-static __le32 *offset_to_nandc_reg(struct nandc_regs *regs, int offset) +-{ +- switch (offset) { +- case NAND_FLASH_CMD: +- return ®s->cmd; +- case NAND_ADDR0: +- return ®s->addr0; +- case NAND_ADDR1: +- return ®s->addr1; +- case NAND_FLASH_CHIP_SELECT: +- return ®s->chip_sel; +- case NAND_EXEC_CMD: +- return ®s->exec; +- case NAND_FLASH_STATUS: +- return ®s->clrflashstatus; +- case NAND_DEV0_CFG0: +- return ®s->cfg0; +- case NAND_DEV0_CFG1: +- return ®s->cfg1; +- case NAND_DEV0_ECC_CFG: +- return ®s->ecc_bch_cfg; +- case NAND_READ_STATUS: +- return ®s->clrreadstatus; +- case NAND_DEV_CMD1: +- return ®s->cmd1; +- case NAND_DEV_CMD1_RESTORE: +- return ®s->orig_cmd1; +- case NAND_DEV_CMD_VLD: +- return ®s->vld; +- case NAND_DEV_CMD_VLD_RESTORE: +- return ®s->orig_vld; +- case NAND_EBI2_ECC_BUF_CFG: +- return ®s->ecc_buf_cfg; +- case NAND_READ_LOCATION_0: +- return ®s->read_location0; +- case NAND_READ_LOCATION_1: +- return ®s->read_location1; +- case NAND_READ_LOCATION_2: +- return ®s->read_location2; +- case NAND_READ_LOCATION_3: +- return ®s->read_location3; +- case NAND_READ_LOCATION_LAST_CW_0: +- return ®s->read_location_last0; +- case NAND_READ_LOCATION_LAST_CW_1: +- return ®s->read_location_last1; +- case NAND_READ_LOCATION_LAST_CW_2: +- return ®s->read_location_last2; +- case NAND_READ_LOCATION_LAST_CW_3: +- return ®s->read_location_last3; +- default: +- return NULL; +- } ++/* Helper to check the code word, whether it is last cw or not */ ++static bool qcom_nandc_is_last_cw(struct nand_ecc_ctrl *ecc, int cw) ++{ ++ return cw == (ecc->steps - 1); + } + +-static void nandc_set_reg(struct nand_chip *chip, int offset, +- u32 val) ++/** ++ * nandc_set_read_loc_first() - to set read location first register ++ * @chip: NAND Private Flash Chip Data ++ * @reg_base: location register base ++ * @cw_offset: code word offset ++ * @read_size: code word read length ++ * @is_last_read_loc: is this the last read location ++ * ++ * This function will set location register value ++ */ ++static void nandc_set_read_loc_first(struct nand_chip *chip, ++ int reg_base, u32 cw_offset, ++ u32 read_size, u32 is_last_read_loc) + { + struct qcom_nand_controller *nandc = get_qcom_nand_controller(chip); +- struct nandc_regs *regs = nandc->regs; +- __le32 *reg; +- +- reg = offset_to_nandc_reg(regs, offset); ++ __le32 locreg_val; ++ u32 val = (((cw_offset) << READ_LOCATION_OFFSET) | ++ ((read_size) << READ_LOCATION_SIZE) | ++ ((is_last_read_loc) << READ_LOCATION_LAST)); ++ ++ locreg_val = cpu_to_le32(val); ++ ++ if (reg_base == NAND_READ_LOCATION_0) ++ nandc->regs->read_location0 = locreg_val; ++ else if (reg_base == NAND_READ_LOCATION_1) ++ nandc->regs->read_location1 = locreg_val; ++ else if (reg_base == NAND_READ_LOCATION_2) 
++ nandc->regs->read_location2 = locreg_val; ++ else if (reg_base == NAND_READ_LOCATION_3) ++ nandc->regs->read_location3 = locreg_val; ++} ++ ++/** ++ * nandc_set_read_loc_last - to set read location last register ++ * @chip: NAND Private Flash Chip Data ++ * @reg_base: location register base ++ * @cw_offset: code word offset ++ * @read_size: code word read length ++ * @is_last_read_loc: is this the last read location ++ * ++ * This function will set location last register value ++ */ ++static void nandc_set_read_loc_last(struct nand_chip *chip, ++ int reg_base, u32 cw_offset, ++ u32 read_size, u32 is_last_read_loc) ++{ ++ struct qcom_nand_controller *nandc = get_qcom_nand_controller(chip); ++ __le32 locreg_val; ++ u32 val = (((cw_offset) << READ_LOCATION_OFFSET) | ++ ((read_size) << READ_LOCATION_SIZE) | ++ ((is_last_read_loc) << READ_LOCATION_LAST)); + +- if (reg) +- *reg = cpu_to_le32(val); +-} ++ locreg_val = cpu_to_le32(val); + +-/* Helper to check the code word, whether it is last cw or not */ +-static bool qcom_nandc_is_last_cw(struct nand_ecc_ctrl *ecc, int cw) +-{ +- return cw == (ecc->steps - 1); ++ if (reg_base == NAND_READ_LOCATION_LAST_CW_0) ++ nandc->regs->read_location_last0 = locreg_val; ++ else if (reg_base == NAND_READ_LOCATION_LAST_CW_1) ++ nandc->regs->read_location_last1 = locreg_val; ++ else if (reg_base == NAND_READ_LOCATION_LAST_CW_2) ++ nandc->regs->read_location_last2 = locreg_val; ++ else if (reg_base == NAND_READ_LOCATION_LAST_CW_3) ++ nandc->regs->read_location_last3 = locreg_val; + } + + /* helper to configure location register values */ + static void nandc_set_read_loc(struct nand_chip *chip, int cw, int reg, +- int cw_offset, int read_size, int is_last_read_loc) ++ u32 cw_offset, u32 read_size, u32 is_last_read_loc) + { + struct qcom_nand_controller *nandc = get_qcom_nand_controller(chip); + struct nand_ecc_ctrl *ecc = &chip->ecc; + int reg_base = NAND_READ_LOCATION_0; + +- if (nandc->props->qpic_v2 && qcom_nandc_is_last_cw(ecc, cw)) ++ if (nandc->props->qpic_version2 && qcom_nandc_is_last_cw(ecc, cw)) + reg_base = NAND_READ_LOCATION_LAST_CW_0; + + reg_base += reg * 4; + +- if (nandc->props->qpic_v2 && qcom_nandc_is_last_cw(ecc, cw)) ++ if (nandc->props->qpic_version2 && qcom_nandc_is_last_cw(ecc, cw)) + return nandc_set_read_loc_last(chip, reg_base, cw_offset, + read_size, is_last_read_loc); + else +@@ -792,12 +756,13 @@ static void nandc_set_read_loc(struct nand_chip *chip, int cw, int reg, + static void set_address(struct qcom_nand_host *host, u16 column, int page) + { + struct nand_chip *chip = &host->chip; ++ struct qcom_nand_controller *nandc = get_qcom_nand_controller(chip); + + if (chip->options & NAND_BUSWIDTH_16) + column >>= 1; + +- nandc_set_reg(chip, NAND_ADDR0, page << 16 | column); +- nandc_set_reg(chip, NAND_ADDR1, page >> 16 & 0xff); ++ nandc->regs->addr0 = cpu_to_le32(page << 16 | column); ++ nandc->regs->addr1 = cpu_to_le32(page >> 16 & 0xff); + } + + /* +@@ -811,41 +776,43 @@ static void set_address(struct qcom_nand_host *host, u16 column, int page) + static void update_rw_regs(struct qcom_nand_host *host, int num_cw, bool read, int cw) + { + struct nand_chip *chip = &host->chip; +- u32 cmd, cfg0, cfg1, ecc_bch_cfg; ++ __le32 cmd, cfg0, cfg1, ecc_bch_cfg; + struct qcom_nand_controller *nandc = get_qcom_nand_controller(chip); + + if (read) { + if (host->use_ecc) +- cmd = OP_PAGE_READ_WITH_ECC | PAGE_ACC | LAST_PAGE; ++ cmd = cpu_to_le32(OP_PAGE_READ_WITH_ECC | PAGE_ACC | LAST_PAGE); + else +- cmd = OP_PAGE_READ | PAGE_ACC | LAST_PAGE; ++ 
cmd = cpu_to_le32(OP_PAGE_READ | PAGE_ACC | LAST_PAGE); + } else { +- cmd = OP_PROGRAM_PAGE | PAGE_ACC | LAST_PAGE; ++ cmd = cpu_to_le32(OP_PROGRAM_PAGE | PAGE_ACC | LAST_PAGE); + } + + if (host->use_ecc) { +- cfg0 = (host->cfg0 & ~(7U << CW_PER_PAGE)) | +- (num_cw - 1) << CW_PER_PAGE; ++ cfg0 = cpu_to_le32((host->cfg0 & ~(7U << CW_PER_PAGE)) | ++ (num_cw - 1) << CW_PER_PAGE); + +- cfg1 = host->cfg1; +- ecc_bch_cfg = host->ecc_bch_cfg; ++ cfg1 = cpu_to_le32(host->cfg1); ++ ecc_bch_cfg = cpu_to_le32(host->ecc_bch_cfg); + } else { +- cfg0 = (host->cfg0_raw & ~(7U << CW_PER_PAGE)) | +- (num_cw - 1) << CW_PER_PAGE; ++ cfg0 = cpu_to_le32((host->cfg0_raw & ~(7U << CW_PER_PAGE)) | ++ (num_cw - 1) << CW_PER_PAGE); + +- cfg1 = host->cfg1_raw; +- ecc_bch_cfg = 1 << ECC_CFG_ECC_DISABLE; ++ cfg1 = cpu_to_le32(host->cfg1_raw); ++ ecc_bch_cfg = cpu_to_le32(1 << ECC_CFG_ECC_DISABLE); + } + +- nandc_set_reg(chip, NAND_FLASH_CMD, cmd); +- nandc_set_reg(chip, NAND_DEV0_CFG0, cfg0); +- nandc_set_reg(chip, NAND_DEV0_CFG1, cfg1); +- nandc_set_reg(chip, NAND_DEV0_ECC_CFG, ecc_bch_cfg); +- if (!nandc->props->qpic_v2) +- nandc_set_reg(chip, NAND_EBI2_ECC_BUF_CFG, host->ecc_buf_cfg); +- nandc_set_reg(chip, NAND_FLASH_STATUS, host->clrflashstatus); +- nandc_set_reg(chip, NAND_READ_STATUS, host->clrreadstatus); +- nandc_set_reg(chip, NAND_EXEC_CMD, 1); ++ nandc->regs->cmd = cmd; ++ nandc->regs->cfg0 = cfg0; ++ nandc->regs->cfg1 = cfg1; ++ nandc->regs->ecc_bch_cfg = ecc_bch_cfg; ++ ++ if (!nandc->props->qpic_version2) ++ nandc->regs->ecc_buf_cfg = cpu_to_le32(host->ecc_buf_cfg); ++ ++ nandc->regs->clrflashstatus = cpu_to_le32(host->clrflashstatus); ++ nandc->regs->clrreadstatus = cpu_to_le32(host->clrreadstatus); ++ nandc->regs->exec = cpu_to_le32(1); + + if (read) + nandc_set_read_loc(chip, cw, 0, 0, host->use_ecc ? 
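The hunks above and below reduce the commit's bullet points to one pattern: instead of funnelling host-endian values through nandc_set_reg(), which looked the target field up by register offset, callers now convert once with cpu_to_le32() and store straight into the nandc->regs block. A minimal before/after sketch of that pattern (an illustration of the change, not a hunk from the patch):

    /* before: offset-based indirection via nandc_set_reg() */
    nandc_set_reg(chip, NAND_FLASH_CMD, OP_PAGE_READ_WITH_ECC | PAGE_ACC | LAST_PAGE);
    nandc_set_reg(chip, NAND_EXEC_CMD, 1);

    /* after: write the little-endian value directly into the regs struct */
    nandc->regs->cmd  = cpu_to_le32(OP_PAGE_READ_WITH_ECC | PAGE_ACC | LAST_PAGE);
    nandc->regs->exec = cpu_to_le32(1);

With the indirection gone, the offset_to_nandc_reg() switch statement has no remaining users, which is why the patch also removes it.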
+@@ -1121,7 +1088,7 @@ static int read_reg_dma(struct qcom_nand_controller *nandc, int first, + if (first == NAND_DEV_CMD_VLD || first == NAND_DEV_CMD1) + first = dev_cmd_reg_addr(nandc, first); + +- if (nandc->props->is_bam) ++ if (nandc->props->supports_bam) + return prep_bam_dma_desc_cmd(nandc, true, first, vaddr, + num_regs, flags); + +@@ -1136,25 +1103,16 @@ static int read_reg_dma(struct qcom_nand_controller *nandc, int first, + * write_reg_dma: prepares a descriptor to write a given number of + * contiguous registers + * ++ * @vaddr: contnigeous memory from where register value will ++ * be written + * @first: offset of the first register in the contiguous block + * @num_regs: number of registers to write + * @flags: flags to control DMA descriptor preparation + */ +-static int write_reg_dma(struct qcom_nand_controller *nandc, int first, +- int num_regs, unsigned int flags) ++static int write_reg_dma(struct qcom_nand_controller *nandc, __le32 *vaddr, ++ int first, int num_regs, unsigned int flags) + { + bool flow_control = false; +- struct nandc_regs *regs = nandc->regs; +- void *vaddr; +- +- vaddr = offset_to_nandc_reg(regs, first); +- +- if (first == NAND_ERASED_CW_DETECT_CFG) { +- if (flags & NAND_ERASED_CW_SET) +- vaddr = ®s->erased_cw_detect_cfg_set; +- else +- vaddr = ®s->erased_cw_detect_cfg_clr; +- } + + if (first == NAND_EXEC_CMD) + flags |= NAND_BAM_NWD; +@@ -1165,7 +1123,7 @@ static int write_reg_dma(struct qcom_nand_controller *nandc, int first, + if (first == NAND_DEV_CMD_VLD_RESTORE || first == NAND_DEV_CMD_VLD) + first = dev_cmd_reg_addr(nandc, NAND_DEV_CMD_VLD); + +- if (nandc->props->is_bam) ++ if (nandc->props->supports_bam) + return prep_bam_dma_desc_cmd(nandc, false, first, vaddr, + num_regs, flags); + +@@ -1188,7 +1146,7 @@ static int write_reg_dma(struct qcom_nand_controller *nandc, int first, + static int read_data_dma(struct qcom_nand_controller *nandc, int reg_off, + const u8 *vaddr, int size, unsigned int flags) + { +- if (nandc->props->is_bam) ++ if (nandc->props->supports_bam) + return prep_bam_dma_desc_data(nandc, true, vaddr, size, flags); + + return prep_adm_dma_desc(nandc, true, reg_off, vaddr, size, false); +@@ -1206,7 +1164,7 @@ static int read_data_dma(struct qcom_nand_controller *nandc, int reg_off, + static int write_data_dma(struct qcom_nand_controller *nandc, int reg_off, + const u8 *vaddr, int size, unsigned int flags) + { +- if (nandc->props->is_bam) ++ if (nandc->props->supports_bam) + return prep_bam_dma_desc_data(nandc, false, vaddr, size, flags); + + return prep_adm_dma_desc(nandc, false, reg_off, vaddr, size, false); +@@ -1220,13 +1178,14 @@ static void config_nand_page_read(struct nand_chip *chip) + { + struct qcom_nand_controller *nandc = get_qcom_nand_controller(chip); + +- write_reg_dma(nandc, NAND_ADDR0, 2, 0); +- write_reg_dma(nandc, NAND_DEV0_CFG0, 3, 0); +- if (!nandc->props->qpic_v2) +- write_reg_dma(nandc, NAND_EBI2_ECC_BUF_CFG, 1, 0); +- write_reg_dma(nandc, NAND_ERASED_CW_DETECT_CFG, 1, 0); +- write_reg_dma(nandc, NAND_ERASED_CW_DETECT_CFG, 1, +- NAND_ERASED_CW_SET | NAND_BAM_NEXT_SGL); ++ write_reg_dma(nandc, &nandc->regs->addr0, NAND_ADDR0, 2, 0); ++ write_reg_dma(nandc, &nandc->regs->cfg0, NAND_DEV0_CFG0, 3, 0); ++ if (!nandc->props->qpic_version2) ++ write_reg_dma(nandc, &nandc->regs->ecc_buf_cfg, NAND_EBI2_ECC_BUF_CFG, 1, 0); ++ write_reg_dma(nandc, &nandc->regs->erased_cw_detect_cfg_clr, ++ NAND_ERASED_CW_DETECT_CFG, 1, 0); ++ write_reg_dma(nandc, &nandc->regs->erased_cw_detect_cfg_set, ++ NAND_ERASED_CW_DETECT_CFG, 1, 
NAND_ERASED_CW_SET | NAND_BAM_NEXT_SGL); + } + + /* +@@ -1239,16 +1198,16 @@ config_nand_cw_read(struct nand_chip *chip, bool use_ecc, int cw) + struct qcom_nand_controller *nandc = get_qcom_nand_controller(chip); + struct nand_ecc_ctrl *ecc = &chip->ecc; + +- int reg = NAND_READ_LOCATION_0; ++ __le32 *reg = &nandc->regs->read_location0; + +- if (nandc->props->qpic_v2 && qcom_nandc_is_last_cw(ecc, cw)) +- reg = NAND_READ_LOCATION_LAST_CW_0; ++ if (nandc->props->qpic_version2 && qcom_nandc_is_last_cw(ecc, cw)) ++ reg = &nandc->regs->read_location_last0; + +- if (nandc->props->is_bam) +- write_reg_dma(nandc, reg, 4, NAND_BAM_NEXT_SGL); ++ if (nandc->props->supports_bam) ++ write_reg_dma(nandc, reg, NAND_READ_LOCATION_0, 4, NAND_BAM_NEXT_SGL); + +- write_reg_dma(nandc, NAND_FLASH_CMD, 1, NAND_BAM_NEXT_SGL); +- write_reg_dma(nandc, NAND_EXEC_CMD, 1, NAND_BAM_NEXT_SGL); ++ write_reg_dma(nandc, &nandc->regs->cmd, NAND_FLASH_CMD, 1, NAND_BAM_NEXT_SGL); ++ write_reg_dma(nandc, &nandc->regs->exec, NAND_EXEC_CMD, 1, NAND_BAM_NEXT_SGL); + + if (use_ecc) { + read_reg_dma(nandc, NAND_FLASH_STATUS, 2, 0); +@@ -1279,10 +1238,10 @@ static void config_nand_page_write(struct nand_chip *chip) + { + struct qcom_nand_controller *nandc = get_qcom_nand_controller(chip); + +- write_reg_dma(nandc, NAND_ADDR0, 2, 0); +- write_reg_dma(nandc, NAND_DEV0_CFG0, 3, 0); +- if (!nandc->props->qpic_v2) +- write_reg_dma(nandc, NAND_EBI2_ECC_BUF_CFG, 1, ++ write_reg_dma(nandc, &nandc->regs->addr0, NAND_ADDR0, 2, 0); ++ write_reg_dma(nandc, &nandc->regs->cfg0, NAND_DEV0_CFG0, 3, 0); ++ if (!nandc->props->qpic_version2) ++ write_reg_dma(nandc, &nandc->regs->ecc_buf_cfg, NAND_EBI2_ECC_BUF_CFG, 1, + NAND_BAM_NEXT_SGL); + } + +@@ -1294,13 +1253,13 @@ static void config_nand_cw_write(struct nand_chip *chip) + { + struct qcom_nand_controller *nandc = get_qcom_nand_controller(chip); + +- write_reg_dma(nandc, NAND_FLASH_CMD, 1, NAND_BAM_NEXT_SGL); +- write_reg_dma(nandc, NAND_EXEC_CMD, 1, NAND_BAM_NEXT_SGL); ++ write_reg_dma(nandc, &nandc->regs->cmd, NAND_FLASH_CMD, 1, NAND_BAM_NEXT_SGL); ++ write_reg_dma(nandc, &nandc->regs->exec, NAND_EXEC_CMD, 1, NAND_BAM_NEXT_SGL); + + read_reg_dma(nandc, NAND_FLASH_STATUS, 1, NAND_BAM_NEXT_SGL); + +- write_reg_dma(nandc, NAND_FLASH_STATUS, 1, 0); +- write_reg_dma(nandc, NAND_READ_STATUS, 1, NAND_BAM_NEXT_SGL); ++ write_reg_dma(nandc, &nandc->regs->clrflashstatus, NAND_FLASH_STATUS, 1, 0); ++ write_reg_dma(nandc, &nandc->regs->clrreadstatus, NAND_READ_STATUS, 1, NAND_BAM_NEXT_SGL); + } + + /* helpers to submit/free our list of dma descriptors */ +@@ -1311,7 +1270,7 @@ static int submit_descs(struct qcom_nand_controller *nandc) + struct bam_transaction *bam_txn = nandc->bam_txn; + int ret = 0; + +- if (nandc->props->is_bam) { ++ if (nandc->props->supports_bam) { + if (bam_txn->rx_sgl_pos > bam_txn->rx_sgl_start) { + ret = prepare_bam_async_desc(nandc, nandc->rx_chan, 0); + if (ret) +@@ -1336,14 +1295,9 @@ static int submit_descs(struct qcom_nand_controller *nandc) + list_for_each_entry(desc, &nandc->desc_list, node) + cookie = dmaengine_submit(desc->dma_desc); + +- if (nandc->props->is_bam) { ++ if (nandc->props->supports_bam) { + bam_txn->last_cmd_desc->callback = qpic_bam_dma_done; + bam_txn->last_cmd_desc->callback_param = bam_txn; +- if (bam_txn->last_data_desc) { +- bam_txn->last_data_desc->callback = qpic_bam_dma_done; +- bam_txn->last_data_desc->callback_param = bam_txn; +- bam_txn->wait_second_completion = true; +- } + + dma_async_issue_pending(nandc->tx_chan); + 
dma_async_issue_pending(nandc->rx_chan); +@@ -1365,7 +1319,7 @@ static int submit_descs(struct qcom_nand_controller *nandc) + list_for_each_entry_safe(desc, n, &nandc->desc_list, node) { + list_del(&desc->node); + +- if (nandc->props->is_bam) ++ if (nandc->props->supports_bam) + dma_unmap_sg(nandc->dev, desc->bam_sgl, + desc->sgl_cnt, desc->dir); + else +@@ -1382,7 +1336,7 @@ static int submit_descs(struct qcom_nand_controller *nandc) + static void clear_read_regs(struct qcom_nand_controller *nandc) + { + nandc->reg_read_pos = 0; +- nandc_read_buffer_sync(nandc, false); ++ nandc_dev_to_mem(nandc, false); + } + + /* +@@ -1446,7 +1400,7 @@ static int check_flash_errors(struct qcom_nand_host *host, int cw_cnt) + struct qcom_nand_controller *nandc = get_qcom_nand_controller(chip); + int i; + +- nandc_read_buffer_sync(nandc, true); ++ nandc_dev_to_mem(nandc, true); + + for (i = 0; i < cw_cnt; i++) { + u32 flash = le32_to_cpu(nandc->reg_read_buf[i]); +@@ -1476,7 +1430,7 @@ qcom_nandc_read_cw_raw(struct mtd_info *mtd, struct nand_chip *chip, + clear_read_regs(nandc); + host->use_ecc = false; + +- if (nandc->props->qpic_v2) ++ if (nandc->props->qpic_version2) + raw_cw = ecc->steps - 1; + + clear_bam_transaction(nandc); +@@ -1497,7 +1451,7 @@ qcom_nandc_read_cw_raw(struct mtd_info *mtd, struct nand_chip *chip, + oob_size2 = host->ecc_bytes_hw + host->spare_bytes; + } + +- if (nandc->props->is_bam) { ++ if (nandc->props->supports_bam) { + nandc_set_read_loc(chip, cw, 0, read_loc, data_size1, 0); + read_loc += data_size1; + +@@ -1621,7 +1575,7 @@ static int parse_read_errors(struct qcom_nand_host *host, u8 *data_buf, + u8 *data_buf_start = data_buf, *oob_buf_start = oob_buf; + + buf = (struct read_stats *)nandc->reg_read_buf; +- nandc_read_buffer_sync(nandc, true); ++ nandc_dev_to_mem(nandc, true); + + for (i = 0; i < ecc->steps; i++, buf++) { + u32 flash, buffer, erased_cw; +@@ -1734,7 +1688,7 @@ static int read_page_ecc(struct qcom_nand_host *host, u8 *data_buf, + oob_size = host->ecc_bytes_hw + host->spare_bytes; + } + +- if (nandc->props->is_bam) { ++ if (nandc->props->supports_bam) { + if (data_buf && oob_buf) { + nandc_set_read_loc(chip, i, 0, 0, data_size, 0); + nandc_set_read_loc(chip, i, 1, data_size, +@@ -2455,14 +2409,14 @@ static int qcom_nand_attach_chip(struct nand_chip *chip) + + mtd_set_ooblayout(mtd, &qcom_nand_ooblayout_ops); + /* Free the initially allocated BAM transaction for reading the ONFI params */ +- if (nandc->props->is_bam) ++ if (nandc->props->supports_bam) + free_bam_transaction(nandc); + + nandc->max_cwperpage = max_t(unsigned int, nandc->max_cwperpage, + cwperpage); + + /* Now allocate the BAM transaction based on updated max_cwperpage */ +- if (nandc->props->is_bam) { ++ if (nandc->props->supports_bam) { + nandc->bam_txn = alloc_bam_transaction(nandc); + if (!nandc->bam_txn) { + dev_err(nandc->dev, +@@ -2522,7 +2476,7 @@ static int qcom_nand_attach_chip(struct nand_chip *chip) + | ecc_mode << ECC_MODE + | host->ecc_bytes_hw << ECC_PARITY_SIZE_BYTES_BCH; + +- if (!nandc->props->qpic_v2) ++ if (!nandc->props->qpic_version2) + host->ecc_buf_cfg = 0x203 << NUM_STEPS; + + host->clrflashstatus = FS_READY_BSY_N; +@@ -2556,7 +2510,7 @@ static int qcom_op_cmd_mapping(struct nand_chip *chip, u8 opcode, + cmd = OP_FETCH_ID; + break; + case NAND_CMD_PARAM: +- if (nandc->props->qpic_v2) ++ if (nandc->props->qpic_version2) + cmd = OP_PAGE_READ_ONFI_READ; + else + cmd = OP_PAGE_READ; +@@ -2609,7 +2563,7 @@ static int qcom_parse_instructions(struct nand_chip *chip, + if (ret < 0) + 
return ret; + +- q_op->cmd_reg = ret; ++ q_op->cmd_reg = cpu_to_le32(ret); + q_op->rdy_delay_ns = instr->delay_ns; + break; + +@@ -2619,10 +2573,10 @@ static int qcom_parse_instructions(struct nand_chip *chip, + addrs = &instr->ctx.addr.addrs[offset]; + + for (i = 0; i < min_t(unsigned int, 4, naddrs); i++) +- q_op->addr1_reg |= addrs[i] << (i * 8); ++ q_op->addr1_reg |= cpu_to_le32(addrs[i] << (i * 8)); + + if (naddrs > 4) +- q_op->addr2_reg |= addrs[4]; ++ q_op->addr2_reg |= cpu_to_le32(addrs[4]); + + q_op->rdy_delay_ns = instr->delay_ns; + break; +@@ -2663,7 +2617,7 @@ static int qcom_wait_rdy_poll(struct nand_chip *chip, unsigned int time_ms) + unsigned long start = jiffies + msecs_to_jiffies(time_ms); + u32 flash; + +- nandc_read_buffer_sync(nandc, true); ++ nandc_dev_to_mem(nandc, true); + + do { + flash = le32_to_cpu(nandc->reg_read_buf[0]); +@@ -2706,11 +2660,11 @@ static int qcom_read_status_exec(struct nand_chip *chip, + clear_read_regs(nandc); + clear_bam_transaction(nandc); + +- nandc_set_reg(chip, NAND_FLASH_CMD, q_op.cmd_reg); +- nandc_set_reg(chip, NAND_EXEC_CMD, 1); ++ nandc->regs->cmd = q_op.cmd_reg; ++ nandc->regs->exec = cpu_to_le32(1); + +- write_reg_dma(nandc, NAND_FLASH_CMD, 1, NAND_BAM_NEXT_SGL); +- write_reg_dma(nandc, NAND_EXEC_CMD, 1, NAND_BAM_NEXT_SGL); ++ write_reg_dma(nandc, &nandc->regs->cmd, NAND_FLASH_CMD, 1, NAND_BAM_NEXT_SGL); ++ write_reg_dma(nandc, &nandc->regs->exec, NAND_EXEC_CMD, 1, NAND_BAM_NEXT_SGL); + read_reg_dma(nandc, NAND_FLASH_STATUS, 1, NAND_BAM_NEXT_SGL); + + ret = submit_descs(nandc); +@@ -2719,7 +2673,7 @@ static int qcom_read_status_exec(struct nand_chip *chip, + goto err_out; + } + +- nandc_read_buffer_sync(nandc, true); ++ nandc_dev_to_mem(nandc, true); + + for (i = 0; i < num_cw; i++) { + flash_status = le32_to_cpu(nandc->reg_read_buf[i]); +@@ -2763,16 +2717,14 @@ static int qcom_read_id_type_exec(struct nand_chip *chip, const struct nand_subo + clear_read_regs(nandc); + clear_bam_transaction(nandc); + +- nandc_set_reg(chip, NAND_FLASH_CMD, q_op.cmd_reg); +- nandc_set_reg(chip, NAND_ADDR0, q_op.addr1_reg); +- nandc_set_reg(chip, NAND_ADDR1, q_op.addr2_reg); +- nandc_set_reg(chip, NAND_FLASH_CHIP_SELECT, +- nandc->props->is_bam ? 0 : DM_EN); +- +- nandc_set_reg(chip, NAND_EXEC_CMD, 1); ++ nandc->regs->cmd = q_op.cmd_reg; ++ nandc->regs->addr0 = q_op.addr1_reg; ++ nandc->regs->addr1 = q_op.addr2_reg; ++ nandc->regs->chip_sel = cpu_to_le32(nandc->props->supports_bam ? 
0 : DM_EN); ++ nandc->regs->exec = cpu_to_le32(1); + +- write_reg_dma(nandc, NAND_FLASH_CMD, 4, NAND_BAM_NEXT_SGL); +- write_reg_dma(nandc, NAND_EXEC_CMD, 1, NAND_BAM_NEXT_SGL); ++ write_reg_dma(nandc, &nandc->regs->cmd, NAND_FLASH_CMD, 4, NAND_BAM_NEXT_SGL); ++ write_reg_dma(nandc, &nandc->regs->exec, NAND_EXEC_CMD, 1, NAND_BAM_NEXT_SGL); + + read_reg_dma(nandc, NAND_READ_ID, 1, NAND_BAM_NEXT_SGL); + +@@ -2786,7 +2738,7 @@ static int qcom_read_id_type_exec(struct nand_chip *chip, const struct nand_subo + op_id = q_op.data_instr_idx; + len = nand_subop_get_data_len(subop, op_id); + +- nandc_read_buffer_sync(nandc, true); ++ nandc_dev_to_mem(nandc, true); + memcpy(instr->ctx.data.buf.in, nandc->reg_read_buf, len); + + err_out: +@@ -2807,15 +2759,14 @@ static int qcom_misc_cmd_type_exec(struct nand_chip *chip, const struct nand_sub + + if (q_op.flag == OP_PROGRAM_PAGE) { + goto wait_rdy; +- } else if (q_op.cmd_reg == OP_BLOCK_ERASE) { +- q_op.cmd_reg |= PAGE_ACC | LAST_PAGE; +- nandc_set_reg(chip, NAND_ADDR0, q_op.addr1_reg); +- nandc_set_reg(chip, NAND_ADDR1, q_op.addr2_reg); +- nandc_set_reg(chip, NAND_DEV0_CFG0, +- host->cfg0_raw & ~(7 << CW_PER_PAGE)); +- nandc_set_reg(chip, NAND_DEV0_CFG1, host->cfg1_raw); ++ } else if (q_op.cmd_reg == cpu_to_le32(OP_BLOCK_ERASE)) { ++ q_op.cmd_reg |= cpu_to_le32(PAGE_ACC | LAST_PAGE); ++ nandc->regs->addr0 = q_op.addr1_reg; ++ nandc->regs->addr1 = q_op.addr2_reg; ++ nandc->regs->cfg0 = cpu_to_le32(host->cfg0_raw & ~(7 << CW_PER_PAGE)); ++ nandc->regs->cfg1 = cpu_to_le32(host->cfg1_raw); + instrs = 3; +- } else if (q_op.cmd_reg != OP_RESET_DEVICE) { ++ } else if (q_op.cmd_reg != cpu_to_le32(OP_RESET_DEVICE)) { + return 0; + } + +@@ -2826,14 +2777,14 @@ static int qcom_misc_cmd_type_exec(struct nand_chip *chip, const struct nand_sub + clear_read_regs(nandc); + clear_bam_transaction(nandc); + +- nandc_set_reg(chip, NAND_FLASH_CMD, q_op.cmd_reg); +- nandc_set_reg(chip, NAND_EXEC_CMD, 1); ++ nandc->regs->cmd = q_op.cmd_reg; ++ nandc->regs->exec = cpu_to_le32(1); + +- write_reg_dma(nandc, NAND_FLASH_CMD, instrs, NAND_BAM_NEXT_SGL); +- if (q_op.cmd_reg == OP_BLOCK_ERASE) +- write_reg_dma(nandc, NAND_DEV0_CFG0, 2, NAND_BAM_NEXT_SGL); ++ write_reg_dma(nandc, &nandc->regs->cmd, NAND_FLASH_CMD, instrs, NAND_BAM_NEXT_SGL); ++ if (q_op.cmd_reg == cpu_to_le32(OP_BLOCK_ERASE)) ++ write_reg_dma(nandc, &nandc->regs->cfg0, NAND_DEV0_CFG0, 2, NAND_BAM_NEXT_SGL); + +- write_reg_dma(nandc, NAND_EXEC_CMD, 1, NAND_BAM_NEXT_SGL); ++ write_reg_dma(nandc, &nandc->regs->exec, NAND_EXEC_CMD, 1, NAND_BAM_NEXT_SGL); + read_reg_dma(nandc, NAND_FLASH_STATUS, 1, NAND_BAM_NEXT_SGL); + + ret = submit_descs(nandc); +@@ -2864,7 +2815,7 @@ static int qcom_param_page_type_exec(struct nand_chip *chip, const struct nand_ + if (ret) + return ret; + +- q_op.cmd_reg |= PAGE_ACC | LAST_PAGE; ++ q_op.cmd_reg |= cpu_to_le32(PAGE_ACC | LAST_PAGE); + + nandc->buf_count = 0; + nandc->buf_start = 0; +@@ -2872,38 +2823,38 @@ static int qcom_param_page_type_exec(struct nand_chip *chip, const struct nand_ + clear_read_regs(nandc); + clear_bam_transaction(nandc); + +- nandc_set_reg(chip, NAND_FLASH_CMD, q_op.cmd_reg); +- +- nandc_set_reg(chip, NAND_ADDR0, 0); +- nandc_set_reg(chip, NAND_ADDR1, 0); +- nandc_set_reg(chip, NAND_DEV0_CFG0, 0 << CW_PER_PAGE +- | 512 << UD_SIZE_BYTES +- | 5 << NUM_ADDR_CYCLES +- | 0 << SPARE_SIZE_BYTES); +- nandc_set_reg(chip, NAND_DEV0_CFG1, 7 << NAND_RECOVERY_CYCLES +- | 0 << CS_ACTIVE_BSY +- | 17 << BAD_BLOCK_BYTE_NUM +- | 1 << BAD_BLOCK_IN_SPARE_AREA +- | 2 << WR_RD_BSY_GAP 
+- | 0 << WIDE_FLASH +- | 1 << DEV0_CFG1_ECC_DISABLE); +- if (!nandc->props->qpic_v2) +- nandc_set_reg(chip, NAND_EBI2_ECC_BUF_CFG, 1 << ECC_CFG_ECC_DISABLE); ++ nandc->regs->cmd = q_op.cmd_reg; ++ nandc->regs->addr0 = 0; ++ nandc->regs->addr1 = 0; ++ ++ nandc->regs->cfg0 = cpu_to_le32(0 << CW_PER_PAGE ++ | 512 << UD_SIZE_BYTES ++ | 5 << NUM_ADDR_CYCLES ++ | 0 << SPARE_SIZE_BYTES); ++ ++ nandc->regs->cfg1 = cpu_to_le32(7 << NAND_RECOVERY_CYCLES ++ | 0 << CS_ACTIVE_BSY ++ | 17 << BAD_BLOCK_BYTE_NUM ++ | 1 << BAD_BLOCK_IN_SPARE_AREA ++ | 2 << WR_RD_BSY_GAP ++ | 0 << WIDE_FLASH ++ | 1 << DEV0_CFG1_ECC_DISABLE); ++ ++ if (!nandc->props->qpic_version2) ++ nandc->regs->ecc_buf_cfg = cpu_to_le32(1 << ECC_CFG_ECC_DISABLE); + + /* configure CMD1 and VLD for ONFI param probing in QPIC v1 */ +- if (!nandc->props->qpic_v2) { +- nandc_set_reg(chip, NAND_DEV_CMD_VLD, +- (nandc->vld & ~READ_START_VLD)); +- nandc_set_reg(chip, NAND_DEV_CMD1, +- (nandc->cmd1 & ~(0xFF << READ_ADDR)) +- | NAND_CMD_PARAM << READ_ADDR); ++ if (!nandc->props->qpic_version2) { ++ nandc->regs->vld = cpu_to_le32((nandc->vld & ~READ_START_VLD)); ++ nandc->regs->cmd1 = cpu_to_le32((nandc->cmd1 & ~(0xFF << READ_ADDR)) ++ | NAND_CMD_PARAM << READ_ADDR); + } + +- nandc_set_reg(chip, NAND_EXEC_CMD, 1); ++ nandc->regs->exec = cpu_to_le32(1); + +- if (!nandc->props->qpic_v2) { +- nandc_set_reg(chip, NAND_DEV_CMD1_RESTORE, nandc->cmd1); +- nandc_set_reg(chip, NAND_DEV_CMD_VLD_RESTORE, nandc->vld); ++ if (!nandc->props->qpic_version2) { ++ nandc->regs->orig_cmd1 = cpu_to_le32(nandc->cmd1); ++ nandc->regs->orig_vld = cpu_to_le32(nandc->vld); + } + + instr = q_op.data_instr; +@@ -2912,9 +2863,9 @@ static int qcom_param_page_type_exec(struct nand_chip *chip, const struct nand_ + + nandc_set_read_loc(chip, 0, 0, 0, len, 1); + +- if (!nandc->props->qpic_v2) { +- write_reg_dma(nandc, NAND_DEV_CMD_VLD, 1, 0); +- write_reg_dma(nandc, NAND_DEV_CMD1, 1, NAND_BAM_NEXT_SGL); ++ if (!nandc->props->qpic_version2) { ++ write_reg_dma(nandc, &nandc->regs->vld, NAND_DEV_CMD_VLD, 1, 0); ++ write_reg_dma(nandc, &nandc->regs->cmd1, NAND_DEV_CMD1, 1, NAND_BAM_NEXT_SGL); + } + + nandc->buf_count = len; +@@ -2926,9 +2877,10 @@ static int qcom_param_page_type_exec(struct nand_chip *chip, const struct nand_ + nandc->buf_count, 0); + + /* restore CMD1 and VLD regs */ +- if (!nandc->props->qpic_v2) { +- write_reg_dma(nandc, NAND_DEV_CMD1_RESTORE, 1, 0); +- write_reg_dma(nandc, NAND_DEV_CMD_VLD_RESTORE, 1, NAND_BAM_NEXT_SGL); ++ if (!nandc->props->qpic_version2) { ++ write_reg_dma(nandc, &nandc->regs->orig_cmd1, NAND_DEV_CMD1_RESTORE, 1, 0); ++ write_reg_dma(nandc, &nandc->regs->orig_vld, NAND_DEV_CMD_VLD_RESTORE, 1, ++ NAND_BAM_NEXT_SGL); + } + + ret = submit_descs(nandc); +@@ -3017,7 +2969,7 @@ static const struct nand_controller_ops qcom_nandc_ops = { + + static void qcom_nandc_unalloc(struct qcom_nand_controller *nandc) + { +- if (nandc->props->is_bam) { ++ if (nandc->props->supports_bam) { + if (!dma_mapping_error(nandc->dev, nandc->reg_read_dma)) + dma_unmap_single(nandc->dev, nandc->reg_read_dma, + MAX_REG_RD * +@@ -3070,7 +3022,7 @@ static int qcom_nandc_alloc(struct qcom_nand_controller *nandc) + if (!nandc->reg_read_buf) + return -ENOMEM; + +- if (nandc->props->is_bam) { ++ if (nandc->props->supports_bam) { + nandc->reg_read_dma = + dma_map_single(nandc->dev, nandc->reg_read_buf, + MAX_REG_RD * +@@ -3151,15 +3103,15 @@ static int qcom_nandc_setup(struct qcom_nand_controller *nandc) + u32 nand_ctrl; + + /* kill onenand */ +- if (!nandc->props->is_qpic) ++ if 
(!nandc->props->nandc_part_of_qpic) + nandc_write(nandc, SFLASHC_BURST_CFG, 0); + +- if (!nandc->props->qpic_v2) ++ if (!nandc->props->qpic_version2) + nandc_write(nandc, dev_cmd_reg_addr(nandc, NAND_DEV_CMD_VLD), + NAND_DEV_CMD_VLD_VAL); + + /* enable ADM or BAM DMA */ +- if (nandc->props->is_bam) { ++ if (nandc->props->supports_bam) { + nand_ctrl = nandc_read(nandc, NAND_CTRL); + + /* +@@ -3176,7 +3128,7 @@ static int qcom_nandc_setup(struct qcom_nand_controller *nandc) + } + + /* save the original values of these registers */ +- if (!nandc->props->qpic_v2) { ++ if (!nandc->props->qpic_version2) { + nandc->cmd1 = nandc_read(nandc, dev_cmd_reg_addr(nandc, NAND_DEV_CMD1)); + nandc->vld = NAND_DEV_CMD_VLD_VAL; + } +@@ -3349,7 +3301,7 @@ static int qcom_nandc_parse_dt(struct platform_device *pdev) + struct device_node *np = nandc->dev->of_node; + int ret; + +- if (!nandc->props->is_bam) { ++ if (!nandc->props->supports_bam) { + ret = of_property_read_u32(np, "qcom,cmd-crci", + &nandc->cmd_crci); + if (ret) { +@@ -3474,30 +3426,30 @@ static void qcom_nandc_remove(struct platform_device *pdev) + + static const struct qcom_nandc_props ipq806x_nandc_props = { + .ecc_modes = (ECC_RS_4BIT | ECC_BCH_8BIT), +- .is_bam = false, ++ .supports_bam = false, + .use_codeword_fixup = true, + .dev_cmd_reg_start = 0x0, + }; + + static const struct qcom_nandc_props ipq4019_nandc_props = { + .ecc_modes = (ECC_BCH_4BIT | ECC_BCH_8BIT), +- .is_bam = true, +- .is_qpic = true, ++ .supports_bam = true, ++ .nandc_part_of_qpic = true, + .dev_cmd_reg_start = 0x0, + }; + + static const struct qcom_nandc_props ipq8074_nandc_props = { + .ecc_modes = (ECC_BCH_4BIT | ECC_BCH_8BIT), +- .is_bam = true, +- .is_qpic = true, ++ .supports_bam = true, ++ .nandc_part_of_qpic = true, + .dev_cmd_reg_start = 0x7000, + }; + + static const struct qcom_nandc_props sdx55_nandc_props = { + .ecc_modes = (ECC_BCH_4BIT | ECC_BCH_8BIT), +- .is_bam = true, +- .is_qpic = true, +- .qpic_v2 = true, ++ .supports_bam = true, ++ .nandc_part_of_qpic = true, ++ .qpic_version2 = true, + .dev_cmd_reg_start = 0x7000, + }; + diff --git a/target/linux/qualcommax/patches-6.6/0403-mtd-rawnand-qcom-Add-qcom-prefix-to-common-api.patch b/target/linux/qualcommax/patches-6.6/0403-mtd-rawnand-qcom-Add-qcom-prefix-to-common-api.patch new file mode 100644 index 000000000..4fcefe0bc --- /dev/null +++ b/target/linux/qualcommax/patches-6.6/0403-mtd-rawnand-qcom-Add-qcom-prefix-to-common-api.patch @@ -0,0 +1,876 @@ +From: Md Sadre Alam +Date: Sun, 22 Sep 2024 17:03:46 +0530 +Subject: [PATCH] mtd: rawnand: qcom: Add qcom prefix to common api + +Add qcom prefix to all the api which will be commonly +used by spi nand driver and raw nand driver. + +Signed-off-by: Md Sadre Alam +--- +diff --git a/drivers/mtd/nand/raw/qcom_nandc.c b/drivers/mtd/nand/raw/qcom_nandc.c +index d134329330fe..daf8f73b25bc 100644 +--- a/drivers/mtd/nand/raw/qcom_nandc.c ++++ b/drivers/mtd/nand/raw/qcom_nandc.c +@@ -53,7 +53,7 @@ + #define NAND_READ_LOCATION_LAST_CW_2 0xf48 + #define NAND_READ_LOCATION_LAST_CW_3 0xf4c + +-/* dummy register offsets, used by write_reg_dma */ ++/* dummy register offsets, used by qcom_write_reg_dma */ + #define NAND_DEV_CMD1_RESTORE 0xdead + #define NAND_DEV_CMD_VLD_RESTORE 0xbeef + +@@ -211,7 +211,7 @@ + + /* + * Flags used in DMA descriptor preparation helper functions +- * (i.e. read_reg_dma/write_reg_dma/read_data_dma/write_data_dma) ++ * (i.e. 
qcom_read_reg_dma/qcom_write_reg_dma/qcom_read_data_dma/qcom_write_data_dma) + */ + /* Don't set the EOT in current tx BAM sgl */ + #define NAND_BAM_NO_EOT BIT(0) +@@ -550,7 +550,7 @@ struct qcom_nandc_props { + }; + + /* Frees the BAM transaction memory */ +-static void free_bam_transaction(struct qcom_nand_controller *nandc) ++static void qcom_free_bam_transaction(struct qcom_nand_controller *nandc) + { + struct bam_transaction *bam_txn = nandc->bam_txn; + +@@ -559,7 +559,7 @@ static void free_bam_transaction(struct qcom_nand_controller *nandc) + + /* Allocates and Initializes the BAM transaction */ + static struct bam_transaction * +-alloc_bam_transaction(struct qcom_nand_controller *nandc) ++qcom_alloc_bam_transaction(struct qcom_nand_controller *nandc) + { + struct bam_transaction *bam_txn; + size_t bam_txn_size; +@@ -595,7 +595,7 @@ alloc_bam_transaction(struct qcom_nand_controller *nandc) + } + + /* Clears the BAM transaction indexes */ +-static void clear_bam_transaction(struct qcom_nand_controller *nandc) ++static void qcom_clear_bam_transaction(struct qcom_nand_controller *nandc) + { + struct bam_transaction *bam_txn = nandc->bam_txn; + +@@ -614,7 +614,7 @@ static void clear_bam_transaction(struct qcom_nand_controller *nandc) + } + + /* Callback for DMA descriptor completion */ +-static void qpic_bam_dma_done(void *data) ++static void qcom_qpic_bam_dma_done(void *data) + { + struct bam_transaction *bam_txn = data; + +@@ -644,7 +644,7 @@ static inline void nandc_write(struct qcom_nand_controller *nandc, int offset, + iowrite32(val, nandc->base + offset); + } + +-static inline void nandc_dev_to_mem(struct qcom_nand_controller *nandc, bool is_cpu) ++static inline void qcom_nandc_dev_to_mem(struct qcom_nand_controller *nandc, bool is_cpu) + { + if (!nandc->props->supports_bam) + return; +@@ -824,9 +824,9 @@ static void update_rw_regs(struct qcom_nand_host *host, int num_cw, bool read, i + * for BAM. This descriptor will be added in the NAND DMA descriptor queue + * which will be submitted to DMA engine. + */ +-static int prepare_bam_async_desc(struct qcom_nand_controller *nandc, +- struct dma_chan *chan, +- unsigned long flags) ++static int qcom_prepare_bam_async_desc(struct qcom_nand_controller *nandc, ++ struct dma_chan *chan, ++ unsigned long flags) + { + struct desc_info *desc; + struct scatterlist *sgl; +@@ -903,9 +903,9 @@ static int prepare_bam_async_desc(struct qcom_nand_controller *nandc, + * NAND_BAM_NEXT_SGL will be used for starting the separate SGL + * after the current command element. + */ +-static int prep_bam_dma_desc_cmd(struct qcom_nand_controller *nandc, bool read, +- int reg_off, const void *vaddr, +- int size, unsigned int flags) ++static int qcom_prep_bam_dma_desc_cmd(struct qcom_nand_controller *nandc, bool read, ++ int reg_off, const void *vaddr, ++ int size, unsigned int flags) + { + int bam_ce_size; + int i, ret; +@@ -943,9 +943,9 @@ static int prep_bam_dma_desc_cmd(struct qcom_nand_controller *nandc, bool read, + bam_txn->bam_ce_start = bam_txn->bam_ce_pos; + + if (flags & NAND_BAM_NWD) { +- ret = prepare_bam_async_desc(nandc, nandc->cmd_chan, +- DMA_PREP_FENCE | +- DMA_PREP_CMD); ++ ret = qcom_prepare_bam_async_desc(nandc, nandc->cmd_chan, ++ DMA_PREP_FENCE | ++ DMA_PREP_CMD); + if (ret) + return ret; + } +@@ -958,9 +958,8 @@ static int prep_bam_dma_desc_cmd(struct qcom_nand_controller *nandc, bool read, + * Prepares the data descriptor for BAM DMA which will be used for NAND + * data reads and writes. 
+ */ +-static int prep_bam_dma_desc_data(struct qcom_nand_controller *nandc, bool read, +- const void *vaddr, +- int size, unsigned int flags) ++static int qcom_prep_bam_dma_desc_data(struct qcom_nand_controller *nandc, bool read, ++ const void *vaddr, int size, unsigned int flags) + { + int ret; + struct bam_transaction *bam_txn = nandc->bam_txn; +@@ -979,8 +978,8 @@ static int prep_bam_dma_desc_data(struct qcom_nand_controller *nandc, bool read, + * is not set, form the DMA descriptor + */ + if (!(flags & NAND_BAM_NO_EOT)) { +- ret = prepare_bam_async_desc(nandc, nandc->tx_chan, +- DMA_PREP_INTERRUPT); ++ ret = qcom_prepare_bam_async_desc(nandc, nandc->tx_chan, ++ DMA_PREP_INTERRUPT); + if (ret) + return ret; + } +@@ -989,9 +988,9 @@ static int prep_bam_dma_desc_data(struct qcom_nand_controller *nandc, bool read, + return 0; + } + +-static int prep_adm_dma_desc(struct qcom_nand_controller *nandc, bool read, +- int reg_off, const void *vaddr, int size, +- bool flow_control) ++static int qcom_prep_adm_dma_desc(struct qcom_nand_controller *nandc, bool read, ++ int reg_off, const void *vaddr, int size, ++ bool flow_control) + { + struct desc_info *desc; + struct dma_async_tx_descriptor *dma_desc; +@@ -1069,15 +1068,15 @@ static int prep_adm_dma_desc(struct qcom_nand_controller *nandc, bool read, + } + + /* +- * read_reg_dma: prepares a descriptor to read a given number of ++ * qcom_read_reg_dma: prepares a descriptor to read a given number of + * contiguous registers to the reg_read_buf pointer + * + * @first: offset of the first register in the contiguous block + * @num_regs: number of registers to read + * @flags: flags to control DMA descriptor preparation + */ +-static int read_reg_dma(struct qcom_nand_controller *nandc, int first, +- int num_regs, unsigned int flags) ++static int qcom_read_reg_dma(struct qcom_nand_controller *nandc, int first, ++ int num_regs, unsigned int flags) + { + bool flow_control = false; + void *vaddr; +@@ -1089,18 +1088,18 @@ static int read_reg_dma(struct qcom_nand_controller *nandc, int first, + first = dev_cmd_reg_addr(nandc, first); + + if (nandc->props->supports_bam) +- return prep_bam_dma_desc_cmd(nandc, true, first, vaddr, ++ return qcom_prep_bam_dma_desc_cmd(nandc, true, first, vaddr, + num_regs, flags); + + if (first == NAND_READ_ID || first == NAND_FLASH_STATUS) + flow_control = true; + +- return prep_adm_dma_desc(nandc, true, first, vaddr, ++ return qcom_prep_adm_dma_desc(nandc, true, first, vaddr, + num_regs * sizeof(u32), flow_control); + } + + /* +- * write_reg_dma: prepares a descriptor to write a given number of ++ * qcom_write_reg_dma: prepares a descriptor to write a given number of + * contiguous registers + * + * @vaddr: contnigeous memory from where register value will +@@ -1109,8 +1108,8 @@ static int read_reg_dma(struct qcom_nand_controller *nandc, int first, + * @num_regs: number of registers to write + * @flags: flags to control DMA descriptor preparation + */ +-static int write_reg_dma(struct qcom_nand_controller *nandc, __le32 *vaddr, +- int first, int num_regs, unsigned int flags) ++static int qcom_write_reg_dma(struct qcom_nand_controller *nandc, __le32 *vaddr, ++ int first, int num_regs, unsigned int flags) + { + bool flow_control = false; + +@@ -1124,18 +1123,18 @@ static int write_reg_dma(struct qcom_nand_controller *nandc, __le32 *vaddr, + first = dev_cmd_reg_addr(nandc, NAND_DEV_CMD_VLD); + + if (nandc->props->supports_bam) +- return prep_bam_dma_desc_cmd(nandc, false, first, vaddr, ++ return qcom_prep_bam_dma_desc_cmd(nandc, 
false, first, vaddr, + num_regs, flags); + + if (first == NAND_FLASH_CMD) + flow_control = true; + +- return prep_adm_dma_desc(nandc, false, first, vaddr, ++ return qcom_prep_adm_dma_desc(nandc, false, first, vaddr, + num_regs * sizeof(u32), flow_control); + } + + /* +- * read_data_dma: prepares a DMA descriptor to transfer data from the ++ * qcom_read_data_dma: prepares a DMA descriptor to transfer data from the + * controller's internal buffer to the buffer 'vaddr' + * + * @reg_off: offset within the controller's data buffer +@@ -1143,17 +1142,17 @@ static int write_reg_dma(struct qcom_nand_controller *nandc, __le32 *vaddr, + * @size: DMA transaction size in bytes + * @flags: flags to control DMA descriptor preparation + */ +-static int read_data_dma(struct qcom_nand_controller *nandc, int reg_off, +- const u8 *vaddr, int size, unsigned int flags) ++static int qcom_read_data_dma(struct qcom_nand_controller *nandc, int reg_off, ++ const u8 *vaddr, int size, unsigned int flags) + { + if (nandc->props->supports_bam) +- return prep_bam_dma_desc_data(nandc, true, vaddr, size, flags); ++ return qcom_prep_bam_dma_desc_data(nandc, true, vaddr, size, flags); + +- return prep_adm_dma_desc(nandc, true, reg_off, vaddr, size, false); ++ return qcom_prep_adm_dma_desc(nandc, true, reg_off, vaddr, size, false); + } + + /* +- * write_data_dma: prepares a DMA descriptor to transfer data from ++ * qcom_write_data_dma: prepares a DMA descriptor to transfer data from + * 'vaddr' to the controller's internal buffer + * + * @reg_off: offset within the controller's data buffer +@@ -1161,13 +1160,13 @@ static int read_data_dma(struct qcom_nand_controller *nandc, int reg_off, + * @size: DMA transaction size in bytes + * @flags: flags to control DMA descriptor preparation + */ +-static int write_data_dma(struct qcom_nand_controller *nandc, int reg_off, +- const u8 *vaddr, int size, unsigned int flags) ++static int qcom_write_data_dma(struct qcom_nand_controller *nandc, int reg_off, ++ const u8 *vaddr, int size, unsigned int flags) + { + if (nandc->props->supports_bam) +- return prep_bam_dma_desc_data(nandc, false, vaddr, size, flags); ++ return qcom_prep_bam_dma_desc_data(nandc, false, vaddr, size, flags); + +- return prep_adm_dma_desc(nandc, false, reg_off, vaddr, size, false); ++ return qcom_prep_adm_dma_desc(nandc, false, reg_off, vaddr, size, false); + } + + /* +@@ -1178,14 +1177,14 @@ static void config_nand_page_read(struct nand_chip *chip) + { + struct qcom_nand_controller *nandc = get_qcom_nand_controller(chip); + +- write_reg_dma(nandc, &nandc->regs->addr0, NAND_ADDR0, 2, 0); +- write_reg_dma(nandc, &nandc->regs->cfg0, NAND_DEV0_CFG0, 3, 0); ++ qcom_write_reg_dma(nandc, &nandc->regs->addr0, NAND_ADDR0, 2, 0); ++ qcom_write_reg_dma(nandc, &nandc->regs->cfg0, NAND_DEV0_CFG0, 3, 0); + if (!nandc->props->qpic_version2) +- write_reg_dma(nandc, &nandc->regs->ecc_buf_cfg, NAND_EBI2_ECC_BUF_CFG, 1, 0); +- write_reg_dma(nandc, &nandc->regs->erased_cw_detect_cfg_clr, +- NAND_ERASED_CW_DETECT_CFG, 1, 0); +- write_reg_dma(nandc, &nandc->regs->erased_cw_detect_cfg_set, +- NAND_ERASED_CW_DETECT_CFG, 1, NAND_ERASED_CW_SET | NAND_BAM_NEXT_SGL); ++ qcom_write_reg_dma(nandc, &nandc->regs->ecc_buf_cfg, NAND_EBI2_ECC_BUF_CFG, 1, 0); ++ qcom_write_reg_dma(nandc, &nandc->regs->erased_cw_detect_cfg_clr, ++ NAND_ERASED_CW_DETECT_CFG, 1, 0); ++ qcom_write_reg_dma(nandc, &nandc->regs->erased_cw_detect_cfg_set, ++ NAND_ERASED_CW_DETECT_CFG, 1, NAND_ERASED_CW_SET | NAND_BAM_NEXT_SGL); + } + + /* +@@ -1204,17 +1203,17 @@ 
config_nand_cw_read(struct nand_chip *chip, bool use_ecc, int cw) + reg = &nandc->regs->read_location_last0; + + if (nandc->props->supports_bam) +- write_reg_dma(nandc, reg, NAND_READ_LOCATION_0, 4, NAND_BAM_NEXT_SGL); ++ qcom_write_reg_dma(nandc, reg, NAND_READ_LOCATION_0, 4, NAND_BAM_NEXT_SGL); + +- write_reg_dma(nandc, &nandc->regs->cmd, NAND_FLASH_CMD, 1, NAND_BAM_NEXT_SGL); +- write_reg_dma(nandc, &nandc->regs->exec, NAND_EXEC_CMD, 1, NAND_BAM_NEXT_SGL); ++ qcom_write_reg_dma(nandc, &nandc->regs->cmd, NAND_FLASH_CMD, 1, NAND_BAM_NEXT_SGL); ++ qcom_write_reg_dma(nandc, &nandc->regs->exec, NAND_EXEC_CMD, 1, NAND_BAM_NEXT_SGL); + + if (use_ecc) { +- read_reg_dma(nandc, NAND_FLASH_STATUS, 2, 0); +- read_reg_dma(nandc, NAND_ERASED_CW_DETECT_STATUS, 1, +- NAND_BAM_NEXT_SGL); ++ qcom_read_reg_dma(nandc, NAND_FLASH_STATUS, 2, 0); ++ qcom_read_reg_dma(nandc, NAND_ERASED_CW_DETECT_STATUS, 1, ++ NAND_BAM_NEXT_SGL); + } else { +- read_reg_dma(nandc, NAND_FLASH_STATUS, 1, NAND_BAM_NEXT_SGL); ++ qcom_read_reg_dma(nandc, NAND_FLASH_STATUS, 1, NAND_BAM_NEXT_SGL); + } + } + +@@ -1238,11 +1237,11 @@ static void config_nand_page_write(struct nand_chip *chip) + { + struct qcom_nand_controller *nandc = get_qcom_nand_controller(chip); + +- write_reg_dma(nandc, &nandc->regs->addr0, NAND_ADDR0, 2, 0); +- write_reg_dma(nandc, &nandc->regs->cfg0, NAND_DEV0_CFG0, 3, 0); ++ qcom_write_reg_dma(nandc, &nandc->regs->addr0, NAND_ADDR0, 2, 0); ++ qcom_write_reg_dma(nandc, &nandc->regs->cfg0, NAND_DEV0_CFG0, 3, 0); + if (!nandc->props->qpic_version2) +- write_reg_dma(nandc, &nandc->regs->ecc_buf_cfg, NAND_EBI2_ECC_BUF_CFG, 1, +- NAND_BAM_NEXT_SGL); ++ qcom_write_reg_dma(nandc, &nandc->regs->ecc_buf_cfg, NAND_EBI2_ECC_BUF_CFG, 1, ++ NAND_BAM_NEXT_SGL); + } + + /* +@@ -1253,17 +1252,18 @@ static void config_nand_cw_write(struct nand_chip *chip) + { + struct qcom_nand_controller *nandc = get_qcom_nand_controller(chip); + +- write_reg_dma(nandc, &nandc->regs->cmd, NAND_FLASH_CMD, 1, NAND_BAM_NEXT_SGL); +- write_reg_dma(nandc, &nandc->regs->exec, NAND_EXEC_CMD, 1, NAND_BAM_NEXT_SGL); ++ qcom_write_reg_dma(nandc, &nandc->regs->cmd, NAND_FLASH_CMD, 1, NAND_BAM_NEXT_SGL); ++ qcom_write_reg_dma(nandc, &nandc->regs->exec, NAND_EXEC_CMD, 1, NAND_BAM_NEXT_SGL); + +- read_reg_dma(nandc, NAND_FLASH_STATUS, 1, NAND_BAM_NEXT_SGL); ++ qcom_read_reg_dma(nandc, NAND_FLASH_STATUS, 1, NAND_BAM_NEXT_SGL); + +- write_reg_dma(nandc, &nandc->regs->clrflashstatus, NAND_FLASH_STATUS, 1, 0); +- write_reg_dma(nandc, &nandc->regs->clrreadstatus, NAND_READ_STATUS, 1, NAND_BAM_NEXT_SGL); ++ qcom_write_reg_dma(nandc, &nandc->regs->clrflashstatus, NAND_FLASH_STATUS, 1, 0); ++ qcom_write_reg_dma(nandc, &nandc->regs->clrreadstatus, NAND_READ_STATUS, 1, ++ NAND_BAM_NEXT_SGL); + } + + /* helpers to submit/free our list of dma descriptors */ +-static int submit_descs(struct qcom_nand_controller *nandc) ++static int qcom_submit_descs(struct qcom_nand_controller *nandc) + { + struct desc_info *desc, *n; + dma_cookie_t cookie = 0; +@@ -1272,21 +1272,21 @@ static int submit_descs(struct qcom_nand_controller *nandc) + + if (nandc->props->supports_bam) { + if (bam_txn->rx_sgl_pos > bam_txn->rx_sgl_start) { +- ret = prepare_bam_async_desc(nandc, nandc->rx_chan, 0); ++ ret = qcom_prepare_bam_async_desc(nandc, nandc->rx_chan, 0); + if (ret) + goto err_unmap_free_desc; + } + + if (bam_txn->tx_sgl_pos > bam_txn->tx_sgl_start) { +- ret = prepare_bam_async_desc(nandc, nandc->tx_chan, +- DMA_PREP_INTERRUPT); ++ ret = qcom_prepare_bam_async_desc(nandc, nandc->tx_chan, ++ 
DMA_PREP_INTERRUPT); + if (ret) + goto err_unmap_free_desc; + } + + if (bam_txn->cmd_sgl_pos > bam_txn->cmd_sgl_start) { +- ret = prepare_bam_async_desc(nandc, nandc->cmd_chan, +- DMA_PREP_CMD); ++ ret = qcom_prepare_bam_async_desc(nandc, nandc->cmd_chan, ++ DMA_PREP_CMD); + if (ret) + goto err_unmap_free_desc; + } +@@ -1296,7 +1296,7 @@ static int submit_descs(struct qcom_nand_controller *nandc) + cookie = dmaengine_submit(desc->dma_desc); + + if (nandc->props->supports_bam) { +- bam_txn->last_cmd_desc->callback = qpic_bam_dma_done; ++ bam_txn->last_cmd_desc->callback = qcom_qpic_bam_dma_done; + bam_txn->last_cmd_desc->callback_param = bam_txn; + + dma_async_issue_pending(nandc->tx_chan); +@@ -1314,7 +1314,7 @@ static int submit_descs(struct qcom_nand_controller *nandc) + err_unmap_free_desc: + /* + * Unmap the dma sg_list and free the desc allocated by both +- * prepare_bam_async_desc() and prep_adm_dma_desc() functions. ++ * qcom_prepare_bam_async_desc() and qcom_prep_adm_dma_desc() functions. + */ + list_for_each_entry_safe(desc, n, &nandc->desc_list, node) { + list_del(&desc->node); +@@ -1333,10 +1333,10 @@ static int submit_descs(struct qcom_nand_controller *nandc) + } + + /* reset the register read buffer for next NAND operation */ +-static void clear_read_regs(struct qcom_nand_controller *nandc) ++static void qcom_clear_read_regs(struct qcom_nand_controller *nandc) + { + nandc->reg_read_pos = 0; +- nandc_dev_to_mem(nandc, false); ++ qcom_nandc_dev_to_mem(nandc, false); + } + + /* +@@ -1400,7 +1400,7 @@ static int check_flash_errors(struct qcom_nand_host *host, int cw_cnt) + struct qcom_nand_controller *nandc = get_qcom_nand_controller(chip); + int i; + +- nandc_dev_to_mem(nandc, true); ++ qcom_nandc_dev_to_mem(nandc, true); + + for (i = 0; i < cw_cnt; i++) { + u32 flash = le32_to_cpu(nandc->reg_read_buf[i]); +@@ -1427,13 +1427,13 @@ qcom_nandc_read_cw_raw(struct mtd_info *mtd, struct nand_chip *chip, + nand_read_page_op(chip, page, 0, NULL, 0); + nandc->buf_count = 0; + nandc->buf_start = 0; +- clear_read_regs(nandc); ++ qcom_clear_read_regs(nandc); + host->use_ecc = false; + + if (nandc->props->qpic_version2) + raw_cw = ecc->steps - 1; + +- clear_bam_transaction(nandc); ++ qcom_clear_bam_transaction(nandc); + set_address(host, host->cw_size * cw, page); + update_rw_regs(host, 1, true, raw_cw); + config_nand_page_read(chip); +@@ -1466,18 +1466,18 @@ qcom_nandc_read_cw_raw(struct mtd_info *mtd, struct nand_chip *chip, + + config_nand_cw_read(chip, false, raw_cw); + +- read_data_dma(nandc, reg_off, data_buf, data_size1, 0); ++ qcom_read_data_dma(nandc, reg_off, data_buf, data_size1, 0); + reg_off += data_size1; + +- read_data_dma(nandc, reg_off, oob_buf, oob_size1, 0); ++ qcom_read_data_dma(nandc, reg_off, oob_buf, oob_size1, 0); + reg_off += oob_size1; + +- read_data_dma(nandc, reg_off, data_buf + data_size1, data_size2, 0); ++ qcom_read_data_dma(nandc, reg_off, data_buf + data_size1, data_size2, 0); + reg_off += data_size2; + +- read_data_dma(nandc, reg_off, oob_buf + oob_size1, oob_size2, 0); ++ qcom_read_data_dma(nandc, reg_off, oob_buf + oob_size1, oob_size2, 0); + +- ret = submit_descs(nandc); ++ ret = qcom_submit_descs(nandc); + if (ret) { + dev_err(nandc->dev, "failure to read raw cw %d\n", cw); + return ret; +@@ -1575,7 +1575,7 @@ static int parse_read_errors(struct qcom_nand_host *host, u8 *data_buf, + u8 *data_buf_start = data_buf, *oob_buf_start = oob_buf; + + buf = (struct read_stats *)nandc->reg_read_buf; +- nandc_dev_to_mem(nandc, true); ++ qcom_nandc_dev_to_mem(nandc, 
true); + + for (i = 0; i < ecc->steps; i++, buf++) { + u32 flash, buffer, erased_cw; +@@ -1704,8 +1704,8 @@ static int read_page_ecc(struct qcom_nand_host *host, u8 *data_buf, + config_nand_cw_read(chip, true, i); + + if (data_buf) +- read_data_dma(nandc, FLASH_BUF_ACC, data_buf, +- data_size, 0); ++ qcom_read_data_dma(nandc, FLASH_BUF_ACC, data_buf, ++ data_size, 0); + + /* + * when ecc is enabled, the controller doesn't read the real +@@ -1720,8 +1720,8 @@ static int read_page_ecc(struct qcom_nand_host *host, u8 *data_buf, + for (j = 0; j < host->bbm_size; j++) + *oob_buf++ = 0xff; + +- read_data_dma(nandc, FLASH_BUF_ACC + data_size, +- oob_buf, oob_size, 0); ++ qcom_read_data_dma(nandc, FLASH_BUF_ACC + data_size, ++ oob_buf, oob_size, 0); + } + + if (data_buf) +@@ -1730,7 +1730,7 @@ static int read_page_ecc(struct qcom_nand_host *host, u8 *data_buf, + oob_buf += oob_size; + } + +- ret = submit_descs(nandc); ++ ret = qcom_submit_descs(nandc); + if (ret) { + dev_err(nandc->dev, "failure to read page/oob\n"); + return ret; +@@ -1751,7 +1751,7 @@ static int copy_last_cw(struct qcom_nand_host *host, int page) + int size; + int ret; + +- clear_read_regs(nandc); ++ qcom_clear_read_regs(nandc); + + size = host->use_ecc ? host->cw_data : host->cw_size; + +@@ -1763,9 +1763,9 @@ static int copy_last_cw(struct qcom_nand_host *host, int page) + + config_nand_single_cw_page_read(chip, host->use_ecc, ecc->steps - 1); + +- read_data_dma(nandc, FLASH_BUF_ACC, nandc->data_buffer, size, 0); ++ qcom_read_data_dma(nandc, FLASH_BUF_ACC, nandc->data_buffer, size, 0); + +- ret = submit_descs(nandc); ++ ret = qcom_submit_descs(nandc); + if (ret) + dev_err(nandc->dev, "failed to copy last codeword\n"); + +@@ -1851,14 +1851,14 @@ static int qcom_nandc_read_page(struct nand_chip *chip, u8 *buf, + nandc->buf_count = 0; + nandc->buf_start = 0; + host->use_ecc = true; +- clear_read_regs(nandc); ++ qcom_clear_read_regs(nandc); + set_address(host, 0, page); + update_rw_regs(host, ecc->steps, true, 0); + + data_buf = buf; + oob_buf = oob_required ? chip->oob_poi : NULL; + +- clear_bam_transaction(nandc); ++ qcom_clear_bam_transaction(nandc); + + return read_page_ecc(host, data_buf, oob_buf, page); + } +@@ -1899,8 +1899,8 @@ static int qcom_nandc_read_oob(struct nand_chip *chip, int page) + if (host->nr_boot_partitions) + qcom_nandc_codeword_fixup(host, page); + +- clear_read_regs(nandc); +- clear_bam_transaction(nandc); ++ qcom_clear_read_regs(nandc); ++ qcom_clear_bam_transaction(nandc); + + host->use_ecc = true; + set_address(host, 0, page); +@@ -1927,8 +1927,8 @@ static int qcom_nandc_write_page(struct nand_chip *chip, const u8 *buf, + set_address(host, 0, page); + nandc->buf_count = 0; + nandc->buf_start = 0; +- clear_read_regs(nandc); +- clear_bam_transaction(nandc); ++ qcom_clear_read_regs(nandc); ++ qcom_clear_bam_transaction(nandc); + + data_buf = (u8 *)buf; + oob_buf = chip->oob_poi; +@@ -1949,8 +1949,8 @@ static int qcom_nandc_write_page(struct nand_chip *chip, const u8 *buf, + oob_size = ecc->bytes; + } + +- write_data_dma(nandc, FLASH_BUF_ACC, data_buf, data_size, +- i == (ecc->steps - 1) ? NAND_BAM_NO_EOT : 0); ++ qcom_write_data_dma(nandc, FLASH_BUF_ACC, data_buf, data_size, ++ i == (ecc->steps - 1) ? 
NAND_BAM_NO_EOT : 0); + + /* + * when ECC is enabled, we don't really need to write anything +@@ -1962,8 +1962,8 @@ static int qcom_nandc_write_page(struct nand_chip *chip, const u8 *buf, + if (qcom_nandc_is_last_cw(ecc, i)) { + oob_buf += host->bbm_size; + +- write_data_dma(nandc, FLASH_BUF_ACC + data_size, +- oob_buf, oob_size, 0); ++ qcom_write_data_dma(nandc, FLASH_BUF_ACC + data_size, ++ oob_buf, oob_size, 0); + } + + config_nand_cw_write(chip); +@@ -1972,7 +1972,7 @@ static int qcom_nandc_write_page(struct nand_chip *chip, const u8 *buf, + oob_buf += oob_size; + } + +- ret = submit_descs(nandc); ++ ret = qcom_submit_descs(nandc); + if (ret) { + dev_err(nandc->dev, "failure to write page\n"); + return ret; +@@ -1997,8 +1997,8 @@ static int qcom_nandc_write_page_raw(struct nand_chip *chip, + qcom_nandc_codeword_fixup(host, page); + + nand_prog_page_begin_op(chip, page, 0, NULL, 0); +- clear_read_regs(nandc); +- clear_bam_transaction(nandc); ++ qcom_clear_read_regs(nandc); ++ qcom_clear_bam_transaction(nandc); + + data_buf = (u8 *)buf; + oob_buf = chip->oob_poi; +@@ -2024,28 +2024,28 @@ static int qcom_nandc_write_page_raw(struct nand_chip *chip, + oob_size2 = host->ecc_bytes_hw + host->spare_bytes; + } + +- write_data_dma(nandc, reg_off, data_buf, data_size1, +- NAND_BAM_NO_EOT); ++ qcom_write_data_dma(nandc, reg_off, data_buf, data_size1, ++ NAND_BAM_NO_EOT); + reg_off += data_size1; + data_buf += data_size1; + +- write_data_dma(nandc, reg_off, oob_buf, oob_size1, +- NAND_BAM_NO_EOT); ++ qcom_write_data_dma(nandc, reg_off, oob_buf, oob_size1, ++ NAND_BAM_NO_EOT); + reg_off += oob_size1; + oob_buf += oob_size1; + +- write_data_dma(nandc, reg_off, data_buf, data_size2, +- NAND_BAM_NO_EOT); ++ qcom_write_data_dma(nandc, reg_off, data_buf, data_size2, ++ NAND_BAM_NO_EOT); + reg_off += data_size2; + data_buf += data_size2; + +- write_data_dma(nandc, reg_off, oob_buf, oob_size2, 0); ++ qcom_write_data_dma(nandc, reg_off, oob_buf, oob_size2, 0); + oob_buf += oob_size2; + + config_nand_cw_write(chip); + } + +- ret = submit_descs(nandc); ++ ret = qcom_submit_descs(nandc); + if (ret) { + dev_err(nandc->dev, "failure to write raw page\n"); + return ret; +@@ -2075,7 +2075,7 @@ static int qcom_nandc_write_oob(struct nand_chip *chip, int page) + qcom_nandc_codeword_fixup(host, page); + + host->use_ecc = true; +- clear_bam_transaction(nandc); ++ qcom_clear_bam_transaction(nandc); + + /* calculate the data and oob size for the last codeword/step */ + data_size = ecc->size - ((ecc->steps - 1) << 2); +@@ -2090,11 +2090,11 @@ static int qcom_nandc_write_oob(struct nand_chip *chip, int page) + update_rw_regs(host, 1, false, 0); + + config_nand_page_write(chip); +- write_data_dma(nandc, FLASH_BUF_ACC, +- nandc->data_buffer, data_size + oob_size, 0); ++ qcom_write_data_dma(nandc, FLASH_BUF_ACC, ++ nandc->data_buffer, data_size + oob_size, 0); + config_nand_cw_write(chip); + +- ret = submit_descs(nandc); ++ ret = qcom_submit_descs(nandc); + if (ret) { + dev_err(nandc->dev, "failure to write oob\n"); + return ret; +@@ -2121,7 +2121,7 @@ static int qcom_nandc_block_bad(struct nand_chip *chip, loff_t ofs) + */ + host->use_ecc = false; + +- clear_bam_transaction(nandc); ++ qcom_clear_bam_transaction(nandc); + ret = copy_last_cw(host, page); + if (ret) + goto err; +@@ -2148,8 +2148,8 @@ static int qcom_nandc_block_markbad(struct nand_chip *chip, loff_t ofs) + struct nand_ecc_ctrl *ecc = &chip->ecc; + int page, ret; + +- clear_read_regs(nandc); +- clear_bam_transaction(nandc); ++ qcom_clear_read_regs(nandc); ++ 
qcom_clear_bam_transaction(nandc); + + /* + * to mark the BBM as bad, we flash the entire last codeword with 0s. +@@ -2166,11 +2166,11 @@ static int qcom_nandc_block_markbad(struct nand_chip *chip, loff_t ofs) + update_rw_regs(host, 1, false, ecc->steps - 1); + + config_nand_page_write(chip); +- write_data_dma(nandc, FLASH_BUF_ACC, +- nandc->data_buffer, host->cw_size, 0); ++ qcom_write_data_dma(nandc, FLASH_BUF_ACC, ++ nandc->data_buffer, host->cw_size, 0); + config_nand_cw_write(chip); + +- ret = submit_descs(nandc); ++ ret = qcom_submit_descs(nandc); + if (ret) { + dev_err(nandc->dev, "failure to update BBM\n"); + return ret; +@@ -2410,14 +2410,14 @@ static int qcom_nand_attach_chip(struct nand_chip *chip) + mtd_set_ooblayout(mtd, &qcom_nand_ooblayout_ops); + /* Free the initially allocated BAM transaction for reading the ONFI params */ + if (nandc->props->supports_bam) +- free_bam_transaction(nandc); ++ qcom_free_bam_transaction(nandc); + + nandc->max_cwperpage = max_t(unsigned int, nandc->max_cwperpage, + cwperpage); + + /* Now allocate the BAM transaction based on updated max_cwperpage */ + if (nandc->props->supports_bam) { +- nandc->bam_txn = alloc_bam_transaction(nandc); ++ nandc->bam_txn = qcom_alloc_bam_transaction(nandc); + if (!nandc->bam_txn) { + dev_err(nandc->dev, + "failed to allocate bam transaction\n"); +@@ -2617,7 +2617,7 @@ static int qcom_wait_rdy_poll(struct nand_chip *chip, unsigned int time_ms) + unsigned long start = jiffies + msecs_to_jiffies(time_ms); + u32 flash; + +- nandc_dev_to_mem(nandc, true); ++ qcom_nandc_dev_to_mem(nandc, true); + + do { + flash = le32_to_cpu(nandc->reg_read_buf[0]); +@@ -2657,23 +2657,23 @@ static int qcom_read_status_exec(struct nand_chip *chip, + nandc->buf_start = 0; + host->use_ecc = false; + +- clear_read_regs(nandc); +- clear_bam_transaction(nandc); ++ qcom_clear_read_regs(nandc); ++ qcom_clear_bam_transaction(nandc); + + nandc->regs->cmd = q_op.cmd_reg; + nandc->regs->exec = cpu_to_le32(1); + +- write_reg_dma(nandc, &nandc->regs->cmd, NAND_FLASH_CMD, 1, NAND_BAM_NEXT_SGL); +- write_reg_dma(nandc, &nandc->regs->exec, NAND_EXEC_CMD, 1, NAND_BAM_NEXT_SGL); +- read_reg_dma(nandc, NAND_FLASH_STATUS, 1, NAND_BAM_NEXT_SGL); ++ qcom_write_reg_dma(nandc, &nandc->regs->cmd, NAND_FLASH_CMD, 1, NAND_BAM_NEXT_SGL); ++ qcom_write_reg_dma(nandc, &nandc->regs->exec, NAND_EXEC_CMD, 1, NAND_BAM_NEXT_SGL); ++ qcom_read_reg_dma(nandc, NAND_FLASH_STATUS, 1, NAND_BAM_NEXT_SGL); + +- ret = submit_descs(nandc); ++ ret = qcom_submit_descs(nandc); + if (ret) { + dev_err(nandc->dev, "failure in submitting status descriptor\n"); + goto err_out; + } + +- nandc_dev_to_mem(nandc, true); ++ qcom_nandc_dev_to_mem(nandc, true); + + for (i = 0; i < num_cw; i++) { + flash_status = le32_to_cpu(nandc->reg_read_buf[i]); +@@ -2714,8 +2714,8 @@ static int qcom_read_id_type_exec(struct nand_chip *chip, const struct nand_subo + nandc->buf_start = 0; + host->use_ecc = false; + +- clear_read_regs(nandc); +- clear_bam_transaction(nandc); ++ qcom_clear_read_regs(nandc); ++ qcom_clear_bam_transaction(nandc); + + nandc->regs->cmd = q_op.cmd_reg; + nandc->regs->addr0 = q_op.addr1_reg; +@@ -2723,12 +2723,12 @@ static int qcom_read_id_type_exec(struct nand_chip *chip, const struct nand_subo + nandc->regs->chip_sel = cpu_to_le32(nandc->props->supports_bam ? 
0 : DM_EN); + nandc->regs->exec = cpu_to_le32(1); + +- write_reg_dma(nandc, &nandc->regs->cmd, NAND_FLASH_CMD, 4, NAND_BAM_NEXT_SGL); +- write_reg_dma(nandc, &nandc->regs->exec, NAND_EXEC_CMD, 1, NAND_BAM_NEXT_SGL); ++ qcom_write_reg_dma(nandc, &nandc->regs->cmd, NAND_FLASH_CMD, 4, NAND_BAM_NEXT_SGL); ++ qcom_write_reg_dma(nandc, &nandc->regs->exec, NAND_EXEC_CMD, 1, NAND_BAM_NEXT_SGL); + +- read_reg_dma(nandc, NAND_READ_ID, 1, NAND_BAM_NEXT_SGL); ++ qcom_read_reg_dma(nandc, NAND_READ_ID, 1, NAND_BAM_NEXT_SGL); + +- ret = submit_descs(nandc); ++ ret = qcom_submit_descs(nandc); + if (ret) { + dev_err(nandc->dev, "failure in submitting read id descriptor\n"); + goto err_out; +@@ -2738,7 +2738,7 @@ static int qcom_read_id_type_exec(struct nand_chip *chip, const struct nand_subo + op_id = q_op.data_instr_idx; + len = nand_subop_get_data_len(subop, op_id); + +- nandc_dev_to_mem(nandc, true); ++ qcom_nandc_dev_to_mem(nandc, true); + memcpy(instr->ctx.data.buf.in, nandc->reg_read_buf, len); + + err_out: +@@ -2774,20 +2774,20 @@ static int qcom_misc_cmd_type_exec(struct nand_chip *chip, const struct nand_sub + nandc->buf_start = 0; + host->use_ecc = false; + +- clear_read_regs(nandc); +- clear_bam_transaction(nandc); ++ qcom_clear_read_regs(nandc); ++ qcom_clear_bam_transaction(nandc); + + nandc->regs->cmd = q_op.cmd_reg; + nandc->regs->exec = cpu_to_le32(1); + +- write_reg_dma(nandc, &nandc->regs->cmd, NAND_FLASH_CMD, instrs, NAND_BAM_NEXT_SGL); ++ qcom_write_reg_dma(nandc, &nandc->regs->cmd, NAND_FLASH_CMD, instrs, NAND_BAM_NEXT_SGL); + if (q_op.cmd_reg == cpu_to_le32(OP_BLOCK_ERASE)) +- write_reg_dma(nandc, &nandc->regs->cfg0, NAND_DEV0_CFG0, 2, NAND_BAM_NEXT_SGL); ++ qcom_write_reg_dma(nandc, &nandc->regs->cfg0, NAND_DEV0_CFG0, 2, NAND_BAM_NEXT_SGL); + +- write_reg_dma(nandc, &nandc->regs->exec, NAND_EXEC_CMD, 1, NAND_BAM_NEXT_SGL); +- read_reg_dma(nandc, NAND_FLASH_STATUS, 1, NAND_BAM_NEXT_SGL); ++ qcom_write_reg_dma(nandc, &nandc->regs->exec, NAND_EXEC_CMD, 1, NAND_BAM_NEXT_SGL); ++ qcom_read_reg_dma(nandc, NAND_FLASH_STATUS, 1, NAND_BAM_NEXT_SGL); + +- ret = submit_descs(nandc); ++ ret = qcom_submit_descs(nandc); + if (ret) { + dev_err(nandc->dev, "failure in submitting misc descriptor\n"); + goto err_out; +@@ -2820,8 +2820,8 @@ static int qcom_param_page_type_exec(struct nand_chip *chip, const struct nand_ + nandc->buf_count = 0; + nandc->buf_start = 0; + host->use_ecc = false; +- clear_read_regs(nandc); +- clear_bam_transaction(nandc); ++ qcom_clear_read_regs(nandc); ++ qcom_clear_bam_transaction(nandc); + + nandc->regs->cmd = q_op.cmd_reg; + nandc->regs->addr0 = 0; +@@ -2864,8 +2864,8 @@ static int qcom_param_page_type_exec(struct nand_chip *chip, const struct nand_ + nandc_set_read_loc(chip, 0, 0, 0, len, 1); + + if (!nandc->props->qpic_version2) { +- write_reg_dma(nandc, &nandc->regs->vld, NAND_DEV_CMD_VLD, 1, 0); +- write_reg_dma(nandc, &nandc->regs->cmd1, NAND_DEV_CMD1, 1, NAND_BAM_NEXT_SGL); ++ qcom_write_reg_dma(nandc, &nandc->regs->vld, NAND_DEV_CMD_VLD, 1, 0); ++ qcom_write_reg_dma(nandc, &nandc->regs->cmd1, NAND_DEV_CMD1, 1, NAND_BAM_NEXT_SGL); + } + + nandc->buf_count = len; +@@ -2873,17 +2873,17 @@ static int qcom_param_page_type_exec(struct nand_chip *chip, const struct nand_ + + config_nand_single_cw_page_read(chip, false, 0); + +- read_data_dma(nandc, FLASH_BUF_ACC, nandc->data_buffer, +- nandc->buf_count, 0); ++ qcom_read_data_dma(nandc, FLASH_BUF_ACC, nandc->data_buffer, ++ nandc->buf_count, 0); + + /* restore CMD1 and VLD regs */ + if (!nandc->props->qpic_version2) { +- 
write_reg_dma(nandc, &nandc->regs->orig_cmd1, NAND_DEV_CMD1_RESTORE, 1, 0); +- write_reg_dma(nandc, &nandc->regs->orig_vld, NAND_DEV_CMD_VLD_RESTORE, 1, +- NAND_BAM_NEXT_SGL); ++ qcom_write_reg_dma(nandc, &nandc->regs->orig_cmd1, NAND_DEV_CMD1_RESTORE, 1, 0); ++ qcom_write_reg_dma(nandc, &nandc->regs->orig_vld, NAND_DEV_CMD_VLD_RESTORE, 1, ++ NAND_BAM_NEXT_SGL); + } + +- ret = submit_descs(nandc); ++ ret = qcom_submit_descs(nandc); + if (ret) { + dev_err(nandc->dev, "failure in submitting param page descriptor\n"); + goto err_out; +@@ -3067,7 +3067,7 @@ static int qcom_nandc_alloc(struct qcom_nand_controller *nandc) + * maximum codeword size + */ + nandc->max_cwperpage = 1; +- nandc->bam_txn = alloc_bam_transaction(nandc); ++ nandc->bam_txn = qcom_alloc_bam_transaction(nandc); + if (!nandc->bam_txn) { + dev_err(nandc->dev, + "failed to allocate bam transaction\n"); diff --git a/target/linux/qualcommax/patches-6.6/0404-mtd-nand-Add-qpic_common-API-file.patch b/target/linux/qualcommax/patches-6.6/0404-mtd-nand-Add-qpic_common-API-file.patch new file mode 100644 index 000000000..72e332d25 --- /dev/null +++ b/target/linux/qualcommax/patches-6.6/0404-mtd-nand-Add-qpic_common-API-file.patch @@ -0,0 +1,2430 @@ +From: Md Sadre Alam +Date: Sun, 22 Sep 2024 17:03:47 +0530 +Subject: [PATCH] mtd: nand: Add qpic_common API file + +Add qpic_common.c file which hold all the common +qpic APIs which will be used by both qpic raw nand +driver and qpic spi nand driver. + +Signed-off-by: Md Sadre Alam +--- +diff --git a/drivers/mtd/nand/Makefile b/drivers/mtd/nand/Makefile +index 19e1291ac4d5..760a6e4efdac 100644 +--- a/drivers/mtd/nand/Makefile ++++ b/drivers/mtd/nand/Makefile +@@ -4,6 +4,10 @@ nandcore-objs := core.o bbt.o + obj-$(CONFIG_MTD_NAND_CORE) += nandcore.o + obj-$(CONFIG_MTD_NAND_ECC_MEDIATEK) += ecc-mtk.o + ++ifeq ($(CONFIG_MTD_NAND_QCOM),y) ++obj-y += qpic_common.o ++endif ++ + obj-y += onenand/ + obj-y += raw/ + obj-y += spi/ +diff --git a/drivers/mtd/nand/qpic_common.c b/drivers/mtd/nand/qpic_common.c +new file mode 100644 +index 000000000000..2fe1a82307b4 +--- /dev/null ++++ b/drivers/mtd/nand/qpic_common.c +@@ -0,0 +1,738 @@ ++// SPDX-License-Identifier: GPL-2.0-only ++/* ++ * Copyright (c) 2016, The Linux Foundation. All rights reserved. 
++ */ ++#include ++#include ++#include ++#include ++#include ++#include ++#include ++#include ++#include ++#include ++#include ++ ++/** ++ * qcom_free_bam_transaction() - Frees the BAM transaction memory ++ * @nandc: qpic nand controller ++ * ++ * This function frees the bam transaction memory ++ */ ++void qcom_free_bam_transaction(struct qcom_nand_controller *nandc) ++{ ++ struct bam_transaction *bam_txn = nandc->bam_txn; ++ ++ kfree(bam_txn); ++} ++ ++/** ++ * qcom_alloc_bam_transaction() - allocate BAM transaction ++ * @nandc: qpic nand controller ++ * ++ * This function will allocate and initialize the BAM transaction structure ++ */ ++struct bam_transaction * ++qcom_alloc_bam_transaction(struct qcom_nand_controller *nandc) ++{ ++ struct bam_transaction *bam_txn; ++ size_t bam_txn_size; ++ unsigned int num_cw = nandc->max_cwperpage; ++ void *bam_txn_buf; ++ ++ bam_txn_size = ++ sizeof(*bam_txn) + num_cw * ++ ((sizeof(*bam_txn->bam_ce) * QPIC_PER_CW_CMD_ELEMENTS) + ++ (sizeof(*bam_txn->cmd_sgl) * QPIC_PER_CW_CMD_SGL) + ++ (sizeof(*bam_txn->data_sgl) * QPIC_PER_CW_DATA_SGL)); ++ ++ bam_txn_buf = kzalloc(bam_txn_size, GFP_KERNEL); ++ if (!bam_txn_buf) ++ return NULL; ++ ++ bam_txn = bam_txn_buf; ++ bam_txn_buf += sizeof(*bam_txn); ++ ++ bam_txn->bam_ce = bam_txn_buf; ++ bam_txn_buf += ++ sizeof(*bam_txn->bam_ce) * QPIC_PER_CW_CMD_ELEMENTS * num_cw; ++ ++ bam_txn->cmd_sgl = bam_txn_buf; ++ bam_txn_buf += ++ sizeof(*bam_txn->cmd_sgl) * QPIC_PER_CW_CMD_SGL * num_cw; ++ ++ bam_txn->data_sgl = bam_txn_buf; ++ ++ init_completion(&bam_txn->txn_done); ++ ++ return bam_txn; ++} ++ ++/** ++ * qcom_clear_bam_transaction() - Clears the BAM transaction ++ * @nandc: qpic nand controller ++ * ++ * This function will clear the BAM transaction indexes. ++ */ ++void qcom_clear_bam_transaction(struct qcom_nand_controller *nandc) ++{ ++ struct bam_transaction *bam_txn = nandc->bam_txn; ++ ++ if (!nandc->props->supports_bam) ++ return; ++ ++ memset(&bam_txn->bam_ce_pos, 0, sizeof(u32) * 8); ++ bam_txn->last_data_desc = NULL; ++ ++ sg_init_table(bam_txn->cmd_sgl, nandc->max_cwperpage * ++ QPIC_PER_CW_CMD_SGL); ++ sg_init_table(bam_txn->data_sgl, nandc->max_cwperpage * ++ QPIC_PER_CW_DATA_SGL); ++ ++ reinit_completion(&bam_txn->txn_done); ++} ++ ++/** ++ * qcom_qpic_bam_dma_done() - Callback for DMA descriptor completion ++ * @data: data pointer ++ * ++ * This function is a callback for DMA descriptor completion ++ */ ++void qcom_qpic_bam_dma_done(void *data) ++{ ++ struct bam_transaction *bam_txn = data; ++ ++ complete(&bam_txn->txn_done); ++} ++ ++/** ++ * qcom_nandc_dev_to_mem() - Check for dma sync for cpu or device ++ * @nandc: qpic nand controller ++ * @is_cpu: cpu or Device ++ * ++ * This function will check for dma sync for cpu or device ++ */ ++inline void qcom_nandc_dev_to_mem(struct qcom_nand_controller *nandc, bool is_cpu) ++{ ++ if (!nandc->props->supports_bam) ++ return; ++ ++ if (is_cpu) ++ dma_sync_single_for_cpu(nandc->dev, nandc->reg_read_dma, ++ MAX_REG_RD * ++ sizeof(*nandc->reg_read_buf), ++ DMA_FROM_DEVICE); ++ else ++ dma_sync_single_for_device(nandc->dev, nandc->reg_read_dma, ++ MAX_REG_RD * ++ sizeof(*nandc->reg_read_buf), ++ DMA_FROM_DEVICE); ++} ++ ++/** ++ * qcom_prepare_bam_async_desc() - Prepare DMA descriptor ++ * @nandc: qpic nand controller ++ * @chan: dma channel ++ * @flags: flags to control DMA descriptor preparation ++ * ++ * This function maps the scatter gather list for DMA transfer and forms the ++ * DMA descriptor for BAM.This descriptor will be added in the NAND DMA ++ 
* descriptor queue which will be submitted to DMA engine. ++ */ ++int qcom_prepare_bam_async_desc(struct qcom_nand_controller *nandc, ++ struct dma_chan *chan, unsigned long flags) ++{ ++ struct desc_info *desc; ++ struct scatterlist *sgl; ++ unsigned int sgl_cnt; ++ int ret; ++ struct bam_transaction *bam_txn = nandc->bam_txn; ++ enum dma_transfer_direction dir_eng; ++ struct dma_async_tx_descriptor *dma_desc; ++ ++ desc = kzalloc(sizeof(*desc), GFP_KERNEL); ++ if (!desc) ++ return -ENOMEM; ++ ++ if (chan == nandc->cmd_chan) { ++ sgl = &bam_txn->cmd_sgl[bam_txn->cmd_sgl_start]; ++ sgl_cnt = bam_txn->cmd_sgl_pos - bam_txn->cmd_sgl_start; ++ bam_txn->cmd_sgl_start = bam_txn->cmd_sgl_pos; ++ dir_eng = DMA_MEM_TO_DEV; ++ desc->dir = DMA_TO_DEVICE; ++ } else if (chan == nandc->tx_chan) { ++ sgl = &bam_txn->data_sgl[bam_txn->tx_sgl_start]; ++ sgl_cnt = bam_txn->tx_sgl_pos - bam_txn->tx_sgl_start; ++ bam_txn->tx_sgl_start = bam_txn->tx_sgl_pos; ++ dir_eng = DMA_MEM_TO_DEV; ++ desc->dir = DMA_TO_DEVICE; ++ } else { ++ sgl = &bam_txn->data_sgl[bam_txn->rx_sgl_start]; ++ sgl_cnt = bam_txn->rx_sgl_pos - bam_txn->rx_sgl_start; ++ bam_txn->rx_sgl_start = bam_txn->rx_sgl_pos; ++ dir_eng = DMA_DEV_TO_MEM; ++ desc->dir = DMA_FROM_DEVICE; ++ } ++ ++ sg_mark_end(sgl + sgl_cnt - 1); ++ ret = dma_map_sg(nandc->dev, sgl, sgl_cnt, desc->dir); ++ if (ret == 0) { ++ dev_err(nandc->dev, "failure in mapping desc\n"); ++ kfree(desc); ++ return -ENOMEM; ++ } ++ ++ desc->sgl_cnt = sgl_cnt; ++ desc->bam_sgl = sgl; ++ ++ dma_desc = dmaengine_prep_slave_sg(chan, sgl, sgl_cnt, dir_eng, ++ flags); ++ ++ if (!dma_desc) { ++ dev_err(nandc->dev, "failure in prep desc\n"); ++ dma_unmap_sg(nandc->dev, sgl, sgl_cnt, desc->dir); ++ kfree(desc); ++ return -EINVAL; ++ } ++ ++ desc->dma_desc = dma_desc; ++ ++ /* update last data/command descriptor */ ++ if (chan == nandc->cmd_chan) ++ bam_txn->last_cmd_desc = dma_desc; ++ else ++ bam_txn->last_data_desc = dma_desc; ++ ++ list_add_tail(&desc->node, &nandc->desc_list); ++ ++ return 0; ++} ++ ++/** ++ * qcom_prep_bam_dma_desc_cmd() - Prepares the command descriptor for BAM DMA ++ * @nandc: qpic nand controller ++ * @read: read or write type ++ * @reg_off: offset within the controller's data buffer ++ * @vaddr: virtual address of the buffer we want to write to ++ * @size: DMA transaction size in bytes ++ * @flags: flags to control DMA descriptor preparation ++ * ++ * This function will prepares the command descriptor for BAM DMA ++ * which will be used for NAND register reads and writes. 
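/*
 * Editor's note -- illustrative sketch only, not part of the patch.
 * qcom_prepare_bam_async_desc() above consumes the scatterlist entries
 * queued since the previous flush: it takes the window between *_sgl_start
 * and *_sgl_pos, marks the end of that run, maps it for DMA, and advances
 * the start index so the next descriptor begins a fresh window.  The
 * stand-alone model below uses hypothetical names and plain counters in
 * place of scatterlists to show only that bookkeeping.
 */
#include <stdio.h>

struct sgl_window {
	unsigned int pos;	/* next free entry (producer index) */
	unsigned int start;	/* first entry not yet turned into a desc */
};

/* queue one entry; mirrors the driver's sg_set_buf() followed by pos++ */
static void window_queue(struct sgl_window *w)
{
	w->pos++;
}

/* flush the pending window; returns how many entries the descriptor covers */
static unsigned int window_flush(struct sgl_window *w)
{
	unsigned int count = w->pos - w->start;

	w->start = w->pos;	/* the next descriptor starts after this one */
	return count;
}

int main(void)
{
	struct sgl_window cmd = { 0, 0 };

	window_queue(&cmd);
	window_queue(&cmd);
	printf("first desc covers %u entries\n", window_flush(&cmd));	/* 2 */

	window_queue(&cmd);
	printf("second desc covers %u entries\n", window_flush(&cmd));	/* 1 */
	return 0;
}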
++ */ ++int qcom_prep_bam_dma_desc_cmd(struct qcom_nand_controller *nandc, bool read, ++ int reg_off, const void *vaddr, ++ int size, unsigned int flags) ++{ ++ int bam_ce_size; ++ int i, ret; ++ struct bam_cmd_element *bam_ce_buffer; ++ struct bam_transaction *bam_txn = nandc->bam_txn; ++ ++ bam_ce_buffer = &bam_txn->bam_ce[bam_txn->bam_ce_pos]; ++ ++ /* fill the command desc */ ++ for (i = 0; i < size; i++) { ++ if (read) ++ bam_prep_ce(&bam_ce_buffer[i], ++ nandc_reg_phys(nandc, reg_off + 4 * i), ++ BAM_READ_COMMAND, ++ reg_buf_dma_addr(nandc, ++ (__le32 *)vaddr + i)); ++ else ++ bam_prep_ce_le32(&bam_ce_buffer[i], ++ nandc_reg_phys(nandc, reg_off + 4 * i), ++ BAM_WRITE_COMMAND, ++ *((__le32 *)vaddr + i)); ++ } ++ ++ bam_txn->bam_ce_pos += size; ++ ++ /* use the separate sgl after this command */ ++ if (flags & NAND_BAM_NEXT_SGL) { ++ bam_ce_buffer = &bam_txn->bam_ce[bam_txn->bam_ce_start]; ++ bam_ce_size = (bam_txn->bam_ce_pos - ++ bam_txn->bam_ce_start) * ++ sizeof(struct bam_cmd_element); ++ sg_set_buf(&bam_txn->cmd_sgl[bam_txn->cmd_sgl_pos], ++ bam_ce_buffer, bam_ce_size); ++ bam_txn->cmd_sgl_pos++; ++ bam_txn->bam_ce_start = bam_txn->bam_ce_pos; ++ ++ if (flags & NAND_BAM_NWD) { ++ ret = qcom_prepare_bam_async_desc(nandc, nandc->cmd_chan, ++ DMA_PREP_FENCE | DMA_PREP_CMD); ++ if (ret) ++ return ret; ++ } ++ } ++ ++ return 0; ++} ++ ++/** ++ * qcom_prep_bam_dma_desc_data() - Prepares the data descriptor for BAM DMA ++ * @nandc: qpic nand controller ++ * @read: read or write type ++ * @vaddr: virtual address of the buffer we want to write to ++ * @size: DMA transaction size in bytes ++ * @flags: flags to control DMA descriptor preparation ++ * ++ * This function will prepares the data descriptor for BAM DMA which ++ * will be used for NAND data reads and writes. 
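/*
 * Editor's note -- illustrative sketch only, not part of the patch.
 * In qcom_prep_bam_dma_desc_cmd() above every register read or write
 * becomes one BAM command element, and NAND_BAM_NEXT_SGL closes the
 * elements gathered so far into a single scatterlist entry whose length is
 * the element count times sizeof(struct bam_cmd_element).  The userspace
 * model below (hypothetical types and names) shows that size calculation.
 */
#include <stdio.h>
#include <stddef.h>

#define SKETCH_NEXT_SGL (1u << 2)	/* stand-in for NAND_BAM_NEXT_SGL */

struct sketch_cmd_element {		/* stand-in for struct bam_cmd_element */
	unsigned int addr;
	unsigned int cmd;
	unsigned int data;
};

struct sketch_txn {
	unsigned int ce_pos;	/* next free command element */
	unsigned int ce_start;	/* first element of the still-open run */
};

/* queue 'nregs' register accesses, optionally closing the run into one sgl */
static size_t queue_regs(struct sketch_txn *t, unsigned int nregs,
			 unsigned int flags)
{
	size_t sgl_bytes = 0;

	t->ce_pos += nregs;	/* one command element per register */

	if (flags & SKETCH_NEXT_SGL) {
		sgl_bytes = (t->ce_pos - t->ce_start) *
			    sizeof(struct sketch_cmd_element);
		t->ce_start = t->ce_pos;	/* start a fresh run */
	}
	return sgl_bytes;
}

int main(void)
{
	struct sketch_txn txn = { 0, 0 };

	queue_regs(&txn, 4, 0);			/* keep accumulating */
	printf("closed run covers %zu bytes\n",
	       queue_regs(&txn, 1, SKETCH_NEXT_SGL));	/* 5 elements total */
	return 0;
}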
++ */ ++int qcom_prep_bam_dma_desc_data(struct qcom_nand_controller *nandc, bool read, ++ const void *vaddr, int size, unsigned int flags) ++{ ++ int ret; ++ struct bam_transaction *bam_txn = nandc->bam_txn; ++ ++ if (read) { ++ sg_set_buf(&bam_txn->data_sgl[bam_txn->rx_sgl_pos], ++ vaddr, size); ++ bam_txn->rx_sgl_pos++; ++ } else { ++ sg_set_buf(&bam_txn->data_sgl[bam_txn->tx_sgl_pos], ++ vaddr, size); ++ bam_txn->tx_sgl_pos++; ++ ++ /* ++ * BAM will only set EOT for DMA_PREP_INTERRUPT so if this flag ++ * is not set, form the DMA descriptor ++ */ ++ if (!(flags & NAND_BAM_NO_EOT)) { ++ ret = qcom_prepare_bam_async_desc(nandc, nandc->tx_chan, ++ DMA_PREP_INTERRUPT); ++ if (ret) ++ return ret; ++ } ++ } ++ ++ return 0; ++} ++ ++/** ++ * qcom_prep_adm_dma_desc() - Prepare descriptor for adma ++ * @nandc: qpic nand controller ++ * @read: read or write type ++ * @reg_off: offset within the controller's data buffer ++ * @vaddr: virtual address of the buffer we want to write to ++ * @size: adm dma transaction size in bytes ++ * @flow_control: flow controller ++ * ++ * This function will prepare descriptor for adma ++ */ ++int qcom_prep_adm_dma_desc(struct qcom_nand_controller *nandc, bool read, ++ int reg_off, const void *vaddr, int size, ++ bool flow_control) ++{ ++ struct qcom_adm_peripheral_config periph_conf = {}; ++ struct dma_async_tx_descriptor *dma_desc; ++ struct dma_slave_config slave_conf = {0}; ++ enum dma_transfer_direction dir_eng; ++ struct desc_info *desc; ++ struct scatterlist *sgl; ++ int ret; ++ ++ desc = kzalloc(sizeof(*desc), GFP_KERNEL); ++ if (!desc) ++ return -ENOMEM; ++ ++ sgl = &desc->adm_sgl; ++ ++ sg_init_one(sgl, vaddr, size); ++ ++ if (read) { ++ dir_eng = DMA_DEV_TO_MEM; ++ desc->dir = DMA_FROM_DEVICE; ++ } else { ++ dir_eng = DMA_MEM_TO_DEV; ++ desc->dir = DMA_TO_DEVICE; ++ } ++ ++ ret = dma_map_sg(nandc->dev, sgl, 1, desc->dir); ++ if (!ret) { ++ ret = -ENOMEM; ++ goto err; ++ } ++ ++ slave_conf.device_fc = flow_control; ++ if (read) { ++ slave_conf.src_maxburst = 16; ++ slave_conf.src_addr = nandc->base_dma + reg_off; ++ if (nandc->data_crci) { ++ periph_conf.crci = nandc->data_crci; ++ slave_conf.peripheral_config = &periph_conf; ++ slave_conf.peripheral_size = sizeof(periph_conf); ++ } ++ } else { ++ slave_conf.dst_maxburst = 16; ++ slave_conf.dst_addr = nandc->base_dma + reg_off; ++ if (nandc->cmd_crci) { ++ periph_conf.crci = nandc->cmd_crci; ++ slave_conf.peripheral_config = &periph_conf; ++ slave_conf.peripheral_size = sizeof(periph_conf); ++ } ++ } ++ ++ ret = dmaengine_slave_config(nandc->chan, &slave_conf); ++ if (ret) { ++ dev_err(nandc->dev, "failed to configure dma channel\n"); ++ goto err; ++ } ++ ++ dma_desc = dmaengine_prep_slave_sg(nandc->chan, sgl, 1, dir_eng, 0); ++ if (!dma_desc) { ++ dev_err(nandc->dev, "failed to prepare desc\n"); ++ ret = -EINVAL; ++ goto err; ++ } ++ ++ desc->dma_desc = dma_desc; ++ ++ list_add_tail(&desc->node, &nandc->desc_list); ++ ++ return 0; ++err: ++ kfree(desc); ++ ++ return ret; ++} ++ ++/** ++ * qcom_read_reg_dma() - read a given number of registers to the reg_read_buf pointer ++ * @nandc: qpic nand controller ++ * @first: offset of the first register in the contiguous block ++ * @num_regs: number of registers to read ++ * @flags: flags to control DMA descriptor preparation ++ * ++ * This function will prepares a descriptor to read a given number of ++ * contiguous registers to the reg_read_buf pointer. 
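/*
 * Editor's note -- illustrative sketch only, not part of the patch.
 * qcom_prep_bam_dma_desc_data() above only queues read buffers (the rx
 * descriptor is formed later, in qcom_submit_descs()), while write buffers
 * trigger a descriptor right away unless the caller passed NAND_BAM_NO_EOT
 * to keep batching.  A minimal model of that decision, with hypothetical
 * names:
 */
#include <stdbool.h>
#include <stdio.h>

#define SKETCH_NO_EOT (1u << 0)		/* stand-in for NAND_BAM_NO_EOT */

/* returns true when a DMA descriptor must be prepared immediately */
static bool form_desc_now(bool read, unsigned int flags)
{
	if (read)
		return false;		 /* rx sgl is flushed at submit time */
	return !(flags & SKETCH_NO_EOT); /* tx: flush unless batching */
}

int main(void)
{
	printf("read          -> %d\n", form_desc_now(true, 0));	      /* 0 */
	printf("write         -> %d\n", form_desc_now(false, 0));	      /* 1 */
	printf("write, NO_EOT -> %d\n", form_desc_now(false, SKETCH_NO_EOT)); /* 0 */
	return 0;
}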
++ */ ++int qcom_read_reg_dma(struct qcom_nand_controller *nandc, int first, ++ int num_regs, unsigned int flags) ++{ ++ bool flow_control = false; ++ void *vaddr; ++ ++ vaddr = nandc->reg_read_buf + nandc->reg_read_pos; ++ nandc->reg_read_pos += num_regs; ++ ++ if (first == NAND_DEV_CMD_VLD || first == NAND_DEV_CMD1) ++ first = dev_cmd_reg_addr(nandc, first); ++ ++ if (nandc->props->supports_bam) ++ return qcom_prep_bam_dma_desc_cmd(nandc, true, first, vaddr, ++ num_regs, flags); ++ ++ if (first == NAND_READ_ID || first == NAND_FLASH_STATUS) ++ flow_control = true; ++ ++ return qcom_prep_adm_dma_desc(nandc, true, first, vaddr, ++ num_regs * sizeof(u32), flow_control); ++} ++ ++/** ++ * qcom_write_reg_dma() - write a given number of registers ++ * @nandc: qpic nand controller ++ * @vaddr: contnigeous memory from where register value will ++ * be written ++ * @first: offset of the first register in the contiguous block ++ * @num_regs: number of registers to write ++ * @flags: flags to control DMA descriptor preparation ++ * ++ * This function will prepares a descriptor to write a given number of ++ * contiguous registers ++ */ ++int qcom_write_reg_dma(struct qcom_nand_controller *nandc, __le32 *vaddr, ++ int first, int num_regs, unsigned int flags) ++{ ++ bool flow_control = false; ++ ++ if (first == NAND_EXEC_CMD) ++ flags |= NAND_BAM_NWD; ++ ++ if (first == NAND_DEV_CMD1_RESTORE || first == NAND_DEV_CMD1) ++ first = dev_cmd_reg_addr(nandc, NAND_DEV_CMD1); ++ ++ if (first == NAND_DEV_CMD_VLD_RESTORE || first == NAND_DEV_CMD_VLD) ++ first = dev_cmd_reg_addr(nandc, NAND_DEV_CMD_VLD); ++ ++ if (nandc->props->supports_bam) ++ return qcom_prep_bam_dma_desc_cmd(nandc, false, first, vaddr, ++ num_regs, flags); ++ ++ if (first == NAND_FLASH_CMD) ++ flow_control = true; ++ ++ return qcom_prep_adm_dma_desc(nandc, false, first, vaddr, ++ num_regs * sizeof(u32), flow_control); ++} ++ ++/** ++ * qcom_read_data_dma() - transfer data ++ * @nandc: qpic nand controller ++ * @reg_off: offset within the controller's data buffer ++ * @vaddr: virtual address of the buffer we want to write to ++ * @size: DMA transaction size in bytes ++ * @flags: flags to control DMA descriptor preparation ++ * ++ * This function will prepares a DMA descriptor to transfer data from the ++ * controller's internal buffer to the buffer 'vaddr' ++ */ ++int qcom_read_data_dma(struct qcom_nand_controller *nandc, int reg_off, ++ const u8 *vaddr, int size, unsigned int flags) ++{ ++ if (nandc->props->supports_bam) ++ return qcom_prep_bam_dma_desc_data(nandc, true, vaddr, size, flags); ++ ++ return qcom_prep_adm_dma_desc(nandc, true, reg_off, vaddr, size, false); ++} ++ ++/** ++ * qcom_write_data_dma() - transfer data ++ * @nandc: qpic nand controller ++ * @reg_off: offset within the controller's data buffer ++ * @vaddr: virtual address of the buffer we want to read from ++ * @size: DMA transaction size in bytes ++ * @flags: flags to control DMA descriptor preparation ++ * ++ * This function will prepares a DMA descriptor to transfer data from ++ * 'vaddr' to the controller's internal buffer ++ */ ++int qcom_write_data_dma(struct qcom_nand_controller *nandc, int reg_off, ++ const u8 *vaddr, int size, unsigned int flags) ++{ ++ if (nandc->props->supports_bam) ++ return qcom_prep_bam_dma_desc_data(nandc, false, vaddr, size, flags); ++ ++ return qcom_prep_adm_dma_desc(nandc, false, reg_off, vaddr, size, false); ++} ++ ++/** ++ * qcom_submit_descs() - submit dma descriptor ++ * @nandc: qpic nand controller ++ * ++ * This function will 
submit all the prepared dma descriptor ++ * cmd or data descriptor ++ */ ++int qcom_submit_descs(struct qcom_nand_controller *nandc) ++{ ++ struct desc_info *desc, *n; ++ dma_cookie_t cookie = 0; ++ struct bam_transaction *bam_txn = nandc->bam_txn; ++ int ret = 0; ++ ++ if (nandc->props->supports_bam) { ++ if (bam_txn->rx_sgl_pos > bam_txn->rx_sgl_start) { ++ ret = qcom_prepare_bam_async_desc(nandc, nandc->rx_chan, 0); ++ if (ret) ++ goto err_unmap_free_desc; ++ } ++ ++ if (bam_txn->tx_sgl_pos > bam_txn->tx_sgl_start) { ++ ret = qcom_prepare_bam_async_desc(nandc, nandc->tx_chan, ++ DMA_PREP_INTERRUPT); ++ if (ret) ++ goto err_unmap_free_desc; ++ } ++ ++ if (bam_txn->cmd_sgl_pos > bam_txn->cmd_sgl_start) { ++ ret = qcom_prepare_bam_async_desc(nandc, nandc->cmd_chan, ++ DMA_PREP_CMD); ++ if (ret) ++ goto err_unmap_free_desc; ++ } ++ } ++ ++ list_for_each_entry(desc, &nandc->desc_list, node) ++ cookie = dmaengine_submit(desc->dma_desc); ++ ++ if (nandc->props->supports_bam) { ++ bam_txn->last_cmd_desc->callback = qcom_qpic_bam_dma_done; ++ bam_txn->last_cmd_desc->callback_param = bam_txn; ++ ++ dma_async_issue_pending(nandc->tx_chan); ++ dma_async_issue_pending(nandc->rx_chan); ++ dma_async_issue_pending(nandc->cmd_chan); ++ ++ if (!wait_for_completion_timeout(&bam_txn->txn_done, ++ QPIC_NAND_COMPLETION_TIMEOUT)) ++ ret = -ETIMEDOUT; ++ } else { ++ if (dma_sync_wait(nandc->chan, cookie) != DMA_COMPLETE) ++ ret = -ETIMEDOUT; ++ } ++ ++err_unmap_free_desc: ++ /* ++ * Unmap the dma sg_list and free the desc allocated by both ++ * qcom_prepare_bam_async_desc() and qcom_prep_adm_dma_desc() functions. ++ */ ++ list_for_each_entry_safe(desc, n, &nandc->desc_list, node) { ++ list_del(&desc->node); ++ ++ if (nandc->props->supports_bam) ++ dma_unmap_sg(nandc->dev, desc->bam_sgl, ++ desc->sgl_cnt, desc->dir); ++ else ++ dma_unmap_sg(nandc->dev, &desc->adm_sgl, 1, ++ desc->dir); ++ ++ kfree(desc); ++ } ++ ++ return ret; ++} ++ ++/** ++ * qcom_clear_read_regs() - reset the read register buffer ++ * @nandc: qpic nand controller ++ * ++ * This function reset the register read buffer for next NAND operation ++ */ ++void qcom_clear_read_regs(struct qcom_nand_controller *nandc) ++{ ++ nandc->reg_read_pos = 0; ++ qcom_nandc_dev_to_mem(nandc, false); ++} ++ ++/** ++ * qcom_nandc_unalloc() - unallocate qpic nand controller ++ * @nandc: qpic nand controller ++ * ++ * This function will unallocate memory alloacted for qpic nand controller ++ */ ++void qcom_nandc_unalloc(struct qcom_nand_controller *nandc) ++{ ++ if (nandc->props->supports_bam) { ++ if (!dma_mapping_error(nandc->dev, nandc->reg_read_dma)) ++ dma_unmap_single(nandc->dev, nandc->reg_read_dma, ++ MAX_REG_RD * ++ sizeof(*nandc->reg_read_buf), ++ DMA_FROM_DEVICE); ++ ++ if (nandc->tx_chan) ++ dma_release_channel(nandc->tx_chan); ++ ++ if (nandc->rx_chan) ++ dma_release_channel(nandc->rx_chan); ++ ++ if (nandc->cmd_chan) ++ dma_release_channel(nandc->cmd_chan); ++ } else { ++ if (nandc->chan) ++ dma_release_channel(nandc->chan); ++ } ++} ++ ++/** ++ * qcom_nandc_alloc() - Allocate qpic nand controller ++ * @nandc: qpic nand controller ++ * ++ * This function will allocate memory for qpic nand controller ++ */ ++int qcom_nandc_alloc(struct qcom_nand_controller *nandc) ++{ ++ int ret; ++ ++ ret = dma_set_coherent_mask(nandc->dev, DMA_BIT_MASK(32)); ++ if (ret) { ++ dev_err(nandc->dev, "failed to set DMA mask\n"); ++ return ret; ++ } ++ ++ /* ++ * we use the internal buffer for reading ONFI params, reading small ++ * data like ID and status, and preforming 
read-copy-write operations ++ * when writing to a codeword partially. 532 is the maximum possible ++ * size of a codeword for our nand controller ++ */ ++ nandc->buf_size = 532; ++ ++ nandc->data_buffer = devm_kzalloc(nandc->dev, nandc->buf_size, GFP_KERNEL); ++ if (!nandc->data_buffer) ++ return -ENOMEM; ++ ++ nandc->regs = devm_kzalloc(nandc->dev, sizeof(*nandc->regs), GFP_KERNEL); ++ if (!nandc->regs) ++ return -ENOMEM; ++ ++ nandc->reg_read_buf = devm_kcalloc(nandc->dev, MAX_REG_RD, ++ sizeof(*nandc->reg_read_buf), ++ GFP_KERNEL); ++ if (!nandc->reg_read_buf) ++ return -ENOMEM; ++ ++ if (nandc->props->supports_bam) { ++ nandc->reg_read_dma = ++ dma_map_single(nandc->dev, nandc->reg_read_buf, ++ MAX_REG_RD * ++ sizeof(*nandc->reg_read_buf), ++ DMA_FROM_DEVICE); ++ if (dma_mapping_error(nandc->dev, nandc->reg_read_dma)) { ++ dev_err(nandc->dev, "failed to DMA MAP reg buffer\n"); ++ return -EIO; ++ } ++ ++ nandc->tx_chan = dma_request_chan(nandc->dev, "tx"); ++ if (IS_ERR(nandc->tx_chan)) { ++ ret = PTR_ERR(nandc->tx_chan); ++ nandc->tx_chan = NULL; ++ dev_err_probe(nandc->dev, ret, ++ "tx DMA channel request failed\n"); ++ goto unalloc; ++ } ++ ++ nandc->rx_chan = dma_request_chan(nandc->dev, "rx"); ++ if (IS_ERR(nandc->rx_chan)) { ++ ret = PTR_ERR(nandc->rx_chan); ++ nandc->rx_chan = NULL; ++ dev_err_probe(nandc->dev, ret, ++ "rx DMA channel request failed\n"); ++ goto unalloc; ++ } ++ ++ nandc->cmd_chan = dma_request_chan(nandc->dev, "cmd"); ++ if (IS_ERR(nandc->cmd_chan)) { ++ ret = PTR_ERR(nandc->cmd_chan); ++ nandc->cmd_chan = NULL; ++ dev_err_probe(nandc->dev, ret, ++ "cmd DMA channel request failed\n"); ++ goto unalloc; ++ } ++ ++ /* ++ * Initially allocate BAM transaction to read ONFI param page. ++ * After detecting all the devices, this BAM transaction will ++ * be freed and the next BAM transaction will be allocated with ++ * maximum codeword size ++ */ ++ nandc->max_cwperpage = 1; ++ nandc->bam_txn = qcom_alloc_bam_transaction(nandc); ++ if (!nandc->bam_txn) { ++ dev_err(nandc->dev, ++ "failed to allocate bam transaction\n"); ++ ret = -ENOMEM; ++ goto unalloc; ++ } ++ } else { ++ nandc->chan = dma_request_chan(nandc->dev, "rxtx"); ++ if (IS_ERR(nandc->chan)) { ++ ret = PTR_ERR(nandc->chan); ++ nandc->chan = NULL; ++ dev_err_probe(nandc->dev, ret, ++ "rxtx DMA channel request failed\n"); ++ return ret; ++ } ++ } ++ ++ INIT_LIST_HEAD(&nandc->desc_list); ++ INIT_LIST_HEAD(&nandc->host_list); ++ ++ return 0; ++unalloc: ++ qcom_nandc_unalloc(nandc); ++ return ret; ++} +diff --git a/drivers/mtd/nand/raw/Kconfig b/drivers/mtd/nand/raw/Kconfig +index d0aaccf72d78..47f5a7561a73 100644 +--- a/drivers/mtd/nand/raw/Kconfig ++++ b/drivers/mtd/nand/raw/Kconfig +@@ -330,7 +330,7 @@ config MTD_NAND_HISI504 + Enables support for NAND controller on Hisilicon SoC Hip04. 
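/*
 * Editor's note -- illustrative sketch only, not part of the patch.
 * qcom_nandc_alloc() a little earlier in this patch requests the tx, rx and
 * cmd DMA channels in turn and, on any failure, jumps to a single unwind
 * path (qcom_nandc_unalloc()) that releases only what was already acquired.
 * The stand-alone sketch below reproduces just that error-handling shape
 * with dummy resources in place of DMA channels.
 */
#include <stdio.h>
#include <stdlib.h>

struct ctrl {
	void *tx, *rx, *cmd;	/* stand-ins for the three DMA channels */
};

static void ctrl_unalloc(struct ctrl *c)
{
	free(c->cmd);		/* free(NULL) is a no-op, like the driver's */
	free(c->rx);		/* "if (chan) dma_release_channel(chan)"    */
	free(c->tx);
}

static int ctrl_alloc(struct ctrl *c)
{
	c->tx = c->rx = c->cmd = NULL;

	c->tx = malloc(16);
	if (!c->tx)
		goto unalloc;
	c->rx = malloc(16);
	if (!c->rx)
		goto unalloc;
	c->cmd = malloc(16);
	if (!c->cmd)
		goto unalloc;
	return 0;

unalloc:
	ctrl_unalloc(c);	/* releases only what was acquired */
	return -1;
}

int main(void)
{
	struct ctrl c;

	if (ctrl_alloc(&c) == 0) {
		puts("all channels acquired");
		ctrl_unalloc(&c);
	}
	return 0;
}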
+ + config MTD_NAND_QCOM +- tristate "QCOM NAND controller" ++ bool "QCOM NAND controller" + depends on ARCH_QCOM || COMPILE_TEST + depends on HAS_IOMEM + help +diff --git a/drivers/mtd/nand/raw/qcom_nandc.c b/drivers/mtd/nand/raw/qcom_nandc.c +index daf8f73b25bc..91f1eb781cb2 100644 +--- a/drivers/mtd/nand/raw/qcom_nandc.c ++++ b/drivers/mtd/nand/raw/qcom_nandc.c +@@ -15,417 +15,7 @@ + #include + #include + #include +- +-/* NANDc reg offsets */ +-#define NAND_FLASH_CMD 0x00 +-#define NAND_ADDR0 0x04 +-#define NAND_ADDR1 0x08 +-#define NAND_FLASH_CHIP_SELECT 0x0c +-#define NAND_EXEC_CMD 0x10 +-#define NAND_FLASH_STATUS 0x14 +-#define NAND_BUFFER_STATUS 0x18 +-#define NAND_DEV0_CFG0 0x20 +-#define NAND_DEV0_CFG1 0x24 +-#define NAND_DEV0_ECC_CFG 0x28 +-#define NAND_AUTO_STATUS_EN 0x2c +-#define NAND_DEV1_CFG0 0x30 +-#define NAND_DEV1_CFG1 0x34 +-#define NAND_READ_ID 0x40 +-#define NAND_READ_STATUS 0x44 +-#define NAND_DEV_CMD0 0xa0 +-#define NAND_DEV_CMD1 0xa4 +-#define NAND_DEV_CMD2 0xa8 +-#define NAND_DEV_CMD_VLD 0xac +-#define SFLASHC_BURST_CFG 0xe0 +-#define NAND_ERASED_CW_DETECT_CFG 0xe8 +-#define NAND_ERASED_CW_DETECT_STATUS 0xec +-#define NAND_EBI2_ECC_BUF_CFG 0xf0 +-#define FLASH_BUF_ACC 0x100 +- +-#define NAND_CTRL 0xf00 +-#define NAND_VERSION 0xf08 +-#define NAND_READ_LOCATION_0 0xf20 +-#define NAND_READ_LOCATION_1 0xf24 +-#define NAND_READ_LOCATION_2 0xf28 +-#define NAND_READ_LOCATION_3 0xf2c +-#define NAND_READ_LOCATION_LAST_CW_0 0xf40 +-#define NAND_READ_LOCATION_LAST_CW_1 0xf44 +-#define NAND_READ_LOCATION_LAST_CW_2 0xf48 +-#define NAND_READ_LOCATION_LAST_CW_3 0xf4c +- +-/* dummy register offsets, used by qcom_write_reg_dma */ +-#define NAND_DEV_CMD1_RESTORE 0xdead +-#define NAND_DEV_CMD_VLD_RESTORE 0xbeef +- +-/* NAND_FLASH_CMD bits */ +-#define PAGE_ACC BIT(4) +-#define LAST_PAGE BIT(5) +- +-/* NAND_FLASH_CHIP_SELECT bits */ +-#define NAND_DEV_SEL 0 +-#define DM_EN BIT(2) +- +-/* NAND_FLASH_STATUS bits */ +-#define FS_OP_ERR BIT(4) +-#define FS_READY_BSY_N BIT(5) +-#define FS_MPU_ERR BIT(8) +-#define FS_DEVICE_STS_ERR BIT(16) +-#define FS_DEVICE_WP BIT(23) +- +-/* NAND_BUFFER_STATUS bits */ +-#define BS_UNCORRECTABLE_BIT BIT(8) +-#define BS_CORRECTABLE_ERR_MSK 0x1f +- +-/* NAND_DEVn_CFG0 bits */ +-#define DISABLE_STATUS_AFTER_WRITE 4 +-#define CW_PER_PAGE 6 +-#define UD_SIZE_BYTES 9 +-#define UD_SIZE_BYTES_MASK GENMASK(18, 9) +-#define ECC_PARITY_SIZE_BYTES_RS 19 +-#define SPARE_SIZE_BYTES 23 +-#define SPARE_SIZE_BYTES_MASK GENMASK(26, 23) +-#define NUM_ADDR_CYCLES 27 +-#define STATUS_BFR_READ 30 +-#define SET_RD_MODE_AFTER_STATUS 31 +- +-/* NAND_DEVn_CFG0 bits */ +-#define DEV0_CFG1_ECC_DISABLE 0 +-#define WIDE_FLASH 1 +-#define NAND_RECOVERY_CYCLES 2 +-#define CS_ACTIVE_BSY 5 +-#define BAD_BLOCK_BYTE_NUM 6 +-#define BAD_BLOCK_IN_SPARE_AREA 16 +-#define WR_RD_BSY_GAP 17 +-#define ENABLE_BCH_ECC 27 +- +-/* NAND_DEV0_ECC_CFG bits */ +-#define ECC_CFG_ECC_DISABLE 0 +-#define ECC_SW_RESET 1 +-#define ECC_MODE 4 +-#define ECC_PARITY_SIZE_BYTES_BCH 8 +-#define ECC_NUM_DATA_BYTES 16 +-#define ECC_NUM_DATA_BYTES_MASK GENMASK(25, 16) +-#define ECC_FORCE_CLK_OPEN 30 +- +-/* NAND_DEV_CMD1 bits */ +-#define READ_ADDR 0 +- +-/* NAND_DEV_CMD_VLD bits */ +-#define READ_START_VLD BIT(0) +-#define READ_STOP_VLD BIT(1) +-#define WRITE_START_VLD BIT(2) +-#define ERASE_START_VLD BIT(3) +-#define SEQ_READ_START_VLD BIT(4) +- +-/* NAND_EBI2_ECC_BUF_CFG bits */ +-#define NUM_STEPS 0 +- +-/* NAND_ERASED_CW_DETECT_CFG bits */ +-#define ERASED_CW_ECC_MASK 1 +-#define AUTO_DETECT_RES 0 +-#define 
MASK_ECC BIT(ERASED_CW_ECC_MASK) +-#define RESET_ERASED_DET BIT(AUTO_DETECT_RES) +-#define ACTIVE_ERASED_DET (0 << AUTO_DETECT_RES) +-#define CLR_ERASED_PAGE_DET (RESET_ERASED_DET | MASK_ECC) +-#define SET_ERASED_PAGE_DET (ACTIVE_ERASED_DET | MASK_ECC) +- +-/* NAND_ERASED_CW_DETECT_STATUS bits */ +-#define PAGE_ALL_ERASED BIT(7) +-#define CODEWORD_ALL_ERASED BIT(6) +-#define PAGE_ERASED BIT(5) +-#define CODEWORD_ERASED BIT(4) +-#define ERASED_PAGE (PAGE_ALL_ERASED | PAGE_ERASED) +-#define ERASED_CW (CODEWORD_ALL_ERASED | CODEWORD_ERASED) +- +-/* NAND_READ_LOCATION_n bits */ +-#define READ_LOCATION_OFFSET 0 +-#define READ_LOCATION_SIZE 16 +-#define READ_LOCATION_LAST 31 +- +-/* Version Mask */ +-#define NAND_VERSION_MAJOR_MASK 0xf0000000 +-#define NAND_VERSION_MAJOR_SHIFT 28 +-#define NAND_VERSION_MINOR_MASK 0x0fff0000 +-#define NAND_VERSION_MINOR_SHIFT 16 +- +-/* NAND OP_CMDs */ +-#define OP_PAGE_READ 0x2 +-#define OP_PAGE_READ_WITH_ECC 0x3 +-#define OP_PAGE_READ_WITH_ECC_SPARE 0x4 +-#define OP_PAGE_READ_ONFI_READ 0x5 +-#define OP_PROGRAM_PAGE 0x6 +-#define OP_PAGE_PROGRAM_WITH_ECC 0x7 +-#define OP_PROGRAM_PAGE_SPARE 0x9 +-#define OP_BLOCK_ERASE 0xa +-#define OP_CHECK_STATUS 0xc +-#define OP_FETCH_ID 0xb +-#define OP_RESET_DEVICE 0xd +- +-/* Default Value for NAND_DEV_CMD_VLD */ +-#define NAND_DEV_CMD_VLD_VAL (READ_START_VLD | WRITE_START_VLD | \ +- ERASE_START_VLD | SEQ_READ_START_VLD) +- +-/* NAND_CTRL bits */ +-#define BAM_MODE_EN BIT(0) +- +-/* +- * the NAND controller performs reads/writes with ECC in 516 byte chunks. +- * the driver calls the chunks 'step' or 'codeword' interchangeably +- */ +-#define NANDC_STEP_SIZE 512 +- +-/* +- * the largest page size we support is 8K, this will have 16 steps/codewords +- * of 512 bytes each +- */ +-#define MAX_NUM_STEPS (SZ_8K / NANDC_STEP_SIZE) +- +-/* we read at most 3 registers per codeword scan */ +-#define MAX_REG_RD (3 * MAX_NUM_STEPS) +- +-/* ECC modes supported by the controller */ +-#define ECC_NONE BIT(0) +-#define ECC_RS_4BIT BIT(1) +-#define ECC_BCH_4BIT BIT(2) +-#define ECC_BCH_8BIT BIT(3) +- +-/* +- * Returns the actual register address for all NAND_DEV_ registers +- * (i.e. NAND_DEV_CMD0, NAND_DEV_CMD1, NAND_DEV_CMD2 and NAND_DEV_CMD_VLD) +- */ +-#define dev_cmd_reg_addr(nandc, reg) ((nandc)->props->dev_cmd_reg_start + (reg)) +- +-/* Returns the NAND register physical address */ +-#define nandc_reg_phys(chip, offset) ((chip)->base_phys + (offset)) +- +-/* Returns the dma address for reg read buffer */ +-#define reg_buf_dma_addr(chip, vaddr) \ +- ((chip)->reg_read_dma + \ +- ((u8 *)(vaddr) - (u8 *)(chip)->reg_read_buf)) +- +-#define QPIC_PER_CW_CMD_ELEMENTS 32 +-#define QPIC_PER_CW_CMD_SGL 32 +-#define QPIC_PER_CW_DATA_SGL 8 +- +-#define QPIC_NAND_COMPLETION_TIMEOUT msecs_to_jiffies(2000) +- +-/* +- * Flags used in DMA descriptor preparation helper functions +- * (i.e. 
qcom_read_reg_dma/qcom_write_reg_dma/qcom_read_data_dma/qcom_write_data_dma) +- */ +-/* Don't set the EOT in current tx BAM sgl */ +-#define NAND_BAM_NO_EOT BIT(0) +-/* Set the NWD flag in current BAM sgl */ +-#define NAND_BAM_NWD BIT(1) +-/* Finish writing in the current BAM sgl and start writing in another BAM sgl */ +-#define NAND_BAM_NEXT_SGL BIT(2) +-/* +- * Erased codeword status is being used two times in single transfer so this +- * flag will determine the current value of erased codeword status register +- */ +-#define NAND_ERASED_CW_SET BIT(4) +- +-#define MAX_ADDRESS_CYCLE 5 +- +-/* +- * This data type corresponds to the BAM transaction which will be used for all +- * NAND transfers. +- * @bam_ce - the array of BAM command elements +- * @cmd_sgl - sgl for NAND BAM command pipe +- * @data_sgl - sgl for NAND BAM consumer/producer pipe +- * @last_data_desc - last DMA desc in data channel (tx/rx). +- * @last_cmd_desc - last DMA desc in command channel. +- * @txn_done - completion for NAND transfer. +- * @bam_ce_pos - the index in bam_ce which is available for next sgl +- * @bam_ce_start - the index in bam_ce which marks the start position ce +- * for current sgl. It will be used for size calculation +- * for current sgl +- * @cmd_sgl_pos - current index in command sgl. +- * @cmd_sgl_start - start index in command sgl. +- * @tx_sgl_pos - current index in data sgl for tx. +- * @tx_sgl_start - start index in data sgl for tx. +- * @rx_sgl_pos - current index in data sgl for rx. +- * @rx_sgl_start - start index in data sgl for rx. +- */ +-struct bam_transaction { +- struct bam_cmd_element *bam_ce; +- struct scatterlist *cmd_sgl; +- struct scatterlist *data_sgl; +- struct dma_async_tx_descriptor *last_data_desc; +- struct dma_async_tx_descriptor *last_cmd_desc; +- struct completion txn_done; +- u32 bam_ce_pos; +- u32 bam_ce_start; +- u32 cmd_sgl_pos; +- u32 cmd_sgl_start; +- u32 tx_sgl_pos; +- u32 tx_sgl_start; +- u32 rx_sgl_pos; +- u32 rx_sgl_start; +-}; +- +-/* +- * This data type corresponds to the nand dma descriptor +- * @dma_desc - low level DMA engine descriptor +- * @list - list for desc_info +- * +- * @adm_sgl - sgl which will be used for single sgl dma descriptor. Only used by +- * ADM +- * @bam_sgl - sgl which will be used for dma descriptor. Only used by BAM +- * @sgl_cnt - number of SGL in bam_sgl. Only used by BAM +- * @dir - DMA transfer direction +- */ +-struct desc_info { +- struct dma_async_tx_descriptor *dma_desc; +- struct list_head node; +- +- union { +- struct scatterlist adm_sgl; +- struct { +- struct scatterlist *bam_sgl; +- int sgl_cnt; +- }; +- }; +- enum dma_data_direction dir; +-}; +- +-/* +- * holds the current register values that we want to write. acts as a contiguous +- * chunk of memory which we use to write the controller registers through DMA. 
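/*
 * Editor's note -- illustrative sketch only, not part of the patch.
 * The comment above describes struct nandc_regs (defined just below) as a
 * contiguous shadow of register values: callers stage values in it and then
 * hand a member pointer plus a count to qcom_write_reg_dma(), which relies
 * on the members sitting back to back (for example the 4-register write
 * starting at &regs->cmd seen earlier in this patch).  A userspace model of
 * that "stage, then flush a consecutive block" idea, assuming the usual
 * padding-free layout of consecutive 32-bit members:
 */
#include <stddef.h>
#include <stdint.h>
#include <stdio.h>
#include <string.h>

struct shadow_regs {			/* stand-in for struct nandc_regs */
	uint32_t cmd;
	uint32_t addr0;
	uint32_t addr1;
	uint32_t chip_sel;
	uint32_t exec;
};

/* "flush" num consecutive staged values starting at the given member offset */
static void flush_block(const struct shadow_regs *r, size_t offset,
			unsigned int num)
{
	const unsigned char *p = (const unsigned char *)r + offset;
	uint32_t val;
	unsigned int i;

	for (i = 0; i < num; i++) {
		memcpy(&val, p + i * sizeof(val), sizeof(val));
		printf("reg[%u] <= 0x%08x\n", i, (unsigned int)val);
	}
}

int main(void)
{
	/* example values only; they carry no controller-specific meaning */
	struct shadow_regs r = { 0x3, 0x0, 0x0, 0x0, 0x1 };

	/* mirrors a 4-register write starting at the cmd member */
	flush_block(&r, offsetof(struct shadow_regs, cmd), 4);
	return 0;
}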
+- */ +-struct nandc_regs { +- __le32 cmd; +- __le32 addr0; +- __le32 addr1; +- __le32 chip_sel; +- __le32 exec; +- +- __le32 cfg0; +- __le32 cfg1; +- __le32 ecc_bch_cfg; +- +- __le32 clrflashstatus; +- __le32 clrreadstatus; +- +- __le32 cmd1; +- __le32 vld; +- +- __le32 orig_cmd1; +- __le32 orig_vld; +- +- __le32 ecc_buf_cfg; +- __le32 read_location0; +- __le32 read_location1; +- __le32 read_location2; +- __le32 read_location3; +- __le32 read_location_last0; +- __le32 read_location_last1; +- __le32 read_location_last2; +- __le32 read_location_last3; +- +- __le32 erased_cw_detect_cfg_clr; +- __le32 erased_cw_detect_cfg_set; +-}; +- +-/* +- * NAND controller data struct +- * +- * @dev: parent device +- * +- * @base: MMIO base +- * +- * @core_clk: controller clock +- * @aon_clk: another controller clock +- * +- * @regs: a contiguous chunk of memory for DMA register +- * writes. contains the register values to be +- * written to controller +- * +- * @props: properties of current NAND controller, +- * initialized via DT match data +- * +- * @controller: base controller structure +- * @host_list: list containing all the chips attached to the +- * controller +- * +- * @chan: dma channel +- * @cmd_crci: ADM DMA CRCI for command flow control +- * @data_crci: ADM DMA CRCI for data flow control +- * +- * @desc_list: DMA descriptor list (list of desc_infos) +- * +- * @data_buffer: our local DMA buffer for page read/writes, +- * used when we can't use the buffer provided +- * by upper layers directly +- * @reg_read_buf: local buffer for reading back registers via DMA +- * +- * @base_phys: physical base address of controller registers +- * @base_dma: dma base address of controller registers +- * @reg_read_dma: contains dma address for register read buffer +- * +- * @buf_size/count/start: markers for chip->legacy.read_buf/write_buf +- * functions +- * @max_cwperpage: maximum QPIC codewords required. calculated +- * from all connected NAND devices pagesize +- * +- * @reg_read_pos: marker for data read in reg_read_buf +- * +- * @cmd1/vld: some fixed controller register values +- * +- * @exec_opwrite: flag to select correct number of code word +- * while reading status +- */ +-struct qcom_nand_controller { +- struct device *dev; +- +- void __iomem *base; +- +- struct clk *core_clk; +- struct clk *aon_clk; +- +- struct nandc_regs *regs; +- struct bam_transaction *bam_txn; +- +- const struct qcom_nandc_props *props; +- +- struct nand_controller controller; +- struct list_head host_list; +- +- union { +- /* will be used only by QPIC for BAM DMA */ +- struct { +- struct dma_chan *tx_chan; +- struct dma_chan *rx_chan; +- struct dma_chan *cmd_chan; +- }; +- +- /* will be used only by EBI2 for ADM DMA */ +- struct { +- struct dma_chan *chan; +- unsigned int cmd_crci; +- unsigned int data_crci; +- }; +- }; +- +- struct list_head desc_list; +- +- u8 *data_buffer; +- __le32 *reg_read_buf; +- +- phys_addr_t base_phys; +- dma_addr_t base_dma; +- dma_addr_t reg_read_dma; +- +- int buf_size; +- int buf_count; +- int buf_start; +- unsigned int max_cwperpage; +- +- int reg_read_pos; +- +- u32 cmd1, vld; +- bool exec_opwrite; +-}; ++#include + + /* + * NAND special boot partitions +@@ -530,97 +120,6 @@ struct qcom_nand_host { + bool bch_enabled; + }; + +-/* +- * This data type corresponds to the NAND controller properties which varies +- * among different NAND controllers. 
+- * @ecc_modes - ecc mode for NAND +- * @dev_cmd_reg_start - NAND_DEV_CMD_* registers starting offset +- * @supports_bam - whether NAND controller is using BAM +- * @nandc_part_of_qpic - whether NAND controller is part of qpic IP +- * @qpic_version2 - flag to indicate QPIC IP version 2 +- * @use_codeword_fixup - whether NAND has different layout for boot partitions +- */ +-struct qcom_nandc_props { +- u32 ecc_modes; +- u32 dev_cmd_reg_start; +- bool supports_bam; +- bool nandc_part_of_qpic; +- bool qpic_version2; +- bool use_codeword_fixup; +-}; +- +-/* Frees the BAM transaction memory */ +-static void qcom_free_bam_transaction(struct qcom_nand_controller *nandc) +-{ +- struct bam_transaction *bam_txn = nandc->bam_txn; +- +- devm_kfree(nandc->dev, bam_txn); +-} +- +-/* Allocates and Initializes the BAM transaction */ +-static struct bam_transaction * +-qcom_alloc_bam_transaction(struct qcom_nand_controller *nandc) +-{ +- struct bam_transaction *bam_txn; +- size_t bam_txn_size; +- unsigned int num_cw = nandc->max_cwperpage; +- void *bam_txn_buf; +- +- bam_txn_size = +- sizeof(*bam_txn) + num_cw * +- ((sizeof(*bam_txn->bam_ce) * QPIC_PER_CW_CMD_ELEMENTS) + +- (sizeof(*bam_txn->cmd_sgl) * QPIC_PER_CW_CMD_SGL) + +- (sizeof(*bam_txn->data_sgl) * QPIC_PER_CW_DATA_SGL)); +- +- bam_txn_buf = devm_kzalloc(nandc->dev, bam_txn_size, GFP_KERNEL); +- if (!bam_txn_buf) +- return NULL; +- +- bam_txn = bam_txn_buf; +- bam_txn_buf += sizeof(*bam_txn); +- +- bam_txn->bam_ce = bam_txn_buf; +- bam_txn_buf += +- sizeof(*bam_txn->bam_ce) * QPIC_PER_CW_CMD_ELEMENTS * num_cw; +- +- bam_txn->cmd_sgl = bam_txn_buf; +- bam_txn_buf += +- sizeof(*bam_txn->cmd_sgl) * QPIC_PER_CW_CMD_SGL * num_cw; +- +- bam_txn->data_sgl = bam_txn_buf; +- +- init_completion(&bam_txn->txn_done); +- +- return bam_txn; +-} +- +-/* Clears the BAM transaction indexes */ +-static void qcom_clear_bam_transaction(struct qcom_nand_controller *nandc) +-{ +- struct bam_transaction *bam_txn = nandc->bam_txn; +- +- if (!nandc->props->supports_bam) +- return; +- +- memset(&bam_txn->bam_ce_pos, 0, sizeof(u32) * 8); +- bam_txn->last_data_desc = NULL; +- +- sg_init_table(bam_txn->cmd_sgl, nandc->max_cwperpage * +- QPIC_PER_CW_CMD_SGL); +- sg_init_table(bam_txn->data_sgl, nandc->max_cwperpage * +- QPIC_PER_CW_DATA_SGL); +- +- reinit_completion(&bam_txn->txn_done); +-} +- +-/* Callback for DMA descriptor completion */ +-static void qcom_qpic_bam_dma_done(void *data) +-{ +- struct bam_transaction *bam_txn = data; +- +- complete(&bam_txn->txn_done); +-} +- + static inline struct qcom_nand_host *to_qcom_nand_host(struct nand_chip *chip) + { + return container_of(chip, struct qcom_nand_host, chip); +@@ -629,8 +128,8 @@ static inline struct qcom_nand_host *to_qcom_nand_host(struct nand_chip *chip) + static inline struct qcom_nand_controller * + get_qcom_nand_controller(struct nand_chip *chip) + { +- return container_of(chip->controller, struct qcom_nand_controller, +- controller); ++ return (struct qcom_nand_controller *) ++ ((u8 *)chip->controller - sizeof(struct qcom_nand_controller)); + } + + static inline u32 nandc_read(struct qcom_nand_controller *nandc, int offset) +@@ -644,23 +143,6 @@ static inline void nandc_write(struct qcom_nand_controller *nandc, int offset, + iowrite32(val, nandc->base + offset); + } + +-static inline void qcom_nandc_dev_to_mem(struct qcom_nand_controller *nandc, bool is_cpu) +-{ +- if (!nandc->props->supports_bam) +- return; +- +- if (is_cpu) +- dma_sync_single_for_cpu(nandc->dev, nandc->reg_read_dma, +- MAX_REG_RD * +- 
sizeof(*nandc->reg_read_buf), +- DMA_FROM_DEVICE); +- else +- dma_sync_single_for_device(nandc->dev, nandc->reg_read_dma, +- MAX_REG_RD * +- sizeof(*nandc->reg_read_buf), +- DMA_FROM_DEVICE); +-} +- + /* Helper to check the code word, whether it is last cw or not */ + static bool qcom_nandc_is_last_cw(struct nand_ecc_ctrl *ecc, int cw) + { +@@ -819,356 +301,6 @@ static void update_rw_regs(struct qcom_nand_host *host, int num_cw, bool read, i + host->cw_data : host->cw_size, 1); + } + +-/* +- * Maps the scatter gather list for DMA transfer and forms the DMA descriptor +- * for BAM. This descriptor will be added in the NAND DMA descriptor queue +- * which will be submitted to DMA engine. +- */ +-static int qcom_prepare_bam_async_desc(struct qcom_nand_controller *nandc, +- struct dma_chan *chan, +- unsigned long flags) +-{ +- struct desc_info *desc; +- struct scatterlist *sgl; +- unsigned int sgl_cnt; +- int ret; +- struct bam_transaction *bam_txn = nandc->bam_txn; +- enum dma_transfer_direction dir_eng; +- struct dma_async_tx_descriptor *dma_desc; +- +- desc = kzalloc(sizeof(*desc), GFP_KERNEL); +- if (!desc) +- return -ENOMEM; +- +- if (chan == nandc->cmd_chan) { +- sgl = &bam_txn->cmd_sgl[bam_txn->cmd_sgl_start]; +- sgl_cnt = bam_txn->cmd_sgl_pos - bam_txn->cmd_sgl_start; +- bam_txn->cmd_sgl_start = bam_txn->cmd_sgl_pos; +- dir_eng = DMA_MEM_TO_DEV; +- desc->dir = DMA_TO_DEVICE; +- } else if (chan == nandc->tx_chan) { +- sgl = &bam_txn->data_sgl[bam_txn->tx_sgl_start]; +- sgl_cnt = bam_txn->tx_sgl_pos - bam_txn->tx_sgl_start; +- bam_txn->tx_sgl_start = bam_txn->tx_sgl_pos; +- dir_eng = DMA_MEM_TO_DEV; +- desc->dir = DMA_TO_DEVICE; +- } else { +- sgl = &bam_txn->data_sgl[bam_txn->rx_sgl_start]; +- sgl_cnt = bam_txn->rx_sgl_pos - bam_txn->rx_sgl_start; +- bam_txn->rx_sgl_start = bam_txn->rx_sgl_pos; +- dir_eng = DMA_DEV_TO_MEM; +- desc->dir = DMA_FROM_DEVICE; +- } +- +- sg_mark_end(sgl + sgl_cnt - 1); +- ret = dma_map_sg(nandc->dev, sgl, sgl_cnt, desc->dir); +- if (ret == 0) { +- dev_err(nandc->dev, "failure in mapping desc\n"); +- kfree(desc); +- return -ENOMEM; +- } +- +- desc->sgl_cnt = sgl_cnt; +- desc->bam_sgl = sgl; +- +- dma_desc = dmaengine_prep_slave_sg(chan, sgl, sgl_cnt, dir_eng, +- flags); +- +- if (!dma_desc) { +- dev_err(nandc->dev, "failure in prep desc\n"); +- dma_unmap_sg(nandc->dev, sgl, sgl_cnt, desc->dir); +- kfree(desc); +- return -EINVAL; +- } +- +- desc->dma_desc = dma_desc; +- +- /* update last data/command descriptor */ +- if (chan == nandc->cmd_chan) +- bam_txn->last_cmd_desc = dma_desc; +- else +- bam_txn->last_data_desc = dma_desc; +- +- list_add_tail(&desc->node, &nandc->desc_list); +- +- return 0; +-} +- +-/* +- * Prepares the command descriptor for BAM DMA which will be used for NAND +- * register reads and writes. The command descriptor requires the command +- * to be formed in command element type so this function uses the command +- * element from bam transaction ce array and fills the same with required +- * data. A single SGL can contain multiple command elements so +- * NAND_BAM_NEXT_SGL will be used for starting the separate SGL +- * after the current command element. 
+- */ +-static int qcom_prep_bam_dma_desc_cmd(struct qcom_nand_controller *nandc, bool read, +- int reg_off, const void *vaddr, +- int size, unsigned int flags) +-{ +- int bam_ce_size; +- int i, ret; +- struct bam_cmd_element *bam_ce_buffer; +- struct bam_transaction *bam_txn = nandc->bam_txn; +- +- bam_ce_buffer = &bam_txn->bam_ce[bam_txn->bam_ce_pos]; +- +- /* fill the command desc */ +- for (i = 0; i < size; i++) { +- if (read) +- bam_prep_ce(&bam_ce_buffer[i], +- nandc_reg_phys(nandc, reg_off + 4 * i), +- BAM_READ_COMMAND, +- reg_buf_dma_addr(nandc, +- (__le32 *)vaddr + i)); +- else +- bam_prep_ce_le32(&bam_ce_buffer[i], +- nandc_reg_phys(nandc, reg_off + 4 * i), +- BAM_WRITE_COMMAND, +- *((__le32 *)vaddr + i)); +- } +- +- bam_txn->bam_ce_pos += size; +- +- /* use the separate sgl after this command */ +- if (flags & NAND_BAM_NEXT_SGL) { +- bam_ce_buffer = &bam_txn->bam_ce[bam_txn->bam_ce_start]; +- bam_ce_size = (bam_txn->bam_ce_pos - +- bam_txn->bam_ce_start) * +- sizeof(struct bam_cmd_element); +- sg_set_buf(&bam_txn->cmd_sgl[bam_txn->cmd_sgl_pos], +- bam_ce_buffer, bam_ce_size); +- bam_txn->cmd_sgl_pos++; +- bam_txn->bam_ce_start = bam_txn->bam_ce_pos; +- +- if (flags & NAND_BAM_NWD) { +- ret = qcom_prepare_bam_async_desc(nandc, nandc->cmd_chan, +- DMA_PREP_FENCE | +- DMA_PREP_CMD); +- if (ret) +- return ret; +- } +- } +- +- return 0; +-} +- +-/* +- * Prepares the data descriptor for BAM DMA which will be used for NAND +- * data reads and writes. +- */ +-static int qcom_prep_bam_dma_desc_data(struct qcom_nand_controller *nandc, bool read, +- const void *vaddr, int size, unsigned int flags) +-{ +- int ret; +- struct bam_transaction *bam_txn = nandc->bam_txn; +- +- if (read) { +- sg_set_buf(&bam_txn->data_sgl[bam_txn->rx_sgl_pos], +- vaddr, size); +- bam_txn->rx_sgl_pos++; +- } else { +- sg_set_buf(&bam_txn->data_sgl[bam_txn->tx_sgl_pos], +- vaddr, size); +- bam_txn->tx_sgl_pos++; +- +- /* +- * BAM will only set EOT for DMA_PREP_INTERRUPT so if this flag +- * is not set, form the DMA descriptor +- */ +- if (!(flags & NAND_BAM_NO_EOT)) { +- ret = qcom_prepare_bam_async_desc(nandc, nandc->tx_chan, +- DMA_PREP_INTERRUPT); +- if (ret) +- return ret; +- } +- } +- +- return 0; +-} +- +-static int qcom_prep_adm_dma_desc(struct qcom_nand_controller *nandc, bool read, +- int reg_off, const void *vaddr, int size, +- bool flow_control) +-{ +- struct desc_info *desc; +- struct dma_async_tx_descriptor *dma_desc; +- struct scatterlist *sgl; +- struct dma_slave_config slave_conf; +- struct qcom_adm_peripheral_config periph_conf = {}; +- enum dma_transfer_direction dir_eng; +- int ret; +- +- desc = kzalloc(sizeof(*desc), GFP_KERNEL); +- if (!desc) +- return -ENOMEM; +- +- sgl = &desc->adm_sgl; +- +- sg_init_one(sgl, vaddr, size); +- +- if (read) { +- dir_eng = DMA_DEV_TO_MEM; +- desc->dir = DMA_FROM_DEVICE; +- } else { +- dir_eng = DMA_MEM_TO_DEV; +- desc->dir = DMA_TO_DEVICE; +- } +- +- ret = dma_map_sg(nandc->dev, sgl, 1, desc->dir); +- if (ret == 0) { +- ret = -ENOMEM; +- goto err; +- } +- +- memset(&slave_conf, 0x00, sizeof(slave_conf)); +- +- slave_conf.device_fc = flow_control; +- if (read) { +- slave_conf.src_maxburst = 16; +- slave_conf.src_addr = nandc->base_dma + reg_off; +- if (nandc->data_crci) { +- periph_conf.crci = nandc->data_crci; +- slave_conf.peripheral_config = &periph_conf; +- slave_conf.peripheral_size = sizeof(periph_conf); +- } +- } else { +- slave_conf.dst_maxburst = 16; +- slave_conf.dst_addr = nandc->base_dma + reg_off; +- if (nandc->cmd_crci) { +- periph_conf.crci = 
nandc->cmd_crci; +- slave_conf.peripheral_config = &periph_conf; +- slave_conf.peripheral_size = sizeof(periph_conf); +- } +- } +- +- ret = dmaengine_slave_config(nandc->chan, &slave_conf); +- if (ret) { +- dev_err(nandc->dev, "failed to configure dma channel\n"); +- goto err; +- } +- +- dma_desc = dmaengine_prep_slave_sg(nandc->chan, sgl, 1, dir_eng, 0); +- if (!dma_desc) { +- dev_err(nandc->dev, "failed to prepare desc\n"); +- ret = -EINVAL; +- goto err; +- } +- +- desc->dma_desc = dma_desc; +- +- list_add_tail(&desc->node, &nandc->desc_list); +- +- return 0; +-err: +- kfree(desc); +- +- return ret; +-} +- +-/* +- * qcom_read_reg_dma: prepares a descriptor to read a given number of +- * contiguous registers to the reg_read_buf pointer +- * +- * @first: offset of the first register in the contiguous block +- * @num_regs: number of registers to read +- * @flags: flags to control DMA descriptor preparation +- */ +-static int qcom_read_reg_dma(struct qcom_nand_controller *nandc, int first, +- int num_regs, unsigned int flags) +-{ +- bool flow_control = false; +- void *vaddr; +- +- vaddr = nandc->reg_read_buf + nandc->reg_read_pos; +- nandc->reg_read_pos += num_regs; +- +- if (first == NAND_DEV_CMD_VLD || first == NAND_DEV_CMD1) +- first = dev_cmd_reg_addr(nandc, first); +- +- if (nandc->props->supports_bam) +- return qcom_prep_bam_dma_desc_cmd(nandc, true, first, vaddr, +- num_regs, flags); +- +- if (first == NAND_READ_ID || first == NAND_FLASH_STATUS) +- flow_control = true; +- +- return qcom_prep_adm_dma_desc(nandc, true, first, vaddr, +- num_regs * sizeof(u32), flow_control); +-} +- +-/* +- * qcom_write_reg_dma: prepares a descriptor to write a given number of +- * contiguous registers +- * +- * @vaddr: contnigeous memory from where register value will +- * be written +- * @first: offset of the first register in the contiguous block +- * @num_regs: number of registers to write +- * @flags: flags to control DMA descriptor preparation +- */ +-static int qcom_write_reg_dma(struct qcom_nand_controller *nandc, __le32 *vaddr, +- int first, int num_regs, unsigned int flags) +-{ +- bool flow_control = false; +- +- if (first == NAND_EXEC_CMD) +- flags |= NAND_BAM_NWD; +- +- if (first == NAND_DEV_CMD1_RESTORE || first == NAND_DEV_CMD1) +- first = dev_cmd_reg_addr(nandc, NAND_DEV_CMD1); +- +- if (first == NAND_DEV_CMD_VLD_RESTORE || first == NAND_DEV_CMD_VLD) +- first = dev_cmd_reg_addr(nandc, NAND_DEV_CMD_VLD); +- +- if (nandc->props->supports_bam) +- return qcom_prep_bam_dma_desc_cmd(nandc, false, first, vaddr, +- num_regs, flags); +- +- if (first == NAND_FLASH_CMD) +- flow_control = true; +- +- return qcom_prep_adm_dma_desc(nandc, false, first, vaddr, +- num_regs * sizeof(u32), flow_control); +-} +- +-/* +- * qcom_read_data_dma: prepares a DMA descriptor to transfer data from the +- * controller's internal buffer to the buffer 'vaddr' +- * +- * @reg_off: offset within the controller's data buffer +- * @vaddr: virtual address of the buffer we want to write to +- * @size: DMA transaction size in bytes +- * @flags: flags to control DMA descriptor preparation +- */ +-static int qcom_read_data_dma(struct qcom_nand_controller *nandc, int reg_off, +- const u8 *vaddr, int size, unsigned int flags) +-{ +- if (nandc->props->supports_bam) +- return qcom_prep_bam_dma_desc_data(nandc, true, vaddr, size, flags); +- +- return qcom_prep_adm_dma_desc(nandc, true, reg_off, vaddr, size, false); +-} +- +-/* +- * qcom_write_data_dma: prepares a DMA descriptor to transfer data from +- * 'vaddr' to the controller's 
internal buffer +- * +- * @reg_off: offset within the controller's data buffer +- * @vaddr: virtual address of the buffer we want to read from +- * @size: DMA transaction size in bytes +- * @flags: flags to control DMA descriptor preparation +- */ +-static int qcom_write_data_dma(struct qcom_nand_controller *nandc, int reg_off, +- const u8 *vaddr, int size, unsigned int flags) +-{ +- if (nandc->props->supports_bam) +- return qcom_prep_bam_dma_desc_data(nandc, false, vaddr, size, flags); +- +- return qcom_prep_adm_dma_desc(nandc, false, reg_off, vaddr, size, false); +-} +- + /* + * Helper to prepare DMA descriptors for configuring registers + * before reading a NAND page. +@@ -1262,83 +394,6 @@ static void config_nand_cw_write(struct nand_chip *chip) + NAND_BAM_NEXT_SGL); + } + +-/* helpers to submit/free our list of dma descriptors */ +-static int qcom_submit_descs(struct qcom_nand_controller *nandc) +-{ +- struct desc_info *desc, *n; +- dma_cookie_t cookie = 0; +- struct bam_transaction *bam_txn = nandc->bam_txn; +- int ret = 0; +- +- if (nandc->props->supports_bam) { +- if (bam_txn->rx_sgl_pos > bam_txn->rx_sgl_start) { +- ret = qcom_prepare_bam_async_desc(nandc, nandc->rx_chan, 0); +- if (ret) +- goto err_unmap_free_desc; +- } +- +- if (bam_txn->tx_sgl_pos > bam_txn->tx_sgl_start) { +- ret = qcom_prepare_bam_async_desc(nandc, nandc->tx_chan, +- DMA_PREP_INTERRUPT); +- if (ret) +- goto err_unmap_free_desc; +- } +- +- if (bam_txn->cmd_sgl_pos > bam_txn->cmd_sgl_start) { +- ret = qcom_prepare_bam_async_desc(nandc, nandc->cmd_chan, +- DMA_PREP_CMD); +- if (ret) +- goto err_unmap_free_desc; +- } +- } +- +- list_for_each_entry(desc, &nandc->desc_list, node) +- cookie = dmaengine_submit(desc->dma_desc); +- +- if (nandc->props->supports_bam) { +- bam_txn->last_cmd_desc->callback = qcom_qpic_bam_dma_done; +- bam_txn->last_cmd_desc->callback_param = bam_txn; +- +- dma_async_issue_pending(nandc->tx_chan); +- dma_async_issue_pending(nandc->rx_chan); +- dma_async_issue_pending(nandc->cmd_chan); +- +- if (!wait_for_completion_timeout(&bam_txn->txn_done, +- QPIC_NAND_COMPLETION_TIMEOUT)) +- ret = -ETIMEDOUT; +- } else { +- if (dma_sync_wait(nandc->chan, cookie) != DMA_COMPLETE) +- ret = -ETIMEDOUT; +- } +- +-err_unmap_free_desc: +- /* +- * Unmap the dma sg_list and free the desc allocated by both +- * qcom_prepare_bam_async_desc() and qcom_prep_adm_dma_desc() functions. +- */ +- list_for_each_entry_safe(desc, n, &nandc->desc_list, node) { +- list_del(&desc->node); +- +- if (nandc->props->supports_bam) +- dma_unmap_sg(nandc->dev, desc->bam_sgl, +- desc->sgl_cnt, desc->dir); +- else +- dma_unmap_sg(nandc->dev, &desc->adm_sgl, 1, +- desc->dir); +- +- kfree(desc); +- } +- +- return ret; +-} +- +-/* reset the register read buffer for next NAND operation */ +-static void qcom_clear_read_regs(struct qcom_nand_controller *nandc) +-{ +- nandc->reg_read_pos = 0; +- qcom_nandc_dev_to_mem(nandc, false); +-} +- + /* + * when using BCH ECC, the HW flags an error in NAND_FLASH_STATUS if it read + * an erased CW, and reports an erased CW in NAND_ERASED_CW_DETECT_STATUS. 
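Aside on the erased-codeword handling described in the comment above: with BCH ECC the hardware raises FS_OP_ERR and BS_UNCORRECTABLE_BIT for an erased codeword as well, so callers consult NAND_ERASED_CW_DETECT_STATUS before counting a real ECC failure. A minimal sketch of that check, mirroring the logic used later in this series (qcom_spi_check_error); cw_is_uncorrectable() is a hypothetical helper named only for this example, the bit macros are the ones added in nand-qpic-common.h further below, and u32/bool are the usual kernel types:

/* illustrative only, not part of the patch */
static bool cw_is_uncorrectable(u32 flash_status, u32 buffer_status,
				u32 erased_cw_status, bool bch_enabled)
{
	/* an erased CW also reports FS_OP_ERR + BS_UNCORRECTABLE_BIT */
	if ((flash_status & FS_OP_ERR) && (buffer_status & BS_UNCORRECTABLE_BIT)) {
		/* with BCH, NAND_ERASED_CW_DETECT_STATUS tells erased CWs apart */
		if (bch_enabled && (erased_cw_status & ERASED_CW) == ERASED_CW)
			return false;	/* erased codeword, not a real ECC failure */
		return true;		/* genuinely uncorrectable codeword */
	}

	return false;
}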
+@@ -2967,141 +2022,14 @@ static const struct nand_controller_ops qcom_nandc_ops = { + .exec_op = qcom_nand_exec_op, + }; + +-static void qcom_nandc_unalloc(struct qcom_nand_controller *nandc) +-{ +- if (nandc->props->supports_bam) { +- if (!dma_mapping_error(nandc->dev, nandc->reg_read_dma)) +- dma_unmap_single(nandc->dev, nandc->reg_read_dma, +- MAX_REG_RD * +- sizeof(*nandc->reg_read_buf), +- DMA_FROM_DEVICE); +- +- if (nandc->tx_chan) +- dma_release_channel(nandc->tx_chan); +- +- if (nandc->rx_chan) +- dma_release_channel(nandc->rx_chan); +- +- if (nandc->cmd_chan) +- dma_release_channel(nandc->cmd_chan); +- } else { +- if (nandc->chan) +- dma_release_channel(nandc->chan); +- } +-} +- +-static int qcom_nandc_alloc(struct qcom_nand_controller *nandc) +-{ +- int ret; +- +- ret = dma_set_coherent_mask(nandc->dev, DMA_BIT_MASK(32)); +- if (ret) { +- dev_err(nandc->dev, "failed to set DMA mask\n"); +- return ret; +- } +- +- /* +- * we use the internal buffer for reading ONFI params, reading small +- * data like ID and status, and preforming read-copy-write operations +- * when writing to a codeword partially. 532 is the maximum possible +- * size of a codeword for our nand controller +- */ +- nandc->buf_size = 532; +- +- nandc->data_buffer = devm_kzalloc(nandc->dev, nandc->buf_size, GFP_KERNEL); +- if (!nandc->data_buffer) +- return -ENOMEM; +- +- nandc->regs = devm_kzalloc(nandc->dev, sizeof(*nandc->regs), GFP_KERNEL); +- if (!nandc->regs) +- return -ENOMEM; +- +- nandc->reg_read_buf = devm_kcalloc(nandc->dev, MAX_REG_RD, +- sizeof(*nandc->reg_read_buf), +- GFP_KERNEL); +- if (!nandc->reg_read_buf) +- return -ENOMEM; +- +- if (nandc->props->supports_bam) { +- nandc->reg_read_dma = +- dma_map_single(nandc->dev, nandc->reg_read_buf, +- MAX_REG_RD * +- sizeof(*nandc->reg_read_buf), +- DMA_FROM_DEVICE); +- if (dma_mapping_error(nandc->dev, nandc->reg_read_dma)) { +- dev_err(nandc->dev, "failed to DMA MAP reg buffer\n"); +- return -EIO; +- } +- +- nandc->tx_chan = dma_request_chan(nandc->dev, "tx"); +- if (IS_ERR(nandc->tx_chan)) { +- ret = PTR_ERR(nandc->tx_chan); +- nandc->tx_chan = NULL; +- dev_err_probe(nandc->dev, ret, +- "tx DMA channel request failed\n"); +- goto unalloc; +- } +- +- nandc->rx_chan = dma_request_chan(nandc->dev, "rx"); +- if (IS_ERR(nandc->rx_chan)) { +- ret = PTR_ERR(nandc->rx_chan); +- nandc->rx_chan = NULL; +- dev_err_probe(nandc->dev, ret, +- "rx DMA channel request failed\n"); +- goto unalloc; +- } +- +- nandc->cmd_chan = dma_request_chan(nandc->dev, "cmd"); +- if (IS_ERR(nandc->cmd_chan)) { +- ret = PTR_ERR(nandc->cmd_chan); +- nandc->cmd_chan = NULL; +- dev_err_probe(nandc->dev, ret, +- "cmd DMA channel request failed\n"); +- goto unalloc; +- } +- +- /* +- * Initially allocate BAM transaction to read ONFI param page. 
+- * After detecting all the devices, this BAM transaction will +- * be freed and the next BAM transaction will be allocated with +- * maximum codeword size +- */ +- nandc->max_cwperpage = 1; +- nandc->bam_txn = qcom_alloc_bam_transaction(nandc); +- if (!nandc->bam_txn) { +- dev_err(nandc->dev, +- "failed to allocate bam transaction\n"); +- ret = -ENOMEM; +- goto unalloc; +- } +- } else { +- nandc->chan = dma_request_chan(nandc->dev, "rxtx"); +- if (IS_ERR(nandc->chan)) { +- ret = PTR_ERR(nandc->chan); +- nandc->chan = NULL; +- dev_err_probe(nandc->dev, ret, +- "rxtx DMA channel request failed\n"); +- return ret; +- } +- } +- +- INIT_LIST_HEAD(&nandc->desc_list); +- INIT_LIST_HEAD(&nandc->host_list); +- +- nand_controller_init(&nandc->controller); +- nandc->controller.ops = &qcom_nandc_ops; +- +- return 0; +-unalloc: +- qcom_nandc_unalloc(nandc); +- return ret; +-} +- + /* one time setup of a few nand controller registers */ + static int qcom_nandc_setup(struct qcom_nand_controller *nandc) + { + u32 nand_ctrl; + ++ nand_controller_init(nandc->controller); ++ nandc->controller->ops = &qcom_nandc_ops; ++ + /* kill onenand */ + if (!nandc->props->nandc_part_of_qpic) + nandc_write(nandc, SFLASHC_BURST_CFG, 0); +@@ -3240,7 +2168,7 @@ static int qcom_nand_host_init_and_register(struct qcom_nand_controller *nandc, + chip->legacy.block_bad = qcom_nandc_block_bad; + chip->legacy.block_markbad = qcom_nandc_block_markbad; + +- chip->controller = &nandc->controller; ++ chip->controller = nandc->controller; + chip->options |= NAND_NO_SUBPAGE_WRITE | NAND_USES_DMA | + NAND_SKIP_BBTSCAN; + +@@ -3323,17 +2251,21 @@ static int qcom_nandc_parse_dt(struct platform_device *pdev) + static int qcom_nandc_probe(struct platform_device *pdev) + { + struct qcom_nand_controller *nandc; ++ struct nand_controller *controller; + const void *dev_data; + struct device *dev = &pdev->dev; + struct resource *res; + int ret; + +- nandc = devm_kzalloc(&pdev->dev, sizeof(*nandc), GFP_KERNEL); ++ nandc = devm_kzalloc(&pdev->dev, sizeof(*nandc) + sizeof(*controller), ++ GFP_KERNEL); + if (!nandc) + return -ENOMEM; ++ controller = (struct nand_controller *)&nandc[1]; + + platform_set_drvdata(pdev, nandc); + nandc->dev = dev; ++ nandc->controller = controller; + + dev_data = of_device_get_match_data(dev); + if (!dev_data) { +diff --git a/include/linux/mtd/nand-qpic-common.h b/include/linux/mtd/nand-qpic-common.h +new file mode 100644 +index 000000000000..425994429387 +--- /dev/null ++++ b/include/linux/mtd/nand-qpic-common.h +@@ -0,0 +1,468 @@ ++/* SPDX-License-Identifier: GPL-2.0 */ ++/* ++ * QCOM QPIC common APIs header file ++ * ++ * Copyright (c) 2023 Qualcomm Inc. 
++ * Authors: Md sadre Alam ++ * ++ */ ++#ifndef __MTD_NAND_QPIC_COMMON_H__ ++#define __MTD_NAND_QPIC_COMMON_H__ ++ ++/* NANDc reg offsets */ ++#define NAND_FLASH_CMD 0x00 ++#define NAND_ADDR0 0x04 ++#define NAND_ADDR1 0x08 ++#define NAND_FLASH_CHIP_SELECT 0x0c ++#define NAND_EXEC_CMD 0x10 ++#define NAND_FLASH_STATUS 0x14 ++#define NAND_BUFFER_STATUS 0x18 ++#define NAND_DEV0_CFG0 0x20 ++#define NAND_DEV0_CFG1 0x24 ++#define NAND_DEV0_ECC_CFG 0x28 ++#define NAND_AUTO_STATUS_EN 0x2c ++#define NAND_DEV1_CFG0 0x30 ++#define NAND_DEV1_CFG1 0x34 ++#define NAND_READ_ID 0x40 ++#define NAND_READ_STATUS 0x44 ++#define NAND_DEV_CMD0 0xa0 ++#define NAND_DEV_CMD1 0xa4 ++#define NAND_DEV_CMD2 0xa8 ++#define NAND_DEV_CMD_VLD 0xac ++#define SFLASHC_BURST_CFG 0xe0 ++#define NAND_ERASED_CW_DETECT_CFG 0xe8 ++#define NAND_ERASED_CW_DETECT_STATUS 0xec ++#define NAND_EBI2_ECC_BUF_CFG 0xf0 ++#define FLASH_BUF_ACC 0x100 ++ ++#define NAND_CTRL 0xf00 ++#define NAND_VERSION 0xf08 ++#define NAND_READ_LOCATION_0 0xf20 ++#define NAND_READ_LOCATION_1 0xf24 ++#define NAND_READ_LOCATION_2 0xf28 ++#define NAND_READ_LOCATION_3 0xf2c ++#define NAND_READ_LOCATION_LAST_CW_0 0xf40 ++#define NAND_READ_LOCATION_LAST_CW_1 0xf44 ++#define NAND_READ_LOCATION_LAST_CW_2 0xf48 ++#define NAND_READ_LOCATION_LAST_CW_3 0xf4c ++ ++/* dummy register offsets, used by qcom_write_reg_dma */ ++#define NAND_DEV_CMD1_RESTORE 0xdead ++#define NAND_DEV_CMD_VLD_RESTORE 0xbeef ++ ++/* NAND_FLASH_CMD bits */ ++#define PAGE_ACC BIT(4) ++#define LAST_PAGE BIT(5) ++ ++/* NAND_FLASH_CHIP_SELECT bits */ ++#define NAND_DEV_SEL 0 ++#define DM_EN BIT(2) ++ ++/* NAND_FLASH_STATUS bits */ ++#define FS_OP_ERR BIT(4) ++#define FS_READY_BSY_N BIT(5) ++#define FS_MPU_ERR BIT(8) ++#define FS_DEVICE_STS_ERR BIT(16) ++#define FS_DEVICE_WP BIT(23) ++ ++/* NAND_BUFFER_STATUS bits */ ++#define BS_UNCORRECTABLE_BIT BIT(8) ++#define BS_CORRECTABLE_ERR_MSK 0x1f ++ ++/* NAND_DEVn_CFG0 bits */ ++#define DISABLE_STATUS_AFTER_WRITE 4 ++#define CW_PER_PAGE 6 ++#define UD_SIZE_BYTES 9 ++#define UD_SIZE_BYTES_MASK GENMASK(18, 9) ++#define ECC_PARITY_SIZE_BYTES_RS 19 ++#define SPARE_SIZE_BYTES 23 ++#define SPARE_SIZE_BYTES_MASK GENMASK(26, 23) ++#define NUM_ADDR_CYCLES 27 ++#define STATUS_BFR_READ 30 ++#define SET_RD_MODE_AFTER_STATUS 31 ++ ++/* NAND_DEVn_CFG0 bits */ ++#define DEV0_CFG1_ECC_DISABLE 0 ++#define WIDE_FLASH 1 ++#define NAND_RECOVERY_CYCLES 2 ++#define CS_ACTIVE_BSY 5 ++#define BAD_BLOCK_BYTE_NUM 6 ++#define BAD_BLOCK_IN_SPARE_AREA 16 ++#define WR_RD_BSY_GAP 17 ++#define ENABLE_BCH_ECC 27 ++ ++/* NAND_DEV0_ECC_CFG bits */ ++#define ECC_CFG_ECC_DISABLE 0 ++#define ECC_SW_RESET 1 ++#define ECC_MODE 4 ++#define ECC_PARITY_SIZE_BYTES_BCH 8 ++#define ECC_NUM_DATA_BYTES 16 ++#define ECC_NUM_DATA_BYTES_MASK GENMASK(25, 16) ++#define ECC_FORCE_CLK_OPEN 30 ++ ++/* NAND_DEV_CMD1 bits */ ++#define READ_ADDR 0 ++ ++/* NAND_DEV_CMD_VLD bits */ ++#define READ_START_VLD BIT(0) ++#define READ_STOP_VLD BIT(1) ++#define WRITE_START_VLD BIT(2) ++#define ERASE_START_VLD BIT(3) ++#define SEQ_READ_START_VLD BIT(4) ++ ++/* NAND_EBI2_ECC_BUF_CFG bits */ ++#define NUM_STEPS 0 ++ ++/* NAND_ERASED_CW_DETECT_CFG bits */ ++#define ERASED_CW_ECC_MASK 1 ++#define AUTO_DETECT_RES 0 ++#define MASK_ECC BIT(ERASED_CW_ECC_MASK) ++#define RESET_ERASED_DET BIT(AUTO_DETECT_RES) ++#define ACTIVE_ERASED_DET (0 << AUTO_DETECT_RES) ++#define CLR_ERASED_PAGE_DET (RESET_ERASED_DET | MASK_ECC) ++#define SET_ERASED_PAGE_DET (ACTIVE_ERASED_DET | MASK_ECC) ++ ++/* NAND_ERASED_CW_DETECT_STATUS bits */ ++#define 
PAGE_ALL_ERASED BIT(7) ++#define CODEWORD_ALL_ERASED BIT(6) ++#define PAGE_ERASED BIT(5) ++#define CODEWORD_ERASED BIT(4) ++#define ERASED_PAGE (PAGE_ALL_ERASED | PAGE_ERASED) ++#define ERASED_CW (CODEWORD_ALL_ERASED | CODEWORD_ERASED) ++ ++/* NAND_READ_LOCATION_n bits */ ++#define READ_LOCATION_OFFSET 0 ++#define READ_LOCATION_SIZE 16 ++#define READ_LOCATION_LAST 31 ++ ++/* Version Mask */ ++#define NAND_VERSION_MAJOR_MASK 0xf0000000 ++#define NAND_VERSION_MAJOR_SHIFT 28 ++#define NAND_VERSION_MINOR_MASK 0x0fff0000 ++#define NAND_VERSION_MINOR_SHIFT 16 ++ ++/* NAND OP_CMDs */ ++#define OP_PAGE_READ 0x2 ++#define OP_PAGE_READ_WITH_ECC 0x3 ++#define OP_PAGE_READ_WITH_ECC_SPARE 0x4 ++#define OP_PAGE_READ_ONFI_READ 0x5 ++#define OP_PROGRAM_PAGE 0x6 ++#define OP_PAGE_PROGRAM_WITH_ECC 0x7 ++#define OP_PROGRAM_PAGE_SPARE 0x9 ++#define OP_BLOCK_ERASE 0xa ++#define OP_CHECK_STATUS 0xc ++#define OP_FETCH_ID 0xb ++#define OP_RESET_DEVICE 0xd ++ ++/* Default Value for NAND_DEV_CMD_VLD */ ++#define NAND_DEV_CMD_VLD_VAL (READ_START_VLD | WRITE_START_VLD | \ ++ ERASE_START_VLD | SEQ_READ_START_VLD) ++ ++/* NAND_CTRL bits */ ++#define BAM_MODE_EN BIT(0) ++ ++/* ++ * the NAND controller performs reads/writes with ECC in 516 byte chunks. ++ * the driver calls the chunks 'step' or 'codeword' interchangeably ++ */ ++#define NANDC_STEP_SIZE 512 ++ ++/* ++ * the largest page size we support is 8K, this will have 16 steps/codewords ++ * of 512 bytes each ++ */ ++#define MAX_NUM_STEPS (SZ_8K / NANDC_STEP_SIZE) ++ ++/* we read at most 3 registers per codeword scan */ ++#define MAX_REG_RD (3 * MAX_NUM_STEPS) ++ ++/* ECC modes supported by the controller */ ++#define ECC_NONE BIT(0) ++#define ECC_RS_4BIT BIT(1) ++#define ECC_BCH_4BIT BIT(2) ++#define ECC_BCH_8BIT BIT(3) ++ ++/* ++ * Returns the actual register address for all NAND_DEV_ registers ++ * (i.e. NAND_DEV_CMD0, NAND_DEV_CMD1, NAND_DEV_CMD2 and NAND_DEV_CMD_VLD) ++ */ ++#define dev_cmd_reg_addr(nandc, reg) ((nandc)->props->dev_cmd_reg_start + (reg)) ++ ++/* Returns the NAND register physical address */ ++#define nandc_reg_phys(chip, offset) ((chip)->base_phys + (offset)) ++ ++/* Returns the dma address for reg read buffer */ ++#define reg_buf_dma_addr(chip, vaddr) \ ++ ((chip)->reg_read_dma + \ ++ ((u8 *)(vaddr) - (u8 *)(chip)->reg_read_buf)) ++ ++#define QPIC_PER_CW_CMD_ELEMENTS 32 ++#define QPIC_PER_CW_CMD_SGL 32 ++#define QPIC_PER_CW_DATA_SGL 8 ++ ++#define QPIC_NAND_COMPLETION_TIMEOUT msecs_to_jiffies(2000) ++ ++/* ++ * Flags used in DMA descriptor preparation helper functions ++ * (i.e. qcom_read_reg_dma/qcom_write_reg_dma/qcom_read_data_dma/qcom_write_data_dma) ++ */ ++/* Don't set the EOT in current tx BAM sgl */ ++#define NAND_BAM_NO_EOT BIT(0) ++/* Set the NWD flag in current BAM sgl */ ++#define NAND_BAM_NWD BIT(1) ++/* Finish writing in the current BAM sgl and start writing in another BAM sgl */ ++#define NAND_BAM_NEXT_SGL BIT(2) ++/* ++ * Erased codeword status is being used two times in single transfer so this ++ * flag will determine the current value of erased codeword status register ++ */ ++#define NAND_ERASED_CW_SET BIT(4) ++ ++#define MAX_ADDRESS_CYCLE 5 ++ ++/* ++ * This data type corresponds to the BAM transaction which will be used for all ++ * NAND transfers. ++ * @bam_ce - the array of BAM command elements ++ * @cmd_sgl - sgl for NAND BAM command pipe ++ * @data_sgl - sgl for NAND BAM consumer/producer pipe ++ * @last_data_desc - last DMA desc in data channel (tx/rx). ++ * @last_cmd_desc - last DMA desc in command channel. 
++ * @txn_done - completion for NAND transfer. ++ * @bam_ce_pos - the index in bam_ce which is available for next sgl ++ * @bam_ce_start - the index in bam_ce which marks the start position ce ++ * for current sgl. It will be used for size calculation ++ * for current sgl ++ * @cmd_sgl_pos - current index in command sgl. ++ * @cmd_sgl_start - start index in command sgl. ++ * @tx_sgl_pos - current index in data sgl for tx. ++ * @tx_sgl_start - start index in data sgl for tx. ++ * @rx_sgl_pos - current index in data sgl for rx. ++ * @rx_sgl_start - start index in data sgl for rx. ++ */ ++struct bam_transaction { ++ struct bam_cmd_element *bam_ce; ++ struct scatterlist *cmd_sgl; ++ struct scatterlist *data_sgl; ++ struct dma_async_tx_descriptor *last_data_desc; ++ struct dma_async_tx_descriptor *last_cmd_desc; ++ struct completion txn_done; ++ u32 bam_ce_pos; ++ u32 bam_ce_start; ++ u32 cmd_sgl_pos; ++ u32 cmd_sgl_start; ++ u32 tx_sgl_pos; ++ u32 tx_sgl_start; ++ u32 rx_sgl_pos; ++ u32 rx_sgl_start; ++}; ++ ++/* ++ * This data type corresponds to the nand dma descriptor ++ * @dma_desc - low level DMA engine descriptor ++ * @list - list for desc_info ++ * ++ * @adm_sgl - sgl which will be used for single sgl dma descriptor. Only used by ++ * ADM ++ * @bam_sgl - sgl which will be used for dma descriptor. Only used by BAM ++ * @sgl_cnt - number of SGL in bam_sgl. Only used by BAM ++ * @dir - DMA transfer direction ++ */ ++struct desc_info { ++ struct dma_async_tx_descriptor *dma_desc; ++ struct list_head node; ++ ++ union { ++ struct scatterlist adm_sgl; ++ struct { ++ struct scatterlist *bam_sgl; ++ int sgl_cnt; ++ }; ++ }; ++ enum dma_data_direction dir; ++}; ++ ++/* ++ * holds the current register values that we want to write. acts as a contiguous ++ * chunk of memory which we use to write the controller registers through DMA. ++ */ ++struct nandc_regs { ++ __le32 cmd; ++ __le32 addr0; ++ __le32 addr1; ++ __le32 chip_sel; ++ __le32 exec; ++ ++ __le32 cfg0; ++ __le32 cfg1; ++ __le32 ecc_bch_cfg; ++ ++ __le32 clrflashstatus; ++ __le32 clrreadstatus; ++ ++ __le32 cmd1; ++ __le32 vld; ++ ++ __le32 orig_cmd1; ++ __le32 orig_vld; ++ ++ __le32 ecc_buf_cfg; ++ __le32 read_location0; ++ __le32 read_location1; ++ __le32 read_location2; ++ __le32 read_location3; ++ __le32 read_location_last0; ++ __le32 read_location_last1; ++ __le32 read_location_last2; ++ __le32 read_location_last3; ++ ++ __le32 erased_cw_detect_cfg_clr; ++ __le32 erased_cw_detect_cfg_set; ++}; ++ ++/* ++ * NAND controller data struct ++ * ++ * @dev: parent device ++ * ++ * @base: MMIO base ++ * ++ * @core_clk: controller clock ++ * @aon_clk: another controller clock ++ * ++ * @regs: a contiguous chunk of memory for DMA register ++ * writes. 
contains the register values to be ++ * written to controller ++ * ++ * @props: properties of current NAND controller, ++ * initialized via DT match data ++ * ++ * @controller: base controller structure ++ * @host_list: list containing all the chips attached to the ++ * controller ++ * ++ * @chan: dma channel ++ * @cmd_crci: ADM DMA CRCI for command flow control ++ * @data_crci: ADM DMA CRCI for data flow control ++ * ++ * @desc_list: DMA descriptor list (list of desc_infos) ++ * ++ * @data_buffer: our local DMA buffer for page read/writes, ++ * used when we can't use the buffer provided ++ * by upper layers directly ++ * @reg_read_buf: local buffer for reading back registers via DMA ++ * ++ * @base_phys: physical base address of controller registers ++ * @base_dma: dma base address of controller registers ++ * @reg_read_dma: contains dma address for register read buffer ++ * ++ * @buf_size/count/start: markers for chip->legacy.read_buf/write_buf ++ * functions ++ * @max_cwperpage: maximum QPIC codewords required. calculated ++ * from all connected NAND devices pagesize ++ * ++ * @reg_read_pos: marker for data read in reg_read_buf ++ * ++ * @cmd1/vld: some fixed controller register values ++ * ++ * @exec_opwrite: flag to select correct number of code word ++ * while reading status ++ */ ++struct qcom_nand_controller { ++ struct device *dev; ++ ++ void __iomem *base; ++ ++ struct clk *core_clk; ++ struct clk *aon_clk; ++ ++ struct nandc_regs *regs; ++ struct bam_transaction *bam_txn; ++ ++ const struct qcom_nandc_props *props; ++ ++ struct nand_controller *controller; ++ struct list_head host_list; ++ ++ union { ++ /* will be used only by QPIC for BAM DMA */ ++ struct { ++ struct dma_chan *tx_chan; ++ struct dma_chan *rx_chan; ++ struct dma_chan *cmd_chan; ++ }; ++ ++ /* will be used only by EBI2 for ADM DMA */ ++ struct { ++ struct dma_chan *chan; ++ unsigned int cmd_crci; ++ unsigned int data_crci; ++ }; ++ }; ++ ++ struct list_head desc_list; ++ ++ u8 *data_buffer; ++ __le32 *reg_read_buf; ++ ++ phys_addr_t base_phys; ++ dma_addr_t base_dma; ++ dma_addr_t reg_read_dma; ++ ++ int buf_size; ++ int buf_count; ++ int buf_start; ++ unsigned int max_cwperpage; ++ ++ int reg_read_pos; ++ ++ u32 cmd1, vld; ++ bool exec_opwrite; ++}; ++ ++/* ++ * This data type corresponds to the NAND controller properties which varies ++ * among different NAND controllers. 
++ * @ecc_modes - ecc mode for NAND ++ * @dev_cmd_reg_start - NAND_DEV_CMD_* registers starting offset ++ * @supports_bam - whether NAND controller is using BAM ++ * @nandc_part_of_qpic - whether NAND controller is part of qpic IP ++ * @qpic_version2 - flag to indicate QPIC IP version 2 ++ * @use_codeword_fixup - whether NAND has different layout for boot partitions ++ */ ++struct qcom_nandc_props { ++ u32 ecc_modes; ++ u32 dev_cmd_reg_start; ++ bool supports_bam; ++ bool nandc_part_of_qpic; ++ bool qpic_version2; ++ bool use_codeword_fixup; ++}; ++ ++void qcom_free_bam_transaction(struct qcom_nand_controller *nandc); ++struct bam_transaction *qcom_alloc_bam_transaction(struct qcom_nand_controller *nandc); ++void qcom_clear_bam_transaction(struct qcom_nand_controller *nandc); ++void qcom_qpic_bam_dma_done(void *data); ++void qcom_nandc_dev_to_mem(struct qcom_nand_controller *nandc, bool is_cpu); ++int qcom_prepare_bam_async_desc(struct qcom_nand_controller *nandc, ++ struct dma_chan *chan, unsigned long flags); ++int qcom_prep_bam_dma_desc_cmd(struct qcom_nand_controller *nandc, bool read, ++ int reg_off, const void *vaddr, int size, unsigned int flags); ++int qcom_prep_bam_dma_desc_data(struct qcom_nand_controller *nandc, bool read, ++ const void *vaddr, int size, unsigned int flags); ++int qcom_prep_adm_dma_desc(struct qcom_nand_controller *nandc, bool read, int reg_off, ++ const void *vaddr, int size, bool flow_control); ++int qcom_read_reg_dma(struct qcom_nand_controller *nandc, int first, int num_regs, ++ unsigned int flags); ++int qcom_write_reg_dma(struct qcom_nand_controller *nandc, __le32 *vaddr, int first, ++ int num_regs, unsigned int flags); ++int qcom_read_data_dma(struct qcom_nand_controller *nandc, int reg_off, const u8 *vaddr, ++ int size, unsigned int flags); ++int qcom_write_data_dma(struct qcom_nand_controller *nandc, int reg_off, const u8 *vaddr, ++ int size, unsigned int flags); ++int qcom_submit_descs(struct qcom_nand_controller *nandc); ++void qcom_clear_read_regs(struct qcom_nand_controller *nandc); ++void qcom_nandc_unalloc(struct qcom_nand_controller *nandc); ++int qcom_nandc_alloc(struct qcom_nand_controller *nandc); ++#endif ++ diff --git a/target/linux/qualcommax/patches-6.6/0405-mtd-rawnand-qcom-use-FIELD_PREP-and-GENMASK.patch b/target/linux/qualcommax/patches-6.6/0405-mtd-rawnand-qcom-use-FIELD_PREP-and-GENMASK.patch new file mode 100644 index 000000000..542a44a73 --- /dev/null +++ b/target/linux/qualcommax/patches-6.6/0405-mtd-rawnand-qcom-use-FIELD_PREP-and-GENMASK.patch @@ -0,0 +1,194 @@ +From: Md Sadre Alam +Date: Sun, 22 Sep 2024 17:03:48 +0530 +Subject: [PATCH] mtd: rawnand: qcom: use FIELD_PREP and GENMASK + +Use the bitfield macro FIELD_PREP, and GENMASK to +do the shift and mask in one go. This makes the code +more readable. 
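For readers unfamiliar with the bitfield helpers referenced in the commit message above, the transformation is the standard <linux/bitfield.h> idiom: describe a register field by its GENMASK() and let FIELD_PREP() derive the shift. A minimal before/after sketch using the CW_PER_PAGE field from this patch; the snippet is illustrative only and not part of the patch itself:

#include <linux/bits.h>		/* GENMASK() */
#include <linux/bitfield.h>	/* FIELD_PREP() */

#define CW_PER_PAGE		6		/* old style: bare shift amount */
#define CW_PER_PAGE_MASK	GENMASK(8, 6)	/* new style: whole field as a mask */

static u32 encode_cw_per_page(unsigned int cwperpage)
{
	/* before: value shifted into place by hand */
	u32 old_val = (cwperpage - 1) << CW_PER_PAGE;

	/*
	 * after: FIELD_PREP() derives the shift from the mask and also masks
	 * the value; for compile-time-constant arguments it rejects values
	 * that do not fit the field.
	 */
	u32 new_val = FIELD_PREP(CW_PER_PAGE_MASK, cwperpage - 1);

	if (old_val != new_val)
		return 0;	/* never happens: both encode bits [8:6] identically */

	return new_val;
}

The hunks that follow apply the same pattern to every field (UD_SIZE_BYTES_MASK, SPARE_SIZE_BYTES_MASK, ECC_MODE_MASK, and so on).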
+ +Signed-off-by: Md Sadre Alam +--- +diff --git a/drivers/mtd/nand/raw/qcom_nandc.c b/drivers/mtd/nand/raw/qcom_nandc.c +index 91f1eb781cb2..c1159dbc8eba 100644 +--- a/drivers/mtd/nand/raw/qcom_nandc.c ++++ b/drivers/mtd/nand/raw/qcom_nandc.c +@@ -281,7 +281,7 @@ static void update_rw_regs(struct qcom_nand_host *host, int num_cw, bool read, i + (num_cw - 1) << CW_PER_PAGE); + + cfg1 = cpu_to_le32(host->cfg1_raw); +- ecc_bch_cfg = cpu_to_le32(1 << ECC_CFG_ECC_DISABLE); ++ ecc_bch_cfg = cpu_to_le32(ECC_CFG_ECC_DISABLE); + } + + nandc->regs->cmd = cmd; +@@ -1494,42 +1494,41 @@ static int qcom_nand_attach_chip(struct nand_chip *chip) + host->cw_size = host->cw_data + ecc->bytes; + bad_block_byte = mtd->writesize - host->cw_size * (cwperpage - 1) + 1; + +- host->cfg0 = (cwperpage - 1) << CW_PER_PAGE +- | host->cw_data << UD_SIZE_BYTES +- | 0 << DISABLE_STATUS_AFTER_WRITE +- | 5 << NUM_ADDR_CYCLES +- | host->ecc_bytes_hw << ECC_PARITY_SIZE_BYTES_RS +- | 0 << STATUS_BFR_READ +- | 1 << SET_RD_MODE_AFTER_STATUS +- | host->spare_bytes << SPARE_SIZE_BYTES; +- +- host->cfg1 = 7 << NAND_RECOVERY_CYCLES +- | 0 << CS_ACTIVE_BSY +- | bad_block_byte << BAD_BLOCK_BYTE_NUM +- | 0 << BAD_BLOCK_IN_SPARE_AREA +- | 2 << WR_RD_BSY_GAP +- | wide_bus << WIDE_FLASH +- | host->bch_enabled << ENABLE_BCH_ECC; +- +- host->cfg0_raw = (cwperpage - 1) << CW_PER_PAGE +- | host->cw_size << UD_SIZE_BYTES +- | 5 << NUM_ADDR_CYCLES +- | 0 << SPARE_SIZE_BYTES; +- +- host->cfg1_raw = 7 << NAND_RECOVERY_CYCLES +- | 0 << CS_ACTIVE_BSY +- | 17 << BAD_BLOCK_BYTE_NUM +- | 1 << BAD_BLOCK_IN_SPARE_AREA +- | 2 << WR_RD_BSY_GAP +- | wide_bus << WIDE_FLASH +- | 1 << DEV0_CFG1_ECC_DISABLE; +- +- host->ecc_bch_cfg = !host->bch_enabled << ECC_CFG_ECC_DISABLE +- | 0 << ECC_SW_RESET +- | host->cw_data << ECC_NUM_DATA_BYTES +- | 1 << ECC_FORCE_CLK_OPEN +- | ecc_mode << ECC_MODE +- | host->ecc_bytes_hw << ECC_PARITY_SIZE_BYTES_BCH; ++ host->cfg0 = FIELD_PREP(CW_PER_PAGE_MASK, (cwperpage - 1)) | ++ FIELD_PREP(UD_SIZE_BYTES_MASK, host->cw_data) | ++ FIELD_PREP(DISABLE_STATUS_AFTER_WRITE, 0) | ++ FIELD_PREP(NUM_ADDR_CYCLES_MASK, 5) | ++ FIELD_PREP(ECC_PARITY_SIZE_BYTES_RS, host->ecc_bytes_hw) | ++ FIELD_PREP(STATUS_BFR_READ, 0) | ++ FIELD_PREP(SET_RD_MODE_AFTER_STATUS, 1) | ++ FIELD_PREP(SPARE_SIZE_BYTES_MASK, host->spare_bytes); ++ ++ host->cfg1 = FIELD_PREP(NAND_RECOVERY_CYCLES_MASK, 7) | ++ FIELD_PREP(BAD_BLOCK_BYTE_NUM_MASK, bad_block_byte) | ++ FIELD_PREP(BAD_BLOCK_IN_SPARE_AREA, 0) | ++ FIELD_PREP(WR_RD_BSY_GAP_MASK, 2) | ++ FIELD_PREP(WIDE_FLASH, wide_bus) | ++ FIELD_PREP(ENABLE_BCH_ECC, host->bch_enabled); ++ ++ host->cfg0_raw = FIELD_PREP(CW_PER_PAGE_MASK, (cwperpage - 1)) | ++ FIELD_PREP(UD_SIZE_BYTES_MASK, host->cw_size) | ++ FIELD_PREP(NUM_ADDR_CYCLES_MASK, 5) | ++ FIELD_PREP(SPARE_SIZE_BYTES_MASK, 0); ++ ++ host->cfg1_raw = FIELD_PREP(NAND_RECOVERY_CYCLES_MASK, 7) | ++ FIELD_PREP(CS_ACTIVE_BSY, 0) | ++ FIELD_PREP(BAD_BLOCK_BYTE_NUM_MASK, 17) | ++ FIELD_PREP(BAD_BLOCK_IN_SPARE_AREA, 1) | ++ FIELD_PREP(WR_RD_BSY_GAP_MASK, 2) | ++ FIELD_PREP(WIDE_FLASH, wide_bus) | ++ FIELD_PREP(DEV0_CFG1_ECC_DISABLE, 1); ++ ++ host->ecc_bch_cfg = FIELD_PREP(ECC_CFG_ECC_DISABLE, !host->bch_enabled) | ++ FIELD_PREP(ECC_SW_RESET, 0) | ++ FIELD_PREP(ECC_NUM_DATA_BYTES_MASK, host->cw_data) | ++ FIELD_PREP(ECC_FORCE_CLK_OPEN, 1) | ++ FIELD_PREP(ECC_MODE_MASK, ecc_mode) | ++ FIELD_PREP(ECC_PARITY_SIZE_BYTES_BCH_MASK, host->ecc_bytes_hw); + + if (!nandc->props->qpic_version2) + host->ecc_buf_cfg = 0x203 << NUM_STEPS; +@@ -1882,21 +1881,21 @@ static int 
qcom_param_page_type_exec(struct nand_chip *chip, const struct nand_ + nandc->regs->addr0 = 0; + nandc->regs->addr1 = 0; + +- nandc->regs->cfg0 = cpu_to_le32(0 << CW_PER_PAGE +- | 512 << UD_SIZE_BYTES +- | 5 << NUM_ADDR_CYCLES +- | 0 << SPARE_SIZE_BYTES); ++ host->cfg0 = FIELD_PREP(CW_PER_PAGE_MASK, 0) | ++ FIELD_PREP(UD_SIZE_BYTES_MASK, 512) | ++ FIELD_PREP(NUM_ADDR_CYCLES_MASK, 5) | ++ FIELD_PREP(SPARE_SIZE_BYTES_MASK, 0); + +- nandc->regs->cfg1 = cpu_to_le32(7 << NAND_RECOVERY_CYCLES +- | 0 << CS_ACTIVE_BSY +- | 17 << BAD_BLOCK_BYTE_NUM +- | 1 << BAD_BLOCK_IN_SPARE_AREA +- | 2 << WR_RD_BSY_GAP +- | 0 << WIDE_FLASH +- | 1 << DEV0_CFG1_ECC_DISABLE); ++ host->cfg1 = FIELD_PREP(NAND_RECOVERY_CYCLES_MASK, 7) | ++ FIELD_PREP(BAD_BLOCK_BYTE_NUM_MASK, 17) | ++ FIELD_PREP(CS_ACTIVE_BSY, 0) | ++ FIELD_PREP(BAD_BLOCK_IN_SPARE_AREA, 1) | ++ FIELD_PREP(WR_RD_BSY_GAP_MASK, 2) | ++ FIELD_PREP(WIDE_FLASH, 0) | ++ FIELD_PREP(DEV0_CFG1_ECC_DISABLE, 1); + + if (!nandc->props->qpic_version2) +- nandc->regs->ecc_buf_cfg = cpu_to_le32(1 << ECC_CFG_ECC_DISABLE); ++ nandc->regs->ecc_buf_cfg = cpu_to_le32(ECC_CFG_ECC_DISABLE); + + /* configure CMD1 and VLD for ONFI param probing in QPIC v1 */ + if (!nandc->props->qpic_version2) { +diff --git a/include/linux/mtd/nand-qpic-common.h b/include/linux/mtd/nand-qpic-common.h +index 425994429387..e79c79775eb8 100644 +--- a/include/linux/mtd/nand-qpic-common.h ++++ b/include/linux/mtd/nand-qpic-common.h +@@ -70,35 +70,42 @@ + #define BS_CORRECTABLE_ERR_MSK 0x1f + + /* NAND_DEVn_CFG0 bits */ +-#define DISABLE_STATUS_AFTER_WRITE 4 ++#define DISABLE_STATUS_AFTER_WRITE BIT(4) + #define CW_PER_PAGE 6 ++#define CW_PER_PAGE_MASK GENMASK(8, 6) + #define UD_SIZE_BYTES 9 + #define UD_SIZE_BYTES_MASK GENMASK(18, 9) +-#define ECC_PARITY_SIZE_BYTES_RS 19 ++#define ECC_PARITY_SIZE_BYTES_RS GENMASK(22, 19) + #define SPARE_SIZE_BYTES 23 + #define SPARE_SIZE_BYTES_MASK GENMASK(26, 23) + #define NUM_ADDR_CYCLES 27 +-#define STATUS_BFR_READ 30 +-#define SET_RD_MODE_AFTER_STATUS 31 ++#define NUM_ADDR_CYCLES_MASK GENMASK(29, 27) ++#define STATUS_BFR_READ BIT(30) ++#define SET_RD_MODE_AFTER_STATUS BIT(31) + + /* NAND_DEVn_CFG0 bits */ +-#define DEV0_CFG1_ECC_DISABLE 0 +-#define WIDE_FLASH 1 ++#define DEV0_CFG1_ECC_DISABLE BIT(0) ++#define WIDE_FLASH BIT(1) + #define NAND_RECOVERY_CYCLES 2 +-#define CS_ACTIVE_BSY 5 ++#define NAND_RECOVERY_CYCLES_MASK GENMASK(4, 2) ++#define CS_ACTIVE_BSY BIT(5) + #define BAD_BLOCK_BYTE_NUM 6 +-#define BAD_BLOCK_IN_SPARE_AREA 16 ++#define BAD_BLOCK_BYTE_NUM_MASK GENMASK(15, 6) ++#define BAD_BLOCK_IN_SPARE_AREA BIT(16) + #define WR_RD_BSY_GAP 17 +-#define ENABLE_BCH_ECC 27 ++#define WR_RD_BSY_GAP_MASK GENMASK(22, 17) ++#define ENABLE_BCH_ECC BIT(27) + + /* NAND_DEV0_ECC_CFG bits */ +-#define ECC_CFG_ECC_DISABLE 0 +-#define ECC_SW_RESET 1 ++#define ECC_CFG_ECC_DISABLE BIT(0) ++#define ECC_SW_RESET BIT(1) + #define ECC_MODE 4 ++#define ECC_MODE_MASK GENMASK(5, 4) + #define ECC_PARITY_SIZE_BYTES_BCH 8 ++#define ECC_PARITY_SIZE_BYTES_BCH_MASK GENMASK(12, 8) + #define ECC_NUM_DATA_BYTES 16 + #define ECC_NUM_DATA_BYTES_MASK GENMASK(25, 16) +-#define ECC_FORCE_CLK_OPEN 30 ++#define ECC_FORCE_CLK_OPEN BIT(30) + + /* NAND_DEV_CMD1 bits */ + #define READ_ADDR 0 diff --git a/target/linux/qualcommax/patches-6.6/0406-spi-spi-qpic-add-driver-for-QCOM-SPI-NAND-flash-Interface.patch b/target/linux/qualcommax/patches-6.6/0406-spi-spi-qpic-add-driver-for-QCOM-SPI-NAND-flash-Interface.patch new file mode 100644 index 000000000..95188529c --- /dev/null +++ 
b/target/linux/qualcommax/patches-6.6/0406-spi-spi-qpic-add-driver-for-QCOM-SPI-NAND-flash-Interface.patch @@ -0,0 +1,1729 @@ +From: Md Sadre Alam +Date: Sun, 22 Sep 2024 17:03:49 +0530 +Subject: [PATCH] spi: spi-qpic: add driver for QCOM SPI NAND flash Interface + +This driver implements support for the SPI-NAND mode of QCOM NAND Flash +Interface as a SPI-MEM controller with pipelined ECC capability. + +Co-developed-by: Sricharan Ramabadhran +Signed-off-by: Sricharan Ramabadhran +Co-developed-by: Varadarajan Narayanan +Signed-off-by: Varadarajan Narayanan +Signed-off-by: Md Sadre Alam +--- +--- a/drivers/mtd/nand/Makefile ++++ b/drivers/mtd/nand/Makefile +@@ -7,8 +7,11 @@ obj-$(CONFIG_MTD_NAND_MTK_BMT) += mtk_bm + + ifeq ($(CONFIG_MTD_NAND_QCOM),y) + obj-y += qpic_common.o ++else ++ifeq ($(CONFIG_SPI_QPIC_SNAND),y) ++obj-y += qpic_common.o ++endif + endif +- + obj-y += onenand/ + obj-y += raw/ + obj-y += spi/ +--- a/drivers/spi/Kconfig ++++ b/drivers/spi/Kconfig +@@ -870,6 +870,14 @@ config SPI_QCOM_QSPI + help + QSPI(Quad SPI) driver for Qualcomm QSPI controller. + ++config SPI_QPIC_SNAND ++ bool "QPIC SNAND controller" ++ depends on ARCH_QCOM || COMPILE_TEST ++ help ++ QPIC_SNAND (QPIC SPI NAND) driver for Qualcomm QPIC controller. ++ QPIC controller supports both parallel nand and serial nand. ++ This config will enable serial nand driver for QPIC controller. ++ + config SPI_QUP + tristate "Qualcomm SPI controller with QUP interface" + depends on ARCH_QCOM || COMPILE_TEST +--- a/drivers/spi/Makefile ++++ b/drivers/spi/Makefile +@@ -110,6 +110,7 @@ obj-$(CONFIG_SPI_PXA2XX) += spi-pxa2xx- + obj-$(CONFIG_SPI_PXA2XX_PCI) += spi-pxa2xx-pci.o + obj-$(CONFIG_SPI_QCOM_GENI) += spi-geni-qcom.o + obj-$(CONFIG_SPI_QCOM_QSPI) += spi-qcom-qspi.o ++obj-$(CONFIG_SPI_QPIC_SNAND) += spi-qpic-snand.o + obj-$(CONFIG_SPI_QUP) += spi-qup.o + obj-$(CONFIG_SPI_ROCKCHIP) += spi-rockchip.o + obj-$(CONFIG_SPI_ROCKCHIP_SFC) += spi-rockchip-sfc.o +--- /dev/null ++++ b/drivers/spi/spi-qpic-snand.c +@@ -0,0 +1,1634 @@ ++/* ++ * SPDX-License-Identifier: GPL-2.0 ++ * ++ * Copyright (c) 2023, Qualcomm Innovation Center, Inc. All rights reserved. 
++ * ++ * Authors: ++ * Md Sadre Alam ++ * Sricharan R ++ * Varadarajan Narayanan ++ */ ++#include ++#include ++#include ++#include ++#include ++#include ++#include ++#include ++#include ++#include ++#include ++#include ++#include ++#include ++ ++#define NAND_FLASH_SPI_CFG 0xc0 ++#define NAND_NUM_ADDR_CYCLES 0xc4 ++#define NAND_BUSY_CHECK_WAIT_CNT 0xc8 ++#define NAND_FLASH_FEATURES 0xf64 ++ ++/* QSPI NAND config reg bits */ ++#define LOAD_CLK_CNTR_INIT_EN BIT(28) ++#define CLK_CNTR_INIT_VAL_VEC 0x924 ++#define CLK_CNTR_INIT_VAL_VEC_MASK GENMASK(27, 16) ++#define FEA_STATUS_DEV_ADDR 0xc0 ++#define FEA_STATUS_DEV_ADDR_MASK GENMASK(15, 8) ++#define SPI_CFG BIT(0) ++#define SPI_NUM_ADDR 0xDA4DB ++#define SPI_WAIT_CNT 0x10 ++#define QPIC_QSPI_NUM_CS 1 ++#define SPI_TRANSFER_MODE_x1 BIT(29) ++#define SPI_TRANSFER_MODE_x4 (3 << 29) ++#define SPI_WP BIT(28) ++#define SPI_HOLD BIT(27) ++#define QPIC_SET_FEATURE BIT(31) ++ ++#define SPINAND_RESET 0xff ++#define SPINAND_READID 0x9f ++#define SPINAND_GET_FEATURE 0x0f ++#define SPINAND_SET_FEATURE 0x1f ++#define SPINAND_READ 0x13 ++#define SPINAND_ERASE 0xd8 ++#define SPINAND_WRITE_EN 0x06 ++#define SPINAND_PROGRAM_EXECUTE 0x10 ++#define SPINAND_PROGRAM_LOAD 0x84 ++ ++#define ACC_FEATURE 0xe ++#define BAD_BLOCK_MARKER_SIZE 0x2 ++#define OOB_BUF_SIZE 128 ++#define ecceng_to_qspi(eng) container_of(eng, struct qpic_spi_nand, ecc_eng) ++struct qpic_snand_op { ++ u32 cmd_reg; ++ u32 addr1_reg; ++ u32 addr2_reg; ++}; ++ ++struct snandc_read_status { ++ __le32 snandc_flash; ++ __le32 snandc_buffer; ++ __le32 snandc_erased_cw; ++}; ++ ++/* ++ * ECC state struct ++ * @corrected: ECC corrected ++ * @bitflips: Max bit flip ++ * @failed: ECC failed ++ */ ++struct qcom_ecc_stats { ++ u32 corrected; ++ u32 bitflips; ++ u32 failed; ++}; ++ ++struct qpic_ecc { ++ struct device *dev; ++ int ecc_bytes_hw; ++ int spare_bytes; ++ int bbm_size; ++ int ecc_mode; ++ int bytes; ++ int steps; ++ int step_size; ++ int strength; ++ int cw_size; ++ int cw_data; ++ u32 cfg0; ++ u32 cfg1; ++ u32 cfg0_raw; ++ u32 cfg1_raw; ++ u32 ecc_buf_cfg; ++ u32 ecc_bch_cfg; ++ u32 clrflashstatus; ++ u32 clrreadstatus; ++ bool bch_enabled; ++}; ++ ++struct qpic_spi_nand { ++ struct qcom_nand_controller *snandc; ++ struct spi_controller *ctlr; ++ struct mtd_info *mtd; ++ struct clk *iomacro_clk; ++ struct qpic_ecc *ecc; ++ struct qcom_ecc_stats ecc_stats; ++ struct nand_ecc_engine ecc_eng; ++ u8 *data_buf; ++ u8 *oob_buf; ++ u32 wlen; ++ __le32 addr1; ++ __le32 addr2; ++ __le32 cmd; ++ u32 num_cw; ++ bool oob_rw; ++ bool page_rw; ++ bool raw_rw; ++}; ++ ++static void qcom_spi_set_read_loc_first(struct qcom_nand_controller *snandc, ++ int reg, int cw_offset, int read_size, ++ int is_last_read_loc) ++{ ++ __le32 locreg_val; ++ u32 val = (((cw_offset) << READ_LOCATION_OFFSET) | ++ ((read_size) << READ_LOCATION_SIZE) | ((is_last_read_loc) ++ << READ_LOCATION_LAST)); ++ ++ locreg_val = cpu_to_le32(val); ++ ++ if (reg == NAND_READ_LOCATION_0) ++ snandc->regs->read_location0 = locreg_val; ++ else if (reg == NAND_READ_LOCATION_1) ++ snandc->regs->read_location1 = locreg_val; ++ else if (reg == NAND_READ_LOCATION_2) ++ snandc->regs->read_location1 = locreg_val; ++ else if (reg == NAND_READ_LOCATION_3) ++ snandc->regs->read_location3 = locreg_val; ++} ++ ++static void qcom_spi_set_read_loc_last(struct qcom_nand_controller *snandc, ++ int reg, int cw_offset, int read_size, ++ int is_last_read_loc) ++{ ++ __le32 locreg_val; ++ u32 val = (((cw_offset) << READ_LOCATION_OFFSET) | ++ ((read_size) << 
READ_LOCATION_SIZE) | ((is_last_read_loc) ++ << READ_LOCATION_LAST)); ++ ++ locreg_val = cpu_to_le32(val); ++ ++ if (reg == NAND_READ_LOCATION_LAST_CW_0) ++ snandc->regs->read_location_last0 = locreg_val; ++ else if (reg == NAND_READ_LOCATION_LAST_CW_1) ++ snandc->regs->read_location_last1 = locreg_val; ++ else if (reg == NAND_READ_LOCATION_LAST_CW_2) ++ snandc->regs->read_location_last2 = locreg_val; ++ else if (reg == NAND_READ_LOCATION_LAST_CW_3) ++ snandc->regs->read_location_last3 = locreg_val; ++} ++ ++static struct qcom_nand_controller *nand_to_qcom_snand(struct nand_device *nand) ++{ ++ struct nand_ecc_engine *eng = nand->ecc.engine; ++ struct qpic_spi_nand *qspi = ecceng_to_qspi(eng); ++ ++ return qspi->snandc; ++} ++ ++static int qcom_spi_init(struct qcom_nand_controller *snandc) ++{ ++ u32 snand_cfg_val = 0x0; ++ int ret; ++ ++ snand_cfg_val = FIELD_PREP(CLK_CNTR_INIT_VAL_VEC_MASK, CLK_CNTR_INIT_VAL_VEC) | ++ FIELD_PREP(LOAD_CLK_CNTR_INIT_EN, 0) | ++ FIELD_PREP(FEA_STATUS_DEV_ADDR_MASK, FEA_STATUS_DEV_ADDR) | ++ FIELD_PREP(SPI_CFG, 0); ++ ++ snandc->regs->spi_cfg = cpu_to_le32(snand_cfg_val); ++ snandc->regs->num_addr_cycle = cpu_to_le32(SPI_NUM_ADDR); ++ snandc->regs->busy_wait_cnt = cpu_to_le32(SPI_WAIT_CNT); ++ ++ qcom_write_reg_dma(snandc, &snandc->regs->spi_cfg, NAND_FLASH_SPI_CFG, 1, 0); ++ ++ snand_cfg_val &= ~LOAD_CLK_CNTR_INIT_EN; ++ snandc->regs->spi_cfg = cpu_to_le32(snand_cfg_val); ++ ++ qcom_write_reg_dma(snandc, &snandc->regs->spi_cfg, NAND_FLASH_SPI_CFG, 1, 0); ++ ++ qcom_write_reg_dma(snandc, &snandc->regs->num_addr_cycle, NAND_NUM_ADDR_CYCLES, 1, 0); ++ qcom_write_reg_dma(snandc, &snandc->regs->busy_wait_cnt, NAND_BUSY_CHECK_WAIT_CNT, 1, ++ NAND_BAM_NEXT_SGL); ++ ++ ret = qcom_submit_descs(snandc); ++ if (ret) { ++ dev_err(snandc->dev, "failure in submitting spi init descriptor\n"); ++ return ret; ++ } ++ ++ return ret; ++} ++ ++static int qcom_spi_ooblayout_ecc(struct mtd_info *mtd, int section, ++ struct mtd_oob_region *oobregion) ++{ ++ struct nand_device *nand = mtd_to_nanddev(mtd); ++ struct qcom_nand_controller *snandc = nand_to_qcom_snand(nand); ++ struct qpic_ecc *qecc = snandc->qspi->ecc; ++ ++ if (section > 1) ++ return -ERANGE; ++ ++ oobregion->length = qecc->ecc_bytes_hw + qecc->spare_bytes; ++ oobregion->offset = mtd->oobsize - oobregion->length; ++ ++ return 0; ++} ++ ++static int qcom_spi_ooblayout_free(struct mtd_info *mtd, int section, ++ struct mtd_oob_region *oobregion) ++{ ++ struct nand_device *nand = mtd_to_nanddev(mtd); ++ struct qcom_nand_controller *snandc = nand_to_qcom_snand(nand); ++ struct qpic_ecc *qecc = snandc->qspi->ecc; ++ ++ if (section) ++ return -ERANGE; ++ ++ oobregion->length = qecc->steps * 4; ++ oobregion->offset = ((qecc->steps - 1) * qecc->bytes) + qecc->bbm_size; ++ ++ return 0; ++} ++ ++static const struct mtd_ooblayout_ops qcom_spi_ooblayout = { ++ .ecc = qcom_spi_ooblayout_ecc, ++ .free = qcom_spi_ooblayout_free, ++}; ++ ++static int qcom_spi_ecc_init_ctx_pipelined(struct nand_device *nand) ++{ ++ struct qcom_nand_controller *snandc = nand_to_qcom_snand(nand); ++ struct nand_ecc_props *conf = &nand->ecc.ctx.conf; ++ struct mtd_info *mtd = nanddev_to_mtd(nand); ++ int cwperpage, bad_block_byte; ++ struct qpic_ecc *ecc_cfg; ++ ++ cwperpage = mtd->writesize / NANDC_STEP_SIZE; ++ snandc->qspi->num_cw = cwperpage; ++ ++ ecc_cfg = kzalloc(sizeof(*ecc_cfg), GFP_KERNEL); ++ if (!ecc_cfg) ++ return -ENOMEM; ++ snandc->qspi->oob_buf = kzalloc(mtd->writesize + mtd->oobsize, ++ GFP_KERNEL); ++ if (!snandc->qspi->oob_buf) ++ 
return -ENOMEM; ++ ++ memset(snandc->qspi->oob_buf, 0xff, mtd->writesize + mtd->oobsize); ++ ++ nand->ecc.ctx.priv = ecc_cfg; ++ snandc->qspi->mtd = mtd; ++ ++ ecc_cfg->ecc_bytes_hw = 7; ++ ecc_cfg->spare_bytes = 4; ++ ecc_cfg->bbm_size = 1; ++ ecc_cfg->bch_enabled = true; ++ ecc_cfg->bytes = ecc_cfg->ecc_bytes_hw + ecc_cfg->spare_bytes + ecc_cfg->bbm_size; ++ ++ ecc_cfg->steps = 4; ++ ecc_cfg->strength = 4; ++ ecc_cfg->step_size = 512; ++ ecc_cfg->cw_data = 516; ++ ecc_cfg->cw_size = ecc_cfg->cw_data + ecc_cfg->bytes; ++ bad_block_byte = mtd->writesize - ecc_cfg->cw_size * (cwperpage - 1) + 1; ++ ++ mtd_set_ooblayout(mtd, &qcom_spi_ooblayout); ++ ++ ecc_cfg->cfg0 = FIELD_PREP(CW_PER_PAGE_MASK, (cwperpage - 1)) | ++ FIELD_PREP(UD_SIZE_BYTES_MASK, ecc_cfg->cw_data) | ++ FIELD_PREP(DISABLE_STATUS_AFTER_WRITE, 1) | ++ FIELD_PREP(NUM_ADDR_CYCLES_MASK, 3) | ++ FIELD_PREP(ECC_PARITY_SIZE_BYTES_RS, ecc_cfg->ecc_bytes_hw) | ++ FIELD_PREP(STATUS_BFR_READ, 0) | ++ FIELD_PREP(SET_RD_MODE_AFTER_STATUS, 1) | ++ FIELD_PREP(SPARE_SIZE_BYTES_MASK, ecc_cfg->spare_bytes); ++ ++ ecc_cfg->cfg1 = FIELD_PREP(NAND_RECOVERY_CYCLES_MASK, 0) | ++ FIELD_PREP(CS_ACTIVE_BSY, 0) | ++ FIELD_PREP(BAD_BLOCK_BYTE_NUM_MASK, bad_block_byte) | ++ FIELD_PREP(BAD_BLOCK_IN_SPARE_AREA, 0) | ++ FIELD_PREP(WR_RD_BSY_GAP_MASK, 20) | ++ FIELD_PREP(WIDE_FLASH, 0) | ++ FIELD_PREP(ENABLE_BCH_ECC, ecc_cfg->bch_enabled); ++ ++ ecc_cfg->cfg0_raw = FIELD_PREP(CW_PER_PAGE_MASK, (cwperpage - 1)) | ++ FIELD_PREP(NUM_ADDR_CYCLES_MASK, 3) | ++ FIELD_PREP(UD_SIZE_BYTES_MASK, ecc_cfg->cw_size) | ++ FIELD_PREP(SPARE_SIZE_BYTES_MASK, 0); ++ ++ ecc_cfg->cfg1_raw = FIELD_PREP(NAND_RECOVERY_CYCLES_MASK, 0) | ++ FIELD_PREP(CS_ACTIVE_BSY, 0) | ++ FIELD_PREP(BAD_BLOCK_BYTE_NUM_MASK, 17) | ++ FIELD_PREP(BAD_BLOCK_IN_SPARE_AREA, 1) | ++ FIELD_PREP(WR_RD_BSY_GAP_MASK, 20) | ++ FIELD_PREP(WIDE_FLASH, 0) | ++ FIELD_PREP(DEV0_CFG1_ECC_DISABLE, 1); ++ ++ ecc_cfg->ecc_bch_cfg = FIELD_PREP(ECC_CFG_ECC_DISABLE, !ecc_cfg->bch_enabled) | ++ FIELD_PREP(ECC_SW_RESET, 0) | ++ FIELD_PREP(ECC_NUM_DATA_BYTES_MASK, ecc_cfg->cw_data) | ++ FIELD_PREP(ECC_FORCE_CLK_OPEN, 1) | ++ FIELD_PREP(ECC_MODE_MASK, 0) | ++ FIELD_PREP(ECC_PARITY_SIZE_BYTES_BCH_MASK, ecc_cfg->ecc_bytes_hw); ++ ++ ecc_cfg->ecc_buf_cfg = 0x203 << NUM_STEPS; ++ ecc_cfg->clrflashstatus = FS_READY_BSY_N; ++ ecc_cfg->clrreadstatus = 0xc0; ++ ++ conf->step_size = ecc_cfg->step_size; ++ conf->strength = ecc_cfg->strength; ++ ++ snandc->regs->erased_cw_detect_cfg_clr = cpu_to_le32(CLR_ERASED_PAGE_DET); ++ snandc->regs->erased_cw_detect_cfg_set = cpu_to_le32(SET_ERASED_PAGE_DET); ++ ++ dev_dbg(snandc->dev, "ECC strength: %u bits per %u bytes\n", ++ ecc_cfg->strength, ecc_cfg->step_size); ++ ++ return 0; ++} ++ ++static void qcom_spi_ecc_cleanup_ctx_pipelined(struct nand_device *nand) ++{ ++ struct qpic_ecc *ecc_cfg = nand_to_ecc_ctx(nand); ++ ++ kfree(ecc_cfg); ++} ++ ++static int qcom_spi_ecc_prepare_io_req_pipelined(struct nand_device *nand, ++ struct nand_page_io_req *req) ++{ ++ struct qcom_nand_controller *snandc = nand_to_qcom_snand(nand); ++ struct qpic_ecc *ecc_cfg = nand_to_ecc_ctx(nand); ++ ++ snandc->qspi->ecc = ecc_cfg; ++ snandc->qspi->raw_rw = false; ++ snandc->qspi->oob_rw = false; ++ snandc->qspi->page_rw = false; ++ ++ if (req->datalen) ++ snandc->qspi->page_rw = true; ++ ++ if (req->ooblen) ++ snandc->qspi->oob_rw = true; ++ ++ if (req->mode == MTD_OPS_RAW) ++ snandc->qspi->raw_rw = true; ++ ++ return 0; ++} ++ ++static int qcom_spi_ecc_finish_io_req_pipelined(struct nand_device *nand, ++ struct 
nand_page_io_req *req) ++{ ++ struct qcom_nand_controller *snandc = nand_to_qcom_snand(nand); ++ struct mtd_info *mtd = nanddev_to_mtd(nand); ++ ++ if (req->mode == MTD_OPS_RAW || req->type != NAND_PAGE_READ) ++ return 0; ++ ++ if (snandc->qspi->ecc_stats.failed) ++ mtd->ecc_stats.failed += snandc->qspi->ecc_stats.failed; ++ else ++ mtd->ecc_stats.corrected += snandc->qspi->ecc_stats.corrected; ++ ++ if (snandc->qspi->ecc_stats.failed) ++ return -EBADMSG; ++ else ++ return snandc->qspi->ecc_stats.bitflips; ++} ++ ++static struct nand_ecc_engine_ops qcom_spi_ecc_engine_ops_pipelined = { ++ .init_ctx = qcom_spi_ecc_init_ctx_pipelined, ++ .cleanup_ctx = qcom_spi_ecc_cleanup_ctx_pipelined, ++ .prepare_io_req = qcom_spi_ecc_prepare_io_req_pipelined, ++ .finish_io_req = qcom_spi_ecc_finish_io_req_pipelined, ++}; ++ ++/* helper to configure location register values */ ++static void qcom_spi_set_read_loc(struct qcom_nand_controller *snandc, int cw, int reg, ++ int cw_offset, int read_size, int is_last_read_loc) ++{ ++ int reg_base = NAND_READ_LOCATION_0; ++ int num_cw = snandc->qspi->num_cw; ++ ++ if (cw == (num_cw - 1)) ++ reg_base = NAND_READ_LOCATION_LAST_CW_0; ++ ++ reg_base += reg * 4; ++ ++ if (cw == (num_cw - 1)) ++ return qcom_spi_set_read_loc_last(snandc, reg_base, cw_offset, ++ read_size, is_last_read_loc); ++ else ++ return qcom_spi_set_read_loc_first(snandc, reg_base, cw_offset, ++ read_size, is_last_read_loc); ++} ++ ++static void ++qcom_spi_config_cw_read(struct qcom_nand_controller *snandc, bool use_ecc, int cw) ++{ ++ __le32 *reg = &snandc->regs->read_location0; ++ int num_cw = snandc->qspi->num_cw; ++ ++ qcom_write_reg_dma(snandc, reg, NAND_READ_LOCATION_0, 4, NAND_BAM_NEXT_SGL); ++ if (cw == (num_cw - 1)) { ++ reg = &snandc->regs->read_location_last0; ++ qcom_write_reg_dma(snandc, reg, NAND_READ_LOCATION_LAST_CW_0, 4, ++ NAND_BAM_NEXT_SGL); ++ } ++ ++ qcom_write_reg_dma(snandc, &snandc->regs->cmd, NAND_FLASH_CMD, 1, NAND_BAM_NEXT_SGL); ++ qcom_write_reg_dma(snandc, &snandc->regs->exec, NAND_EXEC_CMD, 1, NAND_BAM_NEXT_SGL); ++ ++ qcom_read_reg_dma(snandc, NAND_FLASH_STATUS, 2, 0); ++ qcom_read_reg_dma(snandc, NAND_ERASED_CW_DETECT_STATUS, 1, ++ NAND_BAM_NEXT_SGL); ++} ++ ++static int qcom_spi_block_erase(struct qcom_nand_controller *snandc) ++{ ++ struct qpic_ecc *ecc_cfg = snandc->qspi->ecc; ++ int ret; ++ ++ snandc->buf_count = 0; ++ snandc->buf_start = 0; ++ qcom_clear_read_regs(snandc); ++ qcom_clear_bam_transaction(snandc); ++ ++ snandc->regs->cmd = snandc->qspi->cmd; ++ snandc->regs->addr0 = snandc->qspi->addr1; ++ snandc->regs->addr1 = snandc->qspi->addr2; ++ snandc->regs->cfg0 = cpu_to_le32(ecc_cfg->cfg0_raw & ~(7 << CW_PER_PAGE)); ++ snandc->regs->cfg1 = cpu_to_le32(ecc_cfg->cfg1_raw); ++ snandc->regs->exec = cpu_to_le32(1); ++ ++ qcom_write_reg_dma(snandc, &snandc->regs->cmd, NAND_FLASH_CMD, 3, NAND_BAM_NEXT_SGL); ++ qcom_write_reg_dma(snandc, &snandc->regs->cfg0, NAND_DEV0_CFG0, 2, NAND_BAM_NEXT_SGL); ++ qcom_write_reg_dma(snandc, &snandc->regs->exec, NAND_EXEC_CMD, 1, NAND_BAM_NEXT_SGL); ++ ++ ret = qcom_submit_descs(snandc); ++ if (ret) { ++ dev_err(snandc->dev, "failure to erase block\n"); ++ return ret; ++ } ++ ++ return 0; ++} ++ ++static void qcom_spi_config_single_cw_page_read(struct qcom_nand_controller *snandc, ++ bool use_ecc, int cw) ++{ ++ __le32 *reg = &snandc->regs->read_location0; ++ int num_cw = snandc->qspi->num_cw; ++ ++ qcom_write_reg_dma(snandc, &snandc->regs->addr0, NAND_ADDR0, 2, 0); ++ qcom_write_reg_dma(snandc, &snandc->regs->cfg0, 
NAND_DEV0_CFG0, 3, 0); ++ qcom_write_reg_dma(snandc, &snandc->regs->erased_cw_detect_cfg_clr, ++ NAND_ERASED_CW_DETECT_CFG, 1, 0); ++ qcom_write_reg_dma(snandc, &snandc->regs->erased_cw_detect_cfg_set, ++ NAND_ERASED_CW_DETECT_CFG, 1, ++ NAND_ERASED_CW_SET | NAND_BAM_NEXT_SGL); ++ ++ if (cw == (num_cw - 1)) { ++ reg = &snandc->regs->read_location_last0; ++ qcom_write_reg_dma(snandc, reg, NAND_READ_LOCATION_LAST_CW_0, 4, NAND_BAM_NEXT_SGL); ++ } ++ qcom_write_reg_dma(snandc, &snandc->regs->cmd, NAND_FLASH_CMD, 1, NAND_BAM_NEXT_SGL); ++ qcom_write_reg_dma(snandc, &snandc->regs->exec, NAND_EXEC_CMD, 1, NAND_BAM_NEXT_SGL); ++ ++ qcom_read_reg_dma(snandc, NAND_FLASH_STATUS, 1, 0); ++} ++ ++static int qcom_spi_read_last_cw(struct qcom_nand_controller *snandc, ++ const struct spi_mem_op *op) ++{ ++ struct qpic_ecc *ecc_cfg = snandc->qspi->ecc; ++ struct mtd_info *mtd = snandc->qspi->mtd; ++ int size, ret = 0; ++ int col, bbpos; ++ u32 cfg0, cfg1, ecc_bch_cfg; ++ u32 num_cw = snandc->qspi->num_cw; ++ ++ qcom_clear_bam_transaction(snandc); ++ qcom_clear_read_regs(snandc); ++ ++ size = ecc_cfg->cw_size; ++ col = ecc_cfg->cw_size * (num_cw - 1); ++ ++ memset(snandc->data_buffer, 0xff, size); ++ snandc->regs->addr0 = (snandc->qspi->addr1 | cpu_to_le32(col)); ++ snandc->regs->addr1 = snandc->qspi->addr2; ++ ++ cfg0 = (ecc_cfg->cfg0_raw & ~(7U << CW_PER_PAGE)) | ++ 0 << CW_PER_PAGE; ++ cfg1 = ecc_cfg->cfg1_raw; ++ ecc_bch_cfg = 1 << ECC_CFG_ECC_DISABLE; ++ ++ snandc->regs->cmd = snandc->qspi->cmd; ++ snandc->regs->cfg0 = cpu_to_le32(cfg0); ++ snandc->regs->cfg1 = cpu_to_le32(cfg1); ++ snandc->regs->ecc_bch_cfg = cpu_to_le32(ecc_bch_cfg); ++ snandc->regs->clrflashstatus = cpu_to_le32(ecc_cfg->clrflashstatus); ++ snandc->regs->clrreadstatus = cpu_to_le32(ecc_cfg->clrreadstatus); ++ snandc->regs->exec = cpu_to_le32(1); ++ ++ qcom_spi_set_read_loc(snandc, num_cw - 1, 0, 0, ecc_cfg->cw_size, 1); ++ ++ qcom_spi_config_single_cw_page_read(snandc, false, num_cw - 1); ++ ++ qcom_read_data_dma(snandc, FLASH_BUF_ACC, snandc->data_buffer, size, 0); ++ ++ ret = qcom_submit_descs(snandc); ++ if (ret) { ++ dev_err(snandc->dev, "failed to read last cw\n"); ++ return ret; ++ } ++ ++ qcom_nandc_dev_to_mem(snandc, true); ++ u32 flash = le32_to_cpu(snandc->reg_read_buf[0]); ++ ++ if (flash & (FS_OP_ERR | FS_MPU_ERR)) ++ return -EIO; ++ ++ bbpos = mtd->writesize - ecc_cfg->cw_size * (num_cw - 1); ++ ++ if (snandc->data_buffer[bbpos] == 0xff) ++ snandc->data_buffer[bbpos + 1] = 0xff; ++ if (snandc->data_buffer[bbpos] != 0xff) ++ snandc->data_buffer[bbpos + 1] = snandc->data_buffer[bbpos]; ++ ++ memcpy(op->data.buf.in, snandc->data_buffer + bbpos, op->data.nbytes); ++ ++ return ret; ++} ++ ++static int qcom_spi_check_error(struct qcom_nand_controller *snandc, u8 *data_buf, u8 *oob_buf) ++{ ++ struct snandc_read_status *buf; ++ struct qpic_ecc *ecc_cfg = snandc->qspi->ecc; ++ int i, num_cw = snandc->qspi->num_cw; ++ bool flash_op_err = false, erased; ++ unsigned int max_bitflips = 0; ++ unsigned int uncorrectable_cws = 0; ++ ++ snandc->qspi->ecc_stats.failed = 0; ++ snandc->qspi->ecc_stats.corrected = 0; ++ ++ qcom_nandc_dev_to_mem(snandc, true); ++ buf = (struct snandc_read_status *)snandc->reg_read_buf; ++ ++ for (i = 0; i < num_cw; i++, buf++) { ++ u32 flash, buffer, erased_cw; ++ int data_len, oob_len; ++ ++ if (i == (num_cw - 1)) { ++ data_len = NANDC_STEP_SIZE - ((num_cw - 1) << 2); ++ oob_len = num_cw << 2; ++ } else { ++ data_len = ecc_cfg->cw_data; ++ oob_len = 0; ++ } ++ ++ flash = le32_to_cpu(buf->snandc_flash); ++ 
buffer = le32_to_cpu(buf->snandc_buffer); ++ erased_cw = le32_to_cpu(buf->snandc_erased_cw); ++ ++ if ((flash & FS_OP_ERR) && (buffer & BS_UNCORRECTABLE_BIT)) { ++ if (ecc_cfg->bch_enabled) ++ erased = (erased_cw & ERASED_CW) == ERASED_CW; ++ else ++ erased = false; ++ ++ if (!erased) ++ uncorrectable_cws |= BIT(i); ++ ++ } else if (flash & (FS_OP_ERR | FS_MPU_ERR)) { ++ flash_op_err = true; ++ } else { ++ unsigned int stat; ++ ++ stat = buffer & BS_CORRECTABLE_ERR_MSK; ++ snandc->qspi->ecc_stats.corrected += stat; ++ max_bitflips = max(max_bitflips, stat); ++ } ++ ++ if (data_buf) ++ data_buf += data_len; ++ if (oob_buf) ++ oob_buf += oob_len + ecc_cfg->bytes; ++ } ++ ++ if (flash_op_err) ++ return -EIO; ++ ++ if (!uncorrectable_cws) ++ snandc->qspi->ecc_stats.bitflips = max_bitflips; ++ else ++ snandc->qspi->ecc_stats.failed++; ++ ++ return 0; ++} ++ ++static int qcom_spi_check_raw_flash_errors(struct qcom_nand_controller *snandc, int cw_cnt) ++{ ++ int i; ++ ++ qcom_nandc_dev_to_mem(snandc, true); ++ ++ for (i = 0; i < cw_cnt; i++) { ++ u32 flash = le32_to_cpu(snandc->reg_read_buf[i]); ++ ++ if (flash & (FS_OP_ERR | FS_MPU_ERR)) ++ return -EIO; ++ } ++ ++ return 0; ++} ++ ++static int qcom_spi_read_cw_raw(struct qcom_nand_controller *snandc, u8 *data_buf, ++ u8 *oob_buf, int cw) ++{ ++ struct qpic_ecc *ecc_cfg = snandc->qspi->ecc; ++ struct mtd_info *mtd = snandc->qspi->mtd; ++ int data_size1, data_size2, oob_size1, oob_size2; ++ int ret, reg_off = FLASH_BUF_ACC, read_loc = 0; ++ int raw_cw = cw; ++ u32 cfg0, cfg1, ecc_bch_cfg, num_cw = snandc->qspi->num_cw; ++ int col; ++ ++ snandc->buf_count = 0; ++ snandc->buf_start = 0; ++ qcom_clear_read_regs(snandc); ++ qcom_clear_bam_transaction(snandc); ++ raw_cw = num_cw - 1; ++ ++ cfg0 = (ecc_cfg->cfg0_raw & ~(7U << CW_PER_PAGE)) | ++ 0 << CW_PER_PAGE; ++ cfg1 = ecc_cfg->cfg1_raw; ++ ecc_bch_cfg = ECC_CFG_ECC_DISABLE; ++ ++ col = ecc_cfg->cw_size * cw; ++ ++ snandc->regs->addr0 = (snandc->qspi->addr1 | cpu_to_le32(col)); ++ snandc->regs->addr1 = snandc->qspi->addr2; ++ snandc->regs->cmd = snandc->qspi->cmd; ++ snandc->regs->cfg0 = cpu_to_le32(cfg0); ++ snandc->regs->cfg1 = cpu_to_le32(cfg1); ++ snandc->regs->ecc_bch_cfg = cpu_to_le32(ecc_bch_cfg); ++ snandc->regs->clrflashstatus = cpu_to_le32(ecc_cfg->clrflashstatus); ++ snandc->regs->clrreadstatus = cpu_to_le32(ecc_cfg->clrreadstatus); ++ snandc->regs->exec = cpu_to_le32(1); ++ ++ qcom_spi_set_read_loc(snandc, raw_cw, 0, 0, ecc_cfg->cw_size, 1); ++ ++ qcom_write_reg_dma(snandc, &snandc->regs->addr0, NAND_ADDR0, 2, 0); ++ qcom_write_reg_dma(snandc, &snandc->regs->cfg0, NAND_DEV0_CFG0, 3, 0); ++ qcom_write_reg_dma(snandc, &snandc->regs->ecc_buf_cfg, NAND_EBI2_ECC_BUF_CFG, 1, 0); ++ ++ qcom_write_reg_dma(snandc, &snandc->regs->erased_cw_detect_cfg_clr, ++ NAND_ERASED_CW_DETECT_CFG, 1, 0); ++ qcom_write_reg_dma(snandc, &snandc->regs->erased_cw_detect_cfg_set, ++ NAND_ERASED_CW_DETECT_CFG, 1, ++ NAND_ERASED_CW_SET | NAND_BAM_NEXT_SGL); ++ ++ data_size1 = mtd->writesize - ecc_cfg->cw_size * (num_cw - 1); ++ oob_size1 = ecc_cfg->bbm_size; ++ ++ if (cw == (num_cw - 1)) { ++ data_size2 = NANDC_STEP_SIZE - data_size1 - ++ ((num_cw - 1) * 4); ++ oob_size2 = (num_cw * 4) + ecc_cfg->ecc_bytes_hw + ++ ecc_cfg->spare_bytes; ++ } else { ++ data_size2 = ecc_cfg->cw_data - data_size1; ++ oob_size2 = ecc_cfg->ecc_bytes_hw + ecc_cfg->spare_bytes; ++ } ++ ++ qcom_spi_set_read_loc(snandc, cw, 0, read_loc, data_size1, 0); ++ read_loc += data_size1; ++ ++ qcom_spi_set_read_loc(snandc, cw, 1, read_loc, oob_size1, 0); 
++ read_loc += oob_size1; ++ ++ qcom_spi_set_read_loc(snandc, cw, 2, read_loc, data_size2, 0); ++ read_loc += data_size2; ++ ++ qcom_spi_set_read_loc(snandc, cw, 3, read_loc, oob_size2, 1); ++ ++ qcom_spi_config_cw_read(snandc, false, raw_cw); ++ ++ qcom_read_data_dma(snandc, reg_off, data_buf, data_size1, 0); ++ reg_off += data_size1; ++ ++ qcom_read_data_dma(snandc, reg_off, oob_buf, oob_size1, 0); ++ reg_off += oob_size1; ++ ++ qcom_read_data_dma(snandc, reg_off, data_buf + data_size1, data_size2, 0); ++ reg_off += data_size2; ++ ++ qcom_read_data_dma(snandc, reg_off, oob_buf + oob_size1, oob_size2, 0); ++ ++ ret = qcom_submit_descs(snandc); ++ if (ret) { ++ dev_err(snandc->dev, "failure to read raw cw %d\n", cw); ++ return ret; ++ } ++ ++ return qcom_spi_check_raw_flash_errors(snandc, 1); ++} ++ ++static int qcom_spi_read_page_raw(struct qcom_nand_controller *snandc, ++ const struct spi_mem_op *op) ++{ ++ struct qpic_ecc *ecc_cfg = snandc->qspi->ecc; ++ u8 *data_buf = NULL, *oob_buf = NULL; ++ int ret, cw; ++ u32 num_cw = snandc->qspi->num_cw; ++ ++ if (snandc->qspi->page_rw) ++ data_buf = op->data.buf.in; ++ ++ oob_buf = snandc->qspi->oob_buf; ++ memset(oob_buf, 0xff, OOB_BUF_SIZE); ++ ++ for (cw = 0; cw < num_cw; cw++) { ++ ret = qcom_spi_read_cw_raw(snandc, data_buf, oob_buf, cw); ++ if (ret) ++ return ret; ++ ++ if (data_buf) ++ data_buf += ecc_cfg->cw_data; ++ if (oob_buf) ++ oob_buf += ecc_cfg->bytes; ++ } ++ ++ return 0; ++} ++ ++static int qcom_spi_read_page_ecc(struct qcom_nand_controller *snandc, ++ const struct spi_mem_op *op) ++{ ++ struct qpic_ecc *ecc_cfg = snandc->qspi->ecc; ++ u8 *data_buf = NULL, *data_buf_start, *oob_buf = NULL, *oob_buf_start; ++ int ret, i; ++ u32 cfg0, cfg1, ecc_bch_cfg, num_cw = snandc->qspi->num_cw; ++ ++ data_buf = op->data.buf.in; ++ data_buf_start = data_buf; ++ ++ oob_buf = snandc->qspi->oob_buf; ++ oob_buf_start = oob_buf; ++ ++ snandc->buf_count = 0; ++ snandc->buf_start = 0; ++ qcom_clear_read_regs(snandc); ++ ++ cfg0 = (ecc_cfg->cfg0 & ~(7U << CW_PER_PAGE)) | ++ (num_cw - 1) << CW_PER_PAGE; ++ cfg1 = ecc_cfg->cfg1; ++ ecc_bch_cfg = ecc_cfg->ecc_bch_cfg; ++ ++ snandc->regs->addr0 = snandc->qspi->addr1; ++ snandc->regs->addr1 = snandc->qspi->addr2; ++ snandc->regs->cmd = snandc->qspi->cmd; ++ snandc->regs->cfg0 = cpu_to_le32(cfg0); ++ snandc->regs->cfg1 = cpu_to_le32(cfg1); ++ snandc->regs->ecc_bch_cfg = cpu_to_le32(ecc_bch_cfg); ++ snandc->regs->clrflashstatus = cpu_to_le32(ecc_cfg->clrflashstatus); ++ snandc->regs->clrreadstatus = cpu_to_le32(ecc_cfg->clrreadstatus); ++ snandc->regs->exec = cpu_to_le32(1); ++ ++ qcom_spi_set_read_loc(snandc, 0, 0, 0, ecc_cfg->cw_data, 1); ++ ++ qcom_clear_bam_transaction(snandc); ++ ++ qcom_write_reg_dma(snandc, &snandc->regs->addr0, NAND_ADDR0, 2, 0); ++ qcom_write_reg_dma(snandc, &snandc->regs->cfg0, NAND_DEV0_CFG0, 3, 0); ++ qcom_write_reg_dma(snandc, &snandc->regs->erased_cw_detect_cfg_clr, ++ NAND_ERASED_CW_DETECT_CFG, 1, 0); ++ qcom_write_reg_dma(snandc, &snandc->regs->erased_cw_detect_cfg_set, ++ NAND_ERASED_CW_DETECT_CFG, 1, ++ NAND_ERASED_CW_SET | NAND_BAM_NEXT_SGL); ++ ++ for (i = 0; i < num_cw; i++) { ++ int data_size, oob_size; ++ ++ if (i == (num_cw - 1)) { ++ data_size = 512 - ((num_cw - 1) << 2); ++ oob_size = (num_cw << 2) + ecc_cfg->ecc_bytes_hw + ++ ecc_cfg->spare_bytes; ++ } else { ++ data_size = ecc_cfg->cw_data; ++ oob_size = ecc_cfg->ecc_bytes_hw + ecc_cfg->spare_bytes; ++ } ++ ++ if (data_buf && oob_buf) { ++ qcom_spi_set_read_loc(snandc, i, 0, 0, data_size, 0); ++ 
qcom_spi_set_read_loc(snandc, i, 1, data_size, oob_size, 1); ++ } else if (data_buf) { ++ qcom_spi_set_read_loc(snandc, i, 0, 0, data_size, 1); ++ } else { ++ qcom_spi_set_read_loc(snandc, i, 0, data_size, oob_size, 1); ++ } ++ ++ qcom_spi_config_cw_read(snandc, true, i); ++ ++ if (data_buf) ++ qcom_read_data_dma(snandc, FLASH_BUF_ACC, data_buf, ++ data_size, 0); ++ if (oob_buf) { ++ int j; ++ ++ for (j = 0; j < ecc_cfg->bbm_size; j++) ++ *oob_buf++ = 0xff; ++ ++ qcom_read_data_dma(snandc, FLASH_BUF_ACC + data_size, ++ oob_buf, oob_size, 0); ++ } ++ ++ if (data_buf) ++ data_buf += data_size; ++ if (oob_buf) ++ oob_buf += oob_size; ++ } ++ ++ ret = qcom_submit_descs(snandc); ++ if (ret) { ++ dev_err(snandc->dev, "failure to read page\n"); ++ return ret; ++ } ++ ++ return qcom_spi_check_error(snandc, data_buf_start, oob_buf_start); ++} ++ ++static int qcom_spi_read_page_oob(struct qcom_nand_controller *snandc, ++ const struct spi_mem_op *op) ++{ ++ struct qpic_ecc *ecc_cfg = snandc->qspi->ecc; ++ u8 *data_buf = NULL, *data_buf_start, *oob_buf = NULL, *oob_buf_start; ++ int ret, i; ++ u32 cfg0, cfg1, ecc_bch_cfg, num_cw = snandc->qspi->num_cw; ++ ++ oob_buf = op->data.buf.in; ++ oob_buf_start = oob_buf; ++ ++ data_buf_start = data_buf; ++ ++ snandc->buf_count = 0; ++ snandc->buf_start = 0; ++ qcom_clear_read_regs(snandc); ++ qcom_clear_bam_transaction(snandc); ++ ++ cfg0 = (ecc_cfg->cfg0 & ~(7U << CW_PER_PAGE)) | ++ (num_cw - 1) << CW_PER_PAGE; ++ cfg1 = ecc_cfg->cfg1; ++ ecc_bch_cfg = ecc_cfg->ecc_bch_cfg; ++ ++ snandc->regs->addr0 = snandc->qspi->addr1; ++ snandc->regs->addr1 = snandc->qspi->addr2; ++ snandc->regs->cmd = snandc->qspi->cmd; ++ snandc->regs->cfg0 = cpu_to_le32(cfg0); ++ snandc->regs->cfg1 = cpu_to_le32(cfg1); ++ snandc->regs->ecc_bch_cfg = cpu_to_le32(ecc_bch_cfg); ++ snandc->regs->clrflashstatus = cpu_to_le32(ecc_cfg->clrflashstatus); ++ snandc->regs->clrreadstatus = cpu_to_le32(ecc_cfg->clrreadstatus); ++ snandc->regs->exec = cpu_to_le32(1); ++ ++ qcom_spi_set_read_loc(snandc, 0, 0, 0, ecc_cfg->cw_data, 1); ++ ++ qcom_write_reg_dma(snandc, &snandc->regs->addr0, NAND_ADDR0, 2, 0); ++ qcom_write_reg_dma(snandc, &snandc->regs->cfg0, NAND_DEV0_CFG0, 3, 0); ++ qcom_write_reg_dma(snandc, &snandc->regs->erased_cw_detect_cfg_clr, ++ NAND_ERASED_CW_DETECT_CFG, 1, 0); ++ qcom_write_reg_dma(snandc, &snandc->regs->erased_cw_detect_cfg_set, ++ NAND_ERASED_CW_DETECT_CFG, 1, ++ NAND_ERASED_CW_SET | NAND_BAM_NEXT_SGL); ++ ++ for (i = 0; i < num_cw; i++) { ++ int data_size, oob_size; ++ ++ if (i == (num_cw - 1)) { ++ data_size = NANDC_STEP_SIZE - ((num_cw - 1) << 2); ++ oob_size = (num_cw << 2) + ecc_cfg->ecc_bytes_hw + ++ ecc_cfg->spare_bytes; ++ } else { ++ data_size = ecc_cfg->cw_data; ++ oob_size = ecc_cfg->ecc_bytes_hw + ecc_cfg->spare_bytes; ++ } ++ ++ qcom_spi_set_read_loc(snandc, i, 0, data_size, oob_size, 1); ++ ++ qcom_spi_config_cw_read(snandc, true, i); ++ ++ if (oob_buf) { ++ int j; ++ ++ for (j = 0; j < ecc_cfg->bbm_size; j++) ++ *oob_buf++ = 0xff; ++ ++ qcom_read_data_dma(snandc, FLASH_BUF_ACC + data_size, ++ oob_buf, oob_size, 0); ++ } ++ ++ if (oob_buf) ++ oob_buf += oob_size; ++ } ++ ++ ret = qcom_submit_descs(snandc); ++ if (ret) { ++ dev_err(snandc->dev, "failure to read oob\n"); ++ return ret; ++ } ++ ++ return qcom_spi_check_error(snandc, data_buf_start, oob_buf_start); ++} ++ ++static int qcom_spi_read_page(struct qcom_nand_controller *snandc, ++ const struct spi_mem_op *op) ++{ ++ if (snandc->qspi->page_rw && snandc->qspi->raw_rw) ++ return 
qcom_spi_read_page_raw(snandc, op); ++ ++ if (snandc->qspi->page_rw) ++ return qcom_spi_read_page_ecc(snandc, op); ++ ++ if (snandc->qspi->oob_rw && snandc->qspi->raw_rw) ++ return qcom_spi_read_last_cw(snandc, op); ++ ++ if (snandc->qspi->oob_rw) ++ return qcom_spi_read_page_oob(snandc, op); ++ ++ return 0; ++} ++ ++static void qcom_spi_config_page_write(struct qcom_nand_controller *snandc) ++{ ++ qcom_write_reg_dma(snandc, &snandc->regs->addr0, NAND_ADDR0, 2, 0); ++ qcom_write_reg_dma(snandc, &snandc->regs->cfg0, NAND_DEV0_CFG0, 3, 0); ++ qcom_write_reg_dma(snandc, &snandc->regs->ecc_buf_cfg, NAND_EBI2_ECC_BUF_CFG, ++ 1, NAND_BAM_NEXT_SGL); ++} ++ ++static void qcom_spi_config_cw_write(struct qcom_nand_controller *snandc) ++{ ++ qcom_write_reg_dma(snandc, &snandc->regs->cmd, NAND_FLASH_CMD, 1, NAND_BAM_NEXT_SGL); ++ qcom_write_reg_dma(snandc, &snandc->regs->exec, NAND_EXEC_CMD, 1, NAND_BAM_NEXT_SGL); ++ qcom_read_reg_dma(snandc, NAND_FLASH_STATUS, 1, NAND_BAM_NEXT_SGL); ++ ++ qcom_write_reg_dma(snandc, &snandc->regs->clrflashstatus, NAND_FLASH_STATUS, 1, 0); ++ qcom_write_reg_dma(snandc, &snandc->regs->clrreadstatus, NAND_READ_STATUS, 1, ++ NAND_BAM_NEXT_SGL); ++} ++ ++static int qcom_spi_program_raw(struct qcom_nand_controller *snandc, ++ const struct spi_mem_op *op) ++{ ++ struct qpic_ecc *ecc_cfg = snandc->qspi->ecc; ++ struct mtd_info *mtd = snandc->qspi->mtd; ++ u8 *data_buf = NULL, *oob_buf = NULL; ++ int i, ret; ++ int num_cw = snandc->qspi->num_cw; ++ u32 cfg0, cfg1, ecc_bch_cfg; ++ ++ cfg0 = (ecc_cfg->cfg0_raw & ~(7U << CW_PER_PAGE)) | ++ (num_cw - 1) << CW_PER_PAGE; ++ cfg1 = ecc_cfg->cfg1_raw; ++ ecc_bch_cfg = ECC_CFG_ECC_DISABLE; ++ ++ data_buf = snandc->qspi->data_buf; ++ ++ oob_buf = snandc->qspi->oob_buf; ++ memset(oob_buf, 0xff, OOB_BUF_SIZE); ++ ++ snandc->buf_count = 0; ++ snandc->buf_start = 0; ++ qcom_clear_read_regs(snandc); ++ qcom_clear_bam_transaction(snandc); ++ ++ snandc->regs->addr0 = snandc->qspi->addr1; ++ snandc->regs->addr1 = snandc->qspi->addr2; ++ snandc->regs->cmd = snandc->qspi->cmd; ++ snandc->regs->cfg0 = cpu_to_le32(cfg0); ++ snandc->regs->cfg1 = cpu_to_le32(cfg1); ++ snandc->regs->ecc_bch_cfg = cpu_to_le32(ecc_bch_cfg); ++ snandc->regs->clrflashstatus = cpu_to_le32(ecc_cfg->clrflashstatus); ++ snandc->regs->clrreadstatus = cpu_to_le32(ecc_cfg->clrreadstatus); ++ snandc->regs->exec = cpu_to_le32(1); ++ ++ qcom_spi_config_page_write(snandc); ++ ++ for (i = 0; i < num_cw; i++) { ++ int data_size1, data_size2, oob_size1, oob_size2; ++ int reg_off = FLASH_BUF_ACC; ++ ++ data_size1 = mtd->writesize - ecc_cfg->cw_size * (num_cw - 1); ++ oob_size1 = ecc_cfg->bbm_size; ++ ++ if ((i == (num_cw - 1))) { ++ data_size2 = NANDC_STEP_SIZE - data_size1 - ++ ((num_cw - 1) << 2); ++ oob_size2 = (num_cw << 2) + ecc_cfg->ecc_bytes_hw + ++ ecc_cfg->spare_bytes; ++ } else { ++ data_size2 = ecc_cfg->cw_data - data_size1; ++ oob_size2 = ecc_cfg->ecc_bytes_hw + ecc_cfg->spare_bytes; ++ } ++ ++ qcom_write_data_dma(snandc, reg_off, data_buf, data_size1, ++ NAND_BAM_NO_EOT); ++ reg_off += data_size1; ++ data_buf += data_size1; ++ ++ qcom_write_data_dma(snandc, reg_off, oob_buf, oob_size1, ++ NAND_BAM_NO_EOT); ++ oob_buf += oob_size1; ++ reg_off += oob_size1; ++ ++ qcom_write_data_dma(snandc, reg_off, data_buf, data_size2, ++ NAND_BAM_NO_EOT); ++ reg_off += data_size2; ++ data_buf += data_size2; ++ ++ qcom_write_data_dma(snandc, reg_off, oob_buf, oob_size2, 0); ++ oob_buf += oob_size2; ++ ++ qcom_spi_config_cw_write(snandc); ++ } ++ ++ ret = qcom_submit_descs(snandc); ++ if 
(ret) { ++ dev_err(snandc->dev, "failure to write raw page\n"); ++ return ret; ++ } ++ ++ return 0; ++} ++ ++static int qcom_spi_program_ecc(struct qcom_nand_controller *snandc, ++ const struct spi_mem_op *op) ++{ ++ struct qpic_ecc *ecc_cfg = snandc->qspi->ecc; ++ u8 *data_buf = NULL, *oob_buf = NULL; ++ int i, ret; ++ int num_cw = snandc->qspi->num_cw; ++ u32 cfg0, cfg1, ecc_bch_cfg, ecc_buf_cfg; ++ ++ cfg0 = (ecc_cfg->cfg0 & ~(7U << CW_PER_PAGE)) | ++ (num_cw - 1) << CW_PER_PAGE; ++ cfg1 = ecc_cfg->cfg1; ++ ecc_bch_cfg = ecc_cfg->ecc_bch_cfg; ++ ecc_buf_cfg = ecc_cfg->ecc_buf_cfg; ++ ++ if (snandc->qspi->data_buf) ++ data_buf = snandc->qspi->data_buf; ++ ++ oob_buf = snandc->qspi->oob_buf; ++ ++ snandc->buf_count = 0; ++ snandc->buf_start = 0; ++ qcom_clear_read_regs(snandc); ++ qcom_clear_bam_transaction(snandc); ++ ++ snandc->regs->addr0 = snandc->qspi->addr1; ++ snandc->regs->addr1 = snandc->qspi->addr2; ++ snandc->regs->cmd = snandc->qspi->cmd; ++ snandc->regs->cfg0 = cpu_to_le32(cfg0); ++ snandc->regs->cfg1 = cpu_to_le32(cfg1); ++ snandc->regs->ecc_bch_cfg = cpu_to_le32(ecc_bch_cfg); ++ snandc->regs->ecc_buf_cfg = cpu_to_le32(ecc_buf_cfg); ++ snandc->regs->exec = cpu_to_le32(1); ++ ++ qcom_spi_config_page_write(snandc); ++ ++ for (i = 0; i < num_cw; i++) { ++ int data_size, oob_size; ++ ++ if (i == (num_cw - 1)) { ++ data_size = NANDC_STEP_SIZE - ((num_cw - 1) << 2); ++ oob_size = (num_cw << 2) + ecc_cfg->ecc_bytes_hw + ++ ecc_cfg->spare_bytes; ++ } else { ++ data_size = ecc_cfg->cw_data; ++ oob_size = ecc_cfg->bytes; ++ } ++ ++ if (data_buf) ++ qcom_write_data_dma(snandc, FLASH_BUF_ACC, data_buf, data_size, ++ i == (num_cw - 1) ? NAND_BAM_NO_EOT : 0); ++ ++ if (i == (num_cw - 1)) { ++ if (oob_buf) { ++ oob_buf += ecc_cfg->bbm_size; ++ qcom_write_data_dma(snandc, FLASH_BUF_ACC + data_size, ++ oob_buf, oob_size, 0); ++ } ++ } ++ ++ qcom_spi_config_cw_write(snandc); ++ ++ if (data_buf) ++ data_buf += data_size; ++ if (oob_buf) ++ oob_buf += oob_size; ++ } ++ ++ ret = qcom_submit_descs(snandc); ++ if (ret) { ++ dev_err(snandc->dev, "failure to write page\n"); ++ return ret; ++ } ++ ++ return 0; ++} ++ ++static int qcom_spi_program_oob(struct qcom_nand_controller *snandc, ++ const struct spi_mem_op *op) ++{ ++ struct qpic_ecc *ecc_cfg = snandc->qspi->ecc; ++ u8 *oob_buf = NULL; ++ int ret, col, data_size, oob_size; ++ int num_cw = snandc->qspi->num_cw; ++ u32 cfg0, cfg1, ecc_bch_cfg, ecc_buf_cfg; ++ ++ cfg0 = (ecc_cfg->cfg0 & ~(7U << CW_PER_PAGE)) | ++ (num_cw - 1) << CW_PER_PAGE; ++ cfg1 = ecc_cfg->cfg1; ++ ecc_bch_cfg = ecc_cfg->ecc_bch_cfg; ++ ecc_buf_cfg = ecc_cfg->ecc_buf_cfg; ++ ++ col = ecc_cfg->cw_size * (num_cw - 1); ++ ++ oob_buf = snandc->qspi->data_buf; ++ ++ snandc->buf_count = 0; ++ snandc->buf_start = 0; ++ qcom_clear_read_regs(snandc); ++ qcom_clear_bam_transaction(snandc); ++ snandc->regs->addr0 = (snandc->qspi->addr1 | cpu_to_le32(col)); ++ snandc->regs->addr1 = snandc->qspi->addr2; ++ snandc->regs->cmd = snandc->qspi->cmd; ++ snandc->regs->cfg0 = cpu_to_le32(cfg0); ++ snandc->regs->cfg1 = cpu_to_le32(cfg1); ++ snandc->regs->ecc_bch_cfg = cpu_to_le32(ecc_bch_cfg); ++ snandc->regs->ecc_buf_cfg = cpu_to_le32(ecc_buf_cfg); ++ snandc->regs->exec = cpu_to_le32(1); ++ ++ /* calculate the data and oob size for the last codeword/step */ ++ data_size = NANDC_STEP_SIZE - ((num_cw - 1) << 2); ++ oob_size = snandc->qspi->mtd->oobavail; ++ ++ memset(snandc->data_buffer, 0xff, ecc_cfg->cw_data); ++ /* override new oob content to last codeword */ ++ 
mtd_ooblayout_get_databytes(snandc->qspi->mtd, snandc->data_buffer + data_size, ++ oob_buf, 0, snandc->qspi->mtd->oobavail); ++ qcom_spi_config_page_write(snandc); ++ qcom_write_data_dma(snandc, FLASH_BUF_ACC, snandc->data_buffer, data_size + oob_size, 0); ++ qcom_spi_config_cw_write(snandc); ++ ++ ret = qcom_submit_descs(snandc); ++ if (ret) { ++ dev_err(snandc->dev, "failure to write oob\n"); ++ return ret; ++ } ++ ++ return 0; ++} ++ ++static int qcom_spi_program_execute(struct qcom_nand_controller *snandc, ++ const struct spi_mem_op *op) ++{ ++ if (snandc->qspi->page_rw && snandc->qspi->raw_rw) ++ return qcom_spi_program_raw(snandc, op); ++ ++ if (snandc->qspi->page_rw) ++ return qcom_spi_program_ecc(snandc, op); ++ ++ if (snandc->qspi->oob_rw) ++ return qcom_spi_program_oob(snandc, op); ++ ++ return 0; ++} ++ ++static u32 qcom_spi_cmd_mapping(struct qcom_nand_controller *snandc, u32 opcode) ++{ ++ u32 cmd = 0x0; ++ ++ switch (opcode) { ++ case SPINAND_RESET: ++ cmd = (SPI_WP | SPI_HOLD | SPI_TRANSFER_MODE_x1 | OP_RESET_DEVICE); ++ break; ++ case SPINAND_READID: ++ cmd = (SPI_WP | SPI_HOLD | SPI_TRANSFER_MODE_x1 | OP_FETCH_ID); ++ break; ++ case SPINAND_GET_FEATURE: ++ cmd = (SPI_TRANSFER_MODE_x1 | SPI_WP | SPI_HOLD | ACC_FEATURE); ++ break; ++ case SPINAND_SET_FEATURE: ++ cmd = (SPI_TRANSFER_MODE_x1 | SPI_WP | SPI_HOLD | ACC_FEATURE | ++ QPIC_SET_FEATURE); ++ break; ++ case SPINAND_READ: ++ if (snandc->qspi->raw_rw) { ++ cmd = (PAGE_ACC | LAST_PAGE | SPI_TRANSFER_MODE_x1 | ++ SPI_WP | SPI_HOLD | OP_PAGE_READ); ++ } else { ++ cmd = (PAGE_ACC | LAST_PAGE | SPI_TRANSFER_MODE_x1 | ++ SPI_WP | SPI_HOLD | OP_PAGE_READ_WITH_ECC); ++ } ++ ++ break; ++ case SPINAND_ERASE: ++ cmd = OP_BLOCK_ERASE | PAGE_ACC | LAST_PAGE | SPI_WP | ++ SPI_HOLD | SPI_TRANSFER_MODE_x1; ++ break; ++ case SPINAND_WRITE_EN: ++ cmd = SPINAND_WRITE_EN; ++ break; ++ case SPINAND_PROGRAM_EXECUTE: ++ cmd = (PAGE_ACC | LAST_PAGE | SPI_TRANSFER_MODE_x1 | ++ SPI_WP | SPI_HOLD | OP_PROGRAM_PAGE); ++ break; ++ case SPINAND_PROGRAM_LOAD: ++ cmd = SPINAND_PROGRAM_LOAD; ++ break; ++ default: ++ dev_err(snandc->dev, "Opcode not supported: %u\n", opcode); ++ return -EOPNOTSUPP; ++ } ++ ++ return cmd; ++} ++ ++static int qcom_spi_write_page(struct qcom_nand_controller *snandc, ++ const struct spi_mem_op *op) ++{ ++ struct qpic_snand_op s_op = {}; ++ u32 cmd; ++ ++ cmd = qcom_spi_cmd_mapping(snandc, op->cmd.opcode); ++ if (cmd < 0) ++ return cmd; ++ ++ s_op.cmd_reg = cmd; ++ ++ if (op->cmd.opcode == SPINAND_PROGRAM_LOAD) ++ snandc->qspi->data_buf = (u8 *)op->data.buf.out; ++ ++ return 0; ++} ++ ++static int qcom_spi_send_cmdaddr(struct qcom_nand_controller *snandc, ++ const struct spi_mem_op *op) ++{ ++ struct qpic_snand_op s_op = {}; ++ u32 cmd; ++ int ret, opcode; ++ ++ cmd = qcom_spi_cmd_mapping(snandc, op->cmd.opcode); ++ if (cmd < 0) ++ return cmd; ++ ++ s_op.cmd_reg = cmd; ++ s_op.addr1_reg = op->addr.val; ++ s_op.addr2_reg = 0; ++ ++ opcode = op->cmd.opcode; ++ ++ switch (opcode) { ++ case SPINAND_WRITE_EN: ++ return 0; ++ case SPINAND_PROGRAM_EXECUTE: ++ s_op.addr1_reg = op->addr.val << 16; ++ s_op.addr2_reg = op->addr.val >> 16 & 0xff; ++ snandc->qspi->addr1 = cpu_to_le32(s_op.addr1_reg); ++ snandc->qspi->addr2 = cpu_to_le32(s_op.addr2_reg); ++ snandc->qspi->cmd = cpu_to_le32(cmd); ++ return qcom_spi_program_execute(snandc, op); ++ case SPINAND_READ: ++ s_op.addr1_reg = (op->addr.val << 16); ++ s_op.addr2_reg = op->addr.val >> 16 & 0xff; ++ snandc->qspi->addr1 = cpu_to_le32(s_op.addr1_reg); ++ snandc->qspi->addr2 = 
cpu_to_le32(s_op.addr2_reg); ++ snandc->qspi->cmd = cpu_to_le32(cmd); ++ return 0; ++ case SPINAND_ERASE: ++ s_op.addr2_reg = (op->addr.val >> 16) & 0xffff; ++ s_op.addr1_reg = op->addr.val; ++ snandc->qspi->addr1 = cpu_to_le32(s_op.addr1_reg << 16); ++ snandc->qspi->addr2 = cpu_to_le32(s_op.addr2_reg); ++ snandc->qspi->cmd = cpu_to_le32(cmd); ++ qcom_spi_block_erase(snandc); ++ return 0; ++ default: ++ break; ++ } ++ ++ snandc->buf_count = 0; ++ snandc->buf_start = 0; ++ qcom_clear_read_regs(snandc); ++ qcom_clear_bam_transaction(snandc); ++ ++ snandc->regs->cmd = cpu_to_le32(s_op.cmd_reg); ++ snandc->regs->exec = cpu_to_le32(1); ++ snandc->regs->addr0 = cpu_to_le32(s_op.addr1_reg); ++ snandc->regs->addr1 = cpu_to_le32(s_op.addr2_reg); ++ ++ qcom_write_reg_dma(snandc, &snandc->regs->cmd, NAND_FLASH_CMD, 3, NAND_BAM_NEXT_SGL); ++ qcom_write_reg_dma(snandc, &snandc->regs->exec, NAND_EXEC_CMD, 1, NAND_BAM_NEXT_SGL); ++ ++ ret = qcom_submit_descs(snandc); ++ if (ret) ++ dev_err(snandc->dev, "failure in submitting cmd descriptor\n"); ++ ++ return ret; ++} ++ ++static int qcom_spi_io_op(struct qcom_nand_controller *snandc, const struct spi_mem_op *op) ++{ ++ int ret, val, opcode; ++ bool copy = false, copy_ftr = false; ++ ++ ret = qcom_spi_send_cmdaddr(snandc, op); ++ if (ret) ++ return ret; ++ ++ snandc->buf_count = 0; ++ snandc->buf_start = 0; ++ qcom_clear_read_regs(snandc); ++ qcom_clear_bam_transaction(snandc); ++ opcode = op->cmd.opcode; ++ ++ switch (opcode) { ++ case SPINAND_READID: ++ snandc->buf_count = 4; ++ qcom_read_reg_dma(snandc, NAND_READ_ID, 1, NAND_BAM_NEXT_SGL); ++ copy = true; ++ break; ++ case SPINAND_GET_FEATURE: ++ snandc->buf_count = 4; ++ qcom_read_reg_dma(snandc, NAND_FLASH_FEATURES, 1, NAND_BAM_NEXT_SGL); ++ copy_ftr = true; ++ break; ++ case SPINAND_SET_FEATURE: ++ snandc->regs->flash_feature = cpu_to_le32(*(u32 *)op->data.buf.out); ++ qcom_write_reg_dma(snandc, &snandc->regs->flash_feature, ++ NAND_FLASH_FEATURES, 1, NAND_BAM_NEXT_SGL); ++ break; ++ case SPINAND_PROGRAM_EXECUTE: ++ case SPINAND_WRITE_EN: ++ case SPINAND_RESET: ++ case SPINAND_ERASE: ++ case SPINAND_READ: ++ return 0; ++ default: ++ return -EOPNOTSUPP; ++ } ++ ++ ret = qcom_submit_descs(snandc); ++ if (ret) ++ dev_err(snandc->dev, "failure in submitting descriptor for:%d\n", opcode); ++ ++ if (copy) { ++ qcom_nandc_dev_to_mem(snandc, true); ++ memcpy(op->data.buf.in, snandc->reg_read_buf, snandc->buf_count); ++ } ++ ++ if (copy_ftr) { ++ qcom_nandc_dev_to_mem(snandc, true); ++ val = le32_to_cpu(*(__le32 *)snandc->reg_read_buf); ++ val >>= 8; ++ memcpy(op->data.buf.in, &val, snandc->buf_count); ++ } ++ ++ return ret; ++} ++ ++static bool qcom_spi_is_page_op(const struct spi_mem_op *op) ++{ ++ if (op->addr.buswidth != 1 && op->addr.buswidth != 2 && op->addr.buswidth != 4) ++ return false; ++ ++ if (op->data.dir == SPI_MEM_DATA_IN) { ++ if (op->addr.buswidth == 4 && op->data.buswidth == 4) ++ return true; ++ ++ if (op->addr.nbytes == 2 && op->addr.buswidth == 1) ++ return true; ++ ++ } else if (op->data.dir == SPI_MEM_DATA_OUT) { ++ if (op->data.buswidth == 4) ++ return true; ++ if (op->addr.nbytes == 2 && op->addr.buswidth == 1) ++ return true; ++ } ++ ++ return false; ++} ++ ++static bool qcom_spi_supports_op(struct spi_mem *mem, const struct spi_mem_op *op) ++{ ++ if (!spi_mem_default_supports_op(mem, op)) ++ return false; ++ ++ if (op->cmd.nbytes != 1 || op->cmd.buswidth != 1) ++ return false; ++ ++ if (qcom_spi_is_page_op(op)) ++ return true; ++ ++ return ((!op->addr.nbytes || op->addr.buswidth == 
1) && ++ (!op->dummy.nbytes || op->dummy.buswidth == 1) && ++ (!op->data.nbytes || op->data.buswidth == 1)); ++} ++ ++static int qcom_spi_exec_op(struct spi_mem *mem, const struct spi_mem_op *op) ++{ ++ struct qcom_nand_controller *snandc = spi_controller_get_devdata(mem->spi->controller); ++ ++ dev_dbg(snandc->dev, "OP %02x ADDR %08llX@%d:%u DATA %d:%u", op->cmd.opcode, ++ op->addr.val, op->addr.buswidth, op->addr.nbytes, ++ op->data.buswidth, op->data.nbytes); ++ ++ if (qcom_spi_is_page_op(op)) { ++ if (op->data.dir == SPI_MEM_DATA_IN) ++ return qcom_spi_read_page(snandc, op); ++ if (op->data.dir == SPI_MEM_DATA_OUT) ++ return qcom_spi_write_page(snandc, op); ++ } else { ++ return qcom_spi_io_op(snandc, op); ++ } ++ ++ return 0; ++} ++ ++static const struct spi_controller_mem_ops qcom_spi_mem_ops = { ++ .supports_op = qcom_spi_supports_op, ++ .exec_op = qcom_spi_exec_op, ++}; ++ ++static const struct spi_controller_mem_caps qcom_spi_mem_caps = { ++ .ecc = true, ++}; ++ ++static int qcom_spi_probe(struct platform_device *pdev) ++{ ++ struct device *dev = &pdev->dev; ++ struct spi_controller *ctlr; ++ struct qcom_nand_controller *snandc; ++ struct qpic_spi_nand *qspi; ++ struct qpic_ecc *ecc; ++ struct resource *res; ++ const void *dev_data; ++ int ret; ++ ++ ecc = devm_kzalloc(dev, sizeof(*ecc), GFP_KERNEL); ++ if (!ecc) ++ return -ENOMEM; ++ ++ qspi = devm_kzalloc(dev, sizeof(*qspi), GFP_KERNEL); ++ if (!qspi) ++ return -ENOMEM; ++ ++ ctlr = __devm_spi_alloc_controller(dev, sizeof(*snandc), false); ++ if (!ctlr) ++ return -ENOMEM; ++ ++ platform_set_drvdata(pdev, ctlr); ++ ++ snandc = spi_controller_get_devdata(ctlr); ++ qspi->snandc = snandc; ++ ++ snandc->dev = dev; ++ snandc->qspi = qspi; ++ snandc->qspi->ctlr = ctlr; ++ snandc->qspi->ecc = ecc; ++ ++ dev_data = of_device_get_match_data(dev); ++ if (!dev_data) { ++ dev_err(&pdev->dev, "failed to get device data\n"); ++ return -ENODEV; ++ } ++ ++ snandc->props = dev_data; ++ snandc->dev = &pdev->dev; ++ ++ snandc->core_clk = devm_clk_get(dev, "core"); ++ if (IS_ERR(snandc->core_clk)) ++ return PTR_ERR(snandc->core_clk); ++ ++ snandc->aon_clk = devm_clk_get(dev, "aon"); ++ if (IS_ERR(snandc->aon_clk)) ++ return PTR_ERR(snandc->aon_clk); ++ ++ snandc->qspi->iomacro_clk = devm_clk_get(dev, "iom"); ++ if (IS_ERR(snandc->qspi->iomacro_clk)) ++ return PTR_ERR(snandc->qspi->iomacro_clk); ++ ++ snandc->base = devm_platform_get_and_ioremap_resource(pdev, 0, &res); ++ if (IS_ERR(snandc->base)) ++ return PTR_ERR(snandc->base); ++ ++ snandc->base_phys = res->start; ++ snandc->base_dma = dma_map_resource(dev, res->start, resource_size(res), ++ DMA_BIDIRECTIONAL, 0); ++ if (dma_mapping_error(dev, snandc->base_dma)) ++ return -ENXIO; ++ ++ ret = clk_prepare_enable(snandc->core_clk); ++ if (ret) ++ goto err_dis_core_clk; ++ ++ ret = clk_prepare_enable(snandc->aon_clk); ++ if (ret) ++ goto err_dis_aon_clk; ++ ++ ret = clk_prepare_enable(snandc->qspi->iomacro_clk); ++ if (ret) ++ goto err_dis_iom_clk; ++ ++ ret = qcom_nandc_alloc(snandc); ++ if (ret) ++ goto err_snand_alloc; ++ ++ ret = qcom_spi_init(snandc); ++ if (ret) ++ goto err_spi_init; ++ ++ /* setup ECC engine */ ++ snandc->qspi->ecc_eng.dev = &pdev->dev; ++ snandc->qspi->ecc_eng.integration = NAND_ECC_ENGINE_INTEGRATION_PIPELINED; ++ snandc->qspi->ecc_eng.ops = &qcom_spi_ecc_engine_ops_pipelined; ++ snandc->qspi->ecc_eng.priv = snandc; ++ ++ ret = nand_ecc_register_on_host_hw_engine(&snandc->qspi->ecc_eng); ++ if (ret) { ++ dev_err(&pdev->dev, "failed to register ecc engine:%d\n", ret); ++ goto 
err_spi_init;
++	}
++
++	ctlr->num_chipselect = QPIC_QSPI_NUM_CS;
++	ctlr->mem_ops = &qcom_spi_mem_ops;
++	ctlr->mem_caps = &qcom_spi_mem_caps;
++	ctlr->dev.of_node = pdev->dev.of_node;
++	ctlr->mode_bits = SPI_TX_DUAL | SPI_RX_DUAL |
++			  SPI_TX_QUAD | SPI_RX_QUAD;
++
++	ret = spi_register_controller(ctlr);
++	if (ret) {
++		dev_err(&pdev->dev, "spi_register_controller failed.\n");
++		goto err_spi_init;
++	}
++
++	return 0;
++
++err_spi_init:
++	qcom_nandc_unalloc(snandc);
++err_snand_alloc:
++	clk_disable_unprepare(snandc->qspi->iomacro_clk);
++err_dis_iom_clk:
++	clk_disable_unprepare(snandc->aon_clk);
++err_dis_aon_clk:
++	clk_disable_unprepare(snandc->core_clk);
++err_dis_core_clk:
++	dma_unmap_resource(dev, res->start, resource_size(res),
++			   DMA_BIDIRECTIONAL, 0);
++	return ret;
++}
++
++static void qcom_spi_remove(struct platform_device *pdev)
++{
++	struct spi_controller *ctlr = platform_get_drvdata(pdev);
++	struct qcom_nand_controller *snandc = spi_controller_get_devdata(ctlr);
++	struct resource *res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
++
++	spi_unregister_controller(ctlr);
++
++	qcom_nandc_unalloc(snandc);
++
++	clk_disable_unprepare(snandc->aon_clk);
++	clk_disable_unprepare(snandc->core_clk);
++	clk_disable_unprepare(snandc->qspi->iomacro_clk);
++
++	dma_unmap_resource(&pdev->dev, snandc->base_dma, resource_size(res),
++			   DMA_BIDIRECTIONAL, 0);
++}
++
++static const struct qcom_nandc_props ipq9574_snandc_props = {
++	.dev_cmd_reg_start = 0x7000,
++	.supports_bam = true,
++};
++
++static const struct of_device_id qcom_snandc_of_match[] = {
++	{
++		.compatible = "qcom,spi-qpic-snand",
++		.data = &ipq9574_snandc_props,
++	},
++	{}
++};
++MODULE_DEVICE_TABLE(of, qcom_snandc_of_match);
++
++static struct platform_driver qcom_spi_driver = {
++	.driver = {
++		.name = "qcom_snand",
++		.of_match_table = qcom_snandc_of_match,
++	},
++	.probe = qcom_spi_probe,
++	.remove = qcom_spi_remove,
++};
++module_platform_driver(qcom_spi_driver);
++
++MODULE_DESCRIPTION("SPI driver for QPIC QSPI cores");
++MODULE_AUTHOR("Md Sadre Alam ");
++MODULE_LICENSE("GPL");
++
+--- a/include/linux/mtd/nand-qpic-common.h
++++ b/include/linux/mtd/nand-qpic-common.h
+@@ -322,6 +322,10 @@ struct nandc_regs {
+ 	__le32 read_location_last1;
+ 	__le32 read_location_last2;
+ 	__le32 read_location_last3;
++	__le32 spi_cfg;
++	__le32 num_addr_cycle;
++	__le32 busy_wait_cnt;
++	__le32 flash_feature;
+
+ 	__le32 erased_cw_detect_cfg_clr;
+ 	__le32 erased_cw_detect_cfg_set;
+@@ -336,6 +340,7 @@ struct nandc_regs {
+  *
+  * @core_clk:			controller clock
+  * @aon_clk:			another controller clock
++ * @iomacro_clk:		io macro clock
+  *
+  * @regs:			a contiguous chunk of memory for DMA register
+  *				writes.
contains the register values to be +@@ -345,6 +350,7 @@ struct nandc_regs { + * initialized via DT match data + * + * @controller: base controller structure ++ * @qspi: qpic spi structure + * @host_list: list containing all the chips attached to the + * controller + * +@@ -389,6 +395,7 @@ struct qcom_nand_controller { + const struct qcom_nandc_props *props; + + struct nand_controller *controller; ++ struct qpic_spi_nand *qspi; + struct list_head host_list; + + union { diff --git a/target/linux/qualcommax/patches-6.6/0408-spi-spi-qpic-fixes-compilation-issues.patch b/target/linux/qualcommax/patches-6.6/0408-spi-spi-qpic-fixes-compilation-issues.patch new file mode 100644 index 000000000..226e6059b --- /dev/null +++ b/target/linux/qualcommax/patches-6.6/0408-spi-spi-qpic-fixes-compilation-issues.patch @@ -0,0 +1,29 @@ +--- a/drivers/mtd/nand/qpic_common.c ++++ b/drivers/mtd/nand/qpic_common.c +@@ -82,7 +82,15 @@ void qcom_clear_bam_transaction(struct q + if (!nandc->props->supports_bam) + return; + +- memset(&bam_txn->bam_ce_pos, 0, sizeof(u32) * 8); ++ // memset(&bam_txn->bam_ce_pos, 0, sizeof(u32) * 8); ++ bam_txn->bam_ce_pos = 0; ++ bam_txn->bam_ce_start = 0; ++ bam_txn->cmd_sgl_pos = 0; ++ bam_txn->cmd_sgl_start = 0; ++ bam_txn->tx_sgl_pos = 0; ++ bam_txn->tx_sgl_start = 0; ++ bam_txn->rx_sgl_pos = 0; ++ bam_txn->rx_sgl_start = 0; + bam_txn->last_data_desc = NULL; + + sg_init_table(bam_txn->cmd_sgl, nandc->max_cwperpage * +--- a/drivers/spi/spi-qpic-snand.c ++++ b/drivers/spi/spi-qpic-snand.c +@@ -1624,7 +1624,7 @@ static struct platform_driver qcom_spi_d + .of_match_table = qcom_snandc_of_match, + }, + .probe = qcom_spi_probe, +- .remove = qcom_spi_remove, ++ .remove_new = qcom_spi_remove, + }; + module_platform_driver(qcom_spi_driver); diff --git a/target/linux/qualcommax/patches-6.6/0411-spi-spi-qpic-snand-support-BCH8.patch b/target/linux/qualcommax/patches-6.6/0411-spi-spi-qpic-snand-support-BCH8.patch new file mode 100644 index 000000000..056fc3ea9 --- /dev/null +++ b/target/linux/qualcommax/patches-6.6/0411-spi-spi-qpic-snand-support-BCH8.patch @@ -0,0 +1,50 @@ +From 396886e8644d5b601126b97e0b36c40c5fb5cecf Mon Sep 17 00:00:00 2001 +From: Ziyang Huang +Date: Sun, 8 Sep 2024 16:40:11 +0800 +Subject: [PATCH 1/2] spi: spi-qpic-snand: support BCH8 + +Signed-off-by: hzy +--- + drivers/spi/spi-qpic-snand.c | 12 ++++++++---- + 1 file changed, 8 insertions(+), 4 deletions(-) + +--- a/drivers/spi/spi-qpic-snand.c ++++ b/drivers/spi/spi-qpic-snand.c +@@ -252,6 +252,7 @@ static int qcom_spi_ecc_init_ctx_pipelin + struct nand_ecc_props *conf = &nand->ecc.ctx.conf; + struct mtd_info *mtd = nanddev_to_mtd(nand); + int cwperpage, bad_block_byte; ++ int ecc_mode; + struct qpic_ecc *ecc_cfg; + + cwperpage = mtd->writesize / NANDC_STEP_SIZE; +@@ -270,14 +271,17 @@ static int qcom_spi_ecc_init_ctx_pipelin + nand->ecc.ctx.priv = ecc_cfg; + snandc->qspi->mtd = mtd; + +- ecc_cfg->ecc_bytes_hw = 7; +- ecc_cfg->spare_bytes = 4; ++ /* BCH8 or BCH4 */ ++ ecc_mode = mtd->oobsize > 64 ? 1 : 0; ++ ++ ecc_cfg->ecc_bytes_hw = ecc_mode ? 13 : 7; ++ ecc_cfg->spare_bytes = ecc_mode ? 2 : 4; + ecc_cfg->bbm_size = 1; + ecc_cfg->bch_enabled = true; + ecc_cfg->bytes = ecc_cfg->ecc_bytes_hw + ecc_cfg->spare_bytes + ecc_cfg->bbm_size; + + ecc_cfg->steps = 4; +- ecc_cfg->strength = 4; ++ ecc_cfg->strength = ecc_mode ? 
8 : 4; + ecc_cfg->step_size = 512; + ecc_cfg->cw_data = 516; + ecc_cfg->cw_size = ecc_cfg->cw_data + ecc_cfg->bytes; +@@ -319,7 +323,7 @@ static int qcom_spi_ecc_init_ctx_pipelin + FIELD_PREP(ECC_SW_RESET, 0) | + FIELD_PREP(ECC_NUM_DATA_BYTES_MASK, ecc_cfg->cw_data) | + FIELD_PREP(ECC_FORCE_CLK_OPEN, 1) | +- FIELD_PREP(ECC_MODE_MASK, 0) | ++ FIELD_PREP(ECC_MODE_MASK, ecc_mode) | + FIELD_PREP(ECC_PARITY_SIZE_BYTES_BCH_MASK, ecc_cfg->ecc_bytes_hw); + + ecc_cfg->ecc_buf_cfg = 0x203 << NUM_STEPS; diff --git a/target/linux/qualcommax/patches-6.6/0412-mtd-spinand-qpic-only-support-max-4-bytes-ID.patch b/target/linux/qualcommax/patches-6.6/0412-mtd-spinand-qpic-only-support-max-4-bytes-ID.patch new file mode 100644 index 000000000..993cd233f --- /dev/null +++ b/target/linux/qualcommax/patches-6.6/0412-mtd-spinand-qpic-only-support-max-4-bytes-ID.patch @@ -0,0 +1,26 @@ +From 3d550dc3eb4eaa2fe1d0668ed67e835c91487d61 Mon Sep 17 00:00:00 2001 +From: hzy +Date: Sun, 8 Sep 2024 16:40:11 +0800 +Subject: [PATCH 2/2] mtd: spinand: qpic only support max 4 bytes ID + +Signed-off-by: hzy +--- + drivers/mtd/nand/spi/core.c | 2 +- + 1 file changed, 1 insertion(+), 1 deletion(-) + +diff --git a/drivers/mtd/nand/spi/core.c b/drivers/mtd/nand/spi/core.c +index 4c54a962c5d6..1a8ac8e20f6e 100644 +--- a/drivers/mtd/nand/spi/core.c ++++ b/drivers/mtd/nand/spi/core.c +@@ -1086,7 +1086,7 @@ int spinand_match_and_init(struct spinand_device *spinand, + if (rdid_method != info->devid.method) + continue; + +- if (memcmp(id + 1, info->devid.id, info->devid.len)) ++ if (memcmp(id + 1, info->devid.id, min(3, info->devid.len))) + continue; + + nand->memorg = table[i].memorg; +-- +2.40.1 + diff --git a/target/linux/qualcommax/patches-6.6/0421-arm64-dts-qcom-ipq5018-Add-nand-node.patch b/target/linux/qualcommax/patches-6.6/0421-arm64-dts-qcom-ipq5018-Add-nand-node.patch new file mode 100644 index 000000000..f69f65855 --- /dev/null +++ b/target/linux/qualcommax/patches-6.6/0421-arm64-dts-qcom-ipq5018-Add-nand-node.patch @@ -0,0 +1,49 @@ +From c2019f64539dd24e6e0da3cea2219d6f9e6b03e4 Mon Sep 17 00:00:00 2001 +From: Ziyang Huang +Date: Sun, 8 Sep 2024 16:40:11 +0800 +Subject: [PATCH] arm64: dts: qcom: ipq5018: Add nand node + +Signed-off-by: hzy +--- + arch/arm64/boot/dts/qcom/ipq5018.dtsi | 40 +++++++++++++++++++++++++++ + 1 file changed, 40 insertions(+) + +--- a/arch/arm64/boot/dts/qcom/ipq5018.dtsi ++++ b/arch/arm64/boot/dts/qcom/ipq5018.dtsi +@@ -457,6 +457,36 @@ + status = "disabled"; + }; + ++ qpic_bam: dma@7984000 { ++ compatible = "qcom,bam-v1.7.0"; ++ reg = <0x07984000 0x1c000>; ++ interrupts = ; ++ clocks = <&gcc GCC_QPIC_AHB_CLK>; ++ clock-names = "bam_clk"; ++ #dma-cells = <1>; ++ qcom,ee = <0>; ++ status = "disabled"; ++ }; ++ ++ qpic_nand: qpic-nand@79b0000 { ++ compatible = "qcom,spi-qpic-snand"; ++ reg = <0x079b0000 0x10000>; ++ #address-cells = <1>; ++ #size-cells = <0>; ++ clocks = <&gcc GCC_QPIC_CLK>, ++ <&gcc GCC_QPIC_AHB_CLK>, ++ <&gcc GCC_QPIC_IO_MACRO_CLK>; ++ clock-names = "core", "aon", "iom"; ++ ++ dmas = <&qpic_bam 0>, ++ <&qpic_bam 1>, ++ <&qpic_bam 2>, ++ <&qpic_bam 3>; ++ dma-names = "tx", "rx", "cmd", "status"; ++ ++ status = "disabled"; ++ }; ++ + usb: usb@8af8800 { + compatible = "qcom,ipq5018-dwc3", "qcom,dwc3"; + reg = <0x08af8800 0x400>; diff --git a/target/linux/qualcommax/patches-6.6/0431-arm64-dts-qcom-ipq5018-Add-more-nand-compatible-for-.patch b/target/linux/qualcommax/patches-6.6/0431-arm64-dts-qcom-ipq5018-Add-more-nand-compatible-for-.patch new file mode 100644 index 000000000..aa5e1de90 
--- /dev/null +++ b/target/linux/qualcommax/patches-6.6/0431-arm64-dts-qcom-ipq5018-Add-more-nand-compatible-for-.patch @@ -0,0 +1,22 @@ +From b76a7649402d3eb1245ab463832133fc7efda194 Mon Sep 17 00:00:00 2001 +From: Ziyang Huang +Date: Sun, 8 Sep 2024 16:40:11 +0800 +Subject: [PATCH] arm64: dts: qcom: ipq5018: Add more nand compatible for + uboot to fix partitions + +Signed-off-by: hzy +--- + arch/arm64/boot/dts/qcom/ipq5018.dtsi | 2 +- + 1 file changed, 1 insertion(+), 1 deletion(-) + +--- a/arch/arm64/boot/dts/qcom/ipq5018.dtsi ++++ b/arch/arm64/boot/dts/qcom/ipq5018.dtsi +@@ -469,7 +469,7 @@ + }; + + qpic_nand: qpic-nand@79b0000 { +- compatible = "qcom,spi-qpic-snand"; ++ compatible = "qcom,spi-qpic-snand", "qcom,ebi2-nandc-bam-v2.1.1"; + reg = <0x079b0000 0x10000>; + #address-cells = <1>; + #size-cells = <0>; diff --git a/target/linux/qualcommax/patches-6.6/0701-dt-bindings-clock-qcom-Add-CMN-PLL-clock-controller-.patch b/target/linux/qualcommax/patches-6.6/0701-dt-bindings-clock-qcom-Add-CMN-PLL-clock-controller-.patch new file mode 100644 index 000000000..fbef3a18a --- /dev/null +++ b/target/linux/qualcommax/patches-6.6/0701-dt-bindings-clock-qcom-Add-CMN-PLL-clock-controller-.patch @@ -0,0 +1,122 @@ +From 7b89dbf5c7dcd8a9c131721e93c1292e5993968b Mon Sep 17 00:00:00 2001 +From: Luo Jie +Date: Tue, 20 Aug 2024 22:02:42 +0800 +Subject: [PATCH] dt-bindings: clock: qcom: Add CMN PLL clock controller + for IPQ SoC + +The CMN PLL controller provides clocks to networking hardware blocks +on Qualcomm IPQ9574 SoC. It receives input clock from the on-chip Wi-Fi, +and produces output clocks at fixed rates. These output rates are +predetermined, and are unrelated to the input clock rate. The output +clocks are supplied to the Ethernet hardware such as PPE (packet +process engine) and the externally connected switch or PHY device. + +Signed-off-by: Luo Jie +Reviewed-by: Krzysztof Kozlowski +--- + .../bindings/clock/qcom,ipq9574-cmn-pll.yaml | 70 +++++++++++++++++++ + include/dt-bindings/clock/qcom,ipq-cmn-pll.h | 15 ++++ + 2 files changed, 85 insertions(+) + create mode 100644 Documentation/devicetree/bindings/clock/qcom,ipq9574-cmn-pll.yaml + create mode 100644 include/dt-bindings/clock/qcom,ipq-cmn-pll.h + +diff --git a/Documentation/devicetree/bindings/clock/qcom,ipq9574-cmn-pll.yaml b/Documentation/devicetree/bindings/clock/qcom,ipq9574-cmn-pll.yaml +new file mode 100644 +index 000000000000..7ad04b58a698 +--- /dev/null ++++ b/Documentation/devicetree/bindings/clock/qcom,ipq9574-cmn-pll.yaml +@@ -0,0 +1,70 @@ ++# SPDX-License-Identifier: GPL-2.0-only OR BSD-2-Clause ++%YAML 1.2 ++--- ++$id: http://devicetree.org/schemas/clock/qcom,ipq9574-cmn-pll.yaml# ++$schema: http://devicetree.org/meta-schemas/core.yaml# ++ ++title: Qualcomm CMN PLL Clock Controller on IPQ SoC ++ ++maintainers: ++ - Bjorn Andersson ++ - Luo Jie ++ ++description: ++ The CMN PLL clock controller expects a reference input clock. ++ This reference clock is from the on-board Wi-Fi. The CMN PLL ++ supplies a number of fixed rate output clocks to the Ethernet ++ devices including PPE (packet process engine) and the connected ++ switch or PHY device. ++ ++properties: ++ compatible: ++ enum: ++ - qcom,ipq9574-cmn-pll ++ ++ reg: ++ maxItems: 1 ++ ++ clocks: ++ items: ++ - description: The reference clock. The supported clock rates include ++ 25000000, 31250000, 40000000, 48000000, 50000000 and 96000000 HZ. 
++ - description: The AHB clock ++ - description: The SYS clock ++ description: ++ The reference clock is the source clock of CMN PLL, which is from the ++ Wi-Fi. The AHB and SYS clocks must be enabled to access CMN PLL ++ clock registers. ++ ++ clock-names: ++ items: ++ - const: ref ++ - const: ahb ++ - const: sys ++ ++ "#clock-cells": ++ const: 1 ++ ++required: ++ - compatible ++ - reg ++ - clocks ++ - clock-names ++ - "#clock-cells" ++ ++additionalProperties: false ++ ++examples: ++ - | ++ #include ++ ++ clock-controller@9b000 { ++ compatible = "qcom,ipq9574-cmn-pll"; ++ reg = <0x0009b000 0x800>; ++ clocks = <&cmn_pll_ref_clk>, ++ <&gcc GCC_CMN_12GPLL_AHB_CLK>, ++ <&gcc GCC_CMN_12GPLL_SYS_CLK>; ++ clock-names = "ref", "ahb", "sys"; ++ #clock-cells = <1>; ++ }; ++... +diff --git a/include/dt-bindings/clock/qcom,ipq-cmn-pll.h b/include/dt-bindings/clock/qcom,ipq-cmn-pll.h +new file mode 100644 +index 000000000000..64b228659389 +--- /dev/null ++++ b/include/dt-bindings/clock/qcom,ipq-cmn-pll.h +@@ -0,0 +1,15 @@ ++/* SPDX-License-Identifier: (GPL-2.0-only OR BSD-2-Clause) */ ++/* ++ * Copyright (c) 2024 Qualcomm Innovation Center, Inc. All rights reserved. ++ */ ++ ++#ifndef _DT_BINDINGS_CLK_QCOM_IPQ_CMN_PLL_H ++#define _DT_BINDINGS_CLK_QCOM_IPQ_CMN_PLL_H ++ ++/* The output clocks from CMN PLL of IPQ9574. */ ++#define PPE_353MHZ_CLK 0 ++#define ETH0_50MHZ_CLK 1 ++#define ETH1_50MHZ_CLK 2 ++#define ETH2_50MHZ_CLK 3 ++#define ETH_25MHZ_CLK 4 ++#endif +-- +2.40.1 + diff --git a/target/linux/qualcommax/patches-6.6/0702-clk-qcom-Add-CMN-PLL-clock-controller-driver-for-IPQ.patch b/target/linux/qualcommax/patches-6.6/0702-clk-qcom-Add-CMN-PLL-clock-controller-driver-for-IPQ.patch new file mode 100644 index 000000000..5e43d6dfe --- /dev/null +++ b/target/linux/qualcommax/patches-6.6/0702-clk-qcom-Add-CMN-PLL-clock-controller-driver-for-IPQ.patch @@ -0,0 +1,298 @@ +From a7e8397e2db6133e3435054a3f312dbd9cab05ed Mon Sep 17 00:00:00 2001 +From: Luo Jie +Date: Tue, 20 Aug 2024 22:02:43 +0800 +Subject: [PATCH] clk: qcom: Add CMN PLL clock controller driver for IPQ + SoC + +The CMN PLL clock controller supplies clocks to the hardware +blocks that together make up the Ethernet function on Qualcomm +IPQ SoCs. The driver is initially supported for IPQ9574 SoC. + +The CMN PLL clock controller expects a reference input clock +from the on-board Wi-Fi block acting as clock source. The input +reference clock needs to be configured to one of the supported +clock rates. + +The controller supplies a number of fixed-rate output clocks. +For the IPQ9574, there is one output clock of 353 MHZ to PPE +(Packet Process Engine) hardware block, three 50 MHZ output +clocks and an additional 25 MHZ output clock supplied to the +connected Ethernet devices. + +Signed-off-by: Luo Jie +--- + drivers/clk/qcom/Kconfig | 10 ++ + drivers/clk/qcom/Makefile | 1 + + drivers/clk/qcom/clk-ipq-cmn-pll.c | 227 +++++++++++++++++++++++++++++ + 3 files changed, 238 insertions(+) + create mode 100644 drivers/clk/qcom/clk-ipq-cmn-pll.c + +diff --git a/drivers/clk/qcom/Kconfig b/drivers/clk/qcom/Kconfig +index a79b83758389..5a8a09f9c08c 100644 +--- a/drivers/clk/qcom/Kconfig ++++ b/drivers/clk/qcom/Kconfig +@@ -139,6 +139,16 @@ config IPQ_APSS_6018 + Say Y if you want to support CPU frequency scaling on + ipq based devices. + ++config IPQ_CMN_PLL ++ tristate "IPQ CMN PLL Clock Controller" ++ depends on IPQ_GCC_9574 ++ help ++ Support for CMN PLL clock controller on IPQ platform. 
The ++ CMN PLL feeds the reference clocks to the Ethernet devices ++ based on IPQ SoC. ++ Say Y or M if you want to support CMN PLL clock on the IPQ ++ based devices. ++ + config IPQ_GCC_4019 + tristate "IPQ4019 Global Clock Controller" + help +diff --git a/drivers/clk/qcom/Makefile b/drivers/clk/qcom/Makefile +index 4790c8cca426..6214dfbad90f 100644 +--- a/drivers/clk/qcom/Makefile ++++ b/drivers/clk/qcom/Makefile +@@ -23,6 +23,7 @@ obj-$(CONFIG_APQ_MMCC_8084) += mmcc-apq8084.o + obj-$(CONFIG_CLK_GFM_LPASS_SM8250) += lpass-gfm-sm8250.o + obj-$(CONFIG_IPQ_APSS_PLL) += apss-ipq-pll.o + obj-$(CONFIG_IPQ_APSS_6018) += apss-ipq6018.o ++obj-$(CONFIG_IPQ_CMN_PLL) += clk-ipq-cmn-pll.o + obj-$(CONFIG_IPQ_GCC_4019) += gcc-ipq4019.o + obj-$(CONFIG_IPQ_GCC_5018) += gcc-ipq5018.o + obj-$(CONFIG_IPQ_GCC_5332) += gcc-ipq5332.o +diff --git a/drivers/clk/qcom/clk-ipq-cmn-pll.c b/drivers/clk/qcom/clk-ipq-cmn-pll.c +new file mode 100644 +index 000000000000..72030a61a131 +--- /dev/null ++++ b/drivers/clk/qcom/clk-ipq-cmn-pll.c +@@ -0,0 +1,227 @@ ++// SPDX-License-Identifier: GPL-2.0-only ++/* ++ * Copyright (c) 2024 Qualcomm Innovation Center, Inc. All rights reserved. ++ */ ++ ++/* ++ * CMN PLL block expects the reference clock from on-board Wi-Fi block, and ++ * supplies fixed rate clocks as output to the Ethernet hardware blocks. ++ * The Ethernet related blocks include PPE (packet process engine) and the ++ * external connected PHY (or switch) chip receiving clocks from the CMN PLL. ++ * ++ * On the IPQ9574 SoC, There are three clocks with 50 MHZ, one clock with ++ * 25 MHZ which are output from the CMN PLL to Ethernet PHY (or switch), ++ * and one clock with 353 MHZ to PPE. ++ * ++ * +---------+ ++ * | GCC | ++ * +--+---+--+ ++ * AHB CLK| |SYS CLK ++ * V V ++ * +-------+---+------+ ++ * | +-------------> eth0-50mhz ++ * REF CLK | IPQ9574 | ++ * -------->+ +-------------> eth1-50mhz ++ * | CMN PLL block | ++ * | +-------------> eth2-50mhz ++ * | | ++ * +---------+--------+-------------> eth-25mhz ++ * | ++ * V ++ * ppe-353mhz ++ */ ++ ++#include ++#include ++#include ++#include ++#include ++#include ++#include ++#include ++#include ++#include ++ ++#define CMN_PLL_REFCLK_SRC_SELECTION 0x28 ++#define CMN_PLL_REFCLK_SRC_DIV GENMASK(9, 8) ++ ++#define CMN_PLL_REFCLK_CONFIG 0x784 ++#define CMN_PLL_REFCLK_EXTERNAL BIT(9) ++#define CMN_PLL_REFCLK_DIV GENMASK(8, 4) ++#define CMN_PLL_REFCLK_INDEX GENMASK(3, 0) ++ ++#define CMN_PLL_POWER_ON_AND_RESET 0x780 ++#define CMN_ANA_EN_SW_RSTN BIT(6) ++ ++/** ++ * struct cmn_pll_fixed_output_clk - CMN PLL output clocks information ++ * @id: Clock specifier to be supplied ++ * @name: Clock name to be registered ++ * @rate: Clock rate ++ */ ++struct cmn_pll_fixed_output_clk { ++ unsigned int id; ++ const char *name; ++ const unsigned long rate; ++}; ++ ++#define CLK_PLL_OUTPUT(_id, _name, _rate) { \ ++ .id = _id, \ ++ .name = _name, \ ++ .rate = _rate, \ ++} ++ ++static const struct cmn_pll_fixed_output_clk ipq9574_output_clks[] = { ++ CLK_PLL_OUTPUT(PPE_353MHZ_CLK, "ppe-353mhz", 353000000UL), ++ CLK_PLL_OUTPUT(ETH0_50MHZ_CLK, "eth0-50mhz", 50000000UL), ++ CLK_PLL_OUTPUT(ETH1_50MHZ_CLK, "eth1-50mhz", 50000000UL), ++ CLK_PLL_OUTPUT(ETH2_50MHZ_CLK, "eth2-50mhz", 50000000UL), ++ CLK_PLL_OUTPUT(ETH_25MHZ_CLK, "eth-25mhz", 25000000UL), ++}; ++ ++static int ipq_cmn_pll_config(struct device *dev, unsigned long parent_rate) ++{ ++ void __iomem *base; ++ u32 val; ++ ++ base = devm_of_iomap(dev, dev->of_node, 0, NULL); ++ if (IS_ERR(base)) ++ return PTR_ERR(base); ++ ++ val = 
readl(base + CMN_PLL_REFCLK_CONFIG); ++ val &= ~(CMN_PLL_REFCLK_EXTERNAL | CMN_PLL_REFCLK_INDEX); ++ ++ /* ++ * Configure the reference input clock selection as per the given rate. ++ * The output clock rates are always of fixed value. ++ */ ++ switch (parent_rate) { ++ case 25000000: ++ val |= FIELD_PREP(CMN_PLL_REFCLK_INDEX, 3); ++ break; ++ case 31250000: ++ val |= FIELD_PREP(CMN_PLL_REFCLK_INDEX, 4); ++ break; ++ case 40000000: ++ val |= FIELD_PREP(CMN_PLL_REFCLK_INDEX, 6); ++ break; ++ case 48000000: ++ val |= FIELD_PREP(CMN_PLL_REFCLK_INDEX, 7); ++ break; ++ case 50000000: ++ val |= FIELD_PREP(CMN_PLL_REFCLK_INDEX, 8); ++ break; ++ case 96000000: ++ val |= FIELD_PREP(CMN_PLL_REFCLK_INDEX, 7); ++ val &= ~CMN_PLL_REFCLK_DIV; ++ val |= FIELD_PREP(CMN_PLL_REFCLK_DIV, 2); ++ break; ++ default: ++ return -EINVAL; ++ } ++ ++ writel(val, base + CMN_PLL_REFCLK_CONFIG); ++ ++ /* Update the source clock rate selection. Only 96 MHZ uses 0. */ ++ val = readl(base + CMN_PLL_REFCLK_SRC_SELECTION); ++ val &= ~CMN_PLL_REFCLK_SRC_DIV; ++ if (parent_rate != 96000000) ++ val |= FIELD_PREP(CMN_PLL_REFCLK_SRC_DIV, 1); ++ ++ writel(val, base + CMN_PLL_REFCLK_SRC_SELECTION); ++ ++ /* ++ * Reset the CMN PLL block by asserting/de-asserting for 100 ms ++ * each, to ensure the updated configurations take effect. ++ */ ++ val = readl(base + CMN_PLL_POWER_ON_AND_RESET); ++ val &= ~CMN_ANA_EN_SW_RSTN; ++ writel(val, base); ++ msleep(100); ++ ++ val |= CMN_ANA_EN_SW_RSTN; ++ writel(val, base + CMN_PLL_POWER_ON_AND_RESET); ++ msleep(100); ++ ++ return 0; ++} ++ ++static int ipq_cmn_pll_clk_register(struct device *dev, const char *parent) ++{ ++ const struct cmn_pll_fixed_output_clk *fixed_clk; ++ struct clk_hw_onecell_data *data; ++ unsigned int num_clks; ++ struct clk_hw *hw; ++ int i; ++ ++ num_clks = ARRAY_SIZE(ipq9574_output_clks); ++ fixed_clk = ipq9574_output_clks; ++ ++ data = devm_kzalloc(dev, struct_size(data, hws, num_clks), GFP_KERNEL); ++ if (!data) ++ return -ENOMEM; ++ ++ for (i = 0; i < num_clks; i++) { ++ hw = devm_clk_hw_register_fixed_rate(dev, fixed_clk[i].name, ++ parent, 0, ++ fixed_clk[i].rate); ++ if (IS_ERR(hw)) ++ return PTR_ERR(hw); ++ ++ data->hws[fixed_clk[i].id] = hw; ++ } ++ data->num = num_clks; ++ ++ return devm_of_clk_add_hw_provider(dev, of_clk_hw_onecell_get, data); ++} ++ ++static int ipq_cmn_pll_clk_probe(struct platform_device *pdev) ++{ ++ struct device *dev = &pdev->dev; ++ struct clk *clk; ++ int ret; ++ ++ /* ++ * To access the CMN PLL registers, the GCC AHB & SYSY clocks ++ * for CMN PLL block need to be enabled. ++ */ ++ clk = devm_clk_get_enabled(dev, "ahb"); ++ if (IS_ERR(clk)) ++ return dev_err_probe(dev, PTR_ERR(clk), ++ "Enable AHB clock failed\n"); ++ ++ clk = devm_clk_get_enabled(dev, "sys"); ++ if (IS_ERR(clk)) ++ return dev_err_probe(dev, PTR_ERR(clk), ++ "Enable SYS clock failed\n"); ++ ++ clk = devm_clk_get(dev, "ref"); ++ if (IS_ERR(clk)) ++ return dev_err_probe(dev, PTR_ERR(clk), ++ "Get reference clock failed\n"); ++ ++ /* Configure CMN PLL to apply the reference clock. 
*/ ++ ret = ipq_cmn_pll_config(dev, clk_get_rate(clk)); ++ if (ret) ++ return dev_err_probe(dev, ret, "Configure CMN PLL failed\n"); ++ ++ return ipq_cmn_pll_clk_register(dev, __clk_get_name(clk)); ++} ++ ++static const struct of_device_id ipq_cmn_pll_clk_ids[] = { ++ { .compatible = "qcom,ipq9574-cmn-pll", }, ++ { } ++}; ++ ++static struct platform_driver ipq_cmn_pll_clk_driver = { ++ .probe = ipq_cmn_pll_clk_probe, ++ .driver = { ++ .name = "ipq_cmn_pll", ++ .of_match_table = ipq_cmn_pll_clk_ids, ++ }, ++}; ++ ++module_platform_driver(ipq_cmn_pll_clk_driver); ++ ++MODULE_DESCRIPTION("Qualcomm Technologies, Inc. IPQ CMN PLL Driver"); ++MODULE_LICENSE("GPL"); +-- +2.40.1 + diff --git a/target/linux/qualcommax/patches-6.6/0703-clk-qcom-cmn-pll-add-IPQ5018-support.patch b/target/linux/qualcommax/patches-6.6/0703-clk-qcom-cmn-pll-add-IPQ5018-support.patch new file mode 100644 index 000000000..051b99da1 --- /dev/null +++ b/target/linux/qualcommax/patches-6.6/0703-clk-qcom-cmn-pll-add-IPQ5018-support.patch @@ -0,0 +1,80 @@ +From a28797563b8c97c9abced82e0cf89302fcd2bf37 Mon Sep 17 00:00:00 2001 +From: Ziyang Huang +Date: Sun, 8 Sep 2024 16:40:11 +0800 +Subject: [PATCH 1/2] clk: qcom: cmn-pll: add IPQ5018 support + +Signed-off-by: hzy +--- + drivers/clk/qcom/Kconfig | 1 - + drivers/clk/qcom/clk-ipq-cmn-pll.c | 29 +++++++++++++++++++++++++++++ + 2 files changed, 29 insertions(+), 1 deletion(-) + +diff --git a/drivers/clk/qcom/Kconfig b/drivers/clk/qcom/Kconfig +index 5a8a09f9c08c..556a5a20a78a 100644 +--- a/drivers/clk/qcom/Kconfig ++++ b/drivers/clk/qcom/Kconfig +@@ -141,7 +141,6 @@ config IPQ_APSS_6018 + + config IPQ_CMN_PLL + tristate "IPQ CMN PLL Clock Controller" +- depends on IPQ_GCC_9574 + help + Support for CMN PLL clock controller on IPQ platform. The + CMN PLL feeds the reference clocks to the Ethernet devices +diff --git a/drivers/clk/qcom/clk-ipq-cmn-pll.c b/drivers/clk/qcom/clk-ipq-cmn-pll.c +index 72030a61a131..8f2459af3105 100644 +--- a/drivers/clk/qcom/clk-ipq-cmn-pll.c ++++ b/drivers/clk/qcom/clk-ipq-cmn-pll.c +@@ -42,6 +42,9 @@ + #include + #include + ++#define TCSR_ETH_CMN 0x0 ++#define TCSR_ETH_CMN_ENABLE BIT(0) ++ + #define CMN_PLL_REFCLK_SRC_SELECTION 0x28 + #define CMN_PLL_REFCLK_SRC_DIV GENMASK(9, 8) + +@@ -79,6 +82,28 @@ static const struct cmn_pll_fixed_output_clk ipq9574_output_clks[] = { + CLK_PLL_OUTPUT(ETH_25MHZ_CLK, "eth-25mhz", 25000000UL), + }; + ++static int ipq_cmn_pll_tcsr_enable(struct platform_device *pdev) ++{ ++ struct resource *res; ++ void __iomem *tcsr_base; ++ u32 val; ++ ++ /* For IPQ50xx, tcsr is necessary to enable cmn block */ ++ res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "tcsr"); ++ if (!res) ++ return 0; ++ ++ tcsr_base = devm_ioremap_resource(&pdev->dev, res); ++ if (IS_ERR_OR_NULL(tcsr_base)) ++ return PTR_ERR(tcsr_base); ++ ++ val = readl(tcsr_base + TCSR_ETH_CMN); ++ val |= TCSR_ETH_CMN_ENABLE; ++ writel(val, (tcsr_base + TCSR_ETH_CMN)); ++ ++ return 0; ++} ++ + static int ipq_cmn_pll_config(struct device *dev, unsigned long parent_rate) + { + void __iomem *base; +@@ -181,6 +206,10 @@ static int ipq_cmn_pll_clk_probe(struct platform_device *pdev) + struct clk *clk; + int ret; + ++ ret = ipq_cmn_pll_tcsr_enable(pdev); ++ if (ret) ++ return dev_err_probe(dev, ret, "Enable CMN PLL failed\n"); ++ + /* + * To access the CMN PLL registers, the GCC AHB & SYSY clocks + * for CMN PLL block need to be enabled. 
+-- +2.40.1 + diff --git a/target/linux/qualcommax/patches-6.6/0704-arm64-dts-qcom-ipq5018-Add-ethernet-cmn-node.patch b/target/linux/qualcommax/patches-6.6/0704-arm64-dts-qcom-ipq5018-Add-ethernet-cmn-node.patch new file mode 100644 index 000000000..910847099 --- /dev/null +++ b/target/linux/qualcommax/patches-6.6/0704-arm64-dts-qcom-ipq5018-Add-ethernet-cmn-node.patch @@ -0,0 +1,45 @@ +From 1b625a37b96b0448aac126d7720eec38de8e5956 Mon Sep 17 00:00:00 2001 +From: Ziyang Huang +Date: Sun, 8 Sep 2024 16:40:11 +0800 +Subject: [PATCH 2/2] arm64: dts: qcom: ipq5018: Add ethernet cmn node + +Signed-off-by: hzy +--- + arch/arm64/boot/dts/qcom/ipq5018.dtsi | 19 +++++++++++++++++++ + 1 file changed, 19 insertions(+) + +--- a/arch/arm64/boot/dts/qcom/ipq5018.dtsi ++++ b/arch/arm64/boot/dts/qcom/ipq5018.dtsi +@@ -16,6 +16,12 @@ + #size-cells = <2>; + + clocks { ++ cmn_pll_ref_clk: cmn-pll-ref-clk { ++ compatible = "fixed-clock"; ++ clock-frequency = <96000000>; ++ #clock-cells = <0>; ++ }; ++ + sleep_clk: sleep-clk { + compatible = "fixed-clock"; + #clock-cells = <0>; +@@ -182,6 +188,19 @@ + status = "disabled"; + }; + ++ clock-controller@9b000 { ++ compatible = "qcom,ipq9574-cmn-pll"; ++ reg = <0x0009b000 0x800>, ++ <0x19475c4 0x4>; ++ reg-names = "cmn", ++ "tcsr"; ++ clocks = <&cmn_pll_ref_clk>, ++ <&gcc GCC_CMN_BLK_AHB_CLK>, ++ <&gcc GCC_CMN_BLK_SYS_CLK>; ++ clock-names = "ref", "ahb", "sys"; ++ #clock-cells = <1>; ++ }; ++ + qfprom: qfprom@a0000 { + compatible = "qcom,ipq5018-qfprom", "qcom,qfprom"; + reg = <0xa0000 0x1000>; diff --git a/target/linux/qualcommax/patches-6.6/0711-net-phy-qcom-Introduce-IPQ5018-internal-PHY-driver.patch b/target/linux/qualcommax/patches-6.6/0711-net-phy-qcom-Introduce-IPQ5018-internal-PHY-driver.patch new file mode 100644 index 000000000..76b3ed35b --- /dev/null +++ b/target/linux/qualcommax/patches-6.6/0711-net-phy-qcom-Introduce-IPQ5018-internal-PHY-driver.patch @@ -0,0 +1,189 @@ +From 77ad12b3a5e21cae859247c0b82cf9a5b661e531 Mon Sep 17 00:00:00 2001 +From: hzy +Date: Sun, 8 Sep 2024 16:40:11 +0800 +Subject: [PATCH 1/3] net: phy: qcom: Introduce IPQ5018 internal PHY driver + +Signed-off-by: hzy +--- + drivers/net/phy/qcom/Kconfig | 6 ++ + drivers/net/phy/qcom/Makefile | 1 + + drivers/net/phy/qcom/ipq5018.c | 138 +++++++++++++++++++++++++++++++++ + 3 files changed, 145 insertions(+) + create mode 100644 drivers/net/phy/qcom/ipq5018.c + +diff --git a/drivers/net/phy/qcom/Kconfig b/drivers/net/phy/qcom/Kconfig +index 570626cc8e14..6487e6f93011 100644 +--- a/drivers/net/phy/qcom/Kconfig ++++ b/drivers/net/phy/qcom/Kconfig +@@ -9,6 +9,12 @@ config AT803X_PHY + help + Currently supports the AR8030, AR8031, AR8033, AR8035 model + ++config IPQ5018_PHY ++ tristate "Qualcomm IPQ5018 internal PHYs" ++ select QCOM_NET_PHYLIB ++ help ++ Currently supports the Qualcomm IPQ5018 internal PHY ++ + config QCA83XX_PHY + tristate "Qualcomm Atheros QCA833x PHYs" + select QCOM_NET_PHYLIB +diff --git a/drivers/net/phy/qcom/Makefile b/drivers/net/phy/qcom/Makefile +index f24fb550babd..7ebedd3ed0a2 100644 +--- a/drivers/net/phy/qcom/Makefile ++++ b/drivers/net/phy/qcom/Makefile +@@ -1,6 +1,7 @@ + # SPDX-License-Identifier: GPL-2.0 + obj-$(CONFIG_QCOM_NET_PHYLIB) += qcom-phy-lib.o + obj-$(CONFIG_AT803X_PHY) += at803x.o ++obj-$(CONFIG_IPQ5018_PHY) += ipq5018.o + obj-$(CONFIG_QCA83XX_PHY) += qca83xx.o + obj-$(CONFIG_QCA808X_PHY) += qca808x.o + obj-$(CONFIG_QCA807X_PHY) += qca807x.o +diff --git a/drivers/net/phy/qcom/ipq5018.c b/drivers/net/phy/qcom/ipq5018.c +new file mode 100644 +index 
000000000000..497ad28fe63f +--- /dev/null ++++ b/drivers/net/phy/qcom/ipq5018.c +@@ -0,0 +1,138 @@ ++#include ++#include ++#include ++#include ++#include ++ ++#include "qcom.h" ++ ++#define IPQ5018_PHY_ID 0x004dd0c0 ++ ++#define TX_RX_CLK_RATE 125000000 /* 125M */ ++ ++#define IPQ5018_PHY_FIFO_CONTROL 0x19 ++#define IPQ5018_PHY_FIFO_RESET GENMASK(1, 0) ++ ++struct ipq5018_phy { ++ int num_clks; ++ struct clk_bulk_data *clks; ++ struct reset_control *rst; ++ ++ struct clk_hw *clk_rx, *clk_tx; ++ struct clk_hw_onecell_data *clk_data; ++}; ++ ++static int ipq5018_probe(struct phy_device *phydev) ++{ ++ struct ipq5018_phy *priv; ++ struct device *dev = &phydev->mdio.dev; ++ char name[64]; ++ int ret; ++ ++ priv = devm_kzalloc(dev, sizeof(*priv), GFP_KERNEL); ++ if (!priv) ++ return dev_err_probe(dev, -ENOMEM, ++ "failed to allocate priv\n"); ++ ++ priv->num_clks = devm_clk_bulk_get_all(dev, &priv->clks); ++ if (priv->num_clks < 0) ++ return dev_err_probe(dev, priv->num_clks, ++ "failed to acquire clocks\n"); ++ ++ ret = clk_bulk_prepare_enable(priv->num_clks, priv->clks); ++ if (ret) ++ return dev_err_probe(dev, ret, ++ "failed to enable clocks\n"); ++ ++ priv->rst = devm_reset_control_array_get_exclusive(dev); ++ if (IS_ERR_OR_NULL(priv->rst)) ++ return dev_err_probe(dev, PTR_ERR(priv->rst), ++ "failed to acquire reset\n"); ++ ++ ret = reset_control_reset(priv->rst); ++ if (ret) ++ return dev_err_probe(dev, ret, ++ "failed to reset\n"); ++ ++ snprintf(name, sizeof(name), "%s#rx", dev_name(dev)); ++ priv->clk_rx = clk_hw_register_fixed_rate(dev, name, NULL, 0, ++ TX_RX_CLK_RATE); ++ if (IS_ERR_OR_NULL(priv->clk_rx)) ++ return dev_err_probe(dev, PTR_ERR(priv->clk_rx), ++ "failed to register rx clock\n"); ++ ++ snprintf(name, sizeof(name), "%s#tx", dev_name(dev)); ++ priv->clk_tx = clk_hw_register_fixed_rate(dev, name, NULL, 0, ++ TX_RX_CLK_RATE); ++ if (IS_ERR_OR_NULL(priv->clk_tx)) ++ return dev_err_probe(dev, PTR_ERR(priv->clk_tx), ++ "failed to register tx clock\n"); ++ ++ priv->clk_data = devm_kzalloc(dev, ++ struct_size(priv->clk_data, hws, 2), ++ GFP_KERNEL); ++ if (!priv->clk_data) ++ return dev_err_probe(dev, -ENOMEM, ++ "failed to allocate clk_data\n"); ++ ++ priv->clk_data->num = 2; ++ priv->clk_data->hws[0] = priv->clk_rx; ++ priv->clk_data->hws[1] = priv->clk_tx; ++ ret = of_clk_add_hw_provider(dev->of_node, of_clk_hw_onecell_get, ++ priv->clk_data); ++ if (ret) ++ return dev_err_probe(dev, ret, ++ "fail to register clock provider\n"); ++ ++ return 0; ++} ++ ++static int ipq5018_soft_reset(struct phy_device *phydev) ++{ ++ int ret; ++ ++ ret = phy_modify(phydev, IPQ5018_PHY_FIFO_CONTROL, ++ IPQ5018_PHY_FIFO_RESET, 0); ++ if (ret < 0) ++ return ret; ++ ++ msleep(50); ++ ++ ret = phy_modify(phydev, IPQ5018_PHY_FIFO_CONTROL, ++ IPQ5018_PHY_FIFO_RESET, IPQ5018_PHY_FIFO_RESET); ++ if (ret < 0) ++ return ret; ++ ++ return 0; ++} ++ ++static int ipq5018_cable_test_start(struct phy_device *phydev) ++{ ++ /* we do all the (time consuming) work later */ ++ return 0; ++} ++ ++static struct phy_driver ipq5018_internal_phy_driver[] = { ++ { ++ PHY_ID_MATCH_EXACT(IPQ5018_PHY_ID), ++ .name = "Qualcomm IPQ5018 internal PHY", ++ .flags = PHY_IS_INTERNAL | PHY_POLL_CABLE_TEST, ++ .probe = ipq5018_probe, ++ .soft_reset = ipq5018_soft_reset, ++ .read_status = at803x_read_status, ++ .config_intr = at803x_config_intr, ++ .handle_interrupt = at803x_handle_interrupt, ++ .cable_test_start = ipq5018_cable_test_start, ++ .cable_test_get_status = qca808x_cable_test_get_status, ++ }, ++}; 
++module_phy_driver(ipq5018_internal_phy_driver); ++ ++static struct mdio_device_id __maybe_unused ipq5018_internal_phy_ids[] = { ++ { PHY_ID_MATCH_EXACT(IPQ5018_PHY_ID) }, ++ { } ++}; ++MODULE_DEVICE_TABLE(mdio, ipq5018_internal_phy_ids); ++ ++MODULE_DESCRIPTION("Qualcomm IPQ5018 internal PHY driver"); ++MODULE_AUTHOR("Ziyang Huang "); +-- +2.40.1 + diff --git a/target/linux/qualcommax/patches-6.6/0712-arm64-dts-qcom-ipq5018-add-mdio-node.patch b/target/linux/qualcommax/patches-6.6/0712-arm64-dts-qcom-ipq5018-add-mdio-node.patch new file mode 100644 index 000000000..f37352e60 --- /dev/null +++ b/target/linux/qualcommax/patches-6.6/0712-arm64-dts-qcom-ipq5018-add-mdio-node.patch @@ -0,0 +1,39 @@ +From d2cdc83fb2c7360856e598810b88211d815fc851 Mon Sep 17 00:00:00 2001 +From: Ziyang Huang +Date: Sun, 8 Sep 2024 16:40:12 +0800 +Subject: [PATCH 2/3] arm64: dts: qcom: ipq5018: add mdio node + +Signed-off-by: hzy +--- + arch/arm64/boot/dts/qcom/ipq5018.dtsi | 20 ++++++++++++++++++++ + 1 file changed, 20 insertions(+) + +--- a/arch/arm64/boot/dts/qcom/ipq5018.dtsi ++++ b/arch/arm64/boot/dts/qcom/ipq5018.dtsi +@@ -188,6 +188,26 @@ + status = "disabled"; + }; + ++ mdio0: mdio@88000 { ++ #address-cells = <1>; ++ #size-cells = <0>; ++ compatible = "qcom,ipq5018-mdio", "qcom,qca-mdio"; ++ reg = <0x88000 0x64>; ++ clocks = <&gcc GCC_MDIO0_AHB_CLK>; ++ clock-names = "gcc_mdio_ahb_clk"; ++ status = "disabled"; ++ }; ++ ++ mdio1: mdio@90000 { ++ #address-cells = <1>; ++ #size-cells = <0>; ++ compatible = "qcom,ipq5018-mdio"; ++ reg = <0x90000 0x64>; ++ clocks = <&gcc GCC_MDIO1_AHB_CLK>; ++ clock-names = "gcc_mdio_ahb_clk"; ++ status = "disabled"; ++ }; ++ + clock-controller@9b000 { + compatible = "qcom,ipq9574-cmn-pll"; + reg = <0x0009b000 0x800>, diff --git a/target/linux/qualcommax/patches-6.6/0713-arm64-dts-qcom-ipq5018-add-ge_phy-node.patch b/target/linux/qualcommax/patches-6.6/0713-arm64-dts-qcom-ipq5018-add-ge_phy-node.patch new file mode 100644 index 000000000..1e645599b --- /dev/null +++ b/target/linux/qualcommax/patches-6.6/0713-arm64-dts-qcom-ipq5018-add-ge_phy-node.patch @@ -0,0 +1,42 @@ +From 28490d95fe9e059c5ce74b2289d66e0d7ede2d50 Mon Sep 17 00:00:00 2001 +From: hzy +Date: Sun, 8 Sep 2024 16:40:12 +0800 +Subject: [PATCH 3/3] arm64: dts: qcom: ipq5018: add ge_phy node + +Signed-off-by: hzy +--- + arch/arm64/boot/dts/qcom/ipq5018.dtsi | 16 ++++++++++++++-- + 1 file changed, 14 insertions(+), 2 deletions(-) + +--- a/arch/arm64/boot/dts/qcom/ipq5018.dtsi ++++ b/arch/arm64/boot/dts/qcom/ipq5018.dtsi +@@ -196,6 +196,18 @@ + clocks = <&gcc GCC_MDIO0_AHB_CLK>; + clock-names = "gcc_mdio_ahb_clk"; + status = "disabled"; ++ ++ ge_phy: ethernet-phy@7 { ++ compatible = "ethernet-phy-id004d.d0c0"; ++ reg = <7>; ++ resets = <&gcc GCC_GEPHY_BCR>, ++ <&gcc GCC_GEPHY_MDC_SW_ARES>, ++ <&gcc GCC_GEPHY_DSP_HW_ARES>, ++ <&gcc GCC_GEPHY_RX_ARES>, ++ <&gcc GCC_GEPHY_TX_ARES>; ++ clocks = <&gcc GCC_GEPHY_RX_CLK>, ++ <&gcc GCC_GEPHY_TX_CLK>; ++ }; + }; + + mdio1: mdio@90000 { +@@ -390,8 +402,8 @@ + <&pcie_x2phy>, + <&pcie_x1phy>, + <0>, +- <0>, +- <0>, ++ <&ge_phy 0>, ++ <&ge_phy 1>, + <0>, + <0>; + #clock-cells = <1>; diff --git a/target/linux/qualcommax/patches-6.6/0721-clk-gcc-ipq5018-remove-the-unsupported-clk-combinati.patch b/target/linux/qualcommax/patches-6.6/0721-clk-gcc-ipq5018-remove-the-unsupported-clk-combinati.patch new file mode 100644 index 000000000..0f74f41d9 --- /dev/null +++ b/target/linux/qualcommax/patches-6.6/0721-clk-gcc-ipq5018-remove-the-unsupported-clk-combinati.patch @@ -0,0 +1,36 @@ +From 
f71366e0530db2c5cecbbbb6edfbf7344bd6f83b Mon Sep 17 00:00:00 2001 +From: Ziyang Huang +Date: Sun, 8 Sep 2024 16:40:12 +0800 +Subject: [PATCH 1/2] clk: gcc-ipq5018: remove the unsupported clk + combination for gmac + +Signed-off-by: hzy +--- + drivers/clk/qcom/gcc-ipq5018.c | 4 ++-- + 1 file changed, 2 insertions(+), 2 deletions(-) + +diff --git a/drivers/clk/qcom/gcc-ipq5018.c b/drivers/clk/qcom/gcc-ipq5018.c +index 3136ba1c2a59..4a8511d5f3a5 100644 +--- a/drivers/clk/qcom/gcc-ipq5018.c ++++ b/drivers/clk/qcom/gcc-ipq5018.c +@@ -677,7 +677,7 @@ static const struct freq_tbl ftbl_gmac1_rx_clk_src[] = { + F(2500000, P_UNIPHY_RX, 12.5, 0, 0), + F(24000000, P_XO, 1, 0, 0), + F(25000000, P_UNIPHY_RX, 2.5, 0, 0), +- F(125000000, P_UNIPHY_RX, 2.5, 0, 0), ++ /* F(125000000, P_UNIPHY_RX, 2.5, 0, 0), */ + F(125000000, P_UNIPHY_RX, 1, 0, 0), + F(312500000, P_UNIPHY_RX, 1, 0, 0), + { } +@@ -717,7 +717,7 @@ static const struct freq_tbl ftbl_gmac1_tx_clk_src[] = { + F(2500000, P_UNIPHY_TX, 12.5, 0, 0), + F(24000000, P_XO, 1, 0, 0), + F(25000000, P_UNIPHY_TX, 2.5, 0, 0), +- F(125000000, P_UNIPHY_TX, 2.5, 0, 0), ++ /* F(125000000, P_UNIPHY_TX, 2.5, 0, 0), */ + F(125000000, P_UNIPHY_TX, 1, 0, 0), + F(312500000, P_UNIPHY_TX, 1, 0, 0), + { } +-- +2.40.1 + diff --git a/target/linux/qualcommax/patches-6.6/0722-clk-gcc-ipq5018-hack-for-qca-ssdk.patch b/target/linux/qualcommax/patches-6.6/0722-clk-gcc-ipq5018-hack-for-qca-ssdk.patch new file mode 100644 index 000000000..f5ca800c2 --- /dev/null +++ b/target/linux/qualcommax/patches-6.6/0722-clk-gcc-ipq5018-hack-for-qca-ssdk.patch @@ -0,0 +1,61 @@ +From ce9e56a436e486690097cfbdda2d0c11b60db4c2 Mon Sep 17 00:00:00 2001 +From: Ziyang Huang +Date: Sun, 8 Sep 2024 16:40:12 +0800 +Subject: [PATCH 2/2] clk: gcc-ipq5018: hack for qca-ssdk + +Signed-off-by: hzy +--- + drivers/clk/qcom/gcc-ipq5018.c | 16 ++++++++-------- + 1 file changed, 8 insertions(+), 8 deletions(-) + +diff --git a/drivers/clk/qcom/gcc-ipq5018.c b/drivers/clk/qcom/gcc-ipq5018.c +index 4a8511d5f3a5..3d65b7dce59d 100644 +--- a/drivers/clk/qcom/gcc-ipq5018.c ++++ b/drivers/clk/qcom/gcc-ipq5018.c +@@ -335,8 +335,8 @@ static const struct parent_map gcc_xo_gpll4_gpll0_gpll0_out_main_div2_map2[] = { + + static const struct clk_parent_data gcc_xo_gephy_gcc_rx_gephy_gcc_tx_ubi32_pll_gpll0[] = { + { .index = DT_XO }, +- { .index = DT_GEPHY_RX_CLK }, +- { .index = DT_GEPHY_TX_CLK }, ++ { .name = "gephy_gcc_rx", .index = -1 }, ++ { .name = "gephy_gcc_tx", .index = -1 }, + { .hw = &ubi32_pll.clkr.hw }, + { .hw = &gpll0.clkr.hw }, + }; +@@ -351,8 +351,8 @@ static const struct parent_map gcc_xo_gephy_gcc_rx_gephy_gcc_tx_ubi32_pll_gpll0_ + + static const struct clk_parent_data gcc_xo_gephy_gcc_tx_gephy_gcc_rx_ubi32_pll_gpll0[] = { + { .index = DT_XO }, +- { .index = DT_GEPHY_TX_CLK }, +- { .index = DT_GEPHY_RX_CLK }, ++ { .name = "gephy_gcc_tx", .index = -1 }, ++ { .name = "gephy_gcc_rx", .index = -1 }, + { .hw = &ubi32_pll.clkr.hw }, + { .hw = &gpll0.clkr.hw }, + }; +@@ -367,8 +367,8 @@ static const struct parent_map gcc_xo_gephy_gcc_tx_gephy_gcc_rx_ubi32_pll_gpll0_ + + static const struct clk_parent_data gcc_xo_uniphy_gcc_rx_uniphy_gcc_tx_ubi32_pll_gpll0[] = { + { .index = DT_XO }, +- { .index = DT_UNIPHY_RX_CLK }, +- { .index = DT_UNIPHY_TX_CLK }, ++ { .name = "uniphy_gcc_rx", .index = -1 }, ++ { .name = "uniphy_gcc_tx", .index = -1 }, + { .hw = &ubi32_pll.clkr.hw }, + { .hw = &gpll0.clkr.hw }, + }; +@@ -383,8 +383,8 @@ static const struct parent_map gcc_xo_uniphy_gcc_rx_uniphy_gcc_tx_ubi32_pll_gpll + + static const 
struct clk_parent_data gcc_xo_uniphy_gcc_tx_uniphy_gcc_rx_ubi32_pll_gpll0[] = { + { .index = DT_XO }, +- { .index = DT_UNIPHY_TX_CLK }, +- { .index = DT_UNIPHY_RX_CLK }, ++ { .name = "uniphy_gcc_tx", .index = -1 }, ++ { .name = "uniphy_gcc_rx", .index = -1 }, + { .hw = &ubi32_pll.clkr.hw }, + { .hw = &gpll0.clkr.hw }, + }; +-- +2.40.1 + diff --git a/target/linux/qualcommax/patches-6.6/0751-net-dsa-qca8k-always-enable-SGMII-auto-negotiation.patch b/target/linux/qualcommax/patches-6.6/0751-net-dsa-qca8k-always-enable-SGMII-auto-negotiation.patch new file mode 100644 index 000000000..df0f1f80d --- /dev/null +++ b/target/linux/qualcommax/patches-6.6/0751-net-dsa-qca8k-always-enable-SGMII-auto-negotiation.patch @@ -0,0 +1,35 @@ +From d7a41a3ab6b8e3a3158997cda13f1fe28a37268c Mon Sep 17 00:00:00 2001 +From: hzy +Date: Sun, 8 Sep 2024 16:40:12 +0800 +Subject: [PATCH] net: dsa: qca8k: always enable SGMII auto-negotiation + +fixed-link can't work well without this + +Signed-off-by: hzy +--- + drivers/net/dsa/qca/qca8k-8xxx.c | 9 ++++----- + 1 file changed, 4 insertions(+), 5 deletions(-) + +diff --git a/drivers/net/dsa/qca/qca8k-8xxx.c b/drivers/net/dsa/qca/qca8k-8xxx.c +index 052fc67339d3..c76c11a7aa39 100644 +--- a/drivers/net/dsa/qca/qca8k-8xxx.c ++++ b/drivers/net/dsa/qca/qca8k-8xxx.c +@@ -1545,11 +1545,10 @@ static int qca8k_pcs_config(struct phylink_pcs *pcs, unsigned int neg_mode, + return -EINVAL; + } + +- /* Enable/disable SerDes auto-negotiation as necessary */ +- val = neg_mode == PHYLINK_PCS_NEG_INBAND_ENABLED ? +- 0 : QCA8K_PWS_SERDES_AEN_DIS; +- +- ret = qca8k_rmw(priv, QCA8K_REG_PWS, QCA8K_PWS_SERDES_AEN_DIS, val); ++ /* Enable SerDes auto-negotiation always. ++ * So fixed-link can work. ++ */ ++ ret = qca8k_rmw(priv, QCA8K_REG_PWS, QCA8K_PWS_SERDES_AEN_DIS, 0); + if (ret) + return ret; + +-- +2.40.1 + diff --git a/target/linux/qualcommax/patches-6.6/0801-dt-bindings-remoteproc-qcom-Add-support-for-multipd-model.patch b/target/linux/qualcommax/patches-6.6/0801-dt-bindings-remoteproc-qcom-Add-support-for-multipd-model.patch new file mode 100644 index 000000000..4f5209c51 --- /dev/null +++ b/target/linux/qualcommax/patches-6.6/0801-dt-bindings-remoteproc-qcom-Add-support-for-multipd-model.patch @@ -0,0 +1,205 @@ +From: Manikanta Mylavarapu +Date: Fri, 10 Nov 2023 14:49:29 +0530 +Subject: [PATCH] dt-bindings: remoteproc: qcom: Add support for multipd model + +Add new binding document for multipd model remoteproc. +IPQ5332, IPQ9574 follows multipd model. + +Signed-off-by: Manikanta Mylavarapu +Reviewed-by: Krzysztof Kozlowski +--- +diff --git a/Documentation/devicetree/bindings/remoteproc/qcom,multipd-pil.yaml b/Documentation/devicetree/bindings/remoteproc/qcom,multipd-pil.yaml +new file mode 100644 +index 000000000000..c52ac1640d7a +--- /dev/null ++++ b/Documentation/devicetree/bindings/remoteproc/qcom,multipd-pil.yaml +@@ -0,0 +1,189 @@ ++# SPDX-License-Identifier: (GPL-2.0-only OR BSD-3-Clause) ++%YAML 1.2 ++--- ++$id: http://devicetree.org/schemas/remoteproc/qcom,multipd-pil.yaml# ++$schema: http://devicetree.org/meta-schemas/core.yaml# ++ ++title: Qualcomm Multipd Secure Peripheral Image Loader ++ ++maintainers: ++ - Manikanta Mylavarapu ++ ++description: ++ Multipd Peripheral Image Loader loads firmware and boots Q6 protection domain, ++ WCSS protection domain remoteproc's on the Qualcomm IPQ9574, IPQ5332 SoC. ++ Protection domain is similar to process in Linux. Here QDSP6 processor runs ++ each wifi radio functionality on a separate process. 
One process can't access ++ other process resources, so this is termed as PD i.e protection domain. ++ ++properties: ++ compatible: ++ enum: ++ - qcom,ipq5332-q6-mpd ++ - qcom,ipq9574-q6-mpd ++ ++ reg: ++ maxItems: 1 ++ ++ firmware-name: ++ maxItems: 2 ++ ++ interrupts: ++ items: ++ - description: Watchdog interrupt ++ - description: Fatal interrupt ++ - description: Ready interrupt ++ - description: Handover interrupt ++ - description: Stop acknowledge interrupt ++ ++ interrupt-names: ++ items: ++ - const: wdog ++ - const: fatal ++ - const: ready ++ - const: handover ++ - const: stop-ack ++ ++ qcom,smem-states: ++ $ref: /schemas/types.yaml#/definitions/phandle-array ++ description: States used by the AP to signal the remote processor ++ items: ++ - description: Shutdown Q6 ++ - description: Stop Q6 ++ ++ qcom,smem-state-names: ++ description: ++ Names of the states used by the AP to signal the remote processor ++ items: ++ - const: shutdown ++ - const: stop ++ ++ memory-region: ++ items: ++ - description: Q6 reserved region ++ ++ glink-edge: ++ $ref: /schemas/remoteproc/qcom,glink-edge.yaml# ++ description: ++ Qualcomm G-Link subnode which represents communication edge, channels ++ and devices related to the Modem. ++ unevaluatedProperties: false ++ ++patternProperties: ++ "^pd-1|pd-2|pd-3": ++ type: object ++ description: ++ WCSS means 'wireless connectivity sub system', in simple words it's a ++ wifi radio block. In multipd model, Q6 root protection domain will ++ provide services to WCSS user protection domain. In other words WCSS ++ protection domains depends on Q6 protection domain i.e Q6 should be up ++ before WCSS, similarly Q6 should shut down after all WCSS domains are ++ down. It can be achieved by building parent-child topology between Q6 ++ and WCSS domain's i.e keeping wcss node as sub node of Q6 device node. 
++ ++ properties: ++ firmware-name: ++ maxItems: 1 ++ ++ interrupts: ++ items: ++ - description: Fatal interrupt ++ - description: Ready interrupt ++ - description: Spawn acknowledge interrupt ++ - description: Stop acknowledge interrupt ++ ++ interrupt-names: ++ items: ++ - const: fatal ++ - const: ready ++ - const: spawn-ack ++ - const: stop-ack ++ ++ qcom,smem-states: ++ $ref: /schemas/types.yaml#/definitions/phandle-array ++ description: States used by the AP to signal the remote processor ++ items: ++ - description: Shutdown WCSS pd ++ - description: Stop WCSS pd ++ - description: Spawn WCSS pd ++ ++ qcom,smem-state-names: ++ description: ++ Names of the states used by the AP to signal the remote processor ++ items: ++ - const: shutdown ++ - const: stop ++ - const: spawn ++ ++ required: ++ - firmware-name ++ - interrupts ++ - interrupt-names ++ - qcom,smem-states ++ - qcom,smem-state-names ++ ++ additionalProperties: false ++ ++required: ++ - compatible ++ - firmware-name ++ - reg ++ - interrupts ++ - interrupt-names ++ - qcom,smem-states ++ - qcom,smem-state-names ++ - memory-region ++ ++additionalProperties: false ++ ++examples: ++ - | ++ #include ++ q6v5_wcss: remoteproc@d100000 { ++ compatible = "qcom,ipq5332-q6-mpd"; ++ reg = <0xd100000 0x4040>; ++ firmware-name = "ath11k/IPQ5332/hw1.0/q6_fw0.mdt", ++ "ath11k/IPQ5332/hw1.0/iu_fw.mdt"; ++ interrupts-extended = <&intc GIC_SPI 291 IRQ_TYPE_EDGE_RISING>, ++ <&wcss_smp2p_in 0 IRQ_TYPE_NONE>, ++ <&wcss_smp2p_in 1 IRQ_TYPE_NONE>, ++ <&wcss_smp2p_in 2 IRQ_TYPE_NONE>, ++ <&wcss_smp2p_in 3 IRQ_TYPE_NONE>; ++ interrupt-names = "wdog", ++ "fatal", ++ "ready", ++ "handover", ++ "stop-ack"; ++ ++ qcom,smem-states = <&wcss_smp2p_out 0>, ++ <&wcss_smp2p_out 1>; ++ qcom,smem-state-names = "shutdown", ++ "stop"; ++ ++ memory-region = <&q6_region>; ++ ++ glink-edge { ++ interrupts = ; ++ label = "rtr"; ++ qcom,remote-pid = <1>; ++ mboxes = <&apcs_glb 8>; ++ }; ++ ++ pd-1 { ++ firmware-name = "ath11k/IPQ5332/hw1.0/q6_fw1.mdt"; ++ interrupts-extended = <&wcss_smp2p_in 8 IRQ_TYPE_NONE>, ++ <&wcss_smp2p_in 9 IRQ_TYPE_NONE>, ++ <&wcss_smp2p_in 12 IRQ_TYPE_NONE>, ++ <&wcss_smp2p_in 11 IRQ_TYPE_NONE>; ++ interrupt-names = "fatal", ++ "ready", ++ "spawn-ack", ++ "stop-ack"; ++ qcom,smem-states = <&wcss_smp2p_out 8>, ++ <&wcss_smp2p_out 9>, ++ <&wcss_smp2p_out 10>; ++ qcom,smem-state-names = "shutdown", ++ "stop", ++ "spawn"; ++ }; ++ }; diff --git a/target/linux/qualcommax/patches-6.6/0802-firmware-qcom_scm-ipq5332-add-support-to-pass-metada.patch b/target/linux/qualcommax/patches-6.6/0802-firmware-qcom_scm-ipq5332-add-support-to-pass-metada.patch new file mode 100644 index 000000000..910a0d641 --- /dev/null +++ b/target/linux/qualcommax/patches-6.6/0802-firmware-qcom_scm-ipq5332-add-support-to-pass-metada.patch @@ -0,0 +1,50 @@ +From 50799703c6c8ec0860e19b102dd7cca3d29028e1 Mon Sep 17 00:00:00 2001 +From: Manikanta Mylavarapu +Date: Fri, 10 Nov 2023 14:49:34 +0530 +Subject: [PATCH] firmware: qcom_scm: ipq5332: add support to pass + metadata size + +IPQ5332 security software running under trustzone +requires metadata size. With V2 cmd, pass metadata +size as well. 
+ +Signed-off-by: Manikanta Mylavarapu +--- + drivers/firmware/qcom_scm.c | 8 ++++++++ + drivers/firmware/qcom_scm.h | 1 + + 2 files changed, 9 insertions(+) + +diff --git a/drivers/firmware/qcom_scm.c b/drivers/firmware/qcom_scm.c +index ea4780eb1fb9..e85c0b513938 100644 +--- a/drivers/firmware/qcom_scm.c ++++ b/drivers/firmware/qcom_scm.c +@@ -592,6 +592,14 @@ int qcom_scm_pas_mem_setup(u32 peripheral, phys_addr_t addr, phys_addr_t size) + if (ret) + goto disable_clk; + ++ if (__qcom_scm_is_call_available(__scm->dev, QCOM_SCM_SVC_PIL, ++ QCOM_SCM_PAS_INIT_IMAGE_V2)) { ++ desc.cmd = QCOM_SCM_PAS_INIT_IMAGE_V2; ++ desc.arginfo = ++ QCOM_SCM_ARGS(3, QCOM_SCM_VAL, QCOM_SCM_RW, QCOM_SCM_VAL); ++ desc.args[2] = size; ++ } ++ + ret = qcom_scm_call(__scm->dev, &desc, &res); + qcom_scm_bw_disable(); + +diff --git a/drivers/firmware/qcom_scm.h b/drivers/firmware/qcom_scm.h +index 7b68fa820495..e5050af85371 100644 +--- a/drivers/firmware/qcom_scm.h ++++ b/drivers/firmware/qcom_scm.h +@@ -92,6 +92,7 @@ extern int scm_legacy_call(struct device *dev, const struct qcom_scm_desc *desc, + + #define QCOM_SCM_SVC_PIL 0x02 + #define QCOM_SCM_PIL_PAS_INIT_IMAGE 0x01 ++#define QCOM_SCM_PAS_INIT_IMAGE_V2 0x1a + #define QCOM_SCM_PIL_PAS_MEM_SETUP 0x02 + #define QCOM_SCM_PIL_PAS_AUTH_AND_RESET 0x05 + #define QCOM_SCM_PIL_PAS_SHUTDOWN 0x06 +-- +2.40.1 + diff --git a/target/linux/qualcommax/patches-6.6/0803-firmware-qcom_scm-ipq5332-add-msa-lock-unlock-suppor.patch b/target/linux/qualcommax/patches-6.6/0803-firmware-qcom_scm-ipq5332-add-msa-lock-unlock-suppor.patch new file mode 100644 index 000000000..bdfe25cbe --- /dev/null +++ b/target/linux/qualcommax/patches-6.6/0803-firmware-qcom_scm-ipq5332-add-msa-lock-unlock-suppor.patch @@ -0,0 +1,134 @@ +From 217fbbc122663c5a3dac752cebef44fb3e0cc179 Mon Sep 17 00:00:00 2001 +From: Manikanta Mylavarapu +Date: Fri, 10 Nov 2023 14:49:35 +0530 +Subject: [PATCH] firmware: qcom_scm: ipq5332: add msa lock/unlock + support + +IPQ5332 user pd remoteproc firmwares need to be locked +with MSA(modem secure access) features. This patch add +support to lock/unlock MSA features. + +Signed-off-by: Manikanta Mylavarapu +--- + drivers/firmware/qcom_scm.c | 78 ++++++++++++++++++++++++++ + drivers/firmware/qcom_scm.h | 2 + + include/linux/firmware/qcom/qcom_scm.h | 2 + + 3 files changed, 82 insertions(+) + +diff --git a/drivers/firmware/qcom_scm.c b/drivers/firmware/qcom_scm.c +index e85c0b513938..70fca012d672 100644 +--- a/drivers/firmware/qcom_scm.c ++++ b/drivers/firmware/qcom_scm.c +@@ -712,6 +712,84 @@ bool qcom_scm_pas_supported(u32 peripheral) + } + EXPORT_SYMBOL_GPL(qcom_scm_pas_supported); + ++/** ++ * qcom_scm_msa_lock() - Lock given peripheral firmware region as MSA ++ * ++ * @peripheral: peripheral id ++ * ++ * Return 0 on success. ++ */ ++int qcom_scm_msa_lock(u32 peripheral) ++{ ++ int ret; ++ struct qcom_scm_desc desc = { ++ .svc = QCOM_SCM_SVC_PIL, ++ .cmd = QCOM_SCM_MSA_LOCK, ++ .arginfo = QCOM_SCM_ARGS(1), ++ .args[0] = peripheral, ++ .owner = ARM_SMCCC_OWNER_SIP, ++ }; ++ struct qcom_scm_res res; ++ ++ if (!__qcom_scm_is_call_available(__scm->dev, QCOM_SCM_SVC_PIL, ++ QCOM_SCM_MSA_LOCK)) ++ return 0; ++ ++ ret = qcom_scm_clk_enable(); ++ if (ret) ++ return ret; ++ ++ ret = qcom_scm_bw_enable(); ++ if (ret) ++ return ret; ++ ++ ret = qcom_scm_call(__scm->dev, &desc, &res); ++ qcom_scm_bw_disable(); ++ qcom_scm_clk_disable(); ++ ++ return ret ? 
: res.result[0]; ++} ++EXPORT_SYMBOL_GPL(qcom_scm_msa_lock); ++ ++/** ++ * qcom_scm_msa_unlock() - Unlock given peripheral MSA firmware region ++ * ++ * @peripheral: peripheral id ++ * ++ * Return 0 on success. ++ */ ++int qcom_scm_msa_unlock(u32 peripheral) ++{ ++ int ret; ++ struct qcom_scm_desc desc = { ++ .svc = QCOM_SCM_SVC_PIL, ++ .cmd = QCOM_SCM_MSA_UNLOCK, ++ .arginfo = QCOM_SCM_ARGS(1), ++ .args[0] = peripheral, ++ .owner = ARM_SMCCC_OWNER_SIP, ++ }; ++ struct qcom_scm_res res; ++ ++ if (!__qcom_scm_is_call_available(__scm->dev, QCOM_SCM_SVC_PIL, ++ QCOM_SCM_MSA_UNLOCK)) ++ return 0; ++ ++ ret = qcom_scm_clk_enable(); ++ if (ret) ++ return ret; ++ ++ ret = qcom_scm_bw_enable(); ++ if (ret) ++ return ret; ++ ++ ret = qcom_scm_call(__scm->dev, &desc, &res); ++ qcom_scm_bw_disable(); ++ qcom_scm_clk_disable(); ++ ++ return ret ? : res.result[0]; ++} ++EXPORT_SYMBOL_GPL(qcom_scm_msa_unlock); ++ + static int __qcom_scm_pas_mss_reset(struct device *dev, bool reset) + { + struct qcom_scm_desc desc = { +diff --git a/drivers/firmware/qcom_scm.h b/drivers/firmware/qcom_scm.h +index e5050af85371..345410b56a82 100644 +--- a/drivers/firmware/qcom_scm.h ++++ b/drivers/firmware/qcom_scm.h +@@ -98,6 +98,8 @@ extern int scm_legacy_call(struct device *dev, const struct qcom_scm_desc *desc, + #define QCOM_SCM_PIL_PAS_SHUTDOWN 0x06 + #define QCOM_SCM_PIL_PAS_IS_SUPPORTED 0x07 + #define QCOM_SCM_PIL_PAS_MSS_RESET 0x0a ++#define QCOM_SCM_MSA_LOCK 0x24 ++#define QCOM_SCM_MSA_UNLOCK 0x25 + + #define QCOM_SCM_SVC_IO 0x05 + #define QCOM_SCM_IO_READ 0x01 +diff --git a/include/linux/firmware/qcom/qcom_scm.h b/include/linux/firmware/qcom/qcom_scm.h +index 0c091a3f6d49..58c476941e71 100644 +--- a/include/linux/firmware/qcom/qcom_scm.h ++++ b/include/linux/firmware/qcom/qcom_scm.h +@@ -81,6 +81,8 @@ extern int qcom_scm_pas_mem_setup(u32 peripheral, phys_addr_t addr, + extern int qcom_scm_pas_auth_and_reset(u32 peripheral); + extern int qcom_scm_pas_shutdown(u32 peripheral); + extern bool qcom_scm_pas_supported(u32 peripheral); ++extern int qcom_scm_msa_lock(u32 peripheral); ++extern int qcom_scm_msa_unlock(u32 peripheral); + + extern int qcom_scm_io_readl(phys_addr_t addr, unsigned int *val); + extern int qcom_scm_io_writel(phys_addr_t addr, unsigned int val); +-- +2.40.1 diff --git a/target/linux/qualcommax/patches-6.6/0804-remoteproc-qcom-q6v5-Add-multipd-interrupts-support.patch b/target/linux/qualcommax/patches-6.6/0804-remoteproc-qcom-q6v5-Add-multipd-interrupts-support.patch new file mode 100644 index 000000000..d3c046571 --- /dev/null +++ b/target/linux/qualcommax/patches-6.6/0804-remoteproc-qcom-q6v5-Add-multipd-interrupts-support.patch @@ -0,0 +1,155 @@ +From cae691d32306966065df869fa7424728d1b16b14 Mon Sep 17 00:00:00 2001 +From: Manikanta Mylavarapu +Date: Fri, 10 Nov 2023 14:49:36 +0530 +Subject: [PATCH] remoteproc: qcom: q6v5: Add multipd interrupts support + +In multipd model, root & user pd remoteproc's interrupts are +different. User pd needs additional interrupts like spawn. +Instead of going with qcom_q6v5_init(), we defined a new +function to register userpd rproc interrupts in mpd driver. +Since userpd rproc uses some of common interrupts like fatal, +ready, static is removed from ISR handler and used in userpd +interrupt registration. 
+ +Signed-off-by: Manikanta Mylavarapu +--- + drivers/remoteproc/qcom_q6v5.c | 41 +++++++++++++++++++++++++++++++--- + drivers/remoteproc/qcom_q6v5.h | 11 +++++++++ + 2 files changed, 49 insertions(+), 3 deletions(-) + +diff --git a/drivers/remoteproc/qcom_q6v5.c b/drivers/remoteproc/qcom_q6v5.c +index 4ee5e67a9f03..0e32f13c196d 100644 +--- a/drivers/remoteproc/qcom_q6v5.c ++++ b/drivers/remoteproc/qcom_q6v5.c +@@ -112,7 +112,7 @@ static irqreturn_t q6v5_wdog_interrupt(int irq, void *data) + return IRQ_HANDLED; + } + +-static irqreturn_t q6v5_fatal_interrupt(int irq, void *data) ++irqreturn_t q6v5_fatal_interrupt(int irq, void *data) + { + struct qcom_q6v5 *q6v5 = data; + size_t len; +@@ -132,8 +132,9 @@ static irqreturn_t q6v5_fatal_interrupt(int irq, void *data) + + return IRQ_HANDLED; + } ++EXPORT_SYMBOL_GPL(q6v5_fatal_interrupt); + +-static irqreturn_t q6v5_ready_interrupt(int irq, void *data) ++irqreturn_t q6v5_ready_interrupt(int irq, void *data) + { + struct qcom_q6v5 *q6v5 = data; + +@@ -141,6 +142,7 @@ static irqreturn_t q6v5_ready_interrupt(int irq, void *data) + + return IRQ_HANDLED; + } ++EXPORT_SYMBOL_GPL(q6v5_ready_interrupt); + + /** + * qcom_q6v5_wait_for_start() - wait for remote processor start signal +@@ -177,7 +179,17 @@ static irqreturn_t q6v5_handover_interrupt(int irq, void *data) + return IRQ_HANDLED; + } + +-static irqreturn_t q6v5_stop_interrupt(int irq, void *data) ++irqreturn_t q6v5_spawn_interrupt(int irq, void *data) ++{ ++ struct qcom_q6v5 *q6v5 = data; ++ ++ complete(&q6v5->spawn_done); ++ ++ return IRQ_HANDLED; ++} ++EXPORT_SYMBOL_GPL(q6v5_spawn_interrupt); ++ ++irqreturn_t q6v5_stop_interrupt(int irq, void *data) + { + struct qcom_q6v5 *q6v5 = data; + +@@ -185,6 +197,7 @@ static irqreturn_t q6v5_stop_interrupt(int irq, void *data) + + return IRQ_HANDLED; + } ++EXPORT_SYMBOL_GPL(q6v5_stop_interrupt); + + /** + * qcom_q6v5_request_stop() - request the remote processor to stop +@@ -214,6 +227,28 @@ int qcom_q6v5_request_stop(struct qcom_q6v5 *q6v5, struct qcom_sysmon *sysmon) + } + EXPORT_SYMBOL_GPL(qcom_q6v5_request_stop); + ++/** ++ * qcom_q6v5_request_spawn() - request the remote processor to spawn ++ * @q6v5: reference to qcom_q6v5 context ++ * ++ * Return: 0 on success, negative errno on failure ++ */ ++int qcom_q6v5_request_spawn(struct qcom_q6v5 *q6v5) ++{ ++ int ret; ++ ++ ret = qcom_smem_state_update_bits(q6v5->spawn_state, ++ BIT(q6v5->spawn_bit), BIT(q6v5->spawn_bit)); ++ ++ ret = wait_for_completion_timeout(&q6v5->spawn_done, 5 * HZ); ++ ++ qcom_smem_state_update_bits(q6v5->spawn_state, ++ BIT(q6v5->spawn_bit), 0); ++ ++ return ret == 0 ? 
-ETIMEDOUT : 0; ++} ++EXPORT_SYMBOL_GPL(qcom_q6v5_request_spawn); ++ + /** + * qcom_q6v5_panic() - panic handler to invoke a stop on the remote + * @q6v5: reference to qcom_q6v5 context +diff --git a/drivers/remoteproc/qcom_q6v5.h b/drivers/remoteproc/qcom_q6v5.h +index 5a859c41896e..4e1bb1a68284 100644 +--- a/drivers/remoteproc/qcom_q6v5.h ++++ b/drivers/remoteproc/qcom_q6v5.h +@@ -18,21 +18,27 @@ struct qcom_q6v5 { + + struct qcom_smem_state *state; + struct qmp *qmp; ++ struct qcom_smem_state *shutdown_state; ++ struct qcom_smem_state *spawn_state; + + struct icc_path *path; + + unsigned stop_bit; ++ unsigned shutdown_bit; ++ unsigned spawn_bit; + + int wdog_irq; + int fatal_irq; + int ready_irq; + int handover_irq; + int stop_irq; ++ int spawn_irq; + + bool handover_issued; + + struct completion start_done; + struct completion stop_done; ++ struct completion spawn_done; + + int crash_reason; + +@@ -50,7 +56,12 @@ void qcom_q6v5_deinit(struct qcom_q6v5 *q6v5); + int qcom_q6v5_prepare(struct qcom_q6v5 *q6v5); + int qcom_q6v5_unprepare(struct qcom_q6v5 *q6v5); + int qcom_q6v5_request_stop(struct qcom_q6v5 *q6v5, struct qcom_sysmon *sysmon); ++int qcom_q6v5_request_spawn(struct qcom_q6v5 *q6v5); + int qcom_q6v5_wait_for_start(struct qcom_q6v5 *q6v5, int timeout); + unsigned long qcom_q6v5_panic(struct qcom_q6v5 *q6v5); ++irqreturn_t q6v5_fatal_interrupt(int irq, void *data); ++irqreturn_t q6v5_ready_interrupt(int irq, void *data); ++irqreturn_t q6v5_spawn_interrupt(int irq, void *data); ++irqreturn_t q6v5_stop_interrupt(int irq, void *data); + + #endif +-- +2.40.1 + diff --git a/target/linux/qualcommax/patches-6.6/0805-remoteproc-qcom-Add-Hexagon-based-multipd-rproc-driv.patch b/target/linux/qualcommax/patches-6.6/0805-remoteproc-qcom-Add-Hexagon-based-multipd-rproc-driv.patch new file mode 100644 index 000000000..14371cc0f --- /dev/null +++ b/target/linux/qualcommax/patches-6.6/0805-remoteproc-qcom-Add-Hexagon-based-multipd-rproc-driv.patch @@ -0,0 +1,901 @@ +From 3f61ff1fb5c90f8b6c28a3a2b4a29203000ee585 Mon Sep 17 00:00:00 2001 +From: Manikanta Mylavarapu +Date: Fri, 10 Nov 2023 14:49:37 +0530 +Subject: [PATCH] remoteproc: qcom: Add Hexagon based multipd rproc + driver + +It adds support to bring up remoteproc's on multipd model. +Pd means protection domain. It's similar to process in Linux. +Here QDSP6 processor runs each wifi radio functionality on a +separate process. One process can't access other process +resources, so this is termed as PD i.e protection domain. + +Here we have two pd's called root and user pd. We can correlate +Root pd as root and user pd as user in linux. Root pd has more +privileges than user pd. Root will provide services to user pd. + +>From remoteproc driver perspective, root pd corresponds to QDSP6 +processor bring up and user pd corresponds to Wifi radio (WCSS) +bring up. + +Here WCSS(user) PD is dependent on Q6(root) PD, so first +q6 pd should be up before wcss pd. After wcss pd goes down, +q6 pd should be turned off. 
+ + APPS QDSP6 +------- ------------- +| | Crash notification | | ------ +| |<---------------------|----------|-------|User| +| | | | |->|PD1 | +| | | ------- | | ------ +| | | | | | | +|Root | Start/stop Q6 | | R | | | +|PD |<---------------------|->| | | | +|rproc| Crash notification | | O | | | +| | | | | | | +|User |Start/stop UserPD1 | | O | | | +|PD1 |----------------------|->| |-|----| +|rproc| | | T | | | +| | | | | | | +|User |Start/stop UserPD2 | | P | | | +|PD2 |----------------------|->| |-|----| +|rproc| | | D | | | +| | | ------- | | ------ +| | Crash notification | | |->|User| +| |<---------------------|----------|-------|PD2 | +------- | | ------ + ------------ + +IPQ5332, IPQ9574 supports multipd remoteproc driver. + +Signed-off-by: Manikanta Mylavarapu +--- + drivers/remoteproc/Kconfig | 19 + + drivers/remoteproc/Makefile | 1 + + drivers/remoteproc/qcom_q6v5_mpd.c | 802 +++++++++++++++++++++++++++++ + 3 files changed, 822 insertions(+) + create mode 100644 drivers/remoteproc/qcom_q6v5_mpd.c + +--- a/drivers/remoteproc/Kconfig ++++ b/drivers/remoteproc/Kconfig +@@ -234,6 +234,25 @@ config QCOM_Q6V5_PAS + CDSP (Compute DSP), MPSS (Modem Peripheral SubSystem), and + SLPI (Sensor Low Power Island). + ++config QCOM_Q6V5_MPD ++ tristate "Qualcomm Hexagon based MPD model Peripheral Image Loader" ++ depends on OF && ARCH_QCOM ++ depends on QCOM_SMEM ++ depends on RPMSG_QCOM_SMD || RPMSG_QCOM_SMD=n ++ depends on RPMSG_QCOM_GLINK_SMEM || RPMSG_QCOM_GLINK_SMEM=n ++ depends on QCOM_SYSMON || QCOM_SYSMON=n ++ depends on RPMSG_QCOM_GLINK || RPMSG_QCOM_GLINK=n ++ depends on QCOM_AOSS_QMP || QCOM_AOSS_QMP=n ++ select QCOM_MDT_LOADER ++ select QCOM_PIL_INFO ++ select QCOM_Q6V5_COMMON ++ select QCOM_RPROC_COMMON ++ select QCOM_SCM ++ help ++ Say y here to support the Qualcomm Secure Peripheral Image Loader ++ for the Hexagon based MultiPD model remote processors on e.g. IPQ5018. ++ This is trustZone wireless subsystem. ++ + config QCOM_Q6V5_WCSS + tristate "Qualcomm Hexagon based WCSS Peripheral Image Loader" + depends on OF && ARCH_QCOM +--- a/drivers/remoteproc/Makefile ++++ b/drivers/remoteproc/Makefile +@@ -25,6 +25,7 @@ obj-$(CONFIG_QCOM_PIL_INFO) += qcom_pil + obj-$(CONFIG_QCOM_RPROC_COMMON) += qcom_common.o + obj-$(CONFIG_QCOM_Q6V5_COMMON) += qcom_q6v5.o + obj-$(CONFIG_QCOM_Q6V5_ADSP) += qcom_q6v5_adsp.o ++obj-$(CONFIG_QCOM_Q6V5_MPD) += qcom_q6v5_mpd.o + obj-$(CONFIG_QCOM_Q6V5_MSS) += qcom_q6v5_mss.o + obj-$(CONFIG_QCOM_Q6V5_PAS) += qcom_q6v5_pas.o + obj-$(CONFIG_QCOM_Q6V5_WCSS) += qcom_q6v5_wcss.o +--- /dev/null ++++ b/drivers/remoteproc/qcom_q6v5_mpd.c +@@ -0,0 +1,802 @@ ++// SPDX-License-Identifier: GPL-2.0 ++/* ++ * Copyright (C) 2016-2018 Linaro Ltd. ++ * Copyright (C) 2014 Sony Mobile Communications AB ++ * Copyright (c) 2012-2018, 2021 The Linux Foundation. All rights reserved. 
++ */ ++#include ++#include ++#include ++#include ++#include ++#include ++#include ++#include ++#include ++#include ++#include ++#include ++#include ++#include ++#include ++#include ++#include "qcom_common.h" ++#include "qcom_q6v5.h" ++ ++#include "remoteproc_internal.h" ++ ++#define WCSS_CRASH_REASON 421 ++#define WCSS_SMEM_HOST 1 ++ ++#define WCNSS_PAS_ID 6 ++#define MPD_WCNSS_PAS_ID 0xD ++ ++#define BUF_SIZE 35 ++ ++#define MAX_FIRMWARE 3 ++ ++#define RPD_SWID MPD_WCNSS_PAS_ID ++#define UPD_SWID 0x12 ++#define REMOTE_PID 1 ++#define UPD_BOOT_INFO_SMEM_SIZE 4096 ++#define UPD_BOOT_INFO_HEADER_TYPE 0x2 ++#define UPD_BOOT_INFO_SMEM_ID 507 ++#define VERSION2 2 ++ ++static LIST_HEAD(upd_rproc_list); ++enum { ++ Q6_IPQ, ++ WCSS_IPQ, ++}; ++ ++/** ++ * struct userpd_boot_info_header - header of user pd bootinfo ++ * @type: type of bootinfo passing over smem ++ * @length: length of header in bytes ++ */ ++struct userpd_boot_info_header { ++ u8 type; ++ u8 length; ++}; ++ ++/** ++ * struct userpd_boot_info - holds info required to boot user pd ++ * @header: pointer to header ++ * @pid: unique id represents each user pd process ++ * @bootaddr: load address of user pd firmware ++ * @data_size: user pd firmware memory size ++ */ ++struct userpd_boot_info { ++ struct userpd_boot_info_header header; ++ u8 pid; ++ u32 bootaddr; ++ u32 data_size; ++} __packed; ++ ++struct q6_wcss { ++ struct device *dev; ++ struct qcom_rproc_glink glink_subdev; ++ struct qcom_rproc_ssr ssr_subdev; ++ struct qcom_q6v5 q6; ++ phys_addr_t mem_phys; ++ phys_addr_t mem_reloc; ++ void *mem_region; ++ size_t mem_size; ++ u8 pd_asid; ++ const struct wcss_data *desc; ++ const char **firmware; ++ u32 version; ++}; ++ ++struct wcss_data { ++ u32 pasid; ++ bool share_upd_info_to_q6; ++}; ++ ++/** ++ * qcom_get_pd_asid() - get the pd asid number from PD spawn bit ++ * @rproc: rproc handle ++ * ++ * Returns asid on success ++ */ ++static u8 qcom_get_pd_asid(struct rproc *rproc) ++{ ++ struct q6_wcss *wcss = rproc->priv; ++ u8 bit = wcss->q6.spawn_bit; ++ ++ return bit / 8; ++} ++ ++static int q6_wcss_start(struct rproc *rproc) ++{ ++ struct q6_wcss *wcss = rproc->priv; ++ int ret; ++ const struct wcss_data *desc = wcss->desc; ++ ++ qcom_q6v5_prepare(&wcss->q6); ++ ++ ret = qcom_scm_pas_auth_and_reset(desc->pasid); ++ if (ret) { ++ dev_err(wcss->dev, "wcss_reset failed\n"); ++ return ret; ++ } ++ ++ ret = qcom_q6v5_wait_for_start(&wcss->q6, 5 * HZ); ++ if (ret == -ETIMEDOUT) ++ dev_err(wcss->dev, "start timed out\n"); ++ ++ return ret; ++} ++ ++static int q6_wcss_spawn_pd(struct rproc *rproc) ++{ ++ int ret; ++ struct q6_wcss *wcss = rproc->priv; ++ ++ ret = qcom_q6v5_request_spawn(&wcss->q6); ++ if (ret == -ETIMEDOUT) { ++ dev_err(wcss->dev, "%s spawn timedout\n", rproc->name); ++ return ret; ++ } ++ ++ ret = qcom_q6v5_wait_for_start(&wcss->q6, msecs_to_jiffies(10000)); ++ if (ret == -ETIMEDOUT) { ++ dev_err(wcss->dev, "%s start timedout\n", rproc->name); ++ wcss->q6.running = false; ++ return ret; ++ } ++ wcss->q6.running = true; ++ return ret; ++} ++ ++static int wcss_pd_start(struct rproc *rproc) ++{ ++ struct q6_wcss *wcss = rproc->priv; ++ u32 pasid = (wcss->pd_asid << 8) | UPD_SWID; ++ int ret; ++ ++ ret = qcom_scm_msa_lock(pasid); ++ if (ret) { ++ dev_err(wcss->dev, "failed to power up pd\n"); ++ return ret; ++ } ++ ++ if (wcss->q6.spawn_bit) { ++ ret = q6_wcss_spawn_pd(rproc); ++ if (ret) ++ return ret; ++ } ++ ++ return ret; ++} ++ ++static int q6_wcss_stop(struct rproc *rproc) ++{ ++ struct q6_wcss *wcss = rproc->priv; ++ 
const struct wcss_data *desc = wcss->desc; ++ int ret; ++ ++ ret = qcom_scm_pas_shutdown(desc->pasid); ++ if (ret) { ++ dev_err(wcss->dev, "not able to shutdown\n"); ++ return ret; ++ } ++ qcom_q6v5_unprepare(&wcss->q6); ++ ++ return 0; ++} ++ ++/** ++ * wcss_pd_stop() - Stop WCSS user pd ++ * @rproc: rproc handle ++ * ++ * Stop root pd after user pd down. Root pd ++ * is used to provide services to user pd, so ++ * keeping root pd alive when user pd is down ++ * is invalid. ++ * --------------------------------------------- ++ * ++ * ----------- ++ * |-------->| User PD1 | ++ * | ----------- ++ * | ++ * | ++ * ----- | ----------- ++ * | Q6 |---------------->| User Pd2 | ++ * ----- | ----------- ++ * | ++ * | ++ * | ----------- ++ * |--------->| User Pd3 | ++ * ----------- ++ * ---------------------------------------------- ++ */ ++static int wcss_pd_stop(struct rproc *rproc) ++{ ++ struct q6_wcss *wcss = rproc->priv; ++ struct rproc *rpd_rproc = dev_get_drvdata(wcss->dev->parent); ++ u32 pasid = (wcss->pd_asid << 8) | UPD_SWID; ++ int ret; ++ ++ if (rproc->state != RPROC_CRASHED && wcss->q6.stop_bit) { ++ ret = qcom_q6v5_request_stop(&wcss->q6, NULL); ++ if (ret) { ++ dev_err(&rproc->dev, "pd not stopped\n"); ++ return ret; ++ } ++ } ++ ++ ret = qcom_scm_msa_unlock(pasid); ++ if (ret) { ++ dev_err(wcss->dev, "failed to power down pd\n"); ++ return ret; ++ } ++ ++ rproc_shutdown(rpd_rproc); ++ ++ return 0; ++} ++ ++static void *q6_wcss_da_to_va(struct rproc *rproc, u64 da, size_t len, ++ bool *is_iomem) ++{ ++ struct q6_wcss *wcss = rproc->priv; ++ int offset; ++ ++ offset = da - wcss->mem_reloc; ++ if (offset < 0 || offset + len > wcss->mem_size) ++ return NULL; ++ ++ return wcss->mem_region + offset; ++} ++ ++/** ++ * share_upd_bootinfo_to_q6() - Share userpd boot info to Q6 root pd ++ * @rproc: rproc handle ++ * ++ * Q6 needs user pd parameters like loadaddress and ++ * PIL size to authenticate user pd with underlying ++ * security software. If authenticatoin success then ++ * only Q6 spawns user pd and sends spawn ack to rproc ++ * driver. This API is passing userpd boot info to Q6 ++ * over SMEM. ++ * ++ * User pd boot-info format mentioned below ++ *
++ * ++ * ++ * Returns 0 on success else negative value on failure. ++ */ ++static int share_upd_bootinfo_to_q6(struct rproc *rproc) ++{ ++ int ret; ++ size_t size; ++ u16 cnt = 0, version; ++ void *ptr; ++ struct q6_wcss *wcss = rproc->priv, *upd_wcss; ++ struct rproc *upd_rproc; ++ struct userpd_boot_info upd_bootinfo = {0}; ++ const struct firmware *fw; ++ ++ ret = qcom_smem_alloc(REMOTE_PID, UPD_BOOT_INFO_SMEM_ID, ++ UPD_BOOT_INFO_SMEM_SIZE); ++ if (ret && ret != -EEXIST) { ++ dev_err(wcss->dev, ++ "failed to allocate q6 bootinfo smem segment\n"); ++ return ret; ++ } ++ ++ ptr = qcom_smem_get(REMOTE_PID, UPD_BOOT_INFO_SMEM_ID, &size); ++ if (IS_ERR(ptr) || size != UPD_BOOT_INFO_SMEM_SIZE) { ++ dev_err(wcss->dev, ++ "Unable to acquire smp2p item(%d) ret:%ld\n", ++ UPD_BOOT_INFO_SMEM_ID, PTR_ERR(ptr)); ++ return PTR_ERR(ptr); ++ } ++ ++ /*Version*/ ++ version = VERSION2; ++ memcpy_toio(ptr, &version, sizeof(version)); ++ ptr += sizeof(version); ++ ++ list_for_each_entry(upd_rproc, &upd_rproc_list, node) ++ cnt++; ++ ++ /* No of elements */ ++ cnt = (sizeof(upd_bootinfo) * cnt); ++ memcpy_toio(ptr, &cnt, sizeof(u16)); ++ ptr += sizeof(u16); ++ ++ list_for_each_entry(upd_rproc, &upd_rproc_list, node) { ++ upd_wcss = upd_rproc->priv; ++ ++ /* TYPE */ ++ upd_bootinfo.header.type = UPD_BOOT_INFO_HEADER_TYPE; ++ ++ /* LENGTH */ ++ upd_bootinfo.header.length = ++ sizeof(upd_bootinfo) - sizeof(upd_bootinfo.header); ++ ++ /* Process ID */ ++ upd_bootinfo.pid = upd_wcss->pd_asid + 1; ++ ++ ret = request_firmware(&fw, upd_rproc->firmware, upd_wcss->dev); ++ if (ret < 0) { ++ dev_err(upd_wcss->dev, "request_firmware failed: %d\n", ret); ++ return ret; ++ } ++ ++ /* Load address */ ++ upd_bootinfo.bootaddr = rproc_get_boot_addr(upd_rproc, fw); ++ ++ /* Firmware mem size */ ++ upd_bootinfo.data_size = qcom_mdt_get_size(fw); ++ ++ release_firmware(fw); ++ ++ /* copy into smem */ ++ memcpy_toio(ptr, &upd_bootinfo, sizeof(upd_bootinfo)); ++ ptr += sizeof(upd_bootinfo); ++ } ++ return 0; ++} ++ ++static int q6_wcss_load(struct rproc *rproc, const struct firmware *fw) ++{ ++ struct q6_wcss *wcss = rproc->priv; ++ const struct firmware *fw_hdl; ++ int ret; ++ const struct wcss_data *desc = wcss->desc; ++ int loop; ++ ++ /* Share user pd boot info to Q6 remote processor */ ++ if (desc->share_upd_info_to_q6) { ++ ret = share_upd_bootinfo_to_q6(rproc); ++ if (ret) { ++ dev_err(wcss->dev, ++ "user pd boot info sharing with q6 failed %d\n", ++ ret); ++ return ret; ++ } ++ } ++ ++ ret = qcom_mdt_load(wcss->dev, fw, rproc->firmware, ++ desc->pasid, wcss->mem_region, ++ wcss->mem_phys, wcss->mem_size, ++ &wcss->mem_reloc); ++ if (ret) ++ return ret; ++ ++ for (loop = 1; loop < MAX_FIRMWARE; loop++) { ++ if (!wcss->firmware[loop]) ++ continue; ++ ++ ret = request_firmware(&fw_hdl, wcss->firmware[loop], ++ wcss->dev); ++ if (ret) ++ continue; ++ ++ ret = qcom_mdt_load_no_init(wcss->dev, fw_hdl, ++ wcss->firmware[loop], 0, ++ wcss->mem_region, ++ wcss->mem_phys, ++ wcss->mem_size, ++ &wcss->mem_reloc); ++ ++ release_firmware(fw_hdl); ++ ++ if (ret) { ++ dev_err(wcss->dev, ++ "can't load %s ret:%d\n", wcss->firmware[loop], ret); ++ return ret; ++ } ++ } ++ return 0; ++} ++ ++/** ++ * wcss_pd_load() - Load WCSS user pd firmware ++ * @rproc: rproc handle ++ * @fw: firmware handle ++ * ++ * User pd get services from root pd. So first ++ * bring up root pd and then load userpd firmware. 
++ * --------------------------------------------- ++ * ++ * ----------- ++ * |-------->| User PD1 | ++ * | ----------- ++ * | ++ * | ++ * ----- | ----------- ++ * | Q6 |---------------->| User Pd2 | ++ * ----- | ----------- ++ * | ++ * | ++ * | ----------- ++ * |--------->| User Pd3 | ++ * ----------- ++ * ---------------------------------------------- ++ * ++ */ ++static int wcss_pd_load(struct rproc *rproc, const struct firmware *fw) ++{ ++ struct q6_wcss *wcss = rproc->priv; ++ struct rproc *rpd_rproc = dev_get_drvdata(wcss->dev->parent); ++ u32 pasid = (wcss->pd_asid << 8) | UPD_SWID; ++ int ret; ++ ++ ret = rproc_boot(rpd_rproc); ++ if (ret) ++ return ret; ++ ++ return qcom_mdt_load(wcss->dev, fw, rproc->firmware, ++ pasid, wcss->mem_region, ++ wcss->mem_phys, wcss->mem_size, ++ &wcss->mem_reloc); ++} ++ ++static unsigned long q6_wcss_panic(struct rproc *rproc) ++{ ++ struct q6_wcss *wcss = rproc->priv; ++ ++ return qcom_q6v5_panic(&wcss->q6); ++} ++ ++static const struct rproc_ops wcss_ops = { ++ .start = wcss_pd_start, ++ .stop = wcss_pd_stop, ++ .load = wcss_pd_load, ++ .get_boot_addr = rproc_elf_get_boot_addr, ++}; ++ ++static const struct rproc_ops q6_wcss_ops = { ++ .start = q6_wcss_start, ++ .stop = q6_wcss_stop, ++ .da_to_va = q6_wcss_da_to_va, ++ .load = q6_wcss_load, ++ .get_boot_addr = rproc_elf_get_boot_addr, ++ .panic = q6_wcss_panic, ++}; ++ ++static int q6_alloc_memory_region(struct q6_wcss *wcss) ++{ ++ struct reserved_mem *rmem = NULL; ++ struct device_node *node; ++ struct device *dev = wcss->dev; ++ ++ if (wcss->version == Q6_IPQ) { ++ node = of_parse_phandle(dev->of_node, "memory-region", 0); ++ if (node) ++ rmem = of_reserved_mem_lookup(node); ++ ++ of_node_put(node); ++ ++ if (!rmem) { ++ dev_err(dev, "unable to acquire memory-region\n"); ++ return -EINVAL; ++ } ++ } else { ++ struct rproc *rpd_rproc = dev_get_drvdata(dev->parent); ++ struct q6_wcss *rpd_wcss = rpd_rproc->priv; ++ ++ wcss->mem_phys = rpd_wcss->mem_phys; ++ wcss->mem_reloc = rpd_wcss->mem_reloc; ++ wcss->mem_size = rpd_wcss->mem_size; ++ wcss->mem_region = rpd_wcss->mem_region; ++ return 0; ++ } ++ ++ wcss->mem_phys = rmem->base; ++ wcss->mem_reloc = rmem->base; ++ wcss->mem_size = rmem->size; ++ wcss->mem_region = devm_ioremap_wc(dev, wcss->mem_phys, wcss->mem_size); ++ if (!wcss->mem_region) { ++ dev_err(dev, "unable to map memory region: %pa+%pa\n", ++ &rmem->base, &rmem->size); ++ return -EBUSY; ++ } ++ ++ return 0; ++} ++ ++static int q6_get_inbound_irq(struct qcom_q6v5 *q6, ++ struct platform_device *pdev, ++ const char *int_name, ++ int index, int *pirq, ++ irqreturn_t (*handler)(int irq, void *data)) ++{ ++ int ret, irq; ++ char *interrupt, *tmp = (char *)int_name; ++ struct q6_wcss *wcss = q6->rproc->priv; ++ ++ irq = platform_get_irq(pdev, index); ++ if (irq < 0) ++ return irq; ++ ++ *pirq = irq; ++ ++ interrupt = devm_kzalloc(&pdev->dev, BUF_SIZE, GFP_KERNEL); ++ if (!interrupt) ++ return -ENOMEM; ++ ++ snprintf(interrupt, BUF_SIZE, "q6v5_wcss_userpd%d_%s", wcss->pd_asid, tmp); ++ ++ ret = devm_request_threaded_irq(&pdev->dev, *pirq, ++ NULL, handler, ++ IRQF_TRIGGER_RISING | IRQF_ONESHOT, ++ interrupt, q6); ++ if (ret) ++ return dev_err_probe(&pdev->dev, ret, ++ "failed to acquire %s irq\n", interrupt); ++ return 0; ++} ++ ++static int q6_get_outbound_irq(struct qcom_q6v5 *q6, ++ struct platform_device *pdev, ++ const char *int_name) ++{ ++ struct qcom_smem_state *tmp_state; ++ unsigned bit; ++ ++ tmp_state = qcom_smem_state_get(&pdev->dev, int_name, &bit); ++ if (IS_ERR(tmp_state)) 
++ return dev_err_probe(&pdev->dev, PTR_ERR(tmp_state), ++ "failed to acquire %s state\n", int_name); ++ ++ if (!strcmp(int_name, "stop")) { ++ q6->state = tmp_state; ++ q6->stop_bit = bit; ++ } else if (!strcmp(int_name, "spawn")) { ++ q6->spawn_state = tmp_state; ++ q6->spawn_bit = bit; ++ } ++ ++ return 0; ++} ++ ++static int init_irq(struct qcom_q6v5 *q6, ++ struct platform_device *pdev, struct rproc *rproc, ++ int crash_reason, const char *load_state, ++ void (*handover)(struct qcom_q6v5 *q6)) ++{ ++ int ret; ++ struct q6_wcss *wcss = rproc->priv; ++ ++ q6->rproc = rproc; ++ q6->dev = &pdev->dev; ++ q6->crash_reason = crash_reason; ++ q6->handover = handover; ++ ++ init_completion(&q6->start_done); ++ init_completion(&q6->stop_done); ++ init_completion(&q6->spawn_done); ++ ++ ret = q6_get_outbound_irq(q6, pdev, "stop"); ++ if (ret) ++ return ret; ++ ++ ret = q6_get_outbound_irq(q6, pdev, "spawn"); ++ if (ret) ++ return ret; ++ ++ /* Get pd_asid to prepare interrupt names */ ++ wcss->pd_asid = qcom_get_pd_asid(rproc); ++ ++ ret = q6_get_inbound_irq(q6, pdev, "fatal", 0, &q6->fatal_irq, ++ q6v5_fatal_interrupt); ++ if (ret) ++ return ret; ++ ++ ret = q6_get_inbound_irq(q6, pdev, "ready", 1, &q6->ready_irq, ++ q6v5_ready_interrupt); ++ if (ret) ++ return ret; ++ ++ ret = q6_get_inbound_irq(q6, pdev, "stop-ack", 3, &q6->stop_irq, ++ q6v5_stop_interrupt); ++ if (ret) ++ return ret; ++ ++ ret = q6_get_inbound_irq(q6, pdev, "spawn-ack", 2, &q6->spawn_irq, ++ q6v5_spawn_interrupt); ++ if (ret) ++ return ret; ++ return 0; ++} ++ ++static void q6_release_resources(void) ++{ ++ struct rproc *upd_rproc; ++ ++ /* Release userpd resources */ ++ list_for_each_entry(upd_rproc, &upd_rproc_list, node) { ++ rproc_del(upd_rproc); ++ rproc_free(upd_rproc); ++ } ++} ++ ++static int q6_register_userpd(struct platform_device *pdev, ++ struct device_node *userpd_np) ++{ ++ struct q6_wcss *wcss; ++ struct rproc *rproc = NULL; ++ int ret; ++ struct platform_device *userpd_pdev; ++ const char *firmware_name = NULL; ++ const char *label = NULL; ++ ++ ret = of_property_read_string(userpd_np, "firmware-name", ++ &firmware_name); ++ if (ret < 0) { ++ /* All userpd's who want to register as rproc must have firmware. ++ * Other than userpd like glink they don't need any firmware. ++ * So for glink child simply return success. 
++ */ ++ if (ret == -EINVAL) { ++ /* Confirming userpd_np is glink node or not */ ++ if (!of_property_read_string(userpd_np, "label", &label)) ++ return 0; ++ } ++ return ret; ++ } ++ ++ dev_info(&pdev->dev, "%s node found\n", userpd_np->name); ++ ++ userpd_pdev = of_platform_device_create(userpd_np, userpd_np->name, ++ &pdev->dev); ++ if (!userpd_pdev) ++ return dev_err_probe(&pdev->dev, -ENODEV, ++ "failed to create %s platform device\n", ++ userpd_np->name); ++ ++ userpd_pdev->dev.driver = pdev->dev.driver; ++ rproc = rproc_alloc(&userpd_pdev->dev, userpd_pdev->name, &wcss_ops, ++ firmware_name, sizeof(*wcss)); ++ if (!rproc) { ++ ret = -ENOMEM; ++ goto free_rproc; ++ } ++ ++ wcss = rproc->priv; ++ wcss->dev = &userpd_pdev->dev; ++ wcss->version = WCSS_IPQ; ++ ++ ret = q6_alloc_memory_region(wcss); ++ if (ret) ++ goto free_rproc; ++ ++ ret = init_irq(&wcss->q6, userpd_pdev, rproc, ++ WCSS_CRASH_REASON, NULL, NULL); ++ if (ret) ++ goto free_rproc; ++ ++ rproc->auto_boot = false; ++ ret = rproc_add(rproc); ++ if (ret) ++ goto free_rproc; ++ ++ list_add(&rproc->node, &upd_rproc_list); ++ platform_set_drvdata(userpd_pdev, rproc); ++ qcom_add_ssr_subdev(rproc, &wcss->ssr_subdev, userpd_pdev->name); ++ return 0; ++ ++free_rproc: ++ kfree(rproc); ++ return ret; ++} ++ ++static int q6_wcss_probe(struct platform_device *pdev) ++{ ++ const struct wcss_data *desc; ++ struct q6_wcss *wcss; ++ struct rproc *rproc; ++ int ret; ++ const char **firmware; ++ struct device_node *userpd_np; ++ const struct rproc_ops *ops = &q6_wcss_ops; ++ ++ desc = of_device_get_match_data(&pdev->dev); ++ if (!desc) ++ return -EINVAL; ++ ++ firmware = devm_kcalloc(&pdev->dev, MAX_FIRMWARE, ++ sizeof(*firmware), GFP_KERNEL); ++ if (!firmware) ++ return -ENOMEM; ++ ++ ret = of_property_read_string_array(pdev->dev.of_node, "firmware-name", ++ firmware, MAX_FIRMWARE); ++ if (ret < 0) ++ return ret; ++ ++ rproc = rproc_alloc(&pdev->dev, pdev->name, ops, ++ firmware[0], sizeof(*wcss)); ++ if (!rproc) ++ return -ENOMEM; ++ ++ wcss = rproc->priv; ++ wcss->dev = &pdev->dev; ++ wcss->desc = desc; ++ wcss->firmware = firmware; ++ wcss->version = Q6_IPQ; ++ ++ ret = q6_alloc_memory_region(wcss); ++ if (ret) ++ goto free_rproc; ++ ++ ret = qcom_q6v5_init(&wcss->q6, pdev, rproc, ++ WCSS_CRASH_REASON, NULL, NULL); ++ if (ret) ++ goto free_rproc; ++ ++ qcom_add_glink_subdev(rproc, &wcss->glink_subdev, "q6wcss"); ++ qcom_add_ssr_subdev(rproc, &wcss->ssr_subdev, "q6wcss"); ++ ++ rproc->auto_boot = false; ++ ret = rproc_add(rproc); ++ if (ret) ++ goto free_rproc; ++ ++ platform_set_drvdata(pdev, rproc); ++ ++ /* Iterate over userpd child's and register with rproc */ ++ for_each_available_child_of_node(pdev->dev.of_node, userpd_np) { ++ ret = q6_register_userpd(pdev, userpd_np); ++ if (ret) { ++ /* release resources of successfully allocated userpd rproc's */ ++ q6_release_resources(); ++ return dev_err_probe(&pdev->dev, ret, ++ "Failed to register userpd(%s)\n", ++ userpd_np->name); ++ } ++ } ++ return 0; ++ ++free_rproc: ++ rproc_free(rproc); ++ ++ return ret; ++} ++ ++static int q6_wcss_remove(struct platform_device *pdev) ++{ ++ struct rproc *rproc = platform_get_drvdata(pdev); ++ struct q6_wcss *wcss = rproc->priv; ++ ++ qcom_q6v5_deinit(&wcss->q6); ++ ++ rproc_del(rproc); ++ rproc_free(rproc); ++ ++ return 0; ++} ++ ++static const struct wcss_data q6_ipq5332_res_init = { ++ .pasid = MPD_WCNSS_PAS_ID, ++ .share_upd_info_to_q6 = true, ++}; ++ ++static const struct wcss_data q6_ipq9574_res_init = { ++ .pasid = WCNSS_PAS_ID, ++}; ++ 
++static const struct of_device_id q6_wcss_of_match[] = { ++ { .compatible = "qcom,ipq5332-q6-mpd", .data = &q6_ipq5332_res_init }, ++ { .compatible = "qcom,ipq9574-q6-mpd", .data = &q6_ipq9574_res_init }, ++ { }, ++}; ++MODULE_DEVICE_TABLE(of, q6_wcss_of_match); ++ ++static struct platform_driver q6_wcss_driver = { ++ .probe = q6_wcss_probe, ++ .remove = q6_wcss_remove, ++ .driver = { ++ .name = "qcom-q6-mpd", ++ .of_match_table = q6_wcss_of_match, ++ }, ++}; ++module_platform_driver(q6_wcss_driver); ++ ++MODULE_DESCRIPTION("Hexagon WCSS Multipd Peripheral Image Loader"); ++MODULE_LICENSE("GPL v2"); diff --git a/target/linux/qualcommax/patches-6.6/0806-rproc-qcom_q6v5_mpd-split-q6_wcss-to-rootpd-and-user.patch b/target/linux/qualcommax/patches-6.6/0806-rproc-qcom_q6v5_mpd-split-q6_wcss-to-rootpd-and-user.patch new file mode 100644 index 000000000..95a211020 --- /dev/null +++ b/target/linux/qualcommax/patches-6.6/0806-rproc-qcom_q6v5_mpd-split-q6_wcss-to-rootpd-and-user.patch @@ -0,0 +1,319 @@ +From 6c66dff196cbba8515380110dd3599cde31dd896 Mon Sep 17 00:00:00 2001 +From: Ziyang Huang +Date: Sun, 8 Sep 2024 16:40:12 +0800 +Subject: [PATCH 1/2] rproc: qcom_q6v5_mpd: split q6_wcss to rootpd and + userpd + +Signed-off-by: hzy +--- + drivers/remoteproc/qcom_q6v5_mpd.c | 126 +++++++++++++---------------- + 1 file changed, 56 insertions(+), 70 deletions(-) + +diff --git a/drivers/remoteproc/qcom_q6v5_mpd.c b/drivers/remoteproc/qcom_q6v5_mpd.c +index b133285888c7..a1c189ab1f42 100644 +--- a/drivers/remoteproc/qcom_q6v5_mpd.c ++++ b/drivers/remoteproc/qcom_q6v5_mpd.c +@@ -44,10 +44,6 @@ + #define VERSION2 2 + + static LIST_HEAD(upd_rproc_list); +-enum { +- Q6_IPQ, +- WCSS_IPQ, +-}; + + /** + * struct userpd_boot_info_header - header of user pd bootinfo +@@ -82,10 +78,15 @@ struct q6_wcss { + phys_addr_t mem_reloc; + void *mem_region; + size_t mem_size; +- u8 pd_asid; + const struct wcss_data *desc; + const char **firmware; +- u32 version; ++}; ++ ++struct userpd { ++ u8 pd_asid; ++ struct device *dev; ++ struct qcom_rproc_ssr ssr_subdev; ++ struct qcom_q6v5 q6; + }; + + struct wcss_data { +@@ -101,8 +102,8 @@ struct wcss_data { + */ + static u8 qcom_get_pd_asid(struct rproc *rproc) + { +- struct q6_wcss *wcss = rproc->priv; +- u8 bit = wcss->q6.spawn_bit; ++ struct userpd *upd = rproc->priv; ++ u8 bit = upd->q6.spawn_bit; + + return bit / 8; + } +@@ -131,37 +132,37 @@ static int q6_wcss_start(struct rproc *rproc) + static int q6_wcss_spawn_pd(struct rproc *rproc) + { + int ret; +- struct q6_wcss *wcss = rproc->priv; ++ struct userpd *upd = rproc->priv; + +- ret = qcom_q6v5_request_spawn(&wcss->q6); ++ ret = qcom_q6v5_request_spawn(&upd->q6); + if (ret == -ETIMEDOUT) { +- dev_err(wcss->dev, "%s spawn timedout\n", rproc->name); ++ dev_err(upd->dev, "%s spawn timedout\n", rproc->name); + return ret; + } + +- ret = qcom_q6v5_wait_for_start(&wcss->q6, msecs_to_jiffies(10000)); ++ ret = qcom_q6v5_wait_for_start(&upd->q6, msecs_to_jiffies(10000)); + if (ret == -ETIMEDOUT) { +- dev_err(wcss->dev, "%s start timedout\n", rproc->name); +- wcss->q6.running = false; ++ dev_err(upd->dev, "%s start timedout\n", rproc->name); ++ upd->q6.running = false; + return ret; + } +- wcss->q6.running = true; ++ upd->q6.running = true; + return ret; + } + + static int wcss_pd_start(struct rproc *rproc) + { +- struct q6_wcss *wcss = rproc->priv; +- u32 pasid = (wcss->pd_asid << 8) | UPD_SWID; ++ struct userpd *upd = rproc->priv; ++ u32 pasid = (upd->pd_asid << 8) | UPD_SWID; + int ret; + + ret = qcom_scm_msa_lock(pasid); + if 
(ret) { +- dev_err(wcss->dev, "failed to power up pd\n"); ++ dev_err(upd->dev, "failed to power up pd\n"); + return ret; + } + +- if (wcss->q6.spawn_bit) { ++ if (upd->q6.spawn_bit) { + ret = q6_wcss_spawn_pd(rproc); + if (ret) + return ret; +@@ -213,22 +214,22 @@ static int q6_wcss_stop(struct rproc *rproc) + */ + static int wcss_pd_stop(struct rproc *rproc) + { +- struct q6_wcss *wcss = rproc->priv; +- struct rproc *rpd_rproc = dev_get_drvdata(wcss->dev->parent); +- u32 pasid = (wcss->pd_asid << 8) | UPD_SWID; ++ struct userpd *upd = rproc->priv; ++ struct rproc *rpd_rproc = dev_get_drvdata(upd->dev->parent); ++ u32 pasid = (upd->pd_asid << 8) | UPD_SWID; + int ret; + +- if (rproc->state != RPROC_CRASHED && wcss->q6.stop_bit) { +- ret = qcom_q6v5_request_stop(&wcss->q6, NULL); ++ if (rproc->state != RPROC_CRASHED && upd->q6.stop_bit) { ++ ret = qcom_q6v5_request_stop(&upd->q6, NULL); + if (ret) { +- dev_err(&rproc->dev, "pd not stopped\n"); ++ dev_err(upd->dev, "pd not stopped\n"); + return ret; + } + } + + ret = qcom_scm_msa_unlock(pasid); + if (ret) { +- dev_err(wcss->dev, "failed to power down pd\n"); ++ dev_err(upd->dev, "failed to power down pd\n"); + return ret; + } + +@@ -273,7 +274,8 @@ static int share_upd_bootinfo_to_q6(struct rproc *rproc) + size_t size; + u16 cnt = 0, version; + void *ptr; +- struct q6_wcss *wcss = rproc->priv, *upd_wcss; ++ struct q6_wcss *wcss = rproc->priv; ++ struct userpd *upd; + struct rproc *upd_rproc; + struct userpd_boot_info upd_bootinfo = {0}; + const struct firmware *fw; +@@ -308,7 +310,7 @@ static int share_upd_bootinfo_to_q6(struct rproc *rproc) + ptr += sizeof(u16); + + list_for_each_entry(upd_rproc, &upd_rproc_list, node) { +- upd_wcss = upd_rproc->priv; ++ upd = upd_rproc->priv; + + /* TYPE */ + upd_bootinfo.header.type = UPD_BOOT_INFO_HEADER_TYPE; +@@ -318,11 +320,11 @@ static int share_upd_bootinfo_to_q6(struct rproc *rproc) + sizeof(upd_bootinfo) - sizeof(upd_bootinfo.header); + + /* Process ID */ +- upd_bootinfo.pid = upd_wcss->pd_asid + 1; ++ upd_bootinfo.pid = upd->pd_asid + 1; + +- ret = request_firmware(&fw, upd_rproc->firmware, upd_wcss->dev); ++ ret = request_firmware(&fw, upd_rproc->firmware, upd->dev); + if (ret < 0) { +- dev_err(upd_wcss->dev, "request_firmware failed: %d\n", ret); ++ dev_err(upd->dev, "request_firmware failed: %d\n", ret); + return ret; + } + +@@ -421,19 +423,20 @@ static int q6_wcss_load(struct rproc *rproc, const struct firmware *fw) + */ + static int wcss_pd_load(struct rproc *rproc, const struct firmware *fw) + { +- struct q6_wcss *wcss = rproc->priv; +- struct rproc *rpd_rproc = dev_get_drvdata(wcss->dev->parent); +- u32 pasid = (wcss->pd_asid << 8) | UPD_SWID; ++ struct userpd *upd = rproc->priv; ++ struct rproc *rpd_rproc = dev_get_drvdata(upd->dev->parent); ++ struct q6_wcss *wcss = rpd_rproc->priv; ++ u32 pasid = (upd->pd_asid << 8) | UPD_SWID; + int ret; + + ret = rproc_boot(rpd_rproc); + if (ret) + return ret; + +- return qcom_mdt_load(wcss->dev, fw, rproc->firmware, ++ return qcom_mdt_load(upd->dev, fw, rproc->firmware, + pasid, wcss->mem_region, + wcss->mem_phys, wcss->mem_size, +- &wcss->mem_reloc); ++ NULL); + } + + static unsigned long q6_wcss_panic(struct rproc *rproc) +@@ -465,26 +468,15 @@ static int q6_alloc_memory_region(struct q6_wcss *wcss) + struct device_node *node; + struct device *dev = wcss->dev; + +- if (wcss->version == Q6_IPQ) { +- node = of_parse_phandle(dev->of_node, "memory-region", 0); +- if (node) +- rmem = of_reserved_mem_lookup(node); ++ node = of_parse_phandle(dev->of_node, 
"memory-region", 0); ++ if (node) ++ rmem = of_reserved_mem_lookup(node); + +- of_node_put(node); ++ of_node_put(node); + +- if (!rmem) { +- dev_err(dev, "unable to acquire memory-region\n"); +- return -EINVAL; +- } +- } else { +- struct rproc *rpd_rproc = dev_get_drvdata(dev->parent); +- struct q6_wcss *rpd_wcss = rpd_rproc->priv; +- +- wcss->mem_phys = rpd_wcss->mem_phys; +- wcss->mem_reloc = rpd_wcss->mem_reloc; +- wcss->mem_size = rpd_wcss->mem_size; +- wcss->mem_region = rpd_wcss->mem_region; +- return 0; ++ if (!rmem) { ++ dev_err(dev, "unable to acquire memory-region\n"); ++ return -EINVAL; + } + + wcss->mem_phys = rmem->base; +@@ -508,7 +500,7 @@ static int q6_get_inbound_irq(struct qcom_q6v5 *q6, + { + int ret, irq; + char *interrupt, *tmp = (char *)int_name; +- struct q6_wcss *wcss = q6->rproc->priv; ++ struct userpd *upd = q6->rproc->priv; + + irq = platform_get_irq(pdev, index); + if (irq < 0) +@@ -520,7 +512,7 @@ static int q6_get_inbound_irq(struct qcom_q6v5 *q6, + if (!interrupt) + return -ENOMEM; + +- snprintf(interrupt, BUF_SIZE, "q6v5_wcss_userpd%d_%s", wcss->pd_asid, tmp); ++ snprintf(interrupt, BUF_SIZE, "q6v5_wcss_userpd%d_%s", upd->pd_asid, tmp); + + ret = devm_request_threaded_irq(&pdev->dev, *pirq, + NULL, handler, +@@ -561,7 +553,7 @@ static int init_irq(struct qcom_q6v5 *q6, + void (*handover)(struct qcom_q6v5 *q6)) + { + int ret; +- struct q6_wcss *wcss = rproc->priv; ++ struct userpd *upd = rproc->priv; + + q6->rproc = rproc; + q6->dev = &pdev->dev; +@@ -581,7 +573,7 @@ static int init_irq(struct qcom_q6v5 *q6, + return ret; + + /* Get pd_asid to prepare interrupt names */ +- wcss->pd_asid = qcom_get_pd_asid(rproc); ++ upd->pd_asid = qcom_get_pd_asid(rproc); + + ret = q6_get_inbound_irq(q6, pdev, "fatal", 0, &q6->fatal_irq, + q6v5_fatal_interrupt); +@@ -619,7 +611,7 @@ static void q6_release_resources(void) + static int q6_register_userpd(struct platform_device *pdev, + struct device_node *userpd_np) + { +- struct q6_wcss *wcss; ++ struct userpd *upd; + struct rproc *rproc = NULL; + int ret; + struct platform_device *userpd_pdev; +@@ -652,21 +644,16 @@ static int q6_register_userpd(struct platform_device *pdev, + + userpd_pdev->dev.driver = pdev->dev.driver; + rproc = rproc_alloc(&userpd_pdev->dev, userpd_pdev->name, &wcss_ops, +- firmware_name, sizeof(*wcss)); ++ firmware_name, sizeof(*upd)); + if (!rproc) { + ret = -ENOMEM; + goto free_rproc; + } + +- wcss = rproc->priv; +- wcss->dev = &userpd_pdev->dev; +- wcss->version = WCSS_IPQ; +- +- ret = q6_alloc_memory_region(wcss); +- if (ret) +- goto free_rproc; ++ upd = rproc->priv; ++ upd->dev = &userpd_pdev->dev; + +- ret = init_irq(&wcss->q6, userpd_pdev, rproc, ++ ret = init_irq(&upd->q6, userpd_pdev, rproc, + WCSS_CRASH_REASON, NULL, NULL); + if (ret) + goto free_rproc; +@@ -678,7 +665,7 @@ static int q6_register_userpd(struct platform_device *pdev, + + list_add(&rproc->node, &upd_rproc_list); + platform_set_drvdata(userpd_pdev, rproc); +- qcom_add_ssr_subdev(rproc, &wcss->ssr_subdev, userpd_pdev->name); ++ qcom_add_ssr_subdev(rproc, &upd->ssr_subdev, userpd_pdev->name); + return 0; + + free_rproc: +@@ -719,7 +706,6 @@ static int q6_wcss_probe(struct platform_device *pdev) + wcss->dev = &pdev->dev; + wcss->desc = desc; + wcss->firmware = firmware; +- wcss->version = Q6_IPQ; + + ret = q6_alloc_memory_region(wcss); + if (ret) +-- +2.40.1 + diff --git a/target/linux/qualcommax/patches-6.6/0807-remoteproc-qcom_q6v5_mpd-fix-incorrent-use-of-rproc-.patch 
b/target/linux/qualcommax/patches-6.6/0807-remoteproc-qcom_q6v5_mpd-fix-incorrent-use-of-rproc-.patch new file mode 100644 index 000000000..72446b52b --- /dev/null +++ b/target/linux/qualcommax/patches-6.6/0807-remoteproc-qcom_q6v5_mpd-fix-incorrent-use-of-rproc-.patch @@ -0,0 +1,211 @@ +From 0fa7bdb855247b738d1d227d6f4b3417ebdf21a8 Mon Sep 17 00:00:00 2001 +From: Ziyang Huang +Date: Sun, 8 Sep 2024 16:40:12 +0800 +Subject: [PATCH 2/2] remoteproc: qcom_q6v5_mpd: fix incorrent use of + rproc->node + + 1.817524] list_add corruption. next->prev should be prev (ffffffc0814bbfc8), but was ffffffc0814bc358. (next=ffffff8003b56800). +[ 1.822435] WARNING: CPU: 1 PID: 24 at lib/list_debug.c:29 __list_add_valid_or_report+0x8c/0xdc +[ 1.833923] Modules linked in: +[ 1.842425] CPU: 1 PID: 24 Comm: kworker/u4:1 Not tainted 6.6.47 #0 +[ 1.845552] Hardware name: Qualcomm MP03 (DT) +[ 1.851716] Workqueue: events_unbound deferred_probe_work_func +[ 1.856229] pstate: 60400005 (nZCv daif +PAN -UAO -TCO -DIT -SSBS BTYPE=--) +[ 1.861959] pc : __list_add_valid_or_report+0x8c/0xdc +[ 1.868816] lr : __list_add_valid_or_report+0x8c/0xdc +[ 1.874022] sp : ffffffc081603a50 +[ 1.879055] x29: ffffffc081603a50 x28: ffffff8000fa2810 x27: ffffff8003cba800 +[ 1.882358] x26: ffffff8000fa2800 x25: ffffff8003cbac80 x24: 0000000000000000 +[ 1.889476] x23: ffffffc08088b968 x22: ffffffc0814bb000 x21: ffffffc0814bbfc8 +[ 1.896593] x20: ffffffc08088b8e8 x19: ffffff8003cba800 x18: 00000000000000b1 +[ 1.903713] x17: 3863666262343138 x16: 3063666666666666 x15: ffffffc081416e20 +[ 1.910830] x14: 0000000000000213 x13: 00000000000000b1 x12: 00000000ffffffea +[ 1.917948] x11: 00000000ffffefff x10: ffffffc08146ee20 x9 : ffffffc081416dc8 +[ 1.925066] x8 : 0000000000017fe8 x7 : c0000000ffffefff x6 : 0000000000057fa8 +[ 1.932184] x5 : 0000000000000fff x4 : 0000000000000000 x3 : ffffffc081603850 +[ 1.939302] x2 : ffffffc081416d60 x1 : ffffffc081416d60 x0 : 0000000000000075 +[ 1.946422] Call trace: +[ 1.953535] __list_add_valid_or_report+0x8c/0xdc +[ 1.955793] rproc_add+0x1f4/0x25c +[ 1.960653] q6_wcss_probe+0x510/0x634 +[ 1.963950] platform_probe+0x68/0xc4 +[ 1.967684] really_probe+0x148/0x2b0 +[ 1.971417] __driver_probe_device+0x78/0x128 +[ 1.975063] driver_probe_device+0x40/0xdc +[ 1.979402] __device_attach_driver+0xb8/0xf8 +[ 1.983397] bus_for_each_drv+0x70/0xb8 +[ 1.987823] __device_attach+0xa0/0x184 +[ 1.991468] device_initial_probe+0x14/0x20 +[ 1.995289] bus_probe_device+0xac/0xb0 +[ 1.999455] deferred_probe_work_func+0xa4/0xec +[ 2.003275] process_one_work+0x178/0x2d4 +[ 2.007788] worker_thread+0x2ec/0x4d8 +[ 2.011954] kthread+0xdc/0xe0 +[ 2.015600] ret_from_fork+0x10/0x20 + +Signed-off-by: hzy +--- + drivers/remoteproc/qcom_q6v5_mpd.c | 53 +++++++++++++++++------------- + 1 file changed, 30 insertions(+), 23 deletions(-) + +diff --git a/drivers/remoteproc/qcom_q6v5_mpd.c b/drivers/remoteproc/qcom_q6v5_mpd.c +index a1c189ab1f42..a13ced46a158 100644 +--- a/drivers/remoteproc/qcom_q6v5_mpd.c ++++ b/drivers/remoteproc/qcom_q6v5_mpd.c +@@ -33,6 +33,7 @@ + + #define BUF_SIZE 35 + ++#define MAX_UPD 3 + #define MAX_FIRMWARE 3 + + #define RPD_SWID MPD_WCNSS_PAS_ID +@@ -43,8 +44,6 @@ + #define UPD_BOOT_INFO_SMEM_ID 507 + #define VERSION2 2 + +-static LIST_HEAD(upd_rproc_list); +- + /** + * struct userpd_boot_info_header - header of user pd bootinfo + * @type: type of bootinfo passing over smem +@@ -80,6 +79,7 @@ struct q6_wcss { + size_t mem_size; + const struct wcss_data *desc; + const char **firmware; ++ struct userpd *upd[MAX_UPD]; + }; + + 
struct userpd { +@@ -270,13 +270,12 @@ static void *q6_wcss_da_to_va(struct rproc *rproc, u64 da, size_t len, + */ + static int share_upd_bootinfo_to_q6(struct rproc *rproc) + { +- int ret; ++ int i, ret; + size_t size; + u16 cnt = 0, version; + void *ptr; + struct q6_wcss *wcss = rproc->priv; + struct userpd *upd; +- struct rproc *upd_rproc; + struct userpd_boot_info upd_bootinfo = {0}; + const struct firmware *fw; + +@@ -301,16 +300,19 @@ static int share_upd_bootinfo_to_q6(struct rproc *rproc) + memcpy_toio(ptr, &version, sizeof(version)); + ptr += sizeof(version); + +- list_for_each_entry(upd_rproc, &upd_rproc_list, node) +- cnt++; ++ for (i = 0; i < ARRAY_SIZE(wcss->upd); i++) ++ if (wcss->upd[i]) ++ cnt++; + + /* No of elements */ + cnt = (sizeof(upd_bootinfo) * cnt); + memcpy_toio(ptr, &cnt, sizeof(u16)); + ptr += sizeof(u16); + +- list_for_each_entry(upd_rproc, &upd_rproc_list, node) { +- upd = upd_rproc->priv; ++ for (i = 0; i < ARRAY_SIZE(wcss->upd); i++) { ++ upd = wcss->upd[i]; ++ if (!upd) ++ continue; + + /* TYPE */ + upd_bootinfo.header.type = UPD_BOOT_INFO_HEADER_TYPE; +@@ -322,14 +324,14 @@ static int share_upd_bootinfo_to_q6(struct rproc *rproc) + /* Process ID */ + upd_bootinfo.pid = upd->pd_asid + 1; + +- ret = request_firmware(&fw, upd_rproc->firmware, upd->dev); ++ ret = request_firmware(&fw, upd->q6.rproc->firmware, upd->dev); + if (ret < 0) { + dev_err(upd->dev, "request_firmware failed: %d\n", ret); + return ret; + } + + /* Load address */ +- upd_bootinfo.bootaddr = rproc_get_boot_addr(upd_rproc, fw); ++ upd_bootinfo.bootaddr = rproc_get_boot_addr(upd->q6.rproc, fw); + + /* Firmware mem size */ + upd_bootinfo.data_size = qcom_mdt_get_size(fw); +@@ -597,18 +599,23 @@ static int init_irq(struct qcom_q6v5 *q6, + return 0; + } + +-static void q6_release_resources(void) ++static void q6_release_resources(struct q6_wcss *wcss) + { +- struct rproc *upd_rproc; ++ struct userpd *upd; ++ int i; + + /* Release userpd resources */ +- list_for_each_entry(upd_rproc, &upd_rproc_list, node) { +- rproc_del(upd_rproc); +- rproc_free(upd_rproc); ++ for (i = 0; i < ARRAY_SIZE(wcss->upd); i++) { ++ upd = wcss->upd[i]; ++ if (!upd) ++ continue; ++ ++ rproc_del(upd->q6.rproc); ++ rproc_free(upd->q6.rproc); + } + } + +-static int q6_register_userpd(struct platform_device *pdev, ++static int q6_register_userpd(struct q6_wcss *wcss, + struct device_node *userpd_np) + { + struct userpd *upd; +@@ -633,16 +640,16 @@ static int q6_register_userpd(struct platform_device *pdev, + return ret; + } + +- dev_info(&pdev->dev, "%s node found\n", userpd_np->name); ++ dev_info(wcss->dev, "%s node found\n", userpd_np->name); + + userpd_pdev = of_platform_device_create(userpd_np, userpd_np->name, +- &pdev->dev); ++ wcss->dev); + if (!userpd_pdev) +- return dev_err_probe(&pdev->dev, -ENODEV, ++ return dev_err_probe(wcss->dev, -ENODEV, + "failed to create %s platform device\n", + userpd_np->name); + +- userpd_pdev->dev.driver = pdev->dev.driver; ++ userpd_pdev->dev.driver = wcss->dev->driver; + rproc = rproc_alloc(&userpd_pdev->dev, userpd_pdev->name, &wcss_ops, + firmware_name, sizeof(*upd)); + if (!rproc) { +@@ -663,7 +670,7 @@ static int q6_register_userpd(struct platform_device *pdev, + if (ret) + goto free_rproc; + +- list_add(&rproc->node, &upd_rproc_list); ++ wcss->upd[upd->pd_asid] = upd; + platform_set_drvdata(userpd_pdev, rproc); + qcom_add_ssr_subdev(rproc, &upd->ssr_subdev, userpd_pdev->name); + return 0; +@@ -728,10 +735,10 @@ static int q6_wcss_probe(struct platform_device *pdev) + + /* Iterate 
over userpd child's and register with rproc */ + for_each_available_child_of_node(pdev->dev.of_node, userpd_np) { +- ret = q6_register_userpd(pdev, userpd_np); ++ ret = q6_register_userpd(wcss, userpd_np); + if (ret) { + /* release resources of successfully allocated userpd rproc's */ +- q6_release_resources(); ++ q6_release_resources(wcss); + return dev_err_probe(&pdev->dev, ret, + "Failed to register userpd(%s)\n", + userpd_np->name); +-- +2.40.1 + diff --git a/target/linux/qualcommax/patches-6.6/0811-qcom_scm-support-MPD.patch b/target/linux/qualcommax/patches-6.6/0811-qcom_scm-support-MPD.patch new file mode 100644 index 000000000..0b7b119f7 --- /dev/null +++ b/target/linux/qualcommax/patches-6.6/0811-qcom_scm-support-MPD.patch @@ -0,0 +1,124 @@ +From 6553d598cdb507f7ede020f25da646ba084a23c6 Mon Sep 17 00:00:00 2001 +From: Ziyang Huang +Date: Sun, 8 Sep 2024 16:40:12 +0800 +Subject: [PATCH 1/5] qcom_scm: support MPD + +Signed-off-by: hzy +--- + drivers/firmware/qcom_scm.c | 79 ++++++++++++++++++++++++++ + drivers/firmware/qcom_scm.h | 3 + + include/linux/firmware/qcom/qcom_scm.h | 3 + + 3 files changed, 85 insertions(+) + +--- a/drivers/firmware/qcom_scm.c ++++ b/drivers/firmware/qcom_scm.c +@@ -713,6 +713,85 @@ bool qcom_scm_pas_supported(u32 peripher + EXPORT_SYMBOL_GPL(qcom_scm_pas_supported); + + /** ++ * qcom_scm_internal_wifi_powerup() - Bring up internal wifi ++ * @peripheral: peripheral id ++ * ++ * Return 0 on success. ++ */ ++int qcom_scm_internal_wifi_powerup(u32 peripheral) ++{ ++ struct qcom_scm_desc desc = { ++ .svc = QCOM_SCM_SVC_PIL, ++ .cmd = QCOM_SCM_INTERNAL_WIFI_POWERUP, ++ .arginfo = QCOM_SCM_ARGS(1), ++ .args[0] = peripheral, ++ .owner = ARM_SMCCC_OWNER_SIP, ++ }; ++ struct qcom_scm_res res; ++ int ret; ++ ++ ret = qcom_scm_call(__scm->dev, &desc, &res); ++ ++ return ret ? : res.result[0]; ++} ++EXPORT_SYMBOL(qcom_scm_internal_wifi_powerup); ++ ++/** ++ * qcom_scm_internal_wifi_shutdown() - Shut down internal wifi ++ * @peripheral: peripheral id ++ * ++ * Returns 0 on success. ++ */ ++int qcom_scm_internal_wifi_shutdown(u32 peripheral) ++{ ++ struct qcom_scm_desc desc = { ++ .svc = QCOM_SCM_SVC_PIL, ++ .cmd = QCOM_SCM_INTERNAL_WIFI_SHUTDOWN, ++ .arginfo = QCOM_SCM_ARGS(1), ++ .args[0] = peripheral, ++ .owner = ARM_SMCCC_OWNER_SIP, ++ }; ++ struct qcom_scm_res res; ++ int ret; ++ ++ ret = qcom_scm_call(__scm->dev, &desc, &res); ++ ++ return ret ? : res.result[0]; ++} ++EXPORT_SYMBOL(qcom_scm_internal_wifi_shutdown); ++ ++/** ++ * qcom_scm_pas_load_segment() - copy userpd PIL segments data to dma blocks ++ * @peripheral: peripheral id ++ * @segment: segment id ++ * @dma: handle of dma region ++ * @seg_cnt: no of dma blocks ++ * ++ * Returns 0 if trustzone successfully loads userpd PIL segments from dma ++ * blocks to DDR ++ */ ++int qcom_scm_pas_load_segment(u32 peripheral, int segment, dma_addr_t dma, int seg_cnt) ++{ ++ struct qcom_scm_desc desc = { ++ .svc = QCOM_SCM_SVC_PIL, ++ .cmd = QCOM_SCM_PIL_PAS_LOAD_SEG, ++ .arginfo = QCOM_SCM_ARGS(4, QCOM_SCM_VAL, QCOM_SCM_VAL, QCOM_SCM_RW, QCOM_SCM_VAL), ++ .args[0] = peripheral, ++ .args[1] = segment, ++ .args[2] = dma, ++ .args[3] = seg_cnt, ++ .owner = ARM_SMCCC_OWNER_SIP, ++ }; ++ struct qcom_scm_res res; ++ int ret; ++ ++ ret = qcom_scm_call(__scm->dev, &desc, &res); ++ ++ return ret ? 
: res.result[0]; ++} ++EXPORT_SYMBOL(qcom_scm_pas_load_segment); ++ ++/** + * qcom_scm_msa_lock() - Lock given peripheral firmware region as MSA + * + * @peripheral: peripheral id +--- a/drivers/firmware/qcom_scm.h ++++ b/drivers/firmware/qcom_scm.h +@@ -98,6 +98,9 @@ extern int scm_legacy_call(struct device + #define QCOM_SCM_PIL_PAS_SHUTDOWN 0x06 + #define QCOM_SCM_PIL_PAS_IS_SUPPORTED 0x07 + #define QCOM_SCM_PIL_PAS_MSS_RESET 0x0a ++#define QCOM_SCM_INTERNAL_WIFI_POWERUP 0x17 ++#define QCOM_SCM_INTERNAL_WIFI_SHUTDOWN 0x18 ++#define QCOM_SCM_PIL_PAS_LOAD_SEG 0x19 + #define QCOM_SCM_MSA_LOCK 0x24 + #define QCOM_SCM_MSA_UNLOCK 0x25 + +--- a/include/linux/firmware/qcom/qcom_scm.h ++++ b/include/linux/firmware/qcom/qcom_scm.h +@@ -81,6 +81,9 @@ extern int qcom_scm_pas_mem_setup(u32 pe + extern int qcom_scm_pas_auth_and_reset(u32 peripheral); + extern int qcom_scm_pas_shutdown(u32 peripheral); + extern bool qcom_scm_pas_supported(u32 peripheral); ++extern int qcom_scm_internal_wifi_powerup(u32 peripheral); ++extern int qcom_scm_internal_wifi_shutdown(u32 peripheral); ++extern int qcom_scm_pas_load_segment(u32 peripheral, int segment, dma_addr_t dma, int seg_cnt); + extern int qcom_scm_msa_lock(u32 peripheral); + extern int qcom_scm_msa_unlock(u32 peripheral); + diff --git a/target/linux/qualcommax/patches-6.6/0812-mdt_loader-support-MPD.patch b/target/linux/qualcommax/patches-6.6/0812-mdt_loader-support-MPD.patch new file mode 100644 index 000000000..a54bda9a8 --- /dev/null +++ b/target/linux/qualcommax/patches-6.6/0812-mdt_loader-support-MPD.patch @@ -0,0 +1,202 @@ +From bf42d84868bc82a9cb334a33930f2d1da24f7070 Mon Sep 17 00:00:00 2001 +From: Ziyang Huang +Date: Sun, 8 Sep 2024 16:40:12 +0800 +Subject: [PATCH 2/5] mdt_loader: support MPD + +Signed-off-by: hzy +--- + drivers/soc/qcom/mdt_loader.c | 110 ++++++++++++++++++++++++++-- + include/linux/soc/qcom/mdt_loader.h | 5 ++ + 2 files changed, 110 insertions(+), 5 deletions(-) + +diff --git a/drivers/soc/qcom/mdt_loader.c b/drivers/soc/qcom/mdt_loader.c +index 6f177e46fa0f..00c848abd996 100644 +--- a/drivers/soc/qcom/mdt_loader.c ++++ b/drivers/soc/qcom/mdt_loader.c +@@ -16,6 +16,16 @@ + #include + #include + #include ++#include ++ ++#include "../../remoteproc/qcom_common.h" ++ ++#define QCOM_MDT_PF_ASID_MASK GENMASK(19, 16) ++ ++struct segment_load_args { ++ __le64 addr; ++ __le64 blk_size; ++}; + + static bool mdt_phdr_valid(const struct elf32_phdr *phdr) + { +@@ -69,6 +79,56 @@ static ssize_t mdt_load_split_segment(void *ptr, const struct elf32_phdr *phdrs, + return ret; + } + ++static int mdt_load_split_segment_dma(int pas_id, unsigned int segment, ++ const struct elf32_phdr *phdrs, ++ const char *fw_name, ++ struct device *dev) ++{ ++ const struct elf32_phdr *phdr = &phdrs[segment]; ++ struct segment_load_args *args; ++ dma_addr_t *addrs; ++ void *ptr; ++ dma_addr_t dma_args, dma_addrs, dma_ptr; ++ int ret; ++ ++ args = dma_alloc_coherent(dev, sizeof(*args) + sizeof(*addrs), &dma_args, GFP_DMA); ++ if (!args) { ++ dev_err(dev, "Error in dma alloc regin: %ld\n", sizeof(*args)); ++ return -ENOMEM; ++ } ++ ++ addrs = (void *) args + sizeof(*args); ++ dma_addrs = dma_args + sizeof(*args); ++ ++ ptr = dma_alloc_coherent(dev, phdr->p_filesz, &dma_ptr, GFP_DMA); ++ if (!ptr) { ++ dev_err(dev, "Error in dma alloc ptr: %d\n", phdr->p_filesz); ++ return -ENOMEM; ++ } ++ ++ args->addr = dma_addrs; ++ args->blk_size = phdr->p_filesz; ++ ++ addrs[0] = dma_ptr; ++ ++ ret = mdt_load_split_segment(ptr, phdrs, segment, fw_name, dev); ++ if (ret < 0) { ++ 
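		/*
		 * mdt_load_split_segment() stages the split segment file into the
		 * DMA bounce buffer allocated above; if that copy fails there is
		 * nothing to hand to the TrustZone call, so report it and bail out
		 * (the coherent buffers are not freed on this early return).
		 */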
dev_err(dev, "Error in mdt_load_split_segment: %d\n", ret); ++ return ret; ++ } ++ ++ ret = qcom_scm_pas_load_segment(pas_id, segment, dma_args, 1); ++ if (ret < 0) { ++ dev_err(dev, "Error in qcom_scm_pas_load_segment: %d\n", ret); ++ return ret; ++ } ++ ++ dma_free_coherent(dev, phdr->p_filesz, ptr, dma_ptr); ++ dma_free_coherent(dev, sizeof(*args) + sizeof(*addrs), args, dma_args); ++ ++ return 0; ++} ++ + /** + * qcom_mdt_get_size() - acquire size of the memory region needed to load mdt + * @fw: firmware object for the mdt file +@@ -295,7 +355,8 @@ static bool qcom_mdt_bins_are_split(const struct firmware *fw, const char *fw_na + static int __qcom_mdt_load(struct device *dev, const struct firmware *fw, + const char *fw_name, int pas_id, void *mem_region, + phys_addr_t mem_phys, size_t mem_size, +- phys_addr_t *reloc_base, bool pas_init) ++ phys_addr_t *reloc_base, bool pas_init, ++ bool dma_require, int pd_asid) + { + const struct elf32_phdr *phdrs; + const struct elf32_phdr *phdr; +@@ -349,6 +410,14 @@ static int __qcom_mdt_load(struct device *dev, const struct firmware *fw, + if (!mdt_phdr_valid(phdr)) + continue; + ++ /* ++ * While doing PD specific reloading, load only that PD ++ * specific writeable entries. Skip others ++ */ ++ if (pd_asid && (FIELD_GET(QCOM_MDT_PF_ASID_MASK, phdr->p_flags) != pd_asid || ++ (phdr->p_flags & PF_W) == 0)) ++ continue; ++ + offset = phdr->p_paddr - mem_reloc; + if (offset < 0 || offset + phdr->p_memsz > mem_size) { + dev_err(dev, "segment outside memory range\n"); +@@ -366,7 +435,11 @@ static int __qcom_mdt_load(struct device *dev, const struct firmware *fw, + + ptr = mem_region + offset; + +- if (phdr->p_filesz && !is_split) { ++ if (dma_require && phdr->p_filesz) { ++ ret = mdt_load_split_segment_dma(pas_id, i, phdrs, fw_name, dev); ++ if (ret) ++ break; ++ } else if (phdr->p_filesz && !is_split) { + /* Firmware is large enough to be non-split */ + if (phdr->p_offset + phdr->p_filesz > fw->size) { + dev_err(dev, "file %s segment %d would be truncated\n", +@@ -383,7 +456,7 @@ static int __qcom_mdt_load(struct device *dev, const struct firmware *fw, + break; + } + +- if (phdr->p_memsz > phdr->p_filesz) ++ if (!dma_require && phdr->p_memsz > phdr->p_filesz) + memset(ptr + phdr->p_filesz, 0, phdr->p_memsz - phdr->p_filesz); + } + +@@ -418,7 +491,7 @@ int qcom_mdt_load(struct device *dev, const struct firmware *fw, + return ret; + + return __qcom_mdt_load(dev, fw, firmware, pas_id, mem_region, mem_phys, +- mem_size, reloc_base, true); ++ mem_size, reloc_base, true, false, 0); + } + EXPORT_SYMBOL_GPL(qcom_mdt_load); + +@@ -441,9 +514,36 @@ int qcom_mdt_load_no_init(struct device *dev, const struct firmware *fw, + size_t mem_size, phys_addr_t *reloc_base) + { + return __qcom_mdt_load(dev, fw, firmware, pas_id, mem_region, mem_phys, +- mem_size, reloc_base, false); ++ mem_size, reloc_base, false, false, 0); + } + EXPORT_SYMBOL_GPL(qcom_mdt_load_no_init); + ++/** ++ * qcom_mdt_load_pd_seg() - load userpd specific PIL segements ++ * @dev: device handle to associate resources with ++ * @fw: firmware object for the mdt file ++ * @firmware: name of the firmware, for construction of segment file names ++ * @pas_id: PAS identifier ++ * @mem_region: allocated memory region to load firmware into ++ * @mem_phys: physical address of allocated memory region ++ * @mem_size: size of the allocated memory region ++ * @reloc_base: adjusted physical address after relocation ++ * ++ * Here userpd PIL segements are stitched with rootpd firmware. 
++ * This function reloads userpd specific PIL segments during SSR ++ * of userpd. ++ * ++ * Returns 0 on success, negative errno otherwise. ++ */ ++int qcom_mdt_load_pd_seg(struct device *dev, const struct firmware *fw, ++ const char *firmware, int pas_id, int pd_asid, void *mem_region, ++ phys_addr_t mem_phys, size_t mem_size, ++ phys_addr_t *reloc_base) ++{ ++ return __qcom_mdt_load(dev, fw, firmware, pas_id, mem_region, mem_phys, ++ mem_size, reloc_base, false, true, pd_asid); ++} ++EXPORT_SYMBOL_GPL(qcom_mdt_load_pd_seg); ++ + MODULE_DESCRIPTION("Firmware parser for Qualcomm MDT format"); + MODULE_LICENSE("GPL v2"); +diff --git a/include/linux/soc/qcom/mdt_loader.h b/include/linux/soc/qcom/mdt_loader.h +index 9e8e60421192..cefccb709f2e 100644 +--- a/include/linux/soc/qcom/mdt_loader.h ++++ b/include/linux/soc/qcom/mdt_loader.h +@@ -30,6 +30,11 @@ int qcom_mdt_load_no_init(struct device *dev, const struct firmware *fw, + void *qcom_mdt_read_metadata(const struct firmware *fw, size_t *data_len, + const char *fw_name, struct device *dev); + ++int qcom_mdt_load_pd_seg(struct device *dev, const struct firmware *fw, ++ const char *firmware, int pas_id, int pd_asid, void *mem_region, ++ phys_addr_t mem_phys, size_t mem_size, ++ phys_addr_t *reloc_base); ++ + #else /* !IS_ENABLED(CONFIG_QCOM_MDT_LOADER) */ + + static inline ssize_t qcom_mdt_get_size(const struct firmware *fw) +-- +2.40.1 + diff --git a/target/linux/qualcommax/patches-6.6/0813-remoteproc-qcom_q6v5_mpd-enable-clocks.patch b/target/linux/qualcommax/patches-6.6/0813-remoteproc-qcom_q6v5_mpd-enable-clocks.patch new file mode 100644 index 000000000..ced3d02d8 --- /dev/null +++ b/target/linux/qualcommax/patches-6.6/0813-remoteproc-qcom_q6v5_mpd-enable-clocks.patch @@ -0,0 +1,43 @@ +From e83215d5d22946885fa388d375b12f1b991a43c1 Mon Sep 17 00:00:00 2001 +From: Ziyang Huang +Date: Sun, 8 Sep 2024 16:40:12 +0800 +Subject: [PATCH 3/5] remoteproc: qcom_q6v5_mpd: enable clocks + +Signed-off-by: hzy +--- + drivers/remoteproc/qcom_q6v5_mpd.c | 12 ++++++++++++ + 1 file changed, 12 insertions(+) + +diff --git a/drivers/remoteproc/qcom_q6v5_mpd.c b/drivers/remoteproc/qcom_q6v5_mpd.c +index a13ced46a158..edcd5ed8515f 100644 +--- a/drivers/remoteproc/qcom_q6v5_mpd.c ++++ b/drivers/remoteproc/qcom_q6v5_mpd.c +@@ -77,6 +77,8 @@ struct q6_wcss { + phys_addr_t mem_reloc; + void *mem_region; + size_t mem_size; ++ struct clk_bulk_data *clks; ++ int num_clks; + const struct wcss_data *desc; + const char **firmware; + struct userpd *upd[MAX_UPD]; +@@ -718,6 +720,16 @@ static int q6_wcss_probe(struct platform_device *pdev) + if (ret) + goto free_rproc; + ++ wcss->num_clks = devm_clk_bulk_get_all(wcss->dev, &wcss->clks); ++ if (wcss->num_clks < 0) ++ return dev_err_probe(wcss->dev, wcss->num_clks, ++ "failed to acquire clocks\n"); ++ ++ ret = clk_bulk_prepare_enable(wcss->num_clks, wcss->clks); ++ if (ret) ++ return dev_err_probe(wcss->dev, ret, ++ "failed to enable clocks\n"); ++ + ret = qcom_q6v5_init(&wcss->q6, pdev, rproc, + WCSS_CRASH_REASON, NULL, NULL); + if (ret) +-- +2.40.1 + diff --git a/target/linux/qualcommax/patches-6.6/0814-remoteproc-qcom_q6v5_mpd-support-ipq5018.patch b/target/linux/qualcommax/patches-6.6/0814-remoteproc-qcom_q6v5_mpd-support-ipq5018.patch new file mode 100644 index 000000000..1238d5276 --- /dev/null +++ b/target/linux/qualcommax/patches-6.6/0814-remoteproc-qcom_q6v5_mpd-support-ipq5018.patch @@ -0,0 +1,116 @@ +From 4ae334127f073aa5f7c9209c9f0a17fd9e331db1 Mon Sep 17 00:00:00 2001 +From: Ziyang Huang +Date: Sun, 8 Sep 2024 
16:40:12 +0800 +Subject: [PATCH 4/5] remoteproc: qcom_q6v5_mpd: support ipq5018 + +Signed-off-by: hzy +--- + drivers/remoteproc/qcom_q6v5_mpd.c | 37 +++++++++++++++++++++++++++--- + 1 file changed, 34 insertions(+), 3 deletions(-) + +diff --git a/drivers/remoteproc/qcom_q6v5_mpd.c b/drivers/remoteproc/qcom_q6v5_mpd.c +index edcd5ed8515f..7416ba231445 100644 +--- a/drivers/remoteproc/qcom_q6v5_mpd.c ++++ b/drivers/remoteproc/qcom_q6v5_mpd.c +@@ -155,6 +155,8 @@ static int q6_wcss_spawn_pd(struct rproc *rproc) + static int wcss_pd_start(struct rproc *rproc) + { + struct userpd *upd = rproc->priv; ++ struct rproc *rpd_rproc = dev_get_drvdata(upd->dev->parent); ++ struct q6_wcss *wcss = rpd_rproc->priv; + u32 pasid = (upd->pd_asid << 8) | UPD_SWID; + int ret; + +@@ -170,6 +172,14 @@ static int wcss_pd_start(struct rproc *rproc) + return ret; + } + ++ if (upd->pd_asid == 1) { ++ ret = qcom_scm_internal_wifi_powerup(wcss->desc->pasid); ++ if (ret) { ++ dev_err(upd->dev, "failed to power up internal radio\n"); ++ return ret; ++ } ++ } ++ + return ret; + } + +@@ -179,6 +189,12 @@ static int q6_wcss_stop(struct rproc *rproc) + const struct wcss_data *desc = wcss->desc; + int ret; + ++ ret = qcom_q6v5_request_stop(&wcss->q6, NULL); ++ if (ret) { ++ dev_err(wcss->dev, "pd not stopped\n"); ++ return ret; ++ } ++ + ret = qcom_scm_pas_shutdown(desc->pasid); + if (ret) { + dev_err(wcss->dev, "not able to shutdown\n"); +@@ -218,6 +234,7 @@ static int wcss_pd_stop(struct rproc *rproc) + { + struct userpd *upd = rproc->priv; + struct rproc *rpd_rproc = dev_get_drvdata(upd->dev->parent); ++ struct q6_wcss *wcss = rpd_rproc->priv; + u32 pasid = (upd->pd_asid << 8) | UPD_SWID; + int ret; + +@@ -229,6 +246,14 @@ static int wcss_pd_stop(struct rproc *rproc) + } + } + ++ if (upd->pd_asid == 1) { ++ ret = qcom_scm_internal_wifi_shutdown(wcss->desc->pasid); ++ if (ret) { ++ dev_err(upd->dev, "failed to power down internal radio\n"); ++ return ret; ++ } ++ } ++ + ret = qcom_scm_msa_unlock(pasid); + if (ret) { + dev_err(upd->dev, "failed to power down pd\n"); +@@ -430,15 +455,14 @@ static int wcss_pd_load(struct rproc *rproc, const struct firmware *fw) + struct userpd *upd = rproc->priv; + struct rproc *rpd_rproc = dev_get_drvdata(upd->dev->parent); + struct q6_wcss *wcss = rpd_rproc->priv; +- u32 pasid = (upd->pd_asid << 8) | UPD_SWID; + int ret; + + ret = rproc_boot(rpd_rproc); + if (ret) + return ret; + +- return qcom_mdt_load(upd->dev, fw, rproc->firmware, +- pasid, wcss->mem_region, ++ return qcom_mdt_load_pd_seg(upd->dev, fw, rproc->firmware, ++ wcss->desc->pasid, upd->pd_asid, wcss->mem_region, + wcss->mem_phys, wcss->mem_size, + NULL); + } +@@ -777,6 +801,12 @@ static int q6_wcss_remove(struct platform_device *pdev) + return 0; + } + ++static const struct wcss_data q6_ipq5018_res_init = { ++ .pasid = MPD_WCNSS_PAS_ID, ++ // .share_upd_info_to_q6 = true, /* Version 1 */ ++ // .mdt_load_sec = qcom_mdt_load_pd_seg, ++}; ++ + static const struct wcss_data q6_ipq5332_res_init = { + .pasid = MPD_WCNSS_PAS_ID, + .share_upd_info_to_q6 = true, +@@ -787,6 +817,7 @@ static const struct wcss_data q6_ipq9574_res_init = { + }; + + static const struct of_device_id q6_wcss_of_match[] = { ++ { .compatible = "qcom,ipq5018-q6-mpd", .data = &q6_ipq5018_res_init }, + { .compatible = "qcom,ipq5332-q6-mpd", .data = &q6_ipq5332_res_init }, + { .compatible = "qcom,ipq9574-q6-mpd", .data = &q6_ipq9574_res_init }, + { }, +-- +2.40.1 + diff --git a/target/linux/qualcommax/patches-6.6/0815-arm64-dts-qcom-ipq5018-add-wifi-support.patch 
b/target/linux/qualcommax/patches-6.6/0815-arm64-dts-qcom-ipq5018-add-wifi-support.patch new file mode 100644 index 000000000..64368bf8c --- /dev/null +++ b/target/linux/qualcommax/patches-6.6/0815-arm64-dts-qcom-ipq5018-add-wifi-support.patch @@ -0,0 +1,210 @@ +From 4e2bfcd24848db58cc2a603acc2418d0190c5466 Mon Sep 17 00:00:00 2001 +From: Ziyang Huang +Date: Sun, 8 Sep 2024 16:40:12 +0800 +Subject: [PATCH 5/5] arm64: dts: qcom: ipq5018: add wifi support + +Signed-off-by: hzy +--- + arch/arm64/boot/dts/qcom/ipq5018.dtsi | 192 ++++++++++++++++++++++++++ + 1 file changed, 192 insertions(+) + +--- a/arch/arm64/boot/dts/qcom/ipq5018.dtsi ++++ b/arch/arm64/boot/dts/qcom/ipq5018.dtsi +@@ -688,6 +688,197 @@ + }; + }; + ++ wifi0: wifi@c000000 { ++ compatible = "qcom,ipq5018-wifi"; ++ reg = <0xc000000 0x1000000>; ++ ++ interrupts = , ++ , ++ , ++ , ++ , ++ , ++ , ++ , ++ , ++ , ++ , ++ , ++ , ++ , ++ , ++ , ++ , ++ , ++ , ++ , ++ , ++ , ++ , ++ , ++ , ++ , ++ , ++ , ++ , ++ , ++ , ++ , ++ , ++ , ++ , ++ , ++ , ++ , ++ , ++ , ++ , ++ , ++ , ++ , ++ , ++ , ++ , ++ , ++ , ++ , ++ , ++ ; ++ ++ interrupt-names = "misc-pulse1", ++ "misc-latch", ++ "sw-exception", ++ "watchdog", ++ "ce0", ++ "ce1", ++ "ce2", ++ "ce3", ++ "ce4", ++ "ce5", ++ "ce6", ++ "ce7", ++ "ce8", ++ "ce9", ++ "ce10", ++ "ce11", ++ "host2wbm-desc-feed", ++ "host2reo-re-injection", ++ "host2reo-command", ++ "host2rxdma-monitor-ring3", ++ "host2rxdma-monitor-ring2", ++ "host2rxdma-monitor-ring1", ++ "reo2ost-exception", ++ "wbm2host-rx-release", ++ "reo2host-status", ++ "reo2host-destination-ring4", ++ "reo2host-destination-ring3", ++ "reo2host-destination-ring2", ++ "reo2host-destination-ring1", ++ "rxdma2host-monitor-destination-mac3", ++ "rxdma2host-monitor-destination-mac2", ++ "rxdma2host-monitor-destination-mac1", ++ "ppdu-end-interrupts-mac3", ++ "ppdu-end-interrupts-mac2", ++ "ppdu-end-interrupts-mac1", ++ "rxdma2host-monitor-status-ring-mac3", ++ "rxdma2host-monitor-status-ring-mac2", ++ "rxdma2host-monitor-status-ring-mac1", ++ "host2rxdma-host-buf-ring-mac3", ++ "host2rxdma-host-buf-ring-mac2", ++ "host2rxdma-host-buf-ring-mac1", ++ "rxdma2host-destination-ring-mac3", ++ "rxdma2host-destination-ring-mac2", ++ "rxdma2host-destination-ring-mac1", ++ "host2tcl-input-ring4", ++ "host2tcl-input-ring3", ++ "host2tcl-input-ring2", ++ "host2tcl-input-ring1", ++ "wbm2host-tx-completions-ring3", ++ "wbm2host-tx-completions-ring2", ++ "wbm2host-tx-completions-ring1", ++ "tcl2host-status-ring"; ++ ++ status = "disabled"; ++ }; ++ ++ wifi1: wifi1@c000000 { ++ compatible = "qcom,qcn6122-wifi"; ++ msi-parent = <&v2m0>; ++ interrupts = ; ++ status = "disabled"; ++ }; ++ ++ wifi2: wifi2@c000000 { ++ compatible = "qcom,qcn6122-wifi"; ++ msi-parent = <&v2m0>; ++ interrupts = ; ++ status = "disabled"; ++ }; ++ ++ q6v5_wcss: remoteproc@cd00000 { ++ compatible = "qcom,ipq5018-q6-mpd"; ++ reg = <0x0cd00000 0x4040>; ++ #address-cells = <1>; ++ #size-cells = <1>; ++ ranges; ++ ++ clocks = <&gcc GCC_XO_CLK>, ++ <&gcc GCC_SLEEP_CLK_SRC>, ++ <&gcc GCC_SYS_NOC_WCSS_AHB_CLK>; ++ ++ interrupts-extended = <&intc GIC_SPI 291 IRQ_TYPE_EDGE_RISING>, ++ <&wcss_smp2p_in 0 0>, ++ <&wcss_smp2p_in 1 0>, ++ <&wcss_smp2p_in 2 0>, ++ <&wcss_smp2p_in 3 0>; ++ interrupt-names = "wdog", ++ "fatal", ++ "ready", ++ "handover", ++ "stop-ack"; ++ ++ qcom,smem-states = <&wcss_smp2p_out 0>, ++ <&wcss_smp2p_out 1>; ++ qcom,smem-state-names = "shutdown", ++ "stop"; ++ ++ glink-edge { ++ interrupts = ; ++ label = "rtr"; ++ qcom,remote-pid = <1>; ++ mboxes = <&apcs_glb 8>; ++ ++ 
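++				/*
++				 * IPCRTR is the glink channel used by the QRTR
++				 * (Qualcomm IPC router) transport between the
++				 * host and the Q6 remote processor.
++				 */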
qrtr_requests { ++ qcom,glink-channels = "IPCRTR"; ++ }; ++ }; ++ }; ++ ++ wcss: wcss-smp2p { ++ compatible = "qcom,smp2p"; ++ qcom,smem = <435>, <428>; ++ ++ interrupt-parent = <&intc>; ++ interrupts = ; ++ ++ mboxes = <&apcs_glb 9>; ++ ++ qcom,local-pid = <0>; ++ qcom,remote-pid = <1>; ++ ++ wcss_smp2p_out: master-kernel { ++ qcom,entry-name = "master-kernel"; ++ qcom,smp2p-feature-ssr-ack; ++ #qcom,smem-state-cells = <1>; ++ }; ++ ++ wcss_smp2p_in: slave-kernel { ++ qcom,entry-name = "slave-kernel"; ++ interrupt-controller; ++ #interrupt-cells = <2>; ++ }; ++ }; ++ + pcie_x1: pcie@80000000 { + compatible = "qcom,pcie-ipq5018"; + reg = <0x80000000 0xf1d>, diff --git a/target/linux/qualcommax/patches-6.6/0900-power-Add-Qualcomm-APM.patch b/target/linux/qualcommax/patches-6.6/0900-power-Add-Qualcomm-APM.patch new file mode 100644 index 000000000..2e5c72b7d --- /dev/null +++ b/target/linux/qualcommax/patches-6.6/0900-power-Add-Qualcomm-APM.patch @@ -0,0 +1,1047 @@ +From 6c98adf98236b8644b8f5e1aa7af9f1a88ea2766 Mon Sep 17 00:00:00 2001 +From: Robert Marko +Date: Mon, 11 Apr 2022 14:38:08 +0200 +Subject: [PATCH] power: Add Qualcomm APM + +Add Qualcomm APM driver, which allows scaling cache and memory fabrics. + +Signed-off-by: Robert Marko +--- + drivers/power/Kconfig | 1 + + drivers/power/Makefile | 1 + + drivers/power/qcom/Kconfig | 7 + + drivers/power/qcom/Makefile | 1 + + drivers/power/qcom/apm.c | 944 +++++++++++++++++++++++++++++++++ + include/linux/power/qcom/apm.h | 48 ++ + 6 files changed, 1002 insertions(+) + create mode 100644 drivers/power/qcom/Kconfig + create mode 100644 drivers/power/qcom/Makefile + create mode 100644 drivers/power/qcom/apm.c + create mode 100644 include/linux/power/qcom/apm.h + +--- a/drivers/power/Kconfig ++++ b/drivers/power/Kconfig +@@ -1,3 +1,4 @@ + # SPDX-License-Identifier: GPL-2.0-only + source "drivers/power/reset/Kconfig" + source "drivers/power/supply/Kconfig" ++source "drivers/power/qcom/Kconfig" +--- a/drivers/power/Makefile ++++ b/drivers/power/Makefile +@@ -1,3 +1,4 @@ + # SPDX-License-Identifier: GPL-2.0-only + obj-$(CONFIG_POWER_RESET) += reset/ + obj-$(CONFIG_POWER_SUPPLY) += supply/ ++obj-$(CONFIG_QCOM_APM) += qcom/ +--- /dev/null ++++ b/drivers/power/qcom/Kconfig +@@ -0,0 +1,7 @@ ++config QCOM_APM ++ bool "Qualcomm Technologies Inc platform specific APM driver" ++ help ++ Platform specific driver to manage the power source of ++ memory arrays. Interfaces with regulator drivers to ensure ++ SRAM Vmin requirements are met across different performance ++ levels. +--- /dev/null ++++ b/drivers/power/qcom/Makefile +@@ -0,0 +1 @@ ++obj-$(CONFIG_QCOM_APM) += apm.o +--- /dev/null ++++ b/drivers/power/qcom/apm.c +@@ -0,0 +1,944 @@ ++/* ++ * Copyright (c) 2015-2016, The Linux Foundation. All rights reserved. ++ * ++ * This program is free software; you can redistribute it and/or modify ++ * it under the terms of the GNU General Public License version 2 and ++ * only version 2 as published by the Free Software Foundation. ++ * ++ * This program is distributed in the hope that it will be useful, ++ * but WITHOUT ANY WARRANTY; without even the implied warranty of ++ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the ++ * GNU General Public License for more details. 
++ */ ++ ++#define pr_fmt(fmt) "%s: " fmt, __func__ ++ ++#include ++#include ++#include ++#include ++#include ++#include ++#include ++#include ++#include ++#include ++#include ++#include ++#include ++ ++/* ++ * VDD_APCC ++ * ============================================================= ++ * | VDD_MX | | ++ * | ==========================|============= | ++ * ___|___ ___|___ ___|___ ___|___ ___|___ ___|___ ++ * | | | | | | | | | | | | ++ * | APCC | | MX HS | | MX HS | | APCC | | MX HS | | APCC | ++ * | HS | | | | | | HS | | | | HS | ++ * |_______| |_______| |_______| |_______| |_______| |_______| ++ * |_________| |_________| |__________| ++ * | | | ++ * ______|_____ ______|_____ _______|_____ ++ * | | | | | | ++ * | | | | | | ++ * | CPU MEM | | L2 MEM | | L3 MEM | ++ * | Arrays | | Arrays | | Arrays | ++ * | | | | | | ++ * |____________| |____________| |_____________| ++ * ++ */ ++ ++/* Register value definitions */ ++#define APCS_GFMUXA_SEL_VAL 0x13 ++#define APCS_GFMUXA_DESEL_VAL 0x03 ++#define MSM_APM_MX_MODE_VAL 0x00 ++#define MSM_APM_APCC_MODE_VAL 0x10 ++#define MSM_APM_MX_DONE_VAL 0x00 ++#define MSM_APM_APCC_DONE_VAL 0x03 ++#define MSM_APM_OVERRIDE_SEL_VAL 0xb0 ++#define MSM_APM_SEC_CLK_SEL_VAL 0x30 ++#define SPM_EVENT_SET_VAL 0x01 ++#define SPM_EVENT_CLEAR_VAL 0x00 ++ ++/* Register bit mask definitions */ ++#define MSM_APM_CTL_STS_MASK 0x0f ++ ++/* Register offset definitions */ ++#define APCC_APM_MODE 0x00000098 ++#define APCC_APM_CTL_STS 0x000000a8 ++#define APCS_SPARE 0x00000068 ++#define APCS_VERSION 0x00000fd0 ++ ++#define HMSS_VERSION_1P2 0x10020000 ++ ++#define MSM_APM_SWITCH_TIMEOUT_US 10 ++#define SPM_WAKEUP_DELAY_US 2 ++#define SPM_EVENT_NUM 6 ++ ++#define MSM_APM_DRIVER_NAME "qcom,msm-apm" ++ ++enum { ++ MSM8996_ID, ++ MSM8953_ID, ++ IPQ807x_ID, ++}; ++ ++struct msm_apm_ctrl_dev { ++ struct list_head list; ++ struct device *dev; ++ enum msm_apm_supply supply; ++ spinlock_t lock; ++ void __iomem *reg_base; ++ void __iomem *apcs_csr_base; ++ void __iomem **apcs_spm_events_addr; ++ void __iomem *apc0_pll_ctl_addr; ++ void __iomem *apc1_pll_ctl_addr; ++ u32 version; ++ struct dentry *debugfs; ++ u32 msm_id; ++}; ++ ++#if defined(CONFIG_DEBUG_FS) ++static struct dentry *apm_debugfs_base; ++#endif ++ ++static DEFINE_MUTEX(apm_ctrl_list_mutex); ++static LIST_HEAD(apm_ctrl_list); ++ ++/* ++ * Get the resources associated with the APM controller from device tree ++ * and remap all I/O addresses that are relevant to this HW revision. 
++ */ ++static int msm_apm_ctrl_devm_ioremap(struct platform_device *pdev, ++ struct msm_apm_ctrl_dev *ctrl) ++{ ++ struct device *dev = &pdev->dev; ++ struct resource *res; ++ static const char *res_name[SPM_EVENT_NUM] = { ++ "apc0-l2-spm", ++ "apc1-l2-spm", ++ "apc0-cpu0-spm", ++ "apc0-cpu1-spm", ++ "apc1-cpu0-spm", ++ "apc1-cpu1-spm" ++ }; ++ int i, ret = 0; ++ ++ res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "pm-apcc-glb"); ++ if (!res) { ++ dev_err(dev, "Missing PM APCC Global register physical address"); ++ return -EINVAL; ++ } ++ ctrl->reg_base = devm_ioremap(dev, res->start, resource_size(res)); ++ if (!ctrl->reg_base) { ++ dev_err(dev, "Failed to map PM APCC Global registers\n"); ++ return -ENOMEM; ++ } ++ ++ res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "apcs-csr"); ++ if (!res) { ++ dev_err(dev, "Missing APCS CSR physical base address"); ++ return -EINVAL; ++ } ++ ctrl->apcs_csr_base = devm_ioremap(dev, res->start, resource_size(res)); ++ if (!ctrl->apcs_csr_base) { ++ dev_err(dev, "Failed to map APCS CSR registers\n"); ++ return -ENOMEM; ++ } ++ ++ ctrl->version = readl_relaxed(ctrl->apcs_csr_base + APCS_VERSION); ++ ++ if (ctrl->version >= HMSS_VERSION_1P2) ++ return ret; ++ ++ ctrl->apcs_spm_events_addr = devm_kzalloc(&pdev->dev, ++ SPM_EVENT_NUM ++ * sizeof(void __iomem *), ++ GFP_KERNEL); ++ if (!ctrl->apcs_spm_events_addr) { ++ dev_err(dev, "Failed to allocate memory for APCS SPM event registers\n"); ++ return -ENOMEM; ++ } ++ ++ for (i = 0; i < SPM_EVENT_NUM; i++) { ++ res = platform_get_resource_byname(pdev, IORESOURCE_MEM, ++ res_name[i]); ++ if (!res) { ++ dev_err(dev, "Missing address for %s\n", res_name[i]); ++ ret = -EINVAL; ++ goto free_events; ++ } ++ ++ ctrl->apcs_spm_events_addr[i] = devm_ioremap(dev, res->start, ++ resource_size(res)); ++ if (!ctrl->apcs_spm_events_addr[i]) { ++ dev_err(dev, "Failed to map %s\n", res_name[i]); ++ ret = -ENOMEM; ++ goto free_events; ++ } ++ ++ dev_dbg(dev, "%s event phys: %pa virt:0x%p\n", res_name[i], ++ &res->start, ctrl->apcs_spm_events_addr[i]); ++ } ++ ++ res = platform_get_resource_byname(pdev, IORESOURCE_MEM, ++ "apc0-pll-ctl"); ++ if (!res) { ++ dev_err(dev, "Missing APC0 PLL CTL physical address\n"); ++ ret = -EINVAL; ++ goto free_events; ++ } ++ ++ ctrl->apc0_pll_ctl_addr = devm_ioremap(dev, ++ res->start, ++ resource_size(res)); ++ if (!ctrl->apc0_pll_ctl_addr) { ++ dev_err(dev, "Failed to map APC0 PLL CTL register\n"); ++ ret = -ENOMEM; ++ goto free_events; ++ } ++ ++ res = platform_get_resource_byname(pdev, IORESOURCE_MEM, ++ "apc1-pll-ctl"); ++ if (!res) { ++ dev_err(dev, "Missing APC1 PLL CTL physical address\n"); ++ ret = -EINVAL; ++ goto free_events; ++ } ++ ++ ctrl->apc1_pll_ctl_addr = devm_ioremap(dev, ++ res->start, ++ resource_size(res)); ++ if (!ctrl->apc1_pll_ctl_addr) { ++ dev_err(dev, "Failed to map APC1 PLL CTL register\n"); ++ ret = -ENOMEM; ++ goto free_events; ++ } ++ ++ return ret; ++ ++free_events: ++ devm_kfree(dev, ctrl->apcs_spm_events_addr); ++ return ret; ++} ++ ++/* 8953 register offset definition */ ++#define MSM8953_APM_DLY_CNTR 0x2ac ++ ++/* Register field shift definitions */ ++#define APM_CTL_SEL_SWITCH_DLY_SHIFT 0 ++#define APM_CTL_RESUME_CLK_DLY_SHIFT 8 ++#define APM_CTL_HALT_CLK_DLY_SHIFT 16 ++#define APM_CTL_POST_HALT_DLY_SHIFT 24 ++ ++/* Register field mask definitions */ ++#define APM_CTL_SEL_SWITCH_DLY_MASK GENMASK(7, 0) ++#define APM_CTL_RESUME_CLK_DLY_MASK GENMASK(15, 8) ++#define APM_CTL_HALT_CLK_DLY_MASK GENMASK(23, 16) ++#define 
APM_CTL_POST_HALT_DLY_MASK GENMASK(31, 24) ++ ++/* ++ * Get the resources associated with the msm8953 APM controller from ++ * device tree, remap all I/O addresses, and program the initial ++ * register configuration required for the 8953 APM controller device. ++ */ ++static int msm8953_apm_ctrl_init(struct platform_device *pdev, ++ struct msm_apm_ctrl_dev *ctrl) ++{ ++ struct device *dev = &pdev->dev; ++ struct resource *res; ++ u32 delay_counter, val = 0, regval = 0; ++ int rc = 0; ++ ++ res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "pm-apcc-glb"); ++ if (!res) { ++ dev_err(dev, "Missing PM APCC Global register physical address\n"); ++ return -ENODEV; ++ } ++ ctrl->reg_base = devm_ioremap(dev, res->start, resource_size(res)); ++ if (!ctrl->reg_base) { ++ dev_err(dev, "Failed to map PM APCC Global registers\n"); ++ return -ENOMEM; ++ } ++ ++ /* ++ * Initial APM register configuration required before starting ++ * APM HW controller. ++ */ ++ regval = readl_relaxed(ctrl->reg_base + MSM8953_APM_DLY_CNTR); ++ val = regval; ++ ++ if (of_find_property(dev->of_node, "qcom,apm-post-halt-delay", NULL)) { ++ rc = of_property_read_u32(dev->of_node, ++ "qcom,apm-post-halt-delay", &delay_counter); ++ if (rc < 0) { ++ dev_err(dev, "apm-post-halt-delay read failed, rc = %d", ++ rc); ++ return rc; ++ } ++ ++ val &= ~APM_CTL_POST_HALT_DLY_MASK; ++ val |= (delay_counter << APM_CTL_POST_HALT_DLY_SHIFT) ++ & APM_CTL_POST_HALT_DLY_MASK; ++ } ++ ++ if (of_find_property(dev->of_node, "qcom,apm-halt-clk-delay", NULL)) { ++ rc = of_property_read_u32(dev->of_node, ++ "qcom,apm-halt-clk-delay", &delay_counter); ++ if (rc < 0) { ++ dev_err(dev, "apm-halt-clk-delay read failed, rc = %d", ++ rc); ++ return rc; ++ } ++ ++ val &= ~APM_CTL_HALT_CLK_DLY_MASK; ++ val |= (delay_counter << APM_CTL_HALT_CLK_DLY_SHIFT) ++ & APM_CTL_HALT_CLK_DLY_MASK; ++ } ++ ++ if (of_find_property(dev->of_node, "qcom,apm-resume-clk-delay", NULL)) { ++ rc = of_property_read_u32(dev->of_node, ++ "qcom,apm-resume-clk-delay", &delay_counter); ++ if (rc < 0) { ++ dev_err(dev, "apm-resume-clk-delay read failed, rc = %d", ++ rc); ++ return rc; ++ } ++ ++ val &= ~APM_CTL_RESUME_CLK_DLY_MASK; ++ val |= (delay_counter << APM_CTL_RESUME_CLK_DLY_SHIFT) ++ & APM_CTL_RESUME_CLK_DLY_MASK; ++ } ++ ++ if (of_find_property(dev->of_node, "qcom,apm-sel-switch-delay", NULL)) { ++ rc = of_property_read_u32(dev->of_node, ++ "qcom,apm-sel-switch-delay", &delay_counter); ++ if (rc < 0) { ++ dev_err(dev, "apm-sel-switch-delay read failed, rc = %d", ++ rc); ++ return rc; ++ } ++ ++ val &= ~APM_CTL_SEL_SWITCH_DLY_MASK; ++ val |= (delay_counter << APM_CTL_SEL_SWITCH_DLY_SHIFT) ++ & APM_CTL_SEL_SWITCH_DLY_MASK; ++ } ++ ++ if (val != regval) { ++ writel_relaxed(val, ctrl->reg_base + MSM8953_APM_DLY_CNTR); ++ /* make sure write completes before return */ ++ mb(); ++ } ++ ++ return rc; ++} ++ ++static int msm8996_apm_switch_to_mx(struct msm_apm_ctrl_dev *ctrl_dev) ++{ ++ int i, timeout = MSM_APM_SWITCH_TIMEOUT_US; ++ u32 regval; ++ int ret = 0; ++ unsigned long flags; ++ ++ spin_lock_irqsave(&ctrl_dev->lock, flags); ++ ++ /* Perform revision-specific programming steps */ ++ if (ctrl_dev->version < HMSS_VERSION_1P2) { ++ /* Clear SPM events */ ++ for (i = 0; i < SPM_EVENT_NUM; i++) ++ writel_relaxed(SPM_EVENT_CLEAR_VAL, ++ ctrl_dev->apcs_spm_events_addr[i]); ++ ++ udelay(SPM_WAKEUP_DELAY_US); ++ ++ /* Switch APC/CBF to GPLL0 clock */ ++ writel_relaxed(APCS_GFMUXA_SEL_VAL, ++ ctrl_dev->apcs_csr_base + APCS_SPARE); ++ ndelay(200); ++ 
writel_relaxed(MSM_APM_OVERRIDE_SEL_VAL, ++ ctrl_dev->apc0_pll_ctl_addr); ++ ndelay(200); ++ writel_relaxed(MSM_APM_OVERRIDE_SEL_VAL, ++ ctrl_dev->apc1_pll_ctl_addr); ++ ++ /* Ensure writes complete before proceeding */ ++ mb(); ++ } ++ ++ /* Switch arrays to MX supply and wait for its completion */ ++ writel_relaxed(MSM_APM_MX_MODE_VAL, ctrl_dev->reg_base + ++ APCC_APM_MODE); ++ ++ /* Ensure write above completes before delaying */ ++ mb(); ++ ++ while (timeout > 0) { ++ regval = readl_relaxed(ctrl_dev->reg_base + APCC_APM_CTL_STS); ++ if ((regval & MSM_APM_CTL_STS_MASK) == ++ MSM_APM_MX_DONE_VAL) ++ break; ++ ++ udelay(1); ++ timeout--; ++ } ++ ++ if (timeout == 0) { ++ ret = -ETIMEDOUT; ++ dev_err(ctrl_dev->dev, "APCC to MX APM switch timed out. APCC_APM_CTL_STS=0x%x\n", ++ regval); ++ } ++ ++ /* Perform revision-specific programming steps */ ++ if (ctrl_dev->version < HMSS_VERSION_1P2) { ++ /* Switch APC/CBF clocks to original source */ ++ writel_relaxed(APCS_GFMUXA_DESEL_VAL, ++ ctrl_dev->apcs_csr_base + APCS_SPARE); ++ ndelay(200); ++ writel_relaxed(MSM_APM_SEC_CLK_SEL_VAL, ++ ctrl_dev->apc0_pll_ctl_addr); ++ ndelay(200); ++ writel_relaxed(MSM_APM_SEC_CLK_SEL_VAL, ++ ctrl_dev->apc1_pll_ctl_addr); ++ ++ /* Complete clock source switch before SPM event sequence */ ++ mb(); ++ ++ /* Set SPM events */ ++ for (i = 0; i < SPM_EVENT_NUM; i++) ++ writel_relaxed(SPM_EVENT_SET_VAL, ++ ctrl_dev->apcs_spm_events_addr[i]); ++ } ++ ++ if (!ret) { ++ ctrl_dev->supply = MSM_APM_SUPPLY_MX; ++ dev_dbg(ctrl_dev->dev, "APM supply switched to MX\n"); ++ } ++ ++ spin_unlock_irqrestore(&ctrl_dev->lock, flags); ++ ++ return ret; ++} ++ ++static int msm8996_apm_switch_to_apcc(struct msm_apm_ctrl_dev *ctrl_dev) ++{ ++ int i, timeout = MSM_APM_SWITCH_TIMEOUT_US; ++ u32 regval; ++ int ret = 0; ++ unsigned long flags; ++ ++ spin_lock_irqsave(&ctrl_dev->lock, flags); ++ ++ /* Perform revision-specific programming steps */ ++ if (ctrl_dev->version < HMSS_VERSION_1P2) { ++ /* Clear SPM events */ ++ for (i = 0; i < SPM_EVENT_NUM; i++) ++ writel_relaxed(SPM_EVENT_CLEAR_VAL, ++ ctrl_dev->apcs_spm_events_addr[i]); ++ ++ udelay(SPM_WAKEUP_DELAY_US); ++ ++ /* Switch APC/CBF to GPLL0 clock */ ++ writel_relaxed(APCS_GFMUXA_SEL_VAL, ++ ctrl_dev->apcs_csr_base + APCS_SPARE); ++ ndelay(200); ++ writel_relaxed(MSM_APM_OVERRIDE_SEL_VAL, ++ ctrl_dev->apc0_pll_ctl_addr); ++ ndelay(200); ++ writel_relaxed(MSM_APM_OVERRIDE_SEL_VAL, ++ ctrl_dev->apc1_pll_ctl_addr); ++ ++ /* Ensure previous writes complete before proceeding */ ++ mb(); ++ } ++ ++ /* Switch arrays to APCC supply and wait for its completion */ ++ writel_relaxed(MSM_APM_APCC_MODE_VAL, ctrl_dev->reg_base + ++ APCC_APM_MODE); ++ ++ /* Ensure write above completes before delaying */ ++ mb(); ++ ++ while (timeout > 0) { ++ regval = readl_relaxed(ctrl_dev->reg_base + APCC_APM_CTL_STS); ++ if ((regval & MSM_APM_CTL_STS_MASK) == ++ MSM_APM_APCC_DONE_VAL) ++ break; ++ ++ udelay(1); ++ timeout--; ++ } ++ ++ if (timeout == 0) { ++ ret = -ETIMEDOUT; ++ dev_err(ctrl_dev->dev, "MX to APCC APM switch timed out. 
APCC_APM_CTL_STS=0x%x\n", ++ regval); ++ } ++ ++ /* Perform revision-specific programming steps */ ++ if (ctrl_dev->version < HMSS_VERSION_1P2) { ++ /* Set SPM events */ ++ for (i = 0; i < SPM_EVENT_NUM; i++) ++ writel_relaxed(SPM_EVENT_SET_VAL, ++ ctrl_dev->apcs_spm_events_addr[i]); ++ ++ /* Complete SPM event sequence before clock source switch */ ++ mb(); ++ ++ /* Switch APC/CBF clocks to original source */ ++ writel_relaxed(APCS_GFMUXA_DESEL_VAL, ++ ctrl_dev->apcs_csr_base + APCS_SPARE); ++ ndelay(200); ++ writel_relaxed(MSM_APM_SEC_CLK_SEL_VAL, ++ ctrl_dev->apc0_pll_ctl_addr); ++ ndelay(200); ++ writel_relaxed(MSM_APM_SEC_CLK_SEL_VAL, ++ ctrl_dev->apc1_pll_ctl_addr); ++ } ++ ++ if (!ret) { ++ ctrl_dev->supply = MSM_APM_SUPPLY_APCC; ++ dev_dbg(ctrl_dev->dev, "APM supply switched to APCC\n"); ++ } ++ ++ spin_unlock_irqrestore(&ctrl_dev->lock, flags); ++ ++ return ret; ++} ++ ++/* 8953 register value definitions */ ++#define MSM8953_APM_MX_MODE_VAL 0x00 ++#define MSM8953_APM_APCC_MODE_VAL 0x02 ++#define MSM8953_APM_MX_DONE_VAL 0x00 ++#define MSM8953_APM_APCC_DONE_VAL 0x03 ++ ++/* 8953 register offset definitions */ ++#define MSM8953_APCC_APM_MODE 0x000002a8 ++#define MSM8953_APCC_APM_CTL_STS 0x000002b0 ++ ++/* 8953 constants */ ++#define MSM8953_APM_SWITCH_TIMEOUT_US 500 ++ ++/* Register bit mask definitions */ ++#define MSM8953_APM_CTL_STS_MASK 0x1f ++ ++static int msm8953_apm_switch_to_mx(struct msm_apm_ctrl_dev *ctrl_dev) ++{ ++ int timeout = MSM8953_APM_SWITCH_TIMEOUT_US; ++ u32 regval; ++ int ret = 0; ++ unsigned long flags; ++ ++ spin_lock_irqsave(&ctrl_dev->lock, flags); ++ ++ /* Switch arrays to MX supply and wait for its completion */ ++ writel_relaxed(MSM8953_APM_MX_MODE_VAL, ctrl_dev->reg_base + ++ MSM8953_APCC_APM_MODE); ++ ++ /* Ensure write above completes before delaying */ ++ mb(); ++ ++ while (timeout > 0) { ++ regval = readl_relaxed(ctrl_dev->reg_base + ++ MSM8953_APCC_APM_CTL_STS); ++ if ((regval & MSM8953_APM_CTL_STS_MASK) == ++ MSM8953_APM_MX_DONE_VAL) ++ break; ++ ++ udelay(1); ++ timeout--; ++ } ++ ++ if (timeout == 0) { ++ ret = -ETIMEDOUT; ++ dev_err(ctrl_dev->dev, "APCC to MX APM switch timed out. APCC_APM_CTL_STS=0x%x\n", ++ regval); ++ } else { ++ ctrl_dev->supply = MSM_APM_SUPPLY_MX; ++ dev_dbg(ctrl_dev->dev, "APM supply switched to MX\n"); ++ } ++ ++ spin_unlock_irqrestore(&ctrl_dev->lock, flags); ++ ++ return ret; ++} ++ ++static int msm8953_apm_switch_to_apcc(struct msm_apm_ctrl_dev *ctrl_dev) ++{ ++ int timeout = MSM8953_APM_SWITCH_TIMEOUT_US; ++ u32 regval; ++ int ret = 0; ++ unsigned long flags; ++ ++ spin_lock_irqsave(&ctrl_dev->lock, flags); ++ ++ /* Switch arrays to APCC supply and wait for its completion */ ++ writel_relaxed(MSM8953_APM_APCC_MODE_VAL, ctrl_dev->reg_base + ++ MSM8953_APCC_APM_MODE); ++ ++ /* Ensure write above completes before delaying */ ++ mb(); ++ ++ while (timeout > 0) { ++ regval = readl_relaxed(ctrl_dev->reg_base + ++ MSM8953_APCC_APM_CTL_STS); ++ if ((regval & MSM8953_APM_CTL_STS_MASK) == ++ MSM8953_APM_APCC_DONE_VAL) ++ break; ++ ++ udelay(1); ++ timeout--; ++ } ++ ++ if (timeout == 0) { ++ ret = -ETIMEDOUT; ++ dev_err(ctrl_dev->dev, "MX to APCC APM switch timed out. 
APCC_APM_CTL_STS=0x%x\n", ++ regval); ++ } else { ++ ctrl_dev->supply = MSM_APM_SUPPLY_APCC; ++ dev_dbg(ctrl_dev->dev, "APM supply switched to APCC\n"); ++ } ++ ++ spin_unlock_irqrestore(&ctrl_dev->lock, flags); ++ ++ return ret; ++} ++ ++static int msm_apm_switch_to_mx(struct msm_apm_ctrl_dev *ctrl_dev) ++{ ++ int ret = 0; ++ ++ switch (ctrl_dev->msm_id) { ++ case MSM8996_ID: ++ ret = msm8996_apm_switch_to_mx(ctrl_dev); ++ break; ++ case MSM8953_ID: ++ case IPQ807x_ID: ++ ret = msm8953_apm_switch_to_mx(ctrl_dev); ++ break; ++ } ++ ++ return ret; ++} ++ ++static int msm_apm_switch_to_apcc(struct msm_apm_ctrl_dev *ctrl_dev) ++{ ++ int ret = 0; ++ ++ switch (ctrl_dev->msm_id) { ++ case MSM8996_ID: ++ ret = msm8996_apm_switch_to_apcc(ctrl_dev); ++ break; ++ case MSM8953_ID: ++ case IPQ807x_ID: ++ ret = msm8953_apm_switch_to_apcc(ctrl_dev); ++ break; ++ } ++ ++ return ret; ++} ++ ++/** ++ * msm_apm_get_supply() - Returns the supply that is currently ++ * powering the memory arrays ++ * @ctrl_dev: Pointer to an MSM APM controller device ++ * ++ * Returns the supply currently selected by the APM. ++ */ ++int msm_apm_get_supply(struct msm_apm_ctrl_dev *ctrl_dev) ++{ ++ return ctrl_dev->supply; ++} ++EXPORT_SYMBOL(msm_apm_get_supply); ++ ++/** ++ * msm_apm_set_supply() - Perform the necessary steps to switch the voltage ++ * source of the memory arrays to a given supply ++ * @ctrl_dev: Pointer to an MSM APM controller device ++ * @supply: Power rail to use as supply for the memory ++ * arrays ++ * ++ * Returns 0 on success, -ETIMEDOUT on APM switch timeout, or -EPERM if ++ * the supply is not supported. ++ */ ++int msm_apm_set_supply(struct msm_apm_ctrl_dev *ctrl_dev, ++ enum msm_apm_supply supply) ++{ ++ int ret; ++ ++ switch (supply) { ++ case MSM_APM_SUPPLY_APCC: ++ ret = msm_apm_switch_to_apcc(ctrl_dev); ++ break; ++ case MSM_APM_SUPPLY_MX: ++ ret = msm_apm_switch_to_mx(ctrl_dev); ++ break; ++ default: ++ ret = -EPERM; ++ break; ++ } ++ ++ return ret; ++} ++EXPORT_SYMBOL(msm_apm_set_supply); ++ ++/** ++ * msm_apm_ctrl_dev_get() - get a handle to the MSM APM controller linked to ++ * the device in device tree ++ * @dev: Pointer to the device ++ * ++ * The device must specify "qcom,apm-ctrl" property in its device tree ++ * node which points to an MSM APM controller device node. ++ * ++ * Returns an MSM APM controller handle if successful or ERR_PTR on any error. ++ * If the APM controller device hasn't probed yet, ERR_PTR(-EPROBE_DEFER) is ++ * returned. 
++ */ ++struct msm_apm_ctrl_dev *msm_apm_ctrl_dev_get(struct device *dev) ++{ ++ struct msm_apm_ctrl_dev *ctrl_dev = NULL; ++ struct msm_apm_ctrl_dev *dev_found = ERR_PTR(-EPROBE_DEFER); ++ struct device_node *ctrl_node; ++ ++ if (!dev || !dev->of_node) { ++ pr_err("Invalid device node\n"); ++ return ERR_PTR(-EINVAL); ++ } ++ ++ ctrl_node = of_parse_phandle(dev->of_node, "qcom,apm-ctrl", 0); ++ if (!ctrl_node) { ++ pr_err("Could not find qcom,apm-ctrl property in %s\n", ++ dev->of_node->full_name); ++ return ERR_PTR(-ENXIO); ++ } ++ ++ mutex_lock(&apm_ctrl_list_mutex); ++ list_for_each_entry(ctrl_dev, &apm_ctrl_list, list) { ++ if (ctrl_dev->dev && ctrl_dev->dev->of_node == ctrl_node) { ++ dev_found = ctrl_dev; ++ break; ++ } ++ } ++ mutex_unlock(&apm_ctrl_list_mutex); ++ ++ of_node_put(ctrl_node); ++ return dev_found; ++} ++EXPORT_SYMBOL(msm_apm_ctrl_dev_get); ++ ++#if defined(CONFIG_DEBUG_FS) ++ ++static int apm_supply_dbg_open(struct inode *inode, struct file *filep) ++{ ++ filep->private_data = inode->i_private; ++ ++ return 0; ++} ++ ++static ssize_t apm_supply_dbg_read(struct file *filep, char __user *ubuf, ++ size_t count, loff_t *ppos) ++{ ++ struct msm_apm_ctrl_dev *ctrl_dev = filep->private_data; ++ char buf[10]; ++ int len; ++ ++ if (!ctrl_dev) { ++ pr_err("invalid apm ctrl handle\n"); ++ return -ENODEV; ++ } ++ ++ if (ctrl_dev->supply == MSM_APM_SUPPLY_APCC) ++ len = snprintf(buf, sizeof(buf), "APCC\n"); ++ else if (ctrl_dev->supply == MSM_APM_SUPPLY_MX) ++ len = snprintf(buf, sizeof(buf), "MX\n"); ++ else ++ len = snprintf(buf, sizeof(buf), "ERR\n"); ++ ++ return simple_read_from_buffer(ubuf, count, ppos, buf, len); ++} ++ ++static const struct file_operations apm_supply_fops = { ++ .open = apm_supply_dbg_open, ++ .read = apm_supply_dbg_read, ++}; ++ ++static void apm_debugfs_base_init(void) ++{ ++ apm_debugfs_base = debugfs_create_dir("msm-apm", NULL); ++ ++ if (IS_ERR_OR_NULL(apm_debugfs_base)) ++ pr_err("msm-apm debugfs base directory creation failed\n"); ++} ++ ++static void apm_debugfs_init(struct msm_apm_ctrl_dev *ctrl_dev) ++{ ++ struct dentry *temp; ++ ++ if (IS_ERR_OR_NULL(apm_debugfs_base)) { ++ pr_err("Base directory missing, cannot create apm debugfs nodes\n"); ++ return; ++ } ++ ++ ctrl_dev->debugfs = debugfs_create_dir(dev_name(ctrl_dev->dev), ++ apm_debugfs_base); ++ if (IS_ERR_OR_NULL(ctrl_dev->debugfs)) { ++ pr_err("%s debugfs directory creation failed\n", ++ dev_name(ctrl_dev->dev)); ++ return; ++ } ++ ++ temp = debugfs_create_file("supply", S_IRUGO, ctrl_dev->debugfs, ++ ctrl_dev, &apm_supply_fops); ++ if (IS_ERR_OR_NULL(temp)) { ++ pr_err("supply mode creation failed\n"); ++ return; ++ } ++} ++ ++static void apm_debugfs_deinit(struct msm_apm_ctrl_dev *ctrl_dev) ++{ ++ if (!IS_ERR_OR_NULL(ctrl_dev->debugfs)) ++ debugfs_remove_recursive(ctrl_dev->debugfs); ++} ++ ++static void apm_debugfs_base_remove(void) ++{ ++ debugfs_remove_recursive(apm_debugfs_base); ++} ++#else ++ ++static void apm_debugfs_base_init(void) ++{} ++ ++static void apm_debugfs_init(struct msm_apm_ctrl_dev *ctrl_dev) ++{} ++ ++static void apm_debugfs_deinit(struct msm_apm_ctrl_dev *ctrl_dev) ++{} ++ ++static void apm_debugfs_base_remove(void) ++{} ++ ++#endif ++ ++static struct of_device_id msm_apm_match_table[] = { ++ { ++ .compatible = "qcom,msm-apm", ++ .data = (void *)(uintptr_t)MSM8996_ID, ++ }, ++ { ++ .compatible = "qcom,msm8953-apm", ++ .data = (void *)(uintptr_t)MSM8953_ID, ++ }, ++ { ++ .compatible = "qcom,ipq807x-apm", ++ .data = (void *)(uintptr_t)IPQ807x_ID, ++ }, ++ {} ++}; ++ 
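For orientation, the in-tree consumer of this interface is the CPR regulator driver added by the following patch: it resolves the controller through the qcom,apm-ctrl phandle and switches the memory-array supply as the rail voltage changes. A rough, illustrative-only sketch of that consumer pattern (the function name is hypothetical; the calls are the ones exported by this driver):

	#include <linux/err.h>
	#include <linux/platform_device.h>
	#include <linux/power/qcom/apm.h>

	static int example_attach_apm(struct platform_device *pdev)
	{
		struct msm_apm_ctrl_dev *apm;

		/* ERR_PTR(-EPROBE_DEFER) until the referenced APM controller probes. */
		apm = msm_apm_ctrl_dev_get(&pdev->dev);
		if (IS_ERR(apm))
			return PTR_ERR(apm);

		/* Keep the memory arrays on VDD_MX while VDD_APCC goes below SRAM Vmin. */
		return msm_apm_set_supply(apm, MSM_APM_SUPPLY_MX);
	}
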
++static int msm_apm_probe(struct platform_device *pdev) ++{ ++ struct device *dev = &pdev->dev; ++ struct msm_apm_ctrl_dev *ctrl; ++ const struct of_device_id *match; ++ int ret = 0; ++ ++ dev_dbg(dev, "probing MSM Array Power Mux driver\n"); ++ ++ if (!dev->of_node) { ++ dev_err(dev, "Device tree node is missing\n"); ++ return -ENODEV; ++ } ++ ++ match = of_match_device(msm_apm_match_table, dev); ++ if (!match) ++ return -ENODEV; ++ ++ ctrl = devm_kzalloc(dev, sizeof(*ctrl), GFP_KERNEL); ++ if (!ctrl) { ++ dev_err(dev, "MSM APM controller memory allocation failed\n"); ++ return -ENOMEM; ++ } ++ ++ INIT_LIST_HEAD(&ctrl->list); ++ spin_lock_init(&ctrl->lock); ++ ctrl->dev = dev; ++ ctrl->msm_id = (uintptr_t)match->data; ++ platform_set_drvdata(pdev, ctrl); ++ ++ switch (ctrl->msm_id) { ++ case MSM8996_ID: ++ ret = msm_apm_ctrl_devm_ioremap(pdev, ctrl); ++ if (ret) { ++ dev_err(dev, "Failed to add APM controller device\n"); ++ return ret; ++ } ++ break; ++ case MSM8953_ID: ++ case IPQ807x_ID: ++ ret = msm8953_apm_ctrl_init(pdev, ctrl); ++ if (ret) { ++ dev_err(dev, "Failed to initialize APM controller device: ret=%d\n", ++ ret); ++ return ret; ++ } ++ break; ++ default: ++ dev_err(dev, "unable to add APM controller device for msm_id:%d\n", ++ ctrl->msm_id); ++ return -ENODEV; ++ } ++ ++ apm_debugfs_init(ctrl); ++ mutex_lock(&apm_ctrl_list_mutex); ++ list_add_tail(&ctrl->list, &apm_ctrl_list); ++ mutex_unlock(&apm_ctrl_list_mutex); ++ ++ dev_dbg(dev, "MSM Array Power Mux driver probe successful"); ++ ++ return ret; ++} ++ ++static int msm_apm_remove(struct platform_device *pdev) ++{ ++ struct msm_apm_ctrl_dev *ctrl_dev; ++ ++ ctrl_dev = platform_get_drvdata(pdev); ++ if (ctrl_dev) { ++ mutex_lock(&apm_ctrl_list_mutex); ++ list_del(&ctrl_dev->list); ++ mutex_unlock(&apm_ctrl_list_mutex); ++ apm_debugfs_deinit(ctrl_dev); ++ } ++ ++ return 0; ++} ++ ++static struct platform_driver msm_apm_driver = { ++ .driver = { ++ .name = MSM_APM_DRIVER_NAME, ++ .of_match_table = msm_apm_match_table, ++ .owner = THIS_MODULE, ++ }, ++ .probe = msm_apm_probe, ++ .remove = msm_apm_remove, ++}; ++ ++static int __init msm_apm_init(void) ++{ ++ apm_debugfs_base_init(); ++ return platform_driver_register(&msm_apm_driver); ++} ++ ++static void __exit msm_apm_exit(void) ++{ ++ platform_driver_unregister(&msm_apm_driver); ++ apm_debugfs_base_remove(); ++} ++ ++arch_initcall(msm_apm_init); ++module_exit(msm_apm_exit); ++ ++MODULE_DESCRIPTION("MSM Array Power Mux driver"); ++MODULE_LICENSE("GPL v2"); +--- /dev/null ++++ b/include/linux/power/qcom/apm.h +@@ -0,0 +1,48 @@ ++/* ++ * Copyright (c) 2015, The Linux Foundation. All rights reserved. ++ * ++ * This program is free software; you can redistribute it and/or modify ++ * it under the terms of the GNU General Public License version 2 and ++ * only version 2 as published by the Free Software Foundation. ++ * ++ * This program is distributed in the hope that it will be useful, ++ * but WITHOUT ANY WARRANTY; without even the implied warranty of ++ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the ++ * GNU General Public License for more details. 
++ */ ++ ++#ifndef __LINUX_POWER_QCOM_APM_H__ ++#define __LINUX_POWER_QCOM_APM_H__ ++ ++#include ++#include ++ ++/** ++ * enum msm_apm_supply - supported power rails to supply memory arrays ++ * %MSM_APM_SUPPLY_APCC: to enable selection of VDD_APCC rail as supply ++ * %MSM_APM_SUPPLY_MX: to enable selection of VDD_MX rail as supply ++ */ ++enum msm_apm_supply { ++ MSM_APM_SUPPLY_APCC, ++ MSM_APM_SUPPLY_MX, ++}; ++ ++/* Handle used to identify an APM controller device */ ++struct msm_apm_ctrl_dev; ++ ++#ifdef CONFIG_QCOM_APM ++struct msm_apm_ctrl_dev *msm_apm_ctrl_dev_get(struct device *dev); ++int msm_apm_set_supply(struct msm_apm_ctrl_dev *ctrl_dev, ++ enum msm_apm_supply supply); ++int msm_apm_get_supply(struct msm_apm_ctrl_dev *ctrl_dev); ++ ++#else ++static inline struct msm_apm_ctrl_dev *msm_apm_ctrl_dev_get(struct device *dev) ++{ return ERR_PTR(-EPERM); } ++static inline int msm_apm_set_supply(struct msm_apm_ctrl_dev *ctrl_dev, ++ enum msm_apm_supply supply) ++{ return -EPERM; } ++static inline int msm_apm_get_supply(struct msm_apm_ctrl_dev *ctrl_dev) ++{ return -EPERM; } ++#endif ++#endif diff --git a/target/linux/qualcommax/patches-6.6/0901-regulator-add-Qualcomm-CPR-regulators.patch b/target/linux/qualcommax/patches-6.6/0901-regulator-add-Qualcomm-CPR-regulators.patch new file mode 100644 index 000000000..c85be0357 --- /dev/null +++ b/target/linux/qualcommax/patches-6.6/0901-regulator-add-Qualcomm-CPR-regulators.patch @@ -0,0 +1,11688 @@ +From c9df32c057e43e38c8113199e64f7a64f8d341df Mon Sep 17 00:00:00 2001 +From: Robert Marko +Date: Mon, 11 Apr 2022 14:35:36 +0200 +Subject: [PATCH] regulator: add Qualcomm CPR regulators + +Allow building Qualcomm CPR regulators. + +Signed-off-by: Robert Marko +--- + drivers/regulator/Kconfig | 33 + + drivers/regulator/Makefile | 3 + + drivers/regulator/cpr3-npu-regulator.c | 695 +++ + drivers/regulator/cpr3-regulator.c | 5111 +++++++++++++++++++++++ + drivers/regulator/cpr3-regulator.h | 1211 ++++++ + drivers/regulator/cpr3-util.c | 2750 ++++++++++++ + drivers/regulator/cpr4-apss-regulator.c | 1819 ++++++++ + include/soc/qcom/socinfo.h | 463 ++ + 8 files changed, 12085 insertions(+) + create mode 100644 drivers/regulator/cpr3-npu-regulator.c + create mode 100644 drivers/regulator/cpr3-regulator.c + create mode 100644 drivers/regulator/cpr3-regulator.h + create mode 100644 drivers/regulator/cpr3-util.c + create mode 100644 drivers/regulator/cpr4-apss-regulator.c + create mode 100644 include/soc/qcom/socinfo.h + +--- a/drivers/regulator/Kconfig ++++ b/drivers/regulator/Kconfig +@@ -1663,4 +1663,37 @@ config REGULATOR_QCOM_LABIBB + boost regulator and IBB can be used as a negative boost regulator + for LCD display panel. + ++config REGULATOR_CPR3 ++ bool "QCOM CPR3 regulator core support" ++ help ++ This driver supports Core Power Reduction (CPR) version 3 controllers ++ which are used by some Qualcomm Technologies, Inc. SoCs to ++ manage important voltage regulators. CPR3 controllers are capable of ++ monitoring several ring oscillator sensing loops simultaneously. The ++ CPR3 controller informs software when the silicon conditions require ++ the supply voltage to be increased or decreased. On certain supply ++ rails, the CPR3 controller is able to propagate the voltage increase ++ or decrease requests all the way to the PMIC without software ++ involvement. ++ ++config REGULATOR_CPR3_NPU ++ bool "QCOM CPR3 regulator for NPU" ++ depends on OF && REGULATOR_CPR3 ++ help ++ This driver supports Qualcomm Technologies, Inc. 
NPU CPR3 ++ regulator Which will always operate in open loop. ++ ++config REGULATOR_CPR4_APSS ++ bool "QCOM CPR4 regulator for APSS" ++ depends on OF && REGULATOR_CPR3 ++ help ++ This driver supports Qualcomm Technologies, Inc. APSS application ++ processor specific features including memory array power mux (APM) ++ switching, one CPR4 thread which monitor the two APSS clusters that ++ are both powered by a shared supply, hardware closed-loop auto ++ voltage stepping, voltage adjustments based on online core count, ++ voltage adjustments based on temperature readings, and voltage ++ adjustments for performance boost mode. This driver reads both initial ++ voltage and CPR target quotient values out of hardware fuses. ++ + endif +--- a/drivers/regulator/Makefile ++++ b/drivers/regulator/Makefile +@@ -116,6 +116,9 @@ obj-$(CONFIG_REGULATOR_QCOM_RPMH) += qco + obj-$(CONFIG_REGULATOR_QCOM_SMD_RPM) += qcom_smd-regulator.o + obj-$(CONFIG_REGULATOR_QCOM_SPMI) += qcom_spmi-regulator.o + obj-$(CONFIG_REGULATOR_QCOM_USB_VBUS) += qcom_usb_vbus-regulator.o ++obj-$(CONFIG_REGULATOR_CPR3) += cpr3-regulator.o cpr3-util.o ++obj-$(CONFIG_REGULATOR_CPR3_NPU) += cpr3-npu-regulator.o ++obj-$(CONFIG_REGULATOR_CPR4_APSS) += cpr4-apss-regulator.o + obj-$(CONFIG_REGULATOR_PALMAS) += palmas-regulator.o + obj-$(CONFIG_REGULATOR_PCA9450) += pca9450-regulator.o + obj-$(CONFIG_REGULATOR_PF8X00) += pf8x00-regulator.o +--- /dev/null ++++ b/drivers/regulator/cpr3-npu-regulator.c +@@ -0,0 +1,695 @@ ++/* ++ * Copyright (c) 2017, The Linux Foundation. All rights reserved. ++ * ++ * Permission to use, copy, modify, and/or distribute this software for any ++ * purpose with or without fee is hereby granted, provided that the above ++ * copyright notice and this permission notice appear in all copies. ++ * ++ * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES ++ * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF ++ * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ++ * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES ++ * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN ++ * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF ++ * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. ++ */ ++ ++#include ++#include ++#include ++#include ++#include ++#include ++#include ++ ++#include "cpr3-regulator.h" ++ ++#define IPQ807x_NPU_FUSE_CORNERS 2 ++#define IPQ817x_NPU_FUSE_CORNERS 1 ++#define IPQ807x_NPU_FUSE_STEP_VOLT 8000 ++#define IPQ807x_NPU_VOLTAGE_FUSE_SIZE 6 ++#define IPQ807x_NPU_CPR_CLOCK_RATE 19200000 ++ ++#define IPQ807x_NPU_CPR_TCSR_START 6 ++#define IPQ807x_NPU_CPR_TCSR_END 7 ++ ++#define NPU_TSENS 5 ++ ++u32 g_valid_npu_fuse_count = IPQ807x_NPU_FUSE_CORNERS; ++/** ++ * struct cpr3_ipq807x_npu_fuses - NPU specific fuse data for IPQ807x ++ * @init_voltage: Initial (i.e. open-loop) voltage fuse parameter value ++ * for each fuse corner (raw, not converted to a voltage) ++ * This struct holds the values for all of the fuses read from memory. ++ */ ++struct cpr3_ipq807x_npu_fuses { ++ u64 init_voltage[IPQ807x_NPU_FUSE_CORNERS]; ++}; ++ ++/* ++ * Constants which define the name of each fuse corner. 
++ */ ++enum cpr3_ipq807x_npu_fuse_corner { ++ CPR3_IPQ807x_NPU_FUSE_CORNER_NOM = 0, ++ CPR3_IPQ807x_NPU_FUSE_CORNER_TURBO = 1, ++}; ++ ++static const char * const cpr3_ipq807x_npu_fuse_corner_name[] = { ++ [CPR3_IPQ807x_NPU_FUSE_CORNER_NOM] = "NOM", ++ [CPR3_IPQ807x_NPU_FUSE_CORNER_TURBO] = "TURBO", ++}; ++ ++/* ++ * IPQ807x NPU fuse parameter locations: ++ * ++ * Structs are organized with the following dimensions: ++ * Outer: 0 to 1 for fuse corners from lowest to highest corner ++ * Inner: large enough to hold the longest set of parameter segments which ++ * fully defines a fuse parameter, +1 (for NULL termination). ++ * Each segment corresponds to a contiguous group of bits from a ++ * single fuse row. These segments are concatentated together in ++ * order to form the full fuse parameter value. The segments for ++ * a given parameter may correspond to different fuse rows. ++ */ ++static struct cpr3_fuse_param ++ipq807x_npu_init_voltage_param[IPQ807x_NPU_FUSE_CORNERS][2] = { ++ {{73, 22, 27}, {} }, ++ {{73, 16, 21}, {} }, ++}; ++ ++/* ++ * Open loop voltage fuse reference voltages in microvolts for IPQ807x ++ */ ++static int ++ipq807x_npu_fuse_ref_volt [IPQ807x_NPU_FUSE_CORNERS] = { ++ 912000, ++ 992000, ++}; ++ ++/* ++ * IPQ9574 (Few parameters are changed, remaining are same as IPQ807x) ++ */ ++#define IPQ9574_NPU_FUSE_CORNERS 2 ++#define IPQ9574_NPU_FUSE_STEP_VOLT 10000 ++#define IPQ9574_NPU_CPR_CLOCK_RATE 24000000 ++ ++/* ++ * fues parameters for IPQ9574 ++ */ ++static struct cpr3_fuse_param ++ipq9574_npu_init_voltage_param[IPQ9574_NPU_FUSE_CORNERS][2] = { ++ {{105, 12, 17}, {} }, ++ {{105, 6, 11}, {} }, ++}; ++ ++/* ++ * Open loop voltage fuse reference voltages in microvolts for IPQ9574 ++ */ ++static int ++ipq9574_npu_fuse_ref_volt [IPQ9574_NPU_FUSE_CORNERS] = { ++ 862500, ++ 987500, ++}; ++ ++struct cpr3_controller *g_ctrl; ++ ++void cpr3_npu_temp_notify(int sensor, int temp, int low_notif) ++{ ++ u32 prev_sensor_state; ++ ++ if (sensor != NPU_TSENS) ++ return; ++ ++ prev_sensor_state = g_ctrl->cur_sensor_state; ++ if (low_notif) ++ g_ctrl->cur_sensor_state |= BIT(sensor); ++ else ++ g_ctrl->cur_sensor_state &= ~BIT(sensor); ++ ++ if (!prev_sensor_state && g_ctrl->cur_sensor_state) ++ cpr3_handle_temp_open_loop_adjustment(g_ctrl, true); ++ else if (prev_sensor_state && !g_ctrl->cur_sensor_state) ++ cpr3_handle_temp_open_loop_adjustment(g_ctrl, false); ++} ++ ++/** ++ * cpr3_ipq807x_npu_read_fuse_data() - load NPU specific fuse parameter values ++ * @vreg: Pointer to the CPR3 regulator ++ * ++ * This function allocates a cpr3_ipq807x_npu_fuses struct, fills it with ++ * values read out of hardware fuses, and finally copies common fuse values ++ * into the CPR3 regulator struct. 
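++ * Each fuse parameter is read with cpr3_read_fuse_param() using the fuse
++ * row and bit segments listed in the regulator's init_voltage_param table.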
++ * ++ * Return: 0 on success, errno on failure ++ */ ++static int cpr3_ipq807x_npu_read_fuse_data(struct cpr3_regulator *vreg) ++{ ++ void __iomem *base = vreg->thread->ctrl->fuse_base; ++ struct cpr3_ipq807x_npu_fuses *fuse; ++ int i, rc; ++ ++ fuse = devm_kzalloc(vreg->thread->ctrl->dev, sizeof(*fuse), GFP_KERNEL); ++ if (!fuse) ++ return -ENOMEM; ++ ++ for (i = 0; i < g_valid_npu_fuse_count; i++) { ++ rc = cpr3_read_fuse_param(base, ++ vreg->cpr3_regulator_data->init_voltage_param[i], ++ &fuse->init_voltage[i]); ++ if (rc) { ++ cpr3_err(vreg, "Unable to read fuse-corner %d initial voltage fuse, rc=%d\n", ++ i, rc); ++ return rc; ++ } ++ } ++ ++ vreg->fuse_corner_count = g_valid_npu_fuse_count; ++ vreg->platform_fuses = fuse; ++ ++ return 0; ++} ++ ++/** ++ * cpr3_npu_parse_corner_data() - parse NPU corner data from device tree ++ * properties of the CPR3 regulator's device node ++ * @vreg: Pointer to the CPR3 regulator ++ * ++ * Return: 0 on success, errno on failure ++ */ ++static int cpr3_npu_parse_corner_data(struct cpr3_regulator *vreg) ++{ ++ int rc; ++ ++ rc = cpr3_parse_common_corner_data(vreg); ++ if (rc) { ++ cpr3_err(vreg, "error reading corner data, rc=%d\n", rc); ++ return rc; ++ } ++ ++ return rc; ++} ++ ++/** ++ * cpr3_ipq807x_npu_calculate_open_loop_voltages() - calculate the open-loop ++ * voltage for each corner of a CPR3 regulator ++ * @vreg: Pointer to the CPR3 regulator ++ * @temp_correction: Temperature based correction ++ * ++ * If open-loop voltage interpolation is allowed in device tree, then ++ * this function calculates the open-loop voltage for a given corner using ++ * linear interpolation. This interpolation is performed using the processor ++ * frequencies of the lower and higher Fmax corners along with their fused ++ * open-loop voltages. ++ * ++ * If open-loop voltage interpolation is not allowed, then this function uses ++ * the Fmax fused open-loop voltage for all of the corners associated with a ++ * given fuse corner. ++ * ++ * Return: 0 on success, errno on failure ++ */ ++static int cpr3_ipq807x_npu_calculate_open_loop_voltages( ++ struct cpr3_regulator *vreg, bool temp_correction) ++{ ++ struct cpr3_ipq807x_npu_fuses *fuse = vreg->platform_fuses; ++ struct cpr3_controller *ctrl = vreg->thread->ctrl; ++ int i, j, rc = 0; ++ u64 freq_low, volt_low, freq_high, volt_high; ++ int *fuse_volt; ++ int *fmax_corner; ++ ++ fuse_volt = kcalloc(vreg->fuse_corner_count, sizeof(*fuse_volt), ++ GFP_KERNEL); ++ fmax_corner = kcalloc(vreg->fuse_corner_count, sizeof(*fmax_corner), ++ GFP_KERNEL); ++ if (!fuse_volt || !fmax_corner) { ++ rc = -ENOMEM; ++ goto done; ++ } ++ ++ for (i = 0; i < vreg->fuse_corner_count; i++) { ++ if (ctrl->cpr_global_setting == CPR_DISABLED) ++ fuse_volt[i] = vreg->cpr3_regulator_data->fuse_ref_volt[i]; ++ else ++ fuse_volt[i] = cpr3_convert_open_loop_voltage_fuse( ++ vreg->cpr3_regulator_data->fuse_ref_volt[i], ++ vreg->cpr3_regulator_data->fuse_step_volt, ++ fuse->init_voltage[i], ++ IPQ807x_NPU_VOLTAGE_FUSE_SIZE); ++ ++ /* Log fused open-loop voltage values for debugging purposes. 
*/ ++ cpr3_info(vreg, "fused %8s: open-loop=%7d uV\n", ++ cpr3_ipq807x_npu_fuse_corner_name[i], ++ fuse_volt[i]); ++ } ++ ++ rc = cpr3_determine_part_type(vreg, ++ fuse_volt[CPR3_IPQ807x_NPU_FUSE_CORNER_TURBO]); ++ if (rc) { ++ cpr3_err(vreg, ++ "fused part type detection failed failed, rc=%d\n", rc); ++ goto done; ++ } ++ ++ rc = cpr3_adjust_fused_open_loop_voltages(vreg, fuse_volt); ++ if (rc) { ++ cpr3_err(vreg, ++ "fused open-loop voltage adjustment failed, rc=%d\n", ++ rc); ++ goto done; ++ } ++ if (temp_correction) { ++ rc = cpr3_determine_temp_base_open_loop_correction(vreg, ++ fuse_volt); ++ if (rc) { ++ cpr3_err(vreg, ++ "temp open-loop voltage adj. failed, rc=%d\n", ++ rc); ++ goto done; ++ } ++ } ++ ++ for (i = 1; i < vreg->fuse_corner_count; i++) { ++ if (fuse_volt[i] < fuse_volt[i - 1]) { ++ cpr3_info(vreg, ++ "fuse corner %d voltage=%d uV < fuse corner %d \ ++ voltage=%d uV; overriding: fuse corner %d \ ++ voltage=%d\n", ++ i, fuse_volt[i], i - 1, fuse_volt[i - 1], ++ i, fuse_volt[i - 1]); ++ fuse_volt[i] = fuse_volt[i - 1]; ++ } ++ } ++ ++ /* Determine highest corner mapped to each fuse corner */ ++ j = vreg->fuse_corner_count - 1; ++ for (i = vreg->corner_count - 1; i >= 0; i--) { ++ if (vreg->corner[i].cpr_fuse_corner == j) { ++ fmax_corner[j] = i; ++ j--; ++ } ++ } ++ ++ if (j >= 0) { ++ cpr3_err(vreg, "invalid fuse corner mapping\n"); ++ rc = -EINVAL; ++ goto done; ++ } ++ ++ /* ++ * Interpolation is not possible for corners mapped to the lowest fuse ++ * corner so use the fuse corner value directly. ++ */ ++ for (i = 0; i <= fmax_corner[0]; i++) ++ vreg->corner[i].open_loop_volt = fuse_volt[0]; ++ ++ /* Interpolate voltages for the higher fuse corners. */ ++ for (i = 1; i < vreg->fuse_corner_count; i++) { ++ freq_low = vreg->corner[fmax_corner[i - 1]].proc_freq; ++ volt_low = fuse_volt[i - 1]; ++ freq_high = vreg->corner[fmax_corner[i]].proc_freq; ++ volt_high = fuse_volt[i]; ++ ++ for (j = fmax_corner[i - 1] + 1; j <= fmax_corner[i]; j++) ++ vreg->corner[j].open_loop_volt = cpr3_interpolate( ++ freq_low, volt_low, freq_high, volt_high, ++ vreg->corner[j].proc_freq); ++ } ++ ++done: ++ if (rc == 0) { ++ cpr3_debug(vreg, "unadjusted per-corner open-loop voltages:\n"); ++ for (i = 0; i < vreg->corner_count; i++) ++ cpr3_debug(vreg, "open-loop[%2d] = %d uV\n", i, ++ vreg->corner[i].open_loop_volt); ++ ++ rc = cpr3_adjust_open_loop_voltages(vreg); ++ if (rc) ++ cpr3_err(vreg, ++ "open-loop voltage adjustment failed, rc=%d\n", ++ rc); ++ } ++ ++ kfree(fuse_volt); ++ kfree(fmax_corner); ++ return rc; ++} ++ ++/** ++ * cpr3_npu_print_settings() - print out NPU CPR configuration settings into ++ * the kernel log for debugging purposes ++ * @vreg: Pointer to the CPR3 regulator ++ */ ++static void cpr3_npu_print_settings(struct cpr3_regulator *vreg) ++{ ++ struct cpr3_corner *corner; ++ int i; ++ ++ cpr3_debug(vreg, ++ "Corner: Frequency (Hz), Fuse Corner, Floor (uV), \ ++ Open-Loop (uV), Ceiling (uV)\n"); ++ for (i = 0; i < vreg->corner_count; i++) { ++ corner = &vreg->corner[i]; ++ cpr3_debug(vreg, "%3d: %10u, %2d, %7d, %7d, %7d\n", ++ i, corner->proc_freq, corner->cpr_fuse_corner, ++ corner->floor_volt, corner->open_loop_volt, ++ corner->ceiling_volt); ++ } ++ ++ if (vreg->thread->ctrl->apm) ++ cpr3_debug(vreg, "APM threshold = %d uV, APM adjust = %d uV\n", ++ vreg->thread->ctrl->apm_threshold_volt, ++ vreg->thread->ctrl->apm_adj_volt); ++} ++ ++/** ++ * cpr3_ipq807x_npu_calc_temp_based_ol_voltages() - Calculate the open loop ++ * voltages based on temperature based correction 
margins ++ * @vreg: Pointer to the CPR3 regulator ++ */ ++ ++static int ++cpr3_ipq807x_npu_calc_temp_based_ol_voltages(struct cpr3_regulator *vreg, ++ bool temp_correction) ++{ ++ int rc, i; ++ ++ rc = cpr3_ipq807x_npu_calculate_open_loop_voltages(vreg, ++ temp_correction); ++ if (rc) { ++ cpr3_err(vreg, ++ "unable to calculate open-loop voltages, rc=%d\n", rc); ++ return rc; ++ } ++ ++ rc = cpr3_limit_open_loop_voltages(vreg); ++ if (rc) { ++ cpr3_err(vreg, "unable to limit open-loop voltages, rc=%d\n", ++ rc); ++ return rc; ++ } ++ ++ cpr3_open_loop_voltage_as_ceiling(vreg); ++ ++ rc = cpr3_limit_floor_voltages(vreg); ++ if (rc) { ++ cpr3_err(vreg, "unable to limit floor voltages, rc=%d\n", rc); ++ return rc; ++ } ++ ++ for (i = 0; i < vreg->corner_count; i++) { ++ if (temp_correction) ++ vreg->corner[i].cold_temp_open_loop_volt = ++ vreg->corner[i].open_loop_volt; ++ else ++ vreg->corner[i].normal_temp_open_loop_volt = ++ vreg->corner[i].open_loop_volt; ++ } ++ ++ cpr3_npu_print_settings(vreg); ++ ++ return rc; ++} ++ ++/** ++ * cpr3_npu_init_thread() - perform steps necessary to initialize the ++ * configuration data for a CPR3 thread ++ * @thread: Pointer to the CPR3 thread ++ * ++ * Return: 0 on success, errno on failure ++ */ ++static int cpr3_npu_init_thread(struct cpr3_thread *thread) ++{ ++ int rc; ++ ++ rc = cpr3_parse_common_thread_data(thread); ++ if (rc) { ++ cpr3_err(thread->ctrl, ++ "thread %u CPR thread data from DT- failed, rc=%d\n", ++ thread->thread_id, rc); ++ return rc; ++ } ++ ++ return 0; ++} ++ ++/** ++ * cpr3_npu_init_regulator() - perform all steps necessary to initialize the ++ * configuration data for a CPR3 regulator ++ * @vreg: Pointer to the CPR3 regulator ++ * ++ * Return: 0 on success, errno on failure ++ */ ++static int cpr3_npu_init_regulator(struct cpr3_regulator *vreg) ++{ ++ struct cpr3_ipq807x_npu_fuses *fuse; ++ int rc, cold_temp = 0; ++ bool can_adj_cold_temp = cpr3_can_adjust_cold_temp(vreg); ++ ++ rc = cpr3_ipq807x_npu_read_fuse_data(vreg); ++ if (rc) { ++ cpr3_err(vreg, "unable to read CPR fuse data, rc=%d\n", rc); ++ return rc; ++ } ++ ++ fuse = vreg->platform_fuses; ++ ++ rc = cpr3_npu_parse_corner_data(vreg); ++ if (rc) { ++ cpr3_err(vreg, ++ "Cannot read CPR corner data from DT, rc=%d\n", rc); ++ return rc; ++ } ++ ++ rc = cpr3_mem_acc_init(vreg); ++ if (rc) { ++ if (rc != -EPROBE_DEFER) ++ cpr3_err(vreg, ++ "Cannot initialize mem-acc regulator settings, rc=%d\n", ++ rc); ++ return rc; ++ } ++ ++ if (can_adj_cold_temp) { ++ rc = cpr3_ipq807x_npu_calc_temp_based_ol_voltages(vreg, true); ++ if (rc) { ++ cpr3_err(vreg, ++ "unable to calculate open-loop voltages, rc=%d\n", rc); ++ return rc; ++ } ++ } ++ ++ rc = cpr3_ipq807x_npu_calc_temp_based_ol_voltages(vreg, false); ++ if (rc) { ++ cpr3_err(vreg, ++ "unable to calculate open-loop voltages, rc=%d\n", rc); ++ return rc; ++ } ++ ++ if (can_adj_cold_temp) { ++ cpr3_info(vreg, ++ "Normal and Cold condition init done. 
Default to normal.\n"); ++ ++ rc = cpr3_get_cold_temp_threshold(vreg, &cold_temp); ++ if (rc) { ++ cpr3_err(vreg, ++ "Get cold temperature threshold failed, rc=%d\n", rc); ++ return rc; ++ } ++ register_low_temp_notif(NPU_TSENS, cold_temp, ++ cpr3_npu_temp_notify); ++ } ++ ++ return rc; ++} ++ ++/** ++ * cpr3_npu_init_controller() - perform NPU CPR3 controller specific ++ * initializations ++ * @ctrl: Pointer to the CPR3 controller ++ * ++ * Return: 0 on success, errno on failure ++ */ ++static int cpr3_npu_init_controller(struct cpr3_controller *ctrl) ++{ ++ int rc; ++ ++ rc = cpr3_parse_open_loop_common_ctrl_data(ctrl); ++ if (rc) { ++ if (rc != -EPROBE_DEFER) ++ cpr3_err(ctrl, "unable to parse common controller data, rc=%d\n", ++ rc); ++ return rc; ++ } ++ ++ ctrl->ctrl_type = CPR_CTRL_TYPE_CPR3; ++ ctrl->supports_hw_closed_loop = false; ++ ++ return 0; ++} ++ ++static const struct cpr3_reg_data ipq807x_cpr_npu = { ++ .cpr_valid_fuse_count = IPQ807x_NPU_FUSE_CORNERS, ++ .init_voltage_param = ipq807x_npu_init_voltage_param, ++ .fuse_ref_volt = ipq807x_npu_fuse_ref_volt, ++ .fuse_step_volt = IPQ807x_NPU_FUSE_STEP_VOLT, ++ .cpr_clk_rate = IPQ807x_NPU_CPR_CLOCK_RATE, ++}; ++ ++static const struct cpr3_reg_data ipq817x_cpr_npu = { ++ .cpr_valid_fuse_count = IPQ817x_NPU_FUSE_CORNERS, ++ .init_voltage_param = ipq807x_npu_init_voltage_param, ++ .fuse_ref_volt = ipq807x_npu_fuse_ref_volt, ++ .fuse_step_volt = IPQ807x_NPU_FUSE_STEP_VOLT, ++ .cpr_clk_rate = IPQ807x_NPU_CPR_CLOCK_RATE, ++}; ++ ++static const struct cpr3_reg_data ipq9574_cpr_npu = { ++ .cpr_valid_fuse_count = IPQ9574_NPU_FUSE_CORNERS, ++ .init_voltage_param = ipq9574_npu_init_voltage_param, ++ .fuse_ref_volt = ipq9574_npu_fuse_ref_volt, ++ .fuse_step_volt = IPQ9574_NPU_FUSE_STEP_VOLT, ++ .cpr_clk_rate = IPQ9574_NPU_CPR_CLOCK_RATE, ++}; ++ ++static struct of_device_id cpr3_regulator_match_table[] = { ++ { ++ .compatible = "qcom,cpr3-ipq807x-npu-regulator", ++ .data = &ipq807x_cpr_npu ++ }, ++ { ++ .compatible = "qcom,cpr3-ipq817x-npu-regulator", ++ .data = &ipq817x_cpr_npu ++ }, ++ { ++ .compatible = "qcom,cpr3-ipq9574-npu-regulator", ++ .data = &ipq9574_cpr_npu ++ }, ++ {} ++}; ++ ++static int cpr3_npu_regulator_probe(struct platform_device *pdev) ++{ ++ struct device *dev = &pdev->dev; ++ struct cpr3_controller *ctrl; ++ int i, rc; ++ const struct of_device_id *match; ++ struct cpr3_reg_data *cpr_data; ++ ++ if (!dev->of_node) { ++ dev_err(dev, "Device tree node is missing\n"); ++ return -EINVAL; ++ } ++ ++ ctrl = devm_kzalloc(dev, sizeof(*ctrl), GFP_KERNEL); ++ if (!ctrl) ++ return -ENOMEM; ++ g_ctrl = ctrl; ++ ++ match = of_match_device(cpr3_regulator_match_table, &pdev->dev); ++ if (!match) ++ return -ENODEV; ++ ++ cpr_data = (struct cpr3_reg_data *)match->data; ++ g_valid_npu_fuse_count = cpr_data->cpr_valid_fuse_count; ++ dev_info(dev, "NPU CPR valid fuse count: %d\n", g_valid_npu_fuse_count); ++ ctrl->cpr_clock_rate = cpr_data->cpr_clk_rate; ++ ++ ctrl->dev = dev; ++ /* Set to false later if anything precludes CPR operation. 
*/ ++ ctrl->cpr_allowed_hw = true; ++ ++ rc = of_property_read_string(dev->of_node, "qcom,cpr-ctrl-name", ++ &ctrl->name); ++ if (rc) { ++ cpr3_err(ctrl, "unable to read qcom,cpr-ctrl-name, rc=%d\n", ++ rc); ++ return rc; ++ } ++ ++ rc = cpr3_map_fuse_base(ctrl, pdev); ++ if (rc) { ++ cpr3_err(ctrl, "could not map fuse base address\n"); ++ return rc; ++ } ++ ++ rc = cpr3_read_tcsr_setting(ctrl, pdev, IPQ807x_NPU_CPR_TCSR_START, ++ IPQ807x_NPU_CPR_TCSR_END); ++ if (rc) { ++ cpr3_err(ctrl, "could not read CPR tcsr rsetting\n"); ++ return rc; ++ } ++ ++ rc = cpr3_allocate_threads(ctrl, 0, 0); ++ if (rc) { ++ cpr3_err(ctrl, "failed to allocate CPR thread array, rc=%d\n", ++ rc); ++ return rc; ++ } ++ ++ if (ctrl->thread_count != 1) { ++ cpr3_err(ctrl, "expected 1 thread but found %d\n", ++ ctrl->thread_count); ++ return -EINVAL; ++ } ++ ++ rc = cpr3_npu_init_controller(ctrl); ++ if (rc) { ++ if (rc != -EPROBE_DEFER) ++ cpr3_err(ctrl, "failed to initialize CPR controller parameters, rc=%d\n", ++ rc); ++ return rc; ++ } ++ ++ rc = cpr3_npu_init_thread(&ctrl->thread[0]); ++ if (rc) { ++ cpr3_err(ctrl, "thread initialization failed, rc=%d\n", rc); ++ return rc; ++ } ++ ++ for (i = 0; i < ctrl->thread[0].vreg_count; i++) { ++ ctrl->thread[0].vreg[i].cpr3_regulator_data = cpr_data; ++ rc = cpr3_npu_init_regulator(&ctrl->thread[0].vreg[i]); ++ if (rc) { ++ cpr3_err(&ctrl->thread[0].vreg[i], "regulator initialization failed, rc=%d\n", ++ rc); ++ return rc; ++ } ++ } ++ ++ platform_set_drvdata(pdev, ctrl); ++ ++ return cpr3_open_loop_regulator_register(pdev, ctrl); ++} ++ ++static int cpr3_npu_regulator_remove(struct platform_device *pdev) ++{ ++ struct cpr3_controller *ctrl = platform_get_drvdata(pdev); ++ ++ return cpr3_open_loop_regulator_unregister(ctrl); ++} ++ ++static struct platform_driver cpr3_npu_regulator_driver = { ++ .driver = { ++ .name = "qcom,cpr3-npu-regulator", ++ .of_match_table = cpr3_regulator_match_table, ++ .owner = THIS_MODULE, ++ }, ++ .probe = cpr3_npu_regulator_probe, ++ .remove = cpr3_npu_regulator_remove, ++}; ++ ++static int cpr3_regulator_init(void) ++{ ++ return platform_driver_register(&cpr3_npu_regulator_driver); ++} ++arch_initcall(cpr3_regulator_init); ++ ++static void cpr3_regulator_exit(void) ++{ ++ platform_driver_unregister(&cpr3_npu_regulator_driver); ++} ++module_exit(cpr3_regulator_exit); ++ ++MODULE_DESCRIPTION("QCOM CPR3 NPU regulator driver"); ++MODULE_LICENSE("Dual BSD/GPLv2"); ++MODULE_ALIAS("platform:npu-ipq807x"); +--- /dev/null ++++ b/drivers/regulator/cpr3-regulator.c +@@ -0,0 +1,5111 @@ ++/* ++ * Copyright (c) 2015-2017, The Linux Foundation. All rights reserved. ++ * ++ * This program is free software; you can redistribute it and/or modify ++ * it under the terms of the GNU General Public License version 2 and ++ * only version 2 as published by the Free Software Foundation. ++ * ++ * This program is distributed in the hope that it will be useful, ++ * but WITHOUT ANY WARRANTY; without even the implied warranty of ++ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the ++ * GNU General Public License for more details. 
++ */ ++ ++#define pr_fmt(fmt) "%s: " fmt, __func__ ++ ++#include ++#include ++#include ++#include ++#include ++#include ++#include ++#include ++#include ++#include ++#include ++#include ++#include ++#include ++#include ++#include ++#include ++#include ++#include ++#include ++#include ++#include ++#include ++ ++#include "cpr3-regulator.h" ++ ++#define CPR3_REGULATOR_CORNER_INVALID (-1) ++#define CPR3_RO_MASK GENMASK(CPR3_RO_COUNT - 1, 0) ++ ++/* CPR3 registers */ ++#define CPR3_REG_CPR_CTL 0x4 ++#define CPR3_CPR_CTL_LOOP_EN_MASK BIT(0) ++#define CPR3_CPR_CTL_LOOP_ENABLE BIT(0) ++#define CPR3_CPR_CTL_LOOP_DISABLE 0 ++#define CPR3_CPR_CTL_IDLE_CLOCKS_MASK GENMASK(5, 1) ++#define CPR3_CPR_CTL_IDLE_CLOCKS_SHIFT 1 ++#define CPR3_CPR_CTL_COUNT_MODE_MASK GENMASK(7, 6) ++#define CPR3_CPR_CTL_COUNT_MODE_SHIFT 6 ++#define CPR3_CPR_CTL_COUNT_MODE_ALL_AT_ONCE_MIN 0 ++#define CPR3_CPR_CTL_COUNT_MODE_ALL_AT_ONCE_MAX 1 ++#define CPR3_CPR_CTL_COUNT_MODE_STAGGERED 2 ++#define CPR3_CPR_CTL_COUNT_MODE_ALL_AT_ONCE_AGE 3 ++#define CPR3_CPR_CTL_COUNT_REPEAT_MASK GENMASK(31, 9) ++#define CPR3_CPR_CTL_COUNT_REPEAT_SHIFT 9 ++ ++#define CPR3_REG_CPR_STATUS 0x8 ++#define CPR3_CPR_STATUS_BUSY_MASK BIT(0) ++#define CPR3_CPR_STATUS_AGING_MEASUREMENT_MASK BIT(1) ++ ++/* ++ * This register is not present on controllers that support HW closed-loop ++ * except CPR4 APSS controller. ++ */ ++#define CPR3_REG_CPR_TIMER_AUTO_CONT 0xC ++ ++#define CPR3_REG_CPR_STEP_QUOT 0x14 ++#define CPR3_CPR_STEP_QUOT_MIN_MASK GENMASK(5, 0) ++#define CPR3_CPR_STEP_QUOT_MIN_SHIFT 0 ++#define CPR3_CPR_STEP_QUOT_MAX_MASK GENMASK(11, 6) ++#define CPR3_CPR_STEP_QUOT_MAX_SHIFT 6 ++ ++#define CPR3_REG_GCNT(ro) (0xA0 + 0x4 * (ro)) ++ ++#define CPR3_REG_SENSOR_BYPASS_WRITE(sensor) (0xE0 + 0x4 * ((sensor) / 32)) ++#define CPR3_REG_SENSOR_BYPASS_WRITE_BANK(bank) (0xE0 + 0x4 * (bank)) ++ ++#define CPR3_REG_SENSOR_MASK_WRITE(sensor) (0x120 + 0x4 * ((sensor) / 32)) ++#define CPR3_REG_SENSOR_MASK_WRITE_BANK(bank) (0x120 + 0x4 * (bank)) ++#define CPR3_REG_SENSOR_MASK_READ(sensor) (0x140 + 0x4 * ((sensor) / 32)) ++ ++#define CPR3_REG_SENSOR_OWNER(sensor) (0x200 + 0x4 * (sensor)) ++ ++#define CPR3_REG_CONT_CMD 0x800 ++#define CPR3_CONT_CMD_ACK 0x1 ++#define CPR3_CONT_CMD_NACK 0x0 ++ ++#define CPR3_REG_THRESH(thread) (0x808 + 0x440 * (thread)) ++#define CPR3_THRESH_CONS_DOWN_MASK GENMASK(3, 0) ++#define CPR3_THRESH_CONS_DOWN_SHIFT 0 ++#define CPR3_THRESH_CONS_UP_MASK GENMASK(7, 4) ++#define CPR3_THRESH_CONS_UP_SHIFT 4 ++#define CPR3_THRESH_DOWN_THRESH_MASK GENMASK(12, 8) ++#define CPR3_THRESH_DOWN_THRESH_SHIFT 8 ++#define CPR3_THRESH_UP_THRESH_MASK GENMASK(17, 13) ++#define CPR3_THRESH_UP_THRESH_SHIFT 13 ++ ++#define CPR3_REG_RO_MASK(thread) (0x80C + 0x440 * (thread)) ++ ++#define CPR3_REG_RESULT0(thread) (0x810 + 0x440 * (thread)) ++#define CPR3_RESULT0_BUSY_MASK BIT(0) ++#define CPR3_RESULT0_STEP_DN_MASK BIT(1) ++#define CPR3_RESULT0_STEP_UP_MASK BIT(2) ++#define CPR3_RESULT0_ERROR_STEPS_MASK GENMASK(7, 3) ++#define CPR3_RESULT0_ERROR_STEPS_SHIFT 3 ++#define CPR3_RESULT0_ERROR_MASK GENMASK(19, 8) ++#define CPR3_RESULT0_ERROR_SHIFT 8 ++#define CPR3_RESULT0_NEGATIVE_MASK BIT(20) ++ ++#define CPR3_REG_RESULT1(thread) (0x814 + 0x440 * (thread)) ++#define CPR3_RESULT1_QUOT_MIN_MASK GENMASK(11, 0) ++#define CPR3_RESULT1_QUOT_MIN_SHIFT 0 ++#define CPR3_RESULT1_QUOT_MAX_MASK GENMASK(23, 12) ++#define CPR3_RESULT1_QUOT_MAX_SHIFT 12 ++#define CPR3_RESULT1_RO_MIN_MASK GENMASK(27, 24) ++#define CPR3_RESULT1_RO_MIN_SHIFT 24 ++#define CPR3_RESULT1_RO_MAX_MASK GENMASK(31, 
28) ++#define CPR3_RESULT1_RO_MAX_SHIFT 28 ++ ++#define CPR3_REG_RESULT2(thread) (0x818 + 0x440 * (thread)) ++#define CPR3_RESULT2_STEP_QUOT_MIN_MASK GENMASK(5, 0) ++#define CPR3_RESULT2_STEP_QUOT_MIN_SHIFT 0 ++#define CPR3_RESULT2_STEP_QUOT_MAX_MASK GENMASK(11, 6) ++#define CPR3_RESULT2_STEP_QUOT_MAX_SHIFT 6 ++#define CPR3_RESULT2_SENSOR_MIN_MASK GENMASK(23, 16) ++#define CPR3_RESULT2_SENSOR_MIN_SHIFT 16 ++#define CPR3_RESULT2_SENSOR_MAX_MASK GENMASK(31, 24) ++#define CPR3_RESULT2_SENSOR_MAX_SHIFT 24 ++ ++#define CPR3_REG_IRQ_EN 0x81C ++#define CPR3_REG_IRQ_CLEAR 0x820 ++#define CPR3_REG_IRQ_STATUS 0x824 ++#define CPR3_IRQ_UP BIT(3) ++#define CPR3_IRQ_MID BIT(2) ++#define CPR3_IRQ_DOWN BIT(1) ++ ++#define CPR3_REG_TARGET_QUOT(thread, ro) \ ++ (0x840 + 0x440 * (thread) + 0x4 * (ro)) ++ ++/* Registers found only on controllers that support HW closed-loop. */ ++#define CPR3_REG_PD_THROTTLE 0xE8 ++#define CPR3_PD_THROTTLE_DISABLE 0x0 ++ ++#define CPR3_REG_HW_CLOSED_LOOP 0x3000 ++#define CPR3_HW_CLOSED_LOOP_ENABLE 0x0 ++#define CPR3_HW_CLOSED_LOOP_DISABLE 0x1 ++ ++#define CPR3_REG_CPR_TIMER_MID_CONT 0x3004 ++#define CPR3_REG_CPR_TIMER_UP_DN_CONT 0x3008 ++ ++#define CPR3_REG_LAST_MEASUREMENT 0x7F8 ++#define CPR3_LAST_MEASUREMENT_THREAD_DN_SHIFT 0 ++#define CPR3_LAST_MEASUREMENT_THREAD_UP_SHIFT 4 ++#define CPR3_LAST_MEASUREMENT_THREAD_DN(thread) \ ++ (BIT(thread) << CPR3_LAST_MEASUREMENT_THREAD_DN_SHIFT) ++#define CPR3_LAST_MEASUREMENT_THREAD_UP(thread) \ ++ (BIT(thread) << CPR3_LAST_MEASUREMENT_THREAD_UP_SHIFT) ++#define CPR3_LAST_MEASUREMENT_AGGR_DN BIT(8) ++#define CPR3_LAST_MEASUREMENT_AGGR_MID BIT(9) ++#define CPR3_LAST_MEASUREMENT_AGGR_UP BIT(10) ++#define CPR3_LAST_MEASUREMENT_VALID BIT(11) ++#define CPR3_LAST_MEASUREMENT_SAW_ERROR BIT(12) ++#define CPR3_LAST_MEASUREMENT_PD_BYPASS_MASK GENMASK(23, 16) ++#define CPR3_LAST_MEASUREMENT_PD_BYPASS_SHIFT 16 ++ ++/* CPR4 controller specific registers and bit definitions */ ++#define CPR4_REG_CPR_TIMER_CLAMP 0x10 ++#define CPR4_CPR_TIMER_CLAMP_THREAD_AGGREGATION_EN BIT(27) ++ ++#define CPR4_REG_MISC 0x700 ++#define CPR4_MISC_MARGIN_TABLE_ROW_SELECT_MASK GENMASK(23, 20) ++#define CPR4_MISC_MARGIN_TABLE_ROW_SELECT_SHIFT 20 ++#define CPR4_MISC_TEMP_SENSOR_ID_START_MASK GENMASK(27, 24) ++#define CPR4_MISC_TEMP_SENSOR_ID_START_SHIFT 24 ++#define CPR4_MISC_TEMP_SENSOR_ID_END_MASK GENMASK(31, 28) ++#define CPR4_MISC_TEMP_SENSOR_ID_END_SHIFT 28 ++ ++#define CPR4_REG_SAW_ERROR_STEP_LIMIT 0x7A4 ++#define CPR4_SAW_ERROR_STEP_LIMIT_UP_MASK GENMASK(4, 0) ++#define CPR4_SAW_ERROR_STEP_LIMIT_UP_SHIFT 0 ++#define CPR4_SAW_ERROR_STEP_LIMIT_DN_MASK GENMASK(9, 5) ++#define CPR4_SAW_ERROR_STEP_LIMIT_DN_SHIFT 5 ++ ++#define CPR4_REG_MARGIN_TEMP_CORE_TIMERS 0x7A8 ++#define CPR4_MARGIN_TEMP_CORE_TIMERS_SETTLE_VOLTAGE_COUNT_MASK GENMASK(28, 18) ++#define CPR4_MARGIN_TEMP_CORE_TIMERS_SETTLE_VOLTAGE_COUNT_SHIFT 18 ++ ++#define CPR4_REG_MARGIN_TEMP_CORE(core) (0x7AC + 0x4 * (core)) ++#define CPR4_MARGIN_TEMP_CORE_ADJ_MASK GENMASK(7, 0) ++#define CPR4_MARGIN_TEMP_CORE_ADJ_SHIFT 8 ++ ++#define CPR4_REG_MARGIN_TEMP_POINT0N1 0x7F0 ++#define CPR4_MARGIN_TEMP_POINT0_MASK GENMASK(11, 0) ++#define CPR4_MARGIN_TEMP_POINT0_SHIFT 0 ++#define CPR4_MARGIN_TEMP_POINT1_MASK GENMASK(23, 12) ++#define CPR4_MARGIN_TEMP_POINT1_SHIFT 12 ++#define CPR4_REG_MARGIN_TEMP_POINT2 0x7F4 ++#define CPR4_MARGIN_TEMP_POINT2_MASK GENMASK(11, 0) ++#define CPR4_MARGIN_TEMP_POINT2_SHIFT 0 ++ ++#define CPR4_REG_MARGIN_ADJ_CTL 0x7F8 ++#define CPR4_MARGIN_ADJ_CTL_BOOST_EN BIT(0) ++#define 
CPR4_MARGIN_ADJ_CTL_CORE_ADJ_EN BIT(1) ++#define CPR4_MARGIN_ADJ_CTL_TEMP_ADJ_EN BIT(2) ++#define CPR4_MARGIN_ADJ_CTL_TIMER_SETTLE_VOLTAGE_EN BIT(3) ++#define CPR4_MARGIN_ADJ_CTL_HW_CLOSED_LOOP_EN_MASK BIT(4) ++#define CPR4_MARGIN_ADJ_CTL_HW_CLOSED_LOOP_ENABLE BIT(4) ++#define CPR4_MARGIN_ADJ_CTL_HW_CLOSED_LOOP_DISABLE 0 ++#define CPR4_MARGIN_ADJ_CTL_PER_RO_KV_MARGIN_EN BIT(7) ++#define CPR4_MARGIN_ADJ_CTL_KV_MARGIN_ADJ_EN BIT(8) ++#define CPR4_MARGIN_ADJ_CTL_PMIC_STEP_SIZE_MASK GENMASK(16, 12) ++#define CPR4_MARGIN_ADJ_CTL_PMIC_STEP_SIZE_SHIFT 12 ++#define CPR4_MARGIN_ADJ_CTL_INITIAL_TEMP_BAND_MASK GENMASK(21, 19) ++#define CPR4_MARGIN_ADJ_CTL_INITIAL_TEMP_BAND_SHIFT 19 ++#define CPR4_MARGIN_ADJ_CTL_MAX_NUM_CORES_MASK GENMASK(25, 22) ++#define CPR4_MARGIN_ADJ_CTL_MAX_NUM_CORES_SHIFT 22 ++#define CPR4_MARGIN_ADJ_CTL_KV_MARGIN_ADJ_STEP_QUOT_MASK GENMASK(31, 26) ++#define CPR4_MARGIN_ADJ_CTL_KV_MARGIN_ADJ_STEP_QUOT_SHIFT 26 ++ ++#define CPR4_REG_CPR_MASK_THREAD(thread) (0x80C + 0x440 * (thread)) ++#define CPR4_CPR_MASK_THREAD_DISABLE_THREAD BIT(31) ++#define CPR4_CPR_MASK_THREAD_RO_MASK4THREAD_MASK GENMASK(15, 0) ++ ++/* ++ * The amount of time to wait for the CPR controller to become idle when ++ * performing an aging measurement. ++ */ ++#define CPR3_AGING_MEASUREMENT_TIMEOUT_NS 5000000 ++ ++/* ++ * The number of individual aging measurements to perform which are then ++ * averaged together in order to determine the final aging adjustment value. ++ */ ++#define CPR3_AGING_MEASUREMENT_ITERATIONS 16 ++ ++/* ++ * Aging measurements for the aged and unaged ring oscillators take place a few ++ * microseconds apart. If the vdd-supply voltage fluctuates between the two ++ * measurements, then the difference between them will be incorrect. The ++ * difference could end up too high or too low. This constant defines the ++ * number of lowest and highest measurements to ignore when averaging. ++ */ ++#define CPR3_AGING_MEASUREMENT_FILTER 3 ++ ++/* ++ * The number of times to attempt the full aging measurement sequence before ++ * declaring a measurement failure. ++ */ ++#define CPR3_AGING_RETRY_COUNT 5 ++ ++/* ++ * The maximum time to wait in microseconds for a CPR register write to ++ * complete. 
++ */ ++#define CPR3_REGISTER_WRITE_DELAY_US 200 ++ ++static DEFINE_MUTEX(cpr3_controller_list_mutex); ++static LIST_HEAD(cpr3_controller_list); ++static struct dentry *cpr3_debugfs_base; ++ ++/** ++ * cpr3_read() - read four bytes from the memory address specified ++ * @ctrl: Pointer to the CPR3 controller ++ * @offset: Offset in bytes from the CPR3 controller's base address ++ * ++ * Return: memory address value ++ */ ++static inline u32 cpr3_read(struct cpr3_controller *ctrl, u32 offset) ++{ ++ if (!ctrl->cpr_enabled) { ++ cpr3_err(ctrl, "CPR register reads are not possible when CPR clocks are disabled\n"); ++ return 0; ++ } ++ ++ return readl_relaxed(ctrl->cpr_ctrl_base + offset); ++} ++ ++/** ++ * cpr3_write() - write four bytes to the memory address specified ++ * @ctrl: Pointer to the CPR3 controller ++ * @offset: Offset in bytes from the CPR3 controller's base address ++ * @value: Value to write to the memory address ++ * ++ * Return: none ++ */ ++static inline void cpr3_write(struct cpr3_controller *ctrl, u32 offset, ++ u32 value) ++{ ++ if (!ctrl->cpr_enabled) { ++ cpr3_err(ctrl, "CPR register writes are not possible when CPR clocks are disabled\n"); ++ return; ++ } ++ ++ writel_relaxed(value, ctrl->cpr_ctrl_base + offset); ++} ++ ++/** ++ * cpr3_masked_write() - perform a read-modify-write sequence so that only ++ * masked bits are modified ++ * @ctrl: Pointer to the CPR3 controller ++ * @offset: Offset in bytes from the CPR3 controller's base address ++ * @mask: Mask identifying the bits that should be modified ++ * @value: Value to write to the memory address ++ * ++ * Return: none ++ */ ++static inline void cpr3_masked_write(struct cpr3_controller *ctrl, u32 offset, ++ u32 mask, u32 value) ++{ ++ u32 reg_val, orig_val; ++ ++ if (!ctrl->cpr_enabled) { ++ cpr3_err(ctrl, "CPR register writes are not possible when CPR clocks are disabled\n"); ++ return; ++ } ++ ++ reg_val = orig_val = readl_relaxed(ctrl->cpr_ctrl_base + offset); ++ reg_val &= ~mask; ++ reg_val |= value & mask; ++ ++ if (reg_val != orig_val) ++ writel_relaxed(reg_val, ctrl->cpr_ctrl_base + offset); ++} ++ ++/** ++ * cpr3_ctrl_loop_enable() - enable the CPR sensing loop for a given controller ++ * @ctrl: Pointer to the CPR3 controller ++ * ++ * Return: none ++ */ ++static inline void cpr3_ctrl_loop_enable(struct cpr3_controller *ctrl) ++{ ++ if (ctrl->cpr_enabled && !(ctrl->aggr_corner.sdelta ++ && ctrl->aggr_corner.sdelta->allow_boost)) ++ cpr3_masked_write(ctrl, CPR3_REG_CPR_CTL, ++ CPR3_CPR_CTL_LOOP_EN_MASK, CPR3_CPR_CTL_LOOP_ENABLE); ++} ++ ++/** ++ * cpr3_ctrl_loop_disable() - disable the CPR sensing loop for a given ++ * controller ++ * @ctrl: Pointer to the CPR3 controller ++ * ++ * Return: none ++ */ ++static inline void cpr3_ctrl_loop_disable(struct cpr3_controller *ctrl) ++{ ++ if (ctrl->cpr_enabled) ++ cpr3_masked_write(ctrl, CPR3_REG_CPR_CTL, ++ CPR3_CPR_CTL_LOOP_EN_MASK, CPR3_CPR_CTL_LOOP_DISABLE); ++} ++ ++/** ++ * cpr3_clock_enable() - prepare and enable all clocks used by this CPR3 ++ * controller ++ * @ctrl: Pointer to the CPR3 controller ++ * ++ * Return: 0 on success, errno on failure ++ */ ++static int cpr3_clock_enable(struct cpr3_controller *ctrl) ++{ ++ int rc; ++ ++ rc = clk_prepare_enable(ctrl->bus_clk); ++ if (rc) { ++ cpr3_err(ctrl, "failed to enable bus clock, rc=%d\n", rc); ++ return rc; ++ } ++ ++ rc = clk_prepare_enable(ctrl->iface_clk); ++ if (rc) { ++ cpr3_err(ctrl, "failed to enable interface clock, rc=%d\n", rc); ++ clk_disable_unprepare(ctrl->bus_clk); ++ return rc; ++ } ++ ++ rc 
= clk_prepare_enable(ctrl->core_clk); ++ if (rc) { ++ cpr3_err(ctrl, "failed to enable core clock, rc=%d\n", rc); ++ clk_disable_unprepare(ctrl->iface_clk); ++ clk_disable_unprepare(ctrl->bus_clk); ++ return rc; ++ } ++ ++ return 0; ++} ++ ++/** ++ * cpr3_clock_disable() - disable and unprepare all clocks used by this CPR3 ++ * controller ++ * @ctrl: Pointer to the CPR3 controller ++ * ++ * Return: none ++ */ ++static void cpr3_clock_disable(struct cpr3_controller *ctrl) ++{ ++ clk_disable_unprepare(ctrl->core_clk); ++ clk_disable_unprepare(ctrl->iface_clk); ++ clk_disable_unprepare(ctrl->bus_clk); ++} ++ ++/** ++ * cpr3_ctrl_clear_cpr4_config() - clear the CPR4 register configuration ++ * programmed for current aggregated corner of a given controller ++ * @ctrl: Pointer to the CPR3 controller ++ * ++ * Return: 0 on success, errno on failure ++ */ ++static inline int cpr3_ctrl_clear_cpr4_config(struct cpr3_controller *ctrl) ++{ ++ struct cpr4_sdelta *aggr_sdelta = ctrl->aggr_corner.sdelta; ++ bool cpr_enabled = ctrl->cpr_enabled; ++ int i, rc = 0; ++ ++ if (!aggr_sdelta || !(aggr_sdelta->allow_core_count_adj ++ || aggr_sdelta->allow_temp_adj || aggr_sdelta->allow_boost)) ++ /* cpr4 features are not enabled */ ++ return 0; ++ ++ /* Ensure that CPR clocks are enabled before writing to registers. */ ++ if (!cpr_enabled) { ++ rc = cpr3_clock_enable(ctrl); ++ if (rc) { ++ cpr3_err(ctrl, "clock enable failed, rc=%d\n", rc); ++ return rc; ++ } ++ ctrl->cpr_enabled = true; ++ } ++ ++ /* ++ * Clear feature enable configuration made for current ++ * aggregated corner. ++ */ ++ cpr3_masked_write(ctrl, CPR4_REG_MARGIN_ADJ_CTL, ++ CPR4_MARGIN_ADJ_CTL_MAX_NUM_CORES_MASK ++ | CPR4_MARGIN_ADJ_CTL_CORE_ADJ_EN ++ | CPR4_MARGIN_ADJ_CTL_TEMP_ADJ_EN ++ | CPR4_MARGIN_ADJ_CTL_KV_MARGIN_ADJ_EN ++ | CPR4_MARGIN_ADJ_CTL_BOOST_EN ++ | CPR4_MARGIN_ADJ_CTL_HW_CLOSED_LOOP_EN_MASK, 0); ++ ++ cpr3_masked_write(ctrl, CPR4_REG_MISC, ++ CPR4_MISC_MARGIN_TABLE_ROW_SELECT_MASK, ++ 0 << CPR4_MISC_MARGIN_TABLE_ROW_SELECT_SHIFT); ++ ++ for (i = 0; i <= aggr_sdelta->max_core_count; i++) { ++ /* Clear voltage margin adjustments programmed in TEMP_COREi */ ++ cpr3_write(ctrl, CPR4_REG_MARGIN_TEMP_CORE(i), 0); ++ } ++ ++ /* Turn off CPR clocks if they were off before this function call. */ ++ if (!cpr_enabled) { ++ cpr3_clock_disable(ctrl); ++ ctrl->cpr_enabled = false; ++ } ++ ++ return 0; ++} ++ ++/** ++ * cpr3_closed_loop_enable() - enable logical CPR closed-loop operation ++ * @ctrl: Pointer to the CPR3 controller ++ * ++ * Return: 0 on success, errno on failure ++ */ ++static int cpr3_closed_loop_enable(struct cpr3_controller *ctrl) ++{ ++ int rc; ++ ++ if (!ctrl->cpr_allowed_hw || !ctrl->cpr_allowed_sw) { ++ cpr3_err(ctrl, "cannot enable closed-loop CPR operation because it is disallowed\n"); ++ return -EPERM; ++ } else if (ctrl->cpr_enabled) { ++ /* Already enabled */ ++ return 0; ++ } else if (ctrl->cpr_suspended) { ++ /* ++ * CPR must remain disabled as the system is entering suspend. 
++ */ ++ return 0; ++ } ++ ++ rc = cpr3_clock_enable(ctrl); ++ if (rc) { ++ cpr3_err(ctrl, "unable to enable CPR clocks, rc=%d\n", rc); ++ return rc; ++ } ++ ++ ctrl->cpr_enabled = true; ++ cpr3_debug(ctrl, "CPR closed-loop operation enabled\n"); ++ ++ return 0; ++} ++ ++/** ++ * cpr3_closed_loop_disable() - disable logical CPR closed-loop operation ++ * @ctrl: Pointer to the CPR3 controller ++ * ++ * Return: 0 on success, errno on failure ++ */ ++static inline int cpr3_closed_loop_disable(struct cpr3_controller *ctrl) ++{ ++ if (!ctrl->cpr_enabled) { ++ /* Already disabled */ ++ return 0; ++ } ++ ++ cpr3_clock_disable(ctrl); ++ ctrl->cpr_enabled = false; ++ cpr3_debug(ctrl, "CPR closed-loop operation disabled\n"); ++ ++ return 0; ++} ++ ++/** ++ * cpr3_regulator_get_gcnt() - returns the GCNT register value corresponding ++ * to the clock rate and sensor time of the CPR3 controller ++ * @ctrl: Pointer to the CPR3 controller ++ * ++ * Return: GCNT value ++ */ ++static u32 cpr3_regulator_get_gcnt(struct cpr3_controller *ctrl) ++{ ++ u64 temp; ++ unsigned int remainder; ++ u32 gcnt; ++ ++ temp = (u64)ctrl->cpr_clock_rate * (u64)ctrl->sensor_time; ++ remainder = do_div(temp, 1000000000); ++ if (remainder) ++ temp++; ++ /* ++ * GCNT == 0 corresponds to a single ref clock measurement interval so ++ * offset GCNT values by 1. ++ */ ++ gcnt = temp - 1; ++ ++ return gcnt; ++} ++ ++/** ++ * cpr3_regulator_init_thread() - performs hardware initialization of CPR ++ * thread registers ++ * @thread: Pointer to the CPR3 thread ++ * ++ * CPR interface/bus clocks must be enabled before calling this function. ++ * ++ * Return: 0 on success, errno on failure ++ */ ++static int cpr3_regulator_init_thread(struct cpr3_thread *thread) ++{ ++ u32 reg; ++ ++ reg = (thread->consecutive_up << CPR3_THRESH_CONS_UP_SHIFT) ++ & CPR3_THRESH_CONS_UP_MASK; ++ reg |= (thread->consecutive_down << CPR3_THRESH_CONS_DOWN_SHIFT) ++ & CPR3_THRESH_CONS_DOWN_MASK; ++ reg |= (thread->up_threshold << CPR3_THRESH_UP_THRESH_SHIFT) ++ & CPR3_THRESH_UP_THRESH_MASK; ++ reg |= (thread->down_threshold << CPR3_THRESH_DOWN_THRESH_SHIFT) ++ & CPR3_THRESH_DOWN_THRESH_MASK; ++ ++ cpr3_write(thread->ctrl, CPR3_REG_THRESH(thread->thread_id), reg); ++ ++ /* ++ * Mask all RO's initially so that unused thread doesn't contribute ++ * to closed-loop voltage. ++ */ ++ cpr3_write(thread->ctrl, CPR3_REG_RO_MASK(thread->thread_id), ++ CPR3_RO_MASK); ++ ++ return 0; ++} ++ ++/** ++ * cpr4_regulator_init_temp_points() - performs hardware initialization of CPR4 ++ * registers to track tsen temperature data and also specify the ++ * temperature band range values to apply different voltage margins ++ * @ctrl: Pointer to the CPR3 controller ++ * ++ * CPR interface/bus clocks must be enabled before calling this function. ++ * ++ * Return: 0 on success, errno on failure ++ */ ++static int cpr4_regulator_init_temp_points(struct cpr3_controller *ctrl) ++{ ++ if (!ctrl->allow_temp_adj) ++ return 0; ++ ++ cpr3_masked_write(ctrl, CPR4_REG_MISC, ++ CPR4_MISC_TEMP_SENSOR_ID_START_MASK, ++ ctrl->temp_sensor_id_start ++ << CPR4_MISC_TEMP_SENSOR_ID_START_SHIFT); ++ ++ cpr3_masked_write(ctrl, CPR4_REG_MISC, ++ CPR4_MISC_TEMP_SENSOR_ID_END_MASK, ++ ctrl->temp_sensor_id_end ++ << CPR4_MISC_TEMP_SENSOR_ID_END_SHIFT); ++ ++ cpr3_masked_write(ctrl, CPR4_REG_MARGIN_TEMP_POINT2, ++ CPR4_MARGIN_TEMP_POINT2_MASK, ++ (ctrl->temp_band_count == 4 ? 
ctrl->temp_points[2] : 0x7FF) ++ << CPR4_MARGIN_TEMP_POINT2_SHIFT); ++ ++ cpr3_masked_write(ctrl, CPR4_REG_MARGIN_TEMP_POINT0N1, ++ CPR4_MARGIN_TEMP_POINT1_MASK, ++ (ctrl->temp_band_count >= 3 ? ctrl->temp_points[1] : 0x7FF) ++ << CPR4_MARGIN_TEMP_POINT1_SHIFT); ++ ++ cpr3_masked_write(ctrl, CPR4_REG_MARGIN_TEMP_POINT0N1, ++ CPR4_MARGIN_TEMP_POINT0_MASK, ++ (ctrl->temp_band_count >= 2 ? ctrl->temp_points[0] : 0x7FF) ++ << CPR4_MARGIN_TEMP_POINT0_SHIFT); ++ return 0; ++} ++ ++/** ++ * cpr3_regulator_init_cpr4() - performs hardware initialization at the ++ * controller and thread level required for CPR4 operation. ++ * @ctrl: Pointer to the CPR3 controller ++ * ++ * CPR interface/bus clocks must be enabled before calling this function. ++ * This function allocates sdelta structures and sdelta tables for aggregated ++ * corners of the controller and its threads. ++ * ++ * Return: 0 on success, errno on failure ++ */ ++static int cpr3_regulator_init_cpr4(struct cpr3_controller *ctrl) ++{ ++ struct cpr3_thread *thread; ++ struct cpr3_regulator *vreg; ++ struct cpr4_sdelta *sdelta; ++ int i, j, ctrl_max_core_count, thread_max_core_count, rc = 0; ++ bool ctrl_valid_sdelta, thread_valid_sdelta; ++ u32 pmic_step_size = 1; ++ int thread_id = 0; ++ u64 temp; ++ ++ if (ctrl->supports_hw_closed_loop) { ++ if (ctrl->saw_use_unit_mV) ++ pmic_step_size = ctrl->step_volt / 1000; ++ cpr3_masked_write(ctrl, CPR4_REG_MARGIN_ADJ_CTL, ++ CPR4_MARGIN_ADJ_CTL_PMIC_STEP_SIZE_MASK, ++ (pmic_step_size ++ << CPR4_MARGIN_ADJ_CTL_PMIC_STEP_SIZE_SHIFT)); ++ ++ cpr3_masked_write(ctrl, CPR4_REG_SAW_ERROR_STEP_LIMIT, ++ CPR4_SAW_ERROR_STEP_LIMIT_DN_MASK, ++ (ctrl->down_error_step_limit ++ << CPR4_SAW_ERROR_STEP_LIMIT_DN_SHIFT)); ++ ++ cpr3_masked_write(ctrl, CPR4_REG_SAW_ERROR_STEP_LIMIT, ++ CPR4_SAW_ERROR_STEP_LIMIT_UP_MASK, ++ (ctrl->up_error_step_limit ++ << CPR4_SAW_ERROR_STEP_LIMIT_UP_SHIFT)); ++ ++ /* ++ * Enable thread aggregation regardless of which threads are ++ * enabled or disabled. ++ */ ++ cpr3_masked_write(ctrl, CPR4_REG_CPR_TIMER_CLAMP, ++ CPR4_CPR_TIMER_CLAMP_THREAD_AGGREGATION_EN, ++ CPR4_CPR_TIMER_CLAMP_THREAD_AGGREGATION_EN); ++ ++ switch (ctrl->thread_count) { ++ case 0: ++ /* Disable both threads */ ++ cpr3_masked_write(ctrl, CPR4_REG_CPR_MASK_THREAD(0), ++ CPR4_CPR_MASK_THREAD_DISABLE_THREAD ++ | CPR4_CPR_MASK_THREAD_RO_MASK4THREAD_MASK, ++ CPR4_CPR_MASK_THREAD_DISABLE_THREAD ++ | CPR4_CPR_MASK_THREAD_RO_MASK4THREAD_MASK); ++ ++ cpr3_masked_write(ctrl, CPR4_REG_CPR_MASK_THREAD(1), ++ CPR4_CPR_MASK_THREAD_DISABLE_THREAD ++ | CPR4_CPR_MASK_THREAD_RO_MASK4THREAD_MASK, ++ CPR4_CPR_MASK_THREAD_DISABLE_THREAD ++ | CPR4_CPR_MASK_THREAD_RO_MASK4THREAD_MASK); ++ break; ++ case 1: ++ /* Disable unused thread */ ++ thread_id = ctrl->thread[0].thread_id ? 0 : 1; ++ cpr3_masked_write(ctrl, ++ CPR4_REG_CPR_MASK_THREAD(thread_id), ++ CPR4_CPR_MASK_THREAD_DISABLE_THREAD ++ | CPR4_CPR_MASK_THREAD_RO_MASK4THREAD_MASK, ++ CPR4_CPR_MASK_THREAD_DISABLE_THREAD ++ | CPR4_CPR_MASK_THREAD_RO_MASK4THREAD_MASK); ++ break; ++ } ++ } ++ ++ if (!ctrl->allow_core_count_adj && !ctrl->allow_temp_adj ++ && !ctrl->allow_boost) { ++ /* ++ * Skip below configuration as none of the features ++ * are enabled. 
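++ * The features in question are per-online-core adjustment, per-temperature
++ * adjustment and voltage boost.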
++ */ ++ return rc; ++ } ++ ++ if (ctrl->supports_hw_closed_loop) ++ cpr3_masked_write(ctrl, CPR4_REG_MARGIN_ADJ_CTL, ++ CPR4_MARGIN_ADJ_CTL_TIMER_SETTLE_VOLTAGE_EN, ++ CPR4_MARGIN_ADJ_CTL_TIMER_SETTLE_VOLTAGE_EN); ++ ++ cpr3_masked_write(ctrl, CPR4_REG_MARGIN_ADJ_CTL, ++ CPR4_MARGIN_ADJ_CTL_KV_MARGIN_ADJ_STEP_QUOT_MASK, ++ ctrl->step_quot_fixed ++ << CPR4_MARGIN_ADJ_CTL_KV_MARGIN_ADJ_STEP_QUOT_SHIFT); ++ ++ cpr3_masked_write(ctrl, CPR4_REG_MARGIN_ADJ_CTL, ++ CPR4_MARGIN_ADJ_CTL_PER_RO_KV_MARGIN_EN, ++ (ctrl->use_dynamic_step_quot ++ ? CPR4_MARGIN_ADJ_CTL_PER_RO_KV_MARGIN_EN : 0)); ++ ++ cpr3_masked_write(ctrl, CPR4_REG_MARGIN_ADJ_CTL, ++ CPR4_MARGIN_ADJ_CTL_INITIAL_TEMP_BAND_MASK, ++ ctrl->initial_temp_band ++ << CPR4_MARGIN_ADJ_CTL_INITIAL_TEMP_BAND_SHIFT); ++ ++ rc = cpr4_regulator_init_temp_points(ctrl); ++ if (rc) { ++ cpr3_err(ctrl, "initialize temp points failed, rc=%d\n", rc); ++ return rc; ++ } ++ ++ if (ctrl->voltage_settling_time) { ++ /* ++ * Configure the settling timer used to account for ++ * one VDD supply step. ++ */ ++ temp = (u64)ctrl->cpr_clock_rate ++ * (u64)ctrl->voltage_settling_time; ++ do_div(temp, 1000000000); ++ cpr3_masked_write(ctrl, CPR4_REG_MARGIN_TEMP_CORE_TIMERS, ++ CPR4_MARGIN_TEMP_CORE_TIMERS_SETTLE_VOLTAGE_COUNT_MASK, ++ temp ++ << CPR4_MARGIN_TEMP_CORE_TIMERS_SETTLE_VOLTAGE_COUNT_SHIFT); ++ } ++ ++ /* ++ * Allocate memory for cpr4_sdelta structure and sdelta table for ++ * controller aggregated corner by finding the maximum core count ++ * used by any cpr3 regulators. ++ */ ++ ctrl_max_core_count = 1; ++ ctrl_valid_sdelta = false; ++ for (i = 0; i < ctrl->thread_count; i++) { ++ thread = &ctrl->thread[i]; ++ ++ /* ++ * Allocate memory for cpr4_sdelta structure and sdelta table ++ * for thread aggregated corner by finding the maximum core ++ * count used by any cpr3 regulators of the thread. 
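++ * The resulting table holds one margin entry per (online core count,
++ * temperature band) pair.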
++ */ ++ thread_max_core_count = 1; ++ thread_valid_sdelta = false; ++ for (j = 0; j < thread->vreg_count; j++) { ++ vreg = &thread->vreg[j]; ++ thread_max_core_count = max(thread_max_core_count, ++ vreg->max_core_count); ++ thread_valid_sdelta |= (vreg->allow_core_count_adj ++ | vreg->allow_temp_adj ++ | vreg->allow_boost); ++ } ++ if (thread_valid_sdelta) { ++ sdelta = devm_kzalloc(ctrl->dev, sizeof(*sdelta), ++ GFP_KERNEL); ++ if (!sdelta) ++ return -ENOMEM; ++ ++ sdelta->table = devm_kcalloc(ctrl->dev, ++ thread_max_core_count ++ * ctrl->temp_band_count, ++ sizeof(*sdelta->table), ++ GFP_KERNEL); ++ if (!sdelta->table) ++ return -ENOMEM; ++ ++ sdelta->boost_table = devm_kcalloc(ctrl->dev, ++ ctrl->temp_band_count, ++ sizeof(*sdelta->boost_table), ++ GFP_KERNEL); ++ if (!sdelta->boost_table) ++ return -ENOMEM; ++ ++ thread->aggr_corner.sdelta = sdelta; ++ } ++ ++ ctrl_valid_sdelta |= thread_valid_sdelta; ++ ctrl_max_core_count = max(ctrl_max_core_count, ++ thread_max_core_count); ++ } ++ ++ if (ctrl_valid_sdelta) { ++ sdelta = devm_kzalloc(ctrl->dev, sizeof(*sdelta), GFP_KERNEL); ++ if (!sdelta) ++ return -ENOMEM; ++ ++ sdelta->table = devm_kcalloc(ctrl->dev, ctrl_max_core_count ++ * ctrl->temp_band_count, ++ sizeof(*sdelta->table), GFP_KERNEL); ++ if (!sdelta->table) ++ return -ENOMEM; ++ ++ sdelta->boost_table = devm_kcalloc(ctrl->dev, ++ ctrl->temp_band_count, ++ sizeof(*sdelta->boost_table), ++ GFP_KERNEL); ++ if (!sdelta->boost_table) ++ return -ENOMEM; ++ ++ ctrl->aggr_corner.sdelta = sdelta; ++ } ++ ++ return 0; ++} ++ ++/** ++ * cpr3_write_temp_core_margin() - programs hardware SDELTA registers with ++ * the voltage margin adjustments that need to be applied for ++ * different online core-count and temperature bands. ++ * @ctrl: Pointer to the CPR3 controller ++ * @addr: SDELTA register address ++ * @temp_core_adj: Array of voltage margin values for different temperature ++ * bands. ++ * ++ * CPR interface/bus clocks must be enabled before calling this function. ++ * ++ * Return: none ++ */ ++static void cpr3_write_temp_core_margin(struct cpr3_controller *ctrl, ++ int addr, int *temp_core_adj) ++{ ++ int i, margin_steps; ++ u32 reg = 0; ++ ++ for (i = 0; i < ctrl->temp_band_count; i++) { ++ margin_steps = max(min(temp_core_adj[i], 127), -128); ++ reg |= (margin_steps & CPR4_MARGIN_TEMP_CORE_ADJ_MASK) << ++ (i * CPR4_MARGIN_TEMP_CORE_ADJ_SHIFT); ++ } ++ ++ cpr3_write(ctrl, addr, reg); ++ cpr3_debug(ctrl, "sdelta offset=0x%08x, val=0x%08x\n", addr, reg); ++} ++ ++/** ++ * cpr3_controller_program_sdelta() - programs hardware SDELTA registers with ++ * the voltage margin adjustments that need to be applied at ++ * different online core-count and temperature bands. Also, ++ * programs hardware register configuration for per-online-core ++ * and per-temperature based adjustments. ++ * @ctrl: Pointer to the CPR3 controller ++ * ++ * CPR interface/bus clocks must be enabled before calling this function. 
++ * ++ * Return: 0 on success, errno on failure ++ */ ++static int cpr3_controller_program_sdelta(struct cpr3_controller *ctrl) ++{ ++ struct cpr3_corner *corner = &ctrl->aggr_corner; ++ struct cpr4_sdelta *sdelta = corner->sdelta; ++ int i, index, max_core_count, rc = 0; ++ bool cpr_enabled = ctrl->cpr_enabled; ++ ++ if (!sdelta) ++ /* cpr4_sdelta not defined for current aggregated corner */ ++ return 0; ++ ++ if (ctrl->supports_hw_closed_loop && ctrl->cpr_enabled) { ++ cpr3_masked_write(ctrl, CPR4_REG_MARGIN_ADJ_CTL, ++ CPR4_MARGIN_ADJ_CTL_HW_CLOSED_LOOP_EN_MASK, ++ (ctrl->use_hw_closed_loop && !sdelta->allow_boost) ++ ? CPR4_MARGIN_ADJ_CTL_HW_CLOSED_LOOP_ENABLE : 0); ++ } ++ ++ if (!sdelta->allow_core_count_adj && !sdelta->allow_temp_adj ++ && !sdelta->allow_boost) { ++ /* ++ * Per-online-core, per-temperature and voltage boost ++ * adjustments are disabled for this aggregation corner. ++ */ ++ return 0; ++ } ++ ++ /* Ensure that CPR clocks are enabled before writing to registers. */ ++ if (!cpr_enabled) { ++ rc = cpr3_clock_enable(ctrl); ++ if (rc) { ++ cpr3_err(ctrl, "clock enable failed, rc=%d\n", rc); ++ return rc; ++ } ++ ctrl->cpr_enabled = true; ++ } ++ ++ max_core_count = sdelta->max_core_count; ++ ++ if (sdelta->allow_core_count_adj || sdelta->allow_temp_adj) { ++ if (sdelta->allow_core_count_adj) { ++ /* Program TEMP_CORE0 to same margins as TEMP_CORE1 */ ++ cpr3_write_temp_core_margin(ctrl, ++ CPR4_REG_MARGIN_TEMP_CORE(0), ++ &sdelta->table[0]); ++ } ++ ++ for (i = 0; i < max_core_count; i++) { ++ index = i * sdelta->temp_band_count; ++ /* ++ * Program TEMP_COREi with voltage margin adjustments ++ * that need to be applied when the number of cores ++ * becomes i. ++ */ ++ cpr3_write_temp_core_margin(ctrl, ++ CPR4_REG_MARGIN_TEMP_CORE( ++ sdelta->allow_core_count_adj ++ ? i + 1 : max_core_count), ++ &sdelta->table[index]); ++ } ++ } ++ ++ if (sdelta->allow_boost) { ++ /* Program only boost_num_cores row of SDELTA */ ++ cpr3_write_temp_core_margin(ctrl, ++ CPR4_REG_MARGIN_TEMP_CORE(sdelta->boost_num_cores), ++ &sdelta->boost_table[0]); ++ } ++ ++ if (!sdelta->allow_core_count_adj && !sdelta->allow_boost) { ++ cpr3_masked_write(ctrl, CPR4_REG_MISC, ++ CPR4_MISC_MARGIN_TABLE_ROW_SELECT_MASK, ++ max_core_count ++ << CPR4_MISC_MARGIN_TABLE_ROW_SELECT_SHIFT); ++ } ++ ++ cpr3_masked_write(ctrl, CPR4_REG_MARGIN_ADJ_CTL, ++ CPR4_MARGIN_ADJ_CTL_MAX_NUM_CORES_MASK ++ | CPR4_MARGIN_ADJ_CTL_CORE_ADJ_EN ++ | CPR4_MARGIN_ADJ_CTL_TEMP_ADJ_EN ++ | CPR4_MARGIN_ADJ_CTL_KV_MARGIN_ADJ_EN ++ | CPR4_MARGIN_ADJ_CTL_BOOST_EN, ++ max_core_count << CPR4_MARGIN_ADJ_CTL_MAX_NUM_CORES_SHIFT ++ | ((sdelta->allow_core_count_adj || sdelta->allow_boost) ++ ? CPR4_MARGIN_ADJ_CTL_CORE_ADJ_EN : 0) ++ | ((sdelta->allow_temp_adj && ctrl->supports_hw_closed_loop) ++ ? CPR4_MARGIN_ADJ_CTL_TEMP_ADJ_EN : 0) ++ | (((ctrl->use_hw_closed_loop && !sdelta->allow_boost) ++ || !ctrl->supports_hw_closed_loop) ++ ? CPR4_MARGIN_ADJ_CTL_KV_MARGIN_ADJ_EN : 0) ++ | (sdelta->allow_boost ++ ? CPR4_MARGIN_ADJ_CTL_BOOST_EN : 0)); ++ ++ /* ++ * Ensure that all previous CPR register writes have completed before ++ * continuing. ++ */ ++ mb(); ++ ++ /* Turn off CPR clocks if they were off before this function call. 
*/ ++ if (!cpr_enabled) { ++ cpr3_clock_disable(ctrl); ++ ctrl->cpr_enabled = false; ++ } ++ ++ return 0; ++} ++ ++/** ++ * cpr3_regulator_init_ctrl() - performs hardware initialization of CPR ++ * controller registers ++ * @ctrl: Pointer to the CPR3 controller ++ * ++ * Return: 0 on success, errno on failure ++ */ ++static int cpr3_regulator_init_ctrl(struct cpr3_controller *ctrl) ++{ ++ int i, j, k, m, rc; ++ u32 ro_used = 0; ++ u32 gcnt, cont_dly, up_down_dly, val; ++ u64 temp; ++ char *mode; ++ ++ if (ctrl->core_clk) { ++ rc = clk_set_rate(ctrl->core_clk, ctrl->cpr_clock_rate); ++ if (rc) { ++ cpr3_err(ctrl, "clk_set_rate(core_clk, %u) failed, rc=%d\n", ++ ctrl->cpr_clock_rate, rc); ++ return rc; ++ } ++ } ++ ++ rc = cpr3_clock_enable(ctrl); ++ if (rc) { ++ cpr3_err(ctrl, "clock enable failed, rc=%d\n", rc); ++ return rc; ++ } ++ ctrl->cpr_enabled = true; ++ ++ /* Find all RO's used by any corner of any regulator. */ ++ for (i = 0; i < ctrl->thread_count; i++) ++ for (j = 0; j < ctrl->thread[i].vreg_count; j++) ++ for (k = 0; k < ctrl->thread[i].vreg[j].corner_count; ++ k++) ++ for (m = 0; m < CPR3_RO_COUNT; m++) ++ if (ctrl->thread[i].vreg[j].corner[k]. ++ target_quot[m]) ++ ro_used |= BIT(m); ++ ++ /* Configure the GCNT of the RO's that will be used */ ++ gcnt = cpr3_regulator_get_gcnt(ctrl); ++ for (i = 0; i < CPR3_RO_COUNT; i++) ++ if (ro_used & BIT(i)) ++ cpr3_write(ctrl, CPR3_REG_GCNT(i), gcnt); ++ ++ /* Configure the loop delay time */ ++ temp = (u64)ctrl->cpr_clock_rate * (u64)ctrl->loop_time; ++ do_div(temp, 1000000000); ++ cont_dly = temp; ++ if (ctrl->supports_hw_closed_loop ++ && ctrl->ctrl_type == CPR_CTRL_TYPE_CPR3) ++ cpr3_write(ctrl, CPR3_REG_CPR_TIMER_MID_CONT, cont_dly); ++ else ++ cpr3_write(ctrl, CPR3_REG_CPR_TIMER_AUTO_CONT, cont_dly); ++ ++ if (ctrl->ctrl_type == CPR_CTRL_TYPE_CPR3) { ++ temp = (u64)ctrl->cpr_clock_rate * ++ (u64)ctrl->up_down_delay_time; ++ do_div(temp, 1000000000); ++ up_down_dly = temp; ++ if (ctrl->supports_hw_closed_loop) ++ cpr3_write(ctrl, CPR3_REG_CPR_TIMER_UP_DN_CONT, ++ up_down_dly); ++ cpr3_debug(ctrl, "up_down_dly=%u, up_down_delay_time=%u ns\n", ++ up_down_dly, ctrl->up_down_delay_time); ++ } ++ ++ cpr3_debug(ctrl, "cpr_clock_rate=%u HZ, sensor_time=%u ns, loop_time=%u ns, gcnt=%u, cont_dly=%u\n", ++ ctrl->cpr_clock_rate, ctrl->sensor_time, ctrl->loop_time, ++ gcnt, cont_dly); ++ ++ /* Configure CPR sensor operation */ ++ val = (ctrl->idle_clocks << CPR3_CPR_CTL_IDLE_CLOCKS_SHIFT) ++ & CPR3_CPR_CTL_IDLE_CLOCKS_MASK; ++ val |= (ctrl->count_mode << CPR3_CPR_CTL_COUNT_MODE_SHIFT) ++ & CPR3_CPR_CTL_COUNT_MODE_MASK; ++ val |= (ctrl->count_repeat << CPR3_CPR_CTL_COUNT_REPEAT_SHIFT) ++ & CPR3_CPR_CTL_COUNT_REPEAT_MASK; ++ cpr3_write(ctrl, CPR3_REG_CPR_CTL, val); ++ ++ cpr3_debug(ctrl, "idle_clocks=%u, count_mode=%u, count_repeat=%u; CPR_CTL=0x%08X\n", ++ ctrl->idle_clocks, ctrl->count_mode, ctrl->count_repeat, val); ++ ++ /* Configure CPR default step quotients */ ++ val = (ctrl->step_quot_init_min << CPR3_CPR_STEP_QUOT_MIN_SHIFT) ++ & CPR3_CPR_STEP_QUOT_MIN_MASK; ++ val |= (ctrl->step_quot_init_max << CPR3_CPR_STEP_QUOT_MAX_SHIFT) ++ & CPR3_CPR_STEP_QUOT_MAX_MASK; ++ cpr3_write(ctrl, CPR3_REG_CPR_STEP_QUOT, val); ++ ++ cpr3_debug(ctrl, "step_quot_min=%u, step_quot_max=%u; STEP_QUOT=0x%08X\n", ++ ctrl->step_quot_init_min, ctrl->step_quot_init_max, val); ++ ++ /* Configure the CPR sensor ownership */ ++ for (i = 0; i < ctrl->sensor_count; i++) ++ cpr3_write(ctrl, CPR3_REG_SENSOR_OWNER(i), ++ ctrl->sensor_owner[i]); ++ ++ /* Configure 
per-thread registers */ ++ for (i = 0; i < ctrl->thread_count; i++) { ++ rc = cpr3_regulator_init_thread(&ctrl->thread[i]); ++ if (rc) { ++ cpr3_err(ctrl, "CPR thread register initialization failed, rc=%d\n", ++ rc); ++ return rc; ++ } ++ } ++ ++ if (ctrl->supports_hw_closed_loop) { ++ if (ctrl->ctrl_type == CPR_CTRL_TYPE_CPR4) { ++ cpr3_masked_write(ctrl, CPR4_REG_MARGIN_ADJ_CTL, ++ CPR4_MARGIN_ADJ_CTL_HW_CLOSED_LOOP_EN_MASK, ++ ctrl->use_hw_closed_loop ++ ? CPR4_MARGIN_ADJ_CTL_HW_CLOSED_LOOP_ENABLE ++ : CPR4_MARGIN_ADJ_CTL_HW_CLOSED_LOOP_DISABLE); ++ } else if (ctrl->ctrl_type == CPR_CTRL_TYPE_CPR3) { ++ cpr3_write(ctrl, CPR3_REG_HW_CLOSED_LOOP, ++ ctrl->use_hw_closed_loop ++ ? CPR3_HW_CLOSED_LOOP_ENABLE ++ : CPR3_HW_CLOSED_LOOP_DISABLE); ++ ++ cpr3_debug(ctrl, "PD_THROTTLE=0x%08X\n", ++ ctrl->proc_clock_throttle); ++ } ++ ++ if ((ctrl->use_hw_closed_loop || ++ ctrl->ctrl_type == CPR_CTRL_TYPE_CPR4) && ++ ctrl->vdd_limit_regulator) { ++ rc = regulator_enable(ctrl->vdd_limit_regulator); ++ if (rc) { ++ cpr3_err(ctrl, "CPR limit regulator enable failed, rc=%d\n", ++ rc); ++ return rc; ++ } ++ } ++ } ++ ++ if (ctrl->ctrl_type == CPR_CTRL_TYPE_CPR4) { ++ rc = cpr3_regulator_init_cpr4(ctrl); ++ if (rc) { ++ cpr3_err(ctrl, "CPR4-specific controller initialization failed, rc=%d\n", ++ rc); ++ return rc; ++ } ++ } ++ ++ /* Ensure that all register writes complete before disabling clocks. */ ++ wmb(); ++ ++ cpr3_clock_disable(ctrl); ++ ctrl->cpr_enabled = false; ++ ++ if (!ctrl->cpr_allowed_sw || !ctrl->cpr_allowed_hw) ++ mode = "open-loop"; ++ else if (ctrl->supports_hw_closed_loop) ++ mode = ctrl->use_hw_closed_loop ++ ? "HW closed-loop" : "SW closed-loop"; ++ else ++ mode = "closed-loop"; ++ ++ cpr3_info(ctrl, "Default CPR mode = %s", mode); ++ ++ return 0; ++} ++ ++/** ++ * cpr3_regulator_set_target_quot() - configure the target quotient for each ++ * RO of the CPR3 thread and set the RO mask ++ * @thread: Pointer to the CPR3 thread ++ * ++ * Return: none ++ */ ++static void cpr3_regulator_set_target_quot(struct cpr3_thread *thread) ++{ ++ u32 new_quot, last_quot; ++ int i; ++ ++ if (thread->aggr_corner.ro_mask == CPR3_RO_MASK ++ && thread->last_closed_loop_aggr_corner.ro_mask == CPR3_RO_MASK) { ++ /* Avoid writing target quotients since all RO's are masked. */ ++ return; ++ } else if (thread->aggr_corner.ro_mask == CPR3_RO_MASK) { ++ cpr3_write(thread->ctrl, CPR3_REG_RO_MASK(thread->thread_id), ++ CPR3_RO_MASK); ++ thread->last_closed_loop_aggr_corner.ro_mask = CPR3_RO_MASK; ++ /* ++ * Only the RO_MASK register needs to be written since all ++ * RO's are masked. 
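++ * The target quotient registers are left untouched; they are
++ * reprogrammed as needed once at least one RO is unmasked again.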
++ */ ++ return; ++ } else if (thread->aggr_corner.ro_mask ++ != thread->last_closed_loop_aggr_corner.ro_mask) { ++ cpr3_write(thread->ctrl, CPR3_REG_RO_MASK(thread->thread_id), ++ thread->aggr_corner.ro_mask); ++ } ++ ++ for (i = 0; i < CPR3_RO_COUNT; i++) { ++ new_quot = thread->aggr_corner.target_quot[i]; ++ last_quot = thread->last_closed_loop_aggr_corner.target_quot[i]; ++ if (new_quot != last_quot) ++ cpr3_write(thread->ctrl, ++ CPR3_REG_TARGET_QUOT(thread->thread_id, i), ++ new_quot); ++ } ++ ++ thread->last_closed_loop_aggr_corner = thread->aggr_corner; ++ ++ return; ++} ++ ++/** ++ * cpr3_update_vreg_closed_loop_volt() - update the last known settled ++ * closed loop voltage for a CPR3 regulator ++ * @vreg: Pointer to the CPR3 regulator ++ * @vdd_volt: Last known settled voltage in microvolts for the ++ * VDD supply ++ * @reg_last_measurement: Value read from the LAST_MEASUREMENT register ++ * ++ * Return: none ++ */ ++static void cpr3_update_vreg_closed_loop_volt(struct cpr3_regulator *vreg, ++ int vdd_volt, u32 reg_last_measurement) ++{ ++ bool step_dn, step_up, aggr_step_up, aggr_step_dn, aggr_step_mid; ++ bool valid, pd_valid, saw_error; ++ struct cpr3_controller *ctrl = vreg->thread->ctrl; ++ struct cpr3_corner *corner; ++ u32 id; ++ ++ if (vreg->last_closed_loop_corner == CPR3_REGULATOR_CORNER_INVALID) ++ return; ++ else ++ corner = &vreg->corner[vreg->last_closed_loop_corner]; ++ ++ if (vreg->thread->last_closed_loop_aggr_corner.ro_mask ++ == CPR3_RO_MASK || !vreg->aggregated) { ++ return; ++ } else if (!ctrl->cpr_enabled || !ctrl->last_corner_was_closed_loop) { ++ return; ++ } else if (ctrl->thread_count == 1 ++ && vdd_volt >= corner->floor_volt ++ && vdd_volt <= corner->ceiling_volt) { ++ corner->last_volt = vdd_volt; ++ cpr3_debug(vreg, "last_volt updated: last_volt[%d]=%d, ceiling_volt[%d]=%d, floor_volt[%d]=%d\n", ++ vreg->last_closed_loop_corner, corner->last_volt, ++ vreg->last_closed_loop_corner, ++ corner->ceiling_volt, ++ vreg->last_closed_loop_corner, ++ corner->floor_volt); ++ return; ++ } else if (!ctrl->supports_hw_closed_loop) { ++ return; ++ } else if (ctrl->ctrl_type != CPR_CTRL_TYPE_CPR3) { ++ corner->last_volt = vdd_volt; ++ cpr3_debug(vreg, "last_volt updated: last_volt[%d]=%d, ceiling_volt[%d]=%d, floor_volt[%d]=%d\n", ++ vreg->last_closed_loop_corner, corner->last_volt, ++ vreg->last_closed_loop_corner, ++ corner->ceiling_volt, ++ vreg->last_closed_loop_corner, ++ corner->floor_volt); ++ return; ++ } ++ ++ /* CPR clocks are on and HW closed loop is supported */ ++ valid = !!(reg_last_measurement & CPR3_LAST_MEASUREMENT_VALID); ++ if (!valid) { ++ cpr3_debug(vreg, "CPR_LAST_VALID_MEASUREMENT=0x%X valid bit not set\n", ++ reg_last_measurement); ++ return; ++ } ++ ++ id = vreg->thread->thread_id; ++ ++ step_dn ++ = !!(reg_last_measurement & CPR3_LAST_MEASUREMENT_THREAD_DN(id)); ++ step_up ++ = !!(reg_last_measurement & CPR3_LAST_MEASUREMENT_THREAD_UP(id)); ++ aggr_step_dn = !!(reg_last_measurement & CPR3_LAST_MEASUREMENT_AGGR_DN); ++ aggr_step_mid ++ = !!(reg_last_measurement & CPR3_LAST_MEASUREMENT_AGGR_MID); ++ aggr_step_up = !!(reg_last_measurement & CPR3_LAST_MEASUREMENT_AGGR_UP); ++ saw_error = !!(reg_last_measurement & CPR3_LAST_MEASUREMENT_SAW_ERROR); ++ pd_valid ++ = !((((reg_last_measurement & CPR3_LAST_MEASUREMENT_PD_BYPASS_MASK) ++ >> CPR3_LAST_MEASUREMENT_PD_BYPASS_SHIFT) ++ & vreg->pd_bypass_mask) == vreg->pd_bypass_mask); ++ ++ if (!pd_valid) { ++ cpr3_debug(vreg, "CPR_LAST_VALID_MEASUREMENT=0x%X, all power domains bypassed\n", ++ 
reg_last_measurement); ++ return; ++ } else if (step_dn && step_up) { ++ cpr3_err(vreg, "both up and down status bits set, CPR_LAST_VALID_MEASUREMENT=0x%X\n", ++ reg_last_measurement); ++ return; ++ } else if (aggr_step_dn && step_dn && vdd_volt < corner->last_volt ++ && vdd_volt >= corner->floor_volt) { ++ corner->last_volt = vdd_volt; ++ } else if (aggr_step_up && step_up && vdd_volt > corner->last_volt ++ && vdd_volt <= corner->ceiling_volt) { ++ corner->last_volt = vdd_volt; ++ } else if (aggr_step_mid ++ && vdd_volt >= corner->floor_volt ++ && vdd_volt <= corner->ceiling_volt) { ++ corner->last_volt = vdd_volt; ++ } else if (saw_error && (vdd_volt == corner->ceiling_volt ++ || vdd_volt == corner->floor_volt)) { ++ corner->last_volt = vdd_volt; ++ } else { ++ cpr3_debug(vreg, "last_volt not updated: last_volt[%d]=%d, ceiling_volt[%d]=%d, floor_volt[%d]=%d, vdd_volt=%d, CPR_LAST_VALID_MEASUREMENT=0x%X\n", ++ vreg->last_closed_loop_corner, corner->last_volt, ++ vreg->last_closed_loop_corner, ++ corner->ceiling_volt, ++ vreg->last_closed_loop_corner, corner->floor_volt, ++ vdd_volt, reg_last_measurement); ++ return; ++ } ++ ++ cpr3_debug(vreg, "last_volt updated: last_volt[%d]=%d, ceiling_volt[%d]=%d, floor_volt[%d]=%d, CPR_LAST_VALID_MEASUREMENT=0x%X\n", ++ vreg->last_closed_loop_corner, corner->last_volt, ++ vreg->last_closed_loop_corner, corner->ceiling_volt, ++ vreg->last_closed_loop_corner, corner->floor_volt, ++ reg_last_measurement); ++} ++ ++/** ++ * cpr3_regulator_mem_acc_bhs_used() - determines if mem-acc regulators powered ++ * through a BHS are associated with the CPR3 controller or any of ++ * the CPR3 regulators it controls. ++ * @ctrl: Pointer to the CPR3 controller ++ * ++ * This function determines if the CPR3 controller or any of its CPR3 regulators ++ * need to manage mem-acc regulators that are currently powered through a BHS ++ * and whose corner selection is based upon a particular voltage threshold. ++ * ++ * Return: true or false ++ */ ++static bool cpr3_regulator_mem_acc_bhs_used(struct cpr3_controller *ctrl) ++{ ++ struct cpr3_regulator *vreg; ++ int i, j; ++ ++ if (!ctrl->mem_acc_threshold_volt) ++ return false; ++ ++ if (ctrl->mem_acc_regulator) ++ return true; ++ ++ for (i = 0; i < ctrl->thread_count; i++) { ++ for (j = 0; j < ctrl->thread[i].vreg_count; j++) { ++ vreg = &ctrl->thread[i].vreg[j]; ++ ++ if (vreg->mem_acc_regulator) ++ return true; ++ } ++ } ++ ++ return false; ++} ++ ++/** ++ * cpr3_regulator_config_bhs_mem_acc() - configure the mem-acc regulator ++ * settings for hardware blocks currently powered through the BHS. ++ * @ctrl: Pointer to the CPR3 controller ++ * @new_volt: New voltage in microvolts that VDD supply needs to ++ * end up at ++ * @last_volt: Pointer to the last known voltage in microvolts for the ++ * VDD supply ++ * @aggr_corner: Pointer to the CPR3 corner which corresponds to the max ++ * corner aggregated from all CPR3 threads managed by the ++ * CPR3 controller ++ * ++ * This function programs the mem-acc regulator corners for CPR3 regulators ++ * whose LDO regulators are in bypassed state. The function also handles ++ * CPR3 controllers which utilize mem-acc regulators that operate independently ++ * from the LDO hardware and that must be programmed when the VDD supply ++ * crosses a particular voltage threshold. ++ * ++ * Return: 0 on success, errno on failure. If the VDD supply voltage is ++ * modified, last_volt is updated to reflect the new voltage setpoint. 
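++ * ++ * Illustrative example (hypothetical values): with a mem-acc threshold of ++ * 850000 uV, raising the reference voltage from 800000 uV to 900000 uV ++ * crosses the threshold, so VDD is first moved to a safe voltage of at least ++ * 850000 uV and the mem-acc regulators are switched to the high corner ++ * before the final voltage is applied.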
++ */ ++static int cpr3_regulator_config_bhs_mem_acc(struct cpr3_controller *ctrl, ++ int new_volt, int *last_volt, ++ struct cpr3_corner *aggr_corner) ++{ ++ struct cpr3_regulator *vreg; ++ int i, j, rc, mem_acc_corn, safe_volt; ++ int mem_acc_volt = ctrl->mem_acc_threshold_volt; ++ int ref_volt; ++ ++ if (!cpr3_regulator_mem_acc_bhs_used(ctrl)) ++ return 0; ++ ++ ref_volt = ctrl->use_hw_closed_loop ? aggr_corner->floor_volt : ++ new_volt; ++ ++ if (((*last_volt < mem_acc_volt && mem_acc_volt <= ref_volt) || ++ (*last_volt >= mem_acc_volt && mem_acc_volt > ref_volt))) { ++ if (ref_volt < *last_volt) ++ safe_volt = max(mem_acc_volt, aggr_corner->last_volt); ++ else ++ safe_volt = max(mem_acc_volt, *last_volt); ++ ++ rc = regulator_set_voltage(ctrl->vdd_regulator, safe_volt, ++ new_volt < *last_volt ? ++ ctrl->aggr_corner.ceiling_volt : ++ new_volt); ++ if (rc) { ++ cpr3_err(ctrl, "regulator_set_voltage(vdd) == %d failed, rc=%d\n", ++ safe_volt, rc); ++ return rc; ++ } ++ ++ *last_volt = safe_volt; ++ ++ mem_acc_corn = ref_volt < mem_acc_volt ? ++ ctrl->mem_acc_corner_map[CPR3_MEM_ACC_LOW_CORNER] : ++ ctrl->mem_acc_corner_map[CPR3_MEM_ACC_HIGH_CORNER]; ++ ++ if (ctrl->mem_acc_regulator) { ++ rc = regulator_set_voltage(ctrl->mem_acc_regulator, ++ mem_acc_corn, mem_acc_corn); ++ if (rc) { ++ cpr3_err(ctrl, "regulator_set_voltage(mem_acc) == %d failed, rc=%d\n", ++ mem_acc_corn, rc); ++ return rc; ++ } ++ } ++ ++ for (i = 0; i < ctrl->thread_count; i++) { ++ for (j = 0; j < ctrl->thread[i].vreg_count; j++) { ++ vreg = &ctrl->thread[i].vreg[j]; ++ ++ if (!vreg->mem_acc_regulator) ++ continue; ++ ++ rc = regulator_set_voltage( ++ vreg->mem_acc_regulator, mem_acc_corn, ++ mem_acc_corn); ++ if (rc) { ++ cpr3_err(vreg, "regulator_set_voltage(mem_acc) == %d failed, rc=%d\n", ++ mem_acc_corn, rc); ++ return rc; ++ } ++ } ++ } ++ } ++ ++ return 0; ++} ++ ++/** ++ * cpr3_regulator_switch_apm_mode() - switch the mode of the APM controller ++ * associated with a given CPR3 controller ++ * @ctrl: Pointer to the CPR3 controller ++ * @new_volt: New voltage in microvolts that VDD supply needs to ++ * end up at ++ * @last_volt: Pointer to the last known voltage in microvolts for the ++ * VDD supply ++ * @aggr_corner: Pointer to the CPR3 corner which corresponds to the max ++ * corner aggregated from all CPR3 threads managed by the ++ * CPR3 controller ++ * ++ * This function requests a switch of the APM mode while guaranteeing ++ * any LDO regulator hardware requirements are satisfied. The function must ++ * be called once it is known a new VDD supply setpoint crosses the APM ++ * voltage threshold. ++ * ++ * Return: 0 on success, errno on failure. If the VDD supply voltage is ++ * modified, last_volt is updated to reflect the new voltage setpoint. ++ */ ++static int cpr3_regulator_switch_apm_mode(struct cpr3_controller *ctrl, ++ int new_volt, int *last_volt, ++ struct cpr3_corner *aggr_corner) ++{ ++ struct regulator *vdd = ctrl->vdd_regulator; ++ int apm_volt = ctrl->apm_threshold_volt; ++ int orig_last_volt = *last_volt; ++ int rc; ++ ++ rc = regulator_set_voltage(vdd, apm_volt, apm_volt); ++ if (rc) { ++ cpr3_err(ctrl, "regulator_set_voltage(vdd) == %d failed, rc=%d\n", ++ apm_volt, rc); ++ return rc; ++ } ++ ++ *last_volt = apm_volt; ++ ++ rc = msm_apm_set_supply(ctrl->apm, new_volt >= apm_volt ++ ? ctrl->apm_high_supply : ctrl->apm_low_supply); ++ if (rc) { ++ cpr3_err(ctrl, "APM switch failed, rc=%d\n", rc); ++ /* Roll back the voltage. 
*/ ++ regulator_set_voltage(vdd, orig_last_volt, INT_MAX); ++ *last_volt = orig_last_volt; ++ return rc; ++ } ++ return 0; ++} ++ ++/** ++ * cpr3_regulator_config_voltage_crossings() - configure APM and mem-acc ++ * settings depending upon a new VDD supply setpoint ++ * ++ * @ctrl: Pointer to the CPR3 controller ++ * @new_volt: New voltage in microvolts that VDD supply needs to ++ * end up at ++ * @last_volt: Pointer to the last known voltage in microvolts for the ++ * VDD supply ++ * @aggr_corner: Pointer to the CPR3 corner which corresponds to the max ++ * corner aggregated from all CPR3 threads managed by the ++ * CPR3 controller ++ * ++ * This function handles the APM and mem-acc regulator reconfiguration if ++ * the new VDD supply voltage will result in crossing their respective voltage ++ * thresholds. ++ * ++ * Return: 0 on success, errno on failure. If the VDD supply voltage is ++ * modified, last_volt is updated to reflect the new voltage setpoint. ++ */ ++static int cpr3_regulator_config_voltage_crossings(struct cpr3_controller *ctrl, ++ int new_volt, int *last_volt, ++ struct cpr3_corner *aggr_corner) ++{ ++ bool apm_crossing = false, mem_acc_crossing = false; ++ bool mem_acc_bhs_used; ++ int apm_volt = ctrl->apm_threshold_volt; ++ int mem_acc_volt = ctrl->mem_acc_threshold_volt; ++ int ref_volt, rc; ++ ++ if (ctrl->apm && apm_volt > 0 ++ && ((*last_volt < apm_volt && apm_volt <= new_volt) ++ || (*last_volt >= apm_volt && apm_volt > new_volt))) ++ apm_crossing = true; ++ ++ mem_acc_bhs_used = cpr3_regulator_mem_acc_bhs_used(ctrl); ++ ++ ref_volt = ctrl->use_hw_closed_loop ? aggr_corner->floor_volt : ++ new_volt; ++ ++ if (mem_acc_bhs_used && ++ (((*last_volt < mem_acc_volt && mem_acc_volt <= ref_volt) || ++ (*last_volt >= mem_acc_volt && mem_acc_volt > ref_volt)))) ++ mem_acc_crossing = true; ++ ++ if (apm_crossing && mem_acc_crossing) { ++ if ((new_volt < *last_volt && apm_volt >= mem_acc_volt) || ++ (new_volt >= *last_volt && apm_volt < mem_acc_volt)) { ++ rc = cpr3_regulator_switch_apm_mode(ctrl, new_volt, ++ last_volt, ++ aggr_corner); ++ if (rc) { ++ cpr3_err(ctrl, "unable to switch APM mode\n"); ++ return rc; ++ } ++ ++ rc = cpr3_regulator_config_bhs_mem_acc(ctrl, new_volt, ++ last_volt, aggr_corner); ++ if (rc) { ++ cpr3_err(ctrl, "unable to configure BHS mem-acc settings\n"); ++ return rc; ++ } ++ } else { ++ rc = cpr3_regulator_config_bhs_mem_acc(ctrl, new_volt, ++ last_volt, aggr_corner); ++ if (rc) { ++ cpr3_err(ctrl, "unable to configure BHS mem-acc settings\n"); ++ return rc; ++ } ++ ++ rc = cpr3_regulator_switch_apm_mode(ctrl, new_volt, ++ last_volt, ++ aggr_corner); ++ if (rc) { ++ cpr3_err(ctrl, "unable to switch APM mode\n"); ++ return rc; ++ } ++ } ++ } else if (apm_crossing) { ++ rc = cpr3_regulator_switch_apm_mode(ctrl, new_volt, last_volt, ++ aggr_corner); ++ if (rc) { ++ cpr3_err(ctrl, "unable to switch APM mode\n"); ++ return rc; ++ } ++ } else if (mem_acc_crossing) { ++ rc = cpr3_regulator_config_bhs_mem_acc(ctrl, new_volt, ++ last_volt, aggr_corner); ++ if (rc) { ++ cpr3_err(ctrl, "unable to configure BHS mem-acc settings\n"); ++ return rc; ++ } ++ } ++ ++ return 0; ++} ++ ++/** ++ * cpr3_regulator_config_mem_acc() - configure the corner of the mem-acc ++ * regulator associated with the CPR3 controller ++ * @ctrl: Pointer to the CPR3 controller ++ * @aggr_corner: Pointer to the CPR3 corner which corresponds to the max ++ * corner aggregated from all CPR3 threads managed by the ++ * CPR3 controller ++ * ++ * Return: 0 on success, errno on failure ++ */ 
++static int cpr3_regulator_config_mem_acc(struct cpr3_controller *ctrl, ++ struct cpr3_corner *aggr_corner) ++{ ++ int rc; ++ ++ if (ctrl->mem_acc_regulator && aggr_corner->mem_acc_volt) { ++ rc = regulator_set_voltage(ctrl->mem_acc_regulator, ++ aggr_corner->mem_acc_volt, ++ aggr_corner->mem_acc_volt); ++ if (rc) { ++ cpr3_err(ctrl, "regulator_set_voltage(mem_acc) == %d failed, rc=%d\n", ++ aggr_corner->mem_acc_volt, rc); ++ return rc; ++ } ++ } ++ ++ return 0; ++} ++ ++/** ++ * cpr3_regulator_scale_vdd_voltage() - scale the CPR controlled VDD supply ++ * voltage to the new level while satisfying any other hardware ++ * requirements ++ * @ctrl: Pointer to the CPR3 controller ++ * @new_volt: New voltage in microvolts that VDD supply needs to end ++ * up at ++ * @last_volt: Last known voltage in microvolts for the VDD supply ++ * @aggr_corner: Pointer to the CPR3 corner which corresponds to the max ++ * corner aggregated from all CPR3 threads managed by the ++ * CPR3 controller ++ * ++ * This function scales the CPR controlled VDD supply voltage from its ++ * current level to the new voltage that is specified. If the supply is ++ * configured to use the APM and the APM threshold is crossed as a result of ++ * the voltage scaling, then this function also stops at the APM threshold, ++ * switches the APM source, and finally sets the final new voltage. ++ * ++ * Return: 0 on success, errno on failure ++ */ ++static int cpr3_regulator_scale_vdd_voltage(struct cpr3_controller *ctrl, ++ int new_volt, int last_volt, ++ struct cpr3_corner *aggr_corner) ++{ ++ struct regulator *vdd = ctrl->vdd_regulator; ++ int rc; ++ ++ if (new_volt < last_volt) { ++ rc = cpr3_regulator_config_mem_acc(ctrl, aggr_corner); ++ if (rc) ++ return rc; ++ } else { ++ /* Increasing VDD voltage */ ++ if (ctrl->system_regulator) { ++ rc = regulator_set_voltage(ctrl->system_regulator, ++ aggr_corner->system_volt, INT_MAX); ++ if (rc) { ++ cpr3_err(ctrl, "regulator_set_voltage(system) == %d failed, rc=%d\n", ++ aggr_corner->system_volt, rc); ++ return rc; ++ } ++ } ++ } ++ ++ rc = cpr3_regulator_config_voltage_crossings(ctrl, new_volt, &last_volt, ++ aggr_corner); ++ if (rc) { ++ cpr3_err(ctrl, "unable to handle voltage threshold crossing configurations, rc=%d\n", ++ rc); ++ return rc; ++ } ++ ++ /* ++ * Subtract a small amount from the min_uV parameter so that the ++ * set voltage request is not dropped by the framework due to being ++ * duplicate. This is needed in order to switch from hardware ++ * closed-loop to open-loop successfully. ++ */ ++ rc = regulator_set_voltage(vdd, new_volt - (ctrl->cpr_enabled ? 0 : 1), ++ aggr_corner->ceiling_volt); ++ if (rc) { ++ cpr3_err(ctrl, "regulator_set_voltage(vdd) == %d failed, rc=%d\n", ++ new_volt, rc); ++ return rc; ++ } ++ ++ if (new_volt == last_volt && ctrl->supports_hw_closed_loop ++ && ctrl->ctrl_type == CPR_CTRL_TYPE_CPR4) { ++ /* ++ * CPR4 features enforce voltage reprogramming when the last ++ * set voltage and new set voltage are same. This way, we can ++ * ensure that SAW PMIC STATUS register is updated with newly ++ * programmed voltage. 
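++ * regulator_sync_voltage() re-applies the regulator's cached voltage to the ++ * hardware even though the framework would otherwise treat the request as a ++ * duplicate, which is what forces the SAW register update described above.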
++ */ ++ rc = regulator_sync_voltage(vdd); ++ if (rc) { ++ cpr3_err(ctrl, "regulator_sync_voltage(vdd) == %d failed, rc=%d\n", ++ new_volt, rc); ++ return rc; ++ } ++ } ++ ++ if (new_volt >= last_volt) { ++ rc = cpr3_regulator_config_mem_acc(ctrl, aggr_corner); ++ if (rc) ++ return rc; ++ } else { ++ /* Decreasing VDD voltage */ ++ if (ctrl->system_regulator) { ++ rc = regulator_set_voltage(ctrl->system_regulator, ++ aggr_corner->system_volt, INT_MAX); ++ if (rc) { ++ cpr3_err(ctrl, "regulator_set_voltage(system) == %d failed, rc=%d\n", ++ aggr_corner->system_volt, rc); ++ return rc; ++ } ++ } ++ } ++ ++ return 0; ++} ++ ++/** ++ * cpr3_regulator_get_dynamic_floor_volt() - returns the current dynamic floor ++ * voltage based upon static configurations and the state of all ++ * power domains during the last CPR measurement ++ * @ctrl: Pointer to the CPR3 controller ++ * @reg_last_measurement: Value read from the LAST_MEASUREMENT register ++ * ++ * When using HW closed-loop, the dynamic floor voltage is always returned ++ * regardless of the current state of the power domains. ++ * ++ * Return: dynamic floor voltage in microvolts or 0 if dynamic floor is not ++ * currently required ++ */ ++static int cpr3_regulator_get_dynamic_floor_volt(struct cpr3_controller *ctrl, ++ u32 reg_last_measurement) ++{ ++ int dynamic_floor_volt = 0; ++ struct cpr3_regulator *vreg; ++ bool valid, pd_valid; ++ u32 bypass_bits; ++ int i, j; ++ ++ if (!ctrl->supports_hw_closed_loop) ++ return 0; ++ ++ if (likely(!ctrl->use_hw_closed_loop)) { ++ valid = !!(reg_last_measurement & CPR3_LAST_MEASUREMENT_VALID); ++ bypass_bits ++ = (reg_last_measurement & CPR3_LAST_MEASUREMENT_PD_BYPASS_MASK) ++ >> CPR3_LAST_MEASUREMENT_PD_BYPASS_SHIFT; ++ } else { ++ /* ++ * Ensure that the dynamic floor voltage is always used for ++ * HW closed-loop since the conditions below cannot be evaluated ++ * after each CPR measurement. ++ */ ++ valid = false; ++ bypass_bits = 0; ++ } ++ ++ for (i = 0; i < ctrl->thread_count; i++) { ++ for (j = 0; j < ctrl->thread[i].vreg_count; j++) { ++ vreg = &ctrl->thread[i].vreg[j]; ++ ++ if (!vreg->uses_dynamic_floor) ++ continue; ++ ++ pd_valid = !((bypass_bits & vreg->pd_bypass_mask) ++ == vreg->pd_bypass_mask); ++ ++ if (!valid || !pd_valid) ++ dynamic_floor_volt = max(dynamic_floor_volt, ++ vreg->corner[ ++ vreg->dynamic_floor_corner].last_volt); ++ } ++ } ++ ++ return dynamic_floor_volt; ++} ++ ++/** ++ * cpr3_regulator_max_sdelta_diff() - returns the maximum voltage difference in ++ * microvolts that can result from different operating conditions ++ * for the specified sdelta struct ++ * @sdelta: Pointer to the sdelta structure ++ * @step_volt: Step size in microvolts between available set ++ * points of the VDD supply. ++ * ++ * Return: voltage difference between the highest and lowest adjustments if ++ * sdelta and sdelta->table are valid, else 0. 
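++ * ++ * For example (hypothetical table contents): a 2-core, 3-temp-band table of ++ * {0, -2, -4, 1, -1, -3} steps with step_volt = 5000 uV yields ++ * (1 - (-4)) * 5000 = 25000 uV.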
++ */ ++static int cpr3_regulator_max_sdelta_diff(const struct cpr4_sdelta *sdelta, ++ int step_volt) ++{ ++ int i, j, index, sdelta_min = INT_MAX, sdelta_max = INT_MIN; ++ ++ if (!sdelta || !sdelta->table) ++ return 0; ++ ++ for (i = 0; i < sdelta->max_core_count; i++) { ++ for (j = 0; j < sdelta->temp_band_count; j++) { ++ index = i * sdelta->temp_band_count + j; ++ sdelta_min = min(sdelta_min, sdelta->table[index]); ++ sdelta_max = max(sdelta_max, sdelta->table[index]); ++ } ++ } ++ ++ return (sdelta_max - sdelta_min) * step_volt; ++} ++ ++/** ++ * cpr3_regulator_aggregate_sdelta() - compare the open-loop voltages of the ++ * current aggregated corner and the current corner of a given ++ * regulator and adjust the sdelta structure data of the ++ * aggregated corner. ++ * @aggr_corner: Pointer to accumulated aggregated corner which ++ * is both an input and an output ++ * @corner: Pointer to the corner to be aggregated with ++ * aggr_corner ++ * @step_volt: Step size in microvolts between available set ++ * points of the VDD supply. ++ * ++ * Return: none ++ */ ++static void cpr3_regulator_aggregate_sdelta( ++ struct cpr3_corner *aggr_corner, ++ const struct cpr3_corner *corner, int step_volt) ++{ ++ struct cpr4_sdelta *aggr_sdelta, *sdelta; ++ int aggr_core_count, core_count, temp_band_count; ++ u32 aggr_index, index; ++ int i, j, sdelta_size, cap_steps, adjust_sdelta; ++ ++ aggr_sdelta = aggr_corner->sdelta; ++ sdelta = corner->sdelta; ++ ++ if (aggr_corner->open_loop_volt < corner->open_loop_volt) { ++ /* ++ * Found a new dominant regulator since its open-loop ++ * requirement is higher than the previous dominant regulator's. ++ * Calculate the cap voltage to limit the SDELTA values so that ++ * the runtime (core-count/temp) adjustments do not violate ++ * other regulators' voltage requirements. Use the cpr4_sdelta ++ * values of the new dominant regulator.
++ */ ++ aggr_sdelta->cap_volt = min(aggr_sdelta->cap_volt, ++ (corner->open_loop_volt - ++ aggr_corner->open_loop_volt)); ++ ++ /* Clear old data in the sdelta table */ ++ sdelta_size = aggr_sdelta->max_core_count ++ * aggr_sdelta->temp_band_count; ++ ++ if (aggr_sdelta->allow_core_count_adj ++ || aggr_sdelta->allow_temp_adj) ++ memset(aggr_sdelta->table, 0, sdelta_size ++ * sizeof(*aggr_sdelta->table)); ++ ++ if (sdelta->allow_temp_adj || sdelta->allow_core_count_adj) { ++ /* Copy new data into the sdelta table */ ++ sdelta_size = sdelta->max_core_count ++ * sdelta->temp_band_count; ++ if (sdelta->table) ++ memcpy(aggr_sdelta->table, sdelta->table, ++ sdelta_size * sizeof(*sdelta->table)); ++ } ++ ++ if (sdelta->allow_boost) { ++ memcpy(aggr_sdelta->boost_table, sdelta->boost_table, ++ sdelta->temp_band_count ++ * sizeof(*sdelta->boost_table)); ++ aggr_sdelta->boost_num_cores = sdelta->boost_num_cores; ++ } else if (aggr_sdelta->allow_boost) { ++ for (i = 0; i < aggr_sdelta->temp_band_count; i++) { ++ adjust_sdelta = (corner->open_loop_volt ++ - aggr_corner->open_loop_volt) ++ / step_volt; ++ aggr_sdelta->boost_table[i] += adjust_sdelta; ++ aggr_sdelta->boost_table[i] ++ = min(aggr_sdelta->boost_table[i], 0); ++ } ++ } ++ ++ aggr_corner->open_loop_volt = corner->open_loop_volt; ++ aggr_sdelta->allow_temp_adj = sdelta->allow_temp_adj; ++ aggr_sdelta->allow_core_count_adj ++ = sdelta->allow_core_count_adj; ++ aggr_sdelta->max_core_count = sdelta->max_core_count; ++ aggr_sdelta->temp_band_count = sdelta->temp_band_count; ++ } else if (aggr_corner->open_loop_volt > corner->open_loop_volt) { ++ /* ++ * Adjust the cap voltage if the open-loop requirement of the ++ * new regulator is the next highest. ++ */ ++ aggr_sdelta->cap_volt = min(aggr_sdelta->cap_volt, ++ (aggr_corner->open_loop_volt ++ - corner->open_loop_volt)); ++ ++ if (sdelta->allow_boost) { ++ for (i = 0; i < aggr_sdelta->temp_band_count; i++) { ++ adjust_sdelta = (aggr_corner->open_loop_volt ++ - corner->open_loop_volt) ++ / step_volt; ++ aggr_sdelta->boost_table[i] = ++ sdelta->boost_table[i] + adjust_sdelta; ++ aggr_sdelta->boost_table[i] ++ = min(aggr_sdelta->boost_table[i], 0); ++ } ++ aggr_sdelta->boost_num_cores = sdelta->boost_num_cores; ++ } ++ } else { ++ /* ++ * Found another dominant regulator with the same open-loop ++ * requirement. Set the cap voltage to 0. Disable core-count ++ * adjustments since they cannot be supported for both ++ * regulators. Keep temperature-based adjustments enabled if ++ * both regulators allow them, and choose the minimum margin ++ * adjustment values between them.
++ */ ++ aggr_sdelta->cap_volt = 0; ++ aggr_sdelta->allow_core_count_adj = false; ++ ++ if (aggr_sdelta->allow_temp_adj ++ && sdelta->allow_temp_adj) { ++ aggr_core_count = aggr_sdelta->max_core_count - 1; ++ core_count = sdelta->max_core_count - 1; ++ temp_band_count = sdelta->temp_band_count; ++ for (j = 0; j < temp_band_count; j++) { ++ aggr_index = aggr_core_count * temp_band_count ++ + j; ++ index = core_count * temp_band_count + j; ++ aggr_sdelta->table[aggr_index] = ++ min(aggr_sdelta->table[aggr_index], ++ sdelta->table[index]); ++ } ++ } else { ++ aggr_sdelta->allow_temp_adj = false; ++ } ++ ++ if (sdelta->allow_boost) { ++ memcpy(aggr_sdelta->boost_table, sdelta->boost_table, ++ sdelta->temp_band_count ++ * sizeof(*sdelta->boost_table)); ++ aggr_sdelta->boost_num_cores = sdelta->boost_num_cores; ++ } ++ } ++ ++ /* Keep non-dominant clients boost enable state */ ++ aggr_sdelta->allow_boost |= sdelta->allow_boost; ++ if (aggr_sdelta->allow_boost) ++ aggr_sdelta->allow_core_count_adj = false; ++ ++ if (aggr_sdelta->cap_volt && !(aggr_sdelta->cap_volt == INT_MAX)) { ++ core_count = aggr_sdelta->max_core_count; ++ temp_band_count = aggr_sdelta->temp_band_count; ++ /* ++ * Convert cap voltage from uV to PMIC steps and use to limit ++ * sdelta margin adjustments. ++ */ ++ cap_steps = aggr_sdelta->cap_volt / step_volt; ++ for (i = 0; i < core_count; i++) ++ for (j = 0; j < temp_band_count; j++) { ++ index = i * temp_band_count + j; ++ aggr_sdelta->table[index] = ++ min(aggr_sdelta->table[index], ++ cap_steps); ++ } ++ } ++} ++ ++/** ++ * cpr3_regulator_aggregate_corners() - aggregate two corners together ++ * @aggr_corner: Pointer to accumulated aggregated corner which ++ * is both an input and an output ++ * @corner: Pointer to the corner to be aggregated with ++ * aggr_corner ++ * @aggr_quot: Flag indicating that target quotients should be ++ * aggregated as well. ++ * @step_volt: Step size in microvolts between available set ++ * points of the VDD supply. 
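++ * ++ * The aggregated corner takes the maximum of the voltage limits and target ++ * quotients and the bitwise AND of the RO masks, so it satisfies the ++ * requirements of every regulator that contributes to it.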
++ * ++ * Return: none ++ */ ++static void cpr3_regulator_aggregate_corners(struct cpr3_corner *aggr_corner, ++ const struct cpr3_corner *corner, bool aggr_quot, ++ int step_volt) ++{ ++ int i; ++ ++ aggr_corner->ceiling_volt ++ = max(aggr_corner->ceiling_volt, corner->ceiling_volt); ++ aggr_corner->floor_volt ++ = max(aggr_corner->floor_volt, corner->floor_volt); ++ aggr_corner->last_volt ++ = max(aggr_corner->last_volt, corner->last_volt); ++ aggr_corner->system_volt ++ = max(aggr_corner->system_volt, corner->system_volt); ++ aggr_corner->mem_acc_volt ++ = max(aggr_corner->mem_acc_volt, corner->mem_acc_volt); ++ aggr_corner->irq_en |= corner->irq_en; ++ aggr_corner->use_open_loop |= corner->use_open_loop; ++ ++ if (aggr_quot) { ++ aggr_corner->ro_mask &= corner->ro_mask; ++ ++ for (i = 0; i < CPR3_RO_COUNT; i++) ++ aggr_corner->target_quot[i] ++ = max(aggr_corner->target_quot[i], ++ corner->target_quot[i]); ++ } ++ ++ if (aggr_corner->sdelta && corner->sdelta ++ && (aggr_corner->sdelta->table ++ || aggr_corner->sdelta->boost_table)) { ++ cpr3_regulator_aggregate_sdelta(aggr_corner, corner, step_volt); ++ } else { ++ aggr_corner->open_loop_volt ++ = max(aggr_corner->open_loop_volt, ++ corner->open_loop_volt); ++ } ++} ++ ++/** ++ * cpr3_regulator_update_ctrl_state() - update the state of the CPR controller ++ * to reflect the corners used by all CPR3 regulators as well as ++ * the CPR operating mode ++ * @ctrl: Pointer to the CPR3 controller ++ * ++ * This function aggregates the CPR parameters for all CPR3 regulators ++ * associated with the VDD supply. Upon success, it sets the aggregated last ++ * known good voltage. ++ * ++ * The VDD supply voltage will not be physically configured unless this ++ * condition is met by at least one of the regulators of the controller: ++ * regulator->vreg_enabled == true && ++ * regulator->current_corner != CPR3_REGULATOR_CORNER_INVALID ++ * ++ * CPR registers for the controller and each thread are updated as long as ++ * ctrl->cpr_enabled == true. ++ * ++ * Note, CPR3 controller lock must be held by the caller. ++ * ++ * Return: 0 on success, errno on failure ++ */ ++static int _cpr3_regulator_update_ctrl_state(struct cpr3_controller *ctrl) ++{ ++ struct cpr3_corner aggr_corner = {}; ++ struct cpr3_thread *thread; ++ struct cpr3_regulator *vreg; ++ struct cpr4_sdelta *sdelta; ++ bool valid = false; ++ bool thread_valid; ++ int i, j, rc, new_volt, vdd_volt, dynamic_floor_volt, last_corner_volt; ++ u32 reg_last_measurement = 0, sdelta_size; ++ int *sdelta_table, *boost_table; ++ ++ last_corner_volt = 0; ++ if (ctrl->ctrl_type == CPR_CTRL_TYPE_CPR4) { ++ rc = cpr3_ctrl_clear_cpr4_config(ctrl); ++ if (rc) { ++ cpr3_err(ctrl, "failed to clear CPR4 configuration,rc=%d\n", ++ rc); ++ return rc; ++ } ++ } ++ ++ cpr3_ctrl_loop_disable(ctrl); ++ ++ vdd_volt = regulator_get_voltage(ctrl->vdd_regulator); ++ if (vdd_volt < 0) { ++ cpr3_err(ctrl, "regulator_get_voltage(vdd) failed, rc=%d\n", ++ vdd_volt); ++ return vdd_volt; ++ } ++ ++ if (ctrl->ctrl_type == CPR_CTRL_TYPE_CPR4) { ++ /* ++ * Save aggregated corner open-loop voltage which was programmed ++ * during last corner switch which is used when programming new ++ * aggregated corner open-loop voltage. 
++ */ ++ last_corner_volt = ctrl->aggr_corner.open_loop_volt; ++ } ++ ++ if (ctrl->cpr_enabled && ctrl->use_hw_closed_loop && ++ ctrl->ctrl_type == CPR_CTRL_TYPE_CPR3) ++ reg_last_measurement ++ = cpr3_read(ctrl, CPR3_REG_LAST_MEASUREMENT); ++ ++ aggr_corner.sdelta = ctrl->aggr_corner.sdelta; ++ if (aggr_corner.sdelta) { ++ sdelta = aggr_corner.sdelta; ++ sdelta_table = sdelta->table; ++ if (sdelta_table) { ++ sdelta_size = sdelta->max_core_count * ++ sdelta->temp_band_count; ++ memset(sdelta_table, 0, sdelta_size ++ * sizeof(*sdelta_table)); ++ } ++ ++ boost_table = sdelta->boost_table; ++ if (boost_table) ++ memset(boost_table, 0, sdelta->temp_band_count ++ * sizeof(*boost_table)); ++ ++ memset(sdelta, 0, sizeof(*sdelta)); ++ sdelta->table = sdelta_table; ++ sdelta->cap_volt = INT_MAX; ++ sdelta->boost_table = boost_table; ++ } ++ ++ /* Aggregate the requests of all threads */ ++ for (i = 0; i < ctrl->thread_count; i++) { ++ thread = &ctrl->thread[i]; ++ thread_valid = false; ++ ++ sdelta = thread->aggr_corner.sdelta; ++ if (sdelta) { ++ sdelta_table = sdelta->table; ++ if (sdelta_table) { ++ sdelta_size = sdelta->max_core_count * ++ sdelta->temp_band_count; ++ memset(sdelta_table, 0, sdelta_size ++ * sizeof(*sdelta_table)); ++ } ++ ++ boost_table = sdelta->boost_table; ++ if (boost_table) ++ memset(boost_table, 0, sdelta->temp_band_count ++ * sizeof(*boost_table)); ++ ++ memset(sdelta, 0, sizeof(*sdelta)); ++ sdelta->table = sdelta_table; ++ sdelta->cap_volt = INT_MAX; ++ sdelta->boost_table = boost_table; ++ } ++ ++ memset(&thread->aggr_corner, 0, sizeof(thread->aggr_corner)); ++ thread->aggr_corner.sdelta = sdelta; ++ thread->aggr_corner.ro_mask = CPR3_RO_MASK; ++ ++ for (j = 0; j < thread->vreg_count; j++) { ++ vreg = &thread->vreg[j]; ++ ++ if (ctrl->cpr_enabled && ctrl->use_hw_closed_loop) ++ cpr3_update_vreg_closed_loop_volt(vreg, ++ vdd_volt, reg_last_measurement); ++ ++ if (!vreg->vreg_enabled ++ || vreg->current_corner ++ == CPR3_REGULATOR_CORNER_INVALID) { ++ /* Cannot participate in aggregation. */ ++ vreg->aggregated = false; ++ continue; ++ } else { ++ vreg->aggregated = true; ++ thread_valid = true; ++ } ++ ++ cpr3_regulator_aggregate_corners(&thread->aggr_corner, ++ &vreg->corner[vreg->current_corner], ++ true, ctrl->step_volt); ++ } ++ ++ valid |= thread_valid; ++ ++ if (thread_valid) ++ cpr3_regulator_aggregate_corners(&aggr_corner, ++ &thread->aggr_corner, ++ false, ctrl->step_volt); ++ } ++ ++ if (valid && ctrl->cpr_allowed_hw && ctrl->cpr_allowed_sw) { ++ rc = cpr3_closed_loop_enable(ctrl); ++ if (rc) { ++ cpr3_err(ctrl, "could not enable CPR, rc=%d\n", rc); ++ return rc; ++ } ++ } else { ++ rc = cpr3_closed_loop_disable(ctrl); ++ if (rc) { ++ cpr3_err(ctrl, "could not disable CPR, rc=%d\n", rc); ++ return rc; ++ } ++ } ++ ++ /* No threads are enabled with a valid corner so exit. */ ++ if (!valid) ++ return 0; ++ ++ /* ++ * When using CPR hardware closed-loop, the voltage may vary anywhere ++ * between the floor and ceiling voltage without software notification. ++ * Therefore, it is required that the floor to ceiling range for the ++ * aggregated corner not intersect the APM threshold voltage. Adjust ++ * the floor to ceiling range if this requirement is violated. 
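++ * A hypothetical illustration of the rule spelled out below: with ++ * floor = 830000 uV, ceiling = 920000 uV, threshold = 850000 uV, ++ * adj = 10000 uV and step = 5000 uV, an open-loop voltage of 845000 uV ++ * (>= 840000 uV) raises the floor to 850000 uV, while an open-loop voltage ++ * of 835000 uV instead lowers the ceiling to 845000 uV.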
++ * ++ * The following algorithm is applied in the case that ++ * floor < threshold <= ceiling: ++ * if open_loop >= threshold - adj, then floor = threshold ++ * else ceiling = threshold - step ++ * where adj = an adjustment factor to ensure sufficient voltage margin ++ * and step = VDD output step size ++ * ++ * The open-loop and last known voltages are also bounded by the new ++ * floor or ceiling value as needed. ++ */ ++ if (ctrl->use_hw_closed_loop ++ && aggr_corner.ceiling_volt >= ctrl->apm_threshold_volt ++ && aggr_corner.floor_volt < ctrl->apm_threshold_volt) { ++ ++ if (aggr_corner.open_loop_volt ++ >= ctrl->apm_threshold_volt - ctrl->apm_adj_volt) ++ aggr_corner.floor_volt = ctrl->apm_threshold_volt; ++ else ++ aggr_corner.ceiling_volt ++ = ctrl->apm_threshold_volt - ctrl->step_volt; ++ ++ aggr_corner.last_volt ++ = max(aggr_corner.last_volt, aggr_corner.floor_volt); ++ aggr_corner.last_volt ++ = min(aggr_corner.last_volt, aggr_corner.ceiling_volt); ++ aggr_corner.open_loop_volt ++ = max(aggr_corner.open_loop_volt, aggr_corner.floor_volt); ++ aggr_corner.open_loop_volt ++ = min(aggr_corner.open_loop_volt, aggr_corner.ceiling_volt); ++ } ++ ++ if (ctrl->use_hw_closed_loop ++ && aggr_corner.ceiling_volt >= ctrl->mem_acc_threshold_volt ++ && aggr_corner.floor_volt < ctrl->mem_acc_threshold_volt) { ++ aggr_corner.floor_volt = ctrl->mem_acc_threshold_volt; ++ aggr_corner.last_volt = max(aggr_corner.last_volt, ++ aggr_corner.floor_volt); ++ aggr_corner.open_loop_volt = max(aggr_corner.open_loop_volt, ++ aggr_corner.floor_volt); ++ } ++ ++ if (ctrl->use_hw_closed_loop) { ++ dynamic_floor_volt ++ = cpr3_regulator_get_dynamic_floor_volt(ctrl, ++ reg_last_measurement); ++ if (aggr_corner.floor_volt < dynamic_floor_volt) { ++ aggr_corner.floor_volt = dynamic_floor_volt; ++ aggr_corner.last_volt = max(aggr_corner.last_volt, ++ aggr_corner.floor_volt); ++ aggr_corner.open_loop_volt ++ = max(aggr_corner.open_loop_volt, ++ aggr_corner.floor_volt); ++ aggr_corner.ceiling_volt = max(aggr_corner.ceiling_volt, ++ aggr_corner.floor_volt); ++ } ++ } ++ ++ if (ctrl->cpr_enabled && ctrl->last_corner_was_closed_loop) { ++ /* ++ * Always program open-loop voltage for CPR4 controllers which ++ * support hardware closed-loop. Storing the last closed loop ++ * voltage in corner structure can still help with debugging. ++ */ ++ if (ctrl->ctrl_type == CPR_CTRL_TYPE_CPR3) ++ new_volt = aggr_corner.last_volt; ++ else if (ctrl->ctrl_type == CPR_CTRL_TYPE_CPR4 ++ && ctrl->supports_hw_closed_loop) ++ new_volt = aggr_corner.open_loop_volt; ++ else ++ new_volt = min(aggr_corner.last_volt + ++ cpr3_regulator_max_sdelta_diff(aggr_corner.sdelta, ++ ctrl->step_volt), ++ aggr_corner.ceiling_volt); ++ ++ aggr_corner.last_volt = new_volt; ++ } else { ++ new_volt = aggr_corner.open_loop_volt; ++ aggr_corner.last_volt = aggr_corner.open_loop_volt; ++ } ++ ++ if (ctrl->ctrl_type == CPR_CTRL_TYPE_CPR4 ++ && ctrl->supports_hw_closed_loop) { ++ /* ++ * Store last aggregated corner open-loop voltage in vdd_volt ++ * which is used when programming current aggregated corner ++ * required voltage. ++ */ ++ vdd_volt = last_corner_volt; ++ } ++ ++ cpr3_debug(ctrl, "setting new voltage=%d uV\n", new_volt); ++ rc = cpr3_regulator_scale_vdd_voltage(ctrl, new_volt, ++ vdd_volt, &aggr_corner); ++ if (rc) { ++ cpr3_err(ctrl, "vdd voltage scaling failed, rc=%d\n", rc); ++ return rc; ++ } ++ ++ /* Only update registers if CPR is enabled. 
*/ ++ if (ctrl->cpr_enabled) { ++ if (ctrl->use_hw_closed_loop) { ++ /* Hardware closed-loop */ ++ ++ /* Set ceiling and floor limits in hardware */ ++ rc = regulator_set_voltage(ctrl->vdd_limit_regulator, ++ aggr_corner.floor_volt, ++ aggr_corner.ceiling_volt); ++ if (rc) { ++ cpr3_err(ctrl, "could not configure HW closed-loop voltage limits, rc=%d\n", ++ rc); ++ return rc; ++ } ++ } else { ++ /* Software closed-loop */ ++ ++ /* ++ * Disable UP or DOWN interrupts when at ceiling or ++ * floor respectively. ++ */ ++ if (new_volt == aggr_corner.floor_volt) ++ aggr_corner.irq_en &= ~CPR3_IRQ_DOWN; ++ if (new_volt == aggr_corner.ceiling_volt) ++ aggr_corner.irq_en &= ~CPR3_IRQ_UP; ++ ++ cpr3_write(ctrl, CPR3_REG_IRQ_CLEAR, ++ CPR3_IRQ_UP | CPR3_IRQ_DOWN); ++ cpr3_write(ctrl, CPR3_REG_IRQ_EN, aggr_corner.irq_en); ++ } ++ ++ for (i = 0; i < ctrl->thread_count; i++) { ++ cpr3_regulator_set_target_quot(&ctrl->thread[i]); ++ ++ for (j = 0; j < ctrl->thread[i].vreg_count; j++) { ++ vreg = &ctrl->thread[i].vreg[j]; ++ ++ if (vreg->vreg_enabled) ++ vreg->last_closed_loop_corner ++ = vreg->current_corner; ++ } ++ } ++ ++ if (ctrl->proc_clock_throttle) { ++ if (aggr_corner.ceiling_volt > aggr_corner.floor_volt ++ && (ctrl->use_hw_closed_loop ++ || new_volt < aggr_corner.ceiling_volt)) ++ cpr3_write(ctrl, CPR3_REG_PD_THROTTLE, ++ ctrl->proc_clock_throttle); ++ else ++ cpr3_write(ctrl, CPR3_REG_PD_THROTTLE, ++ CPR3_PD_THROTTLE_DISABLE); ++ } ++ ++ /* ++ * Ensure that all CPR register writes complete before ++ * re-enabling CPR loop operation. ++ */ ++ wmb(); ++ } else if (ctrl->ctrl_type == CPR_CTRL_TYPE_CPR4 ++ && ctrl->vdd_limit_regulator) { ++ /* Set ceiling and floor limits in hardware */ ++ rc = regulator_set_voltage(ctrl->vdd_limit_regulator, ++ aggr_corner.floor_volt, ++ aggr_corner.ceiling_volt); ++ if (rc) { ++ cpr3_err(ctrl, "could not configure HW closed-loop voltage limits, rc=%d\n", ++ rc); ++ return rc; ++ } ++ } ++ ++ ctrl->aggr_corner = aggr_corner; ++ ++ if (ctrl->allow_core_count_adj || ctrl->allow_temp_adj ++ || ctrl->allow_boost) { ++ rc = cpr3_controller_program_sdelta(ctrl); ++ if (rc) { ++ cpr3_err(ctrl, "failed to program sdelta, rc=%d\n", rc); ++ return rc; ++ } ++ } ++ ++ /* ++ * Only enable the CPR controller if it is possible to set more than ++ * one vdd-supply voltage. ++ */ ++ if (aggr_corner.ceiling_volt > aggr_corner.floor_volt && ++ !aggr_corner.use_open_loop) ++ cpr3_ctrl_loop_enable(ctrl); ++ ++ ctrl->last_corner_was_closed_loop = ctrl->cpr_enabled; ++ cpr3_debug(ctrl, "CPR configuration updated\n"); ++ ++ return 0; ++} ++ ++/** ++ * cpr3_regulator_wait_for_idle() - wait for the CPR controller to no longer be ++ * busy ++ * @ctrl: Pointer to the CPR3 controller ++ * @max_wait_ns: Max wait time in nanoseconds ++ * ++ * Return: 0 on success or -ETIMEDOUT if the controller was still busy after ++ * the maximum delay time ++ */ ++static int cpr3_regulator_wait_for_idle(struct cpr3_controller *ctrl, ++ s64 max_wait_ns) ++{ ++ ktime_t start, end; ++ s64 time_ns; ++ u32 reg; ++ ++ /* ++ * Ensure that all previous CPR register writes have completed before ++ * checking the status register. 
++ */ ++ mb(); ++ ++ start = ktime_get(); ++ do { ++ end = ktime_get(); ++ time_ns = ktime_to_ns(ktime_sub(end, start)); ++ if (time_ns > max_wait_ns) { ++ cpr3_err(ctrl, "CPR controller still busy after %lld us\n", ++ div_s64(time_ns, 1000)); ++ return -ETIMEDOUT; ++ } ++ usleep_range(50, 100); ++ reg = cpr3_read(ctrl, CPR3_REG_CPR_STATUS); ++ } while (reg & CPR3_CPR_STATUS_BUSY_MASK); ++ ++ return 0; ++} ++ ++/** ++ * cmp_int() - int comparison function to be passed into the sort() function ++ * which leads to ascending sorting ++ * @a: First int value ++ * @b: Second int value ++ * ++ * Return: >0 if a > b, 0 if a == b, <0 if a < b ++ */ ++static int cmp_int(const void *a, const void *b) ++{ ++ return *(int *)a - *(int *)b; ++} ++ ++/** ++ * cpr3_regulator_measure_aging() - measure the quotient difference for the ++ * specified CPR aging sensor ++ * @ctrl: Pointer to the CPR3 controller ++ * @aging_sensor: Aging sensor to measure ++ * ++ * Note that vdd-supply must be configured to the aging reference voltage before ++ * calling this function. ++ * ++ * Return: 0 on success, errno on failure ++ */ ++static int cpr3_regulator_measure_aging(struct cpr3_controller *ctrl, ++ struct cpr3_aging_sensor_info *aging_sensor) ++{ ++ u32 mask, reg, result, quot_min, quot_max, sel_min, sel_max; ++ u32 quot_min_scaled, quot_max_scaled; ++ u32 gcnt, gcnt_ref, gcnt0_restore, gcnt1_restore, irq_restore; ++ u32 ro_mask_restore, cont_dly_restore, up_down_dly_restore = 0; ++ int quot_delta, quot_delta_scaled, quot_delta_scaled_sum; ++ int *quot_delta_results; ++ int rc, rc2, i, aging_measurement_count, filtered_count; ++ bool is_aging_measurement; ++ ++ quot_delta_results = kcalloc(CPR3_AGING_MEASUREMENT_ITERATIONS, ++ sizeof(*quot_delta_results), GFP_KERNEL); ++ if (!quot_delta_results) ++ return -ENOMEM; ++ ++ if (ctrl->ctrl_type == CPR_CTRL_TYPE_CPR4) { ++ rc = cpr3_ctrl_clear_cpr4_config(ctrl); ++ if (rc) { ++ cpr3_err(ctrl, "failed to clear CPR4 configuration,rc=%d\n", ++ rc); ++ kfree(quot_delta_results); ++ return rc; ++ } ++ } ++ ++ cpr3_ctrl_loop_disable(ctrl); ++ ++ /* Enable up, down, and mid CPR interrupts */ ++ irq_restore = cpr3_read(ctrl, CPR3_REG_IRQ_EN); ++ cpr3_write(ctrl, CPR3_REG_IRQ_EN, ++ CPR3_IRQ_UP | CPR3_IRQ_DOWN | CPR3_IRQ_MID); ++ ++ /* Ensure that the aging sensor is assigned to CPR thread 0 */ ++ cpr3_write(ctrl, CPR3_REG_SENSOR_OWNER(aging_sensor->sensor_id), 0); ++ ++ /* Switch from HW to SW closed-loop if necessary */ ++ if (ctrl->supports_hw_closed_loop) { ++ if (ctrl->ctrl_type == CPR_CTRL_TYPE_CPR4) { ++ cpr3_masked_write(ctrl, CPR4_REG_MARGIN_ADJ_CTL, ++ CPR4_MARGIN_ADJ_CTL_HW_CLOSED_LOOP_EN_MASK, ++ CPR4_MARGIN_ADJ_CTL_HW_CLOSED_LOOP_DISABLE); ++ } else if (ctrl->ctrl_type == CPR_CTRL_TYPE_CPR3) { ++ cpr3_write(ctrl, CPR3_REG_HW_CLOSED_LOOP, ++ CPR3_HW_CLOSED_LOOP_DISABLE); ++ } ++ } ++ ++ /* Configure the GCNT for RO0 and RO1 that are used for aging */ ++ gcnt0_restore = cpr3_read(ctrl, CPR3_REG_GCNT(0)); ++ gcnt1_restore = cpr3_read(ctrl, CPR3_REG_GCNT(1)); ++ gcnt_ref = cpr3_regulator_get_gcnt(ctrl); ++ gcnt = gcnt_ref * 3 / 2; ++ cpr3_write(ctrl, CPR3_REG_GCNT(0), gcnt); ++ cpr3_write(ctrl, CPR3_REG_GCNT(1), gcnt); ++ ++ /* Unmask all RO's */ ++ ro_mask_restore = cpr3_read(ctrl, CPR3_REG_RO_MASK(0)); ++ cpr3_write(ctrl, CPR3_REG_RO_MASK(0), 0); ++ ++ /* ++ * Mask all sensors except for the one to measure and bypass all ++ * sensors in collapsible domains. 
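++ * Each MASK/BYPASS bank covers 32 sensors; for a hypothetical sensor_id of ++ * 41, only bit 9 of bank 1 is cleared while the other implemented sensor ++ * bits remain masked.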
++ */ ++ for (i = 0; i <= ctrl->sensor_count / 32; i++) { ++ mask = GENMASK(min(31, ctrl->sensor_count - i * 32), 0); ++ if (aging_sensor->sensor_id / 32 >= i ++ && aging_sensor->sensor_id / 32 < (i + 1)) ++ mask &= ~BIT(aging_sensor->sensor_id % 32); ++ cpr3_write(ctrl, CPR3_REG_SENSOR_MASK_WRITE_BANK(i), mask); ++ cpr3_write(ctrl, CPR3_REG_SENSOR_BYPASS_WRITE_BANK(i), ++ aging_sensor->bypass_mask[i]); ++ } ++ ++ /* Set CPR loop delays to 0 us */ ++ if (ctrl->supports_hw_closed_loop ++ && ctrl->ctrl_type == CPR_CTRL_TYPE_CPR3) { ++ cont_dly_restore = cpr3_read(ctrl, CPR3_REG_CPR_TIMER_MID_CONT); ++ up_down_dly_restore = cpr3_read(ctrl, ++ CPR3_REG_CPR_TIMER_UP_DN_CONT); ++ cpr3_write(ctrl, CPR3_REG_CPR_TIMER_MID_CONT, 0); ++ cpr3_write(ctrl, CPR3_REG_CPR_TIMER_UP_DN_CONT, 0); ++ } else { ++ cont_dly_restore = cpr3_read(ctrl, ++ CPR3_REG_CPR_TIMER_AUTO_CONT); ++ cpr3_write(ctrl, CPR3_REG_CPR_TIMER_AUTO_CONT, 0); ++ } ++ ++ /* Set count mode to all-at-once min with no repeat */ ++ cpr3_masked_write(ctrl, CPR3_REG_CPR_CTL, ++ CPR3_CPR_CTL_COUNT_MODE_MASK | CPR3_CPR_CTL_COUNT_REPEAT_MASK, ++ CPR3_CPR_CTL_COUNT_MODE_ALL_AT_ONCE_MIN ++ << CPR3_CPR_CTL_COUNT_MODE_SHIFT); ++ ++ cpr3_ctrl_loop_enable(ctrl); ++ ++ rc = cpr3_regulator_wait_for_idle(ctrl, ++ CPR3_AGING_MEASUREMENT_TIMEOUT_NS); ++ if (rc) ++ goto cleanup; ++ ++ /* Set count mode to all-at-once aging */ ++ cpr3_masked_write(ctrl, CPR3_REG_CPR_CTL, CPR3_CPR_CTL_COUNT_MODE_MASK, ++ CPR3_CPR_CTL_COUNT_MODE_ALL_AT_ONCE_AGE ++ << CPR3_CPR_CTL_COUNT_MODE_SHIFT); ++ ++ aging_measurement_count = 0; ++ for (i = 0; i < CPR3_AGING_MEASUREMENT_ITERATIONS; i++) { ++ /* Send CONT_NACK */ ++ cpr3_write(ctrl, CPR3_REG_CONT_CMD, CPR3_CONT_CMD_NACK); ++ ++ rc = cpr3_regulator_wait_for_idle(ctrl, ++ CPR3_AGING_MEASUREMENT_TIMEOUT_NS); ++ if (rc) ++ goto cleanup; ++ ++ /* Check for PAGE_IS_AGE flag in status register */ ++ reg = cpr3_read(ctrl, CPR3_REG_CPR_STATUS); ++ is_aging_measurement ++ = reg & CPR3_CPR_STATUS_AGING_MEASUREMENT_MASK; ++ ++ /* Read CPR measurement results */ ++ result = cpr3_read(ctrl, CPR3_REG_RESULT1(0)); ++ quot_min = (result & CPR3_RESULT1_QUOT_MIN_MASK) ++ >> CPR3_RESULT1_QUOT_MIN_SHIFT; ++ quot_max = (result & CPR3_RESULT1_QUOT_MAX_MASK) ++ >> CPR3_RESULT1_QUOT_MAX_SHIFT; ++ sel_min = (result & CPR3_RESULT1_RO_MIN_MASK) ++ >> CPR3_RESULT1_RO_MIN_SHIFT; ++ sel_max = (result & CPR3_RESULT1_RO_MAX_MASK) ++ >> CPR3_RESULT1_RO_MAX_SHIFT; ++ ++ /* ++ * Scale the quotients so that they are equivalent to the fused ++ * values. This accounts for the difference in measurement ++ * interval times. 
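++ * For example (hypothetical values): with gcnt_ref = 1000 and the aging ++ * gcnt = 1500, a measured quotient of 900 scales to ++ * 900 * 1001 / 1501 = 600.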
++ */ ++ quot_min_scaled = quot_min * (gcnt_ref + 1) / (gcnt + 1); ++ quot_max_scaled = quot_max * (gcnt_ref + 1) / (gcnt + 1); ++ ++ if (sel_max == 1) { ++ quot_delta = quot_max - quot_min; ++ quot_delta_scaled = quot_max_scaled - quot_min_scaled; ++ } else { ++ quot_delta = quot_min - quot_max; ++ quot_delta_scaled = quot_min_scaled - quot_max_scaled; ++ } ++ ++ if (is_aging_measurement) ++ quot_delta_results[aging_measurement_count++] ++ = quot_delta_scaled; ++ ++ cpr3_debug(ctrl, "aging results: page_is_age=%u, sel_min=%u, sel_max=%u, quot_min=%u, quot_max=%u, quot_delta=%d, quot_min_scaled=%u, quot_max_scaled=%u, quot_delta_scaled=%d\n", ++ is_aging_measurement, sel_min, sel_max, quot_min, ++ quot_max, quot_delta, quot_min_scaled, quot_max_scaled, ++ quot_delta_scaled); ++ } ++ ++ filtered_count ++ = aging_measurement_count - CPR3_AGING_MEASUREMENT_FILTER * 2; ++ if (filtered_count > 0) { ++ sort(quot_delta_results, aging_measurement_count, ++ sizeof(*quot_delta_results), cmp_int, NULL); ++ ++ quot_delta_scaled_sum = 0; ++ for (i = 0; i < filtered_count; i++) ++ quot_delta_scaled_sum ++ += quot_delta_results[i ++ + CPR3_AGING_MEASUREMENT_FILTER]; ++ ++ aging_sensor->measured_quot_diff ++ = quot_delta_scaled_sum / filtered_count; ++ cpr3_info(ctrl, "average quotient delta=%d (count=%d)\n", ++ aging_sensor->measured_quot_diff, ++ filtered_count); ++ } else { ++ cpr3_err(ctrl, "%d aging measurements completed after %d iterations\n", ++ aging_measurement_count, ++ CPR3_AGING_MEASUREMENT_ITERATIONS); ++ rc = -EBUSY; ++ } ++ ++cleanup: ++ kfree(quot_delta_results); ++ ++ if (ctrl->ctrl_type == CPR_CTRL_TYPE_CPR4) { ++ rc2 = cpr3_ctrl_clear_cpr4_config(ctrl); ++ if (rc2) { ++ cpr3_err(ctrl, "failed to clear CPR4 configuration,rc=%d\n", ++ rc2); ++ rc = rc2; ++ } ++ } ++ ++ cpr3_ctrl_loop_disable(ctrl); ++ ++ cpr3_write(ctrl, CPR3_REG_IRQ_EN, irq_restore); ++ ++ cpr3_write(ctrl, CPR3_REG_RO_MASK(0), ro_mask_restore); ++ ++ cpr3_write(ctrl, CPR3_REG_GCNT(0), gcnt0_restore); ++ cpr3_write(ctrl, CPR3_REG_GCNT(1), gcnt1_restore); ++ ++ if (ctrl->supports_hw_closed_loop ++ && ctrl->ctrl_type == CPR_CTRL_TYPE_CPR3) { ++ cpr3_write(ctrl, CPR3_REG_CPR_TIMER_MID_CONT, cont_dly_restore); ++ cpr3_write(ctrl, CPR3_REG_CPR_TIMER_UP_DN_CONT, ++ up_down_dly_restore); ++ } else { ++ cpr3_write(ctrl, CPR3_REG_CPR_TIMER_AUTO_CONT, ++ cont_dly_restore); ++ } ++ ++ for (i = 0; i <= ctrl->sensor_count / 32; i++) { ++ cpr3_write(ctrl, CPR3_REG_SENSOR_MASK_WRITE_BANK(i), 0); ++ cpr3_write(ctrl, CPR3_REG_SENSOR_BYPASS_WRITE_BANK(i), 0); ++ } ++ ++ cpr3_masked_write(ctrl, CPR3_REG_CPR_CTL, ++ CPR3_CPR_CTL_COUNT_MODE_MASK | CPR3_CPR_CTL_COUNT_REPEAT_MASK, ++ (ctrl->count_mode << CPR3_CPR_CTL_COUNT_MODE_SHIFT) ++ | (ctrl->count_repeat << CPR3_CPR_CTL_COUNT_REPEAT_SHIFT)); ++ ++ cpr3_write(ctrl, CPR3_REG_SENSOR_OWNER(aging_sensor->sensor_id), ++ ctrl->sensor_owner[aging_sensor->sensor_id]); ++ ++ cpr3_write(ctrl, CPR3_REG_IRQ_CLEAR, ++ CPR3_IRQ_UP | CPR3_IRQ_DOWN | CPR3_IRQ_MID); ++ ++ if (ctrl->supports_hw_closed_loop) { ++ if (ctrl->ctrl_type == CPR_CTRL_TYPE_CPR4) { ++ cpr3_masked_write(ctrl, CPR4_REG_MARGIN_ADJ_CTL, ++ CPR4_MARGIN_ADJ_CTL_HW_CLOSED_LOOP_EN_MASK, ++ ctrl->use_hw_closed_loop ++ ? CPR4_MARGIN_ADJ_CTL_HW_CLOSED_LOOP_ENABLE ++ : CPR4_MARGIN_ADJ_CTL_HW_CLOSED_LOOP_DISABLE); ++ } else if (ctrl->ctrl_type == CPR_CTRL_TYPE_CPR3) { ++ cpr3_write(ctrl, CPR3_REG_HW_CLOSED_LOOP, ++ ctrl->use_hw_closed_loop ++ ? 
CPR3_HW_CLOSED_LOOP_ENABLE ++ : CPR3_HW_CLOSED_LOOP_DISABLE); ++ } ++ } ++ ++ return rc; ++} ++ ++/** ++ * cpr3_regulator_readjust_volt_and_quot() - readjust the target quotients as ++ * well as the floor, ceiling, and open-loop voltages for the ++ * regulator by removing the old adjustment and adding the new one ++ * @vreg: Pointer to the CPR3 regulator ++ * @old_adjust_volt: Old aging adjustment voltage in microvolts ++ * @new_adjust_volt: New aging adjustment voltage in microvolts ++ * ++ * Also reset the cached closed loop voltage (last_volt) to equal the open-loop ++ * voltage for each corner. ++ * ++ * Return: None ++ */ ++static void cpr3_regulator_readjust_volt_and_quot(struct cpr3_regulator *vreg, ++ int old_adjust_volt, int new_adjust_volt) ++{ ++ unsigned long long temp; ++ int i, j, old_volt, new_volt, rounded_volt; ++ ++ if (!vreg->aging_allowed) ++ return; ++ ++ for (i = 0; i < vreg->corner_count; i++) { ++ temp = (unsigned long long)old_adjust_volt ++ * (unsigned long long)vreg->corner[i].aging_derate; ++ do_div(temp, 1000); ++ old_volt = temp; ++ ++ temp = (unsigned long long)new_adjust_volt ++ * (unsigned long long)vreg->corner[i].aging_derate; ++ do_div(temp, 1000); ++ new_volt = temp; ++ ++ old_volt = min(vreg->aging_max_adjust_volt, old_volt); ++ new_volt = min(vreg->aging_max_adjust_volt, new_volt); ++ ++ for (j = 0; j < CPR3_RO_COUNT; j++) { ++ if (vreg->corner[i].target_quot[j] != 0) { ++ vreg->corner[i].target_quot[j] ++ += cpr3_quot_adjustment( ++ vreg->corner[i].ro_scale[j], ++ new_volt) ++ - cpr3_quot_adjustment( ++ vreg->corner[i].ro_scale[j], ++ old_volt); ++ } ++ } ++ ++ rounded_volt = CPR3_ROUND(new_volt, ++ vreg->thread->ctrl->step_volt); ++ ++ if (!vreg->aging_allow_open_loop_adj) ++ rounded_volt = 0; ++ ++ vreg->corner[i].ceiling_volt ++ = vreg->corner[i].unaged_ceiling_volt + rounded_volt; ++ vreg->corner[i].ceiling_volt = min(vreg->corner[i].ceiling_volt, ++ vreg->corner[i].abs_ceiling_volt); ++ vreg->corner[i].floor_volt ++ = vreg->corner[i].unaged_floor_volt + rounded_volt; ++ vreg->corner[i].floor_volt = min(vreg->corner[i].floor_volt, ++ vreg->corner[i].ceiling_volt); ++ vreg->corner[i].open_loop_volt ++ = vreg->corner[i].unaged_open_loop_volt + rounded_volt; ++ vreg->corner[i].open_loop_volt ++ = min(vreg->corner[i].open_loop_volt, ++ vreg->corner[i].ceiling_volt); ++ ++ vreg->corner[i].last_volt = vreg->corner[i].open_loop_volt; ++ ++ cpr3_debug(vreg, "corner %d: applying %d uV closed-loop and %d uV open-loop voltage margin adjustment\n", ++ i, new_volt, rounded_volt); ++ } ++} ++ ++/** ++ * cpr3_regulator_set_aging_ref_adjustment() - adjust target quotients for the ++ * regulators managed by this CPR controller to account for aging ++ * @ctrl: Pointer to the CPR3 controller ++ * @ref_adjust_volt: New aging reference adjustment voltage in microvolts to ++ * apply to all regulators managed by this CPR controller ++ * ++ * The existing aging adjustment as defined by ctrl->aging_ref_adjust_volt is ++ * first removed and then the adjustment is applied. Lastly, the value of ++ * ctrl->aging_ref_adjust_volt is updated to ref_adjust_volt. 
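++ * ++ * For example (hypothetical values): with ref_adjust_volt = 15000 uV, a ++ * corner with an aging_derate of 1000 receives the full 15000 uV of margin ++ * while a corner with a derate of 500 receives 7500 uV, in both cases capped ++ * by the regulator's aging_max_adjust_volt.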
++ */ ++static void cpr3_regulator_set_aging_ref_adjustment( ++ struct cpr3_controller *ctrl, int ref_adjust_volt) ++{ ++ struct cpr3_regulator *vreg; ++ int i, j; ++ ++ for (i = 0; i < ctrl->thread_count; i++) { ++ for (j = 0; j < ctrl->thread[i].vreg_count; j++) { ++ vreg = &ctrl->thread[i].vreg[j]; ++ cpr3_regulator_readjust_volt_and_quot(vreg, ++ ctrl->aging_ref_adjust_volt, ref_adjust_volt); ++ } ++ } ++ ++ ctrl->aging_ref_adjust_volt = ref_adjust_volt; ++} ++ ++/** ++ * cpr3_regulator_aging_adjust() - adjust the target quotients for regulators ++ * based on the output of CPR aging sensors ++ * @ctrl: Pointer to the CPR3 controller ++ * ++ * Return: 0 on success, errno on failure ++ */ ++static int cpr3_regulator_aging_adjust(struct cpr3_controller *ctrl) ++{ ++ struct cpr3_regulator *vreg; ++ struct cpr3_corner restore_aging_corner; ++ struct cpr3_corner *corner; ++ int *restore_current_corner; ++ bool *restore_vreg_enabled; ++ int i, j, id, rc, rc2, vreg_count, aging_volt, max_aging_volt = 0; ++ u32 reg; ++ ++ if (!ctrl->aging_required || !ctrl->cpr_enabled ++ || ctrl->aggr_corner.ceiling_volt == 0 ++ || ctrl->aggr_corner.ceiling_volt > ctrl->aging_ref_volt) ++ return 0; ++ ++ for (i = 0, vreg_count = 0; i < ctrl->thread_count; i++) { ++ for (j = 0; j < ctrl->thread[i].vreg_count; j++) { ++ vreg = &ctrl->thread[i].vreg[j]; ++ vreg_count++; ++ ++ if (vreg->aging_allowed && vreg->vreg_enabled ++ && vreg->current_corner > vreg->aging_corner) ++ return 0; ++ } ++ } ++ ++ /* Verify that none of the aging sensors are currently masked. */ ++ for (i = 0; i < ctrl->aging_sensor_count; i++) { ++ id = ctrl->aging_sensor[i].sensor_id; ++ reg = cpr3_read(ctrl, CPR3_REG_SENSOR_MASK_READ(id)); ++ if (reg & BIT(id % 32)) ++ return 0; ++ } ++ ++ /* ++ * Verify that the aging possible register (if specified) has an ++ * acceptable value. 
++ */ ++ if (ctrl->aging_possible_reg) { ++ reg = readl_relaxed(ctrl->aging_possible_reg); ++ reg &= ctrl->aging_possible_mask; ++ if (reg != ctrl->aging_possible_val) ++ return 0; ++ } ++ ++ restore_current_corner = kcalloc(vreg_count, ++ sizeof(*restore_current_corner), GFP_KERNEL); ++ restore_vreg_enabled = kcalloc(vreg_count, ++ sizeof(*restore_vreg_enabled), GFP_KERNEL); ++ if (!restore_current_corner || !restore_vreg_enabled) { ++ kfree(restore_current_corner); ++ kfree(restore_vreg_enabled); ++ return -ENOMEM; ++ } ++ ++ /* Force all regulators to the aging corner */ ++ for (i = 0, vreg_count = 0; i < ctrl->thread_count; i++) { ++ for (j = 0; j < ctrl->thread[i].vreg_count; j++, vreg_count++) { ++ vreg = &ctrl->thread[i].vreg[j]; ++ ++ restore_current_corner[vreg_count] ++ = vreg->current_corner; ++ restore_vreg_enabled[vreg_count] ++ = vreg->vreg_enabled; ++ ++ vreg->current_corner = vreg->aging_corner; ++ vreg->vreg_enabled = true; ++ } ++ } ++ ++ /* Force one of the regulators to require the aging reference voltage */ ++ vreg = &ctrl->thread[0].vreg[0]; ++ corner = &vreg->corner[vreg->current_corner]; ++ restore_aging_corner = *corner; ++ corner->ceiling_volt = ctrl->aging_ref_volt; ++ corner->floor_volt = ctrl->aging_ref_volt; ++ corner->open_loop_volt = ctrl->aging_ref_volt; ++ corner->last_volt = ctrl->aging_ref_volt; ++ ++ /* Skip last_volt caching */ ++ ctrl->last_corner_was_closed_loop = false; ++ ++ /* Set the vdd supply voltage to the aging reference voltage */ ++ rc = _cpr3_regulator_update_ctrl_state(ctrl); ++ if (rc) { ++ cpr3_err(ctrl, "unable to force vdd-supply to the aging reference voltage=%d uV, rc=%d\n", ++ ctrl->aging_ref_volt, rc); ++ goto cleanup; ++ } ++ ++ if (ctrl->aging_vdd_mode) { ++ rc = regulator_set_mode(ctrl->vdd_regulator, ++ ctrl->aging_vdd_mode); ++ if (rc) { ++ cpr3_err(ctrl, "unable to configure vdd-supply for mode=%u, rc=%d\n", ++ ctrl->aging_vdd_mode, rc); ++ goto cleanup; ++ } ++ } ++ ++ /* Perform aging measurement on all aging sensors */ ++ for (i = 0; i < ctrl->aging_sensor_count; i++) { ++ for (j = 0; j < CPR3_AGING_RETRY_COUNT; j++) { ++ rc = cpr3_regulator_measure_aging(ctrl, ++ &ctrl->aging_sensor[i]); ++ if (!rc) ++ break; ++ } ++ ++ if (!rc) { ++ aging_volt = ++ cpr3_voltage_adjustment( ++ ctrl->aging_sensor[i].ro_scale, ++ ctrl->aging_sensor[i].measured_quot_diff ++ - ctrl->aging_sensor[i].init_quot_diff); ++ max_aging_volt = max(max_aging_volt, aging_volt); ++ } else { ++ cpr3_err(ctrl, "CPR aging measurement failed after %d tries, rc=%d\n", ++ j, rc); ++ ctrl->aging_failed = true; ++ ctrl->aging_required = false; ++ goto cleanup; ++ } ++ } ++ ++cleanup: ++ vreg = &ctrl->thread[0].vreg[0]; ++ vreg->corner[vreg->current_corner] = restore_aging_corner; ++ ++ for (i = 0, vreg_count = 0; i < ctrl->thread_count; i++) { ++ for (j = 0; j < ctrl->thread[i].vreg_count; j++, vreg_count++) { ++ vreg = &ctrl->thread[i].vreg[j]; ++ vreg->current_corner ++ = restore_current_corner[vreg_count]; ++ vreg->vreg_enabled = restore_vreg_enabled[vreg_count]; ++ } ++ } ++ ++ kfree(restore_current_corner); ++ kfree(restore_vreg_enabled); ++ ++ /* Adjust the CPR target quotients according to the aging measurement */ ++ if (!rc) { ++ cpr3_regulator_set_aging_ref_adjustment(ctrl, max_aging_volt); ++ ++ cpr3_info(ctrl, "aging measurement successful; aging reference adjustment voltage=%d uV\n", ++ ctrl->aging_ref_adjust_volt); ++ ctrl->aging_succeeded = true; ++ ctrl->aging_required = false; ++ } ++ ++ if (ctrl->aging_complete_vdd_mode) { ++ rc = 
regulator_set_mode(ctrl->vdd_regulator, ++ ctrl->aging_complete_vdd_mode); ++ if (rc) ++ cpr3_err(ctrl, "unable to configure vdd-supply for mode=%u, rc=%d\n", ++ ctrl->aging_complete_vdd_mode, rc); ++ } ++ ++ /* Skip last_volt caching */ ++ ctrl->last_corner_was_closed_loop = false; ++ ++ /* ++ * Restore vdd-supply to the voltage before the aging measurement and ++ * restore the CPR3 controller hardware state. ++ */ ++ rc2 = _cpr3_regulator_update_ctrl_state(ctrl); ++ ++ /* Stop last_volt caching on for the next request */ ++ ctrl->last_corner_was_closed_loop = false; ++ ++ return rc ? rc : rc2; ++} ++ ++/** ++ * cpr3_regulator_update_ctrl_state() - update the state of the CPR controller ++ * to reflect the corners used by all CPR3 regulators as well as ++ * the CPR operating mode and perform aging adjustments if needed ++ * @ctrl: Pointer to the CPR3 controller ++ * ++ * Note, CPR3 controller lock must be held by the caller. ++ * ++ * Return: 0 on success, errno on failure ++ */ ++static int cpr3_regulator_update_ctrl_state(struct cpr3_controller *ctrl) ++{ ++ int rc; ++ ++ rc = _cpr3_regulator_update_ctrl_state(ctrl); ++ if (rc) ++ return rc; ++ ++ return cpr3_regulator_aging_adjust(ctrl); ++} ++ ++/** ++ * cpr3_regulator_set_voltage() - set the voltage corner for the CPR3 regulator ++ * associated with the regulator device ++ * @rdev: Regulator device pointer for the cpr3-regulator ++ * @corner: New voltage corner to set (offset by CPR3_CORNER_OFFSET) ++ * @corner_max: Maximum voltage corner allowed (offset by ++ * CPR3_CORNER_OFFSET) ++ * @selector: Pointer which is filled with the selector value for the ++ * corner ++ * ++ * This function is passed as a callback function into the regulator ops that ++ * are registered for each cpr3-regulator device. The VDD voltage will not be ++ * physically configured until both this function and cpr3_regulator_enable() ++ * are called. 
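++ * Corner values passed in by the regulator framework are offset by ++ * CPR3_CORNER_OFFSET so that the first corner is not reported as a zero ++ * value; the offset is removed here and added back in ++ * cpr3_regulator_get_voltage() and cpr3_regulator_list_voltage().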
++ * ++ * Return: 0 on success, errno on failure ++ */ ++static int cpr3_regulator_set_voltage(struct regulator_dev *rdev, ++ int corner, int corner_max, unsigned *selector) ++{ ++ struct cpr3_regulator *vreg = rdev_get_drvdata(rdev); ++ struct cpr3_controller *ctrl = vreg->thread->ctrl; ++ int rc = 0; ++ int last_corner; ++ ++ corner -= CPR3_CORNER_OFFSET; ++ corner_max -= CPR3_CORNER_OFFSET; ++ *selector = corner; ++ ++ mutex_lock(&ctrl->lock); ++ ++ if (!vreg->vreg_enabled) { ++ vreg->current_corner = corner; ++ cpr3_debug(vreg, "stored corner=%d\n", corner); ++ goto done; ++ } else if (vreg->current_corner == corner) { ++ goto done; ++ } ++ ++ last_corner = vreg->current_corner; ++ vreg->current_corner = corner; ++ ++ if (vreg->cpr4_regulator_data && vreg->cpr4_regulator_data->mem_acc_funcs) ++ vreg->cpr4_regulator_data->mem_acc_funcs->set_mem_acc(rdev); ++ ++ rc = cpr3_regulator_update_ctrl_state(ctrl); ++ if (rc) { ++ cpr3_err(vreg, "could not update CPR state, rc=%d\n", rc); ++ vreg->current_corner = last_corner; ++ } ++ ++ if (vreg->cpr4_regulator_data && vreg->cpr4_regulator_data->mem_acc_funcs) ++ vreg->cpr4_regulator_data->mem_acc_funcs->clear_mem_acc(rdev); ++ ++ cpr3_debug(vreg, "set corner=%d\n", corner); ++done: ++ mutex_unlock(&ctrl->lock); ++ ++ return rc; ++} ++ ++/** ++ * cpr3_handle_temp_open_loop_adjustment() - adjust open-loop voltage margins ++ * for a cold temperature condition ++ * @ctrl: Pointer to the CPR3 controller ++ * @is_cold: Flag denoting entry into or exit from the cold condition ++ * ++ * This function switches every corner of every regulator managed by the ++ * controller between its normal and cold open-loop voltages and then updates ++ * the controller state. ++ * ++ * Return: 0 on success, errno on failure ++ */ ++int cpr3_handle_temp_open_loop_adjustment(struct cpr3_controller *ctrl, ++ bool is_cold) ++{ ++ int i, j, k, rc; ++ struct cpr3_regulator *vreg; ++ ++ mutex_lock(&ctrl->lock); ++ for (i = 0; i < ctrl->thread_count; i++) { ++ for (j = 0; j < ctrl->thread[i].vreg_count; j++) { ++ vreg = &ctrl->thread[i].vreg[j]; ++ for (k = 0; k < vreg->corner_count; k++) { ++ vreg->corner[k].open_loop_volt = is_cold ? ++ vreg->corner[k].cold_temp_open_loop_volt : ++ vreg->corner[k].normal_temp_open_loop_volt; ++ } ++ } ++ } ++ rc = cpr3_regulator_update_ctrl_state(ctrl); ++ mutex_unlock(&ctrl->lock); ++ ++ return rc; ++} ++ ++/** ++ * cpr3_regulator_get_voltage() - get the voltage corner for the CPR3 regulator ++ * associated with the regulator device ++ * @rdev: Regulator device pointer for the cpr3-regulator ++ * ++ * This function is passed as a callback function into the regulator ops that ++ * are registered for each cpr3-regulator device. ++ * ++ * Return: voltage corner value offset by CPR3_CORNER_OFFSET ++ */ ++static int cpr3_regulator_get_voltage(struct regulator_dev *rdev) ++{ ++ struct cpr3_regulator *vreg = rdev_get_drvdata(rdev); ++ ++ if (vreg->current_corner == CPR3_REGULATOR_CORNER_INVALID) ++ return CPR3_CORNER_OFFSET; ++ else ++ return vreg->current_corner + CPR3_CORNER_OFFSET; ++} ++ ++/** ++ * cpr3_regulator_list_voltage() - return the voltage corner mapped to the ++ * specified selector ++ * @rdev: Regulator device pointer for the cpr3-regulator ++ * @selector: Regulator selector ++ * ++ * This function is passed as a callback function into the regulator ops that ++ * are registered for each cpr3-regulator device.
++ * ++ * Return: voltage corner value offset by CPR3_CORNER_OFFSET ++ */ ++static int cpr3_regulator_list_voltage(struct regulator_dev *rdev, ++ unsigned selector) ++{ ++ struct cpr3_regulator *vreg = rdev_get_drvdata(rdev); ++ ++ if (selector < vreg->corner_count) ++ return selector + CPR3_CORNER_OFFSET; ++ else ++ return 0; ++} ++ ++/** ++ * cpr3_regulator_is_enabled() - return the enable state of the CPR3 regulator ++ * @rdev: Regulator device pointer for the cpr3-regulator ++ * ++ * This function is passed as a callback function into the regulator ops that ++ * are registered for each cpr3-regulator device. ++ * ++ * Return: true if regulator is enabled, false if regulator is disabled ++ */ ++static int cpr3_regulator_is_enabled(struct regulator_dev *rdev) ++{ ++ struct cpr3_regulator *vreg = rdev_get_drvdata(rdev); ++ ++ return vreg->vreg_enabled; ++} ++ ++/** ++ * cpr3_regulator_enable() - enable the CPR3 regulator ++ * @rdev: Regulator device pointer for the cpr3-regulator ++ * ++ * This function is passed as a callback function into the regulator ops that ++ * are registered for each cpr3-regulator device. ++ * ++ * Return: 0 on success, errno on failure ++ */ ++static int cpr3_regulator_enable(struct regulator_dev *rdev) ++{ ++ struct cpr3_regulator *vreg = rdev_get_drvdata(rdev); ++ struct cpr3_controller *ctrl = vreg->thread->ctrl; ++ int rc = 0; ++ ++ if (vreg->vreg_enabled == true) ++ return 0; ++ ++ mutex_lock(&ctrl->lock); ++ ++ if (ctrl->system_regulator) { ++ rc = regulator_enable(ctrl->system_regulator); ++ if (rc) { ++ cpr3_err(ctrl, "regulator_enable(system) failed, rc=%d\n", ++ rc); ++ goto done; ++ } ++ } ++ ++ rc = regulator_enable(ctrl->vdd_regulator); ++ if (rc) { ++ cpr3_err(vreg, "regulator_enable(vdd) failed, rc=%d\n", rc); ++ goto done; ++ } ++ ++ vreg->vreg_enabled = true; ++ rc = cpr3_regulator_update_ctrl_state(ctrl); ++ if (rc) { ++ cpr3_err(vreg, "could not update CPR state, rc=%d\n", rc); ++ regulator_disable(ctrl->vdd_regulator); ++ vreg->vreg_enabled = false; ++ goto done; ++ } ++ ++ cpr3_debug(vreg, "Enabled\n"); ++done: ++ mutex_unlock(&ctrl->lock); ++ ++ return rc; ++} ++ ++/** ++ * cpr3_regulator_disable() - disable the CPR3 regulator ++ * @rdev: Regulator device pointer for the cpr3-regulator ++ * ++ * This function is passed as a callback function into the regulator ops that ++ * are registered for each cpr3-regulator device. 
++ * ++ * Return: 0 on success, errno on failure ++ */ ++static int cpr3_regulator_disable(struct regulator_dev *rdev) ++{ ++ struct cpr3_regulator *vreg = rdev_get_drvdata(rdev); ++ struct cpr3_controller *ctrl = vreg->thread->ctrl; ++ int rc, rc2; ++ ++ if (vreg->vreg_enabled == false) ++ return 0; ++ ++ mutex_lock(&ctrl->lock); ++ rc = regulator_disable(ctrl->vdd_regulator); ++ if (rc) { ++ cpr3_err(vreg, "regulator_disable(vdd) failed, rc=%d\n", rc); ++ goto done; ++ } ++ ++ vreg->vreg_enabled = false; ++ rc = cpr3_regulator_update_ctrl_state(ctrl); ++ if (rc) { ++ cpr3_err(vreg, "could not update CPR state, rc=%d\n", rc); ++ rc2 = regulator_enable(ctrl->vdd_regulator); ++ vreg->vreg_enabled = true; ++ goto done; ++ } ++ ++ if (ctrl->system_regulator) { ++ rc = regulator_disable(ctrl->system_regulator); ++ if (rc) { ++ cpr3_err(ctrl, "regulator_disable(system) failed, rc=%d\n", ++ rc); ++ goto done; ++ } ++ } ++ ++ cpr3_debug(vreg, "Disabled\n"); ++done: ++ mutex_unlock(&ctrl->lock); ++ ++ return rc; ++} ++ ++static struct regulator_ops cpr3_regulator_ops = { ++ .enable = cpr3_regulator_enable, ++ .disable = cpr3_regulator_disable, ++ .is_enabled = cpr3_regulator_is_enabled, ++ .set_voltage = cpr3_regulator_set_voltage, ++ .get_voltage = cpr3_regulator_get_voltage, ++ .list_voltage = cpr3_regulator_list_voltage, ++}; ++ ++/** ++ * cpr3_print_result() - print CPR measurement results to the kernel log for ++ * debugging purposes ++ * @thread: Pointer to the CPR3 thread ++ * ++ * Return: None ++ */ ++static void cpr3_print_result(struct cpr3_thread *thread) ++{ ++ struct cpr3_controller *ctrl = thread->ctrl; ++ u32 result[3], busy, step_dn, step_up, error_steps, error, negative; ++ u32 quot_min, quot_max, ro_min, ro_max, step_quot_min, step_quot_max; ++ u32 sensor_min, sensor_max; ++ char *sign; ++ ++ result[0] = cpr3_read(ctrl, CPR3_REG_RESULT0(thread->thread_id)); ++ result[1] = cpr3_read(ctrl, CPR3_REG_RESULT1(thread->thread_id)); ++ result[2] = cpr3_read(ctrl, CPR3_REG_RESULT2(thread->thread_id)); ++ ++ busy = !!(result[0] & CPR3_RESULT0_BUSY_MASK); ++ step_dn = !!(result[0] & CPR3_RESULT0_STEP_DN_MASK); ++ step_up = !!(result[0] & CPR3_RESULT0_STEP_UP_MASK); ++ error_steps = (result[0] & CPR3_RESULT0_ERROR_STEPS_MASK) ++ >> CPR3_RESULT0_ERROR_STEPS_SHIFT; ++ error = (result[0] & CPR3_RESULT0_ERROR_MASK) ++ >> CPR3_RESULT0_ERROR_SHIFT; ++ negative = !!(result[0] & CPR3_RESULT0_NEGATIVE_MASK); ++ ++ quot_min = (result[1] & CPR3_RESULT1_QUOT_MIN_MASK) ++ >> CPR3_RESULT1_QUOT_MIN_SHIFT; ++ quot_max = (result[1] & CPR3_RESULT1_QUOT_MAX_MASK) ++ >> CPR3_RESULT1_QUOT_MAX_SHIFT; ++ ro_min = (result[1] & CPR3_RESULT1_RO_MIN_MASK) ++ >> CPR3_RESULT1_RO_MIN_SHIFT; ++ ro_max = (result[1] & CPR3_RESULT1_RO_MAX_MASK) ++ >> CPR3_RESULT1_RO_MAX_SHIFT; ++ ++ step_quot_min = (result[2] & CPR3_RESULT2_STEP_QUOT_MIN_MASK) ++ >> CPR3_RESULT2_STEP_QUOT_MIN_SHIFT; ++ step_quot_max = (result[2] & CPR3_RESULT2_STEP_QUOT_MAX_MASK) ++ >> CPR3_RESULT2_STEP_QUOT_MAX_SHIFT; ++ sensor_min = (result[2] & CPR3_RESULT2_SENSOR_MIN_MASK) ++ >> CPR3_RESULT2_SENSOR_MIN_SHIFT; ++ sensor_max = (result[2] & CPR3_RESULT2_SENSOR_MAX_MASK) ++ >> CPR3_RESULT2_SENSOR_MAX_SHIFT; ++ ++ sign = negative ? 
"-" : ""; ++ cpr3_debug(ctrl, "thread %u: busy=%u, step_dn=%u, step_up=%u, error_steps=%s%u, error=%s%u\n", ++ thread->thread_id, busy, step_dn, step_up, sign, error_steps, ++ sign, error); ++ cpr3_debug(ctrl, "thread %u: quot_min=%u, quot_max=%u, ro_min=%u, ro_max=%u\n", ++ thread->thread_id, quot_min, quot_max, ro_min, ro_max); ++ cpr3_debug(ctrl, "thread %u: step_quot_min=%u, step_quot_max=%u, sensor_min=%u, sensor_max=%u\n", ++ thread->thread_id, step_quot_min, step_quot_max, sensor_min, ++ sensor_max); ++} ++ ++/** ++ * cpr3_thread_busy() - returns if the specified CPR3 thread is busy taking ++ * a measurement ++ * @thread: Pointer to the CPR3 thread ++ * ++ * Return: CPR3 busy status ++ */ ++static bool cpr3_thread_busy(struct cpr3_thread *thread) ++{ ++ u32 result; ++ ++ result = cpr3_read(thread->ctrl, CPR3_REG_RESULT0(thread->thread_id)); ++ ++ return !!(result & CPR3_RESULT0_BUSY_MASK); ++} ++ ++/** ++ * cpr3_irq_handler() - CPR interrupt handler callback function used for ++ * software closed-loop operation ++ * @irq: CPR interrupt number ++ * @data: Private data corresponding to the CPR3 controller ++ * pointer ++ * ++ * This function increases or decreases the vdd supply voltage based upon the ++ * CPR controller recommendation. ++ * ++ * Return: IRQ_HANDLED ++ */ ++static irqreturn_t cpr3_irq_handler(int irq, void *data) ++{ ++ struct cpr3_controller *ctrl = data; ++ struct cpr3_corner *aggr = &ctrl->aggr_corner; ++ u32 cont = CPR3_CONT_CMD_NACK; ++ u32 reg_last_measurement = 0; ++ struct cpr3_regulator *vreg; ++ struct cpr3_corner *corner; ++ unsigned long flags; ++ int i, j, new_volt, last_volt, dynamic_floor_volt, rc; ++ u32 irq_en, status, cpr_status, ctl; ++ bool up, down; ++ ++ mutex_lock(&ctrl->lock); ++ ++ if (!ctrl->cpr_enabled) { ++ cpr3_debug(ctrl, "CPR interrupt received but CPR is disabled\n"); ++ mutex_unlock(&ctrl->lock); ++ return IRQ_HANDLED; ++ } else if (ctrl->use_hw_closed_loop) { ++ cpr3_debug(ctrl, "CPR interrupt received but CPR is using HW closed-loop\n"); ++ goto done; ++ } ++ ++ /* ++ * CPR IRQ status checking and CPR controller disabling must happen ++ * atomically and without invening delay in order to avoid an interrupt ++ * storm caused by the handler racing with the CPR controller. ++ */ ++ local_irq_save(flags); ++ preempt_disable(); ++ ++ status = cpr3_read(ctrl, CPR3_REG_IRQ_STATUS); ++ up = status & CPR3_IRQ_UP; ++ down = status & CPR3_IRQ_DOWN; ++ ++ if (!up && !down) { ++ /* ++ * Toggle the CPR controller off and then back on since the ++ * hardware and software states are out of sync. This condition ++ * occurs after an aging measurement completes as the CPR IRQ ++ * physically triggers during the aging measurement but the ++ * handler is stuck waiting on the mutex lock. ++ */ ++ cpr3_ctrl_loop_disable(ctrl); ++ ++ local_irq_restore(flags); ++ preempt_enable(); ++ ++ /* Wait for the loop disable write to complete */ ++ mb(); ++ ++ /* Wait for BUSY=1 and LOOP_EN=0 in CPR controller registers. 
*/ ++ for (i = 0; i < CPR3_REGISTER_WRITE_DELAY_US / 10; i++) { ++ cpr_status = cpr3_read(ctrl, CPR3_REG_CPR_STATUS); ++ ctl = cpr3_read(ctrl, CPR3_REG_CPR_CTL); ++ if (cpr_status & CPR3_CPR_STATUS_BUSY_MASK ++ && (ctl & CPR3_CPR_CTL_LOOP_EN_MASK) ++ == CPR3_CPR_CTL_LOOP_DISABLE) ++ break; ++ udelay(10); ++ } ++ if (i == CPR3_REGISTER_WRITE_DELAY_US / 10) ++ cpr3_debug(ctrl, "CPR controller not disabled after %d us\n", ++ CPR3_REGISTER_WRITE_DELAY_US); ++ ++ /* Clear interrupt status */ ++ cpr3_write(ctrl, CPR3_REG_IRQ_CLEAR, ++ CPR3_IRQ_UP | CPR3_IRQ_DOWN); ++ ++ /* Wait for the interrupt clearing write to complete */ ++ mb(); ++ ++ /* Wait for IRQ_STATUS register to be cleared. */ ++ for (i = 0; i < CPR3_REGISTER_WRITE_DELAY_US / 10; i++) { ++ status = cpr3_read(ctrl, CPR3_REG_IRQ_STATUS); ++ if (!(status & (CPR3_IRQ_UP | CPR3_IRQ_DOWN))) ++ break; ++ udelay(10); ++ } ++ if (i == CPR3_REGISTER_WRITE_DELAY_US / 10) ++ cpr3_debug(ctrl, "CPR interrupts not cleared after %d us\n", ++ CPR3_REGISTER_WRITE_DELAY_US); ++ ++ cpr3_ctrl_loop_enable(ctrl); ++ ++ cpr3_debug(ctrl, "CPR interrupt received but no up or down status bit is set\n"); ++ ++ mutex_unlock(&ctrl->lock); ++ return IRQ_HANDLED; ++ } else if (up && down) { ++ cpr3_debug(ctrl, "both up and down status bits set\n"); ++ /* The up flag takes precedence over the down flag. */ ++ down = false; ++ } ++ ++ if (ctrl->supports_hw_closed_loop) ++ reg_last_measurement ++ = cpr3_read(ctrl, CPR3_REG_LAST_MEASUREMENT); ++ dynamic_floor_volt = cpr3_regulator_get_dynamic_floor_volt(ctrl, ++ reg_last_measurement); ++ ++ local_irq_restore(flags); ++ preempt_enable(); ++ ++ irq_en = aggr->irq_en; ++ last_volt = aggr->last_volt; ++ ++ for (i = 0; i < ctrl->thread_count; i++) { ++ if (cpr3_thread_busy(&ctrl->thread[i])) { ++ cpr3_debug(ctrl, "CPR thread %u busy when it should be waiting for SW cont\n", ++ ctrl->thread[i].thread_id); ++ goto done; ++ } ++ } ++ ++ new_volt = up ? last_volt + ctrl->step_volt ++ : last_volt - ctrl->step_volt; ++ ++ /* Re-enable UP/DOWN interrupt when its opposite is received. */ ++ irq_en |= up ? CPR3_IRQ_DOWN : CPR3_IRQ_UP; ++ ++ if (new_volt > aggr->ceiling_volt) { ++ new_volt = aggr->ceiling_volt; ++ irq_en &= ~CPR3_IRQ_UP; ++ cpr3_debug(ctrl, "limiting to ceiling=%d uV\n", ++ aggr->ceiling_volt); ++ } else if (new_volt < aggr->floor_volt) { ++ new_volt = aggr->floor_volt; ++ irq_en &= ~CPR3_IRQ_DOWN; ++ cpr3_debug(ctrl, "limiting to floor=%d uV\n", aggr->floor_volt); ++ } ++ ++ if (down && new_volt < dynamic_floor_volt) { ++ /* ++ * The vdd-supply voltage should not be decreased below the ++ * dynamic floor voltage. However, it is not necessary (and ++ * counter productive) to force the voltage up to this level ++ * if it happened to be below it since the closed-loop voltage ++ * must have gotten there in a safe manner while the power ++ * domains for the CPR3 regulator imposing the dynamic floor ++ * were not bypassed. ++ */ ++ new_volt = last_volt; ++ irq_en &= ~CPR3_IRQ_DOWN; ++ cpr3_debug(ctrl, "limiting to dynamic floor=%d uV\n", ++ dynamic_floor_volt); ++ } ++ ++ for (i = 0; i < ctrl->thread_count; i++) ++ cpr3_print_result(&ctrl->thread[i]); ++ ++ cpr3_debug(ctrl, "%s: new_volt=%d uV, last_volt=%d uV\n", ++ up ? 
"UP" : "DN", new_volt, last_volt); ++ ++ if (ctrl->proc_clock_throttle && last_volt == aggr->ceiling_volt ++ && new_volt < last_volt) ++ cpr3_write(ctrl, CPR3_REG_PD_THROTTLE, ++ ctrl->proc_clock_throttle); ++ ++ if (new_volt != last_volt) { ++ rc = cpr3_regulator_scale_vdd_voltage(ctrl, new_volt, ++ last_volt, ++ aggr); ++ if (rc) { ++ cpr3_err(ctrl, "scale_vdd() failed to set vdd=%d uV, rc=%d\n", ++ new_volt, rc); ++ goto done; ++ } ++ cont = CPR3_CONT_CMD_ACK; ++ ++ /* ++ * Update the closed-loop voltage for all regulators managed ++ * by this CPR controller. ++ */ ++ for (i = 0; i < ctrl->thread_count; i++) { ++ for (j = 0; j < ctrl->thread[i].vreg_count; j++) { ++ vreg = &ctrl->thread[i].vreg[j]; ++ cpr3_update_vreg_closed_loop_volt(vreg, ++ new_volt, reg_last_measurement); ++ } ++ } ++ } ++ ++ if (ctrl->proc_clock_throttle && new_volt == aggr->ceiling_volt) ++ cpr3_write(ctrl, CPR3_REG_PD_THROTTLE, ++ CPR3_PD_THROTTLE_DISABLE); ++ ++ corner = &ctrl->thread[0].vreg[0].corner[ ++ ctrl->thread[0].vreg[0].current_corner]; ++ ++ if (irq_en != aggr->irq_en) { ++ aggr->irq_en = irq_en; ++ cpr3_write(ctrl, CPR3_REG_IRQ_EN, irq_en); ++ } ++ ++ aggr->last_volt = new_volt; ++ ++done: ++ /* Clear interrupt status */ ++ cpr3_write(ctrl, CPR3_REG_IRQ_CLEAR, CPR3_IRQ_UP | CPR3_IRQ_DOWN); ++ ++ /* ACK or NACK the CPR controller */ ++ cpr3_write(ctrl, CPR3_REG_CONT_CMD, cont); ++ ++ mutex_unlock(&ctrl->lock); ++ return IRQ_HANDLED; ++} ++ ++/** ++ * cpr3_ceiling_irq_handler() - CPR ceiling reached interrupt handler callback ++ * function used for hardware closed-loop operation ++ * @irq: CPR ceiling interrupt number ++ * @data: Private data corresponding to the CPR3 controller ++ * pointer ++ * ++ * This function disables processor clock throttling and closed-loop operation ++ * when the ceiling voltage is reached. ++ * ++ * Return: IRQ_HANDLED ++ */ ++static irqreturn_t cpr3_ceiling_irq_handler(int irq, void *data) ++{ ++ struct cpr3_controller *ctrl = data; ++ int volt; ++ ++ mutex_lock(&ctrl->lock); ++ ++ if (!ctrl->cpr_enabled) { ++ cpr3_debug(ctrl, "CPR ceiling interrupt received but CPR is disabled\n"); ++ goto done; ++ } else if (!ctrl->use_hw_closed_loop) { ++ cpr3_debug(ctrl, "CPR ceiling interrupt received but CPR is using SW closed-loop\n"); ++ goto done; ++ } ++ ++ volt = regulator_get_voltage(ctrl->vdd_regulator); ++ if (volt < 0) { ++ cpr3_err(ctrl, "could not get vdd voltage, rc=%d\n", volt); ++ goto done; ++ } else if (volt != ctrl->aggr_corner.ceiling_volt) { ++ cpr3_debug(ctrl, "CPR ceiling interrupt received but vdd voltage: %d uV != ceiling voltage: %d uV\n", ++ volt, ctrl->aggr_corner.ceiling_volt); ++ goto done; ++ } ++ ++ if (ctrl->ctrl_type == CPR_CTRL_TYPE_CPR3) { ++ /* ++ * Since the ceiling voltage has been reached, disable processor ++ * clock throttling as well as CPR closed-loop operation. ++ */ ++ cpr3_write(ctrl, CPR3_REG_PD_THROTTLE, ++ CPR3_PD_THROTTLE_DISABLE); ++ cpr3_ctrl_loop_disable(ctrl); ++ cpr3_debug(ctrl, "CPR closed-loop and throttling disabled\n"); ++ } ++ ++done: ++ mutex_unlock(&ctrl->lock); ++ return IRQ_HANDLED; ++} ++ ++/** ++ * cpr3_regulator_vreg_register() - register a regulator device for a CPR3 ++ * regulator ++ * @vreg: Pointer to the CPR3 regulator ++ * ++ * This function initializes all regulator framework related structures and then ++ * calls regulator_register() for the CPR3 regulator. 
++ * ++ * Return: 0 on success, errno on failure ++ */ ++static int cpr3_regulator_vreg_register(struct cpr3_regulator *vreg) ++{ ++ struct regulator_config config = {}; ++ struct regulator_desc *rdesc; ++ struct regulator_init_data *init_data; ++ int rc; ++ ++ init_data = of_get_regulator_init_data(vreg->thread->ctrl->dev, ++ vreg->of_node, &vreg->rdesc); ++ if (!init_data) { ++ cpr3_err(vreg, "regulator init data is missing\n"); ++ return -EINVAL; ++ } ++ ++ init_data->constraints.input_uV = init_data->constraints.max_uV; ++ rdesc = &vreg->rdesc; ++ init_data->constraints.valid_ops_mask |= ++ REGULATOR_CHANGE_VOLTAGE | REGULATOR_CHANGE_STATUS; ++ rdesc->ops = &cpr3_regulator_ops; ++ ++ rdesc->n_voltages = vreg->corner_count; ++ rdesc->name = init_data->constraints.name; ++ rdesc->owner = THIS_MODULE; ++ rdesc->type = REGULATOR_VOLTAGE; ++ ++ config.dev = vreg->thread->ctrl->dev; ++ config.driver_data = vreg; ++ config.init_data = init_data; ++ config.of_node = vreg->of_node; ++ ++ vreg->rdev = regulator_register(vreg->thread->ctrl->dev, rdesc, &config); ++ if (IS_ERR(vreg->rdev)) { ++ rc = PTR_ERR(vreg->rdev); ++ cpr3_err(vreg, "regulator_register failed, rc=%d\n", rc); ++ return rc; ++ } ++ ++ return 0; ++} ++ ++static int debugfs_int_set(void *data, u64 val) ++{ ++ *(int *)data = val; ++ return 0; ++} ++ ++static int debugfs_int_get(void *data, u64 *val) ++{ ++ *val = *(int *)data; ++ return 0; ++} ++DEFINE_SIMPLE_ATTRIBUTE(fops_int, debugfs_int_get, debugfs_int_set, "%lld\n"); ++DEFINE_SIMPLE_ATTRIBUTE(fops_int_ro, debugfs_int_get, NULL, "%lld\n"); ++DEFINE_SIMPLE_ATTRIBUTE(fops_int_wo, NULL, debugfs_int_set, "%lld\n"); ++ ++/** ++ * debugfs_create_int - create a debugfs file that is used to read and write a ++ * signed int value ++ * @name: Pointer to a string containing the name of the file to ++ * create ++ * @mode: The permissions that the file should have ++ * @parent: Pointer to the parent dentry for this file. This should ++ * be a directory dentry if set. If this parameter is ++ * %NULL, then the file will be created in the root of the ++ * debugfs filesystem. ++ * @value: Pointer to the variable that the file should read to and ++ * write from ++ * ++ * This function creates a file in debugfs with the given name that ++ * contains the value of the variable @value. If the @mode variable is so ++ * set, it can be read from, and written to. ++ * ++ * This function will return a pointer to a dentry if it succeeds. This ++ * pointer must be passed to the debugfs_remove() function when the file is ++ * to be removed. If an error occurs, %NULL will be returned. 
++ */ ++static struct dentry *debugfs_create_int(const char *name, umode_t mode, ++ struct dentry *parent, int *value) ++{ ++ /* if there are no write bits set, make read only */ ++ if (!(mode & S_IWUGO)) ++ return debugfs_create_file(name, mode, parent, value, ++ &fops_int_ro); ++ /* if there are no read bits set, make write only */ ++ if (!(mode & S_IRUGO)) ++ return debugfs_create_file(name, mode, parent, value, ++ &fops_int_wo); ++ ++ return debugfs_create_file(name, mode, parent, value, &fops_int); ++} ++ ++static int debugfs_bool_get(void *data, u64 *val) ++{ ++ *val = *(bool *)data; ++ return 0; ++} ++DEFINE_SIMPLE_ATTRIBUTE(fops_bool_ro, debugfs_bool_get, NULL, "%lld\n"); ++ ++/** ++ * struct cpr3_debug_corner_info - data structure used by the ++ * cpr3_debugfs_create_corner_int function ++ * @vreg: Pointer to the CPR3 regulator ++ * @index: Pointer to the corner array index ++ * @member_offset: Offset in bytes from the beginning of struct cpr3_corner ++ * to the beginning of the value to be read from ++ * @corner: Pointer to the CPR3 corner array ++ */ ++struct cpr3_debug_corner_info { ++ struct cpr3_regulator *vreg; ++ int *index; ++ size_t member_offset; ++ struct cpr3_corner *corner; ++}; ++ ++static int cpr3_debug_corner_int_get(void *data, u64 *val) ++{ ++ struct cpr3_debug_corner_info *info = data; ++ struct cpr3_controller *ctrl = info->vreg->thread->ctrl; ++ int i; ++ ++ mutex_lock(&ctrl->lock); ++ ++ i = *info->index; ++ if (i < 0) ++ i = 0; ++ ++ *val = *(int *)((char *)&info->vreg->corner[i] + info->member_offset); ++ ++ mutex_unlock(&ctrl->lock); ++ ++ return 0; ++} ++DEFINE_SIMPLE_ATTRIBUTE(cpr3_debug_corner_int_fops, cpr3_debug_corner_int_get, ++ NULL, "%lld\n"); ++ ++/** ++ * cpr3_debugfs_create_corner_int - create a debugfs file that is used to read ++ * a signed int value out of a CPR3 regulator's corner array ++ * @vreg: Pointer to the CPR3 regulator ++ * @name: Pointer to a string containing the name of the file to ++ * create ++ * @mode: The permissions that the file should have ++ * @parent: Pointer to the parent dentry for this file. This should ++ * be a directory dentry if set. If this parameter is ++ * %NULL, then the file will be created in the root of the ++ * debugfs filesystem. ++ * @index: Pointer to the corner array index ++ * @member_offset: Offset in bytes from the beginning of struct cpr3_corner ++ * to the beginning of the value to be read from ++ * ++ * This function creates a file in debugfs with the given name that ++ * contains the value of the int type variable vreg->corner[index].member ++ * where member_offset == offsetof(struct cpr3_corner, member). 
++ */ ++static struct dentry *cpr3_debugfs_create_corner_int( ++ struct cpr3_regulator *vreg, const char *name, umode_t mode, ++ struct dentry *parent, int *index, size_t member_offset) ++{ ++ struct cpr3_debug_corner_info *info; ++ ++ info = devm_kzalloc(vreg->thread->ctrl->dev, sizeof(*info), GFP_KERNEL); ++ if (!info) ++ return NULL; ++ ++ info->vreg = vreg; ++ info->index = index; ++ info->member_offset = member_offset; ++ ++ return debugfs_create_file(name, mode, parent, info, ++ &cpr3_debug_corner_int_fops); ++} ++ ++static int cpr3_debug_quot_open(struct inode *inode, struct file *file) ++{ ++ struct cpr3_debug_corner_info *info = inode->i_private; ++ struct cpr3_thread *thread = info->vreg->thread; ++ int size, i, pos; ++ u32 *quot; ++ char *buf; ++ ++ /* ++ * Max size: ++ * - 10 digits + ' ' or '\n' = 11 bytes per number ++ * - terminating '\0' ++ */ ++ size = CPR3_RO_COUNT * 11; ++ buf = kzalloc(size + 1, GFP_KERNEL); ++ if (!buf) ++ return -ENOMEM; ++ ++ file->private_data = buf; ++ ++ mutex_lock(&thread->ctrl->lock); ++ ++ quot = info->corner[*info->index].target_quot; ++ ++ for (i = 0, pos = 0; i < CPR3_RO_COUNT; i++) ++ pos += scnprintf(buf + pos, size - pos, "%u%c", ++ quot[i], i < CPR3_RO_COUNT - 1 ? ' ' : '\n'); ++ ++ mutex_unlock(&thread->ctrl->lock); ++ ++ return nonseekable_open(inode, file); ++} ++ ++static ssize_t cpr3_debug_quot_read(struct file *file, char __user *buf, ++ size_t len, loff_t *ppos) ++{ ++ return simple_read_from_buffer(buf, len, ppos, file->private_data, ++ strlen(file->private_data)); ++} ++ ++static int cpr3_debug_quot_release(struct inode *inode, struct file *file) ++{ ++ kfree(file->private_data); ++ ++ return 0; ++} ++ ++static const struct file_operations cpr3_debug_quot_fops = { ++ .owner = THIS_MODULE, ++ .open = cpr3_debug_quot_open, ++ .release = cpr3_debug_quot_release, ++ .read = cpr3_debug_quot_read, ++}; ++ ++/** ++ * cpr3_regulator_debugfs_corner_add() - add debugfs files to expose ++ * configuration data for the CPR corner ++ * @vreg: Pointer to the CPR3 regulator ++ * @corner_dir: Pointer to the parent corner dentry for the new files ++ * @index: Pointer to the corner array index ++ * ++ * Return: none ++ */ ++static void cpr3_regulator_debugfs_corner_add(struct cpr3_regulator *vreg, ++ struct dentry *corner_dir, int *index) ++{ ++ struct cpr3_debug_corner_info *info; ++ struct dentry *temp; ++ ++ temp = cpr3_debugfs_create_corner_int(vreg, "floor_volt", S_IRUGO, ++ corner_dir, index, offsetof(struct cpr3_corner, floor_volt)); ++ if (IS_ERR_OR_NULL(temp)) { ++ cpr3_err(vreg, "floor_volt debugfs file creation failed\n"); ++ return; ++ } ++ ++ temp = cpr3_debugfs_create_corner_int(vreg, "ceiling_volt", S_IRUGO, ++ corner_dir, index, offsetof(struct cpr3_corner, ceiling_volt)); ++ if (IS_ERR_OR_NULL(temp)) { ++ cpr3_err(vreg, "ceiling_volt debugfs file creation failed\n"); ++ return; ++ } ++ ++ temp = cpr3_debugfs_create_corner_int(vreg, "open_loop_volt", S_IRUGO, ++ corner_dir, index, ++ offsetof(struct cpr3_corner, open_loop_volt)); ++ if (IS_ERR_OR_NULL(temp)) { ++ cpr3_err(vreg, "open_loop_volt debugfs file creation failed\n"); ++ return; ++ } ++ ++ temp = cpr3_debugfs_create_corner_int(vreg, "last_volt", S_IRUGO, ++ corner_dir, index, offsetof(struct cpr3_corner, last_volt)); ++ if (IS_ERR_OR_NULL(temp)) { ++ cpr3_err(vreg, "last_volt debugfs file creation failed\n"); ++ return; ++ } ++ ++ info = devm_kzalloc(vreg->thread->ctrl->dev, sizeof(*info), GFP_KERNEL); ++ if (!info) ++ return; ++ ++ info->vreg = vreg; ++ info->index = 
index; ++ info->corner = vreg->corner; ++ ++ temp = debugfs_create_file("target_quots", S_IRUGO, corner_dir, ++ info, &cpr3_debug_quot_fops); ++ if (IS_ERR_OR_NULL(temp)) { ++ cpr3_err(vreg, "target_quots debugfs file creation failed\n"); ++ return; ++ } ++} ++ ++/** ++ * cpr3_debug_corner_index_set() - debugfs callback used to change the ++ * value of the CPR3 regulator debug_corner index ++ * @data: Pointer to private data which is equal to the CPR3 ++ * regulator pointer ++ * @val: New value for debug_corner ++ * ++ * Return: 0 on success, errno on failure ++ */ ++static int cpr3_debug_corner_index_set(void *data, u64 val) ++{ ++ struct cpr3_regulator *vreg = data; ++ ++ if (val < CPR3_CORNER_OFFSET || val > vreg->corner_count) { ++ cpr3_err(vreg, "invalid corner index %llu; allowed values: %d-%d\n", ++ val, CPR3_CORNER_OFFSET, vreg->corner_count); ++ return -EINVAL; ++ } ++ ++ mutex_lock(&vreg->thread->ctrl->lock); ++ vreg->debug_corner = val - CPR3_CORNER_OFFSET; ++ mutex_unlock(&vreg->thread->ctrl->lock); ++ ++ return 0; ++} ++ ++/** ++ * cpr3_debug_corner_index_get() - debugfs callback used to retrieve ++ * the value of the CPR3 regulator debug_corner index ++ * @data: Pointer to private data which is equal to the CPR3 ++ * regulator pointer ++ * @val: Output parameter written with the value of ++ * debug_corner ++ * ++ * Return: 0 on success, errno on failure ++ */ ++static int cpr3_debug_corner_index_get(void *data, u64 *val) ++{ ++ struct cpr3_regulator *vreg = data; ++ ++ *val = vreg->debug_corner + CPR3_CORNER_OFFSET; ++ ++ return 0; ++} ++DEFINE_SIMPLE_ATTRIBUTE(cpr3_debug_corner_index_fops, ++ cpr3_debug_corner_index_get, ++ cpr3_debug_corner_index_set, ++ "%llu\n"); ++ ++/** ++ * cpr3_debug_current_corner_index_get() - debugfs callback used to retrieve ++ * the value of the CPR3 regulator current_corner index ++ * @data: Pointer to private data which is equal to the CPR3 ++ * regulator pointer ++ * @val: Output parameter written with the value of ++ * current_corner ++ * ++ * Return: 0 on success, errno on failure ++ */ ++static int cpr3_debug_current_corner_index_get(void *data, u64 *val) ++{ ++ struct cpr3_regulator *vreg = data; ++ ++ *val = vreg->current_corner + CPR3_CORNER_OFFSET; ++ ++ return 0; ++} ++DEFINE_SIMPLE_ATTRIBUTE(cpr3_debug_current_corner_index_fops, ++ cpr3_debug_current_corner_index_get, ++ NULL, "%llu\n"); ++ ++/** ++ * cpr3_regulator_debugfs_vreg_add() - add debugfs files to expose configuration ++ * data for the CPR3 regulator ++ * @vreg: Pointer to the CPR3 regulator ++ * @thread_dir CPR3 thread debugfs directory handle ++ * ++ * Return: none ++ */ ++static void cpr3_regulator_debugfs_vreg_add(struct cpr3_regulator *vreg, ++ struct dentry *thread_dir) ++{ ++ struct dentry *temp, *corner_dir, *vreg_dir; ++ ++ vreg_dir = debugfs_create_dir(vreg->name, thread_dir); ++ if (IS_ERR_OR_NULL(vreg_dir)) { ++ cpr3_err(vreg, "%s debugfs directory creation failed\n", ++ vreg->name); ++ return; ++ } ++ ++ temp = debugfs_create_int("speed_bin_fuse", S_IRUGO, vreg_dir, ++ &vreg->speed_bin_fuse); ++ if (IS_ERR_OR_NULL(temp)) { ++ cpr3_err(vreg, "speed_bin_fuse debugfs file creation failed\n"); ++ return; ++ } ++ ++ temp = debugfs_create_int("cpr_rev_fuse", S_IRUGO, vreg_dir, ++ &vreg->cpr_rev_fuse); ++ if (IS_ERR_OR_NULL(temp)) { ++ cpr3_err(vreg, "cpr_rev_fuse debugfs file creation failed\n"); ++ return; ++ } ++ ++ temp = debugfs_create_int("fuse_combo", S_IRUGO, vreg_dir, ++ &vreg->fuse_combo); ++ if (IS_ERR_OR_NULL(temp)) { ++ cpr3_err(vreg, "fuse_combo debugfs 
file creation failed\n"); ++ return; ++ } ++ ++ temp = debugfs_create_int("corner_count", S_IRUGO, vreg_dir, ++ &vreg->corner_count); ++ if (IS_ERR_OR_NULL(temp)) { ++ cpr3_err(vreg, "corner_count debugfs file creation failed\n"); ++ return; ++ } ++ ++ corner_dir = debugfs_create_dir("corner", vreg_dir); ++ if (IS_ERR_OR_NULL(corner_dir)) { ++ cpr3_err(vreg, "corner debugfs directory creation failed\n"); ++ return; ++ } ++ ++ temp = debugfs_create_file("index", S_IRUGO | S_IWUSR, corner_dir, ++ vreg, &cpr3_debug_corner_index_fops); ++ if (IS_ERR_OR_NULL(temp)) { ++ cpr3_err(vreg, "index debugfs file creation failed\n"); ++ return; ++ } ++ ++ cpr3_regulator_debugfs_corner_add(vreg, corner_dir, ++ &vreg->debug_corner); ++ ++ corner_dir = debugfs_create_dir("current_corner", vreg_dir); ++ if (IS_ERR_OR_NULL(corner_dir)) { ++ cpr3_err(vreg, "current_corner debugfs directory creation failed\n"); ++ return; ++ } ++ ++ temp = debugfs_create_file("index", S_IRUGO, corner_dir, ++ vreg, &cpr3_debug_current_corner_index_fops); ++ if (IS_ERR_OR_NULL(temp)) { ++ cpr3_err(vreg, "index debugfs file creation failed\n"); ++ return; ++ } ++ ++ cpr3_regulator_debugfs_corner_add(vreg, corner_dir, ++ &vreg->current_corner); ++} ++ ++/** ++ * cpr3_regulator_debugfs_thread_add() - add debugfs files to expose ++ * configuration data for the CPR thread ++ * @thread: Pointer to the CPR3 thread ++ * ++ * Return: none ++ */ ++static void cpr3_regulator_debugfs_thread_add(struct cpr3_thread *thread) ++{ ++ struct cpr3_controller *ctrl = thread->ctrl; ++ struct dentry *aggr_dir, *temp, *thread_dir; ++ struct cpr3_debug_corner_info *info; ++ char buf[20]; ++ int *index; ++ int i; ++ ++ scnprintf(buf, sizeof(buf), "thread%u", thread->thread_id); ++ thread_dir = debugfs_create_dir(buf, thread->ctrl->debugfs); ++ if (IS_ERR_OR_NULL(thread_dir)) { ++ cpr3_err(ctrl, "thread %u %s debugfs directory creation failed\n", ++ thread->thread_id, buf); ++ return; ++ } ++ ++ aggr_dir = debugfs_create_dir("max_aggregated_params", thread_dir); ++ if (IS_ERR_OR_NULL(aggr_dir)) { ++ cpr3_err(ctrl, "thread %u max_aggregated_params debugfs directory creation failed\n", ++ thread->thread_id); ++ return; ++ } ++ ++ temp = debugfs_create_int("floor_volt", S_IRUGO, aggr_dir, ++ &thread->aggr_corner.floor_volt); ++ if (IS_ERR_OR_NULL(temp)) { ++ cpr3_err(ctrl, "thread %u aggr floor_volt debugfs file creation failed\n", ++ thread->thread_id); ++ return; ++ } ++ ++ temp = debugfs_create_int("ceiling_volt", S_IRUGO, aggr_dir, ++ &thread->aggr_corner.ceiling_volt); ++ if (IS_ERR_OR_NULL(temp)) { ++ cpr3_err(ctrl, "thread %u aggr ceiling_volt debugfs file creation failed\n", ++ thread->thread_id); ++ return; ++ } ++ ++ temp = debugfs_create_int("open_loop_volt", S_IRUGO, aggr_dir, ++ &thread->aggr_corner.open_loop_volt); ++ if (IS_ERR_OR_NULL(temp)) { ++ cpr3_err(ctrl, "thread %u aggr open_loop_volt debugfs file creation failed\n", ++ thread->thread_id); ++ return; ++ } ++ ++ temp = debugfs_create_int("last_volt", S_IRUGO, aggr_dir, ++ &thread->aggr_corner.last_volt); ++ if (IS_ERR_OR_NULL(temp)) { ++ cpr3_err(ctrl, "thread %u aggr last_volt debugfs file creation failed\n", ++ thread->thread_id); ++ return; ++ } ++ ++ info = devm_kzalloc(thread->ctrl->dev, sizeof(*info), GFP_KERNEL); ++ index = devm_kzalloc(thread->ctrl->dev, sizeof(*index), GFP_KERNEL); ++ if (!info || !index) ++ return; ++ *index = 0; ++ info->vreg = &thread->vreg[0]; ++ info->index = index; ++ info->corner = &thread->aggr_corner; ++ ++ temp = debugfs_create_file("target_quots", 
S_IRUGO, aggr_dir, ++ info, &cpr3_debug_quot_fops); ++ if (IS_ERR_OR_NULL(temp)) { ++ cpr3_err(ctrl, "thread %u target_quots debugfs file creation failed\n", ++ thread->thread_id); ++ return; ++ } ++ ++ for (i = 0; i < thread->vreg_count; i++) ++ cpr3_regulator_debugfs_vreg_add(&thread->vreg[i], thread_dir); ++} ++ ++/** ++ * cpr3_debug_closed_loop_enable_set() - debugfs callback used to change the ++ * value of the CPR controller cpr_allowed_sw flag which enables or ++ * disables closed-loop operation ++ * @data: Pointer to private data which is equal to the CPR ++ * controller pointer ++ * @val: New value for cpr_allowed_sw ++ * ++ * Return: 0 on success, errno on failure ++ */ ++static int cpr3_debug_closed_loop_enable_set(void *data, u64 val) ++{ ++ struct cpr3_controller *ctrl = data; ++ bool enable = !!val; ++ int rc; ++ ++ mutex_lock(&ctrl->lock); ++ ++ if (ctrl->cpr_allowed_sw == enable) ++ goto done; ++ ++ if (enable && !ctrl->cpr_allowed_hw) { ++ cpr3_err(ctrl, "CPR closed-loop operation is not allowed\n"); ++ goto done; ++ } ++ ++ ctrl->cpr_allowed_sw = enable; ++ ++ rc = cpr3_regulator_update_ctrl_state(ctrl); ++ if (rc) { ++ cpr3_err(ctrl, "could not change CPR enable state=%u, rc=%d\n", ++ enable, rc); ++ goto done; ++ } ++ ++ if (ctrl->proc_clock_throttle && !ctrl->cpr_enabled) { ++ rc = cpr3_clock_enable(ctrl); ++ if (rc) { ++ cpr3_err(ctrl, "clock enable failed, rc=%d\n", ++ rc); ++ goto done; ++ } ++ ctrl->cpr_enabled = true; ++ ++ cpr3_write(ctrl, CPR3_REG_PD_THROTTLE, ++ CPR3_PD_THROTTLE_DISABLE); ++ ++ cpr3_clock_disable(ctrl); ++ ctrl->cpr_enabled = false; ++ } ++ ++ cpr3_debug(ctrl, "closed-loop=%s\n", enable ? "enabled" : "disabled"); ++done: ++ mutex_unlock(&ctrl->lock); ++ return 0; ++} ++ ++/** ++ * cpr3_debug_closed_loop_enable_get() - debugfs callback used to retrieve ++ * the value of the CPR controller cpr_allowed_sw flag which ++ * indicates if closed-loop operation is enabled ++ * @data: Pointer to private data which is equal to the CPR ++ * controller pointer ++ * @val: Output parameter written with the value of ++ * cpr_allowed_sw ++ * ++ * Return: 0 on success, errno on failure ++ */ ++static int cpr3_debug_closed_loop_enable_get(void *data, u64 *val) ++{ ++ struct cpr3_controller *ctrl = data; ++ ++ *val = ctrl->cpr_allowed_sw; ++ ++ return 0; ++} ++DEFINE_SIMPLE_ATTRIBUTE(cpr3_debug_closed_loop_enable_fops, ++ cpr3_debug_closed_loop_enable_get, ++ cpr3_debug_closed_loop_enable_set, ++ "%llu\n"); ++ ++/** ++ * cpr3_debug_hw_closed_loop_enable_set() - debugfs callback used to change the ++ * value of the CPR controller use_hw_closed_loop flag which ++ * switches between software closed-loop and hardware closed-loop ++ * operation for CPR3 and CPR4 controllers and between open-loop ++ * and full hardware closed-loop operation for CPRh controllers. 
++ * @data: Pointer to private data which is equal to the CPR ++ * controller pointer ++ * @val: New value for use_hw_closed_loop ++ * ++ * Return: 0 on success, errno on failure ++ */ ++static int cpr3_debug_hw_closed_loop_enable_set(void *data, u64 val) ++{ ++ struct cpr3_controller *ctrl = data; ++ bool use_hw_closed_loop = !!val; ++ struct cpr3_regulator *vreg; ++ bool cpr_enabled; ++ int i, j, k, rc; ++ ++ mutex_lock(&ctrl->lock); ++ ++ if (ctrl->use_hw_closed_loop == use_hw_closed_loop) ++ goto done; ++ ++ if (ctrl->ctrl_type == CPR_CTRL_TYPE_CPR4) { ++ rc = cpr3_ctrl_clear_cpr4_config(ctrl); ++ if (rc) { ++ cpr3_err(ctrl, "failed to clear CPR4 configuration,rc=%d\n", ++ rc); ++ goto done; ++ } ++ } ++ ++ cpr3_ctrl_loop_disable(ctrl); ++ ++ ctrl->use_hw_closed_loop = use_hw_closed_loop; ++ ++ cpr_enabled = ctrl->cpr_enabled; ++ ++ /* Ensure that CPR clocks are enabled before writing to registers. */ ++ if (!cpr_enabled) { ++ rc = cpr3_clock_enable(ctrl); ++ if (rc) { ++ cpr3_err(ctrl, "clock enable failed, rc=%d\n", rc); ++ goto done; ++ } ++ ctrl->cpr_enabled = true; ++ } ++ ++ if (ctrl->use_hw_closed_loop) ++ cpr3_write(ctrl, CPR3_REG_IRQ_EN, 0); ++ ++ if (ctrl->ctrl_type == CPR_CTRL_TYPE_CPR4) { ++ cpr3_masked_write(ctrl, CPR4_REG_MARGIN_ADJ_CTL, ++ CPR4_MARGIN_ADJ_CTL_HW_CLOSED_LOOP_EN_MASK, ++ ctrl->use_hw_closed_loop ++ ? CPR4_MARGIN_ADJ_CTL_HW_CLOSED_LOOP_ENABLE ++ : CPR4_MARGIN_ADJ_CTL_HW_CLOSED_LOOP_DISABLE); ++ } else if (ctrl->ctrl_type == CPR_CTRL_TYPE_CPR3) { ++ cpr3_write(ctrl, CPR3_REG_HW_CLOSED_LOOP, ++ ctrl->use_hw_closed_loop ++ ? CPR3_HW_CLOSED_LOOP_ENABLE ++ : CPR3_HW_CLOSED_LOOP_DISABLE); ++ } ++ ++ /* Turn off CPR clocks if they were off before this function call. */ ++ if (!cpr_enabled) { ++ cpr3_clock_disable(ctrl); ++ ctrl->cpr_enabled = false; ++ } ++ ++ if (ctrl->use_hw_closed_loop && ctrl->ctrl_type == CPR_CTRL_TYPE_CPR3) { ++ rc = regulator_enable(ctrl->vdd_limit_regulator); ++ if (rc) { ++ cpr3_err(ctrl, "CPR limit regulator enable failed, rc=%d\n", ++ rc); ++ goto done; ++ } ++ } else if (!ctrl->use_hw_closed_loop ++ && ctrl->ctrl_type == CPR_CTRL_TYPE_CPR3) { ++ rc = regulator_disable(ctrl->vdd_limit_regulator); ++ if (rc) { ++ cpr3_err(ctrl, "CPR limit regulator disable failed, rc=%d\n", ++ rc); ++ goto done; ++ } ++ } ++ ++ /* ++ * Due to APM and mem-acc floor restriction constraints, ++ * the closed-loop voltage may be different when using ++ * software closed-loop vs hardware closed-loop. Therefore, ++ * reset the cached closed-loop voltage for all corners to the ++ * corresponding open-loop voltage when switching between ++ * SW and HW closed-loop mode. ++ */ ++ for (i = 0; i < ctrl->thread_count; i++) { ++ for (j = 0; j < ctrl->thread[i].vreg_count; j++) { ++ vreg = &ctrl->thread[i].vreg[j]; ++ for (k = 0; k < vreg->corner_count; k++) ++ vreg->corner[k].last_volt ++ = vreg->corner[k].open_loop_volt; ++ } ++ } ++ ++ /* Skip last_volt caching */ ++ ctrl->last_corner_was_closed_loop = false; ++ ++ rc = cpr3_regulator_update_ctrl_state(ctrl); ++ if (rc) { ++ cpr3_err(ctrl, "could not change CPR HW closed-loop enable state=%u, rc=%d\n", ++ use_hw_closed_loop, rc); ++ goto done; ++ } ++ ++ cpr3_debug(ctrl, "CPR mode=%s\n", ++ use_hw_closed_loop ? 
++ "HW closed-loop" : "SW closed-loop"); ++done: ++ mutex_unlock(&ctrl->lock); ++ return 0; ++} ++ ++/** ++ * cpr3_debug_hw_closed_loop_enable_get() - debugfs callback used to retrieve ++ * the value of the CPR controller use_hw_closed_loop flag which ++ * indicates if hardware closed-loop operation is being used in ++ * place of software closed-loop operation ++ * @data: Pointer to private data which is equal to the CPR ++ * controller pointer ++ * @val: Output parameter written with the value of ++ * use_hw_closed_loop ++ * ++ * Return: 0 on success, errno on failure ++ */ ++static int cpr3_debug_hw_closed_loop_enable_get(void *data, u64 *val) ++{ ++ struct cpr3_controller *ctrl = data; ++ ++ *val = ctrl->use_hw_closed_loop; ++ ++ return 0; ++} ++DEFINE_SIMPLE_ATTRIBUTE(cpr3_debug_hw_closed_loop_enable_fops, ++ cpr3_debug_hw_closed_loop_enable_get, ++ cpr3_debug_hw_closed_loop_enable_set, ++ "%llu\n"); ++ ++/** ++ * cpr3_debug_trigger_aging_measurement_set() - debugfs callback used to trigger ++ * another CPR measurement ++ * @data: Pointer to private data which is equal to the CPR ++ * controller pointer ++ * @val: Unused ++ * ++ * Return: 0 on success, errno on failure ++ */ ++static int cpr3_debug_trigger_aging_measurement_set(void *data, u64 val) ++{ ++ struct cpr3_controller *ctrl = data; ++ int rc; ++ ++ mutex_lock(&ctrl->lock); ++ ++ if (ctrl->ctrl_type == CPR_CTRL_TYPE_CPR4) { ++ rc = cpr3_ctrl_clear_cpr4_config(ctrl); ++ if (rc) { ++ cpr3_err(ctrl, "failed to clear CPR4 configuration,rc=%d\n", ++ rc); ++ goto done; ++ } ++ } ++ ++ cpr3_ctrl_loop_disable(ctrl); ++ ++ cpr3_regulator_set_aging_ref_adjustment(ctrl, INT_MAX); ++ ctrl->aging_required = true; ++ ctrl->aging_succeeded = false; ++ ctrl->aging_failed = false; ++ ++ rc = cpr3_regulator_update_ctrl_state(ctrl); ++ if (rc) { ++ cpr3_err(ctrl, "could not update the CPR controller state, rc=%d\n", ++ rc); ++ goto done; ++ } ++ ++done: ++ mutex_unlock(&ctrl->lock); ++ return 0; ++} ++DEFINE_SIMPLE_ATTRIBUTE(cpr3_debug_trigger_aging_measurement_fops, ++ NULL, ++ cpr3_debug_trigger_aging_measurement_set, ++ "%llu\n"); ++ ++/** ++ * cpr3_regulator_debugfs_ctrl_add() - add debugfs files to expose configuration ++ * data for the CPR controller ++ * @ctrl: Pointer to the CPR3 controller ++ * ++ * Return: none ++ */ ++static void cpr3_regulator_debugfs_ctrl_add(struct cpr3_controller *ctrl) ++{ ++ struct dentry *temp, *aggr_dir; ++ int i; ++ ++ /* Add cpr3-regulator base directory if it isn't present already. 
*/ ++ if (cpr3_debugfs_base == NULL) { ++ cpr3_debugfs_base = debugfs_create_dir("cpr3-regulator", NULL); ++ if (IS_ERR_OR_NULL(cpr3_debugfs_base)) { ++ cpr3_err(ctrl, "cpr3-regulator debugfs base directory creation failed\n"); ++ cpr3_debugfs_base = NULL; ++ return; ++ } ++ } ++ ++ ctrl->debugfs = debugfs_create_dir(ctrl->name, cpr3_debugfs_base); ++ if (IS_ERR_OR_NULL(ctrl->debugfs)) { ++ cpr3_err(ctrl, "cpr3-regulator controller debugfs directory creation failed\n"); ++ return; ++ } ++ ++ temp = debugfs_create_file("cpr_closed_loop_enable", S_IRUGO | S_IWUSR, ++ ctrl->debugfs, ctrl, ++ &cpr3_debug_closed_loop_enable_fops); ++ if (IS_ERR_OR_NULL(temp)) { ++ cpr3_err(ctrl, "cpr_closed_loop_enable debugfs file creation failed\n"); ++ return; ++ } ++ ++ if (ctrl->supports_hw_closed_loop) { ++ temp = debugfs_create_file("use_hw_closed_loop", ++ S_IRUGO | S_IWUSR, ctrl->debugfs, ctrl, ++ &cpr3_debug_hw_closed_loop_enable_fops); ++ if (IS_ERR_OR_NULL(temp)) { ++ cpr3_err(ctrl, "use_hw_closed_loop debugfs file creation failed\n"); ++ return; ++ } ++ } ++ ++ temp = debugfs_create_int("thread_count", S_IRUGO, ctrl->debugfs, ++ &ctrl->thread_count); ++ if (IS_ERR_OR_NULL(temp)) { ++ cpr3_err(ctrl, "thread_count debugfs file creation failed\n"); ++ return; ++ } ++ ++ if (ctrl->apm) { ++ temp = debugfs_create_int("apm_threshold_volt", S_IRUGO, ++ ctrl->debugfs, &ctrl->apm_threshold_volt); ++ if (IS_ERR_OR_NULL(temp)) { ++ cpr3_err(ctrl, "apm_threshold_volt debugfs file creation failed\n"); ++ return; ++ } ++ } ++ ++ if (ctrl->aging_required || ctrl->aging_succeeded ++ || ctrl->aging_failed) { ++ temp = debugfs_create_int("aging_adj_volt", S_IRUGO, ++ ctrl->debugfs, &ctrl->aging_ref_adjust_volt); ++ if (IS_ERR_OR_NULL(temp)) { ++ cpr3_err(ctrl, "aging_adj_volt debugfs file creation failed\n"); ++ return; ++ } ++ ++ temp = debugfs_create_file("aging_succeeded", S_IRUGO, ++ ctrl->debugfs, &ctrl->aging_succeeded, &fops_bool_ro); ++ if (IS_ERR_OR_NULL(temp)) { ++ cpr3_err(ctrl, "aging_succeeded debugfs file creation failed\n"); ++ return; ++ } ++ ++ temp = debugfs_create_file("aging_failed", S_IRUGO, ++ ctrl->debugfs, &ctrl->aging_failed, &fops_bool_ro); ++ if (IS_ERR_OR_NULL(temp)) { ++ cpr3_err(ctrl, "aging_failed debugfs file creation failed\n"); ++ return; ++ } ++ ++ temp = debugfs_create_file("aging_trigger", S_IWUSR, ++ ctrl->debugfs, ctrl, ++ &cpr3_debug_trigger_aging_measurement_fops); ++ if (IS_ERR_OR_NULL(temp)) { ++ cpr3_err(ctrl, "aging_trigger debugfs file creation failed\n"); ++ return; ++ } ++ } ++ ++ aggr_dir = debugfs_create_dir("max_aggregated_voltages", ctrl->debugfs); ++ if (IS_ERR_OR_NULL(aggr_dir)) { ++ cpr3_err(ctrl, "max_aggregated_voltages debugfs directory creation failed\n"); ++ return; ++ } ++ ++ temp = debugfs_create_int("floor_volt", S_IRUGO, aggr_dir, ++ &ctrl->aggr_corner.floor_volt); ++ if (IS_ERR_OR_NULL(temp)) { ++ cpr3_err(ctrl, "aggr floor_volt debugfs file creation failed\n"); ++ return; ++ } ++ ++ temp = debugfs_create_int("ceiling_volt", S_IRUGO, aggr_dir, ++ &ctrl->aggr_corner.ceiling_volt); ++ if (IS_ERR_OR_NULL(temp)) { ++ cpr3_err(ctrl, "aggr ceiling_volt debugfs file creation failed\n"); ++ return; ++ } ++ ++ temp = debugfs_create_int("open_loop_volt", S_IRUGO, aggr_dir, ++ &ctrl->aggr_corner.open_loop_volt); ++ if (IS_ERR_OR_NULL(temp)) { ++ cpr3_err(ctrl, "aggr open_loop_volt debugfs file creation failed\n"); ++ return; ++ } ++ ++ temp = debugfs_create_int("last_volt", S_IRUGO, aggr_dir, ++ &ctrl->aggr_corner.last_volt); ++ if (IS_ERR_OR_NULL(temp)) { ++ 
cpr3_err(ctrl, "aggr last_volt debugfs file creation failed\n"); ++ return; ++ } ++ ++ for (i = 0; i < ctrl->thread_count; i++) ++ cpr3_regulator_debugfs_thread_add(&ctrl->thread[i]); ++} ++ ++/** ++ * cpr3_regulator_debugfs_ctrl_remove() - remove debugfs files for the CPR ++ * controller ++ * @ctrl: Pointer to the CPR3 controller ++ * ++ * Note, this function must be called after the controller has been removed from ++ * cpr3_controller_list and while the cpr3_controller_list_mutex lock is held. ++ * ++ * Return: none ++ */ ++static void cpr3_regulator_debugfs_ctrl_remove(struct cpr3_controller *ctrl) ++{ ++ if (list_empty(&cpr3_controller_list)) { ++ debugfs_remove_recursive(cpr3_debugfs_base); ++ cpr3_debugfs_base = NULL; ++ } else { ++ debugfs_remove_recursive(ctrl->debugfs); ++ } ++} ++ ++/** ++ * cpr3_regulator_init_ctrl_data() - performs initialization of CPR controller ++ * elements ++ * @ctrl: Pointer to the CPR3 controller ++ * ++ * Return: 0 on success, errno on failure ++ */ ++static int cpr3_regulator_init_ctrl_data(struct cpr3_controller *ctrl) ++{ ++ /* Read the initial vdd voltage from hardware. */ ++ ctrl->aggr_corner.last_volt ++ = regulator_get_voltage(ctrl->vdd_regulator); ++ if (ctrl->aggr_corner.last_volt < 0) { ++ cpr3_err(ctrl, "regulator_get_voltage(vdd) failed, rc=%d\n", ++ ctrl->aggr_corner.last_volt); ++ return ctrl->aggr_corner.last_volt; ++ } ++ ctrl->aggr_corner.open_loop_volt = ctrl->aggr_corner.last_volt; ++ ++ return 0; ++} ++ ++/** ++ * cpr3_regulator_init_vreg_data() - performs initialization of common CPR3 ++ * regulator elements and validate aging configurations ++ * @vreg: Pointer to the CPR3 regulator ++ * ++ * Return: 0 on success, errno on failure ++ */ ++static int cpr3_regulator_init_vreg_data(struct cpr3_regulator *vreg) ++{ ++ int i, j; ++ bool init_aging; ++ ++ vreg->current_corner = CPR3_REGULATOR_CORNER_INVALID; ++ vreg->last_closed_loop_corner = CPR3_REGULATOR_CORNER_INVALID; ++ ++ init_aging = vreg->aging_allowed && vreg->thread->ctrl->aging_required; ++ ++ for (i = 0; i < vreg->corner_count; i++) { ++ vreg->corner[i].last_volt = vreg->corner[i].open_loop_volt; ++ vreg->corner[i].irq_en = CPR3_IRQ_UP | CPR3_IRQ_DOWN; ++ ++ vreg->corner[i].ro_mask = 0; ++ for (j = 0; j < CPR3_RO_COUNT; j++) { ++ if (vreg->corner[i].target_quot[j] == 0) ++ vreg->corner[i].ro_mask |= BIT(j); ++ } ++ ++ if (init_aging) { ++ vreg->corner[i].unaged_floor_volt ++ = vreg->corner[i].floor_volt; ++ vreg->corner[i].unaged_ceiling_volt ++ = vreg->corner[i].ceiling_volt; ++ vreg->corner[i].unaged_open_loop_volt ++ = vreg->corner[i].open_loop_volt; ++ } ++ ++ if (vreg->aging_allowed) { ++ if (vreg->corner[i].unaged_floor_volt <= 0) { ++ cpr3_err(vreg, "invalid unaged_floor_volt[%d] = %d\n", ++ i, vreg->corner[i].unaged_floor_volt); ++ return -EINVAL; ++ } ++ if (vreg->corner[i].unaged_ceiling_volt <= 0) { ++ cpr3_err(vreg, "invalid unaged_ceiling_volt[%d] = %d\n", ++ i, vreg->corner[i].unaged_ceiling_volt); ++ return -EINVAL; ++ } ++ if (vreg->corner[i].unaged_open_loop_volt <= 0) { ++ cpr3_err(vreg, "invalid unaged_open_loop_volt[%d] = %d\n", ++ i, vreg->corner[i].unaged_open_loop_volt); ++ return -EINVAL; ++ } ++ } ++ } ++ ++ if (vreg->aging_allowed && vreg->corner[vreg->aging_corner].ceiling_volt ++ > vreg->thread->ctrl->aging_ref_volt) { ++ cpr3_err(vreg, "aging corner %d ceiling voltage = %d > aging ref voltage = %d uV\n", ++ vreg->aging_corner, ++ vreg->corner[vreg->aging_corner].ceiling_volt, ++ vreg->thread->ctrl->aging_ref_volt); ++ return -EINVAL; ++ } ++ ++ 
return 0; ++} ++ ++/** ++ * cpr3_regulator_suspend() - perform common required CPR3 power down steps ++ * before the system enters suspend ++ * @ctrl: Pointer to the CPR3 controller ++ * ++ * Return: 0 on success, errno on failure ++ */ ++int cpr3_regulator_suspend(struct cpr3_controller *ctrl) ++{ ++ int rc; ++ ++ mutex_lock(&ctrl->lock); ++ ++ if (ctrl->ctrl_type == CPR_CTRL_TYPE_CPR4) { ++ rc = cpr3_ctrl_clear_cpr4_config(ctrl); ++ if (rc) { ++ cpr3_err(ctrl, "failed to clear CPR4 configuration,rc=%d\n", ++ rc); ++ mutex_unlock(&ctrl->lock); ++ return rc; ++ } ++ } ++ ++ cpr3_ctrl_loop_disable(ctrl); ++ ++ rc = cpr3_closed_loop_disable(ctrl); ++ if (rc) ++ cpr3_err(ctrl, "could not disable CPR, rc=%d\n", rc); ++ ++ ctrl->cpr_suspended = true; ++ ++ mutex_unlock(&ctrl->lock); ++ return 0; ++} ++ ++/** ++ * cpr3_regulator_resume() - perform common required CPR3 power up steps after ++ * the system resumes from suspend ++ * @ctrl: Pointer to the CPR3 controller ++ * ++ * Return: 0 on success, errno on failure ++ */ ++int cpr3_regulator_resume(struct cpr3_controller *ctrl) ++{ ++ int rc; ++ ++ mutex_lock(&ctrl->lock); ++ ++ ctrl->cpr_suspended = false; ++ rc = cpr3_regulator_update_ctrl_state(ctrl); ++ if (rc) ++ cpr3_err(ctrl, "could not enable CPR, rc=%d\n", rc); ++ ++ mutex_unlock(&ctrl->lock); ++ return 0; ++} ++ ++/** ++ * cpr3_regulator_validate_controller() - verify the data passed in via the ++ * cpr3_controller data structure ++ * @ctrl: Pointer to the CPR3 controller ++ * ++ * Return: 0 on success, errno on failure ++ */ ++static int cpr3_regulator_validate_controller(struct cpr3_controller *ctrl) ++{ ++ struct cpr3_thread *thread; ++ struct cpr3_regulator *vreg; ++ int i, j, allow_boost_vreg_count = 0; ++ ++ if (!ctrl->vdd_regulator) { ++ cpr3_err(ctrl, "vdd regulator missing\n"); ++ return -EINVAL; ++ } else if (ctrl->sensor_count <= 0 ++ || ctrl->sensor_count > CPR3_MAX_SENSOR_COUNT) { ++ cpr3_err(ctrl, "invalid CPR sensor count=%d\n", ++ ctrl->sensor_count); ++ return -EINVAL; ++ } else if (!ctrl->sensor_owner) { ++ cpr3_err(ctrl, "CPR sensor ownership table missing\n"); ++ return -EINVAL; ++ } ++ ++ if (ctrl->aging_required) { ++ for (i = 0; i < ctrl->aging_sensor_count; i++) { ++ if (ctrl->aging_sensor[i].sensor_id ++ >= ctrl->sensor_count) { ++ cpr3_err(ctrl, "aging_sensor[%d] id=%u is not in the value range 0-%d", ++ i, ctrl->aging_sensor[i].sensor_id, ++ ctrl->sensor_count - 1); ++ return -EINVAL; ++ } ++ } ++ } ++ ++ for (i = 0; i < ctrl->thread_count; i++) { ++ thread = &ctrl->thread[i]; ++ for (j = 0; j < thread->vreg_count; j++) { ++ vreg = &thread->vreg[j]; ++ if (vreg->allow_boost) ++ allow_boost_vreg_count++; ++ } ++ } ++ ++ if (allow_boost_vreg_count > 1) { ++ /* ++ * Boost feature is not allowed to be used for more ++ * than one CPR3 regulator of a CPR3 controller. ++ */ ++ cpr3_err(ctrl, "Boost feature is enabled for more than one regulator\n"); ++ return -EINVAL; ++ } ++ ++ return 0; ++} ++ ++/** ++ * cpr3_panic_callback() - panic notification callback function. This function ++ * is invoked when a kernel panic occurs. 
++ * @nfb: Notifier block pointer of CPR3 controller ++ * @event: Value passed unmodified to notifier function ++ * @data: Pointer passed unmodified to notifier function ++ * ++ * Return: NOTIFY_OK ++ */ ++static int cpr3_panic_callback(struct notifier_block *nfb, ++ unsigned long event, void *data) ++{ ++ struct cpr3_controller *ctrl = container_of(nfb, ++ struct cpr3_controller, panic_notifier); ++ struct cpr3_panic_regs_info *regs_info = ctrl->panic_regs_info; ++ struct cpr3_reg_info *reg; ++ int i = 0; ++ ++ for (i = 0; i < regs_info->reg_count; i++) { ++ reg = &(regs_info->regs[i]); ++ reg->value = readl_relaxed(reg->virt_addr); ++ pr_err("%s[0x%08x] = 0x%08x\n", reg->name, reg->addr, ++ reg->value); ++ } ++ /* ++ * Barrier to ensure that the information has been updated in the ++ * structure. ++ */ ++ mb(); ++ ++ return NOTIFY_OK; ++} ++ ++/** ++ * cpr3_regulator_register() - register the regulators for a CPR3 controller and ++ * perform CPR hardware initialization ++ * @pdev: Platform device pointer for the CPR3 controller ++ * @ctrl: Pointer to the CPR3 controller ++ * ++ * Return: 0 on success, errno on failure ++ */ ++int cpr3_regulator_register(struct platform_device *pdev, ++ struct cpr3_controller *ctrl) ++{ ++ struct device *dev = &pdev->dev; ++ struct resource *res; ++ int i, j, rc; ++ ++ if (!dev->of_node) { ++ dev_err(dev, "%s: Device tree node is missing\n", __func__); ++ return -EINVAL; ++ } ++ ++ if (!ctrl || !ctrl->name) { ++ dev_err(dev, "%s: CPR controller data is missing\n", __func__); ++ return -EINVAL; ++ } ++ ++ rc = cpr3_regulator_validate_controller(ctrl); ++ if (rc) { ++ cpr3_err(ctrl, "controller validation failed, rc=%d\n", rc); ++ return rc; ++ } ++ ++ mutex_init(&ctrl->lock); ++ ++ res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "cpr_ctrl"); ++ if (!res || !res->start) { ++ cpr3_err(ctrl, "CPR controller address is missing\n"); ++ return -ENXIO; ++ } ++ ctrl->cpr_ctrl_base = devm_ioremap(dev, res->start, resource_size(res)); ++ ++ if (ctrl->aging_possible_mask) { ++ /* ++ * Aging possible register address is required if an aging ++ * possible mask has been specified. ++ */ ++ res = platform_get_resource_byname(pdev, IORESOURCE_MEM, ++ "aging_allowed"); ++ if (!res || !res->start) { ++ cpr3_err(ctrl, "CPR aging allowed address is missing\n"); ++ return -ENXIO; ++ } ++ ctrl->aging_possible_reg = devm_ioremap(dev, res->start, ++ resource_size(res)); ++ } ++ ++ ctrl->irq = platform_get_irq_byname(pdev, "cpr"); ++ if (ctrl->irq < 0) { ++ cpr3_err(ctrl, "missing CPR interrupt\n"); ++ return ctrl->irq; ++ } ++ ++ if (ctrl->supports_hw_closed_loop) { ++ if (ctrl->ctrl_type == CPR_CTRL_TYPE_CPR3) { ++ ctrl->ceiling_irq = platform_get_irq_byname(pdev, ++ "ceiling"); ++ if (ctrl->ceiling_irq < 0) { ++ cpr3_err(ctrl, "missing ceiling interrupt\n"); ++ return ctrl->ceiling_irq; ++ } ++ } ++ } ++ ++ rc = cpr3_regulator_init_ctrl_data(ctrl); ++ if (rc) { ++ cpr3_err(ctrl, "CPR controller data initialization failed, rc=%d\n", ++ rc); ++ return rc; ++ } ++ ++ for (i = 0; i < ctrl->thread_count; i++) { ++ for (j = 0; j < ctrl->thread[i].vreg_count; j++) { ++ rc = cpr3_regulator_init_vreg_data( ++ &ctrl->thread[i].vreg[j]); ++ if (rc) ++ return rc; ++ cpr3_print_quots(&ctrl->thread[i].vreg[j]); ++ } ++ } ++ ++ /* ++ * Add the maximum possible aging voltage margin until it is possible ++ * to perform an aging measurement. 
++ */ ++ if (ctrl->aging_required) ++ cpr3_regulator_set_aging_ref_adjustment(ctrl, INT_MAX); ++ ++ rc = cpr3_regulator_init_ctrl(ctrl); ++ if (rc) { ++ cpr3_err(ctrl, "CPR controller initialization failed, rc=%d\n", ++ rc); ++ return rc; ++ } ++ ++ /* Register regulator devices for all threads. */ ++ for (i = 0; i < ctrl->thread_count; i++) { ++ for (j = 0; j < ctrl->thread[i].vreg_count; j++) { ++ rc = cpr3_regulator_vreg_register( ++ &ctrl->thread[i].vreg[j]); ++ if (rc) { ++ cpr3_err(&ctrl->thread[i].vreg[j], "failed to register regulator, rc=%d\n", ++ rc); ++ goto free_regulators; ++ } ++ } ++ } ++ ++ rc = devm_request_threaded_irq(dev, ctrl->irq, NULL, ++ cpr3_irq_handler, ++ IRQF_ONESHOT | ++ IRQF_TRIGGER_RISING, ++ "cpr3", ctrl); ++ if (rc) { ++ cpr3_err(ctrl, "could not request IRQ %d, rc=%d\n", ++ ctrl->irq, rc); ++ goto free_regulators; ++ } ++ ++ if (ctrl->supports_hw_closed_loop && ++ ctrl->ctrl_type == CPR_CTRL_TYPE_CPR3) { ++ rc = devm_request_threaded_irq(dev, ctrl->ceiling_irq, NULL, ++ cpr3_ceiling_irq_handler, ++ IRQF_ONESHOT | IRQF_TRIGGER_RISING, ++ "cpr3_ceiling", ctrl); ++ if (rc) { ++ cpr3_err(ctrl, "could not request ceiling IRQ %d, rc=%d\n", ++ ctrl->ceiling_irq, rc); ++ goto free_regulators; ++ } ++ } ++ ++ mutex_lock(&cpr3_controller_list_mutex); ++ cpr3_regulator_debugfs_ctrl_add(ctrl); ++ list_add(&ctrl->list, &cpr3_controller_list); ++ mutex_unlock(&cpr3_controller_list_mutex); ++ ++ if (ctrl->panic_regs_info) { ++ /* Register panic notification call back */ ++ ctrl->panic_notifier.notifier_call = cpr3_panic_callback; ++ atomic_notifier_chain_register(&panic_notifier_list, ++ &ctrl->panic_notifier); ++ } ++ ++ return 0; ++ ++free_regulators: ++ for (i = 0; i < ctrl->thread_count; i++) ++ for (j = 0; j < ctrl->thread[i].vreg_count; j++) ++ if (!IS_ERR_OR_NULL(ctrl->thread[i].vreg[j].rdev)) ++ regulator_unregister( ++ ctrl->thread[i].vreg[j].rdev); ++ return rc; ++} ++ ++/** ++ * cpr3_open_loop_regulator_register() - register the regulators for a CPR3 ++ * controller which will always work in Open loop and ++ * won't support close loop. ++ * @pdev: Platform device pointer for the CPR3 controller ++ * @ctrl: Pointer to the CPR3 controller ++ * ++ * Return: 0 on success, errno on failure ++ */ ++int cpr3_open_loop_regulator_register(struct platform_device *pdev, ++ struct cpr3_controller *ctrl) ++{ ++ struct device *dev = &pdev->dev; ++ struct cpr3_regulator *vreg; ++ int i, j, rc; ++ ++ if (!dev->of_node) { ++ dev_err(dev, "%s: Device tree node is missing\n", __func__); ++ return -EINVAL; ++ } ++ ++ if (!ctrl || !ctrl->name) { ++ dev_err(dev, "%s: CPR controller data is missing\n", __func__); ++ return -EINVAL; ++ } ++ ++ if (!ctrl->vdd_regulator) { ++ cpr3_err(ctrl, "vdd regulator missing\n"); ++ return -EINVAL; ++ } ++ ++ mutex_init(&ctrl->lock); ++ ++ rc = cpr3_regulator_init_ctrl_data(ctrl); ++ if (rc) { ++ cpr3_err(ctrl, "CPR controller data initialization failed, rc=%d\n", ++ rc); ++ return rc; ++ } ++ ++ for (i = 0; i < ctrl->thread_count; i++) { ++ for (j = 0; j < ctrl->thread[i].vreg_count; j++) { ++ vreg = &ctrl->thread[i].vreg[j]; ++ vreg->corner[i].last_volt = ++ vreg->corner[i].open_loop_volt; ++ } ++ } ++ ++ /* Register regulator devices for all threads. 
*/ ++ for (i = 0; i < ctrl->thread_count; i++) { ++ for (j = 0; j < ctrl->thread[i].vreg_count; j++) { ++ rc = cpr3_regulator_vreg_register( ++ &ctrl->thread[i].vreg[j]); ++ if (rc) { ++ cpr3_err(&ctrl->thread[i].vreg[j], "failed to register regulator, rc=%d\n", ++ rc); ++ goto free_regulators; ++ } ++ } ++ } ++ ++ mutex_lock(&cpr3_controller_list_mutex); ++ list_add(&ctrl->list, &cpr3_controller_list); ++ mutex_unlock(&cpr3_controller_list_mutex); ++ ++ return 0; ++ ++free_regulators: ++ for (i = 0; i < ctrl->thread_count; i++) ++ for (j = 0; j < ctrl->thread[i].vreg_count; j++) ++ if (!IS_ERR_OR_NULL(ctrl->thread[i].vreg[j].rdev)) ++ regulator_unregister( ++ ctrl->thread[i].vreg[j].rdev); ++ return rc; ++} ++ ++/** ++ * cpr3_regulator_unregister() - unregister the regulators for a CPR3 controller ++ * and perform CPR hardware shutdown ++ * @ctrl: Pointer to the CPR3 controller ++ * ++ * Return: 0 on success, errno on failure ++ */ ++int cpr3_regulator_unregister(struct cpr3_controller *ctrl) ++{ ++ int i, j, rc = 0; ++ ++ mutex_lock(&cpr3_controller_list_mutex); ++ list_del(&ctrl->list); ++ cpr3_regulator_debugfs_ctrl_remove(ctrl); ++ mutex_unlock(&cpr3_controller_list_mutex); ++ ++ if (ctrl->ctrl_type == CPR_CTRL_TYPE_CPR4) { ++ rc = cpr3_ctrl_clear_cpr4_config(ctrl); ++ if (rc) ++ cpr3_err(ctrl, "failed to clear CPR4 configuration,rc=%d\n", ++ rc); ++ } ++ ++ cpr3_ctrl_loop_disable(ctrl); ++ ++ cpr3_closed_loop_disable(ctrl); ++ ++ if (ctrl->vdd_limit_regulator) { ++ regulator_disable(ctrl->vdd_limit_regulator); ++ } ++ ++ for (i = 0; i < ctrl->thread_count; i++) ++ for (j = 0; j < ctrl->thread[i].vreg_count; j++) ++ regulator_unregister(ctrl->thread[i].vreg[j].rdev); ++ ++ if (ctrl->panic_notifier.notifier_call) ++ atomic_notifier_chain_unregister(&panic_notifier_list, ++ &ctrl->panic_notifier); ++ ++ return 0; ++} ++ ++/** ++ * cpr3_open_loop_regulator_unregister() - unregister the regulators for a CPR3 ++ * open loop controller and perform CPR hardware shutdown ++ * @ctrl: Pointer to the CPR3 controller ++ * ++ * Return: 0 on success, errno on failure ++ */ ++int cpr3_open_loop_regulator_unregister(struct cpr3_controller *ctrl) ++{ ++ int i, j; ++ ++ mutex_lock(&cpr3_controller_list_mutex); ++ list_del(&ctrl->list); ++ mutex_unlock(&cpr3_controller_list_mutex); ++ ++ if (ctrl->vdd_limit_regulator) { ++ regulator_disable(ctrl->vdd_limit_regulator); ++ } ++ ++ for (i = 0; i < ctrl->thread_count; i++) ++ for (j = 0; j < ctrl->thread[i].vreg_count; j++) ++ regulator_unregister(ctrl->thread[i].vreg[j].rdev); ++ ++ if (ctrl->panic_notifier.notifier_call) ++ atomic_notifier_chain_unregister(&panic_notifier_list, ++ &ctrl->panic_notifier); ++ ++ return 0; ++} +--- /dev/null ++++ b/drivers/regulator/cpr3-regulator.h +@@ -0,0 +1,1211 @@ ++/* ++ * Copyright (c) 2015-2017, The Linux Foundation. All rights reserved. ++ * ++ * This program is free software; you can redistribute it and/or modify ++ * it under the terms of the GNU General Public License version 2 and ++ * only version 2 as published by the Free Software Foundation. ++ * ++ * This program is distributed in the hope that it will be useful, ++ * but WITHOUT ANY WARRANTY; without even the implied warranty of ++ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the ++ * GNU General Public License for more details. 
++ */ ++ ++#ifndef __REGULATOR_CPR3_REGULATOR_H__ ++#define __REGULATOR_CPR3_REGULATOR_H__ ++ ++#include ++#include ++#include ++#include ++#include ++#include ++#include ++ ++struct cpr3_controller; ++struct cpr3_thread; ++ ++/** ++ * struct cpr3_fuse_param - defines one contiguous segment of a fuse parameter ++ * that is contained within a given row. ++ * @row: Fuse row number ++ * @bit_start: The first bit within the row of the fuse parameter segment ++ * @bit_end: The last bit within the row of the fuse parameter segment ++ * ++ * Each fuse row is 64 bits in length. bit_start and bit_end may take values ++ * from 0 to 63. bit_start must be less than or equal to bit_end. ++ */ ++struct cpr3_fuse_param { ++ unsigned row; ++ unsigned bit_start; ++ unsigned bit_end; ++}; ++ ++/* Each CPR3 sensor has 16 ring oscillators */ ++#define CPR3_RO_COUNT 16 ++ ++/* The maximum number of sensors that can be present on a single CPR loop. */ ++#define CPR3_MAX_SENSOR_COUNT 256 ++ ++/* This constant is used when allocating array printing buffers. */ ++#define MAX_CHARS_PER_INT 10 ++ ++/** ++ * struct cpr4_sdelta - CPR4 controller specific data structure for the sdelta ++ * adjustment table which is used to adjust the VDD supply ++ * voltage automatically based upon the temperature and/or ++ * the number of online CPU cores. ++ * @allow_core_count_adj: Core count adjustments are allowed. ++ * @allow_temp_adj: Temperature based adjustments are allowed. ++ * @max_core_count: Maximum number of cores considered for core count ++ * adjustment logic. ++ * @temp_band_count: Number of temperature bands considered for temperature ++ * based adjustment logic. ++ * @cap_volt: CAP in uV to apply to SDELTA margins with multiple ++ * cpr3-regulators defined for single controller. ++ * @table: SDELTA table with per-online-core and temperature based ++ * adjustments of size (max_core_count * temp_band_count) ++ * Outer: core count ++ * Inner: temperature band ++ * Each element has units of VDD supply steps. Positive ++ * values correspond to a reduction in voltage and negative ++ * value correspond to an increase (this follows the SDELTA ++ * register semantics). ++ * @allow_boost: Voltage boost allowed. ++ * @boost_num_cores: The number of online cores at which the boost voltage ++ * adjustments will be applied ++ * @boost_table: SDELTA table with boost voltage adjustments of size ++ * temp_band_count. Each element has units of VDD supply ++ * steps. Positive values correspond to a reduction in ++ * voltage and negative value correspond to an increase ++ * (this follows the SDELTA register semantics). ++ */ ++struct cpr4_sdelta { ++ bool allow_core_count_adj; ++ bool allow_temp_adj; ++ int max_core_count; ++ int temp_band_count; ++ int cap_volt; ++ int *table; ++ bool allow_boost; ++ int boost_num_cores; ++ int *boost_table; ++}; ++ ++/** ++ * struct cpr3_corner - CPR3 virtual voltage corner data structure ++ * @floor_volt: CPR closed-loop floor voltage in microvolts ++ * @ceiling_volt: CPR closed-loop ceiling voltage in microvolts ++ * @open_loop_volt: CPR open-loop voltage (i.e. initial voltage) in ++ * microvolts ++ * @last_volt: Last known settled CPR closed-loop voltage which is used ++ * when switching to a new corner ++ * @abs_ceiling_volt: The absolute CPR closed-loop ceiling voltage in ++ * microvolts. This is used to limit the ceiling_volt ++ * value when it is increased as a result of aging ++ * adjustment. 
++ * @unaged_floor_volt:	The CPR closed-loop floor voltage in microvolts before
++ *			any aging adjustment is performed
++ * @unaged_ceiling_volt: The CPR closed-loop ceiling voltage in microvolts
++ *			before any aging adjustment is performed
++ * @unaged_open_loop_volt: The CPR open-loop voltage (i.e. initial voltage) in
++ *			microvolts before any aging adjustment is performed
++ * @system_volt:	The system-supply voltage in microvolts, corners, or
++ *			levels
++ * @mem_acc_volt:	The mem-acc-supply voltage in corners
++ * @proc_freq:		Processor frequency in Hertz. For CPR rev. 3 and 4
++ *			controllers, this field is only used by the platform
++ *			specific CPR3 driver for interpolation. For CPRh-compliant
++ *			controllers, this frequency is also utilized by the
++ *			clock driver to determine the corner to CPU clock
++ *			frequency mappings.
++ * @cpr_fuse_corner:	Fused corner index associated with this virtual corner
++ *			(only used by the platform specific CPR3 driver for
++ *			mapping purposes)
++ * @target_quot:	Array of target quotient values to use for each ring
++ *			oscillator (RO) for this corner. A value of 0 should be
++ *			specified as the target quotient for each RO that is
++ *			unused by this corner.
++ * @ro_scale:		Array of CPR ring oscillator (RO) scaling factors. The
++ *			scaling factor for each RO is defined from RO0 to RO15
++ *			with units of QUOT/V. A value of 0 may be specified for
++ *			an RO that is unused.
++ * @ro_mask:		Bitmap where each of the 16 LSBs indicates if the
++ *			corresponding ROs should be masked for this corner
++ * @irq_en:		Bitmap of the CPR interrupts to enable for this corner
++ * @aging_derate:	The amount to derate the aging voltage adjustment
++ *			determined for the reference corner in units of uV/mV.
++ *			E.g. a value of 900 would imply that the adjustment for
++ *			this corner should be 90% (900/1000) of that for the
++ *			reference corner.
++ * @use_open_loop:	Boolean indicating that open-loop (i.e. CPR disabled) as
++ *			opposed to closed-loop operation must be used for this
++ *			corner on CPRh controllers.
++ * @sdelta:		The CPR4 controller specific data for this corner. This
++ *			field is applicable for CPR4 controllers.
++ *
++ * The value of last_volt is initialized inside of the cpr3_regulator_register()
++ * call with the open_loop_volt value. It can later be updated to the settled
++ * VDD supply voltage. The values for unaged_floor_volt, unaged_ceiling_volt,
++ * and unaged_open_loop_volt are initialized inside of cpr3_regulator_register()
++ * if ctrl->aging_required == true. These three values must be pre-initialized
++ * if cpr3_regulator_register() is called with ctrl->aging_required == false and
++ * ctrl->aging_succeeded == true.
++ *
++ * The values of ro_mask and irq_en are initialized inside of the
++ * cpr3_regulator_register() call.
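++ *
++ * Example (illustrative, hypothetical values): a corner populated with
++ * floor_volt = 720000, open_loop_volt = 790000 and ceiling_volt = 840000
++ * starts with last_volt = 790000 after cpr3_regulator_register(); closed-loop
++ * operation may later settle last_volt anywhere within [720000, 840000]
++ * microvolts.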
++ */ ++struct cpr3_corner { ++ int floor_volt; ++ int ceiling_volt; ++ int cold_temp_open_loop_volt; ++ int normal_temp_open_loop_volt; ++ int open_loop_volt; ++ int last_volt; ++ int abs_ceiling_volt; ++ int unaged_floor_volt; ++ int unaged_ceiling_volt; ++ int unaged_open_loop_volt; ++ int system_volt; ++ int mem_acc_volt; ++ u32 proc_freq; ++ int cpr_fuse_corner; ++ u32 target_quot[CPR3_RO_COUNT]; ++ u32 ro_scale[CPR3_RO_COUNT]; ++ u32 ro_mask; ++ u32 irq_en; ++ int aging_derate; ++ bool use_open_loop; ++ struct cpr4_sdelta *sdelta; ++}; ++ ++/** ++ * struct cprh_corner_band - CPRh controller specific data structure which ++ * encapsulates the range of corners and the SDELTA ++ * adjustment table to be applied to the corners within ++ * the min and max bounds of the corner band. ++ * @corner: Corner number which defines the corner band boundary ++ * @sdelta: The SDELTA adjustment table which contains core-count ++ * and temp based margin adjustments that are applicable ++ * to the corner band. ++ */ ++struct cprh_corner_band { ++ int corner; ++ struct cpr4_sdelta *sdelta; ++}; ++ ++/** ++ * struct cpr3_fuse_parameters - CPR4 fuse specific data structure which has ++ * the required fuse parameters need for Close Loop CPR ++ * @(*apss_ro_sel_param)[2]: Pointer to RO select fuse details ++ * @(*apss_init_voltage_param)[2]: Pointer to Target voltage fuse details ++ * @(*apss_target_quot_param)[2]: Pointer to Target quot fuse details ++ * @(*apss_quot_offset_param)[2]: Pointer to quot offset fuse details ++ * @cpr_fusing_rev_param: Pointer to CPR revision fuse details ++ * @apss_speed_bin_param: Pointer to Speed bin fuse details ++ * @cpr_boost_fuse_cfg_param: Pointer to Boost fuse cfg details ++ * @apss_boost_fuse_volt_param: Pointer to Boost fuse volt details ++ * @misc_fuse_volt_adj_param: Pointer to Misc fuse volt fuse details ++ */ ++struct cpr3_fuse_parameters { ++ struct cpr3_fuse_param (*apss_ro_sel_param)[2]; ++ struct cpr3_fuse_param (*apss_init_voltage_param)[2]; ++ struct cpr3_fuse_param (*apss_target_quot_param)[2]; ++ struct cpr3_fuse_param (*apss_quot_offset_param)[2]; ++ struct cpr3_fuse_param *cpr_fusing_rev_param; ++ struct cpr3_fuse_param *apss_speed_bin_param; ++ struct cpr3_fuse_param *cpr_boost_fuse_cfg_param; ++ struct cpr3_fuse_param *apss_boost_fuse_volt_param; ++ struct cpr3_fuse_param *misc_fuse_volt_adj_param; ++}; ++ ++struct cpr4_mem_acc_func { ++ void (*set_mem_acc)(struct regulator_dev *); ++ void (*clear_mem_acc)(struct regulator_dev *); ++}; ++ ++/** ++ * struct cpr4_reg_data - CPR4 regulator specific data structure which is ++ * target specific ++ * @cpr_valid_fuse_count: Number of valid fuse corners ++ * @fuse_ref_volt: Pointer to fuse reference voltage ++ * @fuse_step_volt: CPR step voltage available in fuse ++ * @cpr_clk_rate: CPR clock rate ++ * @boost_fuse_ref_volt: Boost fuse reference voltage ++ * @boost_ceiling_volt: Boost ceiling voltage ++ * @boost_floor_volt: Boost floor voltage ++ * @cpr3_fuse_params: Pointer to CPR fuse parameters ++ * @mem_acc_funcs: Pointer to MEM ACC set/clear functions ++ **/ ++struct cpr4_reg_data { ++ u32 cpr_valid_fuse_count; ++ int *fuse_ref_volt; ++ u32 fuse_step_volt; ++ u32 cpr_clk_rate; ++ int boost_fuse_ref_volt; ++ int boost_ceiling_volt; ++ int boost_floor_volt; ++ struct cpr3_fuse_parameters *cpr3_fuse_params; ++ struct cpr4_mem_acc_func *mem_acc_funcs; ++}; ++/** ++ * struct cpr3_reg_data - CPR3 regulator specific data structure which is ++ * target specific ++ * @cpr_valid_fuse_count: Number of valid fuse 
corners ++ * @(*init_voltage_param)[2]: Pointer to Target voltage fuse details ++ * @fuse_ref_volt: Pointer to fuse reference voltage ++ * @fuse_step_volt: CPR step voltage available in fuse ++ * @cpr_clk_rate: CPR clock rate ++ * @cpr3_fuse_params: Pointer to CPR fuse parameters ++ **/ ++struct cpr3_reg_data { ++ u32 cpr_valid_fuse_count; ++ struct cpr3_fuse_param (*init_voltage_param)[2]; ++ int *fuse_ref_volt; ++ u32 fuse_step_volt; ++ u32 cpr_clk_rate; ++}; ++ ++/** ++ * struct cpr3_regulator - CPR3 logical regulator instance associated with a ++ * given CPR3 hardware thread ++ * @of_node: Device node associated with the device tree child node ++ * of this CPR3 regulator ++ * @thread: Pointer to the CPR3 thread which manages this CPR3 ++ * regulator ++ * @name: Unique name for this CPR3 regulator which is filled ++ * using the device tree regulator-name property ++ * @rdesc: Regulator description for this CPR3 regulator ++ * @rdev: Regulator device pointer for the regulator registered ++ * for this CPR3 regulator ++ * @mem_acc_regulator: Pointer to the optional mem-acc supply regulator used ++ * to manage memory circuitry settings based upon CPR3 ++ * regulator output voltage. ++ * @corner: Array of all corners supported by this CPR3 regulator ++ * @corner_count: The number of elements in the corner array ++ * @corner_band: Array of all corner bands supported by CPRh compatible ++ * controllers ++ * @cpr4_regulator_data Target specific cpr4 regulator data ++ * @cpr3_regulator_data Target specific cpr3 regulator data ++ * @corner_band_count: The number of elements in the corner band array ++ * @platform_fuses: Pointer to platform specific CPR fuse data (only used by ++ * platform specific CPR3 driver) ++ * @speed_bin_fuse: Value read from the speed bin fuse parameter ++ * @speed_bins_supported: The number of speed bins supported by the device tree ++ * configuration for this CPR3 regulator ++ * @cpr_rev_fuse: Value read from the CPR fusing revision fuse parameter ++ * @fuse_combo: Platform specific enum value identifying the specific ++ * combination of fuse values found on a given chip ++ * @fuse_combos_supported: The number of fuse combinations supported by the ++ * device tree configuration for this CPR3 regulator ++ * @fuse_corner_count: Number of corners defined by fuse parameters ++ * @fuse_corner_map: Array of length fuse_corner_count which specifies the ++ * highest corner associated with each fuse corner. Note ++ * that each element must correspond to a valid corner ++ * and that element values must be strictly increasing. ++ * Also, it is acceptable for the lowest fuse corner to map ++ * to a corner other than the lowest. Likewise, it is ++ * acceptable for the highest fuse corner to map to a ++ * corner other than the highest. ++ * @fuse_combo_corner_sum: The sum of the corner counts across all fuse combos ++ * @fuse_combo_offset: The device tree property array offset for the selected ++ * fuse combo ++ * @speed_bin_corner_sum: The sum of the corner counts across all speed bins ++ * This may be specified as 0 if per speed bin parsing ++ * support is not required. 
++ * @speed_bin_offset: The device tree property array offset for the selected ++ * speed bin ++ * @fuse_combo_corner_band_sum: The sum of the corner band counts across all ++ * fuse combos ++ * @fuse_combo_corner_band_offset: The device tree property array offset for ++ * the corner band count corresponding to the selected ++ * fuse combo ++ * @speed_bin_corner_band_sum: The sum of the corner band counts across all ++ * speed bins. This may be specified as 0 if per speed bin ++ * parsing support is not required ++ * @speed_bin_corner_band_offset: The device tree property array offset for the ++ * corner band count corresponding to the selected speed ++ * bin ++ * @pd_bypass_mask: Bit mask of power domains associated with this CPR3 ++ * regulator ++ * @dynamic_floor_corner: Index identifying the voltage corner for the CPR3 ++ * regulator whose last_volt value should be used as the ++ * global CPR floor voltage if all of the power domains ++ * associated with this CPR3 regulator are bypassed ++ * @uses_dynamic_floor: Boolean flag indicating that dynamic_floor_corner should ++ * be utilized for the CPR3 regulator ++ * @current_corner: Index identifying the currently selected voltage corner ++ * for the CPR3 regulator or less than 0 if no corner has ++ * been requested ++ * @last_closed_loop_corner: Index identifying the last voltage corner for the ++ * CPR3 regulator which was configured when operating in ++ * CPR closed-loop mode or less than 0 if no corner has ++ * been requested. CPR registers are only written to when ++ * using closed-loop mode. ++ * @aggregated: Boolean flag indicating that this CPR3 regulator ++ * participated in the last aggregation event ++ * @debug_corner: Index identifying voltage corner used for displaying ++ * corner configuration values in debugfs ++ * @vreg_enabled: Boolean defining the enable state of the CPR3 ++ * regulator's regulator within the regulator framework. ++ * @aging_allowed: Boolean defining if CPR aging adjustments are allowed ++ * for this CPR3 regulator given the fuse combo of the ++ * device ++ * @aging_allow_open_loop_adj: Boolean defining if the open-loop voltage of each ++ * corner of this regulator should be adjusted as a result ++ * of an aging measurement. This flag can be set to false ++ * when the open-loop voltage adjustments have been ++ * specified such that they include the maximum possible ++ * aging adjustment. This flag is only used if ++ * aging_allowed == true. ++ * @aging_corner: The corner that should be configured for this regulator ++ * when an aging measurement is performed. ++ * @aging_max_adjust_volt: The maximum aging voltage margin in microvolts that ++ * may be added to the target quotients of this regulator. ++ * A value of 0 may be specified if this regulator does not ++ * require any aging adjustment. ++ * @allow_core_count_adj: Core count adjustments are allowed for this regulator. ++ * @allow_temp_adj: Temperature based adjustments are allowed for this ++ * regulator. ++ * @max_core_count: Maximum number of cores considered for core count ++ * adjustment logic. ++ * @allow_boost: Voltage boost allowed for this regulator. ++ * ++ * This structure contains both configuration and runtime state data. The ++ * elements current_corner, last_closed_loop_corner, aggregated, debug_corner, ++ * and vreg_enabled are state variables. 
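++ *
++ * Example (illustrative, hypothetical values): a regulator with
++ * fuse_corner_count = 4 and corner_count = 8 might use
++ * fuse_corner_map = {2, 4, 6, 7}, giving the highest virtual corner
++ * associated with fuse corners 0..3 respectively; the entries are
++ * strictly increasing as required.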
++ */ ++struct cpr3_regulator { ++ struct device_node *of_node; ++ struct cpr3_thread *thread; ++ const char *name; ++ struct regulator_desc rdesc; ++ struct regulator_dev *rdev; ++ struct regulator *mem_acc_regulator; ++ struct cpr3_corner *corner; ++ int corner_count; ++ struct cprh_corner_band *corner_band; ++ struct cpr4_reg_data *cpr4_regulator_data; ++ struct cpr3_reg_data *cpr3_regulator_data; ++ u32 corner_band_count; ++ ++ void *platform_fuses; ++ int speed_bin_fuse; ++ int speed_bins_supported; ++ int cpr_rev_fuse; ++ int part_type; ++ int part_type_supported; ++ int fuse_combo; ++ int fuse_combos_supported; ++ int fuse_corner_count; ++ int *fuse_corner_map; ++ int fuse_combo_corner_sum; ++ int fuse_combo_offset; ++ int speed_bin_corner_sum; ++ int speed_bin_offset; ++ int fuse_combo_corner_band_sum; ++ int fuse_combo_corner_band_offset; ++ int speed_bin_corner_band_sum; ++ int speed_bin_corner_band_offset; ++ u32 pd_bypass_mask; ++ int dynamic_floor_corner; ++ bool uses_dynamic_floor; ++ ++ int current_corner; ++ int last_closed_loop_corner; ++ bool aggregated; ++ int debug_corner; ++ bool vreg_enabled; ++ ++ bool aging_allowed; ++ bool aging_allow_open_loop_adj; ++ int aging_corner; ++ int aging_max_adjust_volt; ++ ++ bool allow_core_count_adj; ++ bool allow_temp_adj; ++ int max_core_count; ++ bool allow_boost; ++}; ++ ++/** ++ * struct cpr3_thread - CPR3 hardware thread data structure ++ * @thread_id: Hardware thread ID ++ * @of_node: Device node associated with the device tree child node ++ * of this CPR3 thread ++ * @ctrl: Pointer to the CPR3 controller which manages this thread ++ * @vreg: Array of CPR3 regulators handled by the CPR3 thread ++ * @vreg_count: Number of elements in the vreg array ++ * @aggr_corner: CPR corner containing the in process aggregated voltage ++ * and target quotient configurations which will be applied ++ * @last_closed_loop_aggr_corner: CPR corner containing the most recent ++ * configurations which were written into hardware ++ * registers when operating in closed loop mode (i.e. with ++ * CPR enabled) ++ * @consecutive_up: The number of consecutive CPR step up events needed to ++ * to trigger an up interrupt ++ * @consecutive_down: The number of consecutive CPR step down events needed to ++ * to trigger a down interrupt ++ * @up_threshold: The number CPR error steps required to generate an up ++ * event ++ * @down_threshold: The number CPR error steps required to generate a down ++ * event ++ * ++ * This structure contains both configuration and runtime state data. The ++ * elements aggr_corner and last_closed_loop_aggr_corner are state variables. ++ */ ++struct cpr3_thread { ++ u32 thread_id; ++ struct device_node *of_node; ++ struct cpr3_controller *ctrl; ++ struct cpr3_regulator *vreg; ++ int vreg_count; ++ struct cpr3_corner aggr_corner; ++ struct cpr3_corner last_closed_loop_aggr_corner; ++ ++ u32 consecutive_up; ++ u32 consecutive_down; ++ u32 up_threshold; ++ u32 down_threshold; ++}; ++ ++/* Per CPR controller data */ ++/** ++ * enum cpr3_mem_acc_corners - Constants which define the number of mem-acc ++ * regulator corners available in the mem-acc corner map array. 
++ * %CPR3_MEM_ACC_LOW_CORNER: Index in mem-acc corner map array mapping to the ++ * mem-acc regulator corner ++ * to be used for low voltage vdd supply ++ * %CPR3_MEM_ACC_HIGH_CORNER: Index in mem-acc corner map array mapping to the ++ * mem-acc regulator corner to be used for high ++ * voltage vdd supply ++ * %CPR3_MEM_ACC_CORNERS: Number of elements in the mem-acc corner map ++ * array ++ */ ++enum cpr3_mem_acc_corners { ++ CPR3_MEM_ACC_LOW_CORNER = 0, ++ CPR3_MEM_ACC_HIGH_CORNER = 1, ++ CPR3_MEM_ACC_CORNERS = 2, ++}; ++ ++/** ++ * enum cpr3_count_mode - CPR3 controller count mode which defines the ++ * method that CPR sensor data is acquired ++ * %CPR3_COUNT_MODE_ALL_AT_ONCE_MIN: Capture all CPR sensor readings ++ * simultaneously and report the minimum ++ * value seen in successive measurements ++ * %CPR3_COUNT_MODE_ALL_AT_ONCE_MAX: Capture all CPR sensor readings ++ * simultaneously and report the maximum ++ * value seen in successive measurements ++ * %CPR3_COUNT_MODE_STAGGERED: Read one sensor at a time in a ++ * sequential fashion ++ * %CPR3_COUNT_MODE_ALL_AT_ONCE_AGE: Capture all CPR aging sensor readings ++ * simultaneously. ++ */ ++enum cpr3_count_mode { ++ CPR3_COUNT_MODE_ALL_AT_ONCE_MIN = 0, ++ CPR3_COUNT_MODE_ALL_AT_ONCE_MAX = 1, ++ CPR3_COUNT_MODE_STAGGERED = 2, ++ CPR3_COUNT_MODE_ALL_AT_ONCE_AGE = 3, ++}; ++ ++/** ++ * enum cpr_controller_type - supported CPR controller hardware types ++ * %CPR_CTRL_TYPE_CPR3: HW has CPR3 controller ++ * %CPR_CTRL_TYPE_CPR4: HW has CPR4 controller ++ */ ++enum cpr_controller_type { ++ CPR_CTRL_TYPE_CPR3, ++ CPR_CTRL_TYPE_CPR4, ++}; ++ ++/** ++ * cpr_setting - supported CPR global settings ++ * %CPR_DEFAULT: default mode from dts will be used ++ * %CPR_DISABLED: ceiling voltage will be used for all the corners ++ * %CPR_OPEN_LOOP_EN: CPR will work in OL ++ * %CPR_CLOSED_LOOP_EN: CPR will work in CL, if supported ++ */ ++enum cpr_setting { ++ CPR_DEFAULT = 0, ++ CPR_DISABLED = 1, ++ CPR_OPEN_LOOP_EN = 2, ++ CPR_CLOSED_LOOP_EN = 3, ++}; ++ ++/** ++ * struct cpr3_aging_sensor_info - CPR3 aging sensor information ++ * @sensor_id The index of the CPR3 sensor to be used in the aging ++ * measurement. ++ * @ro_scale The CPR ring oscillator (RO) scaling factor for the ++ * aging sensor with units of QUOT/V. ++ * @init_quot_diff: The fused quotient difference between aged and un-aged ++ * paths that was measured at manufacturing time. ++ * @measured_quot_diff: The quotient difference measured at runtime. ++ * @bypass_mask: Bit mask of the CPR sensors that must be bypassed during ++ * the aging measurement for this sensor ++ * ++ * This structure contains both configuration and runtime state data. The ++ * element measured_quot_diff is a state variable. ++ */ ++struct cpr3_aging_sensor_info { ++ u32 sensor_id; ++ u32 ro_scale; ++ int init_quot_diff; ++ int measured_quot_diff; ++ u32 bypass_mask[CPR3_MAX_SENSOR_COUNT / 32]; ++}; ++ ++/** ++ * struct cpr3_reg_info - Register information data structure ++ * @name: Register name ++ * @addr: Register physical address ++ * @value: Register content ++ * @virt_addr: Register virtual address ++ * ++ * This data structure is used to dump some critical register contents ++ * when the device crashes due to a kernel panic. ++ */ ++struct cpr3_reg_info { ++ const char *name; ++ u32 addr; ++ u32 value; ++ void __iomem *virt_addr; ++}; ++ ++/** ++ * struct cpr3_panic_regs_info - Data structure to dump critical register ++ * contents. 
++ * @reg_count:		Number of elements in the regs array
++ * @regs:		Array of critical registers information
++ *
++ * This data structure is used to dump critical register contents when
++ * the device crashes due to a kernel panic.
++ */
++struct cpr3_panic_regs_info {
++	int reg_count;
++	struct cpr3_reg_info *regs;
++};
++
++/**
++ * struct cpr3_controller - CPR3 controller data structure
++ * @dev:		Device pointer for the CPR3 controller device
++ * @name:		Unique name for the CPR3 controller
++ * @ctrl_id:		Controller ID corresponding to the VDD supply number
++ *			that this CPR3 controller manages.
++ * @cpr_ctrl_base:	Virtual address of the CPR3 controller base register
++ * @fuse_base:		Virtual address of fuse row 0
++ * @aging_possible_reg: Virtual address of an optional platform-specific
++ *			register that must be read to determine if it is
++ *			possible to perform an aging measurement.
++ * @list:		list head used in a global cpr3-regulator list so that
++ *			cpr3-regulator structs can be found easily in RAM dumps
++ * @thread:		Array of CPR3 threads managed by the CPR3 controller
++ * @thread_count:	Number of elements in the thread array
++ * @sensor_owner:	Array of thread IDs indicating which thread owns a given
++ *			CPR sensor
++ * @sensor_count:	The number of CPR sensors found on the CPR loop managed
++ *			by this CPR controller. Must be equal to the number of
++ *			elements in the sensor_owner array
++ * @soc_revision:	Revision number of the SoC. This may be unused by
++ *			platforms that do not have different behavior for
++ *			different SoC revisions.
++ * @lock:		Mutex lock used to ensure mutual exclusion between
++ *			all of the threads associated with the controller
++ * @vdd_regulator:	Pointer to the VDD supply regulator which this CPR3
++ *			controller manages
++ * @system_regulator:	Pointer to the optional system-supply regulator upon
++ *			which the VDD supply regulator depends.
++ * @mem_acc_regulator:	Pointer to the optional mem-acc supply regulator used
++ *			to manage memory circuitry settings based upon the
++ *			VDD supply output voltage.
++ * @vdd_limit_regulator: Pointer to the VDD supply limit regulator which is used
++ *			for hardware closed-loop in order to specify ceiling and
++ *			floor voltage limits (platform specific)
++ * @system_supply_max_volt: Voltage in microvolts which corresponds to the
++ *			absolute ceiling voltage of the system-supply
++ * @mem_acc_threshold_volt: mem-acc threshold voltage in microvolts
++ * @mem_acc_corner_map: mem-acc regulator corners mapping to low and high
++ *			voltage mem-acc settings for the memories powered by
++ *			this CPR3 controller and its associated CPR3 regulators
++ * @mem_acc_crossover_volt: Voltage in microvolts corresponding to the voltage
++ *			that the VDD supply must be set to while a MEM ACC
++ *			switch is in progress. This element must be initialized
++ *			for CPRh controllers when a MEM ACC threshold voltage is
++ *			defined.
++ * @core_clk: Pointer to the CPR3 controller core clock ++ * @iface_clk: Pointer to the CPR3 interface clock (platform specific) ++ * @bus_clk: Pointer to the CPR3 bus clock (platform specific) ++ * @irq: CPR interrupt number ++ * @irq_affinity_mask: The cpumask for the CPUs which the CPR interrupt should ++ * have affinity for ++ * @cpu_hotplug_notifier: CPU hotplug notifier used to reset IRQ affinity when a ++ * CPU is brought back online ++ * @ceiling_irq: Interrupt number for the interrupt that is triggered ++ * when hardware closed-loop attempts to exceed the ceiling ++ * voltage ++ * @apm: Handle to the array power mux (APM) ++ * @apm_threshold_volt: Voltage in microvolts which defines the threshold ++ * voltage to determine the APM supply selection for ++ * each corner ++ * @apm_crossover_volt: Voltage in microvolts corresponding to the voltage that ++ * the VDD supply must be set to while an APM switch is in ++ * progress. This element must be initialized for CPRh ++ * controllers when an APM threshold voltage is defined ++ * @apm_adj_volt: Minimum difference between APM threshold voltage and ++ * open-loop voltage which allows the APM threshold voltage ++ * to be used as a ceiling ++ * @apm_high_supply: APM supply to configure if VDD voltage is greater than ++ * or equal to the APM threshold voltage ++ * @apm_low_supply: APM supply to configure if the VDD voltage is less than ++ * the APM threshold voltage ++ * @base_volt: Minimum voltage in microvolts supported by the VDD ++ * supply managed by this CPR controller ++ * @corner_switch_delay_time: The delay time in nanoseconds used by the CPR ++ * controller to wait for voltage settling before ++ * acknowledging the OSM block after corner changes ++ * @cpr_clock_rate: CPR reference clock frequency in Hz. ++ * @sensor_time: The time in nanoseconds that each sensor takes to ++ * perform a measurement. ++ * @loop_time: The time in nanoseconds between consecutive CPR ++ * measurements. ++ * @up_down_delay_time: The time to delay in nanoseconds between consecutive CPR ++ * measurements when the last measurement recommended ++ * increasing or decreasing the vdd-supply voltage. ++ * (platform specific) ++ * @idle_clocks: Number of CPR reference clock ticks that the CPR ++ * controller waits in transitional states. ++ * @step_quot_init_min: The default minimum CPR step quotient value. The step ++ * quotient is the number of additional ring oscillator ++ * ticks observed when increasing one step in vdd-supply ++ * output voltage. ++ * @step_quot_init_max: The default maximum CPR step quotient value. ++ * @step_volt: Step size in microvolts between available set points ++ * of the VDD supply ++ * @down_error_step_limit: CPR4 hardware closed-loop down error step limit which ++ * defines the maximum number of VDD supply regulator steps ++ * that the voltage may be reduced as the result of a ++ * single CPR measurement. ++ * @up_error_step_limit: CPR4 hardware closed-loop up error step limit which ++ * defines the maximum number of VDD supply regulator steps ++ * that the voltage may be increased as the result of a ++ * single CPR measurement. ++ * @count_mode: CPR controller count mode ++ * @count_repeat: Number of times to perform consecutive sensor ++ * measurements when using all-at-once count modes. ++ * @proc_clock_throttle: Defines the processor clock frequency throttling ++ * register value to use. This can be used to reduce the ++ * clock frequency when a power domain exits a low power ++ * mode until CPR settles at a new voltage. 
++ *			(platform specific)
++ * @cpr_allowed_hw:	Boolean which indicates if closed-loop CPR operation is
++ *			permitted for a given chip based upon hardware fuse
++ *			values
++ * @cpr_allowed_sw:	Boolean which indicates if closed-loop CPR operation is
++ *			permitted based upon software policies
++ * @supports_hw_closed_loop: Boolean which indicates if this CPR3/4 controller
++ *			physically supports hardware closed-loop CPR operation
++ * @use_hw_closed_loop: Boolean which indicates that this controller will be
++ *			using hardware closed-loop operation in place of
++ *			software closed-loop operation.
++ * @ctrl_type:		CPR controller type
++ * @saw_use_unit_mV:	Boolean which indicates that the unit used in the SAW
++ *			PVC interface is mV.
++ * @aggr_corner:	CPR corner containing the most recently aggregated
++ *			voltage configurations which are being used currently
++ * @cpr_enabled:	Boolean which indicates that the CPR controller is
++ *			enabled and operating in closed-loop mode. CPR clocks
++ *			have been prepared and enabled whenever this flag is
++ *			true.
++ * @last_corner_was_closed_loop: Boolean indicating if the last known corners
++ *			were updated during closed loop operation.
++ * @cpr_suspended:	Boolean which indicates that CPR has been temporarily
++ *			disabled while entering system suspend.
++ * @debugfs:		Pointer to the debugfs directory of this CPR3 controller
++ * @aging_ref_volt:	Reference voltage in microvolts to configure when
++ *			performing CPR aging measurements.
++ * @aging_vdd_mode:	vdd-supply regulator mode to configure before performing
++ *			a CPR aging measurement. It should be one of
++ *			REGULATOR_MODE_*.
++ * @aging_complete_vdd_mode: vdd-supply regulator mode to configure after
++ *			performing a CPR aging measurement. It should be one of
++ *			REGULATOR_MODE_*.
++ * @aging_ref_adjust_volt: The reference aging voltage margin in microvolts that
++ *			should be added to the target quotients of the
++ *			regulators managed by this controller after derating.
++ * @aging_required:	Flag which indicates that a CPR aging measurement still
++ *			needs to be performed for this CPR3 controller.
++ * @aging_succeeded:	Flag which indicates that a CPR aging measurement has
++ *			completed successfully.
++ * @aging_failed:	Flag which indicates that a CPR aging measurement has
++ *			failed to complete successfully.
++ * @aging_sensor:	Array of CPR3 aging sensors which are used to perform
++ *			aging measurements at runtime.
++ * @aging_sensor_count: Number of elements in the aging_sensor array
++ * @aging_possible_mask: Optional bitmask used to mask off the
++ *			aging_possible_reg register.
++ * @aging_possible_val: Optional value that the masked aging_possible_reg
++ *			register must have in order for a CPR aging measurement
++ *			to be possible.
++ * @step_quot_fixed:	Fixed step quotient value used for target quotient
++ *			adjustment if use_dynamic_step_quot is not set.
++ *			This parameter is only relevant for CPR4 controllers
++ *			when using the per-online-core or per-temperature
++ *			adjustments.
++ * @initial_temp_band:	Temperature band used for calculation of base-line
++ *			target quotients (fused).
++ * @use_dynamic_step_quot: Boolean value which indicates that margin adjustment
++ *			of target quotient will be based on the step quotient
++ *			calculated dynamically in hardware for each RO.
++ * @allow_core_count_adj: Core count adjustments are allowed for this controller ++ * @allow_temp_adj: Temperature based adjustments are allowed for ++ * this controller ++ * @allow_boost: Voltage boost allowed for this controller. ++ * @temp_band_count: Number of temperature bands used for temperature based ++ * adjustment logic ++ * @temp_points: Array of temperature points in decidegrees Celsius used ++ * to specify the ranges for selected temperature bands. ++ * The array must have (temp_band_count - 1) elements ++ * allocated. ++ * @temp_sensor_id_start: Start ID of temperature sensors used for temperature ++ * based adjustments. ++ * @temp_sensor_id_end: End ID of temperature sensors used for temperature ++ * based adjustments. ++ * @voltage_settling_time: The time in nanoseconds that it takes for the ++ * VDD supply voltage to settle after being increased or ++ * decreased by step_volt microvolts which is used when ++ * SDELTA voltage margin adjustments are applied. ++ * @cpr_global_setting: Global setting for this CPR controller ++ * @panic_regs_info: Array of panic registers information which provides the ++ * list of registers to dump when the device crashes. ++ * @panic_notifier: Notifier block registered to global panic notifier list. ++ * ++ * This structure contains both configuration and runtime state data. The ++ * elements cpr_allowed_sw, use_hw_closed_loop, aggr_corner, cpr_enabled, ++ * last_corner_was_closed_loop, cpr_suspended, aging_ref_adjust_volt, ++ * aging_required, aging_succeeded, and aging_failed are state variables. ++ * ++ * The apm* elements do not need to be initialized if the VDD supply managed by ++ * the CPR3 controller does not utilize an APM. ++ * ++ * The elements step_quot_fixed, initial_temp_band, allow_core_count_adj, ++ * allow_temp_adj and temp* need to be initialized for CPR4 controllers which ++ * are using per-online-core or per-temperature adjustments. 
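++ *
++ * Example (illustrative, hypothetical values): temp_band_count = 4 together
++ * with temp_points = {250, 500, 750} (decidegrees Celsius) divides the
++ * temperature range into four bands around roughly 25, 50 and 75 degrees C;
++ * the exact boundary handling is target specific.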
++ */ ++struct cpr3_controller { ++ struct device *dev; ++ const char *name; ++ int ctrl_id; ++ void __iomem *cpr_ctrl_base; ++ void __iomem *fuse_base; ++ void __iomem *aging_possible_reg; ++ struct list_head list; ++ struct cpr3_thread *thread; ++ int thread_count; ++ u8 *sensor_owner; ++ int sensor_count; ++ int soc_revision; ++ struct mutex lock; ++ struct regulator *vdd_regulator; ++ struct regulator *system_regulator; ++ struct regulator *mem_acc_regulator; ++ struct regulator *vdd_limit_regulator; ++ int system_supply_max_volt; ++ int mem_acc_threshold_volt; ++ int mem_acc_corner_map[CPR3_MEM_ACC_CORNERS]; ++ int mem_acc_crossover_volt; ++ struct clk *core_clk; ++ struct clk *iface_clk; ++ struct clk *bus_clk; ++ int irq; ++ struct cpumask irq_affinity_mask; ++ struct notifier_block cpu_hotplug_notifier; ++ int ceiling_irq; ++ struct msm_apm_ctrl_dev *apm; ++ int apm_threshold_volt; ++ int apm_crossover_volt; ++ int apm_adj_volt; ++ enum msm_apm_supply apm_high_supply; ++ enum msm_apm_supply apm_low_supply; ++ int base_volt; ++ u32 corner_switch_delay_time; ++ u32 cpr_clock_rate; ++ u32 sensor_time; ++ u32 loop_time; ++ u32 up_down_delay_time; ++ u32 idle_clocks; ++ u32 step_quot_init_min; ++ u32 step_quot_init_max; ++ int step_volt; ++ u32 down_error_step_limit; ++ u32 up_error_step_limit; ++ enum cpr3_count_mode count_mode; ++ u32 count_repeat; ++ u32 proc_clock_throttle; ++ bool cpr_allowed_hw; ++ bool cpr_allowed_sw; ++ bool supports_hw_closed_loop; ++ bool use_hw_closed_loop; ++ enum cpr_controller_type ctrl_type; ++ bool saw_use_unit_mV; ++ struct cpr3_corner aggr_corner; ++ bool cpr_enabled; ++ bool last_corner_was_closed_loop; ++ bool cpr_suspended; ++ struct dentry *debugfs; ++ ++ int aging_ref_volt; ++ unsigned int aging_vdd_mode; ++ unsigned int aging_complete_vdd_mode; ++ int aging_ref_adjust_volt; ++ bool aging_required; ++ bool aging_succeeded; ++ bool aging_failed; ++ struct cpr3_aging_sensor_info *aging_sensor; ++ int aging_sensor_count; ++ u32 cur_sensor_state; ++ u32 aging_possible_mask; ++ u32 aging_possible_val; ++ ++ u32 step_quot_fixed; ++ u32 initial_temp_band; ++ bool use_dynamic_step_quot; ++ bool allow_core_count_adj; ++ bool allow_temp_adj; ++ bool allow_boost; ++ int temp_band_count; ++ int *temp_points; ++ u32 temp_sensor_id_start; ++ u32 temp_sensor_id_end; ++ u32 voltage_settling_time; ++ enum cpr_setting cpr_global_setting; ++ struct cpr3_panic_regs_info *panic_regs_info; ++ struct notifier_block panic_notifier; ++}; ++ ++/* Used for rounding voltages to the closest physically available set point. */ ++#define CPR3_ROUND(n, d) (DIV_ROUND_UP(n, d) * (d)) ++ ++#define cpr3_err(cpr3_thread, message, ...) \ ++ pr_err("%s: " message, (cpr3_thread)->name, ##__VA_ARGS__) ++#define cpr3_info(cpr3_thread, message, ...) \ ++ pr_info("%s: " message, (cpr3_thread)->name, ##__VA_ARGS__) ++#define cpr3_debug(cpr3_thread, message, ...) \ ++ pr_debug("%s: " message, (cpr3_thread)->name, ##__VA_ARGS__) ++ ++/* ++ * Offset subtracted from voltage corner values passed in from the regulator ++ * framework in order to get internal voltage corner values. This is needed ++ * since the regulator framework treats 0 as an error value at regulator ++ * registration time. 
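++ *
++ * For example, with CPR3_CORNER_OFFSET == 1 a request from the regulator
++ * framework for corner N is expected to select entry (N - CPR3_CORNER_OFFSET)
++ * in the corner array.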
++ */ ++#define CPR3_CORNER_OFFSET 1 ++ ++#ifdef CONFIG_REGULATOR_CPR3 ++ ++int cpr3_regulator_register(struct platform_device *pdev, ++ struct cpr3_controller *ctrl); ++int cpr3_open_loop_regulator_register(struct platform_device *pdev, ++ struct cpr3_controller *ctrl); ++int cpr3_regulator_unregister(struct cpr3_controller *ctrl); ++int cpr3_open_loop_regulator_unregister(struct cpr3_controller *ctrl); ++int cpr3_regulator_suspend(struct cpr3_controller *ctrl); ++int cpr3_regulator_resume(struct cpr3_controller *ctrl); ++ ++int cpr3_allocate_threads(struct cpr3_controller *ctrl, u32 min_thread_id, ++ u32 max_thread_id); ++int cpr3_map_fuse_base(struct cpr3_controller *ctrl, ++ struct platform_device *pdev); ++int cpr3_read_tcsr_setting(struct cpr3_controller *ctrl, ++ struct platform_device *pdev, u8 start, u8 end); ++int cpr3_read_fuse_param(void __iomem *fuse_base_addr, ++ const struct cpr3_fuse_param *param, u64 *param_value); ++int cpr3_convert_open_loop_voltage_fuse(int ref_volt, int step_volt, u32 fuse, ++ int fuse_len); ++u64 cpr3_interpolate(u64 x1, u64 y1, u64 x2, u64 y2, u64 x); ++int cpr3_parse_array_property(struct cpr3_regulator *vreg, ++ const char *prop_name, int tuple_size, u32 *out); ++int cpr3_parse_corner_array_property(struct cpr3_regulator *vreg, ++ const char *prop_name, int tuple_size, u32 *out); ++int cpr3_parse_corner_band_array_property(struct cpr3_regulator *vreg, ++ const char *prop_name, int tuple_size, u32 *out); ++int cpr3_parse_common_corner_data(struct cpr3_regulator *vreg); ++int cpr3_parse_thread_u32(struct cpr3_thread *thread, const char *propname, ++ u32 *out_value, u32 value_min, u32 value_max); ++int cpr3_parse_ctrl_u32(struct cpr3_controller *ctrl, const char *propname, ++ u32 *out_value, u32 value_min, u32 value_max); ++int cpr3_parse_common_thread_data(struct cpr3_thread *thread); ++int cpr3_parse_common_ctrl_data(struct cpr3_controller *ctrl); ++int cpr3_parse_open_loop_common_ctrl_data(struct cpr3_controller *ctrl); ++int cpr3_limit_open_loop_voltages(struct cpr3_regulator *vreg); ++void cpr3_open_loop_voltage_as_ceiling(struct cpr3_regulator *vreg); ++int cpr3_limit_floor_voltages(struct cpr3_regulator *vreg); ++void cpr3_print_quots(struct cpr3_regulator *vreg); ++int cpr3_determine_part_type(struct cpr3_regulator *vreg, int fuse_volt); ++int cpr3_determine_temp_base_open_loop_correction(struct cpr3_regulator *vreg, ++ int *fuse_volt); ++int cpr3_adjust_fused_open_loop_voltages(struct cpr3_regulator *vreg, ++ int *fuse_volt); ++int cpr3_adjust_open_loop_voltages(struct cpr3_regulator *vreg); ++int cpr3_quot_adjustment(int ro_scale, int volt_adjust); ++int cpr3_voltage_adjustment(int ro_scale, int quot_adjust); ++int cpr3_parse_closed_loop_voltage_adjustments(struct cpr3_regulator *vreg, ++ u64 *ro_sel, int *volt_adjust, ++ int *volt_adjust_fuse, int *ro_scale); ++int cpr4_parse_core_count_temp_voltage_adj(struct cpr3_regulator *vreg, ++ bool use_corner_band); ++int cpr3_apm_init(struct cpr3_controller *ctrl); ++int cpr3_mem_acc_init(struct cpr3_regulator *vreg); ++void cprh_adjust_voltages_for_apm(struct cpr3_regulator *vreg); ++void cprh_adjust_voltages_for_mem_acc(struct cpr3_regulator *vreg); ++int cpr3_adjust_target_quotients(struct cpr3_regulator *vreg, ++ int *fuse_volt_adjust); ++int cpr3_handle_temp_open_loop_adjustment(struct cpr3_controller *ctrl, ++ bool is_cold); ++int cpr3_get_cold_temp_threshold(struct cpr3_regulator *vreg, int *cold_temp); ++bool cpr3_can_adjust_cold_temp(struct cpr3_regulator *vreg); ++ ++#else ++ ++static 
inline int cpr3_regulator_register(struct platform_device *pdev,
++			struct cpr3_controller *ctrl)
++{
++	return -ENXIO;
++}
++
++static inline int
++cpr3_open_loop_regulator_register(struct platform_device *pdev,
++			struct cpr3_controller *ctrl)
++{
++	return -ENXIO;
++}
++
++static inline int cpr3_regulator_unregister(struct cpr3_controller *ctrl)
++{
++	return -ENXIO;
++}
++
++static inline int
++cpr3_open_loop_regulator_unregister(struct cpr3_controller *ctrl)
++{
++	return -ENXIO;
++}
++
++static inline int cpr3_regulator_suspend(struct cpr3_controller *ctrl)
++{
++	return -ENXIO;
++}
++
++static inline int cpr3_regulator_resume(struct cpr3_controller *ctrl)
++{
++	return -ENXIO;
++}
++
++static inline int cpr3_get_thread_name(struct cpr3_thread *thread,
++			struct device_node *thread_node)
++{
++	return -EPERM;
++}
++
++static inline int cpr3_allocate_threads(struct cpr3_controller *ctrl,
++			u32 min_thread_id, u32 max_thread_id)
++{
++	return -EPERM;
++}
++
++static inline int cpr3_map_fuse_base(struct cpr3_controller *ctrl,
++			struct platform_device *pdev)
++{
++	return -ENXIO;
++}
++
++static inline int cpr3_read_tcsr_setting(struct cpr3_controller *ctrl,
++			struct platform_device *pdev, u8 start, u8 end)
++{
++	return 0;
++}
++
++static inline int cpr3_read_fuse_param(void __iomem *fuse_base_addr,
++			const struct cpr3_fuse_param *param, u64 *param_value)
++{
++	return -EPERM;
++}
++
++static inline int cpr3_convert_open_loop_voltage_fuse(int ref_volt,
++			int step_volt, u32 fuse, int fuse_len)
++{
++	return -EPERM;
++}
++
++static inline u64 cpr3_interpolate(u64 x1, u64 y1, u64 x2, u64 y2, u64 x)
++{
++	return 0;
++}
++
++static inline int cpr3_parse_array_property(struct cpr3_regulator *vreg,
++			const char *prop_name, int tuple_size, u32 *out)
++{
++	return -EPERM;
++}
++
++static inline int cpr3_parse_corner_array_property(struct cpr3_regulator *vreg,
++			const char *prop_name, int tuple_size, u32 *out)
++{
++	return -EPERM;
++}
++
++static inline int cpr3_parse_corner_band_array_property(
++			struct cpr3_regulator *vreg, const char *prop_name,
++			int tuple_size, u32 *out)
++{
++	return -EPERM;
++}
++
++static inline int cpr3_parse_common_corner_data(struct cpr3_regulator *vreg)
++{
++	return -EPERM;
++}
++
++static inline int cpr3_parse_thread_u32(struct cpr3_thread *thread,
++			const char *propname, u32 *out_value, u32 value_min,
++			u32 value_max)
++{
++	return -EPERM;
++}
++
++static inline int cpr3_parse_ctrl_u32(struct cpr3_controller *ctrl,
++			const char *propname, u32 *out_value, u32 value_min,
++			u32 value_max)
++{
++	return -EPERM;
++}
++
++static inline int cpr3_parse_common_thread_data(struct cpr3_thread *thread)
++{
++	return -EPERM;
++}
++
++static inline int cpr3_parse_common_ctrl_data(struct cpr3_controller *ctrl)
++{
++	return -EPERM;
++}
++
++static inline int
++cpr3_parse_open_loop_common_ctrl_data(struct cpr3_controller *ctrl)
++{
++	return -EPERM;
++}
++
++static inline int cpr3_limit_open_loop_voltages(struct cpr3_regulator *vreg)
++{
++	return -EPERM;
++}
++
++static inline void cpr3_open_loop_voltage_as_ceiling(
++			struct cpr3_regulator *vreg)
++{
++	return;
++}
++
++static inline int cpr3_limit_floor_voltages(struct cpr3_regulator *vreg)
++{
++	return -EPERM;
++}
++
++static inline void cpr3_print_quots(struct cpr3_regulator *vreg)
++{
++	return;
++}
++
++static inline int
++cpr3_determine_part_type(struct cpr3_regulator *vreg, int fuse_volt)
++{
++	return -EPERM;
++}
++
++static inline int
++cpr3_determine_temp_base_open_loop_correction(struct cpr3_regulator
*vreg, ++ int *fuse_volt) ++{ ++ return -EPERM; ++} ++ ++static inline int cpr3_adjust_fused_open_loop_voltages( ++ struct cpr3_regulator *vreg, int *fuse_volt) ++{ ++ return -EPERM; ++} ++ ++static inline int cpr3_adjust_open_loop_voltages(struct cpr3_regulator *vreg) ++{ ++ return -EPERM; ++} ++ ++static inline int cpr3_quot_adjustment(int ro_scale, int volt_adjust) ++{ ++ return 0; ++} ++ ++static inline int cpr3_voltage_adjustment(int ro_scale, int quot_adjust) ++{ ++ return 0; ++} ++ ++static inline int cpr3_parse_closed_loop_voltage_adjustments( ++ struct cpr3_regulator *vreg, u64 *ro_sel, ++ int *volt_adjust, int *volt_adjust_fuse, int *ro_scale) ++{ ++ return 0; ++} ++ ++static inline int cpr4_parse_core_count_temp_voltage_adj( ++ struct cpr3_regulator *vreg, bool use_corner_band) ++{ ++ return 0; ++} ++ ++static inline int cpr3_apm_init(struct cpr3_controller *ctrl) ++{ ++ return 0; ++} ++ ++static inline int cpr3_mem_acc_init(struct cpr3_regulator *vreg) ++{ ++ return 0; ++} ++ ++static inline void cprh_adjust_voltages_for_apm(struct cpr3_regulator *vreg) ++{ ++} ++ ++static inline void cprh_adjust_voltages_for_mem_acc(struct cpr3_regulator *vreg) ++{ ++} ++ ++static inline int cpr3_adjust_target_quotients(struct cpr3_regulator *vreg, ++ int *fuse_volt_adjust) ++{ ++ return 0; ++} ++ ++static inline int ++cpr3_handle_temp_open_loop_adjustment(struct cpr3_controller *ctrl, ++ bool is_cold) ++{ ++ return 0; ++} ++ ++static inline bool ++cpr3_can_adjust_cold_temp(struct cpr3_regulator *vreg) ++{ ++ return false; ++} ++ ++static inline int ++cpr3_get_cold_temp_threshold(struct cpr3_regulator *vreg, int *cold_temp) ++{ ++ return 0; ++} ++#endif /* CONFIG_REGULATOR_CPR3 */ ++ ++#endif /* __REGULATOR_CPR_REGULATOR_H__ */ +--- /dev/null ++++ b/drivers/regulator/cpr3-util.c +@@ -0,0 +1,2760 @@ ++/* ++ * Copyright (c) 2015-2017, The Linux Foundation. All rights reserved. ++ * ++ * This program is free software; you can redistribute it and/or modify ++ * it under the terms of the GNU General Public License version 2 and ++ * only version 2 as published by the Free Software Foundation. ++ * ++ * This program is distributed in the hope that it will be useful, ++ * but WITHOUT ANY WARRANTY; without even the implied warranty of ++ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the ++ * GNU General Public License for more details. ++ */ ++ ++/* ++ * This file contains utility functions to be used by platform specific CPR3 ++ * regulator drivers. ++ */ ++ ++#define pr_fmt(fmt) "%s: " fmt, __func__ ++ ++#include ++#include ++#include ++#include ++#include ++#include ++#include ++#include ++ ++#include "cpr3-regulator.h" ++ ++#define BYTES_PER_FUSE_ROW 8 ++#define MAX_FUSE_ROW_BIT 63 ++ ++#define CPR3_CONSECUTIVE_UP_DOWN_MIN 0 ++#define CPR3_CONSECUTIVE_UP_DOWN_MAX 15 ++#define CPR3_UP_DOWN_THRESHOLD_MIN 0 ++#define CPR3_UP_DOWN_THRESHOLD_MAX 31 ++#define CPR3_STEP_QUOT_MIN 0 ++#define CPR3_STEP_QUOT_MAX 63 ++#define CPR3_IDLE_CLOCKS_MIN 0 ++#define CPR3_IDLE_CLOCKS_MAX 31 ++ ++/* This constant has units of uV/mV so 1000 corresponds to 100%. 
*/ ++#define CPR3_AGING_DERATE_UNITY 1000 ++ ++static inline int read_ipq_soc_version_major(void) ++{ ++ const int *prop; ++ prop = of_get_property(of_find_node_by_path("/"), "soc_version_major", ++ NULL); ++ ++ if (!prop) ++ return -EINVAL; ++ ++ return le32_to_cpu(*prop); ++} ++ ++/** ++ * cpr3_allocate_regulators() - allocate and initialize CPR3 regulators for a ++ * given thread based upon device tree data ++ * @thread: Pointer to the CPR3 thread ++ * ++ * This function allocates the thread->vreg array based upon the number of ++ * device tree regulator subnodes. It also initializes generic elements of each ++ * regulator struct such as name, of_node, and thread. ++ * ++ * Return: 0 on success, errno on failure ++ */ ++static int cpr3_allocate_regulators(struct cpr3_thread *thread) ++{ ++ struct device_node *node; ++ int i, rc; ++ ++ thread->vreg_count = 0; ++ ++ for_each_available_child_of_node(thread->of_node, node) { ++ thread->vreg_count++; ++ } ++ ++ thread->vreg = devm_kcalloc(thread->ctrl->dev, thread->vreg_count, ++ sizeof(*thread->vreg), GFP_KERNEL); ++ if (!thread->vreg) ++ return -ENOMEM; ++ ++ i = 0; ++ for_each_available_child_of_node(thread->of_node, node) { ++ thread->vreg[i].of_node = node; ++ thread->vreg[i].thread = thread; ++ ++ rc = of_property_read_string(node, "regulator-name", ++ &thread->vreg[i].name); ++ if (rc) { ++ dev_err(thread->ctrl->dev, "could not find regulator name, rc=%d\n", ++ rc); ++ return rc; ++ } ++ ++ i++; ++ } ++ ++ return 0; ++} ++ ++/** ++ * cpr3_allocate_threads() - allocate and initialize CPR3 threads for a given ++ * controller based upon device tree data ++ * @ctrl: Pointer to the CPR3 controller ++ * @min_thread_id: Minimum allowed hardware thread ID for this controller ++ * @max_thread_id: Maximum allowed hardware thread ID for this controller ++ * ++ * This function allocates the ctrl->thread array based upon the number of ++ * device tree thread subnodes. It also initializes generic elements of each ++ * thread struct such as thread_id, of_node, ctrl, and vreg array. ++ * ++ * Return: 0 on success, errno on failure ++ */ ++int cpr3_allocate_threads(struct cpr3_controller *ctrl, u32 min_thread_id, ++ u32 max_thread_id) ++{ ++ struct device *dev = ctrl->dev; ++ struct device_node *thread_node; ++ int i, j, rc; ++ ++ ctrl->thread_count = 0; ++ ++ for_each_available_child_of_node(dev->of_node, thread_node) { ++ ctrl->thread_count++; ++ } ++ ++ ctrl->thread = devm_kcalloc(dev, ctrl->thread_count, ++ sizeof(*ctrl->thread), GFP_KERNEL); ++ if (!ctrl->thread) ++ return -ENOMEM; ++ ++ i = 0; ++ for_each_available_child_of_node(dev->of_node, thread_node) { ++ ctrl->thread[i].of_node = thread_node; ++ ctrl->thread[i].ctrl = ctrl; ++ ++ rc = of_property_read_u32(thread_node, "qcom,cpr-thread-id", ++ &ctrl->thread[i].thread_id); ++ if (rc) { ++ dev_err(dev, "could not read DT property qcom,cpr-thread-id, rc=%d\n", ++ rc); ++ return rc; ++ } ++ ++ if (ctrl->thread[i].thread_id < min_thread_id || ++ ctrl->thread[i].thread_id > max_thread_id) { ++ dev_err(dev, "invalid thread id = %u; not within [%u, %u]\n", ++ ctrl->thread[i].thread_id, min_thread_id, ++ max_thread_id); ++ return -EINVAL; ++ } ++ ++ /* Verify that the thread ID is unique for all child nodes. 
*/ ++ for (j = 0; j < i; j++) { ++ if (ctrl->thread[j].thread_id ++ == ctrl->thread[i].thread_id) { ++ dev_err(dev, "duplicate thread id = %u found\n", ++ ctrl->thread[i].thread_id); ++ return -EINVAL; ++ } ++ } ++ ++ rc = cpr3_allocate_regulators(&ctrl->thread[i]); ++ if (rc) ++ return rc; ++ ++ i++; ++ } ++ ++ return 0; ++} ++ ++/** ++ * cpr3_map_fuse_base() - ioremap the base address of the fuse region ++ * @ctrl: Pointer to the CPR3 controller ++ * @pdev: Platform device pointer for the CPR3 controller ++ * ++ * Return: 0 on success, errno on failure ++ */ ++int cpr3_map_fuse_base(struct cpr3_controller *ctrl, ++ struct platform_device *pdev) ++{ ++ struct resource *res; ++ ++ res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "fuse_base"); ++ if (!res || !res->start) { ++ dev_err(&pdev->dev, "fuse base address is missing\n"); ++ return -ENXIO; ++ } ++ ++ ctrl->fuse_base = devm_ioremap(&pdev->dev, res->start, ++ resource_size(res)); ++ ++ return 0; ++} ++ ++/** ++ * cpr3_read_tcsr_setting - reads the CPR setting bits from TCSR register ++ * @ctrl: Pointer to the CPR3 controller ++ * @pdev: Platform device pointer for the CPR3 controller ++ * @start: start bit in TCSR register ++ * @end: end bit in TCSR register ++ * ++ * Return: 0 on success, errno on failure ++ */ ++int cpr3_read_tcsr_setting(struct cpr3_controller *ctrl, ++ struct platform_device *pdev, u8 start, u8 end) ++{ ++ struct resource *res; ++ void __iomem *tcsr_reg; ++ u32 val; ++ ++ res = platform_get_resource_byname(pdev, IORESOURCE_MEM, ++ "cpr_tcsr_reg"); ++ if (!res || !res->start) ++ return 0; ++ ++ tcsr_reg = ioremap(res->start, resource_size(res)); ++ if (!tcsr_reg) { ++ dev_err(&pdev->dev, "tcsr ioremap failed\n"); ++ return 0; ++ } ++ ++ val = readl_relaxed(tcsr_reg); ++ val &= GENMASK(end, start); ++ val >>= start; ++ ++ switch (val) { ++ case 1: ++ ctrl->cpr_global_setting = CPR_DISABLED; ++ break; ++ case 2: ++ ctrl->cpr_global_setting = CPR_OPEN_LOOP_EN; ++ break; ++ case 3: ++ ctrl->cpr_global_setting = CPR_CLOSED_LOOP_EN; ++ break; ++ default: ++ ctrl->cpr_global_setting = CPR_DEFAULT; ++ } ++ ++ iounmap(tcsr_reg); ++ ++ return 0; ++} ++ ++/** ++ * cpr3_read_fuse_param() - reads a CPR3 fuse parameter out of eFuses ++ * @fuse_base_addr: Virtual memory address of the eFuse base address ++ * @param: Null terminated array of fuse param segments to read ++ * from ++ * @param_value: Output with value read from the eFuses ++ * ++ * This function reads from each of the parameter segments listed in the param ++ * array and concatenates their values together. Reading stops when an element ++ * is reached which has all 0 struct values. The total number of bits specified ++ * for the fuse parameter across all segments must be less than or equal to 64. 
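++ *
++ * Example (illustrative, with hypothetical row numbers and bit positions):
++ *
++ *	static const struct cpr3_fuse_param example_param[] = {
++ *		{10, 4, 9},
++ *		{11, 0, 1},
++ *		{},
++ *	};
++ *
++ * reads bits 4..9 of fuse row 10 into bits [5:0] of *param_value and bits
++ * 0..1 of fuse row 11 into bits [7:6]; the all-zero element terminates the
++ * parameter description.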
++ * ++ * Return: 0 on success, errno on failure ++ */ ++int cpr3_read_fuse_param(void __iomem *fuse_base_addr, ++ const struct cpr3_fuse_param *param, u64 *param_value) ++{ ++ u64 fuse_val, val; ++ int bits; ++ int bits_total = 0; ++ ++ *param_value = 0; ++ ++ while (param->row || param->bit_start || param->bit_end) { ++ if (param->bit_start > param->bit_end ++ || param->bit_end > MAX_FUSE_ROW_BIT) { ++ pr_err("Invalid fuse parameter segment: row=%u, start=%u, end=%u\n", ++ param->row, param->bit_start, param->bit_end); ++ return -EINVAL; ++ } ++ ++ bits = param->bit_end - param->bit_start + 1; ++ if (bits_total + bits > 64) { ++ pr_err("Invalid fuse parameter segments; total bits = %d\n", ++ bits_total + bits); ++ return -EINVAL; ++ } ++ ++ fuse_val = readq_relaxed(fuse_base_addr ++ + param->row * BYTES_PER_FUSE_ROW); ++ val = (fuse_val >> param->bit_start) & ((1ULL << bits) - 1); ++ *param_value |= val << bits_total; ++ bits_total += bits; ++ ++ param++; ++ } ++ ++ return 0; ++} ++ ++/** ++ * cpr3_convert_open_loop_voltage_fuse() - converts an open loop voltage fuse ++ * value into an absolute voltage with units of microvolts ++ * @ref_volt: Reference voltage in microvolts ++ * @step_volt: The step size in microvolts of the fuse LSB ++ * @fuse: Open loop voltage fuse value ++ * @fuse_len: The bit length of the fuse value ++ * ++ * The MSB of the fuse parameter corresponds to a sign bit. If it is set, then ++ * the lower bits correspond to the number of steps to go down from the ++ * reference voltage. If it is not set, then the lower bits correspond to the ++ * number of steps to go up from the reference voltage. ++ */ ++int cpr3_convert_open_loop_voltage_fuse(int ref_volt, int step_volt, u32 fuse, ++ int fuse_len) ++{ ++ int sign, steps; ++ ++ sign = (fuse & (1 << (fuse_len - 1))) ? -1 : 1; ++ steps = fuse & ((1 << (fuse_len - 1)) - 1); ++ ++ return ref_volt + sign * steps * step_volt; ++} ++ ++/** ++ * cpr3_interpolate() - performs linear interpolation ++ * @x1 Lower known x value ++ * @y1 Lower known y value ++ * @x2 Upper known x value ++ * @y2 Upper known y value ++ * @x Intermediate x value ++ * ++ * Returns y where (x, y) falls on the line between (x1, y1) and (x2, y2). ++ * It is required that x1 < x2, y1 <= y2, and x1 <= x <= x2. If these ++ * conditions are not met, then y2 will be returned. ++ */ ++u64 cpr3_interpolate(u64 x1, u64 y1, u64 x2, u64 y2, u64 x) ++{ ++ u64 temp; ++ ++ if (x1 >= x2 || y1 > y2 || x1 > x || x > x2) ++ return y2; ++ ++ temp = (x2 - x) * (y2 - y1); ++ do_div(temp, (u32)(x2 - x1)); ++ ++ return y2 - temp; ++} ++ ++/** ++ * cpr3_parse_array_property() - fill an array from a portion of the values ++ * specified for a device tree property ++ * @vreg: Pointer to the CPR3 regulator ++ * @prop_name: The name of the device tree property to read from ++ * @tuple_size: The number of elements in each tuple ++ * @out: Output data array which must be of size tuple_size ++ * ++ * cpr3_parse_common_corner_data() must be called for vreg before this function ++ * is called so that fuse combo and speed bin size elements are initialized. ++ * ++ * Three formats are supported for the device tree property: ++ * 1. Length == tuple_size ++ * (reading begins at index 0) ++ * 2. Length == tuple_size * vreg->fuse_combos_supported ++ * (reading begins at index tuple_size * vreg->fuse_combo) ++ * 3. Length == tuple_size * vreg->speed_bins_supported ++ * (reading begins at index tuple_size * vreg->speed_bin_fuse) ++ * ++ * All other property lengths are treated as errors. 
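++ *
++ * Example (illustrative, hypothetical values): with tuple_size = 2,
++ * fuse_combos_supported = 8 and fuse_combo = 3, a property containing
++ * 16 values is read starting at index 6 (format 2), while a property
++ * containing only 2 values is read starting at index 0 (format 1).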
++ * ++ * Return: 0 on success, errno on failure ++ */ ++int cpr3_parse_array_property(struct cpr3_regulator *vreg, ++ const char *prop_name, int tuple_size, u32 *out) ++{ ++ struct device_node *node = vreg->of_node; ++ int len = 0; ++ int i, offset, rc; ++ ++ if (!of_find_property(node, prop_name, &len)) { ++ cpr3_err(vreg, "property %s is missing\n", prop_name); ++ return -EINVAL; ++ } ++ ++ if (len == tuple_size * sizeof(u32)) { ++ offset = 0; ++ } else if (len == tuple_size * vreg->fuse_combos_supported ++ * sizeof(u32)) { ++ offset = tuple_size * vreg->fuse_combo; ++ } else if (vreg->speed_bins_supported > 0 && ++ len == tuple_size * vreg->speed_bins_supported * sizeof(u32)) { ++ offset = tuple_size * vreg->speed_bin_fuse; ++ } else { ++ if (vreg->speed_bins_supported > 0) ++ cpr3_err(vreg, "property %s has invalid length=%d, should be %zu, %zu, or %zu\n", ++ prop_name, len, ++ tuple_size * sizeof(u32), ++ tuple_size * vreg->speed_bins_supported ++ * sizeof(u32), ++ tuple_size * vreg->fuse_combos_supported ++ * sizeof(u32)); ++ else ++ cpr3_err(vreg, "property %s has invalid length=%d, should be %zu or %zu\n", ++ prop_name, len, ++ tuple_size * sizeof(u32), ++ tuple_size * vreg->fuse_combos_supported ++ * sizeof(u32)); ++ return -EINVAL; ++ } ++ ++ for (i = 0; i < tuple_size; i++) { ++ rc = of_property_read_u32_index(node, prop_name, offset + i, ++ &out[i]); ++ if (rc) { ++ cpr3_err(vreg, "error reading property %s, rc=%d\n", ++ prop_name, rc); ++ return rc; ++ } ++ } ++ ++ return 0; ++} ++ ++/** ++ * cpr3_parse_corner_array_property() - fill a per-corner array from a portion ++ * of the values specified for a device tree property ++ * @vreg: Pointer to the CPR3 regulator ++ * @prop_name: The name of the device tree property to read from ++ * @tuple_size: The number of elements in each per-corner tuple ++ * @out: Output data array which must be of size: ++ * tuple_size * vreg->corner_count ++ * ++ * cpr3_parse_common_corner_data() must be called for vreg before this function ++ * is called so that fuse combo and speed bin size elements are initialized. ++ * ++ * Three formats are supported for the device tree property: ++ * 1. Length == tuple_size * vreg->corner_count ++ * (reading begins at index 0) ++ * 2. Length == tuple_size * vreg->fuse_combo_corner_sum ++ * (reading begins at index tuple_size * vreg->fuse_combo_offset) ++ * 3. Length == tuple_size * vreg->speed_bin_corner_sum ++ * (reading begins at index tuple_size * vreg->speed_bin_offset) ++ * ++ * All other property lengths are treated as errors. 
++ * ++ * Return: 0 on success, errno on failure ++ */ ++int cpr3_parse_corner_array_property(struct cpr3_regulator *vreg, ++ const char *prop_name, int tuple_size, u32 *out) ++{ ++ struct device_node *node = vreg->of_node; ++ int len = 0; ++ int i, offset, rc; ++ ++ if (!of_find_property(node, prop_name, &len)) { ++ cpr3_err(vreg, "property %s is missing\n", prop_name); ++ return -EINVAL; ++ } ++ ++ if (len == tuple_size * vreg->corner_count * sizeof(u32)) { ++ offset = 0; ++ } else if (len == tuple_size * vreg->fuse_combo_corner_sum ++ * sizeof(u32)) { ++ offset = tuple_size * vreg->fuse_combo_offset; ++ } else if (vreg->speed_bin_corner_sum > 0 && ++ len == tuple_size * vreg->speed_bin_corner_sum * sizeof(u32)) { ++ offset = tuple_size * vreg->speed_bin_offset; ++ } else { ++ if (vreg->speed_bin_corner_sum > 0) ++ cpr3_err(vreg, "property %s has invalid length=%d, should be %zu, %zu, or %zu\n", ++ prop_name, len, ++ tuple_size * vreg->corner_count * sizeof(u32), ++ tuple_size * vreg->speed_bin_corner_sum ++ * sizeof(u32), ++ tuple_size * vreg->fuse_combo_corner_sum ++ * sizeof(u32)); ++ else ++ cpr3_err(vreg, "property %s has invalid length=%d, should be %zu or %zu\n", ++ prop_name, len, ++ tuple_size * vreg->corner_count * sizeof(u32), ++ tuple_size * vreg->fuse_combo_corner_sum ++ * sizeof(u32)); ++ return -EINVAL; ++ } ++ ++ for (i = 0; i < tuple_size * vreg->corner_count; i++) { ++ rc = of_property_read_u32_index(node, prop_name, offset + i, ++ &out[i]); ++ if (rc) { ++ cpr3_err(vreg, "error reading property %s, rc=%d\n", ++ prop_name, rc); ++ return rc; ++ } ++ } ++ ++ return 0; ++} ++ ++/** ++ * cpr3_parse_corner_band_array_property() - fill a per-corner band array ++ * from a portion of the values specified for a device tree ++ * property ++ * @vreg: Pointer to the CPR3 regulator ++ * @prop_name: The name of the device tree property to read from ++ * @tuple_size: The number of elements in each per-corner band tuple ++ * @out: Output data array which must be of size: ++ * tuple_size * vreg->corner_band_count ++ * ++ * cpr3_parse_common_corner_data() must be called for vreg before this function ++ * is called so that fuse combo and speed bin size elements are initialized. ++ * In addition, corner band fuse combo and speed bin sum and offset elements ++ * must be initialized prior to executing this function. ++ * ++ * Three formats are supported for the device tree property: ++ * 1. Length == tuple_size * vreg->corner_band_count ++ * (reading begins at index 0) ++ * 2. Length == tuple_size * vreg->fuse_combo_corner_band_sum ++ * (reading begins at index tuple_size * ++ * vreg->fuse_combo_corner_band_offset) ++ * 3. Length == tuple_size * vreg->speed_bin_corner_band_sum ++ * (reading begins at index tuple_size * ++ * vreg->speed_bin_corner_band_offset) ++ * ++ * All other property lengths are treated as errors. 
++ * ++ * Return: 0 on success, errno on failure ++ */ ++int cpr3_parse_corner_band_array_property(struct cpr3_regulator *vreg, ++ const char *prop_name, int tuple_size, u32 *out) ++{ ++ struct device_node *node = vreg->of_node; ++ int len = 0; ++ int i, offset, rc; ++ ++ if (!of_find_property(node, prop_name, &len)) { ++ cpr3_err(vreg, "property %s is missing\n", prop_name); ++ return -EINVAL; ++ } ++ ++ if (len == tuple_size * vreg->corner_band_count * sizeof(u32)) { ++ offset = 0; ++ } else if (len == tuple_size * vreg->fuse_combo_corner_band_sum ++ * sizeof(u32)) { ++ offset = tuple_size * vreg->fuse_combo_corner_band_offset; ++ } else if (vreg->speed_bin_corner_band_sum > 0 && ++ len == tuple_size * vreg->speed_bin_corner_band_sum * ++ sizeof(u32)) { ++ offset = tuple_size * vreg->speed_bin_corner_band_offset; ++ } else { ++ if (vreg->speed_bin_corner_band_sum > 0) ++ cpr3_err(vreg, "property %s has invalid length=%d, should be %zu, %zu, or %zu\n", ++ prop_name, len, ++ tuple_size * vreg->corner_band_count * ++ sizeof(u32), ++ tuple_size * vreg->speed_bin_corner_band_sum ++ * sizeof(u32), ++ tuple_size * vreg->fuse_combo_corner_band_sum ++ * sizeof(u32)); ++ else ++ cpr3_err(vreg, "property %s has invalid length=%d, should be %zu or %zu\n", ++ prop_name, len, ++ tuple_size * vreg->corner_band_count * ++ sizeof(u32), ++ tuple_size * vreg->fuse_combo_corner_band_sum ++ * sizeof(u32)); ++ return -EINVAL; ++ } ++ ++ for (i = 0; i < tuple_size * vreg->corner_band_count; i++) { ++ rc = of_property_read_u32_index(node, prop_name, offset + i, ++ &out[i]); ++ if (rc) { ++ cpr3_err(vreg, "error reading property %s, rc=%d\n", ++ prop_name, rc); ++ return rc; ++ } ++ } ++ ++ return 0; ++} ++ ++/** ++ * cpr3_parse_common_corner_data() - parse common CPR3 properties relating to ++ * the corners supported by a CPR3 regulator from device tree ++ * @vreg: Pointer to the CPR3 regulator ++ * ++ * This function reads, validates, and utilizes the following device tree ++ * properties: qcom,cpr-fuse-corners, qcom,cpr-fuse-combos, qcom,cpr-speed-bins, ++ * qcom,cpr-speed-bin-corners, qcom,cpr-corners, qcom,cpr-voltage-ceiling, ++ * qcom,cpr-voltage-floor, qcom,corner-frequencies, ++ * and qcom,cpr-corner-fmax-map. ++ * ++ * It initializes these CPR3 regulator elements: corner, corner_count, ++ * fuse_combos_supported, fuse_corner_map, and speed_bins_supported. It ++ * initializes these elements for each corner: ceiling_volt, floor_volt, ++ * proc_freq, and cpr_fuse_corner. ++ * ++ * It requires that the following CPR3 regulator elements be initialized before ++ * being called: fuse_corner_count, fuse_combo, and speed_bin_fuse. 
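++ *
++ * As an illustration (hypothetical values, not for any particular board),
++ * the corner-related entries could look like:
++ *	qcom,cpr-fuse-corners = <3>;
++ *	qcom,cpr-fuse-combos = <8>;
++ *	qcom,cpr-corners = <5>;
++ *	qcom,cpr-corner-fmax-map = <1 3 5>;
++ * i.e. 5 corners shared by all 8 fuse combos, with the 3 fuse corners mapped
++ * to corners 1, 3 and 5 respectively.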
++ * ++ * Return: 0 on success, errno on failure ++ */ ++int cpr3_parse_common_corner_data(struct cpr3_regulator *vreg) ++{ ++ struct device_node *node = vreg->of_node; ++ struct cpr3_controller *ctrl = vreg->thread->ctrl; ++ u32 max_fuse_combos, fuse_corners, aging_allowed = 0; ++ u32 max_speed_bins = 0; ++ u32 *combo_corners; ++ u32 *speed_bin_corners; ++ u32 *temp; ++ int i, j, rc; ++ ++ rc = of_property_read_u32(node, "qcom,cpr-fuse-corners", &fuse_corners); ++ if (rc) { ++ cpr3_err(vreg, "error reading property qcom,cpr-fuse-corners, rc=%d\n", ++ rc); ++ return rc; ++ } ++ ++ if (vreg->fuse_corner_count != fuse_corners) { ++ cpr3_err(vreg, "device tree config supports %d fuse corners but the hardware has %d fuse corners\n", ++ fuse_corners, vreg->fuse_corner_count); ++ return -EINVAL; ++ } ++ ++ rc = of_property_read_u32(node, "qcom,cpr-fuse-combos", ++ &max_fuse_combos); ++ if (rc) { ++ cpr3_err(vreg, "error reading property qcom,cpr-fuse-combos, rc=%d\n", ++ rc); ++ return rc; ++ } ++ ++ /* ++ * Sanity check against arbitrarily large value to avoid excessive ++ * memory allocation. ++ */ ++ if (max_fuse_combos > 100 || max_fuse_combos == 0) { ++ cpr3_err(vreg, "qcom,cpr-fuse-combos is invalid: %u\n", ++ max_fuse_combos); ++ return -EINVAL; ++ } ++ ++ if (vreg->fuse_combo >= max_fuse_combos) { ++ cpr3_err(vreg, "device tree config supports fuse combos 0-%u but the hardware has combo %d\n", ++ max_fuse_combos - 1, vreg->fuse_combo); ++ BUG_ON(1); ++ return -EINVAL; ++ } ++ ++ vreg->fuse_combos_supported = max_fuse_combos; ++ ++ of_property_read_u32(node, "qcom,cpr-speed-bins", &max_speed_bins); ++ ++ /* ++ * Sanity check against arbitrarily large value to avoid excessive ++ * memory allocation. ++ */ ++ if (max_speed_bins > 100) { ++ cpr3_err(vreg, "qcom,cpr-speed-bins is invalid: %u\n", ++ max_speed_bins); ++ return -EINVAL; ++ } ++ ++ if (max_speed_bins && vreg->speed_bin_fuse >= max_speed_bins) { ++ cpr3_err(vreg, "device tree config supports speed bins 0-%u but the hardware has speed bin %d\n", ++ max_speed_bins - 1, vreg->speed_bin_fuse); ++ BUG(); ++ return -EINVAL; ++ } ++ ++ vreg->speed_bins_supported = max_speed_bins; ++ ++ combo_corners = kcalloc(vreg->fuse_combos_supported, ++ sizeof(*combo_corners), GFP_KERNEL); ++ if (!combo_corners) ++ return -ENOMEM; ++ ++ rc = of_property_read_u32_array(node, "qcom,cpr-corners", combo_corners, ++ vreg->fuse_combos_supported); ++ if (rc == -EOVERFLOW) { ++ /* Single value case */ ++ rc = of_property_read_u32(node, "qcom,cpr-corners", ++ combo_corners); ++ for (i = 1; i < vreg->fuse_combos_supported; i++) ++ combo_corners[i] = combo_corners[0]; ++ } ++ if (rc) { ++ cpr3_err(vreg, "error reading property qcom,cpr-corners, rc=%d\n", ++ rc); ++ kfree(combo_corners); ++ return rc; ++ } ++ ++ vreg->fuse_combo_offset = 0; ++ vreg->fuse_combo_corner_sum = 0; ++ for (i = 0; i < vreg->fuse_combos_supported; i++) { ++ vreg->fuse_combo_corner_sum += combo_corners[i]; ++ if (i < vreg->fuse_combo) ++ vreg->fuse_combo_offset += combo_corners[i]; ++ } ++ ++ vreg->corner_count = combo_corners[vreg->fuse_combo]; ++ ++ kfree(combo_corners); ++ ++ vreg->speed_bin_offset = 0; ++ vreg->speed_bin_corner_sum = 0; ++ if (vreg->speed_bins_supported > 0) { ++ speed_bin_corners = kcalloc(vreg->speed_bins_supported, ++ sizeof(*speed_bin_corners), GFP_KERNEL); ++ if (!speed_bin_corners) ++ return -ENOMEM; ++ ++ rc = of_property_read_u32_array(node, ++ "qcom,cpr-speed-bin-corners", speed_bin_corners, ++ vreg->speed_bins_supported); ++ if (rc) { ++ cpr3_err(vreg, 
"error reading property qcom,cpr-speed-bin-corners, rc=%d\n", ++ rc); ++ kfree(speed_bin_corners); ++ return rc; ++ } ++ ++ for (i = 0; i < vreg->speed_bins_supported; i++) { ++ vreg->speed_bin_corner_sum += speed_bin_corners[i]; ++ if (i < vreg->speed_bin_fuse) ++ vreg->speed_bin_offset += speed_bin_corners[i]; ++ } ++ ++ if (speed_bin_corners[vreg->speed_bin_fuse] ++ != vreg->corner_count) { ++ cpr3_err(vreg, "qcom,cpr-corners and qcom,cpr-speed-bin-corners conflict on number of corners: %d vs %u\n", ++ vreg->corner_count, ++ speed_bin_corners[vreg->speed_bin_fuse]); ++ kfree(speed_bin_corners); ++ return -EINVAL; ++ } ++ ++ kfree(speed_bin_corners); ++ } ++ ++ vreg->corner = devm_kcalloc(ctrl->dev, vreg->corner_count, ++ sizeof(*vreg->corner), GFP_KERNEL); ++ temp = kcalloc(vreg->corner_count, sizeof(*temp), GFP_KERNEL); ++ if (!vreg->corner || !temp) ++ return -ENOMEM; ++ ++ rc = cpr3_parse_corner_array_property(vreg, "qcom,cpr-voltage-ceiling", ++ 1, temp); ++ if (rc) ++ goto free_temp; ++ for (i = 0; i < vreg->corner_count; i++) { ++ vreg->corner[i].ceiling_volt ++ = CPR3_ROUND(temp[i], ctrl->step_volt); ++ vreg->corner[i].abs_ceiling_volt = vreg->corner[i].ceiling_volt; ++ } ++ ++ rc = cpr3_parse_corner_array_property(vreg, "qcom,cpr-voltage-floor", ++ 1, temp); ++ if (rc) ++ goto free_temp; ++ for (i = 0; i < vreg->corner_count; i++) ++ vreg->corner[i].floor_volt ++ = CPR3_ROUND(temp[i], ctrl->step_volt); ++ ++ /* Validate ceiling and floor values */ ++ for (i = 0; i < vreg->corner_count; i++) { ++ if (vreg->corner[i].floor_volt ++ > vreg->corner[i].ceiling_volt) { ++ cpr3_err(vreg, "CPR floor[%d]=%d > ceiling[%d]=%d uV\n", ++ i, vreg->corner[i].floor_volt, ++ i, vreg->corner[i].ceiling_volt); ++ rc = -EINVAL; ++ goto free_temp; ++ } ++ } ++ ++ /* Load optional system-supply voltages */ ++ if (of_find_property(vreg->of_node, "qcom,system-voltage", NULL)) { ++ rc = cpr3_parse_corner_array_property(vreg, ++ "qcom,system-voltage", 1, temp); ++ if (rc) ++ goto free_temp; ++ for (i = 0; i < vreg->corner_count; i++) ++ vreg->corner[i].system_volt = temp[i]; ++ } ++ ++ rc = cpr3_parse_corner_array_property(vreg, "qcom,corner-frequencies", ++ 1, temp); ++ if (rc) ++ goto free_temp; ++ for (i = 0; i < vreg->corner_count; i++) ++ vreg->corner[i].proc_freq = temp[i]; ++ ++ /* Validate frequencies */ ++ for (i = 1; i < vreg->corner_count; i++) { ++ if (vreg->corner[i].proc_freq ++ < vreg->corner[i - 1].proc_freq) { ++ cpr3_err(vreg, "invalid frequency: freq[%d]=%u < freq[%d]=%u\n", ++ i, vreg->corner[i].proc_freq, i - 1, ++ vreg->corner[i - 1].proc_freq); ++ rc = -EINVAL; ++ goto free_temp; ++ } ++ } ++ ++ vreg->fuse_corner_map = devm_kcalloc(ctrl->dev, vreg->fuse_corner_count, ++ sizeof(*vreg->fuse_corner_map), GFP_KERNEL); ++ if (!vreg->fuse_corner_map) { ++ rc = -ENOMEM; ++ goto free_temp; ++ } ++ ++ rc = cpr3_parse_array_property(vreg, "qcom,cpr-corner-fmax-map", ++ vreg->fuse_corner_count, temp); ++ if (rc) ++ goto free_temp; ++ for (i = 0; i < vreg->fuse_corner_count; i++) { ++ vreg->fuse_corner_map[i] = temp[i] - CPR3_CORNER_OFFSET; ++ if (temp[i] < CPR3_CORNER_OFFSET ++ || temp[i] > vreg->corner_count + CPR3_CORNER_OFFSET) { ++ cpr3_err(vreg, "invalid corner value specified in qcom,cpr-corner-fmax-map: %u\n", ++ temp[i]); ++ rc = -EINVAL; ++ goto free_temp; ++ } else if (i > 0 && temp[i - 1] >= temp[i]) { ++ cpr3_err(vreg, "invalid corner %u less than or equal to previous corner %u\n", ++ temp[i], temp[i - 1]); ++ rc = -EINVAL; ++ goto free_temp; ++ } ++ } ++ if 
(temp[vreg->fuse_corner_count - 1] != vreg->corner_count) ++ cpr3_debug(vreg, "Note: highest Fmax corner %u in qcom,cpr-corner-fmax-map does not match highest supported corner %d\n", ++ temp[vreg->fuse_corner_count - 1], ++ vreg->corner_count); ++ ++ for (i = 0; i < vreg->corner_count; i++) { ++ for (j = 0; j < vreg->fuse_corner_count; j++) { ++ if (i + CPR3_CORNER_OFFSET <= temp[j]) { ++ vreg->corner[i].cpr_fuse_corner = j; ++ break; ++ } ++ } ++ if (j == vreg->fuse_corner_count) { ++ /* ++ * Handle the case where the highest fuse corner maps ++ * to a corner below the highest corner. ++ */ ++ vreg->corner[i].cpr_fuse_corner ++ = vreg->fuse_corner_count - 1; ++ } ++ } ++ ++ if (of_find_property(vreg->of_node, ++ "qcom,allow-aging-voltage-adjustment", NULL)) { ++ rc = cpr3_parse_array_property(vreg, ++ "qcom,allow-aging-voltage-adjustment", ++ 1, &aging_allowed); ++ if (rc) ++ goto free_temp; ++ ++ vreg->aging_allowed = aging_allowed; ++ } ++ ++ if (of_find_property(vreg->of_node, ++ "qcom,allow-aging-open-loop-voltage-adjustment", NULL)) { ++ rc = cpr3_parse_array_property(vreg, ++ "qcom,allow-aging-open-loop-voltage-adjustment", ++ 1, &aging_allowed); ++ if (rc) ++ goto free_temp; ++ ++ vreg->aging_allow_open_loop_adj = aging_allowed; ++ } ++ ++ if (vreg->aging_allowed) { ++ if (ctrl->aging_ref_volt <= 0) { ++ cpr3_err(ctrl, "qcom,cpr-aging-ref-voltage must be specified\n"); ++ rc = -EINVAL; ++ goto free_temp; ++ } ++ ++ rc = cpr3_parse_array_property(vreg, ++ "qcom,cpr-aging-max-voltage-adjustment", ++ 1, &vreg->aging_max_adjust_volt); ++ if (rc) ++ goto free_temp; ++ ++ rc = cpr3_parse_array_property(vreg, ++ "qcom,cpr-aging-ref-corner", 1, &vreg->aging_corner); ++ if (rc) { ++ goto free_temp; ++ } else if (vreg->aging_corner < CPR3_CORNER_OFFSET ++ || vreg->aging_corner > vreg->corner_count - 1 ++ + CPR3_CORNER_OFFSET) { ++ cpr3_err(vreg, "aging reference corner=%d not in range [%d, %d]\n", ++ vreg->aging_corner, CPR3_CORNER_OFFSET, ++ vreg->corner_count - 1 + CPR3_CORNER_OFFSET); ++ rc = -EINVAL; ++ goto free_temp; ++ } ++ vreg->aging_corner -= CPR3_CORNER_OFFSET; ++ ++ if (of_find_property(vreg->of_node, "qcom,cpr-aging-derate", ++ NULL)) { ++ rc = cpr3_parse_corner_array_property(vreg, ++ "qcom,cpr-aging-derate", 1, temp); ++ if (rc) ++ goto free_temp; ++ ++ for (i = 0; i < vreg->corner_count; i++) ++ vreg->corner[i].aging_derate = temp[i]; ++ } else { ++ for (i = 0; i < vreg->corner_count; i++) ++ vreg->corner[i].aging_derate ++ = CPR3_AGING_DERATE_UNITY; ++ } ++ } ++ ++free_temp: ++ kfree(temp); ++ return rc; ++} ++ ++/** ++ * cpr3_parse_thread_u32() - parse the specified property from the CPR3 thread's ++ * device tree node and verify that it is within the allowed limits ++ * @thread: Pointer to the CPR3 thread ++ * @propname: The name of the device tree property to read ++ * @out_value: The output pointer to fill with the value read ++ * @value_min: The minimum allowed property value ++ * @value_max: The maximum allowed property value ++ * ++ * This function prints a verbose error message if the property is missing or ++ * has a value which is not within the specified range. 
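++ *
++ * For example, cpr3_parse_common_thread_data() below uses this helper to
++ * read qcom,cpr-up-threshold and reject values outside of
++ * [CPR3_UP_DOWN_THRESHOLD_MIN, CPR3_UP_DOWN_THRESHOLD_MAX].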
++ * ++ * Return: 0 on success, errno on failure ++ */ ++int cpr3_parse_thread_u32(struct cpr3_thread *thread, const char *propname, ++ u32 *out_value, u32 value_min, u32 value_max) ++{ ++ int rc; ++ ++ rc = of_property_read_u32(thread->of_node, propname, out_value); ++ if (rc) { ++ cpr3_err(thread->ctrl, "thread %u error reading property %s, rc=%d\n", ++ thread->thread_id, propname, rc); ++ return rc; ++ } ++ ++ if (*out_value < value_min || *out_value > value_max) { ++ cpr3_err(thread->ctrl, "thread %u %s=%u is invalid; allowed range: [%u, %u]\n", ++ thread->thread_id, propname, *out_value, value_min, ++ value_max); ++ return -EINVAL; ++ } ++ ++ return 0; ++} ++ ++/** ++ * cpr3_parse_ctrl_u32() - parse the specified property from the CPR3 ++ * controller's device tree node and verify that it is within the ++ * allowed limits ++ * @ctrl: Pointer to the CPR3 controller ++ * @propname: The name of the device tree property to read ++ * @out_value: The output pointer to fill with the value read ++ * @value_min: The minimum allowed property value ++ * @value_max: The maximum allowed property value ++ * ++ * This function prints a verbose error message if the property is missing or ++ * has a value which is not within the specified range. ++ * ++ * Return: 0 on success, errno on failure ++ */ ++int cpr3_parse_ctrl_u32(struct cpr3_controller *ctrl, const char *propname, ++ u32 *out_value, u32 value_min, u32 value_max) ++{ ++ int rc; ++ ++ rc = of_property_read_u32(ctrl->dev->of_node, propname, out_value); ++ if (rc) { ++ cpr3_err(ctrl, "error reading property %s, rc=%d\n", ++ propname, rc); ++ return rc; ++ } ++ ++ if (*out_value < value_min || *out_value > value_max) { ++ cpr3_err(ctrl, "%s=%u is invalid; allowed range: [%u, %u]\n", ++ propname, *out_value, value_min, value_max); ++ return -EINVAL; ++ } ++ ++ return 0; ++} ++ ++/** ++ * cpr3_parse_common_thread_data() - parse common CPR3 thread properties from ++ * device tree ++ * @thread: Pointer to the CPR3 thread ++ * ++ * Return: 0 on success, errno on failure ++ */ ++int cpr3_parse_common_thread_data(struct cpr3_thread *thread) ++{ ++ int rc; ++ ++ rc = cpr3_parse_thread_u32(thread, "qcom,cpr-consecutive-up", ++ &thread->consecutive_up, CPR3_CONSECUTIVE_UP_DOWN_MIN, ++ CPR3_CONSECUTIVE_UP_DOWN_MAX); ++ if (rc) ++ return rc; ++ ++ rc = cpr3_parse_thread_u32(thread, "qcom,cpr-consecutive-down", ++ &thread->consecutive_down, CPR3_CONSECUTIVE_UP_DOWN_MIN, ++ CPR3_CONSECUTIVE_UP_DOWN_MAX); ++ if (rc) ++ return rc; ++ ++ rc = cpr3_parse_thread_u32(thread, "qcom,cpr-up-threshold", ++ &thread->up_threshold, CPR3_UP_DOWN_THRESHOLD_MIN, ++ CPR3_UP_DOWN_THRESHOLD_MAX); ++ if (rc) ++ return rc; ++ ++ rc = cpr3_parse_thread_u32(thread, "qcom,cpr-down-threshold", ++ &thread->down_threshold, CPR3_UP_DOWN_THRESHOLD_MIN, ++ CPR3_UP_DOWN_THRESHOLD_MAX); ++ if (rc) ++ return rc; ++ ++ return rc; ++} ++ ++/** ++ * cpr3_parse_irq_affinity() - parse CPR IRQ affinity information ++ * @ctrl: Pointer to the CPR3 controller ++ * ++ * Return: 0 on success, errno on failure ++ */ ++static int cpr3_parse_irq_affinity(struct cpr3_controller *ctrl) ++{ ++ struct device_node *cpu_node; ++ int i, cpu; ++ int len = 0; ++ ++ if (!of_find_property(ctrl->dev->of_node, "qcom,cpr-interrupt-affinity", ++ &len)) { ++ /* No IRQ affinity required */ ++ return 0; ++ } ++ ++ len /= sizeof(u32); ++ ++ for (i = 0; i < len; i++) { ++ cpu_node = of_parse_phandle(ctrl->dev->of_node, ++ "qcom,cpr-interrupt-affinity", i); ++ if (!cpu_node) { ++ cpr3_err(ctrl, "could not find CPU node 
%d\n", i); ++ return -EINVAL; ++ } ++ ++ for_each_possible_cpu(cpu) { ++ if (of_get_cpu_node(cpu, NULL) == cpu_node) { ++ cpumask_set_cpu(cpu, &ctrl->irq_affinity_mask); ++ break; ++ } ++ } ++ of_node_put(cpu_node); ++ } ++ ++ return 0; ++} ++ ++static int cpr3_panic_notifier_init(struct cpr3_controller *ctrl) ++{ ++ struct device_node *node = ctrl->dev->of_node; ++ struct cpr3_panic_regs_info *panic_regs_info; ++ struct cpr3_reg_info *regs; ++ int i, reg_count, len, rc = 0; ++ ++ if (!of_find_property(node, "qcom,cpr-panic-reg-addr-list", &len)) { ++ /* panic register address list not specified */ ++ return rc; ++ } ++ ++ reg_count = len / sizeof(u32); ++ if (!reg_count) { ++ cpr3_err(ctrl, "qcom,cpr-panic-reg-addr-list has invalid len = %d\n", ++ len); ++ return -EINVAL; ++ } ++ ++ if (!of_find_property(node, "qcom,cpr-panic-reg-name-list", NULL)) { ++ cpr3_err(ctrl, "property qcom,cpr-panic-reg-name-list not specified\n"); ++ return -EINVAL; ++ } ++ ++ len = of_property_count_strings(node, "qcom,cpr-panic-reg-name-list"); ++ if (reg_count != len) { ++ cpr3_err(ctrl, "qcom,cpr-panic-reg-name-list should have %d strings\n", ++ reg_count); ++ return -EINVAL; ++ } ++ ++ panic_regs_info = devm_kzalloc(ctrl->dev, sizeof(*panic_regs_info), ++ GFP_KERNEL); ++ if (!panic_regs_info) ++ return -ENOMEM; ++ ++ regs = devm_kcalloc(ctrl->dev, reg_count, sizeof(*regs), GFP_KERNEL); ++ if (!regs) ++ return -ENOMEM; ++ ++ for (i = 0; i < reg_count; i++) { ++ rc = of_property_read_string_index(node, ++ "qcom,cpr-panic-reg-name-list", i, ++ &(regs[i].name)); ++ if (rc) { ++ cpr3_err(ctrl, "error reading property qcom,cpr-panic-reg-name-list, rc=%d\n", ++ rc); ++ return rc; ++ } ++ ++ rc = of_property_read_u32_index(node, ++ "qcom,cpr-panic-reg-addr-list", i, ++ &(regs[i].addr)); ++ if (rc) { ++ cpr3_err(ctrl, "error reading property qcom,cpr-panic-reg-addr-list, rc=%d\n", ++ rc); ++ return rc; ++ } ++ regs[i].virt_addr = devm_ioremap(ctrl->dev, regs[i].addr, 0x4); ++ if (!regs[i].virt_addr) { ++ pr_err("Unable to map panic register addr 0x%08x\n", ++ regs[i].addr); ++ return -EINVAL; ++ } ++ regs[i].value = 0xFFFFFFFF; ++ } ++ ++ panic_regs_info->reg_count = reg_count; ++ panic_regs_info->regs = regs; ++ ctrl->panic_regs_info = panic_regs_info; ++ ++ return rc; ++} ++ ++/** ++ * cpr3_parse_common_ctrl_data() - parse common CPR3 controller properties from ++ * device tree ++ * @ctrl: Pointer to the CPR3 controller ++ * ++ * Return: 0 on success, errno on failure ++ */ ++int cpr3_parse_common_ctrl_data(struct cpr3_controller *ctrl) ++{ ++ int rc; ++ ++ rc = cpr3_parse_ctrl_u32(ctrl, "qcom,cpr-sensor-time", ++ &ctrl->sensor_time, 0, UINT_MAX); ++ if (rc) ++ return rc; ++ ++ rc = cpr3_parse_ctrl_u32(ctrl, "qcom,cpr-loop-time", ++ &ctrl->loop_time, 0, UINT_MAX); ++ if (rc) ++ return rc; ++ ++ rc = cpr3_parse_ctrl_u32(ctrl, "qcom,cpr-idle-cycles", ++ &ctrl->idle_clocks, CPR3_IDLE_CLOCKS_MIN, ++ CPR3_IDLE_CLOCKS_MAX); ++ if (rc) ++ return rc; ++ ++ rc = cpr3_parse_ctrl_u32(ctrl, "qcom,cpr-step-quot-init-min", ++ &ctrl->step_quot_init_min, CPR3_STEP_QUOT_MIN, ++ CPR3_STEP_QUOT_MAX); ++ if (rc) ++ return rc; ++ ++ rc = cpr3_parse_ctrl_u32(ctrl, "qcom,cpr-step-quot-init-max", ++ &ctrl->step_quot_init_max, CPR3_STEP_QUOT_MIN, ++ CPR3_STEP_QUOT_MAX); ++ if (rc) ++ return rc; ++ ++ rc = of_property_read_u32(ctrl->dev->of_node, "qcom,voltage-step", ++ &ctrl->step_volt); ++ if (rc) { ++ cpr3_err(ctrl, "error reading property qcom,voltage-step, rc=%d\n", ++ rc); ++ return rc; ++ } ++ if (ctrl->step_volt <= 0) { ++ 
cpr3_err(ctrl, "qcom,voltage-step=%d is invalid\n", ++ ctrl->step_volt); ++ return -EINVAL; ++ } ++ ++ rc = cpr3_parse_ctrl_u32(ctrl, "qcom,cpr-count-mode", ++ &ctrl->count_mode, CPR3_COUNT_MODE_ALL_AT_ONCE_MIN, ++ CPR3_COUNT_MODE_STAGGERED); ++ if (rc) ++ return rc; ++ ++ /* Count repeat is optional */ ++ ctrl->count_repeat = 0; ++ of_property_read_u32(ctrl->dev->of_node, "qcom,cpr-count-repeat", ++ &ctrl->count_repeat); ++ ++ ctrl->cpr_allowed_sw = ++ of_property_read_bool(ctrl->dev->of_node, "qcom,cpr-enable") || ++ ctrl->cpr_global_setting == CPR_CLOSED_LOOP_EN; ++ ++ rc = cpr3_parse_irq_affinity(ctrl); ++ if (rc) ++ return rc; ++ ++ /* Aging reference voltage is optional */ ++ ctrl->aging_ref_volt = 0; ++ of_property_read_u32(ctrl->dev->of_node, "qcom,cpr-aging-ref-voltage", ++ &ctrl->aging_ref_volt); ++ ++ /* Aging possible bitmask is optional */ ++ ctrl->aging_possible_mask = 0; ++ of_property_read_u32(ctrl->dev->of_node, ++ "qcom,cpr-aging-allowed-reg-mask", ++ &ctrl->aging_possible_mask); ++ ++ if (ctrl->aging_possible_mask) { ++ /* ++ * Aging possible register value required if bitmask is ++ * specified ++ */ ++ rc = cpr3_parse_ctrl_u32(ctrl, ++ "qcom,cpr-aging-allowed-reg-value", ++ &ctrl->aging_possible_val, 0, UINT_MAX); ++ if (rc) ++ return rc; ++ } ++ ++ if (of_find_property(ctrl->dev->of_node, "clock-names", NULL)) { ++ ctrl->core_clk = devm_clk_get(ctrl->dev, "core_clk"); ++ if (IS_ERR(ctrl->core_clk)) { ++ rc = PTR_ERR(ctrl->core_clk); ++ if (rc != -EPROBE_DEFER) ++ cpr3_err(ctrl, "unable request core clock, rc=%d\n", ++ rc); ++ return rc; ++ } ++ } ++ ++ rc = cpr3_panic_notifier_init(ctrl); ++ if (rc) ++ return rc; ++ ++ if (of_find_property(ctrl->dev->of_node, "vdd-supply", NULL)) { ++ ctrl->vdd_regulator = devm_regulator_get(ctrl->dev, "vdd"); ++ if (IS_ERR(ctrl->vdd_regulator)) { ++ rc = PTR_ERR(ctrl->vdd_regulator); ++ if (rc != -EPROBE_DEFER) ++ cpr3_err(ctrl, "unable to request vdd regulator, rc=%d\n", ++ rc); ++ return rc; ++ } ++ } else { ++ cpr3_err(ctrl, "vdd supply is not defined\n"); ++ return -ENODEV; ++ } ++ ++ ctrl->system_regulator = devm_regulator_get_optional(ctrl->dev, ++ "system"); ++ if (IS_ERR(ctrl->system_regulator)) { ++ rc = PTR_ERR(ctrl->system_regulator); ++ if (rc != -EPROBE_DEFER) { ++ rc = 0; ++ ctrl->system_regulator = NULL; ++ } else { ++ return rc; ++ } ++ } ++ ++ ctrl->mem_acc_regulator = devm_regulator_get_optional(ctrl->dev, ++ "mem-acc"); ++ if (IS_ERR(ctrl->mem_acc_regulator)) { ++ rc = PTR_ERR(ctrl->mem_acc_regulator); ++ if (rc != -EPROBE_DEFER) { ++ rc = 0; ++ ctrl->mem_acc_regulator = NULL; ++ } else { ++ return rc; ++ } ++ } ++ ++ return rc; ++} ++ ++/** ++ * cpr3_parse_open_loop_common_ctrl_data() - parse common open loop CPR3 ++ * controller properties from device tree ++ * @ctrl: Pointer to the CPR3 controller ++ * ++ * Return: 0 on success, errno on failure ++ */ ++int cpr3_parse_open_loop_common_ctrl_data(struct cpr3_controller *ctrl) ++{ ++ int rc; ++ ++ rc = of_property_read_u32(ctrl->dev->of_node, "qcom,voltage-step", ++ &ctrl->step_volt); ++ if (rc) { ++ cpr3_err(ctrl, "error reading property qcom,voltage-step, rc=%d\n", ++ rc); ++ return rc; ++ } ++ ++ if (ctrl->step_volt <= 0) { ++ cpr3_err(ctrl, "qcom,voltage-step=%d is invalid\n", ++ ctrl->step_volt); ++ return -EINVAL; ++ } ++ ++ if (of_find_property(ctrl->dev->of_node, "vdd-supply", NULL)) { ++ ctrl->vdd_regulator = devm_regulator_get(ctrl->dev, "vdd"); ++ if (IS_ERR(ctrl->vdd_regulator)) { ++ rc = PTR_ERR(ctrl->vdd_regulator); ++ if (rc != -EPROBE_DEFER) ++ 
cpr3_err(ctrl, "unable to request vdd regulator, rc=%d\n", ++ rc); ++ return rc; ++ } ++ } else { ++ cpr3_err(ctrl, "vdd supply is not defined\n"); ++ return -ENODEV; ++ } ++ ++ ctrl->system_regulator = devm_regulator_get_optional(ctrl->dev, ++ "system"); ++ if (IS_ERR(ctrl->system_regulator)) { ++ rc = PTR_ERR(ctrl->system_regulator); ++ if (rc != -EPROBE_DEFER) { ++ rc = 0; ++ ctrl->system_regulator = NULL; ++ } else { ++ return rc; ++ } ++ } else { ++ rc = regulator_enable(ctrl->system_regulator); ++ } ++ ++ ctrl->mem_acc_regulator = devm_regulator_get_optional(ctrl->dev, ++ "mem-acc"); ++ if (IS_ERR(ctrl->mem_acc_regulator)) { ++ rc = PTR_ERR(ctrl->mem_acc_regulator); ++ if (rc != -EPROBE_DEFER) { ++ rc = 0; ++ ctrl->mem_acc_regulator = NULL; ++ } else { ++ return rc; ++ } ++ } ++ ++ return rc; ++} ++ ++/** ++ * cpr3_limit_open_loop_voltages() - modify the open-loop voltage of each corner ++ * so that it fits within the floor to ceiling ++ * voltage range of the corner ++ * @vreg: Pointer to the CPR3 regulator ++ * ++ * This function clips the open-loop voltage for each corner so that it is ++ * limited to the floor to ceiling range. It also rounds each open-loop voltage ++ * so that it corresponds to a set point available to the underlying regulator. ++ * ++ * Return: 0 on success, errno on failure ++ */ ++int cpr3_limit_open_loop_voltages(struct cpr3_regulator *vreg) ++{ ++ int i, volt; ++ ++ cpr3_debug(vreg, "open-loop voltages after trimming and rounding:\n"); ++ for (i = 0; i < vreg->corner_count; i++) { ++ volt = CPR3_ROUND(vreg->corner[i].open_loop_volt, ++ vreg->thread->ctrl->step_volt); ++ if (volt < vreg->corner[i].floor_volt) ++ volt = vreg->corner[i].floor_volt; ++ else if (volt > vreg->corner[i].ceiling_volt) ++ volt = vreg->corner[i].ceiling_volt; ++ vreg->corner[i].open_loop_volt = volt; ++ cpr3_debug(vreg, "corner[%2d]: open-loop=%d uV\n", i, volt); ++ } ++ ++ return 0; ++} ++ ++/** ++ * cpr3_open_loop_voltage_as_ceiling() - configures the ceiling voltage for each ++ * corner to equal the open-loop voltage if the relevant device ++ * tree property is found for the CPR3 regulator ++ * @vreg: Pointer to the CPR3 regulator ++ * ++ * This function assumes that the the open-loop voltage for each corner has ++ * already been rounded to the nearest allowed set point and that it falls ++ * within the floor to ceiling range. ++ * ++ * Return: none ++ */ ++void cpr3_open_loop_voltage_as_ceiling(struct cpr3_regulator *vreg) ++{ ++ int i; ++ ++ if (!of_property_read_bool(vreg->of_node, ++ "qcom,cpr-scaled-open-loop-voltage-as-ceiling")) ++ return; ++ ++ for (i = 0; i < vreg->corner_count; i++) ++ vreg->corner[i].ceiling_volt ++ = vreg->corner[i].open_loop_volt; ++} ++ ++/** ++ * cpr3_limit_floor_voltages() - raise the floor voltage of each corner so that ++ * the optional maximum floor to ceiling voltage range specified in ++ * device tree is satisfied ++ * @vreg: Pointer to the CPR3 regulator ++ * ++ * This function also ensures that the open-loop voltage for each corner falls ++ * within the final floor to ceiling voltage range and that floor voltages ++ * increase monotonically. 
++ *
++ * Return: 0 on success, errno on failure
++ */
++int cpr3_limit_floor_voltages(struct cpr3_regulator *vreg)
++{
++    char *prop = "qcom,cpr-floor-to-ceiling-max-range";
++    int i, floor_new;
++    u32 *floor_range;
++    int rc = 0;
++
++    if (!of_find_property(vreg->of_node, prop, NULL))
++        goto enforce_monotonicity;
++
++    floor_range = kcalloc(vreg->corner_count, sizeof(*floor_range),
++            GFP_KERNEL);
++    if (!floor_range)
++        return -ENOMEM;
++
++    rc = cpr3_parse_corner_array_property(vreg, prop, 1, floor_range);
++    if (rc)
++        goto free_floor_adjust;
++
++    for (i = 0; i < vreg->corner_count; i++) {
++        if ((s32)floor_range[i] >= 0) {
++            floor_new = CPR3_ROUND(vreg->corner[i].ceiling_volt
++                    - floor_range[i],
++                    vreg->thread->ctrl->step_volt);
++
++            vreg->corner[i].floor_volt = max(floor_new,
++                    vreg->corner[i].floor_volt);
++            if (vreg->corner[i].open_loop_volt
++                < vreg->corner[i].floor_volt)
++                vreg->corner[i].open_loop_volt
++                    = vreg->corner[i].floor_volt;
++        }
++    }
++
++free_floor_adjust:
++    kfree(floor_range);
++
++enforce_monotonicity:
++    /* Ensure that floor voltages increase monotonically. */
++    for (i = 1; i < vreg->corner_count; i++) {
++        if (vreg->corner[i].floor_volt
++            < vreg->corner[i - 1].floor_volt) {
++            cpr3_debug(vreg, "corner %d floor voltage=%d uV < corner %d voltage=%d uV; overriding: corner %d voltage=%d\n",
++                i, vreg->corner[i].floor_volt,
++                i - 1, vreg->corner[i - 1].floor_volt,
++                i, vreg->corner[i - 1].floor_volt);
++            vreg->corner[i].floor_volt
++                = vreg->corner[i - 1].floor_volt;
++
++            if (vreg->corner[i].open_loop_volt
++                < vreg->corner[i].floor_volt)
++                vreg->corner[i].open_loop_volt
++                    = vreg->corner[i].floor_volt;
++            if (vreg->corner[i].ceiling_volt
++                < vreg->corner[i].floor_volt)
++                vreg->corner[i].ceiling_volt
++                    = vreg->corner[i].floor_volt;
++        }
++    }
++
++    return rc;
++}
++
++/**
++ * cpr3_print_quots() - print CPR target quotients into the kernel log for
++ *		debugging purposes
++ * @vreg:		Pointer to the CPR3 regulator
++ *
++ * Return: none
++ */
++void cpr3_print_quots(struct cpr3_regulator *vreg)
++{
++    int i, j, pos;
++    size_t buflen;
++    char *buf;
++
++    buflen = sizeof(*buf) * CPR3_RO_COUNT * (MAX_CHARS_PER_INT + 2);
++    buf = kzalloc(buflen, GFP_KERNEL);
++    if (!buf)
++        return;
++
++    for (i = 0; i < vreg->corner_count; i++) {
++        for (j = 0, pos = 0; j < CPR3_RO_COUNT; j++)
++            pos += scnprintf(buf + pos, buflen - pos, " %u",
++                vreg->corner[i].target_quot[j]);
++        cpr3_debug(vreg, "target quots[%2d]:%s\n", i, buf);
++    }
++
++    kfree(buf);
++}
++
++/**
++ * cpr3_determine_part_type() - determine the part type (SS/TT/FF)
++ *
++ * The qcom,cpr-part-types property specifies the number of part types for
++ * which the correction voltages differ. The qcom,cpr-parts-voltage property
++ * contains the threshold voltages against which the fused open-loop voltage
++ * is compared in order to determine the part type.
++ *
++ * If qcom,cpr-part-types has value n, then qcom,cpr-parts-voltage must be an
++ * array of n - 1 threshold voltages listed in increasing order. This function
++ * compares the fused voltage against each of these thresholds and selects the
++ * first index whose threshold voltage is greater than the fused voltage.
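++ *
++ * For example (hypothetical thresholds), with qcom,cpr-part-types = <3> and
++ * qcom,cpr-parts-voltage = <840000 870000>, a fused voltage below 840000 uV
++ * selects part type 0, a value in [840000, 870000) uV selects part type 1,
++ * and any higher value selects part type 2.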
++ * ++ * @vreg: Pointer to the CPR3 regulator ++ * @fuse_volt: fused open loop voltage which will be compared with ++ * qcom,cpr-parts-voltage array ++ * ++ * Return: 0 on success, errno on failure ++ */ ++int cpr3_determine_part_type(struct cpr3_regulator *vreg, int fuse_volt) ++{ ++ int i, rc, len; ++ u32 volt; ++ int soc_version_major; ++ char prop_name[100]; ++ const char prop_name_def[] = "qcom,cpr-parts-voltage"; ++ const char prop_name_v2[] = "qcom,cpr-parts-voltage-v2"; ++ ++ soc_version_major = read_ipq_soc_version_major(); ++ BUG_ON(soc_version_major <= 0); ++ ++ if (of_property_read_u32(vreg->of_node, "qcom,cpr-part-types", ++ &vreg->part_type_supported)) ++ return 0; ++ ++ if (soc_version_major > 1) ++ strlcpy(prop_name, prop_name_v2, sizeof(prop_name_v2)); ++ else ++ strlcpy(prop_name, prop_name_def, sizeof(prop_name_def)); ++ ++ if (!of_find_property(vreg->of_node, prop_name, &len)) { ++ cpr3_err(vreg, "property %s is missing\n", prop_name); ++ return -EINVAL; ++ } ++ ++ if (len != (vreg->part_type_supported - 1) * sizeof(u32)) { ++ cpr3_err(vreg, "wrong len in qcom,cpr-parts-voltage\n"); ++ return -EINVAL; ++ } ++ ++ for (i = 0; i < vreg->part_type_supported - 1; i++) { ++ rc = of_property_read_u32_index(vreg->of_node, ++ prop_name, i, &volt); ++ if (rc) { ++ cpr3_err(vreg, "error reading property %s, rc=%d\n", ++ prop_name, rc); ++ return rc; ++ } ++ ++ if (fuse_volt < volt) ++ break; ++ } ++ ++ vreg->part_type = i; ++ return 0; ++} ++ ++int cpr3_determine_temp_base_open_loop_correction(struct cpr3_regulator *vreg, ++ int *fuse_volt) ++{ ++ int i, rc, prev_volt; ++ int *volt_adjust; ++ char prop_str[75]; ++ int soc_version_major = read_ipq_soc_version_major(); ++ ++ BUG_ON(soc_version_major <= 0); ++ ++ if (vreg->part_type_supported) { ++ if (soc_version_major > 1) ++ snprintf(prop_str, sizeof(prop_str), ++ "qcom,cpr-cold-temp-voltage-adjustment-v2-%d", ++ vreg->part_type); ++ else ++ snprintf(prop_str, sizeof(prop_str), ++ "qcom,cpr-cold-temp-voltage-adjustment-%d", ++ vreg->part_type); ++ } else { ++ strlcpy(prop_str, "qcom,cpr-cold-temp-voltage-adjustment", ++ sizeof(prop_str)); ++ } ++ ++ if (!of_find_property(vreg->of_node, prop_str, NULL)) { ++ /* No adjustment required. 
*/ ++ cpr3_info(vreg, "No cold temperature adjustment required.\n"); ++ return 0; ++ } ++ ++ volt_adjust = kcalloc(vreg->fuse_corner_count, sizeof(*volt_adjust), ++ GFP_KERNEL); ++ if (!volt_adjust) ++ return -ENOMEM; ++ ++ rc = cpr3_parse_array_property(vreg, prop_str, ++ vreg->fuse_corner_count, volt_adjust); ++ if (rc) { ++ cpr3_err(vreg, "could not load cold temp voltage adjustments, rc=%d\n", ++ rc); ++ goto done; ++ } ++ ++ for (i = 0; i < vreg->fuse_corner_count; i++) { ++ if (volt_adjust[i]) { ++ prev_volt = fuse_volt[i]; ++ fuse_volt[i] += volt_adjust[i]; ++ cpr3_debug(vreg, ++ "adjusted fuse corner %d open-loop voltage: %d -> %d uV\n", ++ i, prev_volt, fuse_volt[i]); ++ } ++ } ++ ++done: ++ kfree(volt_adjust); ++ return rc; ++} ++ ++/** ++ * cpr3_can_adjust_cold_temp() - Is cold temperature adjustment available ++ * ++ * @vreg: Pointer to the CPR3 regulator ++ * ++ * This function checks the cold temperature threshold is available ++ * ++ * Return: true on cold temperature threshold is available, else false ++ */ ++bool cpr3_can_adjust_cold_temp(struct cpr3_regulator *vreg) ++{ ++ char prop_str[75]; ++ int soc_version_major = read_ipq_soc_version_major(); ++ ++ BUG_ON(soc_version_major <= 0); ++ ++ if (soc_version_major > 1) ++ strlcpy(prop_str, "qcom,cpr-cold-temp-threshold-v2", ++ sizeof(prop_str)); ++ else ++ strlcpy(prop_str, "qcom,cpr-cold-temp-threshold", ++ sizeof(prop_str)); ++ ++ if (!of_find_property(vreg->of_node, prop_str, NULL)) { ++ /* No adjustment required. */ ++ return false; ++ } else ++ return true; ++} ++ ++/** ++ * cpr3_get_cold_temp_threshold() - get cold temperature threshold ++ * ++ * @vreg: Pointer to the CPR3 regulator ++ * @cold_temp: cold temperature read. ++ * ++ * This function reads the cold temperature threshold below which ++ * cold temperature adjustment margins will be applied. ++ * ++ * Return: 0 on success, errno on failure ++ */ ++int cpr3_get_cold_temp_threshold(struct cpr3_regulator *vreg, int *cold_temp) ++{ ++ int rc; ++ u32 temp; ++ char req_prop_str[75], prop_str[75]; ++ int soc_version_major = read_ipq_soc_version_major(); ++ ++ BUG_ON(soc_version_major <= 0); ++ ++ if (vreg->part_type_supported) { ++ if (soc_version_major > 1) ++ snprintf(req_prop_str, sizeof(req_prop_str), ++ "qcom,cpr-cold-temp-voltage-adjustment-v2-%d", ++ vreg->part_type); ++ else ++ snprintf(req_prop_str, sizeof(req_prop_str), ++ "qcom,cpr-cold-temp-voltage-adjustment-%d", ++ vreg->part_type); ++ } else { ++ strlcpy(req_prop_str, "qcom,cpr-cold-temp-voltage-adjustment", ++ sizeof(req_prop_str)); ++ } ++ ++ if (soc_version_major > 1) ++ strlcpy(prop_str, "qcom,cpr-cold-temp-threshold-v2", ++ sizeof(prop_str)); ++ else ++ strlcpy(prop_str, "qcom,cpr-cold-temp-threshold", ++ sizeof(prop_str)); ++ ++ if (!of_find_property(vreg->of_node, req_prop_str, NULL)) { ++ /* No adjustment required. */ ++ cpr3_info(vreg, "Cold temperature adjustment not required.\n"); ++ return 0; ++ } ++ ++ if (!of_find_property(vreg->of_node, prop_str, NULL)) { ++ /* No adjustment required. 
*/ ++ cpr3_err(vreg, "Missing %s required for %s\n", ++ prop_str, req_prop_str); ++ return -EINVAL; ++ } ++ ++ rc = of_property_read_u32(vreg->of_node, prop_str, &temp); ++ if (rc) { ++ cpr3_err(vreg, "error reading property %s, rc=%d\n", ++ prop_str, rc); ++ return rc; ++ } ++ ++ *cold_temp = temp; ++ return 0; ++} ++ ++/** ++ * cpr3_adjust_fused_open_loop_voltages() - adjust the fused open-loop voltages ++ * for each fuse corner according to device tree values ++ * @vreg: Pointer to the CPR3 regulator ++ * @fuse_volt: Pointer to an array of the fused open-loop voltage ++ * values ++ * ++ * Voltage values in fuse_volt are modified in place. ++ * ++ * Return: 0 on success, errno on failure ++ */ ++int cpr3_adjust_fused_open_loop_voltages(struct cpr3_regulator *vreg, ++ int *fuse_volt) ++{ ++ int i, rc, prev_volt; ++ int *volt_adjust; ++ char prop_str[75]; ++ int soc_version_major = read_ipq_soc_version_major(); ++ ++ BUG_ON(soc_version_major <= 0); ++ ++ if (vreg->part_type_supported) { ++ if (soc_version_major > 1) ++ snprintf(prop_str, sizeof(prop_str), ++ "qcom,cpr-open-loop-voltage-fuse-adjustment-v2-%d", ++ vreg->part_type); ++ else ++ snprintf(prop_str, sizeof(prop_str), ++ "qcom,cpr-open-loop-voltage-fuse-adjustment-%d", ++ vreg->part_type); ++ } else { ++ strlcpy(prop_str, "qcom,cpr-open-loop-voltage-fuse-adjustment", ++ sizeof(prop_str)); ++ } ++ ++ if (!of_find_property(vreg->of_node, prop_str, NULL)) { ++ /* No adjustment required. */ ++ return 0; ++ } ++ ++ volt_adjust = kcalloc(vreg->fuse_corner_count, sizeof(*volt_adjust), ++ GFP_KERNEL); ++ if (!volt_adjust) ++ return -ENOMEM; ++ ++ rc = cpr3_parse_array_property(vreg, ++ prop_str, vreg->fuse_corner_count, volt_adjust); ++ if (rc) { ++ cpr3_err(vreg, "could not load open-loop fused voltage adjustments, rc=%d\n", ++ rc); ++ goto done; ++ } ++ ++ for (i = 0; i < vreg->fuse_corner_count; i++) { ++ if (volt_adjust[i]) { ++ prev_volt = fuse_volt[i]; ++ fuse_volt[i] += volt_adjust[i]; ++ cpr3_debug(vreg, "adjusted fuse corner %d open-loop voltage: %d --> %d uV\n", ++ i, prev_volt, fuse_volt[i]); ++ } ++ } ++ ++done: ++ kfree(volt_adjust); ++ return rc; ++} ++ ++/** ++ * cpr3_adjust_open_loop_voltages() - adjust the open-loop voltages for each ++ * corner according to device tree values ++ * @vreg: Pointer to the CPR3 regulator ++ * ++ * Return: 0 on success, errno on failure ++ */ ++int cpr3_adjust_open_loop_voltages(struct cpr3_regulator *vreg) ++{ ++ int i, rc, prev_volt, min_volt; ++ int *volt_adjust, *volt_diff; ++ ++ if (!of_find_property(vreg->of_node, ++ "qcom,cpr-open-loop-voltage-adjustment", NULL)) { ++ /* No adjustment required. 
*/ ++ return 0; ++ } ++ ++ volt_adjust = kcalloc(vreg->corner_count, sizeof(*volt_adjust), ++ GFP_KERNEL); ++ volt_diff = kcalloc(vreg->corner_count, sizeof(*volt_diff), GFP_KERNEL); ++ if (!volt_adjust || !volt_diff) { ++ rc = -ENOMEM; ++ goto done; ++ } ++ ++ rc = cpr3_parse_corner_array_property(vreg, ++ "qcom,cpr-open-loop-voltage-adjustment", 1, volt_adjust); ++ if (rc) { ++ cpr3_err(vreg, "could not load open-loop voltage adjustments, rc=%d\n", ++ rc); ++ goto done; ++ } ++ ++ for (i = 0; i < vreg->corner_count; i++) { ++ if (volt_adjust[i]) { ++ prev_volt = vreg->corner[i].open_loop_volt; ++ vreg->corner[i].open_loop_volt += volt_adjust[i]; ++ cpr3_debug(vreg, "adjusted corner %d open-loop voltage: %d --> %d uV\n", ++ i, prev_volt, vreg->corner[i].open_loop_volt); ++ } ++ } ++ ++ if (of_find_property(vreg->of_node, ++ "qcom,cpr-open-loop-voltage-min-diff", NULL)) { ++ rc = cpr3_parse_corner_array_property(vreg, ++ "qcom,cpr-open-loop-voltage-min-diff", 1, volt_diff); ++ if (rc) { ++ cpr3_err(vreg, "could not load minimum open-loop voltage differences, rc=%d\n", ++ rc); ++ goto done; ++ } ++ } ++ ++ /* ++ * Ensure that open-loop voltages increase monotonically with respect ++ * to configurable minimum allowed differences. ++ */ ++ for (i = 1; i < vreg->corner_count; i++) { ++ min_volt = vreg->corner[i - 1].open_loop_volt + volt_diff[i]; ++ if (vreg->corner[i].open_loop_volt < min_volt) { ++ cpr3_debug(vreg, "adjusted corner %d open-loop voltage=%d uV < corner %d voltage=%d uV + min diff=%d uV; overriding: corner %d voltage=%d\n", ++ i, vreg->corner[i].open_loop_volt, ++ i - 1, vreg->corner[i - 1].open_loop_volt, ++ volt_diff[i], i, min_volt); ++ vreg->corner[i].open_loop_volt = min_volt; ++ } ++ } ++ ++done: ++ kfree(volt_diff); ++ kfree(volt_adjust); ++ return rc; ++} ++ ++/** ++ * cpr3_quot_adjustment() - returns the quotient adjustment value resulting from ++ * the specified voltage adjustment and RO scaling factor ++ * @ro_scale: The CPR ring oscillator (RO) scaling factor with units ++ * of QUOT/V ++ * @volt_adjust: The amount to adjust the voltage by in units of ++ * microvolts. This value may be positive or negative. ++ */ ++int cpr3_quot_adjustment(int ro_scale, int volt_adjust) ++{ ++ unsigned long long temp; ++ int quot_adjust; ++ int sign = 1; ++ ++ if (ro_scale < 0) { ++ sign = -sign; ++ ro_scale = -ro_scale; ++ } ++ ++ if (volt_adjust < 0) { ++ sign = -sign; ++ volt_adjust = -volt_adjust; ++ } ++ ++ temp = (unsigned long long)ro_scale * (unsigned long long)volt_adjust; ++ do_div(temp, 1000000); ++ ++ quot_adjust = temp; ++ quot_adjust *= sign; ++ ++ return quot_adjust; ++} ++ ++/** ++ * cpr3_voltage_adjustment() - returns the voltage adjustment value resulting ++ * from the specified quotient adjustment and RO scaling factor ++ * @ro_scale: The CPR ring oscillator (RO) scaling factor with units ++ * of QUOT/V ++ * @quot_adjust: The amount to adjust the quotient by in units of ++ * QUOT. This value may be positive or negative. 
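++ *
++ * For example, with ro_scale = 2000 QUOT/V and quot_adjust = 10, the
++ * returned voltage adjustment is 10 * 1000000 / 2000 = 5000 uV. This is the
++ * inverse of the conversion performed by cpr3_quot_adjustment().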
++ */ ++int cpr3_voltage_adjustment(int ro_scale, int quot_adjust) ++{ ++ unsigned long long temp; ++ int volt_adjust; ++ int sign = 1; ++ ++ if (ro_scale < 0) { ++ sign = -sign; ++ ro_scale = -ro_scale; ++ } ++ ++ if (quot_adjust < 0) { ++ sign = -sign; ++ quot_adjust = -quot_adjust; ++ } ++ ++ if (ro_scale == 0) ++ return 0; ++ ++ temp = (unsigned long long)quot_adjust * 1000000; ++ do_div(temp, ro_scale); ++ ++ volt_adjust = temp; ++ volt_adjust *= sign; ++ ++ return volt_adjust; ++} ++ ++/** ++ * cpr3_parse_closed_loop_voltage_adjustments() - load per-fuse-corner and ++ * per-corner closed-loop adjustment values from device tree ++ * @vreg: Pointer to the CPR3 regulator ++ * @ro_sel: Array of ring oscillator values selected for each ++ * fuse corner ++ * @volt_adjust: Pointer to array which will be filled with the ++ * per-corner closed-loop adjustment voltages ++ * @volt_adjust_fuse: Pointer to array which will be filled with the ++ * per-fuse-corner closed-loop adjustment voltages ++ * @ro_scale: Pointer to array which will be filled with the ++ * per-fuse-corner RO scaling factor values with units of ++ * QUOT/V ++ * ++ * Return: 0 on success, errno on failure ++ */ ++int cpr3_parse_closed_loop_voltage_adjustments( ++ struct cpr3_regulator *vreg, u64 *ro_sel, ++ int *volt_adjust, int *volt_adjust_fuse, int *ro_scale) ++{ ++ int i, rc; ++ u32 *ro_all_scale; ++ ++ char volt_adj[] = "qcom,cpr-closed-loop-voltage-adjustment"; ++ char volt_fuse_adj[] = "qcom,cpr-closed-loop-voltage-fuse-adjustment"; ++ char ro_scaling[] = "qcom,cpr-ro-scaling-factor"; ++ ++ if (!of_find_property(vreg->of_node, volt_adj, NULL) ++ && !of_find_property(vreg->of_node, volt_fuse_adj, NULL) ++ && !vreg->aging_allowed) { ++ /* No adjustment required. */ ++ return 0; ++ } else if (!of_find_property(vreg->of_node, ro_scaling, NULL)) { ++ cpr3_err(vreg, "Missing %s required for closed-loop voltage adjustment.\n", ++ ro_scaling); ++ return -EINVAL; ++ } ++ ++ ro_all_scale = kcalloc(vreg->fuse_corner_count * CPR3_RO_COUNT, ++ sizeof(*ro_all_scale), GFP_KERNEL); ++ if (!ro_all_scale) ++ return -ENOMEM; ++ ++ rc = cpr3_parse_array_property(vreg, ro_scaling, ++ vreg->fuse_corner_count * CPR3_RO_COUNT, ro_all_scale); ++ if (rc) { ++ cpr3_err(vreg, "could not load RO scaling factors, rc=%d\n", ++ rc); ++ goto done; ++ } ++ ++ for (i = 0; i < vreg->fuse_corner_count; i++) ++ ro_scale[i] = ro_all_scale[i * CPR3_RO_COUNT + ro_sel[i]]; ++ ++ for (i = 0; i < vreg->corner_count; i++) ++ memcpy(vreg->corner[i].ro_scale, ++ &ro_all_scale[vreg->corner[i].cpr_fuse_corner * CPR3_RO_COUNT], ++ sizeof(*ro_all_scale) * CPR3_RO_COUNT); ++ ++ if (of_find_property(vreg->of_node, volt_fuse_adj, NULL)) { ++ rc = cpr3_parse_array_property(vreg, volt_fuse_adj, ++ vreg->fuse_corner_count, volt_adjust_fuse); ++ if (rc) { ++ cpr3_err(vreg, "could not load closed-loop fused voltage adjustments, rc=%d\n", ++ rc); ++ goto done; ++ } ++ } ++ ++ if (of_find_property(vreg->of_node, volt_adj, NULL)) { ++ rc = cpr3_parse_corner_array_property(vreg, volt_adj, ++ 1, volt_adjust); ++ if (rc) { ++ cpr3_err(vreg, "could not load closed-loop voltage adjustments, rc=%d\n", ++ rc); ++ goto done; ++ } ++ } ++ ++done: ++ kfree(ro_all_scale); ++ return rc; ++} ++ ++/** ++ * cpr3_apm_init() - initialize APM data for a CPR3 controller ++ * @ctrl: Pointer to the CPR3 controller ++ * ++ * This function loads memory array power mux (APM) data from device tree ++ * if it is present and requests a handle to the appropriate APM controller ++ * device. 
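++ *
++ * An illustrative device tree fragment (phandle and voltage values are
++ * hypothetical) might contain:
++ *	qcom,apm-ctrl = <&apm>;
++ *	qcom,apm-threshold-voltage = <848000>;
++ *	qcom,apm-hysteresis-voltage = <32000>;
++ * where the hysteresis property is optional.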
++ * ++ * Return: 0 on success, errno on failure ++ */ ++int cpr3_apm_init(struct cpr3_controller *ctrl) ++{ ++ struct device_node *node = ctrl->dev->of_node; ++ int rc; ++ ++ if (!of_find_property(node, "qcom,apm-ctrl", NULL)) { ++ /* No APM used */ ++ return 0; ++ } ++ ++ ctrl->apm = msm_apm_ctrl_dev_get(ctrl->dev); ++ if (IS_ERR(ctrl->apm)) { ++ rc = PTR_ERR(ctrl->apm); ++ if (rc != -EPROBE_DEFER) ++ cpr3_err(ctrl, "APM get failed, rc=%d\n", rc); ++ return rc; ++ } ++ ++ rc = of_property_read_u32(node, "qcom,apm-threshold-voltage", ++ &ctrl->apm_threshold_volt); ++ if (rc) { ++ cpr3_err(ctrl, "error reading qcom,apm-threshold-voltage, rc=%d\n", ++ rc); ++ return rc; ++ } ++ ctrl->apm_threshold_volt ++ = CPR3_ROUND(ctrl->apm_threshold_volt, ctrl->step_volt); ++ ++ /* No error check since this is an optional property. */ ++ of_property_read_u32(node, "qcom,apm-hysteresis-voltage", ++ &ctrl->apm_adj_volt); ++ ctrl->apm_adj_volt = CPR3_ROUND(ctrl->apm_adj_volt, ctrl->step_volt); ++ ++ ctrl->apm_high_supply = MSM_APM_SUPPLY_APCC; ++ ctrl->apm_low_supply = MSM_APM_SUPPLY_MX; ++ ++ return 0; ++} ++ ++/** ++ * cpr3_mem_acc_init() - initialize mem-acc regulator data for ++ * a CPR3 regulator ++ * @ctrl: Pointer to the CPR3 controller ++ * ++ * Return: 0 on success, errno on failure ++ */ ++int cpr3_mem_acc_init(struct cpr3_regulator *vreg) ++{ ++ struct cpr3_controller *ctrl = vreg->thread->ctrl; ++ u32 *temp; ++ int i, rc; ++ ++ if (!ctrl->mem_acc_regulator) { ++ cpr3_info(ctrl, "not using memory accelerator regulator\n"); ++ return 0; ++ } ++ ++ temp = kcalloc(vreg->corner_count, sizeof(*temp), GFP_KERNEL); ++ if (!temp) ++ return -ENOMEM; ++ ++ rc = cpr3_parse_corner_array_property(vreg, "qcom,mem-acc-voltage", ++ 1, temp); ++ if (rc) { ++ cpr3_err(ctrl, "could not load mem-acc corners, rc=%d\n", rc); ++ } else { ++ for (i = 0; i < vreg->corner_count; i++) ++ vreg->corner[i].mem_acc_volt = temp[i]; ++ } ++ ++ kfree(temp); ++ return rc; ++} ++ ++/** ++ * cpr4_load_core_and_temp_adj() - parse amount of voltage adjustment for ++ * per-online-core and per-temperature voltage adjustment for a ++ * given corner or corner band from device tree. ++ * @vreg: Pointer to the CPR3 regulator ++ * @num: Corner number or corner band number ++ * @use_corner_band: Boolean indicating if the CPR3 regulator supports ++ * adjustments per corner band ++ * ++ * Return: 0 on success, errno on failure ++ */ ++static int cpr4_load_core_and_temp_adj(struct cpr3_regulator *vreg, ++ int num, bool use_corner_band) ++{ ++ struct cpr3_controller *ctrl = vreg->thread->ctrl; ++ struct cpr4_sdelta *sdelta; ++ int sdelta_size, i, j, pos, rc = 0; ++ char str[75]; ++ size_t buflen; ++ char *buf; ++ ++ sdelta = use_corner_band ? 
vreg->corner_band[num].sdelta : ++ vreg->corner[num].sdelta; ++ ++ if (!sdelta->allow_core_count_adj && !sdelta->allow_temp_adj) { ++ /* corner doesn't need sdelta table */ ++ sdelta->max_core_count = 0; ++ sdelta->temp_band_count = 0; ++ return rc; ++ } ++ ++ sdelta_size = sdelta->max_core_count * sdelta->temp_band_count; ++ if (use_corner_band) ++ snprintf(str, sizeof(str), ++ "corner_band=%d core_config_count=%d temp_band_count=%d sdelta_size=%d\n", ++ num, sdelta->max_core_count, ++ sdelta->temp_band_count, sdelta_size); ++ else ++ snprintf(str, sizeof(str), ++ "corner=%d core_config_count=%d temp_band_count=%d sdelta_size=%d\n", ++ num, sdelta->max_core_count, ++ sdelta->temp_band_count, sdelta_size); ++ ++ cpr3_debug(vreg, "%s", str); ++ ++ sdelta->table = devm_kcalloc(ctrl->dev, sdelta_size, ++ sizeof(*sdelta->table), GFP_KERNEL); ++ if (!sdelta->table) ++ return -ENOMEM; ++ ++ if (use_corner_band) ++ snprintf(str, sizeof(str), ++ "qcom,cpr-corner-band%d-temp-core-voltage-adjustment", ++ num + CPR3_CORNER_OFFSET); ++ else ++ snprintf(str, sizeof(str), ++ "qcom,cpr-corner%d-temp-core-voltage-adjustment", ++ num + CPR3_CORNER_OFFSET); ++ ++ rc = cpr3_parse_array_property(vreg, str, sdelta_size, ++ sdelta->table); ++ if (rc) { ++ cpr3_err(vreg, "could not load %s, rc=%d\n", str, rc); ++ return rc; ++ } ++ ++ /* ++ * Convert sdelta margins from uV to PMIC steps and apply negation to ++ * follow the SDELTA register semantics. ++ */ ++ for (i = 0; i < sdelta_size; i++) ++ sdelta->table[i] = -(sdelta->table[i] / ctrl->step_volt); ++ ++ buflen = sizeof(*buf) * sdelta_size * (MAX_CHARS_PER_INT + 2); ++ buf = kzalloc(buflen, GFP_KERNEL); ++ if (!buf) ++ return rc; ++ ++ for (i = 0; i < sdelta->max_core_count; i++) { ++ for (j = 0, pos = 0; j < sdelta->temp_band_count; j++) ++ pos += scnprintf(buf + pos, buflen - pos, " %u", ++ sdelta->table[i * sdelta->temp_band_count + j]); ++ cpr3_debug(vreg, "sdelta[%d]:%s\n", i, buf); ++ } ++ ++ kfree(buf); ++ return rc; ++} ++ ++/** ++ * cpr4_parse_core_count_temp_voltage_adj() - parse configuration data for ++ * per-online-core and per-temperature voltage adjustment for ++ * a CPR3 regulator from device tree. ++ * @vreg: Pointer to the CPR3 regulator ++ * @use_corner_band: Boolean indicating if the CPR3 regulator supports ++ * adjustments per corner band ++ * ++ * This function supports parsing of per-online-core and per-temperature ++ * adjustments per corner or per corner band. CPR controllers which support ++ * corner bands apply the same adjustments to all corners within a corner band. ++ * ++ * Return: 0 on success, errno on failure ++ */ ++int cpr4_parse_core_count_temp_voltage_adj( ++ struct cpr3_regulator *vreg, bool use_corner_band) ++{ ++ struct cpr3_controller *ctrl = vreg->thread->ctrl; ++ struct device_node *node = vreg->of_node; ++ struct cpr3_corner *corner; ++ struct cpr4_sdelta *sdelta; ++ int i, sdelta_table_count, rc = 0; ++ int *allow_core_count_adj = NULL, *allow_temp_adj = NULL; ++ char prop_str[75]; ++ ++ if (of_find_property(node, use_corner_band ? ++ "qcom,corner-band-allow-temp-adjustment" ++ : "qcom,corner-allow-temp-adjustment", NULL)) { ++ if (!ctrl->allow_temp_adj) { ++ cpr3_err(ctrl, "Temperature adjustment configurations missing\n"); ++ return -EINVAL; ++ } ++ ++ vreg->allow_temp_adj = true; ++ } ++ ++ if (of_find_property(node, use_corner_band ? 
++ "qcom,corner-band-allow-core-count-adjustment" ++ : "qcom,corner-allow-core-count-adjustment", ++ NULL)) { ++ rc = of_property_read_u32(node, "qcom,max-core-count", ++ &vreg->max_core_count); ++ if (rc) { ++ cpr3_err(vreg, "error reading qcom,max-core-count, rc=%d\n", ++ rc); ++ return -EINVAL; ++ } ++ ++ vreg->allow_core_count_adj = true; ++ ctrl->allow_core_count_adj = true; ++ } ++ ++ if (!vreg->allow_temp_adj && !vreg->allow_core_count_adj) { ++ /* ++ * Both per-online-core and temperature based adjustments are ++ * disabled for this regulator. ++ */ ++ return 0; ++ } else if (!vreg->allow_core_count_adj) { ++ /* ++ * Only per-temperature voltage adjusments are allowed. ++ * Keep max core count value as 1 to allocate SDELTA. ++ */ ++ vreg->max_core_count = 1; ++ } ++ ++ if (vreg->allow_core_count_adj) { ++ allow_core_count_adj = kcalloc(vreg->corner_count, ++ sizeof(*allow_core_count_adj), ++ GFP_KERNEL); ++ if (!allow_core_count_adj) ++ return -ENOMEM; ++ ++ snprintf(prop_str, sizeof(prop_str), "%s", use_corner_band ? ++ "qcom,corner-band-allow-core-count-adjustment" : ++ "qcom,corner-allow-core-count-adjustment"); ++ ++ rc = use_corner_band ? ++ cpr3_parse_corner_band_array_property(vreg, prop_str, ++ 1, allow_core_count_adj) : ++ cpr3_parse_corner_array_property(vreg, prop_str, ++ 1, allow_core_count_adj); ++ if (rc) { ++ cpr3_err(vreg, "error reading %s, rc=%d\n", prop_str, ++ rc); ++ goto done; ++ } ++ } ++ ++ if (vreg->allow_temp_adj) { ++ allow_temp_adj = kcalloc(vreg->corner_count, ++ sizeof(*allow_temp_adj), GFP_KERNEL); ++ if (!allow_temp_adj) { ++ rc = -ENOMEM; ++ goto done; ++ } ++ ++ snprintf(prop_str, sizeof(prop_str), "%s", use_corner_band ? ++ "qcom,corner-band-allow-temp-adjustment" : ++ "qcom,corner-allow-temp-adjustment"); ++ ++ rc = use_corner_band ? ++ cpr3_parse_corner_band_array_property(vreg, prop_str, ++ 1, allow_temp_adj) : ++ cpr3_parse_corner_array_property(vreg, prop_str, ++ 1, allow_temp_adj); ++ if (rc) { ++ cpr3_err(vreg, "error reading %s, rc=%d\n", prop_str, ++ rc); ++ goto done; ++ } ++ } ++ ++ sdelta_table_count = use_corner_band ? vreg->corner_band_count : ++ vreg->corner_count; ++ ++ for (i = 0; i < sdelta_table_count; i++) { ++ sdelta = devm_kzalloc(ctrl->dev, sizeof(*corner->sdelta), ++ GFP_KERNEL); ++ if (!sdelta) { ++ rc = -ENOMEM; ++ goto done; ++ } ++ ++ if (allow_core_count_adj) ++ sdelta->allow_core_count_adj = allow_core_count_adj[i]; ++ if (allow_temp_adj) ++ sdelta->allow_temp_adj = allow_temp_adj[i]; ++ sdelta->max_core_count = vreg->max_core_count; ++ sdelta->temp_band_count = ctrl->temp_band_count; ++ ++ if (use_corner_band) ++ vreg->corner_band[i].sdelta = sdelta; ++ else ++ vreg->corner[i].sdelta = sdelta; ++ ++ rc = cpr4_load_core_and_temp_adj(vreg, i, use_corner_band); ++ if (rc) { ++ cpr3_err(vreg, "corner/band %d core and temp adjustment loading failed, rc=%d\n", ++ i, rc); ++ goto done; ++ } ++ } ++ ++done: ++ kfree(allow_core_count_adj); ++ kfree(allow_temp_adj); ++ ++ return rc; ++} ++ ++/** ++ * cprh_adjust_voltages_for_apm() - adjust per-corner floor and ceiling voltages ++ * so that they do not overlap the APM threshold voltage. ++ * @vreg: Pointer to the CPR3 regulator ++ * ++ * The memory array power mux (APM) must be configured for a specific supply ++ * based upon where the VDD voltage lies with respect to the APM threshold ++ * voltage. When using CPR hardware closed-loop, the voltage may vary anywhere ++ * between the floor and ceiling voltage without software notification. 
++ * Therefore, it is required that the floor to ceiling range for every corner ++ * not intersect the APM threshold voltage. This function adjusts the floor to ++ * ceiling range for each corner which violates this requirement. ++ * ++ * The following algorithm is applied: ++ * if floor < threshold <= ceiling: ++ * if open_loop >= threshold, then floor = threshold - adj ++ * else ceiling = threshold - step ++ * where: ++ * adj = APM hysteresis voltage established to minimize the number of ++ * corners with artificially increased floor voltages ++ * step = voltage in microvolts of a single step of the VDD supply ++ * ++ * The open-loop voltage is also bounded by the new floor or ceiling value as ++ * needed. ++ * ++ * Return: none ++ */ ++void cprh_adjust_voltages_for_apm(struct cpr3_regulator *vreg) ++{ ++ struct cpr3_controller *ctrl = vreg->thread->ctrl; ++ struct cpr3_corner *corner; ++ int i, adj, threshold, prev_ceiling, prev_floor, prev_open_loop; ++ ++ if (!ctrl->apm_threshold_volt) { ++ /* APM not being used. */ ++ return; ++ } ++ ++ ctrl->apm_threshold_volt = CPR3_ROUND(ctrl->apm_threshold_volt, ++ ctrl->step_volt); ++ ctrl->apm_adj_volt = CPR3_ROUND(ctrl->apm_adj_volt, ctrl->step_volt); ++ ++ threshold = ctrl->apm_threshold_volt; ++ adj = ctrl->apm_adj_volt; ++ ++ for (i = 0; i < vreg->corner_count; i++) { ++ corner = &vreg->corner[i]; ++ ++ if (threshold <= corner->floor_volt ++ || threshold > corner->ceiling_volt) ++ continue; ++ ++ prev_floor = corner->floor_volt; ++ prev_ceiling = corner->ceiling_volt; ++ prev_open_loop = corner->open_loop_volt; ++ ++ if (corner->open_loop_volt >= threshold) { ++ corner->floor_volt = max(corner->floor_volt, ++ threshold - adj); ++ if (corner->open_loop_volt < corner->floor_volt) ++ corner->open_loop_volt = corner->floor_volt; ++ } else { ++ corner->ceiling_volt = threshold - ctrl->step_volt; ++ } ++ ++ if (corner->floor_volt != prev_floor ++ || corner->ceiling_volt != prev_ceiling ++ || corner->open_loop_volt != prev_open_loop) ++ cpr3_debug(vreg, "APM threshold=%d, APM adj=%d changed corner %d voltages; prev: floor=%d, ceiling=%d, open-loop=%d; new: floor=%d, ceiling=%d, open-loop=%d\n", ++ threshold, adj, i, prev_floor, prev_ceiling, ++ prev_open_loop, corner->floor_volt, ++ corner->ceiling_volt, corner->open_loop_volt); ++ } ++} ++ ++/** ++ * cprh_adjust_voltages_for_mem_acc() - adjust per-corner floor and ceiling ++ * voltages so that they do not intersect the MEM ACC threshold ++ * voltage ++ * @vreg: Pointer to the CPR3 regulator ++ * ++ * The following algorithm is applied: ++ * if floor < threshold <= ceiling: ++ * if open_loop >= threshold, then floor = threshold ++ * else ceiling = threshold - step ++ * where: ++ * step = voltage in microvolts of a single step of the VDD supply ++ * ++ * The open-loop voltage is also bounded by the new floor or ceiling value as ++ * needed. ++ * ++ * Return: none ++ */ ++void cprh_adjust_voltages_for_mem_acc(struct cpr3_regulator *vreg) ++{ ++ struct cpr3_controller *ctrl = vreg->thread->ctrl; ++ struct cpr3_corner *corner; ++ int i, threshold, prev_ceiling, prev_floor, prev_open_loop; ++ ++ if (!ctrl->mem_acc_threshold_volt) { ++ /* MEM ACC not being used. 
*/ ++ return; ++ } ++ ++ ctrl->mem_acc_threshold_volt = CPR3_ROUND(ctrl->mem_acc_threshold_volt, ++ ctrl->step_volt); ++ ++ threshold = ctrl->mem_acc_threshold_volt; ++ ++ for (i = 0; i < vreg->corner_count; i++) { ++ corner = &vreg->corner[i]; ++ ++ if (threshold <= corner->floor_volt ++ || threshold > corner->ceiling_volt) ++ continue; ++ ++ prev_floor = corner->floor_volt; ++ prev_ceiling = corner->ceiling_volt; ++ prev_open_loop = corner->open_loop_volt; ++ ++ if (corner->open_loop_volt >= threshold) { ++ corner->floor_volt = max(corner->floor_volt, threshold); ++ if (corner->open_loop_volt < corner->floor_volt) ++ corner->open_loop_volt = corner->floor_volt; ++ } else { ++ corner->ceiling_volt = threshold - ctrl->step_volt; ++ } ++ ++ if (corner->floor_volt != prev_floor ++ || corner->ceiling_volt != prev_ceiling ++ || corner->open_loop_volt != prev_open_loop) ++ cpr3_debug(vreg, "MEM ACC threshold=%d changed corner %d voltages; prev: floor=%d, ceiling=%d, open-loop=%d; new: floor=%d, ceiling=%d, open-loop=%d\n", ++ threshold, i, prev_floor, prev_ceiling, ++ prev_open_loop, corner->floor_volt, ++ corner->ceiling_volt, corner->open_loop_volt); ++ } ++} ++ ++/** ++ * cpr3_apply_closed_loop_offset_voltages() - modify the closed-loop voltage ++ * adjustments by the amounts that are needed for this ++ * fuse combo ++ * @vreg: Pointer to the CPR3 regulator ++ * @volt_adjust: Array of closed-loop voltage adjustment values of length ++ * vreg->corner_count which is further adjusted based upon ++ * offset voltage fuse values. ++ * @fuse_volt_adjust: Fused closed-loop voltage adjustment values of length ++ * vreg->fuse_corner_count. ++ * ++ * Return: 0 on success, errno on failure ++ */ ++static int cpr3_apply_closed_loop_offset_voltages(struct cpr3_regulator *vreg, ++ int *volt_adjust, int *fuse_volt_adjust) ++{ ++ u32 *corner_map; ++ int rc = 0, i; ++ ++ if (!of_find_property(vreg->of_node, ++ "qcom,cpr-fused-closed-loop-voltage-adjustment-map", NULL)) { ++ /* No closed-loop offset required. 
*/ ++ return 0; ++ } ++ ++ corner_map = kcalloc(vreg->corner_count, sizeof(*corner_map), ++ GFP_KERNEL); ++ if (!corner_map) ++ return -ENOMEM; ++ ++ rc = cpr3_parse_corner_array_property(vreg, ++ "qcom,cpr-fused-closed-loop-voltage-adjustment-map", ++ 1, corner_map); ++ if (rc) ++ goto done; ++ ++ for (i = 0; i < vreg->corner_count; i++) { ++ if (corner_map[i] == 0) { ++ continue; ++ } else if (corner_map[i] > vreg->fuse_corner_count) { ++ cpr3_err(vreg, "corner %d mapped to invalid fuse corner: %u\n", ++ i, corner_map[i]); ++ rc = -EINVAL; ++ goto done; ++ } ++ ++ volt_adjust[i] += fuse_volt_adjust[corner_map[i] - 1]; ++ } ++ ++done: ++ kfree(corner_map); ++ return rc; ++} ++ ++/** ++ * cpr3_enforce_inc_quotient_monotonicity() - Ensure that target quotients ++ * increase monotonically from lower to higher corners ++ * @vreg: Pointer to the CPR3 regulator ++ * ++ * Return: 0 on success, errno on failure ++ */ ++static void cpr3_enforce_inc_quotient_monotonicity(struct cpr3_regulator *vreg) ++{ ++ int i, j; ++ ++ for (i = 1; i < vreg->corner_count; i++) { ++ for (j = 0; j < CPR3_RO_COUNT; j++) { ++ if (vreg->corner[i].target_quot[j] ++ && vreg->corner[i].target_quot[j] ++ < vreg->corner[i - 1].target_quot[j]) { ++ cpr3_debug(vreg, "corner %d RO%u target quot=%u < corner %d RO%u target quot=%u; overriding: corner %d RO%u target quot=%u\n", ++ i, j, ++ vreg->corner[i].target_quot[j], ++ i - 1, j, ++ vreg->corner[i - 1].target_quot[j], ++ i, j, ++ vreg->corner[i - 1].target_quot[j]); ++ vreg->corner[i].target_quot[j] ++ = vreg->corner[i - 1].target_quot[j]; ++ } ++ } ++ } ++} ++ ++/** ++ * cpr3_enforce_dec_quotient_monotonicity() - Ensure that target quotients ++ * decrease monotonically from higher to lower corners ++ * @vreg: Pointer to the CPR3 regulator ++ * ++ * Return: 0 on success, errno on failure ++ */ ++static void cpr3_enforce_dec_quotient_monotonicity(struct cpr3_regulator *vreg) ++{ ++ int i, j; ++ ++ for (i = vreg->corner_count - 2; i >= 0; i--) { ++ for (j = 0; j < CPR3_RO_COUNT; j++) { ++ if (vreg->corner[i + 1].target_quot[j] ++ && vreg->corner[i].target_quot[j] ++ > vreg->corner[i + 1].target_quot[j]) { ++ cpr3_debug(vreg, "corner %d RO%u target quot=%u > corner %d RO%u target quot=%u; overriding: corner %d RO%u target quot=%u\n", ++ i, j, ++ vreg->corner[i].target_quot[j], ++ i + 1, j, ++ vreg->corner[i + 1].target_quot[j], ++ i, j, ++ vreg->corner[i + 1].target_quot[j]); ++ vreg->corner[i].target_quot[j] ++ = vreg->corner[i + 1].target_quot[j]; ++ } ++ } ++ } ++} ++ ++/** ++ * _cpr3_adjust_target_quotients() - adjust the target quotients for each ++ * corner of the regulator according to input adjustment and ++ * scaling arrays ++ * @vreg: Pointer to the CPR3 regulator ++ * @volt_adjust: Pointer to an array of closed-loop voltage adjustments ++ * with units of microvolts. The array must have ++ * vreg->corner_count number of elements. ++ * @ro_scale: Pointer to a flattened 2D array of RO scaling factors. ++ * The array must have an inner dimension of CPR3_RO_COUNT ++ * and an outer dimension of vreg->corner_count ++ * @label: Null terminated string providing a label for the type ++ * of adjustment. 
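++ *
++ * Illustrative note (an assumption, since cpr3_quot_adjustment() is defined
++ * elsewhere): with RO scaling factors carrying units of QUOT/V, as documented
++ * for the IPQ807x helpers later in this file, a volt_adjust entry of
++ * +10000 uV against a 2000 QUOT/V factor shifts that corner's target
++ * quotient by roughly 2000 QUOT/V * 0.01 V = 20.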
++ * ++ * Return: true if any corners received a positive voltage adjustment (> 0), ++ * else false ++ */ ++static bool _cpr3_adjust_target_quotients(struct cpr3_regulator *vreg, ++ const int *volt_adjust, const int *ro_scale, const char *label) ++{ ++ int i, j, quot_adjust; ++ bool is_increasing = false; ++ u32 prev_quot; ++ ++ for (i = 0; i < vreg->corner_count; i++) { ++ for (j = 0; j < CPR3_RO_COUNT; j++) { ++ if (vreg->corner[i].target_quot[j]) { ++ quot_adjust = cpr3_quot_adjustment( ++ ro_scale[i * CPR3_RO_COUNT + j], ++ volt_adjust[i]); ++ if (quot_adjust) { ++ prev_quot = vreg->corner[i]. ++ target_quot[j]; ++ vreg->corner[i].target_quot[j] ++ += quot_adjust; ++ cpr3_debug(vreg, "adjusted corner %d RO%d target quot %s: %u --> %u (%d uV)\n", ++ i, j, label, prev_quot, ++ vreg->corner[i].target_quot[j], ++ volt_adjust[i]); ++ } ++ } ++ } ++ if (volt_adjust[i] > 0) ++ is_increasing = true; ++ } ++ ++ return is_increasing; ++} ++ ++/** ++ * cpr3_adjust_target_quotients() - adjust the target quotients for each ++ * corner according to device tree values and fuse values ++ * @vreg: Pointer to the CPR3 regulator ++ * @fuse_volt_adjust: Fused closed-loop voltage adjustment values of length ++ * vreg->fuse_corner_count. This parameter could be null ++ * pointer when no fused adjustments are needed. ++ * ++ * Return: 0 on success, errno on failure ++ */ ++int cpr3_adjust_target_quotients(struct cpr3_regulator *vreg, ++ int *fuse_volt_adjust) ++{ ++ int i, rc; ++ int *volt_adjust, *ro_scale; ++ bool explicit_adjustment, fused_adjustment, is_increasing; ++ ++ explicit_adjustment = of_find_property(vreg->of_node, ++ "qcom,cpr-closed-loop-voltage-adjustment", NULL); ++ fused_adjustment = of_find_property(vreg->of_node, ++ "qcom,cpr-fused-closed-loop-voltage-adjustment-map", NULL); ++ ++ if (!explicit_adjustment && !fused_adjustment && !vreg->aging_allowed) { ++ /* No adjustment required. 
*/ ++ return 0; ++ } else if (!of_find_property(vreg->of_node, ++ "qcom,cpr-ro-scaling-factor", NULL)) { ++ cpr3_err(vreg, "qcom,cpr-ro-scaling-factor is required for closed-loop voltage adjustment, but is missing\n"); ++ return -EINVAL; ++ } ++ ++ volt_adjust = kcalloc(vreg->corner_count, sizeof(*volt_adjust), ++ GFP_KERNEL); ++ ro_scale = kcalloc(vreg->corner_count * CPR3_RO_COUNT, ++ sizeof(*ro_scale), GFP_KERNEL); ++ if (!volt_adjust || !ro_scale) { ++ rc = -ENOMEM; ++ goto done; ++ } ++ ++ rc = cpr3_parse_corner_array_property(vreg, ++ "qcom,cpr-ro-scaling-factor", CPR3_RO_COUNT, ro_scale); ++ if (rc) { ++ cpr3_err(vreg, "could not load RO scaling factors, rc=%d\n", ++ rc); ++ goto done; ++ } ++ ++ for (i = 0; i < vreg->corner_count; i++) ++ memcpy(vreg->corner[i].ro_scale, &ro_scale[i * CPR3_RO_COUNT], ++ sizeof(*ro_scale) * CPR3_RO_COUNT); ++ ++ if (explicit_adjustment) { ++ rc = cpr3_parse_corner_array_property(vreg, ++ "qcom,cpr-closed-loop-voltage-adjustment", ++ 1, volt_adjust); ++ if (rc) { ++ cpr3_err(vreg, "could not load closed-loop voltage adjustments, rc=%d\n", ++ rc); ++ goto done; ++ } ++ ++ _cpr3_adjust_target_quotients(vreg, volt_adjust, ro_scale, ++ "from DT"); ++ cpr3_enforce_inc_quotient_monotonicity(vreg); ++ } ++ ++ if (fused_adjustment && fuse_volt_adjust) { ++ memset(volt_adjust, 0, ++ sizeof(*volt_adjust) * vreg->corner_count); ++ ++ rc = cpr3_apply_closed_loop_offset_voltages(vreg, volt_adjust, ++ fuse_volt_adjust); ++ if (rc) { ++ cpr3_err(vreg, "could not apply fused closed-loop voltage reductions, rc=%d\n", ++ rc); ++ goto done; ++ } ++ ++ is_increasing = _cpr3_adjust_target_quotients(vreg, volt_adjust, ++ ro_scale, "from fuse"); ++ if (is_increasing) ++ cpr3_enforce_inc_quotient_monotonicity(vreg); ++ else ++ cpr3_enforce_dec_quotient_monotonicity(vreg); ++ } ++ ++done: ++ kfree(volt_adjust); ++ kfree(ro_scale); ++ return rc; ++} +--- /dev/null ++++ b/drivers/regulator/cpr4-apss-regulator.c +@@ -0,0 +1,1819 @@ ++/* ++ * Copyright (c) 2015-2016, The Linux Foundation. All rights reserved. ++ * ++ * This program is free software; you can redistribute it and/or modify ++ * it under the terms of the GNU General Public License version 2 and ++ * only version 2 as published by the Free Software Foundation. ++ * ++ * This program is distributed in the hope that it will be useful, ++ * but WITHOUT ANY WARRANTY; without even the implied warranty of ++ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the ++ * GNU General Public License for more details. ++ */ ++ ++#define pr_fmt(fmt) "%s: " fmt, __func__ ++ ++#include ++#include ++#include ++#include ++#include ++#include ++#include ++#include ++#include ++#include ++#include ++#include ++#include ++#include ++#include ++#include ++#include ++#include ++#include ++ ++#include "cpr3-regulator.h" ++ ++#define IPQ807x_APSS_FUSE_CORNERS 4 ++#define IPQ817x_APPS_FUSE_CORNERS 2 ++#define IPQ6018_APSS_FUSE_CORNERS 4 ++#define IPQ9574_APSS_FUSE_CORNERS 4 ++ ++u32 g_valid_fuse_count = IPQ807x_APSS_FUSE_CORNERS; ++ ++/** ++ * struct cpr4_ipq807x_apss_fuses - APSS specific fuse data for IPQ807x ++ * @ro_sel: Ring oscillator select fuse parameter value for each ++ * fuse corner ++ * @init_voltage: Initial (i.e. 
open-loop) voltage fuse parameter value ++ * for each fuse corner (raw, not converted to a voltage) ++ * @target_quot: CPR target quotient fuse parameter value for each fuse ++ * corner ++ * @quot_offset: CPR target quotient offset fuse parameter value for each ++ * fuse corner (raw, not unpacked) used for target quotient ++ * interpolation ++ * @speed_bin: Application processor speed bin fuse parameter value for ++ * the given chip ++ * @cpr_fusing_rev: CPR fusing revision fuse parameter value ++ * @boost_cfg: CPR boost configuration fuse parameter value ++ * @boost_voltage: CPR boost voltage fuse parameter value (raw, not ++ * converted to a voltage) ++ * ++ * This struct holds the values for all of the fuses read from memory. ++ */ ++struct cpr4_ipq807x_apss_fuses { ++ u64 ro_sel[IPQ807x_APSS_FUSE_CORNERS]; ++ u64 init_voltage[IPQ807x_APSS_FUSE_CORNERS]; ++ u64 target_quot[IPQ807x_APSS_FUSE_CORNERS]; ++ u64 quot_offset[IPQ807x_APSS_FUSE_CORNERS]; ++ u64 speed_bin; ++ u64 cpr_fusing_rev; ++ u64 boost_cfg; ++ u64 boost_voltage; ++ u64 misc; ++}; ++ ++/* ++ * fuse combo = fusing revision + 8 * (speed bin) ++ * where: fusing revision = 0 - 7 and speed bin = 0 - 7 ++ */ ++#define CPR4_IPQ807x_APSS_FUSE_COMBO_COUNT 64 ++ ++/* ++ * Constants which define the name of each fuse corner. ++ */ ++enum cpr4_ipq807x_apss_fuse_corner { ++ CPR4_IPQ807x_APSS_FUSE_CORNER_SVS = 0, ++ CPR4_IPQ807x_APSS_FUSE_CORNER_NOM = 1, ++ CPR4_IPQ807x_APSS_FUSE_CORNER_TURBO = 2, ++ CPR4_IPQ807x_APSS_FUSE_CORNER_STURBO = 3, ++}; ++ ++static const char * const cpr4_ipq807x_apss_fuse_corner_name[] = { ++ [CPR4_IPQ807x_APSS_FUSE_CORNER_SVS] = "SVS", ++ [CPR4_IPQ807x_APSS_FUSE_CORNER_NOM] = "NOM", ++ [CPR4_IPQ807x_APSS_FUSE_CORNER_TURBO] = "TURBO", ++ [CPR4_IPQ807x_APSS_FUSE_CORNER_STURBO] = "STURBO", ++}; ++ ++/* ++ * IPQ807x APSS fuse parameter locations: ++ * ++ * Structs are organized with the following dimensions: ++ * Outer: 0 to 3 for fuse corners from lowest to highest corner ++ * Inner: large enough to hold the longest set of parameter segments which ++ * fully defines a fuse parameter, +1 (for NULL termination). ++ * Each segment corresponds to a contiguous group of bits from a ++ * single fuse row. These segments are concatentated together in ++ * order to form the full fuse parameter value. The segments for ++ * a given parameter may correspond to different fuse rows. 
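++ *
++ *	For example, a single-segment entry such as {{73, 8, 11}, {} } in the
++ *	tables below is read as bits 8..11 of fuse row 73. This assumes the
++ *	usual {row, first bit, last bit} layout of struct cpr3_fuse_param,
++ *	which is defined in cpr3-regulator.h and not shown here.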
++ */ ++static struct cpr3_fuse_param ++ipq807x_apss_ro_sel_param[IPQ807x_APSS_FUSE_CORNERS][2] = { ++ {{73, 8, 11}, {} }, ++ {{73, 4, 7}, {} }, ++ {{73, 0, 3}, {} }, ++ {{73, 12, 15}, {} }, ++}; ++ ++static struct cpr3_fuse_param ++ipq807x_apss_init_voltage_param[IPQ807x_APSS_FUSE_CORNERS][2] = { ++ {{71, 18, 23}, {} }, ++ {{71, 12, 17}, {} }, ++ {{71, 6, 11}, {} }, ++ {{71, 0, 5}, {} }, ++}; ++ ++static struct cpr3_fuse_param ++ipq807x_apss_target_quot_param[IPQ807x_APSS_FUSE_CORNERS][2] = { ++ {{72, 32, 43}, {} }, ++ {{72, 20, 31}, {} }, ++ {{72, 8, 19}, {} }, ++ {{72, 44, 55}, {} }, ++}; ++ ++static struct cpr3_fuse_param ++ipq807x_apss_quot_offset_param[IPQ807x_APSS_FUSE_CORNERS][2] = { ++ {{} }, ++ {{71, 46, 52}, {} }, ++ {{71, 39, 45}, {} }, ++ {{71, 32, 38}, {} }, ++}; ++ ++static struct cpr3_fuse_param ipq807x_cpr_fusing_rev_param[] = { ++ {71, 53, 55}, ++ {}, ++}; ++ ++static struct cpr3_fuse_param ipq807x_apss_speed_bin_param[] = { ++ {36, 40, 42}, ++ {}, ++}; ++ ++static struct cpr3_fuse_param ipq807x_cpr_boost_fuse_cfg_param[] = { ++ {36, 43, 45}, ++ {}, ++}; ++ ++static struct cpr3_fuse_param ipq807x_apss_boost_fuse_volt_param[] = { ++ {71, 0, 5}, ++ {}, ++}; ++ ++static struct cpr3_fuse_param ipq807x_misc_fuse_volt_adj_param[] = { ++ {36, 54, 54}, ++ {}, ++}; ++ ++static struct cpr3_fuse_parameters ipq807x_fuse_params = { ++ .apss_ro_sel_param = ipq807x_apss_ro_sel_param, ++ .apss_init_voltage_param = ipq807x_apss_init_voltage_param, ++ .apss_target_quot_param = ipq807x_apss_target_quot_param, ++ .apss_quot_offset_param = ipq807x_apss_quot_offset_param, ++ .cpr_fusing_rev_param = ipq807x_cpr_fusing_rev_param, ++ .apss_speed_bin_param = ipq807x_apss_speed_bin_param, ++ .cpr_boost_fuse_cfg_param = ipq807x_cpr_boost_fuse_cfg_param, ++ .apss_boost_fuse_volt_param = ipq807x_apss_boost_fuse_volt_param, ++ .misc_fuse_volt_adj_param = ipq807x_misc_fuse_volt_adj_param ++}; ++ ++/* ++ * The number of possible values for misc fuse is ++ * 2^(#bits defined for misc fuse) ++ */ ++#define IPQ807x_MISC_FUSE_VAL_COUNT BIT(1) ++ ++/* ++ * Open loop voltage fuse reference voltages in microvolts for IPQ807x ++ */ ++static int ipq807x_apss_fuse_ref_volt ++ [IPQ807x_APSS_FUSE_CORNERS] = { ++ 720000, ++ 864000, ++ 992000, ++ 1064000, ++}; ++ ++#define IPQ807x_APSS_FUSE_STEP_VOLT 8000 ++#define IPQ807x_APSS_VOLTAGE_FUSE_SIZE 6 ++#define IPQ807x_APSS_QUOT_OFFSET_SCALE 5 ++ ++#define IPQ807x_APSS_CPR_SENSOR_COUNT 6 ++ ++#define IPQ807x_APSS_CPR_CLOCK_RATE 19200000 ++ ++#define IPQ807x_APSS_MAX_TEMP_POINTS 3 ++#define IPQ807x_APSS_TEMP_SENSOR_ID_START 4 ++#define IPQ807x_APSS_TEMP_SENSOR_ID_END 13 ++/* ++ * Boost voltage fuse reference and ceiling voltages in microvolts for ++ * IPQ807x. ++ */ ++#define IPQ807x_APSS_BOOST_FUSE_REF_VOLT 1140000 ++#define IPQ807x_APSS_BOOST_CEILING_VOLT 1140000 ++#define IPQ807x_APSS_BOOST_FLOOR_VOLT 900000 ++#define MAX_BOOST_CONFIG_FUSE_VALUE 8 ++ ++#define IPQ807x_APSS_CPR_SDELTA_CORE_COUNT 15 ++ ++#define IPQ807x_APSS_CPR_TCSR_START 8 ++#define IPQ807x_APSS_CPR_TCSR_END 9 ++ ++/* ++ * Array of integer values mapped to each of the boost config fuse values to ++ * indicate boost enable/disable status. 
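++ *
++ * Per this table, only a fused boost_cfg value of 0 disables voltage boost;
++ * every other value enables it, which is what
++ * cpr4_apss_parse_boost_properties() checks via boost_fuse[fuse->boost_cfg].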
++ */ ++static bool boost_fuse[MAX_BOOST_CONFIG_FUSE_VALUE] = {0, 1, 1, 1, 1, 1, 1, 1}; ++ ++/* ++ * IPQ6018 (Few parameters are changed, remaining are same as IPQ807x) ++ */ ++#define IPQ6018_APSS_FUSE_STEP_VOLT 12500 ++#define IPQ6018_APSS_CPR_CLOCK_RATE 24000000 ++ ++static struct cpr3_fuse_param ++ipq6018_apss_ro_sel_param[IPQ6018_APSS_FUSE_CORNERS][2] = { ++ {{75, 8, 11}, {} }, ++ {{75, 4, 7}, {} }, ++ {{75, 0, 3}, {} }, ++ {{75, 12, 15}, {} }, ++}; ++ ++static struct cpr3_fuse_param ++ipq6018_apss_init_voltage_param[IPQ6018_APSS_FUSE_CORNERS][2] = { ++ {{73, 18, 23}, {} }, ++ {{73, 12, 17}, {} }, ++ {{73, 6, 11}, {} }, ++ {{73, 0, 5}, {} }, ++}; ++ ++static struct cpr3_fuse_param ++ipq6018_apss_target_quot_param[IPQ6018_APSS_FUSE_CORNERS][2] = { ++ {{74, 32, 43}, {} }, ++ {{74, 20, 31}, {} }, ++ {{74, 8, 19}, {} }, ++ {{74, 44, 55}, {} }, ++}; ++ ++static struct cpr3_fuse_param ++ipq6018_apss_quot_offset_param[IPQ6018_APSS_FUSE_CORNERS][2] = { ++ {{} }, ++ {{73, 48, 55}, {} }, ++ {{73, 40, 47}, {} }, ++ {{73, 32, 39}, {} }, ++}; ++ ++static struct cpr3_fuse_param ipq6018_cpr_fusing_rev_param[] = { ++ {75, 16, 18}, ++ {}, ++}; ++ ++static struct cpr3_fuse_param ipq6018_apss_speed_bin_param[] = { ++ {36, 40, 42}, ++ {}, ++}; ++ ++static struct cpr3_fuse_param ipq6018_cpr_boost_fuse_cfg_param[] = { ++ {36, 43, 45}, ++ {}, ++}; ++ ++static struct cpr3_fuse_param ipq6018_apss_boost_fuse_volt_param[] = { ++ {73, 0, 5}, ++ {}, ++}; ++ ++static struct cpr3_fuse_param ipq6018_misc_fuse_volt_adj_param[] = { ++ {36, 54, 54}, ++ {}, ++}; ++ ++static struct cpr3_fuse_parameters ipq6018_fuse_params = { ++ .apss_ro_sel_param = ipq6018_apss_ro_sel_param, ++ .apss_init_voltage_param = ipq6018_apss_init_voltage_param, ++ .apss_target_quot_param = ipq6018_apss_target_quot_param, ++ .apss_quot_offset_param = ipq6018_apss_quot_offset_param, ++ .cpr_fusing_rev_param = ipq6018_cpr_fusing_rev_param, ++ .apss_speed_bin_param = ipq6018_apss_speed_bin_param, ++ .cpr_boost_fuse_cfg_param = ipq6018_cpr_boost_fuse_cfg_param, ++ .apss_boost_fuse_volt_param = ipq6018_apss_boost_fuse_volt_param, ++ .misc_fuse_volt_adj_param = ipq6018_misc_fuse_volt_adj_param ++}; ++ ++ ++/* ++ * Boost voltage fuse reference and ceiling voltages in microvolts for ++ * IPQ6018. 
++ */ ++#define IPQ6018_APSS_BOOST_FUSE_REF_VOLT 1140000 ++#define IPQ6018_APSS_BOOST_CEILING_VOLT 1140000 ++#define IPQ6018_APSS_BOOST_FLOOR_VOLT 900000 ++ ++/* ++ * Open loop voltage fuse reference voltages in microvolts for IPQ807x ++ */ ++static int ipq6018_apss_fuse_ref_volt ++ [IPQ6018_APSS_FUSE_CORNERS] = { ++ 725000, ++ 862500, ++ 987500, ++ 1062500, ++}; ++ ++/* ++ * IPQ6018 Memory ACC settings on TCSR ++ * ++ * Turbo_L1: write TCSR_MEM_ACC_SW_OVERRIDE_LEGACY_APC0 0x10 ++ * write TCSR_CUSTOM_VDDAPC0_ACC_1 0x1 ++ * Other modes: write TCSR_MEM_ACC_SW_OVERRIDE_LEGACY_APC0 0x0 ++ * write TCSR_CUSTOM_VDDAPC0_ACC_1 0x0 ++ * ++ */ ++#define IPQ6018_APSS_MEM_ACC_TCSR_COUNT 2 ++#define TCSR_MEM_ACC_SW_OVERRIDE_LEGACY_APC0 0x1946178 ++#define TCSR_CUSTOM_VDDAPC0_ACC_1 0x1946124 ++ ++struct mem_acc_tcsr { ++ u32 phy_addr; ++ void __iomem *ioremap_addr; ++ u32 value; ++}; ++ ++static struct mem_acc_tcsr ipq6018_mem_acc_tcsr[IPQ6018_APSS_MEM_ACC_TCSR_COUNT] = { ++ {TCSR_MEM_ACC_SW_OVERRIDE_LEGACY_APC0, NULL, 0x10}, ++ {TCSR_CUSTOM_VDDAPC0_ACC_1, NULL, 0x1}, ++}; ++ ++/* ++ * IPQ9574 (Few parameters are changed, remaining are same as IPQ6018) ++ */ ++#define IPQ9574_APSS_FUSE_STEP_VOLT 10000 ++ ++static struct cpr3_fuse_param ++ipq9574_apss_ro_sel_param[IPQ9574_APSS_FUSE_CORNERS][2] = { ++ {{107, 4, 7}, {} }, ++ {{107, 0, 3}, {} }, ++ {{106, 4, 7}, {} }, ++ {{106, 0, 3}, {} }, ++}; ++ ++static struct cpr3_fuse_param ++ipq9574_apss_init_voltage_param[IPQ9574_APSS_FUSE_CORNERS][2] = { ++ {{104, 24, 29}, {} }, ++ {{104, 18, 23}, {} }, ++ {{104, 12, 17}, {} }, ++ {{104, 6, 11}, {} }, ++}; ++ ++static struct cpr3_fuse_param ++ipq9574_apss_target_quot_param[IPQ9574_APSS_FUSE_CORNERS][2] = { ++ {{106, 32, 43}, {} }, ++ {{106, 20, 31}, {} }, ++ {{106, 8, 19}, {} }, ++ {{106, 44, 55}, {} }, ++}; ++ ++static struct cpr3_fuse_param ++ipq9574_apss_quot_offset_param[IPQ9574_APSS_FUSE_CORNERS][2] = { ++ {{} }, ++ {{105, 48, 55}, {} }, ++ {{105, 40, 47}, {} }, ++ {{105, 32, 39}, {} }, ++}; ++ ++static struct cpr3_fuse_param ipq9574_cpr_fusing_rev_param[] = { ++ {107, 8, 10}, ++ {}, ++}; ++ ++static struct cpr3_fuse_param ipq9574_apss_speed_bin_param[] = { ++ {0, 40, 42}, ++ {}, ++}; ++ ++static struct cpr3_fuse_param ipq9574_cpr_boost_fuse_cfg_param[] = { ++ {0, 43, 45}, ++ {}, ++}; ++ ++static struct cpr3_fuse_param ipq9574_apss_boost_fuse_volt_param[] = { ++ {104, 0, 5}, ++ {}, ++}; ++ ++static struct cpr3_fuse_param ipq9574_misc_fuse_volt_adj_param[] = { ++ {0, 54, 54}, ++ {}, ++}; ++ ++static struct cpr3_fuse_parameters ipq9574_fuse_params = { ++ .apss_ro_sel_param = ipq9574_apss_ro_sel_param, ++ .apss_init_voltage_param = ipq9574_apss_init_voltage_param, ++ .apss_target_quot_param = ipq9574_apss_target_quot_param, ++ .apss_quot_offset_param = ipq9574_apss_quot_offset_param, ++ .cpr_fusing_rev_param = ipq9574_cpr_fusing_rev_param, ++ .apss_speed_bin_param = ipq9574_apss_speed_bin_param, ++ .cpr_boost_fuse_cfg_param = ipq9574_cpr_boost_fuse_cfg_param, ++ .apss_boost_fuse_volt_param = ipq9574_apss_boost_fuse_volt_param, ++ .misc_fuse_volt_adj_param = ipq9574_misc_fuse_volt_adj_param ++}; ++ ++/* ++ * Open loop voltage fuse reference voltages in microvolts for IPQ9574 ++ */ ++static int ipq9574_apss_fuse_ref_volt ++ [IPQ9574_APSS_FUSE_CORNERS] = { ++ 725000, ++ 862500, ++ 987500, ++ 1062500, ++}; ++ ++/** ++ * cpr4_ipq807x_apss_read_fuse_data() - load APSS specific fuse parameter values ++ * @vreg: Pointer to the CPR3 regulator ++ * ++ * This function allocates a cpr4_ipq807x_apss_fuses struct, fills it with 
++ * values read out of hardware fuses, and finally copies common fuse values ++ * into the CPR3 regulator struct. ++ * ++ * Return: 0 on success, errno on failure ++ */ ++static int cpr4_ipq807x_apss_read_fuse_data(struct cpr3_regulator *vreg) ++{ ++ void __iomem *base = vreg->thread->ctrl->fuse_base; ++ struct cpr4_ipq807x_apss_fuses *fuse; ++ int i, rc; ++ ++ fuse = devm_kzalloc(vreg->thread->ctrl->dev, sizeof(*fuse), GFP_KERNEL); ++ if (!fuse) ++ return -ENOMEM; ++ ++ rc = cpr3_read_fuse_param(base, vreg->cpr4_regulator_data->cpr3_fuse_params->apss_speed_bin_param, ++ &fuse->speed_bin); ++ if (rc) { ++ cpr3_err(vreg, "Unable to read speed bin fuse, rc=%d\n", rc); ++ return rc; ++ } ++ cpr3_info(vreg, "speed bin = %llu\n", fuse->speed_bin); ++ ++ rc = cpr3_read_fuse_param(base, vreg->cpr4_regulator_data->cpr3_fuse_params->cpr_fusing_rev_param, ++ &fuse->cpr_fusing_rev); ++ if (rc) { ++ cpr3_err(vreg, "Unable to read CPR fusing revision fuse, rc=%d\n", ++ rc); ++ return rc; ++ } ++ cpr3_info(vreg, "CPR fusing revision = %llu\n", fuse->cpr_fusing_rev); ++ ++ rc = cpr3_read_fuse_param(base, vreg->cpr4_regulator_data->cpr3_fuse_params->misc_fuse_volt_adj_param, ++ &fuse->misc); ++ if (rc) { ++ cpr3_err(vreg, "Unable to read misc voltage adjustment fuse, rc=%d\n", ++ rc); ++ return rc; ++ } ++ cpr3_info(vreg, "CPR misc fuse value = %llu\n", fuse->misc); ++ if (fuse->misc >= IPQ807x_MISC_FUSE_VAL_COUNT) { ++ cpr3_err(vreg, "CPR misc fuse value = %llu, should be < %lu\n", ++ fuse->misc, IPQ807x_MISC_FUSE_VAL_COUNT); ++ return -EINVAL; ++ } ++ ++ for (i = 0; i < g_valid_fuse_count; i++) { ++ rc = cpr3_read_fuse_param(base, ++ vreg->cpr4_regulator_data->cpr3_fuse_params->apss_init_voltage_param[i], ++ &fuse->init_voltage[i]); ++ if (rc) { ++ cpr3_err(vreg, "Unable to read fuse-corner %d initial voltage fuse, rc=%d\n", ++ i, rc); ++ return rc; ++ } ++ ++ rc = cpr3_read_fuse_param(base, ++ vreg->cpr4_regulator_data->cpr3_fuse_params->apss_target_quot_param[i], ++ &fuse->target_quot[i]); ++ if (rc) { ++ cpr3_err(vreg, "Unable to read fuse-corner %d target quotient fuse, rc=%d\n", ++ i, rc); ++ return rc; ++ } ++ ++ rc = cpr3_read_fuse_param(base, ++ vreg->cpr4_regulator_data->cpr3_fuse_params->apss_ro_sel_param[i], ++ &fuse->ro_sel[i]); ++ if (rc) { ++ cpr3_err(vreg, "Unable to read fuse-corner %d RO select fuse, rc=%d\n", ++ i, rc); ++ return rc; ++ } ++ ++ rc = cpr3_read_fuse_param(base, ++ vreg->cpr4_regulator_data->cpr3_fuse_params->apss_quot_offset_param[i], ++ &fuse->quot_offset[i]); ++ if (rc) { ++ cpr3_err(vreg, "Unable to read fuse-corner %d quotient offset fuse, rc=%d\n", ++ i, rc); ++ return rc; ++ } ++ } ++ ++ rc = cpr3_read_fuse_param(base, vreg->cpr4_regulator_data->cpr3_fuse_params->cpr_boost_fuse_cfg_param, ++ &fuse->boost_cfg); ++ if (rc) { ++ cpr3_err(vreg, "Unable to read CPR boost config fuse, rc=%d\n", ++ rc); ++ return rc; ++ } ++ cpr3_info(vreg, "Voltage boost fuse config = %llu boost = %s\n", ++ fuse->boost_cfg, boost_fuse[fuse->boost_cfg] ++ ? 
"enable" : "disable"); ++ ++ rc = cpr3_read_fuse_param(base, ++ vreg->cpr4_regulator_data->cpr3_fuse_params->apss_boost_fuse_volt_param, ++ &fuse->boost_voltage); ++ if (rc) { ++ cpr3_err(vreg, "failed to read boost fuse voltage, rc=%d\n", ++ rc); ++ return rc; ++ } ++ ++ vreg->fuse_combo = fuse->cpr_fusing_rev + 8 * fuse->speed_bin; ++ if (vreg->fuse_combo >= CPR4_IPQ807x_APSS_FUSE_COMBO_COUNT) { ++ cpr3_err(vreg, "invalid CPR fuse combo = %d found\n", ++ vreg->fuse_combo); ++ return -EINVAL; ++ } ++ ++ vreg->speed_bin_fuse = fuse->speed_bin; ++ vreg->cpr_rev_fuse = fuse->cpr_fusing_rev; ++ vreg->fuse_corner_count = g_valid_fuse_count; ++ vreg->platform_fuses = fuse; ++ ++ return 0; ++} ++ ++/** ++ * cpr4_apss_parse_corner_data() - parse APSS corner data from device tree ++ * properties of the CPR3 regulator's device node ++ * @vreg: Pointer to the CPR3 regulator ++ * ++ * Return: 0 on success, errno on failure ++ */ ++static int cpr4_apss_parse_corner_data(struct cpr3_regulator *vreg) ++{ ++ struct device_node *node = vreg->of_node; ++ struct cpr4_ipq807x_apss_fuses *fuse = vreg->platform_fuses; ++ u32 *temp = NULL; ++ int i, rc; ++ ++ rc = cpr3_parse_common_corner_data(vreg); ++ if (rc) { ++ cpr3_err(vreg, "error reading corner data, rc=%d\n", rc); ++ return rc; ++ } ++ ++ /* If fuse has incorrect RO Select values and dtsi has "qcom,cpr-ro-sel" ++ * entry with RO select values other than zero, then dtsi values will ++ * be used. ++ */ ++ if (of_find_property(node, "qcom,cpr-ro-sel", NULL)) { ++ temp = kcalloc(vreg->fuse_corner_count, sizeof(*temp), ++ GFP_KERNEL); ++ if (!temp) ++ return -ENOMEM; ++ ++ rc = cpr3_parse_array_property(vreg, "qcom,cpr-ro-sel", ++ vreg->fuse_corner_count, temp); ++ if (rc) ++ goto done; ++ ++ for (i = 0; i < vreg->fuse_corner_count; i++) { ++ if (temp[i] != 0) ++ fuse->ro_sel[i] = temp[i]; ++ } ++ } ++done: ++ kfree(temp); ++ return rc; ++} ++ ++/** ++ * cpr4_apss_parse_misc_fuse_voltage_adjustments() - fill an array from a ++ * portion of the voltage adjustments specified based on ++ * miscellaneous fuse bits. ++ * @vreg: Pointer to the CPR3 regulator ++ * @volt_adjust: Voltage adjustment output data array which must be ++ * of size vreg->corner_count ++ * ++ * cpr3_parse_common_corner_data() must be called for vreg before this function ++ * is called so that speed bin size elements are initialized. ++ * ++ * Two formats are supported for the device tree property: ++ * 1. Length == tuple_list_size * vreg->corner_count ++ * (reading begins at index 0) ++ * 2. Length == tuple_list_size * vreg->speed_bin_corner_sum ++ * (reading begins at index tuple_list_size * vreg->speed_bin_offset) ++ * ++ * Here, tuple_list_size is the number of possible values for misc fuse. ++ * All other property lengths are treated as errors. 
++ * ++ * Return: 0 on success, errno on failure ++ */ ++static int cpr4_apss_parse_misc_fuse_voltage_adjustments( ++ struct cpr3_regulator *vreg, u32 *volt_adjust) ++{ ++ struct device_node *node = vreg->of_node; ++ struct cpr4_ipq807x_apss_fuses *fuse = vreg->platform_fuses; ++ int tuple_list_size = IPQ807x_MISC_FUSE_VAL_COUNT; ++ int i, offset, rc, len = 0; ++ const char *prop_name = "qcom,cpr-misc-fuse-voltage-adjustment"; ++ ++ if (!of_find_property(node, prop_name, &len)) { ++ cpr3_err(vreg, "property %s is missing\n", prop_name); ++ return -EINVAL; ++ } ++ ++ if (len == tuple_list_size * vreg->corner_count * sizeof(u32)) { ++ offset = 0; ++ } else if (vreg->speed_bin_corner_sum > 0 && ++ len == tuple_list_size * vreg->speed_bin_corner_sum ++ * sizeof(u32)) { ++ offset = tuple_list_size * vreg->speed_bin_offset ++ + fuse->misc * vreg->corner_count; ++ } else { ++ if (vreg->speed_bin_corner_sum > 0) ++ cpr3_err(vreg, "property %s has invalid length=%d, should be %zu or %zu\n", ++ prop_name, len, ++ tuple_list_size * vreg->corner_count ++ * sizeof(u32), ++ tuple_list_size * vreg->speed_bin_corner_sum ++ * sizeof(u32)); ++ else ++ cpr3_err(vreg, "property %s has invalid length=%d, should be %zu\n", ++ prop_name, len, ++ tuple_list_size * vreg->corner_count ++ * sizeof(u32)); ++ return -EINVAL; ++ } ++ ++ for (i = 0; i < vreg->corner_count; i++) { ++ rc = of_property_read_u32_index(node, prop_name, offset + i, ++ &volt_adjust[i]); ++ if (rc) { ++ cpr3_err(vreg, "error reading property %s, rc=%d\n", ++ prop_name, rc); ++ return rc; ++ } ++ } ++ ++ return 0; ++} ++ ++/** ++ * cpr4_ipq807x_apss_calculate_open_loop_voltages() - calculate the open-loop ++ * voltage for each corner of a CPR3 regulator ++ * @vreg: Pointer to the CPR3 regulator ++ * ++ * If open-loop voltage interpolation is allowed in device tree, then ++ * this function calculates the open-loop voltage for a given corner using ++ * linear interpolation. This interpolation is performed using the processor ++ * frequencies of the lower and higher Fmax corners along with their fused ++ * open-loop voltages. ++ * ++ * If open-loop voltage interpolation is not allowed, then this function uses ++ * the Fmax fused open-loop voltage for all of the corners associated with a ++ * given fuse corner. ++ * ++ * Return: 0 on success, errno on failure ++ */ ++static int cpr4_ipq807x_apss_calculate_open_loop_voltages( ++ struct cpr3_regulator *vreg) ++{ ++ struct device_node *node = vreg->of_node; ++ struct cpr4_ipq807x_apss_fuses *fuse = vreg->platform_fuses; ++ struct cpr3_controller *ctrl = vreg->thread->ctrl; ++ int i, j, rc = 0; ++ bool allow_interpolation; ++ u64 freq_low, volt_low, freq_high, volt_high; ++ int *fuse_volt, *misc_adj_volt; ++ int *fmax_corner; ++ ++ fuse_volt = kcalloc(vreg->fuse_corner_count, sizeof(*fuse_volt), ++ GFP_KERNEL); ++ fmax_corner = kcalloc(vreg->fuse_corner_count, sizeof(*fmax_corner), ++ GFP_KERNEL); ++ if (!fuse_volt || !fmax_corner) { ++ rc = -ENOMEM; ++ goto done; ++ } ++ ++ for (i = 0; i < vreg->fuse_corner_count; i++) { ++ if (ctrl->cpr_global_setting == CPR_DISABLED) ++ fuse_volt[i] = vreg->cpr4_regulator_data->fuse_ref_volt[i]; ++ else ++ fuse_volt[i] = cpr3_convert_open_loop_voltage_fuse( ++ vreg->cpr4_regulator_data->fuse_ref_volt[i], ++ vreg->cpr4_regulator_data->fuse_step_volt, ++ fuse->init_voltage[i], ++ IPQ807x_APSS_VOLTAGE_FUSE_SIZE); ++ ++ /* Log fused open-loop voltage values for debugging purposes. 
*/ ++ cpr3_info(vreg, "fused %8s: open-loop=%7d uV\n", ++ cpr4_ipq807x_apss_fuse_corner_name[i], ++ fuse_volt[i]); ++ } ++ ++ rc = cpr3_determine_part_type(vreg, ++ fuse_volt[vreg->fuse_corner_count - 1]); ++ if (rc) { ++ cpr3_err(vreg, "fused part type detection failed failed, rc=%d\n", ++ rc); ++ goto done; ++ } ++ ++ rc = cpr3_adjust_fused_open_loop_voltages(vreg, fuse_volt); ++ if (rc) { ++ cpr3_err(vreg, "fused open-loop voltage adjustment failed, rc=%d\n", ++ rc); ++ goto done; ++ } ++ ++ allow_interpolation = of_property_read_bool(node, ++ "qcom,allow-voltage-interpolation"); ++ ++ for (i = 1; i < vreg->fuse_corner_count; i++) { ++ if (fuse_volt[i] < fuse_volt[i - 1]) { ++ cpr3_info(vreg, "fuse corner %d voltage=%d uV < fuse corner %d voltage=%d uV; overriding: fuse corner %d voltage=%d\n", ++ i, fuse_volt[i], i - 1, fuse_volt[i - 1], ++ i, fuse_volt[i - 1]); ++ fuse_volt[i] = fuse_volt[i - 1]; ++ } ++ } ++ ++ if (!allow_interpolation) { ++ /* Use fused open-loop voltage for lower frequencies. */ ++ for (i = 0; i < vreg->corner_count; i++) ++ vreg->corner[i].open_loop_volt ++ = fuse_volt[vreg->corner[i].cpr_fuse_corner]; ++ goto done; ++ } ++ ++ /* Determine highest corner mapped to each fuse corner */ ++ j = vreg->fuse_corner_count - 1; ++ for (i = vreg->corner_count - 1; i >= 0; i--) { ++ if (vreg->corner[i].cpr_fuse_corner == j) { ++ fmax_corner[j] = i; ++ j--; ++ } ++ } ++ if (j >= 0) { ++ cpr3_err(vreg, "invalid fuse corner mapping\n"); ++ rc = -EINVAL; ++ goto done; ++ } ++ ++ /* ++ * Interpolation is not possible for corners mapped to the lowest fuse ++ * corner so use the fuse corner value directly. ++ */ ++ for (i = 0; i <= fmax_corner[0]; i++) ++ vreg->corner[i].open_loop_volt = fuse_volt[0]; ++ ++ /* Interpolate voltages for the higher fuse corners. */ ++ for (i = 1; i < vreg->fuse_corner_count; i++) { ++ freq_low = vreg->corner[fmax_corner[i - 1]].proc_freq; ++ volt_low = fuse_volt[i - 1]; ++ freq_high = vreg->corner[fmax_corner[i]].proc_freq; ++ volt_high = fuse_volt[i]; ++ ++ for (j = fmax_corner[i - 1] + 1; j <= fmax_corner[i]; j++) ++ vreg->corner[j].open_loop_volt = cpr3_interpolate( ++ freq_low, volt_low, freq_high, volt_high, ++ vreg->corner[j].proc_freq); ++ } ++ ++done: ++ if (rc == 0) { ++ cpr3_debug(vreg, "unadjusted per-corner open-loop voltages:\n"); ++ for (i = 0; i < vreg->corner_count; i++) ++ cpr3_debug(vreg, "open-loop[%2d] = %d uV\n", i, ++ vreg->corner[i].open_loop_volt); ++ ++ rc = cpr3_adjust_open_loop_voltages(vreg); ++ if (rc) ++ cpr3_err(vreg, "open-loop voltage adjustment failed, rc=%d\n", ++ rc); ++ ++ if (of_find_property(node, ++ "qcom,cpr-misc-fuse-voltage-adjustment", ++ NULL)) { ++ misc_adj_volt = kcalloc(vreg->corner_count, ++ sizeof(*misc_adj_volt), GFP_KERNEL); ++ if (!misc_adj_volt) { ++ rc = -ENOMEM; ++ goto _exit; ++ } ++ ++ rc = cpr4_apss_parse_misc_fuse_voltage_adjustments(vreg, ++ misc_adj_volt); ++ if (rc) { ++ cpr3_err(vreg, "qcom,cpr-misc-fuse-voltage-adjustment reading failed, rc=%d\n", ++ rc); ++ kfree(misc_adj_volt); ++ goto _exit; ++ } ++ ++ for (i = 0; i < vreg->corner_count; i++) ++ vreg->corner[i].open_loop_volt ++ += misc_adj_volt[i]; ++ kfree(misc_adj_volt); ++ } ++ } ++ ++_exit: ++ kfree(fuse_volt); ++ kfree(fmax_corner); ++ return rc; ++} ++ ++/** ++ * cpr4_ipq807x_apss_set_no_interpolation_quotients() - use the fused target ++ * quotient values for lower frequencies. 
++ * @vreg: Pointer to the CPR3 regulator ++ * @volt_adjust: Pointer to array of per-corner closed-loop adjustment ++ * voltages ++ * @volt_adjust_fuse: Pointer to array of per-fuse-corner closed-loop ++ * adjustment voltages ++ * @ro_scale: Pointer to array of per-fuse-corner RO scaling factor ++ * values with units of QUOT/V ++ * ++ * Return: 0 on success, errno on failure ++ */ ++static int cpr4_ipq807x_apss_set_no_interpolation_quotients( ++ struct cpr3_regulator *vreg, int *volt_adjust, ++ int *volt_adjust_fuse, int *ro_scale) ++{ ++ struct cpr4_ipq807x_apss_fuses *fuse = vreg->platform_fuses; ++ u32 quot, ro; ++ int quot_adjust; ++ int i, fuse_corner; ++ ++ for (i = 0; i < vreg->corner_count; i++) { ++ fuse_corner = vreg->corner[i].cpr_fuse_corner; ++ quot = fuse->target_quot[fuse_corner]; ++ quot_adjust = cpr3_quot_adjustment(ro_scale[fuse_corner], ++ volt_adjust_fuse[fuse_corner] + ++ volt_adjust[i]); ++ ro = fuse->ro_sel[fuse_corner]; ++ vreg->corner[i].target_quot[ro] = quot + quot_adjust; ++ cpr3_debug(vreg, "corner=%d RO=%u target quot=%u\n", ++ i, ro, quot); ++ ++ if (quot_adjust) ++ cpr3_debug(vreg, "adjusted corner %d RO%u target quot: %u --> %u (%d uV)\n", ++ i, ro, quot, vreg->corner[i].target_quot[ro], ++ volt_adjust_fuse[fuse_corner] + ++ volt_adjust[i]); ++ } ++ ++ return 0; ++} ++ ++/** ++ * cpr4_ipq807x_apss_calculate_target_quotients() - calculate the CPR target ++ * quotient for each corner of a CPR3 regulator ++ * @vreg: Pointer to the CPR3 regulator ++ * ++ * If target quotient interpolation is allowed in device tree, then this ++ * function calculates the target quotient for a given corner using linear ++ * interpolation. This interpolation is performed using the processor ++ * frequencies of the lower and higher Fmax corners along with the fused ++ * target quotient and quotient offset of the higher Fmax corner. ++ * ++ * If target quotient interpolation is not allowed, then this function uses ++ * the Fmax fused target quotient for all of the corners associated with a ++ * given fuse corner. ++ * ++ * Return: 0 on success, errno on failure ++ */ ++static int cpr4_ipq807x_apss_calculate_target_quotients( ++ struct cpr3_regulator *vreg) ++{ ++ struct cpr4_ipq807x_apss_fuses *fuse = vreg->platform_fuses; ++ int rc; ++ bool allow_interpolation; ++ u64 freq_low, freq_high, prev_quot; ++ u64 *quot_low; ++ u64 *quot_high; ++ u32 quot, ro; ++ int i, j, fuse_corner, quot_adjust; ++ int *fmax_corner; ++ int *volt_adjust, *volt_adjust_fuse, *ro_scale; ++ int *voltage_adj_misc; ++ ++ /* Log fused quotient values for debugging purposes. 
*/ ++ for (i = CPR4_IPQ807x_APSS_FUSE_CORNER_SVS; ++ i < vreg->fuse_corner_count; i++) ++ cpr3_info(vreg, "fused %8s: quot[%2llu]=%4llu, quot_offset[%2llu]=%4llu\n", ++ cpr4_ipq807x_apss_fuse_corner_name[i], ++ fuse->ro_sel[i], fuse->target_quot[i], ++ fuse->ro_sel[i], fuse->quot_offset[i] * ++ IPQ807x_APSS_QUOT_OFFSET_SCALE); ++ ++ allow_interpolation = of_property_read_bool(vreg->of_node, ++ "qcom,allow-quotient-interpolation"); ++ ++ volt_adjust = kcalloc(vreg->corner_count, sizeof(*volt_adjust), ++ GFP_KERNEL); ++ volt_adjust_fuse = kcalloc(vreg->fuse_corner_count, ++ sizeof(*volt_adjust_fuse), GFP_KERNEL); ++ ro_scale = kcalloc(vreg->fuse_corner_count, sizeof(*ro_scale), ++ GFP_KERNEL); ++ fmax_corner = kcalloc(vreg->fuse_corner_count, sizeof(*fmax_corner), ++ GFP_KERNEL); ++ quot_low = kcalloc(vreg->fuse_corner_count, sizeof(*quot_low), ++ GFP_KERNEL); ++ quot_high = kcalloc(vreg->fuse_corner_count, sizeof(*quot_high), ++ GFP_KERNEL); ++ if (!volt_adjust || !volt_adjust_fuse || !ro_scale || ++ !fmax_corner || !quot_low || !quot_high) { ++ rc = -ENOMEM; ++ goto done; ++ } ++ ++ rc = cpr3_parse_closed_loop_voltage_adjustments(vreg, &fuse->ro_sel[0], ++ volt_adjust, volt_adjust_fuse, ro_scale); ++ if (rc) { ++ cpr3_err(vreg, "could not load closed-loop voltage adjustments, rc=%d\n", ++ rc); ++ goto done; ++ } ++ ++ if (of_find_property(vreg->of_node, ++ "qcom,cpr-misc-fuse-voltage-adjustment", NULL)) { ++ voltage_adj_misc = kcalloc(vreg->corner_count, ++ sizeof(*voltage_adj_misc), GFP_KERNEL); ++ if (!voltage_adj_misc) { ++ rc = -ENOMEM; ++ goto done; ++ } ++ ++ rc = cpr4_apss_parse_misc_fuse_voltage_adjustments(vreg, ++ voltage_adj_misc); ++ if (rc) { ++ cpr3_err(vreg, "qcom,cpr-misc-fuse-voltage-adjustment reading failed, rc=%d\n", ++ rc); ++ kfree(voltage_adj_misc); ++ goto done; ++ } ++ ++ for (i = 0; i < vreg->corner_count; i++) ++ volt_adjust[i] += voltage_adj_misc[i]; ++ ++ kfree(voltage_adj_misc); ++ } ++ ++ if (!allow_interpolation) { ++ /* Use fused target quotients for lower frequencies. */ ++ return cpr4_ipq807x_apss_set_no_interpolation_quotients( ++ vreg, volt_adjust, volt_adjust_fuse, ro_scale); ++ } ++ ++ /* Determine highest corner mapped to each fuse corner */ ++ j = vreg->fuse_corner_count - 1; ++ for (i = vreg->corner_count - 1; i >= 0; i--) { ++ if (vreg->corner[i].cpr_fuse_corner == j) { ++ fmax_corner[j] = i; ++ j--; ++ } ++ } ++ if (j >= 0) { ++ cpr3_err(vreg, "invalid fuse corner mapping\n"); ++ rc = -EINVAL; ++ goto done; ++ } ++ ++ /* ++ * Interpolation is not possible for corners mapped to the lowest fuse ++ * corner so use the fuse corner value directly. 
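++	 *
++	 * The higher fuse corners are handled right after: quot_low[i] is
++	 * taken from the previous fuse corner's quot_high when both corners
++	 * use the same RO, otherwise it is quot_high[i] minus the fused
++	 * quotient offset scaled by IPQ807x_APSS_QUOT_OFFSET_SCALE.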
++ */ ++ i = CPR4_IPQ807x_APSS_FUSE_CORNER_SVS; ++ quot_adjust = cpr3_quot_adjustment(ro_scale[i], volt_adjust_fuse[i]); ++ quot = fuse->target_quot[i] + quot_adjust; ++ quot_high[i] = quot_low[i] = quot; ++ ro = fuse->ro_sel[i]; ++ if (quot_adjust) ++ cpr3_debug(vreg, "adjusted fuse corner %d RO%u target quot: %llu --> %u (%d uV)\n", ++ i, ro, fuse->target_quot[i], quot, volt_adjust_fuse[i]); ++ ++ for (i = 0; i <= fmax_corner[CPR4_IPQ807x_APSS_FUSE_CORNER_SVS]; ++ i++) ++ vreg->corner[i].target_quot[ro] = quot; ++ ++ for (i = CPR4_IPQ807x_APSS_FUSE_CORNER_NOM; ++ i < vreg->fuse_corner_count; i++) { ++ quot_high[i] = fuse->target_quot[i]; ++ if (fuse->ro_sel[i] == fuse->ro_sel[i - 1]) ++ quot_low[i] = quot_high[i - 1]; ++ else ++ quot_low[i] = quot_high[i] ++ - fuse->quot_offset[i] ++ * IPQ807x_APSS_QUOT_OFFSET_SCALE; ++ if (quot_high[i] < quot_low[i]) { ++ cpr3_debug(vreg, "quot_high[%d]=%llu < quot_low[%d]=%llu; overriding: quot_high[%d]=%llu\n", ++ i, quot_high[i], i, quot_low[i], ++ i, quot_low[i]); ++ quot_high[i] = quot_low[i]; ++ } ++ } ++ ++ /* Perform per-fuse-corner target quotient adjustment */ ++ for (i = 1; i < vreg->fuse_corner_count; i++) { ++ quot_adjust = cpr3_quot_adjustment(ro_scale[i], ++ volt_adjust_fuse[i]); ++ if (quot_adjust) { ++ prev_quot = quot_high[i]; ++ quot_high[i] += quot_adjust; ++ cpr3_debug(vreg, "adjusted fuse corner %d RO%llu target quot: %llu --> %llu (%d uV)\n", ++ i, fuse->ro_sel[i], prev_quot, quot_high[i], ++ volt_adjust_fuse[i]); ++ } ++ ++ if (fuse->ro_sel[i] == fuse->ro_sel[i - 1]) ++ quot_low[i] = quot_high[i - 1]; ++ else ++ quot_low[i] += cpr3_quot_adjustment(ro_scale[i], ++ volt_adjust_fuse[i - 1]); ++ ++ if (quot_high[i] < quot_low[i]) { ++ cpr3_debug(vreg, "quot_high[%d]=%llu < quot_low[%d]=%llu after adjustment; overriding: quot_high[%d]=%llu\n", ++ i, quot_high[i], i, quot_low[i], ++ i, quot_low[i]); ++ quot_high[i] = quot_low[i]; ++ } ++ } ++ ++ /* Interpolate voltages for the higher fuse corners. 
*/ ++ for (i = 1; i < vreg->fuse_corner_count; i++) { ++ freq_low = vreg->corner[fmax_corner[i - 1]].proc_freq; ++ freq_high = vreg->corner[fmax_corner[i]].proc_freq; ++ ++ ro = fuse->ro_sel[i]; ++ for (j = fmax_corner[i - 1] + 1; j <= fmax_corner[i]; j++) ++ vreg->corner[j].target_quot[ro] = cpr3_interpolate( ++ freq_low, quot_low[i], freq_high, quot_high[i], ++ vreg->corner[j].proc_freq); ++ } ++ ++ /* Perform per-corner target quotient adjustment */ ++ for (i = 0; i < vreg->corner_count; i++) { ++ fuse_corner = vreg->corner[i].cpr_fuse_corner; ++ ro = fuse->ro_sel[fuse_corner]; ++ quot_adjust = cpr3_quot_adjustment(ro_scale[fuse_corner], ++ volt_adjust[i]); ++ if (quot_adjust) { ++ prev_quot = vreg->corner[i].target_quot[ro]; ++ vreg->corner[i].target_quot[ro] += quot_adjust; ++ cpr3_debug(vreg, "adjusted corner %d RO%u target quot: %llu --> %u (%d uV)\n", ++ i, ro, prev_quot, ++ vreg->corner[i].target_quot[ro], ++ volt_adjust[i]); ++ } ++ } ++ ++ /* Ensure that target quotients increase monotonically */ ++ for (i = 1; i < vreg->corner_count; i++) { ++ ro = fuse->ro_sel[vreg->corner[i].cpr_fuse_corner]; ++ if (fuse->ro_sel[vreg->corner[i - 1].cpr_fuse_corner] == ro ++ && vreg->corner[i].target_quot[ro] ++ < vreg->corner[i - 1].target_quot[ro]) { ++ cpr3_debug(vreg, "adjusted corner %d RO%u target quot=%u < adjusted corner %d RO%u target quot=%u; overriding: corner %d RO%u target quot=%u\n", ++ i, ro, vreg->corner[i].target_quot[ro], ++ i - 1, ro, vreg->corner[i - 1].target_quot[ro], ++ i, ro, vreg->corner[i - 1].target_quot[ro]); ++ vreg->corner[i].target_quot[ro] ++ = vreg->corner[i - 1].target_quot[ro]; ++ } ++ } ++ ++done: ++ kfree(volt_adjust); ++ kfree(volt_adjust_fuse); ++ kfree(ro_scale); ++ kfree(fmax_corner); ++ kfree(quot_low); ++ kfree(quot_high); ++ return rc; ++} ++ ++/** ++ * cpr4_apss_print_settings() - print out APSS CPR configuration settings into ++ * the kernel log for debugging purposes ++ * @vreg: Pointer to the CPR3 regulator ++ */ ++static void cpr4_apss_print_settings(struct cpr3_regulator *vreg) ++{ ++ struct cpr3_corner *corner; ++ int i; ++ ++ cpr3_debug(vreg, "Corner: Frequency (Hz), Fuse Corner, Floor (uV), Open-Loop (uV), Ceiling (uV)\n"); ++ for (i = 0; i < vreg->corner_count; i++) { ++ corner = &vreg->corner[i]; ++ cpr3_debug(vreg, "%3d: %10u, %2d, %7d, %7d, %7d\n", ++ i, corner->proc_freq, corner->cpr_fuse_corner, ++ corner->floor_volt, corner->open_loop_volt, ++ corner->ceiling_volt); ++ } ++ ++ if (vreg->thread->ctrl->apm) ++ cpr3_debug(vreg, "APM threshold = %d uV, APM adjust = %d uV\n", ++ vreg->thread->ctrl->apm_threshold_volt, ++ vreg->thread->ctrl->apm_adj_volt); ++} ++ ++/** ++ * cpr4_apss_init_thread() - perform steps necessary to initialize the ++ * configuration data for a CPR3 thread ++ * @thread: Pointer to the CPR3 thread ++ * ++ * Return: 0 on success, errno on failure ++ */ ++static int cpr4_apss_init_thread(struct cpr3_thread *thread) ++{ ++ int rc; ++ ++ rc = cpr3_parse_common_thread_data(thread); ++ if (rc) { ++ cpr3_err(thread->ctrl, "thread %u unable to read CPR thread data from device tree, rc=%d\n", ++ thread->thread_id, rc); ++ return rc; ++ } ++ ++ return 0; ++} ++ ++/** ++ * cpr4_apss_parse_temp_adj_properties() - parse temperature based ++ * adjustment properties from device tree. 
++ * @ctrl: Pointer to the CPR3 controller ++ * ++ * Return: 0 on success, errno on failure ++ */ ++static int cpr4_apss_parse_temp_adj_properties(struct cpr3_controller *ctrl) ++{ ++ struct device_node *of_node = ctrl->dev->of_node; ++ int rc, i, len, temp_point_count; ++ ++ if (!of_find_property(of_node, "qcom,cpr-temp-point-map", &len)) { ++ /* ++ * Temperature based adjustments are not defined. Single ++ * temperature band is still valid for per-online-core ++ * adjustments. ++ */ ++ ctrl->temp_band_count = 1; ++ return 0; ++ } ++ ++ temp_point_count = len / sizeof(u32); ++ if (temp_point_count <= 0 || ++ temp_point_count > IPQ807x_APSS_MAX_TEMP_POINTS) { ++ cpr3_err(ctrl, "invalid number of temperature points %d > %d (max)\n", ++ temp_point_count, IPQ807x_APSS_MAX_TEMP_POINTS); ++ return -EINVAL; ++ } ++ ++ ctrl->temp_points = devm_kcalloc(ctrl->dev, temp_point_count, ++ sizeof(*ctrl->temp_points), GFP_KERNEL); ++ if (!ctrl->temp_points) ++ return -ENOMEM; ++ ++ rc = of_property_read_u32_array(of_node, "qcom,cpr-temp-point-map", ++ ctrl->temp_points, temp_point_count); ++ if (rc) { ++ cpr3_err(ctrl, "error reading property qcom,cpr-temp-point-map, rc=%d\n", ++ rc); ++ return rc; ++ } ++ ++ for (i = 0; i < temp_point_count; i++) ++ cpr3_debug(ctrl, "Temperature Point %d=%d\n", i, ++ ctrl->temp_points[i]); ++ ++ /* ++ * If t1, t2, and t3 are the temperature points, then the temperature ++ * bands are: (-inf, t1], (t1, t2], (t2, t3], and (t3, inf). ++ */ ++ ctrl->temp_band_count = temp_point_count + 1; ++ cpr3_debug(ctrl, "Number of temp bands =%d\n", ctrl->temp_band_count); ++ ++ rc = of_property_read_u32(of_node, "qcom,cpr-initial-temp-band", ++ &ctrl->initial_temp_band); ++ if (rc) { ++ cpr3_err(ctrl, "error reading qcom,cpr-initial-temp-band, rc=%d\n", ++ rc); ++ return rc; ++ } ++ ++ if (ctrl->initial_temp_band >= ctrl->temp_band_count) { ++ cpr3_err(ctrl, "Initial temperature band value %d should be in range [0 - %d]\n", ++ ctrl->initial_temp_band, ctrl->temp_band_count - 1); ++ return -EINVAL; ++ } ++ ++ ctrl->temp_sensor_id_start = IPQ807x_APSS_TEMP_SENSOR_ID_START; ++ ctrl->temp_sensor_id_end = IPQ807x_APSS_TEMP_SENSOR_ID_END; ++ ctrl->allow_temp_adj = true; ++ return rc; ++} ++ ++/** ++ * cpr4_apss_parse_boost_properties() - parse configuration data for boost ++ * voltage adjustment for CPR3 regulator from device tree. ++ * @vreg: Pointer to the CPR3 regulator ++ * ++ * Return: 0 on success, errno on failure ++ */ ++static int cpr4_apss_parse_boost_properties(struct cpr3_regulator *vreg) ++{ ++ struct cpr3_controller *ctrl = vreg->thread->ctrl; ++ struct cpr4_ipq807x_apss_fuses *fuse = vreg->platform_fuses; ++ struct cpr3_corner *corner; ++ int i, boost_voltage, final_boost_volt, rc = 0; ++ int *boost_table = NULL, *boost_temp_adj = NULL; ++ int boost_voltage_adjust = 0, boost_num_cores = 0; ++ u32 boost_allowed = 0; ++ ++ if (!boost_fuse[fuse->boost_cfg]) ++ /* Voltage boost is disabled in fuse */ ++ return 0; ++ ++ if (of_find_property(vreg->of_node, "qcom,allow-boost", NULL)) { ++ rc = cpr3_parse_array_property(vreg, "qcom,allow-boost", 1, ++ &boost_allowed); ++ if (rc) ++ return rc; ++ } ++ ++ if (!boost_allowed) { ++ /* Voltage boost is not enabled for this regulator */ ++ return 0; ++ } ++ ++ boost_voltage = cpr3_convert_open_loop_voltage_fuse( ++ vreg->cpr4_regulator_data->boost_fuse_ref_volt, ++ vreg->cpr4_regulator_data->fuse_step_volt, ++ fuse->boost_voltage, ++ IPQ807x_APSS_VOLTAGE_FUSE_SIZE); ++ ++ /* Log boost voltage value for debugging purposes. 
*/ ++ cpr3_info(vreg, "Boost open-loop=%7d uV\n", boost_voltage); ++ ++ if (of_find_property(vreg->of_node, ++ "qcom,cpr-boost-voltage-fuse-adjustment", NULL)) { ++ rc = cpr3_parse_array_property(vreg, ++ "qcom,cpr-boost-voltage-fuse-adjustment", ++ 1, &boost_voltage_adjust); ++ if (rc) { ++ cpr3_err(vreg, "qcom,cpr-boost-voltage-fuse-adjustment reading failed, rc=%d\n", ++ rc); ++ return rc; ++ } ++ ++ boost_voltage += boost_voltage_adjust; ++ /* Log boost voltage value for debugging purposes. */ ++ cpr3_info(vreg, "Adjusted boost open-loop=%7d uV\n", ++ boost_voltage); ++ } ++ ++ /* Limit boost voltage value between ceiling and floor voltage limits */ ++ boost_voltage = min(boost_voltage, vreg->cpr4_regulator_data->boost_ceiling_volt); ++ boost_voltage = max(boost_voltage, vreg->cpr4_regulator_data->boost_floor_volt); ++ ++ /* ++ * The boost feature can only be used for the highest voltage corner. ++ * Also, keep core-count adjustments disabled when the boost feature ++ * is enabled. ++ */ ++ corner = &vreg->corner[vreg->corner_count - 1]; ++ if (!corner->sdelta) { ++ /* ++ * If core-count/temp adjustments are not defined, the cpr4 ++ * sdelta for this corner will not be allocated. Allocate it ++ * here for boost configuration. ++ */ ++ corner->sdelta = devm_kzalloc(ctrl->dev, ++ sizeof(*corner->sdelta), GFP_KERNEL); ++ if (!corner->sdelta) ++ return -ENOMEM; ++ } ++ corner->sdelta->temp_band_count = ctrl->temp_band_count; ++ ++ rc = of_property_read_u32(vreg->of_node, "qcom,cpr-num-boost-cores", ++ &boost_num_cores); ++ if (rc) { ++ cpr3_err(vreg, "qcom,cpr-num-boost-cores reading failed, rc=%d\n", ++ rc); ++ return rc; ++ } ++ ++ if (boost_num_cores <= 0 || ++ boost_num_cores > IPQ807x_APSS_CPR_SDELTA_CORE_COUNT) { ++ cpr3_err(vreg, "Invalid boost number of cores = %d\n", ++ boost_num_cores); ++ return -EINVAL; ++ } ++ corner->sdelta->boost_num_cores = boost_num_cores; ++ ++ boost_table = devm_kcalloc(ctrl->dev, corner->sdelta->temp_band_count, ++ sizeof(*boost_table), GFP_KERNEL); ++ if (!boost_table) ++ return -ENOMEM; ++ ++ if (of_find_property(vreg->of_node, ++ "qcom,cpr-boost-temp-adjustment", NULL)) { ++ boost_temp_adj = kcalloc(corner->sdelta->temp_band_count, ++ sizeof(*boost_temp_adj), GFP_KERNEL); ++ if (!boost_temp_adj) ++ return -ENOMEM; ++ ++ rc = cpr3_parse_array_property(vreg, ++ "qcom,cpr-boost-temp-adjustment", ++ corner->sdelta->temp_band_count, ++ boost_temp_adj); ++ if (rc) { ++ cpr3_err(vreg, "qcom,cpr-boost-temp-adjustment reading failed, rc=%d\n", ++ rc); ++ goto done; ++ } ++ } ++ ++ for (i = 0; i < corner->sdelta->temp_band_count; i++) { ++ /* Apply static adjustments to boost voltage */ ++ final_boost_volt = boost_voltage + (boost_temp_adj == NULL ++ ? 
0 : boost_temp_adj[i]); ++ /* ++ * Limit final adjusted boost voltage value between ceiling ++ * and floor voltage limits ++ */ ++ final_boost_volt = min(final_boost_volt, ++ vreg->cpr4_regulator_data->boost_ceiling_volt); ++ final_boost_volt = max(final_boost_volt, ++ vreg->cpr4_regulator_data->boost_floor_volt); ++ ++ boost_table[i] = (corner->open_loop_volt - final_boost_volt) ++ / ctrl->step_volt; ++ cpr3_debug(vreg, "Adjusted boost voltage margin for temp band %d = %d steps\n", ++ i, boost_table[i]); ++ } ++ ++ corner->ceiling_volt = vreg->cpr4_regulator_data->boost_ceiling_volt; ++ corner->sdelta->boost_table = boost_table; ++ corner->sdelta->allow_boost = true; ++ corner->sdelta->allow_core_count_adj = false; ++ vreg->allow_boost = true; ++ ctrl->allow_boost = true; ++done: ++ kfree(boost_temp_adj); ++ return rc; ++} ++ ++/** ++ * cpr4_apss_init_regulator() - perform all steps necessary to initialize the ++ * configuration data for a CPR3 regulator ++ * @vreg: Pointer to the CPR3 regulator ++ * ++ * Return: 0 on success, errno on failure ++ */ ++static int cpr4_apss_init_regulator(struct cpr3_regulator *vreg) ++{ ++ struct cpr4_ipq807x_apss_fuses *fuse; ++ int rc; ++ ++ rc = cpr4_ipq807x_apss_read_fuse_data(vreg); ++ if (rc) { ++ cpr3_err(vreg, "unable to read CPR fuse data, rc=%d\n", rc); ++ return rc; ++ } ++ ++ fuse = vreg->platform_fuses; ++ ++ rc = cpr4_apss_parse_corner_data(vreg); ++ if (rc) { ++ cpr3_err(vreg, "unable to read CPR corner data from device tree, rc=%d\n", ++ rc); ++ return rc; ++ } ++ ++ rc = cpr3_mem_acc_init(vreg); ++ if (rc) { ++ if (rc != -EPROBE_DEFER) ++ cpr3_err(vreg, "unable to initialize mem-acc regulator settings, rc=%d\n", ++ rc); ++ return rc; ++ } ++ ++ rc = cpr4_ipq807x_apss_calculate_open_loop_voltages(vreg); ++ if (rc) { ++ cpr3_err(vreg, "unable to calculate open-loop voltages, rc=%d\n", ++ rc); ++ return rc; ++ } ++ ++ rc = cpr3_limit_open_loop_voltages(vreg); ++ if (rc) { ++ cpr3_err(vreg, "unable to limit open-loop voltages, rc=%d\n", ++ rc); ++ return rc; ++ } ++ ++ cpr3_open_loop_voltage_as_ceiling(vreg); ++ ++ rc = cpr3_limit_floor_voltages(vreg); ++ if (rc) { ++ cpr3_err(vreg, "unable to limit floor voltages, rc=%d\n", rc); ++ return rc; ++ } ++ ++ rc = cpr4_ipq807x_apss_calculate_target_quotients(vreg); ++ if (rc) { ++ cpr3_err(vreg, "unable to calculate target quotients, rc=%d\n", ++ rc); ++ return rc; ++ } ++ ++ rc = cpr4_parse_core_count_temp_voltage_adj(vreg, false); ++ if (rc) { ++ cpr3_err(vreg, "unable to parse temperature and core count voltage adjustments, rc=%d\n", ++ rc); ++ return rc; ++ } ++ ++ if (vreg->allow_core_count_adj && (vreg->max_core_count <= 0 ++ || vreg->max_core_count > ++ IPQ807x_APSS_CPR_SDELTA_CORE_COUNT)) { ++ cpr3_err(vreg, "qcom,max-core-count has invalid value = %d\n", ++ vreg->max_core_count); ++ return -EINVAL; ++ } ++ ++ rc = cpr4_apss_parse_boost_properties(vreg); ++ if (rc) { ++ cpr3_err(vreg, "unable to parse boost adjustments, rc=%d\n", ++ rc); ++ return rc; ++ } ++ ++ cpr4_apss_print_settings(vreg); ++ ++ return rc; ++} ++ ++/** ++ * cpr4_apss_init_controller() - perform APSS CPR4 controller specific ++ * initializations ++ * @ctrl: Pointer to the CPR3 controller ++ * ++ * Return: 0 on success, errno on failure ++ */ ++static int cpr4_apss_init_controller(struct cpr3_controller *ctrl) ++{ ++ int rc; ++ ++ rc = cpr3_parse_common_ctrl_data(ctrl); ++ if (rc) { ++ if (rc != -EPROBE_DEFER) ++ cpr3_err(ctrl, "unable to parse common controller data, rc=%d\n", ++ rc); ++ return rc; ++ } ++ ++ rc = 
of_property_read_u32(ctrl->dev->of_node, ++ "qcom,cpr-down-error-step-limit", ++ &ctrl->down_error_step_limit); ++ if (rc) { ++ cpr3_err(ctrl, "error reading qcom,cpr-down-error-step-limit, rc=%d\n", ++ rc); ++ return rc; ++ } ++ ++ rc = of_property_read_u32(ctrl->dev->of_node, ++ "qcom,cpr-up-error-step-limit", ++ &ctrl->up_error_step_limit); ++ if (rc) { ++ cpr3_err(ctrl, "error reading qcom,cpr-up-error-step-limit, rc=%d\n", ++ rc); ++ return rc; ++ } ++ ++ /* ++ * Use fixed step quotient if specified otherwise use dynamic ++ * calculated per RO step quotient ++ */ ++ of_property_read_u32(ctrl->dev->of_node, "qcom,cpr-step-quot-fixed", ++ &ctrl->step_quot_fixed); ++ ctrl->use_dynamic_step_quot = ctrl->step_quot_fixed ? false : true; ++ ++ ctrl->saw_use_unit_mV = of_property_read_bool(ctrl->dev->of_node, ++ "qcom,cpr-saw-use-unit-mV"); ++ ++ of_property_read_u32(ctrl->dev->of_node, ++ "qcom,cpr-voltage-settling-time", ++ &ctrl->voltage_settling_time); ++ ++ if (of_find_property(ctrl->dev->of_node, "vdd-limit-supply", NULL)) { ++ ctrl->vdd_limit_regulator = ++ devm_regulator_get(ctrl->dev, "vdd-limit"); ++ if (IS_ERR(ctrl->vdd_limit_regulator)) { ++ rc = PTR_ERR(ctrl->vdd_limit_regulator); ++ if (rc != -EPROBE_DEFER) ++ cpr3_err(ctrl, "unable to request vdd-limit regulator, rc=%d\n", ++ rc); ++ return rc; ++ } ++ } ++ ++ rc = cpr3_apm_init(ctrl); ++ if (rc) { ++ if (rc != -EPROBE_DEFER) ++ cpr3_err(ctrl, "unable to initialize APM settings, rc=%d\n", ++ rc); ++ return rc; ++ } ++ ++ rc = cpr4_apss_parse_temp_adj_properties(ctrl); ++ if (rc) { ++ cpr3_err(ctrl, "unable to parse temperature adjustment properties, rc=%d\n", ++ rc); ++ return rc; ++ } ++ ++ ctrl->sensor_count = IPQ807x_APSS_CPR_SENSOR_COUNT; ++ ++ /* ++ * APSS only has one thread (0) per controller so the zeroed ++ * array does not need further modification. 
++ */ ++ ctrl->sensor_owner = devm_kcalloc(ctrl->dev, ctrl->sensor_count, ++ sizeof(*ctrl->sensor_owner), GFP_KERNEL); ++ if (!ctrl->sensor_owner) ++ return -ENOMEM; ++ ++ ctrl->ctrl_type = CPR_CTRL_TYPE_CPR4; ++ ctrl->supports_hw_closed_loop = false; ++ ctrl->use_hw_closed_loop = of_property_read_bool(ctrl->dev->of_node, ++ "qcom,cpr-hw-closed-loop"); ++ return 0; ++} ++ ++static int cpr4_apss_regulator_suspend(struct platform_device *pdev, ++ pm_message_t state) ++{ ++ struct cpr3_controller *ctrl = platform_get_drvdata(pdev); ++ ++ return cpr3_regulator_suspend(ctrl); ++} ++ ++static int cpr4_apss_regulator_resume(struct platform_device *pdev) ++{ ++ struct cpr3_controller *ctrl = platform_get_drvdata(pdev); ++ ++ return cpr3_regulator_resume(ctrl); ++} ++ ++static void ipq6018_set_mem_acc(struct regulator_dev *rdev) ++{ ++ struct cpr3_regulator *vreg = rdev_get_drvdata(rdev); ++ ++ ipq6018_mem_acc_tcsr[0].ioremap_addr = ++ ioremap(ipq6018_mem_acc_tcsr[0].phy_addr, 0x4); ++ ipq6018_mem_acc_tcsr[1].ioremap_addr = ++ ioremap(ipq6018_mem_acc_tcsr[1].phy_addr, 0x4); ++ ++ if ((ipq6018_mem_acc_tcsr[0].ioremap_addr != NULL) && ++ (ipq6018_mem_acc_tcsr[1].ioremap_addr != NULL) && ++ (vreg->current_corner == (vreg->corner_count - CPR3_CORNER_OFFSET))) { ++ ++ writel_relaxed(ipq6018_mem_acc_tcsr[0].value, ++ ipq6018_mem_acc_tcsr[0].ioremap_addr); ++ writel_relaxed(ipq6018_mem_acc_tcsr[1].value, ++ ipq6018_mem_acc_tcsr[1].ioremap_addr); ++ } ++} ++ ++static void ipq6018_clr_mem_acc(struct regulator_dev *rdev) ++{ ++ struct cpr3_regulator *vreg = rdev_get_drvdata(rdev); ++ ++ if ((ipq6018_mem_acc_tcsr[0].ioremap_addr != NULL) && ++ (ipq6018_mem_acc_tcsr[1].ioremap_addr != NULL) && ++ (vreg->current_corner != vreg->corner_count - CPR3_CORNER_OFFSET)) { ++ writel_relaxed(0x0, ipq6018_mem_acc_tcsr[0].ioremap_addr); ++ writel_relaxed(0x0, ipq6018_mem_acc_tcsr[1].ioremap_addr); ++ } ++ ++ iounmap(ipq6018_mem_acc_tcsr[0].ioremap_addr); ++ iounmap(ipq6018_mem_acc_tcsr[1].ioremap_addr); ++} ++ ++static struct cpr4_mem_acc_func ipq6018_mem_acc_funcs = { ++ .set_mem_acc = ipq6018_set_mem_acc, ++ .clear_mem_acc = ipq6018_clr_mem_acc ++}; ++ ++static const struct cpr4_reg_data ipq807x_cpr_apss = { ++ .cpr_valid_fuse_count = IPQ807x_APSS_FUSE_CORNERS, ++ .fuse_ref_volt = ipq807x_apss_fuse_ref_volt, ++ .fuse_step_volt = IPQ807x_APSS_FUSE_STEP_VOLT, ++ .cpr_clk_rate = IPQ807x_APSS_CPR_CLOCK_RATE, ++ .boost_fuse_ref_volt= IPQ807x_APSS_BOOST_FUSE_REF_VOLT, ++ .boost_ceiling_volt= IPQ807x_APSS_BOOST_CEILING_VOLT, ++ .boost_floor_volt= IPQ807x_APSS_BOOST_FLOOR_VOLT, ++ .cpr3_fuse_params = &ipq807x_fuse_params, ++ .mem_acc_funcs = NULL, ++}; ++ ++static const struct cpr4_reg_data ipq817x_cpr_apss = { ++ .cpr_valid_fuse_count = IPQ817x_APPS_FUSE_CORNERS, ++ .fuse_ref_volt = ipq807x_apss_fuse_ref_volt, ++ .fuse_step_volt = IPQ807x_APSS_FUSE_STEP_VOLT, ++ .cpr_clk_rate = IPQ807x_APSS_CPR_CLOCK_RATE, ++ .boost_fuse_ref_volt= IPQ807x_APSS_BOOST_FUSE_REF_VOLT, ++ .boost_ceiling_volt= IPQ807x_APSS_BOOST_CEILING_VOLT, ++ .boost_floor_volt= IPQ807x_APSS_BOOST_FLOOR_VOLT, ++ .cpr3_fuse_params = &ipq807x_fuse_params, ++ .mem_acc_funcs = NULL, ++}; ++ ++static const struct cpr4_reg_data ipq6018_cpr_apss = { ++ .cpr_valid_fuse_count = IPQ6018_APSS_FUSE_CORNERS, ++ .fuse_ref_volt = ipq6018_apss_fuse_ref_volt, ++ .fuse_step_volt = IPQ6018_APSS_FUSE_STEP_VOLT, ++ .cpr_clk_rate = IPQ6018_APSS_CPR_CLOCK_RATE, ++ .boost_fuse_ref_volt = IPQ6018_APSS_BOOST_FUSE_REF_VOLT, ++ .boost_ceiling_volt = IPQ6018_APSS_BOOST_CEILING_VOLT, ++ 
.boost_floor_volt = IPQ6018_APSS_BOOST_FLOOR_VOLT, ++ .cpr3_fuse_params = &ipq6018_fuse_params, ++ .mem_acc_funcs = &ipq6018_mem_acc_funcs, ++}; ++ ++static const struct cpr4_reg_data ipq9574_cpr_apss = { ++ .cpr_valid_fuse_count = IPQ9574_APSS_FUSE_CORNERS, ++ .fuse_ref_volt = ipq9574_apss_fuse_ref_volt, ++ .fuse_step_volt = IPQ9574_APSS_FUSE_STEP_VOLT, ++ .cpr_clk_rate = IPQ6018_APSS_CPR_CLOCK_RATE, ++ .boost_fuse_ref_volt = IPQ6018_APSS_BOOST_FUSE_REF_VOLT, ++ .boost_ceiling_volt = IPQ6018_APSS_BOOST_CEILING_VOLT, ++ .boost_floor_volt = IPQ6018_APSS_BOOST_FLOOR_VOLT, ++ .cpr3_fuse_params = &ipq9574_fuse_params, ++ .mem_acc_funcs = NULL, ++}; ++ ++static struct of_device_id cpr4_regulator_match_table[] = { ++ { ++ .compatible = "qcom,cpr4-ipq807x-apss-regulator", ++ .data = &ipq807x_cpr_apss ++ }, ++ { ++ .compatible = "qcom,cpr4-ipq817x-apss-regulator", ++ .data = &ipq817x_cpr_apss ++ }, ++ { ++ .compatible = "qcom,cpr4-ipq6018-apss-regulator", ++ .data = &ipq6018_cpr_apss ++ }, ++ { ++ .compatible = "qcom,cpr4-ipq9574-apss-regulator", ++ .data = &ipq9574_cpr_apss ++ }, ++ {} ++}; ++ ++static int cpr4_apss_regulator_probe(struct platform_device *pdev) ++{ ++ struct device *dev = &pdev->dev; ++ struct cpr3_controller *ctrl; ++ const struct of_device_id *match; ++ struct cpr4_reg_data *cpr_data; ++ int i, rc; ++ ++ if (!dev->of_node) { ++ dev_err(dev, "Device tree node is missing\n"); ++ return -EINVAL; ++ } ++ ++ ctrl = devm_kzalloc(dev, sizeof(*ctrl), GFP_KERNEL); ++ if (!ctrl) ++ return -ENOMEM; ++ ++ match = of_match_device(cpr4_regulator_match_table, &pdev->dev); ++ if (!match) ++ return -ENODEV; ++ ++ cpr_data = (struct cpr4_reg_data *)match->data; ++ g_valid_fuse_count = cpr_data->cpr_valid_fuse_count; ++ dev_info(dev, "CPR valid fuse count: %d\n", g_valid_fuse_count); ++ ctrl->cpr_clock_rate = cpr_data->cpr_clk_rate; ++ ++ ctrl->dev = dev; ++ /* Set to false later if anything precludes CPR operation. 
*/ ++ ctrl->cpr_allowed_hw = true; ++ ++ rc = of_property_read_string(dev->of_node, "qcom,cpr-ctrl-name", ++ &ctrl->name); ++ if (rc) { ++ cpr3_err(ctrl, "unable to read qcom,cpr-ctrl-name, rc=%d\n", ++ rc); ++ return rc; ++ } ++ ++ rc = cpr3_map_fuse_base(ctrl, pdev); ++ if (rc) { ++ cpr3_err(ctrl, "could not map fuse base address\n"); ++ return rc; ++ } ++ ++ rc = cpr3_read_tcsr_setting(ctrl, pdev, IPQ807x_APSS_CPR_TCSR_START, ++ IPQ807x_APSS_CPR_TCSR_END); ++ if (rc) { ++ cpr3_err(ctrl, "could not read CPR tcsr setting\n"); ++ return rc; ++ } ++ ++ rc = cpr3_allocate_threads(ctrl, 0, 0); ++ if (rc) { ++ cpr3_err(ctrl, "failed to allocate CPR thread array, rc=%d\n", ++ rc); ++ return rc; ++ } ++ ++ if (ctrl->thread_count != 1) { ++ cpr3_err(ctrl, "expected 1 thread but found %d\n", ++ ctrl->thread_count); ++ return -EINVAL; ++ } ++ ++ rc = cpr4_apss_init_controller(ctrl); ++ if (rc) { ++ if (rc != -EPROBE_DEFER) ++ cpr3_err(ctrl, "failed to initialize CPR controller parameters, rc=%d\n", ++ rc); ++ return rc; ++ } ++ ++ rc = cpr4_apss_init_thread(&ctrl->thread[0]); ++ if (rc) { ++ cpr3_err(ctrl, "thread initialization failed, rc=%d\n", rc); ++ return rc; ++ } ++ ++ for (i = 0; i < ctrl->thread[0].vreg_count; i++) { ++ ctrl->thread[0].vreg[i].cpr4_regulator_data = cpr_data; ++ rc = cpr4_apss_init_regulator(&ctrl->thread[0].vreg[i]); ++ if (rc) { ++ cpr3_err(&ctrl->thread[0].vreg[i], "regulator initialization failed, rc=%d\n", ++ rc); ++ return rc; ++ } ++ } ++ ++ platform_set_drvdata(pdev, ctrl); ++ ++ return cpr3_regulator_register(pdev, ctrl); ++} ++ ++static int cpr4_apss_regulator_remove(struct platform_device *pdev) ++{ ++ struct cpr3_controller *ctrl = platform_get_drvdata(pdev); ++ ++ return cpr3_regulator_unregister(ctrl); ++} ++ ++static struct platform_driver cpr4_apss_regulator_driver = { ++ .driver = { ++ .name = "qcom,cpr4-apss-regulator", ++ .of_match_table = cpr4_regulator_match_table, ++ .owner = THIS_MODULE, ++ }, ++ .probe = cpr4_apss_regulator_probe, ++ .remove = cpr4_apss_regulator_remove, ++ .suspend = cpr4_apss_regulator_suspend, ++ .resume = cpr4_apss_regulator_resume, ++}; ++ ++static int cpr4_regulator_init(void) ++{ ++ return platform_driver_register(&cpr4_apss_regulator_driver); ++} ++ ++static void cpr4_regulator_exit(void) ++{ ++ platform_driver_unregister(&cpr4_apss_regulator_driver); ++} ++ ++MODULE_DESCRIPTION("CPR4 APSS regulator driver"); ++MODULE_LICENSE("GPL v2"); ++ ++arch_initcall(cpr4_regulator_init); ++module_exit(cpr4_regulator_exit); diff --git a/target/linux/qualcommax/patches-6.6/0902-arm64-dts-ipq8074-add-label-to-clocks.patch b/target/linux/qualcommax/patches-6.6/0902-arm64-dts-ipq8074-add-label-to-clocks.patch new file mode 100644 index 000000000..9b8b4df12 --- /dev/null +++ b/target/linux/qualcommax/patches-6.6/0902-arm64-dts-ipq8074-add-label-to-clocks.patch @@ -0,0 +1,24 @@ +From 6baf7e4abcea6f7ac21eccf072a20078b39d064c Mon Sep 17 00:00:00 2001 +From: Robert Marko +Date: Wed, 9 Feb 2022 23:13:26 +0100 +Subject: [PATCH] arm64: dts: ipq8074: add label to clocks + +Add label to clocks node as that makes it easy to add the NSS fixed +clocks that are required in their DTSI. 
+ +Signed-off-by: Robert Marko +--- + arch/arm64/boot/dts/qcom/ipq8074.dtsi | 2 +- + 1 file changed, 1 insertion(+), 1 deletion(-) + +--- a/arch/arm64/boot/dts/qcom/ipq8074.dtsi ++++ b/arch/arm64/boot/dts/qcom/ipq8074.dtsi +@@ -15,7 +15,7 @@ + compatible = "qcom,ipq8074"; + interrupt-parent = <&intc>; + +- clocks { ++ clocks: clocks { + sleep_clk: sleep_clk { + compatible = "fixed-clock"; + clock-frequency = <32768>; diff --git a/target/linux/qualcommax/patches-6.6/0903-psci-dont-advertise-OSI-support-for-IPQ6018.patch b/target/linux/qualcommax/patches-6.6/0903-psci-dont-advertise-OSI-support-for-IPQ6018.patch new file mode 100644 index 000000000..5fcb90098 --- /dev/null +++ b/target/linux/qualcommax/patches-6.6/0903-psci-dont-advertise-OSI-support-for-IPQ6018.patch @@ -0,0 +1,40 @@ +From 563db68137475d011b355bfe674d1b7a24778091 Mon Sep 17 00:00:00 2001 +From: Robert Marko +Date: Sat, 8 Oct 2022 22:26:31 +0200 +Subject: [PATCH] psci: dont advertise OSI support for IPQ6018 + +Some older IPQ60xx SoC series boards ship with TrustZone/QSEE firmware +older than TZ.WNS.5.1-00084 which will advertise OSI[1] but are broken +and trying to use OSI will cause the board to hang until WDT kicks in. + +So workaround it by checking for SoC compatible and returning false so +OSI is not used. + +[1] https://www.spinics.net/lists/linux-arm-msm/msg79916.html + +Signed-off-by: Robert Marko +--- + drivers/firmware/psci/psci.c | 12 ++++++++++++ + 1 file changed, 12 insertions(+) + +--- a/drivers/firmware/psci/psci.c ++++ b/drivers/firmware/psci/psci.c +@@ -87,6 +87,18 @@ static inline bool psci_has_ext_power_st + + bool psci_has_osi_support(void) + { ++ /* ++ * Some older IPQ60xx SoC series boards ship with ++ * TrustZone/QSEE firmware older than TZ.WNS.5.1-00084 ++ * which will advertise OSI but is broken and trying ++ * to use OSI will cause the board to hang until WDT ++ * kicks in. ++ * So workaround it by checking for SoC compatible ++ * and returning false so OSI is not used. ++ */ ++ if (of_machine_is_compatible("qcom,ipq6018")) ++ return false; ++ + return psci_cpu_suspend_feature & PSCI_1_0_OS_INITIATED; + } + diff --git a/target/linux/qualcommax/patches-6.6/0904-clk-qcom-ipq6018-workaround-networking-clock-parenti.patch b/target/linux/qualcommax/patches-6.6/0904-clk-qcom-ipq6018-workaround-networking-clock-parenti.patch new file mode 100644 index 000000000..2e7a56f72 --- /dev/null +++ b/target/linux/qualcommax/patches-6.6/0904-clk-qcom-ipq6018-workaround-networking-clock-parenti.patch @@ -0,0 +1,109 @@ +From 0c5b5243ad55ae744e790ba90c5ad37a93bd1377 Mon Sep 17 00:00:00 2001 +From: Robert Marko +Date: Tue, 11 Oct 2022 23:38:45 +0200 +Subject: [PATCH] clk: qcom: ipq6018: workaround networking clock parenting + +Currently, networking clocks are only looked up by fw_name however, +these are registered and setup by SSDK and are not available to the +GCC driver at all, so work around that by providing a global name +fallback. + +While we are here, provide global fallback for bias_pll_cc_clk and +bias_pll_nss_noc_clk as well as these are fixed clocks also not available +to the driver. 
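For readers less familiar with the clock framework, here is a minimal sketch of the fallback this patch relies on (the array name example_parents is invented for illustration and is not taken from gcc-ipq6018.c): a struct clk_parent_data entry is first resolved through the consumer node's "clocks"/"clock-names" properties via .fw_name, and only if that fails does the framework fall back to a global lookup by the legacy clock name in .name, which is what lets the GCC driver find the SSDK-registered clocks.

#include <linux/clk-provider.h>

/* Illustrative only, not code from this patch. */
static const struct clk_parent_data example_parents[] = {
	/* resolved through the node's "clocks"/"clock-names" properties */
	{ .fw_name = "xo" },
	/*
	 * No DT link exists for this clock (it is registered by the SSDK),
	 * so additionally allow a global lookup by its registered name.
	 */
	{ .fw_name = "uniphy0_gcc_rx_clk", .name = "uniphy0_gcc_rx_clk" },
};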
+ +Signed-off-by: Robert Marko +--- + drivers/clk/qcom/gcc-ipq6018.c | 39 +++++++++++++++++----------------- + 1 file changed, 19 insertions(+), 20 deletions(-) + +--- a/drivers/clk/qcom/gcc-ipq6018.c ++++ b/drivers/clk/qcom/gcc-ipq6018.c +@@ -360,7 +360,7 @@ static const struct freq_tbl ftbl_nss_pp + + static const struct clk_parent_data gcc_xo_bias_gpll0_gpll4_nss_ubi32[] = { + { .fw_name = "xo" }, +- { .fw_name = "bias_pll_cc_clk" }, ++ { .fw_name = "bias_pll_cc_clk", .name = "bias_pll_cc_clk" }, + { .hw = &gpll0.clkr.hw }, + { .hw = &gpll4.clkr.hw }, + { .hw = &nss_crypto_pll.clkr.hw }, +@@ -526,12 +526,12 @@ static const struct freq_tbl ftbl_nss_po + static const struct clk_parent_data + gcc_xo_uniphy0_rx_tx_uniphy1_rx_tx_ubi32_bias[] = { + { .fw_name = "xo" }, +- { .fw_name = "uniphy0_gcc_rx_clk" }, +- { .fw_name = "uniphy0_gcc_tx_clk" }, +- { .fw_name = "uniphy1_gcc_rx_clk" }, +- { .fw_name = "uniphy1_gcc_tx_clk" }, ++ { .fw_name = "uniphy0_gcc_rx_clk", .name = "uniphy0_gcc_rx_clk" }, ++ { .fw_name = "uniphy0_gcc_tx_clk", .name = "uniphy0_gcc_tx_clk" }, ++ { .fw_name = "uniphy1_gcc_rx_clk", .name = "uniphy1_gcc_rx_clk" }, ++ { .fw_name = "uniphy1_gcc_tx_clk", .name = "uniphy1_gcc_tx_clk" }, + { .hw = &ubi32_pll.clkr.hw }, +- { .fw_name = "bias_pll_cc_clk" }, ++ { .fw_name = "bias_pll_cc_clk", .name = "bias_pll_cc_clk" }, + }; + + static const struct parent_map +@@ -573,12 +573,12 @@ static const struct freq_tbl ftbl_nss_po + static const struct clk_parent_data + gcc_xo_uniphy0_tx_rx_uniphy1_tx_rx_ubi32_bias[] = { + { .fw_name = "xo" }, +- { .fw_name = "uniphy0_gcc_tx_clk" }, +- { .fw_name = "uniphy0_gcc_rx_clk" }, +- { .fw_name = "uniphy1_gcc_tx_clk" }, +- { .fw_name = "uniphy1_gcc_rx_clk" }, ++ { .fw_name = "uniphy0_gcc_tx_clk", .name = "uniphy0_gcc_tx_clk" }, ++ { .fw_name = "uniphy0_gcc_rx_clk", .name = "uniphy0_gcc_rx_clk" }, ++ { .fw_name = "uniphy1_gcc_tx_clk", .name = "uniphy1_gcc_tx_clk" }, ++ { .fw_name = "uniphy1_gcc_rx_clk", .name = "uniphy1_gcc_rx_clk" }, + { .hw = &ubi32_pll.clkr.hw }, +- { .fw_name = "bias_pll_cc_clk" }, ++ { .fw_name = "bias_pll_cc_clk", .name = "bias_pll_cc_clk" }, + }; + + static const struct parent_map +@@ -714,10 +714,10 @@ static const struct freq_tbl ftbl_nss_po + + static const struct clk_parent_data gcc_xo_uniphy0_rx_tx_ubi32_bias[] = { + { .fw_name = "xo" }, +- { .fw_name = "uniphy0_gcc_rx_clk" }, +- { .fw_name = "uniphy0_gcc_tx_clk" }, ++ { .fw_name = "uniphy0_gcc_rx_clk", .name = "uniphy0_gcc_rx_clk" }, ++ { .fw_name = "uniphy0_gcc_tx_clk", .name = "uniphy0_gcc_tx_clk" }, + { .hw = &ubi32_pll.clkr.hw }, +- { .fw_name = "bias_pll_cc_clk" }, ++ { .fw_name = "bias_pll_cc_clk", .name = "bias_pll_cc_clk" }, + }; + + static const struct parent_map gcc_xo_uniphy0_rx_tx_ubi32_bias_map[] = { +@@ -750,10 +750,10 @@ static const struct freq_tbl ftbl_nss_po + + static const struct clk_parent_data gcc_xo_uniphy0_tx_rx_ubi32_bias[] = { + { .fw_name = "xo" }, +- { .fw_name = "uniphy0_gcc_tx_clk" }, +- { .fw_name = "uniphy0_gcc_rx_clk" }, ++ { .fw_name = "uniphy0_gcc_tx_clk", .name = "uniphy0_gcc_tx_clk" }, ++ { .fw_name = "uniphy0_gcc_rx_clk", .name = "uniphy0_gcc_rx_clk" }, + { .hw = &ubi32_pll.clkr.hw }, +- { .fw_name = "bias_pll_cc_clk" }, ++ { .fw_name = "bias_pll_cc_clk", .name = "bias_pll_cc_clk" }, + }; + + static const struct parent_map gcc_xo_uniphy0_tx_rx_ubi32_bias_map[] = { +@@ -1899,12 +1899,11 @@ static const struct freq_tbl ftbl_ubi32_ + { } + }; + +-static const struct clk_parent_data +- gcc_xo_gpll0_gpll2_bias_pll_nss_noc_clk[] = { 
++static const struct clk_parent_data gcc_xo_gpll0_gpll2_bias_pll_nss_noc_clk[] = { + { .fw_name = "xo" }, + { .hw = &gpll0.clkr.hw }, + { .hw = &gpll2.clkr.hw }, +- { .fw_name = "bias_pll_nss_noc_clk" }, ++ { .fw_name = "bias_pll_nss_noc_clk", .name = "bias_pll_nss_noc_clk" }, + }; + + static const struct parent_map gcc_xo_gpll0_gpll2_bias_pll_nss_noc_clk_map[] = { diff --git a/target/linux/qualcommax/patches-6.6/0905-remoteproc-q6v5_wcss-change-ssr-name-for-ipq6018-wif.patch b/target/linux/qualcommax/patches-6.6/0905-remoteproc-q6v5_wcss-change-ssr-name-for-ipq6018-wif.patch new file mode 100644 index 000000000..db20d3f2c --- /dev/null +++ b/target/linux/qualcommax/patches-6.6/0905-remoteproc-q6v5_wcss-change-ssr-name-for-ipq6018-wif.patch @@ -0,0 +1,40 @@ +From 505f9c8653fc218ca47a153ec58ebc16bef5502f Mon Sep 17 00:00:00 2001 +From: Mantas Pucka +Date: Tue, 16 Jan 2024 10:42:40 +0200 +Subject: [PATCH 16/19] remoteproc: q6v5_wcss: change ssr name for ipq6018 wifi + subsystem + +On IPQ6018 this string ends up being sent to RPM when remoteproc stops +(on crash or rmmod ath11k). "q6wcss" is not a valid name (not found by +`strings` in rpm.mbn), so this causes RPM do 'something' (presumably crash) +causing a system reboot followed by hang in XBL, with no WDT running. +Let's change ssr_name to a more sensible 'wcnss', that does not cause such +issues. + +Signed-off-by: Mantas Pucka +--- + drivers/remoteproc/qcom_q6v5_wcss.c | 6 +++--- + 1 file changed, 3 insertions(+), 3 deletions(-) + +--- a/drivers/remoteproc/qcom_q6v5_wcss.c ++++ b/drivers/remoteproc/qcom_q6v5_wcss.c +@@ -1142,8 +1142,8 @@ static int q6v5_wcss_probe(struct platfo + if (ret) + goto free_rproc; + +- qcom_add_glink_subdev(rproc, &wcss->glink_subdev, "q6wcss"); +- qcom_add_ssr_subdev(rproc, &wcss->ssr_subdev, "q6wcss"); ++ qcom_add_glink_subdev(rproc, &wcss->glink_subdev, desc->ssr_name); ++ qcom_add_ssr_subdev(rproc, &wcss->ssr_subdev, desc->ssr_name); + + if (desc->ssctl_id) + wcss->sysmon = qcom_add_sysmon_subdev(rproc, +@@ -1198,7 +1198,7 @@ static const struct wcss_data wcss_ipq60 + .aon_reset_required = true, + .wcss_q6_reset_required = true, + .bcr_reset_required = false, +- .ssr_name = "q6wcss", ++ .ssr_name = "wcnss", + .ops = &q6v5_wcss_ipq8074_ops, + .requires_force_stop = true, + .need_mem_protection = true, diff --git a/target/linux/qualcommax/patches-6.6/0906-arm64-dts-qcom-ipq6018-add-wifi-node.patch b/target/linux/qualcommax/patches-6.6/0906-arm64-dts-qcom-ipq6018-add-wifi-node.patch new file mode 100644 index 000000000..f4968f1a4 --- /dev/null +++ b/target/linux/qualcommax/patches-6.6/0906-arm64-dts-qcom-ipq6018-add-wifi-node.patch @@ -0,0 +1,120 @@ +From 153c74fc80b9f33ed1a50d7790bf6979fdceb370 Mon Sep 17 00:00:00 2001 +From: Mantas Pucka +Date: Tue, 16 Jan 2024 11:41:06 +0200 +Subject: [PATCH 19/19] arm64: dts: qcom: ipq6018: add wifi node + +IPQ6018 has a AHB based Q6v5 802.11ax radios that are supported +by the ath11k. + +Add the required DT node to enable the built-in radios. 
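Since the node added below only references the remoteproc through the qcom,rproc phandle, the following hedged sketch shows how an AHB WLAN driver can resolve that phandle at probe time; the helper name example_get_wcss_rproc is invented for illustration and is not part of this patch or of ath11k.

#include <linux/err.h>
#include <linux/of.h>
#include <linux/remoteproc.h>

static struct rproc *example_get_wcss_rproc(struct device_node *np)
{
	phandle rproc_phandle;
	struct rproc *rproc;

	/* "qcom,rproc" holds a phandle to the q6v5_wcss remoteproc node */
	if (of_property_read_u32(np, "qcom,rproc", &rproc_phandle))
		return ERR_PTR(-ENOENT);

	rproc = rproc_get_by_phandle(rproc_phandle);
	if (!rproc)
		return ERR_PTR(-EPROBE_DEFER);

	return rproc;
}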
+ +Signed-off-by: Mantas Pucka +--- + arch/arm64/boot/dts/qcom/ipq6018.dtsi | 96 +++++++++++++++++++++++++++++++++++ + 1 file changed, 96 insertions(+) + +--- a/arch/arm64/boot/dts/qcom/ipq6018.dtsi ++++ b/arch/arm64/boot/dts/qcom/ipq6018.dtsi +@@ -809,6 +809,102 @@ + }; + }; + ++ wifi: wifi@c000000 { ++ compatible = "qcom,ipq6018-wifi"; ++ reg = <0x0 0xc000000 0x0 0x1000000>; ++ qcom,rproc = <&q6v5_wcss>; ++ interrupts = , ++ , ++ , ++ , ++ , ++ , ++ , ++ , ++ , ++ , ++ , ++ , ++ , ++ , ++ , ++ , ++ , ++ , ++ , ++ , ++ , ++ , ++ , ++ , ++ , ++ , ++ , ++ , ++ , ++ , ++ , ++ , ++ , ++ , ++ , ++ , ++ , ++ , ++ , ++ , ++ , ++ , ++ , ++ , ++ , ++ , ++ , ++ , ++ , ++ , ++ , ++ ; ++ interrupt-names = "misc-pulse1", "misc-latch", "sw-exception", ++ "watchdog", "ce0", "ce1", "ce2", "ce3", "ce4", ++ "ce5", "ce6", "ce7", "ce8", "ce9", "ce10", ++ "ce11", "host2wbm-desc-feed", ++ "host2reo-re-injection", "host2reo-command", ++ "host2rxdma-monitor-ring3", ++ "host2rxdma-monitor-ring2", ++ "host2rxdma-monitor-ring1", ++ "reo2ost-exception", "wbm2host-rx-release", ++ "reo2host-status", ++ "reo2host-destination-ring4", ++ "reo2host-destination-ring3", ++ "reo2host-destination-ring2", ++ "reo2host-destination-ring1", ++ "rxdma2host-monitor-destination-mac3", ++ "rxdma2host-monitor-destination-mac2", ++ "rxdma2host-monitor-destination-mac1", ++ "ppdu-end-interrupts-mac3", ++ "ppdu-end-interrupts-mac2", ++ "ppdu-end-interrupts-mac1", ++ "rxdma2host-monitor-status-ring-mac3", ++ "rxdma2host-monitor-status-ring-mac2", ++ "rxdma2host-monitor-status-ring-mac1", ++ "host2rxdma-host-buf-ring-mac3", ++ "host2rxdma-host-buf-ring-mac2", ++ "host2rxdma-host-buf-ring-mac1", ++ "rxdma2host-destination-ring-mac3", ++ "rxdma2host-destination-ring-mac2", ++ "rxdma2host-destination-ring-mac1", ++ "host2tcl-input-ring4", ++ "host2tcl-input-ring3", ++ "host2tcl-input-ring2", ++ "host2tcl-input-ring1", ++ "wbm2host-tx-completions-ring3", ++ "wbm2host-tx-completions-ring2", ++ "wbm2host-tx-completions-ring1", ++ "tcl2host-status-ring"; ++ status = "disabled"; ++ }; ++ + q6v5_wcss: remoteproc@cd00000 { + compatible = "qcom,ipq6018-wcss-pil"; + reg = <0x0 0x0cd00000 0x0 0x4040>, diff --git a/target/linux/qualcommax/patches-6.6/0907-soc-qcom-fix-smp2p-ack-on-ipq6018.patch b/target/linux/qualcommax/patches-6.6/0907-soc-qcom-fix-smp2p-ack-on-ipq6018.patch new file mode 100644 index 000000000..094442a59 --- /dev/null +++ b/target/linux/qualcommax/patches-6.6/0907-soc-qcom-fix-smp2p-ack-on-ipq6018.patch @@ -0,0 +1,53 @@ +From d93936f175bd914067df8f63f5fbe6e3b77bb4d2 Mon Sep 17 00:00:00 2001 +From: Mantas Pucka +Date: Tue, 23 May 2023 14:46:28 +0300 +Subject: [PATCH 11/19] soc: qcom: fix smp2p ack on ipq6018 + +IPQ6018 seem to need different ack mechanism for smp2p messaging. This +fixes q6v5_wcss remoteproc firmware reloading. Without this first load +is OK, but subsequent loads would hang and fail to complete. 
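The change itself is small; as a condensed sketch of the idea (using a made-up helper name, not the real smp2p code), the new per-entry DT flag simply forces the SSR ack on every inbound interrupt instead of only after a detected restart:

#include <linux/of.h>

static bool example_needs_forced_ssr_ack(const struct device_node *node)
{
	/* property introduced by this patch for the IPQ6018 wcss entry */
	return of_property_read_bool(node, "qcom,smp2p-feature-ssr-ack");
}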
+ +Signed-off-by: Mantas Pucka +--- + arch/arm64/boot/dts/qcom/ipq6018.dtsi | 1 + + drivers/soc/qcom/smp2p.c | 6 +++++- + 2 files changed, 6 insertions(+), 1 deletion(-) + +--- a/arch/arm64/boot/dts/qcom/ipq6018.dtsi ++++ b/arch/arm64/boot/dts/qcom/ipq6018.dtsi +@@ -1157,6 +1157,7 @@ + + wcss_smp2p_out: master-kernel { + qcom,entry-name = "master-kernel"; ++ qcom,smp2p-feature-ssr-ack; + #qcom,smem-state-cells = <1>; + }; + +--- a/drivers/soc/qcom/smp2p.c ++++ b/drivers/soc/qcom/smp2p.c +@@ -158,6 +158,8 @@ struct qcom_smp2p { + + struct list_head inbound; + struct list_head outbound; ++ ++ bool need_ssr_ack; + }; + + static void qcom_smp2p_kick(struct qcom_smp2p *smp2p) +@@ -306,7 +308,7 @@ static irqreturn_t qcom_smp2p_intr(int i + ack_restart = qcom_smp2p_check_ssr(smp2p); + qcom_smp2p_notify_in(smp2p); + +- if (ack_restart) ++ if (ack_restart || smp2p->need_ssr_ack) + qcom_smp2p_do_ssr_ack(smp2p); + } + +@@ -427,6 +429,7 @@ static int qcom_smp2p_outbound_entry(str + + /* Make the logical entry reference the physical value */ + entry->value = &out->entries[out->valid_entries].value; ++ smp2p->need_ssr_ack = of_property_read_bool(node, "qcom,smp2p-feature-ssr-ack"); + + out->valid_entries++; + diff --git a/target/linux/qualcommax/patches-6.6/0908-remoteproc-qcom_q6v5_wcss-add-optional-qdss_at-clock.patch b/target/linux/qualcommax/patches-6.6/0908-remoteproc-qcom_q6v5_wcss-add-optional-qdss_at-clock.patch new file mode 100644 index 000000000..309c42473 --- /dev/null +++ b/target/linux/qualcommax/patches-6.6/0908-remoteproc-qcom_q6v5_wcss-add-optional-qdss_at-clock.patch @@ -0,0 +1,55 @@ +From 87dbcc69a7e3fe6ccddf4fe9bdbf51330f5e4a77 Mon Sep 17 00:00:00 2001 +From: Mantas Pucka +Date: Tue, 23 Jan 2024 11:04:04 +0200 +Subject: [PATCH] remoteproc: qcom_q6v5_wcss: add optional qdss_at clock + +IPQ6018 needs QDSS_AT clock enabled when loading wifi. Optionally enable it +when provided by DT. 
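The patch below fetches the clock with devm_clk_get() and treats any failure as "no clock". A functionally similar alternative, shown here only as a hedged sketch assuming the same "qdss" clock-names entry (it is not what the patch does), is devm_clk_get_optional(), which returns NULL when the clock is simply absent so the later clk_prepare_enable()/clk_disable_unprepare() calls become no-ops:

#include <linux/clk.h>
#include <linux/err.h>

static int example_get_qdss_clk(struct device *dev, struct clk **qdss_clk)
{
	*qdss_clk = devm_clk_get_optional(dev, "qdss");
	if (IS_ERR(*qdss_clk))
		return PTR_ERR(*qdss_clk);	/* real errors still propagate */

	/* *qdss_clk may be NULL here; the clk API treats NULL as a no-op */
	return 0;
}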
+ +Signed-off-by: Mantas Pucka +--- + drivers/remoteproc/qcom_q6v5_wcss.c | 24 ++++++++++++++++++++++++ + 1 file changed, 24 insertions(+) + +--- a/drivers/remoteproc/qcom_q6v5_wcss.c ++++ b/drivers/remoteproc/qcom_q6v5_wcss.c +@@ -120,6 +120,7 @@ struct q6v5_wcss { + struct clk *qdsp6ss_core_gfmux; + struct clk *lcc_bcr_sleep; + struct clk *prng_clk; ++ struct clk *qdss_clk; + struct regulator *cx_supply; + struct qcom_sysmon *sysmon; + +@@ -259,6 +260,9 @@ static int q6v5_wcss_start(struct rproc + return ret; + } + ++ if (wcss->qdss_clk) ++ clk_prepare_enable(wcss->qdss_clk); ++ + qcom_q6v5_prepare(&wcss->q6v5); + + if (wcss->need_mem_protection) { +@@ -772,6 +776,8 @@ static int q6v5_wcss_stop(struct rproc * + } + + pas_done: ++ if (wcss->qdss_clk) ++ clk_disable_unprepare(wcss->qdss_clk); + clk_disable_unprepare(wcss->prng_clk); + qcom_q6v5_unprepare(&wcss->q6v5); + +@@ -980,6 +986,12 @@ static int ipq_init_clock(struct q6v5_wc + dev_err(wcss->dev, "Failed to get prng clock\n"); + return ret; + } ++ ++ wcss->qdss_clk = devm_clk_get(wcss->dev, "qdss"); ++ if (IS_ERR(wcss->qdss_clk)) { ++ wcss->qdss_clk = NULL; ++ } ++ + return 0; + } + diff --git a/target/linux/qualcommax/patches-6.6/0909-arm64-dts-qcom-ipq6018-assign-QDSS_AT-clock-to-wifi-.patch b/target/linux/qualcommax/patches-6.6/0909-arm64-dts-qcom-ipq6018-assign-QDSS_AT-clock-to-wifi-.patch new file mode 100644 index 000000000..a0528e7f5 --- /dev/null +++ b/target/linux/qualcommax/patches-6.6/0909-arm64-dts-qcom-ipq6018-assign-QDSS_AT-clock-to-wifi-.patch @@ -0,0 +1,26 @@ +From 71f30e25d21ae4981ecef6653a4ba7dfeb80db7b Mon Sep 17 00:00:00 2001 +From: Mantas Pucka +Date: Tue, 23 Jan 2024 11:04:57 +0200 +Subject: [PATCH] arm64: dts: qcom: ipq6018: assign QDSS_AT clock to wifi remoteproc + +IPQ6018 needs to enable QDSS_AT clock when loading wifi firmware, +add it to wifi remoteproc clock list. + +Signed-off-by: Mantas Pucka +--- + arch/arm64/boot/dts/qcom/ipq6018.dtsi | 15 ++++++++------- + 1 file changed, 9 insertions(+), 8 deletions(-) + +--- a/arch/arm64/boot/dts/qcom/ipq6018.dtsi ++++ b/arch/arm64/boot/dts/qcom/ipq6018.dtsi +@@ -930,8 +930,8 @@ + "wcss_reset", + "wcss_q6_reset"; + +- clocks = <&gcc GCC_PRNG_AHB_CLK>; +- clock-names = "prng"; ++ clocks = <&gcc GCC_PRNG_AHB_CLK>, <&gcc GCC_QDSS_AT_CLK>; ++ clock-names = "prng", "qdss" ; + + qcom,halt-regs = <&tcsr 0x18000 0x1b000 0xe000>; + diff --git a/target/linux/qualcommax/patches-6.6/0910-arm64-dts-qcom-ipq6018-change-voltage-to-perf-levels.patch b/target/linux/qualcommax/patches-6.6/0910-arm64-dts-qcom-ipq6018-change-voltage-to-perf-levels.patch new file mode 100644 index 000000000..25fa31367 --- /dev/null +++ b/target/linux/qualcommax/patches-6.6/0910-arm64-dts-qcom-ipq6018-change-voltage-to-perf-levels.patch @@ -0,0 +1,65 @@ +From c67a1814bb1d0df290cf1e3f9c966f04aa41b9b9 Mon Sep 17 00:00:00 2001 +From: Mantas Pucka +Date: Tue, 30 Jan 2024 12:43:56 +0200 +Subject: [PATCH] arm64: dts: qcom: ipq6018: change voltage to perf levels for + CPR4 driver + +Current CPR4 driver requires opp-microvolt to be an abstract +performance level instead of actual voltage level. 
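To make the convention concrete, here is a minimal consumer-side sketch, under the assumption that the CPR4 regulator added earlier in this series interprets the requested value as a corner selector rather than microvolts (example_select_corner is an invented name): the OPP core forwards opp-microvolt to the CPU supply, so plain integers such as the levels 1..6 in the hunks below end up selecting corners instead of physical voltages.

#include <linux/regulator/consumer.h>

static int example_select_corner(struct regulator *cpr_vreg, int level)
{
	/* min == max: request exactly this abstract level/corner */
	return regulator_set_voltage(cpr_vreg, level, level);
}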
+ +Signed-off-by: Mantas Pucka +--- + arch/arm64/boot/dts/qcom/ipq6018.dtsi | 12 ++++++------ + 1 file changed, 6 insertions(+), 6 deletions(-) + +--- a/arch/arm64/boot/dts/qcom/ipq6018.dtsi ++++ b/arch/arm64/boot/dts/qcom/ipq6018.dtsi +@@ -107,42 +107,42 @@ + + opp-864000000 { + opp-hz = /bits/ 64 <864000000>; +- opp-microvolt = <725000>; ++ opp-microvolt = <1>; + opp-supported-hw = <0xf>; + clock-latency-ns = <200000>; + }; + + opp-1056000000 { + opp-hz = /bits/ 64 <1056000000>; +- opp-microvolt = <787500>; ++ opp-microvolt = <2>; + opp-supported-hw = <0xf>; + clock-latency-ns = <200000>; + }; + + opp-1320000000 { + opp-hz = /bits/ 64 <1320000000>; +- opp-microvolt = <862500>; ++ opp-microvolt = <3>; + opp-supported-hw = <0x3>; + clock-latency-ns = <200000>; + }; + + opp-1440000000 { + opp-hz = /bits/ 64 <1440000000>; +- opp-microvolt = <925000>; ++ opp-microvolt = <4>; + opp-supported-hw = <0x3>; + clock-latency-ns = <200000>; + }; + + opp-1608000000 { + opp-hz = /bits/ 64 <1608000000>; +- opp-microvolt = <987500>; ++ opp-microvolt = <5>; + opp-supported-hw = <0x1>; + clock-latency-ns = <200000>; + }; + + opp-1800000000 { + opp-hz = /bits/ 64 <1800000000>; +- opp-microvolt = <1062500>; ++ opp-microvolt = <6>; + opp-supported-hw = <0x1>; + clock-latency-ns = <200000>; + };
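One detail worth spelling out from the table above is the opp-supported-hw masks (0xf, 0x3, 0x1): under the generic OPP core rules, an OPP is usable when the bitwise AND of its mask and the version bit supplied by the platform (typically derived from a speed-bin fuse) is non-zero. The tiny helper below only illustrates that matching rule and is not code from this patch.

#include <linux/types.h>

static bool example_opp_allowed(u32 opp_supported_hw, u32 speed_bin_bit)
{
	/*
	 * e.g. a fastest-bin part reporting 0x1 matches the 0xf, 0x3 and
	 * 0x1 entries, while a slower bin reporting 0x8 matches only 0xf.
	 */
	return (opp_supported_hw & speed_bin_bit) != 0;
}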